diff options
152 files changed, 14152 insertions, 0 deletions
diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..fffadb0 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,26 @@ + +The following licence applies to all files in this and subdirectories. Licences +are included in individual source files where appropriate, and if it differs +from this text, it supersedes this. Any file that does not have licence text +defaults to being covered by this text; not all files support the addition of +licenses. + +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/LICENSE b/conductor/LICENSE new file mode 100644 index 0000000..fffadb0 --- /dev/null +++ b/conductor/LICENSE @@ -0,0 +1,26 @@ + +The following licence applies to all files in this and subdirectories. Licences +are included in individual source files where appropriate, and if it differs +from this text, it supersedes this. Any file that does not have licence text +defaults to being covered by this text; not all files support the addition of +licenses. + +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/README.md b/conductor/README.md new file mode 100644 index 0000000..ba0316d --- /dev/null +++ b/conductor/README.md @@ -0,0 +1,16 @@ +# Conductor + +OF-HAS is the implementation of the ONAP Homing Service. The formal project name in ONAP is *OF-HAS*. The informal name for the project is *Conductor* (inherited from the seed-code), which is interchangeably used through the project. + +Given the description of what needs to be deployed (demands) and the placement requirements (constraints), Conductor determines placement candidates that meet all constraints while optimizing the resource usage of the AIC infrastructure. A customer request may be satisfied by deploying new VMs in AIC (AIC inventory) or by using existing service instances with enough remaining capacity (service inventory). + +From a canonical standpoint, Conductor is known as a *homing service*, in the same way OpenStack Heat is an orchestration service, or Nova is a compute service. 
+ +* License: Licensed under the Apache License, Version 2.0 +* [PyPI]() - package installation +* [Python/Linux Distribution Notes](/doc/distribution/README.md) +* [Conductor Template Guide](/doc/template/README.md) +* [Example Templates](/doc/examples/README.md) +* [Homing API](/doc/api/README.md) +* [Bugs](https://jira.onap.org/projects/OPTFRA/summary) - issue tracking +* [Source](https://gerrit.onap.org/r/optf/has) diff --git a/conductor/README.rst b/conductor/README.rst new file mode 100644 index 0000000..c90eff6 --- /dev/null +++ b/conductor/README.rst @@ -0,0 +1,26 @@ +========= +Conductor +========= + +OF-HAS is the implementation of the ONAP Homing Service. The formal project name in ONAP is *OF-HAS*. The informal name for the project is *Conductor* (inherited from the seed-code), which is interchangeably used through the project. + +Given the description of what needs to be deployed (demands) and the placement requirements (constraints), Conductor determines placement candidates that meet all constraints while optimizing the resource usage of the AIC infrastructure. A customer request may be satisfied by deploying new VMs in AIC (AIC inventory) or by using existing service instances with enough remaining capacity (service inventory). + +From a canonical standpoint, Conductor is known as a *homing service*, in the same way OpenStack Heat is an orchestration service, or Nova is a compute service. + +* License: Licensed under the Apache License, Version 2.0 +* `PyPI`_ - package installation +* `Python/Linux Distribution Notes`_ +* `Conductor Template Guide`_ +* `Example Templates`_ +* `Homing API`_ +* `Bugs`_ - issue tracking +* `Source`_ + +.. _PyPI: +.. _Python/Linux Distribution Notes: /doc/distribution/README.md +.. _Conductor Template Guide: /doc/template/README.md +.. _Example Templates: /examples/README.md +.. _Homing API: /doc/api/README.md +.. _Bugs: https://jira.onap.org/projects/OPTFRA/summary +.. 
_Source: https://gerrit.onap.org/r/optf/has diff --git a/conductor/assembly.xml b/conductor/assembly.xml new file mode 100644 index 0000000..b190852 --- /dev/null +++ b/conductor/assembly.xml @@ -0,0 +1,36 @@ +<!-- + Copyright (c) 2018 Intel Corporation. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at: + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +--> +<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd"> + <id>conductor</id> + <formats> + <format>zip</format> + </formats> + <fileSets> + <fileSet> + <includes> + <include>LICENSE</include> + <include>README.rst</include> + <include>conductor/**</include> + <include>doc/**</include> + <include>etc/**</include> + <include>requirements.txt</include> + </includes> + <excludes> + <exclude>**/*.pyc</exclude> + </excludes> + </fileSet> + </fileSets> + <baseDirectory>conductor</baseDirectory> +</assembly> diff --git a/conductor/conductor/__init__.py b/conductor/conductor/__init__.py new file mode 100644 index 0000000..313be31 --- /dev/null +++ b/conductor/conductor/__init__.py @@ -0,0 +1,26 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + + + +class NotImplementedError(NotImplementedError): + # FIXME(jd) This is used by WSME to return a correct HTTP code. We should + # not expose it here but wrap our methods in the API to convert it to a + # proper HTTP error. + code = 501 diff --git a/conductor/conductor/api/__init__.py b/conductor/conductor/api/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/api/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/api/app.py b/conductor/conductor/api/app.py new file mode 100644 index 0000000..70d54b5 --- /dev/null +++ b/conductor/conductor/api/app.py @@ -0,0 +1,137 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +import os +import uuid + +from oslo_config import cfg +from oslo_log import log +from paste import deploy +import pecan + +from conductor.api import hooks +from conductor.api import middleware +from conductor import service + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + +OPTS = [ + cfg.StrOpt('api_paste_config', + default="api_paste.ini", + help="Configuration file for WSGI definition of API." + ), +] + +API_OPTS = [ + cfg.BoolOpt('pecan_debug', + default=False, + help='Toggle Pecan Debug Middleware.'), + cfg.IntOpt('default_api_return_limit', + min=1, + default=100, + help='Default maximum number of items returned by API request.' + ), +] + +CONF.register_opts(OPTS) +CONF.register_opts(API_OPTS, group='api') + +# Pull in service opts. We use them here. 
+OPTS = service.OPTS +CONF.register_opts(OPTS) + +# Can call like so to force a particular config: +# conductor-api --port=8091 -- --config-file=my_config +# +# For api command-line options: +# conductor-api -- --help + + +def setup_app(pecan_config=None, conf=None): + if conf is None: + raise RuntimeError("No configuration passed") + + app_hooks = [ + hooks.ConfigHook(conf), + hooks.MessagingHook(conf), + ] + + pecan_config = pecan_config or { + "app": { + 'root': 'conductor.api.controllers.root.RootController', + 'modules': ['conductor.api'], + } + } + + pecan.configuration.set_config(dict(pecan_config), overwrite=True) + + app = pecan.make_app( + pecan_config['app']['root'], + debug=conf.api.pecan_debug, + hooks=app_hooks, + wrap_app=middleware.ParsableErrorMiddleware, + guess_content_type_from_ext=False, + default_renderer='json', + force_canonical=False, + ) + + return app + + +# pastedeploy uses ConfigParser to handle global_conf, since Python 3's +# ConfigParser doesn't allow storing objects as config values. Only strings +# are permitted. Thus, to be able to pass an object created before paste +# loads the app, we store them in a global variable. Then each loaded app +# stores it's configuration using a unique key to be concurrency safe. 
+global APPCONFIGS +APPCONFIGS = {} + + +def load_app(conf): + global APPCONFIGS + + # Build the WSGI app + cfg_file = None + cfg_path = conf.api_paste_config + if not os.path.isabs(cfg_path): + cfg_file = conf.find_file(cfg_path) + elif os.path.exists(cfg_path): + cfg_file = cfg_path + + if not cfg_file: + raise cfg.ConfigFilesNotFoundError([conf.api_paste_config]) + + configkey = str(uuid.uuid4()) + APPCONFIGS[configkey] = conf + + LOG.info("Full WSGI config used: %s" % cfg_file) + return deploy.loadapp("config:" + cfg_file, + global_conf={'configkey': configkey}) + + +def app_factory(global_config, **local_conf): + global APPCONFIGS + conf = APPCONFIGS.get(global_config.get('configkey')) + return setup_app(conf=conf) + + +def build_wsgi_app(argv=None): + return load_app(service.prepare_service(argv=argv)) diff --git a/conductor/conductor/api/app.wsgi b/conductor/conductor/api/app.wsgi new file mode 100644 index 0000000..573d3d2 --- /dev/null +++ b/conductor/conductor/api/app.wsgi @@ -0,0 +1,9 @@ +"""Use this file for deploying the API under mod_wsgi. +See http://pecan.readthedocs.org/en/latest/deployment.html for details. +""" +from conductor import service +from conductor.api import app + +# Initialize the oslo configuration library and logging +conf = service.prepare_service([]) +application = app.load_app(conf)
\ No newline at end of file diff --git a/conductor/conductor/api/controllers/__init__.py b/conductor/conductor/api/controllers/__init__.py new file mode 100644 index 0000000..4f46681 --- /dev/null +++ b/conductor/conductor/api/controllers/__init__.py @@ -0,0 +1,54 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from os import path + +from notario import exceptions +from notario.utils import forced_leaf_validator +import pecan +import six + + +# +# Error Handler +# +def error(url, msg=None, **kwargs): + """Error handler""" + if msg: + pecan.request.context['error_message'] = msg + if kwargs: + pecan.request.context['kwargs'] = kwargs + url = path.join(url, '?error_message=%s' % msg) + pecan.redirect(url, internal=True) + + +# +# Notario Custom Validators +# +@forced_leaf_validator +def string_or_dict(_object, *args): + """Validator - Must be Basestring or Dictionary""" + error_msg = 'not of type dictionary or string' + + if isinstance(_object, six.string_types): + return + if isinstance(_object, dict): + return + raise exceptions.Invalid('dict or basestring type', pair='value', + msg=None, reason=error_msg, *args) diff --git a/conductor/conductor/api/controllers/errors.py b/conductor/conductor/api/controllers/errors.py new file mode 100644 index 
0000000..6216721 --- /dev/null +++ b/conductor/conductor/api/controllers/errors.py @@ -0,0 +1,149 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +import traceback + +from oslo_log import log +import pecan +from webob.exc import status_map + +from conductor.i18n import _ + +LOG = log.getLogger(__name__) + + +def error_wrapper(func): + """Error decorator.""" + def func_wrapper(self, **kw): + """Wrapper.""" + + kwargs = func(self, **kw) + status = status_map.get(pecan.response.status_code) + message = getattr(status, 'explanation', '') + explanation = \ + pecan.request.context.get('error_message', message) + error_type = status.__name__ + title = status.title + traceback = getattr(kwargs, 'traceback', None) + + LOG.error(explanation) + + # Modeled after Heat's format + error = { + "explanation": explanation, + "code": pecan.response.status_code, + "error": { + "message": message, + "type": error_type, + }, + "title": title, + } + if traceback: + error['error']['traceback'] = traceback + return error + return func_wrapper + + +class ErrorsController(object): + """Errors Controller /errors/{error_name}""" + + @pecan.expose('json') + @error_wrapper + def schema(self, **kw): + """400""" + pecan.request.context['error_message'] = \ + 
str(pecan.request.validation_error) + pecan.response.status = 400 + return pecan.request.context.get('kwargs') + + @pecan.expose('json') + @error_wrapper + def invalid(self, **kw): + """400""" + pecan.response.status = 400 + return pecan.request.context.get('kwargs') + + @pecan.expose() + def unauthorized(self, **kw): + """401""" + # This error is terse and opaque on purpose. + # Don't give any clues to help AuthN along. + pecan.response.status = 401 + pecan.response.content_type = 'text/plain' + LOG.error('unauthorized') + traceback.print_stack() + LOG.error(self.__class__) + LOG.error(kw) + pecan.response.body = _('Authentication required') + LOG.error(pecan.response.body) + return pecan.response + + @pecan.expose('json') + @error_wrapper + def forbidden(self, **kw): + """403""" + pecan.response.status = 403 + return pecan.request.context.get('kwargs') + + @pecan.expose('json') + @error_wrapper + def not_found(self, **kw): + """404""" + pecan.response.status = 404 + return pecan.request.context.get('kwargs') + + @pecan.expose('json') + @error_wrapper + def not_allowed(self, **kw): + """405""" + kwargs = pecan.request.context.get('kwargs') + if kwargs: + allow = kwargs.get('allow', None) + if allow: + pecan.response.headers['Allow'] = allow + pecan.response.status = 405 + return kwargs + + @pecan.expose('json') + @error_wrapper + def conflict(self, **kw): + """409""" + pecan.response.status = 409 + return pecan.request.context.get('kwargs') + + @pecan.expose('json') + @error_wrapper + def server_error(self, **kw): + """500""" + pecan.response.status = 500 + return pecan.request.context.get('kwargs') + + @pecan.expose('json') + @error_wrapper + def unimplemented(self, **kw): + """501""" + pecan.response.status = 501 + return pecan.request.context.get('kwargs') + + @pecan.expose('json') + @error_wrapper + def unavailable(self, **kw): + """503""" + pecan.response.status = 503 + return pecan.request.context.get('kwargs') diff --git 
a/conductor/conductor/api/controllers/root.py b/conductor/conductor/api/controllers/root.py new file mode 100644 index 0000000..d7c4a7e --- /dev/null +++ b/conductor/conductor/api/controllers/root.py @@ -0,0 +1,64 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +import pecan + +from conductor.api.controllers import errors +from conductor.api.controllers.v1 import root as v1 + +MEDIA_TYPE_JSON = 'application/vnd.onap.has-%s+json' + + +class RootController(object): + """Root Controller /""" + + def __init__(self): + self.errors = errors.ErrorsController() + self.v1 = v1.V1Controller() + + @pecan.expose(generic=True, template='json') + def index(self): + """Catchall for all methods""" + base_url = pecan.request.application_url + available = [{'tag': 'v1', 'date': '2016-11-01T00:00:00Z', }] + collected = [version_descriptor(base_url, v['tag'], v['date']) + for v in available] + versions = {'versions': collected} + return versions + + +def version_descriptor(base_url, version, released_on): + """Version Descriptor""" + url = version_url(base_url, version) + return { + 'id': version, + 'links': [ + {'href': url, 'rel': 'self', }, + {'href': 'https://wiki.onap.org/pages/viewpage.action?pageId=16005528', + 'rel': 'describedby', 'type': 'text/html', }], + 
'media-types': [ + {'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, }], + 'status': 'EXPERIMENTAL', + 'updated': released_on, + } + + +def version_url(base_url, version_number): + """Version URL""" + return '%s/%s' % (base_url, version_number) diff --git a/conductor/conductor/api/controllers/v1/__init__.py b/conductor/conductor/api/controllers/v1/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/api/controllers/v1/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/api/controllers/v1/plans.py b/conductor/conductor/api/controllers/v1/plans.py new file mode 100644 index 0000000..fa635f7 --- /dev/null +++ b/conductor/conductor/api/controllers/v1/plans.py @@ -0,0 +1,261 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +import six +import yaml +from yaml.constructor import ConstructorError + +from notario import decorators +from notario.validators import types +from oslo_log import log +import pecan +from pecan_notario import validate + +from conductor.api.controllers import error +from conductor.api.controllers import string_or_dict +from conductor.api.controllers import validator +from conductor.i18n import _, _LI + +LOG = log.getLogger(__name__) + +CREATE_SCHEMA = ( + (decorators.optional('files'), types.dictionary), + (decorators.optional('id'), types.string), + (decorators.optional('limit'), types.integer), + (decorators.optional('name'), types.string), + ('template', string_or_dict), + (decorators.optional('template_url'), types.string), + (decorators.optional('timeout'), types.integer), +) + + +class PlansBaseController(object): + """Plans Base Controller - Common Methods""" + + def plan_link(self, plan_id): + return [ + { + "href": "%(url)s/v1/%(endpoint)s/%(id)s" % + { + 'url': pecan.request.application_url, + 'endpoint': 'plans', + 'id': plan_id, + }, + "rel": "self" + } + ] + + def plans_get(self, plan_id=None): + ctx = {} + method = 'plans_get' + if plan_id: + args = {'plan_id': plan_id} + LOG.debug('Plan {} requested.'.format(plan_id)) + else: + args = {} + LOG.debug('All plans requested.') + + plans_list = [] + + client = pecan.request.controller + result = client.call(ctx, method, args) + plans = result and result.get('plans') + + for the_plan in plans: + the_plan_id = 
the_plan.get('id') + the_plan['links'] = [self.plan_link(the_plan_id)] + plans_list.append(the_plan) + + if plan_id: + if len(plans_list) == 1: + return plans_list[0] + else: + # For a single plan, we return None if not found + return None + else: + # For all plans, it's ok to return an empty list + return plans_list + + def plan_create(self, args): + ctx = {} + method = 'plan_create' + + # TODO(jdandrea): Enhance notario errors to use similar syntax + # valid_keys = ['files', 'id', 'limit', 'name', + # 'template', 'template_url', 'timeout'] + # if not set(args.keys()).issubset(valid_keys): + # invalid = [name for name in args if name not in valid_keys] + # invalid_str = ', '.join(invalid) + # error('/errors/invalid', + # _('Invalid keys found: {}').format(invalid_str)) + # required_keys = ['template'] + # if not set(required_keys).issubset(args): + # required = [name for name in required_keys if name not in args] + # required_str = ', '.join(required) + # error('/errors/invalid', + # _('Missing required keys: {}').format(required_str)) + + LOG.debug('Plan creation requested (name "{}").'.format( + args.get('name'))) + + client = pecan.request.controller + result = client.call(ctx, method, args) + plan = result and result.get('plan') + if plan: + plan_name = plan.get('name') + plan_id = plan.get('id') + plan['links'] = [self.plan_link(plan_id)] + LOG.info(_LI('Plan {} (name "{}") created.').format( + plan_id, plan_name)) + return plan + + def plan_delete(self, plan): + ctx = {} + method = 'plans_delete' + + plan_name = plan.get('name') + plan_id = plan.get('id') + LOG.debug('Plan {} (name "{}") deletion requested.'.format( + plan_id, plan_name)) + + args = {'plan_id': plan_id} + client = pecan.request.controller + client.call(ctx, method, args) + LOG.info(_LI('Plan {} (name "{}") deleted.').format( + plan_id, plan_name)) + + +class PlansItemController(PlansBaseController): + """Plans Item Controller /v1/plans/{plan_id}""" + + def __init__(self, uuid4): + 
"""Initializer.""" + self.uuid = uuid4 + self.plan = self.plans_get(plan_id=self.uuid) + + if not self.plan: + error('/errors/not_found', + _('Plan {} not found').format(self.uuid)) + pecan.request.context['plan_id'] = self.uuid + + @classmethod + def allow(cls): + """Allowed methods""" + return 'GET,DELETE' + + @pecan.expose(generic=True, template='json') + def index(self): + """Catchall for unallowed methods""" + message = _('The {} method is not allowed.').format( + pecan.request.method) + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + """Options""" + pecan.response.headers['Allow'] = self.allow() + pecan.response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + """Get plan""" + return {"plans": [self.plan]} + + @index.when(method='DELETE', template='json') + def index_delete(self): + """Delete a Plan""" + self.plan_delete(self.plan) + pecan.response.status = 204 + + +class PlansController(PlansBaseController): + """Plans Controller /v1/plans""" + + @classmethod + def allow(cls): + """Allowed methods""" + return 'GET,POST' + + @pecan.expose(generic=True, template='json') + def index(self): + """Catchall for unallowed methods""" + message = _('The {} method is not allowed.').format( + pecan.request.method) + kwargs = {'allow': self.allow()} + error('/errors/not_allowed', message, **kwargs) + + @index.when(method='OPTIONS', template='json') + def index_options(self): + """Options""" + pecan.response.headers['Allow'] = self.allow() + pecan.response.status = 204 + + @index.when(method='GET', template='json') + def index_get(self): + """Get all the plans""" + plans = self.plans_get() + return {"plans": plans} + + @index.when(method='POST', template='json') + @validate(CREATE_SCHEMA, '/errors/schema') + def index_post(self): + """Create a Plan""" + + # Look for duplicate keys in the YAML/JSON, first in the + # entire 
request, and then again if the template parameter + # value is itself an embedded JSON/YAML string. + where = "API Request" + try: + parsed = yaml.load(pecan.request.text, validator.UniqueKeyLoader) + if 'template' in parsed: + where = "Template" + template = parsed['template'] + if isinstance(template, six.string_types): + yaml.load(template, validator.UniqueKeyLoader) + except ConstructorError as exc: + # Only bail on the duplicate key problem (problem and problem_mark + # attributes are available in ConstructorError): + if exc.problem is \ + validator.UniqueKeyLoader.DUPLICATE_KEY_PROBLEM_MARK: + # ConstructorError messages have a two line snippet. + # Grab it, get rid of the second line, and strip any + # remaining whitespace so we can fashion a one line msg. + snippet = exc.problem_mark.get_snippet() + snippet = snippet.split('\n')[0].strip() + msg = _('{} has a duplicate key on line {}: {}') + error('/errors/invalid', + msg.format(where, exc.problem_mark.line + 1, snippet)) + except Exception as exc: + # Let all others pass through for now. + pass + + args = pecan.request.json + plan = self.plan_create(args) + + if not plan: + error('/errors/server_error', _('Unable to create Plan.')) + else: + pecan.response.status = 201 + return plan + + @pecan.expose() + def _lookup(self, uuid4, *remainder): + """Pecan subcontroller routing callback""" + return PlansItemController(uuid4), remainder diff --git a/conductor/conductor/api/controllers/v1/root.py b/conductor/conductor/api/controllers/v1/root.py new file mode 100644 index 0000000..87b4a35 --- /dev/null +++ b/conductor/conductor/api/controllers/v1/root.py @@ -0,0 +1,47 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from oslo_log import log +import pecan +from pecan import secure + +from conductor.api.controllers import error +from conductor.api.controllers.v1 import plans +from conductor.i18n import _ + +LOG = log.getLogger(__name__) + + +class V1Controller(secure.SecureController): + """Version 1 API controller root.""" + + plans = plans.PlansController() + + @classmethod + def check_permissions(cls): + """SecureController permission check callback""" + return True + # error('/errors/unauthorized', msg) + + @pecan.expose(generic=True, template='json') + def index(self): + """Catchall for unallowed methods""" + message = _('The %s method is not allowed.') % pecan.request.method + kwargs = {} + error('/errors/not_allowed', message, **kwargs) diff --git a/conductor/conductor/api/controllers/validator.py b/conductor/conductor/api/controllers/validator.py new file mode 100644 index 0000000..f9bff3f --- /dev/null +++ b/conductor/conductor/api/controllers/validator.py @@ -0,0 +1,63 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from yaml.constructor import ConstructorError +from yaml.nodes import MappingNode + +try: + from yaml import CLoader as Loader +except ImportError: + from yaml import Loader + + +class UniqueKeyLoader(Loader): + """Unique Key Loader for PyYAML + + Ensures no duplicate keys on any given level. + + https://gist.github.com/pypt/94d747fe5180851196eb#gistcomment-2084028 + """ + + DUPLICATE_KEY_PROBLEM_MARK = "found duplicate key" + + def construct_mapping(self, node, deep=False): + """Check for duplicate keys while constructing a mapping.""" + if not isinstance(node, MappingNode): + raise ConstructorError( + None, None, "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + try: + hash(key) + except (TypeError) as exc: + raise ConstructorError("while constructing a mapping", + node.start_mark, + "found unacceptable key (%s)" % exc, + key_node.start_mark) + # check for duplicate keys + if key in mapping: + raise ConstructorError("while constructing a mapping", + node.start_mark, + self.DUPLICATE_KEY_PROBLEM_MARK, + key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping diff --git a/conductor/conductor/api/hooks.py b/conductor/conductor/api/hooks.py new file mode 100644 index 0000000..08677cc --- /dev/null +++ b/conductor/conductor/api/hooks.py @@ -0,0 +1,137 @@ +# +# 
------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from oslo_log import log +from pecan import hooks + +# from conductor.common.models import plan +# from conductor.common.music import api +from conductor.common.music import messaging as music_messaging +# from conductor.common.music.model import base +from conductor import messaging + +LOG = log.getLogger(__name__) + + +class ConfigHook(hooks.PecanHook): + """Attach the configuration object to the request. + + That allows controllers to get it. + """ + + def __init__(self, conf): + super(ConfigHook, self).__init__() + self.conf = conf + + def on_route(self, state): + state.request.cfg = self.conf + + +class MessagingHook(hooks.PecanHook): + """Create and attach a controller RPC client to the request.""" + + def __init__(self, conf): + super(MessagingHook, self).__init__() + topic = "controller" + transport = messaging.get_transport(conf=conf) + target = music_messaging.Target(topic=topic) + self.controller = \ + music_messaging.RPCClient(conf=conf, + transport=transport, + target=target) + + def on_route(self, state): + state.request.controller = self.controller + + +# NOTE: We no longer use ModelHook, since the API should be asking +# the controller (via RPC) for info about plans, not requesting them directly. 
+ +# class ModelHook(hooks.PecanHook): +# """Create and attach dynamic model classes to the request.""" +# +# def __init__(self, conf): +# super(ModelHook, self).__init__() +# +# # TODO(jdandrea) Move this to DBHook? +# music = api.API() +# music.keyspace_create(keyspace=conf.keyspace) +# +# # Dynamically create a plan class for the specified keyspace +# self.Plan = base.create_dynamic_model( +# keyspace=conf.keyspace, baseclass=plan.Plan, classname="Plan") +# +# def before(self, state): +# state.request.models = { +# "Plan": self.Plan, +# } + + +# class DBHook(hooks.PecanHook): +# +# def __init__(self): +# self.storage_connection = DBHook.get_connection('metering') +# self.event_storage_connection = DBHook.get_connection('event') +# +# if (not self.storage_connection +# and not self.event_storage_connection): +# raise Exception("API failed to start. Failed to connect to " +# "databases, purpose: %s" % +# ', '.join(['metering', 'event'])) +# +# def before(self, state): +# state.request.storage_conn = self.storage_connection +# state.request.event_storage_conn = self.event_storage_connection +# +# @staticmethod +# def get_connection(purpose): +# try: +# return storage.get_connection_from_config(cfg.CONF, purpose) +# except Exception as err: +# params = {"purpose": purpose, "err": err} +# LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s " +# "retry later: %(err)s") % params) +# +# +# class NotifierHook(hooks.PecanHook): +# """Create and attach a notifier to the request. +# Usually, samples will be push to notification bus by notifier when they +# are posted via /v2/meters/ API. 
+# """ +# +# def __init__(self): +# transport = messaging.get_transport() +# self.notifier = oslo_messaging.Notifier( +# transport, driver=cfg.CONF.publisher_notifier.homing_driver, +# publisher_id="conductor.api") +# +# def before(self, state): +# state.request.notifier = self.notifier +# +# +# class TranslationHook(hooks.PecanHook): +# +# def after(self, state): +# # After a request has been done, we need to see if +# # ClientSideError has added an error onto the response. +# # If it has we need to get it info the thread-safe WSGI +# # environ to be used by the ParsableErrorMiddleware. +# if hasattr(state.response, 'translatable_error'): +# state.request.environ['translatable_error'] = ( +# state.response.translatable_error) diff --git a/conductor/conductor/api/middleware.py b/conductor/conductor/api/middleware.py new file mode 100644 index 0000000..dc0664a --- /dev/null +++ b/conductor/conductor/api/middleware.py @@ -0,0 +1,132 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +"""Middleware to replace the plain text message body of an error +response with one formatted so the client can parse it. 
+ +Based on pecan.middleware.errordocument +""" + +import json + +from lxml import etree +from oslo_log import log +import six +import webob + +from conductor import i18n +from conductor.i18n import _ + +LOG = log.getLogger(__name__) + + +class ParsableErrorMiddleware(object): + """Replace error body with something the client can parse.""" + + @staticmethod + def best_match_language(accept_language): + """Determines best available locale from the Accept-Language header. + + :returns: the best language match or None if the 'Accept-Language' + header was not available in the request. + """ + if not accept_language: + return None + all_languages = i18n.get_available_languages() + return accept_language.best_match(all_languages) + + def __init__(self, app): + self.app = app + + def __call__(self, environ, start_response): + # Request for this state, modified by replace_start_response() + # and used when an error is being reported. + state = {} + + def replacement_start_response(status, headers, exc_info=None): + """Overrides the default response to make errors parsable.""" + try: + status_code = int(status.split(' ')[0]) + state['status_code'] = status_code + except (ValueError, TypeError): # pragma: nocover + raise Exception(( + 'ErrorDocumentMiddleware received an invalid ' + 'status %s' % status + )) + else: + if (state['status_code'] // 100) not in (2, 3): + # Remove some headers so we can replace them later + # when we have the full error message and can + # compute the length. + headers = [(h, v) + for (h, v) in headers + if h not in ('Content-Length', 'Content-Type') + ] + # Save the headers in case we need to modify them. 
+ state['headers'] = headers + return start_response(status, headers, exc_info) + + app_iter = self.app(environ, replacement_start_response) + if (state['status_code'] // 100) not in (2, 3): + req = webob.Request(environ) + error = environ.get('translatable_error') + user_locale = self.best_match_language(req.accept_language) + if (req.accept.best_match(['application/json', 'application/xml']) + == 'application/xml'): + content_type = 'application/xml' + try: + # simple check xml is valid + fault = etree.fromstring(b'\n'.join(app_iter)) + # Add the translated error to the xml data + if error is not None: + for fault_string in fault.findall('faultstring'): + fault_string.text = i18n.translate(error, + user_locale) + error_message = etree.tostring(fault) + body = b''.join((b'<error_message>', + error_message, + b'</error_message>')) + except etree.XMLSyntaxError as err: + LOG.error(_('Error parsing HTTP response: %s'), err) + error_message = state['status_code'] + body = '<error_message>%s</error_message>' % error_message + if six.PY3: + body = body.encode('utf-8') + else: + content_type = 'application/json' + app_data = b'\n'.join(app_iter) + if six.PY3: + app_data = app_data.decode('utf-8') + try: + fault = json.loads(app_data) + if error is not None and 'faultstring' in fault: + fault['faultstring'] = i18n.translate(error, + user_locale) + except ValueError as err: + fault = app_data + body = json.dumps({'error_message': fault}) + if six.PY3: + body = body.encode('utf-8') + + state['headers'].append(('Content-Length', str(len(body)))) + state['headers'].append(('Content-Type', content_type)) + body = [body] + else: + body = app_iter + return body diff --git a/conductor/conductor/api/rbac.py b/conductor/conductor/api/rbac.py new file mode 100644 index 0000000..6caaad3 --- /dev/null +++ b/conductor/conductor/api/rbac.py @@ -0,0 +1,106 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual 
Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +"""Access Control Lists (ACL's) control access the API server.""" + +from oslo_config import cfg +from oslo_policy import policy +import pecan + +_ENFORCER = None + +CONF = cfg.CONF + + +def reset(): + global _ENFORCER + if _ENFORCER: + _ENFORCER.clear() + _ENFORCER = None + + +def _has_rule(name): + return name in _ENFORCER.rules.keys() + + +def enforce(policy_name, request): + """Return the user and project the request should be limited to. + + :param request: HTTP request + :param policy_name: the policy name to validate AuthZ against. 
+ """ + global _ENFORCER + if not _ENFORCER: + _ENFORCER = policy.Enforcer(CONF) + _ENFORCER.load_rules() + + rule_method = "homing:" + policy_name + headers = request.headers + + policy_dict = dict() + policy_dict['roles'] = headers.get('X-Roles', "").split(",") + policy_dict['user_id'] = (headers.get('X-User-Id')) + policy_dict['project_id'] = (headers.get('X-Project-Id')) + + # maintain backward compat with Juno and previous by allowing the action if + # there is no rule defined for it + if ((_has_rule('default') or _has_rule(rule_method)) and + not _ENFORCER.enforce(rule_method, {}, policy_dict)): + pecan.core.abort(status_code=403, detail='RBAC Authorization Failed') + + +# TODO(fabiog): these methods are still used because the scoping part is really +# convoluted and difficult to separate out. + +def get_limited_to(headers): + """Return the user and project the request should be limited to. + + :param headers: HTTP headers dictionary + :return: A tuple of (user, project), set to None if there's no limit on + one of these. + """ + global _ENFORCER + if not _ENFORCER: + _ENFORCER = policy.Enforcer(CONF) + _ENFORCER.load_rules() + + policy_dict = dict() + policy_dict['roles'] = headers.get('X-Roles', "").split(",") + policy_dict['user_id'] = (headers.get('X-User-Id')) + policy_dict['project_id'] = (headers.get('X-Project-Id')) + + # maintain backward compat with Juno and previous by using context_is_admin + # rule if the segregation rule (added in Kilo) is not defined + rule_name = 'segregation' if _has_rule( + 'segregation') else 'context_is_admin' + if not _ENFORCER.enforce(rule_name, + {}, + policy_dict): + return headers.get('X-User-Id'), headers.get('X-Project-Id') + + return None, None + + +def get_limited_to_project(headers): + """Return the project the request should be limited to. + + :param headers: HTTP headers dictionary + :return: A project, or None if there's no limit on it. 
+ """ + return get_limited_to(headers)[1] diff --git a/conductor/conductor/clean.sh b/conductor/conductor/clean.sh new file mode 100755 index 0000000..40ba5d9 --- /dev/null +++ b/conductor/conductor/clean.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + + +#find . -name '*.pyc' -delete +find */. -name '*.pyc' -delete diff --git a/conductor/conductor/cmd/__init__.py b/conductor/conductor/cmd/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/cmd/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/cmd/controller.py b/conductor/conductor/cmd/controller.py new file mode 100644 index 0000000..c00e233 --- /dev/null +++ b/conductor/conductor/cmd/controller.py @@ -0,0 +1,27 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from conductor import controller +from conductor import service + + +def main(): + conf = service.prepare_service() + controller_service = controller.ControllerServiceLauncher(conf=conf) + controller_service.run() diff --git a/conductor/conductor/cmd/data.py b/conductor/conductor/cmd/data.py new file mode 100644 index 0000000..a880c35 --- /dev/null +++ b/conductor/conductor/cmd/data.py @@ -0,0 +1,27 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from conductor import data +from conductor import service + + +def main(): + conf = service.prepare_service() + data_service = data.DataServiceLauncher(conf=conf) + data_service.run() diff --git a/conductor/conductor/cmd/reservation.py b/conductor/conductor/cmd/reservation.py new file mode 100644 index 0000000..7a38375 --- /dev/null +++ b/conductor/conductor/cmd/reservation.py @@ -0,0 +1,27 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + +from conductor import reservation +from conductor import service + + +def main(): + conf = service.prepare_service() + reservation_service = reservation.ReservationServiceLauncher(conf=conf) + reservation_service.run() diff --git a/conductor/conductor/cmd/solver.py b/conductor/conductor/cmd/solver.py new file mode 100644 index 0000000..8efea99 --- /dev/null +++ b/conductor/conductor/cmd/solver.py @@ -0,0 +1,27 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from conductor import service +from conductor import solver + + +def main(): + conf = service.prepare_service() + solver_service = solver.SolverServiceLauncher(conf=conf) + solver_service.run() diff --git a/conductor/conductor/common/__init__.py b/conductor/conductor/common/__init__.py new file mode 100644 index 0000000..4d222ec --- /dev/null +++ b/conductor/conductor/common/__init__.py @@ -0,0 +1,44 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +"""Music - Common Methods""" + +from oslo_log import log as logging + +from conductor.common.music import api + +LOG = logging.getLogger(__name__) + + +def music_api(configuration): + """Create or return a Music API instance""" + + configuration = dict(configuration) + kwargs = { + 'host': configuration.get('host'), + 'port': configuration.get('port'), + 'replication_factor': configuration.get('replication_factor'), + } + api_instance = api.API(**kwargs) + + # Create the keyspace if necessary + # TODO(jdandrea): Use oslo.config with a [music] section + # keyspace = conf.music.get('keyspace') + # api_instance.create_keyspace(keyspace) + return api_instance diff --git a/conductor/conductor/common/classes.py b/conductor/conductor/common/classes.py new file mode 100644 index 0000000..4f44fd7 --- /dev/null +++ b/conductor/conductor/common/classes.py @@ -0,0 +1,79 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +"""Class Helpers""" + +from conductor.i18n import _LE # pylint: disable=W0212 + + +def get_class(kls): + """Returns a class given a fully qualified class name""" + parts = kls.split('.') + module = ".".join(parts[:-1]) + mod = __import__(module) + for comp in parts[1:]: + mod = getattr(mod, comp) + return mod + + +class abstractclassmethod(classmethod): # pylint: disable=C0103,R0903 + """Abstract Class Method Decorator from Python 3.3's abc module""" + + __isabstractmethod__ = True + + def __init__(self, callable): # pylint: disable=W0622 + callable.__isabstractmethod__ = True + super(abstractclassmethod, self).__init__(callable) + + +class ClassPropertyDescriptor(object): # pylint: disable=R0903 + """Supports the notion of a class property""" + + def __init__(self, fget, fset=None): + """Initializer""" + self.fget = fget + self.fset = fset + + def __get__(self, obj, klass=None): + """Get attribute""" + if klass is None: + klass = type(obj) + return self.fget.__get__(obj, klass)() + + def __set__(self, obj, value): + """Set attribute""" + if not self.fset: + raise AttributeError(_LE("Can't set attribute")) + type_ = type(obj) + return self.fset.__get__(obj, type_)(value) + + def setter(self, func): + """Setter""" + if not isinstance(func, (classmethod, staticmethod)): + func = classmethod(func) + self.fset = func + return self + + +def classproperty(func): + """Class Property decorator""" + if not isinstance(func, (classmethod, staticmethod)): + func = classmethod(func) + + return ClassPropertyDescriptor(func) diff --git a/conductor/conductor/common/models/__init__.py b/conductor/conductor/common/models/__init__.py new file mode 100644 index 0000000..ce07a87 --- /dev/null +++ b/conductor/conductor/common/models/__init__.py @@ -0,0 +1,47 @@ +# +# 
------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from uuid import UUID + +import six + + +def validate_uuid4(uuid_string): + """Validate that a UUID string is in fact a valid uuid4. + + Happily, the uuid module does the actual checking for us. + It is vital that the 'version' kwarg be passed + to the UUID() call, otherwise any 32-character + hex string is considered valid. + """ + + if not isinstance(uuid_string, six.string_types): + return False + + try: + val = UUID(uuid_string.translate(None, '-'), version=4) + except ValueError: + # If it's a value error, then the string + # is not a valid hex code for a UUID. + return False + + # If the uuid_string is a valid hex code, + # but an invalid uuid4, the UUID.__init__ will convert it to a + # valid uuid4. This is bad for validation purposes. 
+ return val.hex == uuid_string diff --git a/conductor/conductor/common/models/plan.py b/conductor/conductor/common/models/plan.py new file mode 100644 index 0000000..3dbc8f5 --- /dev/null +++ b/conductor/conductor/common/models/plan.py @@ -0,0 +1,205 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +"""Plan Model""" + +import json +import time + +from conductor.common.models import validate_uuid4 +from conductor.common.music.model import base + + +def current_time_millis(): + """Current time in milliseconds.""" + return int(round(time.time() * 1000)) + + +class Plan(base.Base): + """Plan model. + + DO NOT use this class directly! + + Only create Plan-based classes using: + base.create_dynamic_model(keyspace=KEYSPACE, + baseclass=Plan, classname=CLASS). + The table will be automatically created if it doesn't exist. 
+ """ + + __tablename__ = "plans" + __keyspace__ = None + + id = None # pylint: disable=C0103 + status = None + created = None + updated = None + name = None + timeout = None + recommend_max = None + message = None + template = None + translation = None + solution = None + + # Status + TEMPLATE = "template" # Template ready for translation + TRANSLATED = "translated" # Translation ready for solving + SOLVING = "solving" # Search for solutions in progress + # Search complete, solution with n>0 recommendations found + SOLVED = "solved" + # Search failed, no recommendations found + NOT_FOUND = "not found" + ERROR = "error" # Error + # Solved, but reservation of resources in progress + RESERVING = "reserving" + # Final state, Solved and Reserved resources (if required) + DONE = "done" + STATUS = [TEMPLATE, TRANSLATED, SOLVING, SOLVED, NOT_FOUND, + ERROR, RESERVING, DONE, ] + WORKING = [TEMPLATE, TRANSLATED, SOLVING, RESERVING, ] + FINISHED = [SOLVED, NOT_FOUND, ERROR, DONE, ] + + @classmethod + def schema(cls): + """Return schema.""" + schema = { + 'id': 'text', # Plan ID in UUID4 format + 'status': 'text', # Plan status (see STATUS for valid values) + 'created': 'bigint', # Creation time in msec from epoch + 'updated': 'bigint', # Last update time in msec from epoch + 'name': 'text', # Plan name/alias + 'timeout': 'int', # Timeout in seconds + 'recommend_max': 'int', # Max recommendations + 'message': 'text', # Message (e.g., error or other info) + 'template': 'text', # Plan template + 'translation': 'text', # Translated template for the solver + 'solution': 'text', # The (ocean is the ultimate) solution (FZ) + 'PRIMARY KEY': '(id)', + } + return schema + + @classmethod + def atomic(cls): + """Use atomic operations""" + return False + + @classmethod + def pk_name(cls): + """Primary key name""" + return 'id' + + def pk_value(self): + """Primary key value""" + return self.id + + @property + def error(self): + return self.status == self.ERROR + + @property + def 
finished(self): + return self.status in self.FINISHED + + @property + def solved(self): + return self.status == self.SOLUTION + + @property + def done(self): + return self.status == self.DONE + + @property + def timedout(self): + """Calculate if a plan has timed out""" + elapsed_msec = (current_time_millis() - self.created) + return elapsed_msec >= self.timeout * 1000 + + @property + def working(self): + return self.status in self.WORKING + + def update(self): + """Update plan + + Side-effect: Sets the updated field to the current time. + """ + self.updated = current_time_millis() + super(Plan, self).update() + + def values(self): + """Values""" + value_dict = { + 'status': self.status, + 'created': self.created, + 'updated': self.updated, + 'name': self.name, + 'timeout': self.timeout, + 'recommend_max': self.recommend_max, + 'message': self.message, + 'template': json.dumps(self.template), + 'translation': json.dumps(self.translation), + 'solution': json.dumps(self.solution), + } + if self.id: + value_dict['id'] = self.id + return value_dict + + def __init__(self, name, timeout, recommend_max, template, + id=None, created=None, updated=None, status=None, + message=None, translation=None, solution=None, _insert=True): + """Initializer""" + super(Plan, self).__init__() + self.status = status or self.TEMPLATE + self.created = created or current_time_millis() + self.updated = updated or current_time_millis() + self.name = name + self.timeout = timeout + self.recommend_max = recommend_max + self.message = message or "" + if _insert: + if validate_uuid4(id): + self.id = id + self.template = template or {} + self.translation = translation or {} + self.solution = solution or {} + self.insert() + else: + self.template = json.loads(template) + self.translation = json.loads(translation) + self.solution = json.loads(solution) + + def __repr__(self): + """Object representation""" + return '<Plan {} ({})>'.format(self.name, self.id) + + def __json__(self): + """JSON 
representation""" + json_ = {} + json_['id'] = self.id + json_['status'] = self.status + json_['created'] = self.created + json_['updated'] = self.updated + json_['name'] = self.name + json_['timeout'] = self.timeout + json_['recommend_max'] = self.recommend_max + json_['message'] = self.message + json_['template'] = self.template + json_['translation'] = self.translation + json_['solution'] = self.solution + return json_ diff --git a/conductor/conductor/common/music/__init__.py b/conductor/conductor/common/music/__init__.py new file mode 100644 index 0000000..31ad7e1 --- /dev/null +++ b/conductor/conductor/common/music/__init__.py @@ -0,0 +1,31 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from oslo_config import cfg + +CONF = cfg.CONF + +MUSIC_API_COMMON_OPTS = [ + cfg.BoolOpt('debug', + default=False, + help='Log debug messages. 
' + 'Default value is False.'), +] + +CONF.register_opts(MUSIC_API_COMMON_OPTS, group='music_api') diff --git a/conductor/conductor/common/music/api.py b/conductor/conductor/common/music/api.py new file mode 100644 index 0000000..013dc79 --- /dev/null +++ b/conductor/conductor/common/music/api.py @@ -0,0 +1,493 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + +"""Music Data Store API""" + +import copy +import time + +from oslo_config import cfg +from oslo_log import log + +from conductor.common import rest +from conductor.i18n import _LE, _LI # pylint: disable=W0212 + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + +global MUSIC_API + +MUSIC_API_OPTS = [ + cfg.StrOpt('server_url', + default='http://controller:8080/MUSIC/rest', + help='Base URL for Music REST API without a trailing slash.'), + cfg.ListOpt('hostnames', + deprecated_for_removal=True, + deprecated_reason='Use server_url instead', + help='List of hostnames (round-robin access)'), + cfg.IntOpt('port', + deprecated_for_removal=True, + deprecated_reason='Use server_url instead', + help='Port'), + cfg.StrOpt('path', + deprecated_for_removal=True, + deprecated_reason='Use server_url instead', + help='Path'), + cfg.FloatOpt('connect_timeout', + default=3.05, + help='Socket connection timeout'), + cfg.FloatOpt('read_timeout', + default=12.05, + help='Socket read timeout'), + cfg.IntOpt('lock_timeout', + default=10, + help='Lock timeout'), + cfg.IntOpt('replication_factor', + default=1, + help='Replication factor'), + cfg.BoolOpt('debug', + default=False, + help='Log debug messages. Default value is False.'), + cfg.BoolOpt('mock', + default=False, + help='Use mock API'), +] + +CONF.register_opts(MUSIC_API_OPTS, group='music_api') + + +class MusicAPI(object): + """Wrapper for Music API""" + + lock_ids = None # Cache of lock ids, indexed by name + lock_timeout = None # Maximum time in seconds to acquire a lock + + rest = None # API Endpoint + replication_factor = None # Number of Music nodes to replicate across + + def __init__(self): + """Initializer.""" + global MUSIC_API + + LOG.info(_LI("Initializing Music API")) + server_url = CONF.music_api.server_url.rstrip('/') + if not server_url: + # host/port/path are deprecated and should not be used anymore. 
+ # Defaults removed from oslo_config to give more incentive. + + # No more round robin either. Just take the first entry. + host = next(iter(CONF.music_api.hostnames or []), 'controller') + port = CONF.music_api.port or 8080 + path = CONF.music_api.path or '/MUSIC/rest' + server_url = 'http://{}:{}/{}'.format( + host, port, path.rstrip('/').lstrip('/')) + + kwargs = { + 'server_url': server_url, + 'log_debug': CONF.music_api.debug, + 'connect_timeout': CONF.music_api.connect_timeout, + 'read_timeout': CONF.music_api.read_timeout, + } + self.rest = rest.REST(**kwargs) + + self.lock_ids = {} + + # TODO(jdandrea): Allow override at creation time. + self.lock_timeout = CONF.music_api.lock_timeout + self.replication_factor = CONF.music_api.replication_factor + + MUSIC_API = self + + def __del__(self): + """Deletion.""" + if type(self.lock_ids) is dict: + for lock_name in self.lock_ids.keys(): + self.lock_delete(lock_name) + + @staticmethod + def _row_url_path(keyspace, table, pk_name, pk_value): + """Returns a Music-compliant row URL path.""" + path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % { + 'keyspace': keyspace, + 'table': table, + } + + if pk_name and pk_value: + path += '?%s=%s' % (pk_name, pk_value) + return path + + @staticmethod + def _lock_name_generate(keyspace, table, pk_value): + """Generate a lock name.""" + + # The Music API dictates that the lock name must be of the form + # keyspace.table.primary_key + lock_name = '%(keyspace)s.%(table)s.%(primary_key)s' % { + 'keyspace': keyspace, + 'table': table, + 'primary_key': pk_value, + } + return lock_name + + def _lock_id_create(self, lock_name): + """Returns the lock id. Use for acquiring and releasing.""" + path = '/locks/create/%s' % lock_name + response = self.rest.request(method='post', + content_type='text/plain', path=path) + lock_id = None + if response and response.ok: + lock_id = response.text + return lock_id + + def _lock_id_acquire(self, lock_id): + """Acquire a lock by id. 
Returns True if successful.""" + path = '/locks/acquire/%s' % lock_id + response = self.rest.request(method='get', + content_type='text/plain', path=path) + status = False + if response and response.ok: + status = (response.text.lower() == 'true') + return status + + def _lock_id_release(self, lock_id): + """Release a lock by id. Returns True if successful.""" + path = '/locks/release/%s' % lock_id + response = self.rest.request(method='delete', + content_type='text/plain', path=path) + return response and response.ok + + def payload_init(self, keyspace=None, table=None, + pk_value=None, atomic=False): + """Initialize payload for Music requests. + + Supports atomic operations. + Returns a payload of data and lock_name (if any). + """ + if atomic: + lock_name = self.lock_create(keyspace, table, pk_value) + else: + lock_name = None + + lock_id = self.lock_ids.get(lock_name) + data = { + 'consistencyInfo': { + 'type': 'atomic' if atomic else 'eventual', + 'lockId': lock_id, + } + } + return {'data': data, 'lock_name': lock_name} + + def payload_delete(self, payload): + """Delete payload for Music requests. Cleans up atomic operations.""" + + # Doesn't actually delete the payload. + # We just delete the lock inside of it! + # This way payload_init/payload_delete is paired up neatly. 
+ lock_name = payload.get('lock_name') + if lock_name: + self.lock_delete(lock_name) + + def keyspace_create(self, keyspace): + """Creates a keyspace.""" + payload = self.payload_init() + data = payload.get('data') + data['durabilityOfWrites'] = True + data['replicationInfo'] = { + 'class': 'SimpleStrategy', + 'replication_factor': self.replication_factor, + } + + path = '/keyspaces/%s' % keyspace + if CONF.music_api.debug: + LOG.debug("Creating keyspace {}".format(keyspace)) + response = self.rest.request(method='post', path=path, data=data) + return response and response.ok + + def keyspace_delete(self, keyspace): + """Drops a keyspace.""" + payload = self.payload_init() + data = payload.get('data') + + path = '/keyspaces/%s' % keyspace + if CONF.music_api.debug: + LOG.debug("Deleting keyspace {}".format(keyspace)) + response = self.rest.request(method='delete', path=path, data=data) + return response and response.ok + + def lock_create(self, keyspace, table, pk_value): + """Create and acquire a lock. Returns a lock name.""" + + # Generate the lock name, then create/acquire the lock id. + lock_name = self._lock_name_generate(keyspace, table, pk_value) + if CONF.music_api.debug: + LOG.debug("Creating lock {}".format(lock_name)) + lock_id = self._lock_id_create(lock_name) + time_now = time.time() + while not self._lock_id_acquire(lock_id): + if time.time() - time_now > self.lock_timeout: + raise IndexError( + _LE('Lock id acquire timeout: %s') % lock_name) + + # Cache the lock name/id. + self.lock_ids[lock_name] = lock_id + return lock_name + + def lock_release(self, lock_name): + """Release lock by name. Returns True if successful""" + + # No need to delete the lock. lock_create() will not complain + # if a lock with the same name is created later. 
+ if CONF.music_api.debug: + LOG.debug("Releasing lock {}".format(lock_name)) + if lock_name: + return self._lock_id_release(self.lock_ids.get(lock_name)) + + def lock_delete(self, lock_name): + """Delete a lock by name. Returns True if successful.""" + path = '/locks/delete/%s' % lock_name + if CONF.music_api.debug: + LOG.debug("Deleting lock {}".format(lock_name)) + response = self.rest.request(content_type='text/plain', + method='delete', path=path) + if response and response.ok: + del self.lock_ids[lock_name] + return response and response.ok + + def row_create(self, keyspace, table, # pylint: disable=R0913 + pk_name, pk_value, values, atomic=False): + """Create a row.""" + payload = self.payload_init(keyspace, table, pk_value, atomic) + data = payload.get('data') + data['values'] = values + + path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % { + 'keyspace': keyspace, + 'table': table, + } + if CONF.music_api.debug: + LOG.debug("Creating row with pk_value {} in table " + "{}, keyspace {}".format(pk_value, table, keyspace)) + response = self.rest.request(method='post', path=path, data=data) + self.payload_delete(payload) + return response and response.ok + + def row_update(self, keyspace, table, # pylint: disable=R0913 + pk_name, pk_value, values, atomic=False): + """Update a row.""" + payload = self.payload_init(keyspace, table, pk_value, atomic) + data = payload.get('data') + data['values'] = values + + path = self._row_url_path(keyspace, table, pk_name, pk_value) + if CONF.music_api.debug: + LOG.debug("Updating row with pk_value {} in table " + "{}, keyspace {}".format(pk_value, table, keyspace)) + response = self.rest.request(method='put', path=path, data=data) + self.payload_delete(payload) + return response and response.ok + + def row_read(self, keyspace, table, pk_name=None, pk_value=None): + """Read one or more rows. 
Not atomic.""" + path = self._row_url_path(keyspace, table, pk_name, pk_value) + if CONF.music_api.debug: + LOG.debug("Reading row with pk_value {} from table " + "{}, keyspace {}".format(pk_value, table, keyspace)) + response = self.rest.request(path=path) + return response and response.json() + + def row_delete(self, keyspace, table, pk_name, pk_value, atomic=False): + """Delete a row.""" + payload = self.payload_init(keyspace, table, pk_value, atomic) + data = payload.get('data') + + path = self._row_url_path(keyspace, table, pk_name, pk_value) + if CONF.music_api.debug: + LOG.debug("Deleting row with pk_value {} from table " + "{}, keyspace {}".format(pk_value, table, keyspace)) + response = self.rest.request(method='delete', path=path, data=data) + self.payload_delete(payload) + return response and response.ok + + @staticmethod + def _table_path_generate(keyspace, table): + path = '/keyspaces/%(keyspace)s/tables/%(table)s/' % { + 'keyspace': keyspace, + 'table': table, + } + return path + + def table_create(self, keyspace, table, schema): + """Creates a table.""" + payload = self.payload_init() + data = payload.get('data') + data['fields'] = schema + + path = self._table_path_generate(keyspace, table) + if CONF.music_api.debug: + LOG.debug("Creating table {}, keyspace {}".format(table, keyspace)) + response = self.rest.request(method='post', path=path, data=data) + return response and response.ok + + def table_delete(self, keyspace, table): + """Creates a table.""" + payload = self.payload_init() + data = payload.get('data') + + path = self._table_path_generate(keyspace, table) + if CONF.music_api.debug: + LOG.debug("Deleting table {}, keyspace {}".format(table, keyspace)) + response = self.rest.request(method='delete', path=path, data=data) + return response and response.ok + + def version(self): + """Returns version string.""" + path = '/version' + if CONF.music_api.debug: + LOG.debug("Requesting version info") + response = self.rest.request(method='get', + 
content_type='text/plain', path=path) + return response and response.text + + +class MockAPI(object): + """Wrapper for Music API""" + + # Mock state for Music + music = { + 'keyspaces': {} + } + + def __init__(self): + """Initializer.""" + LOG.info(_LI("Initializing Music Mock API")) + + global MUSIC_API + + self.music['keyspaces'] = {} + + MUSIC_API = self + + @property + def _keyspaces(self): + return self.music.get('keyspaces') + + def _set_keyspace(self, keyspace): + self._keyspaces[keyspace] = {} + + def _unset_keyspace(self, keyspace): + self._keyspaces.pop(keyspace) + + def _set_table(self, keyspace, table): + self._keyspaces[keyspace][table] = {} + + def _unset_table(self, keyspace, table): + self._keyspaces[keyspace].pop(table) + + def _get_row(self, keyspace, table, key=None): + rows = {} + row_num = 0 + for row_key, row in self._keyspaces[keyspace][table].items(): + if not key or key == row_key: + row_num += 1 + rows['row {}'.format(row_num)] = copy.deepcopy(row) + return rows + + def _set_row(self, keyspace, table, key, row): + self._keyspaces[keyspace][table][key] = row + + def _unset_row(self, keyspace, table, row): + self._keyspaces[keyspace][table].pop(row) + + def keyspace_create(self, keyspace): + """Creates a keyspace.""" + if CONF.music_api.debug: + LOG.debug("Creating keyspace {}".format(keyspace)) + self._set_keyspace(keyspace) + return True + + def keyspace_delete(self, keyspace): + """Drops a keyspace.""" + if CONF.music_api.debug: + LOG.debug("Deleting keyspace {}".format(keyspace)) + self._unset_keyspace(keyspace) + return True + + def row_create(self, keyspace, table, # pylint: disable=R0913 + pk_name, pk_value, values, atomic=False): + """Create a row.""" + if CONF.music_api.debug: + LOG.debug("Creating row with pk_value {} in table " + "{}, keyspace {}".format(pk_value, table, keyspace)) + self._set_row(keyspace, table, pk_value, values) + return True + + def row_update(self, keyspace, table, # pylint: disable=R0913 + pk_name, pk_value, 
values, atomic=False): + """Update a row.""" + if CONF.music_api.debug: + LOG.debug("Updating row with pk_value {} in table " + "{}, keyspace {}".format(pk_value, table, keyspace)) + self._set_row(keyspace, table, pk_value, values) + return True + + def row_read(self, keyspace, table, pk_name=None, pk_value=None): + """Read one or more rows. Not atomic.""" + if CONF.music_api.debug: + LOG.debug("Reading row with pk_value {} from table " + "{}, keyspace {}".format(pk_value, table, keyspace)) + values = self._get_row(keyspace, table, pk_value) + return values + + def row_delete(self, keyspace, table, pk_name, pk_value, atomic=False): + """Delete a row.""" + if CONF.music_api.debug: + LOG.debug("Deleting row with pk_value {} from table " + "{}, keyspace {}".format(pk_value, table, keyspace)) + self._unset_row(keyspace, table, pk_value) + return True + + def table_create(self, keyspace, table, schema): + """Creates a table.""" + if CONF.music_api.debug: + LOG.debug("Creating table {}, keyspace {}".format(table, keyspace)) + self._set_table(keyspace, table) + return True + + def table_delete(self, keyspace, table): + """Creates a table.""" + if CONF.music_api.debug: + LOG.debug("Deleting table {}, keyspace {}".format(table, keyspace)) + self._unset_table(keyspace, table) + return True + + def version(self): + """Returns version string.""" + if CONF.music_api.debug: + LOG.debug("Requesting version info") + return "v1-mock" + + +def API(): + """Wrapper for Music and Music Mock API""" + + # FIXME(jdandrea): Follow more formal practices for defining/using mocks + if CONF.music_api.mock: + return MockAPI() + return MusicAPI() diff --git a/conductor/conductor/common/music/messaging/__init__.py b/conductor/conductor/common/music/messaging/__init__.py new file mode 100644 index 0000000..6cbca8c --- /dev/null +++ b/conductor/conductor/common/music/messaging/__init__.py @@ -0,0 +1,22 @@ +# +# ------------------------------------------------------------------------- +# Copyright 
(c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from .component import RPCClient # noqa: F401 +from .component import RPCService # noqa: F401 +from .component import Target # noqa: F401 diff --git a/conductor/conductor/common/music/messaging/component.py b/conductor/conductor/common/music/messaging/component.py new file mode 100644 index 0000000..becd02e --- /dev/null +++ b/conductor/conductor/common/music/messaging/component.py @@ -0,0 +1,435 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + +import inspect +import sys +import time + +import cotyledon +import futurist +from oslo_config import cfg +from oslo_log import log +from oslo_messaging._drivers import common as rpc_common + +from conductor.common.music.messaging import message +from conductor.common.music.model import base +from conductor.i18n import _LE, _LI # pylint: disable=W0212 + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + +MESSAGING_SERVER_OPTS = [ + cfg.StrOpt('keyspace', + default='conductor_rpc', + help='Music keyspace for messages'), + cfg.IntOpt('check_interval', + default=1, + min=1, + help='Wait interval while checking for a message response. ' + 'Default value is 1 second.'), + cfg.IntOpt('timeout', + default=10, + min=1, + help='Overall message response timeout. ' + 'Default value is 10 seconds.'), + cfg.IntOpt('workers', + default=1, + min=1, + help='Number of workers for messaging service. ' + 'Default value is 1.'), + cfg.IntOpt('polling_interval', + default=1, + min=1, + help='Time between checking for new messages. ' + 'Default value is 1.'), + cfg.BoolOpt('debug', + default=False, + help='Log debug messages. ' + 'Default value is False.'), +] + +CONF.register_opts(MESSAGING_SERVER_OPTS, group='messaging_server') + +# Some class/method descriptions taken from this Oslo Messaging +# RPC API Tutorial/Demo: https://www.youtube.com/watch?v=Bf4gkeoBzvA + +RPCSVRNAME = "Music-RPC Server" + + +class Target(object): + """Returns a messaging target. + + A target encapsulates all the information to identify where a message + should be sent or what messages a server is listening for. + """ + _topic = None + _topic_class = None + + def __init__(self, topic): + """Set the topic and topic class""" + self._topic = topic + + # Because this is Music-specific, the server is + # built-in to the API class, stored as the transport. 
+ # Thus, unlike oslo.messaging, there is no server + # specified for a target. There also isn't an + # exchange, namespace, or version at the moment. + + # Dynamically create a message class for this topic. + self._topic_class = base.create_dynamic_model( + keyspace=CONF.messaging_server.keyspace, + baseclass=message.Message, classname=self.topic) + + if not self._topic_class: + raise RuntimeError("Error setting the topic class " + "for the messaging layer.") + + @property + def topic(self): + """Topic Property""" + return self._topic + + @property + def topic_class(self): + """Topic Class Property""" + return self._topic_class + + +class RPCClient(object): + """Returns an RPC client using Music as a transport. + + The RPC client is responsible for sending method invocations + to remote servers via a messaging transport. + + A method invocation consists of a request context dictionary + a method name, and a dictionary of arguments. A cast() invocation + just sends the request and returns immediately. A call() invocation + waits for the server to send a return value. + """ + + def __init__(self, conf, transport, target): + """Set the transport and target""" + self.conf = conf + self.transport = transport + self.target = target + self.RPC = self.target.topic_class + + # introduced as a quick means to cache messages + # with the aim of preventing unnecessary communication + # across conductor components. 
+ # self.message_cache = dict() + + def __check_rpc_status(self, rpc_id, rpc_method): + """Check status for a given message id""" + # Wait check_interval seconds before proceeding + check_interval = self.conf.messaging_server.check_interval + time.sleep(check_interval) + if self.conf.messaging_server.debug: + LOG.debug("Checking status for message {} method {} on " + "topic {}".format(rpc_id, rpc_method, self.target.topic)) + rpc = self.RPC.query.one(rpc_id) + return rpc + + def cast(self, ctxt, method, args): + """Asynchronous Call""" + rpc = self.RPC(action=self.RPC.CAST, + ctxt=ctxt, method=method, args=args) + assert(rpc.enqueued) + + rpc_id = rpc.id + topic = self.target.topic + LOG.info( + _LI("Message {} on topic {} enqueued").format(rpc_id, topic)) + if self.conf.messaging_server.debug: + LOG.debug("Casting method {} with args {}".format(method, args)) + + return rpc_id + + def call(self, ctxt, method, args): + """Synchronous Call""" + # # check if the call has a message saved in cache + # # key: string concatenation of ctxt + method + args + # # value: rpc response object + # key = "" + # for k, v in ctxt.items(): + # key += str(k) + # key += '#' + str(v) + '#' + # key += '|' + str(method) + '|' + # for k, v in args.items(): + # key += str(k) + # key += '#' + str(v) + '#' + # + # # check if the method has been called before + # # and cached + # if key in self.message_cache: + # LOG.debug("Retrieved method {} with args " + # "{} from cache".format(method, args)) + # return self.message_cache[key] + + rpc_start_time = time.time() + + rpc = self.RPC(action=self.RPC.CALL, + ctxt=ctxt, method=method, args=args) + + # TODO(jdandrea): Do something if the assert fails. 
+ assert(rpc.enqueued) + + rpc_id = rpc.id + topic = self.target.topic + LOG.info( + _LI("Message {} on topic {} enqueued.").format(rpc_id, topic)) + if self.conf.messaging_server.debug: + LOG.debug("Calling method {} with args {}".format(method, args)) + + # Check message status within a thread + executor = futurist.ThreadPoolExecutor() + started_at = time.time() + while (time.time() - started_at) <= \ + self.conf.messaging_server.timeout: + fut = executor.submit(self.__check_rpc_status, rpc_id, method) + rpc = fut.result() + if rpc and rpc.finished: + if self.conf.messaging_server.debug: + LOG.debug("Message {} method {} response received". + format(rpc_id, method)) + break + executor.shutdown() + + # Get response, delete message, and return response + if not rpc or not rpc.finished: + LOG.error(_LE("Message {} on topic {} timed out at {} seconds"). + format(rpc_id, topic, + self.conf.messaging_server.timeout)) + elif not rpc.ok: + LOG.error(_LE("Message {} on topic {} returned an error"). + format(rpc_id, topic)) + response = rpc.response + failure = rpc.failure + rpc.delete() # TODO(jdandrea): Put a TTL on the msg instead? + # self.message_cache[key] = response + + LOG.debug("Elapsed time: {0:.3f} sec".format( + time.time() - rpc_start_time) + ) + # If there's a failure, raise it as an exception + allowed = [] + if failure is not None and failure != '': + # TODO(jdandrea): Do we need to populate allowed(_remote_exmods)? + raise rpc_common.deserialize_remote_exception(failure, allowed) + return response + + +class RPCService(cotyledon.Service): + """Listener for the RPC service. + + An RPC Service exposes a number of endpoints, each of which contain + a set of methods which may be invoked remotely by clients over a + given transport. To create an RPC server, you supply a transport, + target, and a list of endpoints. 
+ + Start the server with server.run() + """ + + # This will appear in 'ps xaf' + name = RPCSVRNAME + + def __init__(self, worker_id, conf, **kwargs): + """Initializer""" + super(RPCService, self).__init__(worker_id) + if conf.messaging_server.debug: + LOG.debug("%s" % self.__class__.__name__) + self._init(conf, **kwargs) + self.running = True + + def _init(self, conf, **kwargs): + """Prepare to process requests""" + self.conf = conf + self.rpc_listener = None + self.transport = kwargs.pop('transport') + self.target = kwargs.pop('target') + self.endpoints = kwargs.pop('endpoints') + self.flush = kwargs.pop('flush') + self.kwargs = kwargs + self.RPC = self.target.topic_class + self.name = "{}, topic({})".format(RPCSVRNAME, self.target.topic) + + if self.flush: + self._flush_enqueued() + + def _flush_enqueued(self): + """Flush all messages with an enqueued status. + + Use this only when the parent service is not running concurrently. + """ + + msgs = self.RPC.query.all() + for msg in msgs: + if msg.enqueued: + msg.delete() + + def _log_error_and_update_msg(self, msg, error_msg): + LOG.error(error_msg) + msg.response = { + 'error': { + 'message': error_msg + } + } + msg.status = message.Message.ERROR + msg.update() + + def __check_for_messages(self): + """Wait for the polling interval, then do the real message check.""" + + # Wait for at least poll_interval sec + polling_interval = self.conf.messaging_server.polling_interval + time.sleep(polling_interval) + if self.conf.messaging_server.debug: + LOG.debug("Topic {}: Checking for new messages".format( + self.target.topic)) + self._do() + return True + + # FIXME(jdandrea): Better name for this, please, kthx. + def _do(self): + """Look for a new RPC call and serve it""" + # Get all the messages in queue + msgs = self.RPC.query.all() + for msg in msgs: + # Find the first msg marked as enqueued. + if not msg.enqueued: + continue + + # RPC methods must not start/end with an underscore. 
+ if msg.method.startswith('_') or msg.method.endswith('_'): + error_msg = _LE("Method {} must not start or end" + "with underscores").format(msg.method) + self._log_error_and_update_msg(msg, error_msg) + return + + # The first endpoint that supports the method wins. + method = None + for endpoint in self.endpoints: + if msg.method not in dir(endpoint): + continue + endpoint_method = getattr(endpoint, msg.method) + if callable(endpoint_method): + method = endpoint_method + if self.conf.messaging_server.debug: + LOG.debug("Message {} method {} is " + "handled by endpoint {}". + format(msg.id, msg.method, + method.__str__.__name__)) + break + if not method: + error_msg = _LE("Message {} method {} unsupported " + "in endpoints.").format(msg.id, msg.method) + self._log_error_and_update_msg(msg, error_msg) + return + + # All methods must take a ctxt and args param. + if inspect.getargspec(method).args != ['self', 'ctx', 'arg']: + error_msg = _LE("Method {} must take three args: " + "self, ctx, arg").format(msg.method) + self._log_error_and_update_msg(msg, error_msg) + return + + LOG.info(_LI("Message {} method {} received").format( + msg.id, msg.method)) + if self.conf.messaging_server.debug: + LOG.debug( + _LI("Message {} method {} context: {}, args: {}").format( + msg.id, msg.method, msg.ctxt, msg.args)) + + failure = None + try: + # Methods return an opaque dictionary + result = method(msg.ctxt, msg.args) + + # FIXME(jdandrea): Remove response/error and make it opaque. + # That means this would just be assigned result outright. + msg.response = result.get('response', result) + except Exception: + # Current sys.exc_info() content can be overridden + # by another exception raised by a log handler during + # LOG.exception(). So keep a copy and delete it later. + failure = sys.exc_info() + + # Do not log details about the failure here. It will + # be returned later upstream. 
+ LOG.exception(_LE('Exception during message handling')) + + try: + if failure is None: + msg.status = message.Message.COMPLETED + else: + msg.failure = \ + rpc_common.serialize_remote_exception(failure) + msg.status = message.Message.ERROR + LOG.info(_LI("Message {} method {}, status: {}").format( + msg.id, msg.method, msg.status)) + if self.conf.messaging_server.debug: + LOG.debug("Message {} method {}, response: {}".format( + msg.id, msg.method, msg.response)) + msg.update() + except Exception: + LOG.exception(_LE("Can not send reply for message {} " + "method {}"). + format(msg.id, msg.method)) + finally: + # Remove circular object reference between the current + # stack frame and the traceback in exc_info. + del failure + + def _gracefully_stop(self): + """Gracefully stop working on things""" + pass + + def _restart(self): + """Prepare to restart the RPC Server""" + pass + + def run(self): + """Run""" + # The server listens for messages and calls the + # appropriate methods. It also deletes messages once + # processed. 
+ if self.conf.messaging_server.debug: + LOG.debug("%s" % self.__class__.__name__) + + # Listen for messages within a thread + executor = futurist.ThreadPoolExecutor() + while self.running: + fut = executor.submit(self.__check_for_messages) + fut.result() + executor.shutdown() + + def terminate(self): + """Terminate""" + if self.conf.messaging_server.debug: + LOG.debug("%s" % self.__class__.__name__) + self.running = False + self._gracefully_stop() + super(RPCService, self).terminate() + + def reload(self): + """Reload""" + if self.conf.messaging_server.debug: + LOG.debug("%s" % self.__class__.__name__) + self._restart() diff --git a/conductor/conductor/common/music/messaging/message.py b/conductor/conductor/common/music/messaging/message.py new file mode 100644 index 0000000..8f20162 --- /dev/null +++ b/conductor/conductor/common/music/messaging/message.py @@ -0,0 +1,178 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +"""Message Model""" + +import json +import time + +from conductor.common.music.model import base + + +def current_time_millis(): + """Current time in milliseconds.""" + return int(round(time.time() * 1000)) + + +class Message(base.Base): + """Message model. + + DO NOT use this class directly! 
With messaging, the table + name must be the message topic, thus this class has a + __tablename__ and __keyspace__ of None. + + Only create Message-based classes using: + base.create_dynamic_model(keyspace=KEYSPACE, + baseclass=Message, classname=TOPIC_NAME). + The table will be automatically created if it doesn't exist. + """ + + __tablename__ = None + __keyspace__ = None + + id = None # pylint: disable=C0103 + action = None + created = None + updated = None + ctxt = None + method = None + args = None + status = None + response = None + failure = None + + # Actions + CALL = "call" + CAST = "cast" + ACTIONS = [CALL, CAST, ] + + # Status + ENQUEUED = "enqueued" + COMPLETED = "completed" + ERROR = "error" + STATUS = [ENQUEUED, COMPLETED, ERROR, ] + FINISHED = [COMPLETED, ERROR, ] + + @classmethod + def schema(cls): + """Return schema.""" + schema = { + 'id': 'text', # Message ID in UUID4 format + 'action': 'text', # Message type (call, cast) + 'created': 'bigint', # Creation time in msec from epoch + 'updated': 'bigint', # Last update time in msec from epoch + 'ctxt': 'text', # JSON request context dictionary + 'method': 'text', # RPC method name + 'args': 'text', # JSON argument dictionary + 'status': 'text', # Status (enqueued, complete, error) + 'response': 'text', # Response JSON + 'failure': 'text', # Failure JSON (used for exceptions) + 'PRIMARY KEY': '(id)', + } + return schema + + @classmethod + def atomic(cls): + """Use atomic operations""" + return False # FIXME: this should be True for atomic operations + + @classmethod + def pk_name(cls): + """Primary key name""" + return 'id' + + def pk_value(self): + """Primary key value""" + return self.id + + @property + def enqueued(self): + return self.status == self.ENQUEUED + + @property + def finished(self): + return self.status in self.FINISHED + + @property + def ok(self): + return self.status == self.COMPLETED + + def update(self): + """Update message + + Side-effect: Sets the updated field to the current 
time. + """ + self.updated = current_time_millis() + super(Message, self).update() + + def values(self): + """Values""" + return { + 'action': self.action, + 'created': self.created, + 'updated': self.updated, + 'ctxt': json.dumps(self.ctxt), + 'method': self.method, + 'args': json.dumps(self.args), + 'status': self.status, + 'response': json.dumps(self.response), + 'failure': self.failure, # already serialized by oslo_messaging + } + + def __init__(self, action, ctxt, method, args, + created=None, updated=None, status=None, + response=None, failure=None, _insert=True): + """Initializer""" + super(Message, self).__init__() + self.action = action + self.created = created or current_time_millis() + self.updated = updated or current_time_millis() + self.method = method + self.status = status or self.ENQUEUED + if _insert: + self.ctxt = ctxt or {} + self.args = args or {} + self.response = response or {} + self.failure = failure or "" + self.insert() + else: + self.ctxt = json.loads(ctxt) + self.args = json.loads(args) + self.response = json.loads(response) + self.failure = failure # oslo_messaging will deserialize this + + def __repr__(self): + """Object representation""" + return '<Message Topic %r>' % self.__tablename__ + + def __json__(self): + """JSON representation""" + json_ = {} + json_['id'] = self.id + json_['action'] = self.action + # TODO(jdandrea): Format timestamps as ISO + json_['created'] = self.created + json_['updated'] = self.updated + json_['ctxt'] = self.ctxt + json_['method'] = self.method + json_['args'] = self.args + json_['status'] = self.status + json_['response'] = self.response + json_['failure'] = self.failure + return json_ diff --git a/conductor/conductor/common/music/model/__init__.py b/conductor/conductor/common/music/model/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/common/music/model/__init__.py @@ -0,0 +1,19 @@ +# +# 
------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/common/music/model/base.py b/conductor/conductor/common/music/model/base.py new file mode 100644 index 0000000..cecb6d2 --- /dev/null +++ b/conductor/conductor/common/music/model/base.py @@ -0,0 +1,168 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + +"""Music ORM - Model""" + +from abc import ABCMeta +from abc import abstractmethod +import uuid + +from oslo_config import cfg +from oslo_log import log as logging +import six + +from conductor.common.classes import abstractclassmethod +from conductor.common.classes import classproperty +from conductor.common.music import api +from conductor.common.music.model import search + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + + +@six.add_metaclass(ABCMeta) +class Base(object): + """A custom declarative base ORM-style class. + + Provides some Elixir-inspired shortcuts as well. + """ + + # These must be set in the derived class! + __tablename__ = None + __keyspace__ = None + + @classproperty + def query(cls): # pylint: disable=E0213 + """Return a query object a la sqlalchemy""" + return search.Query(cls) + + @classmethod + def __kwargs(cls): + """Return common keyword args""" + kwargs = { + 'keyspace': cls.__keyspace__, + 'table': cls.__tablename__, + } + return kwargs + + @classmethod + def table_create(cls): + """Create table""" + kwargs = cls.__kwargs() + kwargs['schema'] = cls.schema() + api.MUSIC_API.table_create(**kwargs) + + @abstractclassmethod + def atomic(cls): + """Use atomic operations""" + return False + + @abstractclassmethod + def schema(cls): + """Return schema""" + return cls() + + @abstractclassmethod + def pk_name(cls): + """Primary key name""" + return cls() + + @abstractmethod + def pk_value(self): + """Primary key value""" + pass + + @abstractmethod + def values(self): + """Values""" + pass + + def insert(self): + """Insert row""" + kwargs = self.__kwargs() + kwargs['pk_name'] = self.pk_name() + kwargs['values'] = self.values() + kwargs['atomic'] = self.atomic() + pk_name = kwargs['pk_name'] + if pk_name not in kwargs['values']: + # TODO(jdandrea): Make uuid4() generation a default method in Base. 
+ the_id = str(uuid.uuid4()) + kwargs['values'][pk_name] = the_id + kwargs['pk_value'] = the_id + setattr(self, pk_name, the_id) + else: + kwargs['pk_value'] = kwargs['values'][pk_name] + api.MUSIC_API.row_create(**kwargs) + + def update(self): + """Update row""" + kwargs = self.__kwargs() + kwargs['pk_name'] = self.pk_name() + kwargs['pk_value'] = self.pk_value() + kwargs['values'] = self.values() + kwargs['atomic'] = self.atomic() + # FIXME(jdandrea): Do we need this test/pop clause? + pk_name = kwargs['pk_name'] + if pk_name in kwargs['values']: + kwargs['values'].pop(pk_name) + api.MUSIC_API.row_update(**kwargs) + + def delete(self): + """Delete row""" + kwargs = self.__kwargs() + kwargs['pk_name'] = self.pk_name() + kwargs['pk_value'] = self.pk_value() + kwargs['atomic'] = self.atomic() + api.MUSIC_API.row_delete(**kwargs) + + @classmethod + def filter_by(cls, **kwargs): + """Filter objects""" + return cls.query.filter_by(**kwargs) # pylint: disable=E1101 + + def flush(self, *args, **kwargs): + """Flush changes to storage""" + # TODO(jdandrea): Implement in music? May be a no-op + pass + + def as_dict(self): + """Return object representation as a dictionary""" + return dict((k, v) for k, v in self.__dict__.items() + if not k.startswith('_')) + + +def create_dynamic_model(keyspace, classname, baseclass): + """Create a dynamic ORM class with a custom keyspace/class/table. + + Given a keyspace, a camelcase class name, and a base class + derived from Base, create a dynamic model that adopts a + table name based on a lower-cased version of the class name, + then create the table in the keyspace if it doesn't already exist. + If the baseclass already has __tablename__ or __keyspace__ set, those + will take precedence. Set those to None to use keyspace/classname here. + """ + + # The comma after baseclass belongs there! Tuple of length 1. 
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

"""Music ORM - Search"""

import inspect

from oslo_config import cfg
from oslo_log import log as logging

from conductor.common.music import api

# FIXME(jdandrea): Keep for the __init__
# from conductor.common.classes import get_class

LOG = logging.getLogger(__name__)

CONF = cfg.CONF


class Query(object):
    """Data query against a single Music-backed model class.

    Provides a minimal, SQLAlchemy-inspired interface
    (one/all/filter_by/first) for classes derived from the Music
    model base (which supply __keyspace__, __tablename__, pk_name()).
    """

    model = None  # The model class being queried.

    def __init__(self, model):
        """Initializer.

        :param model: a class (not an instance) derived from the Music
            model base.
        """
        if inspect.isclass(model):
            self.model = model
        # FIXME(jdandrea): Bring this back so it's path-agnostic.
        # elif isinstance(model, basestring):
        #     self.model = get_class('conductor_api.models.' + model)
        assert inspect.isclass(self.model)

    def __kwargs(self):
        """Return keyword args common to all Music API calls."""
        kwargs = {
            'keyspace': self.model.__keyspace__,
            'table': self.model.__tablename__,  # pylint: disable=E1101
        }
        return kwargs

    def __rows_to_objects(self, rows):
        """Convert query response rows to model objects.

        NOTE: each row dict is mutated in place (the primary key is
        popped) before being passed to the model constructor.
        """
        results = []
        pk_name = self.model.pk_name()  # pylint: disable=E1101
        for row_id, row in rows.items():  # pylint: disable=W0612
            the_id = row.pop(pk_name)
            result = self.model(_insert=False, **row)
            setattr(result, pk_name, the_id)
            results.append(result)
        return Results(results)

    def one(self, pk_value):
        """Return the object whose primary key matches pk_value, or None."""
        pk_name = self.model.pk_name()
        kwargs = self.__kwargs()
        rows = api.MUSIC_API.row_read(
            pk_name=pk_name, pk_value=pk_value, **kwargs)
        return self.__rows_to_objects(rows).first()

    def all(self):
        """Return all objects in the table."""
        kwargs = self.__kwargs()
        rows = api.MUSIC_API.row_read(**kwargs)
        return self.__rows_to_objects(rows)

    def filter_by(self, **kwargs):
        """Return objects matching ALL given attribute=value criteria.

        Music doesn't allow filtering on anything but the primary key,
        so this reads every row and filters client-side (criteria are
        AND-ed). Beware of large tables.
        """
        filtered_items = Results([])
        for item in self.all():
            if all(getattr(item, key) == value
                   for key, value in kwargs.items()):
                filtered_items.append(item)
        return filtered_items

    def first(self):
        """Return the first object, or None if the table is empty."""
        return self.all().first()


class Results(list):
    """Query results; a thin wrapper around list."""

    def __init__(self, *args, **kwargs):  # pylint: disable=W0613
        """Initializer.

        Accepts an optional iterable of initial results. BUG FIX:
        calling Results() with no arguments previously raised
        IndexError on args[0]; it now yields an empty result set.
        """
        super(Results, self).__init__(args[0] if args else [])

    def all(self):
        """Return all results."""
        return self

    def first(self):
        """Return the first result, or None if there are none."""
        if len(self) > 0:
            return self[0]
        return None
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

"""Music ORM - Transactions"""

from oslo_log import log as logging

LOG = logging.getLogger(__name__)


# NOTE(review): Music has no real transaction support; these functions
# exist so callers can use a transaction-shaped API. All are no-ops.

def start():
    """Begin a transaction (no-op)."""
    pass


def start_read_only():
    """Begin a read-only transaction; delegates to start()."""
    start()


def commit():
    """Commit the current transaction (no-op)."""
    pass


def rollback():
    """Roll back the current transaction (no-op)."""
    pass


def clear():
    """Clear any transaction state (no-op)."""
    pass


def flush():
    """Flush pending changes to disk (no-op)."""
    pass
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

import time

from oslo_config import cfg

from conductor.common.music import api
from conductor import service

CONF = cfg.CONF


def current_time_millis():
    """Current time in milliseconds since the epoch."""
    return int(round(time.time() * 1000))


def main():
    """Sample usage of Music.

    Creates a throwaway keyspace with a vote-count table, exercises
    row create/update/read/delete, then deletes the keyspace.
    """
    service.prepare_service()
    CONF.set_override('debug', True, 'music_api')
    CONF.set_override('mock', True, 'music_api')
    CONF.set_override('hostnames', ['music2'], 'music_api')
    music = api.API()
    print("Music version %s" % music.version())

    # Randomize the name so that we don't step on each other.
    # BUG FIX: use floor division. Under Python 3, '/' yields a float,
    # which would embed a '.' in the keyspace name (invalid identifier).
    keyspace = 'NewVotingApp' + str(current_time_millis() // 100)
    music.keyspace_create(keyspace)

    # Create the table
    kwargs = {
        'keyspace': keyspace,
        'table': 'votecount',
        'schema': {
            'name': 'text',
            'count': 'varint',
            'PRIMARY KEY': '(name)'
        }
    }
    music.table_create(**kwargs)

    # Candidate data
    data = {
        'Joe': 5,
        'Shankar': 7,
        'Gueyoung': 8,
        'Matti': 2,
        'Kaustubh': 0
    }

    # Create an entry in the voting table for each candidate
    # and with a vote count of 0.
    kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
    for name in data:  # We only want the keys
        kwargs['pk_value'] = name
        kwargs['values'] = {'name': name, 'count': 0}
        music.row_create(**kwargs)

    # Update each candidate's count atomically.
    kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
    for name in data:
        count = data[name]
        kwargs['pk_value'] = name
        kwargs['values'] = {'count': count}
        kwargs['atomic'] = True
        music.row_update(**kwargs)

    # Read all rows
    kwargs = {'keyspace': keyspace, 'table': 'votecount'}
    print(music.row_read(**kwargs))  # Reads all rows

    # Delete Joe, read Matti
    kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
    kwargs['pk_value'] = 'Joe'
    music.row_delete(**kwargs)
    kwargs['pk_value'] = 'Matti'
    print(music.row_read(**kwargs))

    # Read all rows again
    kwargs = {'keyspace': keyspace, 'table': 'votecount'}
    print(music.row_read(**kwargs))  # Reads all rows

    # Cleanup.
    music.keyspace_delete(keyspace)


if __name__ == "__main__":
    main()
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

"""REST Helper"""

import json
from os import path

from oslo_config import cfg
from oslo_log import log
import requests
from requests.auth import HTTPBasicAuth
from six.moves.urllib import parse

from conductor.i18n import _LE, _LW  # pylint: disable=W0212

LOG = log.getLogger(__name__)

CONF = cfg.CONF


class RESTException(IOError):
    """Basic exception for errors raised by REST"""


class CertificateFileNotFoundException(RESTException, ValueError):
    """Certificate file was not found"""


class MissingURLNetlocException(RESTException, ValueError):
    """URL is missing a host/port"""


class ProhibitedURLSchemeException(RESTException, ValueError):
    """URL is using a prohibited scheme"""


class REST(object):
    """Helper class for REST operations with retry/auth/TLS support."""

    server_url = None
    timeout = None

    # Why the funny looking connect/read timeouts? Here, read this:
    # http://docs.python-requests.org/en/master/user/advanced/#timeouts

    def __init__(self, server_url, retries=3, connect_timeout=3.05,
                 read_timeout=12.05, username=None, password=None,
                 cert_file=None, cert_key_file=None, ca_bundle_file=None,
                 log_debug=False):
        """Initializer.

        :raises ProhibitedURLSchemeException: scheme is not http/https
        :raises MissingURLNetlocException: URL has no host/port
        :raises CertificateFileNotFoundException: a given cert/key/CA
            file path does not exist on disk
        """
        parsed = parse.urlparse(server_url, 'http')
        if parsed.scheme not in ('http', 'https'):
            raise ProhibitedURLSchemeException
        if not parsed.netloc:
            raise MissingURLNetlocException

        for file_path in (cert_file, cert_key_file, ca_bundle_file):
            if file_path and not path.exists(file_path):
                raise CertificateFileNotFoundException

        self.server_url = server_url.rstrip('/')
        self.retries = int(retries)
        # (connect, read) timeout tuple, per requests' advanced usage.
        self.timeout = (float(connect_timeout), float(read_timeout))
        self.log_debug = log_debug
        self.username = username
        self.password = password
        self.cert = cert_file
        self.key = cert_key_file
        self.verify = ca_bundle_file

        # FIXME(jdandrea): Require a CA bundle; do not suppress warnings.
        # This is here due to an A&AI's cert/server name mismatch.
        # Permitting this defeats the purpose of using SSL/TLS.
        if self.verify == "":
            requests.packages.urllib3.disable_warnings()
            self.verify = False

        # Use connection pooling, kthx.
        # http://docs.python-requests.org/en/master/user/advanced/
        self.session = requests.Session()

    def request(self, method='get', content_type='application/json',
                path='', headers=None, data=None):
        """Performs HTTP request. Returns a requests.Response object.

        Retries up to self.retries total attempts, breaking out early
        on the first OK response. Returns None when every attempt
        raised a RequestException (callers must check for None).
        """
        if method not in ('post', 'get', 'put', 'delete'):
            method = 'get'
        method_fn = getattr(self.session, method)

        full_headers = {
            'Accept': content_type,
            'Content-Type': content_type,
        }
        if headers:
            full_headers.update(headers)
        full_url = '{}/{}'.format(self.server_url, path.lstrip('/'))

        # Prepare the request args; fall back to the raw payload if it
        # is not JSON-serializable.
        try:
            data_str = json.dumps(data) if data else None
        except (TypeError, ValueError):
            data_str = data
        kwargs = {
            'data': data_str,
            'headers': full_headers,
            'timeout': self.timeout,
            'cert': (self.cert, self.key),
            'verify': self.verify,
            'stream': False,
        }
        if self.username or self.password:
            LOG.debug("Using HTTPBasicAuth")
            kwargs['auth'] = HTTPBasicAuth(self.username, self.password)
        if self.cert and self.key:
            LOG.debug("Using SSL/TLS Certificate/Key")

        if self.log_debug:
            LOG.debug("Request: {} {}".format(method.upper(), full_url))
            if data:
                LOG.debug("Request Body: {}".format(json.dumps(data)))
        response = None
        for attempt in range(self.retries):
            if attempt > 0:
                # Attempt 0 is the initial try; 1..retries-1 are retries.
                # BUG FIX: previously logged attempt+1 of retries, which
                # labeled the first retry as "#2".
                LOG.warning(_LW("Retry #{}/{}").format(
                    attempt, self.retries - 1))

            try:
                response = method_fn(full_url, **kwargs)

                # We shouldn't have to do this since stream is set to False,
                # but we're gonna anyway. See "Body Content Workflow" here:
                # http://docs.python-requests.org/en/master/user/advanced/
                response.close()

                if not response.ok:
                    LOG.warning("Response Status: {} {}".format(
                        response.status_code, response.reason))
                if self.log_debug and response.text:
                    try:
                        response_dict = json.loads(response.text)
                        LOG.debug("Response JSON: {}".format(
                            json.dumps(response_dict)))
                    except ValueError:
                        LOG.debug("Response Body: {}".format(response.text))
                if response.ok:
                    break
            except requests.exceptions.RequestException as err:
                # BUG FIX: RequestException has no .message attribute on
                # Python 3 (err.message raised AttributeError here);
                # log the exception object itself.
                LOG.error("Exception: %s", err)

        # Response.__bool__ returns false if status is not ok. Ruh roh!
        # That means we must check the object type vs treating it as a bool.
        # More info: https://github.com/kennethreitz/requests/issues/2002
        if isinstance(response, requests.models.Response) and not response.ok:
            LOG.error(_LE("Status {} {} after {} retries for URL: {}").format(
                response.status_code, response.reason, self.retries, full_url))
        return response
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

"""Threshold expression parsing, e.g. "< 100 ms" or "10-20 km"."""

import itertools

# NOTE(review): the six dependency was dropped; on Python 3, str covers
# all text types.


class ThresholdException(Exception):
    """Raised when a threshold expression cannot be parsed."""
    pass


def is_number(input):
    """Return True if input is an int, a float, or a numeric string.

    BUG FIX: the previous truthiness test (``and float(input)``)
    rejected strings that parse to zero such as "0" or "0.0".
    """
    if type(input) is int or type(input) is float:
        return True
    if isinstance(input, str):
        try:
            float(input)
            return True
        except ValueError:
            return False
    return False


class Threshold(object):
    """Parses expressions like "< 5 sec" or "10-20 km" for a base unit.

    An expression has an optional operator (default '='), a value or a
    min-max range, and an optional unit belonging to the base unit's
    category. Values are normalized to the category's default unit
    (the one with multiplier 1.0, e.g. 'ms' for 'time').
    """

    OPERATORS = ['=', '<', '>', '<=', '>=']
    UNITS = {
        'currency': {
            'USD': 1.0,
        },
        'time': {
            'ms': 1.0,
            'sec': 1000.0,
        },
        'distance': {
            'km': 1.0,
            'mi': 1.609344,
        },
        'throughput': {
            'Kbps': 0.001,
            'Mbps': 1.0,
            'Gbps': 1000.0,
        },
    }

    def __init__(self, expression, base_unit):
        """Initializer; parses immediately, raising ThresholdException."""
        if not isinstance(expression, str):
            raise ThresholdException("Expression must be a string")
        if not isinstance(base_unit, str):
            raise ThresholdException("Base unit must be a string")
        if base_unit not in self.UNITS:
            raise ThresholdException(
                "Base unit {} unsupported, must be one of: {}".format(
                    base_unit, ', '.join(self.UNITS.keys())))

        self._expression = expression
        self._base_unit = base_unit
        self._parse()

    def __repr__(self):
        """Object representation"""
        return "<Threshold expression: '{}', base_unit: '{}', " \
               "parts: {}>".format(self.expression, self.base_unit, self.parts)

    def _all_units(self):
        """Returns a single list of all supported units"""
        unit_lists = [self.UNITS[k].keys() for k in self.UNITS.keys()]
        return list(itertools.chain.from_iterable(unit_lists))

    def _default_for_base_unit(self, base_unit):
        """Returns the default unit (1.0 multiplier) for a given base unit

        Returns None if not found.
        """
        units = self.UNITS.get(base_unit)
        if units:
            for name, multiplier in units.items():
                if multiplier == 1.0:
                    return name
        return None

    def _multiplier_for_unit(self, unit):
        """Returns the multiplier for a given unit within the base unit

        Returns None if not found.
        """
        return self.UNITS.get(self.base_unit).get(unit)

    def _reset(self):
        """Resets parsed components"""
        self._operator = None
        self._value = None
        self._min_value = None
        self._max_value = None
        self._unit = None
        self._parsed = False

    def _parse(self):
        """Parses the expression into parts.

        BUG FIXES vs. the previous version:
        - zero values ("= 0 ms") are accepted ('is not None' checks
          instead of truthiness, which treated 0.0 as missing);
        - a unit before any value/range now raises a correct message
          (the old branch was inverted and partly unreachable);
        - a unit from the wrong category (e.g. 'km' with base 'time')
          raises ThresholdException instead of a later TypeError;
        - malformed dash tokens like "a-b" now raise "Unknown part"
          instead of being silently ignored.
        """
        self._reset()
        parts = self.expression.split()
        for part in parts:
            # Is it an operator?
            if self._operator is None and part in self.OPERATORS:
                if self._value is not None:
                    raise ThresholdException(
                        "Value {} encountered before operator {} "
                        "in expression '{}'".format(
                            self._value, part, self.expression))
                if self.has_range:
                    raise ThresholdException(
                        "Range {}-{} encountered before operator {} "
                        "in expression '{}'".format(
                            self.min_value, self.max_value,
                            part, self.expression))
                if self.unit:
                    raise ThresholdException(
                        "Unit '{}' encountered before operator {} "
                        "in expression '{}'".format(
                            self.unit, part, self.expression))
                self._operator = part

            # Is it a lone value?
            elif self._value is None and is_number(part):
                if self.has_range:
                    raise ThresholdException(
                        "Range {}-{} encountered before value {} "
                        "in expression '{}'".format(
                            self.min_value, self.max_value,
                            part, self.expression))
                if self.unit:
                    raise ThresholdException(
                        "Unit '{}' encountered before value {} "
                        "in expression '{}'".format(
                            self.unit, part, self.expression))
                self._value = float(part)
                if not self._operator:
                    self._operator = '='

            # Is it a value range (two numbers joined by a dash)?
            elif (not self.has_range and part.count('-') == 1 and
                  all(is_number(p) for p in part.split('-'))):
                part1, part2 = part.split('-')
                if self._operator and self._operator != '=':
                    raise ThresholdException(
                        "Operator {} not supported with range {} "
                        "in expression '{}'".format(
                            self._operator, part, self.expression))
                if self._value is not None:
                    raise ThresholdException(
                        "Value {} encountered before range {} "
                        "in expression '{}'".format(
                            self._value, part, self.expression))
                if self.unit:
                    raise ThresholdException(
                        "Unit '{}' encountered before range {} "
                        "in expression '{}'".format(
                            self.unit, part, self.expression))
                self._min_value = min(float(part1), float(part2))
                self._max_value = max(float(part1), float(part2))
                if not self._operator:
                    self._operator = '='

            # Is it a unit?
            elif part in self._all_units():
                if self._value is None and not self.has_range:
                    raise ThresholdException(
                        "Unit '{}' encountered before a value or range "
                        "in expression '{}'".format(part, self.expression))
                if self._multiplier_for_unit(part) is None:
                    raise ThresholdException(
                        "Unit '{}' is not a '{}' unit "
                        "in expression '{}'".format(
                            part, self.base_unit, self.expression))
                self._unit = part

            # Well then, we don't know.
            else:
                raise ThresholdException(
                    "Unknown part '{}' in expression '{}'".format(
                        part, self._expression))

        if not self.has_range and self._value is None:
            raise ThresholdException(
                "Value/range missing in expression '{}'".format(
                    self._expression))

        if self._unit:
            # Convert from stated units to the category default.
            multiplier = self._multiplier_for_unit(self._unit)
            if self._value is not None:
                self._value = self._value * multiplier
            if self.has_range:
                self._min_value = self._min_value * multiplier
                self._max_value = self._max_value * multiplier

        # Always report the default unit.
        self._unit = self._default_for_base_unit(self._base_unit)

        self._parsed = True

    @property
    def base_unit(self):
        """Returns the original base unit"""
        return self._base_unit

    @property
    def expression(self):
        """Returns the original expression"""
        return self._expression

    @property
    def has_range(self):
        """Returns True if a minimum/maximum value range exists.

        BUG FIX: uses 'is not None' so a 0-0 range counts and a proper
        bool is always returned (previously returned None/float).
        """
        return self._min_value is not None and self._max_value is not None

    @property
    def max_value(self):
        """Returns the detected maximum value, if any"""
        return self._max_value

    @property
    def min_value(self):
        """Returns the detected minimum value, if any"""
        return self._min_value

    @property
    def operator(self):
        """Returns the operator"""
        return self._operator

    @property
    def parsed(self):
        """Returns True if the expression was successfully parsed"""
        return self._parsed

    @property
    def parts(self):
        """Returns the expression as a dictionary of parts"""
        result = {}
        if self.parsed:
            result['operator'] = self.operator
            if self.has_range:
                result['value'] = {
                    'min': self.min_value,
                    'max': self.max_value,
                }
            else:
                result['value'] = self.value
            result['units'] = self.unit
        return result

    @property
    def unit(self):
        """Returns the units"""
        return self._unit

    @property
    def value(self):
        """Returns the detected value, if any"""
        return self._value
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/conf/defaults.py b/conductor/conductor/conf/defaults.py new file mode 100644 index 0000000..667e047 --- /dev/null +++ b/conductor/conductor/conf/defaults.py @@ -0,0 +1,40 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + +from oslo_config import cfg +from oslo_middleware import cors + + +def set_cors_middleware_defaults(): + """Update default configuration options for oslo.middleware.""" + # CORS Defaults + # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ + # TODO(jdandrea): Adjust allow/expose headers for Conductor vs OpenStack + cfg.set_defaults(cors.CORS_OPTS, + allow_headers=['X-Auth-Token', + 'X-Conductor-Request-Id'], + expose_headers=['X-Auth-Token', + 'X-Conductor-Request-Id'], + allow_methods=['GET', + 'PUT', + 'POST', + 'DELETE', + 'OPTIONS', + 'HEAD'] + ) diff --git a/conductor/conductor/conf/inventory_provider.py b/conductor/conductor/conf/inventory_provider.py new file mode 100644 index 0000000..759ccf9 --- /dev/null +++ b/conductor/conductor/conf/inventory_provider.py @@ -0,0 +1,32 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + +from oslo_config import cfg + +from conductor.i18n import _ + +INV_PROVIDER_EXT_MANAGER_OPTS = [ + cfg.ListOpt('extensions', + default=['aai'], + help=_('Extensions list to use')), +] + + +def register_extension_manager_opts(cfg=cfg.CONF): + cfg.register_opts(INV_PROVIDER_EXT_MANAGER_OPTS, 'inventory_provider') diff --git a/conductor/conductor/conf/service_controller.py b/conductor/conductor/conf/service_controller.py new file mode 100644 index 0000000..f85f81f --- /dev/null +++ b/conductor/conductor/conf/service_controller.py @@ -0,0 +1,32 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + +from oslo_config import cfg + +from conductor.i18n import _ + +SVC_CONTROLLER_EXT_MANAGER_OPTS = [ + cfg.ListOpt('extensions', + default=['sdnc'], + help=_('Extensions list to use')), +] + + +def register_extension_manager_opts(cfg=cfg.CONF): + cfg.register_opts(SVC_CONTROLLER_EXT_MANAGER_OPTS, 'service_controller') diff --git a/conductor/conductor/controller/__init__.py b/conductor/conductor/controller/__init__.py new file mode 100644 index 0000000..013ad0a --- /dev/null +++ b/conductor/conductor/controller/__init__.py @@ -0,0 +1,20 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from .service import ControllerServiceLauncher # noqa: F401 diff --git a/conductor/conductor/controller/rpc.py b/conductor/conductor/controller/rpc.py new file mode 100644 index 0000000..fb385ac --- /dev/null +++ b/conductor/conductor/controller/rpc.py @@ -0,0 +1,99 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +import uuid + + +class ControllerRPCEndpoint(object): + """Controller Endpoint""" + + def __init__(self, conf, plan_class): + self.conf = conf + self.Plan = plan_class + + def plan_create(self, ctx, arg): + """Create a plan""" + name = arg.get('name', str(uuid.uuid4())) + timeout = arg.get('timeout', self.conf.controller.timeout) + recommend_max = arg.get('limit', self.conf.controller.limit) + template = arg.get('template', None) + status = self.Plan.TEMPLATE + new_plan = self.Plan(name, timeout, recommend_max, template, + status=status) + + if new_plan: + plan_json = { + "plan": { + "name": new_plan.name, + "id": new_plan.id, + "status": status, + } + } + rtn = { + 'response': plan_json, + 'error': False} + else: + # TODO(jdandrea): Catch and show the error here + rtn = { + 'response': {}, + 'error': True} + return rtn + + def plans_delete(self, ctx, arg): + """Delete one or more plans""" + plan_id = arg.get('plan_id') + if plan_id: + plans = self.Plan.query.filter_by(id=plan_id) + else: + plans = self.Plan.query.all() + for the_plan in plans: + the_plan.delete() + + rtn = { + 'response': {}, + 'error': False} + return rtn + + def plans_get(self, ctx, arg): + """Get one or more plans""" + plan_id = arg.get('plan_id') + if plan_id: + plans = self.Plan.query.filter_by(id=plan_id) + else: + plans = self.Plan.query.all() + + plan_list = [] + for the_plan in plans: + plan_json = { + "name": the_plan.name, + "id": the_plan.id, + "status": the_plan.status, + } + if 
the_plan.message: + plan_json["message"] = the_plan.message + if the_plan.solution: + recs = the_plan.solution.get('recommendations') + if recs: + plan_json["recommendations"] = recs + plan_list.append(plan_json) + + rtn = { + 'response': {"plans": plan_list}, + 'error': False} + return rtn diff --git a/conductor/conductor/controller/service.py b/conductor/conductor/controller/service.py new file mode 100644 index 0000000..d13518c --- /dev/null +++ b/conductor/conductor/controller/service.py @@ -0,0 +1,104 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +import cotyledon +from oslo_config import cfg +from oslo_log import log + +from conductor.common.models import plan +from conductor.common.music import api +from conductor.common.music import messaging as music_messaging +from conductor.common.music.model import base +from conductor.controller import rpc +from conductor.controller import translator_svc +from conductor import messaging +from conductor import service + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + +CONTROLLER_OPTS = [ + cfg.IntOpt('timeout', + default=10, + min=1, + help='Timeout for planning requests. ' + 'Default value is 10.'), + cfg.IntOpt('limit', + default=1, + min=1, + help='Maximum number of result sets to return. 
' + 'Default value is 1.'), + cfg.IntOpt('workers', + default=1, + min=1, + help='Number of workers for controller service. ' + 'Default value is 1.'), + cfg.BoolOpt('concurrent', + default=False, + help='Set to True when controller will run in active-active ' + 'mode. When set to False, controller will flush any ' + 'abandoned messages at startup. The controller always ' + 'restarts abandoned template translations at startup.'), +] + +CONF.register_opts(CONTROLLER_OPTS, group='controller') + +# Pull in service opts. We use them here. +OPTS = service.OPTS +CONF.register_opts(OPTS) + + +class ControllerServiceLauncher(object): + """Launcher for the controller service.""" + def __init__(self, conf): + self.conf = conf + + # Set up Music access. + self.music = api.API() + self.music.keyspace_create(keyspace=conf.keyspace) + + # Dynamically create a plan class for the specified keyspace + self.Plan = base.create_dynamic_model( + keyspace=conf.keyspace, baseclass=plan.Plan, classname="Plan") + + if not self.Plan: + raise + + def run(self): + transport = messaging.get_transport(self.conf) + if transport: + topic = "controller" + target = music_messaging.Target(topic=topic) + endpoints = [rpc.ControllerRPCEndpoint(self.conf, self.Plan), ] + flush = not self.conf.controller.concurrent + kwargs = {'transport': transport, + 'target': target, + 'endpoints': endpoints, + 'flush': flush, } + svcmgr = cotyledon.ServiceManager() + svcmgr.add(music_messaging.RPCService, + workers=self.conf.controller.workers, + args=(self.conf,), kwargs=kwargs) + + kwargs = {'plan_class': self.Plan, } + svcmgr.add(translator_svc.TranslatorService, + workers=self.conf.controller.workers, + args=(self.conf,), kwargs=kwargs) + svcmgr.run() diff --git a/conductor/conductor/controller/translator.py b/conductor/conductor/controller/translator.py new file mode 100644 index 0000000..eb467fe --- /dev/null +++ b/conductor/conductor/controller/translator.py @@ -0,0 +1,822 @@ +# +# 
------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +import copy +import datetime +import json +import os +import uuid +import yaml + +from oslo_config import cfg +from oslo_log import log +import six + +from conductor import __file__ as conductor_root +from conductor.common.music import messaging as music_messaging +from conductor.common import threshold +from conductor import messaging +from conductor import service + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + +VERSIONS = ["2016-11-01", "2017-10-10"] +LOCATION_KEYS = ['latitude', 'longitude', 'host_name', 'clli_code'] +INVENTORY_PROVIDERS = ['aai'] +INVENTORY_TYPES = ['cloud', 'service'] +DEFAULT_INVENTORY_PROVIDER = INVENTORY_PROVIDERS[0] +CANDIDATE_KEYS = ['inventory_type', 'candidate_id', 'location_id', + 'location_type', 'cost'] +DEMAND_KEYS = ['inventory_provider', 'inventory_type', 'service_type', + 'service_id', 'service_resource_id', 'customer_id', + 'default_cost', 'candidates', 'region', 'complex', + 'required_candidates', 'excluded_candidates', + 'subdivision', 'flavor'] +CONSTRAINT_KEYS = ['type', 'demands', 'properties'] +CONSTRAINTS = { + # constraint_type: { + # split: split into individual constraints, one per demand + # required: list of required property names, + # optional: list of 
optional property names, + # thresholds: dict of property/base-unit pairs for threshold parsing + # allowed: dict of keys and allowed values (if controlled vocab); + # only use this for Conductor-controlled values! + # } + 'attribute': { + 'split': True, + 'required': ['evaluate'], + }, + 'distance_between_demands': { + 'required': ['distance'], + 'thresholds': { + 'distance': 'distance' + }, + }, + 'distance_to_location': { + 'split': True, + 'required': ['distance', 'location'], + 'thresholds': { + 'distance': 'distance' + }, + }, + 'instance_fit': { + 'split': True, + 'required': ['controller'], + 'optional': ['request'], + }, + 'inventory_group': {}, + 'region_fit': { + 'split': True, + 'required': ['controller'], + 'optional': ['request'], + }, + 'zone': { + 'required': ['qualifier', 'category'], + 'allowed': {'qualifier': ['same', 'different'], + 'category': ['disaster', 'region', 'complex', + 'time', 'maintenance']}, + }, +} + + +class TranslatorException(Exception): + pass + + +class Translator(object): + """Template translator. + + Takes an input template and translates it into + something the solver can use. Calls the data service + as needed, giving it the inventory provider as context. + Presently the only inventory provider is A&AI. Others + may be added in the future. + """ + + def __init__(self, conf, plan_name, plan_id, template): + self.conf = conf + self._template = copy.deepcopy(template) + self._plan_name = plan_name + self._plan_id = plan_id + self._translation = None + self._valid = False + self._ok = False + + # Set up the RPC service(s) we want to talk to. + self.data_service = self.setup_rpc(self.conf, "data") + + def setup_rpc(self, conf, topic): + """Set up the RPC Client""" + # TODO(jdandrea): Put this pattern inside music_messaging? 
+ transport = messaging.get_transport(conf=conf) + target = music_messaging.Target(topic=topic) + client = music_messaging.RPCClient(conf=conf, + transport=transport, + target=target) + return client + + def create_components(self): + # TODO(jdandrea): Make deep copies so the template is untouched + self._version = self._template.get("homing_template_version") + self._parameters = self._template.get("parameters", {}) + self._locations = self._template.get("locations", {}) + self._demands = self._template.get("demands", {}) + self._constraints = self._template.get("constraints", {}) + self._optmization = self._template.get("optimization", {}) + self._reservations = self._template.get("reservation", {}) + + if type(self._version) is datetime.date: + self._version = str(self._version) + + def validate_components(self): + """Cursory validation of template components. + + More detailed validation happens while parsing each component. + """ + self._valid = False + + # Check version + if self._version not in VERSIONS: + raise TranslatorException( + "conductor_template_version must be one " + "of: {}".format(', '.join(VERSIONS))) + + # Check top level structure + components = { + "parameters": { + "name": "Parameter", + "content": self._parameters, + }, + "locations": { + "name": "Location", + "keys": LOCATION_KEYS, + "content": self._locations, + }, + "demands": { + "name": "Demand", + "content": self._demands, + }, + "constraints": { + "name": "Constraint", + "keys": CONSTRAINT_KEYS, + "content": self._constraints, + }, + "optimization": { + "name": "Optimization", + "content": self._optmization, + }, + "reservations": { + "name": "Reservation", + "content": self._reservations, + } + } + for name, component in components.items(): + name = component.get('name') + keys = component.get('keys', None) + content = component.get('content') + + if type(content) is not dict: + raise TranslatorException( + "{} section must be a dictionary".format(name)) + for content_name, 
content_def in content.items(): + if not keys: + continue + + for key in content_def: + if key not in keys: + raise TranslatorException( + "{} {} has an invalid key {}".format( + name, content_name, key)) + + demand_keys = self._demands.keys() + location_keys = self._locations.keys() + for constraint_name, constraint in self._constraints.items(): + + # Require a single demand (string), or a list of one or more. + demands = constraint.get('demands') + if isinstance(demands, six.string_types): + demands = [demands] + if not isinstance(demands, list) or len(demands) < 1: + raise TranslatorException( + "Demand list for Constraint {} must be " + "a list of names or a string with one name".format( + constraint_name)) + if not set(demands).issubset(demand_keys): + raise TranslatorException( + "Undefined Demand(s) {} in Constraint '{}'".format( + list(set(demands).difference(demand_keys)), + constraint_name)) + + properties = constraint.get('properties', None) + if properties: + location = properties.get('location', None) + if location: + if location not in location_keys: + raise TranslatorException( + "Location {} in Constraint {} is undefined".format( + location, constraint_name)) + + self._valid = True + + def _parse_parameters(self, obj, path=[]): + """Recursively parse all {get_param: X} occurrences + + This modifies obj in-place. If you want to keep the original, + pass in a deep copy. + """ + # Ok to start with a string ... + if isinstance(path, six.string_types): + # ... but the breadcrumb trail goes in an array. + path = [path] + + # Traverse a list + if type(obj) is list: + for idx, val in enumerate(obj, start=0): + # Add path to the breadcrumb trail + new_path = list(path) + new_path[-1] += "[{}]".format(idx) + + # Look at each element. + obj[idx] = self._parse_parameters(val, new_path) + + # Traverse a dict + elif type(obj) is dict: + # Did we find a "{get_param: ...}" intrinsic? 
+ if obj.keys() == ['get_param']: + param_name = obj['get_param'] + + # The parameter name must be a string. + if not isinstance(param_name, six.string_types): + path_str = ' > '.join(path) + raise TranslatorException( + "Parameter name '{}' not a string in path {}".format( + param_name, path_str)) + + # Parameter name must be defined. + if param_name not in self._parameters: + path_str = ' > '.join(path) + raise TranslatorException( + "Parameter '{}' undefined in path {}".format( + param_name, path_str)) + + # Return the value in place of the call. + return self._parameters.get(param_name) + + # Not an intrinsic. Traverse as usual. + for key in obj.keys(): + # Add path to the breadcrumb trail. + new_path = list(path) + new_path.append(key) + + # Look at each key/value pair. + obj[key] = self._parse_parameters(obj[key], new_path) + + # Return whatever we have after unwinding. + return obj + + def parse_parameters(self): + """Resolve all parameters references.""" + locations = copy.deepcopy(self._locations) + self._locations = self._parse_parameters(locations, 'locations') + + demands = copy.deepcopy(self._demands) + self._demands = self._parse_parameters(demands, 'demands') + + constraints = copy.deepcopy(self._constraints) + self._constraints = self._parse_parameters(constraints, 'constraints') + + reservations = copy.deepcopy(self._reservations) + self._reservations = self._parse_parameters(reservations, + 'reservations') + + def parse_locations(self, locations): + """Prepare the locations for use by the solver.""" + parsed = {} + for location, args in locations.items(): + ctxt = {} + response = self.data_service.call( + ctxt=ctxt, + method="resolve_location", + args=args) + + resolved_location = \ + response and response.get('resolved_location') + if not resolved_location: + raise TranslatorException( + "Unable to resolve location {}".format(location) + ) + parsed[location] = resolved_location + return parsed + + def parse_demands(self, demands): + 
"""Validate/prepare demands for use by the solver.""" + if type(demands) is not dict: + raise TranslatorException("Demands must be provided in " + "dictionary form") + + # Look at each demand + demands_copy = copy.deepcopy(demands) + parsed = {} + for name, requirements in demands_copy.items(): + inventory_candidates = [] + for requirement in requirements: + for key in requirement: + if key not in DEMAND_KEYS: + raise TranslatorException( + "Demand {} has an invalid key {}".format( + requirement, key)) + + if 'candidates' in requirement: + # Candidates *must* specify an inventory provider + provider = requirement.get("inventory_provider") + if provider and provider not in INVENTORY_PROVIDERS: + raise TranslatorException( + "Unsupported inventory provider {} " + "in demand {}".format(provider, name)) + else: + provider = DEFAULT_INVENTORY_PROVIDER + + # Check each candidate + for candidate in requirement.get('candidates'): + # Must be a dictionary + if type(candidate) is not dict: + raise TranslatorException( + "Candidate found in demand {} that is " + "not a dictionary".format(name)) + + # Must have only supported keys + for key in candidate.keys(): + if key not in CANDIDATE_KEYS: + raise TranslatorException( + "Candidate with invalid key {} found " + "in demand {}".format(key, name) + ) + + # TODO(jdandrea): Check required/optional keys + + # Set the inventory provider if not already + candidate['inventory_provider'] = \ + candidate.get('inventory_provider', provider) + + # Set cost if not already (default cost is 0?) 
+ candidate['cost'] = candidate.get('cost', 0) + + # Add to our list of parsed candidates + inventory_candidates.append(candidate) + + # candidates are specified through inventory providers + # Do the basic sanity checks for inputs + else: + # inventory provider MUST be specified + provider = requirement.get("inventory_provider") + if not provider: + raise TranslatorException( + "Inventory provider not specified " + "in demand {}".format(name) + ) + elif provider and provider not in INVENTORY_PROVIDERS: + raise TranslatorException( + "Unsupported inventory provider {} " + "in demand {}".format(provider, name) + ) + else: + provider = DEFAULT_INVENTORY_PROVIDER + requirement['provider'] = provider + + # inventory type MUST be specified + inventory_type = requirement.get('inventory_type') + if not inventory_type or inventory_type == '': + raise TranslatorException( + "Inventory type not specified for " + "demand {}".format(name) + ) + if inventory_type and \ + inventory_type not in INVENTORY_TYPES: + raise TranslatorException( + "Unknown inventory type {} specified for " + "demand {}".format(inventory_type, name) + ) + + # For service inventories, customer_id and + # service_type MUST be specified + if inventory_type == 'service': + customer_id = requirement.get('customer_id') + if not customer_id: + raise TranslatorException( + "Customer ID not specified for " + "demand {}".format(name) + ) + service_type = requirement.get('service_type') + if not service_type: + raise TranslatorException( + "Service Type not specified for " + "demand {}".format(name) + ) + + # TODO(jdandrea): Check required/optional keys for requirement + # elif 'inventory_type' in requirement: + # # For now this is just a stand-in candidate + # candidate = { + # 'inventory_provider': + # requirement.get('inventory_provider', + # DEFAULT_INVENTORY_PROVIDER), + # 'inventory_type': + # requirement.get('inventory_type', ''), + # 'candidate_id': '', + # 'location_id': '', + # 'location_type': '', + # 
'cost': 0, + # } + # + # # Add to our list of parsed candidates + # inventory_candidates.append(candidate) + + # Ask conductor-data for one or more candidates. + ctxt = { + "plan_id": self._plan_id, + "plan_name": self._plan_name, + } + args = { + "demands": { + name: requirements, + } + } + + # Check if required_candidate and excluded candidate + # are mutually exclusive. + for requirement in requirements: + required_candidates = requirement.get("required_candidates") + excluded_candidates = requirement.get("excluded_candidates") + if (required_candidates and + excluded_candidates and + set(map(lambda entry: entry['candidate_id'], + required_candidates)) + & set(map(lambda entry: entry['candidate_id'], + excluded_candidates))): + raise TranslatorException( + "Required candidate list and excluded candidate" + " list are not mutually exclusive for demand" + " {}".format(name) + ) + + response = self.data_service.call( + ctxt=ctxt, + method="resolve_demands", + args=args) + + resolved_demands = \ + response and response.get('resolved_demands') + + required_candidates = resolved_demands\ + .get('required_candidates') + if not resolved_demands: + raise TranslatorException( + "Unable to resolve inventory " + "candidates for demand {}" + .format(name) + ) + resolved_candidates = resolved_demands.get(name) + for candidate in resolved_candidates: + inventory_candidates.append(candidate) + if len(inventory_candidates) < 1: + if not required_candidates: + raise TranslatorException( + "Unable to find any candidate for " + "demand {}".format(name) + ) + else: + raise TranslatorException( + "Unable to find any required " + "candidate for demand {}" + .format(name) + ) + parsed[name] = { + "candidates": inventory_candidates, + } + + return parsed + + def parse_constraints(self, constraints): + """Validate/prepare constraints for use by the solver.""" + if type(constraints) is not dict: + raise TranslatorException("Constraints must be provided in " + "dictionary form") + + # Look 
at each constraint. Properties must exist, even if empty. + constraints_copy = copy.deepcopy(constraints) + + parsed = {} + for name, constraint in constraints_copy.items(): + + if not constraint.get('properties'): + constraint['properties'] = {} + + constraint_type = constraint.get('type') + constraint_def = CONSTRAINTS.get(constraint_type) + + # Is it a supported type? + if constraint_type not in CONSTRAINTS: + raise TranslatorException( + "Unsupported type '{}' found in constraint " + "named '{}'".format(constraint_type, name)) + + # Now walk through the constraint's content + for key, value in constraint.items(): + # Must be a supported key + if key not in CONSTRAINT_KEYS: + raise TranslatorException( + "Invalid key '{}' found in constraint " + "named '{}'".format(key, name)) + + # For properties ... + if key == 'properties': + # Make sure all required properties are present + required = constraint_def.get('required', []) + for req_prop in required: + if req_prop not in value.keys(): + raise TranslatorException( + "Required property '{}' not found in " + "constraint named '{}'".format( + req_prop, name)) + if not value.get(req_prop) \ + or value.get(req_prop) == '': + raise TranslatorException( + "No value specified for property '{}' in " + "constraint named '{}'".format( + req_prop, name)) + + # Make sure there are no unknown properties + optional = constraint_def.get('optional', []) + for prop_name in value.keys(): + if prop_name not in required + optional: + raise TranslatorException( + "Unknown property '{}' in " + "constraint named '{}'".format( + prop_name, name)) + + # If a property has a controlled vocabulary, make + # sure its value is one of the allowed ones. 
+ allowed = constraint_def.get('allowed', {}) + for prop_name, allowed_values in allowed.items(): + if prop_name in value.keys(): + prop_value = value.get(prop_name, '') + if prop_value not in allowed_values: + raise TranslatorException( + "Property '{}' value '{}' unsupported in " + "constraint named '{}' (must be one of " + "{})".format(prop_name, prop_value, + name, allowed_values)) + + # Break all threshold-formatted values into parts + thresholds = constraint_def.get('thresholds', {}) + for thr_prop, base_units in thresholds.items(): + if thr_prop in value.keys(): + expression = value.get(thr_prop) + thr = threshold.Threshold(expression, base_units) + value[thr_prop] = thr.parts + + # We already know we have one or more demands due to + # validate_components(). We still need to coerce the demands + # into a list in case only one demand was provided. + constraint_demands = constraint.get('demands') + if isinstance(constraint_demands, six.string_types): + constraint['demands'] = [constraint_demands] + + # Either split the constraint into parts, one per demand, + # or use it as-is + if constraint_def.get('split'): + for demand in constraint.get('demands', []): + constraint_demand = name + '_' + demand + parsed[constraint_demand] = copy.deepcopy(constraint) + parsed[constraint_demand]['name'] = name + parsed[constraint_demand]['demands'] = demand + else: + parsed[name] = copy.deepcopy(constraint) + parsed[name]['name'] = name + + return parsed + + def parse_optimization(self, optimization): + """Validate/prepare optimization for use by the solver.""" + + # WARNING: The template format for optimization is generalized, + # however the solver is very particular about the expected + # goal, functions, and operands. Therefore, for the time being, + # we are choosing to be highly conservative in what we accept + # at the template level. 
Once the solver can handle the more + # general form, we can make the translation pass using standard + # compiler techniques and tools like antlr (antlr4-python2-runtime). + + if not optimization: + LOG.debug("No objective function or " + "optimzation provided in the template") + return + + optimization_copy = copy.deepcopy(optimization) + parsed = { + "goal": "min", + "operation": "sum", + "operands": [], + } + + if type(optimization_copy) is not dict: + raise TranslatorException("Optimization must be a dictionary.") + + goals = optimization_copy.keys() + if goals != ['minimize']: + raise TranslatorException( + "Optimization must contain a single goal of 'minimize'.") + + funcs = optimization_copy['minimize'].keys() + if funcs != ['sum']: + raise TranslatorException( + "Optimization goal 'minimize' must " + "contain a single function of 'sum'.") + + operands = optimization_copy['minimize']['sum'] + if type(operands) is not list: + # or len(operands) != 2: + raise TranslatorException( + "Optimization goal 'minimize', function 'sum' " + "must be a list of exactly two operands.") + + def get_distance_between_args(operand): + args = operand.get('distance_between') + if type(args) is not list and len(args) != 2: + raise TranslatorException( + "Optimization 'distance_between' arguments must " + "be a list of length two.") + + got_demand = False + got_location = False + for arg in args: + if not got_demand and arg in self._demands.keys(): + got_demand = True + if not got_location and arg in self._locations.keys(): + got_location = True + if not got_demand or not got_location: + raise TranslatorException( + "Optimization 'distance_between' arguments {} must " + "include one valid demand name and one valid " + "location name.".format(args)) + + return args + + for operand in operands: + weight = 1.0 + args = None + + if operand.keys() == ['distance_between']: + # Value must be a list of length 2 with one + # location and one demand + args = 
def parse_reservations(self, reservations):
    """Parse/validate the 'reservations' section of the template.

    Returns a dict containing a 'counter' and, when any reservation
    names a known demand, a 'demands' mapping keyed by
    '<reservation-name>_<demand-name>'.
    Raises TranslatorException when reservations is not a dict.
    """
    demands = self._demands
    if type(reservations) is not dict:
        raise TranslatorException("Reservations must be provided in "
                                  "dictionary form")

    parsed = {}
    if reservations:
        parsed['counter'] = 0
    for name, reservation in reservations.items():
        if not reservation.get('properties'):
            reservation['properties'] = {}
        for demand in reservation.get('demands', []):
            if demand in demands.keys():
                constraint_demand = name + '_' + demand
                # Bug fix: 'demands' was previously re-created on every
                # iteration, discarding all but the last
                # reservation/demand pair. Create once, then accumulate.
                parsed.setdefault('demands', {})
                parsed['demands'][constraint_demand] = \
                    copy.deepcopy(reservation)
                parsed['demands'][constraint_demand]['name'] = name
                parsed['demands'][constraint_demand]['demand'] = demand

    return parsed


def do_translation(self):
    """Perform the translation (validated template -> solver form)."""
    if not self.valid:
        raise TranslatorException("Can't translate an invalid template.")
    self._translation = {
        "conductor_solver": {
            "version": self._version,
            "plan_id": self._plan_id,
            "locations": self.parse_locations(self._locations),
            "demands": self.parse_demands(self._demands),
            "constraints": self.parse_constraints(self._constraints),
            # NOTE(review): the attribute really is spelled
            # '_optmization' (sic) where it is assigned elsewhere in
            # this file; keep the two spellings in sync if renaming.
            "objective": self.parse_optimization(self._optmization),
            "reservations": self.parse_reservations(self._reservations),
        }
    }


def translate(self):
    """Translate the template for the solver, recording any error."""
    self._ok = False
    try:
        self.create_components()
        self.validate_components()
        self.parse_parameters()
        self.do_translation()
        self._ok = True
    except Exception as exc:
        # Bug fix: 'exc.message' is deprecated and absent on many
        # exception types (and on Python 3); str(exc) is always safe.
        self._error_message = str(exc)


@property
def valid(self):
    """True if the template has been validated."""
    return self._valid


@property
def ok(self):
    """True if the translation was successful."""
    return self._ok


@property
def translation(self):
    """The translation, if it was successful."""
    return self._translation


@property
def error_message(self):
    """The last known error message."""
    return self._error_message


def main():
    """Ad-hoc manual check: translate a sample template and print it."""
    template_name = 'some_template'

    path = os.path.abspath(conductor_root)
    dir_path = os.path.dirname(path)

    # Prepare service-wide components (e.g., config)
    conf = service.prepare_service(
        [], config_files=[dir_path + '/../etc/conductor/conductor.conf'])
    # conf.set_override('mock', True, 'music_api')

    t1 = threshold.Threshold("< 500 ms", "time")
    t2 = threshold.Threshold("= 120 mi", "distance")
    t3 = threshold.Threshold("160", "currency")
    t4 = threshold.Threshold("60-80 Gbps", "throughput")
    print('t1: {}\nt2: {}\nt3: {}\nt4: {}\n'.format(t1, t2, t3, t4))

    template_file = dir_path + '/tests/data/' + template_name + '.yaml'
    fd = open(template_file, "r")
    # SECURITY NOTE(review): yaml.load() without an explicit Loader can
    # construct arbitrary Python objects; prefer yaml.safe_load() if
    # template files could ever come from an untrusted source.
    template = yaml.load(fd)

    trns = Translator(conf, template_name, str(uuid.uuid4()), template)
    trns.translate()
    if trns.ok:
        print(json.dumps(trns.translation, indent=2))
    else:
        print("TESTING - Translator Error: {}".format(trns.error_message))


if __name__ == '__main__':
    main()
0000000..425ff36 --- /dev/null +++ b/conductor/conductor/controller/translator_svc.py @@ -0,0 +1,162 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +import time + +import cotyledon +import futurist +from oslo_config import cfg +from oslo_log import log + +from conductor.common.music import api +from conductor.common.music import messaging as music_messaging +from conductor.controller import translator +from conductor.i18n import _LE, _LI +from conductor import messaging + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + +CONTROLLER_OPTS = [ + cfg.IntOpt('polling_interval', + default=1, + min=1, + help='Time between checking for new plans. ' + 'Default value is 1.'), +] + +CONF.register_opts(CONTROLLER_OPTS, group='controller') + + +class TranslatorService(cotyledon.Service): + """Template Translator service. + + This service looks for untranslated templates and + preps them for solving by the Solver service. 
+ """ + + # This will appear in 'ps xaf' + name = "Template Translator" + + def __init__(self, worker_id, conf, **kwargs): + """Initializer""" + LOG.debug("%s" % self.__class__.__name__) + super(TranslatorService, self).__init__(worker_id) + self._init(conf, **kwargs) + self.running = True + + def _init(self, conf, **kwargs): + self.conf = conf + self.Plan = kwargs.get('plan_class') + self.kwargs = kwargs + + # Set up the RPC service(s) we want to talk to. + self.data_service = self.setup_rpc(conf, "data") + + # Set up Music access. + self.music = api.API() + + def _gracefully_stop(self): + """Gracefully stop working on things""" + pass + + def _restart(self): + """Prepare to restart the service""" + pass + + def setup_rpc(self, conf, topic): + """Set up the RPC Client""" + # TODO(jdandrea): Put this pattern inside music_messaging? + transport = messaging.get_transport(conf=conf) + target = music_messaging.Target(topic=topic) + client = music_messaging.RPCClient(conf=conf, + transport=transport, + target=target) + return client + + def translate(self, plan): + """Translate the plan to a format the solver can use""" + # Update the translation field and set status to TRANSLATED. + try: + LOG.info(_LI("Requesting plan {} translation").format( + plan.id)) + trns = translator.Translator( + self.conf, plan.name, plan.id, plan.template) + trns.translate() + if trns.ok: + plan.translation = trns.translation + plan.status = self.Plan.TRANSLATED + LOG.info(_LI( + "Plan {} translated. 
Ready for solving").format( + plan.id)) + else: + plan.message = trns.error_message + plan.status = self.Plan.ERROR + LOG.error(_LE( + "Plan {} translation error encountered").format( + plan.id)) + except Exception as ex: + template = "An exception of type {0} occurred, arguments:\n{1!r}" + plan.message = template.format(type(ex).__name__, ex.args) + plan.status = self.Plan.ERROR + + plan.update() + + def __check_for_templates(self): + """Wait for the polling interval, then do the real template check.""" + + # Wait for at least poll_interval sec + polling_interval = self.conf.controller.polling_interval + time.sleep(polling_interval) + + # Look for plans with the status set to TEMPLATE + plans = self.Plan.query.all() + for plan in plans: + # If there's a template to be translated, do it! + if plan.status == self.Plan.TEMPLATE: + self.translate(plan) + break + elif plan.timedout: + # Move plan to error status? Create a new timed-out status? + # todo(snarayanan) + continue + + def run(self): + """Run""" + LOG.debug("%s" % self.__class__.__name__) + + # Look for templates to translate from within a thread + executor = futurist.ThreadPoolExecutor() + while self.running: + fut = executor.submit(self.__check_for_templates) + fut.result() + executor.shutdown() + + def terminate(self): + """Terminate""" + LOG.debug("%s" % self.__class__.__name__) + self.running = False + self._gracefully_stop() + super(TranslatorService, self).terminate() + + def reload(self): + """Reload""" + LOG.debug("%s" % self.__class__.__name__) + self._restart() diff --git a/conductor/conductor/data/__init__.py b/conductor/conductor/data/__init__.py new file mode 100644 index 0000000..9c965aa --- /dev/null +++ b/conductor/conductor/data/__init__.py @@ -0,0 +1,20 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +from .service import DataServiceLauncher # noqa: F401 diff --git a/conductor/conductor/data/plugins/__init__.py b/conductor/conductor/data/plugins/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/data/plugins/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
@six.add_metaclass(abc.ABCMeta)
class DataPlugin(object):
    """Abstract base class for Conductor data plugins (ABC via six)."""
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/data/plugins/inventory_provider/aai.py b/conductor/conductor/data/plugins/inventory_provider/aai.py new file mode 100644 index 0000000..35b4ba7 --- /dev/null +++ b/conductor/conductor/data/plugins/inventory_provider/aai.py @@ -0,0 +1,1070 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + +import re +import time +import uuid + + +from oslo_config import cfg +from oslo_log import log + +from conductor.common import rest +from conductor.data.plugins.inventory_provider import base +from conductor.i18n import _LE, _LI + +LOG = log.getLogger(__name__) + +CONF = cfg.CONF + +AAI_OPTS = [ + cfg.IntOpt('cache_refresh_interval', + default=1440, + help='Interval with which to refresh the local cache, ' + 'in minutes.'), + cfg.IntOpt('complex_cache_refresh_interval', + default=1440, + help='Interval with which to refresh the local complex cache, ' + 'in minutes.'), + cfg.StrOpt('table_prefix', + default='aai', + help='Data Store table prefix.'), + cfg.StrOpt('server_url', + default='https://controller:8443/aai', + help='Base URL for A&AI, up to and not including ' + 'the version, and without a trailing slash.'), + cfg.StrOpt('server_url_version', + default='v10', + help='The version of A&AI in v# format.'), + cfg.StrOpt('certificate_file', + default='certificate.pem', + help='SSL/TLS certificate file in pem format. ' + 'This certificate must be registered with the A&AI ' + 'endpoint.'), + cfg.StrOpt('certificate_key_file', + default='certificate_key.pem', + help='Private Certificate Key file in pem format.'), + cfg.StrOpt('certificate_authority_bundle_file', + default='certificate_authority_bundle.pem', + help='Certificate Authority Bundle file in pem format. ' + 'Must contain the appropriate trust chain for the ' + 'Certificate file.'), +] + +CONF.register_opts(AAI_OPTS, group='aai') + + +class AAI(base.InventoryProviderBase): + """Active and Available Inventory Provider""" + + def __init__(self): + """Initializer""" + + # FIXME(jdandrea): Pass this in to init. 
+ self.conf = CONF + + self.base = self.conf.aai.server_url.rstrip('/') + self.version = self.conf.aai.server_url_version.rstrip('/') + self.cert = self.conf.aai.certificate_file + self.key = self.conf.aai.certificate_key_file + self.verify = self.conf.aai.certificate_authority_bundle_file + self.cache_refresh_interval = self.conf.aai.cache_refresh_interval + self.last_refresh_time = None + self.complex_cache_refresh_interval = \ + self.conf.aai.complex_cache_refresh_interval + self.complex_last_refresh_time = None + + # TODO(jdandrea): Make these config options? + self.timeout = 30 + self.retries = 3 + + kwargs = { + "server_url": self.base, + "retries": self.retries, + "cert_file": self.cert, + "cert_key_file": self.key, + "ca_bundle_file": self.verify, + "log_debug": self.conf.debug, + } + self.rest = rest.REST(**kwargs) + + # Cache is initially empty + self._aai_cache = {} + self._aai_complex_cache = {} + + def initialize(self): + """Perform any late initialization.""" + + # Refresh the cache once for now + self._refresh_cache() + + # TODO(jdandrea): Make this periodic, and without a True condition! + # executor = futurist.ThreadPoolExecutor() + # while True: + # fut = executor.submit(self.refresh_cache) + # fut.result() + # + # # Now wait for the next time. + # # FIXME(jdandrea): Put inside refresh_cache()? 
+ # refresh_interval = self.conf.aai.cache_refresh_interval + # time.sleep(refresh_interval) + # executor.shutdown() + + def name(self): + """Return human-readable name.""" + return "A&AI" + + def _get_version_from_string(self, string): + """Extract version number from string""" + return re.sub("[^0-9.]", "", string) + + def _aai_versioned_path(self, path): + """Return a URL path with the A&AI version prepended""" + return '/{}/{}'.format(self.version, path.lstrip('/')) + + def _request(self, method='get', path='/', data=None, + context=None, value=None): + """Performs HTTP request.""" + headers = { + 'X-FromAppId': 'CONDUCTOR', + 'X-TransactionId': str(uuid.uuid4()), + } + kwargs = { + "method": method, + "path": path, + "headers": headers, + "data": data, + } + + # TODO(jdandrea): Move timing/response logging into the rest helper? + start_time = time.time() + response = self.rest.request(**kwargs) + elapsed = time.time() - start_time + LOG.debug("Total time for A&AI request " + "({0:}: {1:}): {2:.3f} sec".format(context, value, elapsed)) + + if response is None: + LOG.error(_LE("No response from A&AI ({}: {})"). + format(context, value)) + elif response.status_code != 200: + LOG.error(_LE("A&AI request ({}: {}) returned HTTP " + "status {} {}, link: {}{}"). + format(context, value, + response.status_code, response.reason, + self.base, path)) + return response + + def _refresh_cache(self): + """Refresh the A&AI cache.""" + if not self.last_refresh_time or \ + (time.time() - self.last_refresh_time) > \ + self.cache_refresh_interval * 60: + # TODO(snarayanan): + # The cache is not persisted to Music currently. + # A general purpose ORM caching + # object likely needs to be made, with a key (hopefully we + # can use one that is not just a UUID), a value, and a + # timestamp. The other alternative is to not use the ORM + # layer and call the API directly, but that is + # also trading one set of todos for another ... 
+ + # Get all A&AI sites + LOG.info(_LI("**** Refreshing A&AI cache *****")) + path = self._aai_versioned_path( + '/cloud-infrastructure/cloud-regions/?depth=0') + response = self._request( + path=path, context="cloud regions", value="all") + if response is None: + return + regions = {} + if response.status_code == 200: + body = response.json() + regions = body.get('cloud-region', {}) + if not regions: + # Nothing to update the cache with + LOG.error(_LE("A&AI returned no regions, link: {}{}"). + format(self.base, path)) + return + cache = { + 'cloud_region': {}, + 'service': {}, + } + for region in regions: + cloud_region_version = region.get('cloud-region-version') + cloud_region_id = region.get('cloud-region-id') + cloud_owner = region.get('cloud-owner') + if not (cloud_region_version and + cloud_region_id): + continue + rel_link_data_list = \ + self._get_aai_rel_link_data( + data=region, + related_to='complex', + search_key='complex.physical-location-id') + if len(rel_link_data_list) > 1: + LOG.error(_LE("Region {} has more than one complex"). + format(cloud_region_id)) + LOG.debug("Region {}: {}".format(cloud_region_id, region)) + continue + rel_link_data = rel_link_data_list[0] + complex_id = rel_link_data.get("d_value") + complex_link = rel_link_data.get("link") + if complex_id and complex_link: + complex_info = self._get_complex( + complex_link=complex_link, + complex_id=complex_id) + else: # no complex information + LOG.error(_LE("Region {} does not reference a complex"). 
+ format(cloud_region_id)) + continue + if not complex_info: + LOG.error(_LE("Region {}, complex {} info not found, " + "link {}").format(cloud_region_id, + complex_id, complex_link)) + continue + + latitude = complex_info.get('latitude') + longitude = complex_info.get('longitude') + complex_name = complex_info.get('complex-name') + city = complex_info.get('city') + state = complex_info.get('state') + region = complex_info.get('region') + country = complex_info.get('country') + if not (complex_name and latitude and longitude + and city and region and country): + keys = ('latitude', 'longitude', 'city', + 'complex-name', 'region', 'country') + missing_keys = \ + list(set(keys).difference(complex_info.keys())) + LOG.error(_LE("Complex {} is missing {}, link: {}"). + format(complex_id, missing_keys, complex_link)) + LOG.debug("Complex {}: {}". + format(complex_id, complex_info)) + continue + cache['cloud_region'][cloud_region_id] = { + 'cloud_region_version': cloud_region_version, + 'cloud_owner': cloud_owner, + 'complex': { + 'complex_id': complex_id, + 'complex_name': complex_name, + 'latitude': latitude, + 'longitude': longitude, + 'city': city, + 'state': state, + 'region': region, + 'country': country, + } + } + self._aai_cache = cache + self.last_refresh_time = time.time() + LOG.info(_LI("**** A&AI cache refresh complete *****")) + + # Helper functions to parse the relationships that + # AAI uses to tie information together. This should ideally be + # handled with libraries built for graph databases. Needs more + # exploration for such libraries. 
+ @staticmethod + def _get_aai_rel_link(data, related_to): + """Given an A&AI data structure, return the related-to link""" + rel_dict = data.get('relationship-list') + if rel_dict: + for key, rel_list in rel_dict.items(): + for rel in rel_list: + if related_to == rel.get('related-to'): + return rel.get('related-link') + + @staticmethod + def _get_aai_rel_link_data(data, related_to, search_key=None, + match_dict=None): + # some strings that we will encounter frequently + rel_lst = "relationship-list" + rkey = "relationship-key" + rval = "relationship-value" + rdata = "relationship-data" + response = list() + if match_dict: + m_key = match_dict.get('key') + m_value = match_dict.get('value') + else: + m_key = None + m_value = None + rel_dict = data.get(rel_lst) + if rel_dict: # check if data has relationship lists + for key, rel_list in rel_dict.items(): + for rel in rel_list: + if rel.get("related-to") == related_to: + dval = None + matched = False + link = rel.get("related-link") + r_data = rel.get(rdata, []) + if search_key: + for rd in r_data: + if rd.get(rkey) == search_key: + dval = rd.get(rval) + if not match_dict: # return first match + response.append( + {"link": link, "d_value": dval} + ) + break # go to next relation + if rd.get(rkey) == m_key \ + and rd.get(rval) == m_value: + matched = True + if match_dict and matched: # if matching required + response.append( + {"link": link, "d_value": dval} + ) + # matched, return search value corresponding + # to the matched r_data group + else: # no search key; just return the link + response.append( + {"link": link, "d_value": dval} + ) + if len(response) == 0: + response.append( + {"link": None, "d_value": None} + ) + return response + + def _get_complex(self, complex_link, complex_id=None): + if not self.complex_last_refresh_time or \ + (time.time() - self.complex_last_refresh_time) > \ + self.complex_cache_refresh_interval * 60: + self._aai_complex_cache.clear() + if complex_id and complex_id in 
self._aai_complex_cache: + return self._aai_complex_cache[complex_id] + else: + path = self._aai_versioned_path( + self._get_aai_path_from_link(complex_link)) + response = self._request( + path=path, context="complex", value=complex_id) + if response is None: + return + if response.status_code == 200: + complex_info = response.json() + if 'complex' in complex_info: + complex_info = complex_info.get('complex') + latitude = complex_info.get('latitude') + longitude = complex_info.get('longitude') + complex_name = complex_info.get('complex-name') + city = complex_info.get('city') + region = complex_info.get('region') + country = complex_info.get('country') + if not (complex_name and latitude and longitude + and city and region and country): + keys = ('latitude', 'longitude', 'city', + 'complex-name', 'region', 'country') + missing_keys = \ + list(set(keys).difference(complex_info.keys())) + LOG.error(_LE("Complex {} is missing {}, link: {}"). + format(complex_id, missing_keys, complex_link)) + LOG.debug("Complex {}: {}". + format(complex_id, complex_info)) + return + + if complex_id: # cache only if complex_id is given + self._aai_complex_cache[complex_id] = response.json() + self.complex_last_refresh_time = time.time() + + return complex_info + + def _get_regions(self): + self._refresh_cache() + regions = self._aai_cache.get('cloud_region', {}) + return regions + + def _get_aai_path_from_link(self, link): + path = link.split(self.version) + if not path or len(path) <= 1: + # TODO(shankar): Treat this as a critical error? + LOG.error(_LE("A&AI version {} not found in link {}"). 
def check_network_roles(self, network_role_id=None):
    """Return region ids hosting l3-networks with the given role.

    Returns a set of cloud-region ids, or None when the role id is
    missing or A&AI gives no response.
    """
    # Bug fix: the default of None previously crashed on string
    # concatenation below; treat a missing role id as unresolvable.
    if network_role_id is None:
        return None

    # the network role query from A&AI is not using
    # the version number in the query
    network_role_uri = \
        '/network/l3-networks?network-role=' + network_role_id
    path = self._aai_versioned_path(network_role_uri)

    # This UUID is usually reserved by A&AI for a
    # Conductor-specific named query.
    named_query_uid = ""

    data = {
        "query-parameters": {
            "named-query": {
                "named-query-uuid": named_query_uid
            }
        },
        "instance-filters": {
            "instance-filter": [
                {
                    "l3-network": {
                        "network-role": network_role_id
                    }
                }
            ]
        }
    }
    region_ids = set()
    response = self._request('get', path=path, data=data,
                             context="role", value=network_role_id)
    if response is None:
        return None
    body = response.json()

    for item in body.get('l3-network', []):
        cloud_region_instances = self._get_aai_rel_link_data(
            data=item,
            related_to='cloud-region',
            search_key='cloud-region.cloud-region-id'
        )

        for r_instance in cloud_region_instances:
            region_id = r_instance.get('d_value')
            if region_id is not None:
                region_ids.add(region_id)

    # return region ids that fit the role
    return region_ids
complex_link = result["resource-link"] + if not complex_link: + LOG.error(_LE("Unable to get a complex link for hostname {} " + " in response {}").format(host_name, response)) + return None + complex_info = self._get_complex( + complex_link=complex_link, + complex_id=None + ) + if complex_info: + lat = complex_info.get('latitude') + lon = complex_info.get('longitude') + if lat and lon: + location = {"latitude": lat, "longitude": lon} + return location + else: + LOG.error(_LE("Unable to get a latitude and longitude " + "information for hostname {} from complex " + " link {}").format(host_name, complex_link)) + return None + else: + LOG.error(_LE("Unable to get a complex information for " + " hostname {} from complex " + " link {}").format(host_name, complex_link)) + return None + + def resolve_clli_location(self, clli_name): + clli_uri = '/cloud-infrastructure/complexes/complex/' + clli_name + path = self._aai_versioned_path(clli_uri) + + response = self._request('get', path=path, data=None, + context="clli name", value=clli_name) + if response is None or response.status_code != 200: + return None + + body = response.json() + + if body: + lat = body.get('latitude') + lon = body.get('longitude') + if lat and lon: + location = {"latitude": lat, "longitude": lon} + return location + else: + LOG.error(_LE("Unable to get a latitude and longitude " + "information for CLLI code {} from complex"). 
def get_inventory_group_pairs(self, service_description):
    """Return service-instance id pairs for instance groups whose
    description matches service_description."""
    pairs = list()
    path = self._aai_versioned_path(
        '/network/instance-groups/?description={}&depth=0'.format(
            service_description))
    response = self._request(path=path, context="inventory group",
                             value=service_description)
    if response is None or response.status_code != 200:
        return
    body = response.json()
    if "instance-group" not in body:
        LOG.error(_LE("Unable to get instance groups from inventory "
                      " in response {}").format(response))
        return
    for instance_groups in body["instance-group"]:
        s_instances = self._get_aai_rel_link_data(
            data=instance_groups,
            related_to='service-instance',
            search_key='service-instance.service-instance-id'
        )
        if s_instances and len(s_instances) == 2:
            # Collect the two service-instance ids as one pair.
            pairs.append([inst.get('d_value') for inst in s_instances])
        else:
            LOG.error(_LE("Number of instance pairs not found to "
                          "be two: {}").format(instance_groups))
    return pairs
def check_sriov_automation(self, aic_version, demand_name, candidate_name):
    """Check if specific candidate has SRIOV automation available or not

    Used by resolve_demands
    """
    if aic_version:
        LOG.debug(_LI("Demand {}, candidate {} has an AIC version "
                      "number {}").format(demand_name, candidate_name,
                                          aic_version))
        # NOTE(review): only the exact string "3.6" qualifies; other
        # 3.6.x point releases do not — confirm this is intended.
        if aic_version == "3.6":
            return True
    return False


def check_orchestration_status(self, orchestration_status, demand_name,
                               candidate_name):
    """Check if the orchestration-status of a candidate is activated

    Used by resolve_demands
    """
    if orchestration_status:
        LOG.debug(_LI("Demand {}, candidate {} has an orchestration "
                      "status {}").format(demand_name, candidate_name,
                                          orchestration_status))
        if orchestration_status.lower() == "activated":
            return True
    return False


def match_candidate_attribute(self, candidate, attribute_name,
                              restricted_value, demand_name,
                              inventory_type):
    """Check if specific candidate attribute matches the restricted value

    Returns True when the candidate should be discarded.
    Used by resolve_demands
    """
    # Bug fix: the original used "restricted_value is not ''", an
    # identity (not equality) comparison that warns on CPython >= 3.8;
    # truthiness already excludes '' so one check suffices.
    if restricted_value and \
            candidate[attribute_name] != restricted_value:
        LOG.info(_LI("Demand: {} "
                     "Discarded {} candidate as "
                     "it doesn't match the "
                     "{} attribute "
                     "{} ").format(demand_name,
                                   inventory_type,
                                   attribute_name,
                                   restricted_value
                                   )
                 )
        return True
    return False


def match_vserver_attribute(self, vserver_list):
    """Return True when every entry's truthy 'd_value' agrees.

    Used by resolve_demands
    """
    value = None
    for entry in vserver_list:
        current = entry.get('d_value')
        # Original semantics: a falsy previous value never vetoes.
        if value and value != current:
            return False
        value = current
    return True
requirements: + inventory_type = requirement.get('inventory_type').lower() + service_type = requirement.get('service_type') + # service_id = requirement.get('service_id') + customer_id = requirement.get('customer_id') + + # region_id is OPTIONAL. This will restrict the initial + # candidate set to come from the given region id + restricted_region_id = requirement.get('region') + restricted_complex_id = requirement.get('complex') + + # get required candidates from the demand + required_candidates = requirement.get("required_candidates") + if required_candidates: + resolved_demands['required_candidates'] = \ + required_candidates + + # get excluded candidate from the demand + excluded_candidates = requirement.get("excluded_candidates") + + # service_resource_id is OPTIONAL and is + # transparent to Conductor + service_resource_id = requirement.get('service_resource_id') \ + if requirement.get('service_resource_id') else '' + + # add all the candidates of cloud type + if inventory_type == 'cloud': + # load region candidates from cache + regions = self._get_regions() + + if not regions or len(regions) < 1: + LOG.debug("Region information is not " + "available in cache") + for region_id, region in regions.items(): + # Pick only candidates from the restricted_region + + candidate = dict() + candidate['inventory_provider'] = 'aai' + candidate['service_resource_id'] = service_resource_id + candidate['inventory_type'] = 'cloud' + candidate['candidate_id'] = region_id + candidate['location_id'] = region_id + candidate['location_type'] = 'att_aic' + candidate['cost'] = 0 + candidate['cloud_region_version'] = \ + self._get_version_from_string( + region['cloud_region_version']) + candidate['cloud_owner'] = \ + region['cloud_owner'] + candidate['physical_location_id'] = \ + region['complex']['complex_id'] + candidate['complex_name'] = \ + region['complex']['complex_name'] + candidate['latitude'] = \ + region['complex']['latitude'] + candidate['longitude'] = \ + 
region['complex']['longitude'] + candidate['city'] = \ + region['complex']['city'] + candidate['state'] = \ + region['complex']['state'] + candidate['region'] = \ + region['complex']['region'] + candidate['country'] = \ + region['complex']['country'] + + if self.check_sriov_automation( + candidate['cloud_region_version'], name, + candidate['candidate_id']): + candidate['sriov_automation'] = 'true' + else: + candidate['sriov_automation'] = 'false' + + if self.match_candidate_attribute( + candidate, "candidate_id", + restricted_region_id, name, + inventory_type) or \ + self.match_candidate_attribute( + candidate, "physical_location_id", + restricted_complex_id, name, + inventory_type): + continue + + # Pick only candidates not in the excluded list + # if excluded candidate list is provided + if excluded_candidates: + has_excluded_candidate = False + for excluded_candidate in excluded_candidates: + if excluded_candidate \ + and excluded_candidate.get('inventory_type') == \ + candidate.get('inventory_type') \ + and excluded_candidate.get('candidate_id') == \ + candidate.get('candidate_id'): + has_excluded_candidate = True + break + + if has_excluded_candidate: + continue + + # Pick only candidates in the required list + # if required candidate list is provided + if required_candidates: + has_required_candidate = False + for required_candidate in required_candidates: + if required_candidate \ + and required_candidate.get('inventory_type') \ + == candidate.get('inventory_type') \ + and required_candidate.get('candidate_id') \ + == candidate.get('candidate_id'): + has_required_candidate = True + break + + if not has_required_candidate: + continue + + # add candidate to demand candidates + resolved_demands[name].append(candidate) + + elif inventory_type == 'service' \ + and service_type and customer_id: + # First level query to get the list of generic vnfs + path = self._aai_versioned_path( + '/network/generic-vnfs/' + 
'?prov-status=PROV&equipment-role={}&depth=0'.format(service_type)) + response = self._request( + path=path, context="demand, GENERIC-VNF role", + value="{}, {}".format(name, service_type)) + if response is None or response.status_code != 200: + continue # move ahead with next requirement + body = response.json() + generic_vnf = body.get("generic-vnf", []) + for vnf in generic_vnf: + # create a default candidate + candidate = dict() + candidate['inventory_provider'] = 'aai' + candidate['service_resource_id'] = service_resource_id + candidate['inventory_type'] = 'service' + candidate['candidate_id'] = '' + candidate['location_id'] = '' + candidate['location_type'] = 'att_aic' + candidate['host_id'] = '' + candidate['cost'] = 0 + candidate['cloud_owner'] = '' + candidate['cloud_region_version'] = '' + + # start populating the candidate + candidate['host_id'] = vnf.get("vnf-name") + + # check orchestration-status attribute, only keep Activated candidate + if (not self.check_orchestration_status( + vnf.get("orchestration-status"), name, candidate['host_id'])): + continue + + related_to = "vserver" + search_key = "cloud-region.cloud-owner" + rl_data_list = self._get_aai_rel_link_data( + data=vnf, related_to=related_to, + search_key=search_key) + + if len(rl_data_list) > 1: + if not self.match_vserver_attribute(rl_data_list): + self._log_multiple_item_error( + name, service_type, related_to, search_key, + "GENERIC-VNF", vnf) + continue + rl_data = rl_data_list[0] + + vs_link_list = list() + for i in range(0, len(rl_data_list)): + vs_link_list.append(rl_data_list[i].get('link')) + + candidate['cloud_owner'] = rl_data.get('d_value') + + search_key = "cloud-region.cloud-region-id" + + rl_data_list = self._get_aai_rel_link_data( + data=vnf, + related_to=related_to, + search_key=search_key + ) + if len(rl_data_list) > 1: + if not self.match_vserver_attribute(rl_data_list): + self._log_multiple_item_error( + name, service_type, related_to, search_key, + "GENERIC-VNF", vnf) + 
continue + rl_data = rl_data_list[0] + cloud_region_id = rl_data.get('d_value') + candidate['location_id'] = cloud_region_id + + # get AIC version for service candidate + if cloud_region_id: + cloud_region_uri = '/cloud-infrastructure/cloud-regions' \ + '/?cloud-region-id=' \ + + cloud_region_id + path = self._aai_versioned_path(cloud_region_uri) + + response = self._request('get', + path=path, + data=None) + if response is None or response.status_code != 200: + return None + + body = response.json() + regions = body.get('cloud-region', []) + + for region in regions: + if "cloud-region-version" in region: + candidate['cloud_region_version'] = \ + self._get_version_from_string( + region["cloud-region-version"]) + + if self.check_sriov_automation( + candidate['cloud_region_version'], name, + candidate['host_id']): + candidate['sriov_automation'] = 'true' + else: + candidate['sriov_automation'] = 'false' + + related_to = "service-instance" + search_key = "customer.global-customer-id" + match_key = "customer.global-customer-id" + rl_data_list = self._get_aai_rel_link_data( + data=vnf, + related_to=related_to, + search_key=search_key, + match_dict={'key': match_key, + 'value': customer_id} + ) + if len(rl_data_list) > 1: + if not self.match_vserver_attribute(rl_data_list): + self._log_multiple_item_error( + name, service_type, related_to, search_key, + "GENERIC-VNF", vnf) + continue + rl_data = rl_data_list[0] + vs_cust_id = rl_data.get('d_value') + + search_key = "service-instance.service-instance-id" + match_key = "customer.global-customer-id" + rl_data_list = self._get_aai_rel_link_data( + data=vnf, + related_to=related_to, + search_key=search_key, + match_dict={'key': match_key, + 'value': customer_id} + ) + if len(rl_data_list) > 1: + if not self.match_vserver_attribute(rl_data_list): + self._log_multiple_item_error( + name, service_type, related_to, search_key, + "GENERIC-VNF", vnf) + continue + rl_data = rl_data_list[0] + vs_service_instance_id = 
rl_data.get('d_value') + + if vs_cust_id and vs_cust_id == customer_id: + candidate['candidate_id'] = \ + vs_service_instance_id + else: # vserver is for a different customer + continue + + # Second level query to get the pserver from vserver + complex_list = list() + + for vs_link in vs_link_list: + + if not vs_link: + LOG.error(_LE("{} VSERVER link information not " + "available from A&AI").format(name)) + LOG.debug("Related link data: {}".format(rl_data)) + continue # move ahead with the next vnf + + vs_path = self._get_aai_path_from_link(vs_link) + if not vs_path: + LOG.error(_LE("{} VSERVER path information not " + "available from A&AI - {}"). + format(name, vs_path)) + continue # move ahead with the next vnf + path = self._aai_versioned_path(vs_path) + response = self._request( + path=path, context="demand, VSERVER", + value="{}, {}".format(name, vs_path)) + if response is None or response.status_code != 200: + continue + body = response.json() + + related_to = "pserver" + rl_data_list = self._get_aai_rel_link_data( + data=body, + related_to=related_to, + search_key=None + ) + if len(rl_data_list) > 1: + self._log_multiple_item_error( + name, service_type, related_to, "item", + "VSERVER", body) + continue + rl_data = rl_data_list[0] + ps_link = rl_data.get('link') + + # Third level query to get cloud region from pserver + if not ps_link: + LOG.error(_LE("{} pserver related link " + "not found in A&AI: {}"). + format(name, rl_data)) + continue + ps_path = self._get_aai_path_from_link(ps_link) + if not ps_path: + LOG.error(_LE("{} pserver path information " + "not found in A&AI: {}"). 
+ format(name, ps_link)) + continue # move ahead with the next vnf + path = self._aai_versioned_path(ps_path) + response = self._request( + path=path, context="PSERVER", value=ps_path) + if response is None or response.status_code != 200: + continue + body = response.json() + + related_to = "complex" + search_key = "complex.physical-location-id" + rl_data_list = self._get_aai_rel_link_data( + data=body, + related_to=related_to, + search_key=search_key + ) + if len(rl_data_list) > 1: + if not self.match_vserver_attribute(rl_data_list): + self._log_multiple_item_error( + name, service_type, related_to, search_key, + "PSERVER", body) + continue + rl_data = rl_data_list[0] + complex_list.append(rl_data) + + if not complex_list or \ + len(complex_list) < 1: + LOG.error("Complex information not " + "available from A&AI") + continue + + if len(complex_list) > 1: + if not self.match_vserver_attribute(complex_list): + self._log_multiple_item_error( + name, service_type, related_to, search_key, + "GENERIC-VNF", vnf) + continue + + rl_data = complex_list[0] + complex_link = rl_data.get('link') + complex_id = rl_data.get('d_value') + + # Final query for the complex information + if not (complex_link and complex_id): + LOG.debug("{} complex information not " + "available from A&AI - {}". + format(name, complex_link)) + continue # move ahead with the next vnf + else: + complex_info = self._get_complex( + complex_link=complex_link, + complex_id=complex_id + ) + if not complex_info: + LOG.debug("{} complex information not " + "available from A&AI - {}". 
+ format(name, complex_link)) + continue # move ahead with the next vnf + candidate['physical_location_id'] = \ + complex_id + candidate['complex_name'] = \ + complex_info.get('complex-name') + candidate['latitude'] = \ + complex_info.get('latitude') + candidate['longitude'] = \ + complex_info.get('longitude') + candidate['state'] = \ + complex_info.get('state') + candidate['country'] = \ + complex_info.get('country') + candidate['city'] = \ + complex_info.get('city') + candidate['region'] = \ + complex_info.get('region') + + # Pick only candidates not in the excluded list + # if excluded candidate list is provided + if excluded_candidates: + has_excluded_candidate = False + for excluded_candidate in excluded_candidates: + if excluded_candidate \ + and excluded_candidate.get('inventory_type') == \ + candidate.get('inventory_type') \ + and excluded_candidate.get('candidate_id') == \ + candidate.get('candidate_id'): + has_excluded_candidate = True + break + + if has_excluded_candidate: + continue + + # Pick only candidates in the required list + # if required candidate list is provided + if required_candidates: + has_required_candidate = False + for required_candidate in required_candidates: + if required_candidate \ + and required_candidate.get('inventory_type') \ + == candidate.get('inventory_type') \ + and required_candidate.get('candidate_id') \ + == candidate.get('candidate_id'): + has_required_candidate = True + break + + if not has_required_candidate: + continue + + # add the candidate to the demand + # Pick only candidates from the restricted_region + # or restricted_complex + if self.match_candidate_attribute( + candidate, + "location_id", + restricted_region_id, + name, + inventory_type) or \ + self.match_candidate_attribute( + candidate, + "physical_location_id", + restricted_complex_id, + name, + inventory_type): + continue + else: + resolved_demands[name].append(candidate) + else: + LOG.error("Unknown inventory_type " + " {}".format(inventory_type)) + + 
return resolved_demands diff --git a/conductor/conductor/data/plugins/inventory_provider/base.py b/conductor/conductor/data/plugins/inventory_provider/base.py new file mode 100644 index 0000000..8afb090 --- /dev/null +++ b/conductor/conductor/data/plugins/inventory_provider/base.py @@ -0,0 +1,42 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +import abc + +from oslo_log import log +import six + +from conductor.data.plugins import base + +LOG = log.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class InventoryProviderBase(base.DataPlugin): + """Base class for Inventory Provider plugins""" + + @abc.abstractmethod + def name(self): + """Return human-readable name.""" + pass + + @abc.abstractmethod + def resolve_demands(self, demands): + """Resolve demands into inventory candidate lists""" + pass diff --git a/conductor/conductor/data/plugins/inventory_provider/extensions.py b/conductor/conductor/data/plugins/inventory_provider/extensions.py new file mode 100644 index 0000000..18f4c4b --- /dev/null +++ b/conductor/conductor/data/plugins/inventory_provider/extensions.py @@ -0,0 +1,45 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the 
# Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

from oslo_log import log
import stevedore

from conductor.conf import inventory_provider
from conductor.i18n import _LI

LOG = log.getLogger(__name__)

# Register the extension-manager config options before any Manager is built.
inventory_provider.register_extension_manager_opts()


class Manager(stevedore.named.NamedExtensionManager):
    """Stevedore-based manager for Inventory Provider extensions.

    Loads the providers named in conf.inventory_provider.extensions from
    the given entry-point namespace and instantiates each one.
    """

    def __init__(self, conf, namespace):
        enabled = conf.inventory_provider.extensions
        super(Manager, self).__init__(
            namespace, enabled, invoke_on_load=True, name_order=True)
        LOG.info(_LI("Loaded inventory provider extensions: %s"), self.names())

    def initialize(self):
        """Run late initialization on every enabled provider extension."""
        for ext in self.extensions:
            LOG.info(_LI("Initializing inventory provider extension '%s'"),
                     ext.name)
            ext.obj.initialize()

# -------------------------------------------------------------------------
# conductor/conductor/data/plugins/service_controller/__init__.py
# (license header continues on the following source lines)
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/data/plugins/service_controller/base.py b/conductor/conductor/data/plugins/service_controller/base.py new file mode 100644 index 0000000..ad00c98 --- /dev/null +++ b/conductor/conductor/data/plugins/service_controller/base.py @@ -0,0 +1,42 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
# -------------------------------------------------------------------------
#

import abc

from oslo_log import log
import six

from conductor.data.plugins import base

LOG = log.getLogger(__name__)


@six.add_metaclass(abc.ABCMeta)
class ServiceControllerBase(base.DataPlugin):
    """Abstract base for Service Controller plugins (e.g. SDN-C).

    Subclasses must supply a display name and a candidate-filtering hook.
    NOTE(review): the concrete SDNC plugin implements filter_candidates
    with a different parameter list (request, candidate_list,
    constraint_name, constraint_type) — confirm which signature is the
    intended contract.
    """

    @abc.abstractmethod
    def name(self):
        """Return human-readable name."""
        pass

    @abc.abstractmethod
    def filter_candidates(self, candidates):
        """Reduce candidate list based on SDN-C intelligence"""
        pass

# -------------------------------------------------------------------------
# conductor/conductor/data/plugins/service_controller/extensions.py
# (license header continues on the following source line)
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

from oslo_log import log
import stevedore

from conductor.conf import service_controller
from conductor.i18n import _LI

LOG = log.getLogger(__name__)

# Register the extension-manager config options before any Manager is built.
service_controller.register_extension_manager_opts()


class Manager(stevedore.named.NamedExtensionManager):
    """Stevedore-based manager for Service Controller extensions.

    Loads the controllers named in conf.service_controller.extensions from
    the given entry-point namespace and instantiates each one.
    """

    def __init__(self, conf, namespace):
        enabled = conf.service_controller.extensions
        super(Manager, self).__init__(
            namespace, enabled, invoke_on_load=True, name_order=True)
        LOG.info(_LI("Loaded service controller extensions: %s"), self.names())

    def initialize(self):
        """Run late initialization on every enabled controller extension."""
        for ext in self.extensions:
            LOG.info(_LI("Initializing service controller extension '%s'"),
                     ext.name)
            ext.obj.initialize()

# -------------------------------------------------------------------------
# conductor/conductor/data/plugins/service_controller/sdnc.py
# (license header continues on the following source line)
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

import time

from oslo_config import cfg
from oslo_log import log

from conductor.common import rest
from conductor.data.plugins.service_controller import base
from conductor.i18n import _LE

LOG = log.getLogger(__name__)

CONF = cfg.CONF

SDNC_OPTS = [
    cfg.IntOpt('cache_refresh_interval',
               default=1440,
               help='Interval with which to refresh the local cache, '
                    'in minutes.'),
    cfg.StrOpt('table_prefix',
               default='sdnc',
               help='Data Store table prefix.'),
    cfg.StrOpt('server_url',
               default='https://controller:8443/restconf/',
               help='Base URL for SDN-C, up to and including the version.'),
    cfg.StrOpt('username',
               help='Basic Authentication Username'),
    cfg.StrOpt('password',
               help='Basic Authentication Password'),
    # BUG FIX: these two options carried integer defaults (60, 3) but were
    # declared as StrOpt; oslo.config requires the option type to match the
    # default and consumers treat them as numbers.
    cfg.IntOpt('sdnc_rest_timeout',
               default=60,
               help='Timeout for SDNC Rest Call'),
    cfg.IntOpt('sdnc_retries',
               default=3,
               help='Retry Numbers for SDNC Rest Call'),
]

CONF.register_opts(SDNC_OPTS, group='sdnc')


class SDNC(base.ServiceControllerBase):
    """SDN Service Controller plugin.

    Wraps the Conductor REST helper to talk to an SDN-C instance.
    """

    def __init__(self):
        """Initializer."""
        # FIXME(jdandrea): Pass this in to init.
        self.conf = CONF

        self.base = self.conf.sdnc.server_url.rstrip('/')
        self.password = self.conf.sdnc.password
        self.timeout = self.conf.sdnc.sdnc_rest_timeout
        # NOTE(review): TLS verification is disabled here — confirm this is
        # intentional for the target deployment.
        self.verify = False
        self.retries = self.conf.sdnc.sdnc_retries
        self.username = self.conf.sdnc.username

        # NOTE(review): self.timeout and self.verify are stored but never
        # handed to rest.REST — confirm whether REST should receive them.
        kwargs = {
            "server_url": self.base,
            "retries": self.retries,
            "username": self.username,
            "password": self.password,
            "log_debug": self.conf.debug,
        }
        self.rest = rest.REST(**kwargs)

        # Cache placeholder; not sure yet what SDNC data is cacheable.
        self._sdnc_cache = {}

    def initialize(self):
        """Perform any late initialization."""
        pass

    def name(self):
        """Return human-readable name."""
        return "SDN-C"

    def _request(self, method='get', path='/', data=None,
                 context=None, value=None):
        """Perform an HTTP request against SDN-C and log timing/errors.

        :param method: HTTP verb understood by the REST helper
        :param path: request path relative to the configured base URL
        :param data: optional request payload
        :param context/value: caller-supplied strings used only for logging
        :returns: the REST helper's response object, or None on no response
        """
        kwargs = {
            "method": method,
            "path": path,
            "data": data,
        }

        # TODO(jdandrea): Move timing/response logging into the rest helper?
        start_time = time.time()
        response = self.rest.request(**kwargs)
        elapsed = time.time() - start_time
        LOG.debug("Total time for SDN-C request "
                  "({0:}: {1:}): {2:.3f} sec".format(context, value, elapsed))

        if response is None:
            LOG.error(_LE("No response from SDN-C ({}: {})").
                      format(context, value))
        elif response.status_code != 200:
            LOG.error(_LE("SDN-C request ({}: {}) returned HTTP "
                          "status {} {}, link: {}{}").
                      format(context, value,
                             response.status_code, response.reason,
                             self.base, path))
        return response

    def filter_candidates(self, request, candidate_list,
                          constraint_name, constraint_type):
        """Reduce candidate list based on SDN-C intelligence.

        Currently a pass-through stub: returns the list unchanged.
        NOTE(review): signature differs from the abstract
        ServiceControllerBase.filter_candidates(self, candidates).
        """
        selected_candidates = candidate_list
        return selected_candidates

# -------------------------------------------------------------------------
# conductor/conductor/data/service.py
# (license header continues on the following source line)
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

# import json
# import os

import cotyledon
from oslo_config import cfg
from oslo_log import log
# from stevedore import driver

# from conductor import __file__ as conductor_root
from conductor.common.music import messaging as music_messaging
from conductor.data.plugins.inventory_provider import extensions as ip_ext
from conductor.data.plugins.service_controller import extensions as sc_ext
from conductor.i18n import _LE, _LI, _LW
from conductor import messaging
# from conductor.solver.resource import region
# from conductor.solver.resource import service

LOG = log.getLogger(__name__)

CONF = cfg.CONF

DATA_OPTS = [
    cfg.IntOpt('workers',
               default=1,
               min=1,
               help='Number of workers for data service. '
                    'Default value is 1.'),
    cfg.BoolOpt('concurrent',
                default=False,
                help='Set to True when data will run in active-active '
                     'mode. When set to False, data will flush any abandoned '
                     'messages at startup.'),
]

CONF.register_opts(DATA_OPTS, group='data')


class DataServiceLauncher(object):
    """Listener for the data service."""

    def __init__(self, conf):
        """Initializer."""
        self.conf = conf
        self.init_extension_managers(conf)

    def init_extension_managers(self, conf):
        """Initialize inventory provider and service controller managers."""
        self.ip_ext_manager = (
            ip_ext.Manager(conf, 'conductor.inventory_provider.plugin'))
        self.ip_ext_manager.initialize()
        self.sc_ext_manager = (
            sc_ext.Manager(conf, 'conductor.service_controller.plugin'))
        self.sc_ext_manager.initialize()

    def run(self):
        """Start RPC worker processes serving the 'data' topic."""
        transport = messaging.get_transport(self.conf)
        if transport:
            topic = "data"
            target = music_messaging.Target(topic=topic)
            endpoints = [DataEndpoint(self.ip_ext_manager,
                                      self.sc_ext_manager), ]
            # In active-active (concurrent) mode, abandoned messages must
            # not be flushed at startup.
            flush = not self.conf.data.concurrent
            kwargs = {'transport': transport,
                      'target': target,
                      'endpoints': endpoints,
                      'flush': flush, }
            svcmgr = cotyledon.ServiceManager()
            svcmgr.add(music_messaging.RPCService,
                       workers=self.conf.data.workers,
                       args=(self.conf,), kwargs=kwargs)
            svcmgr.run()


class DataEndpoint(object):
    """RPC endpoint exposing inventory/service-controller operations."""

    def __init__(self, ip_ext_manager, sc_ext_manager):
        self.ip_ext_manager = ip_ext_manager
        self.sc_ext_manager = sc_ext_manager
        self.plugin_cache = {}

    def get_candidate_location(self, ctx, arg):
        """Return (lat, lon) floats for a candidate, or an error flag.

        Candidates are expected to already carry latitude/longitude.
        """
        error = False
        location = None
        candidate = arg["candidate"]
        lat = candidate.get('latitude')
        lon = candidate.get('longitude')
        # BUG FIX: the original truthiness test rejected legitimate 0/0.0
        # coordinates; only missing or empty values are errors now.
        if lat not in (None, '') and lon not in (None, ''):
            location = (float(lat), float(lon))
        else:
            error = True
        return {'response': location, 'error': error}

    def get_candidate_zone(self, ctx, arg):
        """Return a candidate's zone for a 'region' or 'complex' category."""
        candidate = arg["candidate"]
        category = arg["category"]
        zone = None
        error = False

        if category == 'region':
            zone = candidate['location_id']
        elif category == 'complex':
            zone = candidate['complex_name']
        else:
            error = True

        if error:
            LOG.error(_LE("Unresolvable zone category {}").format(category))
        else:
            LOG.info(_LI("Candidate zone is {}").format(zone))
        return {'response': zone, 'error': error}

    def get_candidates_from_service(self, ctx, arg):
        """Filter a candidate list through the service controller plugin."""
        candidate_list = arg["candidate_list"]
        constraint_name = arg["constraint_name"]
        constraint_type = arg["constraint_type"]
        # inventory_type = arg["inventory_type"]
        controller = arg["controller"]
        request = arg["request"]
        # cost = arg["cost"]
        error = False
        filtered_candidates = []
        # call service and fetch candidates
        # TODO(jdandrea): Get rid of the SDN-C reference (outside of plugin!)
        if controller == "SDN-C":
            results = self.sc_ext_manager.map_method(
                'filter_candidates',
                request=request,
                candidate_list=candidate_list,
                constraint_name=constraint_name,
                constraint_type=constraint_type
            )
            if results and len(results) > 0:
                filtered_candidates = results[0]
            else:
                # FIX: logging.warn is a deprecated alias of warning.
                LOG.warning(
                    _LW("No candidates returned by service "
                        "controller: {}; may be a new service "
                        "instantiation.").format(controller))
        else:
            LOG.error(_LE("Unknown service controller: {}").format(controller))
        # if response from service controller is empty
        if filtered_candidates is None:
            LOG.error("No capacity found from SDN-GC for candidates: "
                      "{}".format(candidate_list))
            return {'response': [], 'error': error}
        else:
            LOG.debug("Filtered candidates: {}".format(filtered_candidates))
            candidate_list = [c for c in candidate_list
                              if c in filtered_candidates]
            return {'response': candidate_list, 'error': error}

    def get_candidate_discard_set(self, value, candidate_list, value_attrib):
        """Return candidate_ids failing an all/any match on value_attrib.

        :param value: dict with an "all" or "any" key mapping to a list of
            acceptable values for the attribute
        :param candidate_list: candidates to evaluate
        :param value_attrib: candidate key to compare against the values
        """
        discard_set = set()
        value_dict = value
        value_condition = ''
        # BUG FIX: value_list must be initialized; the original raised
        # UnboundLocalError when value_dict was falsy or contained neither
        # "all" nor "any".
        value_list = []
        if value_dict:
            if "all" in value_dict:
                value_list = value_dict.get("all")
                value_condition = "all"
            elif "any" in value_dict:
                value_list = value_dict.get("any")
                value_condition = "any"

        if not value_list:
            return discard_set

        for candidate in candidate_list:
            c_any = False
            c_all = True
            for value in value_list:
                if candidate.get(value_attrib) == value:
                    c_any = True  # include if any one is met
                elif candidate.get(value_attrib) != value:
                    c_all = False  # discard even if one is not met
            if value_condition == 'any' and not c_any:
                discard_set.add(candidate.get("candidate_id"))
            elif value_condition == 'all' and not c_all:
                discard_set.add(candidate.get("candidate_id"))
        return discard_set

    def get_inventory_group_candidates(self, ctx, arg):
        """Keep only candidates paired with resolved_candidate in an
        inventory group (DHV_VVIG_PAIR) reported by the provider."""
        candidate_list = arg["candidate_list"]
        resolved_candidate = arg["resolved_candidate"]
        candidate_names = []
        error = False
        service_description = 'DHV_VVIG_PAIR'
        results = self.ip_ext_manager.map_method(
            'get_inventory_group_pairs',
            service_description=service_description
        )
        if not results or len(results) < 1:
            LOG.error(
                _LE("Empty inventory group response for service: {}").format(
                    service_description))
            error = True
        else:
            pairs = results[0]
            if not pairs or len(pairs) < 1:
                LOG.error(
                    _LE("No inventory group candidates found for service: {}, "
                        "inventory provider: {}").format(
                        service_description, self.ip_ext_manager.names()[0]))
                error = True
            else:
                LOG.debug(
                    "Inventory group pairs: {}, service: {}, "
                    "inventory provider: {}".format(
                        pairs, service_description,
                        self.ip_ext_manager.names()[0]))
                # Collect the partner of resolved_candidate in each pair.
                for pair in pairs:
                    if resolved_candidate.get("candidate_id") == pair[0]:
                        candidate_names.append(pair[1])
                    elif resolved_candidate.get("candidate_id") == pair[1]:
                        candidate_names.append(pair[0])

            candidate_list = [c for c in candidate_list
                              if c["candidate_id"] in candidate_names]
            LOG.info(
                _LI("Inventory group candidates: {}, service: {}, "
                    "inventory provider: {}").format(
                    candidate_list, service_description,
                    self.ip_ext_manager.names()[0]))
        return {'response': candidate_list, 'error': error}

    def get_candidates_by_attributes(self, ctx, arg):
        """Discard candidates whose attributes fail the demand's
        'evaluate' property checks (network_roles/complex/country/...)."""
        candidate_list = arg["candidate_list"]
        # demand_name = arg["demand_name"]
        properties = arg["properties"]
        discard_set = set()

        attributes_to_evaluate = properties.get('evaluate')
        for attrib, value in attributes_to_evaluate.items():
            if value == '':
                continue
            if attrib == 'network_roles':
                role_candidates = dict()
                role_list = []
                nrc_dict = value
                role_condition = ''
                if nrc_dict:
                    if "all" in nrc_dict:
                        role_list = nrc_dict.get("all")
                        role_condition = "all"
                    elif "any" in nrc_dict:
                        role_list = nrc_dict.get("any")
                        role_condition = "any"

                # if the role_list is empty do nothing
                if not role_list or role_list == '':
                    LOG.error(
                        _LE("No roles available, "
                            "inventory provider: {}").format(
                            self.ip_ext_manager.names()[0]))
                    continue
                for role in role_list:
                    # query inventory provider to check if
                    # the candidate is in role
                    results = self.ip_ext_manager.map_method(
                        'check_network_roles',
                        network_role_id=role
                    )
                    if not results or len(results) < 1:
                        LOG.error(
                            _LE("Empty response from inventory "
                                "provider {} for network role {}").format(
                                self.ip_ext_manager.names()[0], role))
                        continue
                    region_ids = results[0]
                    if not region_ids:
                        LOG.error(
                            _LE("No candidates from inventory provider {} "
                                "for network role {}").format(
                                self.ip_ext_manager.names()[0], role))
                        continue
                    LOG.debug(
                        "Network role candidates: {}, role: {},"
                        "inventory provider: {}".format(
                            region_ids, role,
                            self.ip_ext_manager.names()[0]))
                    role_candidates[role] = region_ids

                # find candidates that meet conditions
                for candidate in candidate_list:
                    # perform this check only for cloud candidates
                    if candidate["inventory_type"] != "cloud":
                        continue
                    c_any = False
                    c_all = True
                    for role in role_list:
                        if role not in role_candidates:
                            c_all = False
                            continue
                        rc = role_candidates.get(role)
                        if rc and candidate.get("candidate_id") not in rc:
                            c_all = False
                            # discard even if one role is not met
                        elif rc and candidate.get("candidate_id") in rc:
                            c_any = True
                            # include if any one role is met
                    if role_condition == 'any' and not c_any:
                        discard_set.add(candidate.get("candidate_id"))
                    elif role_condition == 'all' and not c_all:
                        discard_set.add(candidate.get("candidate_id"))

            elif attrib == 'complex':
                v_discard_set = \
                    self.get_candidate_discard_set(
                        value=value,
                        candidate_list=candidate_list,
                        value_attrib="complex_name")
                discard_set.update(v_discard_set)
            elif attrib == "country":
                v_discard_set = \
                    self.get_candidate_discard_set(
                        value=value,
                        candidate_list=candidate_list,
                        value_attrib="country")
                discard_set.update(v_discard_set)
            elif attrib == "state":
                v_discard_set = \
                    self.get_candidate_discard_set(
                        value=value,
                        candidate_list=candidate_list,
                        value_attrib="state")
                discard_set.update(v_discard_set)
            elif attrib == "region":
                v_discard_set = \
                    self.get_candidate_discard_set(
                        value=value,
                        candidate_list=candidate_list,
                        value_attrib="region")
                discard_set.update(v_discard_set)

        # return candidates not in discard set
        candidate_list[:] = [c for c in candidate_list
                             if c['candidate_id'] not in discard_set]
        LOG.info(
            "Available candidates after attribute checks: {}, "
            "inventory provider: {}".format(
                candidate_list, self.ip_ext_manager.names()[0]))
        return {'response': candidate_list, 'error': False}

    def resolve_demands(self, ctx, arg):
        """Delegate demand resolution to the inventory provider plugin."""
        error = False
        demands = arg.get('demands')
        resolved_demands = None
        results = self.ip_ext_manager.map_method(
            'resolve_demands',
            demands
        )
        if results and len(results) > 0:
            resolved_demands = results[0]
        else:
            error = True

        return {'response': {'resolved_demands': resolved_demands},
                'error': error}

    def resolve_location(self, ctx, arg):
        """Resolve a host_name or clli_code to a location via the
        inventory provider plugin."""
        error = False
        resolved_location = None

        host_name = arg.get('host_name')
        clli_code = arg.get('clli_code')

        # BUG FIX: results must be initialized; the original raised
        # UnboundLocalError when neither host_name nor clli_code was given.
        results = None
        if host_name:
            results = self.ip_ext_manager.map_method(
                'resolve_host_location',
                host_name
            )
        elif clli_code:
            results = self.ip_ext_manager.map_method(
                'resolve_clli_location',
                clli_code
            )
        else:
            # unknown location response
            LOG.error(_LE("Unknown location type from the input template."
                          "Expected location types are host_name"
                          " or clli_code."))

        if results and len(results) > 0:
            resolved_location = results[0]
        else:
            error = True
        return {'response': {'resolved_location': resolved_location},
                'error': error}

    def call_reservation_operation(self, ctx, arg):
        """Invoke a reservation operation on the service controller."""
        result = True
        reserved_candidates = None
        method = arg["method"]
        candidate_list = arg["candidate_list"]
        reservation_name = arg["reservation_name"]
        reservation_type = arg["reservation_type"]
        controller = arg["controller"]
        request = arg["request"]

        if controller == "SDN-C":
            results = self.sc_ext_manager.map_method(
                'call_reservation_operation',
                method=method,
                candidate_list=candidate_list,
                reservation_name=reservation_name,
                reservation_type=reservation_type,
                request=request
            )
            if results and len(results) > 0:
                reserved_candidates = results[0]
        else:
            LOG.error(_LE("Unknown service controller: {}").format(controller))
        if not reserved_candidates:
            result = False
            # NOTE(review): a _LW (warning) marker logged at debug level —
            # confirm whether this should be LOG.warning.
            LOG.debug(
                _LW("Unable to {} for "
                    "candidate {}.").format(method, reserved_candidates))
            return {'response': result,
                    'error': not result}
        else:
            LOG.debug("{} for the candidate: "
                      "{}".format(method, reserved_candidates))
            return {'response': result,
                    'error': not result}

    # def do_something(self, ctx, arg):
    #     """RPC endpoint for data messages
    #
    #     When another service sends a notification over the message
    #     bus, this method receives it.
+ # """ + # LOG.debug("Got a message!") + # + # res = { + # 'note': 'do_something called!', + # 'arg': str(arg), + # } + # return {'response': res, 'error': False} diff --git a/conductor/conductor/i18n.py b/conductor/conductor/i18n.py new file mode 100644 index 0000000..700e083 --- /dev/null +++ b/conductor/conductor/i18n.py @@ -0,0 +1,59 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +"""oslo.i18n integration module. + +See http://docs.openstack.org/developer/oslo.i18n/usage.html + +""" + +import oslo_i18n + +DOMAIN = "conductor" + +_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# The contextual translation function using the name "_C" +# requires oslo.i18n >=2.1.0 +_C = _translators.contextual_form + +# The plural translation function using the name "_P" +# requires oslo.i18n >=2.1.0 +_P = _translators.plural_form + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. 
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical


def translate(value, user_locale):
    """Translate a (possibly lazily-translated) value for the given locale."""
    return oslo_i18n.translate(value, user_locale)


def get_available_languages():
    """Return the languages with translations for the conductor domain."""
    return oslo_i18n.get_available_languages(DOMAIN)
# ---------------------------------------------------------------------------
# diff --git a/conductor/conductor/messaging.py
#          b/conductor/conductor/messaging.py
# new file mode 100644, index 0000000..84a34a9
# ---------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

from oslo_config import cfg

from conductor.common import music
from conductor.common.music.messaging import component

DEFAULT_URL = "__default__"
# Cache of Music "transport" objects keyed by URL (DEFAULT_URL when None).
TRANSPORTS = {}

CONF = cfg.CONF

# Pull in messaging server opts. We use them here.
MESSAGING_SERVER_OPTS = component.MESSAGING_SERVER_OPTS
CONF.register_opts(MESSAGING_SERVER_OPTS, group='messaging_server')


def setup():
    """Messaging setup, if any"""
    # oslo_messaging.set_transport_defaults('conductor')
    pass


# TODO(jdandrea): Remove Music-specific aspects (keyspace -> namespace?)
# TODO(jdandrea): Make Music an oslo rpc backend (difficulty level: high?)
def get_transport(conf, url=None, optional=False, cache=True):
    """Initialise the Music messaging layer.

    Returns a cached Music API handle for the given url (or the default
    one), creating it and its keyspace on first use. With optional=True
    and no explicit url, failures return None instead of raising.
    """
    global TRANSPORTS
    cache_key = url or DEFAULT_URL
    transport = TRANSPORTS.get(cache_key)

    if not transport or not cache:
        try:
            # "Somebody set up us the API." ;)
            # Yes, we know an API is not a transport. Cognitive dissonance FTW!
            # TODO(jdandrea): try/except to catch problems
            keyspace = conf.messaging_server.keyspace
            transport = music.api.API()
            transport.keyspace_create(keyspace=keyspace)
        except Exception:
            if not optional or url:
                # NOTE(sileht): oslo_messaging is configured but unloadable
                # so reraise the exception
                raise
            return None
        else:
            if cache:
                TRANSPORTS[cache_key] = transport
    return transport


def cleanup():
    """Cleanup the Music messaging layer."""
    global TRANSPORTS
    # Bug fix: the original deleted keys while iterating over the dict,
    # which raises "RuntimeError: dictionary changed size during
    # iteration" on Python 3. clear() drops all entries safely.
    TRANSPORTS.clear()
# ---------------------------------------------------------------------------
# diff --git a/conductor/conductor/middleware.py
#          b/conductor/conductor/middleware.py
# new file mode 100644, index 0000000..5476ff3
# ---------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# +# ------------------------------------------------------------------------- +# + +""" Middleware """ + +# from oslo_config import cfg +# import oslo_messaging +# +# from conductor.agent import plugin_base +# from conductor import sample +# +# OPTS = [ +# cfg.MultiStrOpt('http_control_exchanges', +# default=[cfg.CONF.nova_control_exchange, +# cfg.CONF.glance_control_exchange, +# cfg.CONF.neutron_control_exchange, +# cfg.CONF.cinder_control_exchange], +# help="Exchanges name to listen for notifications."), +# ] +# +# cfg.CONF.register_opts(OPTS) +# +# +# class HTTPRequest(plugin_base.NotificationBase, +# plugin_base.NonMetricNotificationBase): +# event_types = ['http.request'] +# +# def get_targets(self, conf): +# """Return a sequence of oslo_messaging.Target +# This sequence is defining the exchange and topics to be connected for +# this plugin. +# """ +# return [oslo_messaging.Target(topic=topic, exchange=exchange) +# for topic in self.get_notification_topics(conf) +# for exchange in conf.http_control_exchanges] +# +# def process_notification(self, message): +# yield sample.Sample.from_notification( +# name=message['event_type'], +# type=sample.TYPE_DELTA, +# volume=1, +# unit=message['event_type'].split('.')[1], +# user_id=message['payload']['request'].get('HTTP_X_USER_ID'), +# project_id=message['payload']['request'].get('HTTP_X_PROJECT_ID'), +# resource_id=message['payload']['request'].get( +# 'HTTP_X_SERVICE_NAME'), +# message=message) +# +# +# class HTTPResponse(HTTPRequest): +# event_types = ['http.response'] diff --git a/conductor/conductor/opts.py b/conductor/conductor/opts.py new file mode 100644 index 0000000..bb18ac2 --- /dev/null +++ b/conductor/conductor/opts.py @@ -0,0 +1,63 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

import itertools

import conductor.api.app
import conductor.common.music.api
import conductor.common.music.messaging.component
import conductor.conf.inventory_provider
import conductor.conf.service_controller
import conductor.controller.service
import conductor.controller.translator_svc
import conductor.data.plugins.inventory_provider.aai
import conductor.data.plugins.service_controller.sdnc
import conductor.reservation.service
import conductor.service
import conductor.solver.service


def list_opts():
    """Enumerate (group, options) pairs for oslo.config discovery.

    Exposed through the oslo.config.opts entry point so that sample
    configuration generation and validation can see every option the
    conductor services register.
    """
    return [
        ('DEFAULT', itertools.chain(
            conductor.api.app.OPTS,
            conductor.service.OPTS)),
        ('api', conductor.api.app.API_OPTS),
        ('controller', itertools.chain(
            conductor.controller.service.CONTROLLER_OPTS,
            conductor.controller.translator_svc.CONTROLLER_OPTS)),
        # ('data', conductor.data.plugins.inventory_provider.aai.DATA_OPTS),
        # NOTE(review): itertools.chain() over a single iterable was a
        # no-op wrapper; the option lists are now passed through directly.
        ('inventory_provider',
         conductor.conf.inventory_provider.INV_PROVIDER_EXT_MANAGER_OPTS),
        ('aai', conductor.data.plugins.inventory_provider.aai.AAI_OPTS),
        ('service_controller',
         conductor.conf.service_controller.SVC_CONTROLLER_EXT_MANAGER_OPTS),
        ('sdnc', conductor.data.plugins.service_controller.sdnc.SDNC_OPTS),
        ('messaging_server',
         conductor.common.music.messaging.component.MESSAGING_SERVER_OPTS),
        ('music_api', conductor.common.music.api.MUSIC_API_OPTS),
        ('solver', conductor.solver.service.SOLVER_OPTS),
        ('reservation', conductor.reservation.service.reservation_OPTS),
    ]
# ---------------------------------------------------------------------------
# diff --git a/conductor/conductor/reservation/__init__.py
#          b/conductor/conductor/reservation/__init__.py
# new file mode 100644, index 0000000..e615a9c
# ---------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

from .service import ReservationServiceLauncher  # noqa: F401
# ---------------------------------------------------------------------------
# diff --git a/conductor/conductor/reservation/service.py
#          b/conductor/conductor/reservation/service.py
# new file mode 100644, index 0000000..c2b0ba8
# ---------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

import cotyledon
from oslo_config import cfg
from oslo_log import log

from conductor.common.models import plan
from conductor.common.music import api
from conductor.common.music import messaging as music_messaging
from conductor.common.music.model import base
from conductor.i18n import _LE, _LI
from conductor import messaging
from conductor import service

LOG = log.getLogger(__name__)

CONF = cfg.CONF

reservation_OPTS = [
    cfg.IntOpt('workers',
               default=1,
               min=1,
               help='Number of workers for reservation service. '
                    'Default value is 1.'),
    cfg.IntOpt('reserve_retries',
               default=3,
               help='Number of times reservation/release '
                    'should be attempted.'),
    cfg.IntOpt('reserve_counter',
               default=3,
               help='Number of times a plan should'
                    'be attempted to reserve.'),
    cfg.BoolOpt('concurrent',
                default=False,
                help='Set to True when reservation will run in active-active '
                     'mode. When set to False, reservation will restart any '
                     'orphaned reserving requests at startup.'),
]

CONF.register_opts(reservation_OPTS, group='reservation')

# Pull in service opts. We use them here.
OPTS = service.OPTS
CONF.register_opts(OPTS)


class ReservationServiceLauncher(object):
    """Launcher for the reservation service.

    Creates the Music keyspace and a keyspace-bound Plan model, then
    spawns ReservationService workers under a cotyledon ServiceManager.
    """

    def __init__(self, conf):
        self.conf = conf

        # Set up Music access.
        self.music = api.API()
        self.music.keyspace_create(keyspace=conf.keyspace)

        # Dynamically create a plan class for the specified keyspace
        self.Plan = base.create_dynamic_model(
            keyspace=conf.keyspace, baseclass=plan.Plan, classname="Plan")

        if not self.Plan:
            # NOTE(review): a bare `raise` with no active exception raises
            # "RuntimeError: No active exception to re-raise" — consider an
            # explicit exception type with a message here.
            raise

    def run(self):
        # Each worker receives the dynamically-built Plan class.
        kwargs = {'plan_class': self.Plan}
        svcmgr = cotyledon.ServiceManager()
        svcmgr.add(ReservationService,
                   workers=self.conf.reservation.workers,
                   args=(self.conf,), kwargs=kwargs)
        svcmgr.run()


class ReservationService(cotyledon.Service):
    """reservation service.

    Worker that polls Music for SOLVED plans, reserves the selected
    candidates through the data service, and rolls reservations back
    on failure.
    """

    # This will appear in 'ps xaf'
    name = "Conductor Reservation"

    def __init__(self, worker_id, conf, **kwargs):
        """Initializer"""
        LOG.debug("%s" % self.__class__.__name__)
        super(ReservationService, self).__init__(worker_id)
        self._init(conf, **kwargs)
        self.running = True

    def _init(self, conf, **kwargs):
        """Set up the necessary ingredients."""
        self.conf = conf
        self.kwargs = kwargs

        # Plan model class created by the launcher for this keyspace.
        self.Plan = kwargs.get('plan_class')

        # Set up the RPC service(s) we want to talk to.
        self.data_service = self.setup_rpc(conf, "data")

        # Set up Music access.
        self.music = api.API()

        # Number of retries for reservation/release
        self.reservation_retries = self.conf.reservation.reserve_retries
        self.reservation_counter = self.conf.reservation.reserve_counter

        # In single-instance mode, reclaim plans stuck in RESERVING
        # (e.g. after a crash) so they get reserved again.
        if not self.conf.reservation.concurrent:
            self._reset_reserving_status()

    def _gracefully_stop(self):
        """Gracefully stop working on things"""
        pass

    def _reset_reserving_status(self):
        """Reset plans being reserved so they can be reserved again.

        Use this only when the reservation service is not running concurrently.
        """
        plans = self.Plan.query.all()
        for the_plan in plans:
            if the_plan.status == self.Plan.RESERVING:
                the_plan.status = self.Plan.SOLVED
                the_plan.update()

    def _restart(self):
        """Prepare to restart the service"""
        pass

    def setup_rpc(self, conf, topic):
        """Set up the RPC Client"""
        # TODO(jdandrea): Put this pattern inside music_messaging?
        transport = messaging.get_transport(conf=conf)
        target = music_messaging.Target(topic=topic)
        client = music_messaging.RPCClient(conf=conf,
                                           transport=transport,
                                           target=target)
        return client

    def try_reservation_call(self, method, candidate_list,
                             reservation_name, reservation_type,
                             controller, request):
        # Call data service for reservation
        # need to do this for self.reserve_retries times
        # Returns True on the first successful attempt, False after
        # self.reservation_retries failed attempts.
        ctxt = {}
        args = {'method': method,
                'candidate_list': candidate_list,
                'reservation_name': reservation_name,
                'reservation_type': reservation_type,
                'controller': controller,
                'request': request
                }

        method_name = "call_reservation_operation"
        attempt_count = 1
        while attempt_count <= self.reservation_retries:
            is_success = self.data_service.call(ctxt=ctxt,
                                                method=method_name,
                                                args=args)
            LOG.debug("Attempt #{} calling method {} for candidate "
                      "{} - response: {}".format(attempt_count,
                                                 method,
                                                 candidate_list,
                                                 is_success))
            if is_success:
                return True
            attempt_count += 1
        return False

    def rollback_reservation(self, reservation_list):
        """Function to rollback(release) reservations"""
        # TODO(snarayanan): Need to test this once the API is ready
        # Releases each reservation in order; stops and returns False on
        # the first release that cannot be completed.
        for reservation in reservation_list:
            candidate_list = reservation['candidate_list']
            reservation_name = reservation['reservation_name']
            reservation_type = reservation['reservation_type']
            controller = reservation['controller']
            request = reservation['request']

            is_success = self.try_reservation_call(
                method="release",
                candidate_list=candidate_list,
                reservation_name=reservation_name,
                reservation_type=reservation_type,
                controller=controller,
                request=request
            )
            if not is_success:
                # rollback failed report error to SDNC
                message = _LE("Unable to release reservation "
                              "{}").format(reservation)
                LOG.error(message)
                return False
            # move to the next reserved candidate
        return True

    def run(self):
        """Run

        Main control loop: pick the first SOLVED plan, mark it
        RESERVING, reserve every demand's selected candidate via the
        data service, and mark the plan DONE — or roll back and mark it
        TRANSLATED/ERROR on failure.
        """
        LOG.debug("%s" % self.__class__.__name__)
        # TODO(snarayanan): This is really meant to be a control loop
        # As long as self.running is true, we process another request.
        while self.running:
            # plans = Plan.query().all()
            # Find the first plan with a status of SOLVED.
            # Change its status to RESERVING.

            solution = None
            translation = None
            # requests_to_reserve = dict()
            plans = self.Plan.query.all()
            found_solved_template = False

            for p in plans:
                if p.status == self.Plan.SOLVED:
                    solution = p.solution
                    translation = p.translation
                    found_solved_template = True
                    break
            if found_solved_template and not solution:
                # SOLVED plan without a solution payload is inconsistent.
                message = _LE("Plan {} status is solved, yet "
                              "the solution wasn't found").format(p.id)
                LOG.error(message)
                p.status = self.Plan.ERROR
                p.message = message
                p.update()
                continue  # continue looping
            elif not solution:
                continue  # continue looping

            # update status to reserving
            p.status = self.Plan.RESERVING
            p.update()

            # begin reservations
            # if plan needs reservation proceed with reservation
            # else set status to done.
            reservations = None
            if translation:
                conductor_solver = translation.get("conductor_solver")
                if conductor_solver:
                    reservations = conductor_solver.get("reservations")
                else:
                    LOG.error("no conductor_solver in "
                              "translation for Plan {}".format(p.id))

            if reservations:
                # NOTE(review): this increments the counter only on the
                # in-memory translation dict; it does not appear to be
                # persisted back to the plan — confirm intended.
                counter = reservations.get("counter") + 1
                reservations['counter'] = counter
                if counter <= self.reservation_counter:
                    recommendations = solution.get("recommendations")
                    reservation_list = list()

                    for reservation, resource in reservations.get("demands",
                                                                  {}).items():
                        candidates = list()
                        reservation_demand = resource.get("demand")
                        reservation_name = resource.get("name")
                        reservation_type = resource.get("type")

                        reservation_properties = resource.get("properties")
                        # NOTE(review): if "properties" is missing,
                        # controller/request are left over from the previous
                        # iteration (or unbound on the first) — potential
                        # UnboundLocalError; confirm properties is mandatory.
                        if reservation_properties:
                            controller = reservation_properties.get(
                                "controller")
                            request = reservation_properties.get("request")

                        # Match this demand's recommendation back to the
                        # full candidate record in the translation.
                        for recommendation in recommendations:
                            for demand, r_resource in recommendation.items():
                                if demand == reservation_demand:
                                    # get selected candidate from translation
                                    selected_candidate_id = \
                                        r_resource.get("candidate")\
                                        .get("candidate_id")
                                    demands = \
                                        translation.get("conductor_solver")\
                                        .get("demands")
                                    for demand_name, d_resource in \
                                            demands.items():
                                        if demand_name == demand:
                                            for candidate in d_resource\
                                                    .get("candidates"):
                                                if candidate\
                                                    .get("candidate_id") ==\
                                                        selected_candidate_id:
                                                    candidates\
                                                        .append(candidate)

                        is_success = self.try_reservation_call(
                            method="reserve",
                            candidate_list=candidates,
                            reservation_name=reservation_name,
                            reservation_type=reservation_type,
                            controller=controller,
                            request=request)

                        # if reservation succeeds continue with next candidate
                        if is_success:
                            curr_reservation = dict()
                            curr_reservation['candidate_list'] = candidates
                            curr_reservation['reservation_name'] = \
                                reservation_name
                            curr_reservation['reservation_type'] = \
                                reservation_type
                            curr_reservation['controller'] = controller
                            curr_reservation['request'] = request
                            reservation_list.append(curr_reservation)
                        else:
                            # begin roll back of all reserved resources on
                            # the first failed reservation
                            rollback_status = \
                                self.rollback_reservation(reservation_list)
                            # statuses
                            if rollback_status:
                                # released all reservations,
                                # move plan to translated
                                p.status = self.Plan.TRANSLATED
                                p.update()
                                del reservation_list[:]
                            else:
                                LOG.error("Reservation rollback failed")
                                p.status = self.Plan.ERROR
                                p.message = "Reservation release failed"
                                p.update()
                            break  # reservation failed

                    continue
                    # continue with reserving the next candidate
                else:
                    # Exhausted the per-plan reservation attempt budget.
                    # NOTE(review): adjacent string literals concatenate to
                    # "...unable to makereservation" — missing space in the
                    # log message.
                    LOG.error("Tried {} times. Plan {} is unable to make"
                              "reservation "
                              .format(self.reservation_counter, p.id))
                    p.status = self.Plan.ERROR
                    p.message = "Reservation failed"
                    p.update()
                    continue

            # verify if all candidates have been reserved
            if p.status == self.Plan.RESERVING:
                # all reservations succeeded.
                LOG.info(_LI("Plan {} Reservation complete").
                         format(p.id))
                LOG.debug("Plan {} Reservation complete".format(p.id))
                p.status = self.Plan.DONE
                p.update()

            continue
            # done reserving continue to loop

    def terminate(self):
        """Terminate"""
        LOG.debug("%s" % self.__class__.__name__)
        self.running = False
        self._gracefully_stop()
        super(ReservationService, self).terminate()

    def reload(self):
        """Reload"""
        LOG.debug("%s" % self.__class__.__name__)
        self._restart()
# ---------------------------------------------------------------------------
# diff --git a/conductor/conductor/service.py b/conductor/conductor/service.py
# new file mode 100644, index 0000000..5d86cce
# ---------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#

# import socket
import sys

# from keystoneauth1 import loading as ka_loading
from oslo_config import cfg
import oslo_i18n
from oslo_log import log
# from oslo_policy import opts as policy_opts
from oslo_reports import guru_meditation_report as gmr

from conductor.conf import defaults
# from conductor import keystone_client
from conductor import messaging
from conductor import version

OPTS = [
    # cfg.StrOpt('host',
    #            default=socket.gethostname(),
    #            sample_default='<your_hostname>',
    #            help='Name of this node, which must be valid in an AMQP '
    #                 'key. Can be an opaque identifier. For ZeroMQ only, must '
    #                 'be a valid host name, FQDN, or IP address.'),
    # cfg.IntOpt('http_timeout',
    #            default=600,
    #            help='Timeout seconds for HTTP requests. Set it to None to '
    #                 'disable timeout.'),
    cfg.StrOpt('keyspace',
               default='conductor',
               help='Music keyspace for content'),
]
cfg.CONF.register_opts(OPTS)

# DATA_OPT = cfg.IntOpt('workers',
#                       default=1,
#                       min=1,
#                       help='Number of workers for data service, '
#                            'default value is 1.')
# cfg.CONF.register_opt(DATA_OPT, 'data')
#
# PARSER_OPT = cfg.IntOpt('workers',
#                         default=1,
#                         min=1,
#                         help='Number of workers for parser service. '
#                              'default value is 1.')
# cfg.CONF.register_opt(PARSER_OPT, 'parser')
#
# SOLVER_OPT = cfg.IntOpt('workers',
#                         default=1,
#                         min=1,
#                         help='Number of workers for solver service. '
#                              'default value is 1.')
# cfg.CONF.register_opt(SOLVER_OPT, 'solver')

# keystone_client.register_keystoneauth_opts(cfg.CONF)


def prepare_service(argv=None, config_files=None):
    """Prepare common service plumbing and return the parsed config.

    Enables lazy i18n, registers and configures oslo.log, sets CORS
    defaults, parses the command line / config files, and sets up the
    guru meditation report and messaging layer.
    """
    if argv is None:
        argv = sys.argv

    # FIXME(sileht): Use ConfigOpts() instead
    conf = cfg.CONF

    oslo_i18n.enable_lazy()
    # Log options must be registered before conf() parses the CLI.
    log.register_options(conf)
    log_levels = (conf.default_log_levels +
                  ['futurist=INFO'])
    log.set_defaults(default_log_levels=log_levels)
    defaults.set_cors_middleware_defaults()
    # policy_opts.set_defaults(conf)

    conf(argv[1:], project='conductor', validate_default_values=True,
         version=version.version_info.version_string(),
         default_config_files=config_files)

    # ka_loading.load_auth_from_conf_options(conf, "service_credentials")

    log.setup(conf, 'conductor')
    # NOTE(liusheng): guru cannot run with service under apache daemon, so when
    # conductor-api running with mod_wsgi, the argv is [], we don't start
    # guru.
    if argv:
        gmr.TextGuruMeditation.setup_autorun(version)
    messaging.setup()
    return conf
# ---------------------------------------------------------------------------
# diff --git a/conductor/conductor/solver/__init__.py
#          b/conductor/conductor/solver/__init__.py
# new file mode 100644, index 0000000..ff501ef
# ---------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# +# ------------------------------------------------------------------------- +# + +from .service import SolverServiceLauncher # noqa: F401 diff --git a/conductor/conductor/solver/optimizer/__init__.py b/conductor/conductor/solver/optimizer/__init__.py new file mode 100755 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/solver/optimizer/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/solver/optimizer/best_first.py b/conductor/conductor/solver/optimizer/best_first.py new file mode 100755 index 0000000..65e435d --- /dev/null +++ b/conductor/conductor/solver/optimizer/best_first.py @@ -0,0 +1,163 @@ +#!/bin/python +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
class BestFirst(search.Search):
    """Best-first search over decision paths.

    First computes a greedy (fit-first) solution, which serves both as
    a fallback answer and as a pruning bound; then explores decision
    paths in increasing order of objective value, discarding paths that
    cannot beat the greedy solution.
    """

    def __init__(self, conf):
        search.Search.__init__(self, conf)

    def search(self, _demand_list, _objective):
        """Search for a decision path that covers every demand.

        Returns the first fully explored path, the greedy heuristic
        solution if the open list is exhausted first, or None when no
        solution exists at all.
        """
        # Deep-copy the demand list because _search_by_fit_first pops
        # demands from the list it is given.
        dlist = copy.deepcopy(_demand_list)
        heuristic_solution = self._search_by_fit_first(dlist, _objective)
        if heuristic_solution is None:
            LOG.debug("no solution")
            return None

        open_list = []
        close_paths = {}

        # Create the root (empty) path and seed the open list with it.
        decision_path = dpath.DecisionPath()
        decision_path.set_decisions({})
        open_list.append(decision_path)

        while len(open_list) > 0:
            p = open_list.pop(0)

            # If p already covers every demand, the search is complete.
            unexplored_demand = self._get_new_demand(p, _demand_list)
            if unexplored_demand is None:
                return p

            p.current_demand = unexplored_demand

            msg = "demand = {}, decisions = {}, value = {}"
            LOG.debug(msg.format(p.current_demand.name,
                                 p.decision_id, p.total_value))

            # Constraint solving whittles the candidates for the demand.
            candidate_list = self._solve_constraints(p)
            if len(candidate_list) > 0:
                for candidate in candidate_list:
                    # Branch: one new path per candidate for this demand.
                    np = dpath.DecisionPath()
                    np.set_decisions(p.decisions)
                    np.decisions[p.current_demand.name] = candidate
                    _objective.compute(np)

                    valid_candidate = True

                    # Skip decisions that were already closed.
                    # NOTE(review): set_decision_id() stringifies its first
                    # argument, so passing the path object p keys the id on
                    # repr(p) — confirm whether p.current_demand.name was
                    # intended here.
                    np.set_decision_id(p, candidate.name)
                    if np.decision_id in close_paths:
                        valid_candidate = False

                    # Prune paths that cannot beat the greedy bound.
                    # TODO(gjung): how to know this is about min
                    if _objective.goal == "min":
                        if np.total_value >= heuristic_solution.total_value:
                            valid_candidate = False

                    if valid_candidate is True:
                        open_list.append(np)

                # Keep the open list ordered by objective value; sorting
                # once after all appends yields the same final order as
                # sorting after each one.
                open_list.sort(key=operator.attrgetter("total_value"))
            else:
                LOG.debug("no candidates")

            # Mark p as explored.
            close_paths[p.decision_id] = p

        return heuristic_solution

    def _get_new_demand(self, _p, _demand_list):
        """Return the first demand not yet decided in path _p, else None."""
        for demand in _demand_list:
            if demand.name not in _p.decisions:
                return demand

        return None

    def _search_by_fit_first(self, _demand_list, _objective):
        """Greedy fit-first search used to seed the pruning bound."""
        decision_path = dpath.DecisionPath()
        decision_path.set_decisions({})

        return self._find_current_best(_demand_list, _objective, decision_path)

    def _find_current_best(self, _demand_list, _objective, _decision_path):
        """Recursively assign the best-valued candidate to each demand.

        Pops demands off _demand_list; on rollback the popped demand is
        restored so sibling branches see an intact list.
        """
        if len(_demand_list) == 0:
            LOG.debug("search done")
            return _decision_path

        demand = _demand_list.pop(0)
        LOG.debug("demand = {}".format(demand.name))
        _decision_path.current_demand = demand
        candidate_list = self._solve_constraints(_decision_path)

        bound_value = 0.0
        if _objective.goal == "min":
            bound_value = sys.float_info.max

        while True:
            best_resource = None
            for candidate in candidate_list:
                _decision_path.decisions[demand.name] = candidate
                _objective.compute(_decision_path)
                if _objective.goal == "min":
                    if _decision_path.total_value < bound_value:
                        bound_value = _decision_path.total_value
                        best_resource = candidate

            if best_resource is None:
                LOG.debug("no resource, rollback")
                # BUGFIX: restore the popped demand so callers retrying a
                # different earlier choice still see this demand (mirrors
                # fit_first.FitFirst._find_current_best).
                _demand_list.insert(0, demand)
                return None
            else:
                _decision_path.decisions[demand.name] = best_resource
                _decision_path.total_value = bound_value
                decision_path = self._find_current_best(
                    _demand_list, _objective, _decision_path)
                if decision_path is None:
                    candidate_list.remove(best_resource)
                    # BUGFIX: relax the bound again; otherwise every
                    # remaining candidate (whose value is >= the removed
                    # best's) would be rejected and the rollback would
                    # cascade unnecessarily (mirrors fit_first).
                    if _objective.goal == "min":
                        bound_value = sys.float_info.max
                else:
                    return decision_path
class AccessDistance(constraint.Constraint):
    """Keeps only candidates within an air-distance bound of a location."""

    def __init__(self, _name, _type, _demand_list, _priority=0,
                 _comparison_operator=operator.le,
                 _threshold=None, _location=None):
        constraint.Constraint.__init__(
            self, _name, _type, _demand_list, _priority)

        # Distance bound, and the comparison function (a callable from
        # the operator module) applied as op(distance, threshold).
        self.distance_threshold = _threshold
        self.comparison_operator = _comparison_operator
        self.location = _location  # Location instance distances are measured from

    def solve(self, _decision_path, _candidate_list, _request):
        """Return the candidates whose air distance satisfies the bound."""
        if _candidate_list is None:
            LOG.debug("Empty candidate list, need to get " +
                      "the candidate list for the demand/service")
            return _candidate_list

        cei = _request.cei

        def _within_bound(candidate):
            # Air distance between the constraint's anchor location and
            # the candidate's location, as reported by the engine.
            distance = utils.compute_air_distance(
                self.location.value,
                cei.get_candidate_location(candidate))
            return self.comparison_operator(distance,
                                            self.distance_threshold)

        # Build a fresh list; the caller's list object is left untouched.
        return [c for c in _candidate_list if _within_bound(c)]
+# +# ------------------------------------------------------------------------- +# + + +# python imports + +# Conductor imports +from conductor.solver.optimizer.constraints import constraint + +# Third-party library imports +from oslo_log import log + +LOG = log.getLogger(__name__) + + +class Attribute(constraint.Constraint): + def __init__(self, _name, _type, _demand_list, _priority=0, + _properties=None): + constraint.Constraint.__init__( + self, _name, _type, _demand_list, _priority) + self.properties = _properties + + def solve(self, _decision_path, _candidate_list, _request): + # call conductor engine with request parameters + cei = _request.cei + demand_name = _decision_path.current_demand.name + select_list = cei.get_candidates_by_attributes(demand_name, + _candidate_list, + self.properties) + _candidate_list[:] = \ + [c for c in _candidate_list if c in select_list] + return _candidate_list diff --git a/conductor/conductor/solver/optimizer/constraints/cloud_distance.py b/conductor/conductor/solver/optimizer/constraints/cloud_distance.py new file mode 100755 index 0000000..1e862d4 --- /dev/null +++ b/conductor/conductor/solver/optimizer/constraints/cloud_distance.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class CloudDistance(constraint.Constraint):
    """Bounds the pairwise air distance between candidates of demands."""

    def __init__(self, _name, _type, _demand_list, _priority=0,
                 _comparison_operator=operator.le, _threshold=None):
        constraint.Constraint.__init__(
            self, _name, _type, _demand_list, _priority)
        # Distance bound, and the comparison function (from the operator
        # module) applied as op(distance, threshold).
        self.distance_threshold = _threshold
        self.comparison_operator = _comparison_operator
        if len(_demand_list) <= 1:
            LOG.debug("Insufficient number of demands.")
            raise ValueError("cloud distance constraint requires "
                             "at least two demands")

    def solve(self, _decision_path, _candidate_list, _request):
        """Drop candidates too far from previously decided candidates."""
        conflict_list = []

        solved_demands = list()   # demands that have been solved in the past
        decision_list = list()    # candidates chosen for those demands
        future_demands = list()   # demands that will be solved in future

        # Find previously made decisions for the constraint's demand list.
        for demand in self.demand_list:
            if demand in _decision_path.decisions:
                solved_demands.append(demand)
                # Only one candidate expected per demand in decision path.
                decision_list.append(_decision_path.decisions[demand])
            else:
                # Placeholder for any optimization we may want to do for
                # demands in the constraint's demand list that conductor
                # will solve in the future.
                future_demands.append(demand)

        # Hoisted out of the loops: cei is the same for every candidate.
        cei = _request.cei
        for candidate in _candidate_list:
            # The candidate must satisfy the distance bound against every
            # relevant decision made thus far.
            is_candidate = True
            for filtered_candidate in decision_list:
                if not self.comparison_operator(
                        utils.compute_air_distance(
                            cei.get_candidate_location(candidate),
                            cei.get_candidate_location(filtered_candidate)),
                        self.distance_threshold):
                    is_candidate = False
                    break  # one violation is enough to disqualify

            if not is_candidate:
                if candidate not in conflict_list:
                    conflict_list.append(candidate)

        _candidate_list = \
            [c for c in _candidate_list if c not in conflict_list]

        return _candidate_list
+ + Be sure to call this superclass when initializing. + """ + self.name = _name + self.constraint_type = _type + self.demand_list = _demand_list + self.check_priority = _priority + + @abc.abstractmethod + def solve(self, _decision_path, _candidate_list, _request): + """Solve. + + Implement the constraint solving in each inherited class, + depending on constraint type. + """ + + return _candidate_list diff --git a/conductor/conductor/solver/optimizer/constraints/inventory_group.py b/conductor/conductor/solver/optimizer/constraints/inventory_group.py new file mode 100755 index 0000000..f0f8089 --- /dev/null +++ b/conductor/conductor/solver/optimizer/constraints/inventory_group.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from oslo_log import log

# BUGFIX: use the package-qualified import (as attribute.py and
# service.py do); the bare "from constraint import Constraint" is a
# Python 2 implicit relative import and fails under Python 3.
from conductor.solver.optimizer.constraints import constraint

LOG = log.getLogger(__name__)


class InventoryGroup(constraint.Constraint):
    """Restricts a pair of demands to candidates of one inventory group."""

    def __init__(self, _name, _type, _demand_list, _priority=0):
        constraint.Constraint.__init__(
            self, _name, _type, _demand_list, _priority)
        if len(self.demand_list) != 2:
            # BUGFIX: the old message claimed "more than two demands",
            # but the check rejects any count other than exactly two.
            LOG.debug("Inventory group constraint requires exactly "
                      "two demands")
            raise ValueError("inventory group constraint requires "
                             "exactly two demands")

    def solve(self, _decision_path, _candidate_list, _request):
        """Filter candidates by group membership with the paired demand.

        If the other demand of the pair has not been decided yet, the
        candidate list is returned unchanged; otherwise only candidates
        sharing an inventory group with the resolved candidate survive.
        """
        # The other demand in the pair.
        other_demand = [d for d in self.demand_list if
                        d != _decision_path.current_demand.name][0]
        if other_demand not in _decision_path.decisions:
            LOG.debug("Other demand not yet resolved, " +
                      "return the current candidates")
            return _candidate_list

        # Expect only one candidate per demand in decision.
        resolved_candidate = _decision_path.decisions[other_demand]
        cei = _request.cei
        inventory_group_candidates = cei.get_inventory_group_candidates(
            _candidate_list,
            _decision_path.current_demand.name,
            resolved_candidate)
        # Build a fresh list containing only grouped candidates.
        _candidate_list = [candidate for candidate in _candidate_list if
                           (candidate in inventory_group_candidates)]

        return _candidate_list
+# +# ------------------------------------------------------------------------- +# + +from oslo_log import log + +from conductor.i18n import _LE +from conductor.solver.optimizer.constraints import constraint + +LOG = log.getLogger(__name__) + + +class Service(constraint.Constraint): + def __init__(self, _name, _type, _demand_list, _priority=0, + _controller=None, _request=None, _cost=None, + _inventory_type=None): + constraint.Constraint.__init__( + self, _name, _type, _demand_list, _priority) + if _controller is None: + LOG.debug("Provider URL not available") + raise ValueError + self.request = _request + self.controller = _controller + self.cost = _cost + self.inventory_type = _inventory_type + + def solve(self, _decision_path, _candidate_list, _request): + select_list = list() + candidates_to_check = list() + demand_name = _decision_path.current_demand.name + # service-check candidates of the same inventory type + # select candidate of all other types + for candidate in _candidate_list: + if self.inventory_type == "cloud": + if candidate["inventory_type"] == "cloud": + candidates_to_check.append(candidate) + else: + select_list.append(candidate) + elif self.inventory_type == "service": + if candidate["inventory_type"] == "service": + candidates_to_check.append(candidate) + else: + select_list.append(candidate) + # call conductor data with request parameters + if len(candidates_to_check) > 0: + cei = _request.cei + filtered_list = cei.get_candidates_from_service( + self.name, self.constraint_type, candidates_to_check, + self.controller, self.inventory_type, self.request, + self.cost, demand_name) + for c in filtered_list: + select_list.append(c) + else: + LOG.error(_LE("Constraint {} ({}) has no candidates of " + "inventory type {} for demand {}").format( + self.name, self.constraint_type, + self.inventory_type, demand_name) + ) + + _candidate_list[:] = [c for c in _candidate_list if c in select_list] + return _candidate_list diff --git 
import operator

from oslo_log import log

# BUGFIX: package-qualified import (matches attribute.py/service.py);
# the bare "from constraint import Constraint" implicit relative import
# breaks under Python 3.
from conductor.solver.optimizer.constraints import constraint

LOG = log.getLogger(__name__)


class Zone(constraint.Constraint):
    """Forces demands into the same or different zones of a category."""

    def __init__(self, _name, _type, _demand_list, _priority=0,
                 _qualifier=None, _category=None):
        constraint.Constraint.__init__(
            self, _name, _type, _demand_list, _priority)

        self.qualifier = _qualifier  # different or same
        self.category = _category  # disaster, region, or update
        # NOTE(review): stays None for any other qualifier, which would
        # raise a TypeError in solve() — confirm upstream validation.
        self.comparison_operator = None

        if self.qualifier == "same":
            self.comparison_operator = operator.eq
        elif self.qualifier == "different":
            self.comparison_operator = operator.ne

    def solve(self, _decision_path, _candidate_list, _request):
        """Drop candidates whose zone conflicts with prior decisions."""
        conflict_list = []

        # Previously made decisions for the constraint's demand list.
        decision_list = list()
        for demand in self.demand_list:
            if demand in _decision_path.decisions:
                decision_list.append(_decision_path.decisions[demand])

        # Hoisted out of the loops: cei is identical for every pair.
        cei = _request.cei
        for candidate in _candidate_list:
            # The candidate must satisfy the zone relation against every
            # relevant decision made thus far.
            is_candidate = True
            for filtered_candidate in decision_list:
                if not self.comparison_operator(
                        cei.get_candidate_zone(candidate, self.category),
                        cei.get_candidate_zone(filtered_candidate,
                                               self.category)):
                    is_candidate = False
                    break  # one conflict disqualifies the candidate

            if not is_candidate:
                if candidate not in conflict_list:
                    conflict_list.append(candidate)

        # Filter in place, preserving the caller's list object.
        _candidate_list[:] =\
            [c for c in _candidate_list if c not in conflict_list]

        return _candidate_list
+# +# ------------------------------------------------------------------------- +# + + +import copy + + +class DecisionPath(object): + + def __init__(self): + """local copy of decisions so far + + key = demand.name, value = region or service instance + """ + + self.decisions = None + + ''' to identify this decision path in the search ''' + self.decision_id = "" + + ''' current demand to be dealt with''' + self.current_demand = None + + ''' decision values so far ''' + self.cumulated_value = 0.0 + self.cumulated_cost = 0.0 + self.heuristic_to_go_value = 0.0 + self.heuristic_to_go_cost = 0.0 + # cumulated_value + heuristic_to_go_value (if exist) + self.total_value = 0.0 + # cumulated_cost + heuristic_to_go_cost (if exist) + self.total_cost = 0.0 + + def set_decisions(self, _prior_decisions): + self.decisions = copy.deepcopy(_prior_decisions) + + def set_decision_id(self, _dk, _rk): + self.decision_id += (str(_dk) + ":" + str(_rk) + ">") diff --git a/conductor/conductor/solver/optimizer/fit_first.py b/conductor/conductor/solver/optimizer/fit_first.py new file mode 100755 index 0000000..42d8fed --- /dev/null +++ b/conductor/conductor/solver/optimizer/fit_first.py @@ -0,0 +1,160 @@ +#!/bin/python +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class FitFirst(search.Search):
    """Depth-first search that commits the best-fit candidate per demand."""

    def __init__(self, conf):
        search.Search.__init__(self, conf)

    def search(self, _demand_list, _objective, _request):
        decision_path = dpath.DecisionPath()
        decision_path.set_decisions({})

        # Begin the recursive search
        return self._find_current_best(
            _demand_list, _objective, decision_path, _request)

    def _find_current_best(self, _demand_list, _objective,
                           _decision_path, _request):
        """Recursively pick the best candidate for each remaining demand.

        _demand_list is common across all recursions; a demand is popped
        on entry and reinserted on rollback.
        """
        if len(_demand_list) == 0:
            LOG.debug("search done")
            return _decision_path

        # Get next demand to resolve.
        demand = _demand_list.pop(0)
        LOG.debug("demand = {}".format(demand.name))
        _decision_path.current_demand = demand

        # Call constraints to whittle initial candidates;
        # candidate_list meets all constraints for the demand.
        candidate_list = self._solve_constraints(_decision_path, _request)

        # bound_value keeps track of the best value discovered thus far
        # for the _decision_path; for "min"-style goals it starts at a
        # very large value so any feasible candidate improves on it.
        bound_value = 0.0
        version_value = "0.0"

        if "min" in _objective.goal:
            bound_value = sys.float_info.max

        # Start recursive search.
        while True:
            best_resource = None
            # Find the candidate that optimizes the cost for the demand.
            # The candidate list can be empty if the constraints ruled
            # out all candidates.
            for candidate in candidate_list:
                _decision_path.decisions[demand.name] = candidate
                _objective.compute(_decision_path, _request)
                # This sets the total_value of the _decision_path
                # thus far up to the demand.
                if _objective.goal is None:
                    best_resource = candidate

                elif _objective.goal == "min_cloud_version":
                    # Convert the unicode to string.
                    # NOTE(review): .encode('utf-8') yields bytes on
                    # Python 3, where bytes.split('.') below in
                    # _compare_version would fail — confirm the intended
                    # runtime is Python 2 or drop the encode.
                    candidate_version = candidate \
                        .get("cloud_region_version").encode('utf-8')
                    # Prefer lower value; break value ties by the newer
                    # cloud region version.
                    if _decision_path.total_value < bound_value or \
                        (_decision_path.total_value == bound_value and
                         self._compare_version(candidate_version,
                                               version_value) > 0):
                        bound_value = _decision_path.total_value
                        version_value = candidate_version
                        best_resource = candidate

                elif _objective.goal == "min":
                    # A path value below the bound means a better
                    # candidate; tightening the bound ensures a future
                    # candidate is picked only if it beats this one.
                    if _decision_path.total_value < bound_value:
                        bound_value = _decision_path.total_value
                        best_resource = candidate

            # Rollback if we don't have any candidate picked for
            # the demand.
            if best_resource is None:
                LOG.debug("no resource, rollback")
                # Put the current demand (which failed to find a
                # candidate) back in the list so that it can be picked
                # up in the next iteration of the recursion.
                _demand_list.insert(0, demand)
                return None  # return None back to the recursion
            else:
                # Best resource is found; add to the decision path.
                _decision_path.decisions[demand.name] = best_resource
                _decision_path.total_value = bound_value

                # Recurse to find a candidate for the next demand.
                decision_path = self._find_current_best(
                    _demand_list, _objective, _decision_path, _request)

                # The point of return from the previous recursion.
                # If it returns no solution, rollback the current
                # best_resource and remove it from the candidates.
                if decision_path is None:
                    candidate_list.remove(best_resource)
                    # Reset the bound so the next-best remaining
                    # candidate can be picked (it necessarily has a
                    # value no smaller than the removed best's).
                    # BUGFIX: reset for any "min"-style goal — matching
                    # the initialization above — and also reset the
                    # version tiebreaker; previously only goal == "min"
                    # was reset, so a rollback under "min_cloud_version"
                    # could never select an alternative candidate.
                    if "min" in _objective.goal:
                        bound_value = sys.float_info.max
                        version_value = "0.0"
                else:
                    # A candidate was found for the demand and added to
                    # the decision path; return it up the recursion.
                    return decision_path

    def _compare_version(self, version1, version2):
        """Compare dotted numeric versions: 1, -1, or 0.

        Missing components are treated as 0 (so "1.2" == "1.2.0").
        """
        version1 = version1.split('.')
        version2 = version2.split('.')
        for i in range(max(len(version1), len(version2))):
            v1 = int(version1[i]) if i < len(version1) else 0
            v2 = int(version2[i]) if i < len(version2) else 0
            if v1 > v2:
                return 1
            elif v1 < v2:
                return -1
        return 0
from oslo_log import log
import sys

from conductor.solver.optimizer import decision_path as dpath
from conductor.solver.optimizer import search

LOG = log.getLogger(__name__)


class Greedy(search.Search):
    """Greedy search: per demand, keep the candidate that currently
    minimizes the objective, never backtracking.

    NOTE(review): Search._solve_constraints takes (_decision_path,
    _request) and Objective.compute takes (_decision_path, _request),
    but both are called below with a single argument, which would raise
    TypeError at runtime. Greedy appears unused — optimizer.py imports
    it only in commented-out code — confirm before relying on it.
    """

    def __init__(self, conf):
        # Configuration handling is delegated to the base Search class.
        search.Search.__init__(self, conf)

    def search(self, _demand_list, _objective):
        """Pick one candidate per demand in list order.

        Returns the populated DecisionPath, or None as soon as any
        demand ends up with no selectable candidate.
        """
        decision_path = dpath.DecisionPath()
        decision_path.set_decisions({})

        for demand in _demand_list:
            LOG.debug("demand = {}".format(demand.name))

            decision_path.current_demand = demand
            # NOTE(review): base-class signature also expects _request here.
            candidate_list = self._solve_constraints(decision_path)

            # For goal "min" the bound starts at +inf so any candidate
            # improves it; for any other goal the selection loop below
            # never picks anything and the search returns None.
            bound_value = 0.0
            if _objective.goal == "min":
                bound_value = sys.float_info.max

            best_resource = None
            for candidate in candidate_list:
                # Tentatively record the candidate, then evaluate the
                # objective over the path decided so far.
                decision_path.decisions[demand.name] = candidate
                _objective.compute(decision_path)
                if _objective.goal == "min":
                    if decision_path.total_value < bound_value:
                        bound_value = decision_path.total_value
                        best_resource = candidate

            if best_resource is not None:
                # Commit the winning candidate and its objective value.
                decision_path.decisions[demand.name] = best_resource
                decision_path.total_value = bound_value
            else:
                # No candidate survived; greedy search fails outright.
                return None

        return decision_path
from oslo_config import cfg
from oslo_log import log
import time

from conductor import service
# from conductor.solver.optimizer import decision_path as dpath
# from conductor.solver.optimizer import best_first
# from conductor.solver.optimizer import greedy
from conductor.solver.optimizer import fit_first
from conductor.solver.optimizer import random_pick
from conductor.solver.request import demand

LOG = log.getLogger(__name__)

CONF = cfg.CONF

SOLVER_OPTS = [

]

CONF.register_opts(SOLVER_OPTS, group='solver')


class Optimizer(object):
    """Drives the solver: sorts demands by dependency, then delegates
    to a search strategy (RandomPick or FitFirst)."""

    # FIXME(gjung): _requests should be request (no underscore, one item)
    def __init__(self, conf, _requests=None):
        self.conf = conf

        # self.search = greedy.Greedy(self.conf)
        # Strategy instance; chosen per-request in get_solution().
        self.search = None
        # self.search = best_first.BestFirst(self.conf)

        if _requests is not None:
            self.requests = _requests

    def get_solution(self):
        """Solve the request and return the best DecisionPath found,
        or None when no solution exists.

        NOTE(review): the `return best_path` sits inside the request
        loop, so only the first request is ever solved — consistent
        with the FIXME on __init__ saying there should be one request.
        """
        LOG.debug("search start")

        for rk in self.requests:
            request = self.requests[rk]
            LOG.debug("--- request = {}".format(rk))

            LOG.debug("1. sort demands")
            demand_list = self._sort_demands(request)

            for d in demand_list:
                LOG.debug("  demand = {}".format(d.name))

            LOG.debug("2. search")
            st = time.time()

            # With no objective goal there is nothing to optimize:
            # pick candidates at random. Otherwise use fit-first.
            if not request.objective.goal:
                LOG.debug("No objective function is provided. "
                          "Random pick algorithm is used")
                self.search = random_pick.RandomPick(self.conf)
                best_path = self.search.search(demand_list, request)
            else:
                LOG.debug("Fit first algorithm is used")
                self.search = fit_first.FitFirst(self.conf)
                best_path = self.search.search(demand_list,
                                               request.objective, request)

            if best_path is not None:
                self.search.print_decisions(best_path)
            else:
                LOG.debug("no solution found")
            LOG.debug("search delay = {} sec".format(time.time() - st))
            return best_path

    def _sort_demands(self, _request):
        """Order demands so location-anchored ones come first, then
        demands reachable from them via demand-demand constraints.

        Sorting works by marking each demand's sort_base to 1 once it
        has been scheduled, seeding an open list from
        distance_to_location constraints and distance_between operands
        anchored at a fixed Location.
        """
        demand_list = []

        # first, find loc-demand dependencies
        # using constraints and objective functions
        open_demand_list = []
        for key in _request.constraints:
            c = _request.constraints[key]
            if c.constraint_type == "distance_to_location":
                for dk in c.demand_list:
                    if _request.demands[dk].sort_base != 1:
                        _request.demands[dk].sort_base = 1
                        open_demand_list.append(_request.demands[dk])
        for op in _request.objective.operand_list:
            if op.function.func_type == "distance_between":
                # Whichever endpoint is a fixed Location anchors the
                # other (a Demand), which gets scheduled first.
                if isinstance(op.function.loc_a, demand.Location):
                    if _request.demands[op.function.loc_z.name].sort_base != 1:
                        _request.demands[op.function.loc_z.name].sort_base = 1
                        open_demand_list.append(op.function.loc_z)
                elif isinstance(op.function.loc_z, demand.Location):
                    if _request.demands[op.function.loc_a.name].sort_base != 1:
                        _request.demands[op.function.loc_a.name].sort_base = 1
                        open_demand_list.append(op.function.loc_a)

        # No location anchor at all: seed with any unsorted demand.
        if len(open_demand_list) == 0:
            init_demand = self._exist_not_sorted_demand(_request.demands)
            open_demand_list.append(init_demand)

        # second, find demand-demand dependencies
        while True:
            d_list = self._get_depended_demands(open_demand_list, _request)
            for d in d_list:
                demand_list.append(d)

            # Keep going until every demand has been marked sorted.
            init_demand = self._exist_not_sorted_demand(_request.demands)
            if init_demand is None:
                break
            open_demand_list.append(init_demand)

        return demand_list

    def _get_depended_demands(self, _open_demand_list, _request):
        """Breadth-first expansion: drain the open list, appending each
        demand and queueing unsorted demands linked to it through
        distance_between_demands constraints or distance_between
        objective operands."""
        demand_list = []

        while True:
            if len(_open_demand_list) == 0:
                break

            d = _open_demand_list.pop(0)
            if d.sort_base != 1:
                d.sort_base = 1
            demand_list.append(d)

            for key in _request.constraints:
                c = _request.constraints[key]
                if c.constraint_type == "distance_between_demands":
                    if d.name in c.demand_list:
                        for dk in c.demand_list:
                            if dk != d.name and \
                                    _request.demands[dk].sort_base != 1:
                                _request.demands[dk].sort_base = 1
                                _open_demand_list.append(
                                    _request.demands[dk])

            for op in _request.objective.operand_list:
                if op.function.func_type == "distance_between":
                    # Queue the opposite endpoint if it is a demand
                    # (i.e. its name appears in _request.demands).
                    if op.function.loc_a.name == d.name:
                        if op.function.loc_z.name in \
                                _request.demands.keys():
                            if _request.demands[
                                    op.function.loc_z.name].sort_base != 1:
                                _request.demands[
                                    op.function.loc_z.name].sort_base = 1
                                _open_demand_list.append(op.function.loc_z)
                    elif op.function.loc_z.name == d.name:
                        if op.function.loc_a.name in \
                                _request.demands.keys():
                            if _request.demands[
                                    op.function.loc_a.name].sort_base != 1:
                                _request.demands[
                                    op.function.loc_a.name].sort_base = 1
                                _open_demand_list.append(op.function.loc_a)

        return demand_list

    def _exist_not_sorted_demand(self, _demands):
        """Return any demand not yet marked sorted (sort_base != 1),
        or None when all demands have been scheduled."""
        not_sorted_demand = None
        for key in _demands:
            demand = _demands[key]
            if demand.sort_base != 1:
                not_sorted_demand = demand
                break
        return not_sorted_demand


# Used for testing. This file is in .gitignore and will NOT be checked in.
CONFIG_FILE = ''

''' for unit test '''
if __name__ == "__main__":
    # Prepare service-wide components (e.g., config)
    conf = service.prepare_service([], config_files=[CONFIG_FILE])

    opt = Optimizer(conf)
    opt.get_solution()
from oslo_log import log

from conductor.solver.optimizer import decision_path as dpath
from conductor.solver.optimizer import search
from random import randint

LOG = log.getLogger(__name__)


class RandomPick(search.Search):
    """Search strategy that assigns a uniformly random candidate to
    every demand; used when the request has no objective goal."""

    def __init__(self, conf):
        search.Search.__init__(self, conf)

    def search(self, _demand_list, _request):
        """Return a DecisionPath with one random candidate per demand."""
        decision_path = dpath.DecisionPath()
        decision_path.set_decisions({})
        return self._find_current_best(_demand_list, decision_path, _request)

    def _find_current_best(self, _demand_list, _decision_path, _request):
        """Record a randomly chosen resource for each demand in
        _decision_path.decisions and return the path."""
        for demand in _demand_list:
            # BUG FIX: dict.keys() returns a non-indexable view object
            # on Python 3; materialize it as a list before indexing.
            key_list = list(demand.resources.keys())
            r_index = randint(0, len(key_list) - 1)
            _decision_path.decisions[demand.name] = \
                demand.resources[key_list[r_index]]
        return _decision_path
from operator import itemgetter
from oslo_log import log

from conductor.solver.optimizer import decision_path as dpath

LOG = log.getLogger(__name__)


class Search(object):
    """Base class for search strategies (greedy, fit-first, random
    pick): owns constraint filtering, candidate costing, and logging."""

    def __init__(self, conf):
        self.conf = conf

    def search(self, _demand_list, _objective):
        """Strategy entry point; subclasses override this."""
        decision_path = dpath.DecisionPath()
        decision_path.set_decisions({})

        ''' implement search algorithm '''

        return decision_path

    def _solve_constraints(self, _decision_path, _request):
        """Filter the current demand's resources through each of its
        constraints in turn.

        Returns the surviving candidates (cost-sorted), or an empty
        list as soon as any constraint eliminates every candidate.
        """
        candidate_list = []
        for key in _decision_path.current_demand.resources:
            resource = _decision_path.current_demand.resources[key]
            candidate_list.append(resource)

        for constraint in _decision_path.current_demand.constraint_list:
            LOG.debug("Evaluating constraint = {}".format(constraint.name))
            LOG.debug("Available candidates before solving "
                      "constraint {}".format(candidate_list))

            # Each constraint returns the subset of candidates it allows.
            candidate_list =\
                constraint.solve(_decision_path, candidate_list, _request)
            LOG.debug("Available candidates after solving "
                      "constraint {}".format(candidate_list))
            if len(candidate_list) == 0:
                LOG.error("No candidates found for demand {} "
                          "when constraint {} was evaluated "
                          "".format(_decision_path.current_demand,
                                    constraint.name)
                          )
                break

        if len(candidate_list) > 0:
            self._set_candidate_cost(candidate_list)

        return candidate_list

    def _set_candidate_cost(self, _candidate_list):
        """Assign a cost to each candidate (service inventory is
        preferred over cloud) and sort the list in place by it.

        NOTE(review): costs are the strings "1"/"2", so the sort is
        lexicographic — fine for these two values, but fragile if more
        cost levels (e.g. "10") are ever added; confirm nothing else
        reads "cost" as a number.
        """
        for c in _candidate_list:
            if c["inventory_type"] == "service":
                c["cost"] = "1"
            else:
                c["cost"] = "2"
        _candidate_list[:] = sorted(_candidate_list, key=itemgetter("cost"))

    def print_decisions(self, _best_path):
        """Log each demand's chosen candidate plus the path's total
        value and cost (debug level)."""
        if _best_path:
            msg = "--- demand = {}, chosen resource = {} at {}"
            for demand_name in _best_path.decisions:
                resource = _best_path.decisions[demand_name]
                LOG.debug(msg.format(demand_name, resource["candidate_id"],
                                     resource["location_id"]))

            msg = "--- total value of decision = {}"
            LOG.debug(msg.format(_best_path.total_value))
            msg = "--- total cost of decision = {}"
            LOG.debug(msg.format(_best_path.total_cost))
class Demand(object):
    """A placement demand to be resolved against candidate resources."""

    def __init__(self, _name=None):
        self.name = _name
        # Initial candidates (regions or services), keyed by
        # region_id or service_id.
        self.resources = {}
        # Constraint instances applicable to this demand.
        self.constraint_list = []
        # Ordering marker used while sorting demands in the
        # optimization process (-1 means not yet sorted).
        self.sort_base = -1


class Location(object):
    """A reference location used by distance-based constraints."""

    def __init__(self, _name=None):
        self.name = _name
        # One of: clli, coordinates, or placemark.
        self.loc_type = None
        # Representation depends on loc_type.
        self.value = None
class CloudVersion(object):
    """Objective-function term keyed on cloud region version."""

    def __init__(self, _type):
        # Function type tag, e.g. "cloud_version".
        self.func_type = _type
        # Target reference; assigned later from the template's
        # function_param by the parser.
        self.loc = None
class DistanceBetween(object):
    """Objective-function term: air distance between two locations."""

    def __init__(self, _type):
        # Function type tag, e.g. "distance_between".
        self.func_type = _type
        # Endpoints (Demand or Location); assigned later by the parser.
        self.loc_a = None
        self.loc_z = None

    def compute(self, _loc_a, _loc_z):
        """Return the air distance between the two coordinates."""
        return utils.compute_air_distance(_loc_a, _loc_z)
from conductor.solver.request import demand
# from conductor.solver.resource import region
# from conductor.solver.resource import service


class Objective(object):
    """Objective function: a goal (e.g. "min"), an aggregation
    operation (e.g. "sum") and the operand terms it aggregates."""

    def __init__(self):
        self.goal = None
        self.operation = None
        self.operand_list = []

    def compute(self, _decision_path, _request):
        """Evaluate the objective over the decisions made so far and
        store the result on the decision path: cumulated_value, and
        total_value = cumulated_value + heuristic_to_go_value."""
        value = 0.0

        # Only "sum" aggregation is implemented; any other operation
        # leaves value at 0.0.
        for op in self.operand_list:
            if self.operation == "sum":
                value += op.compute(_decision_path, _request)

        _decision_path.cumulated_value = value
        _decision_path.total_value = \
            _decision_path.cumulated_value + \
            _decision_path.heuristic_to_go_value


class Operand(object):
    """One weighted term of the objective (e.g. a distance_between)."""

    def __init__(self):
        # Weighting operation, e.g. "product".
        self.operation = None
        self.weight = 0
        # Function instance, e.g. DistanceBetween.
        self.function = None

    def compute(self, _decision_path, _request):
        """Evaluate this operand against the decisions made so far.

        Returns 0.0 when the demand(s) involved are not yet decided.
        Candidate locations are resolved via _request.cei.
        """
        value = 0.0
        cei = _request.cei
        if self.function.func_type == "distance_between":
            # Case 1: loc_a is a fixed Location, loc_z is a demand.
            if isinstance(self.function.loc_a, demand.Location):
                if self.function.loc_z.name in \
                        _decision_path.decisions.keys():
                    resource = \
                        _decision_path.decisions[self.function.loc_z.name]
                    loc = None
                    # if isinstance(resource, region.Region):
                    #     loc = resource.location
                    # elif isinstance(resource, service.Service):
                    #     loc = resource.region.location
                    loc = cei.get_candidate_location(resource)
                    value = \
                        self.function.compute(self.function.loc_a.value, loc)
            # Case 2: loc_z is the fixed Location, loc_a is a demand.
            elif isinstance(self.function.loc_z, demand.Location):
                if self.function.loc_a.name in \
                        _decision_path.decisions.keys():
                    resource = \
                        _decision_path.decisions[self.function.loc_a.name]
                    loc = None
                    # (same commented-out resource resolution as above)
                    loc = cei.get_candidate_location(resource)
                    value = \
                        self.function.compute(self.function.loc_z.value, loc)
            # Case 3: both endpoints are demands; both must be decided.
            else:
                if self.function.loc_a.name in \
                        _decision_path.decisions.keys() and \
                        self.function.loc_z.name in \
                        _decision_path.decisions.keys():
                    resource_a = \
                        _decision_path.decisions[self.function.loc_a.name]
                    loc_a = None
                    # (same commented-out resource resolution as above)
                    loc_a = cei.get_candidate_location(resource_a)
                    resource_z = \
                        _decision_path.decisions[self.function.loc_z.name]
                    loc_z = None
                    # (same commented-out resource resolution as above)
                    loc_z = cei.get_candidate_location(resource_z)

                    value = self.function.compute(loc_a, loc_z)

        # Apply the weight when the operand operation asks for it.
        if self.operation == "product":
            value *= self.weight

        return value
# import json
import operator
from oslo_log import log
import random
# import sys

from conductor.solver.optimizer.constraints \
    import access_distance as access_dist
from conductor.solver.optimizer.constraints \
    import cloud_distance as cloud_dist
from conductor.solver.optimizer.constraints \
    import attribute as attribute_constraint
# from conductor.solver.optimizer.constraints import constraint
from conductor.solver.optimizer.constraints \
    import inventory_group
from conductor.solver.optimizer.constraints \
    import service as service_constraint
from conductor.solver.optimizer.constraints import zone
from conductor.solver.request import demand
from conductor.solver.request.functions import cloud_version
from conductor.solver.request.functions import distance_between
from conductor.solver.request import objective

# from conductor.solver.request.functions import distance_between
# from conductor.solver.request import objective
# from conductor.solver.resource import region
# from conductor.solver.resource import service
# from conductor.solver.utils import constraint_engine_interface as cei
# from conductor.solver.utils import utils

LOG = log.getLogger(__name__)

# Textual comparison operators accepted in constraint properties.
_OPERATOR_MAP = {
    ">": operator.gt,
    ">=": operator.ge,
    "<": operator.lt,
    "<=": operator.le,
    "=": operator.eq,
}


def _comparison_operator(c_op):
    """Map a textual comparison operator from a constraint's properties
    to the corresponding operator-module function.

    Defaults to operator.le for None/unknown values, matching the
    previous inline mapping logic (which was duplicated across the
    distance_to_location and distance_between_demands branches).
    """
    return _OPERATOR_MAP.get(c_op, operator.le)


# FIXME(snarayanan): This is really a SolverRequest (or Request) object
class Parser(object):
    """Parses a conductor_solver JSON template into Demand, Location,
    constraint and Objective objects for the solver."""

    def __init__(self, _region_gen=None):
        self.demands = {}
        self.locations = {}
        self.region_gen = _region_gen
        self.constraints = {}
        self.objective = None
        self.cei = None
        self.request_id = None

    # def get_data_engine_interface(self):
    #     self.cei = cei.ConstraintEngineInterface()

    # FIXME(snarayanan): This should just be parse_template
    def parse_template(self, json_template=None):
        """Populate demands, locations, constraints and the objective
        from json_template["conductor_solver"].

        Returns "Error" when no template is given, and returns early
        (None) on an unknown constraint type, leaving the parser only
        partially populated.
        """
        if json_template is None:
            LOG.error("No template specified")
            return "Error"

        # get demands
        demand_list = json_template["conductor_solver"]["demands"]
        for demand_id, candidate_list in demand_list.items():
            current_demand = demand.Demand(demand_id)
            # candidate should only have minimal information like
            # location_id
            for candidate in candidate_list["candidates"]:
                candidate_id = candidate["candidate_id"]
                current_demand.resources[candidate_id] = candidate
            current_demand.sort_base = 0  # this is only for testing
            self.demands[demand_id] = current_demand

        # get locations
        location_list = json_template["conductor_solver"]["locations"]
        for location_id, location_info in location_list.items():
            loc = demand.Location(location_id)
            loc.loc_type = "coordinates"
            loc.value = (float(location_info["latitude"]),
                         float(location_info["longitude"]))
            self.locations[location_id] = loc

        # get constraints
        input_constraints = json_template["conductor_solver"]["constraints"]
        for constraint_id, constraint_info in input_constraints.items():
            constraint_type = constraint_info["type"]
            # Normalize "demands" to a list whether the template gives
            # one name or several.
            constraint_demands = list()
            parsed_demands = constraint_info["demands"]
            if isinstance(parsed_demands, list):
                for d in parsed_demands:
                    constraint_demands.append(d)
            else:
                constraint_demands.append(parsed_demands)
            if constraint_type == "distance_to_location":
                c_property = constraint_info.get("properties")
                location_id = c_property.get("location")
                op = _comparison_operator(
                    c_property.get("distance").get("operator"))
                dist_value = c_property.get("distance").get("value")
                my_access_distance_constraint = access_dist.AccessDistance(
                    constraint_id, constraint_type, constraint_demands,
                    _comparison_operator=op, _threshold=dist_value,
                    _location=self.locations[location_id])
                self.constraints[my_access_distance_constraint.name] = \
                    my_access_distance_constraint
            elif constraint_type == "distance_between_demands":
                c_property = constraint_info.get("properties")
                op = _comparison_operator(
                    c_property.get("distance").get("operator"))
                dist_value = c_property.get("distance").get("value")
                my_cloud_distance_constraint = cloud_dist.CloudDistance(
                    constraint_id, constraint_type, constraint_demands,
                    _comparison_operator=op, _threshold=dist_value)
                self.constraints[my_cloud_distance_constraint.name] = \
                    my_cloud_distance_constraint
            elif constraint_type == "inventory_group":
                my_inventory_group_constraint = \
                    inventory_group.InventoryGroup(
                        constraint_id, constraint_type, constraint_demands)
                self.constraints[my_inventory_group_constraint.name] = \
                    my_inventory_group_constraint
            elif constraint_type == "region_fit":
                c_property = constraint_info.get("properties")
                controller = c_property.get("controller")
                request = c_property.get("request")
                # inventory type is cloud for region_fit
                inventory_type = "cloud"
                my_service_constraint = service_constraint.Service(
                    constraint_id, constraint_type, constraint_demands,
                    _controller=controller, _request=request, _cost=None,
                    _inventory_type=inventory_type)
                self.constraints[my_service_constraint.name] = \
                    my_service_constraint
            elif constraint_type == "instance_fit":
                c_property = constraint_info.get("properties")
                controller = c_property.get("controller")
                request = c_property.get("request")
                # inventory type is service for instance_fit
                inventory_type = "service"
                my_service_constraint = service_constraint.Service(
                    constraint_id, constraint_type, constraint_demands,
                    _controller=controller, _request=request, _cost=None,
                    _inventory_type=inventory_type)
                self.constraints[my_service_constraint.name] = \
                    my_service_constraint
            elif constraint_type == "zone":
                c_property = constraint_info.get("properties")
                qualifier = c_property.get("qualifier")
                category = c_property.get("category")
                my_zone_constraint = zone.Zone(
                    constraint_id, constraint_type, constraint_demands,
                    _qualifier=qualifier, _category=category)
                self.constraints[my_zone_constraint.name] = my_zone_constraint
            elif constraint_type == "attribute":
                c_property = constraint_info.get("properties")
                my_attribute_constraint = \
                    attribute_constraint.Attribute(constraint_id,
                                                   constraint_type,
                                                   constraint_demands,
                                                   _properties=c_property)
                self.constraints[my_attribute_constraint.name] = \
                    my_attribute_constraint
            else:
                LOG.error("unknown constraint type {}".format(constraint_type))
                return

        # get objective function
        if "objective" not in json_template["conductor_solver"]\
                or not json_template["conductor_solver"]["objective"]:
            # No objective in the template: empty Objective (no goal).
            self.objective = objective.Objective()
        else:
            input_objective = json_template["conductor_solver"]["objective"]
            self.objective = objective.Objective()
            self.objective.goal = input_objective["goal"]
            self.objective.operation = input_objective["operation"]
            for operand_data in input_objective["operands"]:
                operand = objective.Operand()
                operand.operation = operand_data["operation"]
                operand.weight = float(operand_data["weight"])
                if operand_data["function"] == "distance_between":
                    func = distance_between.DistanceBetween("distance_between")
                    # Each function_param may name a location or demand.
                    param = operand_data["function_param"][0]
                    if param in self.locations:
                        func.loc_a = self.locations[param]
                    elif param in self.demands:
                        func.loc_a = self.demands[param]
                    param = operand_data["function_param"][1]
                    if param in self.locations:
                        func.loc_z = self.locations[param]
                    elif param in self.demands:
                        func.loc_z = self.demands[param]
                    operand.function = func
                elif operand_data["function"] == "cloud_version":
                    # A cloud_version operand overrides the goal.
                    self.objective.goal = "min_cloud_version"
                    func = cloud_version.CloudVersion("cloud_version")
                    func.loc = operand_data["function_param"]
                    operand.function = func

                self.objective.operand_list.append(operand)

    def map_constraints_to_demands(self):
        """Attach each parsed constraint to every demand it names."""
        # spread the constraints over the demands
        for constraint_name, constraint in self.constraints.items():
            for d in constraint.demand_list:
                if d in self.demands.keys():
                    self.demands[d].constraint_list.append(constraint)
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + + +"""Cloud region""" + + +class Region(object): + + def __init__(self, _rid=None): + self.name = _rid + + self.status = "active" + + '''general region properties''' + # S (i.e., medium_lite), M (i.e., medium), or L (i.e., large) + self.region_type = None + # (latitude, longitude) + self.location = None + + ''' + placemark: + + country_code (e.g., US), + postal_code (e.g., 07920), + administrative_area (e.g., NJ), + sub_administrative_area (e.g., Somerset), + locality (e.g., Bedminster), + thoroughfare (e.g., AT&T Way), + sub_thoroughfare (e.g., 1) + ''' + self.address = {} + + self.zones = {} # Zone instances (e.g., disaster and/or update) + self.cost = 0.0 + + '''abstracted resource capacity status''' + self.capacity = {} + + self.allocated_demand_list = [] + + '''other opaque metadata such as cloud_version, sriov, etc.''' + self.properties = {} + + '''known neighbor regions to be used for constraint solving''' + self.neighbor_list = [] # a list of Link instances + + self.last_update = 0 + + '''update resource capacity after allocating demand''' + def update_capacity(self): + pass + + '''for logging''' + def get_json_summary(self): + pass + + +class Zone(object): + + def __init__(self, _zid=None): + self.name = _zid + self.zone_type = None # disaster or update + + self.region_list = [] # a list of region names + + def get_json_summary(self): + pass + + +class Link(object): + + def __init__(self, _region_name): + self.destination_region_name = _region_name + + 
self.distance = 0.0 + self.nw_distance = 0.0 + self.latency = 0.0 + self.bandwidth = 0.0 + + def get_json_summary(self): + pass diff --git a/conductor/conductor/solver/resource/service.py b/conductor/conductor/solver/resource/service.py new file mode 100755 index 0000000..faedb53 --- /dev/null +++ b/conductor/conductor/solver/resource/service.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + + +"""Existing service instance in a region""" + + +class Service(object): + + def __init__(self, _sid=None): + self.name = _sid + + self.region = None + + self.status = "active" + + self.cost = 0.0 + + """abstracted resource capacity status""" + self.capacity = {} + + self.allocated_demand_list = [] + + """other opaque metadata if necessary""" + self.properties = {} + + self.last_update = 0 + + """update resource capacity after allocating demand""" + def update_capacity(self): + pass + + """for logging""" + def get_json_summary(self): + pass diff --git a/conductor/conductor/solver/service.py b/conductor/conductor/solver/service.py new file mode 100644 index 0000000..60aa092 --- /dev/null +++ b/conductor/conductor/solver/service.py @@ -0,0 +1,307 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + +import cotyledon +from oslo_config import cfg +from oslo_log import log + +from conductor.common.models import plan +from conductor.common.music import api +from conductor.common.music import messaging as music_messaging +from conductor.common.music.model import base +from conductor.i18n import _LE, _LI +from conductor import messaging +from conductor import service +from conductor.solver.optimizer import optimizer +from conductor.solver.request import parser +from conductor.solver.utils import constraint_engine_interface as cei + + +# To use oslo.log in services: +# +# 0. Note that conductor.service.prepare_service() bootstraps this. +# It's set up within conductor.cmd.SERVICENAME. +# 1. Add "from oslo_log import log" +# 2. Also add "LOG = log.getLogger(__name__)" +# 3. For i18n support, import appropriate shortcuts as well: +# "from i18n import _, _LC, _LE, _LI, _LW # noqa" +# (that's for primary, critical, error, info, warning) +# 4. Use LOG.info, LOG.warning, LOG.error, LOG.critical, LOG.debug, e.g.: +# "LOG.info(_LI("Something happened with {}").format(thingie))" +# 5. Do NOT put translation wrappers around any LOG.debug text. +# 6. Be liberal with logging, especially in the absence of unit tests! +# 7. Calls to print() are verboten within the service proper. +# Logging can be redirected! (In a CLI-side script, print() is fine.) +# +# Usage: http://docs.openstack.org/developer/oslo.i18n/usage.html + +LOG = log.getLogger(__name__) + +# To use oslo.config in services: +# +# 0. Note that conductor.service.prepare_service() bootstraps this. +# It's set up within conductor.cmd.SERVICENAME. +# 1. Add "from oslo_config import cfg" +# 2. Also add "CONF = cfg.CONF" +# 3. Set a list of locally used options (SOLVER_OPTS is fine). +# Choose key names thoughtfully. Be technology-agnostic, avoid TLAs, etc. +# 4. Register, e.g. "CONF.register_opts(SOLVER_OPTS, group='solver')" +# 5. 
Add file reference to opts.py (may need to use itertools.chain()) +# 6. Run tox -e genconfig to build a new config template. +# 7. If you want to load an entire config from a CLI you can do this: +# "conf = service.prepare_service([], config_files=[CONFIG_FILE])" +# 8. You can even use oslo_config from a CLI and override values on the fly, +# e.g., "CONF.set_override('hostnames', ['music2'], 'music_api')" +# (leave the third arg out to use the DEFAULT group). +# 9. Loading a config from a CLI is optional. So long as all the options +# have defaults (or you override them as needed), it should all work. +# +# Docs: http://docs.openstack.org/developer/oslo.config/ + +CONF = cfg.CONF + +SOLVER_OPTS = [ + cfg.IntOpt('workers', + default=1, + min=1, + help='Number of workers for solver service. ' + 'Default value is 1.'), + cfg.BoolOpt('concurrent', + default=False, + help='Set to True when solver will run in active-active ' + 'mode. When set to False, solver will restart any ' + 'orphaned solving requests at startup.'), +] + +CONF.register_opts(SOLVER_OPTS, group='solver') + +# Pull in service opts. We use them here. +OPTS = service.OPTS +CONF.register_opts(OPTS) + + +class SolverServiceLauncher(object): + """Launcher for the solver service.""" + def __init__(self, conf): + self.conf = conf + + # Set up Music access. 
+ self.music = api.API() + self.music.keyspace_create(keyspace=conf.keyspace) + + # Dynamically create a plan class for the specified keyspace + self.Plan = base.create_dynamic_model( + keyspace=conf.keyspace, baseclass=plan.Plan, classname="Plan") + + if not self.Plan: + raise + + def run(self): + kwargs = {'plan_class': self.Plan} + svcmgr = cotyledon.ServiceManager() + svcmgr.add(SolverService, + workers=self.conf.solver.workers, + args=(self.conf,), kwargs=kwargs) + svcmgr.run() + + +class SolverService(cotyledon.Service): + """Solver service.""" + + # This will appear in 'ps xaf' + name = "Conductor Solver" + + def __init__(self, worker_id, conf, **kwargs): + """Initializer""" + LOG.debug("%s" % self.__class__.__name__) + super(SolverService, self).__init__(worker_id) + self._init(conf, **kwargs) + self.running = True + + def _init(self, conf, **kwargs): + """Set up the necessary ingredients.""" + self.conf = conf + self.kwargs = kwargs + + self.Plan = kwargs.get('plan_class') + + # Set up the RPC service(s) we want to talk to. + self.data_service = self.setup_rpc(conf, "data") + + # Set up the cei and optimizer + self.cei = cei.ConstraintEngineInterface(self.data_service) + # self.optimizer = optimizer.Optimizer(conf) + + # Set up Music access. + self.music = api.API() + + if not self.conf.solver.concurrent: + self._reset_solving_status() + + def _gracefully_stop(self): + """Gracefully stop working on things""" + pass + + def _reset_solving_status(self): + """Reset plans being solved so they are solved again. + + Use this only when the solver service is not running concurrently. + """ + plans = self.Plan.query.all() + for the_plan in plans: + if the_plan.status == self.Plan.SOLVING: + the_plan.status = self.Plan.TRANSLATED + the_plan.update() + + def _restart(self): + """Prepare to restart the service""" + pass + + def setup_rpc(self, conf, topic): + """Set up the RPC Client""" + # TODO(jdandrea): Put this pattern inside music_messaging? 
+ transport = messaging.get_transport(conf=conf) + target = music_messaging.Target(topic=topic) + client = music_messaging.RPCClient(conf=conf, + transport=transport, + target=target) + return client + + def run(self): + """Run""" + LOG.debug("%s" % self.__class__.__name__) + # TODO(snarayanan): This is really meant to be a control loop + # As long as self.running is true, we process another request. + while self.running: + # plans = Plan.query().all() + # Find the first plan with a status of TRANSLATED. + # Change its status to SOLVING. + # Then, read the "translated" field as "template". + json_template = None + requests_to_solve = dict() + plans = self.Plan.query.all() + found_translated_template = False + for p in plans: + if p.status == self.Plan.TRANSLATED: + json_template = p.translation + found_translated_template = True + break + if found_translated_template and not json_template: + message = _LE("Plan {} status is translated, yet " + "the translation wasn't found").format(p.id) + LOG.error(message) + p.status = self.Plan.ERROR + p.message = message + p.update() + continue + elif not json_template: + continue + + p.status = self.Plan.SOLVING + p.update() + + request = parser.Parser() + request.cei = self.cei + try: + request.parse_template(json_template) + except Exception as err: + message = _LE("Plan {} status encountered a " + "parsing error: {}").format(p.id, err.message) + LOG.error(message) + p.status = self.Plan.ERROR + p.message = message + p.update() + continue + + request.map_constraints_to_demands() + requests_to_solve[p.id] = request + opt = optimizer.Optimizer(self.conf, _requests=requests_to_solve) + solution = opt.get_solution() + + recommendations = [] + if not solution or not solution.decisions: + message = _LI("Plan {} search failed, no " + "recommendations found").format(p.id) + LOG.info(message) + # Update the plan status + p.status = self.Plan.NOT_FOUND + p.message = message + p.update() + else: + # Assemble recommendation result JSON 
+ for demand_name in solution.decisions: + resource = solution.decisions[demand_name] + + rec = { + # FIXME(shankar) A&AI must not be hardcoded here. + # Also, account for more than one Inventory Provider. + "inventory_provider": "aai", + "service_resource_id": + resource.get("service_resource_id"), + "candidate": { + "candidate_id": resource.get("candidate_id"), + "inventory_type": resource.get("inventory_type"), + "cloud_owner": resource.get("cloud_owner"), + "location_type": resource.get("location_type"), + "location_id": resource.get("location_id")}, + "attributes": { + "physical-location-id": + resource.get("physical_location_id"), + "sriov_automation": + resource.get("sriov_automation"), + "cloud_owner": resource.get("cloud_owner"), + 'cloud_version': resource.get("cloud_region_version")}, + } + if rec["candidate"]["inventory_type"] == "service": + rec["attributes"]["host_id"] = resource.get("host_id") + rec["candidate"]["host_id"] = resource.get("host_id") + + # TODO(snarayanan): Add total value to recommendations? + # msg = "--- total value of decision = {}" + # LOG.debug(msg.format(_best_path.total_value)) + # msg = "--- total cost of decision = {}" + # LOG.debug(msg.format(_best_path.total_cost)) + + recommendations.append({demand_name: rec}) + + # Update the plan with the solution + p.solution = { + "recommendations": recommendations + } + p.status = self.Plan.SOLVED + p.update() + LOG.info(_LI("Plan {} search complete, solution with {} " + "recommendations found"). + format(p.id, len(recommendations))) + LOG.debug("Plan {} detailed solution: {}". 
+ format(p.id, p.solution)) + + # Check status, update plan with response, SOLVED or ERROR + + def terminate(self): + """Terminate""" + LOG.debug("%s" % self.__class__.__name__) + self.running = False + self._gracefully_stop() + super(SolverService, self).terminate() + + def reload(self): + """Reload""" + LOG.debug("%s" % self.__class__.__name__) + self._restart() diff --git a/conductor/conductor/solver/simulators/__init__.py b/conductor/conductor/solver/simulators/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/solver/simulators/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/solver/simulators/a_and_ai/__init__.py b/conductor/conductor/solver/simulators/a_and_ai/__init__.py new file mode 100755 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/solver/simulators/a_and_ai/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/solver/simulators/valet/__init__.py b/conductor/conductor/solver/simulators/valet/__init__.py new file mode 100755 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/solver/simulators/valet/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/solver/utils/__init__.py b/conductor/conductor/solver/utils/__init__.py new file mode 100755 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/solver/utils/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/solver/utils/constraint_engine_interface.py b/conductor/conductor/solver/utils/constraint_engine_interface.py new file mode 100644 index 0000000..de335d6 --- /dev/null +++ b/conductor/conductor/solver/utils/constraint_engine_interface.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + + +"""Constraint/Engine Interface + +Utility library that defines the interface between +the constraints and the conductor data engine. + +""" + +from oslo_log import log + +LOG = log.getLogger(__name__) + + +class ConstraintEngineInterface(object): + def __init__(self, client): + self.client = client + + def get_candidate_location(self, candidate): + # Try calling a method (remember, "calls" are synchronous) + # FIXME(jdandrea): Doing this because Music calls are expensive. + lat = candidate.get('latitude') + lon = candidate.get('longitude') + if lat and lon: + response = (float(lat), float(lon)) + else: + ctxt = {} + args = {"candidate": candidate} + response = self.client.call(ctxt=ctxt, + method="get_candidate_location", + args=args) + LOG.debug("get_candidate_location response: {}".format(response)) + return response + + def get_candidate_zone(self, candidate, _category=None): + # FIXME(jdandrea): Doing this because Music calls are expensive. 
+ if _category == 'region': + response = candidate['location_id'] + elif _category == 'complex': + response = candidate['complex_name'] + else: + ctxt = {} + args = {"candidate": candidate, "category": _category} + response = self.client.call(ctxt=ctxt, + method="get_candidate_zone", + args=args) + LOG.debug("get_candidate_zone response: {}".format(response)) + return response + + def get_candidates_from_service(self, constraint_name, + constraint_type, candidate_list, + controller, inventory_type, + request, cost, demand_name): + ctxt = {} + args = {"constraint_name": constraint_name, + "constraint_type": constraint_type, + "candidate_list": candidate_list, + "controller": controller, + "inventory_type": inventory_type, + "request": request, + "cost": cost, + "demand_name": demand_name} + response = self.client.call(ctxt=ctxt, + method="get_candidates_from_service", + args=args) + LOG.debug("get_candidates_from_service response: {}".format(response)) + # response is a list of (candidate, cost) tuples + return response + + def get_inventory_group_candidates(self, candidate_list, + demand_name, resolved_candidate): + # return a list of the "pair" candidates for the given candidate + ctxt = {} + args = {"candidate_list": candidate_list, + "demand_name": demand_name, + "resolved_candidate": resolved_candidate} + response = self.client.call(ctxt=ctxt, + method="get_inventory_group_candidates", + args=args) + LOG.debug("get_inventory_group_candidates \ + response: {}".format(response)) + return response + + def get_candidates_by_attributes(self, demand_name, + candidate_list, properties): + ctxt = {} + args = {"candidate_list": candidate_list, + "properties": properties, + "demand_name": demand_name} + response = self.client.call(ctxt=ctxt, + method="get_candidates_by_attributes", + args=args) + LOG.debug("get_candidates_by_attribute response: {}".format(response)) + # response is a list of (candidate, cost) tuples + return response diff --git 
a/conductor/conductor/solver/utils/utils.py b/conductor/conductor/solver/utils/utils.py new file mode 100755 index 0000000..5cec51f --- /dev/null +++ b/conductor/conductor/solver/utils/utils.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + + +import math + + +def compute_air_distance(_src, _dst): + """Compute Air Distance + + based on latitude and longitude + input: a pair of (lat, lon)s + output: air distance as km + """ + distance = 0.0 + + if _src == _dst: + return distance + + radius = 6371.0 # km + + dlat = math.radians(_dst[0] - _src[0]) + dlon = math.radians(_dst[1] - _src[1]) + a = math.sin(dlat / 2.0) * math.sin(dlat / 2.0) + \ + math.cos(math.radians(_src[0])) * \ + math.cos(math.radians(_dst[0])) * \ + math.sin(dlon / 2.0) * math.sin(dlon / 2.0) + c = 2.0 * math.atan2(math.sqrt(a), math.sqrt(1.0 - a)) + distance = radius * c + + return distance + + +def convert_km_to_miles(_km): + return _km * 0.621371 + + +def convert_miles_to_km(_miles): + return _miles / 0.621371 diff --git a/conductor/conductor/tests/__init__.py b/conductor/conductor/tests/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/tests/__init__.py @@ -0,0 +1,19 @@ +# +# 
------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/tests/data/__init__.py b/conductor/conductor/tests/data/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/tests/data/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/tests/functional/__init__.py b/conductor/conductor/tests/functional/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/tests/functional/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/tests/integration/__init__.py b/conductor/conductor/tests/integration/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/tests/integration/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/tests/tempest/__init__.py b/conductor/conductor/tests/tempest/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/tests/tempest/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/tests/testing-overview.txt b/conductor/conductor/tests/testing-overview.txt new file mode 100644 index 0000000..fe6955d --- /dev/null +++ b/conductor/conductor/tests/testing-overview.txt @@ -0,0 +1,67 @@ +Conductor testing +----------------- + +All unit tests are to be placed in the conductor/tests directory, +and tests might be organized by tested subsystem. Each subsystem directory +must contain a separate blank __init__.py for test discovery to function. 
+ +An example directory structure illustrating the above: + +conductor/tests +|-- engine +| |-- __init__.py +| |-- test1.py +| |-- test2.py +| |-- test3.py +|-- __init__.py +|-- test_plan.py + +If a given test has no overlapping requirements (variables or same +routines) a new test does not need to create a subdirectory under the +test type. + +Implementing a test +------------------- + +testrepository - http://pypi.python.org/pypi/testrepository is used to +find and run tests, parallelize their runs, and record timing/results. + +If new dependencies are introduced upon the development of a test, the +test-requirements.txt file needs to be updated so that the virtual +environment will be able to successfully execute all tests. + +Running the tests +----------------- + +The advised way of running tests is by using tox: + +$ tox + +By default, this will run the unit test suite with Python 2.7 and PEP8/HACKING +style checks. To run only one type of test you can explicitly invoke tox +with the test environment to use. + +$ tox -epy27 # test suite on python 2.7 +$ tox -epep8 # run full source code checker + +To run only a subset of tests, you can provide tox with a regex argument +defining which tests to execute. + +$ tox -epy27 -- FooTests + +To use a debugger like pdb during the test run, one has to run tests directly +with another, non-concurrent test runner instead of using testr. + +That also presumes you have a virtual env with all conductor dependencies active. +Below is an example bash script using the testtools test runner that also allows +running single tests by providing a regex. + +#!/usr/bin/env sh +testlist=$(mktemp) +testr list-tests "$1" > $testlist +python -m testtools.run --load-list $testlist + +Another way to use debugger for testing is run tox with command: +$ tox -e debug -- conductor.tests.test_foo.FooTest.test_foo_does_something + +Note: This last approach is mostly useful to run single tests.
\ No newline at end of file diff --git a/conductor/conductor/tests/unit/__init__.py b/conductor/conductor/tests/unit/__init__.py new file mode 100644 index 0000000..f2bbdfd --- /dev/null +++ b/conductor/conductor/tests/unit/__init__.py @@ -0,0 +1,19 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + diff --git a/conductor/conductor/version.py b/conductor/conductor/version.py new file mode 100644 index 0000000..6cdecfe --- /dev/null +++ b/conductor/conductor/version.py @@ -0,0 +1,22 @@ +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + +import pbr.version + +version_info = pbr.version.VersionInfo('of_has') diff --git a/conductor/doc/api/README.md b/conductor/doc/api/README.md new file mode 100644 index 0000000..59bc295 --- /dev/null +++ b/conductor/doc/api/README.md @@ -0,0 +1,283 @@ +# Homing API v1 + +*Updated: 4 April 2017* + +This document describes the Homing API, used by the Conductor service. It is a work in progress and subject to frequent revision. + +# General API Information + +Authenticated calls that target a known URI but that use an HTTP method the implementation does not support return a 405 Method Not Allowed status. In addition, the HTTP OPTIONS method is supported for each known URI. In both cases, the Allow response header indicates the supported HTTP methods. See the API Errors section for more information about the error response structure. + +# API versions + +## List all Homing API versions + +**GET** ``/``F + +**Normal response codes:** 200 + +```json +{ + "versions": [ + { + "status": "EXPERIMENTAL", + "id": "v1", + "updated": "2016-11-01T00:00:00Z", + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.ecomp.homing-v1+json" + } + ], + "links": [ + { + "href": "http://135.197.226.83:8091/v1", + "rel": "self" + }, + { + "href": "http://conductor.research.att.com/", + "type": "text/html", + "rel": "describedby" + } + ] + } + ] +} +``` + +This operation does not accept a request body. + +# Plans + +## Create a plan + +**POST** ``/v1/plans`` + +* **Normal response codes:** 201 +* **Error response codes:** badRequest (400), unauthorized (401), internalServerError (500) + +Request an inventory plan for one or more related service demands. 
+ +The request includes or references a declarative **template**, consisting of: + +* **Parameters** that can be referenced like macros +* **Demands** for service made against inventory +* **Locations** that are common to the overall plan +* **Constraints** made against demands, resulting in a set of inventory candidates +* **Optimizations** to further narrow down the remaining candidates + +The response contains an inventory **plan**, consisting of one or more sets of recommended pairings of demands with an inventory candidate's attributes and region. + +### Request Parameters + +| Parameter | Style | Type | Description | +|-----------|-------|------|-------------| +| ``name`` (Optional) | plain | xsd:string | A name for the new plan. If a name is not provided, it will be auto-generated based on the homing template. This name must be unique within a given Conductor environment. When deleting a plan, its name will not become available for reuse until the deletion completes successfully. Must only contain letters, numbers, hypens, full stops, underscores, and tildes (RFC 3986, Section 2.3). This parameter is immutable. | +| ``id`` (Optional) | plain | csapi:UUID | The UUID of the plan. UUID is assigned by Conductor if no id is provided in the request. | +| ``transaction_id`` | plain | csapi:UUID | The transaction id assigned by MSO. The logs should have this transaction id for tracking purposes. | +| ``files`` (Optional) | plain | xsd:dict | Supplies the contents of files referenced in the template. Conductor templates can explicitly reference files by using the ``get_file`` intrinsic function. The value is a JSON object, where each key is a relative or absolute URI which serves as the name of a file, and the associated value provides the contents of the file. Additionally, some template authors encode their user data in a local file. 
The Homing client (e.g., a CLI) can examine the template for the ``get_file`` intrinsic function (e.g., ``{get_file: file.yaml}``) and add an entry to the ``files`` map with the path to the file as the name and the file contents as the value. Do not use this parameter to provide the content of the template located at the ``template_url`` address. Instead, use the ``template`` parameter to supply the template content as part of the request. | +| ``template_url`` (Optional) | plain | xsd:string | A URI to the location containing the template on which to perform the operation. See the description of the ``template`` parameter for information about the expected template content located at the URI. This parameter is only required when you omit the ``template`` parameter. If you specify both parameters, this parameter is ignored. | +| ``template``| plain | xsd:string or xsd:dict | The template on which to perform the operation. See the [Conductor Template Guide](/doc/template/README.md) for complete information on the format. This parameter is either provided as a ``string`` or ``dict`` in the JSON request body. For ``string`` content it may be a JSON- or YAML-formatted Conductor template. For ``dict`` content it must be a direct JSON representation of the Conductor template. This parameter is required only when you omit the ``template_url`` parameter. If you specify both parameters, this value overrides the ``template_url`` parameter value. | +| ``timeout`` (Optional) | plain | xsd:number | The timeout for plan creation in minutes. Default is 1. | +| ``limit`` (Optional) | plain | xsd:number | The maximum number of recommendations to return. Default is 1. | + +**NOTE**: ``files``, ``template_url``, and ``timeout`` are not yet supported. + +### Response Parameters + +| Parameter | Style | Type | Description | +|-----------|-------|------|-------------| +| ``plan`` | plain | xsd:dict | The ``plan`` object. | +| ``id`` | plain | csapi:UUID | The UUID of the plan. 
| +| ``transaction_id`` | plain | csapi:UUID | The transaction id assigned by the MSO. | +| ``name`` | plain | xsd:string | The plan name. | +| ``status`` | plain | xsd:string | The plan status. One of ``template``, ``translated``, ``solving``, ``solved``, or ``error``. See **Plan Status** table for descriptions of each value. | +| ``message`` | plain | xsd:string | Additional context, if any, around the message status. If the status is ``error``, this may include a reason and suggested remediation, if available. | +| ``links`` | plain | xsd:list | A list of URLs for the plan. Each URL is a JSON object with an ``href`` key indicating the URL and a ``rel`` key indicating its relationship to the plan in question. There may be multiple links returned. The ``self`` relationship identifies the URL of the plan itself. | +| ``recommendations`` | plain | xsd:list | A list of one or more recommendations. A recommendation pairs each requested demand with an inventory provider, a single candidate, and an opaque dictionary of attributes. Refer to the Demand candidate schema in the [Conductor Template Guide](/doc/template/README.md) for further details. (Note that, when ``inventory_type`` is ``cloud`` the candidate's ``candidate_id`` field is redundant and thus omitted.) | + +### Plan Status + +| Status | Description | +|--------|-------------| +| ``template`` | Plan request and homing template have been received. Awaiting translation. | +| ``translated`` | Homing template has been translated, and candidates have been obtained from inventory providers. Awaiting solving. | +| ``solving`` | Search for a solution is in progress. This may incorporate requests to service controllers for additional information. | +| ``solved`` | Search is complete. A solution with one or more recommendations was found. | +| ``not found`` | Search is complete. No recommendations were found. | +| ``error`` | An error was encountered. 
| + +#### State Diagram + +```text + ---------------------------------------- + | | + | /---> solved ---> reserving ---> done + | / / + template -> translated -> solving ------> not found / + | ^ | \ / + | | conditionally | \---> error <----/ + | | (see note) | ^ + | \---------------/ | + \---------------------------------------/ +``` +**NOTE**: When Conductor's solver service is started in non-concurrent mode (the default), it will reset any plans found waiting and stuck in the ``solving`` state back to ``translated``. + +```json +{ + "name": "PLAN_NAME", + "template": "CONDUCTOR_TEMPLATE", + "limit": 3 +} +``` + +```json +{ + "plan": { + "name": "PLAN_NAME", + "id": "ee1c5269-c7f0-492a-8652-f0ceb15ed3bc", + "transaction_id": "6bca5f2b-ee7e-4637-8b58-1b4b36ed10f9", + "status": "solved", + "message", "Plan PLAN_NAME is solved.", + "links": [ + { + "href": "http://homing/v1/plans/ee1c5269-c7f0-492a-8652-f0ceb15ed3bc", + "rel": "self" + } + ], + "recommendations": [ + { + "DEMAND_NAME_1": { + "inventory_provider": "aai", + "service_resource_id": "4feb0545-69e2-424c-b3c4-b270e5f2a15d", + "candidate": { + "candidate_id": "99befee8-e8c0-425b-8f36-fb7a8098d9a9", + "inventory_type": "service", + "location_type": "aic", + "location_id": "dal01", + "host_id" : "vig20002vm001vig001" + }, + "attributes": {OPAQUE-DICT} + }, + "DEMAND_NAME_2": { + "inventory_provider": "aai", + "service_resource_id": "578eb063-b24a-4654-ba9e-1e5cf7eb9183", + "candidate": { + "inventory_type": "cloud", + "location_type": "aic", + "location_id": "dal02" + }, + "attributes": {OPAQUE-DICT} + } + }, + { + "DEMAND_NAME_1": { + "inventory_provider": "aai", + "service_resource_id": "4feb0545-69e2-424c-b3c4-b270e5f2a15d", + "candidate": { + "candidate_id": "99befee8-e8c0-425b-8f36-fb7a8098d9a9", + "inventory_type": "service", + "location_type": "aic", + "location_id": "dal03", + "host_id" : "vig20001vm001vig001" + }, + "attributes": {OPAQUE-DICT} + }, + "DEMAND_NAME_2": { + "inventory_provider": "aai", 
+ "service_resource_id": "578eb063-b24a-4654-ba9e-1e5cf7eb9183", + "candidate": { + "inventory_type": "cloud", + "location_type": "aic", + "location_id": "dal04" + }, + "attributes": {OPAQUE-DICT} + } + }, + ... + ] + } +} +``` + +## Show plan details + +**GET** ``/v1/plans/{plan_id}`` + +* **Normal response codes:** 200 +* **Error response codes:** unauthorized (401), itemNotFound (404) + +### Request parameters + +| Parameter | Style | Type | Description | +|-------------|-------|------------|---------------------------------------------------| +| ``plan_id`` | plain | csapi:UUID | The UUID of the plan. | + +### Response Parameters + +See the Response Parameters for **Create a plan**. + +## Delete a plan + +**DELETE** ``/v1/plans/{plan_id}`` + +* **Normal response codes:** 204 +* **Error response codes:** badRequest (400), unauthorized (401), itemNotFound (404) + +### Request parameters + +| Parameter | Style | Type | Description | +|-------------|-------|------------|---------------------------------------------------| +| ``plan_id`` | plain | csapi:UUID | The UUID of the plan. | + +This operation does not accept a request body and does not return a response body. + +## API Errors + +In the event of an error with a status other than unauthorized (401), a detailed repsonse body is returned. + +### Response parameters + +| Parameter | Style | Type | Description | +|-------------|-------|------------|---------------------------------------------------| +| ``title`` | plain | xsd:string | Human-readable name. | +| ``explanation`` | plain | xsd:string | Detailed explanation with remediation (if any). | +| ``code`` | plain | xsd:int | HTTP Status Code. | +| ``error`` | plain | xsd:dict | Error dictionary. Keys include **message**, **traceback**, and **type**. | +| ``message`` | plain | xsd:string | Internal error message. | +| ``traceback`` | plain | xsd:string | Python traceback (if available). 
| +| ``type`` | plain | xsd:string | HTTP Status class name (from python-webob) | + +#### Examples + +A plan with the name "pl an" is considered a bad request because the name contains a space. + +```json +{ + "title": "Bad Request", + "explanation": "-> name -> pl an did not pass validation against callable: plan_name_type (must contain only uppercase and lowercase letters, decimal digits, hyphens, periods, underscores, and tildes [RFC 3986, Section 2.3])", + "code": 400, + "error": { + "message": "The server could not comply with the request since it is either malformed or otherwise incorrect.", + "type": "HTTPBadRequest" + } +} +``` + +The HTTP COPY method was attempted but is not allowed. + +```json +{ + "title": "Method Not Allowed", + "explanation": "The COPY method is not allowed.", + "code": 405, + "error": { + "message": "The server could not comply with the request since it is either malformed or otherwise incorrect.", + "type": "HTTPMethodNotAllowed" + } +} +``` + +## Contact ## + +Shankar Narayanan <shankarpnsn@gmail.com> diff --git a/conductor/doc/distribution/README.md b/conductor/doc/distribution/README.md new file mode 100644 index 0000000..2b8f5dc --- /dev/null +++ b/conductor/doc/distribution/README.md @@ -0,0 +1,551 @@ +# Python/Linux Distribution Notes + +*Updated: 10 Nov 2017 23:30 GMT* + +This document exists to help bridge the gap between the Conductor python package and any downstream distribution. The steps outlined herein may be taken into consideration when creating an AT&T SWM package, Ubuntu/Debian package, Chef cookbook, or Ansible playbook. 
+ +## Components + +Conductor consists of five services that work together: + +* **``conductor-api``**: An HTTP REST API +* **``conductor-controller``**: Validation, translation, and status/results +* **``conductor-data``**: Inventory provider and service controller gateway +* **``conductor-solver``**: Processing and solution calculation +* **``conductor-reservation``**: Reserves the suggested solution solved by Solver component. + +## Workflow + +* Deployment **plans** are created, viewed, and deleted via ``conductor-api`` and its [REST API](doc/api/README.md). +* Included within each ``conductor-api`` plan request is a [Homing Template](doc/template/README.md). +* Homing Templates describe a set of inventory demands and constraints to be solved against. +* ``conductor-api`` hands off all API requests to ``conductor-controller`` for handling. +* All deployment plans are assigned a unique identifier (UUID-4), which can be used to check for solution status asynchronously. (Conductor does not support callbacks at this time.) +* ``conductor-controller`` ensures templates are well-formed and valid. Errors and remediation are made visible through ``conductor-api``. When running in debug mode, the API will also include a python traceback in the response body, if available. +* ``conductor-controller`` uses ``conductor-data`` to resolve demands against a particular **inventory provider** (e.g., A&AI). +* ``conductor-controller`` translates the template into a format suitable for solving. +* As each template is translated, ``conductor-solver`` begins working on it. +* ``conductor-solver`` uses ``conductor-data`` to resolve constraints against a particular **service controller** (e.g., SDN-C). +* ``conductor-solver`` determines the most suitable inventory to recommend. +* ``conductor-reservation`` attempts to reserve the solved solution in SDN-GC + +**NOTE**: There is no Command Line Interface or Python API Library at this time. 
+ +## Pre-Flight and Pre-Installation Considerations + +### AT&T Application Identifiers and Roles + +* App/Tool Name: ECOMP Conductor +* MOTS Application ID: 26213 +* MechID: m04308 +* ECOMP Feature ID: F13704 +* PMT: 461306 +* UAM Role Name: Conductor Production Support +* UAM Role id: 0000025248 + +### Root + +Be aware that some commands may require ``sudo``, depending on the account being used to perform the installation. + +### Proxy + +If line-of-sight to internet-facing repositories is permitted and available, set the following shell environment variables if AT&T proxy services are required: + +```bash +$ export http_proxy="http://one.proxy.att.com:8080/" +$ export https_proxy="http://one.proxy.att.com:8080/" +``` + +### Requirements + +Conductor is officially supported on [Ubuntu 14.04 LTS (Trusty Tahr)](http://releases.ubuntu.com/14.04/), though it should also work on newer releases. + +Ensure the following Ubuntu packages are present, as they may not be included by default: + +* [libffi-dev](http://packages.ubuntu.com/trusty/libffi-dev) +* [postgresql-server-dev-9.3](http://packages.ubuntu.com/trusty/postgresql-server-dev-9.3) +* [python2.7](http://packages.ubuntu.com/trusty/python2.7) + +``conductor-api`` may be run as-is for development and test purposes. When used in a production environment, it is recommended that ``conductor-api`` run under a multithreaded httpd service supporting [WSGI](https://www.wikipedia.org/wiki/Web_Server_Gateway_Interface), tuned as appropriate. + +Configuration instructions for **apache2 httpd** and **nginx** are included herein. 
Respective package requirements are: + +* [apache2](http://packages.ubuntu.com/trusty/apache2) and [libapache2-mod-wsgi](http://packages.ubuntu.com/trusty/libapache2-mod-wsgi) +* [nginx](http://packages.ubuntu.com/trusty/nginx) and [uwsgi](http://packages.ubuntu.com/trusty/uwsgi) + +All Conductor services use AT&T [Music](https://github.com/att/music) for data storage/persistence and/or as a RPC transport mechanism. Consult the [Music Local Installation Guide](https://github.com/att/music/blob/master/README.md) for installation/configuration steps. + +### Networking + +All conductor services require line-of-sight access to all Music servers/ports. + +The ``conductor-api`` service uses TCP port 8091. + +### Security + +``conductor-api`` is accessed via HTTP. SSL/TLS certificates and AuthN/AuthZ (e.g., AAF) are not supported at this time. + +Conductor makes use of plugins that act as gateways to *inventory providers* and *service controllers*. At present, two plugins are supported out-of-the-box: **A&AI** and **SDN-C**, respectively. + +A&AI requires two-way SSL/TLS. Certificates must be registered and whitelisted with A&AI. SDN-C uses HTTP Basic Authentication. Consult with each respective service for official information on how to obtain access. + +### Storage + +For a cloud environment in particular, it may be desirable to use a separate block storage device (e.g., an OpenStack Cinder volume) for logs, configuration, and other data persistence. In this way, it becomes a trivial matter to replace the entire VM if necessary, followed by reinstallation of the app and any supplemental configuration. Take this into consideration when setting various Conductor config options. + +### Python Virtual Environments + +At present, Conductor installation is only supported at the (upstream) python package level and not the (downstream) Ubuntu distribution or SWM package levels. 
+ +To mitigate/eliminate the risk of introducing conflicts with other python applications or Ubuntu/SWM package dependencies, consider installing Conductor in a [python virtual environment](http://docs.python-guide.org/en/latest/dev/virtualenvs/) (or *venv* for short). + +Example venv-aware WSGI app configurations, sysvinit scripts, and upstart scripts can be found in the Conductor repository under [examples](/examples/). + +### Python Package Dependencies + +Conductor is installed using the python ``pip`` command. ``pip`` uses a python project's [requirements manifest](/requirements.txt) to install all python module dependencies. + +**NOTE**: When line-of-sight access to a PyPI-compatible package index is not available, advance installation of Conductor's python package dependencies is required *before* installation. + +### Other Production Environment Considerations + +TBD. ``:)`` + +Over time, considerations may include services such as: + +* AAF +* AppMetrics +* Introscope +* Nagios +* Splunk +* UAM + +## Installation and Configuration + +**IMPORTANT**: Perform the steps in this section after *optionally* configuring and activating a python virtual environment. + +### Installing From a PyPI Repository + +In ONAP, the ``conductor`` package can be found on ````. + +Installation is via the ``pip`` command. Here is an example ``pip.conf`` file that uses both the internet and intranet-facing PyPI repositories: + +```ini +[global] +index = https://pypi.python.org/pypi +index-url = https://pypi.python.org/simple +extra-index-url = +trusted-host = +``` + +Once the configuration is in place, installation is simple: + +```bash +$ pip install of-has +``` + +To upgrade or downgrade, simply re-run ``pip install`` using the appropriate ``pip`` command line options. + +**NOTE**: Be sure proxy settings are in place if they're required to access ``pypi.python.org``. + +### Installing From Source + +Conductor source in ONAP is maintained in https://gerrit.onap.org/r/optf/has. 
+ +Clone the git repository, and then install from within the ``conductor`` directory: + +```bash +$ git clone https://gerrit.onap.org/r/optf/has +Cloning into 'conductor'... +remote: Counting objects: 2291, done. +remote: Compressing objects: 88% (1918/2179) +remote: Compressing objects: 100% (2179/2179), done. +remote: Total 2291 (delta 1422), reused 0 (delta 0) +Receiving objects: 100% (2291/2291), 477.59 KiB | 0 bytes/s, done. +Resolving deltas: 100% (1422/1422), done. +$ cd conductor +$ pip install . +``` + +The latest source can be pulled from ONAP at any time and reinstalled: + +```bash +$ git pull +$ pip install . +``` + +### Verifying Installation + +Each of the five Conductor services may be invoked with the ``--help`` option: + +```bash +$ conductor-api -- --help +$ conductor-controller --help +$ conductor-data --help +$ conductor-solver --help +$ conductor-reservation --help +``` + +**NOTE**: The ``conductor-api`` command is deliberate. ``--`` is used as as separator between the arguments used to start the WSGI server and the arguments passed to the WSGI application. + +## Post-Flight and Post-Installation Considerations + +### User and Group + +It's good practice to create an unprivileged account (e.g., a user/group named ``conductor``) and run all Conductor services as that user: + +```bash +$ sudo addgroup --system conductor +$ sudo adduser --system --home /var/lib/conductor --ingroup conductor --no-create-home --shell /bin/false conductor +``` + +### SSL/TLS Certificates + +The A&AI Inventory Provider Plugin requiries two-way SSL/TLS. After provisioning a certificate per A&AI guidelines, it will be necessary to securely install the certificate, key, and certificate authority bundle. + +When running conductor services as ``conductor:conductor`` (recommended), consider co-locating all of these files under the configuration directory. 
For example, when using ``/etc/conductor``: + +```bash +$ # Certificate files (crt extension, 644 permissions) +$ sudo mkdir /etc/conductor/ssl/certs +$ # Private Certificate Key files (key extension, 640 permissions) +$ sudo mkdir /etc/conductor/ssl/private +$ # Certificate Authority (CA) Bundles (crt extension, 644 permissions) +$ sudo mkdir /etc/conductor/ssl/ca-certificates +$ # Add files to newly created directories, then set ownership +$ sudo chmod -R conductor:conductor /etc/conductor/ssl +``` + +For a hypothetical domain name ``imacculate.client.research.att.com``, example filenames could be as follows: + +```bash +$ find ssl -type f -printf '%M %u:%g %f\n' +-rw-r----- conductor:conductor imacculate.client.research.att.com.key +-rw-r--r-- conductor:conductor Symantec_Class_3_Secure_Server_CA.crt +-rw-r--r-- conductor:conductor imacculate.client.research.att.com.crt +``` + +When running conductor services as ``root``, consider these existing Ubuntu filesystem locations for SSL/TLS files: + +**Certificate** files (``crt`` extension) are typically stored in ``/etc/ssl/certs`` with ``root:root`` ownership and 644 permissions. + +**Private Certificate Key** files (``key`` extension) are typically stored in ``/etc/ssl/private`` with ``root:root`` ownership and 640 permissions. + +**Certificate Authority (CA) Bundles** (``crt`` extension) are typically stored in ``/usr/share/ca-certificates/conductor`` with ``root:root`` ownership, and 644 permissions. These Bundle files are then symlinked within ``/etc/ssl/certs`` using equivalent filenames, a ``pem`` extension, and ``root:root`` ownership. + +**NOTE**: LUKS (Linux Unified Key Setup) is not supported by Conductor at this time. + +### Configuration + +Configuration files are located in ``etc/conductor`` relative to the python environment Conductor is installed in. 
+ +To generate a sample configuration file, change to the directory just above where ``etc/conductor`` is located (e.g., `/` for the default environment, or the virtual environment root directory). Then: + +```bash +$ oslo-config-generator --config-file=etc/conductor/conductor-config-generator.conf +``` + +This will generate ``etc/conductor/conductor.conf.sample``. + +Because the configuration directory and files will include credentials, consider removing world permissions: + +```bash +$ find etc/conductor -type f -exec chmod 640 {} + +$ find etc/conductor -type d -exec chmod 750 {} + +``` + +The sample config may then be copied and edited. Be sure to backup any previous ``conductor.conf`` if necessary. + +```bash +$ cd etc/conductor +$ cp -p conductor.conf.sample conductor.conf +``` + +``conductor.conf`` is fully annotated with descriptions of all options. Defaults are included, with all options commented out. Conductor will use defaults even if an option is not present in the file. To change an option, simply uncomment it and edit its value. + +With the exception of the ``DEFAULT`` section, it's best to restart the Conductor services after making any config changes. In some cases, only one particular service actually needs to be restarted. When in doubt, however, it's best to restart all of them. + +A few options in particular warrant special attention: + +``` +[DEFAULT] + +# If set to true, the logging level will be set to DEBUG instead of the default +# INFO level. (boolean value) +# Note: This option can be changed without restarting. +#debug = false +``` + +For more verbose logging across all Conductor services, set ``debug`` to true. + +``` +[aai] + +# Base URL for A&AI, up to and not including the version, and without a +# trailing slash. (string value) +#server_url = https://controller:8443/aai + +# SSL/TLS certificate file in pem format. This certificate must be registered +# with the A&AI endpoint. 
(string value) +#certificate_file = certificate.pem + +# Private Certificate Key file in pem format. (string value) +#certificate_key_file = certificate_key.pem + +# Certificate Authority Bundle file in pem format. Must contain the appropriate +# trust chain for the Certificate file. (string value) +#certificate_authority_bundle_file = certificate_authority_bundle.pem +``` + +Set ``server_url`` to the A&AI server URL, to but not including the version, omitting any trailing slash. Conductor supports A&AI API v9 at a minimum. + +Set the ``certificate`` prefixed keys to the appropriate SSL/TLS-related files. + +**IMPORTANT**: The A&AI server may have a mismatched host/domain name and SSL/TLS certificate. In such cases, certificate verification will fail. To mitigate this, ``certificate_authority_bundle_file`` may be set to an empty value. While Conductor normally requires a CA Bundle (otherwise why bother using SSL/TLS), this requirement has been temporarily relaxed so that development and testing may continue. + +``` +[messaging_server] + +# Log debug messages. Default value is False. (boolean value) +#debug = false +``` + +When the ``DEFAULT`` section's ``debug`` option is ``true``, set this section's ``debug`` option to ``true`` to enable detailed Conductor-side RPC-over-Music debug messages. + +Be aware, it is voluminous. "You have been warned." ``:)`` + +``` +[music_api] + +# List of hostnames (round-robin access) (list value) +#hostnames = localhost + +# Log debug messages. Default value is False. (boolean value) +#debug = false +``` + +Set ``hostnames`` to match wherever the Music REST API is being hosted (wherever Apache Tomcat and ``MUSIC.war`` are located). + +When the ``DEFAULT`` section's ``debug`` option is ``true``, set this section's ``debug`` option to ``true`` to enable detailed Conductor-side MUSIC API debug messages. + +The previous comment around the volume of log lines applies even more so here. (Srsly. We're not kidding.) 
+ +**IMPORTANT**: Conductor does not presently use Music's atomic consistency features due to concern around lock creation/acquisition. Instead, Conductor uses eventual consistency. For this reason, consistency issues may occur when using Music in a multi-server, High Availability configuration. + +``` +[sdnc] + +# Base URL for SDN-C. (string value) +#server_url = https://controller:8443/restconf + +# Basic Authentication Username (string value) +#username = <None> + +# Basic Authentication Password (string value) +#password = <None> + +``` + +Set ``server_url`` to the SDN-C server URL, omitting any trailing slash. + +Set ``username`` and ``password`` to the appropriate values as directed by SDN-C. + +### Running for the First Time + +Each Conductor component may be run interactively. In this case, the user does not necessarily matter. + +When running interactively, it is suggested to run each command in a separate terminal session and in the following order: + +```bash +conductor-data --config-file=/etc/conductor/conductor.conf +conductor-controller --config-file=/etc/conductor/conductor.conf +conductor-solver --config-file=/etc/conductor/conductor.conf +conductor-reservation --config-file=/etc/conductor/conductor.conf +conductor-api --port=8091 -- --config-file=/etc/conductor/conductor.conf +``` + +Optionally, use an application like [screen](http://packages.ubuntu.com/trusty/screen) to nest all five terminal sessions within one detachable session. (This is also the same package used by [DevStack](https://docs.openstack.org/developer/devstack/).) + +To verify that ``conductor-api`` can be reached, browse to ``http://HOST:8091/``, where HOST is the hostname ``conductor-api`` is running on. No AuthN/AuthZ is required at this time. Depending on network considerations, it may be necessary to use a command like ``wget`` instead of a desktop browser. 
+ +The response should look similar to: + +```json +{ + "versions": { + "values": [ + { + "status": "development", + "updated": "2016-11-01T00:00:00Z", + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.ecomp.homing-v1+json" + } + ], + "id": "v1", + "links": [ + { + "href": "http://127.0.0.1:8091/v1", + "rel": "self" + }, + { + "href": "http://conductor.research.att.com/", + "type": "text/html", + "rel": "describedby" + } + ] + } + ] + } +} +``` + +### Sample API Calls and Homing Templates + +A [Postman](http://getpostman.com/) collection illustrating sample requests is available upon request. The collection will also be added in a future revision. + +[Sample homing templates](/doc/examples/README.md) are also available. + +### Ubuntu Service Scripts + +Ubuntu sysvinit (init.d) and upstart (init) scripts are typically installed at the Ubuntu package level. Since there is no such packaging at this time, example scripts have been provided in the repository. + +To install, place all Conductor [sysvinit scripts](/examples/distribution/ubuntu/init.d) in ``/etc/init.d``, and all [upstart scripts](/examples/distribution/ubuntu/init) in ``/etc/init``. + +Set file permissions: + +```bash +$ sudo chmod 644 /etc/init/conductor* +$ sudo chmod 755 /etc/init.d/conductor* +``` + +If a python virtual environment is being used, edit each ``/etc/init/conductor*`` and ``/etc/init.d/conductor*`` prefixed file so that ``PYTHON_HOME`` is set to the python virtual environment root directory. 
+ +Next, enable the scripts: + +```bash +$ sudo update-rc.d conductor-api defaults +$ sudo update-rc.d conductor-controller defaults +$ sudo update-rc.d conductor-data defaults +$ sudo update-rc.d conductor-solver defaults +$ sudo update-rc.d conductor-reservation defaults +$ sudo initctl reload-configuration +``` + +Conductor components may now be started/stopped like any other Ubuntu service, for example: + +```bash +$ sudo service conductor-api start +$ sudo service conductor-api status +$ sudo service conductor-api restart +$ sudo service conductor-api stop +``` + +Conductor service scripts automatically create directories for ``log``, ``lock``, ``run``, ``lib``, and ``log`` files, e.g., ``/var/log/conductor`` and so on. + +### Log File Rotation + +Sample ``logrotate.d`` configuration files have been provided in the repository. + +To install, place all Conductor [logrotate files](/examples/distribution/ubuntu/logrotate.d) in ``/etc/logrotate.d``. + +Set file ownership and permissions: + +```bash +$ sudo chown root:root /etc/logrotate.d/conductor* +$ sudo chmod 644 /etc/logrotate.d/conductor* +``` + +``logrotate.d`` automatically recognizes new files at the next log rotation opportunity and does not require restarting. + +## Running conductor-api Under apache2 httpd and mod_wsgi + +Sample configuration files have been provided in the repository. + +These instructions presume a ``conductor`` user exists. See the **Service Scripts** section for details. + +First, set up a few directories: + +```bash +$ sudo mkdir -p /var/www/conductor +$ sudo mkdir /var/log/apache2/conductor +``` + +To install, place the Conductor [WSGI application file](/conductor/api/app.wsgi) in ``/var/www/conductor``. 
+ +Set the owner/group of both directories/files to ``conductor``: + +```bash +$ sudo chown -R conductor:conductor /var/log/apache2/conductor /var/www/conductor +``` + +Next, place the Conductor [apache2 httpd site config file](/examples/apache2/conductor.conf) in ``/etc/apache2/sites-available``. + +Set the owner/group to ``root``: + +```bash +$ sudo chown -R root:root /etc/apache2/sites-available/conductor.conf +``` + +If Conductor was installed in a python virtual environment, append ``python-home=VENV`` to ``WSGIDaemonProcess``, where ``VENV`` is the python virtual environment root directory. + +**IMPORTANT**: Before proceeding, disable the ``conductor-api`` sysvinit and upstart services, as the REST API will now be handled by apache2 httpd. Otherwise there will be a port conflict, and you will be sad. + +Enable the Conductor site, ensure the configuration syntax is valid, and gracefully restart apache2 httpd. + +```bash +$ sudo a2ensite conductor +$ sudo apachectl -t +Syntax OK +$ sudo apachectl graceful +``` + +To disable the Conductor site, run ``sudo a2dissite conductor``, then gracefully restart once again. Optionally, re-enable the ``conductor-api`` sysvinit and upstart services. + +## Running conductor-api Under nginx and uWSGI + +Sample configuration files have been provided in the repository. + +These instructions presume a ``conductor`` user exists. See the **Service Scripts** section for details. + +To install, place the Conductor [nginx config files](/examples/nginx/) and [WSGI application file](/conductor/api/app.wsgi) in ``/etc/nginx`` (taking care to backup any prior configuration files). It may be desirable to incorporate Conductor's ``nginx.conf`` into the existing config. + +Rename ``app.wsgi`` to ``conductor.wsgi``: + +```bash +$ cd /etc/nginx +$ sudo mv app.wsgi conductor.wsgi +``` + +In ``nginx.conf``, set ``CONDUCTOR_API_FQDN`` to the server name. 
+ +**IMPORTANT**: Before proceeding, disable the ``conductor-api`` sysvinit and upstart services, as the REST API will now be handled by nginx. Otherwise there will be a port conflict, and you will be sad. + +Restart nginx: + +```bash +$ sudo service nginx restart +``` + +Then, run ``conductor-api`` under nginx using uWSGI: + +```bash +$ sudo uwsgi -s /tmp/uwsgi.sock --chmod-socket=777 --wsgi-file /etc/nginx/conductor.wsgi --callable application --set port=8091 +``` + +To use a python virtual environment, add ``--venv VENV`` to the ``uwsgi`` command, where ``VENV`` is the python virtual environment root directory. + +## Uninstallation + +Activate a virtual environment (venv) first, if necessary, then uninstall with: + +```bash +$ pip uninstall ecomp-conductor +``` + +Remove any previously made configuration file changes, user accounts, Ubuntu/SWM packages, and other settings as needed. + +## Bug Reporting and Feedback + +... is encouraged. Please raise an issue at: https://jira.onap.org/projects/OPTFRA/summary diff --git a/conductor/doc/examples/README.md b/conductor/doc/examples/README.md new file mode 100644 index 0000000..84e7e6b --- /dev/null +++ b/conductor/doc/examples/README.md @@ -0,0 +1,96 @@ +# Example Conductor Templates + +*Updated: 10 Oct 2017* + +## Example 1 + +```yaml + +# Homing Specification Version +homing_template_version: 2017-10-10 + +# Runtime order Parameters +parameters: + service_name: Residential vCPE + service_id: vcpe_service_id + customer_lat: 32.897480 + customer_long: -97.040443 + +# List of geographical locations +locations: + customer_loc: + latitude: {get_param: customer_lat} + longitude: {get_param: customer_long} + +# List of VNFs (demands) to be homed +demands: + vGMuxInfra: + - inventory_provider: aai + inventory_type: service + attributes: + equipment_type: vG_Mux + customer_id: some_company + excluded_candidates: + - candidate_id: + 1ac71fb8-ad43-4e16-9459-c3f372b8236d + existing_placement: + - candidate_id: 
21d5f3e8-e714-4383-8f99-cc480144505a + vG: + - inventory_provider: aai + inventory_type: service + attributes: + equipment_type: vG + modelId: vG_model_id + customer_id: some_company + excluded_candidates: + - candidate_id: 1ac71fb8-ad43-4e16-9459-c3f372b8236d + existing_placement: + - candidate_id: 21d5f3e8-e714-4383-8f99-cc480144505a + - inventory_provider: aai + inventory_type: cloud + +# List of homing policies (constraints) +constraints: + # distance constraint + - constraint_vgmux_customer: + type: distance_to_location + demands: [vGMuxInfra] + properties: + distance: < 100 km + location: customer_loc + # cloud region co-location constraint + - colocation: + type: zone + demands: [vGMuxInfra, vG] + properties: + qualifier: same + category: region + # platform capability constraint + - numa_cpu_pin_capabilities: + type: attribute + demands: [vG] + properties: + evaluate: + vcpu_pinning: True + numa_topology: numa_spanning + # cloud provider constraint + - cloud_version_capabilities: + type: attribute + demands: [vGMuxInfra] + properties: + evaluate: + cloud_version: 1.11.84 + cloud_provider: AWS + +# Objective function to minimize +optimization: + minimize: + sum: + - {distance_between: [customer_loc, vGMuxInfra]} + - {distance_between: [customer_loc, vG]} + +``` + +## Contact ## + +Shankar Narayanan <shankarpnsn@gmail.com> diff --git a/conductor/doc/glossary/README.md b/conductor/doc/glossary/README.md new file mode 100644 index 0000000..5b673ac --- /dev/null +++ b/conductor/doc/glossary/README.md @@ -0,0 +1,26 @@ +# Glossary + +| Term | Description | +|------|-------------| +| **A&AI** | Active and Available Inventory | +| **Cloud** | tbd | +| **Conductor** | The AIC/ECOMP Homing service. | +| **Constraint** | tbd | +| **Cost Function** | tbd | +| **Data Center** | tbd | +| **DCAE** | Data Collection, Analytics, and Events | +| **Demand** | tbd | +| **Homing** | Canonical service name for Conductor. 
| +| **Host** | tbd | +| **Inventory** | tbd | +| **Inventory Source** | tbd | +| **LCP (and vLCP)** | Local Control Plane (or virtual LCP). Synonymous with **Region**. | +| **Location** | tbd | +| **Network Link** | tbd | +| **Region** | Synonymous with **LCP**. | +| **Service Inventory** | tbd | +| **Site** | tbd | + +## Contact ## + +Joe D'Andrea <jdandrea@research.att.com> diff --git a/conductor/doc/template/README.md b/conductor/doc/template/README.md new file mode 100644 index 0000000..f8afcca --- /dev/null +++ b/conductor/doc/template/README.md @@ -0,0 +1,875 @@ +###### Apache License, Version 2.0 + +=========================== + +``Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.`` + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +# Homing Specification Guide + +*Updated: 10 October 2017* + +This document describes the Homing Template format, used by the Homing service. It is a work in progress and subject to frequent revision. + +## Template Structure + +Homing templates are defined in YAML and follow the structure outlined below. + +```yaml +homing_template_version: 2017-10-10 +parameters: + PARAMETER_DICT +locations: + LOCATION_DICT +demands: + DEMAND_DICT +constraints: + CONSTRAINT_DICT +reservations: + RESERVATION_DICT +optimization: + OPTIMIZATION +``` + +* ``homing_template_version``: This key with value 2017-10-10 (or a later date) indicates that the YAML document is a Homing template of the specified version. 
+ +* ``parameters``: This section allows for specifying input parameters that have to be provided when instantiating the homing template. Typically, this section is used for providing runtime parameters (like SLA thresholds), which in turn are used in the existing homing policies. The section is optional and can be omitted when no input is required. +* ``locations``: This section contains the declaration of geographic locations. This section is optional and can be omitted when no input is required. +* ``demands``: This section contains the declaration of demands. This section with at least one demand should be defined in any Homing template, or the template would not really do anything when being instantiated. +* ``constraints``: This section contains the declaration of constraints. The section is optional and can be omitted when no input is required. +* ``reservations``: This section contains the declaration of required reservations. This section is optional and can be omitted when reservations are not required. +* ``optimization``: This section allows the declaration of an optimization. This section is optional and can be omitted when no input is required. + +## Homing Template Version + +The value of ``homing_template_version`` tells HAS not only the format of the template but also features that will be validated and supported. Only one value is supported: ``2017-10-10`` in the initial release of HAS. + +```yaml +homing_template_version: 2017-10-10 +``` + +## Parameters + +The **parameters** section allows for specifying input parameters that have to be provided when instantiating the template. Such parameters are typically used for providing runtime inputs (like SLA thresholds), which in turn are used in the existing homing policies. This also helps build reusable homing constraints where these parameters can be embedded at design time, and their corresponding values can be supplied during runtime. + +Each parameter is specified with the name followed by its value. 
Values can be strings, lists, or dictionaries. + +### Example + +In this example, ``provider_name`` is a string and ``service_info`` is a dictionary containing both a string and a list (keyed by ``base_url`` and ``nod_config``, respectively). + +```yaml +parameters: + provider_name: multicloud + service_info: + base_url: http://serviceprovider.sdngc.com/ + nod_config: + - http://nod/config_a.yaml + - http://nod/config_b.yaml + - http://nod/config_c.yaml + - http://nod/config_d.yaml +``` + +A parameter can be referenced in place of any value. See the **Intrinsic Functions** section for more details. + +## Locations + +One or more **locations** may be declared. A location may be referenced by one or more ``constraints``. Locations may be defined in any of the following ways: + +### Coordinate + +A geographic coordinate expressed as a latitude and longitude. + +| Key | Value | +|-----------------------------|----------------------------| +| ``latitude`` | Latitude of the location. | +| ``longitude`` | Longitude of the location. | + +### Host Name + +An opaque host name that can be translated to a coordinate via an inventory provider (e.g., A&AI). + +| Key | Value | +|-----------------------------|----------------------------| +| ``host_name`` | Host name identifying a location. | + +### CLLI + +Common Language Location Identification (CLLI) code(https://en.wikipedia.org/wiki/CLLI_code). + +| Key | Value | +|-----------------------------|----------------------------| +| ``clli_code`` | 8 character CLLI. | + +**Questions** + +* Do we need functions that can convert one of these to the other? E.g., CLLI Codes to a latitude/longitude + +### Placemark + +An address expressed in geographic region-agnostic terms (referred to as a *placemark*). + +*Support for this schema is deferred.* + +| Key | Value | +|-----------------------------|----------------------------| +| ``iso_country_code`` | The abbreviated country name associated with the placemark. 
| +| ``postal_code`` | The postal code associated with the placemark. | +| ``administrative_area`` | The state or province associated with the placemark. | +| ``sub_administrative_area`` | Additional administrative area information for the placemark. | +| ``locality`` | The city associated with the placemark. | +| ``sub_locality`` | Additional city-level information for the placemark. | +| ``thoroughfare`` | The street address associated with the placemark. | +| ``sub_thoroughfare`` | Additional street-level information for the placemark. | + +**Questions** + +* What geocoder can we use to convert placemarks to a latitude/longitude? + +### Examples + +The following examples illustrate a location expressed in coordinate, host_name, CLLI, and placemark, respectively. + +```yaml +locations: + location_using_coordinates: + latitude: 32.897480 + longitude: -97.040443 + + host_location_using_host_name: + host_name: USESTCDLLSTX55ANZ123 + + location_using_clli: + clli_code: DLLSTX55 + + location_using_placemark: + sub_thoroughfare: 1 + thoroughfare: ATT Way + locality: Bedminster + administrative_area: NJ + postal_code: 07921-2694 +``` + +## Demands + +A **demand** can be satisfied by using candidates drawn from inventories. Each demand is uniquely named. Inventory is considered to be opaque and can represent anything from which candidates can be drawn. + +A demand's resource requirements are determined by asking an **inventory provider** for one or more sets of **inventory candidates** against which the demand will be made. An explicit set of candidates may also be declared, for example, if the only candidates for a demand are predetermined. + +Demand criteria is dependent upon the inventory provider in use. + +**Provider-agnostic Schema** + +| Key | Value | +|------------------------|--------------------------| +| ``inventory_provider`` | A HAS-supported inventory provider. 
| +| ``inventory_type`` | The reserved word ``cloud`` (for cloud regions) or the reserved word ``service`` (for existing service instances). Exactly one inventory type may be specified. | +| ``attributes`` (Optional) | A list of key-value pairs, that is used to select inventory candidates that match *all* the specified attributes. The key should be a uniquely identifiable attribute at the inventory provider. | +| ``service_type`` (Optional) | If ``inventory_type`` is ``service``, a list of one or more provider-defined service types. If only one service type is specified, it may appear without list markers (``[]``). | +| ``service_id`` (Optional) | If ``inventory_type`` is ``service``, a list of one or more provider-defined service ids. If only one service id is specified, it may appear without list markers (``[]``). | +| ``default_cost`` (Optional) | The default cost of an inventory candidate, expressed as currency. This must be specified if the inventory provider may not always return a cost. | +| ``required_candidates`` (Optional) | A list of one or more candidates from which a solution will be explored. Must be a valid candidate as described in the **candidate schema**. | +| ``excluded_candidates`` (Optional) | A list of one or more candidates that should be excluded from the search space. Must be a valid candidate as described in the **candidate schema**. | +| ``existing_placement`` (Optional) | The current placement for the demand. Must be a valid candidate as described in the **candidate schema**. | + + +### Examples + +The following example helps understand a demand specification using Active & Available Inventory (A&AI), the inventory provider-of-record for ONAP. + +**Inventory Provider Criteria** + +| Key | Value | +|------------------------|--------------------------| +| ``inventory_provider`` | Examples: ``aai``, ``multicloud``. 
| +| ``inventory_type`` | The reserved word ``cloud`` (for new inventory) or the reserved word ``service`` (for existing inventory). Exactly one inventory type may be specified. | +| ``attributes`` (Optional) | A list of key-value pairs to match against inventory when drawing candidates. | +| ``service_type`` (Optional) | Examples may include ``vG``, ``vG_MuxInfra``, etc. | +| ``service_id`` (Optional) | Must be a valid service id. Examples may include ``vCPE``, ``VoLTE``, etc. | +| ``default_cost`` (Optional) | The default cost of an inventory candidate, expressed as a unitless number. | +| ``required_candidates`` (Optional) | A list of one or more valid candidates. See **Candidate Schema** for details. | +| ``excluded_candidates`` (Optional) | A list of one or more valid candidates. See **Candidate Schema** for details. | +| ``existing_placement`` (Optional) | A single valid candidate, representing the current placement for the demand. See **candidate schema** for details. | + +**Candidate Schema** + +The following is the schema for a valid ``candidate``: +* ``candidate_id`` uniquely identifies a candidate. Currently, it is either a Service Instance ID or Cloud Region ID. +* ``candidate_type`` identifies the type of the candidate. Currently, it is either ``cloud`` or ``service``. +* ``inventory_type`` is defined as described in **Inventory Provider Criteria** (above). +* ``inventory_provider`` identifies the inventory from which the candidate was drawn. +* ``host_id`` is an ID of a specific host (used only when referring to service/existing inventory). +* ``cost`` is expressed as a unitless number. +* ``location_id`` is always a location ID of the specified location type (e.g., for a type of ``cloud`` this will be an Cloud Region ID). +* ``location_type`` is an inventory provider supported location type. +* ``latitude`` is a valid latitude corresponding to the *location_id*. +* ``longitude`` is a valid longitude corresponding to the *location_id*. 
+ +* ``city`` (Optional) city corresponding to the *location_id*. +* ``state`` (Optional) state corresponding to the *location_id*. +* ``country`` (Optional) country corresponding to the *location_id*. +* ``region`` (Optional) geographic region corresponding to the *location_id*. +* ``complex_name`` (Optional) Name of the complex corresponding to the *location_id*. +* ``cloud_owner`` (Optional) refers to the *cloud owner* (e.g., ``azure``, ``aws``, ``att``, etc.). +* ``cloud_region_version`` (Optional) is an inventory provider supported version of the cloud region. +* ``physical_location_id`` (Optional) is an inventory provider supported CLLI code corresponding to the cloud region. + +**Examples** + +```json +{ + "candidate_id": "1ac71fb8-ad43-4e16-9459-c3f372b8236d", + "candidate_type": "service", + "inventory_type": "service", + "inventory_provider": "aai", + "host_id": "vnf_123456", + "cost": "100", + "location_id": "DLLSTX9A", + "location_type": "azure", + "latitude": "32.897480", + "longitude": "-97.040443", + "city": "Dallas", + "state": "TX", + "country": "USA", + "region": "US", + "complex_name": "dalls_one", + "cloud_owner": "att-aic", + "cloud_region_version": "1.1", + "physical_location_id": "DLLSTX9A" +} +``` + +**Questions** +* Currently, candidates are either service instances or cloud regions. As new services are on-boarded, this can be evolved to represent different types of resources. + +**Examples** + +The following examples illustrate two demands: + +* ``vGMuxInfra``: A vGMuxInfra service, drawing candidates of type *service* from the inventory. Only candidates that match the customer_id and orchestration-status will be included in the search space. +* ``vG``: A vG, drawing candidates of type *service* and *cloud* from the inventory. Only candidates that match the customer_id and provisioning-status will be included in the search space. 
+ + +```yaml +demands: + vGMuxInfra: + - inventory_provider: aai + inventory_type: service + attributes: + equipment_type: vG_Mux + customer_id: some_company + orchestration-status: Activated + model-id: 174e371e-f514-4913-a93d-ed7e7f8fbdca + model-version: 2.0 + vG: + - inventory_provider: aai + inventory_type: service + attributes: + equipment_type: vG + customer_id: some_company + provisioning-status: provisioned + - inventory_provider: aai + inventory_type: cloud +``` + +**Questions** +* Do we need to support cost as a function ? + +## Constraints + +A **Constraint** is used to *eliminate* inventory candidates from one or more demands that do not meet the requirements specified by the constraint. Since reusability is one of the cornerstones of HAS, Constraints are designed to be service-agnostic, and is parameterized such that it can be reused across a wide range of services. Further, HAS is designed with a plug-in architecture that facilitates easy addition of new constraint types. + +Constraints are denoted by a ``constraints`` key. Each constraint is uniquely named and set to a dictionary containing a constraint type, a list of demands to apply the constraint to, and a dictionary of constraint properties. + +**Considerations while using multiple constraints** +* Constraints should be treated as a unordered list, and no assumptions should be made as regards to the order in which the constraints are evaluated for any given demand. +* All constraints are effectively AND-ed together. Constructs such as "Constraint X OR Y" are unsupported. +* Constraints are reducing in nature, and does not increase the available candidates at any point during the constraint evaluations. + + +**Schema** + +| Key | Value | +|---------------------|-------------| +| ``CONSTRAINT_NAME`` | Key is a unique name. | +| ``type`` | The type of constraint. See **Constraint Types** for a list of currently supported values. | +| ``demands`` | One or more previously declared demands. 
If only one demand is specified, it may appear without list markers (``[]``). | +| ``properties`` (Optional) | Properties particular to the specified constraint type. Use if required by the constraint. | + +```yaml +constraints: + CONSTRAINT_NAME_1: + type: CONSTRAINT_TYPE + demands: DEMAND_NAME | [DEMAND_NAME_1, DEMAND_NAME_2, ...] + properties: PROPERTY_DICT + + CONSTRAINT_NAME_2: + type: CONSTRAINT_TYPE + demands: DEMAND_NAME | [DEMAND_NAME_1, DEMAND_NAME_2, ...] + properties: PROPERTY_DICT + + ... +``` + +#### Constraint Types + +| Type | Description | +|---------------------|-------------| +| ``attribute`` | Constraint that matches the specified list of Attributes. | +| ``distance_between_demands`` | Geographic distance constraint between each pair of a list of demands. | +| ``distance_to_location`` | Geographic distance constraint between each of a list of demands and a specific location. | +| ``instance_fit`` | Constraint that ensures available capacity in an existing service instance for an incoming demand. | +| ``inventory_group`` | Constraint that enforces two or more demands are satisfied using candidates from a pre-established group in the inventory. | +| ``region_fit`` | Constraint that ensures available capacity in an existing cloud region for an incoming demand. | +| ``zone`` | Constraint that enforces co-location/diversity at the granularities of clouds/regions/availability-zones. | +| ``license`` (Deferred) | License availability constraint. | +| ``network_between_demands`` (Deferred) | Network constraint between each pair of a list of demands. | +| ``network_to_location`` (Deferred) | Network constraint between each of a list of demands and a specific location/address. 
| + +*Note: Constraint names marked "Deferred" **will not** be supported in the initial release of HAS.* + +#### Threshold Values + +Constraint property values representing a threshold may be an integer or floating point number, optionally prefixed with a comparison operator: ``=``, ``<``, ``>``, ``<=``, or ``>=``. The default is ``=`` and optionally suffixed with a unit. + +Whitespace may appear between the comparison operator and value, and between the value and units. When a range values is specified (e.g., ``10-20 km``), the comparison operator is omitted. + +Each property is documented with a default unit. The following units are supported: + +| Unit | Values | Default | +|------------|------------------------------|----------| +| Currency | ``USD`` | ``USD`` | +| Time | ``ms``, ``sec`` | ``ms`` | +| Distance | ``km``, ``mi`` | ``km`` | +| Throughput | ``Kbps``, ``Mbps``, ``Gbps`` | ``Mbps`` | + +### Attribute + +Constrain one or more demands by one or more attributes, expressed as properties. Attributes are mapped to the **inventory provider** specified properties, referenced by the demands. For example, properties could be hardware capabilities provided by the platform (flavor, CPU-Pinning, NUMA), features supported by the services, etc. + +**Schema** + +| Property | Value | +|--------------|-------------------------------------------------------------| +| ``evaluate`` | Opaque dictionary of attribute name and value pairs. Values must be strings or numbers. Encoded and sent to the service provider via a plugin. | + +*Note: Attribute values are not detected/parsed as thresholds by the Homing framework. 
Such interpretations and evaluations are inventory provider-specific and delegated to the corresponding plugin* + +```yaml +constraints: + sriov_nj: + type: attribute + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + evaluate: + cloud_version: 1.1 + flavor: SRIOV + subdivision: US-TX + vcpu_pinning: True + numa_topology: numa_spanning +``` + +#### Proposal: Evaluation Operators + +To assist in evaluating attributes, the following operators and notation are proposed: + +| Operator | Name | Operand | +|--------------|-----------|------------------------------------------------| +| ``eq`` | ``==`` | Any object (string, number, list, dict) | +| ``ne`` | ``!=`` | | +| ``lt`` | ``<`` | A number (strings are converted to float) | +| ``gt`` | ``>`` | | +| ``lte`` | ``<=`` | | +| ``gte`` | ``>=`` | | +| ``any`` | ``Any`` | A list of objects (string, number, list, dict) | +| ``all`` | ``All`` | | +| ``regex`` | ``RegEx`` | A regular expression pattern | + +Example usage: + +```yaml +constraints: + sriov_nj: + type: attribute + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + evaluate: + cloud_version: {gt: 1.0} + flavor: {regex: /^SRIOV$/i} + subdivision: {any: [US-TX, US-NY, US-CA]} +``` + +### Distance Between Demands + +Constrain each pairwise combination of two or more demands by distance requirements. + +**Schema** + +| Name | Value | +|--------------|-------------------------------------------------------------| +| ``distance`` | Distance between demands, measured by the geographic path. | + +The constraint is applied between each pairwise combination of demands. For this reason, at least two demands must be specified, implicitly or explicitly. + +```yaml +constraints: + distance_vnf1_vnf2: + type: distance_between_demands + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + distance: < 250 km +``` + +### Distance To Location + +Constrain one or more demands by distance requirements relative to a specific location. 
+ +**Schema** + +| Property | Value | +|--------------|------------------------------------------------------------| +| ``distance`` | Distance between demands, measured by the geographic path. | +| ``location`` | A previously declared location. | + +The constraint is applied between each demand and the referenced location, not across all pairwise combinations of Demands. + +```yaml +constraints: + distance_vnf1_loc: + type: distance_to_location + demands: [my_vnf_demand, my_other_vnf_demand, another_vnf_demand] + properties: + distance: < 250 km + location: LOCATION_ID +``` + +### Instance Fit + +Constrain each demand by its service requirements. + +Requirements are sent as a request to a **service controller**. Service controllers are defined by plugins in Homing (e.g., ``sdn-c``). + +A service controller plugin knows how to communicate with a particular endpoint (via HTTP/REST, DMaaP, etc.), obtain necessary information, and make a decision. The endpoint and credentials can be configured through plugin settings. + +**Schema** + +| Property | Description | +|----------------|-----------------------------------| +| ``controller`` | Name of a service controller. | +| ``request`` | Opaque dictionary of key/value pairs. Values must be strings or numbers. Encoded and sent to the service provider via a plugin. | + +```yaml +constraints: + check_for_availability: + type: instance_fit + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + controller: sdn-c + request: REQUEST_DICT +``` + +### Region Fit + +Constrain each demand's inventory candidates based on inventory provider membership. + +Requirements are sent as a request to a **service controller**. Service controllers are defined by plugins in Homing (e.g., ``sdn-c``). + +A service controller plugin knows how to communicate with a particular endpoint (via HTTP/REST, DMaaP, etc.), obtain necessary information, and make a decision. The endpoint and credentials can be configured through plugin settings. 
+ +**Schema** + +| Property | Description | +|----------------|-----------------------------------| +| ``controller`` | Name of a service controller. | +| ``request`` | Opaque dictionary of key/value pairs. Values must be strings or numbers. Encoded and sent to the service provider via a plugin. | + +```yaml +constraints: + check_for_membership: + type: region_fit + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + controller: sdn-c + request: REQUEST_DICT +``` +### Zone + +Constrain two or more demands such that each is located in the same or different zone category. + +Zone categories are inventory provider-defined, based on the demands being constrained. + +**Schema** + +| Property | Value | +|---------------|-------------------------------------------------------------| +| ``qualifier`` | Zone qualifier. One of ``same`` or ``different``. | +| ``category`` | Zone category. One of ``disaster``, ``region``, ``complex``, ``time``, or ``maintenance``. | + +For example, to place two demands in different disaster zones: + +```yaml +constraints: + vnf_diversity: + type: zone + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + qualifier: different + category: disaster +``` + +Or, to place two demands in the same region: + +```yaml +constraints: + vnf_affinity: + type: zone + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + qualifier: same + category: region +``` + +**Notes** + +* These categories could be any of the following: ``disaster_zone``, ``region``, ``complex``, ``time_zone``, and ``maintenance_zone``. Really, we are talking affinity/anti-affinity at the level of DCs, but these terms may cause confusion with affinity/anti-affinity in OpenStack. + +### Inventory Group + +Constrain demands such that inventory items are grouped across two demands. + +This constraint has no properties. 
+ +```yaml +constraints: + my_group: + type: inventory_group + demands: [demand_1, demand_2] +``` + +*Note: Only pair-wise groups are supported at this time. If three or more demands are specified, only the first two will be used.* + +### License + +Constrain demands according to license availability. + +*Support for this constraint is deferred.* + +**Schema** + +| Property | Value | +|----------|----------------------------------------------------------| +| ``id`` | Unique license identifier | +| ``key`` | Opaque license key, particular to the license identifier | + +```yaml +constraints: + my_software: + type: license + demands: [demand_1, demand_2, ...] + properties: + id: SOFTWARE_ID + key: LICENSE_KEY +``` + +### Network Between Demands + +Constrain each pairwise combination of two or more demands by network requirements. + +*Support for this constraint is deferred.* + +**Schema** + +| Property | Value | +|--------------------------|-----------------------------------------------------------------| +| ``bandwidth`` (Optional) | Desired network bandwidth. | +| ``distance`` (Optional) | Desired distance between demands, measured by the network path. | +| ``latency`` (Optional) | Desired network latency. | + +Any combination of ``bandwidth``, ``distance``, or ``latency`` must be specified. If none of these properties are used, it is treated as a malformed request. + +The constraint is applied between each pairwise combination of demands. For this reason, at least two demands must be specified, implicitly or explicitly. + +```yaml +constraints: + network_requirements: + type: network_between_demands + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + bandwidth: >= 1000 Mbps + distance: < 250 km + latency: < 50 ms +``` + +### Network To Location + +Constrain one or more demands by network requirements relative to a specific location. 
+ +*Support for this constraint is deferred.* + +**Schema** + +| Property | Value | +|---------------|-----------------------------------------------------------------| +| ``bandwidth`` | Desired network bandwidth. | +| ``distance`` | Desired distance between each demand and the location, measured by the network path. | +| ``latency`` | Desired network latency. | +| ``location`` | A previously declared location. | + +Any combination of ``bandwidth``, ``distance``, or ``latency`` must be specified. If none of these properties are used, it is treated as a malformed request. + +The constraint is applied between each demand and the referenced location, not across all pairwise combinations of demands. + +```yaml +constraints: + my_access_network_constraint: + type: network_to_location + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + bandwidth: >= 1000 Mbps + distance: < 250 km + latency: < 50 ms + location: LOCATION_ID +``` +### Capabilities + +Constrain each demand by its cluster capability requirements. For example, as described by an OpenStack Heat template and operational environment. + +*Support for this constraint is deferred.* + +**Schema** + +| Property | Value | +|--------------|-------------------------------------------------------------| +| ``specification`` | Indicates the kind of specification being provided in the properties. Must be ``heat``. Future values may include ``tosca``, ``Homing``, etc. | +| ``template`` | For specifications of type ``heat``, a single stack in OpenStack Heat Orchestration Template (HOT) format. Stacks may be expressed as a URI reference or a string of well-formed YAML/JSON. Templates are validated by the Heat service configured for use by HAS. Nested stack references are unsupported. | +| ``environment`` (Optional) | For specifications of type ``heat``, an optional Heat environment. Environments may be expressed as a URI reference or a string of well-formed YAML/JSON. 
Environments are validated by the Heat service configured for use by Homing. | + +```yaml +constraints: + check_for_fit: + type: capability + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + specification: heat + template: http://repository/my/stack_template + environment: http://repository/my/stack_environment +``` + + +## Reservations + +A **Reservation** allows reservation of resources associated with a candidate that satisfies one or more demands. + +Similar to the *instance_fit* constraint, requirements are sent as a request to a **service controller** that handles the reservation. Service controllers are defined by plugins in Homing (e.g., ``sdn-c``). + +The service controller plugin knows how to make a reservation (and initiate rollback on a failure) with a particular endpoint (via HTTP/REST, DMaaP, etc.) of the service controller. The endpoint and credentials can be configured through plugin settings. + +**Schema** + +| Property | Description | +|----------------|-----------------------------------| +| ``controller`` | Name of a service controller. | +| ``request`` | Opaque dictionary of key/value pairs. Values must be strings or numbers. Encoded and sent to the service provider via a plugin. | + + +```yaml +resource_reservation: + type: instance_reservation + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + controller: sdn-c + request: REQUEST_DICT +``` + +## Optimizations + +An **Optimization** allows specification of an objective function, which aims to maximize or minimize a certain value that varies based on the choice of candidates for one or more demands that are a part of the objective function. For example, an objective function may be to find the *closest* cloud-region to a customer to home a demand. 
+ +### Optimization Components + +Optimization definitions can be broken down into three components: + +| Component | Key | Value | +|-----------|----------------------|---------------------------------------------------------| +| Goal | ``minimize`` | A single Operand (usually ``sum``) or Function | +| Operator | ``sum``, ``product`` | Two or more Operands (Numbers, Operators, Functions) | +| Function | ``distance_between`` | A two-element list consisting of a location and demand. | + + +### Example + +Given a customer location ``cl``, two demands ``vG1`` and ``vG2``, and weights ``w1`` and ``w2``, the optimization criteria can be expressed as: + +``minimize(w1 * distance_between(cl, vG1) + w2 * distance_between(cl, vG2))`` + +This can be read as: "Minimize the sum of weighted distances from cl to vG1 and from cl to vG2." + +Such optimizations may be expressed in a template as follows: + +```yaml +parameters: + w1: 10 + w2: 20 + +optimization: + minimize: + sum: + - product: + - {get_param: w1} + - {distance_between: [cl, vG1]} + - product: + - {get_param: w2} + - {distance_between: [cl, vG2]} +``` + +Or without the weights as: + +```yaml +optimization: + minimize: + sum: + - {distance_between: [cl, vG1]} + - {distance_between: [cl, vG2]} +``` + +**Template Restriction** + +While the template format supports any number of arrangements of numbers, operators, and functions, HAS's solver presently expects a very specific arrangement. + +Until further notice: + +* Optimizations must conform to a single goal of ``minimize`` followed by a ``sum`` operator. +* The sum can consist of two ``distance_between`` function calls, or two ``product`` operators. +* If a ``product`` operator is present, it must contain at least a ``distance_between`` function call, plus one optional number to be used for weighting. +* Numbers may be referenced via ``get_param``. +* The objective function has to be written in the sum-of-product format. 
In the future, HAS can convert product-of-sum into sum-of-product automatically. + +The first two examples in this section illustrate both of these use cases. + +**Inline Operations** + +If desired, operations can be rewritten inline. For example, the two ``product`` operations from the previous example can also be expressed as: + +```yaml +parameters: + w1: 10 + w2: 20 + +optimization: + minimize: + sum: + - {product: [{get_param: w1}, {distance_between: [cl, vG1]}]} + - {product: [{get_param: w2}, {distance_between: [cl, vG2]}]} +``` + +In turn, even the ``sum`` operation can be rewritten inline, however there is a point of diminishing returns in terms of readability! + +**Notes** + +* In the first version, we do not support more than one dimension in the optimization (e.g., Minimize distance and cost). For supporting multiple dimensions we would need a function to normalize the units across dimensions. + +## Intrinsic Functions + +Homing provides a set of intrinsic functions that can be used inside templates to perform specific tasks. The following section describes the role and syntax of the intrinsic functions. + +Functions are written as a dictionary with one key/value pair. The key is the function name. The value is a list of arguments. If only one argument is provided, a string may be used instead. + +```yaml +a_property: {FUNCTION_NAME: [ARGUMENT_LIST]} + +a_property: {FUNCTION_NAME: ARGUMENT_STRING} +``` + +*Note: These functions can only be used within "properties" sections.* + +### get_file + +The ``get_file`` function inserts the content of a file into the template. It is generally used as a file inclusion mechanism for files containing templates from other services (e.g., Heat). + +The syntax of the ``get_file`` function is: + +```yaml +{get_file: <content key>} +``` + +The ``content`` key is used to look up the ``files`` dictionary that is provided in the REST API call. 
The Homing client command (``Homing``) is ``get_file`` aware and populates the ``files`` dictionary with the actual content of fetched paths and URLs. The Homing client command supports relative paths and transforms these to the absolute URLs required by the Homing API. + +**Note**: The ``get_file`` argument must be a static path or URL and not rely on intrinsic functions like ``get_param``. The Homing client does not process intrinsic functions. They are only processed by the Homing engine. + +The example below demonstrates the ``get_file`` function usage with both relative and absolute URLs: + +```yaml +constraints: + check_for_fit: + type: capability + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + template: {get_file: stack_template.yaml} + environment: {get_file: http://hostname/environment.yaml} +``` + +The ``files`` dictionary generated by the Homing client during instantiation of the plan would contain the following keys. Each value would be that file's contents. + +* ``file:///path/to/stack_template.yaml`` +* ``http://hostname/environment.yaml`` + +**Questions** + +* If Homing will only be accessed over DMaaP, files will need to be embedded using the Homing API request format. + +### get_param + +The ``get_param`` function references an input parameter of a template. It resolves to the value provided for this input parameter at runtime. + +The syntax of the ``get_param`` function is: + +```yaml +{get_param: <parameter name>} + +{get_param: [<parameter name>, <key/index1> (optional), <key/index2> (optional), ...]} +``` + +**parameter name** is the parameter name to be resolved. If the parameter returns a complex data structure such as a list or a dict, then subsequent keys or indices can be specified. These additional parameters are used to navigate the data structure to return the desired value. Indices are zero-based. 
+ +The following example demonstrates how the ``get_param`` function is used: + +```yaml +parameters: + software_id: SOFTWARE_ID + license_key: LICENSE_KEY + service_info: + provider: dmaap:///full.topic.name + costs: [10, 20, 30, 40, 50, 60, 70, 80, 90, 100] + +constraints: + my_software: + type: license + demands: [demand_1, demand_2, ...] + properties: + id: {get_param: software_id} + key: {get_param: license_key} + + check_for_availability: + type: service + demands: [my_vnf_demand, my_other_vnf_demand] + properties: + provider_url: {get_param: [service_info, provider]} + request: REQUEST_DICT + cost: {get_param: [service_info, costs, 4]} +``` + +In this example, properties would be set as follows: + +| Key | Value | +|------------------|--------------------------| +| ``id`` | SOFTWARE_ID | +| ``key`` | LICENSE_KEY | +| ``provider_url`` | dmaap:///full.topic.name | +| ``cost`` | 50 | + +## Contact ## + +Shankar Narayanan <shankarpnsn@gmail.com> diff --git a/conductor/etc/conductor/api_paste.ini b/conductor/etc/conductor/api_paste.ini new file mode 100644 index 0000000..9d6b32a --- /dev/null +++ b/conductor/etc/conductor/api_paste.ini @@ -0,0 +1,26 @@ +# Conductor API WSGI Pipeline +# Define the filters that make up the pipeline for processing WSGI requests +# Note: This pipeline is PasteDeploy's term rather than Conductor's pipeline +# used for processing samples + +# Remove authtoken from the pipeline if you don't want to use keystone authentication +[pipeline:main] +pipeline = cors http_proxy_to_wsgi api-server +#pipeline = cors http_proxy_to_wsgi request_id authtoken api-server + +[app:api-server] +paste.app_factory = conductor.api.app:app_factory + +#[filter:authtoken] +#paste.filter_factory = keystonemiddleware.auth_token:filter_factory + +#[filter:request_id] +#paste.filter_factory = oslo_middleware:RequestId.factory + +[filter:cors] +paste.filter_factory = oslo_middleware.cors:filter_factory +oslo_config_project = conductor + +[filter:http_proxy_to_wsgi] 
+paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory +oslo_config_project = conductor
\ No newline at end of file diff --git a/conductor/etc/conductor/conductor-config-generator.conf b/conductor/etc/conductor/conductor-config-generator.conf new file mode 100644 index 0000000..fa694d2 --- /dev/null +++ b/conductor/etc/conductor/conductor-config-generator.conf @@ -0,0 +1,7 @@ +[DEFAULT] +output_file = etc/conductor/conductor.conf.sample +wrap_width = 79 +namespace = conductor +namespace = oslo.log +# namespace = oslo.messaging +# namespace = oslo.middleware.cors
\ No newline at end of file diff --git a/conductor/etc/conductor/log.conf b/conductor/etc/conductor/log.conf new file mode 100644 index 0000000..070da15 --- /dev/null +++ b/conductor/etc/conductor/log.conf @@ -0,0 +1,49 @@ +[loggers] +keys=root + +[handlers] +keys=trfhand,infohand,errhand,debughand,warninghand + +[logger_root] +level=NOTSET +handlers=trfhand,infohand,errhand,debughand,warninghand + + +[handler_trfhand] +class=FileHandler +formatter=generic +level=NOTSET +args=('/home/larry/Desktop/log/application.log',) + +[handler_errhand] +class=FileHandler +level=ERROR +formatter=generic +args=('/home/larry/Desktop/log/error.log', ) + +[handler_infohand] +class=FileHandler +level=INFO +formatter=generic +args=('/home/larry/Desktop/log/info.log',) + +[handler_debughand] +class=FileHandler +level=DEBUG +formatter=generic +args=('/home/larry/Desktop/log/debug.log',) + +[handler_warninghand] +class=FileHandler +level=WARNING +formatter=generic +args=('/home/larry/Desktop/log/warning.log',) + + +[formatters] +keys=generic + +[formatter_generic] +class=logging.Formatter +format=%(asctime)s %(levelname)s %(name)s: [-] %(message)s +datefmt=
\ No newline at end of file diff --git a/conductor/etc/conductor/policy.json b/conductor/etc/conductor/policy.json new file mode 100644 index 0000000..95e5e19 --- /dev/null +++ b/conductor/etc/conductor/policy.json @@ -0,0 +1,18 @@ +{ + "context_is_admin": "role:admin", + "segregation": "rule:context_is_admin", + + "homing:get_samples": "", + "homing:get_sample": "", + "homing:query_sample": "", + "homing:create_samples": "", + + "homing:compute_statistics": "", + "homing:get_meters": "", + + "homing:get_resource": "", + "homing:get_resources": "", + + "homing:events:index": "", + "homing:events:show": "" +}
\ No newline at end of file diff --git a/conductor/etc/conductor/rootwrap.conf b/conductor/etc/conductor/rootwrap.conf new file mode 100644 index 0000000..75275ca --- /dev/null +++ b/conductor/etc/conductor/rootwrap.conf @@ -0,0 +1,27 @@ +# Configuration for conductor-rootwrap +# This file should be owned by (and only-writeable by) the root user + +[DEFAULT] +# List of directories to load filter definitions from (separated by ','). +# These directories MUST all be only writeable by root ! +filters_path=/etc/conductor/rootwrap.d,/usr/share/conductor/rootwrap + +# List of directories to search executables in, in case filters do not +# explicitely specify a full path (separated by ',') +# If not specified, defaults to system PATH environment variable. +# These directories MUST all be only writeable by root ! +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin + +# Enable logging to syslog +# Default value is False +use_syslog=False + +# Which syslog facility to use. +# Valid values include auth, authpriv, syslog, user0, user1... +# Default value is 'syslog' +syslog_log_facility=syslog + +# Which messages to log. +# INFO means log all usage +# ERROR means only log unsuccessful attempts +syslog_log_level=ERROR
\ No newline at end of file diff --git a/conductor/etc/conductor/rootwrap.d/README.txt b/conductor/etc/conductor/rootwrap.d/README.txt new file mode 100644 index 0000000..a3808e1 --- /dev/null +++ b/conductor/etc/conductor/rootwrap.d/README.txt @@ -0,0 +1 @@ +This space reserved for future rootwrap command filters.
\ No newline at end of file diff --git a/conductor/examples/apache2/conductor.conf b/conductor/examples/apache2/conductor.conf new file mode 100644 index 0000000..c882555 --- /dev/null +++ b/conductor/examples/apache2/conductor.conf @@ -0,0 +1,25 @@ +# This is an example Apache2 configuration file for using the +# conductor API through mod_wsgi. + +# Note: If you are using a Debian-based system then the paths +# "/var/log/httpd" and "/var/run/httpd" will use "apache2" instead +# of "httpd". +# +# The number of processes and threads is an example only and should +# be adjusted according to local requirements. + +Listen 8091 + +<VirtualHost *:8091> + WSGIDaemonProcess conductor-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP} + WSGIProcessGroup conductor-api + WSGIScriptAlias / /var/www/conductor/app + WSGIApplicationGroup %{GLOBAL} + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/httpd/conductor_error.log + CustomLog /var/log/httpd/conductor_access.log combined +</VirtualHost> + +WSGISocketPrefix /var/run/httpd
\ No newline at end of file diff --git a/conductor/examples/distribution/ubuntu/init.d/conductor-api b/conductor/examples/distribution/ubuntu/init.d/conductor-api new file mode 100644 index 0000000..e67a9dc --- /dev/null +++ b/conductor/examples/distribution/ubuntu/init.d/conductor-api @@ -0,0 +1,149 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: conductor-api +# Required-Start: $network $local_fs $remote_fs $syslog +# Required-Stop: $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Conductor API +# Description: Conductor API server +### END INIT INFO + +# Author: Joe D'Andrea <jdandrea@research.att.com> +# Based on work by Thomas Goirand <zigo@debian.or> + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Conductor API" +PROJECT_NAME=conductor +NAME=${PROJECT_NAME}-api +PYTHON_HOME= +PORT=8091 + +#!/bin/sh +# The content after this line comes from openstack-pkg-tools +# and has been automatically added to a .init.in script, which +# contains only the descriptive part for the daemon. Everything +# else is standardized as a single unique script. 
+ +# Author: Joe D'Andrea <jdandrea@research.att.com> +# Based on work by Thomas Goirand <zigo@debian.or> + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin + +if [ -z "${DAEMON}" ] ; then + if [ -d "${PYTHON_HOME}" ] ; then + DAEMON=${PYTHON_HOME}/bin/${NAME} + else + DAEMON=/usr/bin/${NAME} + fi +fi +PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid +if [ -z "${SCRIPTNAME}" ] ; then + SCRIPTNAME=/etc/init.d/${NAME} +fi +if [ -z "${SYSTEM_USER}" ] ; then + SYSTEM_USER=${PROJECT_NAME} +fi +if [ -z "${SYSTEM_GROUP}" ] ; then + SYSTEM_GROUP=${PROJECT_NAME} +fi +if [ "${SYSTEM_USER}" != "root" ] ; then + STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}" +fi +if [ -z "${CONFIG_FILE}" ] ; then + CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf +fi +LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log +if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then + DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}" +fi + +# Exit if the package is not installed +[ -x $DAEMON ] || exit 0 + +# If ran as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed +if [ `whoami` = "root" ] ; then + for i in lock run log lib ; do + mkdir -p /var/$i/${PROJECT_NAME} + chown ${SYSTEM_USER}:${SYSTEM_USER} /var/$i/${PROJECT_NAME} + done +fi + +# This defines init_is_upstart which we use later on (+ more...) +. /lib/lsb/init-functions + +# Manage log options: logfile and/or syslog, depending on user's choosing +[ -r /etc/default/openstack ] && . /etc/default/openstack +[ -r /etc/default/$NAME ] && . 
/etc/default/$NAME +[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog" +[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE" + +do_start() { + start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \ + --test > /dev/null || return 1 + start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \ + -- --port ${PORT} -- $DAEMON_ARGS || return 2 +} + +do_stop() { + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE + RETVAL=$? + rm -f $PIDFILE + return "$RETVAL" +} + +do_systemd_start() { + exec $DAEMON $DAEMON_ARGS +} + +case "$1" in +start) + #init_is_upstart > /dev/null 2>&1 && exit 1 + log_daemon_msg "Starting $DESC" "$NAME" + do_start + case $? in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +stop) + #init_is_upstart > /dev/null 2>&1 && exit 0 + log_daemon_msg "Stopping $DESC" "$NAME" + do_stop + case $? in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +status) + status_of_proc -p "${PIDFILE}" "$DAEMON" "$NAME" && exit 0 || exit $? +;; +systemd-start) + do_systemd_start +;; +restart|force-reload) + #init_is_upstart > /dev/null 2>&1 && exit 1 + log_daemon_msg "Restarting $DESC" "$NAME" + do_stop + case $? in + 0|1) + do_start + case $? 
in + 0) log_end_msg 0 ;; + 1) log_end_msg 1 ;; # Old process is still running + *) log_end_msg 1 ;; # Failed to start + esac + ;; + *) log_end_msg 1 ;; # Failed to stop + esac +;; +*) + echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2 + exit 3 +;; +esac + +exit 0 + diff --git a/conductor/examples/distribution/ubuntu/init.d/conductor-controller b/conductor/examples/distribution/ubuntu/init.d/conductor-controller new file mode 100644 index 0000000..f09d302 --- /dev/null +++ b/conductor/examples/distribution/ubuntu/init.d/conductor-controller @@ -0,0 +1,148 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: conductor-controller +# Required-Start: $network $local_fs $remote_fs $syslog +# Required-Stop: $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Conductor Controller +# Description: Conductor Controller server +### END INIT INFO + +# Author: Joe D'Andrea <jdandrea@research.att.com> +# Based on work by Thomas Goirand <zigo@debian.or> + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Conductor Controller" +PROJECT_NAME=conductor +NAME=${PROJECT_NAME}-controller +PYTHON_HOME= + +#!/bin/sh +# The content after this line comes from openstack-pkg-tools +# and has been automatically added to a .init.in script, which +# contains only the descriptive part for the daemon. Everything +# else is standardized as a single unique script. 
+ +# Author: Joe D'Andrea <jdandrea@research.att.com> +# Based on work by Thomas Goirand <zigo@debian.or> + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin + +if [ -z "${DAEMON}" ] ; then + if [ -d "${PYTHON_HOME}" ] ; then + DAEMON=${PYTHON_HOME}/bin/${NAME} + else + DAEMON=/usr/bin/${NAME} + fi +fi +PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid +if [ -z "${SCRIPTNAME}" ] ; then + SCRIPTNAME=/etc/init.d/${NAME} +fi +if [ -z "${SYSTEM_USER}" ] ; then + SYSTEM_USER=${PROJECT_NAME} +fi +if [ -z "${SYSTEM_GROUP}" ] ; then + SYSTEM_GROUP=${PROJECT_NAME} +fi +if [ "${SYSTEM_USER}" != "root" ] ; then + STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}" +fi +if [ -z "${CONFIG_FILE}" ] ; then + CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf +fi +LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log +if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then + DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}" +fi + +# Exit if the package is not installed +[ -x $DAEMON ] || exit 0 + +# If ran as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed +if [ `whoami` = "root" ] ; then + for i in lock run log lib ; do + mkdir -p /var/$i/${PROJECT_NAME} + chown ${SYSTEM_USER} /var/$i/${PROJECT_NAME} + done +fi + +# This defines init_is_upstart which we use later on (+ more...) +. /lib/lsb/init-functions + +# Manage log options: logfile and/or syslog, depending on user's choosing +[ -r /etc/default/openstack ] && . /etc/default/openstack +[ -r /etc/default/$NAME ] && . 
/etc/default/$NAME +[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog" +[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE" + +do_start() { + start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \ + --test > /dev/null || return 1 + start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \ + -- $DAEMON_ARGS || return 2 +} + +do_stop() { + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE + RETVAL=$? + rm -f $PIDFILE + return "$RETVAL" +} + +do_systemd_start() { + exec $DAEMON $DAEMON_ARGS +} + +case "$1" in +start) + #init_is_upstart > /dev/null 2>&1 && exit 1 + log_daemon_msg "Starting $DESC" "$NAME" + do_start + case $? in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +stop) + #init_is_upstart > /dev/null 2>&1 && exit 0 + log_daemon_msg "Stopping $DESC" "$NAME" + do_stop + case $? in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +status) + status_of_proc -p "${PIDFILE}" "$DAEMON" "$NAME" && exit 0 || exit $? +;; +systemd-start) + do_systemd_start +;; +restart|force-reload) + #init_is_upstart > /dev/null 2>&1 && exit 1 + log_daemon_msg "Restarting $DESC" "$NAME" + do_stop + case $? in + 0|1) + do_start + case $? 
in + 0) log_end_msg 0 ;; + 1) log_end_msg 1 ;; # Old process is still running + *) log_end_msg 1 ;; # Failed to start + esac + ;; + *) log_end_msg 1 ;; # Failed to stop + esac +;; +*) + echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2 + exit 3 +;; +esac + +exit 0 + diff --git a/conductor/examples/distribution/ubuntu/init.d/conductor-data b/conductor/examples/distribution/ubuntu/init.d/conductor-data new file mode 100644 index 0000000..70b3e2f --- /dev/null +++ b/conductor/examples/distribution/ubuntu/init.d/conductor-data @@ -0,0 +1,148 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: conductor-data +# Required-Start: $network $local_fs $remote_fs $syslog +# Required-Stop: $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Conductor Data +# Description: Conductor Data server +### END INIT INFO + +# Author: Joe D'Andrea <jdandrea@research.att.com> +# Based on work by Thomas Goirand <zigo@debian.or> + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Conductor Data" +PROJECT_NAME=conductor +NAME=${PROJECT_NAME}-data +PYTHON_HOME= + +#!/bin/sh +# The content after this line comes from openstack-pkg-tools +# and has been automatically added to a .init.in script, which +# contains only the descriptive part for the daemon. Everything +# else is standardized as a single unique script. 
+ +# Author: Joe D'Andrea <jdandrea@research.att.com> +# Based on work by Thomas Goirand <zigo@debian.or> + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin + +if [ -z "${DAEMON}" ] ; then + if [ -d "${PYTHON_HOME}" ] ; then + DAEMON=${PYTHON_HOME}/bin/${NAME} + else + DAEMON=/usr/bin/${NAME} + fi +fi +PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid +if [ -z "${SCRIPTNAME}" ] ; then + SCRIPTNAME=/etc/init.d/${NAME} +fi +if [ -z "${SYSTEM_USER}" ] ; then + SYSTEM_USER=${PROJECT_NAME} +fi +if [ -z "${SYSTEM_GROUP}" ] ; then + SYSTEM_GROUP=${PROJECT_NAME} +fi +if [ "${SYSTEM_USER}" != "root" ] ; then + STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}" +fi +if [ -z "${CONFIG_FILE}" ] ; then + CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf +fi +LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log +if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then + DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}" +fi + +# Exit if the package is not installed +[ -x $DAEMON ] || exit 0 + +# If ran as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed +if [ `whoami` = "root" ] ; then + for i in lock run log lib ; do + mkdir -p /var/$i/${PROJECT_NAME} + chown ${SYSTEM_USER} /var/$i/${PROJECT_NAME} + done +fi + +# This defines init_is_upstart which we use later on (+ more...) +. /lib/lsb/init-functions + +# Manage log options: logfile and/or syslog, depending on user's choosing +[ -r /etc/default/openstack ] && . /etc/default/openstack +[ -r /etc/default/$NAME ] && . 
/etc/default/$NAME +[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog" +[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE" + +do_start() { + start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \ + --test > /dev/null || return 1 + start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \ + -- $DAEMON_ARGS || return 2 +} + +do_stop() { + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE + RETVAL=$? + rm -f $PIDFILE + return "$RETVAL" +} + +do_systemd_start() { + exec $DAEMON $DAEMON_ARGS +} + +case "$1" in +start) + #init_is_upstart > /dev/null 2>&1 && exit 1 + log_daemon_msg "Starting $DESC" "$NAME" + do_start + case $? in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +stop) + #init_is_upstart > /dev/null 2>&1 && exit 0 + log_daemon_msg "Stopping $DESC" "$NAME" + do_stop + case $? in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +status) + status_of_proc -p "${PIDFILE}" "$DAEMON" "$NAME" && exit 0 || exit $? +;; +systemd-start) + do_systemd_start +;; +restart|force-reload) + #init_is_upstart > /dev/null 2>&1 && exit 1 + log_daemon_msg "Restarting $DESC" "$NAME" + do_stop + case $? in + 0|1) + do_start + case $? 
in + 0) log_end_msg 0 ;; + 1) log_end_msg 1 ;; # Old process is still running + *) log_end_msg 1 ;; # Failed to start + esac + ;; + *) log_end_msg 1 ;; # Failed to stop + esac +;; +*) + echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2 + exit 3 +;; +esac + +exit 0 + diff --git a/conductor/examples/distribution/ubuntu/init.d/conductor-reservation b/conductor/examples/distribution/ubuntu/init.d/conductor-reservation new file mode 100644 index 0000000..6365b32 --- /dev/null +++ b/conductor/examples/distribution/ubuntu/init.d/conductor-reservation @@ -0,0 +1,148 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: conductor-reservation +# Required-Start: $network $local_fs $remote_fs $syslog +# Required-Stop: $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Conductor reservation +# Description: Conductor reservation server +### END INIT INFO + +# Author: Shankaranarayanan Puzhavakath Narayanan <snarayanan@research.att.com> +# Based on work by Thomas Goirand <zigo@debian.or> + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Conductor Reservation" +PROJECT_NAME=conductor +NAME=${PROJECT_NAME}-reservation +PYTHON_HOME= + +#!/bin/sh +# The content after this line comes from openstack-pkg-tools +# and has been automatically added to a .init.in script, which +# contains only the descriptive part for the daemon. Everything +# else is standardized as a single unique script. 
+ +# Author: Shankaranarayanan Puzhavakath Narayanan <snarayanan@research.att.com> +# Based on work by Thomas Goirand <zigo@debian.or> + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin + +if [ -z "${DAEMON}" ] ; then + if [ -d "${PYTHON_HOME}" ] ; then + DAEMON=${PYTHON_HOME}/bin/${NAME} + else + DAEMON=/usr/bin/${NAME} + fi +fi +PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid +if [ -z "${SCRIPTNAME}" ] ; then + SCRIPTNAME=/etc/init.d/${NAME} +fi +if [ -z "${SYSTEM_USER}" ] ; then + SYSTEM_USER=${PROJECT_NAME} +fi +if [ -z "${SYSTEM_GROUP}" ] ; then + SYSTEM_GROUP=${PROJECT_NAME} +fi +if [ "${SYSTEM_USER}" != "root" ] ; then + STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}" +fi +if [ -z "${CONFIG_FILE}" ] ; then + CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf +fi +LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log +if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then + DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}" +fi + +# Exit if the package is not installed +[ -x $DAEMON ] || exit 0 + +# If ran as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed +if [ `whoami` = "root" ] ; then + for i in lock run log lib ; do + mkdir -p /var/$i/${PROJECT_NAME} + chown ${SYSTEM_USER} /var/$i/${PROJECT_NAME} + done +fi + +# This defines init_is_upstart which we use later on (+ more...) +. /lib/lsb/init-functions + +# Manage log options: logfile and/or syslog, depending on user's choosing +[ -r /etc/default/openstack ] && . /etc/default/openstack +[ -r /etc/default/$NAME ] && . 
/etc/default/$NAME +[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog" +[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE" + +do_start() { + start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \ + --test > /dev/null || return 1 + start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \ + -- $DAEMON_ARGS || return 2 +} + +do_stop() { + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE + RETVAL=$? + rm -f $PIDFILE + return "$RETVAL" +} + +do_systemd_start() { + exec $DAEMON $DAEMON_ARGS +} + +case "$1" in +start) + #init_is_upstart > /dev/null 2>&1 && exit 1 + log_daemon_msg "Starting $DESC" "$NAME" + do_start + case $? in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +stop) + #init_is_upstart > /dev/null 2>&1 && exit 0 + log_daemon_msg "Stopping $DESC" "$NAME" + do_stop + case $? in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +status) + status_of_proc -p "${PIDFILE}" "$DAEMON" "$NAME" && exit 0 || exit $? +;; +systemd-start) + do_systemd_start +;; +restart|force-reload) + #init_is_upstart > /dev/null 2>&1 && exit 1 + log_daemon_msg "Restarting $DESC" "$NAME" + do_stop + case $? in + 0|1) + do_start + case $? 
in + 0) log_end_msg 0 ;; + 1) log_end_msg 1 ;; # Old process is still running + *) log_end_msg 1 ;; # Failed to start + esac + ;; + *) log_end_msg 1 ;; # Failed to stop + esac +;; +*) + echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2 + exit 3 +;; +esac + +exit 0 + diff --git a/conductor/examples/distribution/ubuntu/init.d/conductor-solver b/conductor/examples/distribution/ubuntu/init.d/conductor-solver new file mode 100644 index 0000000..99cc98b --- /dev/null +++ b/conductor/examples/distribution/ubuntu/init.d/conductor-solver @@ -0,0 +1,148 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: conductor-solver +# Required-Start: $network $local_fs $remote_fs $syslog +# Required-Stop: $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Conductor Solver +# Description: Conductor Solver server +### END INIT INFO + +# Author: Joe D'Andrea <jdandrea@research.att.com> +# Based on work by Thomas Goirand <zigo@debian.or> + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC="Conductor Solver" +PROJECT_NAME=conductor +NAME=${PROJECT_NAME}-solver +PYTHON_HOME= + +#!/bin/sh +# The content after this line comes from openstack-pkg-tools +# and has been automatically added to a .init.in script, which +# contains only the descriptive part for the daemon. Everything +# else is standardized as a single unique script. 
+ +# Author: Joe D'Andrea <jdandrea@research.att.com> +# Based on work by Thomas Goirand <zigo@debian.or> + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin + +if [ -z "${DAEMON}" ] ; then + if [ -d "${PYTHON_HOME}" ] ; then + DAEMON=${PYTHON_HOME}/bin/${NAME} + else + DAEMON=/usr/bin/${NAME} + fi +fi +PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid +if [ -z "${SCRIPTNAME}" ] ; then + SCRIPTNAME=/etc/init.d/${NAME} +fi +if [ -z "${SYSTEM_USER}" ] ; then + SYSTEM_USER=${PROJECT_NAME} +fi +if [ -z "${SYSTEM_GROUP}" ] ; then + SYSTEM_GROUP=${PROJECT_NAME} +fi +if [ "${SYSTEM_USER}" != "root" ] ; then + STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}" +fi +if [ -z "${CONFIG_FILE}" ] ; then + CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf +fi +LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log +if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then + DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}" +fi + +# Exit if the package is not installed +[ -x $DAEMON ] || exit 0 + +# If ran as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed +if [ `whoami` = "root" ] ; then + for i in lock run log lib ; do + mkdir -p /var/$i/${PROJECT_NAME} + chown ${SYSTEM_USER} /var/$i/${PROJECT_NAME} + done +fi + +# This defines init_is_upstart which we use later on (+ more...) +. /lib/lsb/init-functions + +# Manage log options: logfile and/or syslog, depending on user's choosing +[ -r /etc/default/openstack ] && . /etc/default/openstack +[ -r /etc/default/$NAME ] && . 
/etc/default/$NAME +[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog" +[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE" + +do_start() { + start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \ + --test > /dev/null || return 1 + start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \ + -- $DAEMON_ARGS || return 2 +} + +do_stop() { + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE + RETVAL=$? + rm -f $PIDFILE + return "$RETVAL" +} + +do_systemd_start() { + exec $DAEMON $DAEMON_ARGS +} + +case "$1" in +start) + #init_is_upstart > /dev/null 2>&1 && exit 1 + log_daemon_msg "Starting $DESC" "$NAME" + do_start + case $? in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +stop) + #init_is_upstart > /dev/null 2>&1 && exit 0 + log_daemon_msg "Stopping $DESC" "$NAME" + do_stop + case $? in + 0|1) log_end_msg 0 ;; + 2) log_end_msg 1 ;; + esac +;; +status) + status_of_proc -p "${PIDFILE}" "$DAEMON" "$NAME" && exit 0 || exit $? +;; +systemd-start) + do_systemd_start +;; +restart|force-reload) + #init_is_upstart > /dev/null 2>&1 && exit 1 + log_daemon_msg "Restarting $DESC" "$NAME" + do_stop + case $? in + 0|1) + do_start + case $? 
in + 0) log_end_msg 0 ;; + 1) log_end_msg 1 ;; # Old process is still running + *) log_end_msg 1 ;; # Failed to start + esac + ;; + *) log_end_msg 1 ;; # Failed to stop + esac +;; +*) + echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2 + exit 3 +;; +esac + +exit 0 + diff --git a/conductor/examples/distribution/ubuntu/init/conductor-api.conf b/conductor/examples/distribution/ubuntu/init/conductor-api.conf new file mode 100644 index 0000000..f6f805e --- /dev/null +++ b/conductor/examples/distribution/ubuntu/init/conductor-api.conf @@ -0,0 +1,37 @@ +description "Conductor API" +author "Joe D'Andrea <jdandrea@research.att.com>" + +env PYTHON_HOME= +env PORT=8091 + +start on runlevel [2345] +stop on runlevel [!2345] + +chdir /var/run + +respawn +limit nofile 65535 65535 + +pre-start script + for i in lock run log lib ; do + mkdir -p /var/$i/conductor + chown conductor:conductor /var/$i/conductor + done +end script + +script + DAEMON="/usr/bin/conductor-api" + [ -d "$PYTHON_HOME" ] && DAEMON="$PYTHON_HOME/bin/conductor-api" + [ -x "$DAEMON" ] || exit 0 + DAEMON_ARGS="" + [ -r /etc/default/openstack ] && . /etc/default/openstack + [ -r /etc/default/$UPSTART_JOB ] && . 
/etc/default/$UPSTART_JOB + [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog" + [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/conductor/conductor-api.log" + + exec start-stop-daemon --start --chdir /var/lib/conductor \ + --chuid conductor:conductor --make-pidfile --pidfile /var/run/conductor/conductor-api.pid \ + --exec $DAEMON -- --port $PORT -- --config-file=/etc/conductor/conductor.conf ${DAEMON_ARGS} +end script + + diff --git a/conductor/examples/distribution/ubuntu/init/conductor-controller.conf b/conductor/examples/distribution/ubuntu/init/conductor-controller.conf new file mode 100644 index 0000000..9d44be2 --- /dev/null +++ b/conductor/examples/distribution/ubuntu/init/conductor-controller.conf @@ -0,0 +1,36 @@ +description "Conductor Controller" +author "Joe D'Andrea <jdandrea@research.att.com>" + +env PYTHON_HOME= + +start on runlevel [2345] +stop on runlevel [!2345] + +chdir /var/run + +respawn +limit nofile 65535 65535 + +pre-start script + for i in lock run log lib ; do + mkdir -p /var/$i/conductor + chown conductor /var/$i/conductor + done +end script + +script + DAEMON="/usr/bin/conductor-controller" + [ -d "$PYTHON_HOME" ] && DAEMON="$PYTHON_HOME/bin/conductor-controller" + [ -x "$DAEMON" ] || exit 0 + DAEMON_ARGS="" + [ -r /etc/default/openstack ] && . /etc/default/openstack + [ -r /etc/default/$UPSTART_JOB ] && . 
/etc/default/$UPSTART_JOB + [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog" + [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/conductor/conductor-controller.log" + + exec start-stop-daemon --start --chdir /var/lib/conductor \ + --chuid conductor:conductor --make-pidfile --pidfile /var/run/conductor/conductor-controller.pid \ + --exec $DAEMON -- --config-file=/etc/conductor/conductor.conf ${DAEMON_ARGS} +end script + + diff --git a/conductor/examples/distribution/ubuntu/init/conductor-data.conf b/conductor/examples/distribution/ubuntu/init/conductor-data.conf new file mode 100644 index 0000000..643206d --- /dev/null +++ b/conductor/examples/distribution/ubuntu/init/conductor-data.conf @@ -0,0 +1,36 @@ +description "Conductor Data" +author "Joe D'Andrea <jdandrea@research.att.com>" + +env PYTHON_HOME= + +start on runlevel [2345] +stop on runlevel [!2345] + +chdir /var/run + +respawn +limit nofile 65535 65535 + +pre-start script + for i in lock run log lib ; do + mkdir -p /var/$i/conductor + chown conductor /var/$i/conductor + done +end script + +script + DAEMON="/usr/bin/conductor-data" + [ -d "$PYTHON_HOME" ] && DAEMON="$PYTHON_HOME/bin/conductor-data" + [ -x "$DAEMON" ] || exit 0 + DAEMON_ARGS="" + [ -r /etc/default/openstack ] && . /etc/default/openstack + [ -r /etc/default/$UPSTART_JOB ] && . 
/etc/default/$UPSTART_JOB + [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog" + [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/conductor/conductor-data.log" + + exec start-stop-daemon --start --chdir /var/lib/conductor \ + --chuid conductor:conductor --make-pidfile --pidfile /var/run/conductor/conductor-data.pid \ + --exec $DAEMON -- --config-file=/etc/conductor/conductor.conf ${DAEMON_ARGS} +end script + + diff --git a/conductor/examples/distribution/ubuntu/init/conductor-reservation.conf b/conductor/examples/distribution/ubuntu/init/conductor-reservation.conf new file mode 100644 index 0000000..0af5603 --- /dev/null +++ b/conductor/examples/distribution/ubuntu/init/conductor-reservation.conf @@ -0,0 +1,36 @@ +description "Conductor Reservation" +author "Shankaranarayanan Puzhavakath Narayanan <snarayanan@research.att.com>" + +env PYTHON_HOME= + +start on runlevel [2345] +stop on runlevel [!2345] + +chdir /var/run + +respawn +limit nofile 65535 65535 + +pre-start script + for i in lock run log lib ; do + mkdir -p /var/$i/conductor + chown conductor /var/$i/conductor + done +end script + +script + DAEMON="/usr/bin/conductor-reservation" + [ -d "$PYTHON_HOME" ] && DAEMON="$PYTHON_HOME/bin/conductor-reservation" + [ -x "$DAEMON" ] || exit 0 + DAEMON_ARGS="" + [ -r /etc/default/openstack ] && . /etc/default/openstack + [ -r /etc/default/$UPSTART_JOB ] && . 
/etc/default/$UPSTART_JOB + [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog" + [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/conductor/conductor-reservation.log" + + exec start-stop-daemon --start --chdir /var/lib/conductor \ + --chuid conductor:conductor --make-pidfile --pidfile /var/run/conductor/conductor-reservation.pid \ + --exec $DAEMON -- --config-file=/etc/conductor/conductor.conf ${DAEMON_ARGS} +end script + + diff --git a/conductor/examples/distribution/ubuntu/init/conductor-solver.conf b/conductor/examples/distribution/ubuntu/init/conductor-solver.conf new file mode 100644 index 0000000..649c8c6 --- /dev/null +++ b/conductor/examples/distribution/ubuntu/init/conductor-solver.conf @@ -0,0 +1,36 @@ +description "Conductor Solver" +author "Joe D'Andrea <jdandrea@research.att.com>" + +env PYTHON_HOME= + +start on runlevel [2345] +stop on runlevel [!2345] + +chdir /var/run + +respawn +limit nofile 65535 65535 + +pre-start script + for i in lock run log lib ; do + mkdir -p /var/$i/conductor + chown conductor /var/$i/conductor + done +end script + +script + DAEMON="/usr/bin/conductor-solver" + [ -d "$PYTHON_HOME" ] && DAEMON="$PYTHON_HOME/bin/conductor-solver" + [ -x "$DAEMON" ] || exit 0 + DAEMON_ARGS="" + [ -r /etc/default/openstack ] && . /etc/default/openstack + [ -r /etc/default/$UPSTART_JOB ] && . 
/etc/default/$UPSTART_JOB + [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog" + [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/conductor/conductor-solver.log" + + exec start-stop-daemon --start --chdir /var/lib/conductor \ + --chuid conductor:conductor --make-pidfile --pidfile /var/run/conductor/conductor-solver.pid \ + --exec $DAEMON -- --config-file=/etc/conductor/conductor.conf ${DAEMON_ARGS} +end script + + diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-api b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-api new file mode 100644 index 0000000..8599adf --- /dev/null +++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-api @@ -0,0 +1,7 @@ +/var/log/conductor/conductor-api.log { + daily + missingok + compress + delaycompress + notifempty +} diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-common b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-common new file mode 100644 index 0000000..6efb26d --- /dev/null +++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-common @@ -0,0 +1,7 @@ +/var/log/conductor/*.log { + daily + missingok + compress + delaycompress + copytruncate +} diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-controller b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-controller new file mode 100644 index 0000000..af03403 --- /dev/null +++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-controller @@ -0,0 +1,7 @@ +/var/log/conductor/conductor-controller.log { + daily + missingok + compress + delaycompress + notifempty +} diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-data b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-data new file mode 100644 index 0000000..1e4dc75 --- /dev/null +++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-data @@ -0,0 +1,7 @@ 
+/var/log/conductor/conductor-data.log { + daily + missingok + compress + delaycompress + notifempty +} diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-reservation b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-reservation new file mode 100644 index 0000000..648d3e5 --- /dev/null +++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-reservation @@ -0,0 +1,7 @@ +/var/log/conductor/conductor-reservation.log { + daily + missingok + compress + delaycompress + notifempty +} diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-solver b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-solver new file mode 100644 index 0000000..a433b9c --- /dev/null +++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-solver @@ -0,0 +1,7 @@ +/var/log/conductor/conductor-solver.log { + daily + missingok + compress + delaycompress + notifempty +} diff --git a/conductor/examples/nginx/conductor-api.upstart.conf b/conductor/examples/nginx/conductor-api.upstart.conf new file mode 100644 index 0000000..fd8275f --- /dev/null +++ b/conductor/examples/nginx/conductor-api.upstart.conf @@ -0,0 +1,19 @@ +# cat /etc/init/conductor-uwsgi.conf +description "uWSGI server for conductor" + +start on runlevel [2345] # start on all runlevels. +stop on runlevel [!2345] # stop when shutting down. + +respawn + +script + /opt/app/conductor/bin/uwsgi \ + -s /tmp/uwsgi.sock \ + --chmod-socket=777 \ + --wsgi-file /etc/nginx/conductor.wsgi \ + --callable application \ + --set port=8091 \ + --venv /opt/app/conductor/ \ + --die-on-term \ + --logto /var/log/conductor/conductor-api.log +end script
\ No newline at end of file diff --git a/conductor/examples/nginx/nginx.conf b/conductor/examples/nginx/nginx.conf new file mode 100644 index 0000000..90f78c9 --- /dev/null +++ b/conductor/examples/nginx/nginx.conf @@ -0,0 +1,15 @@ +# This is an example nginx configuration file for using the +# conductor API through uWSGI. Include the "server" section +# within the broader "http" configuration. + +http { + server { + listen 8091; + server_name CONDUCTOR_API_FQDN; + + location / { + include uwsgi_params; + uwsgi_pass unix:/tmp/uwsgi.sock; + } + } +} diff --git a/conductor/examples/nginx/uwsgi_params b/conductor/examples/nginx/uwsgi_params new file mode 100644 index 0000000..c7727cd --- /dev/null +++ b/conductor/examples/nginx/uwsgi_params @@ -0,0 +1,13 @@ +uwsgi_param QUERY_STRING $query_string; +uwsgi_param REQUEST_METHOD $request_method; +uwsgi_param CONTENT_TYPE $content_type; +uwsgi_param CONTENT_LENGTH $content_length; +uwsgi_param REQUEST_URI $request_uri; +uwsgi_param PATH_INFO $document_uri; +uwsgi_param DOCUMENT_ROOT $document_root; +uwsgi_param SERVER_PROTOCOL $server_protocol; +uwsgi_param REMOTE_ADDR $remote_addr; +uwsgi_param REMOTE_PORT $remote_port; +uwsgi_param SERVER_ADDR $server_addr; +uwsgi_param SERVER_PORT $server_port; +uwsgi_param SERVER_NAME $server_name; diff --git a/conductor/pom.xml b/conductor/pom.xml new file mode 100644 index 0000000..f12574a --- /dev/null +++ b/conductor/pom.xml @@ -0,0 +1,56 @@ +<?xml version="1.0"?> +<!-- + Copyright (c) 2018 Intel Corporation. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + License for the specific language governing permissions and limitations + under the License. +--> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + <packaging>pom</packaging> + + <parent> + <groupId>org.onap.optf.has</groupId> + <version>1.1.0-SNAPSHOT</version> + <artifactId>optf-has-root</artifactId> + </parent> + + <groupId>org.onap.optf.has</groupId> + <artifactId>optf-has-conductor</artifactId> + <version>1.1.0-SNAPSHOT</version> + + <name>optf-has-conductor</name> + <description>Homing Allocation Service/Conductor</description> + <build> + <plugins> + <plugin> + <artifactId>maven-assembly-plugin</artifactId> + <configuration> + <appendAssemblyId>false</appendAssemblyId> + <descriptors> + <descriptor>assembly.xml</descriptor> + </descriptors> + </configuration> + <executions> + <execution> + <id>make-assembly</id> + <phase>package</phase> + <goals> + <goal>single</goal> + </goals> + </execution> + </executions> + </plugin> + </plugins> + </build> +</project> diff --git a/conductor/pylintrc b/conductor/pylintrc new file mode 100644 index 0000000..52ae454 --- /dev/null +++ b/conductor/pylintrc @@ -0,0 +1,26 @@ +[Messages Control] +# W0511: TODOs in code comments are fine. +# W0142: *args and **kwargs are fine. +# W0622: Redefining id is fine. +disable-msg=W0511,W0142,W0622 + +[Basic] +# Variable names can be 1 to 31 characters long, with lowercase and underscores +variable-rgx=[a-z_][a-z0-9_]{0,30}$ + +# Argument names can be 2 to 31 characters long, with lowercase and underscores +argument-rgx=[a-z_][a-z0-9_]{1,30}$ + +# Method names should be at least 3 characters long +# and be lowecased with underscores +method-rgx=[a-z_][a-z0-9_]{2,50}$ + +# Don't require docstrings on tests. 
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ + +# Exclude variable names that conflict with debugger +bad-names=c +[Design] +max-public-methods=100 +min-public-methods=0 +max-args=6 diff --git a/conductor/requirements.txt b/conductor/requirements.txt new file mode 100644 index 0000000..a0878b2 --- /dev/null +++ b/conductor/requirements.txt @@ -0,0 +1,25 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +cotyledon # Apache-2.0 +futurist>=0.11.0 # Apache-2.0 +lxml>=2.3 # BSD +oslo.config>=3.9.0 # Apache-2.0 +oslo.i18n>=2.1.0 # Apache-2.0 +oslo.log>=1.14.0 # Apache-2.0 +# oslo.policy>=0.5.0 # Apache-2.0 +oslo.reports>=0.6.0 # Apache-2.0 +PasteDeploy>=1.5.0 # MIT +pbr>=1.6 # Apache-2.0 +pecan>=1.1.1 # BSD +pecan-notario<=0.0.3 # Alfredo Deza +oslo.messaging>=5.2.0 # Apache-2.0 +oslo.middleware>=3.0.0 # Apache-2.0 +python-daemon>=2.1.1 # Apache-2.0 +pyOpenSSL>=0.14 # Apache-2.0 +PyYAML>=3.10.0 # MIT +requests[security]!=2.9.0,>=2.8.1 # Apache-2.0 +six>=1.9.0 # MIT, also required by futurist +stevedore>=1.9.0 # Apache-2.0, also required by oslo.config +WebOb>=1.2.3 # MIT
\ No newline at end of file diff --git a/conductor/setup.cfg b/conductor/setup.cfg new file mode 100644 index 0000000..b62c365 --- /dev/null +++ b/conductor/setup.cfg @@ -0,0 +1,71 @@ +[metadata] +name = of-has +summary = ONAP Homing Service +description-file = README.rst +author = AT&T +author-email = jdandrea@research.att.com +home-page = https://wiki.onap.org/pages/viewpage.action?pageId=16005528 +classifier = + Development Status :: 4 - Beta + Environment :: ONAP + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.5 +keywords = + onap + homing + conductor + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[files] +packages = + conductor +data_files = + etc/conductor = etc/conductor/* +# conductor_integrationtests +#scripts = +# bin/conductor-db-setup + +[entry_points] +wsgi_scripts = + conductor-api = conductor.api.app:build_wsgi_app + +console_scripts = + conductor-controller = conductor.cmd.controller:main + conductor-data = conductor.cmd.data:main + conductor-solver = conductor.cmd.solver:main + conductor-reservation = conductor.cmd.reservation:main + +conductor.inventory_provider.plugin = + aai = conductor.data.plugins.inventory_provider.aai:AAI + +conductor.service_controller.plugin = + sdnc = conductor.data.plugins.service_controller.sdnc:SDNC + +oslo.config.opts = + conductor = conductor.opts:list_opts + +oslo.config.opts.defaults = + conductor = conductor.conf.defaults:set_cors_middleware_defaults + +#tempest.test_plugins = +# conductor_tests = conductor_integrationtests.plugin:ConductorTempestPlugin + +#[build_sphinx] +#all_files = 1 +#build-dir = doc/build +#source-dir = doc/source + +[pbr] +warnerrors = true +autodoc_index_modules = true 
+ diff --git a/conductor/setup.py b/conductor/setup.py new file mode 100644 index 0000000..0c696ed --- /dev/null +++ b/conductor/setup.py @@ -0,0 +1,34 @@ +# -*- encoding: utf-8 -*- +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# ------------------------------------------------------------------------- +# + +'''Setup''' + +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. +# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa # pylint: disable=W0611,C0411 +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr>=1.8'], + pbr=True) diff --git a/conductor/test-requirements.txt b/conductor/test-requirements.txt new file mode 100644 index 0000000..8eb4eb7 --- /dev/null +++ b/conductor/test-requirements.txt @@ -0,0 +1,20 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
+ +# Hacking already pins down pep8, pyflakes and flake8 +hacking<0.11,>=0.10.0 +# bandit>=1.1.0 # Apache-2.0 +coverage>=3.6 # Apache-2.0 +fixtures>=3.0.0 # Apache-2.0/BSD +kombu>=3.0.25 # BSD +mock>=2.0 # BSD +mox3>=0.7.0 # Apache-2.0 +oslotest>=1.10.0 # Apache-2.0 +psycopg2>=2.5 # LGPL/ZPL +testrepository>=0.0.18 # Apache-2.0/BSD +testscenarios>=0.4 # Apache-2.0/BSD +testtools>=1.4.0 # MIT +os-testr>=0.4.1 # Apache-2.0 +tempest>=11.0.0 # Apache-2.0 +pifpaf>=0.0.11
\ No newline at end of file diff --git a/conductor/tools/README.md b/conductor/tools/README.md new file mode 100644 index 0000000..cba4b77 --- /dev/null +++ b/conductor/tools/README.md @@ -0,0 +1 @@ +Files in this directory are general developer tools or examples of how to do certain activities. diff --git a/conductor/tools/pretty_tox.sh b/conductor/tools/pretty_tox.sh new file mode 100755 index 0000000..190b39d --- /dev/null +++ b/conductor/tools/pretty_tox.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# +# ------------------------------------------------------------------------- +# Copyright (c) 2015-2017 AT&T Intellectual Property +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# ------------------------------------------------------------------------- +# + + +set -o pipefail + +TESTRARGS=$1 + +# --until-failure is not compatible with --subunit see: +# +# https://bugs.launchpad.net/testrepository/+bug/1411804 +# +# this work around exists until that is addressed +if [[ "$TESTRARGS" =~ "until-failure" ]]; then + python setup.py testr --slowest --testr-args="$TESTRARGS" +else + python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f +fi diff --git a/conductor/tox.ini b/conductor/tox.ini new file mode 100644 index 0000000..35bea94 --- /dev/null +++ b/conductor/tox.ini @@ -0,0 +1,53 @@ +[tox] +minversion = 1.6 +skipsdist = True +# envlist = py35,py27,functional,pep8 +envlist = py27,pep8 + +[testenv] +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +install_command = pip install -U {opts} {packages} +usedevelop = True +setenv = VIRTUAL_ENV={envdir} + OS_TEST_PATH=conductor/tests/unit +passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE +commands = + {toxinidir}/tools/pretty_tox.sh "{posargs}" + oslo-config-generator --config-file=etc/conductor/conductor-config-generator.conf +whitelist_externals = bash + +[testenv:functional] +setenv = VIRTUAL_ENV={envdir} + OS_TEST_PATH=conductor/tests/functional/ +passenv = CONDUCTOR_* +commands = + bash -x {toxinidir}/run-functional-tests.sh "{posargs}" + +[testenv:cover] +setenv = OS_TEST_PATH=conductor/tests +commands = + python setup.py testr --slowest --coverage --testr-args='^(?!conductor_integrationtests){posargs}' + +[testenv:pep8] +deps = hacking<0.12,>=0.11.0 +commands = + flake8 + +[testenv:genconfig] +commands = oslo-config-generator --config-file=etc/conductor/conductor-config-generator.conf + +[testenv:venv] +commands = {posargs} +setenv = PYTHONHASHSEED=0 + +[testenv:debug] +commands = bash -x oslo_debug_helper {posargs} + +[flake8] +ignore = H301,E401 +exclude = 
.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,install-guide +show-source = True + +[hacking] +import_exceptions = conductor.common.i18n @@ -0,0 +1,56 @@ +<?xml version="1.0"?> +<!-- + Copyright (c) 2018 Intel Corporation. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. +--> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + <packaging>pom</packaging> + + <parent> + <groupId>org.onap.oparent</groupId> + <artifactId>oparent-python</artifactId> + <version>1.1.0-SNAPSHOT</version> + </parent> + + <groupId>org.onap.optf.has</groupId> + <artifactId>optf-has-root</artifactId> + + <name>optf-has-root</name> + <version>1.1.0-SNAPSHOT</version> + <description>Homing Allocation Service</description> + + <modules> + <module>conductor</module> + <!--<module>docs</module>--> + </modules> + + <build> + <plugins> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-release-plugin</artifactId> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-deploy-plugin</artifactId> + <version>2.7</version> + <configuration> + <retryFailedDeploymentCount>2</retryFailedDeploymentCount> + </configuration> + </plugin> + </plugins> + </build> +</project> |