summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--conductor/LICENSE26
-rw-r--r--conductor/README.rst26
-rw-r--r--conductor/conductor/__init__.py26
-rwxr-xr-xconductor/conductor/clean.sh23
-rw-r--r--conductor/conductor/cmd/__init__.py19
-rw-r--r--conductor/conductor/cmd/controller.py27
-rw-r--r--conductor/conductor/cmd/data.py27
-rw-r--r--conductor/conductor/cmd/reservation.py27
-rw-r--r--conductor/conductor/cmd/solver.py27
-rw-r--r--conductor/conductor/common/__init__.py44
-rw-r--r--conductor/conductor/common/classes.py79
-rw-r--r--conductor/conductor/common/models/__init__.py47
-rw-r--r--conductor/conductor/common/models/plan.py205
-rw-r--r--conductor/conductor/common/music/__init__.py31
-rw-r--r--conductor/conductor/common/music/api.py493
-rw-r--r--conductor/conductor/common/music/messaging/__init__.py22
-rw-r--r--conductor/conductor/common/music/messaging/component.py435
-rw-r--r--conductor/conductor/common/music/messaging/message.py178
-rw-r--r--conductor/conductor/common/music/model/__init__.py19
-rw-r--r--conductor/conductor/common/music/model/base.py168
-rw-r--r--conductor/conductor/common/music/model/search.py121
-rw-r--r--conductor/conductor/common/music/model/transaction.py54
-rw-r--r--conductor/conductor/common/music/voting.py106
-rw-r--r--conductor/conductor/common/rest.py172
-rw-r--r--conductor/conductor/common/threshold.py281
-rw-r--r--conductor/conductor/conf/__init__.py19
-rw-r--r--conductor/conductor/conf/defaults.py40
-rw-r--r--conductor/conductor/conf/inventory_provider.py32
-rw-r--r--conductor/conductor/conf/service_controller.py32
-rw-r--r--conductor/conductor/i18n.py59
-rw-r--r--conductor/conductor/messaging.py73
-rw-r--r--conductor/conductor/middleware.py67
-rw-r--r--conductor/conductor/opts.py63
-rw-r--r--conductor/conductor/service.py104
-rw-r--r--conductor/conductor/tests/__init__.py19
-rw-r--r--conductor/conductor/tests/data/__init__.py19
-rw-r--r--conductor/conductor/tests/functional/__init__.py19
-rw-r--r--conductor/conductor/tests/integration/__init__.py19
-rw-r--r--conductor/conductor/tests/tempest/__init__.py19
-rw-r--r--conductor/conductor/tests/testing-overview.txt67
-rw-r--r--conductor/conductor/tests/unit/__init__.py19
-rw-r--r--conductor/conductor/version.py22
-rw-r--r--conductor/doc/api/README.md283
-rw-r--r--conductor/doc/distribution/README.md551
-rw-r--r--conductor/doc/examples/README.md96
-rw-r--r--conductor/doc/glossary/README.md26
-rw-r--r--conductor/doc/template/README.md875
-rw-r--r--conductor/etc/conductor/api_paste.ini26
-rw-r--r--conductor/etc/conductor/conductor-config-generator.conf7
-rw-r--r--conductor/etc/conductor/log.conf49
-rw-r--r--conductor/etc/conductor/policy.json18
-rw-r--r--conductor/etc/conductor/rootwrap.conf27
-rw-r--r--conductor/etc/conductor/rootwrap.d/README.txt1
-rw-r--r--conductor/examples/apache2/conductor.conf25
-rw-r--r--conductor/examples/distribution/ubuntu/init.d/conductor-api149
-rw-r--r--conductor/examples/distribution/ubuntu/init.d/conductor-controller148
-rw-r--r--conductor/examples/distribution/ubuntu/init.d/conductor-data148
-rw-r--r--conductor/examples/distribution/ubuntu/init.d/conductor-reservation148
-rw-r--r--conductor/examples/distribution/ubuntu/init.d/conductor-solver148
-rw-r--r--conductor/examples/distribution/ubuntu/init/conductor-api.conf37
-rw-r--r--conductor/examples/distribution/ubuntu/init/conductor-controller.conf36
-rw-r--r--conductor/examples/distribution/ubuntu/init/conductor-data.conf36
-rw-r--r--conductor/examples/distribution/ubuntu/init/conductor-reservation.conf36
-rw-r--r--conductor/examples/distribution/ubuntu/init/conductor-solver.conf36
-rw-r--r--conductor/examples/distribution/ubuntu/logrotate.d/conductor-api7
-rw-r--r--conductor/examples/distribution/ubuntu/logrotate.d/conductor-common7
-rw-r--r--conductor/examples/distribution/ubuntu/logrotate.d/conductor-controller7
-rw-r--r--conductor/examples/distribution/ubuntu/logrotate.d/conductor-data7
-rw-r--r--conductor/examples/distribution/ubuntu/logrotate.d/conductor-reservation7
-rw-r--r--conductor/examples/distribution/ubuntu/logrotate.d/conductor-solver7
-rw-r--r--conductor/examples/nginx/conductor-api.upstart.conf19
-rw-r--r--conductor/examples/nginx/nginx.conf15
-rw-r--r--conductor/examples/nginx/uwsgi_params13
-rw-r--r--conductor/pylintrc26
-rw-r--r--conductor/requirements.txt25
-rw-r--r--conductor/setup.cfg71
-rw-r--r--conductor/setup.py34
-rw-r--r--conductor/test-requirements.txt20
-rw-r--r--conductor/tools/README.md1
-rwxr-xr-xconductor/tools/pretty_tox.sh35
-rw-r--r--conductor/tox.ini53
81 files changed, 6635 insertions, 0 deletions
diff --git a/conductor/LICENSE b/conductor/LICENSE
new file mode 100644
index 0000000..fffadb0
--- /dev/null
+++ b/conductor/LICENSE
@@ -0,0 +1,26 @@
+
+The following license applies to all files in this directory and its
+subdirectories. Licenses are included in individual source files where
+appropriate; if a file's license differs from this text, that license
+supersedes this one. Any file without license text defaults to being covered
+by this text, as not all file formats support the inclusion of license text.
+
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
diff --git a/conductor/README.rst b/conductor/README.rst
new file mode 100644
index 0000000..c90eff6
--- /dev/null
+++ b/conductor/README.rst
@@ -0,0 +1,26 @@
+=========
+Conductor
+=========
+
+OF-HAS is the implementation of the ONAP Homing Service. The formal project name in ONAP is *OF-HAS*. The informal name for the project is *Conductor* (inherited from the seed code), which is used interchangeably throughout the project.
+
+Given the description of what needs to be deployed (demands) and the placement requirements (constraints), Conductor determines placement candidates that meet all constraints while optimizing the resource usage of the AIC infrastructure. A customer request may be satisfied by deploying new VMs in AIC (AIC inventory) or by using existing service instances with enough remaining capacity (service inventory).
+
+From a canonical standpoint, Conductor is known as a *homing service*, in the same way OpenStack Heat is an orchestration service, or Nova is a compute service.
+
+* License: Licensed under the Apache License, Version 2.0
+* `PyPI`_ - package installation
+* `Python/Linux Distribution Notes`_
+* `Conductor Template Guide`_
+* `Example Templates`_
+* `Homing API`_
+* `Bugs`_ - issue tracking
+* `Source`_
+
+.. _PyPI:
+.. _Python/Linux Distribution Notes: /doc/distribution/README.md
+.. _Conductor Template Guide: /doc/template/README.md
+.. _Example Templates: /examples/README.md
+.. _Homing API: /doc/api/README.md
+.. _Bugs: https://jira.onap.org/projects/OPTFRA/summary
+.. _Source: https://gerrit.onap.org/r/optf/has
diff --git a/conductor/conductor/__init__.py b/conductor/conductor/__init__.py
new file mode 100644
index 0000000..313be31
--- /dev/null
+++ b/conductor/conductor/__init__.py
@@ -0,0 +1,26 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+
+
+class NotImplementedError(NotImplementedError):
+ # FIXME(jd) This is used by WSME to return a correct HTTP code. We should
+ # not expose it here but wrap our methods in the API to convert it to a
+ # proper HTTP error.
+ code = 501
diff --git a/conductor/conductor/clean.sh b/conductor/conductor/clean.sh
new file mode 100755
index 0000000..40ba5d9
--- /dev/null
+++ b/conductor/conductor/clean.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+
+#find . -name '*.pyc' -delete
+find */. -name '*.pyc' -delete
diff --git a/conductor/conductor/cmd/__init__.py b/conductor/conductor/cmd/__init__.py
new file mode 100644
index 0000000..f2bbdfd
--- /dev/null
+++ b/conductor/conductor/cmd/__init__.py
@@ -0,0 +1,19 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
diff --git a/conductor/conductor/cmd/controller.py b/conductor/conductor/cmd/controller.py
new file mode 100644
index 0000000..c00e233
--- /dev/null
+++ b/conductor/conductor/cmd/controller.py
@@ -0,0 +1,27 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from conductor import controller
+from conductor import service
+
+
+def main():
+ conf = service.prepare_service()
+ controller_service = controller.ControllerServiceLauncher(conf=conf)
+ controller_service.run()
diff --git a/conductor/conductor/cmd/data.py b/conductor/conductor/cmd/data.py
new file mode 100644
index 0000000..a880c35
--- /dev/null
+++ b/conductor/conductor/cmd/data.py
@@ -0,0 +1,27 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from conductor import data
+from conductor import service
+
+
+def main():
+ conf = service.prepare_service()
+ data_service = data.DataServiceLauncher(conf=conf)
+ data_service.run()
diff --git a/conductor/conductor/cmd/reservation.py b/conductor/conductor/cmd/reservation.py
new file mode 100644
index 0000000..7a38375
--- /dev/null
+++ b/conductor/conductor/cmd/reservation.py
@@ -0,0 +1,27 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from conductor import reservation
+from conductor import service
+
+
+def main():
+ conf = service.prepare_service()
+ reservation_service = reservation.ReservationServiceLauncher(conf=conf)
+ reservation_service.run()
diff --git a/conductor/conductor/cmd/solver.py b/conductor/conductor/cmd/solver.py
new file mode 100644
index 0000000..8efea99
--- /dev/null
+++ b/conductor/conductor/cmd/solver.py
@@ -0,0 +1,27 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from conductor import service
+from conductor import solver
+
+
+def main():
+ conf = service.prepare_service()
+ solver_service = solver.SolverServiceLauncher(conf=conf)
+ solver_service.run()
diff --git a/conductor/conductor/common/__init__.py b/conductor/conductor/common/__init__.py
new file mode 100644
index 0000000..4d222ec
--- /dev/null
+++ b/conductor/conductor/common/__init__.py
@@ -0,0 +1,44 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""Music - Common Methods"""
+
+from oslo_log import log as logging
+
+from conductor.common.music import api
+
+LOG = logging.getLogger(__name__)
+
+
+def music_api(configuration):
+ """Create or return a Music API instance"""
+
+ configuration = dict(configuration)
+ kwargs = {
+ 'host': configuration.get('host'),
+ 'port': configuration.get('port'),
+ 'replication_factor': configuration.get('replication_factor'),
+ }
+ api_instance = api.API(**kwargs)
+
+ # Create the keyspace if necessary
+ # TODO(jdandrea): Use oslo.config with a [music] section
+ # keyspace = conf.music.get('keyspace')
+ # api_instance.create_keyspace(keyspace)
+ return api_instance
diff --git a/conductor/conductor/common/classes.py b/conductor/conductor/common/classes.py
new file mode 100644
index 0000000..4f44fd7
--- /dev/null
+++ b/conductor/conductor/common/classes.py
@@ -0,0 +1,79 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""Class Helpers"""
+
+from conductor.i18n import _LE # pylint: disable=W0212
+
+
+def get_class(kls):
+ """Returns a class given a fully qualified class name"""
+ parts = kls.split('.')
+ module = ".".join(parts[:-1])
+ mod = __import__(module)
+ for comp in parts[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+
+class abstractclassmethod(classmethod): # pylint: disable=C0103,R0903
+ """Abstract Class Method Decorator from Python 3.3's abc module"""
+
+ __isabstractmethod__ = True
+
+ def __init__(self, callable): # pylint: disable=W0622
+ callable.__isabstractmethod__ = True
+ super(abstractclassmethod, self).__init__(callable)
+
+
+class ClassPropertyDescriptor(object): # pylint: disable=R0903
+ """Supports the notion of a class property"""
+
+ def __init__(self, fget, fset=None):
+ """Initializer"""
+ self.fget = fget
+ self.fset = fset
+
+ def __get__(self, obj, klass=None):
+ """Get attribute"""
+ if klass is None:
+ klass = type(obj)
+ return self.fget.__get__(obj, klass)()
+
+ def __set__(self, obj, value):
+ """Set attribute"""
+ if not self.fset:
+ raise AttributeError(_LE("Can't set attribute"))
+ type_ = type(obj)
+ return self.fset.__get__(obj, type_)(value)
+
+ def setter(self, func):
+ """Setter"""
+ if not isinstance(func, (classmethod, staticmethod)):
+ func = classmethod(func)
+ self.fset = func
+ return self
+
+
+def classproperty(func):
+ """Class Property decorator"""
+ if not isinstance(func, (classmethod, staticmethod)):
+ func = classmethod(func)
+
+ return ClassPropertyDescriptor(func)
diff --git a/conductor/conductor/common/models/__init__.py b/conductor/conductor/common/models/__init__.py
new file mode 100644
index 0000000..ce07a87
--- /dev/null
+++ b/conductor/conductor/common/models/__init__.py
@@ -0,0 +1,47 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from uuid import UUID
+
+import six
+
+
+def validate_uuid4(uuid_string):
+ """Validate that a UUID string is in fact a valid uuid4.
+
+ Happily, the uuid module does the actual checking for us.
+ It is vital that the 'version' kwarg be passed
+ to the UUID() call, otherwise any 32-character
+ hex string is considered valid.
+ """
+
+ if not isinstance(uuid_string, six.string_types):
+ return False
+
+ try:
+ val = UUID(uuid_string.translate(None, '-'), version=4)
+ except ValueError:
+ # If it's a value error, then the string
+ # is not a valid hex code for a UUID.
+ return False
+
+ # If the uuid_string is a valid hex code,
+ # but an invalid uuid4, the UUID.__init__ will convert it to a
+ # valid uuid4. This is bad for validation purposes.
+ return val.hex == uuid_string
diff --git a/conductor/conductor/common/models/plan.py b/conductor/conductor/common/models/plan.py
new file mode 100644
index 0000000..3dbc8f5
--- /dev/null
+++ b/conductor/conductor/common/models/plan.py
@@ -0,0 +1,205 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""Plan Model"""
+
+import json
+import time
+
+from conductor.common.models import validate_uuid4
+from conductor.common.music.model import base
+
+
+def current_time_millis():
+ """Current time in milliseconds."""
+ return int(round(time.time() * 1000))
+
+
+class Plan(base.Base):
+ """Plan model.
+
+ DO NOT use this class directly!
+
+ Only create Plan-based classes using:
+ base.create_dynamic_model(keyspace=KEYSPACE,
+ baseclass=Plan, classname=CLASS).
+ The table will be automatically created if it doesn't exist.
+ """
+
+ __tablename__ = "plans"
+ __keyspace__ = None
+
+ id = None # pylint: disable=C0103
+ status = None
+ created = None
+ updated = None
+ name = None
+ timeout = None
+ recommend_max = None
+ message = None
+ template = None
+ translation = None
+ solution = None
+
+ # Status
+ TEMPLATE = "template" # Template ready for translation
+ TRANSLATED = "translated" # Translation ready for solving
+ SOLVING = "solving" # Search for solutions in progress
+ # Search complete, solution with n>0 recommendations found
+ SOLVED = "solved"
+ # Search failed, no recommendations found
+ NOT_FOUND = "not found"
+ ERROR = "error" # Error
+ # Solved, but reservation of resources in progress
+ RESERVING = "reserving"
+ # Final state, Solved and Reserved resources (if required)
+ DONE = "done"
+ STATUS = [TEMPLATE, TRANSLATED, SOLVING, SOLVED, NOT_FOUND,
+ ERROR, RESERVING, DONE, ]
+ WORKING = [TEMPLATE, TRANSLATED, SOLVING, RESERVING, ]
+ FINISHED = [SOLVED, NOT_FOUND, ERROR, DONE, ]
+
+ @classmethod
+ def schema(cls):
+ """Return schema."""
+ schema = {
+ 'id': 'text', # Plan ID in UUID4 format
+ 'status': 'text', # Plan status (see STATUS for valid values)
+ 'created': 'bigint', # Creation time in msec from epoch
+ 'updated': 'bigint', # Last update time in msec from epoch
+ 'name': 'text', # Plan name/alias
+ 'timeout': 'int', # Timeout in seconds
+ 'recommend_max': 'int', # Max recommendations
+ 'message': 'text', # Message (e.g., error or other info)
+ 'template': 'text', # Plan template
+ 'translation': 'text', # Translated template for the solver
+ 'solution': 'text', # The (ocean is the ultimate) solution (FZ)
+ 'PRIMARY KEY': '(id)',
+ }
+ return schema
+
+ @classmethod
+ def atomic(cls):
+ """Use atomic operations"""
+ return False
+
+ @classmethod
+ def pk_name(cls):
+ """Primary key name"""
+ return 'id'
+
+ def pk_value(self):
+ """Primary key value"""
+ return self.id
+
+ @property
+ def error(self):
+ return self.status == self.ERROR
+
+ @property
+ def finished(self):
+ return self.status in self.FINISHED
+
+ @property
+ def solved(self):
+ return self.status == self.SOLUTION
+
+ @property
+ def done(self):
+ return self.status == self.DONE
+
+ @property
+ def timedout(self):
+ """Calculate if a plan has timed out"""
+ elapsed_msec = (current_time_millis() - self.created)
+ return elapsed_msec >= self.timeout * 1000
+
+ @property
+ def working(self):
+ return self.status in self.WORKING
+
+ def update(self):
+ """Update plan
+
+ Side-effect: Sets the updated field to the current time.
+ """
+ self.updated = current_time_millis()
+ super(Plan, self).update()
+
+ def values(self):
+ """Values"""
+ value_dict = {
+ 'status': self.status,
+ 'created': self.created,
+ 'updated': self.updated,
+ 'name': self.name,
+ 'timeout': self.timeout,
+ 'recommend_max': self.recommend_max,
+ 'message': self.message,
+ 'template': json.dumps(self.template),
+ 'translation': json.dumps(self.translation),
+ 'solution': json.dumps(self.solution),
+ }
+ if self.id:
+ value_dict['id'] = self.id
+ return value_dict
+
+ def __init__(self, name, timeout, recommend_max, template,
+ id=None, created=None, updated=None, status=None,
+ message=None, translation=None, solution=None, _insert=True):
+ """Initializer"""
+ super(Plan, self).__init__()
+ self.status = status or self.TEMPLATE
+ self.created = created or current_time_millis()
+ self.updated = updated or current_time_millis()
+ self.name = name
+ self.timeout = timeout
+ self.recommend_max = recommend_max
+ self.message = message or ""
+ if _insert:
+ if validate_uuid4(id):
+ self.id = id
+ self.template = template or {}
+ self.translation = translation or {}
+ self.solution = solution or {}
+ self.insert()
+ else:
+ self.template = json.loads(template)
+ self.translation = json.loads(translation)
+ self.solution = json.loads(solution)
+
+ def __repr__(self):
+ """Object representation"""
+ return '<Plan {} ({})>'.format(self.name, self.id)
+
+ def __json__(self):
+ """JSON representation"""
+ json_ = {}
+ json_['id'] = self.id
+ json_['status'] = self.status
+ json_['created'] = self.created
+ json_['updated'] = self.updated
+ json_['name'] = self.name
+ json_['timeout'] = self.timeout
+ json_['recommend_max'] = self.recommend_max
+ json_['message'] = self.message
+ json_['template'] = self.template
+ json_['translation'] = self.translation
+ json_['solution'] = self.solution
+ return json_
diff --git a/conductor/conductor/common/music/__init__.py b/conductor/conductor/common/music/__init__.py
new file mode 100644
index 0000000..31ad7e1
--- /dev/null
+++ b/conductor/conductor/common/music/__init__.py
@@ -0,0 +1,31 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from oslo_config import cfg
+
+CONF = cfg.CONF
+
+MUSIC_API_COMMON_OPTS = [
+ cfg.BoolOpt('debug',
+ default=False,
+ help='Log debug messages. '
+ 'Default value is False.'),
+]
+
+CONF.register_opts(MUSIC_API_COMMON_OPTS, group='music_api')
diff --git a/conductor/conductor/common/music/api.py b/conductor/conductor/common/music/api.py
new file mode 100644
index 0000000..013dc79
--- /dev/null
+++ b/conductor/conductor/common/music/api.py
@@ -0,0 +1,493 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
"""Music Data Store API"""

import copy
import time

from oslo_config import cfg
from oslo_log import log

from conductor.common import rest
from conductor.i18n import _LE, _LI  # pylint: disable=W0212

LOG = log.getLogger(__name__)

CONF = cfg.CONF

# Module-level handle to the most recently constructed API instance.
# NOTE: a bare ``global MUSIC_API`` statement at module scope is a no-op
# and left the attribute undefined until the first MusicAPI()/MockAPI()
# instantiation (AttributeError for early readers). Initialize it to None
# so the module attribute always exists.
MUSIC_API = None

MUSIC_API_OPTS = [
    cfg.StrOpt('server_url',
               default='http://controller:8080/MUSIC/rest',
               help='Base URL for Music REST API without a trailing slash.'),
    cfg.ListOpt('hostnames',
                deprecated_for_removal=True,
                deprecated_reason='Use server_url instead',
                help='List of hostnames (round-robin access)'),
    cfg.IntOpt('port',
               deprecated_for_removal=True,
               deprecated_reason='Use server_url instead',
               help='Port'),
    cfg.StrOpt('path',
               deprecated_for_removal=True,
               deprecated_reason='Use server_url instead',
               help='Path'),
    cfg.FloatOpt('connect_timeout',
                 default=3.05,
                 help='Socket connection timeout'),
    cfg.FloatOpt('read_timeout',
                 default=12.05,
                 help='Socket read timeout'),
    cfg.IntOpt('lock_timeout',
               default=10,
               help='Lock timeout'),
    cfg.IntOpt('replication_factor',
               default=1,
               help='Replication factor'),
    cfg.BoolOpt('debug',
                default=False,
                help='Log debug messages. Default value is False.'),
    cfg.BoolOpt('mock',
                default=False,
                help='Use mock API'),
]

CONF.register_opts(MUSIC_API_OPTS, group='music_api')
+
+
class MusicAPI(object):
    """Wrapper for the Music REST API."""

    lock_ids = None  # Cache of lock ids, indexed by name
    lock_timeout = None  # Maximum time in seconds to acquire a lock

    rest = None  # API Endpoint
    replication_factor = None  # Number of Music nodes to replicate across

    def __init__(self):
        """Initializer. Builds the REST endpoint from configuration."""
        global MUSIC_API

        LOG.info(_LI("Initializing Music API"))
        server_url = CONF.music_api.server_url.rstrip('/')
        if not server_url:
            # host/port/path are deprecated and should not be used anymore.
            # Defaults removed from oslo_config to give more incentive.

            # No more round robin either. Just take the first entry.
            host = next(iter(CONF.music_api.hostnames or []), 'controller')
            port = CONF.music_api.port or 8080
            path = CONF.music_api.path or '/MUSIC/rest'
            server_url = 'http://{}:{}/{}'.format(
                host, port, path.rstrip('/').lstrip('/'))

        kwargs = {
            'server_url': server_url,
            'log_debug': CONF.music_api.debug,
            'connect_timeout': CONF.music_api.connect_timeout,
            'read_timeout': CONF.music_api.read_timeout,
        }
        self.rest = rest.REST(**kwargs)

        self.lock_ids = {}

        # TODO(jdandrea): Allow override at creation time.
        self.lock_timeout = CONF.music_api.lock_timeout
        self.replication_factor = CONF.music_api.replication_factor

        MUSIC_API = self

    def __del__(self):
        """Deletion. Releases any locks still cached by this instance."""
        if type(self.lock_ids) is dict:
            # Iterate over a snapshot of the keys: lock_delete() removes
            # entries from lock_ids, and mutating a dict while iterating
            # over it raises RuntimeError in Python 3.
            for lock_name in list(self.lock_ids.keys()):
                self.lock_delete(lock_name)

    @staticmethod
    def _row_url_path(keyspace, table, pk_name, pk_value):
        """Returns a Music-compliant row URL path.

        A primary-key query string is appended only when both
        pk_name and pk_value are provided.
        """
        path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
            'keyspace': keyspace,
            'table': table,
        }

        if pk_name and pk_value:
            path += '?%s=%s' % (pk_name, pk_value)
        return path

    @staticmethod
    def _lock_name_generate(keyspace, table, pk_value):
        """Generate a lock name."""

        # The Music API dictates that the lock name must be of the form
        # keyspace.table.primary_key
        lock_name = '%(keyspace)s.%(table)s.%(primary_key)s' % {
            'keyspace': keyspace,
            'table': table,
            'primary_key': pk_value,
        }
        return lock_name

    def _lock_id_create(self, lock_name):
        """Returns the lock id. Use for acquiring and releasing.

        Returns None when the REST call fails.
        """
        path = '/locks/create/%s' % lock_name
        response = self.rest.request(method='post',
                                     content_type='text/plain', path=path)
        lock_id = None
        if response and response.ok:
            lock_id = response.text
        return lock_id

    def _lock_id_acquire(self, lock_id):
        """Acquire a lock by id. Returns True if successful."""
        path = '/locks/acquire/%s' % lock_id
        response = self.rest.request(method='get',
                                     content_type='text/plain', path=path)
        status = False
        if response and response.ok:
            # Music returns the literal string 'true' on success.
            status = (response.text.lower() == 'true')
        return status

    def _lock_id_release(self, lock_id):
        """Release a lock by id. Returns True if successful."""
        path = '/locks/release/%s' % lock_id
        response = self.rest.request(method='delete',
                                     content_type='text/plain', path=path)
        return response and response.ok

    def payload_init(self, keyspace=None, table=None,
                     pk_value=None, atomic=False):
        """Initialize payload for Music requests.

        Supports atomic operations.
        Returns a payload of data and lock_name (if any).
        """
        if atomic:
            lock_name = self.lock_create(keyspace, table, pk_value)
        else:
            lock_name = None

        lock_id = self.lock_ids.get(lock_name)
        data = {
            'consistencyInfo': {
                'type': 'atomic' if atomic else 'eventual',
                'lockId': lock_id,
            }
        }
        return {'data': data, 'lock_name': lock_name}

    def payload_delete(self, payload):
        """Delete payload for Music requests. Cleans up atomic operations."""

        # Doesn't actually delete the payload.
        # We just delete the lock inside of it!
        # This way payload_init/payload_delete is paired up neatly.
        lock_name = payload.get('lock_name')
        if lock_name:
            self.lock_delete(lock_name)

    def keyspace_create(self, keyspace):
        """Creates a keyspace. Returns True if successful."""
        payload = self.payload_init()
        data = payload.get('data')
        data['durabilityOfWrites'] = True
        data['replicationInfo'] = {
            'class': 'SimpleStrategy',
            'replication_factor': self.replication_factor,
        }

        path = '/keyspaces/%s' % keyspace
        if CONF.music_api.debug:
            LOG.debug("Creating keyspace {}".format(keyspace))
        response = self.rest.request(method='post', path=path, data=data)
        return response and response.ok

    def keyspace_delete(self, keyspace):
        """Drops a keyspace. Returns True if successful."""
        payload = self.payload_init()
        data = payload.get('data')

        path = '/keyspaces/%s' % keyspace
        if CONF.music_api.debug:
            LOG.debug("Deleting keyspace {}".format(keyspace))
        response = self.rest.request(method='delete', path=path, data=data)
        return response and response.ok

    def lock_create(self, keyspace, table, pk_value):
        """Create and acquire a lock. Returns a lock name.

        Raises IndexError if the lock is not acquired within
        lock_timeout seconds.
        """

        # Generate the lock name, then create/acquire the lock id.
        lock_name = self._lock_name_generate(keyspace, table, pk_value)
        if CONF.music_api.debug:
            LOG.debug("Creating lock {}".format(lock_name))
        lock_id = self._lock_id_create(lock_name)
        time_now = time.time()
        while not self._lock_id_acquire(lock_id):
            if time.time() - time_now > self.lock_timeout:
                raise IndexError(
                    _LE('Lock id acquire timeout: %s') % lock_name)

        # Cache the lock name/id.
        self.lock_ids[lock_name] = lock_id
        return lock_name

    def lock_release(self, lock_name):
        """Release lock by name. Returns True if successful"""

        # No need to delete the lock. lock_create() will not complain
        # if a lock with the same name is created later.
        if CONF.music_api.debug:
            LOG.debug("Releasing lock {}".format(lock_name))
        if lock_name:
            return self._lock_id_release(self.lock_ids.get(lock_name))

    def lock_delete(self, lock_name):
        """Delete a lock by name. Returns True if successful."""
        path = '/locks/delete/%s' % lock_name
        if CONF.music_api.debug:
            LOG.debug("Deleting lock {}".format(lock_name))
        response = self.rest.request(content_type='text/plain',
                                     method='delete', path=path)
        if response and response.ok:
            # pop() rather than del: the name may not be in the local
            # cache, and a bare del would raise KeyError.
            self.lock_ids.pop(lock_name, None)
        return response and response.ok

    def row_create(self, keyspace, table,  # pylint: disable=R0913
                   pk_name, pk_value, values, atomic=False):
        """Create a row. Returns True if successful."""
        payload = self.payload_init(keyspace, table, pk_value, atomic)
        data = payload.get('data')
        data['values'] = values

        path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
            'keyspace': keyspace,
            'table': table,
        }
        if CONF.music_api.debug:
            LOG.debug("Creating row with pk_value {} in table "
                      "{}, keyspace {}".format(pk_value, table, keyspace))
        response = self.rest.request(method='post', path=path, data=data)
        self.payload_delete(payload)
        return response and response.ok

    def row_update(self, keyspace, table,  # pylint: disable=R0913
                   pk_name, pk_value, values, atomic=False):
        """Update a row. Returns True if successful."""
        payload = self.payload_init(keyspace, table, pk_value, atomic)
        data = payload.get('data')
        data['values'] = values

        path = self._row_url_path(keyspace, table, pk_name, pk_value)
        if CONF.music_api.debug:
            LOG.debug("Updating row with pk_value {} in table "
                      "{}, keyspace {}".format(pk_value, table, keyspace))
        response = self.rest.request(method='put', path=path, data=data)
        self.payload_delete(payload)
        return response and response.ok

    def row_read(self, keyspace, table, pk_name=None, pk_value=None):
        """Read one or more rows. Not atomic. Returns parsed JSON."""
        path = self._row_url_path(keyspace, table, pk_name, pk_value)
        if CONF.music_api.debug:
            LOG.debug("Reading row with pk_value {} from table "
                      "{}, keyspace {}".format(pk_value, table, keyspace))
        response = self.rest.request(path=path)
        return response and response.json()

    def row_delete(self, keyspace, table, pk_name, pk_value, atomic=False):
        """Delete a row. Returns True if successful."""
        payload = self.payload_init(keyspace, table, pk_value, atomic)
        data = payload.get('data')

        path = self._row_url_path(keyspace, table, pk_name, pk_value)
        if CONF.music_api.debug:
            LOG.debug("Deleting row with pk_value {} from table "
                      "{}, keyspace {}".format(pk_value, table, keyspace))
        response = self.rest.request(method='delete', path=path, data=data)
        self.payload_delete(payload)
        return response and response.ok

    @staticmethod
    def _table_path_generate(keyspace, table):
        """Return the URL path for a table within a keyspace."""
        path = '/keyspaces/%(keyspace)s/tables/%(table)s/' % {
            'keyspace': keyspace,
            'table': table,
        }
        return path

    def table_create(self, keyspace, table, schema):
        """Creates a table. Returns True if successful."""
        payload = self.payload_init()
        data = payload.get('data')
        data['fields'] = schema

        path = self._table_path_generate(keyspace, table)
        if CONF.music_api.debug:
            LOG.debug("Creating table {}, keyspace {}".format(table, keyspace))
        response = self.rest.request(method='post', path=path, data=data)
        return response and response.ok

    def table_delete(self, keyspace, table):
        """Deletes a table. Returns True if successful."""
        payload = self.payload_init()
        data = payload.get('data')

        path = self._table_path_generate(keyspace, table)
        if CONF.music_api.debug:
            LOG.debug("Deleting table {}, keyspace {}".format(table, keyspace))
        response = self.rest.request(method='delete', path=path, data=data)
        return response and response.ok

    def version(self):
        """Returns version string."""
        path = '/version'
        if CONF.music_api.debug:
            LOG.debug("Requesting version info")
        response = self.rest.request(method='get',
                                     content_type='text/plain', path=path)
        return response and response.text
+
+
class MockAPI(object):
    """In-memory mock of the Music API wrapper (for testing)."""

    # Mock state for Music. NOTE: this is a class attribute, so the
    # mocked store is shared by every MockAPI instance in the process.
    music = {
        'keyspaces': {}
    }

    def __init__(self):
        """Initializer. Resets the mock store."""
        LOG.info(_LI("Initializing Music Mock API"))

        global MUSIC_API

        self.music['keyspaces'] = {}

        MUSIC_API = self

    @property
    def _keyspaces(self):
        """The mocked keyspace dict."""
        return self.music.get('keyspaces')

    def _set_keyspace(self, keyspace):
        self._keyspaces[keyspace] = {}

    def _unset_keyspace(self, keyspace):
        self._keyspaces.pop(keyspace)

    def _set_table(self, keyspace, table):
        self._keyspaces[keyspace][table] = {}

    def _unset_table(self, keyspace, table):
        self._keyspaces[keyspace].pop(table)

    def _get_row(self, keyspace, table, key=None):
        """Return rows keyed 'row N', mimicking Music's response shape."""
        rows = {}
        row_num = 0
        for row_key, row in self._keyspaces[keyspace][table].items():
            if not key or key == row_key:
                row_num += 1
                # Deep-copy so callers can't mutate the mock store.
                rows['row {}'.format(row_num)] = copy.deepcopy(row)
        return rows

    def _set_row(self, keyspace, table, key, row):
        self._keyspaces[keyspace][table][key] = row

    def _unset_row(self, keyspace, table, row):
        self._keyspaces[keyspace][table].pop(row)

    def keyspace_create(self, keyspace):
        """Creates a keyspace."""
        if CONF.music_api.debug:
            LOG.debug("Creating keyspace {}".format(keyspace))
        self._set_keyspace(keyspace)
        return True

    def keyspace_delete(self, keyspace):
        """Drops a keyspace."""
        if CONF.music_api.debug:
            LOG.debug("Deleting keyspace {}".format(keyspace))
        self._unset_keyspace(keyspace)
        return True

    def row_create(self, keyspace, table,  # pylint: disable=R0913
                   pk_name, pk_value, values, atomic=False):
        """Create a row."""
        if CONF.music_api.debug:
            LOG.debug("Creating row with pk_value {} in table "
                      "{}, keyspace {}".format(pk_value, table, keyspace))
        self._set_row(keyspace, table, pk_value, values)
        return True

    def row_update(self, keyspace, table,  # pylint: disable=R0913
                   pk_name, pk_value, values, atomic=False):
        """Update a row."""
        if CONF.music_api.debug:
            LOG.debug("Updating row with pk_value {} in table "
                      "{}, keyspace {}".format(pk_value, table, keyspace))
        self._set_row(keyspace, table, pk_value, values)
        return True

    def row_read(self, keyspace, table, pk_name=None, pk_value=None):
        """Read one or more rows. Not atomic."""
        if CONF.music_api.debug:
            LOG.debug("Reading row with pk_value {} from table "
                      "{}, keyspace {}".format(pk_value, table, keyspace))
        values = self._get_row(keyspace, table, pk_value)
        return values

    def row_delete(self, keyspace, table, pk_name, pk_value, atomic=False):
        """Delete a row."""
        if CONF.music_api.debug:
            LOG.debug("Deleting row with pk_value {} from table "
                      "{}, keyspace {}".format(pk_value, table, keyspace))
        self._unset_row(keyspace, table, pk_value)
        return True

    def table_create(self, keyspace, table, schema):
        """Creates a table."""
        if CONF.music_api.debug:
            LOG.debug("Creating table {}, keyspace {}".format(table, keyspace))
        self._set_table(keyspace, table)
        return True

    def table_delete(self, keyspace, table):
        """Deletes a table."""
        if CONF.music_api.debug:
            LOG.debug("Deleting table {}, keyspace {}".format(table, keyspace))
        self._unset_table(keyspace, table)
        return True

    def version(self):
        """Returns version string."""
        if CONF.music_api.debug:
            LOG.debug("Requesting version info")
        return "v1-mock"
+
+
def API():
    """Factory: return the Music API wrapper, mocked when configured."""

    # FIXME(jdandrea): Follow more formal practices for defining/using mocks
    api_class = MockAPI if CONF.music_api.mock else MusicAPI
    return api_class()
diff --git a/conductor/conductor/common/music/messaging/__init__.py b/conductor/conductor/common/music/messaging/__init__.py
new file mode 100644
index 0000000..6cbca8c
--- /dev/null
+++ b/conductor/conductor/common/music/messaging/__init__.py
@@ -0,0 +1,22 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from .component import RPCClient # noqa: F401
+from .component import RPCService # noqa: F401
+from .component import Target # noqa: F401
diff --git a/conductor/conductor/common/music/messaging/component.py b/conductor/conductor/common/music/messaging/component.py
new file mode 100644
index 0000000..becd02e
--- /dev/null
+++ b/conductor/conductor/common/music/messaging/component.py
@@ -0,0 +1,435 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
import inspect
import sys
import time

import cotyledon
import futurist
from oslo_config import cfg
from oslo_log import log
from oslo_messaging._drivers import common as rpc_common

from conductor.common.music.messaging import message
from conductor.common.music.model import base
from conductor.i18n import _LE, _LI  # pylint: disable=W0212

LOG = log.getLogger(__name__)

CONF = cfg.CONF

# Options for the Music-backed messaging layer, registered under
# the [messaging_server] config group.
MESSAGING_SERVER_OPTS = [
    cfg.StrOpt('keyspace',
               default='conductor_rpc',
               help='Music keyspace for messages'),
    cfg.IntOpt('check_interval',
               default=1,
               min=1,
               help='Wait interval while checking for a message response. '
                    'Default value is 1 second.'),
    cfg.IntOpt('timeout',
               default=10,
               min=1,
               help='Overall message response timeout. '
                    'Default value is 10 seconds.'),
    cfg.IntOpt('workers',
               default=1,
               min=1,
               help='Number of workers for messaging service. '
                    'Default value is 1.'),
    cfg.IntOpt('polling_interval',
               default=1,
               min=1,
               help='Time between checking for new messages. '
                    'Default value is 1.'),
    cfg.BoolOpt('debug',
                default=False,
                help='Log debug messages. '
                     'Default value is False.'),
]

CONF.register_opts(MESSAGING_SERVER_OPTS, group='messaging_server')

# Some class/method descriptions taken from this Oslo Messaging
# RPC API Tutorial/Demo: https://www.youtube.com/watch?v=Bf4gkeoBzvA

# Process title for the RPC server (shows up in 'ps xaf').
RPCSVRNAME = "Music-RPC Server"
+
+
class Target(object):
    """Returns a messaging target.

    A target encapsulates all the information to identify where a message
    should be sent or what messages a server is listening for.
    """

    _topic = None
    _topic_class = None

    def __init__(self, topic):
        """Set the topic and build the topic's message class."""
        self._topic = topic

        # Because this is Music-specific, the server is
        # built-in to the API class, stored as the transport.
        # Thus, unlike oslo.messaging, there is no server
        # specified for a target. There also isn't an
        # exchange, namespace, or version at the moment.

        # Dynamically create a message class for this topic.
        topic_class = base.create_dynamic_model(
            keyspace=CONF.messaging_server.keyspace,
            baseclass=message.Message, classname=self.topic)
        if not topic_class:
            raise RuntimeError("Error setting the topic class "
                               "for the messaging layer.")
        self._topic_class = topic_class

    @property
    def topic(self):
        """Topic name for this target."""
        return self._topic

    @property
    def topic_class(self):
        """Dynamically created message class for this topic."""
        return self._topic_class
+
+
class RPCClient(object):
    """Returns an RPC client using Music as a transport.

    The RPC client is responsible for sending method invocations
    to remote servers via a messaging transport.

    A method invocation consists of a request context dictionary,
    a method name, and a dictionary of arguments. A cast() invocation
    just sends the request and returns immediately. A call() invocation
    waits for the server to send a return value.
    """

    def __init__(self, conf, transport, target):
        """Set the transport and target"""
        self.conf = conf
        self.transport = transport
        self.target = target
        self.RPC = self.target.topic_class

        # introduced as a quick means to cache messages
        # with the aim of preventing unnecessary communication
        # across conductor components.
        # self.message_cache = dict()

    def __check_rpc_status(self, rpc_id, rpc_method):
        """Check status for a given message id"""
        # Wait check_interval seconds before proceeding
        check_interval = self.conf.messaging_server.check_interval
        time.sleep(check_interval)
        if self.conf.messaging_server.debug:
            LOG.debug("Checking status for message {} method {} on "
                      "topic {}".format(rpc_id, rpc_method, self.target.topic))
        rpc = self.RPC.query.one(rpc_id)
        return rpc

    def cast(self, ctxt, method, args):
        """Asynchronous Call. Returns the enqueued message id."""
        rpc = self.RPC(action=self.RPC.CAST,
                       ctxt=ctxt, method=method, args=args)
        assert(rpc.enqueued)

        rpc_id = rpc.id
        topic = self.target.topic
        LOG.info(
            _LI("Message {} on topic {} enqueued").format(rpc_id, topic))
        if self.conf.messaging_server.debug:
            LOG.debug("Casting method {} with args {}".format(method, args))

        return rpc_id

    def call(self, ctxt, method, args):
        """Synchronous Call. Blocks until a response or timeout."""
        # # check if the call has a message saved in cache
        # # key: string concatenation of ctxt + method + args
        # # value: rpc response object
        # key = ""
        # for k, v in ctxt.items():
        #     key += str(k)
        #     key += '#' + str(v) + '#'
        # key += '|' + str(method) + '|'
        # for k, v in args.items():
        #     key += str(k)
        #     key += '#' + str(v) + '#'
        #
        # # check if the method has been called before
        # # and cached
        # if key in self.message_cache:
        #     LOG.debug("Retrieved method {} with args "
        #               "{} from cache".format(method, args))
        #     return self.message_cache[key]

        rpc_start_time = time.time()

        rpc = self.RPC(action=self.RPC.CALL,
                       ctxt=ctxt, method=method, args=args)

        # TODO(jdandrea): Do something if the assert fails.
        assert(rpc.enqueued)

        rpc_id = rpc.id
        topic = self.target.topic
        LOG.info(
            _LI("Message {} on topic {} enqueued.").format(rpc_id, topic))
        if self.conf.messaging_server.debug:
            LOG.debug("Calling method {} with args {}".format(method, args))

        # Check message status within a thread
        executor = futurist.ThreadPoolExecutor()
        started_at = time.time()
        while (time.time() - started_at) <= \
                self.conf.messaging_server.timeout:
            fut = executor.submit(self.__check_rpc_status, rpc_id, method)
            rpc = fut.result()
            if rpc and rpc.finished:
                if self.conf.messaging_server.debug:
                    LOG.debug("Message {} method {} response received".
                              format(rpc_id, method))
                break
        executor.shutdown()

        # Get response, delete message, and return response
        response = None
        failure = None
        if not rpc:
            # Nothing came back from the query: treat as a timeout
            # instead of raising AttributeError on rpc.response below.
            LOG.error(_LE("Message {} on topic {} timed out at {} seconds").
                      format(rpc_id, topic,
                             self.conf.messaging_server.timeout))
        else:
            if not rpc.finished:
                LOG.error(
                    _LE("Message {} on topic {} timed out at {} seconds").
                    format(rpc_id, topic,
                           self.conf.messaging_server.timeout))
            elif not rpc.ok:
                LOG.error(_LE("Message {} on topic {} returned an error").
                          format(rpc_id, topic))
            response = rpc.response
            failure = rpc.failure
            rpc.delete()  # TODO(jdandrea): Put a TTL on the msg instead?
        # self.message_cache[key] = response

        LOG.debug("Elapsed time: {0:.3f} sec".format(
            time.time() - rpc_start_time)
        )
        # If there's a failure, raise it as an exception
        allowed = []
        if failure is not None and failure != '':
            # TODO(jdandrea): Do we need to populate allowed(_remote_exmods)?
            raise rpc_common.deserialize_remote_exception(failure, allowed)
        return response
+
+
class RPCService(cotyledon.Service):
    """Listener for the RPC service.

    An RPC Service exposes a number of endpoints, each of which contain
    a set of methods which may be invoked remotely by clients over a
    given transport. To create an RPC server, you supply a transport,
    target, and a list of endpoints.

    Start the server with server.run()
    """

    # This will appear in 'ps xaf'
    name = RPCSVRNAME

    def __init__(self, worker_id, conf, **kwargs):
        """Initializer"""
        super(RPCService, self).__init__(worker_id)
        if conf.messaging_server.debug:
            LOG.debug("%s" % self.__class__.__name__)
        self._init(conf, **kwargs)
        self.running = True

    def _init(self, conf, **kwargs):
        """Prepare to process requests"""
        self.conf = conf
        self.rpc_listener = None
        self.transport = kwargs.pop('transport')
        self.target = kwargs.pop('target')
        self.endpoints = kwargs.pop('endpoints')
        self.flush = kwargs.pop('flush')
        self.kwargs = kwargs
        self.RPC = self.target.topic_class
        self.name = "{}, topic({})".format(RPCSVRNAME, self.target.topic)

        if self.flush:
            self._flush_enqueued()

    def _flush_enqueued(self):
        """Flush all messages with an enqueued status.

        Use this only when the parent service is not running concurrently.
        """

        msgs = self.RPC.query.all()
        for msg in msgs:
            if msg.enqueued:
                msg.delete()

    def _log_error_and_update_msg(self, msg, error_msg):
        """Log an error and mark the message as errored with the reason."""
        LOG.error(error_msg)
        msg.response = {
            'error': {
                'message': error_msg
            }
        }
        msg.status = message.Message.ERROR
        msg.update()

    def __check_for_messages(self):
        """Wait for the polling interval, then do the real message check."""

        # Wait for at least poll_interval sec
        polling_interval = self.conf.messaging_server.polling_interval
        time.sleep(polling_interval)
        if self.conf.messaging_server.debug:
            LOG.debug("Topic {}: Checking for new messages".format(
                self.target.topic))
        self._do()
        return True

    # FIXME(jdandrea): Better name for this, please, kthx.
    def _do(self):
        """Look for a new RPC call and serve it"""
        # Get all the messages in queue
        msgs = self.RPC.query.all()
        for msg in msgs:
            # Find the first msg marked as enqueued.
            if not msg.enqueued:
                continue

            # RPC methods must not start/end with an underscore.
            if msg.method.startswith('_') or msg.method.endswith('_'):
                error_msg = _LE("Method {} must not start or end "
                                "with underscores").format(msg.method)
                self._log_error_and_update_msg(msg, error_msg)
                return

            # The first endpoint that supports the method wins.
            method = None
            for endpoint in self.endpoints:
                if msg.method not in dir(endpoint):
                    continue
                endpoint_method = getattr(endpoint, msg.method)
                if callable(endpoint_method):
                    method = endpoint_method
                    if self.conf.messaging_server.debug:
                        # NOTE: the original logged method.__str__.__name__,
                        # which always prints '__str__'. Log the endpoint's
                        # class name instead.
                        LOG.debug("Message {} method {} is "
                                  "handled by endpoint {}".
                                  format(msg.id, msg.method,
                                         type(endpoint).__name__))
                    break
            if not method:
                error_msg = _LE("Message {} method {} unsupported "
                                "in endpoints.").format(msg.id, msg.method)
                self._log_error_and_update_msg(msg, error_msg)
                return

            # All methods must take a ctxt and args param.
            # NOTE: inspect.getargspec was removed in Python 3.11;
            # fall back to it only when getfullargspec is unavailable
            # (their .args attributes are compatible).
            argspec = getattr(inspect, 'getfullargspec', None) or \
                inspect.getargspec
            if argspec(method).args != ['self', 'ctx', 'arg']:
                error_msg = _LE("Method {} must take three args: "
                                "self, ctx, arg").format(msg.method)
                self._log_error_and_update_msg(msg, error_msg)
                return

            LOG.info(_LI("Message {} method {} received").format(
                msg.id, msg.method))
            if self.conf.messaging_server.debug:
                LOG.debug(
                    _LI("Message {} method {} context: {}, args: {}").format(
                        msg.id, msg.method, msg.ctxt, msg.args))

            failure = None
            try:
                # Methods return an opaque dictionary
                result = method(msg.ctxt, msg.args)

                # FIXME(jdandrea): Remove response/error and make it opaque.
                # That means this would just be assigned result outright.
                msg.response = result.get('response', result)
            except Exception:
                # Current sys.exc_info() content can be overridden
                # by another exception raised by a log handler during
                # LOG.exception(). So keep a copy and delete it later.
                failure = sys.exc_info()

                # Do not log details about the failure here. It will
                # be returned later upstream.
                LOG.exception(_LE('Exception during message handling'))

            try:
                if failure is None:
                    msg.status = message.Message.COMPLETED
                else:
                    msg.failure = \
                        rpc_common.serialize_remote_exception(failure)
                    msg.status = message.Message.ERROR
                LOG.info(_LI("Message {} method {}, status: {}").format(
                    msg.id, msg.method, msg.status))
                if self.conf.messaging_server.debug:
                    LOG.debug("Message {} method {}, response: {}".format(
                        msg.id, msg.method, msg.response))
                msg.update()
            except Exception:
                LOG.exception(_LE("Can not send reply for message {} "
                                  "method {}").
                              format(msg.id, msg.method))
            finally:
                # Remove circular object reference between the current
                # stack frame and the traceback in exc_info.
                del failure

    def _gracefully_stop(self):
        """Gracefully stop working on things"""
        pass

    def _restart(self):
        """Prepare to restart the RPC Server"""
        pass

    def run(self):
        """Run"""
        # The server listens for messages and calls the
        # appropriate methods. It also deletes messages once
        # processed.
        if self.conf.messaging_server.debug:
            LOG.debug("%s" % self.__class__.__name__)

        # Listen for messages within a thread
        executor = futurist.ThreadPoolExecutor()
        while self.running:
            fut = executor.submit(self.__check_for_messages)
            fut.result()
        executor.shutdown()

    def terminate(self):
        """Terminate"""
        if self.conf.messaging_server.debug:
            LOG.debug("%s" % self.__class__.__name__)
        self.running = False
        self._gracefully_stop()
        super(RPCService, self).terminate()

    def reload(self):
        """Reload"""
        if self.conf.messaging_server.debug:
            LOG.debug("%s" % self.__class__.__name__)
        self._restart()
diff --git a/conductor/conductor/common/music/messaging/message.py b/conductor/conductor/common/music/messaging/message.py
new file mode 100644
index 0000000..8f20162
--- /dev/null
+++ b/conductor/conductor/common/music/messaging/message.py
@@ -0,0 +1,178 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""Message Model"""
+
+import json
+import time
+
+from conductor.common.music.model import base
+
+
def current_time_millis():
    """Return the current UNIX time in whole milliseconds."""
    millis = time.time() * 1000.0
    return int(round(millis))
+
+
class Message(base.Base):
    """Message model.

    DO NOT use this class directly! With messaging, the table
    name must be the message topic, thus this class has a
    __tablename__ and __keyspace__ of None.

    Only create Message-based classes using:
    base.create_dynamic_model(keyspace=KEYSPACE,
    baseclass=Message, classname=TOPIC_NAME).
    The table will be automatically created if it doesn't exist.
    """

    # Intentionally None; set by base.create_dynamic_model().
    __tablename__ = None
    __keyspace__ = None

    # Row fields, populated per-instance (see schema() for types).
    id = None  # pylint: disable=C0103
    action = None
    created = None
    updated = None
    ctxt = None
    method = None
    args = None
    status = None
    response = None
    failure = None

    # Actions
    CALL = "call"
    CAST = "cast"
    ACTIONS = [CALL, CAST, ]

    # Status
    ENQUEUED = "enqueued"
    COMPLETED = "completed"
    ERROR = "error"
    STATUS = [ENQUEUED, COMPLETED, ERROR, ]
    # Terminal states; see the finished property.
    FINISHED = [COMPLETED, ERROR, ]

    @classmethod
    def schema(cls):
        """Return schema."""
        schema = {
            'id': 'text',  # Message ID in UUID4 format
            'action': 'text',  # Message type (call, cast)
            'created': 'bigint',  # Creation time in msec from epoch
            'updated': 'bigint',  # Last update time in msec from epoch
            'ctxt': 'text',  # JSON request context dictionary
            'method': 'text',  # RPC method name
            'args': 'text',  # JSON argument dictionary
            'status': 'text',  # Status (enqueued, completed, error)
            'response': 'text',  # Response JSON
            'failure': 'text',  # Failure JSON (used for exceptions)
            'PRIMARY KEY': '(id)',
        }
        return schema

    @classmethod
    def atomic(cls):
        """Use atomic operations"""
        return False  # FIXME: this should be True for atomic operations

    @classmethod
    def pk_name(cls):
        """Primary key name"""
        return 'id'

    def pk_value(self):
        """Primary key value"""
        return self.id

    @property
    def enqueued(self):
        """True while the message is waiting to be processed."""
        return self.status == self.ENQUEUED

    @property
    def finished(self):
        """True once processing has ended (completed or error)."""
        return self.status in self.FINISHED

    @property
    def ok(self):
        """True if processing completed without error."""
        return self.status == self.COMPLETED

    def update(self):
        """Update message

        Side-effect: Sets the updated field to the current time.
        """
        self.updated = current_time_millis()
        super(Message, self).update()

    def values(self):
        """Values"""
        # Dict-valued fields are JSON-encoded for storage as text.
        # Note 'id' is deliberately absent: the base class passes
        # the primary key separately on insert/update.
        return {
            'action': self.action,
            'created': self.created,
            'updated': self.updated,
            'ctxt': json.dumps(self.ctxt),
            'method': self.method,
            'args': json.dumps(self.args),
            'status': self.status,
            'response': json.dumps(self.response),
            'failure': self.failure,  # already serialized by oslo_messaging
        }

    def __init__(self, action, ctxt, method, args,
                 created=None, updated=None, status=None,
                 response=None, failure=None, _insert=True):
        """Initializer

        With _insert=True (the default), this is a new message:
        dict-valued fields are kept as dicts and the row is written
        to storage immediately via insert(). With _insert=False the
        arguments come from a stored row, so the JSON text fields
        are decoded instead and nothing is written.
        """
        super(Message, self).__init__()
        self.action = action
        self.created = created or current_time_millis()
        self.updated = updated or current_time_millis()
        self.method = method
        self.status = status or self.ENQUEUED
        if _insert:
            self.ctxt = ctxt or {}
            self.args = args or {}
            self.response = response or {}
            self.failure = failure or ""
            self.insert()
        else:
            self.ctxt = json.loads(ctxt)
            self.args = json.loads(args)
            self.response = json.loads(response)
            self.failure = failure  # oslo_messaging will deserialize this

    def __repr__(self):
        """Object representation"""
        return '<Message Topic %r>' % self.__tablename__

    def __json__(self):
        """JSON representation"""
        json_ = {}
        json_['id'] = self.id
        json_['action'] = self.action
        # TODO(jdandrea): Format timestamps as ISO
        json_['created'] = self.created
        json_['updated'] = self.updated
        json_['ctxt'] = self.ctxt
        json_['method'] = self.method
        json_['args'] = self.args
        json_['status'] = self.status
        json_['response'] = self.response
        json_['failure'] = self.failure
        return json_
diff --git a/conductor/conductor/common/music/model/__init__.py b/conductor/conductor/common/music/model/__init__.py
new file mode 100644
index 0000000..f2bbdfd
--- /dev/null
+++ b/conductor/conductor/common/music/model/__init__.py
@@ -0,0 +1,19 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
diff --git a/conductor/conductor/common/music/model/base.py b/conductor/conductor/common/music/model/base.py
new file mode 100644
index 0000000..cecb6d2
--- /dev/null
+++ b/conductor/conductor/common/music/model/base.py
@@ -0,0 +1,168 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""Music ORM - Model"""
+
+from abc import ABCMeta
+from abc import abstractmethod
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log as logging
+import six
+
+from conductor.common.classes import abstractclassmethod
+from conductor.common.classes import classproperty
+from conductor.common.music import api
+from conductor.common.music.model import search
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
@six.add_metaclass(ABCMeta)
class Base(object):
    """A custom declarative base ORM-style class.

    Provides some Elixir-inspired shortcuts as well.

    Subclasses must set __tablename__/__keyspace__ and implement
    the abstract methods below. Rows are persisted through the
    module-level api.MUSIC_API singleton.
    """

    # These must be set in the derived class!
    __tablename__ = None
    __keyspace__ = None

    @classproperty
    def query(cls):  # pylint: disable=E0213
        """Return a query object a la sqlalchemy"""
        return search.Query(cls)

    @classmethod
    def __kwargs(cls):
        """Return common keyword args"""
        # keyspace/table pair used by every MUSIC_API call below.
        kwargs = {
            'keyspace': cls.__keyspace__,
            'table': cls.__tablename__,
        }
        return kwargs

    @classmethod
    def table_create(cls):
        """Create table"""
        kwargs = cls.__kwargs()
        kwargs['schema'] = cls.schema()
        api.MUSIC_API.table_create(**kwargs)

    @abstractclassmethod
    def atomic(cls):
        """Use atomic operations"""
        # Subclasses return True to request atomic row operations.
        return False

    @abstractclassmethod
    def schema(cls):
        """Return schema"""
        return cls()

    @abstractclassmethod
    def pk_name(cls):
        """Primary key name"""
        return cls()

    @abstractmethod
    def pk_value(self):
        """Primary key value"""
        pass

    @abstractmethod
    def values(self):
        """Values"""
        pass

    def insert(self):
        """Insert row"""
        kwargs = self.__kwargs()
        kwargs['pk_name'] = self.pk_name()
        kwargs['values'] = self.values()
        kwargs['atomic'] = self.atomic()
        pk_name = kwargs['pk_name']
        if pk_name not in kwargs['values']:
            # No primary key among the values: generate a UUID and
            # record it both in the row and on this object.
            # TODO(jdandrea): Make uuid4() generation a default method in Base.
            the_id = str(uuid.uuid4())
            kwargs['values'][pk_name] = the_id
            kwargs['pk_value'] = the_id
            setattr(self, pk_name, the_id)
        else:
            kwargs['pk_value'] = kwargs['values'][pk_name]
        api.MUSIC_API.row_create(**kwargs)

    def update(self):
        """Update row"""
        kwargs = self.__kwargs()
        kwargs['pk_name'] = self.pk_name()
        kwargs['pk_value'] = self.pk_value()
        kwargs['values'] = self.values()
        kwargs['atomic'] = self.atomic()
        # FIXME(jdandrea): Do we need this test/pop clause?
        # The primary key is already passed separately via pk_value,
        # so it is removed from the updated values if present.
        pk_name = kwargs['pk_name']
        if pk_name in kwargs['values']:
            kwargs['values'].pop(pk_name)
        api.MUSIC_API.row_update(**kwargs)

    def delete(self):
        """Delete row"""
        kwargs = self.__kwargs()
        kwargs['pk_name'] = self.pk_name()
        kwargs['pk_value'] = self.pk_value()
        kwargs['atomic'] = self.atomic()
        api.MUSIC_API.row_delete(**kwargs)

    @classmethod
    def filter_by(cls, **kwargs):
        """Filter objects"""
        return cls.query.filter_by(**kwargs)  # pylint: disable=E1101

    def flush(self, *args, **kwargs):
        """Flush changes to storage"""
        # TODO(jdandrea): Implement in music? May be a no-op
        pass

    def as_dict(self):
        """Return object representation as a dictionary"""
        # Private/internal attributes (leading underscore) are skipped.
        return dict((k, v) for k, v in self.__dict__.items()
                    if not k.startswith('_'))
+
+
def create_dynamic_model(keyspace, classname, baseclass):
    """Build a model class bound to a specific keyspace and table.

    Given a keyspace, a camelcase class name, and a base class
    derived from Base, return a new class whose table name is the
    lower-cased class name, creating the table if needed. Existing
    __tablename__/__keyspace__ values on the base class win; leave
    them None to use the classname/keyspace given here.
    """
    table_name = baseclass.__tablename__ or classname.lower()
    keyspace_name = baseclass.__keyspace__ or keyspace
    attributes = {
        '__tablename__': table_name,
        '__keyspace__': keyspace_name,
    }
    # (baseclass,) is a one-element tuple of bases.
    dynamic_class = type(classname, (baseclass,), attributes)
    dynamic_class.table_create()
    return dynamic_class
diff --git a/conductor/conductor/common/music/model/search.py b/conductor/conductor/common/music/model/search.py
new file mode 100644
index 0000000..67ff92e
--- /dev/null
+++ b/conductor/conductor/common/music/model/search.py
@@ -0,0 +1,121 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""Music ORM - Search"""
+
+import inspect
+
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from conductor.common.music import api
+
+# FIXME(jdandrea): Keep for the __init__
+# from conductor.common.classes import get_class
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
class Query(object):
    """ORM-style query helper bound to a single model class."""

    model = None

    def __init__(self, model):
        """Bind the query to a model class."""
        if inspect.isclass(model):
            self.model = model
        # FIXME(jdandrea): Bring this back so it's path-agnostic.
        # elif isinstance(model, basestring):
        #     self.model = get_class('conductor_api.models.' + model)
        assert inspect.isclass(self.model)

    def __kwargs(self):
        """Return common keyword args"""
        return {
            'keyspace': self.model.__keyspace__,
            'table': self.model.__tablename__,  # pylint: disable=E1101
        }

    def __rows_to_objects(self, rows):
        """Convert query response rows to model objects"""
        key_field = self.model.pk_name()  # pylint: disable=E1101
        objects = []
        for row in rows.values():
            key = row.pop(key_field)
            obj = self.model(_insert=False, **row)
            setattr(obj, key_field, key)
            objects.append(obj)
        return Results(objects)

    def one(self, pk_value):
        """Return object with pk_name matching pk_value"""
        lookup = self.__kwargs()
        lookup['pk_name'] = self.model.pk_name()
        lookup['pk_value'] = pk_value
        rows = api.MUSIC_API.row_read(**lookup)
        return self.__rows_to_objects(rows).first()

    def all(self):
        """Return all objects"""
        rows = api.MUSIC_API.row_read(**self.__kwargs())
        return self.__rows_to_objects(rows)

    def filter_by(self, **kwargs):
        """Filter objects"""
        # Music can only look up rows by primary key, so fetch
        # everything and filter client-side. All criteria are
        # AND-ed together.
        matches = Results([])
        for candidate in self.all():
            if all(getattr(candidate, field) == wanted
                   for field, wanted in kwargs.items()):
                matches.append(candidate)
        return matches

    def first(self):
        """Return first object"""
        return self.all().first()
+
+
class Results(list):
    """Query results.

    A list of model objects with convenience accessors.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=W0613
        """Initialize from an optional iterable of results.

        Robustness fix: Results() with no arguments now produces an
        empty result set instead of raising IndexError on args[0].
        """
        super(Results, self).__init__(args[0] if args else [])

    def all(self):
        """Return all results (self)."""
        return self

    def first(self):
        """Return the first result, or None if there are none."""
        if len(self) > 0:
            return self[0]
        return None
diff --git a/conductor/conductor/common/music/model/transaction.py b/conductor/conductor/common/music/model/transaction.py
new file mode 100644
index 0000000..ced4e42
--- /dev/null
+++ b/conductor/conductor/common/music/model/transaction.py
@@ -0,0 +1,54 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""Music ORM - Transactions"""
+
+from oslo_log import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
def start():
    """Start transaction"""
    # Intentional no-op: presumably Music needs no explicit
    # transaction handling; kept so callers can use an ORM-style
    # transaction API. TODO(review): confirm.
    pass
+
+
def start_read_only():
    """Start read-only transaction"""
    # Same as start(); no read-only distinction is made here.
    start()
+
+
def commit():
    """Commit transaction"""
    # Intentional no-op placeholder; see start().
    pass
+
+
def rollback():
    """Rollback transaction"""
    # Intentional no-op placeholder; see start().
    pass
+
+
def clear():
    """Clear transaction"""
    # Intentional no-op placeholder; see start().
    pass
+
+
def flush():
    """Flush to disk"""
    # Intentional no-op placeholder; see start().
    pass
diff --git a/conductor/conductor/common/music/voting.py b/conductor/conductor/common/music/voting.py
new file mode 100644
index 0000000..c9c02ed
--- /dev/null
+++ b/conductor/conductor/common/music/voting.py
@@ -0,0 +1,106 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import time
+
+from oslo_config import cfg
+
+from conductor.common.music import api
+from conductor import service
+
+CONF = cfg.CONF
+
+
def current_time_millis():
    """Current epoch time, rounded to the nearest millisecond."""
    return int(round(1000 * time.time()))
+
+
def main():
    """Sample usage of Music.

    Walks through the Music API end to end: keyspace and table
    creation, row create/update/read/delete, and keyspace teardown,
    using the mocked in-memory backend.
    """
    service.prepare_service()
    CONF.set_override('debug', True, 'music_api')
    CONF.set_override('mock', True, 'music_api')
    CONF.set_override('hostnames', ['music2'], 'music_api')
    music = api.API()
    print("Music version %s" % music.version())

    # Randomize the name so that we don't step on each other.
    # Bug fix: use floor division; under Python 3 "/" returns a
    # float, which would embed a "." in the keyspace name.
    keyspace = 'NewVotingApp' + str(current_time_millis() // 100)
    music.keyspace_create(keyspace)

    # Create the table
    kwargs = {
        'keyspace': keyspace,
        'table': 'votecount',
        'schema': {
            'name': 'text',
            'count': 'varint',
            'PRIMARY KEY': '(name)'
        }
    }
    music.table_create(**kwargs)

    # Candidate data
    data = {
        'Joe': 5,
        'Shankar': 7,
        'Gueyoung': 8,
        'Matti': 2,
        'Kaustubh': 0
    }

    # Create an entry in the voting table for each candidate
    # and with a vote count of 0.
    kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
    for name in data:  # We only want the keys
        kwargs['pk_value'] = name
        kwargs['values'] = {'name': name, 'count': 0}
        music.row_create(**kwargs)

    # Update each candidate's count atomically.
    kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
    for name in data:
        count = data[name]
        kwargs['pk_value'] = name
        kwargs['values'] = {'count': count}
        kwargs['atomic'] = True
        music.row_update(**kwargs)

    # Read all rows
    kwargs = {'keyspace': keyspace, 'table': 'votecount'}
    print(music.row_read(**kwargs))  # Reads all rows

    # Delete Joe, read Matti
    kwargs = {'keyspace': keyspace, 'table': 'votecount', 'pk_name': 'name'}
    kwargs['pk_value'] = 'Joe'
    music.row_delete(**kwargs)
    kwargs['pk_value'] = 'Matti'
    print(music.row_read(**kwargs))

    # Read all rows again
    kwargs = {'keyspace': keyspace, 'table': 'votecount'}
    print(music.row_read(**kwargs))  # Reads all rows

    # Cleanup.
    music.keyspace_delete(keyspace)


if __name__ == "__main__":
    main()
diff --git a/conductor/conductor/common/rest.py b/conductor/conductor/common/rest.py
new file mode 100644
index 0000000..7bbe5af
--- /dev/null
+++ b/conductor/conductor/common/rest.py
@@ -0,0 +1,172 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""REST Helper"""
+
+import json
+from os import path
+
+from oslo_config import cfg
+from oslo_log import log
+import requests
+from requests.auth import HTTPBasicAuth
+from six.moves.urllib import parse
+
+from conductor.i18n import _LE, _LW # pylint: disable=W0212
+
+LOG = log.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
class RESTException(IOError):
    """Basic exception for errors raised by REST"""
    # Catch-all base: callers can catch RESTException to handle any
    # error raised by this module. The subclasses below also derive
    # from ValueError so bad-argument handlers can catch them too.


class CertificateFileNotFoundException(RESTException, ValueError):
    """Certificate file was not found"""


class MissingURLNetlocException(RESTException, ValueError):
    """URL is missing a host/port"""


class ProhibitedURLSchemeException(RESTException, ValueError):
    """URL is using a prohibited scheme"""
+
+
class REST(object):
    """Helper class for REST operations."""

    server_url = None
    timeout = None

    # Why the funny looking connect/read timeouts? Here, read this:
    # http://docs.python-requests.org/en/master/user/advanced/#timeouts

    def __init__(self, server_url, retries=3, connect_timeout=3.05,
                 read_timeout=12.05, username=None, password=None,
                 cert_file=None, cert_key_file=None, ca_bundle_file=None,
                 log_debug=False):
        """Initializer.

        Validates the server URL (http/https with a host) and any
        certificate file paths, then prepares a pooled session.

        Raises:
            ProhibitedURLSchemeException: scheme is not http/https.
            MissingURLNetlocException: URL has no host[:port].
            CertificateFileNotFoundException: a given cert/key/CA
                bundle path does not exist.
        """
        parsed = parse.urlparse(server_url, 'http')
        if parsed.scheme not in ('http', 'https'):
            raise ProhibitedURLSchemeException
        if not parsed.netloc:
            raise MissingURLNetlocException

        for file_path in (cert_file, cert_key_file, ca_bundle_file):
            if file_path and not path.exists(file_path):
                raise CertificateFileNotFoundException

        self.server_url = server_url.rstrip('/')
        self.retries = int(retries)
        # (connect, read) tuple per python-requests timeout docs.
        self.timeout = (float(connect_timeout), float(read_timeout))
        self.log_debug = log_debug
        self.username = username
        self.password = password
        self.cert = cert_file
        self.key = cert_key_file
        self.verify = ca_bundle_file

        # FIXME(jdandrea): Require a CA bundle; do not suppress warnings.
        # This is here due to an A&AI's cert/server name mismatch.
        # Permitting this defeats the purpose of using SSL/TLS.
        if self.verify == "":
            requests.packages.urllib3.disable_warnings()
            self.verify = False

        # Use connection pooling, kthx.
        # http://docs.python-requests.org/en/master/user/advanced/
        self.session = requests.Session()

    def request(self, method='get', content_type='application/json',
                path='', headers=None, data=None):
        """Performs HTTP request. Returns a requests.Response object.

        Unknown methods fall back to GET. The request is retried up
        to self.retries times until a non-error response arrives.
        Returns None if every attempt raised a RequestException.
        """
        if method not in ('post', 'get', 'put', 'delete'):
            method = 'get'
        method_fn = getattr(self.session, method)

        full_headers = {
            'Accept': content_type,
            'Content-Type': content_type,
        }
        if headers:
            full_headers.update(headers)
        full_url = '{}/{}'.format(self.server_url, path.lstrip('/'))

        # Prepare the request args
        try:
            data_str = json.dumps(data) if data else None
        except (TypeError, ValueError):
            data_str = data
        kwargs = {
            'data': data_str,
            'headers': full_headers,
            'timeout': self.timeout,
            # NOTE(review): this is (None, None) when no client cert
            # is configured — confirm requests treats that as "no
            # cert" for this deployment.
            'cert': (self.cert, self.key),
            'verify': self.verify,
            'stream': False,
        }
        if self.username or self.password:
            LOG.debug("Using HTTPBasicAuth")
            kwargs['auth'] = HTTPBasicAuth(self.username, self.password)
        if self.cert and self.key:
            LOG.debug("Using SSL/TLS Certificate/Key")

        if self.log_debug:
            LOG.debug("Request: {} {}".format(method.upper(), full_url))
            if data:
                LOG.debug("Request Body: {}".format(json.dumps(data)))
        response = None
        for attempt in range(self.retries):
            if attempt > 0:
                # LOG.warn is a deprecated alias; use warning().
                LOG.warning(_LW("Retry #{}/{}").format(
                    attempt + 1, self.retries))

            try:
                response = method_fn(full_url, **kwargs)

                # We shouldn't have to do this since stream is set to False,
                # but we're gonna anyway. See "Body Content Workflow" here:
                # http://docs.python-requests.org/en/master/user/advanced/
                response.close()

                if not response.ok:
                    LOG.warning("Response Status: {} {}".format(
                        response.status_code, response.reason))
                if self.log_debug and response.text:
                    try:
                        response_dict = json.loads(response.text)
                        LOG.debug("Response JSON: {}".format(
                            json.dumps(response_dict)))
                    except ValueError:
                        LOG.debug("Response Body: {}".format(response.text))
                if response.ok:
                    break
            except requests.exceptions.RequestException as err:
                # Bug fix: Python 3 exceptions have no .message
                # attribute (err.message raised AttributeError here);
                # log the exception object itself instead.
                LOG.error("Exception: %s", err)

        # Response.__bool__ returns false if status is not ok. Ruh roh!
        # That means we must check the object type vs treating it as a bool.
        # More info: https://github.com/kennethreitz/requests/issues/2002
        if isinstance(response, requests.models.Response) and not response.ok:
            LOG.error(_LE("Status {} {} after {} retries for URL: {}").format(
                response.status_code, response.reason, self.retries, full_url))
        return response
diff --git a/conductor/conductor/common/threshold.py b/conductor/conductor/common/threshold.py
new file mode 100644
index 0000000..4ab81fd
--- /dev/null
+++ b/conductor/conductor/common/threshold.py
@@ -0,0 +1,281 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import itertools
+
+import six
+
+
class ThresholdException(Exception):
    """Raised for malformed or unsupported threshold expressions."""
    pass
+
+
def is_number(input):
    """Return True if the value is (or parses as) a number.

    Accepts int and float instances, and strings float() can parse.
    bool is deliberately excluded (type() checks, not isinstance).

    Bug fix: previously numeric strings that parse to zero (e.g.
    "0", "0.0") returned False because the parsed value itself was
    used in the boolean test (`float(input)` is falsy for zero).
    """
    try:
        if type(input) is int or type(input) is float:
            return True
        elif isinstance(input, six.string_types):
            float(input)  # raises ValueError if not numeric
            return True
    except ValueError:
        pass
    return False
+
+
class Threshold(object):
    """Parser for threshold expressions such as '< 100 ms' or '1-5 sec'.

    An expression is whitespace-separated parts: an optional
    operator, a single value or min-max range, and an optional
    unit. Parsed values are normalized to the base unit's default
    (1.0 multiplier) unit.
    """

    OPERATORS = ['=', '<', '>', '<=', '>=']
    UNITS = {
        'currency': {
            'USD': 1.0,
        },
        'time': {
            'ms': 1.0,
            'sec': 1000.0,
        },
        'distance': {
            'km': 1.0,
            'mi': 1.609344,
        },
        'throughput': {
            'Kbps': 0.001,
            'Mbps': 1.0,
            'Gbps': 1000.0,
        },
    }

    def __init__(self, expression, base_unit):
        """Parse expression against base_unit (a UNITS key)."""
        if not isinstance(expression, six.string_types):
            raise ThresholdException("Expression must be a string")
        if not isinstance(base_unit, six.string_types):
            raise ThresholdException("Base unit must be a string")
        if base_unit not in self.UNITS:
            raise ThresholdException(
                "Base unit {} unsupported, must be one of: {}".format(
                    base_unit, ', '.join(self.UNITS.keys())))

        self._expression = expression
        self._base_unit = base_unit
        self._parse()

    def __repr__(self):
        """Object representation"""
        return "<Threshold expression: '{}', base_unit: '{}', " \
               "parts: {}>".format(self.expression, self.base_unit, self.parts)

    def _all_units(self):
        """Returns a single list of all supported units"""
        unit_lists = [self.UNITS[k].keys() for k in self.UNITS.keys()]
        return list(itertools.chain.from_iterable(unit_lists))

    def _default_for_base_unit(self, base_unit):
        """Returns the default unit (1.0 multiplier) for a given base unit

        Returns None if not found.
        """
        units = self.UNITS.get(base_unit)
        if units:
            for name, multiplier in units.items():
                if multiplier == 1.0:
                    return name
        return None

    def _multiplier_for_unit(self, unit):
        """Returns the multiplier for a given unit

        Returns None if the unit does not belong to the base unit.
        """
        return self.UNITS.get(self.base_unit, {}).get(unit)

    def _reset(self):
        """Resets parsed components"""
        self._operator = None
        self._value = None
        self._min_value = None
        self._max_value = None
        self._unit = None
        self._parsed = False

    def _parse(self):
        """Parses the expression into parts.

        Bug fixes vs. the original: presence checks now compare
        against None, so a legitimate value of 0 (or a range bounded
        by 0) is no longer treated as missing; a unit belonging to a
        different base unit is rejected here instead of causing a
        TypeError during conversion; malformed ranges (e.g. '5-x')
        raise instead of being silently ignored; the unreachable/
        garbled error message in the unit branch is corrected.
        """
        self._reset()
        parts = self.expression.split()
        for part in parts:
            # Is it an operator?
            if not self.operator and part in self.OPERATORS:
                if self.value is not None:
                    raise ThresholdException(
                        "Value {} encountered before operator {} "
                        "in expression '{}'".format(
                            self.value, part, self.expression))
                if self.has_range:
                    raise ThresholdException(
                        "Range {}-{} encountered before operator {} "
                        "in expression '{}'".format(
                            self.min_value, self.max_value,
                            part, self.expression))
                if self.unit:
                    raise ThresholdException(
                        "Unit '{}' encountered before operator {} "
                        "in expression '{}'".format(
                            self.unit, part, self.expression))

                self._operator = part

            # Is it a lone value?
            elif self.value is None and is_number(part):
                if self.has_range:
                    raise ThresholdException(
                        "Range {}-{} encountered before value {} "
                        "in expression '{}'".format(
                            self.min_value, self.max_value,
                            part, self.expression))
                if self.unit:
                    raise ThresholdException(
                        "Unit '{}' encountered before value {} "
                        "in expression '{}'".format(
                            self.unit, part, self.expression))
                self._value = float(part)
                if not self.operator:
                    self._operator = '='

            # Is it a value range? (Both halves must be numeric;
            # otherwise fall through so the part raises below.)
            elif (not self.has_range and part.count('-') == 1 and
                  all(is_number(p) for p in part.split('-'))):
                part1, part2 = part.split('-')
                if self.operator and self.operator != '=':
                    raise ThresholdException(
                        "Operator {} not supported with range {} "
                        "in expression '{}'".format(
                            self.operator, part, self.expression))
                if self.value is not None:
                    raise ThresholdException(
                        "Value {} encountered before range {} "
                        "in expression '{}'".format(
                            self.value, part, self.expression))
                if self.unit:
                    raise ThresholdException(
                        "Unit '{}' encountered before range {} "
                        "in expression '{}'".format(
                            self.unit, part, self.expression))
                self._min_value = min(float(part1), float(part2))
                self._max_value = max(float(part1), float(part2))
                if not self.operator:
                    self._operator = '='

            # Is it a unit?
            elif part in self._all_units():
                if self.value is None and not self.has_range:
                    raise ThresholdException(
                        "Unit '{}' encountered before value or range "
                        "in expression '{}'".format(part, self.expression))
                if self._multiplier_for_unit(part) is None:
                    raise ThresholdException(
                        "Unit '{}' not valid for base unit '{}' "
                        "in expression '{}'".format(
                            part, self.base_unit, self.expression))
                self._unit = part

            # Well then, we don't know.
            else:
                raise ThresholdException(
                    "Unknown part '{}' in expression '{}'".format(
                        part, self._expression))

        if not self.has_range and self._value is None:
            raise ThresholdException(
                "Value/range missing in expression '{}'".format(
                    self._expression))

        if self._unit:
            # Convert from stated units to the default (1.0x) unit.
            multiplier = self._multiplier_for_unit(self._unit)
            if self._value is not None:
                self._value = self._value * multiplier
            if self.has_range:
                self._min_value = self._min_value * multiplier
                self._max_value = self._max_value * multiplier

        # Always use the default unit.
        self._unit = self._default_for_base_unit(self._base_unit)

        self._parsed = True

    @property
    def base_unit(self):
        """Returns the original base unit"""
        return self._base_unit

    @property
    def expression(self):
        """Returns the original expression"""
        return self._expression

    @property
    def has_range(self):
        """Returns True if a minimum/maximum value range exists"""
        # Compare against None so a range bound of 0 still counts.
        return self._min_value is not None and self._max_value is not None

    @property
    def max_value(self):
        """Returns the detected maximum value, if any"""
        return self._max_value

    @property
    def min_value(self):
        """Returns the detected minimum value, if any"""
        return self._min_value

    @property
    def operator(self):
        """Returns the operator"""
        return self._operator

    @property
    def parsed(self):
        """Returns True if the expression was successfully parsed"""
        return self._parsed

    @property
    def parts(self):
        """Returns the expression as a dictionary of parts"""
        result = {}
        if self.parsed:
            result['operator'] = self.operator
            if self.has_range:
                result['value'] = {
                    'min': self.min_value,
                    'max': self.max_value,
                }
            else:
                result['value'] = self.value
            result['units'] = self.unit
        return result

    @property
    def unit(self):
        """Returns the units"""
        return self._unit

    @property
    def value(self):
        """Returns the detected value, if any"""
        return self._value
diff --git a/conductor/conductor/conf/__init__.py b/conductor/conductor/conf/__init__.py
new file mode 100644
index 0000000..f2bbdfd
--- /dev/null
+++ b/conductor/conductor/conf/__init__.py
@@ -0,0 +1,19 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
diff --git a/conductor/conductor/conf/defaults.py b/conductor/conductor/conf/defaults.py
new file mode 100644
index 0000000..667e047
--- /dev/null
+++ b/conductor/conductor/conf/defaults.py
@@ -0,0 +1,40 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from oslo_config import cfg
+from oslo_middleware import cors
+
+
+def set_cors_middleware_defaults():
+ """Update default configuration options for oslo.middleware."""
+ # CORS Defaults
+ # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
+ # TODO(jdandrea): Adjust allow/expose headers for Conductor vs OpenStack
+ cfg.set_defaults(cors.CORS_OPTS,
+ allow_headers=['X-Auth-Token',
+ 'X-Conductor-Request-Id'],
+ expose_headers=['X-Auth-Token',
+ 'X-Conductor-Request-Id'],
+ allow_methods=['GET',
+ 'PUT',
+ 'POST',
+ 'DELETE',
+ 'OPTIONS',
+ 'HEAD']
+ )
diff --git a/conductor/conductor/conf/inventory_provider.py b/conductor/conductor/conf/inventory_provider.py
new file mode 100644
index 0000000..759ccf9
--- /dev/null
+++ b/conductor/conductor/conf/inventory_provider.py
@@ -0,0 +1,32 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from oslo_config import cfg
+
+from conductor.i18n import _
+
+INV_PROVIDER_EXT_MANAGER_OPTS = [
+ cfg.ListOpt('extensions',
+ default=['aai'],
+ help=_('Extensions list to use')),
+]
+
+
+def register_extension_manager_opts(cfg=cfg.CONF):
+ cfg.register_opts(INV_PROVIDER_EXT_MANAGER_OPTS, 'inventory_provider')
diff --git a/conductor/conductor/conf/service_controller.py b/conductor/conductor/conf/service_controller.py
new file mode 100644
index 0000000..f85f81f
--- /dev/null
+++ b/conductor/conductor/conf/service_controller.py
@@ -0,0 +1,32 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from oslo_config import cfg
+
+from conductor.i18n import _
+
+SVC_CONTROLLER_EXT_MANAGER_OPTS = [
+ cfg.ListOpt('extensions',
+ default=['sdnc'],
+ help=_('Extensions list to use')),
+]
+
+
+def register_extension_manager_opts(cfg=cfg.CONF):
+ cfg.register_opts(SVC_CONTROLLER_EXT_MANAGER_OPTS, 'service_controller')
diff --git a/conductor/conductor/i18n.py b/conductor/conductor/i18n.py
new file mode 100644
index 0000000..700e083
--- /dev/null
+++ b/conductor/conductor/i18n.py
@@ -0,0 +1,59 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""oslo.i18n integration module.
+
+See http://docs.openstack.org/developer/oslo.i18n/usage.html
+
+"""
+
+import oslo_i18n
+
+DOMAIN = "conductor"
+
+_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# The contextual translation function using the name "_C"
+# requires oslo.i18n >=2.1.0
+_C = _translators.contextual_form
+
+# The plural translation function using the name "_P"
+# requires oslo.i18n >=2.1.0
+_P = _translators.plural_form
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
+
+
+def translate(value, user_locale):
+ return oslo_i18n.translate(value, user_locale)
+
+
+def get_available_languages():
+ return oslo_i18n.get_available_languages(DOMAIN)
diff --git a/conductor/conductor/messaging.py b/conductor/conductor/messaging.py
new file mode 100644
index 0000000..84a34a9
--- /dev/null
+++ b/conductor/conductor/messaging.py
@@ -0,0 +1,73 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from oslo_config import cfg
+
+from conductor.common import music
+from conductor.common.music.messaging import component
+
+DEFAULT_URL = "__default__"
+TRANSPORTS = {}
+
+CONF = cfg.CONF
+
+# Pull in messaging server opts. We use them here.
+MESSAGING_SERVER_OPTS = component.MESSAGING_SERVER_OPTS
+CONF.register_opts(MESSAGING_SERVER_OPTS, group='messaging_server')
+
+
+def setup():
+ """Messaging setup, if any"""
+ # oslo_messaging.set_transport_defaults('conductor')
+ pass
+
+
+# TODO(jdandrea): Remove Music-specific aspects (keyspace -> namespace?)
+# TODO(jdandrea): Make Music an oslo rpc backend (difficulty level: high?)
+def get_transport(conf, url=None, optional=False, cache=True):
+ """Initialise the Music messaging layer."""
+ global TRANSPORTS
+ cache_key = url or DEFAULT_URL
+ transport = TRANSPORTS.get(cache_key)
+
+ if not transport or not cache:
+ try:
+ # "Somebody set up us the API." ;)
+ # Yes, we know an API is not a transport. Cognitive dissonance FTW!
+ # TODO(jdandrea): try/except to catch problems
+ keyspace = conf.messaging_server.keyspace
+ transport = music.api.API()
+ transport.keyspace_create(keyspace=keyspace)
+ except Exception:
+ if not optional or url:
+ # NOTE(sileht): oslo_messaging is configured but unloadable
+ # so reraise the exception
+ raise
+ return None
+ else:
+ if cache:
+ TRANSPORTS[cache_key] = transport
+ return transport
+
+
+def cleanup():
+ """Cleanup the Music messaging layer."""
+ global TRANSPORTS
+ for url in TRANSPORTS:
+ del TRANSPORTS[url]
diff --git a/conductor/conductor/middleware.py b/conductor/conductor/middleware.py
new file mode 100644
index 0000000..5476ff3
--- /dev/null
+++ b/conductor/conductor/middleware.py
@@ -0,0 +1,67 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+""" Middleware """
+
+# from oslo_config import cfg
+# import oslo_messaging
+#
+# from conductor.agent import plugin_base
+# from conductor import sample
+#
+# OPTS = [
+# cfg.MultiStrOpt('http_control_exchanges',
+# default=[cfg.CONF.nova_control_exchange,
+# cfg.CONF.glance_control_exchange,
+# cfg.CONF.neutron_control_exchange,
+# cfg.CONF.cinder_control_exchange],
+# help="Exchanges name to listen for notifications."),
+# ]
+#
+# cfg.CONF.register_opts(OPTS)
+#
+#
+# class HTTPRequest(plugin_base.NotificationBase,
+# plugin_base.NonMetricNotificationBase):
+# event_types = ['http.request']
+#
+# def get_targets(self, conf):
+# """Return a sequence of oslo_messaging.Target
+# This sequence is defining the exchange and topics to be connected for
+# this plugin.
+# """
+# return [oslo_messaging.Target(topic=topic, exchange=exchange)
+# for topic in self.get_notification_topics(conf)
+# for exchange in conf.http_control_exchanges]
+#
+# def process_notification(self, message):
+# yield sample.Sample.from_notification(
+# name=message['event_type'],
+# type=sample.TYPE_DELTA,
+# volume=1,
+# unit=message['event_type'].split('.')[1],
+# user_id=message['payload']['request'].get('HTTP_X_USER_ID'),
+# project_id=message['payload']['request'].get('HTTP_X_PROJECT_ID'),
+# resource_id=message['payload']['request'].get(
+# 'HTTP_X_SERVICE_NAME'),
+# message=message)
+#
+#
+# class HTTPResponse(HTTPRequest):
+# event_types = ['http.response']
diff --git a/conductor/conductor/opts.py b/conductor/conductor/opts.py
new file mode 100644
index 0000000..bb18ac2
--- /dev/null
+++ b/conductor/conductor/opts.py
@@ -0,0 +1,63 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import itertools
+
+import conductor.api.app
+import conductor.common.music.api
+import conductor.common.music.messaging.component
+import conductor.conf.inventory_provider
+import conductor.conf.service_controller
+import conductor.controller.service
+import conductor.controller.translator_svc
+import conductor.data.plugins.inventory_provider.aai
+import conductor.data.plugins.service_controller.sdnc
+import conductor.reservation.service
+import conductor.service
+import conductor.solver.service
+
+
+def list_opts():
+ return [
+ ('DEFAULT', itertools.chain(
+ conductor.api.app.OPTS,
+ conductor.service.OPTS)),
+ ('api', conductor.api.app.API_OPTS),
+ ('controller', itertools.chain(
+ conductor.controller.service.CONTROLLER_OPTS,
+ conductor.controller.translator_svc.CONTROLLER_OPTS)),
+ # ('data', conductor.data.plugins.inventory_provider.aai.DATA_OPTS),
+ ('inventory_provider',
+ itertools.chain(
+ conductor.conf.inventory_provider.
+ INV_PROVIDER_EXT_MANAGER_OPTS)
+ ),
+ ('aai', conductor.data.plugins.inventory_provider.aai.AAI_OPTS),
+ ('service_controller',
+ itertools.chain(
+ conductor.conf.service_controller.
+ SVC_CONTROLLER_EXT_MANAGER_OPTS)
+ ),
+ ('sdnc', conductor.data.plugins.service_controller.sdnc.SDNC_OPTS),
+ ('messaging_server',
+ conductor.common.music.messaging.component.MESSAGING_SERVER_OPTS),
+ ('music_api', conductor.common.music.api.MUSIC_API_OPTS),
+ ('solver', conductor.solver.service.SOLVER_OPTS),
+ ('reservation', conductor.reservation.service.reservation_OPTS),
+ ]
diff --git a/conductor/conductor/service.py b/conductor/conductor/service.py
new file mode 100644
index 0000000..5d86cce
--- /dev/null
+++ b/conductor/conductor/service.py
@@ -0,0 +1,104 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+# import socket
+import sys
+
+# from keystoneauth1 import loading as ka_loading
+from oslo_config import cfg
+import oslo_i18n
+from oslo_log import log
+# from oslo_policy import opts as policy_opts
+from oslo_reports import guru_meditation_report as gmr
+
+from conductor.conf import defaults
+# from conductor import keystone_client
+from conductor import messaging
+from conductor import version
+
+OPTS = [
+ # cfg.StrOpt('host',
+ # default=socket.gethostname(),
+ # sample_default='<your_hostname>',
+ # help='Name of this node, which must be valid in an AMQP '
+ # 'key. Can be an opaque identifier. For ZeroMQ only, must '
+ # 'be a valid host name, FQDN, or IP address.'),
+ # cfg.IntOpt('http_timeout',
+ # default=600,
+ # help='Timeout seconds for HTTP requests. Set it to None to '
+ # 'disable timeout.'),
+ cfg.StrOpt('keyspace',
+ default='conductor',
+ help='Music keyspace for content'),
+]
+cfg.CONF.register_opts(OPTS)
+
+# DATA_OPT = cfg.IntOpt('workers',
+# default=1,
+# min=1,
+# help='Number of workers for data service, '
+# 'default value is 1.')
+# cfg.CONF.register_opt(DATA_OPT, 'data')
+#
+# PARSER_OPT = cfg.IntOpt('workers',
+# default=1,
+# min=1,
+# help='Number of workers for parser service. '
+# 'default value is 1.')
+# cfg.CONF.register_opt(PARSER_OPT, 'parser')
+#
+# SOLVER_OPT = cfg.IntOpt('workers',
+# default=1,
+# min=1,
+# help='Number of workers for solver service. '
+# 'default value is 1.')
+# cfg.CONF.register_opt(SOLVER_OPT, 'solver')
+
+# keystone_client.register_keystoneauth_opts(cfg.CONF)
+
+
+def prepare_service(argv=None, config_files=None):
+ if argv is None:
+ argv = sys.argv
+
+ # FIXME(sileht): Use ConfigOpts() instead
+ conf = cfg.CONF
+
+ oslo_i18n.enable_lazy()
+ log.register_options(conf)
+ log_levels = (conf.default_log_levels +
+ ['futurist=INFO'])
+ log.set_defaults(default_log_levels=log_levels)
+ defaults.set_cors_middleware_defaults()
+ # policy_opts.set_defaults(conf)
+
+ conf(argv[1:], project='conductor', validate_default_values=True,
+ version=version.version_info.version_string(),
+ default_config_files=config_files)
+
+ # ka_loading.load_auth_from_conf_options(conf, "service_credentials")
+
+ log.setup(conf, 'conductor')
+ # NOTE(liusheng): guru cannot run with service under apache daemon, so when
+ # conductor-api running with mod_wsgi, the argv is [], we don't start
+ # guru.
+ if argv:
+ gmr.TextGuruMeditation.setup_autorun(version)
+ messaging.setup()
+ return conf
diff --git a/conductor/conductor/tests/__init__.py b/conductor/conductor/tests/__init__.py
new file mode 100644
index 0000000..f2bbdfd
--- /dev/null
+++ b/conductor/conductor/tests/__init__.py
@@ -0,0 +1,19 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
diff --git a/conductor/conductor/tests/data/__init__.py b/conductor/conductor/tests/data/__init__.py
new file mode 100644
index 0000000..f2bbdfd
--- /dev/null
+++ b/conductor/conductor/tests/data/__init__.py
@@ -0,0 +1,19 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
diff --git a/conductor/conductor/tests/functional/__init__.py b/conductor/conductor/tests/functional/__init__.py
new file mode 100644
index 0000000..f2bbdfd
--- /dev/null
+++ b/conductor/conductor/tests/functional/__init__.py
@@ -0,0 +1,19 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
diff --git a/conductor/conductor/tests/integration/__init__.py b/conductor/conductor/tests/integration/__init__.py
new file mode 100644
index 0000000..f2bbdfd
--- /dev/null
+++ b/conductor/conductor/tests/integration/__init__.py
@@ -0,0 +1,19 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
diff --git a/conductor/conductor/tests/tempest/__init__.py b/conductor/conductor/tests/tempest/__init__.py
new file mode 100644
index 0000000..f2bbdfd
--- /dev/null
+++ b/conductor/conductor/tests/tempest/__init__.py
@@ -0,0 +1,19 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
diff --git a/conductor/conductor/tests/testing-overview.txt b/conductor/conductor/tests/testing-overview.txt
new file mode 100644
index 0000000..fe6955d
--- /dev/null
+++ b/conductor/conductor/tests/testing-overview.txt
@@ -0,0 +1,67 @@
+Conductor testing
+-----------------
+
+All unit tests are to be placed in the conductor/tests directory,
+and tests might be organized by tested subsystem. Each subsystem directory
+must contain a separate blank __init__.py for test discovery to function.
+
+An example directory structure illustrating the above:
+
+conductor/tests
+|-- engine
+| |-- __init__.py
+| |-- test1.py
+| |-- test2.py
+| |-- test3.py
+|-- __init__.py
+|-- test_plan.py
+
+If a given test has no overlapping requirements (variables or same
+routines) a new test does not need to create a subdirectory under the
+test type.
+
+Implementing a test
+-------------------
+
+testrepository - http://pypi.python.org/pypi/testrepository is used to
+find and run tests, parallelize their runs, and record timing/results.
+
+If new dependencies are introduced upon the development of a test, the
+test-requirements.txt file needs to be updated so that the virtual
+environment will be able to successfully execute all tests.
+
+Running the tests
+-----------------
+
+The advised way of running tests is by using tox:
+
+$ tox
+
+By default, this will run the unit test suite with Python 2.7 and PEP8/HACKING
+style checks. To run only one type of test you can explicitly invoke tox
+with the test environment to use.
+
+$ tox -epy27 # test suite on python 2.7
+$ tox -epep8 # run full source code checker
+
+To run only a subset of tests, you can provide tox with a regex argument
+defining which tests to execute.
+
+$ tox -epy27 -- FooTests
+
+To use a debugger like pdb during the test run, one has to run tests directly
+with another, non-concurrent test runner instead of using testr.
+
+That also presumes you have a virtual env with all conductor dependencies active.
+Below is an example bash script using the testtools test runner that also allows
+running single tests by providing a regex.
+
+#!/usr/bin/env sh
+testlist=$(mktemp)
+testr list-tests "$1" > $testlist
+python -m testtools.run --load-list $testlist
+
+Another way to use debugger for testing is run tox with command:
+$ tox -e debug -- conductor.tests.test_foo.FooTest.test_foo_does_something
+
+Note: This last approach is mostly useful for running single tests. \ No newline at end of file
diff --git a/conductor/conductor/tests/unit/__init__.py b/conductor/conductor/tests/unit/__init__.py
new file mode 100644
index 0000000..f2bbdfd
--- /dev/null
+++ b/conductor/conductor/tests/unit/__init__.py
@@ -0,0 +1,19 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
diff --git a/conductor/conductor/version.py b/conductor/conductor/version.py
new file mode 100644
index 0000000..6cdecfe
--- /dev/null
+++ b/conductor/conductor/version.py
@@ -0,0 +1,22 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import pbr.version
+
+version_info = pbr.version.VersionInfo('of_has')
diff --git a/conductor/doc/api/README.md b/conductor/doc/api/README.md
new file mode 100644
index 0000000..59bc295
--- /dev/null
+++ b/conductor/doc/api/README.md
@@ -0,0 +1,283 @@
+# Homing API v1
+
+*Updated: 4 April 2017*
+
+This document describes the Homing API, used by the Conductor service. It is a work in progress and subject to frequent revision.
+
+# General API Information
+
+Authenticated calls that target a known URI but that use an HTTP method the implementation does not support return a 405 Method Not Allowed status. In addition, the HTTP OPTIONS method is supported for each known URI. In both cases, the Allow response header indicates the supported HTTP methods. See the API Errors section for more information about the error response structure.
+
+# API versions
+
+## List all Homing API versions
+
+**GET** ``/``
+
+**Normal response codes:** 200
+
+```json
+{
+ "versions": [
+ {
+ "status": "EXPERIMENTAL",
+ "id": "v1",
+ "updated": "2016-11-01T00:00:00Z",
+ "media-types": [
+ {
+ "base": "application/json",
+ "type": "application/vnd.ecomp.homing-v1+json"
+ }
+ ],
+ "links": [
+ {
+ "href": "http://135.197.226.83:8091/v1",
+ "rel": "self"
+ },
+ {
+ "href": "http://conductor.research.att.com/",
+ "type": "text/html",
+ "rel": "describedby"
+ }
+ ]
+ }
+ ]
+}
+```
+
+This operation does not accept a request body.
+
+# Plans
+
+## Create a plan
+
+**POST** ``/v1/plans``
+
+* **Normal response codes:** 201
+* **Error response codes:** badRequest (400), unauthorized (401), internalServerError (500)
+
+Request an inventory plan for one or more related service demands.
+
+The request includes or references a declarative **template**, consisting of:
+
+* **Parameters** that can be referenced like macros
+* **Demands** for service made against inventory
+* **Locations** that are common to the overall plan
+* **Constraints** made against demands, resulting in a set of inventory candidates
+* **Optimizations** to further narrow down the remaining candidates
+
+The response contains an inventory **plan**, consisting of one or more sets of recommended pairings of demands with an inventory candidate's attributes and region.
+
+### Request Parameters
+
+| Parameter | Style | Type | Description |
+|-----------|-------|------|-------------|
+| ``name`` (Optional) | plain | xsd:string | A name for the new plan. If a name is not provided, it will be auto-generated based on the homing template. This name must be unique within a given Conductor environment. When deleting a plan, its name will not become available for reuse until the deletion completes successfully. Must only contain letters, numbers, hyphens, full stops, underscores, and tildes (RFC 3986, Section 2.3). This parameter is immutable. |
+| ``id`` (Optional) | plain | csapi:UUID | The UUID of the plan. UUID is assigned by Conductor if no id is provided in the request. |
+| ``transaction_id`` | plain | csapi:UUID | The transaction id assigned by MSO. The logs should have this transaction id for tracking purposes. |
+| ``files`` (Optional) | plain | xsd:dict | Supplies the contents of files referenced in the template. Conductor templates can explicitly reference files by using the ``get_file`` intrinsic function. The value is a JSON object, where each key is a relative or absolute URI which serves as the name of a file, and the associated value provides the contents of the file. Additionally, some template authors encode their user data in a local file. The Homing client (e.g., a CLI) can examine the template for the ``get_file`` intrinsic function (e.g., ``{get_file: file.yaml}``) and add an entry to the ``files`` map with the path to the file as the name and the file contents as the value. Do not use this parameter to provide the content of the template located at the ``template_url`` address. Instead, use the ``template`` parameter to supply the template content as part of the request. |
+| ``template_url`` (Optional) | plain | xsd:string | A URI to the location containing the template on which to perform the operation. See the description of the ``template`` parameter for information about the expected template content located at the URI. This parameter is only required when you omit the ``template`` parameter. If you specify both parameters, this parameter is ignored. |
+| ``template``| plain | xsd:string or xsd:dict | The template on which to perform the operation. See the [Conductor Template Guide](/doc/template/README.md) for complete information on the format. This parameter is either provided as a ``string`` or ``dict`` in the JSON request body. For ``string`` content it may be a JSON- or YAML-formatted Conductor template. For ``dict`` content it must be a direct JSON representation of the Conductor template. This parameter is required only when you omit the ``template_url`` parameter. If you specify both parameters, this value overrides the ``template_url`` parameter value. |
+| ``timeout`` (Optional) | plain | xsd:number | The timeout for plan creation in minutes. Default is 1. |
+| ``limit`` (Optional) | plain | xsd:number | The maximum number of recommendations to return. Default is 1. |
+
+**NOTE**: ``files``, ``template_url``, and ``timeout`` are not yet supported.
+
+### Response Parameters
+
+| Parameter | Style | Type | Description |
+|-----------|-------|------|-------------|
+| ``plan`` | plain | xsd:dict | The ``plan`` object. |
+| ``id`` | plain | csapi:UUID | The UUID of the plan. |
+| ``transaction_id`` | plain | csapi:UUID | The transaction id assigned by the MSO. |
+| ``name`` | plain | xsd:string | The plan name. |
+| ``status`` | plain | xsd:string | The plan status. One of ``template``, ``translated``, ``solving``, ``solved``, or ``error``. See **Plan Status** table for descriptions of each value. |
+| ``message`` | plain | xsd:string | Additional context, if any, around the message status. If the status is ``error``, this may include a reason and suggested remediation, if available. |
+| ``links`` | plain | xsd:list | A list of URLs for the plan. Each URL is a JSON object with an ``href`` key indicating the URL and a ``rel`` key indicating its relationship to the plan in question. There may be multiple links returned. The ``self`` relationship identifies the URL of the plan itself. |
+| ``recommendations`` | plain | xsd:list | A list of one or more recommendations. A recommendation pairs each requested demand with an inventory provider, a single candidate, and an opaque dictionary of attributes. Refer to the Demand candidate schema in the [Conductor Template Guide](/doc/template/README.md) for further details. (Note that, when ``inventory_type`` is ``cloud`` the candidate's ``candidate_id`` field is redundant and thus omitted.) |
+
+### Plan Status
+
+| Status | Description |
+|--------|-------------|
+| ``template`` | Plan request and homing template have been received. Awaiting translation. |
+| ``translated`` | Homing template has been translated, and candidates have been obtained from inventory providers. Awaiting solving. |
+| ``solving`` | Search for a solution is in progress. This may incorporate requests to service controllers for additional information. |
+| ``solved`` | Search is complete. A solution with one or more recommendations was found. |
+| ``not found`` | Search is complete. No recommendations were found. |
+| ``error`` | An error was encountered. |
+
+#### State Diagram
+
+```text
+ ----------------------------------------
+ | |
+ | /---> solved ---> reserving ---> done
+ | / /
+ template -> translated -> solving ------> not found /
+ | ^ | \ /
+ | | conditionally | \---> error <----/
+ | | (see note) | ^
+ | \---------------/ |
+ \---------------------------------------/
+```
+**NOTE**: When Conductor's solver service is started in non-concurrent mode (the default), it will reset any plans found waiting and stuck in the ``solving`` state back to ``translated``.
+
+```json
+{
+ "name": "PLAN_NAME",
+ "template": "CONDUCTOR_TEMPLATE",
+ "limit": 3
+}
+```
+
+```json
+{
+ "plan": {
+ "name": "PLAN_NAME",
+ "id": "ee1c5269-c7f0-492a-8652-f0ceb15ed3bc",
+ "transaction_id": "6bca5f2b-ee7e-4637-8b58-1b4b36ed10f9",
+ "status": "solved",
+ "message", "Plan PLAN_NAME is solved.",
+ "links": [
+ {
+ "href": "http://homing/v1/plans/ee1c5269-c7f0-492a-8652-f0ceb15ed3bc",
+ "rel": "self"
+ }
+ ],
+ "recommendations": [
+ {
+ "DEMAND_NAME_1": {
+ "inventory_provider": "aai",
+ "service_resource_id": "4feb0545-69e2-424c-b3c4-b270e5f2a15d",
+ "candidate": {
+ "candidate_id": "99befee8-e8c0-425b-8f36-fb7a8098d9a9",
+ "inventory_type": "service",
+ "location_type": "aic",
+ "location_id": "dal01",
+ "host_id" : "vig20002vm001vig001"
+ },
+ "attributes": {OPAQUE-DICT}
+ },
+ "DEMAND_NAME_2": {
+ "inventory_provider": "aai",
+ "service_resource_id": "578eb063-b24a-4654-ba9e-1e5cf7eb9183",
+ "candidate": {
+ "inventory_type": "cloud",
+ "location_type": "aic",
+ "location_id": "dal02"
+ },
+ "attributes": {OPAQUE-DICT}
+ }
+ },
+ {
+ "DEMAND_NAME_1": {
+ "inventory_provider": "aai",
+ "service_resource_id": "4feb0545-69e2-424c-b3c4-b270e5f2a15d",
+ "candidate": {
+ "candidate_id": "99befee8-e8c0-425b-8f36-fb7a8098d9a9",
+ "inventory_type": "service",
+ "location_type": "aic",
+ "location_id": "dal03",
+ "host_id" : "vig20001vm001vig001"
+ },
+ "attributes": {OPAQUE-DICT}
+ },
+ "DEMAND_NAME_2": {
+ "inventory_provider": "aai",
+ "service_resource_id": "578eb063-b24a-4654-ba9e-1e5cf7eb9183",
+ "candidate": {
+ "inventory_type": "cloud",
+ "location_type": "aic",
+ "location_id": "dal04"
+ },
+ "attributes": {OPAQUE-DICT}
+ }
+ },
+ ...
+ ]
+ }
+}
+```
+
+## Show plan details
+
+**GET** ``/v1/plans/{plan_id}``
+
+* **Normal response codes:** 200
+* **Error response codes:** unauthorized (401), itemNotFound (404)
+
+### Request parameters
+
+| Parameter | Style | Type | Description |
+|-------------|-------|------------|---------------------------------------------------|
+| ``plan_id`` | plain | csapi:UUID | The UUID of the plan. |
+
+### Response Parameters
+
+See the Response Parameters for **Create a plan**.
+
+## Delete a plan
+
+**DELETE** ``/v1/plans/{plan_id}``
+
+* **Normal response codes:** 204
+* **Error response codes:** badRequest (400), unauthorized (401), itemNotFound (404)
+
+### Request parameters
+
+| Parameter | Style | Type | Description |
+|-------------|-------|------------|---------------------------------------------------|
+| ``plan_id`` | plain | csapi:UUID | The UUID of the plan. |
+
+This operation does not accept a request body and does not return a response body.
+
+## API Errors
+
+In the event of an error with a status other than unauthorized (401), a detailed response body is returned.
+
+### Response parameters
+
+| Parameter | Style | Type | Description |
+|-------------|-------|------------|---------------------------------------------------|
+| ``title`` | plain | xsd:string | Human-readable name. |
+| ``explanation`` | plain | xsd:string | Detailed explanation with remediation (if any). |
+| ``code`` | plain | xsd:int | HTTP Status Code. |
+| ``error`` | plain | xsd:dict | Error dictionary. Keys include **message**, **traceback**, and **type**. |
+| ``message`` | plain | xsd:string | Internal error message. |
+| ``traceback`` | plain | xsd:string | Python traceback (if available). |
+| ``type`` | plain | xsd:string | HTTP Status class name (from python-webob) |
+
+#### Examples
+
+A plan with the name "pl an" is considered a bad request because the name contains a space.
+
+```json
+{
+ "title": "Bad Request",
+ "explanation": "-> name -> pl an did not pass validation against callable: plan_name_type (must contain only uppercase and lowercase letters, decimal digits, hyphens, periods, underscores, and tildes [RFC 3986, Section 2.3])",
+ "code": 400,
+ "error": {
+ "message": "The server could not comply with the request since it is either malformed or otherwise incorrect.",
+ "type": "HTTPBadRequest"
+ }
+}
+```
+
+The HTTP COPY method was attempted but is not allowed.
+
+```json
+{
+ "title": "Method Not Allowed",
+ "explanation": "The COPY method is not allowed.",
+ "code": 405,
+ "error": {
+ "message": "The server could not comply with the request since it is either malformed or otherwise incorrect.",
+ "type": "HTTPMethodNotAllowed"
+ }
+}
+```
+
+## Contact ##
+
+Shankar Narayanan <shankarpnsn@gmail.com>
diff --git a/conductor/doc/distribution/README.md b/conductor/doc/distribution/README.md
new file mode 100644
index 0000000..33eb9d1
--- /dev/null
+++ b/conductor/doc/distribution/README.md
@@ -0,0 +1,551 @@
+# Python/Linux Distribution Notes
+
+*Updated: 10 Nov 2017 23:30 GMT*
+
+This document exists to help bridge the gap between the Conductor python package and any downstream distribution. The steps outlined herein may be taken into consideration when creating an AT&T SWM package, Ubuntu/Debian package, Chef cookbook, or Ansible playbook.
+
+## Components
+
+Conductor consists of five services that work together:
+
+* **``conductor-api``**: An HTTP REST API
+* **``conductor-controller``**: Validation, translation, and status/results
+* **``conductor-data``**: Inventory provider and service controller gateway
+* **``conductor-solver``**: Processing and solution calculation
+* **``conductor-reservation``**: Reserves the suggested solution solved by Solver component.
+
+## Workflow
+
+* Deployment **plans** are created, viewed, and deleted via ``conductor-api`` and its [REST API](doc/api/README.md).
+* Included within each ``conductor-api`` plan request is a [Homing Template](doc/template/README.md).
+* Homing Templates describe a set of inventory demands and constraints to be solved against.
+* ``conductor-api`` hands off all API requests to ``conductor-controller`` for handling.
+* All deployment plans are assigned a unique identifier (UUID-4), which can be used to check for solution status asynchronously. (Conductor does not support callbacks at this time.)
+* ``conductor-controller`` ensures templates are well-formed and valid. Errors and remediation are made visible through ``conductor-api``. When running in debug mode, the API will also include a python traceback in the response body, if available.
+* ``conductor-controller`` uses ``conductor-data`` to resolve demands against a particular **inventory provider** (e.g., A&AI).
+* ``conductor-controller`` translates the template into a format suitable for solving.
+* As each template is translated, ``conductor-solver`` begins working on it.
+* ``conductor-solver`` uses ``conductor-data`` to resolve constraints against a particular **service controller** (e.g., SDN-C).
+* ``conductor-solver`` determines the most suitable inventory to recommend.
+* ``conductor-reservation`` attempts to reserve the solved solution in SDN-GC
+
+**NOTE**: There is no Command Line Interface or Python API Library at this time.
+
+## Pre-Flight and Pre-Installation Considerations
+
+### AT&T Application Identifiers and Roles
+
+* App/Tool Name: ECOMP Conductor
+* MOTS Application ID: 26213
+* MechID: m04308
+* ECOMP Feature ID: F13704
+* PMT: 461306
+* UAM Role Name: Conductor Production Support
+* UAM Role id: 0000025248
+
+### Root
+
+Be aware that some commands may require ``sudo``, depending on the account being used to perform the installation.
+
+### Proxy
+
+If line-of-sight to internet-facing repositories is permitted and available, set the following shell environment variables if AT&T proxy services are required:
+
+```bash
+$ export http_proxy="http://one.proxy.att.com:8080/"
+$ export https_proxy="http://one.proxy.att.com:8080/"
+```
+
+### Requirements
+
+Conductor is officially supported on [Ubuntu 14.04 LTS (Trusty Tahr)](http://releases.ubuntu.com/14.04/), though it should also work on newer releases.
+
+Ensure the following Ubuntu packages are present, as they may not be included by default:
+
+* [libffi-dev](http://packages.ubuntu.com/trusty/libffi-dev)
+* [postgresql-server-dev-9.3](http://packages.ubuntu.com/trusty/postgresql-server-dev-9.3)
+* [python2.7](http://packages.ubuntu.com/trusty/python2.7)
+
+``conductor-api`` may be run as-is for development and test purposes. When used in a production environment, it is recommended that ``conductor-api`` run under a multithreaded httpd service supporting [WSGI](https://www.wikipedia.org/wiki/Web_Server_Gateway_Interface), tuned as appropriate.
+
+Configuration instructions for **apache2 httpd** and **nginx** are included herein. Respective package requirements are:
+
+* [apache2](http://packages.ubuntu.com/trusty/apache2) and [libapache2-mod-wsgi](http://packages.ubuntu.com/trusty/libapache2-mod-wsgi)
+* [nginx](http://packages.ubuntu.com/trusty/nginx) and [uwsgi](http://packages.ubuntu.com/trusty/uwsgi)
+
+All Conductor services use AT&T [Music](https://github.com/att/music) for data storage/persistence and/or as a RPC transport mechanism. Consult the [Music Local Installation Guide](https://github.com/att/music/blob/master/README.md) for installation/configuration steps.
+
+### Networking
+
+All conductor services require line-of-sight access to all Music servers/ports.
+
+The ``conductor-api`` service uses TCP port 8091.
+
+### Security
+
+``conductor-api`` is accessed via HTTP. SSL/TLS certificates and AuthN/AuthZ (e.g., AAF) are not supported at this time.
+
+Conductor makes use of plugins that act as gateways to *inventory providers* and *service controllers*. At present, two plugins are supported out-of-the-box: **A&AI** and **SDN-C**, respectively.
+
+A&AI requires two-way SSL/TLS. Certificates must be registered and whitelisted with A&AI. SDN-C uses HTTP Basic Authentication. Consult with each respective service for official information on how to obtain access.
+
+### Storage
+
+For a cloud environment in particular, it may be desirable to use a separate block storage device (e.g., an OpenStack Cinder volume) for logs, configuration, and other data persistence. In this way, it becomes a trivial matter to replace the entire VM if necessary, followed by reinstallation of the app and any supplemental configuration. Take this into consideration when setting various Conductor config options.
+
+### Python Virtual Environments
+
+At present, Conductor installation is only supported at the (upstream) python package level and not the (downstream) Ubuntu distribution or SWM package levels.
+
+To mitigate/eliminate the risk of introducing conflicts with other python applications or Ubuntu/SWM package dependencies, consider installing Conductor in a [python virtual environment](http://docs.python-guide.org/en/latest/dev/virtualenvs/) (or *venv* for short).
+
+Example venv-aware WSGI app configurations, sysvinit scripts, and upstart scripts can be found in the Conductor repository under [examples](/examples/).
+
+### Python Package Dependencies
+
+Conductor is installed using the python ``pip`` command. ``pip`` uses a python project's [requirements manifest](/requirements.txt) to install all python module dependencies.
+
+**NOTE**: When line-of-sight access to a PyPI-compatible package index is not available, advance installation of Conductor's python package dependencies is required *before* installation.
+
+### Other Production Environment Considerations
+
+TBD. ``:)``
+
+Over time, considerations may include services such as:
+
+* AAF
+* AppMetrics
+* Introscope
+* Nagios
+* Splunk
+* UAM
+
+## Installation and Configuration
+
+**IMPORTANT**: Perform the steps in this section after *optionally* configuring and activating a python virtual environment.
+
+### Installing From a PyPI Repository
+
+In ONAP, the ``conductor`` package can be found in the ONAP package repository.
+
+Installation is via the ``pip`` command. Here is an example ``pip.conf`` file that uses both the internet and intranet-facing PyPI repositories:
+
+```ini
+[global]
+index = https://pypi.python.org/pypi
+index-url = https://pypi.python.org/simple
+extra-index-url =
+trusted-host =
+```
+
+Once the configuration is in place, installation is simple:
+
+```bash
+$ pip install ecomp-conductor
+```
+
+To upgrade or downgrade, simply re-run ``pip install`` using the appropriate ``pip`` command line options.
+
+**NOTE**: Be sure proxy settings are in place if they're required to access ``pypi.python.org``.
+
+### Installing From Source
+
+Conductor source in ONAP is maintained in https://gerrit.onap.org/r/optf/has.
+
+Clone the git repository, and then install from within the ``conductor`` directory:
+
+```bash
+$ git clone https://gerrit.onap.org/r/optf/has
+Cloning into 'conductor'...
+remote: Counting objects: 2291, done.
+remote: Compressing objects: 88% (1918/2179)
+remote: Compressing objects: 100% (2179/2179), done.
+remote: Total 2291 (delta 1422), reused 0 (delta 0)
+Receiving objects: 100% (2291/2291), 477.59 KiB | 0 bytes/s, done.
+Resolving deltas: 100% (1422/1422), done.
+$ cd conductor
+$ pip install .
+```
+
+The latest source can be pulled from ONAP at any time and reinstalled:
+
+```bash
+$ git pull
+$ pip install .
+```
+
+### Verifying Installation
+
+Each of the five Conductor services may be invoked with the ``--help`` option:
+
+```bash
+$ conductor-api -- --help
+$ conductor-controller --help
+$ conductor-data --help
+$ conductor-solver --help
+$ conductor-reservation --help
+```
+
+**NOTE**: The ``conductor-api`` command is deliberate. ``--`` is used as a separator between the arguments used to start the WSGI server and the arguments passed to the WSGI application.
+
+## Post-Flight and Post-Installation Considerations
+
+### User and Group
+
+It's good practice to create an unprivileged account (e.g., a user/group named ``conductor``) and run all Conductor services as that user:
+
+```bash
+$ sudo addgroup --system conductor
+$ sudo adduser --system --home /var/lib/conductor --ingroup conductor --no-create-home --shell /bin/false conductor
+```
+
+### SSL/TLS Certificates
+
+The A&AI Inventory Provider Plugin requires two-way SSL/TLS. After provisioning a certificate per A&AI guidelines, it will be necessary to securely install the certificate, key, and certificate authority bundle.
+
+When running conductor services as ``conductor:conductor`` (recommended), consider co-locating all of these files under the configuration directory. For example, when using ``/etc/conductor``:
+
+```bash
+$ # Certificate files (crt extension, 644 permissions)
+$ sudo mkdir /etc/conductor/ssl/certs
+$ # Private Certificate Key files (key extension, 640 permissions)
+$ sudo mkdir /etc/conductor/ssl/private
+$ # Certificate Authority (CA) Bundles (crt extension, 644 permissions)
+$ sudo mkdir /etc/conductor/ssl/ca-certificates
+$ # Add files to newly created directories, then set ownership
+$ sudo chown -R conductor:conductor /etc/conductor/ssl
+```
+
+For a hypothetical domain name ``imacculate.client.research.att.com``, example filenames could be as follows:
+
+```bash
+$ find ssl -type f -printf '%M %u:%g %f\n'
+-rw-r----- conductor:conductor imacculate.client.research.att.com.key
+-rw-r--r-- conductor:conductor Symantec_Class_3_Secure_Server_CA.crt
+-rw-r--r-- conductor:conductor imacculate.client.research.att.com.crt
+```
+
+When running conductor services as ``root``, consider these existing Ubuntu filesystem locations for SSL/TLS files:
+
+**Certificate** files (``crt`` extension) are typically stored in ``/etc/ssl/certs`` with ``root:root`` ownership and 644 permissions.
+
+**Private Certificate Key** files (``key`` extension) are typically stored in ``/etc/ssl/private`` with ``root:root`` ownership and 640 permissions.
+
+**Certificate Authority (CA) Bundles** (``crt`` extension) are typically stored in ``/usr/share/ca-certificates/conductor`` with ``root:root`` ownership, and 644 permissions. These Bundle files are then symlinked within ``/etc/ssl/certs`` using equivalent filenames, a ``pem`` extension, and ``root:root`` ownership.
+
+**NOTE**: LUKS (Linux Unified Key Setup) is not supported by Conductor at this time.
+
+### Configuration
+
+Configuration files are located in ``etc/conductor`` relative to the python environment Conductor is installed in.
+
+To generate a sample configuration file, change to the directory just above where ``etc/conductor`` is located (e.g., `/` for the default environment, or the virtual environment root directory). Then:
+
+```bash
+$ oslo-config-generator --config-file=etc/conductor/conductor-config-generator.conf
+```
+
+This will generate ``etc/conductor/conductor.conf.sample``.
+
+Because the configuration directory and files will include credentials, consider removing world permissions:
+
+```bash
+$ find etc/conductor -type f -exec chmod 640 {} +
+$ find etc/conductor -type d -exec chmod 750 {} +
+```
+
+The sample config may then be copied and edited. Be sure to backup any previous ``conductor.conf`` if necessary.
+
+```bash
+$ cd etc/conductor
+$ cp -p conductor.conf.sample conductor.conf
+```
+
+``conductor.conf`` is fully annotated with descriptions of all options. Defaults are included, with all options commented out. Conductor will use defaults even if an option is not present in the file. To change an option, simply uncomment it and edit its value.
+
+With the exception of the ``DEFAULT`` section, it's best to restart the Conductor services after making any config changes. In some cases, only one particular service actually needs to be restarted. When in doubt, however, it's best to restart all of them.
+
+A few options in particular warrant special attention:
+
+```
+[DEFAULT]
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+# Note: This option can be changed without restarting.
+#debug = false
+```
+
+For more verbose logging across all Conductor services, set ``debug`` to true.
+
+```
+[aai]
+
+# Base URL for A&AI, up to and not including the version, and without a
+# trailing slash. (string value)
+#server_url = https://controller:8443/aai
+
+# SSL/TLS certificate file in pem format. This certificate must be registered
+# with the A&AI endpoint. (string value)
+#certificate_file = certificate.pem
+
+# Private Certificate Key file in pem format. (string value)
+#certificate_key_file = certificate_key.pem
+
+# Certificate Authority Bundle file in pem format. Must contain the appropriate
+# trust chain for the Certificate file. (string value)
+#certificate_authority_bundle_file = certificate_authority_bundle.pem
+```
+
+Set ``server_url`` to the A&AI server URL, to but not including the version, omitting any trailing slash. Conductor supports A&AI API v9 at a minimum.
+
+Set the ``certificate`` prefixed keys to the appropriate SSL/TLS-related files.
+
+**IMPORTANT**: The A&AI server may have a mismatched host/domain name and SSL/TLS certificate. In such cases, certificate verification will fail. To mitigate this, ``certificate_authority_bundle_file`` may be set to an empty value. While Conductor normally requires a CA Bundle (otherwise why bother using SSL/TLS), this requirement has been temporarily relaxed so that development and testing may continue.
+
+```
+[messaging_server]
+
+# Log debug messages. Default value is False. (boolean value)
+#debug = false
+```
+
+When the ``DEFAULT`` section's ``debug`` option is ``true``, set this section's ``debug`` option to ``true`` to enable detailed Conductor-side RPC-over-Music debug messages.
+
+Be aware, it is voluminous. "You have been warned." ``:)``
+
+```
+[music_api]
+
+# List of hostnames (round-robin access) (list value)
+#hostnames = localhost
+
+# Log debug messages. Default value is False. (boolean value)
+#debug = false
+```
+
+Set ``hostnames`` to match wherever the Music REST API is being hosted (wherever Apache Tomcat and ``MUSIC.war`` are located).
+
+When the ``DEFAULT`` section's ``debug`` option is ``true``, set this section's ``debug`` option to ``true`` to enable detailed Conductor-side MUSIC API debug messages.
+
+The previous comment around the volume of log lines applies even more so here. (Srsly. We're not kidding.)
+
+**IMPORTANT**: Conductor does not presently use Music's atomic consistency features due to concern around lock creation/acquisition. Instead, Conductor uses eventual consistency. For this reason, consistency issues may occur when using Music in a multi-server, High Availability configuration.
+
+```
+[sdnc]
+
+# Base URL for SDN-C. (string value)
+#server_url = https://controller:8443/restconf
+
+# Basic Authentication Username (string value)
+#username = <None>
+
+# Basic Authentication Password (string value)
+#password = <None>
+
+```
+
+Set ``server_url`` to the SDN-C server URL, omitting any trailing slash.
+
+Set ``username`` and ``password`` to the appropriate values as directed by SDN-C.
+
+### Running for the First Time
+
+Each Conductor component may be run interactively. In this case, the user does not necessarily matter.
+
+When running interactively, it is suggested to run each command in a separate terminal session and in the following order:
+
+```bash
+conductor-data --config-file=/etc/conductor/conductor.conf
+conductor-controller --config-file=/etc/conductor/conductor.conf
+conductor-solver --config-file=/etc/conductor/conductor.conf
+conductor-reservation --config-file=/etc/conductor/conductor.conf
+conductor-api --port=8091 -- --config-file=/etc/conductor/conductor.conf
+```
+
+Optionally, use an application like [screen](http://packages.ubuntu.com/trusty/screen) to nest all five terminal sessions within one detachable session. (This is also the same package used by [DevStack](https://docs.openstack.org/developer/devstack/).)
+
+To verify that ``conductor-api`` can be reached, browse to ``http://HOST:8091/``, where HOST is the hostname ``conductor-api`` is running on. No AuthN/AuthZ is required at this time. Depending on network considerations, it may be necessary to use a command like ``wget`` instead of a desktop browser.
+
+The response should look similar to:
+
+```json
+{
+ "versions": {
+ "values": [
+ {
+ "status": "development",
+ "updated": "2016-11-01T00:00:00Z",
+ "media-types": [
+ {
+ "base": "application/json",
+ "type": "application/vnd.ecomp.homing-v1+json"
+ }
+ ],
+ "id": "v1",
+ "links": [
+ {
+ "href": "http://127.0.0.1:8091/v1",
+ "rel": "self"
+ },
+ {
+ "href": "http://conductor.research.att.com/",
+ "type": "text/html",
+ "rel": "describedby"
+ }
+ ]
+ }
+ ]
+ }
+}
+```
+
+### Sample API Calls and Homing Templates
+
+A [Postman](http://getpostman.com/) collection illustrating sample requests is available upon request. The collection will also be added in a future revision.
+
+[Sample homing templates](/doc/examples/README.md) are also available.
+
+### Ubuntu Service Scripts
+
+Ubuntu sysvinit (init.d) and upstart (init) scripts are typically installed at the Ubuntu package level. Since there is no such packaging at this time, example scripts have been provided in the repository.
+
+To install, place all Conductor [sysvinit scripts](/examples/distribution/ubuntu/init.d) in ``/etc/init.d``, and all [upstart scripts](/examples/distribution/ubuntu/init) in ``/etc/init``.
+
+Set file permissions:
+
+```bash
+$ sudo chmod 644 /etc/init/conductor*
+$ sudo chmod 755 /etc/init.d/conductor*
+```
+
+If a python virtual environment is being used, edit each ``/etc/init/conductor*`` and ``/etc/init.d/conductor*`` prefixed file so that ``PYTHON_HOME`` is set to the python virtual environment root directory.
+
+Next, enable the scripts:
+
+```bash
+$ sudo update-rc.d conductor-api defaults
+$ sudo update-rc.d conductor-controller defaults
+$ sudo update-rc.d conductor-data defaults
+$ sudo update-rc.d conductor-solver defaults
+$ sudo update-rc.d conductor-reservation defaults
+$ sudo initctl reload-configuration
+```
+
+Conductor components may now be started/stopped like any other Ubuntu service, for example:
+
+```bash
+$ sudo service conductor-api start
+$ sudo service conductor-api status
+$ sudo service conductor-api restart
+$ sudo service conductor-api stop
+```
+
+Conductor service scripts automatically create directories for ``log``, ``lock``, ``run``, and ``lib`` files, e.g., ``/var/log/conductor`` and so on.
+
+### Log File Rotation
+
+Sample ``logrotate.d`` configuration files have been provided in the repository.
+
+To install, place all Conductor [logrotate files](/examples/distribution/ubuntu/logrotate.d) in ``/etc/logrotate.d``.
+
+Set file ownership and permissions:
+
+```bash
+$ sudo chown root:root /etc/logrotate.d/conductor*
+$ sudo chmod 644 /etc/logrotate.d/conductor*
+```
+
+``logrotate.d`` automatically recognizes new files at the next log rotation opportunity and does not require restarting.
+
+## Running conductor-api Under apache2 httpd and mod_wsgi
+
+Sample configuration files have been provided in the repository.
+
+These instructions presume a ``conductor`` user exists. See the **Service Scripts** section for details.
+
+First, set up a few directories:
+
+```bash
+$ sudo mkdir -p /var/www/conductor
+$ sudo mkdir /var/log/apache2/conductor
+```
+
+To install, place the Conductor [WSGI application file](/conductor/api/app.wsgi) in ``/var/www/conductor``.
+
+Set the owner/group of both directories/files to ``conductor``:
+
+```bash
+$ sudo chown -R conductor:conductor /var/log/apache2/conductor /var/www/conductor
+```
+
+Next, place the Conductor [apache2 httpd site config file](/examples/apache2/conductor.conf) in ``/etc/apache2/sites-available``.
+
+Set the owner/group to ``root``:
+
+```bash
+$ sudo chown -R root:root /etc/apache2/sites-available/conductor.conf
+```
+
+If Conductor was installed in a python virtual environment, append ``python-home=VENV`` to ``WSGIDaemonProcess``, where ``VENV`` is the python virtual environment root directory.
+
+**IMPORTANT**: Before proceeding, disable the ``conductor-api`` sysvinit and upstart services, as the REST API will now be handled by apache2 httpd. Otherwise there will be a port conflict, and you will be sad.
+
+Enable the Conductor site, ensure the configuration syntax is valid, and gracefully restart apache2 httpd.
+
+```bash
+$ sudo a2ensite conductor
+$ sudo apachectl -t
+Syntax OK
+$ sudo apachectl graceful
+```
+
+To disable the Conductor site, run ``sudo a2dissite conductor``, then gracefully restart once again. Optionally, re-enable the ``conductor-api`` sysvinit and upstart services.
+
+## Running conductor-api Under nginx and uWSGI
+
+Sample configuration files have been provided in the repository.
+
+These instructions presume a ``conductor`` user exists. See the **Service Scripts** section for details.
+
+To install, place the Conductor [nginx config files](/examples/nginx/) and [WSGI application file](/conductor/api/app.wsgi) in ``/etc/nginx`` (taking care to backup any prior configuration files). It may be desirable to incorporate Conductor's ``nginx.conf`` into the existing config.
+
+Rename ``app.wsgi`` to ``conductor.wsgi``:
+
+```bash
+$ cd /etc/nginx
+$ sudo mv app.wsgi conductor.wsgi
+```
+
+In ``nginx.conf``, set ``CONDUCTOR_API_FQDN`` to the server name.
+
+**IMPORTANT**: Before proceeding, disable the ``conductor-api`` sysvinit and upstart services, as the REST API will now be handled by nginx. Otherwise there will be a port conflict, and you will be sad.
+
+Restart nginx:
+
+```bash
+$ sudo service nginx restart
+```
+
+Then, run ``conductor-api`` under nginx using uWSGI:
+
+```bash
+$ sudo uwsgi -s /tmp/uwsgi.sock --chmod-socket=777 --wsgi-file /etc/nginx/conductor.wsgi --callable application --set port=8091
+```
+
+To use a python virtual environment, add ``--venv VENV`` to the ``uwsgi`` command, where ``VENV`` is the python virtual environment root directory.
+
+## Uninstallation
+
+Activate a virtual environment (venv) first, if necessary, then uninstall with:
+
+```bash
+$ pip uninstall ecomp-conductor
+```
+
+Remove any previously made configuration file changes, user accounts, Ubuntu/SWM packages, and other settings as needed.
+
+## Bug Reporting and Feedback
+
+... is encouraged. Please raise an issue at: https://jira.onap.org/projects/OPTFRA/summary \ No newline at end of file
diff --git a/conductor/doc/examples/README.md b/conductor/doc/examples/README.md
new file mode 100644
index 0000000..84e7e6b
--- /dev/null
+++ b/conductor/doc/examples/README.md
@@ -0,0 +1,96 @@
+# Example Conductor Templates
+
+*Updated: 10 Oct 2017*
+
+## Example 1
+
+```yaml
+
+# Homing Specification Version
+homing_template_version: 2017-10-10
+
+# Runtime order Parameters
+parameters:
+ service_name: Residential vCPE
+ service_id: vcpe_service_id
+ customer_lat: 32.897480
+ customer_long: -97.040443
+
+# List of geographical locations
+locations:
+ customer_loc:
+ latitude: {get_param: customer_lat}
+ longitude: {get_param: customer_long}
+
+# List of VNFs (demands) to be homed
+demands:
+ vGMuxInfra:
+ - inventory_provider: aai
+ inventory_type: service
+ attributes:
+ equipment_type: vG_Mux
+ customer_id: some_company
+ excluded_candidates:
+ - candidate_id:
+ 1ac71fb8-ad43-4e16-9459-c3f372b8236d
+ existing_placement:
+ - candidate_id: 21d5f3e8-e714-4383-8f99-cc480144505a
+ vG:
+ - inventory_provider: aai
+ inventory_type: service
+ attributes:
+ equipment_type: vG
+ modelId: vG_model_id
+ customer_id: some_company
+ excluded_candidates:
+ - candidate_id: 1ac71fb8-ad43-4e16-9459-c3f372b8236d
+ existing_placement:
+ - candidate_id: 21d5f3e8-e714-4383-8f99-cc480144505a
+ - inventory_provider: aai
+ inventory_type: cloud
+
+# List of homing policies (constraints)
+constraints:
+ # distance constraint
+ - constraint_vgmux_customer:
+ type: distance_to_location
+ demands: [vGMuxInfra]
+ properties:
+ distance: < 100 km
+ location: customer_loc
+ # cloud region co-location constraint
+ - colocation:
+ type: zone
+ demands: [vGMuxInfra, vG]
+ properties:
+ qualifier: same
+ category: region
+ # platform capability constraint
+ - numa_cpu_pin_capabilities:
+ type: attribute
+ demands: [vG]
+ properties:
+ evaluate:
+ vcpu_pinning: True
+ numa_topology: numa_spanning
+ # cloud provider constraint
+ - cloud_version_capabilities:
+ type: attribute
+ demands: [vGMuxInfra]
+ properties:
+ evaluate:
+ cloud_version: 1.11.84
+ cloud_provider: AWS
+
+# Objective function to minimize
+optimization:
+ minimize:
+ sum:
+ - {distance_between: [customer_loc, vGMuxInfra]}
+ - {distance_between: [customer_loc, vG]}
+
+```
+
+## Contact ##
+
+Shankar Narayanan <shankarpnsn@gmail.com>
diff --git a/conductor/doc/glossary/README.md b/conductor/doc/glossary/README.md
new file mode 100644
index 0000000..5b673ac
--- /dev/null
+++ b/conductor/doc/glossary/README.md
@@ -0,0 +1,26 @@
+# Glossary
+
+| Term | Description |
+|------|-------------|
+| **A&AI** | Active and Available Inventory |
+| **Cloud** | tbd |
+| **Conductor** | The AIC/ECOMP Homing service. |
+| **Constraint** | tbd |
+| **Cost Function** | tbd |
+| **Data Center** | tbd |
+| **DCAE** | Data Collection, Analytics, and Events |
+| **Demand** | tbd |
+| **Homing** | Canonical service name for Conductor. |
+| **Host** | tbd |
+| **Inventory** | tbd |
+| **Inventory Source** | tbd |
+| **LCP (and vLCP)** | Local Control Plane (or virtual LCP). Synonymous with **Region**. |
+| **Location** | tbd |
+| **Network Link** | tbd |
+| **Region** | Synonymous with **LCP**. |
+| **Service Inventory** | tbd |
+| **Site** | tbd |
+
+## Contact ##
+
+Joe D'Andrea <jdandrea@research.att.com>
diff --git a/conductor/doc/template/README.md b/conductor/doc/template/README.md
new file mode 100644
index 0000000..f8afcca
--- /dev/null
+++ b/conductor/doc/template/README.md
@@ -0,0 +1,875 @@
+###### Apache License, Version 2.0
+
+===========================
+
+``Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.``
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+
+# Homing Specification Guide
+
+*Updated: 10 October 2017*
+
+This document describes the Homing Template format, used by the Homing service. It is a work in progress and subject to frequent revision.
+
+## Template Structure
+
+Homing templates are defined in YAML and follow the structure outlined below.
+
+```yaml
+homing_template_version: 2017-10-10
+parameters:
+ PARAMETER_DICT
+locations:
+ LOCATION_DICT
+demands:
+ DEMAND_DICT
+constraints:
+ CONSTRAINT_DICT
+reservations:
+ RESERVATION_DICT
+optimization:
+ OPTIMIZATION
+```
+
+* ``homing_template_version``: This key with value 2017-10-10 (or a later date) indicates that the YAML document is a Homing template of the specified version.
+* ``parameters``: This section allows for specifying input parameters that have to be provided when instantiating the homing template. Typically, this section is used for providing runtime parameters (like SLA thresholds), which in turn are used in the existing homing policies. The section is optional and can be omitted when no input is required.
+* ``locations``: This section contains the declaration of geographic locations. This section is optional and can be omitted when no input is required.
+* ``demands``: This section contains the declaration of demands. This section with at least one demand should be defined in any Homing template, or the template would not really do anything when being instantiated.
+* ``constraints``: This section contains the declaration of constraints. The section is optional and can be omitted when no input is required.
+* ``reservations``: This section contains the declaration of required reservations. This section is optional and can be omitted when reservations are not required.
+* ``optimization``: This section allows the declaration of an optimization. This section is optional and can be omitted when no input is required.
+
+## Homing Template Version
+
+The value of ``homing_template_version`` tells HAS not only the format of the template but also features that will be validated and supported. Only one value is supported: ``2017-10-10`` in the initial release of HAS.
+
+```yaml
+homing_template_version: 2017-10-10
+```
+
+## Parameters
+
+The **parameters** section allows for specifying input parameters that have to be provided when instantiating the template. Such parameters are typically used for providing runtime inputs (like SLA thresholds), which in turn are used in the existing homing policies. This also helps build reusable homing constraints, where these parameters can be embedded at design time and their corresponding values can be supplied at runtime.
+
+Each parameter is specified with the name followed by its value. Values can be strings, lists, or dictionaries.
+
+### Example
+
+In this example, ``provider_name`` is a string and ``service_info`` is a dictionary containing both a string and a list (keyed by ``base_url`` and ``nod_config``, respectively).
+
+```yaml
+parameters:
+ provider_name: multicloud
+ service_info:
+ base_url: http://serviceprovider.sdngc.com/
+ nod_config:
+ - http://nod/config_a.yaml
+ - http://nod/config_b.yaml
+ - http://nod/config_c.yaml
+ - http://nod/config_d.yaml
+```
+
+A parameter can be referenced in place of any value. See the **Intrinsic Functions** section for more details.
+
+## Locations
+
+One or more **locations** may be declared. A location may be referenced by one or more ``constraints``. Locations may be defined in any of the following ways:
+
+### Coordinate
+
+A geographic coordinate expressed as a latitude and longitude.
+
+| Key | Value |
+|-----------------------------|----------------------------|
+| ``latitude`` | Latitude of the location. |
+| ``longitude`` | Longitude of the location. |
+
+### Host Name
+
+An opaque host name that can be translated to a coordinate via an inventory provider (e.g., A&AI).
+
+| Key | Value |
+|-----------------------------|----------------------------|
+| ``host_name`` | Host name identifying a location. |
+
+### CLLI
+
+A Common Language Location Identification (CLLI) [code](https://en.wikipedia.org/wiki/CLLI_code).
+
+| Key | Value |
+|-----------------------------|----------------------------|
+| ``clli_code`` | 8 character CLLI. |
+
+**Questions**
+
+* Do we need functions that can convert one of these to the other? E.g., CLLI Codes to a latitude/longitude
+
+### Placemark
+
+An address expressed in geographic region-agnostic terms (referred to as a *placemark*).
+
+*Support for this schema is deferred.*
+
+| Key | Value |
+|-----------------------------|----------------------------|
+| ``iso_country_code`` | The abbreviated country name associated with the placemark. |
+| ``postal_code`` | The postal code associated with the placemark. |
+| ``administrative_area`` | The state or province associated with the placemark. |
+| ``sub_administrative_area`` | Additional administrative area information for the placemark. |
+| ``locality`` | The city associated with the placemark. |
+| ``sub_locality`` | Additional city-level information for the placemark. |
+| ``thoroughfare`` | The street address associated with the placemark. |
+| ``sub_thoroughfare`` | Additional street-level information for the placemark. |
+
+**Questions**
+
+* What geocoder can we use to convert placemarks to a latitude/longitude?
+
+### Examples
+
+The following examples illustrate a location expressed in coordinate, host_name, CLLI, and placemark, respectively.
+
+```yaml
+locations:
+ location_using_coordinates:
+ latitude: 32.897480
+ longitude: -97.040443
+
+ host_location_using_host_name:
+ host_name: USESTCDLLSTX55ANZ123
+
+ location_using_clli:
+ clli_code: DLLSTX55
+
+ location_using_placemark:
+ sub_thoroughfare: 1
+ thoroughfare: ATT Way
+ locality: Bedminster
+ administrative_area: NJ
+ postal_code: 07921-2694
+```
+
+## Demands
+
+A **demand** can be satisfied by using candidates drawn from inventories. Each demand is uniquely named. Inventory is considered to be opaque and can represent anything from which candidates can be drawn.
+
+A demand's resource requirements are determined by asking an **inventory provider** for one or more sets of **inventory candidates** against which the demand will be made. An explicit set of candidates may also be declared, for example, if the only candidates for a demand are predetermined.
+
+Demand criteria is dependent upon the inventory provider in use.
+
+**Provider-agnostic Schema**
+
+| Key | Value |
+|------------------------|--------------------------|
+| ``inventory_provider`` | A HAS-supported inventory provider. |
+| ``inventory_type`` | The reserved word ``cloud`` (for cloud regions) or the reserved word ``service`` (for existing service instances). Exactly one inventory type may be specified. |
+| ``attributes`` (Optional) | A list of key-value pairs used to select inventory candidates that match *all* the specified attributes. Each key should be a uniquely identifiable attribute at the inventory provider. |
+| ``service_type`` (Optional) | If ``inventory_type`` is ``service``, a list of one or more provider-defined service types. If only one service type is specified, it may appear without list markers (``[]``). |
+| ``service_id`` (Optional) | If ``inventory_type`` is ``service``, a list of one or more provider-defined service ids. If only one service id is specified, it may appear without list markers (``[]``). |
+| ``default_cost`` (Optional) | The default cost of an inventory candidate, expressed as currency. This must be specified if the inventory provider may not always return a cost. |
+| ``required_candidates`` (Optional) | A list of one or more candidates from which a solution will be explored. Must be a valid candidate as described in the **candidate schema**. |
+| ``excluded_candidates`` (Optional) | A list of one or more candidates that should be excluded from the search space. Must be a valid candidate as described in the **candidate schema**. |
+| ``existing_placement`` (Optional) | The current placement for the demand. Must be a valid candidate as described in the **candidate schema**. |
+
+
+### Examples
+
+The following example helps understand a demand specification using Active & Available Inventory (A&AI), the inventory provider-of-record for ONAP.
+
+**Inventory Provider Criteria**
+
+| Key | Value |
+|------------------------|--------------------------|
+| ``inventory_provider`` | Examples: ``aai``, ``multicloud``. |
+| ``inventory_type`` | The reserved word ``cloud`` (for new inventory) or the reserved word ``service`` (for existing inventory). Exactly one inventory type may be specified. |
+| ``attributes`` (Optional) | A list of key-value pairs to match against inventory when drawing candidates. |
+| ``service_type`` (Optional) | Examples may include ``vG``, ``vG_MuxInfra``, etc. |
+| ``service_id`` (Optional) | Must be a valid service id. Examples may include ``vCPE``, ``VoLTE``, etc. |
+| ``default_cost`` (Optional) | The default cost of an inventory candidate, expressed as a unitless number. |
+| ``required_candidates`` (Optional) | A list of one or more valid candidates. See **Candidate Schema** for details. |
+| ``excluded_candidates`` (Optional) | A list of one or more valid candidates. See **Candidate Schema** for details. |
+| ``existing_placement`` (Optional) | A single valid candidate, representing the current placement for the demand. See **candidate schema** for details. |
+
+**Candidate Schema**
+
+The following is the schema for a valid ``candidate``:
+* ``candidate_id`` uniquely identifies a candidate. Currently, it is either a Service Instance ID or Cloud Region ID.
+* ``candidate_type`` identifies the type of the candidate. Currently, it is either ``cloud`` or ``service``.
+* ``inventory_type`` is defined as described in **Inventory Provider Criteria** (above).
+* ``inventory_provider`` identifies the inventory from which the candidate was drawn.
+* ``host_id`` is an ID of a specific host (used only when referring to service/existing inventory).
+* ``cost`` is expressed as a unitless number.
+* ``location_id`` is always a location ID of the specified location type (e.g., for a type of ``cloud`` this will be a Cloud Region ID).
+* ``location_type`` is an inventory provider supported location type.
+* ``latitude`` is a valid latitude corresponding to the *location_id*.
+* ``longitude`` is a valid longitude corresponding to the *location_id*.
+* ``city`` (Optional) city corresponding to the *location_id*.
+* ``state`` (Optional) state corresponding to the *location_id*.
+* ``country`` (Optional) country corresponding to the *location_id*.
+* ``region`` (Optional) geographic region corresponding to the *location_id*.
+* ``complex_name`` (Optional) Name of the complex corresponding to the *location_id*.
+* ``cloud_owner`` (Optional) refers to the *cloud owner* (e.g., ``azure``, ``aws``, ``att``, etc.).
+* ``cloud_region_version`` (Optional) is an inventory provider supported version of the cloud region.
+* ``physical_location_id`` (Optional) is an inventory provider supported CLLI code corresponding to the cloud region.
+
+**Examples**
+
+```json
+{
+ "candidate_id": "1ac71fb8-ad43-4e16-9459-c3f372b8236d",
+ "candidate_type": "service",
+ "inventory_type": "service",
+ "inventory_provider": "aai",
+ "host_id": "vnf_123456",
+ "cost": "100",
+ "location_id": "DLLSTX9A",
+ "location_type": "azure",
+ "latitude": "32.897480",
+ "longitude": "-97.040443",
+ "city": "Dallas",
+ "state": "TX",
+ "country": "USA",
+ "region": "US",
+ "complex_name": "dalls_one",
+ "cloud_owner": "att-aic",
+ "cloud_region_version": "1.1",
+ "physical_location_id": "DLLSTX9A"
+}
+```
+
+**Questions**
+* Currently, candidates are either service instances or cloud regions. As new services are on-boarded, this can be evolved to represent different types of resources.
+
+**Examples**
+
+The following examples illustrate two demands:
+
+* ``vGMuxInfra``: A vGMuxInfra service, drawing candidates of type *service* from the inventory. Only candidates that match the customer_id and orchestration-status will be included in the search space.
+* ``vG``: A vG, drawing candidates of type *service* and *cloud* from the inventory. Only candidates that match the customer_id and provisioning-status will be included in the search space.
+
+
+```yaml
+demands:
+ vGMuxInfra:
+ - inventory_provider: aai
+ inventory_type: service
+ attributes:
+ equipment_type: vG_Mux
+ customer_id: some_company
+ orchestration-status: Activated
+ model-id: 174e371e-f514-4913-a93d-ed7e7f8fbdca
+ model-version: 2.0
+ vG:
+ - inventory_provider: aai
+ inventory_type: service
+ attributes:
+ equipment_type: vG
+ customer_id: some_company
+ provisioning-status: provisioned
+ - inventory_provider: aai
+ inventory_type: cloud
+```
+
+**Questions**
+* Do we need to support cost as a function ?
+
+## Constraints
+
+A **Constraint** is used to *eliminate* inventory candidates from one or more demands that do not meet the requirements specified by the constraint. Since reusability is one of the cornerstones of HAS, constraints are designed to be service-agnostic and are parameterized so that they can be reused across a wide range of services. Further, HAS is designed with a plug-in architecture that facilitates easy addition of new constraint types.
+
+Constraints are denoted by a ``constraints`` key. Each constraint is uniquely named and set to a dictionary containing a constraint type, a list of demands to apply the constraint to, and a dictionary of constraint properties.
+
+**Considerations while using multiple constraints**
+* Constraints should be treated as an unordered list, and no assumptions should be made regarding the order in which the constraints are evaluated for any given demand.
+* All constraints are effectively AND-ed together. Constructs such as "Constraint X OR Y" are unsupported.
+* Constraints are reducing in nature, and do not increase the available candidates at any point during the constraint evaluations.
+
+
+**Schema**
+
+| Key | Value |
+|---------------------|-------------|
+| ``CONSTRAINT_NAME`` | Key is a unique name. |
+| ``type`` | The type of constraint. See **Constraint Types** for a list of currently supported values. |
+| ``demands`` | One or more previously declared demands. If only one demand is specified, it may appear without list markers (``[]``). |
+| ``properties`` (Optional) | Properties particular to the specified constraint type. Use if required by the constraint. |
+
+```yaml
+constraints:
+ CONSTRAINT_NAME_1:
+ type: CONSTRAINT_TYPE
+ demands: DEMAND_NAME | [DEMAND_NAME_1, DEMAND_NAME_2, ...]
+ properties: PROPERTY_DICT
+
+ CONSTRAINT_NAME_2:
+ type: CONSTRAINT_TYPE
+ demands: DEMAND_NAME | [DEMAND_NAME_1, DEMAND_NAME_2, ...]
+ properties: PROPERTY_DICT
+
+ ...
+```
+
+#### Constraint Types
+
+| Type | Description |
+|---------------------|-------------|
+| ``attribute`` | Constraint that matches the specified list of Attributes. |
+| ``distance_between_demands`` | Geographic distance constraint between each pair of a list of demands. |
+| ``distance_to_location`` | Geographic distance constraint between each of a list of demands and a specific location. |
+| ``instance_fit`` | Constraint that ensures available capacity in an existing service instance for an incoming demand. |
+| ``inventory_group`` | Constraint that enforces two or more demands are satisfied using candidates from a pre-established group in the inventory. |
+| ``region_fit`` | Constraint that ensures available capacity in an existing cloud region for an incoming demand. |
+| ``zone`` | Constraint that enforces co-location/diversity at the granularities of clouds/regions/availability-zones. |
+| ``license`` (Deferred) | License availability constraint. |
+| ``network_between_demands`` (Deferred) | Network constraint between each pair of a list of demands. |
+| ``network_to_location`` (Deferred) | Network constraint between each of a list of demands and a specific location/address. |
+
+*Note: Constraint names marked "Deferred" **will not** be supported in the initial release of HAS.*
+
+#### Threshold Values
+
+Constraint property values representing a threshold may be an integer or floating point number, optionally prefixed with a comparison operator (``=``, ``<``, ``>``, ``<=``, or ``>=``; the default is ``=``) and optionally suffixed with a unit.
+
+Whitespace may appear between the comparison operator and the value, and between the value and the unit. When a range of values is specified (e.g., ``10-20 km``), the comparison operator is omitted.
+
+Each property is documented with a default unit. The following units are supported:
+
+| Unit | Values | Default |
+|------------|------------------------------|----------|
+| Currency | ``USD`` | ``USD`` |
+| Time | ``ms``, ``sec`` | ``ms`` |
+| Distance | ``km``, ``mi`` | ``km`` |
+| Throughput | ``Kbps``, ``Mbps``, ``Gbps`` | ``Mbps`` |
+
+### Attribute
+
+Constrain one or more demands by one or more attributes, expressed as properties. Attributes are mapped to the **inventory provider** specified properties, referenced by the demands. For example, properties could be hardware capabilities provided by the platform (flavor, CPU-Pinning, NUMA), features supported by the services, etc.
+
+**Schema**
+
+| Property | Value |
+|--------------|-------------------------------------------------------------|
+| ``evaluate`` | Opaque dictionary of attribute name and value pairs. Values must be strings or numbers. Encoded and sent to the service provider via a plugin. |
+
+*Note: Attribute values are not detected/parsed as thresholds by the Homing framework. Such interpretations and evaluations are inventory provider-specific and delegated to the corresponding plugin.*
+
+```yaml
+constraints:
+ sriov_nj:
+ type: attribute
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ evaluate:
+ cloud_version: 1.1
+ flavor: SRIOV
+ subdivision: US-TX
+ vcpu_pinning: True
+ numa_topology: numa_spanning
+```
+
+#### Proposal: Evaluation Operators
+
+To assist in evaluating attributes, the following operators and notation are proposed:
+
+| Operator | Name | Operand |
+|--------------|-----------|------------------------------------------------|
+| ``eq`` | ``==`` | Any object (string, number, list, dict) |
+| ``ne`` | ``!=`` | |
+| ``lt`` | ``<`` | A number (strings are converted to float) |
+| ``gt`` | ``>`` | |
+| ``lte`` | ``<=`` | |
+| ``gte`` | ``>=`` | |
+| ``any`` | ``Any`` | A list of objects (string, number, list, dict) |
+| ``all`` | ``All`` | |
+| ``regex`` | ``RegEx`` | A regular expression pattern |
+
+Example usage:
+
+```yaml
+constraints:
+ sriov_nj:
+ type: attribute
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ evaluate:
+ cloud_version: {gt: 1.0}
+ flavor: {regex: /^SRIOV$/i}
+ subdivision: {any: [US-TX, US-NY, US-CA]}
+```
+
+### Distance Between Demands
+
+Constrain each pairwise combination of two or more demands by distance requirements.
+
+**Schema**
+
+| Name | Value |
+|--------------|-------------------------------------------------------------|
+| ``distance`` | Distance between demands, measured by the geographic path. |
+
+The constraint is applied between each pairwise combination of demands. For this reason, at least two demands must be specified, implicitly or explicitly.
+
+```yaml
+constraints:
+ distance_vnf1_vnf2:
+ type: distance_between_demands
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ distance: < 250 km
+```
+
+### Distance To Location
+
+Constrain one or more demands by distance requirements relative to a specific location.
+
+**Schema**
+
+| Property | Value |
+|--------------|------------------------------------------------------------|
+| ``distance`` | Distance between demands, measured by the geographic path. |
+| ``location`` | A previously declared location. |
+
+The constraint is applied between each demand and the referenced location, not across all pairwise combinations of Demands.
+
+```yaml
+constraints:
+ distance_vnf1_loc:
+ type: distance_to_location
+ demands: [my_vnf_demand, my_other_vnf_demand, another_vnf_demand]
+ properties:
+ distance: < 250 km
+ location: LOCATION_ID
+```
+
+### Instance Fit
+
+Constrain each demand by its service requirements.
+
+Requirements are sent as a request to a **service controller**. Service controllers are defined by plugins in Homing (e.g., ``sdn-c``).
+
+A service controller plugin knows how to communicate with a particular endpoint (via HTTP/REST, DMaaP, etc.), obtain necessary information, and make a decision. The endpoint and credentials can be configured through plugin settings.
+
+**Schema**
+
+| Property | Description |
+|----------------|-----------------------------------|
+| ``controller`` | Name of a service controller. |
+| ``request`` | Opaque dictionary of key/value pairs. Values must be strings or numbers. Encoded and sent to the service provider via a plugin. |
+
+```yaml
+constraints:
+ check_for_availability:
+ type: instance_fit
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ controller: sdn-c
+ request: REQUEST_DICT
+```
+
+### Region Fit
+
+Constrain each demand's inventory candidates based on inventory provider membership.
+
+Requirements are sent as a request to a **service controller**. Service controllers are defined by plugins in Homing (e.g., ``sdn-c``).
+
+A service controller plugin knows how to communicate with a particular endpoint (via HTTP/REST, DMaaP, etc.), obtain necessary information, and make a decision. The endpoint and credentials can be configured through plugin settings.
+
+**Schema**
+
+| Property | Description |
+|----------------|-----------------------------------|
+| ``controller`` | Name of a service controller. |
+| ``request`` | Opaque dictionary of key/value pairs. Values must be strings or numbers. Encoded and sent to the service provider via a plugin. |
+
+```yaml
+constraints:
+ check_for_membership:
+ type: region_fit
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ controller: sdn-c
+ request: REQUEST_DICT
+```
+
+### Zone
+
+Constrain two or more demands such that each is located in the same or different zone category.
+
+Zone categories are inventory provider-defined, based on the demands being constrained.
+
+**Schema**
+
+| Property | Value |
+|---------------|-------------------------------------------------------------|
+| ``qualifier`` | Zone qualifier. One of ``same`` or ``different``. |
+| ``category`` | Zone category. One of ``disaster``, ``region``, ``complex``, ``time``, or ``maintenance``. |
+
+For example, to place two demands in different disaster zones:
+
+```yaml
+constraints:
+ vnf_diversity:
+ type: zone
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ qualifier: different
+ category: disaster
+```
+
+Or, to place two demands in the same region:
+
+```yaml
+constraints:
+ vnf_affinity:
+ type: zone
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ qualifier: same
+ category: region
+```
+
+**Notes**
+
+* These categories could be any of the following: ``disaster_zone``, ``region``, ``complex``, ``time_zone``, and ``maintenance_zone``. Really, we are talking affinity/anti-affinity at the level of DCs, but these terms may cause confusion with affinity/anti-affinity in OpenStack.
+
+### Inventory Group
+
+Constrain demands such that inventory items are grouped across two demands.
+
+This constraint has no properties.
+
+```yaml
+constraints:
+ my_group:
+ type: inventory_group
+ demands: [demand_1, demand_2]
+```
+
+*Note: Only pair-wise groups are supported at this time. If three or more demands are specified, only the first two will be used.*
+
+### License
+
+Constrain demands according to license availability.
+
+*Support for this constraint is deferred.*
+
+**Schema**
+
+| Property | Value |
+|----------|----------------------------------------------------------|
+| ``id`` | Unique license identifier |
+| ``key`` | Opaque license key, particular to the license identifier |
+
+```yaml
+constraints:
+ my_software:
+ type: license
+ demands: [demand_1, demand_2, ...]
+ properties:
+ id: SOFTWARE_ID
+ key: LICENSE_KEY
+```
+
+### Network Between Demands
+
+Constrain each pairwise combination of two or more demands by network requirements.
+
+*Support for this constraint is deferred.*
+
+**Schema**
+
+| Property | Value |
+|--------------------------|-----------------------------------------------------------------|
+| ``bandwidth`` (Optional) | Desired network bandwidth. |
+| ``distance`` (Optional) | Desired distance between demands, measured by the network path. |
+| ``latency`` (Optional) | Desired network latency. |
+
+At least one of ``bandwidth``, ``distance``, or ``latency`` must be specified; any combination of them is allowed. If none of these properties are used, it is treated as a malformed request.
+
+The constraint is applied between each pairwise combination of demands. For this reason, at least two demands must be specified, implicitly or explicitly.
+
+```yaml
+constraints:
+ network_requirements:
+ type: network_between_demands
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ bandwidth: >= 1000 Mbps
+ distance: < 250 km
+ latency: < 50 ms
+```
+
+### Network To Location
+
+Constrain one or more demands by network requirements relative to a specific location.
+
+*Support for this constraint is deferred.*
+
+**Schema**
+
+| Property | Value |
+|---------------|-----------------------------------------------------------------|
+| ``bandwidth`` | Desired network bandwidth. |
+| ``distance`` | Desired distance between demands, measured by the network path. |
+| ``latency`` | Desired network latency. |
+| ``location`` | A previously declared location. |
+
+At least one of ``bandwidth``, ``distance``, or ``latency`` must be specified; any combination of them is allowed. If none of these properties are used, it is treated as a malformed request.
+
+The constraint is applied between each demand and the referenced location, not across all pairwise combinations of Demands.
+
+```yaml
+constraints:
+ my_access_network_constraint:
+ type: network_to_location
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ bandwidth: >= 1000 Mbps
+ distance: < 250 km
+ latency: < 50 ms
+ location: LOCATION_ID
+```
+### Capabilities
+
+Constrain each demand by its cluster capability requirements. For example, as described by an OpenStack Heat template and operational environment.
+
+*Support for this constraint is deferred.*
+
+**Schema**
+
+| Property | Value |
+|--------------|-------------------------------------------------------------|
+| ``specification`` | Indicates the kind of specification being provided in the properties. Must be ``heat``. Future values may include ``tosca``, ``Homing``, etc. |
+| ``template`` | For specifications of type ``heat``, a single stack in OpenStack Heat Orchestration Template (HOT) format. Stacks may be expressed as a URI reference or a string of well-formed YAML/JSON. Templates are validated by the Heat service configured for use by HAS. Nested stack references are unsupported. |
+| ``environment`` (Optional) | For specifications of type ``heat``, an optional Heat environment. Environments may be expressed as a URI reference or a string of well-formed YAML/JSON. Environments are validated by the Heat service configured for use by Homing. |
+
+```yaml
+constraints:
+ check_for_fit:
+ type: capability
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ specification: heat
+ template: http://repository/my/stack_template
+ environment: http://repository/my/stack_environment
+```
+
+
+## Reservations
+
+A **Reservation** allows reservation of resources associated with a candidate that satisfies one or more demands.
+
+Similar to the *instance_fit* constraint, requirements are sent as a request to a **service controller** that handles the reservation. Service controllers are defined by plugins in Homing (e.g., ``sdn-c``).
+
+The service controller plugin knows how to make a reservation (and initiate rollback on a failure) with a particular endpoint (via HTTP/REST, DMaaP, etc.) of the service controller. The endpoint and credentials can be configured through plugin settings.
+
+**Schema**
+
+| Property | Description |
+|----------------|-----------------------------------|
+| ``controller`` | Name of a service controller. |
+| ``request`` | Opaque dictionary of key/value pairs. Values must be strings or numbers. Encoded and sent to the service provider via a plugin. |
+
+
+```yaml
+resource_reservation:
+ type: instance_reservation
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ controller: sdn-c
+ request: REQUEST_DICT
+```
+
+## Optimizations
+
+An **Optimization** allows specification of an objective function, which aims to maximize or minimize a certain value that varies based on the choice of candidates for one or more demands that are a part of the objective function. For example, an objective function may be to find the *closest* cloud-region to a customer to home a demand.
+
+### Optimization Components
+
+Optimization definitions can be broken down into three components:
+
+| Component | Key | Value |
+|-----------|----------------------|---------------------------------------------------------|
+| Goal | ``minimize`` | A single Operand (usually ``sum``) or Function |
+| Operator | ``sum``, ``product`` | Two or more Operands (Numbers, Operators, Functions) |
+| Function | ``distance_between`` | A two-element list consisting of a location and demand. |
+
+
+### Example
+
+Given a customer location ``cl``, two demands ``vG1`` and ``vG2``, and weights ``w1`` and ``w2``, the optimization criteria can be expressed as:
+
+``minimize(w1 * distance_between(cl, vG1) + w2 * distance_between(cl, vG2))``
+
+This can be read as: "Minimize the sum of weighted distances from cl to vG1 and from cl to vG2."
+
+Such optimizations may be expressed in a template as follows:
+
+```yaml
+parameters:
+ w1: 10
+ w2: 20
+
+optimization:
+ minimize:
+ sum:
+ - product:
+ - {get_param: w1}
+ - {distance_between: [cl, vG1]}
+ - product:
+ - {get_param: w2}
+ - {distance_between: [cl, vG2]}
+```
+
+Or without the weights as:
+
+```yaml
+optimization:
+ minimize:
+ sum:
+ - {distance_between: [cl, vG1]}
+ - {distance_between: [cl, vG2]}
+```
+
+**Template Restriction**
+
+While the template format supports any number of arrangements of numbers, operators, and functions, HAS's solver presently expects a very specific arrangement.
+
+Until further notice:
+
+* Optimizations must conform to a single goal of ``minimize`` followed by a ``sum`` operator.
+* The sum can consist of two ``distance_between`` function calls, or two ``product`` operators.
+* If a ``product`` operator is present, it must contain at least a ``distance_between`` function call, plus one optional number to be used for weighting.
+* Numbers may be referenced via ``get_param``.
+* The objective function has to be written in the sum-of-product format. In the future, HAS can convert product-of-sum into sum-of-product automatically.
+
+The first two examples in this section illustrate both of these use cases.
+
+**Inline Operations**
+
+If desired, operations can be rewritten inline. For example, the two ``product`` operations from the previous example can also be expressed as:
+
+```yaml
+parameters:
+ w1: 10
+ w2: 20
+
+optimization:
+ minimize:
+ sum:
+ - {product: [{get_param: w1}, {distance_between: [cl, vG1]}]}
+ - {product: [{get_param: w2}, {distance_between: [cl, vG2]}]}
+```
+
+In turn, even the ``sum`` operation can be rewritten inline; however, there is a point of diminishing returns in terms of readability!
+
+**Notes**
+
+* In the first version, we do not support more than one dimension in the optimization (e.g., minimize distance and cost). Supporting multiple dimensions would require a function to normalize the units across dimensions.
+
+## Intrinsic Functions
+
+Homing provides a set of intrinsic functions that can be used inside templates to perform specific tasks. The following section describes the role and syntax of the intrinsic functions.
+
+Functions are written as a dictionary with one key/value pair. The key is the function name. The value is a list of arguments. If only one argument is provided, a string may be used instead.
+
+```yaml
+a_property: {FUNCTION_NAME: [ARGUMENT_LIST]}
+
+a_property: {FUNCTION_NAME: ARGUMENT_STRING}
+```
+
+*Note: These functions can only be used within "properties" sections.*
+
+### get_file
+
+The ``get_file`` function inserts the content of a file into the template. It is generally used as a file inclusion mechanism for files containing templates from other services (e.g., Heat).
+
+The syntax of the ``get_file`` function is:
+
+```yaml
+{get_file: <content key>}
+```
+
+The ``content`` key is used to look up the ``files`` dictionary that is provided in the REST API call. The Homing client command (``Homing``) is ``get_file`` aware and populates the ``files`` dictionary with the actual content of fetched paths and URLs. The Homing client command supports relative paths and transforms these to the absolute URLs required by the Homing API.
+
+**Note**: The ``get_file`` argument must be a static path or URL and not rely on intrinsic functions like ``get_param``. The Homing client does not process intrinsic functions. They are only processed by the Homing engine.
+
+The example below demonstrates the ``get_file`` function usage with both relative and absolute URLs:
+
+```yaml
+constraints:
+ check_for_fit:
+    type: capability
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ template: {get_file: stack_template.yaml}
+ environment: {get_file: http://hostname/environment.yaml}
+```
+
+The ``files`` dictionary generated by the Homing client during instantiation of the plan would contain the following keys. Each value would be that file's contents.
+
+* ``file:///path/to/stack_template.yaml``
+* ``http://hostname/environment.yaml``
+
+**Questions**
+
+* If Homing will only be accessed over DMaaP, files will need to be embedded using the Homing API request format.
+
+### get_param
+
+The ``get_param`` function references an input parameter of a template. It resolves to the value provided for this input parameter at runtime.
+
+The syntax of the ``get_param`` function is:
+
+```yaml
+{get_param: <parameter name>}
+
+{get_param: [<parameter name>, <key/index1> (optional), <key/index2> (optional), ...]}
+```
+
+**parameter name** is the parameter name to be resolved. If the parameter returns a complex data structure such as a list or a dict, then subsequent keys or indices can be specified. These additional parameters are used to navigate the data structure to return the desired value. Indices are zero-based.
+
+The following example demonstrates how the ``get_param`` function is used:
+
+```yaml
+parameters:
+ software_id: SOFTWARE_ID
+ license_key: LICENSE_KEY
+ service_info:
+ provider: dmaap:///full.topic.name
+ costs: [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
+
+constraints:
+ my_software:
+ type: license
+ demands: [demand_1, demand_2, ...]
+ properties:
+ id: {get_param: software_id}
+ key: {get_param: license_key}
+
+ check_for_availability:
+ type: service
+ demands: [my_vnf_demand, my_other_vnf_demand]
+ properties:
+ provider_url: {get_param: [service_info, provider]}
+ request: REQUEST_DICT
+ cost: {get_param: [service_info, costs, 4]}
+```
+
+In this example, properties would be set as follows:
+
+| Key | Value |
+|------------------|--------------------------|
+| ``id`` | SOFTWARE_ID |
+| ``key`` | LICENSE_KEY |
+| ``provider_url`` | dmaap:///full.topic.name |
+| ``cost`` | 50 |
+
+## Contact ##
+
+Shankar Narayanan <shankarpnsn@gmail.com>
diff --git a/conductor/etc/conductor/api_paste.ini b/conductor/etc/conductor/api_paste.ini
new file mode 100644
index 0000000..9d6b32a
--- /dev/null
+++ b/conductor/etc/conductor/api_paste.ini
@@ -0,0 +1,26 @@
+# Conductor API WSGI Pipeline
+# Define the filters that make up the pipeline for processing WSGI requests
+# Note: This pipeline is PasteDeploy's term rather than Conductor's pipeline
+# used for processing samples
+
+# Remove authtoken from the pipeline if you don't want to use keystone authentication
+[pipeline:main]
+pipeline = cors http_proxy_to_wsgi api-server
+#pipeline = cors http_proxy_to_wsgi request_id authtoken api-server
+
+[app:api-server]
+paste.app_factory = conductor.api.app:app_factory
+
+#[filter:authtoken]
+#paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+#[filter:request_id]
+#paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = conductor
+
+[filter:http_proxy_to_wsgi]
+paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
+oslo_config_project = conductor \ No newline at end of file
diff --git a/conductor/etc/conductor/conductor-config-generator.conf b/conductor/etc/conductor/conductor-config-generator.conf
new file mode 100644
index 0000000..fa694d2
--- /dev/null
+++ b/conductor/etc/conductor/conductor-config-generator.conf
@@ -0,0 +1,7 @@
+[DEFAULT]
+output_file = etc/conductor/conductor.conf.sample
+wrap_width = 79
+namespace = conductor
+namespace = oslo.log
+# namespace = oslo.messaging
+# namespace = oslo.middleware.cors \ No newline at end of file
diff --git a/conductor/etc/conductor/log.conf b/conductor/etc/conductor/log.conf
new file mode 100644
index 0000000..070da15
--- /dev/null
+++ b/conductor/etc/conductor/log.conf
@@ -0,0 +1,49 @@
+[loggers]
+keys=root
+
+[handlers]
+keys=trfhand,infohand,errhand,debughand,warninghand
+
+[logger_root]
+level=NOTSET
+handlers=trfhand,infohand,errhand,debughand,warninghand
+
+
+[handler_trfhand]
+class=FileHandler
+formatter=generic
+level=NOTSET
+args=('/home/larry/Desktop/log/application.log',)
+
+[handler_errhand]
+class=FileHandler
+level=ERROR
+formatter=generic
+args=('/home/larry/Desktop/log/error.log', )
+
+[handler_infohand]
+class=FileHandler
+level=INFO
+formatter=generic
+args=('/home/larry/Desktop/log/info.log',)
+
+[handler_debughand]
+class=FileHandler
+level=DEBUG
+formatter=generic
+args=('/home/larry/Desktop/log/debug.log',)
+
+[handler_warninghand]
+class=FileHandler
+level=WARNING
+formatter=generic
+args=('/home/larry/Desktop/log/warning.log',)
+
+
+[formatters]
+keys=generic
+
+[formatter_generic]
+class=logging.Formatter
+format=%(asctime)s %(levelname)s %(name)s: [-] %(message)s
+datefmt= \ No newline at end of file
diff --git a/conductor/etc/conductor/policy.json b/conductor/etc/conductor/policy.json
new file mode 100644
index 0000000..95e5e19
--- /dev/null
+++ b/conductor/etc/conductor/policy.json
@@ -0,0 +1,18 @@
+{
+ "context_is_admin": "role:admin",
+ "segregation": "rule:context_is_admin",
+
+ "homing:get_samples": "",
+ "homing:get_sample": "",
+ "homing:query_sample": "",
+ "homing:create_samples": "",
+
+ "homing:compute_statistics": "",
+ "homing:get_meters": "",
+
+ "homing:get_resource": "",
+ "homing:get_resources": "",
+
+ "homing:events:index": "",
+ "homing:events:show": ""
+} \ No newline at end of file
diff --git a/conductor/etc/conductor/rootwrap.conf b/conductor/etc/conductor/rootwrap.conf
new file mode 100644
index 0000000..75275ca
--- /dev/null
+++ b/conductor/etc/conductor/rootwrap.conf
@@ -0,0 +1,27 @@
+# Configuration for conductor-rootwrap
+# This file should be owned by (and only-writeable by) the root user
+
+[DEFAULT]
+# List of directories to load filter definitions from (separated by ',').
+# These directories MUST all be only writeable by root !
+filters_path=/etc/conductor/rootwrap.d,/usr/share/conductor/rootwrap
+
+# List of directories to search executables in, in case filters do not
+# explicitly specify a full path (separated by ',')
+# If not specified, defaults to system PATH environment variable.
+# These directories MUST all be only writeable by root !
+exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin
+
+# Enable logging to syslog
+# Default value is False
+use_syslog=False
+
+# Which syslog facility to use.
+# Valid values include auth, authpriv, syslog, user0, user1...
+# Default value is 'syslog'
+syslog_log_facility=syslog
+
+# Which messages to log.
+# INFO means log all usage
+# ERROR means only log unsuccessful attempts
+syslog_log_level=ERROR \ No newline at end of file
diff --git a/conductor/etc/conductor/rootwrap.d/README.txt b/conductor/etc/conductor/rootwrap.d/README.txt
new file mode 100644
index 0000000..a3808e1
--- /dev/null
+++ b/conductor/etc/conductor/rootwrap.d/README.txt
@@ -0,0 +1 @@
+This space reserved for future rootwrap command filters. \ No newline at end of file
diff --git a/conductor/examples/apache2/conductor.conf b/conductor/examples/apache2/conductor.conf
new file mode 100644
index 0000000..c882555
--- /dev/null
+++ b/conductor/examples/apache2/conductor.conf
@@ -0,0 +1,25 @@
+# This is an example Apache2 configuration file for using the
+# conductor API through mod_wsgi.
+
+# Note: If you are using a Debian-based system then the paths
+# "/var/log/httpd" and "/var/run/httpd" will use "apache2" instead
+# of "httpd".
+#
+# The number of processes and threads is an example only and should
+# be adjusted according to local requirements.
+
+Listen 8091
+
+<VirtualHost *:8091>
+ WSGIDaemonProcess conductor-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP}
+ WSGIProcessGroup conductor-api
+ WSGIScriptAlias / /var/www/conductor/app
+ WSGIApplicationGroup %{GLOBAL}
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/httpd/conductor_error.log
+ CustomLog /var/log/httpd/conductor_access.log combined
+</VirtualHost>
+
+WSGISocketPrefix /var/run/httpd \ No newline at end of file
diff --git a/conductor/examples/distribution/ubuntu/init.d/conductor-api b/conductor/examples/distribution/ubuntu/init.d/conductor-api
new file mode 100644
index 0000000..e67a9dc
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/init.d/conductor-api
@@ -0,0 +1,149 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: conductor-api
+# Required-Start: $network $local_fs $remote_fs $syslog
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Conductor API
+# Description: Conductor API server
+### END INIT INFO
+
+# Author: Joe D'Andrea <jdandrea@research.att.com>
+# Based on work by Thomas Goirand <zigo@debian.org>
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Conductor API"
+PROJECT_NAME=conductor
+NAME=${PROJECT_NAME}-api
+PYTHON_HOME=
+PORT=8091
+
+#!/bin/sh
+# The content after this line comes from openstack-pkg-tools
+# and has been automatically added to a .init.in script, which
+# contains only the descriptive part for the daemon. Everything
+# else is standardized as a single unique script.
+
+# Author: Joe D'Andrea <jdandrea@research.att.com>
+# Based on work by Thomas Goirand <zigo@debian.org>
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+
+if [ -z "${DAEMON}" ] ; then
+ if [ -d "${PYTHON_HOME}" ] ; then
+ DAEMON=${PYTHON_HOME}/bin/${NAME}
+ else
+ DAEMON=/usr/bin/${NAME}
+ fi
+fi
+PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid
+if [ -z "${SCRIPTNAME}" ] ; then
+ SCRIPTNAME=/etc/init.d/${NAME}
+fi
+if [ -z "${SYSTEM_USER}" ] ; then
+ SYSTEM_USER=${PROJECT_NAME}
+fi
+if [ -z "${SYSTEM_GROUP}" ] ; then
+ SYSTEM_GROUP=${PROJECT_NAME}
+fi
+if [ "${SYSTEM_USER}" != "root" ] ; then
+ STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}"
+fi
+if [ -z "${CONFIG_FILE}" ] ; then
+ CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf
+fi
+LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log
+if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then
+ DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}"
+fi
+
+# Exit if the package is not installed
+[ -x $DAEMON ] || exit 0
+
+# If run as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed
+if [ `whoami` = "root" ] ; then
+ for i in lock run log lib ; do
+ mkdir -p /var/$i/${PROJECT_NAME}
+ chown ${SYSTEM_USER}:${SYSTEM_USER} /var/$i/${PROJECT_NAME}
+ done
+fi
+
+# This defines init_is_upstart which we use later on (+ more...)
+. /lib/lsb/init-functions
+
+# Manage log options: logfile and/or syslog, depending on user's choosing
+[ -r /etc/default/openstack ] && . /etc/default/openstack
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
+[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE"
+
+do_start() {
+ start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
+ --test > /dev/null || return 1
+ start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
+ -- --port ${PORT} -- $DAEMON_ARGS || return 2
+}
+
+do_stop() {
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
+ RETVAL=$?
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+do_systemd_start() {
+ exec $DAEMON $DAEMON_ARGS
+}
+
+case "$1" in
+start)
+ #init_is_upstart > /dev/null 2>&1 && exit 1
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case $? in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+stop)
+ #init_is_upstart > /dev/null 2>&1 && exit 0
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case $? in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+status)
+ status_of_proc -p "${PIDFILE}" "$DAEMON" "$NAME" && exit 0 || exit $?
+;;
+systemd-start)
+ do_systemd_start
+;;
+restart|force-reload)
+ #init_is_upstart > /dev/null 2>&1 && exit 1
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case $? in
+ 0|1)
+ do_start
+ case $? in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *) log_end_msg 1 ;; # Failed to stop
+ esac
+;;
+*)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2
+ exit 3
+;;
+esac
+
+exit 0
+
diff --git a/conductor/examples/distribution/ubuntu/init.d/conductor-controller b/conductor/examples/distribution/ubuntu/init.d/conductor-controller
new file mode 100644
index 0000000..f09d302
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/init.d/conductor-controller
@@ -0,0 +1,148 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: conductor-controller
+# Required-Start: $network $local_fs $remote_fs $syslog
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Conductor Controller
+# Description: Conductor Controller server
+### END INIT INFO
+
+# Author: Joe D'Andrea <jdandrea@research.att.com>
+# Based on work by Thomas Goirand <zigo@debian.org>
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Conductor Controller"
+PROJECT_NAME=conductor
+NAME=${PROJECT_NAME}-controller
+PYTHON_HOME=
+
+#!/bin/sh
+# The content after this line comes from openstack-pkg-tools
+# and has been automatically added to a .init.in script, which
+# contains only the descriptive part for the daemon. Everything
+# else is standardized as a single unique script.
+
+# Author: Joe D'Andrea <jdandrea@research.att.com>
+# Based on work by Thomas Goirand <zigo@debian.org>
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+
+if [ -z "${DAEMON}" ] ; then
+ if [ -d "${PYTHON_HOME}" ] ; then
+ DAEMON=${PYTHON_HOME}/bin/${NAME}
+ else
+ DAEMON=/usr/bin/${NAME}
+ fi
+fi
+PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid
+if [ -z "${SCRIPTNAME}" ] ; then
+ SCRIPTNAME=/etc/init.d/${NAME}
+fi
+if [ -z "${SYSTEM_USER}" ] ; then
+ SYSTEM_USER=${PROJECT_NAME}
+fi
+if [ -z "${SYSTEM_GROUP}" ] ; then
+ SYSTEM_GROUP=${PROJECT_NAME}
+fi
+if [ "${SYSTEM_USER}" != "root" ] ; then
+ STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}"
+fi
+if [ -z "${CONFIG_FILE}" ] ; then
+ CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf
+fi
+LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log
+if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then
+ DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}"
+fi
+
+# Exit if the package is not installed
+[ -x $DAEMON ] || exit 0
+
+# If run as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed
+if [ `whoami` = "root" ] ; then
+ for i in lock run log lib ; do
+ mkdir -p /var/$i/${PROJECT_NAME}
+ chown ${SYSTEM_USER} /var/$i/${PROJECT_NAME}
+ done
+fi
+
+# This defines init_is_upstart which we use later on (+ more...)
+. /lib/lsb/init-functions
+
+# Manage log options: logfile and/or syslog, depending on user's choosing
+[ -r /etc/default/openstack ] && . /etc/default/openstack
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
+[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE"
+
+do_start() {
+ start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
+ --test > /dev/null || return 1
+ start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
+ -- $DAEMON_ARGS || return 2
+}
+
+do_stop() {
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
+ RETVAL=$?
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+do_systemd_start() {
+ exec $DAEMON $DAEMON_ARGS
+}
+
+case "$1" in
+start)
+ #init_is_upstart > /dev/null 2>&1 && exit 1
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case $? in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+stop)
+ #init_is_upstart > /dev/null 2>&1 && exit 0
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case $? in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+status)
+ status_of_proc -p "${PIDFILE}" "$DAEMON" "$NAME" && exit 0 || exit $?
+;;
+systemd-start)
+ do_systemd_start
+;;
+restart|force-reload)
+ #init_is_upstart > /dev/null 2>&1 && exit 1
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case $? in
+ 0|1)
+ do_start
+ case $? in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *) log_end_msg 1 ;; # Failed to stop
+ esac
+;;
+*)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2
+ exit 3
+;;
+esac
+
+exit 0
+
diff --git a/conductor/examples/distribution/ubuntu/init.d/conductor-data b/conductor/examples/distribution/ubuntu/init.d/conductor-data
new file mode 100644
index 0000000..70b3e2f
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/init.d/conductor-data
@@ -0,0 +1,148 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: conductor-data
+# Required-Start: $network $local_fs $remote_fs $syslog
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Conductor Data
+# Description: Conductor Data server
+### END INIT INFO
+
+# Author: Joe D'Andrea <jdandrea@research.att.com>
+# Based on work by Thomas Goirand <zigo@debian.org>
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Conductor Data"
+PROJECT_NAME=conductor
+NAME=${PROJECT_NAME}-data
+PYTHON_HOME=
+
+#!/bin/sh
+# The content after this line comes from openstack-pkg-tools
+# and has been automatically added to a .init.in script, which
+# contains only the descriptive part for the daemon. Everything
+# else is standardized as a single unique script.
+
+# Author: Joe D'Andrea <jdandrea@research.att.com>
+# Based on work by Thomas Goirand <zigo@debian.org>
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+
+if [ -z "${DAEMON}" ] ; then
+ if [ -d "${PYTHON_HOME}" ] ; then
+ DAEMON=${PYTHON_HOME}/bin/${NAME}
+ else
+ DAEMON=/usr/bin/${NAME}
+ fi
+fi
+PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid
+if [ -z "${SCRIPTNAME}" ] ; then
+ SCRIPTNAME=/etc/init.d/${NAME}
+fi
+if [ -z "${SYSTEM_USER}" ] ; then
+ SYSTEM_USER=${PROJECT_NAME}
+fi
+if [ -z "${SYSTEM_GROUP}" ] ; then
+ SYSTEM_GROUP=${PROJECT_NAME}
+fi
+if [ "${SYSTEM_USER}" != "root" ] ; then
+ STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}"
+fi
+if [ -z "${CONFIG_FILE}" ] ; then
+ CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf
+fi
+LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log
+if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then
+ DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}"
+fi
+
+# Exit if the package is not installed
+[ -x $DAEMON ] || exit 0
+
+# If run as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed
+if [ `whoami` = "root" ] ; then
+ for i in lock run log lib ; do
+ mkdir -p /var/$i/${PROJECT_NAME}
+ chown ${SYSTEM_USER} /var/$i/${PROJECT_NAME}
+ done
+fi
+
+# This defines init_is_upstart which we use later on (+ more...)
+. /lib/lsb/init-functions
+
+# Manage log options: logfile and/or syslog, depending on user's choosing
+[ -r /etc/default/openstack ] && . /etc/default/openstack
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
+[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE"
+
+do_start() {
+ start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
+ --test > /dev/null || return 1
+ start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
+ -- $DAEMON_ARGS || return 2
+}
+
+do_stop() {
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
+ RETVAL=$?
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+do_systemd_start() {
+ exec $DAEMON $DAEMON_ARGS
+}
+
+case "$1" in
+start)
+ #init_is_upstart > /dev/null 2>&1 && exit 1
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case $? in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+stop)
+ #init_is_upstart > /dev/null 2>&1 && exit 0
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case $? in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+status)
+ status_of_proc -p "${PIDFILE}" "$DAEMON" "$NAME" && exit 0 || exit $?
+;;
+systemd-start)
+ do_systemd_start
+;;
+restart|force-reload)
+ #init_is_upstart > /dev/null 2>&1 && exit 1
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case $? in
+ 0|1)
+ do_start
+ case $? in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *) log_end_msg 1 ;; # Failed to stop
+ esac
+;;
+*)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2
+ exit 3
+;;
+esac
+
+exit 0
+
diff --git a/conductor/examples/distribution/ubuntu/init.d/conductor-reservation b/conductor/examples/distribution/ubuntu/init.d/conductor-reservation
new file mode 100644
index 0000000..6365b32
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/init.d/conductor-reservation
@@ -0,0 +1,148 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: conductor-reservation
+# Required-Start: $network $local_fs $remote_fs $syslog
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Conductor reservation
+# Description: Conductor reservation server
+### END INIT INFO
+
+# Author: Shankaranarayanan Puzhavakath Narayanan <snarayanan@research.att.com>
+# Based on work by Thomas Goirand <zigo@debian.org>
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Conductor Reservation"
+PROJECT_NAME=conductor
+NAME=${PROJECT_NAME}-reservation
+PYTHON_HOME=
+
+#!/bin/sh
+# The content after this line comes from openstack-pkg-tools
+# and has been automatically added to a .init.in script, which
+# contains only the descriptive part for the daemon. Everything
+# else is standardized as a single unique script.
+
+# Author: Shankaranarayanan Puzhavakath Narayanan <snarayanan@research.att.com>
+# Based on work by Thomas Goirand <zigo@debian.org>
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+
+if [ -z "${DAEMON}" ] ; then
+ if [ -d "${PYTHON_HOME}" ] ; then
+ DAEMON=${PYTHON_HOME}/bin/${NAME}
+ else
+ DAEMON=/usr/bin/${NAME}
+ fi
+fi
+PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid
+if [ -z "${SCRIPTNAME}" ] ; then
+ SCRIPTNAME=/etc/init.d/${NAME}
+fi
+if [ -z "${SYSTEM_USER}" ] ; then
+ SYSTEM_USER=${PROJECT_NAME}
+fi
+if [ -z "${SYSTEM_GROUP}" ] ; then
+ SYSTEM_GROUP=${PROJECT_NAME}
+fi
+if [ "${SYSTEM_USER}" != "root" ] ; then
+ STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}"
+fi
+if [ -z "${CONFIG_FILE}" ] ; then
+ CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf
+fi
+LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log
+if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then
+ DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}"
+fi
+
+# Exit if the package is not installed
+[ -x $DAEMON ] || exit 0
+
+# If run as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed
+if [ `whoami` = "root" ] ; then
+ for i in lock run log lib ; do
+ mkdir -p /var/$i/${PROJECT_NAME}
+ chown ${SYSTEM_USER} /var/$i/${PROJECT_NAME}
+ done
+fi
+
+# This defines init_is_upstart which we use later on (+ more...)
+. /lib/lsb/init-functions
+
+# Manage log options: logfile and/or syslog, depending on user's choosing
+[ -r /etc/default/openstack ] && . /etc/default/openstack
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
+[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE"
+
+do_start() {
+ start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
+ --test > /dev/null || return 1
+ start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
+ -- $DAEMON_ARGS || return 2
+}
+
+do_stop() {
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
+ RETVAL=$?
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+do_systemd_start() {
+ exec $DAEMON $DAEMON_ARGS
+}
+
+case "$1" in
+start)
+ #init_is_upstart > /dev/null 2>&1 && exit 1
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case $? in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+stop)
+ #init_is_upstart > /dev/null 2>&1 && exit 0
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case $? in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+status)
+ status_of_proc -p "${PIDFILE}" "$DAEMON" "$NAME" && exit 0 || exit $?
+;;
+systemd-start)
+ do_systemd_start
+;;
+restart|force-reload)
+ #init_is_upstart > /dev/null 2>&1 && exit 1
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case $? in
+ 0|1)
+ do_start
+ case $? in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *) log_end_msg 1 ;; # Failed to stop
+ esac
+;;
+*)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2
+ exit 3
+;;
+esac
+
+exit 0
+
diff --git a/conductor/examples/distribution/ubuntu/init.d/conductor-solver b/conductor/examples/distribution/ubuntu/init.d/conductor-solver
new file mode 100644
index 0000000..99cc98b
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/init.d/conductor-solver
@@ -0,0 +1,148 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: conductor-solver
+# Required-Start: $network $local_fs $remote_fs $syslog
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Conductor Solver
+# Description: Conductor Solver server
+### END INIT INFO
+
+# Author: Joe D'Andrea <jdandrea@research.att.com>
+# Based on work by Thomas Goirand <zigo@debian.org>
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Conductor Solver"
+PROJECT_NAME=conductor
+NAME=${PROJECT_NAME}-solver
+PYTHON_HOME=
+
+#!/bin/sh
+# The content after this line comes from openstack-pkg-tools
+# and has been automatically added to a .init.in script, which
+# contains only the descriptive part for the daemon. Everything
+# else is standardized as a single unique script.
+
+# Author: Joe D'Andrea <jdandrea@research.att.com>
+# Based on work by Thomas Goirand <zigo@debian.org>
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+
+if [ -z "${DAEMON}" ] ; then
+ if [ -d "${PYTHON_HOME}" ] ; then
+ DAEMON=${PYTHON_HOME}/bin/${NAME}
+ else
+ DAEMON=/usr/bin/${NAME}
+ fi
+fi
+PIDFILE=/var/run/${PROJECT_NAME}/${NAME}.pid
+if [ -z "${SCRIPTNAME}" ] ; then
+ SCRIPTNAME=/etc/init.d/${NAME}
+fi
+if [ -z "${SYSTEM_USER}" ] ; then
+ SYSTEM_USER=${PROJECT_NAME}
+fi
+if [ -z "${SYSTEM_GROUP}" ] ; then
+ SYSTEM_GROUP=${PROJECT_NAME}
+fi
+if [ "${SYSTEM_USER}" != "root" ] ; then
+ STARTDAEMON_CHUID="--chuid ${SYSTEM_USER}:${SYSTEM_GROUP}"
+fi
+if [ -z "${CONFIG_FILE}" ] ; then
+ CONFIG_FILE=/etc/${PROJECT_NAME}/${PROJECT_NAME}.conf
+fi
+LOGFILE=/var/log/${PROJECT_NAME}/${NAME}.log
+if [ -z "${NO_OPENSTACK_CONFIG_FILE_DAEMON_ARG}" ] ; then
+ DAEMON_ARGS="${DAEMON_ARGS} --config-file=${CONFIG_FILE}"
+fi
+
+# Exit if the package is not installed
+[ -x $DAEMON ] || exit 0
+
+# If run as root, create /var/lock/X, /var/run/X, /var/lib/X and /var/log/X as needed
+if [ `whoami` = "root" ] ; then
+ for i in lock run log lib ; do
+ mkdir -p /var/$i/${PROJECT_NAME}
+ chown ${SYSTEM_USER} /var/$i/${PROJECT_NAME}
+ done
+fi
+
+# This defines init_is_upstart which we use later on (+ more...)
+. /lib/lsb/init-functions
+
+# Manage log options: logfile and/or syslog, depending on user's choosing
+[ -r /etc/default/openstack ] && . /etc/default/openstack
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+[ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
+[ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=$LOGFILE"
+
+do_start() {
+ start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
+ --test > /dev/null || return 1
+ start-stop-daemon --start --quiet --background ${STARTDAEMON_CHUID} --make-pidfile --pidfile ${PIDFILE} --chdir /var/lib/${PROJECT_NAME} --startas $DAEMON \
+ -- $DAEMON_ARGS || return 2
+}
+
+do_stop() {
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
+ RETVAL=$?
+ rm -f $PIDFILE
+ return "$RETVAL"
+}
+
+do_systemd_start() {
+ exec $DAEMON $DAEMON_ARGS
+}
+
+case "$1" in
+start)
+ #init_is_upstart > /dev/null 2>&1 && exit 1
+ log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case $? in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+stop)
+ #init_is_upstart > /dev/null 2>&1 && exit 0
+ log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case $? in
+ 0|1) log_end_msg 0 ;;
+ 2) log_end_msg 1 ;;
+ esac
+;;
+status)
+ status_of_proc -p "${PIDFILE}" "$DAEMON" "$NAME" && exit 0 || exit $?
+;;
+systemd-start)
+ do_systemd_start
+;;
+restart|force-reload)
+ #init_is_upstart > /dev/null 2>&1 && exit 1
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case $? in
+ 0|1)
+ do_start
+ case $? in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *) log_end_msg 1 ;; # Failed to stop
+ esac
+;;
+*)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload|systemd-start}" >&2
+ exit 3
+;;
+esac
+
+exit 0
+
diff --git a/conductor/examples/distribution/ubuntu/init/conductor-api.conf b/conductor/examples/distribution/ubuntu/init/conductor-api.conf
new file mode 100644
index 0000000..f6f805e
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/init/conductor-api.conf
@@ -0,0 +1,37 @@
+description "Conductor API"
+author "Joe D'Andrea <jdandrea@research.att.com>"
+
+env PYTHON_HOME=
+env PORT=8091
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+chdir /var/run
+
+respawn
+limit nofile 65535 65535
+
+pre-start script
+ for i in lock run log lib ; do
+ mkdir -p /var/$i/conductor
+ chown conductor:conductor /var/$i/conductor
+ done
+end script
+
+script
+ DAEMON="/usr/bin/conductor-api"
+ [ -d "$PYTHON_HOME" ] && DAEMON="$PYTHON_HOME/bin/conductor-api"
+ [ -x "$DAEMON" ] || exit 0
+ DAEMON_ARGS=""
+ [ -r /etc/default/openstack ] && . /etc/default/openstack
+ [ -r /etc/default/$UPSTART_JOB ] && . /etc/default/$UPSTART_JOB
+ [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
+ [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/conductor/conductor-api.log"
+
+ exec start-stop-daemon --start --chdir /var/lib/conductor \
+ --chuid conductor:conductor --make-pidfile --pidfile /var/run/conductor/conductor-api.pid \
+ --exec $DAEMON -- --port $PORT --config-file=/etc/conductor/conductor.conf ${DAEMON_ARGS}
+end script
+
+
diff --git a/conductor/examples/distribution/ubuntu/init/conductor-controller.conf b/conductor/examples/distribution/ubuntu/init/conductor-controller.conf
new file mode 100644
index 0000000..9d44be2
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/init/conductor-controller.conf
@@ -0,0 +1,36 @@
+description "Conductor Controller"
+author "Joe D'Andrea <jdandrea@research.att.com>"
+
+env PYTHON_HOME=
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+chdir /var/run
+
+respawn
+limit nofile 65535 65535
+
+pre-start script
+ for i in lock run log lib ; do
+ mkdir -p /var/$i/conductor
+ chown conductor /var/$i/conductor
+ done
+end script
+
+script
+ DAEMON="/usr/bin/conductor-controller"
+ [ -d "$PYTHON_HOME" ] && DAEMON="$PYTHON_HOME/bin/conductor-controller"
+ [ -x "$DAEMON" ] || exit 0
+ DAEMON_ARGS=""
+ [ -r /etc/default/openstack ] && . /etc/default/openstack
+ [ -r /etc/default/$UPSTART_JOB ] && . /etc/default/$UPSTART_JOB
+ [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
+ [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/conductor/conductor-controller.log"
+
+ exec start-stop-daemon --start --chdir /var/lib/conductor \
+ --chuid conductor:conductor --make-pidfile --pidfile /var/run/conductor/conductor-controller.pid \
+ --exec $DAEMON -- --config-file=/etc/conductor/conductor.conf ${DAEMON_ARGS}
+end script
+
+
diff --git a/conductor/examples/distribution/ubuntu/init/conductor-data.conf b/conductor/examples/distribution/ubuntu/init/conductor-data.conf
new file mode 100644
index 0000000..643206d
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/init/conductor-data.conf
@@ -0,0 +1,36 @@
+description "Conductor Data"
+author "Joe D'Andrea <jdandrea@research.att.com>"
+
+env PYTHON_HOME=
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+chdir /var/run
+
+respawn
+limit nofile 65535 65535
+
+pre-start script
+ for i in lock run log lib ; do
+ mkdir -p /var/$i/conductor
+ chown conductor /var/$i/conductor
+ done
+end script
+
+script
+ DAEMON="/usr/bin/conductor-data"
+ [ -d "$PYTHON_HOME" ] && DAEMON="$PYTHON_HOME/bin/conductor-data"
+ [ -x "$DAEMON" ] || exit 0
+ DAEMON_ARGS=""
+ [ -r /etc/default/openstack ] && . /etc/default/openstack
+ [ -r /etc/default/$UPSTART_JOB ] && . /etc/default/$UPSTART_JOB
+ [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
+ [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/conductor/conductor-data.log"
+
+ exec start-stop-daemon --start --chdir /var/lib/conductor \
+ --chuid conductor:conductor --make-pidfile --pidfile /var/run/conductor/conductor-data.pid \
+ --exec $DAEMON -- --config-file=/etc/conductor/conductor.conf ${DAEMON_ARGS}
+end script
+
+
diff --git a/conductor/examples/distribution/ubuntu/init/conductor-reservation.conf b/conductor/examples/distribution/ubuntu/init/conductor-reservation.conf
new file mode 100644
index 0000000..0af5603
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/init/conductor-reservation.conf
@@ -0,0 +1,36 @@
+description "Conductor Reservation"
+author "Shankaranarayanan Puzhavakath Narayanan <snarayanan@research.att.com>"
+
+env PYTHON_HOME=
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+chdir /var/run
+
+respawn
+limit nofile 65535 65535
+
+pre-start script
+ for i in lock run log lib ; do
+ mkdir -p /var/$i/conductor
+ chown conductor /var/$i/conductor
+ done
+end script
+
+script
+ DAEMON="/usr/bin/conductor-reservation"
+ [ -d "$PYTHON_HOME" ] && DAEMON="$PYTHON_HOME/bin/conductor-reservation"
+ [ -x "$DAEMON" ] || exit 0
+ DAEMON_ARGS=""
+ [ -r /etc/default/openstack ] && . /etc/default/openstack
+ [ -r /etc/default/$UPSTART_JOB ] && . /etc/default/$UPSTART_JOB
+ [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
+ [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/conductor/conductor-reservation.log"
+
+ exec start-stop-daemon --start --chdir /var/lib/conductor \
+ --chuid conductor:conductor --make-pidfile --pidfile /var/run/conductor/conductor-reservation.pid \
+ --exec $DAEMON -- --config-file=/etc/conductor/conductor.conf ${DAEMON_ARGS}
+end script
+
+
diff --git a/conductor/examples/distribution/ubuntu/init/conductor-solver.conf b/conductor/examples/distribution/ubuntu/init/conductor-solver.conf
new file mode 100644
index 0000000..649c8c6
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/init/conductor-solver.conf
@@ -0,0 +1,36 @@
+description "Conductor Solver"
+author "Joe D'Andrea <jdandrea@research.att.com>"
+
+env PYTHON_HOME=
+
+start on runlevel [2345]
+stop on runlevel [!2345]
+
+chdir /var/run
+
+respawn
+limit nofile 65535 65535
+
+pre-start script
+ for i in lock run log lib ; do
+ mkdir -p /var/$i/conductor
+ chown conductor /var/$i/conductor
+ done
+end script
+
+script
+ DAEMON="/usr/bin/conductor-solver"
+ [ -d "$PYTHON_HOME" ] && DAEMON="$PYTHON_HOME/bin/conductor-solver"
+ [ -x "$DAEMON" ] || exit 0
+ DAEMON_ARGS=""
+ [ -r /etc/default/openstack ] && . /etc/default/openstack
+ [ -r /etc/default/$UPSTART_JOB ] && . /etc/default/$UPSTART_JOB
+ [ "x$USE_SYSLOG" = "xyes" ] && DAEMON_ARGS="$DAEMON_ARGS --use-syslog"
+ [ "x$USE_LOGFILE" != "xno" ] && DAEMON_ARGS="$DAEMON_ARGS --log-file=/var/log/conductor/conductor-solver.log"
+
+ exec start-stop-daemon --start --chdir /var/lib/conductor \
+ --chuid conductor:conductor --make-pidfile --pidfile /var/run/conductor/conductor-solver.pid \
+ --exec $DAEMON -- --config-file=/etc/conductor/conductor.conf ${DAEMON_ARGS}
+end script
+
+
diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-api b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-api
new file mode 100644
index 0000000..8599adf
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-api
@@ -0,0 +1,7 @@
+/var/log/conductor/conductor-api.log {
+ daily
+ missingok
+ compress
+ delaycompress
+ notifempty
+}
diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-common b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-common
new file mode 100644
index 0000000..6efb26d
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-common
@@ -0,0 +1,7 @@
+/var/log/conductor/*.log {
+ daily
+ missingok
+ compress
+ delaycompress
+ copytruncate
+}
diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-controller b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-controller
new file mode 100644
index 0000000..af03403
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-controller
@@ -0,0 +1,7 @@
+/var/log/conductor/conductor-controller.log {
+ daily
+ missingok
+ compress
+ delaycompress
+ notifempty
+}
diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-data b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-data
new file mode 100644
index 0000000..1e4dc75
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-data
@@ -0,0 +1,7 @@
+/var/log/conductor/conductor-data.log {
+ daily
+ missingok
+ compress
+ delaycompress
+ notifempty
+}
diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-reservation b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-reservation
new file mode 100644
index 0000000..648d3e5
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-reservation
@@ -0,0 +1,7 @@
+/var/log/conductor/conductor-reservation.log {
+ daily
+ missingok
+ compress
+ delaycompress
+ notifempty
+}
diff --git a/conductor/examples/distribution/ubuntu/logrotate.d/conductor-solver b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-solver
new file mode 100644
index 0000000..a433b9c
--- /dev/null
+++ b/conductor/examples/distribution/ubuntu/logrotate.d/conductor-solver
@@ -0,0 +1,7 @@
+/var/log/conductor/conductor-solver.log {
+ daily
+ missingok
+ compress
+ delaycompress
+ notifempty
+}
diff --git a/conductor/examples/nginx/conductor-api.upstart.conf b/conductor/examples/nginx/conductor-api.upstart.conf
new file mode 100644
index 0000000..fd8275f
--- /dev/null
+++ b/conductor/examples/nginx/conductor-api.upstart.conf
@@ -0,0 +1,19 @@
+# cat /etc/init/conductor-uwsgi.conf
+description "uWSGI server for conductor"
+
+start on runlevel [2345] # start on all runlevels.
+stop on runlevel [!2345] # stop when shutting down.
+
+respawn
+
+script
+ /opt/app/conductor/bin/uwsgi \
+ -s /tmp/uwsgi.sock \
+ --chmod-socket=777 \
+ --wsgi-file /etc/nginx/conductor.wsgi \
+ --callable application \
+ --set port=8091 \
+ --venv /opt/app/conductor/ \
+ --die-on-term \
+ --logto /var/log/conductor/conductor-api.log
+end script \ No newline at end of file
diff --git a/conductor/examples/nginx/nginx.conf b/conductor/examples/nginx/nginx.conf
new file mode 100644
index 0000000..90f78c9
--- /dev/null
+++ b/conductor/examples/nginx/nginx.conf
@@ -0,0 +1,15 @@
+# This is an example nginx configuration file for using the
+# conductor API through uWSGI. Include the "server" section
+# within the broader "http" configuration.
+
+http {
+ server {
+ listen 8091;
+ server_name CONDUCTOR_API_FQDN;
+
+ location / {
+ include uwsgi_params;
+ uwsgi_pass unix:/tmp/uwsgi.sock;
+ }
+ }
+}
diff --git a/conductor/examples/nginx/uwsgi_params b/conductor/examples/nginx/uwsgi_params
new file mode 100644
index 0000000..c7727cd
--- /dev/null
+++ b/conductor/examples/nginx/uwsgi_params
@@ -0,0 +1,13 @@
+uwsgi_param QUERY_STRING $query_string;
+uwsgi_param REQUEST_METHOD $request_method;
+uwsgi_param CONTENT_TYPE $content_type;
+uwsgi_param CONTENT_LENGTH $content_length;
+uwsgi_param REQUEST_URI $request_uri;
+uwsgi_param PATH_INFO $document_uri;
+uwsgi_param DOCUMENT_ROOT $document_root;
+uwsgi_param SERVER_PROTOCOL $server_protocol;
+uwsgi_param REMOTE_ADDR $remote_addr;
+uwsgi_param REMOTE_PORT $remote_port;
+uwsgi_param SERVER_ADDR $server_addr;
+uwsgi_param SERVER_PORT $server_port;
+uwsgi_param SERVER_NAME $server_name;
diff --git a/conductor/pylintrc b/conductor/pylintrc
new file mode 100644
index 0000000..52ae454
--- /dev/null
+++ b/conductor/pylintrc
@@ -0,0 +1,26 @@
+[Messages Control]
+# W0511: TODOs in code comments are fine.
+# W0142: *args and **kwargs are fine.
+# W0622: Redefining id is fine.
+disable-msg=W0511,W0142,W0622
+
+[Basic]
+# Variable names can be 1 to 31 characters long, with lowercase and underscores
+variable-rgx=[a-z_][a-z0-9_]{0,30}$
+
+# Argument names can be 2 to 31 characters long, with lowercase and underscores
+argument-rgx=[a-z_][a-z0-9_]{1,30}$
+
+# Method names should be at least 3 characters long
+# and be lowercased with underscores
+method-rgx=[a-z_][a-z0-9_]{2,50}$
+
+# Don't require docstrings on tests.
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
+
+# Exclude variable names that conflict with debugger
+bad-names=c
+[Design]
+max-public-methods=100
+min-public-methods=0
+max-args=6
diff --git a/conductor/requirements.txt b/conductor/requirements.txt
new file mode 100644
index 0000000..a0878b2
--- /dev/null
+++ b/conductor/requirements.txt
@@ -0,0 +1,25 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+cotyledon # Apache-2.0
+futurist>=0.11.0 # Apache-2.0
+lxml>=2.3 # BSD
+oslo.config>=3.9.0 # Apache-2.0
+oslo.i18n>=2.1.0 # Apache-2.0
+oslo.log>=1.14.0 # Apache-2.0
+# oslo.policy>=0.5.0 # Apache-2.0
+oslo.reports>=0.6.0 # Apache-2.0
+PasteDeploy>=1.5.0 # MIT
+pbr>=1.6 # Apache-2.0
+pecan>=1.1.1 # BSD
+pecan-notario<=0.0.3 # Alfredo Deza
+oslo.messaging>=5.2.0 # Apache-2.0
+oslo.middleware>=3.0.0 # Apache-2.0
+python-daemon>=2.1.1 # Apache-2.0
+pyOpenSSL>=0.14 # Apache-2.0
+PyYAML>=3.10.0 # MIT
+requests[security]!=2.9.0,>=2.8.1 # Apache-2.0
+six>=1.9.0 # MIT, also required by futurist
+stevedore>=1.9.0 # Apache-2.0, also required by oslo.config
+WebOb>=1.2.3 # MIT \ No newline at end of file
diff --git a/conductor/setup.cfg b/conductor/setup.cfg
new file mode 100644
index 0000000..b62c365
--- /dev/null
+++ b/conductor/setup.cfg
@@ -0,0 +1,71 @@
+[metadata]
+name = of-has
+summary = ONAP Homing Service
+description-file = README.rst
+author = AT&T
+author-email = jdandrea@research.att.com
+home-page = https://wiki.onap.org/pages/viewpage.action?pageId=16005528
+classifier =
+ Development Status :: 4 - Beta
+ Environment :: ONAP
+ Intended Audience :: Information Technology
+ Intended Audience :: System Administrators
+ License :: OSI Approved :: Apache Software License
+ Operating System :: POSIX :: Linux
+ Programming Language :: Python
+ Programming Language :: Python :: 2
+ Programming Language :: Python :: 2.7
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3.5
+keywords =
+ onap
+ homing
+ conductor
+
+[global]
+setup-hooks =
+ pbr.hooks.setup_hook
+
+[files]
+packages =
+ conductor
+data_files =
+ etc/conductor = etc/conductor/*
+# conductor_integrationtests
+#scripts =
+# bin/conductor-db-setup
+
+[entry_points]
+wsgi_scripts =
+ conductor-api = conductor.api.app:build_wsgi_app
+
+console_scripts =
+ conductor-controller = conductor.cmd.controller:main
+ conductor-data = conductor.cmd.data:main
+ conductor-solver = conductor.cmd.solver:main
+ conductor-reservation = conductor.cmd.reservation:main
+
+conductor.inventory_provider.plugin =
+ aai = conductor.data.plugins.inventory_provider.aai:AAI
+
+conductor.service_controller.plugin =
+ sdnc = conductor.data.plugins.service_controller.sdnc:SDNC
+
+oslo.config.opts =
+ conductor = conductor.opts:list_opts
+
+oslo.config.opts.defaults =
+ conductor = conductor.conf.defaults:set_cors_middleware_defaults
+
+#tempest.test_plugins =
+# conductor_tests = conductor_integrationtests.plugin:ConductorTempestPlugin
+
+#[build_sphinx]
+#all_files = 1
+#build-dir = doc/build
+#source-dir = doc/source
+
+[pbr]
+warnerrors = true
+autodoc_index_modules = true
+
diff --git a/conductor/setup.py b/conductor/setup.py
new file mode 100644
index 0000000..0c696ed
--- /dev/null
+++ b/conductor/setup.py
@@ -0,0 +1,34 @@
+# -*- encoding: utf-8 -*-
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+'''Setup'''
+
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
+try:
+ import multiprocessing # noqa # pylint: disable=W0611,C0411
+except ImportError:
+ pass
+
+setuptools.setup(
+ setup_requires=['pbr>=1.8'],
+ pbr=True)
diff --git a/conductor/test-requirements.txt b/conductor/test-requirements.txt
new file mode 100644
index 0000000..8eb4eb7
--- /dev/null
+++ b/conductor/test-requirements.txt
@@ -0,0 +1,20 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+
+# Hacking already pins down pep8, pyflakes and flake8
+hacking<0.11,>=0.10.0
+# bandit>=1.1.0 # Apache-2.0
+coverage>=3.6 # Apache-2.0
+fixtures>=3.0.0 # Apache-2.0/BSD
+kombu>=3.0.25 # BSD
+mock>=2.0 # BSD
+mox3>=0.7.0 # Apache-2.0
+oslotest>=1.10.0 # Apache-2.0
+psycopg2>=2.5 # LGPL/ZPL
+testrepository>=0.0.18 # Apache-2.0/BSD
+testscenarios>=0.4 # Apache-2.0/BSD
+testtools>=1.4.0 # MIT
+os-testr>=0.4.1 # Apache-2.0
+tempest>=11.0.0 # Apache-2.0
+pifpaf>=0.0.11 \ No newline at end of file
diff --git a/conductor/tools/README.md b/conductor/tools/README.md
new file mode 100644
index 0000000..cba4b77
--- /dev/null
+++ b/conductor/tools/README.md
@@ -0,0 +1 @@
+Files in this directory are general developer tools or examples of how to do certain activities.
diff --git a/conductor/tools/pretty_tox.sh b/conductor/tools/pretty_tox.sh
new file mode 100755
index 0000000..190b39d
--- /dev/null
+++ b/conductor/tools/pretty_tox.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+
+set -o pipefail
+
+TESTRARGS=$1
+
+# --until-failure is not compatible with --subunit see:
+#
+# https://bugs.launchpad.net/testrepository/+bug/1411804
+#
+# this work around exists until that is addressed
+if [[ "$TESTRARGS" =~ "until-failure" ]]; then
+ python setup.py testr --slowest --testr-args="$TESTRARGS"
+else
+ python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f
+fi
diff --git a/conductor/tox.ini b/conductor/tox.ini
new file mode 100644
index 0000000..35bea94
--- /dev/null
+++ b/conductor/tox.ini
@@ -0,0 +1,53 @@
+[tox]
+minversion = 1.6
+skipsdist = True
+# envlist = py35,py27,functional,pep8
+envlist = py27,pep8
+
+[testenv]
+deps = -r{toxinidir}/requirements.txt
+ -r{toxinidir}/test-requirements.txt
+install_command = pip install -U {opts} {packages}
+usedevelop = True
+setenv = VIRTUAL_ENV={envdir}
+ OS_TEST_PATH=conductor/tests/unit
+passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE
+commands =
+ {toxinidir}/tools/pretty_tox.sh "{posargs}"
+ oslo-config-generator --config-file=etc/conductor/conductor-config-generator.conf
+whitelist_externals = bash
+
+[testenv:functional]
+setenv = VIRTUAL_ENV={envdir}
+ OS_TEST_PATH=conductor/tests/functional/
+passenv = CONDUCTOR_*
+commands =
+ bash -x {toxinidir}/run-functional-tests.sh "{posargs}"
+
+[testenv:cover]
+setenv = OS_TEST_PATH=conductor/tests
+commands =
+ python setup.py testr --slowest --coverage --testr-args='^(?!conductor_integrationtests){posargs}'
+
+[testenv:pep8]
+deps = hacking<0.12,>=0.11.0
+commands =
+ flake8
+
+[testenv:genconfig]
+commands = oslo-config-generator --config-file=etc/conductor/conductor-config-generator.conf
+
+[testenv:venv]
+commands = {posargs}
+setenv = PYTHONHASHSEED=0
+
+[testenv:debug]
+commands = bash -x oslo_debug_helper {posargs}
+
+[flake8]
+ignore = H301,E401
+exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,install-guide
+show-source = True
+
+[hacking]
+import_exceptions = conductor.common.i18n