-rwxr-xr-x  __init__.py                                                  45
-rw-r--r--  adapters/__init__.py                                          0
-rw-r--r--  adapters/database/OracleDB.py                                32
-rw-r--r--  adapters/database/PostgresDB.py                              31
-rw-r--r--  adapters/database/VerticaDB.py                               55
-rw-r--r--  adapters/database/__init__.py                                 0
-rw-r--r--  adapters/dcae/__init__.py                                     0
-rwxr-xr-x  adapters/dcae/message_router.py                             100
-rw-r--r--  adapters/local_data/__init__.py                               0
-rw-r--r--  adapters/local_data/local_policies.py                        40
-rw-r--r--  adapters/request_parsing/__init__.py                          0
-rw-r--r--  adapters/request_parsing/placement.py                        33
-rwxr-xr-x  models/api/common.py                                         54
-rw-r--r--  models/api/placementRequest.py                              124
-rw-r--r--  models/api/placementResponse.py                              57
-rw-r--r--  optimizers/__init__.py                                        0
-rw-r--r--  optimizers/placementopt/__init__.py                           0
-rw-r--r--  optimizers/placementopt/conductor/__init__.py                 0
-rw-r--r--  optimizers/placementopt/conductor/api_builder.py            121
-rw-r--r--  optimizers/placementopt/conductor/conductor.py              186
-rw-r--r--  optimizers/placementopt/conductor/remote_opt_processor.py    79
-rw-r--r--  optimizers/placementopt/conductor/translation.py            215
22 files changed, 1172 insertions(+), 0 deletions(-)
diff --git a/__init__.py b/__init__.py
new file mode 100755
index 0000000..d0993ae
--- /dev/null
+++ b/__init__.py
@@ -0,0 +1,45 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""Core functions for OSDF Application, including flask app"""
+
+from jinja2 import Template
+
+
+end_point_auth_mapping = { # map a URL endpoint to auth group
+ "cmscheduler": "CMScheduler",
+ "placement": "Placement",
+}
+
+userid_suffix, passwd_suffix = "Username", "Password"
+auth_groups = set(end_point_auth_mapping.values())
+
+ERROR_TEMPLATE = Template("""
+{
+ "serviceException": {
+ "text": "{{ description }}"
+ }
+}
+""")
+
+ACCEPTED_MESSAGE_TEMPLATE = Template("""
+{
+ "requestId": "{{ request_id }}",
+ "text": "{{ description }}"
+}
+""")
diff --git a/adapters/__init__.py b/adapters/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/adapters/__init__.py
diff --git a/adapters/database/OracleDB.py b/adapters/database/OracleDB.py
new file mode 100644
index 0000000..655dd27
--- /dev/null
+++ b/adapters/database/OracleDB.py
@@ -0,0 +1,32 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import cx_Oracle
+
+from osdf.utils.programming_utils import MetaSingleton
+
+
+class OracleDB(metaclass=MetaSingleton):
+ conn, cur = None, None
+
+    def connect(self, host=None, sid=None, user=None, passwd=None, port=1521):  # 1521 is the Oracle listener default
+ if self.conn is None:
+ tns_info = cx_Oracle.makedsn(host=host, port=port, sid=sid)
+ self.conn = cx_Oracle.connect(user=user, password=passwd, dsn=tns_info, threaded=True)
+ self.cur = self.conn.cursor()
+ return self.conn, self.cur
diff --git a/adapters/database/PostgresDB.py b/adapters/database/PostgresDB.py
new file mode 100644
index 0000000..6689566
--- /dev/null
+++ b/adapters/database/PostgresDB.py
@@ -0,0 +1,31 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import psycopg2
+
+from osdf.utils.programming_utils import MetaSingleton
+
+
+class PostgresDB(metaclass=MetaSingleton):
+ conn, cur = None, None
+
+ def connect(self, host=None, db=None, user=None, passwd=None, port=5432):
+ if self.conn is None:
+ self.conn = psycopg2.connect(host=host, port=port, user=user, password=passwd, database=db)
+ self.cur = self.conn.cursor()
+ return self.conn, self.cur
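
A usage sketch for the singleton database adapters (illustrative; host and credentials are placeholders, and the import path assumes the package root is "osdf"):

    from osdf.adapters.database.PostgresDB import PostgresDB

    # MetaSingleton makes every PostgresDB() call return the same instance,
    # so the connection and cursor are created once and then reused.
    conn, cur = PostgresDB().connect(host="pg-host", db="osdf", user="osdf", passwd="***")
    cur.execute("SELECT 1")
    print(cur.fetchone())
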
diff --git a/adapters/database/VerticaDB.py b/adapters/database/VerticaDB.py
new file mode 100644
index 0000000..ad961d7
--- /dev/null
+++ b/adapters/database/VerticaDB.py
@@ -0,0 +1,55 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import jaydebeapi
+import sqlalchemy.pool as pool
+
+from jaydebeapi import _DEFAULT_CONVERTERS, _java_to_py
+from osdf.utils.programming_utils import MetaSingleton
+from osdf.config.base import osdf_config
+
+_DEFAULT_CONVERTERS.update({'BIGINT': _java_to_py('longValue')})
+
+
+class VerticaDB(metaclass=MetaSingleton):
+ connection_pool = None
+
+ def get_connection(self):
+ p = self.get_config_params()
+ c = jaydebeapi.connect(
+ 'com.vertica.jdbc.Driver',
+ 'jdbc:vertica://{}:{}/{}'.format(p['host'], p['port'], p['db']),
+ {'user': p['user'], 'password': p['passwd'], 'CHARSET': 'UTF8'},
+ jars=[p['db_driver']]
+ )
+ return c
+
+ def get_config_params(self):
+ config = osdf_config["deployment"]
+ host, port, db = config["verticaHost"], config["verticaPort"], config.get("verticaDB")
+ user, passwd = config["verticaUsername"], config["verticaPassword"]
+ jar_path = osdf_config['core']['osdf_system']['vertica_jar']
+ params = dict(host=host, db=db, user=user, passwd=passwd, port=port, db_driver=jar_path)
+ return params
+
+ def connect(self):
+ if self.connection_pool is None:
+ self.connection_pool = pool.QueuePool(self.get_connection, max_overflow=10, pool_size=5, recycle=600)
+ conn = self.connection_pool.connect()
+ cursor = conn.cursor()
+ return conn, cursor
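
Unlike the Oracle and Postgres adapters, the Vertica adapter hands out pooled JDBC connections; a sketch of the intended call pattern (illustrative; the table name is a placeholder):

    from osdf.adapters.database.VerticaDB import VerticaDB

    # Each connect() checks a connection out of the SQLAlchemy QueuePool;
    # closing it returns the connection to the pool instead of dropping it.
    conn, cursor = VerticaDB().connect()
    cursor.execute("SELECT COUNT(*) FROM placement_metrics")
    print(cursor.fetchall())
    conn.close()
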
diff --git a/adapters/database/__init__.py b/adapters/database/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/adapters/database/__init__.py
diff --git a/adapters/dcae/__init__.py b/adapters/dcae/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/adapters/dcae/__init__.py
diff --git a/adapters/dcae/message_router.py b/adapters/dcae/message_router.py
new file mode 100755
index 0000000..e495331
--- /dev/null
+++ b/adapters/dcae/message_router.py
@@ -0,0 +1,100 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import requests
+from osdf.utils.data_types import list_like
+from osdf.operation.exceptions import MessageBusConfigurationException
+
+
+class MessageRouterClient(object):
+ def __init__(self,
+ dmaap_url=None,
+ mr_host_base_urls=None,
+ topic=None,
+ consumer_group=None, consumer_id=None,
+ timeout_ms=15000, fetch_limit=1000,
+ userid=None, passwd=None):
+ """
+        :param dmaap_url: full DMaaP topic URL (base_url + events/topic_name)
+        (e.g. https://dcae-msrt-ftl.homer.att.com:3905/events/com.att.dcae.dmaap.FTL.SNIRO-CM-SCHEDULER-RESPONSE)
+        :param mr_host_base_urls: list of UEB base URLs (protocol, host and port), used when dmaap_url is not given
+        (e.g. https://dcae-msrt-ftl.homer.att.com:3905/)
+        :param topic: UEB topic name, appended to each base URL as events/<topic> (required in UEB mode)
+ :param consumer_group: DMaaP/UEB consumer group (unique for each subscriber; required for GET)
+ :param consumer_id: DMaaP/UEB consumer ID (unique for each thread/process for a subscriber; required for GET)
+ :param timeout_ms: (optional, default 15 seconds or 15,000 ms) server-side timeout for GET request
+ :param fetch_limit: (optional, default 1000 messages per request for GET), ignored for "POST"
+ :param userid: (optional, userid for HTTP basic authentication)
+ :param passwd: (optional, password for HTTP basic authentication)
+ """
+ mr_error = MessageBusConfigurationException
+ if dmaap_url is None: # definitely not DMaaP, so use UEB mode
+ self.is_dmaap = False
+ if not (mr_host_base_urls and list_like(mr_host_base_urls)):
+ raise mr_error("Not a DMaaP or UEB configuration")
+ if not topic:
+ raise mr_error("Invalid topic: '{}'",format(topic))
+ self.topic_urls = ["{}/events/{}".format(base_url, topic) for base_url in mr_host_base_urls]
+ else:
+ self.is_dmaap = True
+ self.topic_urls = [dmaap_url]
+
+ self.timeout_ms = timeout_ms
+ self.fetch_limit = fetch_limit
+ self.auth = (userid, passwd) if userid and passwd else None
+ self.consumer_group = consumer_group
+ self.consumer_id = consumer_id
+
+ def get(self, outputjson=True):
+ """Fetch messages from message router (DMaaP or UEB)
+        :param outputjson: (optional) whether the response should be parsed and returned as JSON
+ :return: response as a json object (if outputjson is True) or as a string
+ """
+ url_fmt = "{topic_url}/{cgroup}/{cid}?timeout={timeout_ms}&limit={limit}"
+ urls = [url_fmt.format(topic_url=x, timeout_ms=self.timeout_ms, limit=self.fetch_limit,
+ cgroup=self.consumer_group, cid=self.consumer_id) for x in self.topic_urls]
+ for url in urls[:-1]:
+ try:
+ return self.http_request(method='GET', url=url, outputjson=outputjson)
+            except Exception:  # try the next URL before giving up
+ pass
+ return self.http_request(method='GET', url=urls[-1], outputjson=outputjson)
+
+ def post(self, msg, inputjson=True):
+ for url in self.topic_urls[:-1]:
+ try:
+ return self.http_request(method='POST', url=url, inputjson=inputjson, msg=msg)
+            except Exception:  # try the next topic URL before giving up
+ pass
+ return self.http_request(method='POST', url=self.topic_urls[-1], inputjson=inputjson, msg=msg)
+
+ def http_request(self, url, method, inputjson=True, outputjson=True, msg=None, **kwargs):
+ """
+ Perform the actual URL request (GET or POST), and do error handling
+ :param url: full URL (including topic, limit, timeout, etc.)
+ :param method: GET or POST
+ :param inputjson: Specify whether input is in json format (valid only for POST)
+ :param outputjson: Is response expected in a json format
+ :param msg: content to be posted (valid only for POST)
+        :return: response as a json object (if outputjson is True or method is POST), else raw content; raises on error
+ """
+        if method == "POST" and msg is not None:  # requests.request() would otherwise ignore "msg" and "inputjson"
+            kwargs["json" if inputjson else "data"] = msg
+        res = requests.request(url=url, method=method, auth=self.auth, **kwargs)
+ if res.status_code == requests.codes.ok:
+ return res.json() if outputjson or method == "POST" else res.content
+ else:
+ raise Exception("HTTP Response Error: code {}; headers:{}, content: {}".format(
+ res.status_code, res.headers, res.content))
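
A usage sketch for MessageRouterClient in UEB mode (illustrative; hosts, topic, consumer group/id, and credentials are placeholders):

    from osdf.adapters.dcae.message_router import MessageRouterClient

    mr = MessageRouterClient(mr_host_base_urls=["https://ueb-host-1:3905", "https://ueb-host-2:3905"],
                             topic="SNIRO-PLACEMENT-RESPONSE",
                             consumer_group="osdf-cg", consumer_id="osdf-1",
                             userid="user", passwd="pass")
    mr.post({"status": "accepted"})     # publish to the topic; failed hosts are skipped until the last one
    messages = mr.get(outputjson=True)  # poll for new messages for this consumer group/id
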
diff --git a/adapters/local_data/__init__.py b/adapters/local_data/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/adapters/local_data/__init__.py
diff --git a/adapters/local_data/local_policies.py b/adapters/local_data/local_policies.py
new file mode 100644
index 0000000..c63ae5a
--- /dev/null
+++ b/adapters/local_data/local_policies.py
@@ -0,0 +1,40 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import json
+import os
+
+
+def get_local_policies(local_policy_folder, local_policy_list, policy_id_list=None):
+ """
+ Get policies from a local file system.
+ Required for the following scenarios:
+ (a) doing work-arounds (e.g. if we are asked to drop some policies for testing purposes)
+ (b) work-arounds when policy platform is giving issues (e.g. if dev/IST policies are wiped out in an upgrade)
+ :param local_policy_folder: where the policy files are present
+ :param local_policy_list: list of local policies
+ :param policy_id_list: list of policies to get (if unspecified or None, get all)
+    :return: list of policy objects read from the matching local files
+ """
+ policies = []
+ for fname in local_policy_list: # ugly removal of .json from file name
+ if policy_id_list and fname[:-5] not in policy_id_list:
+ continue
+ with open(os.path.join(local_policy_folder, fname)) as fid:
+ policies.append(json.load(fid))
+ return policies
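
A usage sketch for get_local_policies (illustrative; the folder and file names are placeholders):

    from osdf.adapters.local_data.local_policies import get_local_policies

    # Load only the vnfPolicy_vPGN policy; passing policy_id_list=None would load every listed file.
    policies = get_local_policies("config/local_policies",
                                  ["vnfPolicy_vPGN.json", "distancePolicy_vFW.json"],
                                  policy_id_list=["vnfPolicy_vPGN"])
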
diff --git a/adapters/request_parsing/__init__.py b/adapters/request_parsing/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/adapters/request_parsing/__init__.py
diff --git a/adapters/request_parsing/placement.py b/adapters/request_parsing/placement.py
new file mode 100644
index 0000000..d7a6575
--- /dev/null
+++ b/adapters/request_parsing/placement.py
@@ -0,0 +1,33 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import copy
+import json
+from osdf.utils.programming_utils import list_flatten, dot_notation
+
+
+def json_path_after_expansion(req_json, reference):
+ """
+ Get the child node(s) from the dot-notation [reference] and parent [req_json].
+ For placement and other requests, there are encoded JSONs inside the request or policy,
+ so we need to expand it and then do a search over the parent plus expanded JSON.
+ """
+ req_json_copy = copy.deepcopy(req_json) # since we expand the JSON in place, we work on a copy
+ req_json_copy['placementInfo']['orderInfo'] = json.loads(req_json_copy['placementInfo']['orderInfo'])
+ info = dot_notation(req_json_copy, reference)
+ return list_flatten(info) if isinstance(info, list) else info
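
A sketch of how json_path_after_expansion resolves a dot-notation reference through the encoded orderInfo string (illustrative request fragment; the exact reference syntax is defined by osdf.utils.programming_utils.dot_notation):

    from osdf.adapters.request_parsing.placement import json_path_after_expansion

    req = {"placementInfo": {"orderInfo": '{"requestParameters": {"lcpCloudRegionId": "DPA1"}}'}}
    # orderInfo is stored as an encoded JSON string, so it is decoded before the path is walked
    region = json_path_after_expansion(req, "placementInfo.orderInfo.requestParameters.lcpCloudRegionId")
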
diff --git a/models/api/common.py b/models/api/common.py
new file mode 100755
index 0000000..0d2d0eb
--- /dev/null
+++ b/models/api/common.py
@@ -0,0 +1,54 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import datetime
+from pprint import pformat
+
+from dateutil.parser import parse
+from schematics.exceptions import ConversionError
+from schematics.models import Model
+from schematics.types import DateTimeType
+
+
+class OSDFModel(Model):
+ """Extends generic model with a couple of extra methods"""
+ def __str__(self):
+ """Return values of object's attributes -- excluding hidden or callable ones"""
+ def _str_format(x):
+ """Coerce as string for some special objects"""
+ return str(x) if isinstance(x, datetime.datetime) else x
+
+ z1 = dict((x, getattr(self, x)) for x in dir(self)
+ if not x.startswith("_") and not callable(getattr(self, x)))
+ z1 = dict((x, _str_format(y)) for x, y in z1.items())
+ return pformat(z1, depth=4, indent=2, width=1000)
+
+ def __repr__(self):
+ """Return values of object's attributes -- excluding hidden or callable ones"""
+ return self.__str__()
+
+
+class CustomISODateType(DateTimeType):
+ """Schematics doesn't support full ISO, so we use custom one"""
+ def to_native(self, value, context=None):
+ if isinstance(value, datetime.datetime):
+ return value
+ try:
+ return parse(value)
+        except Exception:
+ raise ConversionError(u'Invalid timestamp {}'.format(value))
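
A small sketch of how OSDFModel and CustomISODateType plug into a schematics model (the ScheduleInfo model below is hypothetical, for illustration only):

    from schematics.types import StringType
    from osdf.models.api.common import CustomISODateType, OSDFModel

    class ScheduleInfo(OSDFModel):
        scheduleId = StringType(required=True)
        startTime = CustomISODateType()

    info = ScheduleInfo({"scheduleId": "sched-1", "startTime": "2017-06-01T14:30:00Z"})
    info.validate()  # a timestamp that cannot be parsed is rejected during conversion/validation
    print(info)      # __str__ pretty-prints the visible, non-callable attributes
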
diff --git a/models/api/placementRequest.py b/models/api/placementRequest.py
new file mode 100644
index 0000000..73eac75
--- /dev/null
+++ b/models/api/placementRequest.py
@@ -0,0 +1,124 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from .common import OSDFModel
+from schematics.types import StringType, URLType, IntType, FloatType
+from schematics.types.compound import ModelType, ListType
+
+
+class RequestInfo(OSDFModel):
+ """Info for northbound request from client such as SO"""
+ transactionId = StringType(required=True)
+ requestId = StringType(required=True)
+ callbackUrl = URLType(required=True)
+ sourceId = StringType(required=True)
+ optimizer = ListType(StringType())
+ numSolutions = IntType()
+ timeout = IntType()
+ requestType = StringType()
+
+
+class CandidateInfo(OSDFModel):
+ """Preferred candidate for a resource (sent as part of a request from client)"""
+ candidateType = StringType(required=True)
+ candidates = ListType(StringType(required=True))
+
+
+class ResourceModelInfo(OSDFModel):
+ """Model information for a specific resource"""
+ modelCustomizationId = StringType(required=True)
+ modelInvariantId = StringType(required=True)
+ modelName = StringType()
+ modelVersion = StringType()
+ modelVersionId = StringType()
+ modelType = StringType()
+ operationalStatus = StringType()
+
+
+class ExistingLicenseInfo(OSDFModel):
+ entitlementPoolUUID = ListType(StringType())
+ licenseKeyGroupUUID = ListType(StringType())
+
+
+class LicenseDemand(OSDFModel):
+ resourceInstanceType = StringType(required=True)
+ serviceResourceId = StringType(required=True)
+ resourceModuleName = StringType(required=True)
+ resourceModelInfo = ModelType(ResourceModelInfo)
+ existingLicense = ModelType(ExistingLicenseInfo)
+
+
+class PlacementDemand(OSDFModel):
+ resourceInstanceType = StringType(required=True)
+ serviceResourceId = StringType(required=True)
+ resourceModuleName = StringType(required=True)
+ exclusionCandidateInfo = ListType(ModelType(CandidateInfo))
+ requiredCandidateInfo = ListType(ModelType(CandidateInfo))
+ resourceModelInfo = ModelType(ResourceModelInfo)
+ tenantId = StringType()
+ tenantName = StringType()
+
+
+class ExistingPlacementInfo(OSDFModel):
+ serviceInstanceId = StringType(required=True)
+
+
+class DemandInfo(OSDFModel):
+ """Requested resources (sent as part of a request from client)"""
+ placementDemand = ListType(ModelType(PlacementDemand))
+ licenseDemand = ListType(ModelType(LicenseDemand))
+
+
+class SubscriberInfo(OSDFModel):
+ """Details on the customer that subscribes to the VNFs"""
+ globalSubscriberId = StringType(required=True)
+ subscriberName = StringType()
+ subscriberCommonSiteId = StringType()
+
+
+class ServiceModelInfo(OSDFModel):
+ """ASDC Service model information"""
+ modelType = StringType(required=True)
+ modelInvariantId = StringType(required=True)
+ modelVersionId = StringType(required=True)
+ modelName = StringType(required=True)
+ modelVersion = StringType(required=True)
+
+
+class Location(OSDFModel):
+ latitude = FloatType(required=True)
+ longitude = FloatType(required=True)
+
+
+class PlacementInfo(OSDFModel):
+ """Information specific to placement optimization"""
+ serviceModelInfo = ModelType(ServiceModelInfo)
+ subscriberInfo = ModelType(SubscriberInfo)
+ demandInfo = ModelType(DemandInfo, required=True)
+ orderInfo = StringType()
+ policyId = ListType(StringType())
+ serviceInstanceId = StringType()
+ existingPlacement = ModelType(ExistingPlacementInfo)
+ location = ModelType(Location)
+ serviceType = StringType()
+
+
+class PlacementAPI(OSDFModel):
+ """Request for placement optimization (specific to optimization and additional metadata"""
+ requestInfo = ModelType(RequestInfo, required=True)
+ placementInfo = ModelType(PlacementInfo, required=True)
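
A validation sketch for an incoming placement request using the models above (illustrative payload trimmed to the required fields):

    from osdf.models.api.placementRequest import PlacementAPI

    request_json = {
        "requestInfo": {"transactionId": "t-1", "requestId": "r-1",
                        "callbackUrl": "http://so.example.com/callback", "sourceId": "SO"},
        "placementInfo": {"demandInfo": {"placementDemand": [
            {"resourceInstanceType": "VNF", "serviceResourceId": "sr-1", "resourceModuleName": "vPGN"}
        ]}}
    }
    PlacementAPI(request_json).validate()  # raises a schematics validation error if required fields are missing
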
diff --git a/models/api/placementResponse.py b/models/api/placementResponse.py
new file mode 100644
index 0000000..e9746d6
--- /dev/null
+++ b/models/api/placementResponse.py
@@ -0,0 +1,57 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from .common import OSDFModel
+from schematics.types import StringType
+from schematics.types.compound import ModelType, ListType
+
+
+# TODO: update osdf.models
+
+class LicenseSolution(OSDFModel):
+ serviceResourceId = StringType(required=True)
+ resourceModuleName = StringType(required=True)
+ entitlementPoolList = ListType(StringType(required=True))
+ licenseKeyGroupList = ListType(StringType(required=True))
+
+
+class AssignmentInfo(OSDFModel):
+ variableName = StringType(required=True)
+ variableValue = StringType(required=True)
+
+
+class PlacementSolution(OSDFModel):
+ serviceResourceId = StringType(required=True)
+ resourceModuleName = StringType(required=True)
+ inventoryType = StringType(required=True)
+ serviceInstanceId = StringType()
+ cloudRegionId = StringType()
+ assignmentInfo = ListType(ModelType(AssignmentInfo))
+
+
+class SolutionInfo(OSDFModel):
+ placement = ListType(ModelType(PlacementSolution), min_size=1)
+ license = ListType(ModelType(LicenseSolution), min_size=1)
+
+
+class PlacementResponse(OSDFModel):
+ transactionId = StringType(required=True)
+ requestId = StringType(required=True)
+ requestState = StringType(required=True)
+ statusMessage = StringType(required=True)
+ solutionInfo = ModelType(SolutionInfo)
diff --git a/optimizers/__init__.py b/optimizers/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/optimizers/__init__.py
diff --git a/optimizers/placementopt/__init__.py b/optimizers/placementopt/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/optimizers/placementopt/__init__.py
diff --git a/optimizers/placementopt/conductor/__init__.py b/optimizers/placementopt/conductor/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/optimizers/placementopt/conductor/__init__.py
diff --git a/optimizers/placementopt/conductor/api_builder.py b/optimizers/placementopt/conductor/api_builder.py
new file mode 100644
index 0000000..c0281fe
--- /dev/null
+++ b/optimizers/placementopt/conductor/api_builder.py
@@ -0,0 +1,121 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import copy
+import json
+from osdf.utils import data_mapping
+from jinja2 import Template
+from osdf.utils.programming_utils import list_flatten, dot_notation
+import osdf.optimizers.placementopt.conductor.translation as tr
+from osdf.adapters.policy.utils import group_policies
+
+
+def conductor_api_builder(request_json, flat_policies: list, local_config, prov_status,
+ template="templates/conductor_interface.json"):
+ """Build a SNIRO southbound API call for Conductor/Placement optimization
+ :param request_json: parameter data received from a client
+ :param flat_policies: policy data received from the policy platform (flat policies)
+ :param template: template to generate southbound API call to conductor
+ :param local_config: local configuration file with pointers for the service specific information
+ :param prov_status: provStatus retrieved from Subscriber policy
+ :return: json to be sent to Conductor/placement optimization
+ """
+ templ = Template(open(template).read())
+ gp = group_policies(flat_policies)
+ demand_vnf_name_list = []
+
+ for placementDemand in request_json['placementInfo']['demandInfo']['placementDemand']:
+ demand_vnf_name_list.append(placementDemand['resourceModuleName'])
+
+ demand_list = tr.gen_demands(request_json['placementInfo']['demandInfo'], gp['vnfPolicy'])
+ attribute_policy_list = tr.gen_attribute_policy(demand_vnf_name_list, gp['attribute'])
+ distance_to_location_policy_list = tr.gen_distance_to_location_policy(
+ demand_vnf_name_list, gp['distance_to_location'])
+ inventory_policy_list = tr.gen_inventory_group_policy(demand_vnf_name_list, gp['inventory_group'])
+ resource_instance_policy_list = tr.gen_resource_instance_policy(
+ demand_vnf_name_list, gp['instance_fit'])
+ resource_region_policy_list = tr.gen_resource_region_policy(demand_vnf_name_list, gp['region_fit'])
+ zone_policy_list = tr.gen_zone_policy(demand_vnf_name_list, gp['zone'])
+ optimization_policy_list = tr.gen_optimization_policy(demand_vnf_name_list, gp['placementOptimization'])
+ reservation_policy_list = tr.gen_reservation_policy(demand_vnf_name_list, gp['instance_reservation'])
+ conductor_policies = [attribute_policy_list, distance_to_location_policy_list, inventory_policy_list,
+ resource_instance_policy_list, resource_region_policy_list, zone_policy_list]
+ filtered_policies = [x for x in conductor_policies if len(x) > 0]
+ policy_groups = list_flatten(filtered_policies)
+ reservation_policies = [x for x in reservation_policy_list if len(x) > 0]
+ reservation_groups = list_flatten(reservation_policies)
+ req_info = request_json['requestInfo']
+ model_name = request_json['placementInfo']['serviceModelInfo']['modelName']
+ service_type = data_mapping.get_service_type(model_name)
+ service_info = local_config.get('service_info', {}).get(service_type, {})
+ if 'orderInfo' in request_json["placementInfo"]:
+ order_info = json.loads(request_json["placementInfo"]["orderInfo"])
+ request_type = req_info.get('requestType', None)
+ subs_com_site_id = ""
+ if 'subscriberInfo' in request_json['placementInfo']:
+ subs_com_site_id = request_json['placementInfo']['subscriberInfo'].get('subscriberCommonSiteId', "")
+ if service_type == 'vCPE':
+ data_mapping.normalize_user_params(order_info)
+ rendered_req = templ.render(
+ requestType=request_type,
+ chosenComplex=subs_com_site_id,
+ demand_list=demand_list,
+ policy_groups=policy_groups,
+ optimization_policies=optimization_policy_list,
+ name=req_info['requestId'],
+ timeout=req_info['timeout'],
+ limit=req_info['numSolutions'],
+ serviceType=service_type,
+ serviceInstance=request_json['placementInfo']['serviceInstanceId'],
+            provStatus=prov_status,
+ chosenRegion=order_info['requestParameters']['lcpCloudRegionId'],
+ json=json)
+ elif service_type == 'UNKNOWN':
+ rendered_req = templ.render(
+ requestType=request_type,
+ chosenComplex=subs_com_site_id,
+ demand_list=demand_list,
+ policy_groups=policy_groups,
+ reservation_groups=reservation_groups,
+ optimization_policies=optimization_policy_list,
+ name=req_info['requestId'],
+ timeout=req_info['timeout'],
+ limit=req_info['numSolutions'],
+ serviceType=service_type,
+ serviceInstance=request_json['placementInfo']['serviceInstanceId'],
+            provStatus=prov_status,
+ # process order data
+ bandwidth=dot_notation(order_info, service_info['bandwidth']),
+ bandwidth_unit=dot_notation(order_info, service_info['bandwidth_units']),
+ json=json)
+ json_payload = json.dumps(json.loads(rendered_req)) # need this because template's JSON is ugly!
+ return json_payload
+
+
+def retrieve_node(req_json, reference):
+ """
+ Get the child node(s) from the dot-notation [reference] and parent [req_json].
+ For placement and other requests, there are encoded JSONs inside the request or policy,
+ so we need to expand it and then do a search over the parent plus expanded JSON.
+ """
+ req_json_copy = copy.deepcopy(req_json) # since we expand the JSON in place, we work on a copy
+ if 'orderInfo' in req_json_copy['placementInfo']:
+ req_json_copy['placementInfo']['orderInfo'] = json.loads(req_json_copy['placementInfo']['orderInfo'])
+ info = dot_notation(req_json_copy, reference)
+ return list_flatten(info) if isinstance(info, list) else info
+
diff --git a/optimizers/placementopt/conductor/conductor.py b/optimizers/placementopt/conductor/conductor.py
new file mode 100644
index 0000000..bdc7f17
--- /dev/null
+++ b/optimizers/placementopt/conductor/conductor.py
@@ -0,0 +1,186 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""
+This application generates conductor API calls using the information received from SO and Policy platform.
+"""
+
+import json
+import time
+
+from jinja2 import Template
+from requests import RequestException
+
+from osdf.logging.osdf_logging import debug_log
+from osdf.optimizers.placementopt.conductor.api_builder import conductor_api_builder
+from osdf.utils.interfaces import RestClient
+from osdf.operation.exceptions import BusinessException
+
+
+def request(req_object, osdf_config, grouped_policies, prov_status):
+ """
+ Process a placement request from a Client (build Conductor API call, make the call, return result)
+ :param req_object: Request parameters from the client
+ :param osdf_config: Configuration specific to SNIRO application (core + deployment)
+ :param grouped_policies: policies related to placement (fetched based on request, and grouped by policy type)
+ :param prov_status: provStatus retrieved from Subscriber policy
+    :return: response from Conductor (accounting for redirects from the Conductor service)
+ """
+ config = osdf_config.deployment
+ local_config = osdf_config.core
+ uid, passwd = config['conductorUsername'], config['conductorPassword']
+ conductor_url = config['conductorUrl']
+ req_id = req_object['requestInfo']['requestId']
+ transaction_id = req_object['requestInfo']['transactionId']
+ headers = dict(transaction_id=transaction_id)
+
+ max_retries = config.get('conductorMaxRetries', 30)
+ ping_wait_time = config.get('conductorPingWaitTime', 60)
+
+ rc = RestClient(userid=uid, passwd=passwd, method="GET", log_func=debug_log.debug, headers=headers)
+ conductor_req_json_str = conductor_api_builder(req_object, grouped_policies, local_config, prov_status)
+ conductor_req_json = json.loads(conductor_req_json_str)
+
+ debug_log.debug("Sending first Conductor request for request_id {}".format(req_id))
+ resp, raw_resp = initial_request_to_conductor(rc, conductor_url, conductor_req_json)
+ # Very crude way of keeping track of time.
+ # We are not counting initial request time, first call back, or time for HTTP request
+ total_time, ctr = 0, 2
+ client_timeout = req_object['requestInfo']['timeout']
+ configured_timeout = max_retries * ping_wait_time
+ max_timeout = min(client_timeout, configured_timeout)
+
+ while True: # keep requesting conductor till we get a result or we run out of time
+ if resp is not None:
+ if resp["plans"][0].get("status") in ["error"]:
+ raise RequestException(response=raw_resp, request=raw_resp.request)
+
+ if resp["plans"][0].get("status") in ["done", "not found"]:
+ if resp["plans"][0].get("recommendations"):
+ return conductor_response_processor(resp, raw_resp, req_id)
+ else: # "solved" but no solutions found
+ return conductor_no_solution_processor(resp, raw_resp, req_id)
+ new_url = resp['plans'][0]['links'][0][0]['href'] # TODO: check why a list of lists
+
+ if total_time >= max_timeout:
+ raise BusinessException("Conductor could not provide a solution within {} seconds, this transaction is timing out".format(max_timeout))
+ time.sleep(ping_wait_time)
+ ctr += 1
+ debug_log.debug("Attempt number {} url {}; prior status={}".format(ctr, new_url, resp['plans'][0]['status']))
+ total_time += ping_wait_time
+
+ try:
+ raw_resp = rc.request(new_url, raw_response=True)
+ resp = raw_resp.json()
+ except RequestException as e:
+ debug_log.debug("Conductor attempt {} for request_id {} has failed because {}".format(ctr, req_id, str(e)))
+
+
+def initial_request_to_conductor(rc, conductor_url, conductor_req_json):
+ """First steps in the request-redirect chain in making a call to Conductor
+ :param rc: REST client object for calling conductor
+ :param conductor_url: conductor's base URL to submit a placement request
+ :param conductor_req_json: request json object to send to Conductor
+ :return: URL to check for follow up (similar to redirects); we keep checking these till we get a result/error
+ """
+ debug_log.debug("Payload to Conductor: {}".format(json.dumps(conductor_req_json)))
+ raw_resp = rc.request(url=conductor_url, raw_response=True, method="POST", json=conductor_req_json)
+ resp = raw_resp.json()
+ if resp["status"] != "template":
+ raise RequestException(response=raw_resp, request=raw_resp.request)
+ time.sleep(10) # 10 seconds wait time to avoid being too quick!
+ plan_url = resp["links"][0][0]["href"]
+ debug_log.debug("Attemping to read the plan from the conductor provided url {}".format(plan_url))
+ raw_resp = rc.request(raw_response=True, url=plan_url) # TODO: check why a list of lists for links
+ resp = raw_resp.json()
+
+ if resp["plans"][0]["status"] in ["error"]:
+ raise RequestException(response=raw_resp, request=raw_resp.request)
+ return resp, raw_resp # now the caller of this will handle further follow-ups
+
+
+def conductor_response_processor(conductor_response, raw_response, req_id):
+ """Build a response object to be sent to client's callback URL from Conductor's response
+ This includes Conductor's placement optimization response, and required ASDC license artifacts
+
+ :param conductor_response: JSON response from Conductor
+ :param raw_response: Raw HTTP response corresponding to above
+ :param req_id: Id of a request
+ :return: JSON object that can be sent to the client's callback URL
+ """
+ composite_solutions = []
+ name_map = {"physical-location-id": "cloudClli", "host_id": "vnfHostName",
+ "cloud_version": "cloudVersion", "cloud_owner": "cloudOwner"}
+ for reco in conductor_response['plans'][0]['recommendations']:
+ for resource in reco.keys():
+ c = reco[resource]['candidate']
+ solution = {
+ 'resourceModuleName': resource,
+ 'serviceResourceId': reco[resource]['service_resource_id'],
+ 'inventoryType': c['inventory_type'],
+ 'serviceInstanceId': c['candidate_id'] if c['inventory_type'] == "service" else "",
+ 'cloudRegionId': c['location_id'],
+ 'assignmentInfo': []
+ }
+
+ for key, value in reco[resource]['attributes'].items():
+ try:
+ solution['assignmentInfo'].append({"variableName": name_map[key], "variableValue": value})
+ except KeyError:
+ debug_log.debug("The key[{}] is not mapped and will not be returned in assignment info".format(key))
+
+ if c.get('host_id'):
+ solution['assignmentInfo'].append({'variableName': name_map['host_id'], 'variableValue': c['host_id']})
+ composite_solutions.append(solution)
+
+ request_state = conductor_response['plans'][0]['status']
+ transaction_id = raw_response.headers.get('transaction_id', "")
+ status_message = conductor_response.get('plans')[0].get('message', "")
+
+ solution_info = {}
+ if composite_solutions:
+ solution_info['placementInfo'] = composite_solutions
+
+ resp = {
+ "transactionId": transaction_id,
+ "requestId": req_id,
+ "requestState": request_state,
+ "statusMessage": status_message,
+ "solutionInfo": solution_info
+ }
+ return resp
+
+
+def conductor_no_solution_processor(conductor_response, raw_response, request_id,
+ template_placement_response="templates/plc_opt_response.jsont"):
+ """Build a response object to be sent to client's callback URL from Conductor's response
+ This is for case where no solution is found
+
+ :param conductor_response: JSON response from Conductor
+ :param raw_response: Raw HTTP response corresponding to above
+ :param request_id: request Id associated with the client request (same as conductor response's "name")
+ :param template_placement_response: the template for generating response to client (plc_opt_response.jsont)
+ :return: JSON object that can be sent to the client's callback URL
+ """
+ status_message = conductor_response["plans"][0].get("message")
+ templ = Template(open(template_placement_response).read())
+ return json.loads(templ.render(composite_solutions=[], requestId=request_id,
+ transactionId=raw_response.headers.get('transaction_id', ""),
+ statusMessage=status_message, json=json))
+
+
diff --git a/optimizers/placementopt/conductor/remote_opt_processor.py b/optimizers/placementopt/conductor/remote_opt_processor.py
new file mode 100644
index 0000000..f753a70
--- /dev/null
+++ b/optimizers/placementopt/conductor/remote_opt_processor.py
@@ -0,0 +1,79 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from requests import RequestException
+
+import traceback
+from osdf.operation.error_handling import build_json_error_body
+from osdf.logging.osdf_logging import metrics_log, MH, error_log
+from osdf.optimizers.placementopt.conductor import conductor
+from osdf.optimizers.licenseopt.simple_license_allocation import license_optim
+from osdf.utils.interfaces import get_rest_client
+
+
+def process_placement_opt(request_json, policies, osdf_config, prov_status):
+ """Perform the work for placement optimization (e.g. call SDC artifact and make conductor request)
+ NOTE: there is scope to make the requests to policy asynchronous to speed up overall performance
+ :param request_json: json content from original request
+ :param policies: flattened policies corresponding to this request
+ :param osdf_config: configuration specific to OSDF app
+ :param prov_status: provStatus retrieved from Subscriber policy
+ :return: None, but make a POST to callback URL
+ """
+
+ try:
+ rc = get_rest_client(request_json, service="so")
+ req_id = request_json["requestInfo"]["requestId"]
+ transaction_id = request_json['requestInfo']['transactionId']
+
+ metrics_log.info(MH.inside_worker_thread(req_id))
+ license_info = None
+ if 'licenseDemand' in request_json['placementInfo']['demandInfo']:
+ license_info = license_optim(request_json)
+
+ # Conductor only handles placement, only call Conductor if placementDemands exist
+ if 'placementDemand' in request_json['placementInfo']['demandInfo']:
+ metrics_log.info(MH.requesting("placement/conductor", req_id))
+ placement_response = conductor.request(request_json, osdf_config, policies, prov_status)
+ if license_info: # Attach license solution if it exists
+ placement_response['solutionInfo']['licenseInfo'] = license_info
+ else: # License selection only scenario
+ placement_response = {
+ "transactionId": transaction_id,
+ "requestId": req_id,
+ "requestState": "complete",
+ "statusMessage": "License selection completed successfully",
+ "solutionInfo": {"licenseInfo": license_info}
+ }
+ except Exception as err:
+ error_log.error("Error for {} {}".format(req_id, traceback.format_exc()))
+
+ try:
+ body = build_json_error_body(err)
+ metrics_log.info(MH.sending_response(req_id, "ERROR"))
+ rc.request(json=body, noresponse=True)
+ except RequestException:
+ error_log.error("Error sending asynchronous notification for {} {}".format(req_id, traceback.format_exc()))
+ return
+
+ try:
+        metrics_log.info(MH.calling_back_with_body(req_id, rc.url, placement_response))
+ rc.request(json=placement_response, noresponse=True)
+    except RequestException:  # can't do much here but log it and move on
+ error_log.error("Error sending asynchronous notification for {} {}".format(req_id, traceback.format_exc()))
+
diff --git a/optimizers/placementopt/conductor/translation.py b/optimizers/placementopt/conductor/translation.py
new file mode 100644
index 0000000..036398a
--- /dev/null
+++ b/optimizers/placementopt/conductor/translation.py
@@ -0,0 +1,215 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import json
+from osdf.utils.data_conversion import text_to_symbol
+from osdf.utils import data_mapping
+
+def gen_optimization_policy(vnf_list, optimization_policy):
+ """Generate optimization policy details to pass to Conductor
+    :param vnf_list: list of VNFs to be used in the placement request
+ :param optimization_policy: optimization policy information provided in the incoming request
+ :return: List of optimization policies in a format required by Conductor
+ """
+ optimization_policy_list = []
+ for policy in optimization_policy:
+ content = policy['content']
+ parameter_list = []
+
+ for attr in content['objectiveParameter']['parameterAttributes']:
+ parameter = attr['parameter'] if attr['parameter'] == "cloud_version" else attr['parameter']+"_between"
+ for res in attr['resource']:
+ vnf = get_matching_vnf(res, vnf_list)
+ value = [vnf] if attr['parameter'] == "cloud_version" else [attr['customerLocationInfo'], vnf]
+ parameter_list.append({
+ attr['operator']: [attr['weight'], {parameter: value}]
+ })
+
+ optimization_policy_list.append({
+ content['objective']: {content['objectiveParameter']['operator']: parameter_list }
+ })
+ return optimization_policy_list
+
+
+def get_matching_vnf(resource, vnf_list):
+
+ for vnf in vnf_list:
+ if resource in vnf:
+ return vnf
+ return resource
+
+
+def get_matching_vnfs(resources, vnf_list, match_type="intersection"):
+ """Get a list of matching VNFs from the list of resources
+ :param resources:
+    :param vnf_list: list of VNFs to be used in the placement request
+ :param match_type: "intersection" or "all" or "any" (any => send all_vnfs if there is any intersection)
+ :return: List of matching VNFs
+ """
+ common_vnfs = []
+ for vnf in vnf_list:
+ for resource in resources:
+ if resource in vnf:
+ common_vnfs.append(vnf)
+ if match_type == "intersection": # specifically requested intersection
+ return common_vnfs
+ elif common_vnfs or match_type == "all": # ("any" and common) OR "all"
+ return resources
+ return None
+
+
+def gen_policy_instance(vnf_list, resource_policy, match_type="intersection", rtype=None):
+ """Generate a list of policies
+    :param vnf_list: list of VNFs to be used in the placement request
+ :param resource_policy: policy for this specific resource
+ :param match_type: How to match the vnf_names with the vnf_list (intersection or "any")
+ intersection => return intersection; "any" implies return all vnf_names if intersection is not null
+ :param rtype: resource type (e.g. resourceRegionProperty or resourceInstanceProperty)
+ None => no controller information added to the policy specification to Conductor
+ :return: resource policy list in a format required by Conductor
+ """
+ resource_policy_list = []
+ related_policies = []
+ for policy in resource_policy:
+ pc = policy['content']
+ demands = get_matching_vnfs(pc['resourceInstanceType'], vnf_list, match_type=match_type)
+ resource = {pc['identity']: {'type': pc['type'], 'demands': demands}}
+
+ if rtype:
+ resource[pc['identity']]['properties'] = {'controller': pc[rtype]['controller'],
+ 'request': json.loads(pc[rtype]['request'])}
+        if demands:
+ resource_policy_list.append(resource)
+ related_policies.append(policy)
+ return resource_policy_list, related_policies
+
+
+def gen_resource_instance_policy(vnf_list, resource_instance_policy):
+ """Get policies governing resource instances in order to populate the Conductor API call"""
+ cur_policies, _ = gen_policy_instance(vnf_list, resource_instance_policy, rtype='resourceInstanceProperty')
+ return cur_policies
+
+
+def gen_resource_region_policy(vnf_list, resource_region_policy):
+ """Get policies governing resource region in order to populate the Conductor API call"""
+ cur_policies, _ = gen_policy_instance(vnf_list, resource_region_policy, rtype='resourceRegionProperty')
+ return cur_policies
+
+
+def gen_inventory_group_policy(vnf_list, inventory_group_policy):
+ """Get policies governing inventory group in order to populate the Conductor API call"""
+ cur_policies, _ = gen_policy_instance(vnf_list, inventory_group_policy, rtype=None)
+ return cur_policies
+
+
+def gen_reservation_policy(vnf_list, reservation_policy):
+ """Get policies governing resource instances in order to populate the Conductor API call"""
+ cur_policies, _ = gen_policy_instance(vnf_list, reservation_policy, rtype='instanceReservationProperty')
+ return cur_policies
+
+
+def gen_distance_to_location_policy(vnf_list, distance_to_location_policy):
+ """Get policies governing distance-to-location for VNFs in order to populate the Conductor API call"""
+ cur_policies, related_policies = gen_policy_instance(vnf_list, distance_to_location_policy, rtype=None)
+ for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
+ properties = p_main['content']['distanceToLocationProperty']
+ pcp_d = properties['distanceCondition']
+ p_new[p_main['content']['identity']]['properties'] = {
+ 'distance': text_to_symbol[pcp_d['operator']] + " " + pcp_d['value'].lower(),
+ 'location': properties['locationInfo']
+ }
+ return cur_policies
+
+
+def gen_attribute_policy(vnf_list, attribute_policy):
+ """Get policies governing attributes of VNFs in order to populate the Conductor API call"""
+ cur_policies, related_policies = gen_policy_instance(vnf_list, attribute_policy, rtype=None)
+ for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
+ properties = p_main['content']['cloudAttributeProperty']
+ p_new[p_main['content']['identity']]['properties'] = {
+ 'evaluate': {
+ 'hypervisor': properties.get('hypervisor', ''),
+ 'cloud_version': properties.get('cloudVersion', ''),
+ 'cloud_type': properties.get('cloudType', ''),
+ 'dataplane': properties.get('dataPlane', ''),
+ 'network_roles': properties.get('networkRoles', ''),
+ 'complex': properties.get('complex', ''),
+ 'state': properties.get('state', ''),
+ 'country': properties.get('country', ''),
+ 'geo_region': properties.get('geoRegion', ''),
+ 'exclusivity_groups': properties.get('exclusivityGroups', ''),
+ 'replication_role': properties.get('replicationRole', '')
+ }
+ }
+ return cur_policies
+
+
+def gen_zone_policy(vnf_list, zone_policy):
+ """Get zone policies in order to populate the Conductor API call"""
+ cur_policies, related_policies = gen_policy_instance(vnf_list, zone_policy, rtype=None)
+ for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
+ pmz = p_main['content']['zoneProperty']
+ p_new[p_main['content']['identity']]['properties'] = {'category': pmz['category'], 'qualifier': pmz['qualifier']}
+ return cur_policies
+
+
+def get_demand_properties(demand, policies):
+ """Get list demand properties objects (named tuples) from policy"""
+ def _get_candidates(candidate_info):
+ return [dict(inventory_type=x['candidateType'], candidate_id=x['candidates']) for x in candidate_info]
+ properties = []
+ for policy in policies:
+ for resourceInstanceType in policy['content']['resourceInstanceType']:
+ if resourceInstanceType in demand['resourceModuleName']:
+ for x in policy['content']['property']:
+ property = dict(inventory_provider=x['inventoryProvider'],
+ inventory_type=x['inventoryType'],
+ service_resource_id=demand['serviceResourceId'])
+ if 'attributes' in x:
+ attributes = {}
+                    for k, v in x['attributes'].items():
+                        key = data_mapping.convert(k)
+                        attributes[key] = v
+                        if key == "model-invariant-id":
+                            attributes[key] = demand['resourceModelInfo']['modelInvariantId']
+                        elif key == "model-version-id":
+                            attributes[key] = demand['resourceModelInfo']['modelVersionId']
+ property.update({"attributes": attributes})
+ if x['inventoryType'] == "cloud":
+ property['region'] = {'get_param': "CHOSEN_REGION"}
+ if 'exclusionCandidateInfo' in demand:
+ property['excluded_candidates'] = _get_candidates(demand['exclusionCandidateInfo'])
+ if 'requiredCandidateInfo' in demand:
+ property['required_candidates'] = _get_candidates(demand['requiredCandidateInfo'])
+ properties.append(property)
+ if len(properties) == 0:
+ properties.append(dict(customer_id="", service_type="", inventory_provider="", inventory_type=""))
+ return properties
+
+
+def gen_demands(req_json, vnf_policies):
+ """Generate list of demands based on request and VNF policies
+ :param req_json: Request object from the client (e.g. MSO)
+ :param vnf_policies: Policies associated with demand resources (e.g. from grouped_policies['vnfPolicy'])
+ :return: list of demand parameters to populate the Conductor API call
+ """
+ demand_dictionary = {}
+ for placementDemand in req_json['placementDemand']:
+ demand_dictionary.update({placementDemand['resourceModuleName']: get_demand_properties(placementDemand, vnf_policies)})
+
+ return demand_dictionary
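
A sketch of gen_demands on a minimal demandInfo block (illustrative; the policy content carries only the fields read by get_demand_properties):

    from osdf.optimizers.placementopt.conductor.translation import gen_demands

    demand_info = {"placementDemand": [
        {"resourceModuleName": "vPGN", "serviceResourceId": "sr-1", "resourceInstanceType": "VNF"}
    ]}
    vnf_policies = [{"content": {"resourceInstanceType": ["vPGN"],
                                 "property": [{"inventoryProvider": "aai", "inventoryType": "cloud"}]}}]
    demands = gen_demands(demand_info, vnf_policies)
    # -> {"vPGN": [{"inventory_provider": "aai", "inventory_type": "cloud",
    #               "service_resource_id": "sr-1", "region": {"get_param": "CHOSEN_REGION"}}]}
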