From 0b855c08fd98fb8fa0f4bc40d8df430c897b4bad Mon Sep 17 00:00:00 2001 From: Ankitkumar Patel Date: Sun, 11 Feb 2018 17:51:13 -0500 Subject: Re-org folders, onboard test folder, test config Reorganized the folder structure. Onboarded testcases. Added test config. Issue-ID: OPTFRA-74 Change-Id: I97882a162a405a18ffd287495039e15ae9d0ad7b Signed-off-by: Ankitkumar Patel --- adapters/__init__.py | 0 adapters/database/OracleDB.py | 32 ------ adapters/database/PostgresDB.py | 31 ------ adapters/database/VerticaDB.py | 55 --------- adapters/database/__init__.py | 0 adapters/dcae/__init__.py | 0 adapters/dcae/message_router.py | 100 ----------------- adapters/local_data/__init__.py | 0 adapters/local_data/local_policies.py | 40 ------- adapters/policy/__init__.py | 0 adapters/policy/interface.py | 203 ---------------------------------- adapters/policy/utils.py | 58 ---------- adapters/request_parsing/__init__.py | 0 adapters/request_parsing/placement.py | 33 ------ adapters/sdc/__init__.py | 0 adapters/sdc/asdc.py | 40 ------- adapters/sdc/constraint_handler.py | 81 -------------- 17 files changed, 673 deletions(-) delete mode 100644 adapters/__init__.py delete mode 100644 adapters/database/OracleDB.py delete mode 100644 adapters/database/PostgresDB.py delete mode 100644 adapters/database/VerticaDB.py delete mode 100644 adapters/database/__init__.py delete mode 100644 adapters/dcae/__init__.py delete mode 100755 adapters/dcae/message_router.py delete mode 100644 adapters/local_data/__init__.py delete mode 100644 adapters/local_data/local_policies.py delete mode 100644 adapters/policy/__init__.py delete mode 100644 adapters/policy/interface.py delete mode 100644 adapters/policy/utils.py delete mode 100644 adapters/request_parsing/__init__.py delete mode 100644 adapters/request_parsing/placement.py delete mode 100644 adapters/sdc/__init__.py delete mode 100755 adapters/sdc/asdc.py delete mode 100644 adapters/sdc/constraint_handler.py (limited to 'adapters') diff --git a/adapters/__init__.py b/adapters/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/adapters/database/OracleDB.py b/adapters/database/OracleDB.py deleted file mode 100644 index 655dd27..0000000 --- a/adapters/database/OracleDB.py +++ /dev/null @@ -1,32 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) 2015-2017 AT&T Intellectual Property -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# ------------------------------------------------------------------------- -# - -import cx_Oracle - -from osdf.utils.programming_utils import MetaSingleton - - -class OracleDB(metaclass=MetaSingleton): - conn, cur = None, None - - def connect(self, host=None, sid=None, user=None, passwd=None, port=5432): - if self.conn is None: - tns_info = cx_Oracle.makedsn(host=host, port=port, sid=sid) - self.conn = cx_Oracle.connect(user=user, password=passwd, dsn=tns_info, threaded=True) - self.cur = self.conn.cursor() - return self.conn, self.cur diff --git a/adapters/database/PostgresDB.py b/adapters/database/PostgresDB.py deleted file mode 100644 index 6689566..0000000 --- a/adapters/database/PostgresDB.py +++ /dev/null @@ -1,31 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) 2015-2017 AT&T Intellectual Property -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# ------------------------------------------------------------------------- -# - -import psycopg2 - -from osdf.utils.programming_utils import MetaSingleton - - -class PostgresDB(metaclass=MetaSingleton): - conn, cur = None, None - - def connect(self, host=None, db=None, user=None, passwd=None, port=5432): - if self.conn is None: - self.conn = psycopg2.connect(host=host, port=port, user=user, password=passwd, database=db) - self.cur = self.conn.cursor() - return self.conn, self.cur diff --git a/adapters/database/VerticaDB.py b/adapters/database/VerticaDB.py deleted file mode 100644 index ad961d7..0000000 --- a/adapters/database/VerticaDB.py +++ /dev/null @@ -1,55 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) 2015-2017 AT&T Intellectual Property -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#
-# -------------------------------------------------------------------------
-#
-
-import jaydebeapi
-import sqlalchemy.pool as pool
-
-from jaydebeapi import _DEFAULT_CONVERTERS, _java_to_py
-from osdf.utils.programming_utils import MetaSingleton
-from osdf.config.base import osdf_config
-
-_DEFAULT_CONVERTERS.update({'BIGINT': _java_to_py('longValue')})
-
-
-class VerticaDB(metaclass=MetaSingleton):
-    connection_pool = None
-
-    def get_connection(self):
-        p = self.get_config_params()
-        c = jaydebeapi.connect(
-            'com.vertica.jdbc.Driver',
-            'jdbc:vertica://{}:{}/{}'.format(p['host'], p['port'], p['db']),
-            {'user': p['user'], 'password': p['passwd'], 'CHARSET': 'UTF8'},
-            jars=[p['db_driver']]
-        )
-        return c
-
-    def get_config_params(self):
-        config = osdf_config["deployment"]
-        host, port, db = config["verticaHost"], config["verticaPort"], config.get("verticaDB")
-        user, passwd = config["verticaUsername"], config["verticaPassword"]
-        jar_path = osdf_config['core']['osdf_system']['vertica_jar']
-        params = dict(host=host, db=db, user=user, passwd=passwd, port=port, db_driver=jar_path)
-        return params
-
-    def connect(self):
-        if self.connection_pool is None:
-            self.connection_pool = pool.QueuePool(self.get_connection, max_overflow=10, pool_size=5, recycle=600)
-        conn = self.connection_pool.connect()
-        cursor = conn.cursor()
-        return conn, cursor
diff --git a/adapters/database/__init__.py b/adapters/database/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/adapters/dcae/__init__.py b/adapters/dcae/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/adapters/dcae/message_router.py b/adapters/dcae/message_router.py
deleted file mode 100755
index e495331..0000000
--- a/adapters/dcae/message_router.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-import requests
-from osdf.utils.data_types import list_like
-from osdf.operation.exceptions import MessageBusConfigurationException
-
-
-class MessageRouterClient(object):
-    def __init__(self,
-                 dmaap_url=None,
-                 mr_host_base_urls=None,
-                 topic=None,
-                 consumer_group=None, consumer_id=None,
-                 timeout_ms=15000, fetch_limit=1000,
-                 userid=None, passwd=None):
-        """
-        :param dmaap_url: protocol, host and port; mostly for UEB
-            (e.g. https://dcae-msrt-ftl.homer.att.com:3905/)
-        :param mr_host_base_urls: for DMaaP, we get a topic URL (base_url + events/topic_name)
-            (e.g. https://dcae-msrt-ftl.homer.att.com:3905/events/com.att.dcae.dmaap.FTL.SNIRO-CM-SCHEDULER-RESPONSE)
-        :param consumer_group: DMaaP/UEB consumer group (unique for each subscriber; required for GET)
-        :param consumer_id: DMaaP/UEB consumer ID (unique for each thread/process for a subscriber; required for GET)
-        :param timeout_ms: (optional, default 15 seconds or 15,000 ms) server-side timeout for GET request
-        :param fetch_limit: (optional, default 1000 messages per request for GET), ignored for "POST"
-        :param userid: (optional, userid for HTTP basic authentication)
-        :param passwd: (optional, password for HTTP basic authentication)
-        """
-        mr_error = MessageBusConfigurationException
-        if dmaap_url is None:  # definitely not DMaaP, so use UEB mode
-            self.is_dmaap = False
-            if not (mr_host_base_urls and list_like(mr_host_base_urls)):
-                raise mr_error("Not a DMaaP or UEB configuration")
-            if not topic:
-                raise mr_error("Invalid topic: '{}'",format(topic))
-            self.topic_urls = ["{}/events/{}".format(base_url, topic) for base_url in mr_host_base_urls]
-        else:
-            self.is_dmaap = True
-            self.topic_urls = [dmaap_url]
-
-        self.timeout_ms = timeout_ms
-        self.fetch_limit = fetch_limit
-        self.auth = (userid, passwd) if userid and passwd else None
-        self.consumer_group = consumer_group
-        self.consumer_id = consumer_id
-
-    def get(self, outputjson=True):
-        """Fetch messages from message router (DMaaP or UEB)
-        :param outputjson: (optional, specifies if response is expected to be in json format), ignored for "POST"
-        :return: response as a json object (if outputjson is True) or as a string
-        """
-        url_fmt = "{topic_url}/{cgroup}/{cid}?timeout={timeout_ms}&limit={limit}"
-        urls = [url_fmt.format(topic_url=x, timeout_ms=self.timeout_ms, limit=self.fetch_limit,
-                               cgroup=self.consumer_group, cid=self.consumer_id) for x in self.topic_urls]
-        for url in urls[:-1]:
-            try:
-                return self.http_request(method='GET', url=url, outputjson=outputjson)
-            except:
-                pass
-        return self.http_request(method='GET', url=urls[-1], outputjson=outputjson)
-
-    def post(self, msg, inputjson=True):
-        for url in self.topic_urls[:-1]:
-            try:
-                return self.http_request(method='POST', url=url, inputjson=inputjson, msg=msg)
-            except:
-                pass
-        return self.http_request(method='POST', url=self.topic_urls[-1], inputjson=inputjson, msg=msg)
-
-    def http_request(self, url, method, inputjson=True, outputjson=True, msg=None, **kwargs):
-        """
-        Perform the actual URL request (GET or POST), and do error handling
-        :param url: full URL (including topic, limit, timeout, etc.)
- :param method: GET or POST - :param inputjson: Specify whether input is in json format (valid only for POST) - :param outputjson: Is response expected in a json format - :param msg: content to be posted (valid only for POST) - :return: response as a json object (if outputjson or POST) or as a string; None if error - """ - res = requests.request(url=url, method=method, auth=self.auth, **kwargs) - if res.status_code == requests.codes.ok: - return res.json() if outputjson or method == "POST" else res.content - else: - raise Exception("HTTP Response Error: code {}; headers:{}, content: {}".format( - res.status_code, res.headers, res.content)) diff --git a/adapters/local_data/__init__.py b/adapters/local_data/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/adapters/local_data/local_policies.py b/adapters/local_data/local_policies.py deleted file mode 100644 index c63ae5a..0000000 --- a/adapters/local_data/local_policies.py +++ /dev/null @@ -1,40 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) 2015-2017 AT&T Intellectual Property -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# ------------------------------------------------------------------------- -# - -import json -import os - - -def get_local_policies(local_policy_folder, local_policy_list, policy_id_list=None): - """ - Get policies from a local file system. - Required for the following scenarios: - (a) doing work-arounds (e.g. if we are asked to drop some policies for testing purposes) - (b) work-arounds when policy platform is giving issues (e.g. if dev/IST policies are wiped out in an upgrade) - :param local_policy_folder: where the policy files are present - :param local_policy_list: list of local policies - :param policy_id_list: list of policies to get (if unspecified or None, get all) - :return: get policies - """ - policies = [] - for fname in local_policy_list: # ugly removal of .json from file name - if policy_id_list and fname[:-5] not in policy_id_list: - continue - with open(os.path.join(local_policy_folder, fname)) as fid: - policies.append(json.load(fid)) - return policies diff --git a/adapters/policy/__init__.py b/adapters/policy/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/adapters/policy/interface.py b/adapters/policy/interface.py deleted file mode 100644 index ee45051..0000000 --- a/adapters/policy/interface.py +++ /dev/null @@ -1,203 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) 2015-2017 AT&T Intellectual Property -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -# ------------------------------------------------------------------------- -# - -import base64 -import itertools -import json - - -from requests import RequestException -from osdf.operation.exceptions import BusinessException -from osdf.adapters.local_data.local_policies import get_local_policies -from osdf.adapters.policy.utils import _regex_policy_name -from osdf.config.base import osdf_config -from osdf.logging.osdf_logging import audit_log, MH, metrics_log, error_log, debug_log -from osdf.utils.interfaces import RestClient -from osdf.optimizers.placementopt.conductor.api_builder import retrieve_node -from osdf.utils import data_mapping - - -def get_by_name(rest_client, policy_name_list, wildcards=True): - policy_list = [] - for policy_name in policy_name_list: - try: - query_name = policy_name - if wildcards: - query_name = _regex_policy_name(query_name) - policy_list.append(rest_client.request(json={"policyName": query_name})) - except RequestException as err: - audit_log.warn("Error in fetching policy: " + policy_name) - raise BusinessException("Cannot fetch policy {}: ".format(policy_name), err) - return policy_list - - -def get_subscriber_name(req, pmain): - subs_name = retrieve_node(req, pmain['subscriber_name']) - if subs_name is None: - return "DEFAULT" - else: - subs_name_uc = subs_name.upper() - if subs_name_uc in ("DEFAULT", "NULL", ""): - subs_name = "DEFAULT" - return subs_name - - -def get_subscriber_role(rest_client, req, pmain, service_name, scope): - """Make a request to policy and return subscriberRole - :param rest_client: rest client to make call - :param req: request object from MSO - :param pmain: main config that will have policy path information - :param service_name: the type of service to call: e.g. "vCPE - :param scope: the scope of policy to call: e.g. "OOF_HAS_vCPE". 
-    :return: subscriberRole and provStatus retrieved from Subscriber policy
-    """
-    subscriber_role = "DEFAULT"
-    prov_status = []
-    subs_name = get_subscriber_name(req, pmain)
-    if subs_name == "DEFAULT":
-        return subscriber_role, prov_status
-
-    policy_subs = pmain['policy_subscriber']
-    policy_scope = {"policyName": "{}.*".format(scope),
-                    "configAttributes": {
-                        "serviceType": "{}".format(service_name),
-                        "service": "{}".format(policy_subs)}
-                    }
-    policy_list = []
-    try:
-        policy_list.append(rest_client.request(json=policy_scope))
-    except RequestException as err:
-        audit_log.warn("Error in fetching policy for {}: ".format(policy_subs))
-        return subscriber_role, prov_status
-
-    formatted_policies = []
-    for x in itertools.chain(*policy_list):
-        if x['config'] is None:
-            raise BusinessException("Config not found for policy with name %s" % x['policyName'])
-        else:
-            formatted_policies.append(json.loads(x['config']))
-
-    for policy in formatted_policies:
-        property_list = policy['content']['property']
-        for prop in property_list:
-            if subs_name in prop['subscriberName']:
-                subs_role_list = prop['subscriberRole']
-                prov_status = prop['provStatus']
-                if isinstance(subs_role_list, list):  # as list is returned
-                    return subs_role_list[0], prov_status
-    return subscriber_role, prov_status
-
-
-def get_by_scope(rest_client, req, config_local, type_service):
-    policy_list = []
-    pmain = config_local['policy_info'][type_service]
-    pscope = pmain['policy_scope']
-
-    model_name = retrieve_node(req, pscope['service_name'])
-    service_name = data_mapping.get_request_service_type(req)
-    if service_name is None:
-        service_name = data_mapping.get_service_type(model_name)
-    scope = pscope['scope_{}'.format(service_name.lower())]
-    subscriber_role, prov_status = get_subscriber_role(rest_client, req, pmain, service_name, scope)
-    policy_type_list = pmain['policy_type_{}'.format(service_name.lower())]
-    for policy_type in policy_type_list:
-        policy_scope = {"policyName": "{}.*".format(scope),
-                        "configAttributes": {
-                            "serviceType": "{}".format(service_name),
-                            "service": "{}".format(policy_type),
-                            "subscriberRole": "{}".format(subscriber_role)}
-                        }
-        policy_list.append(rest_client.request(json=policy_scope))
-    return policy_list, prov_status
-
-
-def remote_api(req_json, osdf_config, service_type="placement"):
-    """Make a request to policy and return response -- it accounts for multiple requests that be needed
-    :param req_json: policy request object (can have multiple policy names)
-    :param osdf_config: main config that will have credential information
-    :param service_type: the type of service to call: "placement", "scheduling"
-    :return: all related policies and provStatus retrieved from Subscriber policy
-    """
-#    if not req_json[service_type + "Info"]['policyId']:
-#        return []
-
-    config = osdf_config.deployment
-    uid, passwd = config['policyPlatformUsername'], config['policyPlatformPassword']
-    pcuid, pcpasswd = config['policyClientUsername'], config['policyClientPassword']
-    headers = {"ClientAuth": base64.b64encode(bytes("{}:{}".format(pcuid, pcpasswd), "ascii"))}
-    headers.update({'Environment': config['policyPlatformEnv']})
-    url = config['policyPlatformUrl']
-    rc = RestClient(userid=uid, passwd=passwd, headers=headers, url=url, log_func=debug_log.debug)
-
-    if osdf_config.core['policy_info'][service_type]['policy_fetch'] == "by_name":
-        policies = get_by_name(rc, req_json[service_type + "Info"]['policyId'], wildcards=True)
-    elif osdf_config.core['policy_info'][service_type]['policy_fetch'] == "by_name_no_wildcards":
-        policies = get_by_name(rc, req_json[service_type + "Info"]['policyId'], wildcards=False)
-    else:  # Get policy by scope
-        policies, prov_status = get_by_scope(rc, req_json, osdf_config.core, service_type)
-
-    # policies in res are list of lists, so flatten them; also only keep config part
-    formatted_policies = []
-    for x in itertools.chain(*policies):
-        if x['config'] is None:
-            raise BusinessException("Config not found for policy with name %s" % x['policyName'])
-        else:
-            formatted_policies.append(json.loads(x['config']))
-    return formatted_policies, prov_status
-
-
-def local_policies_location(req_json, osdf_config, service_type):
-    """
-    Get folder and list of policy_files if "local policies" option is enabled
-    :param service_type: placement supported for now, but can be any other service
-    :return: a tuple (folder, file_list) or None
-    """
-    lp = osdf_config.core.get('osdf_hacks', {}).get('local_policies', {})
-    if lp.get('global_disabled'):
-        return None  # short-circuit to disable all local policies
-    if lp.get('local_{}_policies_enabled'.format(service_type)):
-        if service_type == "scheduling":
-            return lp.get('{}_policy_dir'.format(service_type)), lp.get('{}_policy_files'.format(service_type))
-        else:
-            model_name = retrieve_node(req_json, osdf_config.core['policy_info'][service_type]['policy_scope']['service_name'])
-            service_name = data_mapping.get_service_type(model_name)
-            return lp.get('{}_policy_dir_{}'.format(service_type, service_name.lower())), lp.get('{}_policy_files_{}'.format(service_type, service_name.lower()))
-    return None
-
-
-def get_policies(request_json, service_type):
-    """Validate the request and get relevant policies
-    :param request_json: Request object
-    :param service_type: the type of service to call: "placement", "scheduling"
-    :return: policies associated with this request and provStatus retrieved from Subscriber policy
-    """
-    prov_status = []
-    req_info = request_json['requestInfo']
-    req_id = req_info['requestId']
-    metrics_log.info(MH.requesting("policy", req_id))
-    local_info = local_policies_location(request_json, osdf_config, service_type)
-
-    if local_info:  # tuple containing location and list of files
-        to_filter = None
-        if osdf_config.core['policy_info'][service_type]['policy_fetch'] == "by_name":
-            to_filter = request_json[service_type + "Info"]['policyId']
-        policies = get_local_policies(local_info[0], local_info[1], to_filter)
-    else:
-        policies, prov_status = remote_api(request_json, osdf_config, service_type)
-
-    return policies, prov_status
diff --git a/adapters/policy/utils.py b/adapters/policy/utils.py
deleted file mode 100644
index a006f12..0000000
--- a/adapters/policy/utils.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -# ------------------------------------------------------------------------- -# - -from collections import defaultdict - - -def group_policies(flat_policies): - """Filter policies using the following steps: - 1. Apply prioritization among the policies that are sharing the same policy type and resource type - 2. Remove redundant policies that may applicable across different types of resource - 3. Filter policies based on type and return - :param flat_policies: list of flat policies - :return: Filtered policies - """ - aggregated_policies = {} - filter_policies = defaultdict(list) - policy_name = [] - for policy in flat_policies: - policy_type = policy['content']['type'] - if policy_type not in aggregated_policies: - aggregated_policies[policy_type] = defaultdict(list) - for resource in policy['content']['policyScope']['resourceInstanceType']: - aggregated_policies[policy_type][resource].append(policy) - for policy_type in aggregated_policies: - for resource in aggregated_policies[policy_type]: - if len(aggregated_policies[policy_type][resource]) > 0: - aggregated_policies[policy_type][resource].sort(key=lambda x: x['priority'], reverse=True) - policy = aggregated_policies[policy_type][resource][0] - if policy['policyName'] not in policy_name: - filter_policies[policy['content']['type']].append(policy) - policy_name.append(policy['policyName']) - return filter_policies - - -def _regex_policy_name(policy_name): - """Get the correct policy name as a regex - (e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml - So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*) - :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy - :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.* - """ - p = policy_name.partition('.') - return p[0] + p[1] + ".*" + p[2] + ".*" diff --git a/adapters/request_parsing/__init__.py b/adapters/request_parsing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/adapters/request_parsing/placement.py b/adapters/request_parsing/placement.py deleted file mode 100644 index d7a6575..0000000 --- a/adapters/request_parsing/placement.py +++ /dev/null @@ -1,33 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) 2015-2017 AT&T Intellectual Property -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# ------------------------------------------------------------------------- -# - -import copy -import json -from osdf.utils.programming_utils import list_flatten, dot_notation - - -def json_path_after_expansion(req_json, reference): - """ - Get the child node(s) from the dot-notation [reference] and parent [req_json]. - For placement and other requests, there are encoded JSONs inside the request or policy, - so we need to expand it and then do a search over the parent plus expanded JSON. 
- """ - req_json_copy = copy.deepcopy(req_json) # since we expand the JSON in place, we work on a copy - req_json_copy['placementInfo']['orderInfo'] = json.loads(req_json_copy['placementInfo']['orderInfo']) - info = dot_notation(req_json_copy, reference) - return list_flatten(info) if isinstance(info, list) else info diff --git a/adapters/sdc/__init__.py b/adapters/sdc/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/adapters/sdc/asdc.py b/adapters/sdc/asdc.py deleted file mode 100755 index 43932ba..0000000 --- a/adapters/sdc/asdc.py +++ /dev/null @@ -1,40 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) 2015-2017 AT&T Intellectual Property -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# ------------------------------------------------------------------------- -# - -from osdf.utils.interfaces import RestClient -import xml.etree.ElementTree as ET - -def request(model_version_id, request_id, config): - """Get all of the license artifacts from SDC using service_resource_id and model_version_id - :param model_version_id: model_version_id - :param request_id: request_id - :return: license artifacts from SDC - """ - base_url = config['sdcUrl'] - uid, passwd = config['sdcUsername'], config['sdcPassword'] - headers = {"CSP_UID": config['sdcMechId'], "X-ONAP-InstanceID": "osdf"} - rc = RestClient(userid=uid, passwd=passwd, headers=headers, method="GET", req_id=request_id) - resource_data = rc.request(base_url + '/resources/{}/metadata'.format(model_version_id)) - - artifact_ids = [x['artifactURL'].split("/resources/")[-1] # get the part after /resources/ - for x in resource_data.get('artifacts', []) if x.get('artifactType') == "VF_LICENSE"] - artifact_urls = [base_url + '/resources/' + str(artifact_id) for artifact_id in artifact_ids] - licenses = [] - for x in artifact_urls: - licenses.append(ET.fromstring(rc.request(x, asjson=False))) - return licenses diff --git a/adapters/sdc/constraint_handler.py b/adapters/sdc/constraint_handler.py deleted file mode 100644 index 2aae9a0..0000000 --- a/adapters/sdc/constraint_handler.py +++ /dev/null @@ -1,81 +0,0 @@ -# ------------------------------------------------------------------------- -# Copyright (c) 2015-2017 AT&T Intellectual Property -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# ------------------------------------------------------------------------- -# - -from osdf.config.base import osdf_config -from osdf.utils.programming_utils import dot_notation - -ns = {'p': 'http://xmlns.onap.org/sdc/license-model/1.0'} -config_local = osdf_config.core - - -def choose_license(license_artifacts, order_info, service_type): - entitlement_pool_uuids = [] - license_key_group_uuids = [] - - for license_artifact in license_artifacts: - for feature in license_artifact.findall('./p:feature-group-list/', ns): - for entitlement in feature.findall('./p:entitlement-pool-list/', ns): - if is_valid(entitlement, order_info, service_type): - entitlement_pool_uuid = entitlement.find('p:entitlement-pool-uuid', ns).text - entitlement_pool_uuids.append(entitlement_pool_uuid) - for license_key_group in feature.findall('./p:license-key-group-list/', ns): - if is_valid(license_key_group, order_info, service_type): - license_key_group_uuid = license_key_group.find('p:license-key-group-uuid', ns).text - license_key_group_uuids.append(license_key_group_uuid) - return entitlement_pool_uuids, license_key_group_uuids - - -# element is expected to be a license-key-group or entitlement-pool -# if these elements diverge at a later date this method should be refactored -def is_valid(element, order_info, service_type): - for limit in element.findall('./p:sp-limits/p:limit', ns): - # description = limit.find('p:description', ns).text - metric_value = limit.find('p:values', ns).text - metric = limit.find('p:metric', ns).text - try: - order_value = dot_notation(order_info, config_local['service_info'][service_type][metric]) - # print("The order has the value %s for the metric %s and the limit specifies the value %s. The limit has the description %s." % (order_value, metric, metric_value, description)) - if isinstance(order_value, list): # it is possible a list is returned, for example a list of vnfs for vCPE - for arr_value in order_value: - if str(metric_value) != str(arr_value): - return False - else: - if str(metric_value) != str(order_value): - return False - except KeyError: - return False - # vendor limits - for limit in element.findall('./p:vendor-limits/p:limit', ns): - # description = limit.find('p:description', ns).text - metric_value = limit.find('p:values', ns).text - metric = limit.find('p:metric', ns).text - try: - order_value = dot_notation(order_info, config_local['service_info'][service_type][metric]) - if isinstance(order_value, list): # it is possible a list is returned, for example a list of vnfs for vCPE - for arr_value in order_value: - if str(metric_value) != str(arr_value): - return False - else: - if str(metric_value) != str(order_value): - return False - # print("The order has the value %s for the metric %s and the limit specifies the value %s. The limit has the description %s." % (order_value, metric, metric_value, description)) - - except KeyError: - return False - return True - -- cgit 1.2.3-korg