Diffstat (limited to 'osdf')
-rwxr-xr-x  osdf/__init__.py | 5
-rw-r--r--  osdf/adapters/aaf/aaf_authentication.py | 46
-rw-r--r--  osdf/adapters/aaf/sms.py | 116
-rw-r--r-- [-rwxr-xr-x]  osdf/adapters/aai/_init_.py (renamed from osdf/logging/onap_common_v1/__init__.py) | 0
-rw-r--r--  osdf/adapters/aai/fetch_aai_data.py | 90
-rw-r--r--  osdf/adapters/conductor/__init__.py (renamed from osdf/optimizers/placementopt/conductor/__init__.py) | 2
-rw-r--r--  osdf/adapters/conductor/api_builder.py | 123
-rw-r--r--  osdf/adapters/conductor/conductor.py | 120
-rwxr-xr-x  osdf/adapters/conductor/templates/conductor_interface.json (renamed from osdf/templates/conductor_interface.json) | 80
-rw-r--r--  osdf/adapters/conductor/translation.py | 376
-rw-r--r--  osdf/adapters/dcae/des.py | 47
-rwxr-xr-x  osdf/adapters/dcae/message_router.py | 37
-rw-r--r--  osdf/adapters/local_data/local_policies.py | 3
-rw-r--r--  osdf/adapters/policy/interface.py | 156
-rw-r--r--  osdf/adapters/policy/utils.py | 12
-rw-r--r--  osdf/apps/__init__.py | 2
-rw-r--r--  osdf/apps/baseapp.py | 222
-rw-r--r--  osdf/cmd/encryptionUtil.py | 50
-rw-r--r--  osdf/config/__init__.py | 2
-rw-r--r--  osdf/config/base.py | 15
-rw-r--r--  osdf/config/consulconfig.py | 52
-rw-r--r--  osdf/config/loader.py | 2
-rw-r--r--  osdf/logging/__init__.py | 4
-rw-r--r--  osdf/logging/monkey.py (renamed from osdf/optimizers/placementopt/__init__.py) | 20
-rwxr-xr-x  osdf/logging/onap_common_v1/CommonLogger.py | 900
-rwxr-xr-x  osdf/logging/onap_common_v1/CommonLogger_test.config | 58
-rwxr-xr-x  osdf/logging/onap_common_v1/CommonLogger_testing.py | 143
-rwxr-xr-x  osdf/logging/onap_common_v1/README.md | 214
-rwxr-xr-x  osdf/logging/onap_common_v1/makefile | 40
-rw-r--r--  osdf/logging/oof_mdc_context.py | 170
-rw-r--r--  osdf/logging/oof_mdc_formatter.py | 51
-rwxr-xr-x  osdf/logging/osdf_logging.py | 166
-rw-r--r--  osdf/models/api/pciOptimizationRequest.py | 48
-rw-r--r--  osdf/models/api/pciOptimizationResponse.py | 40
-rw-r--r--  osdf/models/api/placementRequest.py | 105
-rw-r--r--  osdf/models/api/placementResponse.py | 64
-rw-r--r--  osdf/models/policy/placement/tosca/affinityPolicy-v20181031.yml (renamed from osdf/models/policy/placement/tosca/affinityPolicy-v20180326.yml) | 2
-rw-r--r--  osdf/models/policy/placement/tosca/distancePolicy-v20181031.yml (renamed from osdf/models/policy/placement/tosca/distancePolicy-v20180326.yml) | 2
-rw-r--r--  osdf/models/policy/placement/tosca/hpaPolicy-v20181031.yml (renamed from osdf/models/policy/placement/tosca/hpaPolicy-v20180326.yml) | 14
-rw-r--r--  osdf/models/policy/placement/tosca/optimizationPolicy-v20181031.yml (renamed from osdf/models/policy/placement/tosca/optimizationPolicy-v20180326.yml) | 10
-rw-r--r--  osdf/models/policy/placement/tosca/queryPolicy-v20181031.yml (renamed from osdf/models/policy/placement/tosca/queryPolicy-v20180326.yml) | 4
-rw-r--r--  osdf/models/policy/placement/tosca/vnfPolicy-v20181031.yml (renamed from osdf/models/policy/placement/tosca/vnfPolicy-v20180326.yml) | 19
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.AffinityPolicy.yaml | 31
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.AggregationPolicy.yaml | 42
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.DistancePolicy.yaml | 56
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.HpaPolicy.yaml | 103
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.OptimizationPolicy.yaml | 66
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.PciPolicy.yaml | 30
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.ThresholdPolicy.yaml | 37
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.Vim_fit.yaml | 28
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.VnfPolicy.yaml | 44
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.service.QueryPolicy.yaml | 24
-rw-r--r--  osdf/models/policy/placement/tosca_upload/onap.policies.optimization.service.SubscriberPolicy.yaml | 34
-rw-r--r--  osdf/optimizers/__init__.py | 17
-rw-r--r--  osdf/optimizers/licenseopt/simple_license_allocation.py | 41
-rw-r--r--  osdf/optimizers/pciopt/__init__.py | 0
-rw-r--r--  osdf/optimizers/pciopt/configdb.py | 65
-rw-r--r--  osdf/optimizers/pciopt/pci_opt_processor.py | 95
-rw-r--r--  osdf/optimizers/pciopt/solver/__init__.py | 0
-rw-r--r--  osdf/optimizers/pciopt/solver/min_confusion.mzn | 95
-rw-r--r--  osdf/optimizers/pciopt/solver/no_conflicts_no_confusion.mzn | 86
-rw-r--r--  osdf/optimizers/pciopt/solver/optimizer.py | 82
-rw-r--r--  osdf/optimizers/pciopt/solver/pci_utils.py | 39
-rw-r--r--  osdf/optimizers/placementopt/conductor/api_builder.py | 79
-rwxr-xr-x  osdf/optimizers/placementopt/conductor/conductor.py | 202
-rw-r--r--  osdf/optimizers/placementopt/conductor/remote_opt_processor.py | 79
-rw-r--r--  osdf/optimizers/placementopt/conductor/translation.py | 254
-rw-r--r--  osdf/optimizers/routeopt/__init__.py | 17
-rw-r--r--  osdf/optimizers/routeopt/simple_route_opt.py | 150
-rwxr-xr-x  osdf/templates/cms_opt_request.jsont | 35
-rwxr-xr-x  osdf/templates/cms_opt_request.jsont_1707_v1 | 67
-rwxr-xr-x  osdf/templates/cms_opt_request_1702.jsont | 63
-rw-r--r--  osdf/templates/cms_opt_response.jsont | 8
-rw-r--r--  osdf/templates/license_opt_request.jsont | 6
-rwxr-xr-x  osdf/templates/plc_opt_request.jsont | 142
-rwxr-xr-x  osdf/templates/plc_opt_response.jsont | 10
-rwxr-xr-x  osdf/templates/policy_request.jsont | 3
-rwxr-xr-x  osdf/templates/test_cms_nb_req_from_client.jsont | 19
-rwxr-xr-x  osdf/templates/test_plc_nb_req_from_client.jsont | 52
-rw-r--r--  osdf/utils/cipherUtils.py | 59
-rw-r--r--  osdf/utils/data_conversion.py | 25
-rw-r--r--  osdf/utils/file_utils.py (renamed from osdf/optimizers/licenseopt/__init__.py) | 19
-rw-r--r--  osdf/utils/interfaces.py | 26
-rw-r--r--  osdf/utils/mdc_utils.py | 155
-rw-r--r--  osdf/webapp/appcontroller.py | 20
85 files changed, 2560 insertions, 3577 deletions
diff --git a/osdf/__init__.py b/osdf/__init__.py
index c33639e..8036d89 100755
--- a/osdf/__init__.py
+++ b/osdf/__init__.py
@@ -20,11 +20,12 @@
from jinja2 import Template
-
end_point_auth_mapping = { # map a URL endpoint to auth group
"cmscheduler": "CMScheduler",
"placement": "Placement",
- "pci": "PCIOpt"
+ "pci": "PCIOpt",
+ "optmodel": "OptEngine",
+ "optengine": "OptEngine"
}
userid_suffix, passwd_suffix = "Username", "Password"
diff --git a/osdf/adapters/aaf/aaf_authentication.py b/osdf/adapters/aaf/aaf_authentication.py
index 26eac29..b9aa510 100644
--- a/osdf/adapters/aaf/aaf_authentication.py
+++ b/osdf/adapters/aaf/aaf_authentication.py
@@ -17,12 +17,14 @@
#
import base64
-import re
-from datetime import datetime, timedelta
+from datetime import datetime
+from datetime import timedelta
from flask import request
+import re
from osdf.config.base import osdf_config
-from osdf.logging.osdf_logging import error_log, debug_log
+from osdf.logging.osdf_logging import debug_log
+from osdf.logging.osdf_logging import error_log
from osdf.utils.interfaces import RestClient
AUTHZ_PERMS_USER = '{}/authz/perms/user/{}'
@@ -43,7 +45,6 @@ def authenticate(uid, passwd):
return has_valid_role(perms)
except Exception as exp:
error_log.error("Error Authenticating the user {} : {}: ".format(uid, exp))
- pass
return False
@@ -57,27 +58,38 @@ else return false
def has_valid_role(perms):
aaf_user_roles = deploy_config['aaf_user_roles']
+ aaf_roles = get_role_list(perms)
+
for roles in aaf_user_roles:
path_perm = roles.split(':')
uri = path_perm[0]
- role = path_perm[1].split('|')[0]
- if re.search(uri, request.path) and perms:
- roles = perms.get('roles')
- if roles:
- perm_list = roles.get('perm')
- for p in perm_list:
- if role == p['type']:
- return True
+ perm = path_perm[1].split('|')
+ p = (perm[0], perm[1], perm[2].split()[0])
+ if re.search(uri, request.path) and p in aaf_roles:
+ return True
return False
+
"""
-Make the remote aaf api call if user is not in the cache.
+Build a list of role tuples from the AAF response.
-Return the perms
"""
+
+
+def get_role_list(perms):
+ role_list = []
+ if perms:
+ roles = perms.get('roles')
+ if roles:
+ perm = roles.get('perm', [])
+ for p in perm:
+ role_list.append((p['type'], p['instance'], p['action']))
+ return role_list
+
+
def get_aaf_permissions(uid, passwd):
key = base64.b64encode(bytes("{}_{}".format(uid, passwd), "ascii"))
- time_delta = timedelta(hours=deploy_config.get('aaf_cache_expiry_hrs', 3))
+ time_delta = timedelta(minutes=deploy_config.get('aaf_cache_expiry_mins', 5))
perms = perm_cache.get(key)
@@ -91,8 +103,8 @@ def get_aaf_permissions(uid, passwd):
def remote_api(passwd, uid):
- headers = {"Accept": "application/Users+xml;q=1.0;charset=utf-8;version=2.0,text/xml;q=1.0;version=2.0",
- "Accept": "application/Users+json;q=1.0;charset=utf-8;version=2.0,application/json;q=1.0;version=2.0,*/*;q=1.0"}
+ headers = {"Accept": "application/Users+json;q=1.0;charset=utf-8;version=2.0,application/json;q=1.0;version=2.0,"
+ "*/*;q=1.0"}
url = AUTHZ_PERMS_USER.format(deploy_config['aaf_url'], uid)
rc = RestClient(userid=uid, passwd=passwd, headers=headers, url=url, log_func=debug_log.debug,
req_id='aaf_user_id')
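
For reference, a minimal sketch of the tuple-based role check introduced above, using an illustrative AAF perms payload (all values here are made up):

# get_role_list() flattens the AAF perms response into
# (type, instance, action) tuples.
sample_perms = {
    "roles": {
        "perm": [
            {"type": "org.onap.oof.access", "instance": "*", "action": "read"}
        ]
    }
}
role_list = [(p["type"], p["instance"], p["action"])
             for p in sample_perms["roles"]["perm"]]
# -> [("org.onap.oof.access", "*", "read")]
#
# has_valid_role() splits a configured aaf_user_roles entry such as
#   "/placement:org.onap.oof.access|*|read"
# on ':' and '|' and authorizes when the resulting tuple is in role_list
# and the URI part matches request.path.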
diff --git a/osdf/adapters/aaf/sms.py b/osdf/adapters/aaf/sms.py
index 9c7af51..031fee4 100644
--- a/osdf/adapters/aaf/sms.py
+++ b/osdf/adapters/aaf/sms.py
@@ -1,6 +1,7 @@
#
# -------------------------------------------------------------------------
# Copyright (c) 2018 Intel Corporation Intellectual Property
+# Copyright (C) 2020 Wipro Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,9 +22,12 @@
from onapsmsclient import Client
-import osdf.config.loader as config_loader
+import osdf.config.base as cfg_base
from osdf.config.base import osdf_config
+import osdf.config.credentials as creds
+import osdf.config.loader as config_loader
from osdf.logging.osdf_logging import debug_log
+from osdf.utils import cipherUtils
config_spec = {
"preload_secrets": "config/preload_secrets.yaml"
@@ -31,9 +35,12 @@ config_spec = {
def preload_secrets():
- """ This is intended to load the secrets required for testing Application
- Actual deployment will have a preload script. Make sure the config is
- in sync"""
+ """preload_secrets()
+
+    This is intended to load the secrets required for testing the application.
+    Actual deployment will have a preload script. Make sure the config is
+    in sync.
+ """
preload_config = config_loader.load_config_file(
config_spec.get("preload_secrets"))
domain = preload_config.get("domain")
@@ -41,6 +48,9 @@ def preload_secrets():
sms_url = config["aaf_sms_url"]
timeout = config["aaf_sms_timeout"]
cacert = config["aaf_ca_certs"]
+ if not sms_url:
+ debug_log.debug("SMS Disabled")
+ return
sms_client = Client(url=sms_url, timeout=timeout, cacert=cacert)
domain_uuid = sms_client.createDomain(domain)
debug_log.debug(
@@ -60,58 +70,80 @@ def retrieve_secrets():
timeout = config["aaf_sms_timeout"]
cacert = config["aaf_ca_certs"]
domain = config["secret_domain"]
- sms_client = Client(url=sms_url, timeout=timeout, cacert=cacert)
- secrets = sms_client.getSecretNames(domain)
- for secret in secrets:
- values = sms_client.getSecret(domain, secret)
- secret_dict[secret] = values
- debug_log.debug("Secret Dictionary Retrieval Success")
+ if sms_url:
+ sms_client = Client(url=sms_url, timeout=timeout, cacert=cacert)
+ secrets = sms_client.getSecretNames(domain)
+ for secret in secrets:
+ values = sms_client.getSecret(domain, secret)
+ secret_dict[secret] = values
+ debug_log.debug("Secret Dictionary Retrieval Success")
+ else:
+ debug_log.debug("SMS Disabled. Secrets not loaded")
return secret_dict
def load_secrets():
config = osdf_config.deployment
secret_dict = retrieve_secrets()
- config['soUsername'] = secret_dict['so']['UserName']
- config['soPassword'] = secret_dict['so']['Password']
- config['conductorUsername'] = secret_dict['conductor']['UserName']
- config['conductorPassword'] = secret_dict['conductor']['Password']
- config['policyPlatformUsername'] = secret_dict['policyPlatform']['UserName']
- config['policyPlatformPassword'] = secret_dict['policyPlatform']['Password']
- config['policyClientUsername'] = secret_dict['policyClient']['UserName']
- config['policyClientPassword'] = secret_dict['policyClient']['Password']
- config['messageReaderAafUserId'] = secret_dict['dmaap']['UserName']
- config['messageReaderAafPassword'] = secret_dict['dmaap']['Password']
- config['sdcUsername'] = secret_dict['sdc']['UserName']
- config['sdcPassword'] = secret_dict['sdc']['Password']
- config['osdfPlacementUsername'] = secret_dict['osdfPlacement']['UserName']
- config['osdfPlacementPassword'] = secret_dict['osdfPlacement']['Password']
- config['osdfPlacementSOUsername'] = secret_dict['osdfPlacementSO']['UserName']
- config['osdfPlacementSOPassword'] = secret_dict['osdfPlacementSO']['Password']
- config['osdfPlacementVFCUsername'] = secret_dict['osdfPlacementVFC']['UserName']
- config['osdfPlacementVFCPassword'] = secret_dict['osdfPlacementVFC']['Password']
- config['osdfCMSchedulerUsername'] = secret_dict['osdfCMScheduler']['UserName']
- config['osdfCMSchedulerPassword'] = secret_dict['osdfCMScheduler']['Password']
- config['configDbUserName'] = secret_dict['configDb']['UserName']
- config['configDbPassword'] = secret_dict['configDb']['Password']
- config['pciHMSUsername'] = secret_dict['pciHMS']['UserName']
- config['pciHMSPassword'] = secret_dict['pciHMS']['Password']
- config['osdfPCIOptUsername'] = secret_dict['osdfPCIOpt']['UserName']
- config['osdfPCIOptPassword'] = secret_dict['osdfPCIOpt']['Password']
+ if secret_dict:
+ config['soUsername'] = secret_dict['so']['UserName']
+ config['soPassword'] = decrypt_pass(secret_dict['so']['Password'])
+ config['conductorUsername'] = secret_dict['conductor']['UserName']
+ config['conductorPassword'] = decrypt_pass(secret_dict['conductor']['Password'])
+ config['policyPlatformUsername'] = secret_dict['policyPlatform']['UserName']
+ config['policyPlatformPassword'] = decrypt_pass(secret_dict['policyPlatform']['Password'])
+ config['policyClientUsername'] = secret_dict['policyPlatform']['UserName']
+ config['policyClientPassword'] = decrypt_pass(secret_dict['policyPlatform']['Password'])
+ config['messageReaderAafUserId'] = secret_dict['dmaap']['UserName']
+ config['messageReaderAafPassword'] = decrypt_pass(secret_dict['dmaap']['Password'])
+ config['sdcUsername'] = secret_dict['sdc']['UserName']
+ config['sdcPassword'] = decrypt_pass(secret_dict['sdc']['Password'])
+ config['osdfPlacementUsername'] = secret_dict['osdfPlacement']['UserName']
+ config['osdfPlacementPassword'] = decrypt_pass(secret_dict['osdfPlacement']['Password'])
+ config['osdfPlacementSOUsername'] = secret_dict['osdfPlacementSO']['UserName']
+ config['osdfPlacementSOPassword'] = decrypt_pass(secret_dict['osdfPlacementSO']['Password'])
+ config['osdfPlacementVFCUsername'] = secret_dict['osdfPlacementVFC']['UserName']
+ config['osdfPlacementVFCPassword'] = decrypt_pass(secret_dict['osdfPlacementVFC']['Password'])
+ config['osdfCMSchedulerUsername'] = secret_dict['osdfCMScheduler']['UserName']
+ config['osdfCMSchedulerPassword'] = decrypt_pass(secret_dict['osdfCMScheduler']['Password'])
+ config['configDbUserName'] = secret_dict['configDb']['UserName']
+ config['configDbPassword'] = decrypt_pass(secret_dict['configDb']['Password'])
+ config['pciHMSUsername'] = secret_dict['pciHMS']['UserName']
+ config['pciHMSPassword'] = decrypt_pass(secret_dict['pciHMS']['Password'])
+ config['osdfPCIOptUsername'] = secret_dict['osdfPCIOpt']['UserName']
+ config['osdfPCIOptPassword'] = decrypt_pass(secret_dict['osdfPCIOpt']['Password'])
+ config['osdfOptEngineUsername'] = secret_dict['osdfOptEngine']['UserName']
+ config['osdfOptEnginePassword'] = decrypt_pass(secret_dict['osdfOptEngine']['Password'])
+ cfg_base.http_basic_auth_credentials = creds.load_credentials(osdf_config)
+ cfg_base.dmaap_creds = creds.dmaap_creds()
+
+
+def decrypt_pass(passwd):
+ config = osdf_config.deployment
+ if not config.get('appkey') or passwd == '' or passwd == 'NA':
+ return passwd
+ else:
+ return cipherUtils.AESCipher.get_instance().decrypt(passwd)
def delete_secrets():
- """ This is intended to delete the secrets for a clean initialization for
- testing Application. Actual deployment will have a preload script.
- Make sure the config is in sync"""
+ """delete_secrets()
+
+ This is intended to delete the secrets for a clean initialization for
+    testing the application. Actual deployment will have a preload script.
+    Make sure the config is in sync.
+ """
config = osdf_config.deployment
sms_url = config["aaf_sms_url"]
timeout = config["aaf_sms_timeout"]
cacert = config["aaf_ca_certs"]
domain = config["secret_domain"]
- sms_client = Client(url=sms_url, timeout=timeout, cacert=cacert)
- ret_val = sms_client.deleteDomain(domain)
- debug_log.debug("Clean up complete")
+ if sms_url:
+ sms_client = Client(url=sms_url, timeout=timeout, cacert=cacert)
+ ret_val = sms_client.deleteDomain(domain)
+ debug_log.debug("Clean up complete")
+ else:
+ debug_log.debug("SMS Disabled. Secrets delete skipped")
return ret_val
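
The decryption rule added above is small enough to restate as a sketch; it assumes only the cipherUtils.AESCipher interface that this change itself imports (get_instance()/decrypt()):

from osdf.config.base import osdf_config
from osdf.utils import cipherUtils

def decrypt_pass_sketch(passwd):
    # Same rule as decrypt_pass() above: secrets pass through untouched
    # when no 'appkey' is configured or when the value is empty or the
    # "NA" placeholder; otherwise they are AES-decrypted.
    config = osdf_config.deployment
    if not config.get('appkey') or passwd in ('', 'NA'):
        return passwd
    return cipherUtils.AESCipher.get_instance().decrypt(passwd)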
diff --git a/osdf/logging/onap_common_v1/__init__.py b/osdf/adapters/aai/_init_.py
index e69de29..e69de29 100755..100644
--- a/osdf/logging/onap_common_v1/__init__.py
+++ b/osdf/adapters/aai/_init_.py
diff --git a/osdf/adapters/aai/fetch_aai_data.py b/osdf/adapters/aai/fetch_aai_data.py
new file mode 100644
index 0000000..170d5e5
--- /dev/null
+++ b/osdf/adapters/aai/fetch_aai_data.py
@@ -0,0 +1,90 @@
+# -------------------------------------------------------------------------
+# Copyright (C) 2020 Wipro Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import json
+import requests
+from requests.auth import HTTPBasicAuth
+from requests import RequestException
+
+from osdf.logging.osdf_logging import debug_log
+
+AAI_HEADERS = {
+ "X-TransactionId": "9999",
+ "X-FromAppId": "OOF",
+ "Accept": "application/json",
+ "Content-Type": "application/json",
+}
+
+AUTH = HTTPBasicAuth("AAI", "AAI")
+
+
+class AAIException(Exception):
+ pass
+
+
+def get_aai_data(request_json, osdf_config):
+
+ """Get the response from AAI
+
+    :param request_json: request json
+    :param osdf_config: configuration specific to OSDF app
+    :return: response body from AAI
+ """
+
+ nxi_id = request_json["NxIId"]
+ config = osdf_config.deployment
+ aai_url = config["aaiUrl"]
+ aai_req_url = aai_url + config["aaiServiceInstanceUrl"] + nxi_id + "?depth=2"
+
+ try:
+ debug_log.debug("aai request: {}".format(aai_req_url))
+ response = requests.get(aai_req_url, headers=AAI_HEADERS, auth=AUTH, verify=False)
+ debug_log.debug("aai response: {}".format(response.json()))
+ except RequestException as e:
+ raise AAIException("Request exception was encountered {}".format(e))
+
+ if response.status_code == 200:
+ return response.json()
+ else:
+ raise AAIException("Error response recieved from AAI for the request {}".format(aai_req_url))
+
+
+def execute_dsl_query(query, format, osdf_config):
+ """Get the response from AAI
+
+ :param query: dsl query to be executed
+    :param format: format of the output
+ :param osdf_config: configuration specific to OSDF app
+ :return:response body from AAI
+ """
+ config = osdf_config.deployment
+ dsl_url = config["aaiUrl"] + config["dslQueryPath"] + format
+
+ data = json.dumps({'dsl': query})
+ debug_log.debug("aai dsl request: {}".format(data))
+ try:
+ response = requests.put(dsl_url, data=data, headers=AAI_HEADERS, auth=AUTH, verify=False)
+ debug_log.debug("aai dsl response: {}".format(response))
+ except RequestException as ex:
+ raise AAIException("Request exception was encountered {}".format(ex))
+
+ if response.status_code == 200:
+ return response.json()
+ else:
+ raise AAIException("Response code other than 200 from AAI: {} {}".format(response.status_code,
+ response.content))
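
A hedged usage sketch for the new adapter; the config keys (aaiUrl, aaiServiceInstanceUrl) are the ones read above, while the instance id is illustrative:

from osdf.adapters.aai.fetch_aai_data import AAIException, get_aai_data
from osdf.config.base import osdf_config

request_json = {"NxIId": "example-nsi-id"}  # illustrative identifier

try:
    # GETs {aaiUrl}{aaiServiceInstanceUrl}{NxIId}?depth=2 with the static
    # X-TransactionId/X-FromAppId headers and basic auth defined above.
    service_instance = get_aai_data(request_json, osdf_config)
except AAIException as err:
    # Raised on transport errors and on any non-200 response.
    print(err)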
diff --git a/osdf/optimizers/placementopt/conductor/__init__.py b/osdf/adapters/conductor/__init__.py
index 4b25e5b..6156206 100644
--- a/osdf/optimizers/placementopt/conductor/__init__.py
+++ b/osdf/adapters/conductor/__init__.py
@@ -14,4 +14,4 @@
# limitations under the License.
#
# -------------------------------------------------------------------------
-#
+# \ No newline at end of file
diff --git a/osdf/adapters/conductor/api_builder.py b/osdf/adapters/conductor/api_builder.py
new file mode 100644
index 0000000..f3b0798
--- /dev/null
+++ b/osdf/adapters/conductor/api_builder.py
@@ -0,0 +1,123 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+# Copyright (C) 2020 Wipro Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+from jinja2 import Template
+import json
+
+import osdf.adapters.conductor.translation as tr
+from osdf.adapters.policy.utils import group_policies_gen
+from osdf.utils.programming_utils import list_flatten
+
+
+def _build_parameters(group_policies, service_info, request_parameters):
+ """Function prepares parameters section for has request
+
+ :param group_policies: filtered policies
+ :param service_info: service info
+ :param request_parameters: request parameters
+ :return:
+ """
+ initial_params = tr.get_opt_query_data(request_parameters,
+ group_policies['onap.policies.optimization.service.QueryPolicy'])
+ params = dict()
+ params.update({"REQUIRED_MEM": initial_params.pop("requiredMemory", "")})
+ params.update({"REQUIRED_DISK": initial_params.pop("requiredDisk", "")})
+ params.update({"customer_lat": initial_params.pop("customerLatitude", 0.0)})
+ params.update({"customer_long": initial_params.pop("customerLongitude", 0.0)})
+ params.update({"service_name": service_info.get('serviceName', "")})
+ params.update({"service_id": service_info.get('serviceInstanceId', "")})
+
+ for key, val in initial_params.items():
+ if val and val != "":
+ params.update({key: val})
+ return params
+
+
+def conductor_api_builder(req_info, demands, request_parameters, service_info,
+ template_fields, flat_policies: list, local_config,
+ template="osdf/adapters/conductor/templates/conductor_interface.json"):
+ """Build an OSDF southbound API call for HAS-Conductor/Placement optimization
+
+ :param req_info: parameter data received from a client
+ :param demands: list of demands
+ :param request_parameters: request parameters
+ :param service_info: service info object
+ :param template_fields: Fields that has to be passed to the template to render
+ :param flat_policies: policy data received from the policy platform (flat policies)
+ :param template: template to generate southbound API call to conductor
+ :param local_config: local configuration file with pointers for
+ the service specific information
+ :return: json to be sent to Conductor/placement optimization
+ """
+
+ templ = Template(open(template).read())
+ gp = group_policies_gen(flat_policies, local_config)
+ demand_name_list = []
+ for demand in demands:
+ demand_name_list.append(demand['resourceModuleName'].lower())
+ demand_list = tr.gen_demands(demands, gp['onap.policies.optimization.resource.VnfPolicy'])
+ attribute_policy_list = tr.gen_attribute_policy(
+ demand_name_list, gp['onap.policies.optimization.resource.AttributePolicy'])
+
+ distance_to_location_policy_list = tr.gen_distance_to_location_policy(
+ demand_name_list, gp['onap.policies.optimization.resource.DistancePolicy'])
+ inventory_policy_list = tr.gen_inventory_group_policy(
+ demand_name_list, gp['onap.policies.optimization.resource.InventoryGroupPolicy'])
+ resource_instance_policy_list = tr.gen_resource_instance_policy(
+ demand_name_list, gp['onap.policies.optimization.resource.ResourceInstancePolicy'])
+ resource_region_policy_list = tr.gen_resource_region_policy(
+ demand_name_list, gp['onap.policies.optimization.resource.ResourceRegionPolicy'])
+ zone_policy_list = tr.gen_zone_policy(
+ demand_name_list, gp['onap.policies.optimization.resource.AffinityPolicy'])
+ optimization_policy_list = tr.gen_optimization_policy(
+ demand_name_list, gp['onap.policies.optimization.resource.OptimizationPolicy'])
+ reservation_policy_list = tr.gen_reservation_policy(
+ demand_name_list, gp['onap.policies.optimization.resource.InstanceReservationPolicy'])
+ capacity_policy_list = tr.gen_capacity_policy(
+ demand_name_list, gp['onap.policies.optimization.resource.Vim_fit'])
+ hpa_policy_list = tr.gen_hpa_policy(
+ demand_name_list, gp['onap.policies.optimization.resource.HpaPolicy'])
+ threshold_policy_list = tr.gen_threshold_policy(demand_name_list,
+ gp['onap.policies.optimization.resource.'
+ 'ThresholdPolicy'])
+ aggregation_policy_list = tr.gen_aggregation_policy(demand_name_list,
+ gp['onap.policies.optimization.resource.'
+ 'AggregationPolicy'])
+ req_params_dict = _build_parameters(gp, service_info, request_parameters)
+ conductor_policies = [attribute_policy_list, distance_to_location_policy_list,
+ inventory_policy_list, resource_instance_policy_list,
+ resource_region_policy_list, zone_policy_list, reservation_policy_list,
+ capacity_policy_list, hpa_policy_list, threshold_policy_list, aggregation_policy_list]
+ filtered_policies = [x for x in conductor_policies if len(x) > 0]
+ policy_groups = list_flatten(filtered_policies)
+ request_type = req_info.get('requestType', None)
+ rendered_req = templ.render(
+ requestType=request_type,
+ demand_list=demand_list,
+ policy_groups=policy_groups,
+ optimization_policies=optimization_policy_list,
+ name=req_info['requestId'],
+ timeout=req_info['timeout'],
+ limit=req_info['numSolutions'],
+ request_params=req_params_dict,
+ location_enabled=template_fields.get('location_enabled'),
+ version=template_fields.get('version'),
+ json=json)
+ json_payload = json.dumps(json.loads(rendered_req)) # need this because template's JSON is ugly!
+ return json_payload
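
As a worked example of _build_parameters(), a query-policy result like the following (all values illustrative) is remapped onto the template parameter names, and any remaining non-empty keys pass through unchanged:

initial_params = {               # as returned by tr.get_opt_query_data()
    "requiredMemory": 4,
    "requiredDisk": 40,
    "customerLatitude": 32.8,
    "customerLongitude": -97.0,
    "chosenRegion": "RegionOne",
}
service_info = {"serviceName": "vCPE", "serviceInstanceId": "instance-1234"}

# _build_parameters() would then yield:
params = {
    "REQUIRED_MEM": 4,
    "REQUIRED_DISK": 40,
    "customer_lat": 32.8,
    "customer_long": -97.0,
    "service_name": "vCPE",
    "service_id": "instance-1234",
    "chosenRegion": "RegionOne",
}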
diff --git a/osdf/adapters/conductor/conductor.py b/osdf/adapters/conductor/conductor.py
new file mode 100644
index 0000000..49c123d
--- /dev/null
+++ b/osdf/adapters/conductor/conductor.py
@@ -0,0 +1,120 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+# Copyright (C) 2020 Wipro Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import json
+from requests import RequestException
+import time
+
+from osdf.adapters.conductor.api_builder import conductor_api_builder
+from osdf.logging.osdf_logging import debug_log
+from osdf.operation.exceptions import BusinessException
+from osdf.utils.interfaces import RestClient
+
+
+def request(req_info, demands, request_parameters, service_info, template_fields,
+ osdf_config, flat_policies):
+ config = osdf_config.deployment
+ local_config = osdf_config.core
+ uid, passwd = config['conductorUsername'], config['conductorPassword']
+ conductor_url = config['conductorUrl']
+ req_id = req_info["requestId"]
+ transaction_id = req_info['transactionId']
+ headers = dict(transaction_id=transaction_id)
+ placement_ver_enabled = config.get('placementVersioningEnabled', False)
+
+ if placement_ver_enabled:
+ cond_minor_version = config.get('conductorMinorVersion', None)
+ if cond_minor_version is not None:
+ x_minor_version = str(cond_minor_version)
+ headers.update({'X-MinorVersion': x_minor_version})
+ debug_log.debug("Versions set in HTTP header to "
+ "conductor: X-MinorVersion: {} ".format(x_minor_version))
+
+ max_retries = config.get('conductorMaxRetries', 30)
+ ping_wait_time = config.get('conductorPingWaitTime', 60)
+
+ rc = RestClient(userid=uid, passwd=passwd, method="GET", log_func=debug_log.debug,
+ headers=headers)
+ conductor_req_json_str = conductor_api_builder(req_info, demands, request_parameters,
+ service_info, template_fields, flat_policies,
+ local_config)
+ conductor_req_json = json.loads(conductor_req_json_str)
+
+ debug_log.debug("Sending first Conductor request for request_id {}".format(req_id))
+
+ resp, raw_resp = initial_request_to_conductor(rc, conductor_url, conductor_req_json)
+ # Very crude way of keeping track of time.
+ # We are not counting initial request time, first call back, or time for HTTP request
+ total_time, ctr = 0, 2
+ client_timeout = req_info['timeout']
+ configured_timeout = max_retries * ping_wait_time
+ max_timeout = min(client_timeout, configured_timeout)
+
+ while True: # keep requesting conductor till we get a result or we run out of time
+ if resp is not None:
+ if resp["plans"][0].get("status") in ["error"]:
+ raise RequestException(response=raw_resp, request=raw_resp.request)
+
+ if resp["plans"][0].get("status") in ["done", "not found", "solved"]:
+ return resp
+ new_url = resp['plans'][0]['links'][0][0]['href'] # TODO(krishna): check why a list of lists
+
+ if total_time >= max_timeout:
+ raise BusinessException("Conductor could not provide a solution within {} seconds,"
+ "this transaction is timing out".format(max_timeout))
+ time.sleep(ping_wait_time)
+ ctr += 1
+ debug_log.debug("Attempt number {} url {}; prior status={}"
+ .format(ctr, new_url, resp['plans'][0]['status']))
+ total_time += ping_wait_time
+
+ try:
+ raw_resp = rc.request(new_url, raw_response=True)
+ resp = raw_resp.json()
+ except RequestException as e:
+ debug_log.debug("Conductor attempt {} for request_id {} has failed because {}"
+ .format(ctr, req_id, str(e)))
+
+
+def initial_request_to_conductor(rc, conductor_url, conductor_req_json):
+ """First steps in the request-redirect chain in making a call to Conductor
+
+ :param rc: REST client object for calling conductor
+ :param conductor_url: conductor's base URL to submit a placement request
+ :param conductor_req_json: request json object to send to Conductor
+ :return: URL to check for follow up (similar to redirects);
+ we keep checking these till we get a result/error
+ """
+ debug_log.debug("Payload to Conductor: {}".format(json.dumps(conductor_req_json)))
+ raw_resp = rc.request(url=conductor_url, raw_response=True, method="POST",
+ json=conductor_req_json)
+ resp = raw_resp.json()
+ if resp["status"] != "template":
+ raise RequestException(response=raw_resp, request=raw_resp.request)
+ time.sleep(10) # 10 seconds wait time to avoid being too quick!
+ plan_url = resp["links"][0][0]["href"]
+ debug_log.debug("Attempting to read the plan from "
+ "the conductor provided url {}".format(plan_url))
+ raw_resp = rc.request(raw_response=True,
+ url=plan_url)
+ resp = raw_resp.json()
+
+ if resp["plans"][0]["status"] in ["error"]:
+ raise RequestException(response=raw_resp, request=raw_resp.request)
+ return resp, raw_resp # now the caller of this will handle further follow-ups
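
The polling budget in request() is simply the smaller of the client's timeout and the configured retry budget; with the defaults above and an illustrative client timeout:

max_retries = 30         # conductorMaxRetries default
ping_wait_time = 60      # conductorPingWaitTime default, in seconds
client_timeout = 1200    # req_info['timeout'], illustrative

configured_timeout = max_retries * ping_wait_time       # 1800 s
max_timeout = min(client_timeout, configured_timeout)   # 1200 s
# The loop sleeps ping_wait_time between polls and raises a
# BusinessException once total_time reaches max_timeout.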
diff --git a/osdf/templates/conductor_interface.json b/osdf/adapters/conductor/templates/conductor_interface.json
index d2258a0..c0e28dc 100755
--- a/osdf/templates/conductor_interface.json
+++ b/osdf/adapters/conductor/templates/conductor_interface.json
@@ -1,39 +1,41 @@
-{
- "name": "{{ name }}",
- "files": {},
- "timeout": {{ timeout }},
- "limit": {{ limit }},
- "template": {
- "homing_template_version": "2017-10-10",
- "parameters": {
- "service_name": "{{ service_name }}",
- "service_id": "{{ service_id }}",
- "customer_lat": {{ latitude }},
- "customer_long": {{ longitude }}
- },
- "locations": {
- "customer_loc": {
- "latitude": { "get_param": "customer_lat" },
- "longitude": { "get_param": "customer_long" }
- }
- },
- "demands": {{ json.dumps(demand_list) }},
- {% set comma_main = joiner(",") %}
- "constraints": {
- {% set comma=joiner(",") %}
- {% for elem in policy_groups %} {{ comma() }}
- {% for key, value in elem.items() %}
- "{{key}}": {{ json.dumps(value) }}
- {% endfor %}
- {% endfor %}
- },
- "optimization": {
- {% set comma=joiner(",") %}
- {% for elem in optimization_policies %} {{ comma() }}
- {% for key, value in elem.items() %}
- "{{key}}": {{ json.dumps(value) }}
- {% endfor %}
- {% endfor %}
- }
- }
-} \ No newline at end of file
+{
+ "name": "{{ name }}",
+ "files": {},
+ "timeout": {{ timeout }},
+ "num_solution": "{{ limit }}",
+ "template": {
+ "homing_template_version": "{{ version }}",
+ "parameters": {
+ {% set comma=joiner(",") %}
+ {% for key, value in request_params.items() %} {{ comma() }}
+ "{{key}}": {{ json.dumps(value) }}
+ {% endfor %}
+ },
+ {% if location_enabled %}
+ "locations": {
+ "customer_loc": {
+ "latitude": { "get_param": "customer_lat" },
+ "longitude": { "get_param": "customer_long" }
+ }
+ },
+ {% endif %}
+ "demands": {{ json.dumps(demand_list) }},
+ {% set comma_main = joiner(",") %}
+ "constraints": {
+ {% set comma=joiner(",") %}
+ {% for elem in policy_groups %} {{ comma() }}
+ {% for key, value in elem.items() %}
+ "{{key}}": {{ json.dumps(value) }}
+ {% endfor %}
+ {% endfor %}
+ },
+ "optimization": {
+ {% set comma=joiner(",") %}
+ {% for elem in optimization_policies %} {{ comma() }}
+ {% for key, value in elem.items() %}
+ "{{key}}": {{ json.dumps(value) }} {{ ", " if not loop.last }}
+ {% endfor %}
+ {% endfor %}
+ }
+ }
+}
diff --git a/osdf/adapters/conductor/translation.py b/osdf/adapters/conductor/translation.py
new file mode 100644
index 0000000..f44f27f
--- /dev/null
+++ b/osdf/adapters/conductor/translation.py
@@ -0,0 +1,376 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+# Copyright (C) 2020 Wipro Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import copy
+import json
+import re
+import yaml
+
+from osdf.utils.programming_utils import dot_notation
+
+policy_config_mapping = yaml.safe_load(open('config/has_config.yaml')).get('policy_config_mapping')
+
+CONSTRAINT_TYPE_MAP = {"onap.policies.optimization.resource.AttributePolicy": "attribute",
+ "onap.policies.optimization.resource.DistancePolicy": "distance_to_location",
+ "onap.policies.optimization.resource.InventoryGroupPolicy": "inventory_group",
+ "onap.policies.optimization.resource.ResourceInstancePolicy": "instance_fit",
+ "onap.policies.optimization.resource.ResourceRegionPolicy": "region_fit",
+ "onap.policies.optimization.resource.AffinityPolicy": "zone",
+ "onap.policies.optimization.resource.InstanceReservationPolicy":
+ "instance_reservation",
+ "onap.policies.optimization.resource.Vim_fit": "vim_fit",
+ "onap.policies.optimization.resource.HpaPolicy": "hpa",
+ "onap.policies.optimization.resource.ThresholdPolicy": "threshold",
+ "onap.policies.optimization.resource.AggregationPolicy": "aggregation"
+ }
+
+
+def get_opt_query_data(request_parameters, policies):
+ """Fetch service and order specific details from the requestParameters field of a request.
+
+ :param request_parameters: A list of request parameters
+ :param policies: A set of policies
+ :return: A dictionary with service and order-specific attributes.
+ """
+ req_param_dict = {}
+ if request_parameters:
+ for policy in policies:
+ for queryProp in policy[list(policy.keys())[0]]['properties']['queryProperties']:
+ attr_val = queryProp['value'] if 'value' in queryProp and queryProp['value'] != "" \
+ else dot_notation(request_parameters, queryProp['attribute_location'])
+ if attr_val is not None:
+ req_param_dict.update({queryProp['attribute']: attr_val})
+ return req_param_dict
+
+
+def gen_optimization_policy(vnf_list, optimization_policy):
+ """Generate optimization policy details to pass to Conductor
+
+    :param vnf_list: List of VNFs to be used in the placement request
+ :param optimization_policy: optimization objective policy information provided in the incoming request
+ :return: List of optimization objective policies in a format required by Conductor
+ """
+ if len(optimization_policy) == 1:
+ policy = optimization_policy[0]
+ policy_content = policy[list(policy.keys())[0]]
+ if policy_content['type_version'] == '2.0.0':
+ properties = policy_content['properties']
+ objective = {'goal': properties['goal'],
+ 'operation_function': properties['operation_function']}
+ return [objective]
+
+ optimization_policy_list = []
+ for policy in optimization_policy:
+ content = policy[list(policy.keys())[0]]['properties']
+ parameter_list = []
+ parameters = ["cloud_version", "hpa_score"]
+
+ for attr in content['objectiveParameter']['parameterAttributes']:
+ parameter = attr['parameter'] if attr['parameter'] in parameters else attr['parameter'] + "_between"
+ default, vnfs = get_matching_vnfs(attr['resources'], vnf_list)
+ for vnf in vnfs:
+ value = [vnf] if attr['parameter'] in parameters else [attr['customerLocationInfo'], vnf]
+ parameter_list.append({
+ attr['operator']: [attr['weight'], {parameter: value}]
+ })
+
+ optimization_policy_list.append({
+ content['objective']: {content['objectiveParameter']['operator']: parameter_list}
+ })
+ return optimization_policy_list
+
+
+def get_matching_vnfs(resources, vnf_list, match_type="intersection"):
+ """Get a list of matching VNFs from the list of resources
+
+ :param resources:
+    :param vnf_list: List of VNFs to be used in the placement request
+ :param match_type: "intersection" or "all" or "any" (any => send all_vnfs if there is any intersection)
+ :return: List of matching VNFs
+ """
+ # Check if it is a default policy
+ default = True if resources == [] else False
+ resources_lcase = [x.lower() for x in resources] if not default else [x.lower() for x in vnf_list]
+ if match_type == "all": # don't bother with any comparisons
+ return default, resources if set(resources_lcase) <= set(vnf_list) else None
+ common_vnfs = set(vnf_list) & set(resources_lcase) if not default else set(vnf_list)
+ common_resources = [x for x in resources if x.lower() in common_vnfs] if not default else list(common_vnfs)
+ if match_type == "intersection": # specifically requested intersection
+ return default, list(common_resources)
+ return default, resources if common_vnfs else None # "any" match => all resources to be returned
+
+
+def gen_policy_instance(vnf_list, resource_policy, match_type="intersection", rtype=None):
+ """Generate a list of policies
+
+    :param vnf_list: List of VNFs to be used in the placement request
+ :param resource_policy: policy for this specific resource
+ :param match_type: How to match the vnf_names with the vnf_list (intersection or "any")
+ intersection => return intersection; "any" implies return all vnf_names if intersection is not null
+ :param rtype: resource type (e.g. resourceRegionProperty or resourceInstanceProperty)
+ None => no controller information added to the policy specification to Conductor
+ :return: resource policy list in a format required by Conductor
+ """
+ resource_policy_list = []
+ related_policies = []
+ for policy in resource_policy:
+ pc = policy[list(policy.keys())[0]]
+ default, demands = get_matching_vnfs(pc['properties']['resources'], vnf_list, match_type=match_type)
+ resource = {pc['properties']['identity']: {'type': CONSTRAINT_TYPE_MAP.get(pc['type']), 'demands': demands}}
+
+ if rtype:
+ resource[pc['properties']['identity']]['properties'] = {'controller': pc[rtype]['controller'],
+ 'request': json.loads(pc[rtype]['request'])}
+ if demands and len(demands) != 0:
+ # The default policy shall not override the specific policy that already appended
+ if default:
+ for d in demands:
+ resource_repeated = True \
+ if {pc['properties']['identity']: {'type': CONSTRAINT_TYPE_MAP.get(pc['type']),
+ 'demands': d}} in resource_policy_list else False
+ if resource_repeated:
+ continue
+ else:
+ resource_policy_list.append(
+ {pc['properties']['identity']: {'type': CONSTRAINT_TYPE_MAP.get(pc['type']),
+ 'demands': d}})
+ policy[list(policy.keys())[0]]['properties']['resources'] = d
+ related_policies.append(policy)
+ # Need to override the default policies, here delete the outdated policy stored in the db
+ if resource in resource_policy_list:
+ for pc in related_policies:
+                if pc[list(pc.keys())[0]]['properties']['resources'] == resource:
+ related_policies.remove(pc)
+ resource_policy_list.remove(resource)
+ related_policies.append(policy)
+ resource_policy_list.append(resource)
+
+ return resource_policy_list, related_policies
+
+
+def gen_resource_instance_policy(vnf_list, resource_instance_policy):
+ """Get policies governing resource instances in order to populate the Conductor API call"""
+ cur_policies, _ = gen_policy_instance(vnf_list, resource_instance_policy, rtype='resourceInstanceProperty')
+ return cur_policies
+
+
+def gen_resource_region_policy(vnf_list, resource_region_policy):
+ """Get policies governing resource region in order to populate the Conductor API call"""
+ cur_policies, _ = gen_policy_instance(vnf_list, resource_region_policy, rtype='resourceRegionProperty')
+ return cur_policies
+
+
+def gen_inventory_group_policy(vnf_list, inventory_group_policy):
+ """Get policies governing inventory group in order to populate the Conductor API call"""
+ cur_policies, _ = gen_policy_instance(vnf_list, inventory_group_policy, rtype=None)
+ return cur_policies
+
+
+def gen_reservation_policy(vnf_list, reservation_policy):
+ """Get policies governing resource instances in order to populate the Conductor API call"""
+ cur_policies, _ = gen_policy_instance(vnf_list, reservation_policy, rtype='instanceReservationProperty')
+ return cur_policies
+
+
+def gen_distance_to_location_policy(vnf_list, distance_to_location_policy):
+ """Get policies governing distance-to-location for VNFs in order to populate the Conductor API call"""
+ cur_policies, related_policies = gen_policy_instance(vnf_list, distance_to_location_policy, rtype=None)
+ for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
+ properties = p_main[list(p_main.keys())[0]]['properties']['distanceProperties']
+ pcp_d = properties['distance']
+ p_new[p_main[list(p_main.keys())[0]]['properties']['identity']]['properties'] = {
+ 'distance': pcp_d['operator'] + " " + pcp_d['value'].lower() + " " + pcp_d['unit'].lower(),
+ 'location': properties['locationInfo']
+ }
+ return cur_policies
+
+
+def gen_attribute_policy(vnf_list, attribute_policy):
+ """Get policies governing attributes of VNFs in order to populate the Conductor API call"""
+ cur_policies, related_policies = gen_policy_instance(vnf_list, attribute_policy, rtype=None)
+ for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
+ properties = p_main[list(p_main.keys())[0]]['properties']['attributeProperties']
+ attribute_mapping = policy_config_mapping['filtering_attributes'] # wanted attributes and mapping
+ p_new[p_main[list(p_main.keys())[0]]['properties']['identity']]['properties'] = {
+ 'evaluate': dict((attribute_mapping[k], properties.get(k)
+ if k != "cloudRegion" else gen_cloud_region(properties))
+ for k in attribute_mapping.keys())
+ }
+ return cur_policies # cur_policies gets updated in place...
+
+
+def gen_zone_policy(vnf_list, zone_policy):
+ """Get zone policies in order to populate the Conductor API call"""
+ cur_policies, related_policies = gen_policy_instance(vnf_list, zone_policy, match_type="all", rtype=None)
+ for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
+ pmz = p_main[list(p_main.keys())[0]]['properties']['affinityProperties']
+ p_new[p_main[list(p_main.keys())[0]]['properties']['identity']]['properties'] = \
+ {'category': pmz['category'], 'qualifier': pmz['qualifier']}
+ return cur_policies
+
+
+def gen_capacity_policy(vnf_list, capacity_policy):
+ """Get zone policies in order to populate the Conductor API call"""
+ cur_policies, related_policies = gen_policy_instance(vnf_list, capacity_policy, rtype=None)
+ for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
+ pmz = p_main[list(p_main.keys())[0]]['properties']['capacityProperty']
+ p_new[p_main[list(p_main.keys())[0]]['properties']['identity']]['properties'] = \
+ {"controller": pmz['controller'], 'request': json.loads(pmz['request'])}
+ return cur_policies
+
+
+def gen_hpa_policy(vnf_list, hpa_policy):
+ """Get zone policies in order to populate the Conductor API call"""
+ cur_policies, related_policies = gen_policy_instance(vnf_list, hpa_policy, rtype=None)
+ for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
+ p_new[p_main[list(p_main.keys())[0]]['properties']['identity']]['properties'] = \
+ {'evaluate': p_main[list(p_main.keys())[0]]['properties']['flavorFeatures']}
+ return cur_policies
+
+
+def gen_threshold_policy(vnf_list, threshold_policy):
+ cur_policies, related_policies = gen_policy_instance(vnf_list, threshold_policy, rtype=None)
+ for p_new, p_main in zip(cur_policies, related_policies):
+ pmz = p_main[list(p_main.keys())[0]]['properties']['thresholdProperties']
+ p_new[p_main[list(p_main.keys())[0]]['properties']['identity']]['properties'] = {'evaluate': pmz}
+ return cur_policies
+
+
+def gen_aggregation_policy(vnf_list, cross_policy):
+ cur_policies, related_policies = gen_policy_instance(vnf_list, cross_policy, rtype=None)
+ for p_new, p_main in zip(cur_policies, related_policies):
+ pmz = p_main[list(p_main.keys())[0]]['properties']['aggregationProperties']
+ p_new[p_main[list(p_main.keys())[0]]['properties']['identity']]['properties'] = {'evaluate': pmz}
+ return cur_policies
+
+
+def get_augmented_policy_attributes(policy_property, demand):
+ """Get policy attributes and augment them using policy_config_mapping and demand information"""
+ attributes = copy.copy(policy_property['attributes'])
+ remapping = policy_config_mapping['remapping']
+ extra = dict((x, demand['resourceModelInfo'][remapping[x]]) for x in attributes if x in remapping)
+ attributes.update(extra)
+ return attributes
+
+
+def get_candidates_demands(demand):
+ """Get demands related to candidates; e.g. excluded/required"""
+ res = {}
+ for k, v in policy_config_mapping['candidates'].items():
+ if k not in demand:
+ continue
+ res[v] = [{'inventory_type': x['identifierType'], 'candidate_id': x['identifiers']} for x in demand[k]]
+ return res
+
+
+def get_policy_properties(demand, policies):
+ """Get policy_properties for cases where there is a match with the demand"""
+ for policy in policies:
+ policy_demands = set([x.lower() for x in policy[list(policy.keys())[0]]['properties']['resources']])
+ if policy_demands and demand['resourceModuleName'].lower() not in policy_demands:
+ continue # no match for this policy
+ elif policy_demands == set(): # Append resource name for default policy
+ policy[list(policy.keys())[0]]['properties'].update(resources=list(demand.get('resourceModuleName')))
+ for policy_property in policy[list(policy.keys())[0]]['properties']['vnfProperties']:
+ yield policy_property
+
+
+def get_demand_properties(demand, policies):
+ """Get list demand properties objects (named tuples) from policy"""
+ demand_properties = []
+ for policy_property in get_policy_properties(demand, policies):
+ prop = dict(inventory_provider=policy_property['inventoryProvider'],
+ inventory_type=policy_property['inventoryType'],
+ service_type=demand.get('serviceResourceId', ''),
+ service_resource_id=demand.get('serviceResourceId', ''))
+ policy_property_mapping = {'filtering_attributes': 'attributes',
+ 'passthrough_attributes': 'passthroughAttributes',
+ 'default_attributes': 'defaultAttributes'}
+
+ prop.update({'unique': policy_property['unique']} if 'unique' in policy_property and
+ policy_property['unique'] else {})
+ prop['filtering_attributes'] = dict()
+ for key, value in policy_property_mapping.items():
+ get_demand_attributes(prop, policy_property, key, value)
+
+ prop['filtering_attributes'].update({'global-customer-id': policy_property['customerId']}
+ if 'customerId' in policy_property and policy_property['customerId']
+ else {})
+ prop['filtering_attributes'].update({'model-invariant-id': demand['resourceModelInfo']['modelInvariantId']}
+ if 'modelInvariantId' in demand['resourceModelInfo']
+ and demand['resourceModelInfo']['modelInvariantId'] else {})
+ prop['filtering_attributes'].update({'model-version-id': demand['resourceModelInfo']['modelVersionId']}
+ if 'modelVersionId' in demand['resourceModelInfo']
+ and demand['resourceModelInfo']['modelVersionId'] else {})
+ prop['filtering_attributes'].update({'equipment-role': policy_property['equipmentRole']}
+ if 'equipmentRole' in policy_property and policy_property['equipmentRole']
+ else {})
+ prop.update(get_candidates_demands(demand))
+ demand_properties.append(prop)
+ return demand_properties
+
+
+def get_demand_attributes(prop, policy_property, attribute_type, key):
+ if policy_property.get(key):
+ prop[attribute_type] = dict()
+ for attr_key, attr_val in policy_property[key].items():
+ update_converted_attribute(attr_key, attr_val, prop, attribute_type)
+
+
+def update_converted_attribute(attr_key, attr_val, properties, attribute_type):
+ """Updates dictonary of attributes with one specified in the arguments.
+
+    Automatically translates the key name from camelCase to hyphenated form
+ :param attribute_type: attribute section name
+ :param attr_key: key of the attribute
+ :param attr_val: value of the attribute
+ :param properties: dictionary with attributes to update
+ :return:
+ """
+ if attr_val:
+ remapping = policy_config_mapping[attribute_type]
+ if remapping.get(attr_key):
+ key_value = remapping.get(attr_key)
+ else:
+ key_value = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', attr_key)
+ key_value = re.sub('([a-z0-9])([A-Z])', r'\1-\2', key_value).lower()
+ properties[attribute_type].update({key_value: attr_val})
+
+
+def gen_demands(demands, vnf_policies):
+ """Generate list of demands based on request and VNF policies
+
+ :param demands: A List of demands
+ :param vnf_policies: Policies associated with demand resources
+ (e.g. from grouped_policies['vnfPolicy'])
+ :return: list of demand parameters to populate the Conductor API call
+ """
+ demand_dictionary = {}
+ for demand in demands:
+ prop = get_demand_properties(demand, vnf_policies)
+ if len(prop) > 0:
+ demand_dictionary.update({demand['resourceModuleName']: prop})
+ return demand_dictionary
+
+
+def gen_cloud_region(property):
+ prop = {"cloud_region_attributes": dict()}
+ if 'cloudRegion' in property:
+ for k, v in property['cloudRegion'].items():
+ update_converted_attribute(k, v, prop, 'cloud_region_attributes')
+ return prop["cloud_region_attributes"]
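
The camelCase-to-hyphen fallback in update_converted_attribute() is just the two re.sub passes; a standalone check:

import re

def to_hyphenated(attr_key):
    # Same two-pass conversion used when no explicit remapping exists.
    key_value = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', attr_key)
    return re.sub('([a-z0-9])([A-Z])', r'\1-\2', key_value).lower()

print(to_hyphenated("modelInvariantId"))  # model-invariant-id
print(to_hyphenated("equipmentRole"))     # equipment-role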
diff --git a/osdf/adapters/dcae/des.py b/osdf/adapters/dcae/des.py
new file mode 100644
index 0000000..57cb128
--- /dev/null
+++ b/osdf/adapters/dcae/des.py
@@ -0,0 +1,47 @@
+# -------------------------------------------------------------------------
+# Copyright (C) 2020 Wipro Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import requests
+
+from osdf.config.base import osdf_config
+from osdf.utils.interfaces import RestClient
+
+
+class DESException(Exception):
+ pass
+
+
+def extract_data(service_id, request_data):
+ """Extracts data from the data lake via DES.
+
+ param: service_id: kpi data identifier
+ param: request_data: data to send
+ param: osdf_config: osdf config to retrieve api info
+ """
+
+ config = osdf_config.deployment
+ user, password = config['desUsername'], config['desPassword']
+ headers = config["desHeaders"]
+ req_url = config["desUrl"] + config["desApiPath"] + service_id
+ rc = RestClient(userid=user, passwd=password, url=req_url, headers=headers, method="POST")
+
+ try:
+ response_json = rc.request(data=request_data)
+ return response_json.get("result")
+ except requests.RequestException as e:
+ raise DESException("Request exception was encountered {}".format(e))
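
A hedged usage sketch for the DES adapter; the config keys match the ones read above, and the service id and request body are illustrative:

from osdf.adapters.dcae.des import DESException, extract_data

request_data = '{"startTime": "", "endTime": ""}'  # illustrative body

try:
    # POSTs to {desUrl}{desApiPath}{service_id} and returns the
    # "result" field of the response json.
    result = extract_data("example-kpi-service-id", request_data)
except DESException as err:
    print(err)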
diff --git a/osdf/adapters/dcae/message_router.py b/osdf/adapters/dcae/message_router.py
index caf04a4..0968812 100755
--- a/osdf/adapters/dcae/message_router.py
+++ b/osdf/adapters/dcae/message_router.py
@@ -17,8 +17,10 @@
#
import requests
-from osdf.utils.data_types import list_like
+
from osdf.operation.exceptions import MessageBusConfigurationException
+from osdf.utils.data_types import list_like
+from osdf.utils.interfaces import RestClient
class MessageRouterClient(object):
@@ -27,7 +29,8 @@ class MessageRouterClient(object):
consumer_group_id=':',
timeout_ms=15000, fetch_limit=1000,
userid_passwd=':'):
- """
+ """Class initializer
+
:param dmaap_url: protocol, host and port; can also be a list of URLs
(e.g. https://dmaap-host.onapdemo.onap.org:3905/events/org.onap.dmaap.MULTICLOUD.URGENT),
can also be a list of such URLs
@@ -44,14 +47,14 @@ class MessageRouterClient(object):
self.topic_urls = [dmaap_url] if not list_like(dmaap_url) else dmaap_url
self.timeout_ms = timeout_ms
self.fetch_limit = fetch_limit
- userid, passwd = userid_passwd.split(':')
- self.auth = (userid, passwd) if userid and passwd else None
+ self.userid, self.passwd = userid_passwd.split(':')
consumer_group, consumer_id = consumer_group_id.split(':')
self.consumer_group = consumer_group
self.consumer_id = consumer_id
def get(self, outputjson=True):
"""Fetch messages from message router (DMaaP or UEB)
+
:param outputjson: (optional, specifies if response is expected to be in json format), ignored for "POST"
:return: response as a json object (if outputjson is True) or as a string
"""
@@ -61,7 +64,7 @@ class MessageRouterClient(object):
for url in urls[:-1]:
try:
return self.http_request(method='GET', url=url, outputjson=outputjson)
- except:
+ except Exception:
pass
return self.http_request(method='GET', url=urls[-1], outputjson=outputjson)
@@ -69,13 +72,13 @@ class MessageRouterClient(object):
for url in self.topic_urls[:-1]:
try:
return self.http_request(method='POST', url=url, inputjson=inputjson, msg=msg)
- except:
+ except Exception:
pass
return self.http_request(method='POST', url=self.topic_urls[-1], inputjson=inputjson, msg=msg)
def http_request(self, url, method, inputjson=True, outputjson=True, msg=None, **kwargs):
- """
- Perform the actual URL request (GET or POST), and do error handling
+ """Perform the actual URL request (GET or POST), and do error handling
+
:param url: full URL (including topic, limit, timeout, etc.)
:param method: GET or POST
:param inputjson: Specify whether input is in json format (valid only for POST)
@@ -83,9 +86,15 @@ class MessageRouterClient(object):
:param msg: content to be posted (valid only for POST)
:return: response as a json object (if outputjson or POST) or as a string; None if error
"""
- res = requests.request(url=url, method=method, auth=self.auth, **kwargs)
- if res.status_code == requests.codes.ok:
- return res.json() if outputjson or method == "POST" else res.content
- else:
- raise Exception("HTTP Response Error: code {}; headers:{}, content: {}".format(
- res.status_code, res.headers, res.content))
+
+ rc = RestClient(userid=self.userid, passwd=self.passwd, url=url, method=method)
+ try:
+ res = rc.request(raw_response=True, data=msg, **kwargs)
+ if res.status_code == requests.codes.ok:
+ return res.json() if outputjson or method == "POST" else res.content
+ else:
+ raise Exception("HTTP Response Error: code {}; headers:{}, content: {}".format(
+ res.status_code, res.headers, res.content))
+
+ except requests.RequestException as ex:
+ raise Exception("Request Exception occurred {}".format(str(ex)))
diff --git a/osdf/adapters/local_data/local_policies.py b/osdf/adapters/local_data/local_policies.py
index 6e49388..dc6837a 100644
--- a/osdf/adapters/local_data/local_policies.py
+++ b/osdf/adapters/local_data/local_policies.py
@@ -19,7 +19,7 @@
import json
import os
import re
-
+from osdf.logging.osdf_logging import debug_log
def get_local_policies(local_policy_folder, local_policy_list, policy_id_list=None):
"""
@@ -32,6 +32,7 @@ def get_local_policies(local_policy_folder, local_policy_list, policy_id_list=No
:param policy_id_list: list of policies to get (if unspecified or None, get all)
:return: get policies
"""
+ debug_log.debug("Policy folder: {}, local_list {}, policy id list {}".format(local_policy_folder, local_policy_list, policy_id_list))
policies = []
if policy_id_list:
for policy_id in policy_id_list:
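
An illustrative call for the function under change; the folder and file names
are placeholders, and the exact file-naming convention is whatever the body of
get_local_policies (outside this hunk) expects.

    from osdf.adapters.local_data.local_policies import get_local_policies

    policies = get_local_policies("config/placement_policies",
                                  ["affinity_vcpe.json", "distance_vcpe.json"],
                                  policy_id_list=["affinity_vcpe"])
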
diff --git a/osdf/adapters/policy/interface.py b/osdf/adapters/policy/interface.py
index 95bfacc..6799c6b 100644
--- a/osdf/adapters/policy/interface.py
+++ b/osdf/adapters/policy/interface.py
@@ -1,4 +1,4 @@
- # -------------------------------------------------------------------------
+# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,18 +17,23 @@
#
import base64
-import itertools
import json
-
-
+import os
from requests import RequestException
-from osdf.operation.exceptions import BusinessException
+import uuid
+import yaml
+
from osdf.adapters.local_data.local_policies import get_local_policies
-from osdf.adapters.policy.utils import policy_name_as_regex, retrieve_node
-from osdf.utils.programming_utils import list_flatten, dot_notation
+from osdf.adapters.policy.utils import policy_name_as_regex
from osdf.config.base import osdf_config
-from osdf.logging.osdf_logging import audit_log, MH, metrics_log, debug_log
+from osdf.logging.osdf_logging import audit_log
+from osdf.logging.osdf_logging import debug_log
+from osdf.logging.osdf_logging import metrics_log
+from osdf.logging.osdf_logging import MH
+from osdf.operation.exceptions import BusinessException
from osdf.utils.interfaces import RestClient
+from osdf.utils.programming_utils import dot_notation
+from osdf.utils.programming_utils import list_flatten
def get_by_name(rest_client, policy_name_list, wildcards=True):
@@ -46,7 +51,8 @@ def get_by_name(rest_client, policy_name_list, wildcards=True):
def get_by_scope(rest_client, req, config_local, type_service):
- """ Get policies by scopes as defined in the configuration file.
+ """Get policies by scopes as defined in the configuration file.
+
:param rest_client: a rest client object to make a call.
:param req: an optimization request.
:param config_local: application configuration file.
@@ -55,28 +61,39 @@ def get_by_scope(rest_client, req, config_local, type_service):
"""
scope_policies = []
references = config_local.get('references', {})
- pscope = config_local.get('policy_info', {}).get(type_service, {}).get('policy_scope', {})
- service_name = dot_notation(req, references.get('service_name', {}).get('value', None))
- primary_scope = pscope['{}_scope'.format(service_name.lower() if pscope.get(service_name + "_scope", None)
- else "default")]
- for sec_scope in pscope.get('secondary_scopes', []):
- policies, scope_fields = [], []
- for field in sec_scope:
- scope_fields.extend([get_scope_fields(field, references, req, list_flatten(scope_policies))
- if 'get_param' in field else field])
- scope_fields = set(list_flatten(scope_fields))
- scope_fields = set([x.lower() for x in scope_fields])
- for scope in scope_fields:
- policies.extend(policy_api_call(rest_client, primary_scope, scope))
- scope_policies.append([policy for policy in policies
- if scope_fields <= set(json.loads(policy['config'])['content']['policyScope'])])
+ pscope = config_local.get('policy_info', {}).get(type_service, {}).get('policy_scope', [])
+ scope_fields = {}
+ policies = {}
+ for scopes in pscope:
+ for key in scopes.keys():
+ for field in scopes[key]:
+ scope_fields[key] = list_flatten([get_scope_fields(field, references, req, policies)
+ if 'get_param' in field else field])
+ if scope_fields.get('resources') and len(scope_fields['resources']) > 1:
+ for s in scope_fields['resources']:
+ scope_fields['resources'] = [s]
+ policies.update(policy_api_call(rest_client, scope_fields).get('policies', {}))
+ else:
+ policies.update(policy_api_call(rest_client, scope_fields).get('policies', {}))
+ for policyName in policies.keys():
+ keys = scope_fields.keys() & policies[policyName]['properties'].keys()
+ policy = {}
+ policy[policyName] = policies[policyName]
+ for k in keys:
+ if set(policies.get(policyName, {}).get('properties', {}).get(k)) >= set(scope_fields[k]) \
+ and policy not in scope_policies:
+ scope_policies.append(policy)
+
return scope_policies
-def get_scope_fields(field, references, req, policy_info):
- """ Retrieve the values for scope fields from a request and policies as per the configuration
- and references defined in a configuration file. If the value of a scope field missing in a request or
+def get_scope_fields(field, references, req, policies):
+ """Retrieve the values for scope fields from a request and policies
+
+ They are derived as per the configuration and references defined in a
+ configuration file. If the value of a scope field is missing in a request or
policies, throw an exception since correct policies cannot be retrieved.
+
:param field: details on a scope field from a configuration file.
:param references: references defined in a configuration file.
:param req: an optimization request.
@@ -92,9 +109,9 @@ def get_scope_fields(field, references, req, policy_info):
raise BusinessException("Field {} is missing a value in a request".format(ref_value.split('.')[-1]))
else:
scope_fields = []
- for policy in policy_info:
- policy_content = json.loads(policy.get('config', "{}"))
- if policy_content.get('content', {}).get('policyType', "invalid_policy") == ref_source:
+ for policyName in policies.keys():
+ policy_content = policies.get(policyName)
+ if policy_content.get('type', "invalid_policy") == ref_source:
scope_fields.append(dot_notation(policy_content, ref_value))
scope_values = list_flatten(scope_fields)
if len(scope_values) > 0:
@@ -103,31 +120,35 @@ def get_scope_fields(field, references, req, policy_info):
ref_value.split('.')[-1], ref_source))
-def policy_api_call(rest_client, primary_scope, scope_field):
- """ Makes a getConfig API call to the policy system to retrieve policies matching a scope.
- :param rest_client: rest client object to make a call
- :param primary_scope: the primary scope of policies, which is a folder in the policy system
- where policies are stored.
- :param scope_field: the secondary scope of policies, which is a collection of domain values.
- :return: a list of policies matching both primary and secondary scopes.
+def policy_api_call(rest_client, scope_fields):
+ """Makes the api call to policy and return the response if policies are present
+
+ :param rest_client: rest client to make a call
+ :param scope_fields: a collection of scopes to be used for filtering
+ :return: a list of policies matching all filters
"""
- api_call_body = {"policyName": "{}.*".format(primary_scope),
- "configAttributes": {"policyScope": "{}".format(scope_field)}}
- return rest_client.request(json=api_call_body)
+ api_call_body = {"ONAPName": "OOF",
+ "ONAPComponent": "OOF_Component",
+ "ONAPInstance": "OOF_Component_Instance",
+ "action": "optimize",
+ "resource": scope_fields}
+ response = rest_client.request(json=api_call_body)
+ if not response.get("policies"):
+ raise Exception("Policies not found for the scope {}".format(scope_fields))
+ return response
def remote_api(req_json, osdf_config, service_type="placement"):
"""Make a request to policy and return response -- it accounts for multiple requests that be needed
+
:param req_json: policy request object (can have multiple policy names)
:param osdf_config: main config that will have credential information
:param service_type: the type of service to call: "placement", "scheduling"
:return: all related policies and provStatus retrieved from Subscriber policy
"""
config = osdf_config.deployment
+ headers = {"Content-type": "application/json"}
uid, passwd = config['policyPlatformUsername'], config['policyPlatformPassword']
- pcuid, pcpasswd = config['policyClientUsername'], config['policyClientPassword']
- headers = {"ClientAuth": base64.b64encode(bytes("{}:{}".format(pcuid, pcpasswd), "ascii"))}
- headers.update({'Environment': config['policyPlatformEnv']})
url = config['policyPlatformUrl']
rc = RestClient(userid=uid, passwd=passwd, headers=headers, url=url, log_func=debug_log.debug)
@@ -139,17 +160,17 @@ def remote_api(req_json, osdf_config, service_type="placement"):
policies = get_by_scope(rc, req_json, osdf_config.core, service_type)
formatted_policies = []
- for x in itertools.chain(*policies):
- if x['config'] is None:
- raise BusinessException("Config not found for policy with name %s" % x['policyName'])
+ for x in policies:
+ if x[list(x.keys())[0]].get('properties') is None:
+ raise BusinessException("Properties not found for policy with name %s" % x[list(x.keys()[0])])
else:
- formatted_policies.append(json.loads(x['config']))
+ formatted_policies.append(x)
return formatted_policies
def local_policies_location(req_json, osdf_config, service_type):
- """
- Get folder and list of policy_files if "local policies" option is enabled
+ """Get folder and list of policy_files if "local policies" option is enabled
+
:param service_type: placement supported for now, but can be any other service
:return: a tuple (folder, file_list) or None
"""
@@ -157,17 +178,20 @@ def local_policies_location(req_json, osdf_config, service_type):
if lp.get('global_disabled'):
return None # short-circuit to disable all local policies
if lp.get('local_{}_policies_enabled'.format(service_type)):
+ debug_log.debug('Loading local policies for service type: {}'.format(service_type))
if service_type == "scheduling":
return lp.get('{}_policy_dir'.format(service_type)), lp.get('{}_policy_files'.format(service_type))
else:
- service_name = req_json['serviceInfo']['serviceName'] # TODO: data_mapping.get_service_type(model_name)
+ service_name = req_json['serviceInfo']['serviceName']
+ debug_log.debug('Loading local policies for service name: {}'.format(service_name))
return lp.get('{}_policy_dir_{}'.format(service_type, service_name.lower())), \
- lp.get('{}_policy_files_{}'.format(service_type, service_name.lower()))
+ lp.get('{}_policy_files_{}'.format(service_type, service_name.lower()))
return None
def get_policies(request_json, service_type):
"""Validate the request and get relevant policies
+
:param request_json: Request object
:param service_type: the type of service to call: "placement", "scheduling"
:return: policies associated with this request and provStatus retrieved from Subscriber policy
@@ -178,6 +202,8 @@ def get_policies(request_json, service_type):
local_info = local_policies_location(request_json, osdf_config, service_type)
if local_info: # tuple containing location and list of files
+ if local_info[0] is None or local_info[1] is None:
+ raise ValueError("Error fetching local policy info")
to_filter = None
if osdf_config.core['policy_info'][service_type]['policy_fetch'] == "by_name":
to_filter = request_json[service_type + "Info"]['policyId']
@@ -186,3 +212,31 @@ def get_policies(request_json, service_type):
policies = remote_api(request_json, osdf_config, service_type)
return policies
+
+
+def upload_policy_models():
+ """Upload all the policy models reside in the folder"""
+ requestId = uuid.uuid4()
+ config = osdf_config.deployment
+ model_path = config['pathPolicyModelUpload']
+ uid, passwd = config['policyPlatformUsername'], config['policyPlatformPassword']
+ pcuid, pcpasswd = config['policyClientUsername'], config['policyClientPassword']
+ headers = {"ClientAuth": base64.b64encode(bytes("{}:{}".format(pcuid, pcpasswd), "ascii"))}
+ headers.update({'Environment': config['policyPlatformEnv']})
+ headers.update({'X-ONAP-RequestID': str(requestId)})
+ url = config['policyPlatformUrlModelUpload']
+ rc = RestClient(userid=uid, passwd=passwd, headers=headers, url=url, log_func=debug_log.debug)
+
+ success = True
+ for file in os.listdir(model_path):
+ if not file.endswith(".yaml"):
+ continue
+ with open(os.path.join(model_path, file)) as f:
+ file_converted = yaml.safe_load(f)
+ response = rc.request(json=file_converted, ok_codes=(200,))
+ if not response:
+ success = False
+ audit_log.warn("Policy model %s upload failed!" % file)
+ if success:
+ return "Policy model upload succeeded!"
+ else:
+ return "Policy model upload failed!"
diff --git a/osdf/adapters/policy/utils.py b/osdf/adapters/policy/utils.py
index 2f873af..79047eb 100644
--- a/osdf/adapters/policy/utils.py
+++ b/osdf/adapters/policy/utils.py
@@ -33,11 +33,11 @@ def group_policies_gen(flat_policies, config):
"""
filtered_policies = defaultdict(list)
policy_name = []
- policies = [x for x in flat_policies if x['content'].get('policyType')] # drop ones without 'policy_type'
+ policies = [x for x in flat_policies if x[list(x.keys())[0]]["type"]] # drop ones without 'type'
priority = config.get('policy_info', {}).get('prioritization_attributes', {})
aggregated_policies = dict()
for plc in policies:
- attrs = [dot_notation(plc, dot_path) for key in priority.keys() for dot_path in priority[key]]
+ attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]]
attrs_list = [x if isinstance(x, list) else [x] for x in attrs]
attributes = [list_flatten(x) if isinstance(x, list) else x for x in attrs_list]
for y in itertools.product(*attributes):
@@ -45,12 +45,12 @@ def group_policies_gen(flat_policies, config):
aggregated_policies[y].append(plc)
for key in aggregated_policies.keys():
- aggregated_policies[key].sort(key=lambda x: x['priority'], reverse=True)
+ # aggregated_policies[key].sort(key=lambda x: x['priority'], reverse=True)
prioritized_policy = aggregated_policies[key][0]
- if prioritized_policy['policyName'] not in policy_name:
+ if list(prioritized_policy.keys())[0] not in policy_name:
# TODO: Check logic here... should policy appear only once across all groups?
- filtered_policies[prioritized_policy['content']['policyType']].append(prioritized_policy)
- policy_name.append(prioritized_policy['policyName'])
+ filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy)
+ policy_name.append(list(prioritized_policy.keys())[0])
return filtered_policies
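
The grouping code above now assumes each flat policy is a one-entry dict keyed
by the policy name, with "type" and "properties" inside; a sketch of the
expected input shape (names and values illustrative):

    flat_policies = [
        {"OSDF_CASABLANCA.Affinity_vCPE_1": {
            "type": "onap.policies.optimization.resource.AffinityPolicy",
            "properties": {"scope": ["vcpe"], "resources": ["vgw"]}}},
    ]
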
diff --git a/osdf/apps/__init__.py b/osdf/apps/__init__.py
new file mode 100644
index 0000000..370bc06
--- /dev/null
+++ b/osdf/apps/__init__.py
@@ -0,0 +1,2 @@
+import yaml
+yaml.warnings({'YAMLLoadWarning': False})
\ No newline at end of file
diff --git a/osdf/apps/baseapp.py b/osdf/apps/baseapp.py
new file mode 100644
index 0000000..1ad7556
--- /dev/null
+++ b/osdf/apps/baseapp.py
@@ -0,0 +1,222 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2017 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+"""
+OSDF Manager Main Flask Application
+"""
+
+import json
+from optparse import OptionParser
+import ssl
+import sys
+import time
+import traceback
+
+from flask import Flask
+from flask import g
+from flask import request
+from flask import Response
+from onaplogging.mdcContext import MDC
+from requests import RequestException
+from schematics.exceptions import DataError
+
+import osdf.adapters.aaf.sms as sms
+from osdf.config.base import osdf_config
+from osdf.logging.osdf_logging import audit_log
+from osdf.logging.osdf_logging import debug_log
+from osdf.logging.osdf_logging import error_log
+from osdf.operation.error_handling import internal_error_message
+from osdf.operation.error_handling import request_exception_to_json_body
+from osdf.operation.exceptions import BusinessException
+import osdf.operation.responses
+from osdf.utils.mdc_utils import clear_mdc
+from osdf.utils.mdc_utils import get_request_id
+from osdf.utils.mdc_utils import populate_default_mdc
+from osdf.utils.mdc_utils import populate_mdc
+from osdf.utils.mdc_utils import set_error_details
+
+ERROR_TEMPLATE = osdf.ERROR_TEMPLATE
+
+app = Flask(__name__)
+
+BAD_CLIENT_REQUEST_MESSAGE = 'Client sent an invalid request'
+
+
+@app.errorhandler(BusinessException)
+def handle_business_exception(e):
+ """An exception explicitly raised due to some business rule
+
+ """
+ error_log.error("Synchronous error for request id {} {}"
+ .format(g.request_id, traceback.format_exc()))
+ err_msg = ERROR_TEMPLATE.render(description=str(e))
+ response = Response(err_msg, content_type='application/json; charset=utf-8')
+ response.status_code = 400
+ return response
+
+
+@app.errorhandler(RequestException)
+def handle_request_exception(e):
+ """Returns a detailed synchronous message to the calling client when osdf fails due to a remote call to another system
+
+ """
+ error_log.error("Synchronous error for request id {} {}".format(g.request_id, traceback.format_exc()))
+ err_msg = request_exception_to_json_body(e)
+ response = Response(err_msg, content_type='application/json; charset=utf-8')
+ response.status_code = 400
+ return response
+
+
+@app.errorhandler(DataError)
+def handle_data_error(e):
+ """Returns a detailed message to the calling client when the initial synchronous message is invalid
+
+ """
+ error_log.error("Synchronous error for request id {} {}".format(g.request_id, traceback.format_exc()))
+
+ body_dictionary = {
+ "serviceException": {
+ "text": BAD_CLIENT_REQUEST_MESSAGE,
+ "exceptionMessage": str(e.errors),
+ "errorType": "InvalidClientRequest"
+ }
+ }
+
+ body_as_json = json.dumps(body_dictionary)
+ response = Response(body_as_json, content_type='application/json; charset=utf-8')
+ response.status_code = 400
+ return response
+
+
+@app.before_request
+def log_request():
+ clear_mdc()
+ if request.content_type and 'json' in request.content_type:
+ populate_mdc(request)
+ g.request_id = get_request_id(request.get_json())
+ log_message(json.dumps(request.get_json()), "INPROGRESS", 'ENTRY')
+ else:
+ populate_default_mdc(request)
+ log_message('', "INPROGRESS", 'ENTRY')
+
+
+@app.after_request
+def log_response(response):
+ log_response_data(response)
+ return response
+
+
+def log_response_data(response):
+ status_value = ''
+ try:
+ status_value = map_status_value(response)
+ log_message(response.get_data(as_text=True), status_value, 'EXIT')
+ except Exception:
+ try:
+ set_default_audit_mdc(request, status_value, 'EXIT')
+ audit_log.info(response.get_data(as_text=True))
+ except Exception:
+ set_error_details(300, 'Internal Error')
+ error_log.error("Error logging the response data due to {}".format(traceback.format_exc()))
+
+
+def set_default_audit_mdc(request, status_value, p_marker):
+ MDC.put('partnerName', 'internal')
+ MDC.put('serviceName', request.path)
+ MDC.put('statusCode', status_value)
+ MDC.put('requestID', 'internal')
+ MDC.put('timer', int((time.process_time() - g.request_start) * 1000))
+ MDC.put('customField1', p_marker)
+
+
+def log_message(message, status_value, p_marker='INVOKE'):
+ MDC.put('statusCode', status_value)
+ MDC.put('customField1', p_marker)
+ MDC.put('timer', int((time.process_time() - g.request_start) * 1000))
+ audit_log.info(message)
+
+
+def map_status_value(response):
+ if 200 <= response.status_code < 300:
+ status_value = "COMPLETE"
+ else:
+ status_value = "ERROR"
+ return status_value
+
+
+@app.errorhandler(500)
+def internal_failure(error):
+ """Returned when unexpected coding errors occur during initial synchronous processing
+
+ """
+ error_log.error("Synchronous error for request id {} {}".format(g.request_id, traceback.format_exc()))
+ response = Response(internal_error_message, content_type='application/json; charset=utf-8')
+ response.status_code = 500
+ return response
+
+
+def get_options(argv):
+ program_version_string = '%%prog %s' % "v1.0"
+ program_longdesc = ""
+ program_license = ""
+
+ parser = OptionParser(version=program_version_string, epilog=program_longdesc, description=program_license)
+ parser.add_option("-l", "--local", dest="local", help="run locally", action="store_true", default=False)
+ parser.add_option("-t", "--devtest", dest="devtest", help="run in dev/test environment", action="store_true",
+ default=False)
+ parser.add_option("-d", "--debughost", dest="debughost",
+ help="IP Address of host running debug server", default='')
+ parser.add_option("-p", "--debugport", dest="debugport",
+ help="Port number of debug server", type=int, default=5678)
+ opts, args = parser.parse_args(argv)
+
+ if opts.debughost:
+ debug_log.debug('pydevd.settrace({}, port={})'.format(opts.debughost, opts.debugport))
+ # pydevd.settrace(opts.debughost, port=opts.debugport)
+ return opts
+
+
+def build_ssl_context():
+ ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
+ ssl_context.set_ciphers('ECDHE-RSA-AES128-SHA256:EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH')
+ ssl_context.load_cert_chain(sys_conf['ssl_context'][0], sys_conf['ssl_context'][1])
+ return ssl_context
+
+
+def run_app():
+ global sys_conf
+ sys_conf = osdf_config['core']['osdf_system']
+ ports = sys_conf['osdf_ports']
+ internal_port, external_port = ports['internal'], ports['external']
+ local_host = sys_conf['osdf_ip_default']
+ common_app_opts = dict(host=local_host, threaded=True, use_reloader=False)
+ ssl_opts = sys_conf.get('ssl_context')
+ if ssl_opts:
+ common_app_opts.update({'ssl_context': build_ssl_context()})
+ opts = get_options(sys.argv)
+ # Load secrets from SMS
+ sms.load_secrets()
+ if not opts.local and not opts.devtest: # normal deployment
+ app.run(port=internal_port, debug=False, **common_app_opts)
+ else:
+ port = internal_port if opts.local else external_port
+ app.run(port=port, debug=True, **common_app_opts)
+
+
+if __name__ == "__main__":
+ run_app()
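
A minimal sketch of reusing the module as an app entry point; the --local flag
mirrors get_options above and keeps Flask on the internal port. This assumes
the config files and SMS secrets that run_app loads are already in place.

    import sys

    from osdf.apps.baseapp import run_app

    if __name__ == "__main__":
        sys.argv.append("--local")  # equivalent to: python baseapp.py -l
        run_app()
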
diff --git a/osdf/cmd/encryptionUtil.py b/osdf/cmd/encryptionUtil.py
new file mode 100644
index 0000000..6c0cae2
--- /dev/null
+++ b/osdf/cmd/encryptionUtil.py
@@ -0,0 +1,50 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (c) 2015-2018 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+import sys
+from osdf.utils import cipherUtils
+
+
+def main():
+
+ if len(sys.argv) != 4:
+ print("Invalid input - usage --> (options(encrypt/decrypt) input-value with-key)")
+ return
+
+ enc_dec = sys.argv[1]
+ valid_option_values = ['encrypt', 'decrypt']
+ if enc_dec not in valid_option_values:
+ print("Invalid input - usage --> (options(encrypt/decrypt) input-value with-key)")
+ print("Option value can only be one of {}".format(valid_option_values))
+ print("You entered '{}'".format(enc_dec))
+ return
+
+ input_string = sys.argv[2]
+ with_key = sys.argv[3]
+
+ print("You've requested '{}' to be '{}ed' using key '{}'".format(input_string, enc_dec, with_key))
+ print("You can always perform the reverse operation (encrypt/decrypt) using the same key"
+ "to be certain you get the same results back'")
+
+ util = cipherUtils.AESCipher.get_instance(with_key)
+ if enc_dec.lower() == 'encrypt':
+ result = util.encrypt(input_string)
+ else:
+ result = util.decrypt(input_string)
+
+ print("Your resultt: {}".format(result)) \ No newline at end of file
diff --git a/osdf/config/__init__.py b/osdf/config/__init__.py
index 86156a1..e32d44e 100644
--- a/osdf/config/__init__.py
+++ b/osdf/config/__init__.py
@@ -27,6 +27,6 @@ class CoreConfig(metaclass=MetaSingleton):
def get_core_config(self, config_file=None):
if self.core_config is None:
- self.core_config = yaml.load(open(config_file))
+ self.core_config = yaml.safe_load(open(config_file))
return self.core_config
diff --git a/osdf/config/base.py b/osdf/config/base.py
index fbe9315..be693cb 100644
--- a/osdf/config/base.py
+++ b/osdf/config/base.py
@@ -1,5 +1,6 @@
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
+# Copyright (C) 2020 Wipro Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,16 +18,21 @@
#
import os
-
+from osdf.config.consulconfig import call_consul_kv
import osdf.config.credentials as creds
import osdf.config.loader as config_loader
from osdf.utils.programming_utils import DotDict
+from threading import Thread
config_spec = {
"deployment": os.environ.get("OSDF_CONFIG_FILE", "config/osdf_config.yaml"),
"core": "config/common_config.yaml"
}
+slicing_spec = "config/slicing_config.yaml"
+
+slice_config = config_loader.load_config_file(slicing_spec)
+
osdf_config = DotDict(config_loader.all_configs(**config_spec))
http_basic_auth_credentials = creds.load_credentials(osdf_config)
@@ -34,3 +40,10 @@ http_basic_auth_credentials = creds.load_credentials(osdf_config)
dmaap_creds = creds.dmaap_creds()
creds_prefixes = {"so": "so", "cm": "cmPortal", "pcih": "pciHMS"}
+
+osdf_config_deployment = osdf_config.deployment
+
+
+if osdf_config.deployment.get('activateConsulConfig'):
+ consulthread = Thread(target=call_consul_kv, args=(osdf_config,))
+ consulthread.start()
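
The watcher thread only starts when the deployment config opts in; a sketch of
the keys consulconfig.Config reads (values illustrative):

    osdf_config.deployment.update({
        'activateConsulConfig': True,
        'consulHost': 'consul.onap', 'consulPort': 8500,
        'consulScheme': 'http', 'consulVerify': False, 'consulCert': None,
    })
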
diff --git a/osdf/config/consulconfig.py b/osdf/config/consulconfig.py
new file mode 100644
index 0000000..fc5b3fe
--- /dev/null
+++ b/osdf/config/consulconfig.py
@@ -0,0 +1,52 @@
+from consul.base import Timeout
+from consul.tornado import Consul
+import json
+from osdf.logging.osdf_logging import debug_log
+from tornado.gen import coroutine
+from tornado.ioloop import IOLoop
+
+
+class Config(object):
+ def __init__(self, loop, osdf_final_config):
+ self.config = osdf_final_config
+ osdf_config = self.config['osdf_config']
+ self.consul = Consul(host=osdf_config['consulHost'], port=osdf_config['consulPort'],
+ scheme=osdf_config['consulScheme'], verify=osdf_config['consulVerify'],
+ cert=osdf_config['consulCert'])
+ result = json.dumps(self.config)
+ self.consul.kv.put("osdfconfiguration", result)
+ loop.add_callback(self.watch)
+
+ @coroutine
+ def watch(self):
+ index = None
+ while True:
+ try:
+ index, data = yield self.consul.kv.get('osdfconfiguration', index=index)
+ if data is not None:
+ self.update_config(data)
+ except Timeout:
+ pass
+ except Exception as e:
+ debug_log.debug('Exception Encountered {}'.format(e))
+
+ def update_config(self, data):
+ new_config = json.loads(data['Value'].decode('utf-8'))
+ osdf_deployment = new_config['osdf_config']
+ osdf_core = new_config['common_config']
+ self.config['osdf_config'].update(osdf_deployment)
+ self.config['common_config'].update(osdf_core)
+ debug_log.debug("updated config {}".format(new_config))
+ debug_log.debug("value changed")
+
+
+def call_consul_kv(osdf_config):
+ osdf_final_config = {
+ 'osdf_config': osdf_config.deployment,
+ 'common_config': osdf_config.core
+ }
+ io_loop = IOLoop()
+ io_loop.make_current()
+ _ = Config(io_loop, osdf_final_config)
+ io_loop.start()
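
Any consul client can update the watched key; a hedged sketch with the blocking
python-consul client (the module above uses the tornado variant; host and
values here are illustrative):

    import json

    import consul

    c = consul.Consul(host="consul.onap", port=8500)
    new_cfg = {"osdf_config": {"desUrl": "http://des:8080"},
               "common_config": {}}
    c.kv.put("osdfconfiguration", json.dumps(new_cfg))
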
diff --git a/osdf/config/loader.py b/osdf/config/loader.py
index 7cb363a..dca0033 100644
--- a/osdf/config/loader.py
+++ b/osdf/config/loader.py
@@ -31,7 +31,7 @@ def load_config_file(config_file: str, child_name="dockerConfiguration") -> dict
with open(config_file, 'r') as fid:
res = {}
if config_file.endswith(".yaml"):
- res = yaml.load(fid)
+ res = yaml.safe_load(fid)
elif config_file.endswith(".json") or config_file.endswith("json"):
res = json.load(fid)
return res.get(child_name, res) if child_name else res
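
Illustrative use of the loader: child_name defaults to "dockerConfiguration",
and the whole document is returned when that key is absent.

    from osdf.config.loader import load_config_file

    cfg = load_config_file("config/osdf_config.yaml")
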
diff --git a/osdf/logging/__init__.py b/osdf/logging/__init__.py
index 4b25e5b..df7613d 100644
--- a/osdf/logging/__init__.py
+++ b/osdf/logging/__init__.py
@@ -15,3 +15,7 @@
#
# -------------------------------------------------------------------------
#
+
+import yaml
+
+yaml.warnings({'YAMLLoadWarning': False})
diff --git a/osdf/optimizers/placementopt/__init__.py b/osdf/logging/monkey.py
index 4b25e5b..f6041bc 100644
--- a/osdf/optimizers/placementopt/__init__.py
+++ b/osdf/logging/monkey.py
@@ -1,5 +1,5 @@
# -------------------------------------------------------------------------
-# Copyright (c) 2017-2018 AT&T Intellectual Property
+# Copyright (c) 2020 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,3 +15,21 @@
#
# -------------------------------------------------------------------------
#
+
+
+__all__ = ["patch_all"]
+
+from onaplogging.logWatchDog import patch_loggingYaml
+
+from osdf.logging.oof_mdc_context import patch_logging_mdc
+
+
+def patch_all(mdc=True, yaml=True):
+ """monkey patch osdf logging to enable mdc
+
+ """
+ if mdc is True:
+ patch_logging_mdc()
+
+ if yaml is True:
+ patch_loggingYaml()
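
Typical bootstrap sketch: patch logging before the Flask app and the MDC-aware
formatters are created.

    from osdf.logging.monkey import patch_all

    patch_all()              # apply both the MDC and the yaml patches
    # patch_all(yaml=False)  # or the MDC patch alone
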
diff --git a/osdf/logging/onap_common_v1/CommonLogger.py b/osdf/logging/onap_common_v1/CommonLogger.py
deleted file mode 100755
index 6572d6f..0000000
--- a/osdf/logging/onap_common_v1/CommonLogger.py
+++ /dev/null
@@ -1,900 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-"""ONAP Common Logging library in Python."""
-
-#!/usr/bin/python
-# -*- indent-tabs-mode: nil -*- vi: set expandtab:
-
-
-from __future__ import print_function
-import os, sys, getopt, logging, logging.handlers, time, re, uuid, socket, threading
-
-class CommonLogger:
- """ONAP Common Logging object.
-
- Public methods:
- __init__
- setFields
- debug
- info
- warn
- error
- fatal
- """
-
- UnknownFile = -1
- ErrorFile = 0
- DebugFile = 1
- AuditFile = 2
- MetricsFile = 3
- DateFmt = '%Y-%m-%dT%H:%M:%S'
- verbose = False
-
- def __init__(self, configFile, logKey, **kwargs):
- """Construct a Common Logger for one Log File.
-
- Arguments:
- configFile -- configuration filename.
- logKey -- the keyword in configFile that identifies the log filename.
-
- Keyword arguments: Annotations are d:debug, a=audit, m=metrics, e=error
- style -- the log file format (style) to use when writing log messages,
- one of CommonLogger.ErrorFile, CommonLogger.DebugFile,
- CommonLogger.AuditFile and CommonLogger.MetricsFile, or
- one of the strings "error", "debug", "audit" or "metrics".
- May also be set in the config file using a field named
- <logKey>Style (where <logKey> is the value of the logKey
- parameter). The keyword value overrides the value in the
- config file.
- requestID (dame) -- optional default value for this log record field.
- serviceInstanceID (am) -- optional default value for this log record field.
- threadID (am) -- optional default value for this log record field.
- serverName (am) -- optional default value for this log record field.
- serviceName (am) -- optional default value for this log record field.
- instanceUUID (am) -- optional default value for this log record field.
- severity (am) -- optional default value for this log record field.
- serverIPAddress (am) -- optional default value for this log record field.
- server (am) -- optional default value for this log record field.
- IPAddress (am) -- optional default value for this log record field.
- className (am) -- optional default value for this log record field.
- timer (am) -- (ElapsedTime) optional default value for this log record field.
- partnerName (ame) -- optional default value for this log record field.
- targetEntity (me) -- optional default value for this log record field.
- targetServiceName (me) -- optional default value for this log record field.
- statusCode (am) -- optional default value for this log record field.
- responseCode (am) -- optional default value for this log record field.
- responseDescription (am) -- optional default value for this log record field.
- processKey (am) -- optional default value for this log record field.
- targetVirtualEntity (m) -- optional default value for this log record field.
- customField1 (am) -- optional default value for this log record field.
- customField2 (am) -- optional default value for this log record field.
- customField3 (am) -- optional default value for this log record field.
- customField4 (am) -- optional default value for this log record field.
- errorCategory (e) -- optional default value for this log record field.
- errorCode (e) -- optional default value for this log record field.
- errorDescription (e) -- optional default value for this log record field.
-
- Note: the pipe '|' character is not allowed in any log record field.
- """
-
- self._monitorFlag = False
-
- # Get configuration parameters
- self._logKey = str(logKey)
- self._configFile = str(configFile)
- self._rotateMethod = 'time'
- self._timeRotateIntervalType = 'midnight'
- self._timeRotateInterval = 1
- self._sizeMaxBytes = 0
- self._sizeRotateMode = 'a'
- self._socketHost = None
- self._socketPort = 0
- self._typeLogger = 'filelogger'
- self._backupCount = 6
- self._logLevelThreshold = self._intLogLevel('')
- self._logFile = None
- self._begTime = None
- self._begMsec = 0
- self._fields = {}
- self._fields["style"] = CommonLogger.UnknownFile
- try:
- self._configFileModified = os.path.getmtime(self._configFile)
- for line in open(self._configFile):
- line = line.split('#',1)[0] # remove comments
- if '=' in line:
- key, value = [x.strip() for x in line.split('=',1)]
- if key == 'rotateMethod' and value.lower() in ['time', 'size', 'none']:
- self._rotateMethod = value.lower()
- elif key == 'timeRotateIntervalType' and value in ['S', 'M', 'H', 'D', 'W0', 'W1', 'W2', 'W3', 'W4', 'W5', 'W6', 'midnight']:
- self._timeRotateIntervalType = value
- elif key == 'timeRotateInterval' and int( value ) > 0:
- self._timeRotateInterval = int( value )
- elif key == 'sizeMaxBytes' and int( value ) >= 0:
- self._sizeMaxBytes = int( value )
- elif key == 'sizeRotateMode' and value in ['a']:
- self._sizeRotateMode = value
- elif key == 'backupCount' and int( value ) >= 0:
- self._backupCount = int( value )
- elif key == self._logKey + 'SocketHost':
- self._socketHost = value
- elif key == self._logKey + 'SocketPort' and int( value ) == 0:
- self._socketPort = int( value )
- elif key == self._logKey + 'LogType' and value.lower() in ['filelogger', 'stdoutlogger', 'stderrlogger', 'socketlogger', 'nulllogger']:
- self._typeLogger = value.lower()
- elif key == self._logKey + 'LogLevel':
- self._logLevelThreshold = self._intLogLevel(value.upper())
- elif key == self._logKey + 'Style':
- self._fields["style"] = value
- elif key == self._logKey:
- self._logFile = value
- except Exception as x:
- print("exception reading '%s' configuration file: %s" %(self._configFile, str(x)), file=sys.stderr)
- sys.exit(2)
- except:
- print("exception reading '%s' configuration file" %(self._configFile), file=sys.stderr)
- sys.exit(2)
-
- if self._logFile is None:
- print('configuration file %s is missing definition %s for log file' %(self._configFile, self._logKey), file=sys.stderr)
- sys.exit(2)
-
-
- # initialize default log fields
- # timestamp will automatically be generated
- for key in ['style', 'requestID', 'serviceInstanceID', 'threadID', 'serverName', 'serviceName', 'instanceUUID', \
- 'severity', 'serverIPAddress', 'server', 'IPAddress', 'className', 'timer', \
- 'partnerName', 'targetEntity', 'targetServiceName', 'statusCode', 'responseCode', \
- 'responseDescription', 'processKey', 'targetVirtualEntity', 'customField1', \
- 'customField2', 'customField3', 'customField4', 'errorCategory', 'errorCode', \
- 'errorDescription' ]:
- if key in kwargs and kwargs[key] != None:
- self._fields[key] = kwargs[key]
-
- self._resetStyleField()
-
- # Set up logger
- self._logLock = threading.Lock()
- with self._logLock:
- self._logger = logging.getLogger(self._logKey)
- self._logger.propagate = False
- self._createLogger()
-
- self._defaultServerInfo()
-
- # spawn a thread to monitor configFile for logLevel and logFile changes
- self._monitorFlag = True
- self._monitorThread = threading.Thread(target=self._monitorConfigFile, args=())
- self._monitorThread.daemon = True
- self._monitorThread.start()
-
-
- def _createLogger(self):
- if self._typeLogger == 'filelogger':
- self._mkdir_p(self._logFile)
- if self._rotateMethod == 'time':
- self._logHandler = logging.handlers.TimedRotatingFileHandler(self._logFile, \
- when=self._timeRotateIntervalType, interval=self._timeRotateInterval, \
- backupCount=self._backupCount, encoding=None, delay=False, utc=True)
- elif self._rotateMethod == 'size':
- self._logHandler = logging.handlers.RotatingFileHandler(self._logFile, \
- mode=self._sizeRotateMode, maxBytes=self._sizeMaxBytes, \
- backupCount=self._backupCount, encoding=None, delay=False)
-
- else:
- self._logHandler = logging.handlers.WatchedFileHandler(self._logFile, \
- mode=self._sizeRotateMode, \
- encoding=None, delay=False)
- elif self._typeLogger == 'stderrlogger':
- self._logHandler = logging.handlers.StreamHandler(sys.stderr)
- elif self._typeLogger == 'stdoutlogger':
- self._logHandler = logging.handlers.StreamHandler(sys.stdout)
- elif self._typeLogger == 'socketlogger':
- self._logHandler = logging.handlers.SocketHandler(self._socketHost, self._socketPort)
- elif self._typeLogger == 'nulllogger':
- self._logHandler = logging.handlers.NullHandler()
-
- if self._fields["style"] == CommonLogger.AuditFile or self._fields["style"] == CommonLogger.MetricsFile:
- self._logFormatter = logging.Formatter(fmt='%(begtime)s,%(begmsecs)03d+00:00|%(endtime)s,%(endmsecs)03d+00:00|%(message)s', datefmt=CommonLogger.DateFmt)
- else:
- self._logFormatter = logging.Formatter(fmt='%(asctime)s,%(msecs)03d+00:00|%(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
- self._logFormatter.converter = time.gmtime
- self._logHandler.setFormatter(self._logFormatter)
- self._logger.addHandler(self._logHandler)
-
- def _resetStyleField(self):
- styleFields = ["error", "debug", "audit", "metrics"]
- if self._fields['style'] in styleFields:
- self._fields['style'] = styleFields.index(self._fields['style'])
-
- def __del__(self):
- if self._monitorFlag == False:
- return
-
- self._monitorFlag = False
-
- if self._monitorThread is not None and self._monitorThread.is_alive():
- self._monitorThread.join()
-
- self._monitorThread = None
-
-
- def _defaultServerInfo(self):
-
- # If not set or purposely set = None, then set default
- if self._fields.get('server') is None:
- try:
- self._fields['server'] = socket.getfqdn()
- except Exception as err:
- try:
- self._fields['server'] = socket.gethostname()
- except Exception as err:
- self._fields['server'] = ""
-
- # If not set or purposely set = None, then set default
- if self._fields.get('serverIPAddress') is None:
- try:
- self._fields['serverIPAddress'] = socket.gethostbyname(self._fields['server'])
- except Exception as err:
- self._fields['serverIPAddress'] = ""
-
-
- def _monitorConfigFile(self):
- while self._monitorFlag:
- try:
- fileTime = os.path.getmtime(self._configFile)
- if fileTime > self._configFileModified:
- self._configFileModified = fileTime
- ReopenLogFile = False
- logFile = self._logFile
- with open(self._configFile) as fp:
- for line in fp:
- line = line.split('#',1)[0] # remove comments
- if '=' in line:
- key, value = [x.strip() for x in line.split('=',1)]
- if key == 'rotateMethod' and value.lower() in ['time', 'size', 'none'] and self._rotateMethod != value:
- self._rotateMethod = value.lower()
- ReopenLogFile = True
- elif key == 'timeRotateIntervalType' and value in ['S', 'M', 'H', 'D', 'W0', 'W1', 'W2', 'W3', 'W4', 'W5', 'W6', 'midnight']:
- self._timeRotateIntervalType = value
- ReopenLogFile = True
- elif key == 'timeRotateInterval' and int( value ) > 0:
- self._timeRotateInterval = int( value )
- ReopenLogFile = True
- elif key == 'sizeMaxBytes' and int( value ) >= 0:
- self._sizeMaxBytes = int( value )
- ReopenLogFile = True
- elif key == 'sizeRotateMode' and value in ['a']:
- self._sizeRotateMode = value
- ReopenLogFile = True
- elif key == 'backupCount' and int( value ) >= 0:
- self._backupCount = int( value )
- ReopenLogFile = True
- elif key == self._logKey + 'SocketHost' and self._socketHost != value:
- self._socketHost = value
- ReopenLogFile = True
- elif key == self._logKey + 'SocketPort' and self._socketPort > 0 and self._socketPort != int( value ):
- self._socketPort = int( value )
- ReopenLogFile = True
- elif key == self._logKey + 'LogLevel' and self._logLevelThreshold != self._intLogLevel( value.upper() ):
- self._logLevelThreshold = self._intLogLevel(value.upper())
- elif key == self._logKey + 'LogType' and self._typeLogger != value and value.lower() in ['filelogger', 'stdoutlogger', 'stderrlogger', 'socketlogger', 'nulllogger']:
- self._typeLogger = value.lower()
- ReopenLogFile = True
- elif key == self._logKey + 'Style':
- self._fields["style"] = value
- self._resetStyleField()
- elif key == self._logKey and self._logFile != value:
- logFile = value
- ReopenLogFile = True
- if ReopenLogFile:
- with self._logLock:
- self._logger.removeHandler(self._logHandler)
- self._logFile = logFile
- self._createLogger()
- except Exception as err:
- pass
-
- time.sleep(5)
-
-
- def setFields(self, **kwargs):
- """Set default values for log fields.
-
- Keyword arguments: Annotations are d:debug, a=audit, m=metrics, e=error
- style -- the log file format (style) to use when writing log messages
- requestID (dame) -- optional default value for this log record field.
- serviceInstanceID (am) -- optional default value for this log record field.
- threadID (am) -- optional default value for this log record field.
- serverName (am) -- optional default value for this log record field.
- serviceName (am) -- optional default value for this log record field.
- instanceUUID (am) -- optional default value for this log record field.
- severity (am) -- optional default value for this log record field.
- serverIPAddress (am) -- optional default value for this log record field.
- server (am) -- optional default value for this log record field.
- IPAddress (am) -- optional default value for this log record field.
- className (am) -- optional default value for this log record field.
- timer (am) -- (ElapsedTime) optional default value for this log record field.
- partnerName (ame) -- optional default value for this log record field.
- targetEntity (me) -- optional default value for this log record field.
- targetServiceName (me) -- optional default value for this log record field.
- statusCode (am) -- optional default value for this log record field.
- responseCode (am) -- optional default value for this log record field.
- responseDescription (am) -- optional default value for this log record field.
- processKey (am) -- optional default value for this log record field.
- targetVirtualEntity (m) -- optional default value for this log record field.
- customField1 (am) -- optional default value for this log record field.
- customField2 (am) -- optional default value for this log record field.
- customField3 (am) -- optional default value for this log record field.
- customField4 (am) -- optional default value for this log record field.
- errorCategory (e) -- optional default value for this log record field.
- errorCode (e) -- optional default value for this log record field.
- errorDescription (e) -- optional default value for this log record field.
-
- Note: the pipe '|' character is not allowed in any log record field.
- """
-
- for key in ['style', 'requestID', 'serviceInstanceID', 'threadID', 'serverName', 'serviceName', 'instanceUUID', \
- 'severity', 'serverIPAddress', 'server', 'IPAddress', 'className', 'timer', \
- 'partnerName', 'targetEntity', 'targetServiceName', 'statusCode', 'responseCode', \
- 'responseDescription', 'processKey', 'targetVirtualEntity', 'customField1', \
- 'customField2', 'customField3', 'customField4', 'errorCategory', 'errorCode', \
- 'errorDescription' ]:
- if key in kwargs:
- if kwargs[key] != None:
- self._fields[key] = kwargs[key]
- elif key in self._fields:
- del self._fields[key]
-
- self._defaultServerInfo()
-
-
- def debug(self, message, **kwargs):
- """Write a DEBUG level message to the log file.
-
- Arguments:
- message -- value for the last log record field.
-
- Keyword arguments: Annotations are d:debug, a=audit, m=metrics, e=error
- style -- the log file format (style) to use when writing log messages
- requestID (dame) -- optional default value for this log record field.
- serviceInstanceID (am) -- optional default value for this log record field.
- threadID (am) -- optional default value for this log record field.
- serverName (am) -- optional default value for this log record field.
- serviceName (am) -- optional default value for this log record field.
- instanceUUID (am) -- optional default value for this log record field.
- severity (am) -- optional default value for this log record field.
- serverIPAddress (am) -- optional default value for this log record field.
- server (am) -- optional default value for this log record field.
- IPAddress (am) -- optional default value for this log record field.
- className (am) -- optional default value for this log record field.
- timer (am) -- (ElapsedTime) optional default value for this log record field.
- partnerName (ame) -- optional default value for this log record field.
- targetEntity (me) -- optional default value for this log record field.
- targetServiceName (me) -- optional default value for this log record field.
- statusCode (am) -- optional default value for this log record field.
- responseCode (am) -- optional default value for this log record field.
- responseDescription (am) -- optional default value for this log record field.
- processKey (am) -- optional default value for this log record field.
- targetVirtualEntity (m) -- optional default value for this log record field.
- customField1 (am) -- optional default value for this log record field.
- customField2 (am) -- optional default value for this log record field.
- customField3 (am) -- optional default value for this log record field.
- customField4 (am) -- optional default value for this log record field.
- errorCategory (e) -- optional default value for this log record field.
- errorCode (e) -- optional default value for this log record field.
- errorDescription (e) -- optional default value for this log record field.
-
- Note: the pipe '|' character is not allowed in any log record field.
- """
-
- self._log('DEBUG', message, errorCategory = 'DEBUG', **kwargs)
-
- def info(self, message, **kwargs):
- """Write an INFO level message to the log file.
-
- Arguments:
- message -- value for the last log record field.
-
- Keyword arguments: Annotations are d:debug, a=audit, m=metrics, e=error
- style -- the log file format (style) to use when writing log messages
- requestID (dame) -- optional default value for this log record field.
- serviceInstanceID (am) -- optional default value for this log record field.
- threadID (am) -- optional default value for this log record field.
- serverName (am) -- optional default value for this log record field.
- serviceName (am) -- optional default value for this log record field.
- instanceUUID (am) -- optional default value for this log record field.
- severity (am) -- optional default value for this log record field.
- serverIPAddress (am) -- optional default value for this log record field.
- server (am) -- optional default value for this log record field.
- IPAddress (am) -- optional default value for this log record field.
- className (am) -- optional default value for this log record field.
- timer (am) -- (ElapsedTime) optional default value for this log record field.
- partnerName (ame) -- optional default value for this log record field.
- targetEntity (me) -- optional default value for this log record field.
- targetServiceName (me) -- optional default value for this log record field.
- statusCode (am) -- optional default value for this log record field.
- responseCode (am) -- optional default value for this log record field.
- responseDescription (am) -- optional default value for this log record field.
- processKey (am) -- optional default value for this log record field.
- targetVirtualEntity (m) -- optional default value for this log record field.
- customField1 (am) -- optional default value for this log record field.
- customField2 (am) -- optional default value for this log record field.
- customField3 (am) -- optional default value for this log record field.
- customField4 (am) -- optional default value for this log record field.
- errorCategory (e) -- optional default value for this log record field.
- errorCode (e) -- optional default value for this log record field.
- errorDescription (e) -- optional default value for this log record field.
-
- Note: the pipe '|' character is not allowed in any log record field.
- """
-
- self._log('INFO', message, errorCategory = 'INFO', **kwargs)
-
- def warn(self, message, **kwargs):
- """Write a WARN level message to the log file.
-
- Arguments:
- message -- value for the last log record field.
-
- Keyword arguments: Annotations are d:debug, a=audit, m=metrics, e=error
- style -- the log file format (style) to use when writing log messages
- requestID (dame) -- optional default value for this log record field.
- serviceInstanceID (am) -- optional default value for this log record field.
- threadID (am) -- optional default value for this log record field.
- serverName (am) -- optional default value for this log record field.
- serviceName (am) -- optional default value for this log record field.
- instanceUUID (am) -- optional default value for this log record field.
- severity (am) -- optional default value for this log record field.
- serverIPAddress (am) -- optional default value for this log record field.
- server (am) -- optional default value for this log record field.
- IPAddress (am) -- optional default value for this log record field.
- className (am) -- optional default value for this log record field.
- timer (am) -- (ElapsedTime) optional default value for this log record field.
- partnerName (ame) -- optional default value for this log record field.
- targetEntity (me) -- optional default value for this log record field.
- targetServiceName (me) -- optional default value for this log record field.
- statusCode (am) -- optional default value for this log record field.
- responseCode (am) -- optional default value for this log record field.
- responseDescription (am) -- optional default value for this log record field.
- processKey (am) -- optional default value for this log record field.
- targetVirtualEntity (m) -- optional default value for this log record field.
- customField1 (am) -- optional default value for this log record field.
- customField2 (am) -- optional default value for this log record field.
- customField3 (am) -- optional default value for this log record field.
- customField4 (am) -- optional default value for this log record field.
- errorCategory (e) -- optional default value for this log record field.
- errorCode (e) -- optional default value for this log record field.
- errorDescription (e) -- optional default value for this log record field.
-
- Note: the pipe '|' character is not allowed in any log record field.
- """
-
- self._log('WARN', message, errorCategory = 'WARN', **kwargs)
-
- def error(self, message, **kwargs):
- """Write an ERROR level message to the log file.
-
- Arguments:
- message -- value for the last log record field.
-
- Keyword arguments: Annotations are d:debug, a=audit, m=metrics, e=error
- style -- the log file format (style) to use when writing log messages
- requestID (dame) -- optional default value for this log record field.
- serviceInstanceID (am) -- optional default value for this log record field.
- threadID (am) -- optional default value for this log record field.
- serverName (am) -- optional default value for this log record field.
- serviceName (am) -- optional default value for this log record field.
- instanceUUID (am) -- optional default value for this log record field.
- severity (am) -- optional default value for this log record field.
- serverIPAddress (am) -- optional default value for this log record field.
- server (am) -- optional default value for this log record field.
- IPAddress (am) -- optional default value for this log record field.
- className (am) -- optional default value for this log record field.
- timer (am) -- (ElapsedTime) optional default value for this log record field.
- partnerName (ame) -- optional default value for this log record field.
- targetEntity (me) -- optional default value for this log record field.
- targetServiceName (me) -- optional default value for this log record field.
- statusCode (am) -- optional default value for this log record field.
- responseCode (am) -- optional default value for this log record field.
- responseDescription (am) -- optional default value for this log record field.
- processKey (am) -- optional default value for this log record field.
- targetVirtualEntity (m) -- optional default value for this log record field.
- customField1 (am) -- optional default value for this log record field.
- customField2 (am) -- optional default value for this log record field.
- customField3 (am) -- optional default value for this log record field.
- customField4 (am) -- optional default value for this log record field.
- errorCategory (e) -- optional default value for this log record field.
- errorCode (e) -- optional default value for this log record field.
- errorDescription (e) -- optional default value for this log record field.
-
- Note: the pipe '|' character is not allowed in any log record field.
- """
-
- self._log('ERROR', message, errorCategory = 'ERROR', **kwargs)
-
- def fatal(self, message, **kwargs):
- """Write a FATAL level message to the log file.
-
- Arguments:
- message -- value for the last log record field.
-
- Keyword arguments: Annotations are d:debug, a=audit, m=metrics, e=error
- style -- the log file format (style) to use when writing log messages
- requestID (dame) -- optional default value for this log record field.
- serviceInstanceID (am) -- optional default value for this log record field.
- threadID (am) -- optional default value for this log record field.
- serverName (am) -- optional default value for this log record field.
- serviceName (am) -- optional default value for this log record field.
- instanceUUID (am) -- optional default value for this log record field.
- severity (am) -- optional default value for this log record field.
- serverIPAddress (am) -- optional default value for this log record field.
- server (am) -- optional default value for this log record field.
- IPAddress (am) -- optional default value for this log record field.
- className (am) -- optional default value for this log record field.
- timer (am) -- (ElapsedTime) optional default value for this log record field.
- partnerName (ame) -- optional default value for this log record field.
- targetEntity (me) -- optional default value for this log record field.
- targetServiceName (me) -- optional default value for this log record field.
- statusCode (am) -- optional default value for this log record field.
- responseCode (am) -- optional default value for this log record field.
- responseDescription (am) -- optional default value for this log record field.
- processKey (am) -- optional default value for this log record field.
- targetVirtualEntity (m) -- optional default value for this log record field.
- customField1 (am) -- optional default value for this log record field.
- customField2 (am) -- optional default value for this log record field.
- customField3 (am) -- optional default value for this log record field.
- customField4 (am) -- optional default value for this log record field.
- errorCategory (e) -- optional default value for this log record field.
- errorCode (e) -- optional default value for this log record field.
- errorDescription (e) -- optional default value for this log record field.
-
- Note: the pipe '|' character is not allowed in any log record field.
- """
-
- self._log('FATAL', message, errorCategory = 'FATAL', **kwargs)
-
- def _log(self, logLevel, message, **kwargs):
- """Write a message to the log file.
-
- Arguments:
- logLevel -- value ('DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL', ...) for the log record.
- message -- value for the last log record field.
-
- Keyword arguments: Annotations are d=debug, a=audit, m=metrics, e=error
- style -- the log file format (style) to use when writing log messages
- requestID (dame) -- optional default value for this log record field.
- serviceInstanceID (am) -- optional default value for this log record field.
- threadID (am) -- optional default value for this log record field.
- serverName (am) -- optional default value for this log record field.
- serviceName (am) -- optional default value for this log record field.
- instanceUUID (am) -- optional default value for this log record field.
- severity (am) -- optional default value for this log record field.
- serverIPAddress (am) -- optional default value for this log record field.
- server (am) -- optional default value for this log record field.
- IPAddress (am) -- optional default value for this log record field.
- className (am) -- optional default value for this log record field.
- timer (am) -- (ElapsedTime) optional default value for this log record field.
- partnerName (ame) -- optional default value for this log record field.
- targetEntity (me) -- optional default value for this log record field.
- targetServiceName (me) -- optional default value for this log record field.
- statusCode (am) -- optional default value for this log record field.
- responseCode (am) -- optional default value for this log record field.
- responseDescription (am) -- optional default value for this log record field.
- processKey (am) -- optional default value for this log record field.
- targetVirtualEntity (m) -- optional default value for this log record field.
- customField1 (am) -- optional default value for this log record field.
- customField2 (am) -- optional default value for this log record field.
- customField3 (am) -- optional default value for this log record field.
- customField4 (am) -- optional default value for this log record field.
- errorCategory (e) -- optional default value for this log record field.
- errorCode (e) -- optional default value for this log record field.
- errorDescription (e) -- optional default value for this log record field.
-
- Note: the pipe '|' character is not allowed in any log record field.
- """
-
- # timestamp will automatically be inserted
- style = int(self._getVal('style', '', **kwargs))
- requestID = self._getVal('requestID', '', **kwargs)
- serviceInstanceID = self._getVal('serviceInstanceID', '', **kwargs)
- threadID = self._getVal('threadID', threading.currentThread().getName(), **kwargs)
- serverName = self._getVal('serverName', '', **kwargs)
- serviceName = self._getVal('serviceName', '', **kwargs)
- instanceUUID = self._getVal('instanceUUID', '', **kwargs)
- upperLogLevel = self._noSep(logLevel.upper())
- severity = self._getVal('severity', '', **kwargs)
- serverIPAddress = self._getVal('serverIPAddress', '', **kwargs)
- server = self._getVal('server', '', **kwargs)
- IPAddress = self._getVal('IPAddress', '', **kwargs)
- className = self._getVal('className', '', **kwargs)
- timer = self._getVal('timer', '', **kwargs)
- partnerName = self._getVal('partnerName', '', **kwargs)
- targetEntity = self._getVal('targetEntity', '', **kwargs)
- targetServiceName = self._getVal('targetServiceName', '', **kwargs)
- statusCode = self._getVal('statusCode', '', **kwargs)
- responseCode = self._getVal('responseCode', '', **kwargs)
- responseDescription = self._noSep(self._getVal('responseDescription', '', **kwargs))
- processKey = self._getVal('processKey', '', **kwargs)
- targetVirtualEntity = self._getVal('targetVirtualEntity', '', **kwargs)
- customField1 = self._getVal('customField1', '', **kwargs)
- customField2 = self._getVal('customField2', '', **kwargs)
- customField3 = self._getVal('customField3', '', **kwargs)
- customField4 = self._getVal('customField4', '', **kwargs)
- errorCategory = self._getVal('errorCategory', '', **kwargs)
- errorCode = self._getVal('errorCode', '', **kwargs)
- errorDescription = self._noSep(self._getVal('errorDescription', '', **kwargs))
-
- detailMessage = self._noSep(message)
- if bool(re.match(r" *$", detailMessage)):
- return # don't log empty messages
-
- useLevel = self._intLogLevel(upperLogLevel)
- if CommonLogger.verbose: print("logger STYLE=%s" % style)
- if useLevel < self._logLevelThreshold:
- if CommonLogger.verbose: print("skipping because of level")
- pass
- else:
- with self._logLock:
- if style == CommonLogger.ErrorFile:
- if CommonLogger.verbose: print("using CommonLogger.ErrorFile")
- self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- %(requestID, threadID, serviceName, partnerName, targetEntity, targetServiceName,
- errorCategory, errorCode, errorDescription, detailMessage))
- elif style == CommonLogger.DebugFile:
- if CommonLogger.verbose: print("using CommonLogger.DebugFile")
- self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- %(requestID, threadID, serverName, serviceName, instanceUUID, upperLogLevel,
- severity, serverIPAddress, server, IPAddress, className, timer, detailMessage))
- elif style == CommonLogger.AuditFile:
- if CommonLogger.verbose: print("using CommonLogger.AuditFile")
- endAuditTime, endAuditMsec = self._getTime()
- if self._begTime is not None:
- d = { 'begtime': self._begTime, 'begmsecs': self._begMsec, 'endtime': endAuditTime, 'endmsecs': endAuditMsec }
- else:
- d = { 'begtime': endAuditTime, 'begmsecs': endAuditMsec, 'endtime': endAuditTime, 'endmsecs': endAuditMsec }
- self._begTime = None
- unused = ""
- self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- %(requestID, serviceInstanceID, threadID, serverName, serviceName, partnerName,
- statusCode, responseCode, responseDescription, instanceUUID, upperLogLevel,
- severity, serverIPAddress, timer, server, IPAddress, className, unused,
- processKey, customField1, customField2, customField3, customField4, detailMessage), extra=d)
- elif style == CommonLogger.MetricsFile:
- if CommonLogger.verbose: print("using CommonLogger.MetricsFile")
- endMetricsTime, endMetricsMsec = self._getTime()
- if self._begTime is not None:
- d = { 'begtime': self._begTime, 'begmsecs': self._begMsec, 'endtime': endMetricsTime, 'endmsecs': endMetricsMsec }
- else:
- d = { 'begtime': endMetricsTime, 'begmsecs': endMetricsMsec, 'endtime': endMetricsTime, 'endmsecs': endMetricsMsec }
- self._begTime = None
- unused = ""
- self._logger.log(50, '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' \
- %(requestID, serviceInstanceID, threadID, serverName, serviceName, partnerName,
- targetEntity, targetServiceName, statusCode, responseCode, responseDescription,
- instanceUUID, upperLogLevel, severity, serverIPAddress, timer, server, IPAddress,
- className, unused, processKey, targetVirtualEntity, customField1, customField2,
- customField3, customField4, detailMessage), extra=d)
- else:
- print("!!!!!!!!!!!!!!!! style not set: %s" % self._fields["style"])
-
- def _getTime(self):
- ct = time.time()
- lt = time.localtime(ct)
- return (time.strftime(CommonLogger.DateFmt, lt), (ct - int(ct)) * 1000)
-
- def setStartRecordEvent(self):
- """
- Set the start time to be saved for both audit and metrics records
- """
- self._begTime, self._begMsec = self._getTime()
-
- def _getVal(self, key, default, **kwargs):
- val = self._fields.get(key)
- if key in kwargs: val = kwargs[key]
- if val is None: val = default
- return self._noSep(val)
-
- def _noSep(self, message):
- if message is None: return ''
- return re.sub(r'[\|\n]', ' ', str(message))
-
- def _intLogLevel(self, logLevel):
- if logLevel == 'FATAL': useLevel = 50
- elif logLevel == 'ERROR': useLevel = 40
- elif logLevel == 'WARN': useLevel = 30
- elif logLevel == 'INFO': useLevel = 20
- elif logLevel == 'DEBUG': useLevel = 10
- else: useLevel = 0
- return useLevel
-
- def _mkdir_p(self, filename):
- """Create missing directories from a full filename path like mkdir -p"""
-
- if filename is None:
- return
-
- folder=os.path.dirname(filename)
-
- if folder == "":
- return
-
- if not os.path.exists(folder):
- try:
- os.makedirs(folder)
- except OSError as err:
- print("error number %d creating %s directory to hold %s logfile: %s" %(err.errno, err.filename, filename, err.strerror), file=sys.stderr)
- sys.exit(2)
- except Exception as err:
- print("error creating %s directory to hold %s logfile: %s" %(folder, filename, str(err)), file=sys.stderr)
- sys.exit(2)
-
-def __checkTime1(line):
- format = r'[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2},[0-9]{3}[+]00:00[|]'
- m = re.match(format, line)
- if not m:
- print("ERROR: time string did not match proper time format, %s" %line)
- print("\t: format=%s" % format)
- return 1
- return 0
-
-def __checkTime2(line, different):
- format = r'[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:([0-9]{2}),([0-9]{3})[+]00:00[|][0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:([0-9]{2}),([0-9]{3})[+]00:00[|]'
- m = re.match(format, line)
- if not m:
- print("ERROR: time strings did not match proper time format, %s" %line)
- print("\t: format=%s" % format)
- return 1
- second1 = int(m.group(1))
- msec1 = int(m.group(2))
- second2 = int(m.group(3))
- msec2 = int(m.group(4))
- if second1 > second2: second2 += 60
- t1 = second1 * 1000 + msec1
- t2 = second2 * 1000 + msec2
- diff = t2 - t1
- # print("t1=%d (%d,%d) t2=%d (%d,%d), diff = %d" % (t1, second1, msec1, t2, second2, msec2, diff))
- if different:
- if diff < 500:
- print("ERROR: times did not differ enough: %s" % line)
- return 1
- else:
- if diff > 10:
- print("ERROR: times were too far apart: %s" % line)
- return 1
- return 0
-
-def __checkLog(logfile, numLines, numFields):
- lineCount = 0
- errorCount = 0
- with open(logfile, "r") as fp:
- for line in fp:
- # print("saw line %s" % line)
- lineCount += 1
- c = line.count('|')
- if c != numFields:
- print("ERROR: wrong number of fields. Expected %d, got %d: %s" % (numFields, c, line))
- errorCount += 1
- if re.search("should not appear", line):
- print("ERROR: a line appeared that should not have appeared, %s" % line)
- errorCount += 1
- elif re.search("single time", line):
- errorCount += __checkTime1(line)
- elif re.search("time should be the same", line):
- errorCount += __checkTime2(line, different=False)
- elif re.search("time should be different", line):
- errorCount += __checkTime2(line, different=True)
- else:
- print("ERROR: an unknown message appeared, %s" % line)
- errorCount += 1
-
- if lineCount != numLines:
- print("ERROR: expected %d lines, but got %d lines" % (numLines, lineCount))
- errorCount += 1
- return errorCount
-
-if __name__ == "__main__":
- import os, argparse
- parser = argparse.ArgumentParser(description="test the CommonLogger functions")
- parser.add_argument("-k", "--keeplogs", help="Keep the log files after finishing the tests", action="store_true")
- parser.add_argument("-v", "--verbose", help="Print debugging messages", action="store_true")
- args = parser.parse_args()
-
- spid = str(os.getpid())
- if args.keeplogs:
- spid = ""
- logcfg = "/tmp/cl.log" + spid + ".cfg"
- errorLog = "/tmp/cl.error" + spid + ".log"
- metricsLog = "/tmp/cl.metrics" + spid + ".log"
- auditLog = "/tmp/cl.audit" + spid + ".log"
- debugLog = "/tmp/cl.debug" + spid + ".log"
- if args.verbose: CommonLogger.verbose = True
-
- import atexit
- def cleanupTmps():
- for f in [ logcfg, errorLog, metricsLog, auditLog, debugLog ]:
- try:
- os.remove(f)
- except:
- pass
- if not args.keeplogs:
- atexit.register(cleanupTmps)
-
- with open(logcfg, "w") as o:
- o.write("error = " + errorLog + "\n" +
- "errorLogLevel = WARN\n" +
- "metrics = " + metricsLog + "\n" +
- "metricsLogLevel = INFO\n" +
- "audit = " + auditLog + "\n" +
- "auditLogLevel = INFO\n" +
- "debug = " + debugLog + "\n" +
- "debugLogLevel = DEBUG\n")
-
- import uuid
- instanceUUID = uuid.uuid1()
- serviceName = "testharness"
- errorLogger = CommonLogger(logcfg, "error", style=CommonLogger.ErrorFile, instanceUUID=instanceUUID, serviceName=serviceName)
- debugLogger = CommonLogger(logcfg, "debug", style=CommonLogger.DebugFile, instanceUUID=instanceUUID, serviceName=serviceName)
- auditLogger = CommonLogger(logcfg, "audit", style=CommonLogger.AuditFile, instanceUUID=instanceUUID, serviceName=serviceName)
- metricsLogger = CommonLogger(logcfg, "metrics", style=CommonLogger.MetricsFile, instanceUUID=instanceUUID, serviceName=serviceName)
-
- testsRun = 0
- errorCount = 0
- errorLogger.debug("error calling debug (should not appear)")
- errorLogger.info("error calling info (should not appear)")
- errorLogger.warn("error calling warn (single time)")
- errorLogger.error("error calling error (single time)")
- errorLogger.setStartRecordEvent()
- time.sleep(1)
- errorLogger.fatal("error calling fatal, after setStartRecordEvent and sleep (start should be ignored, single time)")
- testsRun += 6
- errorCount += __checkLog(errorLog, 3, 10)
-
- auditLogger.debug("audit calling debug (should not appear)")
- auditLogger.info("audit calling info (time should be the same)")
- auditLogger.warn("audit calling warn (time should be the same)")
- auditLogger.error("audit calling error (time should be the same)")
- auditLogger.setStartRecordEvent()
- time.sleep(1)
- auditLogger.fatal("audit calling fatal, after setStartRecordEvent and sleep, time should be different)")
- testsRun += 6
- errorCount += __checkLog(auditLog, 4, 25)
-
- debugLogger.debug("debug calling debug (single time)")
- debugLogger.info("debug calling info (single time)")
- debugLogger.warn("debug calling warn (single time)")
- debugLogger.setStartRecordEvent()
- time.sleep(1)
- debugLogger.error("debug calling error, after SetStartRecordEvent and sleep (start should be ignored, single time)")
- debugLogger.fatal("debug calling fatal (single time)")
- errorCount += __checkLog(debugLog, 5, 13)
- testsRun += 6
-
- metricsLogger.debug("metrics calling debug (should not appear)")
- metricsLogger.info("metrics calling info (time should be the same)")
- metricsLogger.warn("metrics calling warn (time should be the same)")
- metricsLogger.setStartRecordEvent()
- time.sleep(1)
- metricsLogger.error("metrics calling error, after setStartRecordEvent and sleep, time should be different")
- metricsLogger.fatal("metrics calling fatal (time should be the same)")
- testsRun += 6
- errorCount += __checkLog(metricsLog, 4, 28)
-
- print("%d tests run, %d errors found" % (testsRun, errorCount))
diff --git a/osdf/logging/onap_common_v1/CommonLogger_test.config b/osdf/logging/onap_common_v1/CommonLogger_test.config
deleted file mode 100755
index 584fb5e..0000000
--- a/osdf/logging/onap_common_v1/CommonLogger_test.config
+++ /dev/null
@@ -1,58 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-# You may change this file while your program is running and CommonLogger will automatically reconfigure accordingly.
-# Changing these parameters may leave old log files lying around.
-
-
-#--- Parameters that apply to all logs
-#
-# rotateMethod: time, size, stdout, stderr, none
-#... Note: the following two parameters apply only when rotateMethod=time
-# timeRotateIntervalType: S, M, H, D, W0 - W6, or midnight (seconds, minutes, hours, days, weekday (0=Monday), or midnight UTC)
-# timeRotateInterval: >= 1 (1 means every timeRotateIntervalType, 2 every other, 3 every third, etc.)
-#... Note: the following parameter applies only when rotateMethod=size
-# sizeMaxBytes: >= 0 (0 means no limit, else maximum filesize in Bytes)
-# backupCount: >= 0 (Number of rotated backup files to retain. If rotateMethod=time, 0 retains *all* backups. If rotateMethod=size, 0 retains *no* backups.)
-#
-rotateMethod = time
-timeRotateIntervalType = midnight
-timeRotateInterval = 1
-sizeMaxBytes = 0
-backupCount = 6
-
-
-#--- Parameters that define log filenames and their initial LogLevel threshold
-#... Note: CommonLogger will exit if your process does not have permission to write to the file.
-#
-
-error = /opt/logs/oof/error.log
-errorLogLevel = WARN
-errorStyle = error
-
-metrics = /opt/logs/oof/metrics.log
-metricsLogLevel = INFO
-metricsStyle = metrics
-
-audit = /opt/logs/oof/audit.log
-auditLogLevel = INFO
-auditStyle = audit
-
-debug = /opt/logs/oof/debug.log
-debugLogLevel = DEBUG
-debugStyle = debug
diff --git a/osdf/logging/onap_common_v1/CommonLogger_testing.py b/osdf/logging/onap_common_v1/CommonLogger_testing.py
deleted file mode 100755
index 43e0ec3..0000000
--- a/osdf/logging/onap_common_v1/CommonLogger_testing.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/python
-
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-"""
-Test the ONAP Common Logging library in Python.
-CommonLogger_testing.py
-"""
-
-
-from __future__ import print_function # for the example code below parsing command line options
-import os, sys, getopt # for the example code below parsing command line options
-
-from osdf.logging.onap_common_v1.CommonLogger import CommonLogger # all that is needed to import the CommonLogger library
-
-import uuid # to create UUIDs for our log records
-import time # to create elapsed time for our log records
-
-
-#----- A client might want to allow specifying the configFile as a command line option
-usage="usage: %s [ -c <configFile> ]" % ( os.path.basename(__file__) )
-try:
- opts, args = getopt.getopt(sys.argv[1:], "c:")
-except getopt.GetoptError:
- print(usage, file=sys.stderr)
- sys.exit(2)
-
-configFile = "CommonLogger_test.config"
-for opt, arg in opts:
- if opt == "-c":
- configFile = arg
- else:
- print(usage, file=sys.stderr)
- sys.exit(2)
-
-
-#----- Instantiate the loggers
-
-# The client's top-level program (e.g., vPRO.py) can create a unique identifier UUID to differentiate between multiple instances of itself.
-instanceUUID = uuid.uuid1()
-
-# The client should identify its ONAP component -- and if applicable -- its ONAP sub-component
-serviceName = "DCAE/vPRO"
-
-# Instantiate using a configuration file with a key specifying the log file name and set fields' default values
-errorLog = CommonLogger(configFile, "error", instanceUUID=instanceUUID, serviceName=serviceName)
-metricsLog = CommonLogger(configFile, "metrics", instanceUUID=instanceUUID, serviceName=serviceName)
-auditLog = CommonLogger(configFile, "audit", instanceUUID=instanceUUID, serviceName=serviceName)
-debugLog = CommonLogger(configFile, "debug", instanceUUID=instanceUUID, serviceName=serviceName)
-
-
-#----- use the loggers
-
-# both metrics and audit logs can have an event starting time. This only affects the next log message.
-metricsLog.setStartRecordEvent()
-auditLog.setStartRecordEvent()
-
-# Simple log messages
-debugLog.debug("a DEBUG message for the debug log")
-metricsLog.info("an INFO message for the metrics log")
-auditLog.info("an INFO message for the audit log")
-errorLog.warn("a WARN message for the error log")
-errorLog.error("an ERROR message for the error log")
-errorLog.fatal("a FATAL message for the error log")
-
-
-# Can override any of the other fields when writing each log record
-debugLog.debug("demonstrating overriding all fields with atypical values", requestID="2", serviceInstanceID="3", threadID="4", serverName="5", serviceName="6", instanceUUID="7", severity="9", serverIPAddress="10", server="11", IPAddress="12", className="13", timer="14")
-
-
-# This is an example of an interaction between two ONAP components:
-
-# vPRO generates Closed Loop RESTful API requests to App-C, knowing this information:
-requestClient = "netman@localdcae.att.com:~/vPRO_trinity/vPRO.py:905" # uniquely identifies the requester
-requestTime = "2015-08-20 20:57:14.463426" # unique ID of the request within the requester's scope
-request = "Restart"
-
-# Form the value for Common Logging's requestID field:
-requestID = requestClient + "+" + requestTime # vPRO will use this as the unique requestID
-# requestID = uuid.uuid1() # other services might generate a UUID as their requestID
-
-# Form the value for Common Logging's serviceName field for an interaction between two ONAP components:
-ourONAP = serviceName
-peerONAP = "App-C"
-operation = request
-interaction = ourONAP + ":" + peerONAP + "." + operation
-
-# Let's calculate and report elapsed times
-start = time.time()
-
-# Log the request
-auditLog.info("Requesting %s to %s" %(peerONAP, operation), requestID=requestID, serviceName=interaction)
-
-# Wait for first response
-time.sleep(1) # simulate processing the action, e.g., waiting for response from App-C
-
-# Form the value for Common Logging's serviceName field for an interaction between two ONAP components:
-operation = 'PENDING'
-interaction = peerONAP + ":" + ourONAP + "." + operation
-
-# Log the response with elapsed time
-ms = int(round(1000 * (time.time() - start))) # Calculate elapsed time in ms
-auditLog.info("%s acknowledged receiving request for %s" %(peerONAP, operation), requestID=requestID, serviceName=interaction, timer=ms)
-
-# Wait for next response
-time.sleep(1) # simulate processing the action, e.g., waiting for response from App-C
-
-# Form the value for Common Logging's serviceName field for an interaction between two ONAP components:
-operation = 'SUCCESS'
-interaction = peerONAP + ":" + ourONAP + "." + operation
-
-# Log the response with elapsed time
-ms = int(round(1000 * (time.time() - start))) # Calculate elapsed time in ms
-auditLog.info("%s finished %s" %(peerONAP, operation), requestID=requestID, serviceName=interaction, timer=ms)
-
-
-# Can change the fields' default values for a logger after instantiation if desired
-debugLog.setFields(serviceName="DCAE", threadID='thread-2')
-
-# Then subsequent logging will have the new default field values
-debugLog.info("Something happened")
-debugLog.warn("Something happened again")
-
-
-# Unset (set=None) a field so the Common Logger will use the default value
-debugLog.info("threadID should be default", threadID=None)
-debugLog.setFields(threadID=None)
-debugLog.info("threadID should be default")
diff --git a/osdf/logging/onap_common_v1/README.md b/osdf/logging/onap_common_v1/README.md
deleted file mode 100755
index 596cd7f..0000000
--- a/osdf/logging/onap_common_v1/README.md
+++ /dev/null
@@ -1,214 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-# Common Logging Wrapper for Python
-
-* CommonLogger.py is the module (library) to import
-* CommonLogger_test.config is an example configuration file used by CommonLogger_testing.py
-* CommonLogger_testing.py is an example of how to import and use the CommonLogger module
-
-## Configuration File
-
-Configure common logging for a python application in a configuration file.
-In the file, put key = value assignments
-
-* defining the filename for each log file you will create, such as
-'error=/path/error.log', 'metrics=/path/metrics.log', 'audit=/path/audit.log',
-and 'debug=/path/debug.log'.
-The name used (shown here as 'error', 'metrics', etc.) is chosen in the program, allowing a single configuration file to be
-used by numerous different programs.
-(It will be referred to below as &lt;logKey&gt;.)
-* defining the style of the log messages to be produced,
-using &lt;logKey&gt; suffixed with 'Style', as in 'errorStyle=', and one of the
-words 'error', 'metrics', 'audit' and 'debug'.
-* defining the minimum level of log messages to be retained in a log file,
-using &lt;logKey&gt; suffixed with 'LogLevel', as in 'errorLogLevel=WARN'.
-The levels are DEBUG, INFO, WARN, ERROR, and FATAL.
-So specifying WARN will retain only WARN, ERROR, and FATAL level
-log messages, while specifying DEBUG will retain all levels of log messages:
-DEBUG, INFO, WARN, ERROR, and FATAL.
-
-Comments may be included on any line following a '#' character.
-
-Common logging monitors the configuration file, so if the file is edited
-and any of its values change, common logging will apply the changes
-in the running application.
-This enables operations to change log levels or even log filenames without
-interrupting the running application.
-
-By default, log files are rotated daily at midnight UTC, retaining 6 backup versions by default.
-
-Other strategies can be specified within the configuration file using the keywords:
-
-* rotateMethod = one of 'time', 'size', and 'none' (case insensitive)
-
-If rotateMethod is 'time', the following keywords apply:
-* backupCount = Number of rotated backup files to retain, >= 0. 0 retains *all* backups.
-* timeRotateIntervalType = one of 'S', 'M', 'H', 'D', 'W0', 'W1', 'W2', 'W3', 'W4', 'W5', 'W6', 'midnight'
-(seconds, minutes, hours, days, weekday (0=Monday), or midnight UTC)
-* timeRotateInterval = number of seconds/minutes/hours/days between rotations. (Ignored for W#.)
-
-If rotateMethod is 'size', the following keywords apply:
-* backupCount = Number of rotated backup files to retain, >= 0. 0 retains *no* backups.
-* sizeMaxBytes = maximum number of bytes allowed in the file before rotation
-* sizeRotateMode = for now, this defaults to 'a' and may only be specified as 'a'.
-It is passed to the underlying Python Logging methods.
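-
-For example, a minimal sketch of a size-based rotation setup using the keywords above
-(the values are illustrative):
-
- rotateMethod = size
- sizeMaxBytes = 10000000
- backupCount = 4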
-
-
-Besides logging to a file, it is also possible to send log messages elsewhere,
-using &lt;logKey&gt; suffixed with 'LogType'.
-You can set &lt;logKey&gt;LogType to any of 'filelogger', 'stdoutlogger', 'stderrlogger', 'socketlogger' or 'nulllogger' (case insensitive).
-
-* 'filelogger' is the default specifying logging to a file.
-* 'stdoutlogger' and 'stderrlogger' send the output to the corresponding output streams.
-* 'socketlogger' will send the output to the corresponding socket host.
-* 'nulllogger' turns off logging.
-
-If &lt;logKey&gt;LogType is 'socketlogger', the following keywords apply:
-* &lt;logKey&gt;SocketHost = FQDN or IP address of a host to send the logs to
-* &lt;logKey&gt;SocketPort = the port (> 0) to open on that host
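-
-For example, a sketch that sends debug output to stdout and error output to a remote
-collector (the hostname and port are illustrative):
-
- debugLogType = stdoutlogger
- errorLogType = socketlogger
- errorSocketHost = loghost.example.com
- errorSocketPort = 5005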
-
-This is an example configuration file:
-
- error = /var/log/DCAE/vPRO/error.log
- errorLogLevel = WARN
- errorStyle = error
-
- metrics = /var/log/DCAE/vPRO/metrics.log
- metricsLogLevel = INFO
- metricsStyle = metrics
-
- audit = /var/log/DCAE/vPRO/audit.log
- auditLogLevel = INFO
- auditStyle = audit
-
- debug = /var/log/DCAE/vPRO/debug.log
- debugLogLevel = DEBUG
- debugStyle = debug
-
-## Coding Python Applications to Produce ONAP Common Logging
-
-A python application uses common logging by importing the CommonLogger
-module, instantiating a CommonLogger object for each log file, and then
-invoking each object's debug, info, warn, error, or fatal methods to log
-messages to the file. There are four styles of logging:
-error/info logs, debug logs, audit logs, and metrics logs.
-The difference between the types of logs is in the list of fields that
-are printed out.
-
-### Importing the CommonLogger Module
-
-Importing the CommonLogger module is typical:
-
- sys.path.append("/opt/app/dcae-commonlogging/python")
- import CommonLogger
-
-### Creating a CommonLogger object:
-
-When creating a CommonLogger object, three arguments are required:
-
-1. The configuration filename.
-2. The keyword name in the configuration file that
-defines the log filename and parameters controlling rotation of the logfiles.
-(This is the &lt;logKey&gt; referred to above.)
-3. The keyword arguments for style and to set default values for the log record fields.
-
-The style of the log (one of CommonLogger.DebugFile, CommonLogger.AuditFile,
-CommonLogger.MetricsFile and CommonLogger.ErrorFile) must be specified either
-in the configuration file (e.g., errorStyle=error or metricsStyle=metrics) or
-using a style= keyword and one of the values: CommonLogger.DebugFile,
-CommonLogger.AuditFile, CommonLogger.MetricsFile and CommonLogger.ErrorFile.
-
-Keyword arguments for log record fields are as follows.
-The annotation indicates whether the field is included in
-(d) debug logs, (a) audit logs, (m) metrics logs, and (e) error logs.
-
-* requestID (dame)
-* serviceInstanceID (am)
-* threadID (am)
-* serverName (am)
-* serviceName (am)
-* instanceUUID (am)
-* severity (am)
-* serverIPAddress (am)
-* server (am)
-* IPAddress (am)
-* className (am)
-* timer (am)
-* partnerName (ame)
-* targetEntity (me)
-* targetServiceName (me)
-* statusCode (am)
-* responseCode (am)
-* responseDescription (am)
-* processKey (am)
-* targetVirtualEntity (m)
-* customField1 (am)
-* customField2 (am)
-* customField3 (am)
-* customField4 (am)
-* errorCategory (e)
-* errorCode (e)
-* errorDescription (e)
-
-Sample code:
-
- """ The style can be specified here or in the config file using errorStyle. """
- errorLog = CommonLogger.CommonLogger("my.config", "error", style=CommonLogger.ErrorFile, serviceName="DCAE/vPRO")
- infoLog = CommonLogger.CommonLogger("my.config", "info", serviceName="DCAE/vPRO")
-
-### Setting default values for fields:
-
-The object's setFields method allows keyword arguments changing default values for the log record fields.
-
- errorLog.setFields(serviceName="DCAE/vPRO", threadID="thread-2")
-
-### Calling Methods
-
-The object's debug(), info(), warn(), error(), and fatal() methods require a detailMessage argument
-(which can be a zero-length string) and allow the keyword arguments for setting log record field
-values for just that one message.
-Any newlines or '|' characters in the message will be changed to a single space.
-
- infoLog.info("Something benign happened.")
- errorLog.fatal("Something very bad happened.", threadID="thread-4")
-
-### Output
-
-Note that no field may contain the '|' (pipe) field separation character, as that
-character is used as the separator between fields.
-Here is a possible example of a produced log record:
-
- 2015-10-12T15:56:43,182+00:00|netman@localdcae.att.com:~/vPRO_trinity/vPRO.py:905+2015-08-20 20:57:14.463426||||DCAE/vPRO:App-C.Restart|d4d5fc66-70f9-11e5-b0b1-005056866a82|INFO||135.16.76.33|mtvpro01dev1.dev.att.com|||1001|Finished Restart
- 2016-12-09T23:06:02,314+00:00||MainThread|DCAE/vPRO|||||||a FATAL message for the error log
-
-### Example Code
-
-The main within CommonLogger.py contains a regression test of the CommonLogger methods.
-
-CommonLogger_testing.py contains a complete demonstration of a python application
-using the python CommonLogging wrapper module, including creating UUIDs,
-setting default log field values, and timing operations.
-
-## Upgrading from Previous Versions of CommonLogger
-
-The current version of CommonLogger is 99% compatible with earlier versions of CommonLogger.
-The key change, driven by updated ONAP logging requirements, is the use of different lists
-of fields in different types of log files.
-This required a mandatory "style" to be given, which we chose to support either with a
-new keyword in the configuration file, or with a new parameter keyword when creating the logger.
diff --git a/osdf/logging/onap_common_v1/makefile b/osdf/logging/onap_common_v1/makefile
deleted file mode 100755
index 498127e..0000000
--- a/osdf/logging/onap_common_v1/makefile
+++ /dev/null
@@ -1,40 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-test:
- rm -f /tmp/cl.*.log
- python CommonLogger.py
- rm -f /tmp/cl.*.log
- python3 CommonLogger.py -k -v
- # python CommonLogger_test.py
- # python3 CommonLogger_test.py
-
-# STAGEDIR is overridden in ../makefile
-STAGEDIR=/tmp
-
-build: CommonLogger.html
- mkdir -p $(STAGEDIR)/python
- cp -p *.py *.config *.md CommonLogger.html $(STAGEDIR)/python
- chmod a+x $(STAGEDIR)/python/*.py
-
-CommonLogger.html: CommonLogger.py
- pydoc -w ./CommonLogger.py
-
-clean:
- rm -rf __pycache__ *.pyc CommonLogger.html
- rm -rf *~
diff --git a/osdf/logging/oof_mdc_context.py b/osdf/logging/oof_mdc_context.py
new file mode 100644
index 0000000..9c9b52c
--- /dev/null
+++ b/osdf/logging/oof_mdc_context.py
@@ -0,0 +1,170 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2020 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import logging
+import re
+import sys
+
+from onaplogging.marker import Marker
+from onaplogging.marker import MARKER_TAG
+from onaplogging.mdcContext import _replace_func_name
+from onaplogging.mdcContext import fetchkeys
+from onaplogging.mdcContext import findCaller as fc
+from onaplogging.mdcContext import MDC
+
+from osdf.utils.mdc_utils import set_error_details
+
+
+def findCaller(self, stack_info=False, stacklevel=1):
+ """replacing onaplogging.mdcContext with this method to work with py3.8
+
+ """
+ return fc(stack_info)
+
+
+def mdc_mapper():
+ """Convert the MDC dict into comma separated, name=value string
+
+ :return: comma-separated name=value string of the MDC entries (customField2 excluded)
+ """
+ return ','.join(f'{k}={v}' for (k, v) in MDC.result().items() if k not in ['customField2'])
+
+
+@fetchkeys
+def info(self, msg, *args, **kwargs):
+ """Wrapper method for log.info is called
+
+ """
+ if self.isEnabledFor(logging.INFO):
+ MDC.put('customField2', mdc_mapper())
+ self._log(logging.INFO, no_sep(msg), args, **kwargs)
+
+
+@fetchkeys
+def debug(self, msg, *args, **kwargs):
+ """Wrapper method for log.debug is called
+
+ msg: log message
+ args: logging args
+ kwargs: all the optional args
+ """
+ if self.isEnabledFor(logging.DEBUG):
+ self._log(logging.DEBUG, no_sep(msg), args, **kwargs)
+
+
+@fetchkeys
+def warning(self, msg, *args, **kwargs):
+ """Wrapper method for log.warning is called
+
+ msg: log message
+ args: logging args
+ kwargs: all the optional args
+ """
+ if self.isEnabledFor(logging.WARNING):
+ self._log(logging.WARNING, no_sep(msg), args, **kwargs)
+
+
+@fetchkeys
+def exception(self, msg, *args, **kwargs):
+ """Wrapper method for log.exception is called
+
+ msg: log message
+ args: logging args
+ kwargs: all the optional args
+ """
+ kwargs['exc_info'] = 1
+ self.error(no_sep(msg), *args, **kwargs)
+
+
+@fetchkeys
+def critical(self, msg, *args, **kwargs):
+ """Wrapper method for log.critical
+
+ msg: log message
+ args: logging args
+ kwargs: all the optional args
+ """
+ if self.isEnabledFor(logging.CRITICAL):
+ self._log(logging.CRITICAL, no_sep(msg), args, **kwargs)
+
+
+@fetchkeys
+def error(self, msg, *args, **kwargs):
+ """Wrapper method for log.error is called
+
+ msg: log message
+ args: logging args
+ kwargs: all the optional args
+ """
+ if self.isEnabledFor(logging.ERROR):
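+ # default the MDC error details if the caller has not already set them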
+ if not MDC.get('errorCode'):
+ set_error_details(400, 'Internal Error')
+ MDC.put('customField2', mdc_mapper())
+ self._log(logging.ERROR, no_sep(msg), args, **kwargs)
+
+
+@fetchkeys
+def log(self, level, msg, *args, **kwargs):
+ """Wrapper method for log.log is called
+
+ msg: log message
+ args: logging args
+ kwargs: all the optional args
+ """
+ if not isinstance(level, int):
+ if logging.raiseExceptions:
+ raise TypeError("level must be an integer")
+ else:
+ return
+
+ if self.isEnabledFor(level):
+ self._log(level, no_sep(msg), args, **kwargs)
+
+
+def handle(self, record):
+ """Wrapper method for log.handle is called
+
+ """
+ c_marker = getattr(self, MARKER_TAG, None)
+
+ if isinstance(c_marker, Marker):
+ setattr(record, MARKER_TAG, c_marker)
+
+ if (not self.disabled) and self.filter(record):
+ self.callHandlers(record)
+
+
+def no_sep(message):
+ """This method will remove newline, | from the message
+
+ """
+ if message is None:
+ return ''
+ return re.sub(r'[\|\n]', ' ', str(message))
+
+
+def patch_logging_mdc():
+ """The patch to add MDC ability in logging Record instance at runtime
+
+ """
+ local_module = sys.modules[__name__]
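+ # swap each matching logging.Logger method for the MDC-aware wrapper defined in this module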
+ for attr in dir(logging.Logger):
+ if attr in _replace_func_name:
+ new_func = getattr(local_module, attr, None)
+ if new_func:
+ setattr(logging.Logger, attr, new_func)
diff --git a/osdf/logging/oof_mdc_formatter.py b/osdf/logging/oof_mdc_formatter.py
new file mode 100644
index 0000000..6272a18
--- /dev/null
+++ b/osdf/logging/oof_mdc_formatter.py
@@ -0,0 +1,51 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2020 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import time
+
+from onaplogging.colorFormatter import RESET
+from onaplogging.mdcformatter import MDCFormatter
+
+
+class OOFMDCFormatter(MDCFormatter):
+ """ONAP MDC formatter
+
+ """
+
+ def __init__(self, fmt=None, mdcfmt=None,
+ datefmt=None, colorfmt=None, style="%"):
+ super().__init__(fmt, mdcfmt, datefmt, colorfmt, style)
+ self.converter = time.gmtime
+
+ def _replaceStr(self, keys):
+ """overriding the default behavior
+
+ keys: keys that needs to be substituted
+
+ """
+ fmt = self._mdcFmt
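+ # deliberately substitute each key with itself: unlike the base MDCFormatter,
+ # this keeps the MDC format string unchanged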
+ for i in keys:
+ fmt = fmt.replace(i, i)
+
+ return fmt
+
+ def format(self, record):
+ """Removing the color format end of line character.
+
+ """
+ return super(OOFMDCFormatter, self).format(record).replace(RESET, '')
diff --git a/osdf/logging/osdf_logging.py b/osdf/logging/osdf_logging.py
index a54d426..7ebaa99 100755
--- a/osdf/logging/osdf_logging.py
+++ b/osdf/logging/osdf_logging.py
@@ -15,38 +15,23 @@
#
# -------------------------------------------------------------------------
#
-
+import logging
+from logging import config
+import os
import traceback
-import uuid
-from .onap_common_v1.CommonLogger import CommonLogger
-from osdf.utils.programming_utils import MetaSingleton
+import yaml
+from osdf.logging import monkey
+from osdf.utils.programming_utils import MetaSingleton
-def log_handlers_pre_onap(config_file="config/onap_logging_common_v1.config",
- service_name="OOF_OSDF"):
- """
- Convenience handlers for logging to different log files
-
- :param config_file: configuration file (properties file) that specifies log location, rotation, etc.
- :param service_name: name for this service
- :return: dictionary of log objects: "error", "metrics", "audit", "debug"
-
- We can use the returned values as follows:
- X["error"].fatal("a FATAL message for the error log")
- X["error"].error("an ERROR message for the error log")
- X["error"].warn("a WARN message for the error log")
- X["audit"].info("an INFO message for the audit log")
- X["metrics"].info("an INFO message for the metrics log")
- X["debug"].debug("a DEBUG message for the debug log")
- """
- main_params = dict(instanceUUID=uuid.uuid1(), serviceName=service_name, configFile=config_file)
- return dict((x, CommonLogger(logKey=x, **main_params))
- for x in ["error", "metrics", "audit", "debug"])
+BASE_DIR = os.path.dirname(__file__)
+LOGGING_FILE = os.path.join(BASE_DIR, '..', '..', 'config', 'log.yml')
def format_exception(err, prefix=None):
"""Format operation for use with ecomp logging
+
:param err: exception object
:param prefix: prefix string message
:return: formatted exception (via repr(traceback.format_tb(err.__traceback__)))
@@ -56,188 +41,279 @@ def format_exception(err, prefix=None):
return exception_desc if not prefix else prefix + ": " + exception_desc
-class OOF_OSDFLogMessageHelper(metaclass=MetaSingleton):
+def create_log_dirs():
+ with open(LOGGING_FILE, 'r') as fid:
+ yaml_config = yaml.full_load(fid)
+ for handler in yaml_config['handlers'].values():
+ if handler.get('filename'):
+ os.makedirs(os.path.dirname(handler['filename']), exist_ok=True)
+
+
+class OOFOSDFLogMessageHelper(metaclass=MetaSingleton):
"""Provides loggers as a singleton (otherwise, we end up with duplicate messages).
+
Provides error_log, metrics_log, audit_log, and debug_log (in that order)
+
Additionally can provide specific log handlers too
"""
log_handlers = None
default_levels = ["error", "metrics", "audit", "debug"]
- def _setup_handlers(self, log_version="pre_onap", config_file=None, service_name=None):
- """return error_log, metrics_log, audit_log, debug_log"""
- if self.log_handlers is None:
- params = {}
- params.update({"config_file": config_file} if config_file else {})
- params.update({"service_name": service_name} if service_name else {})
-
- if log_version == "pre_onap":
- self.log_handlers = log_handlers_pre_onap(**params)
-
- def get_handlers(self, levels=None, log_version="pre_onap", config_file=None, service_name=None):
+ def get_handlers(self, levels=None):
"""Return ONAP-compliant log handlers for different levels. Each "level" ends up in a different log file
+
with a prefix of that level.
For example: error_log, metrics_log, audit_log, debug_log in that order
+
:param levels: None or list of levels subset of self.default_levels (["error", "metrics", "audit", "debug"])
- :param log_version: Currently only pre_onap is supported
- :param config_file: Logging configuration file for ONAP compliant logging
- :param service_name: Name of the service
+
:return: list of log_handlers in the order of levels requested.
if levels is None: we return handlers for self.default_levels
if levels is ["error", "audit"], we return log handlers for that.
"""
- self._setup_handlers(log_version="pre_onap", config_file=config_file, service_name=service_name)
+ create_log_dirs()
+ monkey.patch_all()
+ config.yamlConfig(filepath=LOGGING_FILE, watchDog=False)
wanted_levels = self.default_levels if levels is None else levels
- return [self.log_handlers.get(x) for x in wanted_levels]
+ return [logging.getLogger(x) for x in wanted_levels]
-class OOF_OSDFLogMessageFormatter(object):
+class OOFOSDFLogMessageFormatter(object):
@staticmethod
def accepted_valid_request(req_id, request):
+ """valid request message formatter
+
+ """
return "Accepted valid request for ID: {} for endpoint: {}".format(
req_id, request.url)
@staticmethod
def invalid_request(req_id, err):
+ """invalid request message formatter
+
+ """
return "Invalid request for request ID: {}; cause: {}".format(
req_id, format_exception(err))
@staticmethod
def invalid_response(req_id, err):
+ """invalid response message formatter
+
+ """
return "Invalid response for request ID: {}; cause: {}".format(
req_id, format_exception(err))
@staticmethod
def malformed_request(request, err):
+ """Malformed request message formatter
+
+ """
return "Malformed request for URL {}, from {}; cause: {}".format(
request.url, request.remote_address, format_exception(err))
@staticmethod
def malformed_response(response, client, err):
+ """Malformed response message formatter
+
+ """
return "Malformed response {} for client {}; cause: {}".format(
response, client, format_exception(err))
@staticmethod
def need_policies(req_id):
+ """Policies needed message formatter
+
+ """
return "Policies required but found no policies for request ID: {}".format(req_id)
@staticmethod
def policy_service_error(url, req_id, err):
+ """policy service error message formatter
+
+ """
return "Unable to call policy for {} for request ID: {}; cause: {}".format(
url, req_id, format_exception(err))
@staticmethod
def requesting_url(url, req_id):
+ """Message formatter for requesting url
+
+ """
return "Making a call to URL {} for request ID: {}".format(url, req_id)
@staticmethod
def requesting(service_name, req_id):
+ """Message formatter for requesting a service
+
+ """
return "Making a call to service {} for request ID: {}".format(service_name, req_id)
@staticmethod
def error_requesting(service_name, req_id, err):
+ """Message formatter on error requesting a service
+
+ """
return "Error while requesting service {} for request ID: {}; cause: {}".format(
service_name, req_id, format_exception(err))
@staticmethod
def calling_back(req_id, callback_url):
+ """Message formatter when a callback url is invoked
+
+ """
return "Posting result to callback URL for request ID: {}; callback URL={}".format(
req_id, callback_url)
@staticmethod
def calling_back_with_body(req_id, callback_url, body):
+ """Message formatter when a callback url with body is invoked
+
+ """
return "Posting result to callback URL for request ID: {}; callback URL={} body={}".format(
req_id, callback_url, body)
@staticmethod
def error_calling_back(req_id, callback_url, err):
+ """Message formatter on an error while posting result to callback url
+
+ """
return "Error while posting result to callback URL {} for request ID: {}; cause: {}".format(
req_id, callback_url, format_exception(err))
@staticmethod
def received_request(url, remote_addr, json_body):
+ """Message when a call is received
+
+ """
return "Received a call to {} from {} {}".format(url, remote_addr, json_body)
@staticmethod
def new_worker_thread(req_id, extra_msg=""):
+ """Message on invoking a new worker thread
+
+ """
res = "Initiating new worker thread for request ID: {}".format(req_id)
return res + extra_msg
@staticmethod
def inside_worker_thread(req_id, extra_msg=""):
+ """Message when inside a worker thread
+
+ """
res = "Inside worker thread for request ID: {}".format(req_id)
return res + extra_msg
@staticmethod
def processing(req_id, desc):
+ """Processing a request
+
+ """
return "Processing request ID: {} -- {}".format(req_id, desc)
@staticmethod
def processed(req_id, desc):
+ """Processed the request
+
+ """
return "Processed request ID: {} -- {}".format(req_id, desc)
@staticmethod
def error_while_processing(req_id, desc, err):
+ """Error while processing the request
+
+ """
return "Error while processing request ID: {} -- {}; cause: {}".format(
req_id, desc, format_exception(err))
@staticmethod
def creating_local_env(req_id):
+ """Creating a local environment
+
+ """
return "Creating local environment request ID: {}".format(
req_id)
@staticmethod
def error_local_env(req_id, desc, err):
+ """Error creating a local env
+
+ """
return "Error while creating local environment for request ID: {} -- {}; cause: {}".format(
req_id, desc, err.__traceback__)
@staticmethod
def inside_new_thread(req_id, extra_msg=""):
+ """Inside a new thread
+
+ """
res = "Spinning up a new thread for request ID: {}".format(req_id)
return res + " " + extra_msg
@staticmethod
def error_response_posting(req_id, desc, err):
+ """Error while posting a response
+
+ """
return "Error while posting a response for a request ID: {} -- {}; cause: {}".format(
req_id, desc, err.__traceback__)
@staticmethod
def received_http_response(resp):
+ """Received a http response
+
+ """
return "Received response [code: {}, headers: {}, data: {}]".format(
resp.status_code, resp.headers, resp.__dict__)
@staticmethod
def sending_response(req_id, desc):
+ """sending a response
+
+ """
return "Response is sent for request ID: {} -- {}".format(
req_id, desc)
@staticmethod
def listening_response(req_id, desc):
+ """Resposne is sent for a request ID
+
+ """
return "Response is sent for request ID: {} -- {}".format(
req_id, desc)
@staticmethod
def items_received(item_num, item_type, desc="Received"):
+ """Items received
+
+ """
return "{} {} {}".format(desc, item_num, item_type)
@staticmethod
def items_sent(item_num, item_type, desc="Published"):
+ """items published
+
+ """
return "{} {} {}".format(desc, item_num, item_type)
-MH = OOF_OSDFLogMessageFormatter
-error_log, metrics_log, audit_log, debug_log = OOF_OSDFLogMessageHelper().get_handlers()
+MH = OOFOSDFLogMessageFormatter
+
+error_log, metrics_log, audit_log, debug_log = OOFOSDFLogMessageHelper().get_handlers()
def warn_audit_error(msg):
- """Log the message to error_log.warn and audit_log.warn"""
+ """Log the message to error_log.warn and audit_log.warn
+
+ """
log_message_multi(msg, audit_log.warn, error_log.warn)
def log_message_multi(msg, *logger_methods):
"""Log the msg to multiple loggers
+
:param msg: message to log
:param logger_methods: e.g. error_log.warn, audit_log.warn, etc.
"""
diff --git a/osdf/models/api/pciOptimizationRequest.py b/osdf/models/api/pciOptimizationRequest.py
deleted file mode 100644
index 47b4eba..0000000
--- a/osdf/models/api/pciOptimizationRequest.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2018 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-from schematics.types import BaseType, StringType, URLType, IntType
-from schematics.types.compound import ModelType, ListType, DictType
-
-from .common import OSDFModel
-
-
-class RequestInfo(OSDFModel):
- """Info for northbound request from client such as SO"""
- transactionId = StringType(required=True)
- requestId = StringType(required=True)
- callbackUrl = URLType(required=True)
- callbackHeader = DictType(BaseType)
- sourceId = StringType(required=True)
- requestType = StringType(required=True)
- numSolutions = IntType()
- optimizers = ListType(StringType(required=True))
- timeout = IntType()
-
-
-class CellInfo(OSDFModel):
- """Information specific to CellInfo """
- networkId = StringType(required=True)
- cellIdList = ListType(StringType(required=True))
- trigger = StringType()
-
-
-class PCIOptimizationAPI(OSDFModel):
- """Request for PCI optimization """
- requestInfo = ModelType(RequestInfo, required=True)
- cellInfo = ModelType(CellInfo, required=True)
diff --git a/osdf/models/api/pciOptimizationResponse.py b/osdf/models/api/pciOptimizationResponse.py
deleted file mode 100644
index 876c380..0000000
--- a/osdf/models/api/pciOptimizationResponse.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2018 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-from schematics.types import StringType, IntType
-from schematics.types.compound import ModelType, ListType
-
-from .common import OSDFModel
-
-
-class PCISolution(OSDFModel):
- cellId = StringType(required=True)
- pci = IntType(required=True)
-
-
-class Solution(OSDFModel):
- networkId = StringType(required=True)
- pciSolutions = ListType(ListType(ModelType(PCISolution), min_size=1))
-
-
-class PCIOptimizationResponse(OSDFModel):
- transactionId = StringType(required=True)
- requestId = StringType(required=True)
- requestStatus = StringType(required=True)
- statusMessage = StringType()
- solutions = ModelType(Solution, required=True)
diff --git a/osdf/models/api/placementRequest.py b/osdf/models/api/placementRequest.py
deleted file mode 100644
index aa71eac..0000000
--- a/osdf/models/api/placementRequest.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-from .common import OSDFModel
-from schematics.types import BaseType, StringType, URLType, IntType
-from schematics.types.compound import ModelType, ListType, DictType
-
-
-class RequestInfo(OSDFModel):
- """Info for northbound request from client such as SO"""
- transactionId = StringType(required=True)
- requestId = StringType(required=True)
- callbackUrl = URLType(required=True)
- callbackHeader = DictType(BaseType)
- sourceId = StringType(required=True)
- requestType = StringType(required=True)
- numSolutions = IntType()
- optimizers = ListType(StringType(required=True))
- timeout = IntType()
-
-
-class Candidates(OSDFModel):
- """Preferred candidate for a resource (sent as part of a request from client)"""
- identifierType = StringType(required=True)
- identifiers = ListType(StringType(required=True))
- cloudOwner = StringType()
-
-
-class ModelMetaData(OSDFModel):
- """Model information for a specific resource"""
- modelInvariantId = StringType(required=True)
- modelVersionId = StringType(required=True)
- modelName = StringType()
- modelType = StringType()
- modelVersion = StringType()
- modelCustomizationName = StringType(required=True)
-
-
-class LicenseModel(OSDFModel):
- entitlementPoolUUID = ListType(StringType(required=True))
- licenseKeyGroupUUID = ListType(StringType(required=True))
-
-
-class LicenseDemands(OSDFModel):
- resourceModuleName = StringType(required=True)
- serviceResourceId = StringType(required=True)
- resourceModelInfo = ModelType(ModelMetaData, required=True)
- existingLicenses = ModelType(LicenseModel)
-
-
-class LicenseInfo(OSDFModel):
- licenseDemands = ListType(ModelType(LicenseDemands))
-
-
-class PlacementDemand(OSDFModel):
- resourceModuleName = StringType(required=True)
- serviceResourceId = StringType(required=True)
- tenantId = StringType()
- resourceModelInfo = ModelType(ModelMetaData, required=True)
- existingCandidates = ListType(ModelType(Candidates))
- excludedCandidates = ListType(ModelType(Candidates))
- requiredCandidates = ListType(ModelType(Candidates))
-
-
-class ServiceInfo(OSDFModel):
- serviceInstanceId = StringType(required=True)
- modelInfo = ModelType(ModelMetaData, required=True)
- serviceName = StringType(required=True)
-
-
-class SubscriberInfo(OSDFModel):
- """Details on the customer that subscribes to the VNFs"""
- globalSubscriberId = StringType(required=True)
- subscriberName = StringType()
- subscriberCommonSiteId = StringType()
-
-
-class PlacementInfo(OSDFModel):
- """Information specific to placement optimization"""
- requestParameters = DictType(BaseType)
- placementDemands = ListType(ModelType(PlacementDemand), min_size=1)
- subscriberInfo = ModelType(SubscriberInfo)
-
-
-class PlacementAPI(OSDFModel):
- """Request for placement optimization (specific to optimization and additional metadata"""
- requestInfo = ModelType(RequestInfo, required=True)
- placementInfo = ModelType(PlacementInfo, required=True)
- licenseInfo = ModelType(LicenseInfo)
- serviceInfo = ModelType(ServiceInfo, required=True)
diff --git a/osdf/models/api/placementResponse.py b/osdf/models/api/placementResponse.py
deleted file mode 100644
index 063a9a8..0000000
--- a/osdf/models/api/placementResponse.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-from .common import OSDFModel
-from schematics.types import BaseType, StringType
-from schematics.types.compound import ModelType, ListType, DictType
-
-
-# TODO: update osdf.models
-
-class LicenseSolution(OSDFModel):
- serviceResourceId = StringType(required=True)
- resourceModuleName = StringType(required=True)
- entitlementPoolUUID = ListType(StringType(required=True))
- licenseKeyGroupUUID = ListType(StringType(required=True))
- entitlementPoolInvariantUUID = ListType(StringType(required=True))
- licenseKeyGroupInvariantUUID = ListType(StringType(required=True))
-
-
-class Candidates(OSDFModel):
- """Preferred candidate for a resource (sent as part of a request from client)"""
- identifierType = StringType(required=True)
- identifiers = ListType(StringType(required=True))
- cloudOwner = StringType()
-
-
-class AssignmentInfo(OSDFModel):
- key = StringType(required=True)
- value = BaseType(required=True)
-
-
-class PlacementSolution(OSDFModel):
- serviceResourceId = StringType(required=True)
- resourceModuleName = StringType(required=True)
- solution = ModelType(Candidates, required=True)
- assignmentInfo = ListType(ModelType(AssignmentInfo))
-
-
-class Solution(OSDFModel):
- placementSolutions = ListType(ListType(ModelType(PlacementSolution), min_size=1))
- licenseSolutions = ListType(ModelType(LicenseSolution), min_size=1)
-
-
-class PlacementResponse(OSDFModel):
- transactionId = StringType(required=True)
- requestId = StringType(required=True)
- requestStatus = StringType(required=True)
- statusMessage = StringType()
- solutions = ModelType(Solution, required=True)
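The nested ListType(ListType(...)) above means placementSolutions holds groups of alternative solutions, each group a list of per-resource placements. A hypothetical minimal payload shaped for that model (all values are made up):

placement_response = {
    "transactionId": "txn-1",
    "requestId": "req-1",
    "requestStatus": "completed",
    "solutions": {
        "placementSolutions": [[  # outer list: alternative solution sets
            {
                "serviceResourceId": "res-1",
                "resourceModuleName": "vFW",
                "solution": {"identifierType": "cloudRegionId",
                             "identifiers": ["region-1"]},
            }
        ]]
    },
}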
diff --git a/osdf/models/policy/placement/tosca/affinityPolicy-v20180326.yml b/osdf/models/policy/placement/tosca/affinityPolicy-v20181031.yml
index 4db64db..89a3e9d 100644
--- a/osdf/models/policy/placement/tosca/affinityPolicy-v20180326.yml
+++ b/osdf/models/policy/placement/tosca/affinityPolicy-v20181031.yml
@@ -19,7 +19,7 @@ node_types:
type: string
constraints:
- valid_values:
- - affinityPolicy
+ - zone
identity:
type: string
required: true
diff --git a/osdf/models/policy/placement/tosca/distancePolicy-v20180326.yml b/osdf/models/policy/placement/tosca/distancePolicy-v20181031.yml
index 53d15dd..9c3bd29 100644
--- a/osdf/models/policy/placement/tosca/distancePolicy-v20180326.yml
+++ b/osdf/models/policy/placement/tosca/distancePolicy-v20181031.yml
@@ -19,7 +19,7 @@ node_types:
type: string
constraints:
- valid_values:
- - distancePolicy
+ - distance_to_location
identity:
type: string
required: true
diff --git a/osdf/models/policy/placement/tosca/hpaPolicy-v20180326.yml b/osdf/models/policy/placement/tosca/hpaPolicy-v20181031.yml
index 293cb63..374f752 100644
--- a/osdf/models/policy/placement/tosca/hpaPolicy-v20180326.yml
+++ b/osdf/models/policy/placement/tosca/hpaPolicy-v20181031.yml
@@ -19,10 +19,12 @@ node_types:
type: string
constraints:
- valid_values:
- - hpaPolicy
- resource:
- type: string
+ - hpa
+ resources:
+ type: list
required: true
+ entry_schema:
+ type: string
identity:
type: string
required: true
@@ -30,7 +32,7 @@ node_types:
type: list
required: true
entry_schema:
- - type: policy.data.flavorFeatures_properties
+ type: policy.data.flavorFeatures_properties
data_types:
policy.data.flavorFeatures_properties:
derived_from: tosca.nodes.Root
@@ -78,7 +80,7 @@ data_types:
required: true
score:
type: string
- required: true
+ required: false
architecture:
type: string
required: true
@@ -122,4 +124,4 @@ data_types:
- subset
unit:
type: string
- required: true
+ required: false
diff --git a/osdf/models/policy/placement/tosca/optimizationPolicy-v20180326.yml b/osdf/models/policy/placement/tosca/optimizationPolicy-v20181031.yml
index 0a3b94f..40678c3 100644
--- a/osdf/models/policy/placement/tosca/optimizationPolicy-v20180326.yml
+++ b/osdf/models/policy/placement/tosca/optimizationPolicy-v20181031.yml
@@ -19,7 +19,7 @@ node_types:
type: string
constraints:
- valid_values:
- - optimizationPolicy
+ - placement_optimization
identity:
type: string
required: true
@@ -32,11 +32,11 @@ node_types:
- valid_values:
- minimize
- maximize
- objectiveParameters:
- type: policy.data.objectiveParameters_properties
+ objectiveParameter:
+ type: policy.data.objectiveParameter_properties
required: true
data_types:
- policy.data.objectiveParameters_properties:
+ policy.data.objectiveParameter_properties:
derived_from: tosca.nodes.Root
properties:
parameterAttributes:
@@ -62,7 +62,7 @@ data_types:
resources:
type: string
required: true
- customerLocation:
+ customerLocationInfo:
type: string
required: true
parameter:
diff --git a/osdf/models/policy/placement/tosca/queryPolicy-v20180326.yml b/osdf/models/policy/placement/tosca/queryPolicy-v20181031.yml
index 2488769..09824db 100644
--- a/osdf/models/policy/placement/tosca/queryPolicy-v20180326.yml
+++ b/osdf/models/policy/placement/tosca/queryPolicy-v20181031.yml
@@ -19,7 +19,7 @@ node_types:
type: string
constraints:
- valid_values:
- - queryPolicy
+ - request_param_query
identity:
type: string
required: true
@@ -27,7 +27,7 @@ node_types:
type: list
required: true
entry_schema:
- - type: policy.data.queryProperties_properties
+ type: policy.data.queryProperties_properties
data_types:
policy.data.queryProperties_properties:
derived_from: tosca.nodes.Root
diff --git a/osdf/models/policy/placement/tosca/vnfPolicy-v20180326.yml b/osdf/models/policy/placement/tosca/vnfPolicy-v20181031.yml
index 3880cea..8eaf178 100644
--- a/osdf/models/policy/placement/tosca/vnfPolicy-v20180326.yml
+++ b/osdf/models/policy/placement/tosca/vnfPolicy-v20181031.yml
@@ -41,7 +41,7 @@ node_types:
type: list
required: true
entry_schema:
- - type: policy.data.vnfProperties_properties
+ type: policy.data.vnfProperties_properties
data_types:
policy.data.vnfProperties_properties:
derived_from: tosca.nodes.Root
@@ -66,3 +66,20 @@ data_types:
customerId:
type: string
required: true
+ unique:
+ type: string
+ required: false
+ attributes:
+ type: list
+ required: false
+ entry_schema:
+ type: policy.data.vnfProperties_filteringAttributes
+ passthroughAttributes:
+ type: list
+ required: false
+ entry_schema:
+ type: policy.data.vnfProperties_passthroughAttributes
+ policy.data.vnfProperties_filteringAttributes:
+ derived_from: tosca.nodes.Root
+ policy.data.vnfProperties_passthroughAttributes:
+ derived_from: tosca.nodes.Root
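A sketch (PyYAML is an assumption, not part of this diff) of checking a value against the valid_values constraints these TOSCA models declare, using the VnfPolicy file introduced below:

import yaml

with open("onap.policies.optimization.resource.VnfPolicy.yaml") as f:
    doc = yaml.safe_load(f)

props = doc["data_types"]["policy.data.vnfProperties_properties"]["properties"]
allowed = props["inventoryType"]["entry_schema"]["constraints"][0]["valid_values"]
assert "cloudRegionId" in allowed  # e.g. validate a policy value before use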
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.AffinityPolicy.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.AffinityPolicy.yaml
new file mode 100644
index 0000000..3fb8525
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.AffinityPolicy.yaml
@@ -0,0 +1,31 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.resource.AffinityPolicy:
+ derived_from: onap.policies.optimization.Resource
+ version: 1.0.0
+ properties:
+ applicableResources:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - any
+ - all
+ affinityProperties:
+ type: policy.data.affinityProperties_properties
+ required: true
+data_types:
+ policy.data.affinityProperties_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ qualifier:
+ type: string
+ constraints:
+ - valid_values:
+ - same
+ - different
+ category:
+ type: string
+ required: true
\ No newline at end of file
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.AggregationPolicy.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.AggregationPolicy.yaml
new file mode 100644
index 0000000..1e3c813
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.AggregationPolicy.yaml
@@ -0,0 +1,42 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.resource.AggregationPolicy:
+ derived_from: onap.policies.optimization.Resource
+ version: 1.0.0
+ properties:
+ applicableResources:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - any
+ - all
+ aggregationProperties:
+ type: list
+ required: true
+ entry_schema:
+ type: policy.data.aggregationProperties_properties
+data_types:
+ policy.data.aggregationProperties_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ attribute:
+ type: string
+ required: true
+ operator:
+ type: string
+ required: true
+ threshold:
+ type: policy.data.thresh_properties
+ required: true
+ unit:
+ type: string
+ required: false
+ function:
+ type: string
+ required: true
+ policy.data.thresh_properties:
+ derived_from: tosca.nodes.Root
+
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.DistancePolicy.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.DistancePolicy.yaml
new file mode 100644
index 0000000..196ba9e
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.DistancePolicy.yaml
@@ -0,0 +1,56 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.resource.DistancePolicy:
+ derived_from: onap.policies.optimization.Resource
+ version: 1.0.0
+ properties:
+ applicableResources:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - any
+ - all
+ distanceProperties:
+ type: policy.data.distanceProperties_properties
+ required: true
+data_types:
+ policy.data.distanceProperties_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ locationInfo:
+ type: string
+ required: true
+ distance:
+ type: policy.data.distance_properties
+ required: true
+ entry_schema:
+ type: policy.data.distance_properties
+ policy.data.distance_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ value:
+ type: string
+ required: true
+ operator:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - <
+ - <=
+ - '>'
+ - '>='
+ - =
+ unit:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
- km
\ No newline at end of file
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.HpaPolicy.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.HpaPolicy.yaml
new file mode 100644
index 0000000..fe7b864
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.HpaPolicy.yaml
@@ -0,0 +1,103 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.resource.HpaPolicy:
+ derived_from: onap.policies.optimization.Resource
+ version: 1.0.0
+ properties:
+ flavorFeatures:
+ type: list
+ required: true
+ entry_schema:
+ type: policy.data.flavorFeatures_properties
+data_types:
+ policy.data.flavorFeatures_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ id:
+ type: string
+ required: true
+ type:
+ type: string
+ required: true
+ directives:
+ type: list
+ required: true
+ entry_schema:
+ type: policy.data.directives_properties
+ flavorProperties:
+ type: list
+ required: true
+ entry_schema:
+ type: policy.data.flavorProperties_properties
+ policy.data.directives_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ type:
+ type: string
+ attributes:
+ type: list
+ entry_schema:
+ type: policy.data.directives_attributes_properties
+ policy.data.directives_attributes_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ attribute_name:
+ type: string
+ attribute_value:
+ type: string
+ policy.data.flavorProperties_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ hpa-feature:
+ type: string
+ required: true
+ mandatory:
+ type: string
+ required: true
+ score:
+ type: string
+ required: false
+ architecture:
+ type: string
+ required: true
+ hpa-version:
+ type: string
+ required: true
+ directives:
+ type: list
+ required: true
+ entry_schema:
+ type: policy.data.directives_properties
+ hpa-feature-attributes:
+ type: list
+ required: true
+ entry_schema:
+ type: policy.data.hpa-feature-attributes_properties
+ policy.data.hpa-feature-attributes_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ hpa-attribute-key:
+ type: string
+ required: true
+ hpa-attribute-value:
+ type: string
+ required: true
+ operator:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - <
+ - <=
+ - '>'
+ - '>='
+ - =
+ - '!='
+ - any
+ - all
+ - subset
+ unit:
+ type: string
+ required: false
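The operator list above bounds how an hpa-attribute-value may be compared during flavor matching. A hedged sketch of such a dispatch; the semantics given for any/all/subset are illustrative, not the OSDF implementation:

import operator

OPS = {"<": operator.lt, "<=": operator.le, ">": operator.gt,
       ">=": operator.ge, "=": operator.eq, "!=": operator.ne}

def attribute_matches(op, candidate_value, policy_value):
    # Relational operators compare scalars; the set-style operators are
    # assumed here to compare a value (or values) against a list.
    if op == "any":
        return candidate_value in policy_value
    if op == "all":
        return all(v in candidate_value for v in policy_value)
    if op == "subset":
        return set(candidate_value).issubset(set(policy_value))
    return OPS[op](candidate_value, policy_value)

print(attribute_matches(">=", 8, 4))                         # True
print(attribute_matches("any", "sriov", ["sriov", "dpdk"]))  # True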
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.OptimizationPolicy.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.OptimizationPolicy.yaml
new file mode 100644
index 0000000..fae050b
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.OptimizationPolicy.yaml
@@ -0,0 +1,66 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.resource.OptimizationPolicy:
+ derived_from: onap.policies.optimization.Resource
+ version: 1.0.0
+ properties:
+ objective:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - minimize
+ - maximize
+ objectiveParameter:
+ type: policy.data.objectiveParameter_properties
+ required: true
+data_types:
+ policy.data.objectiveParameter_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ parameterAttributes:
+ type: list
+ required: true
+ entry_schema:
+ type: policy.data.parameterAttributes_properties
+ operator:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - '*'
+ - +
+ - '-'
+ - /
+ - '%'
+ policy.data.parameterAttributes_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ resources:
+ type: string
+ required: true
+ customerLocationInfo:
+ type: string
+ required: true
+ parameter:
+ type: string
+ required: true
+ weight:
+ type: string
+ required: true
+ operator:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - '*'
+ - +
+ - '-'
+ - /
+ - '%' \ No newline at end of file
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.PciPolicy.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.PciPolicy.yaml
new file mode 100644
index 0000000..021cff9
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.PciPolicy.yaml
@@ -0,0 +1,30 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.resource.PciPolicy:
+ derived_from: onap.policies.optimization.Resource
+ version: 1.0.0
+ properties:
+ pciProperties:
+ type: list
+ required: false
+ entry_schema:
+ type: policy.data.pciProperties_properties
+data_types:
+ policy.data.pciProperties_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ algoCategory:
+ type: string
+ required: false
+ pciOptmizationAlgoName:
+ type: string
+ required: false
+ pciOptimizationNwConstraint:
+ type: string
+ required: false
+ pciOptimizationPriority:
+ type: string
+ required: false
+ pciOptimizationTimeConstraint:
+ type: string
+ required: false
\ No newline at end of file
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.ThresholdPolicy.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.ThresholdPolicy.yaml
new file mode 100644
index 0000000..ab400dd
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.ThresholdPolicy.yaml
@@ -0,0 +1,37 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.resource.ThresholdPolicy:
+ derived_from: onap.policies.optimization.Resource
+ version: 1.0.0
+ properties:
+ applicableResources:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - any
+ - all
+ thresholdProperties:
+ type: list
+ required: true
+ entry_schema:
+ type: policy.data.thresholdProperties_properties
+data_types:
+ policy.data.thresholdProperties_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ attribute:
+ type: string
+ required: true
+ operator:
+ type: string
+ required: true
+ threshold:
+ type: float
+ required: true
+ unit:
+ type: string
+ required: false
+
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.Vim_fit.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.Vim_fit.yaml
new file mode 100644
index 0000000..6ba2ae1
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.Vim_fit.yaml
@@ -0,0 +1,28 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.resource.Vim_fit:
+ derived_from: onap.policies.optimization.Resource
+ version: 1.0.0
+ properties:
+ applicableResources:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - any
+ - all
+ capacityProperties:
+ type: policy.data.capacityProperties_properties
+ required: true
+data_types:
+ policy.data.capacityProperties_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ controller:
+ type: string
+ required: true
+ request:
+ type: string
+ required: true
\ No newline at end of file
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.VnfPolicy.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.VnfPolicy.yaml
new file mode 100644
index 0000000..1c7d3b6
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.resource.VnfPolicy.yaml
@@ -0,0 +1,44 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.resource.VnfPolicy:
+ derived_from: onap.policies.optimization.Resource
+ version: 1.0.0
+ properties:
+ applicableResources:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - any
+ - all
+ vnfProperties:
+ type: list
+ required: true
+ entry_schema:
+ type: policy.data.vnfProperties_properties
+data_types:
+ policy.data.vnfProperties_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ inventoryProvider:
+ type: string
+ required: true
+ serviceType:
+ type: string
+ required: true
+ inventoryType:
+ type: list
+ required: true
+ entry_schema:
+ type: string
+ constraints:
+ - valid_values:
+ - serviceInstanceId
+ - vnfName
+ - cloudRegionId
+ - vimId
+ customerId:
+ type: string
+ required: true
\ No newline at end of file
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.service.QueryPolicy.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.service.QueryPolicy.yaml
new file mode 100644
index 0000000..2a615ab
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.service.QueryPolicy.yaml
@@ -0,0 +1,24 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.service.QueryPolicy:
+ derived_from: onap.policies.optimization.Service
+ version: 1.0.0
+ properties:
+ queryProperties:
+ type: list
+ required: true
+ entry_schema:
+ type: policy.data.queryProperties_properties
+data_types:
+ policy.data.queryProperties_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ attribute:
+ type: string
+ required: true
+ value:
+ type: string
+ required: true
+ attribute_location:
+ type: string
+ required: true
\ No newline at end of file
diff --git a/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.service.SubscriberPolicy.yaml b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.service.SubscriberPolicy.yaml
new file mode 100644
index 0000000..60da742
--- /dev/null
+++ b/osdf/models/policy/placement/tosca_upload/onap.policies.optimization.service.SubscriberPolicy.yaml
@@ -0,0 +1,34 @@
+tosca_definitions_version: tosca_simple_yaml_1_1_0
+policy_types:
+ onap.policies.optimization.service.SubscriberPolicy:
+ derived_from: onap.policies.optimization.Service
+ version: 1.0.0
+ properties:
+ subscriberProperties:
+ type: policy.data.subscriberProperties_properties
+ required: true
+data_types:
+ policy.data.subscriberProperties_properties:
+ derived_from: tosca.nodes.Root
+ properties:
+ subscriberName:
+ type: list
+ required: true
+ metadata:
+ contextProvider: true
+ entry_schema:
+ type: string
+ subscriberRole:
+ type: list
+ required: true
+ metadata:
+ contextMatchable: scope
+ entry_schema:
+ type: string
+ provStatus:
+ type: list
+ required: true
+ metadata:
+ contextAttribute: true
+ entry_schema:
+ type: string
\ No newline at end of file
diff --git a/osdf/optimizers/__init__.py b/osdf/optimizers/__init__.py
deleted file mode 100644
index 4b25e5b..0000000
--- a/osdf/optimizers/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2017-2018 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
diff --git a/osdf/optimizers/licenseopt/simple_license_allocation.py b/osdf/optimizers/licenseopt/simple_license_allocation.py
deleted file mode 100644
index 74d220f..0000000
--- a/osdf/optimizers/licenseopt/simple_license_allocation.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-
-def license_optim(request_json):
- """
- Fetch license artifacts associated with the service model, and search for the licensekey-group-UUID and
- entitlement-pool-UUID associated with the AT&T part number and nominal throughput given in a request
- :param request_json: Request in JSON format
- :return: A list of per-demand license info (entitlement pools and license key groups)
- """
- req_id = request_json["requestInfo"]["requestId"]
-
- model_name = request_json.get('placementInfo', {}).get('serviceInfo', {}).get('modelInfo', {}).get('modelName')
- service_name = model_name
-
- license_info = []
-
- for demand in request_json.get('placementInfo', {}).get('demandInfo', {}).get('licenseDemands', []):
- license_info.append(
- {'serviceResourceId': demand['serviceResourceId'],
- 'resourceModuleName': demand['resourceModuleName'],
- 'entitlementPoolList': "NOT SUPPORTED",
- 'licenseKeyGroupList': "NOT SUPPORTED"
- })
- return license_info
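For reference, a hypothetical request showing the shape the deleted license_optim() consumed, and the result it produced:

request_json = {
    "requestInfo": {"requestId": "req-1"},
    "placementInfo": {
        "serviceInfo": {"modelInfo": {"modelName": "vFW-service"}},
        "demandInfo": {"licenseDemands": [
            {"serviceResourceId": "res-1", "resourceModuleName": "vFW"},
        ]},
    },
}
# license_optim(request_json) ->
# [{"serviceResourceId": "res-1", "resourceModuleName": "vFW",
#   "entitlementPoolList": "NOT SUPPORTED", "licenseKeyGroupList": "NOT SUPPORTED"}]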
diff --git a/osdf/optimizers/pciopt/__init__.py b/osdf/optimizers/pciopt/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/osdf/optimizers/pciopt/__init__.py
+++ /dev/null
diff --git a/osdf/optimizers/pciopt/configdb.py b/osdf/optimizers/pciopt/configdb.py
deleted file mode 100644
index bebc5c0..0000000
--- a/osdf/optimizers/pciopt/configdb.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2018 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-from datetime import datetime as dt
-
-from osdf.logging.osdf_logging import debug_log
-from osdf.utils.interfaces import RestClient
-
-
-def request(req_object, osdf_config, flat_policies):
- """
- Process a configdb request from a Client (build the ConfigDB API calls, make the calls, return the result)
- :param req_object: Request parameters from the client
- :param osdf_config: Configuration specific to OSDF application (core + deployment)
- :param flat_policies: policies related to PCI Opt (fetched based on request)
- :return: response from ConfigDB (the raw cell list and the assembled per-cell neighbor lists)
- """
- cell_list_response = {}
- config = osdf_config.deployment
- local_config = osdf_config.core
- uid, passwd = config['configDbUserName'], config['configDbPassword']
- req_id = req_object['requestInfo']['requestId']
- transaction_id = req_object['requestInfo']['transactionId']
- headers = dict(transaction_id=transaction_id)
-
- network_id = req_object['cellInfo']['networkId']
-
- cell_list_response['network_id'] = network_id
-
- rc = RestClient(userid=uid, passwd=passwd, method="GET", log_func=debug_log.debug, headers=headers)
-
- cell_list_url = '{}/{}?networkId={}'.format(config['configDbUrl'], config['configDbGetCellListUrl'], network_id)
-
- cell_list_resp = rc.request(raw_response=True, url=cell_list_url)
- cell_resp = cell_list_resp.json()
- ts = dt.strftime(dt.now(), '%Y-%m-%dT%H:%M:%S%z')
-
- cell_list = []
- count = 0
- for cell_id in cell_resp:
- cell_info = {'cell_id': cell_id, 'id': count}
- nbr_list_url = '{}/{}?cellId={}&ts={}'.format(config['configDbUrl'], config['configDbGetNbrListUrl'], cell_id,
- ts)
- nbr_list_raw = rc.request(url=nbr_list_url, raw_response=True)
- cell_info['nbr_list'] = nbr_list_raw.json()
- cell_list.append(cell_info)
- count += 1
-
- cell_list_response['cell_list'] = cell_list
- return cell_resp, cell_list_response
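RestClient is OSDF-internal; an equivalent sketch of the two-step fetch above using plain requests. The base URL, endpoint paths, and credentials are placeholders for the configDbUrl, configDbGetCellListUrl, and configDbGetNbrListUrl settings:

from datetime import datetime as dt
import requests

base, auth = "https://configdb.example:8080", ("user", "pass")  # placeholders
network_id = "net-1"

cells = requests.get("{}/getCellList?networkId={}".format(base, network_id),
                     auth=auth).json()
ts = dt.strftime(dt.now(), "%Y-%m-%dT%H:%M:%S%z")
cell_list = []
for count, cell_id in enumerate(cells):
    nbrs = requests.get("{}/getNbrList?cellId={}&ts={}".format(base, cell_id, ts),
                        auth=auth).json()
    cell_list.append({"cell_id": cell_id, "id": count, "nbr_list": nbrs})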
diff --git a/osdf/optimizers/pciopt/pci_opt_processor.py b/osdf/optimizers/pciopt/pci_opt_processor.py
deleted file mode 100644
index 989f578..0000000
--- a/osdf/optimizers/pciopt/pci_opt_processor.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2018 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-import traceback
-from requests import RequestException
-
-from osdf.logging.osdf_logging import metrics_log, MH, error_log
-from osdf.operation.error_handling import build_json_error_body
-from osdf.utils.interfaces import get_rest_client
-from .configdb import request as config_request
-from .solver.optimizer import pci_optimize as optimize
-from .solver.pci_utils import get_cell_id, get_pci_value
-
-"""
-This application generates PCI Optimization API calls using the information received from PCI-Handler-MS, SDN-C
-and Policy.
-"""
-
-
-def process_pci_optimation(request_json, osdf_config, flat_policies):
- """
- Process a PCI request from a Client (build config-db, policy and API call, make the call, return result)
- :param request_json: Request parameters from the client
- :param osdf_config: Configuration specific to OSDF application (core + deployment)
- :param flat_policies: policies related to pci (fetched based on request)
- :return: response from PCI Opt
- """
- try:
- rc = get_rest_client(request_json, service="pcih")
- req_id = request_json["requestInfo"]["requestId"]
- cell_info_list, network_cell_info = config_request(request_json, osdf_config, flat_policies)
-
- pci_response = get_solutions(cell_info_list, network_cell_info, request_json)
-
- metrics_log.info(MH.inside_worker_thread(req_id))
- except Exception as err:
- error_log.error("Error for {} {}".format(req_id, traceback.format_exc()))
-
- try:
- body = build_json_error_body(err)
- metrics_log.info(MH.sending_response(req_id, "ERROR"))
- rc.request(json=body, noresponse=True)
- except RequestException:
- error_log.error("Error sending asynchronous notification for {} {}".format(req_id, traceback.format_exc()))
- return
-
- try:
- metrics_log.info(MH.calling_back_with_body(req_id, rc.url, pci_response))
- rc.request(json=pci_response, noresponse=True)
- except RequestException: # can't do much here but log it and move on
- error_log.error("Error sending asynchronous notification for {} {}".format(req_id, traceback.format_exc()))
-
-
-def get_solutions(cell_info_list, network_cell_info, request_json):
- return {
- "transactionId": request_json['requestInfo']['transactionId'],
- "requestId": request_json["requestInfo"]["requestId"],
- "requestStatus": "completed",
- "statusMessage": "success",
- "solutions": [
- {
- 'networkId': request_json['cellInfo']['networkId'],
- 'pciSolutions': build_solution_list(cell_info_list, network_cell_info, request_json)
- }
- ]
- }
-
-
-def build_solution_list(cell_info_list, network_cell_info, request_json):
- solution_list = []
- for cell in request_json['cellInfo']['cellIdList']:
- opt_solution = optimize(cell, network_cell_info, cell_info_list)
- sol = opt_solution[0]['pci']
- for k, v in sol.items():
- response = {
- 'cellId': get_cell_id(network_cell_info, k),
- 'pci': get_pci_value(network_cell_info, v)
- }
- solution_list.append(response)
- return solution_list
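Note that get_solutions() above emits "solutions" as a list, while the deleted PCIOptimizationResponse model declares solutions = ModelType(Solution), a single object. A payload shaped strictly for that model would look like this (values are illustrative):

pci_response = {
    "transactionId": "txn-1",
    "requestId": "req-1",
    "requestStatus": "completed",
    "statusMessage": "success",
    "solutions": {"networkId": "net-1",
                  "pciSolutions": [[{"cellId": "cell-1", "pci": 3}]]},
}
# PCIOptimizationResponse(pci_response).validate() would accept this shape.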
diff --git a/osdf/optimizers/pciopt/solver/__init__.py b/osdf/optimizers/pciopt/solver/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/osdf/optimizers/pciopt/solver/__init__.py
+++ /dev/null
diff --git a/osdf/optimizers/pciopt/solver/min_confusion.mzn b/osdf/optimizers/pciopt/solver/min_confusion.mzn
deleted file mode 100644
index 803f914..0000000
--- a/osdf/optimizers/pciopt/solver/min_confusion.mzn
+++ /dev/null
@@ -1,95 +0,0 @@
-% -------------------------------------------------------------------------
-% Copyright (c) 2018 AT&T Intellectual Property
-%
-% Licensed under the Apache License, Version 2.0 (the "License");
-% you may not use this file except in compliance with the License.
-% You may obtain a copy of the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS,
-% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-% See the License for the specific language governing permissions and
-% limitations under the License.
-%
-% -------------------------------------------------------------------------
-%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Parameters and their assertions
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Number of cells/radios.
-int: NUM_NODES;
-
-% Maximum number of Physical Cell Identifiers to be assigned to the nodes.
-int: NUM_PCIS;
-
-% Number of edges between neighbor nodes. There is an edge (i,j) if and only
-% if nodes i and j are neighbors, i.e., a user equipment (UE) can make a
-% handoff between i and j. Such edges are used to avoid **CONFLICTS**, i.e.,
-% to guarantee that nodes i and j have different PCIs.
-int: NUM_CONFLICT_EDGES;
-
-% Each line represents an edge between direct neighbors as defined before.
-array[1..NUM_CONFLICT_EDGES, 1..2] of int: CONFLICT_EDGES;
-
-% Number of indirect neighbor pairs (j, k) such that both j and k are direct
-% neighbors of node i, i.e., (j, k) exists if and only if (i, j) and (i, k)
-% exist. Nodes (j, k) can generate "confusions" in the network if they have
-% the same PCI. Such edges are used to avoid/minimize **CONFUSIONS**.
-int: NUM_CONFUSION_EDGES;
-
-% Each line represents an edge between undirect neighbors as defined before.
-array[1..NUM_CONFUSION_EDGES, 1..2] of int: CONFUSION_EDGES;
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Decision variables
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Defines the PCI for each node.
-array[0..NUM_NODES-1] of var 0..NUM_PCIS-1: pci;
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Constraints
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Direct neighbors must have different PCIs to avoid **CONFLICTS**.
-constraint
-forall(i in 1..NUM_CONFLICT_EDGES)(
- pci[CONFLICT_EDGES[i, 1]] != pci[CONFLICT_EDGES[i, 2]]
-);
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Objective function
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Total number of confusions.
-var int: total_confusions =
- sum([bool2int(pci[CONFUSION_EDGES[i, 1]] == pci[CONFUSION_EDGES[i, 2]])
- | i in 1..NUM_CONFUSION_EDGES]);
-
-% Minimize the total number of confusions.
-solve :: int_search(pci, smallest, indomain_min, complete)
-minimize total_confusions;
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Output
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-output
-["PCI assigment"] ++
-["\nnode,pci"] ++
-[
- "\n" ++ show(node) ++ "," ++ show(pci[node])
-| node in 0..NUM_NODES-1
-] ++
-
-["\n\nConfusions"] ++
-["\nTotal confusions: " ++ show(total_confusions)] ++
-["\nConfusion pairs"] ++
-[
- "\n" ++ show(CONFUSION_EDGES[i, 1]) ++ "," ++ show(CONFUSION_EDGES[i, 2])
-| i in 1..NUM_CONFUSION_EDGES where
- fix(pci[CONFUSION_EDGES[i, 1]] == pci[CONFUSION_EDGES[i, 2]])
-]
diff --git a/osdf/optimizers/pciopt/solver/no_conflicts_no_confusion.mzn b/osdf/optimizers/pciopt/solver/no_conflicts_no_confusion.mzn
deleted file mode 100644
index 19fabb9..0000000
--- a/osdf/optimizers/pciopt/solver/no_conflicts_no_confusion.mzn
+++ /dev/null
@@ -1,86 +0,0 @@
-% -------------------------------------------------------------------------
-% Copyright (c) 2018 AT&T Intellectual Property
-%
-% Licensed under the Apache License, Version 2.0 (the "License");
-% you may not use this file except in compliance with the License.
-% You may obtain a copy of the License at
-%
-% http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS,
-% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-% See the License for the specific language governing permissions and
-% limitations under the License.
-%
-% -------------------------------------------------------------------------
-%
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Parameters and their assertions
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Number of cells/radios.
-int: NUM_NODES;
-
-% Maximum number of Physical Cell Identifiers to be assigned to the nodes.
-int: NUM_PCIS;
-
-% Number of edges between neighbor nodes. There is an edge (i,j) if and only
-% if nodes i and j are neighbors, i.e., a user equipment (UE) can make a
-% handoff between i and j. Such edges are used to avoid **CONFLICTS**, i.e.,
-% to guarantee that nodes i and j have different PCIs.
-int: NUM_CONFLICT_EDGES;
-
-% Each line represents an edge between direct neighbors as defined before.
-array[1..NUM_CONFLICT_EDGES, 1..2] of int: CONFLICT_EDGES;
-
-% Number of indirect neighbor pairs (j, k) such that both j and k are direct
-% neighbors of node i, i.e., (j, k) exists if and only if (i, j) and (i, k)
-% exist. Nodes (j, k) can generate "confusions" in the network if they have
-% the same PCI. Such edges are used to avoid/minimize **CONFUSIONS**.
-int: NUM_CONFUSION_EDGES;
-
-% Each line represents an edge between undirect neighbors as defined before.
-array[1..NUM_CONFUSION_EDGES, 1..2] of int: CONFUSION_EDGES;
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Decision variables
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Defines the PCI for each node.
-array[0..NUM_NODES-1] of var 0..NUM_PCIS-1: pci;
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Constraints
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Direct neighbors must have different PCIs to avoid **CONFLICTS**.
-constraint
-forall(i in 1..NUM_CONFLICT_EDGES)(
- pci[CONFLICT_EDGES[i, 1]] != pci[CONFLICT_EDGES[i, 2]]
-);
-
-% Indirect neighbors must have different PCIs to avoid **CONFUSIONS**.
-constraint
-forall(i in 1..NUM_CONFUSION_EDGES)(
- pci[CONFUSION_EDGES[i, 1]] != pci[CONFUSION_EDGES[i, 2]]
-);
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Objective function
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-% Just satisfy the problem.
-solve :: int_search(pci, smallest, indomain_min, complete) satisfy;
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-% Output
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-output
-["node,pci\n"] ++
-[
- show(node) ++ "," ++ show(pci[node]) ++ "\n"
-| node in 0..NUM_NODES-1
-]
diff --git a/osdf/optimizers/pciopt/solver/optimizer.py b/osdf/optimizers/pciopt/solver/optimizer.py
deleted file mode 100644
index e9fcb0d..0000000
--- a/osdf/optimizers/pciopt/solver/optimizer.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2018 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-import itertools
-
-import os
-import pymzn
-
-from osdf.logging.osdf_logging import debug_log
-from .pci_utils import get_id
-
-BASE_DIR = os.path.dirname(__file__)
-MZN_FILE_NAME = os.path.join(BASE_DIR, 'no_conflicts_no_confusion.mzn')
-
-
-def pci_optimize(cell_id, network_cell_info, cell_info_list):
- debug_log.debug("Cell ID {} ".format(cell_id))
- dzn_data = {}
- dzn_data['NUM_NODES'] = len(cell_info_list)
- dzn_data['NUM_PCIS'] = len(cell_info_list)
-
- conflict_edges = get_conflict_edges(cell_id, network_cell_info)
-
- dzn_data['NUM_CONFLICT_EDGES'] = len(conflict_edges)
- dzn_data['CONFLICT_EDGES'] = conflict_edges
-
- confusion_edges = get_confusion_edges(cell_id, network_cell_info)
-
- dzn_data['NUM_CONFUSION_EDGES'] = len(confusion_edges)
- dzn_data['CONFUSION_EDGES'] = confusion_edges
-
- return solve(dzn_data)
-
-def solve(dzn_data):
- return pymzn.minizinc(MZN_FILE_NAME, data=dzn_data)
-
-
-def get_conflict_edges(cell_id, network_cell_info):
- conflict_edges = []
- for cell in network_cell_info['cell_list']:
-
- if cell_id == cell['cell_id']:
- add_to_conflict_edges(network_cell_info, cell, conflict_edges)
- return conflict_edges
-
-
-def add_to_conflict_edges(network_cell_info, cell, conflict_edges):
- cell_id = cell['cell_id']
- for nbr in cell.get('nbr_list', []):
- conflict_edges.append([get_id(network_cell_info, cell_id), get_id(network_cell_info, nbr['cellId'])])
-
-
-
-def get_confusion_edges(cell_id, network_cell_info):
- confusion_edges = []
- for cell in network_cell_info['cell_list']:
- if cell_id == cell['cell_id']:
- return add_to_confusion_edges(network_cell_info, cell)
- return confusion_edges
-
-
-def add_to_confusion_edges(network_cell_info, cell):
- cell_id = cell['cell_id']
- nbr_list = []
- for nbr in cell.get('nbr_list', []):
- nbr_list.append(get_id(network_cell_info, nbr['cellId']))
- return [list(elem) for elem in list(itertools.combinations(nbr_list, 2))]
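A toy end-to-end run of the deleted solver path, mirroring how pci_optimize() fed pymzn. Cells 0-1 and 1-2 are direct neighbors (conflict edges); 0 and 2 share neighbor 1, so they form a confusion pair:

import pymzn

dzn_data = {
    "NUM_NODES": 3,
    "NUM_PCIS": 3,
    "NUM_CONFLICT_EDGES": 2,
    "CONFLICT_EDGES": [[0, 1], [1, 2]],
    "NUM_CONFUSION_EDGES": 1,
    "CONFUSION_EDGES": [[0, 2]],
}
solutions = pymzn.minizinc("no_conflicts_no_confusion.mzn", data=dzn_data)
print(solutions[0]["pci"])  # expect three distinct PCIs, e.g. [0, 1, 2]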
diff --git a/osdf/optimizers/pciopt/solver/pci_utils.py b/osdf/optimizers/pciopt/solver/pci_utils.py
deleted file mode 100644
index 71b5dd2..0000000
--- a/osdf/optimizers/pciopt/solver/pci_utils.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2018 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-
-def get_id(network_cell_info, cell_id):
- for i in network_cell_info['cell_list']:
- if i['cell_id'] == cell_id:
- return i['id']
- return None
-
-
-def get_cell_id(network_cell_info, id):
- for i in network_cell_info['cell_list']:
- if i['id'] == id:
- return i['cell_id']
- return None
-
-def get_pci_value(network_cell_info, id):
- cell_id = get_cell_id(network_cell_info, id)
- for i in network_cell_info['cell_list']:
- for j in i['nbr_list']:
- if cell_id == j['cellId']:
- return j['pciValue']
- return None
diff --git a/osdf/optimizers/placementopt/conductor/api_builder.py b/osdf/optimizers/placementopt/conductor/api_builder.py
deleted file mode 100644
index e841f48..0000000
--- a/osdf/optimizers/placementopt/conductor/api_builder.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-import json
-
-from jinja2 import Template
-
-import osdf.optimizers.placementopt.conductor.translation as tr
-from osdf.adapters.policy.utils import group_policies_gen
-from osdf.utils.programming_utils import list_flatten
-
-
-def conductor_api_builder(request_json, flat_policies: list, local_config,
- template="osdf/templates/conductor_interface.json"):
- """Build an OSDF southbound API call for HAS-Conductor/Placement optimization
- :param request_json: parameter data received from a client
- :param flat_policies: policy data received from the policy platform (flat policies)
- :param local_config: local configuration file with pointers for the service-specific information
- :param template: template used to generate the southbound API call to Conductor
- :return: json to be sent to Conductor/placement optimization
- """
- templ = Template(open(template).read())
- gp = group_policies_gen(flat_policies, local_config)
- demand_vnf_name_list = []
-
- for placementDemand in request_json['placementInfo']['placementDemands']:
- demand_vnf_name_list.append(placementDemand['resourceModuleName'].lower())
- demand_list = tr.gen_demands(request_json, gp['vnfPolicy'])
- attribute_policy_list = tr.gen_attribute_policy(demand_vnf_name_list, gp['attribute'])
- distance_to_location_policy_list = tr.gen_distance_to_location_policy(
- demand_vnf_name_list, gp['distance_to_location'])
- inventory_policy_list = tr.gen_inventory_group_policy(demand_vnf_name_list, gp['inventory_group'])
- resource_instance_policy_list = tr.gen_resource_instance_policy(
- demand_vnf_name_list, gp['instance_fit'])
- resource_region_policy_list = tr.gen_resource_region_policy(demand_vnf_name_list, gp['region_fit'])
- zone_policy_list = tr.gen_zone_policy(demand_vnf_name_list, gp['zone'])
- optimization_policy_list = tr.gen_optimization_policy(demand_vnf_name_list, gp['placementOptimization'])
- reservation_policy_list = tr.gen_reservation_policy(demand_vnf_name_list, gp['instance_reservation'])
- capacity_policy_list = tr.gen_capacity_policy(demand_vnf_name_list, gp['vim_fit'])
- hpa_policy_list = tr.gen_hpa_policy(demand_vnf_name_list, gp['hpa'])
- req_params_dict = tr.get_opt_query_data(request_json, gp['optimizationQueryPolicy'])
- conductor_policies = [attribute_policy_list, distance_to_location_policy_list, inventory_policy_list,
- resource_instance_policy_list, resource_region_policy_list, zone_policy_list,
- reservation_policy_list, capacity_policy_list, hpa_policy_list]
- filtered_policies = [x for x in conductor_policies if len(x) > 0]
- policy_groups = list_flatten(filtered_policies)
- req_info = request_json['requestInfo']
- request_type = req_info.get('requestType', None)
- rendered_req = templ.render(
- requestType=request_type,
- demand_list=demand_list,
- policy_groups=policy_groups,
- optimization_policies=optimization_policy_list,
- name=req_info['requestId'],
- timeout=req_info['timeout'],
- limit=req_info['numSolutions'],
- service_type=request_json['serviceInfo']['serviceName'],
- service_id=request_json['serviceInfo']['serviceInstanceId'],
- latitude=req_params_dict.get("customerLatitude", 0.0),
- longitude=req_params_dict.get("customerLongitude", 0.0),
- json=json)
- json_payload = json.dumps(json.loads(rendered_req)) # need this because template's JSON is ugly!
- return json_payload
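The json.dumps(json.loads(...)) round-trip at the end normalizes whatever whitespace the Jinja2 template leaves behind. A minimal sketch of the same pattern with a stand-in template (the real one is conductor_interface.json):

import json
from jinja2 import Template

templ = Template('{"name": "{{ name }}", "limit": {{ limit }}}')
rendered = templ.render(name="req-1", limit=2)
payload = json.dumps(json.loads(rendered))  # also fails fast on malformed JSON
print(payload)  # {"name": "req-1", "limit": 2}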
diff --git a/osdf/optimizers/placementopt/conductor/conductor.py b/osdf/optimizers/placementopt/conductor/conductor.py
deleted file mode 100755
index 357efd1..0000000
--- a/osdf/optimizers/placementopt/conductor/conductor.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-"""
-This application generates conductor API calls using the information received from SO and Policy platform.
-"""
-
-import json
-import time
-
-from jinja2 import Template
-from requests import RequestException
-
-from osdf.logging.osdf_logging import debug_log
-from osdf.optimizers.placementopt.conductor.api_builder import conductor_api_builder
-from osdf.utils.interfaces import RestClient
-from osdf.operation.exceptions import BusinessException
-
-
-def request(req_object, osdf_config, flat_policies):
- """
- Process a placement request from a Client (build Conductor API call, make the call, return result)
- :param req_object: Request parameters from the client
- :param osdf_config: Configuration specific to the OSDF application (core + deployment)
- :param flat_policies: policies related to placement (fetched based on request)
- :return: response from Conductor (accounting for redirects from the Conductor service)
- """
- config = osdf_config.deployment
- local_config = osdf_config.core
- uid, passwd = config['conductorUsername'], config['conductorPassword']
- conductor_url = config['conductorUrl']
- req_id = req_object['requestInfo']['requestId']
- transaction_id = req_object['requestInfo']['transactionId']
- headers = dict(transaction_id=transaction_id)
- placement_ver_enabled = config.get('placementVersioningEnabled', False)
-
- if placement_ver_enabled:
- cond_minor_version = config.get('conductorMinorVersion', None)
- if cond_minor_version is not None:
- x_minor_version = str(cond_minor_version)
- headers.update({'X-MinorVersion': x_minor_version})
- debug_log.debug("Versions set in HTTP header to conductor: X-MinorVersion: {} ".format(x_minor_version))
-
- max_retries = config.get('conductorMaxRetries', 30)
- ping_wait_time = config.get('conductorPingWaitTime', 60)
-
- rc = RestClient(userid=uid, passwd=passwd, method="GET", log_func=debug_log.debug, headers=headers)
- conductor_req_json_str = conductor_api_builder(req_object, flat_policies, local_config)
- conductor_req_json = json.loads(conductor_req_json_str)
-
- debug_log.debug("Sending first Conductor request for request_id {}".format(req_id))
- resp, raw_resp = initial_request_to_conductor(rc, conductor_url, conductor_req_json)
- # Very crude way of keeping track of time.
- # We are not counting initial request time, first call back, or time for HTTP request
- total_time, ctr = 0, 2
- client_timeout = req_object['requestInfo']['timeout']
- configured_timeout = max_retries * ping_wait_time
- max_timeout = min(client_timeout, configured_timeout)
-
- while True: # keep requesting conductor till we get a result or we run out of time
- if resp is not None:
- if resp["plans"][0].get("status") in ["error"]:
- raise RequestException(response=raw_resp, request=raw_resp.request)
-
- if resp["plans"][0].get("status") in ["done", "not found"]:
- if resp["plans"][0].get("recommendations"):
- return conductor_response_processor(resp, raw_resp, req_id)
- else: # "solved" but no solutions found
- return conductor_no_solution_processor(resp, raw_resp, req_id)
- new_url = resp['plans'][0]['links'][0][0]['href'] # TODO: check why a list of lists
-
- if total_time >= max_timeout:
- raise BusinessException("Conductor could not provide a solution within {} seconds,"
- "this transaction is timing out".format(max_timeout))
- time.sleep(ping_wait_time)
- ctr += 1
- debug_log.debug("Attempt number {} url {}; prior status={}".format(ctr, new_url, resp['plans'][0]['status']))
- total_time += ping_wait_time
-
- try:
- raw_resp = rc.request(new_url, raw_response=True)
- resp = raw_resp.json()
- except RequestException as e:
- debug_log.debug("Conductor attempt {} for request_id {} has failed because {}".format(ctr, req_id, str(e)))
-
-
-def initial_request_to_conductor(rc, conductor_url, conductor_req_json):
- """First steps in the request-redirect chain in making a call to Conductor
- :param rc: REST client object for calling conductor
- :param conductor_url: conductor's base URL to submit a placement request
- :param conductor_req_json: request json object to send to Conductor
- :return: (resp, raw_resp) from the first plan poll; the caller keeps checking the plan link till a result/error
- """
- debug_log.debug("Payload to Conductor: {}".format(json.dumps(conductor_req_json)))
- raw_resp = rc.request(url=conductor_url, raw_response=True, method="POST", json=conductor_req_json)
- resp = raw_resp.json()
- if resp["status"] != "template":
- raise RequestException(response=raw_resp, request=raw_resp.request)
- time.sleep(10) # 10 seconds wait time to avoid being too quick!
- plan_url = resp["links"][0][0]["href"]
- debug_log.debug("Attempting to read the plan from the conductor provided url {}".format(plan_url))
- raw_resp = rc.request(raw_response=True, url=plan_url) # TODO: check why a list of lists for links
- resp = raw_resp.json()
-
- if resp["plans"][0]["status"] in ["error"]:
- raise RequestException(response=raw_resp, request=raw_resp.request)
- return resp, raw_resp # now the caller of this will handle further follow-ups
-
-
-def conductor_response_processor(conductor_response, raw_response, req_id):
- """Build a response object to be sent to client's callback URL from Conductor's response
- This includes Conductor's placement optimization response and any required ASDC license artifacts
-
- :param conductor_response: JSON response from Conductor
- :param raw_response: Raw HTTP response corresponding to above
- :param req_id: Id of a request
- :return: JSON object that can be sent to the client's callback URL
- """
- composite_solutions = []
- name_map = {"physical-location-id": "cloudClli", "host_id": "vnfHostName",
- "cloud_version": "cloudVersion", "cloud_owner": "cloudOwner",
- "cloud": "cloudRegionId", "service": "serviceInstanceId", "is_rehome": "isRehome",
- "location_id": "locationId", "location_type": "locationType", "directives": "oof_directives"}
- for reco in conductor_response['plans'][0]['recommendations']:
- for resource in reco.keys():
- c = reco[resource]['candidate']
- solution = {
- 'resourceModuleName': resource,
- 'serviceResourceId': reco[resource].get('service_resource_id', ""),
- 'solution': {"identifierType": name_map.get(c['inventory_type'], c['inventory_type']),
- 'identifiers': [c['candidate_id']],
- 'cloudOwner': c.get('cloud_owner', "")},
- 'assignmentInfo': []
- }
- for key, value in c.items():
- if key in ["location_id", "location_type", "is_rehome", "host_id"]:
- try:
- solution['assignmentInfo'].append({"key": name_map.get(key, key), "value": value})
- except KeyError:
- debug_log.debug("The key[{}] is not mapped and will not be returned in assignment info".format(key))
-
- for key, value in reco[resource]['attributes'].items():
- try:
- solution['assignmentInfo'].append({"key": name_map.get(key, key), "value": value})
- except KeyError:
- debug_log.debug("The key[{}] is not mapped and will not be returned in assignment info".format(key))
- composite_solutions.append(solution)
-
- request_status = "completed" if conductor_response['plans'][0]['status'] == "done" \
- else conductor_response['plans'][0]['status']
- transaction_id = raw_response.headers.get('transaction_id', "")
- status_message = conductor_response.get('plans')[0].get('message', "")
-
- solution_info = {}
- if composite_solutions:
- solution_info.setdefault('placementSolutions', [])
- solution_info['placementSolutions'].append(composite_solutions)
-
- resp = {
- "transactionId": transaction_id,
- "requestId": req_id,
- "requestStatus": request_status,
- "statusMessage": status_message,
- "solutions": solution_info
- }
- return resp
-
-
-def conductor_no_solution_processor(conductor_response, raw_response, request_id,
- template_placement_response="templates/plc_opt_response.jsont"):
- """Build a response object to be sent to client's callback URL from Conductor's response
- This is for case where no solution is found
-
- :param conductor_response: JSON response from Conductor
- :param raw_response: Raw HTTP response corresponding to above
- :param request_id: request Id associated with the client request (same as conductor response's "name")
- :param template_placement_response: the template for generating response to client (plc_opt_response.jsont)
- :return: JSON object that can be sent to the client's callback URL
- """
- status_message = conductor_response["plans"][0].get("message")
- templ = Template(open(template_placement_response).read())
- return json.loads(templ.render(composite_solutions=[], requestId=request_id, license_solutions=[],
- transactionId=raw_response.headers.get('transaction_id', ""),
- requestStatus="completed", statusMessage=status_message, json=json))
-
-
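
The polling protocol that conductor.py implemented is easy to lose in diff form: the initial POST returns a plan in status "template" together with a link to poll (a list of lists, per the TODOs above), and the worker then re-reads that link until the plan reaches "done", "not found", or "error". A minimal sketch of the same handshake, assuming a requests-compatible client; the URL, credentials, and retry bounds are placeholders, not the application's configuration:

    import time

    import requests

    def poll_conductor_plan(base_url, payload, auth, wait=10, max_polls=30):
        # Initial POST: Conductor answers with a plan in status "template"
        # plus a link to poll for the solved plan.
        plan = requests.post(base_url, json=payload, auth=auth).json()
        if plan["status"] != "template":
            raise RuntimeError("unexpected initial status: " + plan["status"])
        plan_url = plan["links"][0][0]["href"]  # list of lists, as noted above
        for _ in range(max_polls):
            time.sleep(wait)
            plan = requests.get(plan_url, auth=auth).json()
            status = plan["plans"][0]["status"]
            if status in ("done", "not found", "error"):
                return plan  # caller decides between solution and error paths
        raise TimeoutError("plan did not settle within the polling budget")
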
diff --git a/osdf/optimizers/placementopt/conductor/remote_opt_processor.py b/osdf/optimizers/placementopt/conductor/remote_opt_processor.py
deleted file mode 100644
index 614eca3..0000000
--- a/osdf/optimizers/placementopt/conductor/remote_opt_processor.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-from requests import RequestException
-
-import traceback
-from osdf.operation.error_handling import build_json_error_body
-from osdf.logging.osdf_logging import metrics_log, MH, error_log
-from osdf.optimizers.placementopt.conductor import conductor
-from osdf.optimizers.licenseopt.simple_license_allocation import license_optim
-from osdf.utils.interfaces import get_rest_client
-
-
-def process_placement_opt(request_json, policies, osdf_config):
- """Perform the work for placement optimization (e.g. call SDC artifact and make conductor request)
- NOTE: there is scope to make the requests to policy asynchronous to speed up overall performance
- :param request_json: json content from original request
- :param policies: flattened policies corresponding to this request
- :param osdf_config: configuration specific to OSDF app
- :return: None, but make a POST to callback URL
- """
-
- try:
- rc = get_rest_client(request_json, service="so")
- req_id = request_json["requestInfo"]["requestId"]
- transaction_id = request_json['requestInfo']['transactionId']
-
- metrics_log.info(MH.inside_worker_thread(req_id))
- license_info = None
- if request_json.get('licenseInfo', {}).get('licenseDemands'):
- license_info = license_optim(request_json)
-
- # Conductor only handles placement, only call Conductor if placementDemands exist
- if request_json.get('placementInfo', {}).get('placementDemands'):
- metrics_log.info(MH.requesting("placement/conductor", req_id))
- placement_response = conductor.request(request_json, osdf_config, policies)
- if license_info: # Attach license solution if it exists
- placement_response['solutionInfo']['licenseInfo'] = license_info
- else: # License selection only scenario
- placement_response = {
- "transactionId": transaction_id,
- "requestId": req_id,
- "requestStatus": "completed",
- "statusMessage": "License selection completed successfully",
- "solutionInfo": {"licenseInfo": license_info}
- }
- except Exception as err:
- error_log.error("Error for {} {}".format(req_id, traceback.format_exc()))
-
- try:
- body = build_json_error_body(err)
- metrics_log.info(MH.sending_response(req_id, "ERROR"))
- rc.request(json=body, noresponse=True)
- except RequestException:
- error_log.error("Error sending asynchronous notification for {} {}".format(req_id, traceback.format_exc()))
- return
-
- try:
- metrics_log.info(MH.calling_back_with_body(req_id, rc.url, placement_response))
- rc.request(json=placement_response, noresponse=True)
- except RequestException: # can't do much here but log it and move on
- error_log.error("Error sending asynchronous notification for {} {}".format(req_id, traceback.format_exc()))
-
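
The function above is fire-and-forget: it returns nothing to its caller and instead POSTs either the placement solution or an error body to the callback URL carried in the request. A sketch of how such a worker could be dispatched off the request thread; the threading shown here is illustrative, not the application's actual scheduler:

    from threading import Thread

    def dispatch_placement(request_json, policies, osdf_config):
        # Run the optimization asynchronously so the HTTP handler can return
        # 202 Accepted immediately; the worker calls back with the result.
        worker = Thread(target=process_placement_opt,
                        args=(request_json, policies, osdf_config))
        worker.daemon = True
        worker.start()
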
diff --git a/osdf/optimizers/placementopt/conductor/translation.py b/osdf/optimizers/placementopt/conductor/translation.py
deleted file mode 100644
index d856a22..0000000
--- a/osdf/optimizers/placementopt/conductor/translation.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2015-2017 AT&T Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-import copy
-import json
-import yaml
-
-from osdf.utils.data_conversion import text_to_symbol
-from osdf.utils.programming_utils import dot_notation
-
-policy_config_mapping = yaml.load(open('config/has_config.yaml')).get('policy_config_mapping')
-
-
-def get_opt_query_data(req_json, policies):
- """
- Fetch service and order specific details from the requestParameters field of a request.
- :param req_json: the incoming request JSON
- :param policies: A set of policies
- :return: A dictionary with service and order-specific attributes.
- """
- req_param_dict = {}
- if 'requestParameters' in req_json["placementInfo"]:
- req_params = req_json["placementInfo"]["requestParameters"]
- for policy in policies:
- for queryProp in policy['content']['queryProperties']:
- attr_val = queryProp['value'] if 'value' in queryProp and queryProp['value'] != "" \
- else dot_notation(req_params, queryProp['attribute_location'])
- if attr_val is not None:
- req_param_dict.update({queryProp['attribute']: attr_val})
- return req_param_dict
-
-
-def gen_optimization_policy(vnf_list, optimization_policy):
- """Generate optimization policy details to pass to Conductor
- :param vnf_list: List of VNFs to be used in the placement request
- :param optimization_policy: optimization objective policy information provided in the incoming request
- :return: List of optimization objective policies in a format required by Conductor
- """
- optimization_policy_list = []
- for policy in optimization_policy:
- content = policy['content']
- parameter_list = []
- parameters = ["cloud_version", "hpa_score"]
-
- for attr in content['objectiveParameter']['parameterAttributes']:
- parameter = attr['parameter'] if attr['parameter'] in parameters else attr['parameter']+"_between"
- vnfs = get_matching_vnfs(attr['resources'], vnf_list)
- for vnf in vnfs:
- value = [vnf] if attr['parameter'] in parameters else [attr['customerLocationInfo'], vnf]
- parameter_list.append({
- attr['operator']: [attr['weight'], {parameter: value}]
- })
-
- optimization_policy_list.append({
- content['objective']: {content['objectiveParameter']['operator']: parameter_list}
- })
- return optimization_policy_list
-
-
-def get_matching_vnfs(resources, vnf_list, match_type="intersection"):
- """Get a list of matching VNFs from the list of resources
- :param resources:
- :param vnf_list: List of VNFs to be used in the placement request
- :param match_type: "intersection" or "all" or "any" (any => send all_vnfs if there is any intersection)
- :return: List of matching VNFs
- """
- resources_lcase = [x.lower() for x in resources]
- if match_type == "all": # don't bother with any comparisons
- return resources if set(resources_lcase) <= set(vnf_list) else None
- common_vnfs = set(vnf_list) & set(resources_lcase)
- common_resources = [x for x in resources if x.lower() in common_vnfs]
- if match_type == "intersection": # specifically requested intersection
- return list(common_resources)
- return resources if common_vnfs else None # "any" match => all resources to be returned
-
-
-def gen_policy_instance(vnf_list, resource_policy, match_type="intersection", rtype=None):
- """Generate a list of policies
- :param vnf_list: List of VNFs to be used in the placement request
- :param resource_policy: policy for this specific resource
- :param match_type: How to match the vnf_names with the vnf_list (intersection or "any")
- intersection => return intersection; "any" implies return all vnf_names if intersection is not null
- :param rtype: resource type (e.g. resourceRegionProperty or resourceInstanceProperty)
- None => no controller information added to the policy specification to Conductor
- :return: resource policy list in a format required by Conductor
- """
- resource_policy_list = []
- related_policies = []
- for policy in resource_policy:
- pc = policy['content']
- demands = get_matching_vnfs(pc['resources'], vnf_list, match_type=match_type)
- resource = {pc['identity']: {'type': pc['policyType'], 'demands': demands}}
-
- if rtype:
- resource[pc['identity']]['properties'] = {'controller': pc[rtype]['controller'],
- 'request': json.loads(pc[rtype]['request'])}
- if demands and len(demands) != 0:
- resource_policy_list.append(resource)
- related_policies.append(policy)
- return resource_policy_list, related_policies
-
-
-def gen_resource_instance_policy(vnf_list, resource_instance_policy):
- """Get policies governing resource instances in order to populate the Conductor API call"""
- cur_policies, _ = gen_policy_instance(vnf_list, resource_instance_policy, rtype='resourceInstanceProperty')
- return cur_policies
-
-
-def gen_resource_region_policy(vnf_list, resource_region_policy):
- """Get policies governing resource region in order to populate the Conductor API call"""
- cur_policies, _ = gen_policy_instance(vnf_list, resource_region_policy, rtype='resourceRegionProperty')
- return cur_policies
-
-
-def gen_inventory_group_policy(vnf_list, inventory_group_policy):
- """Get policies governing inventory group in order to populate the Conductor API call"""
- cur_policies, _ = gen_policy_instance(vnf_list, inventory_group_policy, rtype=None)
- return cur_policies
-
-
-def gen_reservation_policy(vnf_list, reservation_policy):
- """Get policies governing resource instances in order to populate the Conductor API call"""
- cur_policies, _ = gen_policy_instance(vnf_list, reservation_policy, rtype='instanceReservationProperty')
- return cur_policies
-
-
-def gen_distance_to_location_policy(vnf_list, distance_to_location_policy):
- """Get policies governing distance-to-location for VNFs in order to populate the Conductor API call"""
- cur_policies, related_policies = gen_policy_instance(vnf_list, distance_to_location_policy, rtype=None)
- for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
- properties = p_main['content']['distanceProperties']
- pcp_d = properties['distance']
- p_new[p_main['content']['identity']]['properties'] = {
- 'distance': pcp_d['operator'] + " " + pcp_d['value'].lower() + " " + pcp_d['unit'].lower(),
- 'location': properties['locationInfo']
- }
- return cur_policies
-
-
-def gen_attribute_policy(vnf_list, attribute_policy):
- """Get policies governing attributes of VNFs in order to populate the Conductor API call"""
- cur_policies, related_policies = gen_policy_instance(vnf_list, attribute_policy, rtype=None)
- for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
- properties = p_main['content']['cloudAttributeProperty']
- attribute_mapping = policy_config_mapping['attributes'] # wanted attributes and mapping
- p_new[p_main['content']['identity']]['properties'] = {
- 'evaluate': dict((k, properties.get(attribute_mapping.get(k))) for k in attribute_mapping.keys())
- }
- return cur_policies # cur_policies gets updated in place...
-
-
-def gen_zone_policy(vnf_list, zone_policy):
- """Get zone policies in order to populate the Conductor API call"""
- cur_policies, related_policies = gen_policy_instance(vnf_list, zone_policy, match_type="all", rtype=None)
- for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
- pmz = p_main['content']['affinityProperty']
- p_new[p_main['content']['identity']]['properties'] = {'category': pmz['category'], 'qualifier': pmz['qualifier']}
- return cur_policies
-
-
-def gen_capacity_policy(vnf_list, capacity_policy):
- """Get zone policies in order to populate the Conductor API call"""
- cur_policies, related_policies = gen_policy_instance(vnf_list, capacity_policy, rtype=None)
- for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
- pmz = p_main['content']['capacityProperty']
- p_new[p_main['content']['identity']]['properties'] = \
- {"controller": pmz['controller'], 'request': json.loads(pmz['request'])}
- return cur_policies
-
-
-def gen_hpa_policy(vnf_list, hpa_policy):
- """Get zone policies in order to populate the Conductor API call"""
- cur_policies, related_policies = gen_policy_instance(vnf_list, hpa_policy, rtype=None)
- for p_new, p_main in zip(cur_policies, related_policies): # add additional fields to each policy
- p_new[p_main['content']['identity']]['properties'] = {'evaluate': p_main['content']['flavorFeatures']}
- return cur_policies
-
-
-def get_augmented_policy_attributes(policy_property, demand):
- """Get policy attributes and augment them using policy_config_mapping and demand information"""
- attributes = copy.copy(policy_property['attributes'])
- remapping = policy_config_mapping['remapping']
- extra = dict((x, demand['resourceModelInfo'][remapping[x]]) for x in attributes if x in remapping)
- attributes.update(extra)
- return attributes
-
-
-def get_candidates_demands(demand):
- """Get demands related to candidates; e.g. excluded/required"""
- res = {}
- for k, v in policy_config_mapping['candidates'].items():
- if k not in demand:
- continue
- res[v] = [{'inventory_type': x['candidateType'], 'candidate_id': x['candidates']} for x in demand[k]]
- return res
-
-
-def get_policy_properties(demand, policies):
- """Get policy_properties for cases where there is a match with the demand"""
- for policy in policies:
- policy_demands = set([x.lower() for x in policy['content'].get('resources', [])])
- if demand['resourceModuleName'].lower() not in policy_demands:
- continue # no match for this policy
- for policy_property in policy['content']['vnfProperties']:
- yield policy_property
-
-
-def get_demand_properties(demand, policies):
- """Get list demand properties objects (named tuples) from policy"""
- demand_properties = []
- for policy_property in get_policy_properties(demand, policies):
- prop = dict(inventory_provider=policy_property['inventoryProvider'],
- inventory_type=policy_property['inventoryType'],
- service_type=demand['serviceResourceId'])
- prop['attributes'] = dict()
- prop['attributes'].update({'global-customer-id': policy_property['customerId']}
- if policy_property['customerId'] else {})
- prop['attributes'].update({'model-invariant-id': demand['resourceModelInfo']['modelInvariantId']}
- if demand['resourceModelInfo']['modelInvariantId'] else {})
- prop['attributes'].update({'model-version-id': demand['resourceModelInfo']['modelVersionId']}
- if demand['resourceModelInfo']['modelVersionId'] else {})
- prop['attributes'].update({'equipment-role': policy_property['equipmentRole']}
- if policy_property['equipmentRole'] else {})
- prop.update(get_candidates_demands(demand))
- demand_properties.append(prop)
- return demand_properties
-
-
-def gen_demands(req_json, vnf_policies):
- """Generate list of demands based on request and VNF policies
- :param req_json: Request object from the client (e.g. MSO)
- :param vnf_policies: Policies associated with demand resources (e.g. from grouped_policies['vnfPolicy'])
- :return: list of demand parameters to populate the Conductor API call
- """
- demand_dictionary = {}
- for demand in req_json['placementInfo']['placementDemands']:
- prop = get_demand_properties(demand, vnf_policies)
- if len(prop) > 0:
- demand_dictionary.update({demand['resourceModuleName']: prop})
- return demand_dictionary
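
Since translation.py is almost entirely dictionary reshaping, a concrete input/output pair makes the contract clearer. A hand-built sketch with invented field values, showing roughly what gen_demands produces from one placement demand and one matching vnfPolicy:

    request_json = {"placementInfo": {"placementDemands": [{
        "resourceModuleName": "vGMuxInfra",
        "serviceResourceId": "svc-123",
        "resourceModelInfo": {"modelInvariantId": "inv-1",
                              "modelVersionId": "ver-1"}}]}}
    vnf_policies = [{"content": {
        "resources": ["vGMuxInfra"],
        "vnfProperties": [{"inventoryProvider": "aai",
                           "inventoryType": "cloud",
                           "customerId": "cust-1",
                           "equipmentRole": ""}]}}]
    # gen_demands(request_json, vnf_policies) then yields roughly:
    # {"vGMuxInfra": [{"inventory_provider": "aai",
    #                  "inventory_type": "cloud",
    #                  "service_type": "svc-123",
    #                  "attributes": {"global-customer-id": "cust-1",
    #                                 "model-invariant-id": "inv-1",
    #                                 "model-version-id": "ver-1"}}]}
    # (the empty equipmentRole is skipped, and no excluded/required
    # candidates are attached because the demand carries none)
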
diff --git a/osdf/optimizers/routeopt/__init__.py b/osdf/optimizers/routeopt/__init__.py
deleted file mode 100644
index c235f2a..0000000
--- a/osdf/optimizers/routeopt/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2018 Huawei Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
diff --git a/osdf/optimizers/routeopt/simple_route_opt.py b/osdf/optimizers/routeopt/simple_route_opt.py
deleted file mode 100644
index 060e1ed..0000000
--- a/osdf/optimizers/routeopt/simple_route_opt.py
+++ /dev/null
@@ -1,150 +0,0 @@
-# -------------------------------------------------------------------------
-# Copyright (c) 2018 Huawei Intellectual Property
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# -------------------------------------------------------------------------
-#
-
-import requests
-from requests.auth import HTTPBasicAuth
-
-
-class RouteOpt:
-
- """
- This values will need to deleted..
- only added for the debug purpose
- """
- # AAI host (DNS name) and standard port.
- # TODO: read the port from the configuration and add to DNS
- aai_host = "https://aai.api.simpledemo.onap.org:8443"
- aai_headers = {
- "X-TransactionId": "9999",
- "X-FromAppId": "OOF",
- "Accept": "application/json",
- "Content-Type": "application/json",
- "Real-Time": "true"
- }
-
- def isCrossONAPLink(self, logical_link):
- """
- This method checks if cross link is cross onap
- :param logical_link:
- :return:
- """
- for relationship in logical_link["relationship-list"]["relationship"]:
- if relationship["related-to"] == "ext-aai-network":
- return True
- return False
-
- def getRoute(self, request):
- """
- This method checks
- :param logical_link:
- :return:
- """
-
- src_access_node_id = request["srcPort"]["src-access-node-id"]
- dst_access_node_id = request["dstPort"]["dst-access-node-id"]
-
-
- ingress_p_interface = None
- egress_p_interface = None
-
- # when source and destination are on the same access node, return a single VPN entry with the destination LTP
- if src_access_node_id == dst_access_node_id:
- data = '{'\
- '"vpns":['\
- '{'\
- '"access-topology-id": "' + request["srcPort"]["src-access-topology-id"] + '",'\
- '"access-client-id": "' + request["srcPort"]["src-access-client-id"] + '",'\
- '"access-provider-id": "' + request["srcPort"]["src-access-provider-id"]+ '",'\
- '"access-node-id": "' + request["srcPort"]["src-access-node-id"]+ '",'\
- '"src-access-ltp-id": "' + request["srcPort"]["src-access-ltp-id"]+ '",'\
- '"dst-access-ltp-id": "' + request["dstPort"]["dst-access-ltp-id"] +'"'\
- '}'\
- ']'\
- '}'
- return data
- else:
- logical_links = self.get_logical_links()
-
- # take the logical link where both the p-interface in same onap
- if logical_links is not None:
- for logical_link in logical_links.get("logical-link"):
- if not self.isCrossONAPLink(logical_link):
- # link is in local ONAP
- for relationship in logical_link["relationship-list"]["relationship"]:
- if relationship["related-to"] == "p-interface":
- if src_access_node_id in relationship["related-link"]:
- i_interface = relationship["related-link"].split("/")[-1]
- ingress_p_interface = i_interface.split("-")[-1]
- if dst_access_node_id in relationship["related-link"]:
- e_interface = relationship["related-link"].split("/")[-1]
- egress_p_interface = e_interface.split("-")[-1]
- data = '{'\
- '"vpns":['\
- '{'\
- '"access-topology-id": "' + request["srcPort"]["src-access-topology-id"] + '",'\
- '"access-client-id": "' + request["srcPort"]["src-access-client-id"] + '",'\
- '"access-provider-id": "' + request["srcPort"]["src-access-provider-id"]+ '",'\
- '"access-node-id": "' + request["srcPort"]["src-access-node-id"]+ '",'\
- '"src-access-ltp-id": "' + request["srcPort"]["src-access-ltp-id"]+ '",'\
- '"dst-access-ltp-id": "' + ingress_p_interface +'"'\
- '},'\
- '{' \
- '"access-topology-id": "' + request["dstPort"]["dst-access-topology-id"] + '",' \
- '"access-topology-id": "' + request["dstPort"]["dst-access-topology-id"]+ '",' \
- '"access-provider-id": "' + request["dstPort"]["dst-access-provider-id"]+ '",' \
- '"access-node-id": "' + request["dstPort"]["dst-access-node-id"]+ '",' \
- '"src-access-ltp-id": "' + egress_p_interface + '",' \
- '"dst-access-ltp-id": "' + request["dstPort"]["dst-access-ltp-id"] + '"' \
- '}'\
- ']'\
- '}'
- return data
-
-
- def get_pinterface(self, url):
- """
- This method returns details for a p-interface
- :param url: AAI resource path of the p-interface
- :return: details of the p-interface
- """
- aai_req_url = self.aai_host + url
- response = requests.get(aai_req_url,
- headers=self.aai_headers,
- auth=HTTPBasicAuth("AAI", "AAI"),
- verify=False)
-
- if response.status_code == 200:
- return response.json()
-
-
- def get_logical_links(self):
- """
- This method returns the list of all logical links that are operationally up
- from /aai/v13/network/logical-links?operational-status=up
- :return: logical-links[]
- """
- logical_link_url = "/aai/v13/network/logical-links?operational-status=up"
- aai_req_url = self.aai_host + logical_link_url
-
- response = requests.get(aai_req_url,
- headers=self.aai_headers,
- auth=HTTPBasicAuth("AAI", "AAI"),
- verify=False)
-
- logical_links = None
- if response.status_code == 200:
- return response.json() \ No newline at end of file
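
The hand-concatenated JSON strings above are fragile; the duplicated "access-topology-id" key in the destination block (corrected in this rendering to "access-client-id") is exactly the kind of slip that dict construction plus json.dumps avoids. A sketch of the same same-domain response built that way, assuming the request shape used above:

    import json

    def same_domain_route(request):
        src, dst = request["srcPort"], request["dstPort"]
        vpn = {
            "access-topology-id": src["src-access-topology-id"],
            "access-client-id": src["src-access-client-id"],
            "access-provider-id": src["src-access-provider-id"],
            "access-node-id": src["src-access-node-id"],
            "src-access-ltp-id": src["src-access-ltp-id"],
            "dst-access-ltp-id": dst["dst-access-ltp-id"],
        }
        return json.dumps({"vpns": [vpn]})
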
diff --git a/osdf/templates/cms_opt_request.jsont b/osdf/templates/cms_opt_request.jsont
deleted file mode 100755
index 006562b..0000000
--- a/osdf/templates/cms_opt_request.jsont
+++ /dev/null
@@ -1,35 +0,0 @@
-{
- "transaction_id": "{{ transaction_id }}",
- "request_id": "{{ request_id }}",
- "start_date" : "{{ start_time }}",
- "end_date" : "{{ end_time }}",
- "change_elements" : {{ json.dumps(change_elements) }},
- "constraints" : [
- {
- "type" : "general_concurrency_limit",
- "parameters": [{{ concurrency_limit }}]
- },
-
- {
- "type" : "allowed_forbidden_periods",
- "parameters" : {{ json.dumps(allowed_periods) }}
- }
-
- {% if spatial_conflicts is defined and spatial_conflicts|length > 0 %}
- ,
- {
- "type" : "spatial_conflict",
- "parameters": {{ json.dumps(spatial_conflicts) }}
- }
- {% endif %}
-
-
- {% if critical_periods is defined and critical_periods|length > 0 %}
- ,
- {
- "type" : "critical_periods",
- "parameters": {{ json.dumps(critical_periods) }}
- }
- {% endif %}
- ]
-}
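
These .jsont files are Jinja2 templates that call json.dumps inline, so the renderer must pass the json module into the template context, exactly as conductor_no_solution_processor did above. A minimal rendering sketch for this request template; all values are placeholders:

    import json

    from jinja2 import Template

    templ = Template(open("osdf/templates/cms_opt_request.jsont").read())
    body = templ.render(transaction_id="t-1", request_id="r-1",
                        start_time="2020-01-01T00:00:00Z",
                        end_time="2020-01-02T00:00:00Z",
                        change_elements=[], concurrency_limit=1,
                        allowed_periods=[], json=json)
    payload = json.loads(body)  # fails fast if the rendered text is not valid JSON
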
diff --git a/osdf/templates/cms_opt_request.jsont_1707_v1 b/osdf/templates/cms_opt_request.jsont_1707_v1
deleted file mode 100755
index 75ecbe5..0000000
--- a/osdf/templates/cms_opt_request.jsont_1707_v1
+++ /dev/null
@@ -1,67 +0,0 @@
-{
- "transaction_id": "{{ transaction_id }}",
- "request_id": "{{ request_id }}",
- "start_date" : "{{ start_time }}",
- "end_date" : "{{ end_time }}",
-
- "change_elements" : [
- {% set comma = joiner(",") -%}
- {% for element in all_upgrades -%} {{ comma() }}
- {
- "id" : "{{ element.id }}",
- "failback_duration": {{ element.failback_duration }},
- {% if element.group_id -%}
- "group_id": "{{ element.group_id }}",
- {% endif %}
- {% if element.scheduled_on -%}
- "scheduled_on": "{{ element.scheduled_on }}",
- {% endif %}
- "duration": {{ element.duration }}
- }
- {% endfor -%}
- ],
-
- "constraints" : [
- {
- "type" : "general_concurrency_limit",
- "parameters": [{{ concurrency_limit }}]
- },
-
- {
- "type" : "allowed_forbidden_periods",
- "parameters" : [
- {% set comma = joiner(",") -%}
- {% for idx in all_pending -%} {{ comma() }}
- { "id" : "{{ idx.id }}",
- "allowed_periods": [ {{ allowed_periods }}]
- }
- {% endfor -%}
- ]
- },
- {
- "type" : "spatial_conflict",
- "parameters": [
- {% set comma = joiner(",") -%}
- {% for pserver, vce_list in vce_pserver_mapping.items() -%} {{ comma() }}
- {
- "spatial_entity": "{{ pserver }}",
- "affected_entities": {{ vce_list }}
- }
- {% endfor -%}
- ]
- },
-
- {
- "type" : "critical_periods",
- "parameters": [
- {% set comma = joiner(",") -%}
- {% for element, conflict_period in conflict_interval.items() -%} {{ comma() }}
- {
- "id" : "{{ element }}",
- "periods": [{{ conflict_period }}]
- }
- {% endfor -%}
- ]
- }
- ]
-}
diff --git a/osdf/templates/cms_opt_request_1702.jsont b/osdf/templates/cms_opt_request_1702.jsont
deleted file mode 100755
index bcafa45..0000000
--- a/osdf/templates/cms_opt_request_1702.jsont
+++ /dev/null
@@ -1,63 +0,0 @@
-{
- "request_id": "{{ request_id }}",
- "startdate" : "{{ start_time }}",
- "enddate" : "{{ end_time }}",
-
- "change_elements" : [
-{% set comma = joiner(",") -%}
-{% for element in all_upgrades -%} {{ comma() }}
- { "id" : "{{ element.id }}",
- {% if element.scheduled -%} "scheduled_on": "{{ element.scheduled }}", {% endif -%}
- "duration": {{ element.duration }}, {# duration in seconds #}
- "failback_duration": {{ element.failback_duration }}, {# duration in seconds #}
- "group_id": {{ element.group_id }}, {# duration in seconds #}
- }{% endfor -%}
- ],
-
- "constraints" : [
- {
- "type" : "general_concurrency_limit",
- "parameters" : [ {{ general_concurrency_limit }} ]
- },
-
- {
- "type" : "allowed_forbidden_periods",
- "parameters" : [
-{% set comma = joiner(",") -%}
-{% for idx in all_pending -%} {{ comma() }}
- { "id" : "{{ idx.id }}",
- "allowed_periods": [ {% set comma2 = joiner(",") -%}
- {% for period in allowed_periods -%} {{ comma2() }} [{{ json.dumps(period[0]) }}, {{ json.dumps(period[1]) }}]
- {% endfor -%} ] }{% endfor -%}
- ]
- }
-
-{% if p_v_conflict is defined and p_v_conflict|length > 0 %}
- ,
- {
- "type" : "critical_periods",
- "description" : "Simultaneous upgrades",
- "parameters" : [
-{% set comma2 = joiner(",") -%}
-{% for element in p_v_conflict -%} {{ comma2() }}
- {
- "id" : "{{ element[0] }}",
- "periods" : [{{ json.dumps(element[0]) }}, {{ json.dumps(element[1]) }}]
- }
-{% endfor -%}
-{% endif %}
-
-{% for pserver, vce_group in grouped_vces.items() -%} {{ comma() }}
- ,
- {
- "id" : "{{ pserver }}",
- "name" : "VCE's on pserver {{ pserver }}",
- "description": "Only some VCEs on a pserver can be upgraded at a time",
- "max_num_upgrades" : {{ max_num_upgrades(vce_group) }},
- "upgrades" : {{ json.dumps(vce_group) }}
- }
-{% endfor -%}
- ]
- }
- ]
-}
diff --git a/osdf/templates/cms_opt_response.jsont b/osdf/templates/cms_opt_response.jsont
deleted file mode 100644
index a8817df..0000000
--- a/osdf/templates/cms_opt_response.jsont
+++ /dev/null
@@ -1,8 +0,0 @@
-{
- "transactionId": "{{transaction_id}}",
- "scheduleId":"{{schedule_id}}",
- "requestState": "{{request_state}}",
- "status": "{{status}}",
- "description": "{{description}}",
- "schedule": {{schedule}}
-} \ No newline at end of file
diff --git a/osdf/templates/license_opt_request.jsont b/osdf/templates/license_opt_request.jsont
deleted file mode 100644
index 7baa759..0000000
--- a/osdf/templates/license_opt_request.jsont
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "transactionId": "{{transaction_id}}",
- "requestId": "{{request_id}}",
- "partNumber": "{{part_number}}",
- "licenseModel" : "{{artifact}}"
-} \ No newline at end of file
diff --git a/osdf/templates/plc_opt_request.jsont b/osdf/templates/plc_opt_request.jsont
deleted file mode 100755
index cd78b3e..0000000
--- a/osdf/templates/plc_opt_request.jsont
+++ /dev/null
@@ -1,142 +0,0 @@
-{
- "name": "{{ name }}",
- "files": "{{ files }}",
- "timeout": "{{ timeout }}",
- "limit": "{{ limit }}",
- "template": {
- "CUST_ID": "{{ cust_id }}",
- "E2EVPNKEY": "{{ e2evpnkey }}",
- "UCPEHOST": "{{ ucpehost }}",
- "WAN_PORT1_UP": "{{ wan_port1_up }}",
- "WAN_PORT1_DOWN": "{{ wan_port1_down }}",
- "EFFECTIVE_BANDWIDTH": "{{ effective_bandwidth }}",
- "SERVICE_INST": "{{ service_inst }}",
- "locations": {
- "customer_loc": {
- "host_name": "{{ ucpehost }}"
- }
- },
- "demands": [
- {% set comma=joiner(",") %}
- {% for demand in demand_list %} {{ comma() }}
- {
- "{{ demand.vnf_name }}": [
- {% set comma2=joiner(",") %}
- {% for property in demand.property %}
- "inventory_provider": {{ property.inventory_provider }},
- "inventory_type": {{ property.inventory_type }},
- "service_type": {{ property.service_type }},
- "customer_id": {{ property.customer_id }},
- "candidate_id": {{ property.candidate_id }}
- {% endfor %}
- ]
- }
- {% endfor %}
- ],
- "constraints": {
- {% set comma_main=joiner(",") %}
-
- {% if attribute_policy_list %} {{ comma_main() }} {% endif %}
- {% set comma=joiner(",") %}
- {% for attribute in attribute_policy_list %} {{ comma() }}
- "{{ attribute['identity'] }}": {
- "type": {{ attribute['type'] }},
- "demands": {{ attribute['demands'] }},
- "properties": {
- "evaluate": {
- "hypervisor": {{ attribute['property']['hypervisor'] }},
- "aic_version": {{ attribute['property']['aicVersion'] }},
- "aic_type": {{ attribute['property']['aicType'] }},
- "dataplane": {{ attribute['property']['datatype'] }},
- "network_roles": {{ attribute['property']['networkRoles'] }},
- "complex": {{ attribute['property']['complex'] }}
- }
- }
- }
- {% endfor %}
-
- {% if distance_to_location_policy_list %} {{ comma_main() }} {% endif %}
- {% set comma=joiner(",") %}
- {% for distance_location in distance_to_location_policy_list %} {{ comma() }}
- "{{ distance_location['identity'] }}": {
- "type": {{ distance_location['type'] }},
- "demands": {{ distance_location['demands'] }},
- "properties": {
- "distance": {{ distance_location['property']['distance'] }},
- "location": {{ distance_location['property']['location'] }}
- }
- }
- {% endfor %}
-
- {% if inventory_policy_list %} {{ comma_main() }} {% endif %}
- {% set comma=joiner(",") %}
- {% for inventory in inventory_policy_list %} {{ comma() }}
- "{{ inventory['identity'] }}": {
- "type": {{ inventory['type'] }},
- "demands": {{ inventory['demands'] }}
- }
- {% endfor %}
-
- {% if resource_instance_policy_list %} {{ comma_main() }} {% endif %}
- {% set comma=joiner(",") %}
- {% for resource_instance in resource_instance_policy_list %} {{ comma() }}
- "{{ resource_instance['identity'] }}": {
- "type": {{ resource_instance['type'] }},
- "demands": {{ resource_instance['demands'] }},
- "properties": {
- "controller": {{ resource_instance['property']['controller'] }},
- "request": {{ resource_instance['property']['request'] }}
- }
- }
- {% endfor %}
-
- {% if resource_region_policy_list %} {{ comma_main() }} {% endif %}
- {% set comma=joiner(",") %}
- {% for resource_region in resource_region_policy_list %} {{ comma() }}
- "{{ resource_region['identity'] }}": {
- "type": {{ resource_region['type'] }},
- "demands": {{ resource_region['demands'] }},
- "properties": {
- "controller": {{ resource_region['property']['controller'] }},
- "request": {{ resource_region['property']['request'] }}
- }
- }
- {% endfor %}
-
- {% if zone_policy_list %} {{ comma_main() }} {% endif %}
- {% set comma=joiner(",") %}
- {% for zone in zone_policy_list %} {{ comma() }}
- "{{ zone['identity'] }}": {
- "type": {{ zone['type'] }},
- "demands": {{ zone['demands'] }},
- "properties": {
- "qualifier": {{ resource_region['property']['qualifier'] }},
- "category": {{ resource_region['property']['category'] }}
- }
- }
- {% endfor %}
-
- {% if optimization_policy_list %} {{ comma_main() }} {% endif %}
- {% set comma=joiner(",") %}
- {% for optimization in optimization_policy_list %} {{ comma() }}
- "optimization" : {
- {{ optimization['objective'] }}: {
- "sum": [
- {% set comma2=joiner(",") %}
- {% for parameter in optimization['parameter'] %} {{ comma2() }}
- {
- "product": [
- {{ parameter['weight'] }},
- {
- "distance_between": [{{ parameter['customerLocation'] }},{{ parameter['demand'] }}]
- }
- ]
- }
- {% endfor %}
- ]
- }
- }
- {% endfor %}
- }
- }
-} \ No newline at end of file
diff --git a/osdf/templates/plc_opt_response.jsont b/osdf/templates/plc_opt_response.jsont
deleted file mode 100755
index e5709e7..0000000
--- a/osdf/templates/plc_opt_response.jsont
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "requestId": "{{requestId}}",
- "transactionId": "{{transacationId}}",
- "requestStatus": "{{requestStatus}}",
- "statusMessage": "{{statusMessage}}"
- "solutions": {
- "placementSolutions": {{ json.dumps(composite_solutions) }},
- "licenseSolutions":{{ json.dumps(license_solutions) }}
- }
-}
diff --git a/osdf/templates/policy_request.jsont b/osdf/templates/policy_request.jsont
deleted file mode 100755
index 3a9e201..0000000
--- a/osdf/templates/policy_request.jsont
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "policyName": "{{policy_name}}" {# we currently only support query by policy name only -- policyName #}
-}
diff --git a/osdf/templates/test_cms_nb_req_from_client.jsont b/osdf/templates/test_cms_nb_req_from_client.jsont
deleted file mode 100755
index a60c8ff..0000000
--- a/osdf/templates/test_cms_nb_req_from_client.jsont
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "schedulingInfo": {
- "change_management_id": "{{ change_management_id }}",
- "start_time": "{{ start_time }}",
- "end_time": "{{ end_time }}",
- "policy_id": {{ json.dumps(policy_id) }}, {# a list of policy Ids #}
- "service_type": "{{ service_type }}",
- "workflow_type": "{{ workflow_type }}",
- "upgrades": {{ json.dumps(upgrades) }} {# a list of node Ids #}
- },
- "requestInfo": {
- "requestId": "{{ requestId }}",
- "sourceId": "{{ sourceId }}",
- "optimizer": "{{ optimizer }}",
- "numSolutions": "{{ numSolutions }}",
- "callbackUrl" : "{{ callbackUrl }}"
- }
-}
-
diff --git a/osdf/templates/test_plc_nb_req_from_client.jsont b/osdf/templates/test_plc_nb_req_from_client.jsont
deleted file mode 100755
index 998ffb3..0000000
--- a/osdf/templates/test_plc_nb_req_from_client.jsont
+++ /dev/null
@@ -1,52 +0,0 @@
-{
- "requestInfo": {
- "requestId": "{{requestId}}",
- "sourceId": "{{sourceId}}",
- "optimizer": "{{optimizer}}",
- "numSolutions": {{numSolutions}},
- "timeout": {{timeout}},
- "callbackUrl" : "{{callbackUrl}}"
- },
- "placementInfo": {
- "modelInfo": {
- "modelType": "{{modelType}}",
- "modelInvariant": "{{modelInvariantId}}",
- "modelVersionId": "{{modelVersionId}}",
- "modelName": "{{modelName}}",
- "modelVersion": "{{modelVersion}}",
- "modelCustomizationId": "{{modelCustomizationId}}"
- },
- "subscriberInfo": {
- "globalSubscriberId": "{{globalSubscriberId}}",
- "subscriberName": "{{subscriberName}}",
- "subscriberCommonSiteId": "{{subscriberCommonSiteId}}",
- "ucpeHostName": "{{ucpeHostName}}"
- },
- "policyId": {{json.dumps(policyId)}},
- "vnfInfo": {
- "vnfType": "{{vnfType}}",
- "vnfPartNumber": "{{vnfPartNumber}}",
- "nominalThroughput": "{{nominalThroughput}}",
- "vnfSoftwareVersion": "{{vnfSoftwareVersion}}",
- "vnfManagementOption": "{{vnfManagementOption}}"
- },
- "vpnInfo": {
- "vpnId": "{{vpnId}}",
- "pvcId": "{{pvcId}}"
- },
- "serviceInfo": {
- "dhvServiceInfo": {
- "serviceInstanceId": "{{serviceInstanceId}}",
- "serviceType": "{{serviceType}}",
- "e2evpnkey": "{{e2evpnkey}}",
- "dhvSiteEffectiveTransportBandwidth": {{dhvSiteEffectiveTransportBandwidth}},
- "dhvIPSecTransportBandwidthUp": {{dhvIPSecTransportBandwidthUp}},
- "dhvIPSecTransportBandwidthDown": {{dhvIPSecTransportBandwidthDown}},
- "dhvIPSec2TransportBandwidthUp": {{dhvIPSec2TransportBandwidthUp}},
- "dhvIPSec2TransportBandwidthDown": {{dhvIPSec2TransportBandwidthDown}},
- "dhvVendorName": "{{dhvVendorName}}"
- }
- },
- "demandInfo": {{json.dumps(demandInfo)}}
- }
-}
diff --git a/osdf/utils/cipherUtils.py b/osdf/utils/cipherUtils.py
new file mode 100644
index 0000000..169f1a1
--- /dev/null
+++ b/osdf/utils/cipherUtils.py
@@ -0,0 +1,59 @@
+#
+# -------------------------------------------------------------------------
+# Copyright (C) 2020 Wipro Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+
+from Crypto.Cipher import AES
+from osdf.config.base import osdf_config
+from Crypto.Util.Padding import unpad
+from Crypto.Util.Padding import pad
+
+
+class AESCipher(object):
+ __instance = None
+
+ @staticmethod
+ def get_instance(key=None):
+ if AESCipher.__instance is None:
+ print("Creating the singleton instance")
+ AESCipher(key)
+ return AESCipher.__instance
+
+ def __init__(self, key=None):
+ if AESCipher.__instance is not None:
+ raise Exception("This class is a singleton!")
+ else:
+ AESCipher.__instance = self
+
+ self.bs = 32
+ if key is None:
+ key = osdf_config.deployment["appkey"]
+
+ self.key = key.encode()
+
+ def encrypt(self, data):
+ data = data.encode()
+ cipher = AES.new(self.key, AES.MODE_CBC)
+ ciphered_data = cipher.encrypt(pad(data, AES.block_size))
+ enc = (cipher.iv.hex())+(ciphered_data.hex())
+ return enc
+
+ def decrypt(self, enc):
+ iv = bytes.fromhex(enc[:32])
+ ciphered_data = bytes.fromhex(enc[32:])
+ cipher = AES.new(self.key, AES.MODE_CBC, iv=iv)
+ original_data = unpad(cipher.decrypt(ciphered_data), AES.block_size).decode()
+ return original_data
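
The scheme is AES-CBC with a fresh random IV per call, hex-encoded and prepended to the hex ciphertext, so encrypting the same plaintext twice yields different tokens. A round-trip usage sketch, assuming the key (here a demo literal, not the configured appkey) is a valid AES key length of 16, 24, or 32 bytes:

    from osdf.utils.cipherUtils import AESCipher

    cipher = AESCipher.get_instance("0123456789abcdef")  # 16-byte demo key
    token = cipher.encrypt("conductorPassword")
    assert cipher.decrypt(token) == "conductorPassword"
    assert token != cipher.encrypt("conductorPassword")  # random IV each time
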
diff --git a/osdf/utils/data_conversion.py b/osdf/utils/data_conversion.py
index 2f678fa..08af3e4 100644
--- a/osdf/utils/data_conversion.py
+++ b/osdf/utils/data_conversion.py
@@ -16,15 +16,16 @@
# -------------------------------------------------------------------------
#
-import itertools
from collections import defaultdict
+import itertools
-from dateutil import tz
from dateutil.parser import parse
+from dateutil import tz
def tuples_to_multi_val_dict(kvw_tuples, colnums=(0, 1)):
"""Given a list of k,v tuples, get a dictionary of the form k -> [v1,v2,...,vn]
+
:param kvw_tuples: list of k,v,w tuples (e.g. [(k1,v1,a1), (k2,v2,a2), (k1,v3,a3), (k1,v4,a4)]
:param colnums: column numbers
:return: a dict of str:set, something like {k1: {v1, v3, v4}, k2: {v2}} or {k1: {a1, a3, a4}, k2: {a2}}
@@ -38,6 +39,7 @@ def tuples_to_multi_val_dict(kvw_tuples, colnums=(0, 1)):
def tuples_to_dict(kvw_tuples, colnums=(0, 1)):
"""Given a list of k,v tuples, get a dictionary of the form k -> v
+
:param kvw_tuples: list of k,v,w tuples (e.g. [(k1,v1,a1), (k2,v2,a2), (k3,v3,a3), (k1,v4,a4)]
:param colnums: column numbers
:return: a dict; something like {k1: v4, k2: v2, k3: v3} (note, k1 is repeated, so last val is retained)
@@ -46,13 +48,17 @@ def tuples_to_dict(kvw_tuples, colnums=(0, 1)):
def utc_time_from_ts(timestamp):
- """Return corresponding UTC timestamp for a given ISO timestamp (or anything that parse accepts)"""
+ """Return corresponding UTC timestamp for a given ISO timestamp (or anything that parse accepts)
+
+ """
return parse(timestamp).astimezone(tz.tzutc()).strftime('%Y-%m-%d %H:%M:%S')
-def list_flatten(l):
- """Flatten a complex nested list of nested lists into a flat list"""
- return itertools.chain(*[list_flatten(j) if isinstance(j, list) else [j] for j in l])
+def list_flatten(_l):
+ """Flatten a complex nested list of nested lists into a flat list
+
+ """
+ return itertools.chain(*[list_flatten(j) if isinstance(j, list) else [j] for j in _l])
text_to_symbol = {
@@ -60,3 +66,10 @@ text_to_symbol = {
'less': "<",
'equal': "="
}
+
+
+def decode_data(data):
+ """Decode bytes to string
+
+ """
+ return data.decode(encoding='utf-8') if isinstance(data, bytes) else data
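
Note that list_flatten returns a lazy itertools.chain rather than a list, so callers have to materialize it; decode_data is a straightforward bytes-to-str guard. A quick sketch:

    from osdf.utils.data_conversion import decode_data, list_flatten

    assert list(list_flatten([1, [2, [3, 4]], 5])) == [1, 2, 3, 4, 5]
    assert decode_data(b"osdf") == "osdf"
    assert decode_data("osdf") == "osdf"  # non-bytes input passes through
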
diff --git a/osdf/optimizers/licenseopt/__init__.py b/osdf/utils/file_utils.py
index 4b25e5b..b12c17d 100644
--- a/osdf/optimizers/licenseopt/__init__.py
+++ b/osdf/utils/file_utils.py
@@ -1,5 +1,5 @@
# -------------------------------------------------------------------------
-# Copyright (c) 2017-2018 AT&T Intellectual Property
+# Copyright (c) 2020 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,3 +15,20 @@
#
# -------------------------------------------------------------------------
#
+
+# File related utilities
+
+import os
+from shutil import rmtree
+
+from osdf.logging.osdf_logging import debug_log
+
+
+def delete_file_folder(p):
+ if not p:
+ return
+ debug_log.debug('Deleting folder/file {}'.format(p))
+ if os.path.isfile(p):
+ os.remove(p)
+ else:
+ rmtree(p, ignore_errors=True)
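
A quick usage sketch of the new helper: it no-ops on falsy paths, unlinks regular files, and falls back to rmtree for directories (the paths here are placeholders):

    from osdf.utils.file_utils import delete_file_folder

    delete_file_folder(None)                   # no-op
    delete_file_folder("/tmp/osdf-demo.json")  # file -> os.remove
    delete_file_folder("/tmp/osdf-demo")       # directory -> rmtree
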
diff --git a/osdf/utils/interfaces.py b/osdf/utils/interfaces.py
index a869d6d..4fa4730 100644
--- a/osdf/utils/interfaces.py
+++ b/osdf/utils/interfaces.py
@@ -20,12 +20,15 @@ import json
import requests
import yaml
-from osdf.config.base import osdf_config, creds_prefixes
-from osdf.logging.osdf_logging import MH, debug_log
+from osdf.config.base import creds_prefixes
+from osdf.config.base import osdf_config
+from osdf.logging.osdf_logging import debug_log
+from osdf.logging.osdf_logging import MH
def get_rest_client(request_json, service):
"""Get a RestClient based on request_json's callback URL and osdf_config's credentials based on service name
+
:param request_json:
:param service: so or cm
:return: rc -- RestClient
@@ -53,21 +56,23 @@ class RestClient(object):
"""Simple REST Client that supports get/post and basic auth"""
def __init__(self, userid=None, passwd=None, log_func=None, url=None, timeout=None, headers=None,
- method="POST", req_id=None):
+ method="POST", req_id=None, verify=None):
self.auth = (userid, passwd) if userid and passwd else None
self.headers = headers if headers else {}
self.method = method
self.url = url
self.log_func = log_func
- self.timeout = (30, 90) if timeout is None else timeout
+ self.timeout = (30, 120) if timeout is None else timeout
self.req_id = req_id
+ self.verify = verify
def add_headers(self, headers):
self.headers.update(headers)
def request(self, url=None, method=None, asjson=True, ok_codes=(2, ),
raw_response=False, noresponse=False, timeout=None, **kwargs):
- """
+ """Sends http request to the specified url
+
:param url: REST end point to query
:param method: GET or POST (default is None => self.method)
:param asjson: whether the expected response is in json format
@@ -83,16 +88,23 @@ class RestClient(object):
else:
debug_log.debug("Requesting URL: {} for request ID: {}".format(url or self.url, self.req_id))
+ if not url:
+ url = self.url
+ if not self.verify and url.startswith("https"):
+ verify = osdf_config.deployment["aaf_ca_certs"]
+ else:
+ verify = self.verify
+
res = requests.request(url=url or self.url, method=method or self.method,
auth=self.auth, headers=self.headers,
- timeout=timeout or self.timeout, **kwargs)
+ timeout=timeout or self.timeout, verify=verify, **kwargs)
if self.log_func:
self.log_func(MH.received_http_response(res))
res_code = str(res.status_code)
if not any(res_code.startswith(x) for x in map(str, ok_codes)):
- raise res.raise_for_status()
+ raise BaseException(res.raise_for_status())
if raw_response:
return res
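
One behavioural change above is easy to miss: when no verify value is supplied and the target URL is https, the client now falls back to the CA bundle configured at osdf_config.deployment["aaf_ca_certs"]. A usage sketch with placeholder URL and credentials:

    from osdf.utils.interfaces import RestClient

    rc = RestClient(userid="oof", passwd="secret", method="POST",
                    url="https://conductor.example.org:8091/v1/plans")
    # verify is unset, so the https URL triggers the aaf_ca_certs fallback
    plan = rc.request(json={"name": "demo-plan"})
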
diff --git a/osdf/utils/mdc_utils.py b/osdf/utils/mdc_utils.py
new file mode 100644
index 0000000..c74a161
--- /dev/null
+++ b/osdf/utils/mdc_utils.py
@@ -0,0 +1,155 @@
+# -------------------------------------------------------------------------
+# Copyright (c) 2020 AT&T Intellectual Property
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# -------------------------------------------------------------------------
+#
+
+import socket
+import threading
+import time
+import uuid
+
+from flask import g
+from onaplogging.mdcContext import MDC
+
+EMPTY = "EMPTY"
+
+DATE_FMT = '%Y-%m-%dT%H:%M:%S'
+
+
+def default_server_info():
+ """Populate server & server_ip_address MDC fields
+
+ """
+ # If not set or purposely set = None, then set default
+ if MDC.get('server') is None:
+ try:
+ server = socket.getfqdn()
+ except Exception:
+ try:
+ server = socket.gethostname()
+ except Exception:
+ server = ''
+ MDC.put('server', server)
+ if MDC.get('serverIPAddress') is None:
+ try:
+ server_ip_address = socket.gethostbyname(MDC.get('server'))
+ except Exception:
+ server_ip_address = ""
+ MDC.put('serverIPAddress', server_ip_address)
+
+
+def default_mdc():
+ """Populate default MDC fields
+
+ """
+ MDC.put('instanceUUID', generate_uuid())
+ MDC.put('InvocationID', generate_uuid())
+ MDC.put('serviceName', 'OOF_OSDF')
+ MDC.put('threadID', threading.currentThread().getName())
+ default_server_info()
+ MDC.put('requestID', generate_uuid())
+ MDC.put('partnerName', 'N/A')
+ MDC.put('entryTimestamp', get_time())
+
+
+def mdc_from_json(request_json):
+ """Populate MDC fields given a request in json format
+
+ """
+ if MDC.get("instanceUUID") is None:
+ default_mdc()
+ MDC.put('requestID', get_request_id(request_json))
+ MDC.put('partnerName', get_partner_name(request_json))
+
+
+def populate_default_mdc(request):
+ """Populate default MDC fields given the request
+
+ """
+ if MDC.get("instanceUUID") is None:
+ default_mdc()
+ g.request_start = time.process_time()
+ g.empty_value = "EMPTY"
+ g.request_id = MDC.get("requestID")
+ MDC.put('serviceName', request.path)
+ MDC.put('IPAddress', request.headers.get('X-Forwarded-For', request.remote_addr))
+
+
+def populate_mdc(request):
+ """Populate MDC fields from the request headers
+
+ """
+ populate_default_mdc(request)
+ req_id = request.headers.get('X-ONAP-RequestID', g.empty_value)
+ request_json = request.get_json()
+ if req_id == g.empty_value:
+ req_id = get_request_id(request_json)
+ g.request_id = req_id
+ MDC.put('requestID', req_id)
+ MDC.put('partnerName', get_partner_name(request_json))
+
+
+def get_request_id(request_json):
+ """Get the request_id from the request
+
+ """
+ request_id = request_json['requestInfo'].get('requestId')
+ if not request_id:
+ request_id = request_json['requestInfo'].get('requestID')
+ return request_id
+
+
+def generate_uuid():
+ """Generate an unique uuid
+
+ """
+ return f'{uuid.uuid1()}'
+
+
+def get_partner_name(request_json):
+ """Get the partnerName
+
+ """
+ partner_name = EMPTY
+ if 'requestInfo' in request_json:
+ partner_name = request_json['requestInfo'].get('sourceId', EMPTY)
+ return partner_name
+
+
+def clear_mdc():
+ """Clear MDC
+
+ """
+ MDC.clear()
+
+
+def get_time():
+ """Generate the invocation time string
+
+ The date format is %Y-%m-%dT%H:%M:%S.sss
+ """
+ ct = time.time()
+ lt = time.gmtime(ct)
+ msec = int((ct - int(ct)) * 1000)
+ return f'{time.strftime(DATE_FMT, lt)}.{msec:0>3}'
+
+
+def set_error_details(code, desc):
+ """set errorCode and description
+
+ """
+ MDC.put('errorCode', code)
+ MDC.put('errorDescription', desc)
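
These helpers only populate the logging context; something must still invoke them per request. One plausible way to wire them into a Flask app (the hook placement is illustrative, assuming the standard Flask request lifecycle):

    from flask import Flask, request

    from osdf.utils.mdc_utils import clear_mdc, populate_mdc

    app = Flask(__name__)

    @app.before_request
    def set_mdc():
        populate_mdc(request)  # requestID, partnerName, serviceName, IPAddress

    @app.teardown_request
    def teardown_mdc(exc):
        clear_mdc()  # keep MDC fields from leaking across requests on a thread
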
diff --git a/osdf/webapp/appcontroller.py b/osdf/webapp/appcontroller.py
index 3a5385d..5db879a 100644
--- a/osdf/webapp/appcontroller.py
+++ b/osdf/webapp/appcontroller.py
@@ -16,13 +16,16 @@
# -------------------------------------------------------------------------
#
+import json
+
+from flask import Response
from flask import request
from flask_httpauth import HTTPBasicAuth
-from flask import Response
-import json
+
import osdf
-from osdf.config.base import http_basic_auth_credentials, osdf_config
+import osdf.config.base as cfg_base
from osdf.adapters.aaf import aaf_authentication as aaf_auth
+from osdf.config.base import osdf_config
auth_basic = HTTPBasicAuth()
@@ -34,11 +37,15 @@ error_body = {
unauthorized_message = json.dumps(error_body)
+
@auth_basic.get_password
def get_pw(username):
- end_point = request.url.split('/')[-1]
- auth_group = osdf.end_point_auth_mapping.get(end_point)
- return http_basic_auth_credentials[auth_group].get(username) if auth_group else None
+ auth_group = ''
+ for k in osdf.end_point_auth_mapping:
+ if k in request.url:
+ auth_group = osdf.end_point_auth_mapping.get(k)
+ return cfg_base.http_basic_auth_credentials[auth_group].get(username) if auth_group else None
+
@auth_basic.error_handler
def auth_error():
@@ -56,4 +63,3 @@ def verify_pw(username, password):
else:
pw = get_pw(username)
return pw == password
- return False \ No newline at end of file
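
The rewritten get_pw matches endpoint keys as substrings of the full request URL instead of only the last path segment, so versioned routes still resolve to the right auth group. A toy illustration of the lookup; the mapping contents here are hypothetical, not the actual osdf.end_point_auth_mapping:

    end_point_auth_mapping = {"placement": "PlacementAuth", "pci": "PCIAuth"}

    def auth_group_for(url):
        # substring match, mirroring the loop in get_pw above
        for key, group in end_point_auth_mapping.items():
            if key in url:
                return group
        return None

    assert auth_group_for("https://oof:8698/api/oof/v1/placement") == "PlacementAuth"
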