path: root/share/starlingx_base
author     Bin Yang <bin.yang@windriver.com>  2019-04-03 08:29:15 +0000
committer  Bin Yang <bin.yang@windriver.com>  2019-04-03 08:29:15 +0000
commit     218da6448f00c5d071561186d335f5560138ef19 (patch)
tree       da7cb8f9c1ef1fad1a4facaf2a0278411f9e1eef /share/starlingx_base
parent     61097cd57a4bfde36a09160aba82f329628d9a14 (diff)
Upgrade infra_workload for starlingx_base
Fetch the OpenStack artifacts from the SDC client via the shared volume.

Change-Id: Ibcb7336e4cfd318497e23167af8cd97883bde594
Issue-ID: MULTICLOUD-541
Signed-off-by: Bin Yang <bin.yang@windriver.com>
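Note: this change assumes the SDC artifacts are exposed to the plugin through a shared volume mounted at /opt/artifacts/<vf-module-model-customization-id>, holding vfmodule-meta.json, service-meta.json and the heat template/env files. A minimal sketch of how that assumption can be verified from Python is shown below; the helper name check_artifacts_mounted is illustrative and not part of this change.

    import os

    # Illustrative only: confirm the shared volume carries the per-vf-module
    # metadata files that infra_workload.py reads; the mount point and file
    # names come from this change, the helper name is an assumption.
    def check_artifacts_mounted(vf_module_model_customization_id):
        base = "/opt/artifacts/%s" % vf_module_model_customization_id
        return all(os.path.isfile(os.path.join(base, name))
                   for name in ("vfmodule-meta.json", "service-meta.json"))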
Diffstat (limited to 'share/starlingx_base')
-rw-r--r--  share/starlingx_base/resource/infra_workload.py  170
1 file changed, 167 insertions(+), 3 deletions(-)
diff --git a/share/starlingx_base/resource/infra_workload.py b/share/starlingx_base/resource/infra_workload.py
index 61db8a72..8fea68f3 100644
--- a/share/starlingx_base/resource/infra_workload.py
+++ b/share/starlingx_base/resource/infra_workload.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import json
+import os
 import logging
 from django.conf import settings
@@ -47,7 +48,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
         }
         try:
-            worker_self = infra_workload_helper.InfraWorkloadHelper(
+            worker_self = InfraWorkloadHelper(
                 settings.MULTICLOUD_API_V1_PREFIX,
                 settings.AAI_BASE_URL
             )
@@ -143,7 +144,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
         backlog_item = gInfraWorkloadThread.get(workloadid)
         if not backlog_item:
             # backlog item not found, so check the stack status
-            worker_self = infra_workload_helper.InfraWorkloadHelper(
+            worker_self = InfraWorkloadHelper(
                 settings.MULTICLOUD_API_V1_PREFIX,
                 settings.AAI_BASE_URL
             )
@@ -202,7 +203,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
         super(InfraWorkload, self).delete(request, vimid, workloadid)
         # backlog for a post to heatbridge delete
-        worker_self = infra_workload_helper.InfraWorkloadHelper(
+        worker_self = InfraWorkloadHelper(
             settings.MULTICLOUD_API_V1_PREFIX,
             settings.AAI_BASE_URL
         )
@@ -287,3 +288,166 @@ class APIv1InfraWorkload(InfraWorkload):
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
         return super(APIv1InfraWorkload, self).delete(request, vimid, requri)
+
+class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
+
+    def __init__(self, multicloud_prefix, aai_base_url):
+        super(InfraWorkloadHelper, self).__init__(multicloud_prefix, aai_base_url)
+        self._logger = logger
+
+    def param_update_user_directives(self, parameters, user_directives):
+        return parameters
+
+    def param_update_sdnc_directives(self, parameters, sdnc_directives):
+        return parameters
+
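+    # Expected oof_directives shape, inferred from the traversal in
+    # param_update_oof_directives below (illustrative, not a published schema):
+    #   {"directives": [
+    #       {"type": "vnfc",
+    #        "directives": [
+    #            {"type": "flavor_directives",
+    #             "attributes": [{"attribute_name": "<template parameter>",
+    #                             "attribute_value": "<value to inject>"}]}]}]}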
+    def param_update_oof_directives(self, parameters, oof_directives):
+        for directive in oof_directives.get("directives", []):
+            if directive["type"] == "vnfc":
+                for directive2 in directive.get("directives", []):
+                    if directive2["type"] in ["flavor_directives",
+                                              "sriovNICNetwork_directives"]:
+                        for attr in directive2.get("attributes", []):
+                            flavor_label = attr.get("attribute_name", None)
+                            flavor_value = attr.get("attribute_value", None)
+                            if flavor_label in parameters:
+                                parameters[flavor_label] = flavor_value
+                            else:
+                                self._logger.warning(
+                                    "Parameter does not exist: %s" %
+                                    flavor_label)
+
+        return parameters
+
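+    # Assumed layout of the shared volume consumed by openstack_template_update
+    # below, inferred from the file names and fields accessed in this change
+    # (illustrative, not a published SDC schema):
+    #   /opt/artifacts/<vf-module-model-customization-id>/
+    #       vfmodule-meta.json   e.g. {"artifacts": ["<artifactUUID>", ...]}
+    #       service-meta.json    e.g. {"artifacts": [{"artifactUUID": "...",
+    #                                                 "artifactType": "HEAT",
+    #                                                 "artifactName": "base.yaml"}]}
+    #       <artifactName>       the heat template and heat env files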
+    def openstack_template_update(self, template_data, vf_module_model_customization_id):
+        # try 1: check if artifacts are available for this vf-module,
+        # assuming the mount point /opt/artifacts/<vf-module-model-customization-id>
+        try:
+            vfmodule_path_base = r"/opt/artifacts/%s" % vf_module_model_customization_id
+            vfmodule_metadata_path = r"%s/vfmodule-meta.json" % vfmodule_path_base
+            service_metadata_path = r"%s/service-meta.json" % vfmodule_path_base
+            with open(vfmodule_metadata_path, 'r', encoding='UTF-8') as vf:
+                # assume the metadata file size is small
+                vfmodule_metadata = json.load(vf)
+            if vfmodule_metadata:
+                # load service-metadata
+                with open(service_metadata_path, 'r', encoding='UTF-8') as sf:
+                    # assume the metadata file size is small
+                    service_metadata = json.load(sf)
+                if service_metadata:
+                    # get the artifacts uuids referenced by this vf-module
+                    artifacts_uuids = vfmodule_metadata.get("artifacts", [])
+                    templatedata1 = dict(template_data)
+                    for a in service_metadata.get("artifacts", []):
+                        artifactUUID = a.get("artifactUUID", "")
+                        if artifactUUID not in artifacts_uuids:
+                            continue
+                        artifact_type = a.get("artifactType", "")
+                        artifact_name = a.get("artifactName", "")
+                        artifact_path = r"%s/%s" % (vfmodule_path_base, artifact_name)
+
+                        # now check the type
+                        if artifact_type.lower() == "heat":
+                            # heat template file
+                            with open(artifact_path, 'r', encoding='UTF-8') as af:
+                                # assume the template file size is small
+                                templatedata1["template"] = af.read()
+                        elif artifact_type.lower() == "heat_env":
+                            # heat env file
+                            with open(artifact_path, 'r', encoding='UTF-8') as af:
+                                # assume the env file size is small
+                                templatedata1["parameters"] = af.read()
+                    return templatedata1
+            else:
+                self._logger.info(
+                    "artifacts not available for vfmodule %s" %
+                    vf_module_model_customization_id)
+        except Exception as e:
+            self._logger.error("template_update fails: %s" % str(e))
+
+        # try 2: reuse the input: template_data
+        return template_data
+
+    def workload_create(self, vimid, workload_data):
+        '''
+        Instantiate a stack over the target cloud region (OpenStack instance).
+        The template for the workload is fetched from the SDC client via the
+        shared volume.
+        :param vimid:
+        :param workload_data:
+        :return: result code, status enum, status reason
+            result code: 0-ok, otherwise error
+            status enum: "WORKLOAD_CREATE_IN_PROGRESS", "WORKLOAD_CREATE_FAIL"
+            status reason: message to explain the status enum
+        '''
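+        # Flow summary: fetch the heat template and env from the shared
+        # volume, merge SDNC/OOF/user directives into the parameters, obtain
+        # a token via the multicloud identity proxy, then POST the stack to
+        # the orchestration service.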
+
+        # normalize the input: xxx_directives
+        data = workload_data
+        vf_module_model_customization_id = data.get("vf-module-model-customization-id", None)
+        vf_module_id = data.get("vf-module-id", "")
+        user_directive = data.get("user_directives", {})
+        oof_directive = data.get("oof_directives", {})
+        sdnc_directive = data.get("sdnc_directives", {})
+        template_type = data.get("template_type", None)
+        template_data = data.get("template_data", {})
+        # resp_template = None
+        if not template_type or "heat" != template_type.lower():
+            return 14, "WORKLOAD_CREATE_FAIL", \
+                "Bad parameters: template type %s is not heat" % \
+                (template_type or "")
+
+        # retrieve the template data
+        template_data = self.openstack_template_update(
+            template_data, vf_module_model_customization_id)
+
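+        # precedence, lowest to highest (later updates win):
+        # SDNC directives, then OOF directives, then user directives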
+        # update the parameters in order of reverse precedence
+        parameters = template_data.get("parameters", {})
+        parameters = self.param_update_sdnc_directives(parameters, sdnc_directive)
+        parameters = self.param_update_oof_directives(parameters, oof_directive)
+        parameters = self.param_update_user_directives(parameters, user_directive)
+        template_data["parameters"] = parameters
+
+        # reset to make sure "files" are empty
+        template_data["files"] = {}
+
+        template_data["stack_name"] = \
+            template_data.get("stack_name", vf_module_id)
+
+        # authenticate
+        cloud_owner, regionid = extsys.decode_vim_id(vimid)
+        # should go via multicloud proxy so that
+        # the selflink is updated by multicloud
+        retcode, v2_token_resp_json, os_status = \
+            helper.MultiCloudIdentityHelper(
+                settings.MULTICLOUD_API_V1_PREFIX,
+                cloud_owner, regionid, "/v2.0/tokens"
+            )
+        if retcode > 0 or not v2_token_resp_json:
+            errmsg = "authenticate fails: %s, %s, %s" % \
+                (cloud_owner, regionid, v2_token_resp_json)
+            self._logger.error(errmsg)
+            return (
+                retcode, "WORKLOAD_CREATE_FAIL", errmsg
+            )
+
+        # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+        service_type = "orchestration"
+        resource_uri = "/stacks"
+        self._logger.info("create stack resources, URI:%s" % resource_uri)
+        retcode, content, os_status = \
+            helper.MultiCloudServiceHelper(cloud_owner, regionid,
+                                           v2_token_resp_json,
+                                           service_type, resource_uri,
+                                           template_data, "POST")
+
+        if retcode == 0:
+            stack1 = content.get('stack', None)
+            # stackid = stack1["id"] if stack1 else ""
+            return 0, "WORKLOAD_CREATE_IN_PROGRESS", stack1
+        else:
+            self._logger.info("RESP with data> result:%s" % content)
+            return retcode, "WORKLOAD_CREATE_FAIL", content
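
A minimal usage sketch of the new helper, for reference only: the vimid value, the workload payload, and the way the caller reacts to the returned tuple are assumptions for illustration, not part of this change.

    from django.conf import settings

    # Illustrative only: call the helper directly and branch on the
    # (result code, status enum, status reason) contract of workload_create().
    worker = InfraWorkloadHelper(settings.MULTICLOUD_API_V1_PREFIX,
                                 settings.AAI_BASE_URL)
    workload_data = {
        "vf-module-model-customization-id": "vfmodule-customization-0001",  # assumed
        "vf-module-id": "vfmodule-demo-0001",                               # assumed
        "template_type": "heat",
        "template_data": {"parameters": {}},
        "sdnc_directives": {},
        "oof_directives": {},
        "user_directives": {},
    }
    retcode, status, detail = worker.workload_create("CloudOwner_RegionOne",  # assumed vimid
                                                     workload_data)
    if retcode == 0:
        # status == "WORKLOAD_CREATE_IN_PROGRESS"; detail carries the stack data
        print(status, detail)
    else:
        # status == "WORKLOAD_CREATE_FAIL"; detail explains the failure
        print(status, detail)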