summaryrefslogtreecommitdiffstats
path: root/share
diff options
context:
space:
mode:
Diffstat (limited to 'share')
-rw-r--r--share/common/msapi/helper.py26
-rw-r--r--share/newton_base/resource/infra_workload_helper.py421
-rw-r--r--share/starlingx_base/registration/registration.py2
-rw-r--r--share/starlingx_base/resource/infra_workload.py596
4 files changed, 576 insertions, 469 deletions
diff --git a/share/common/msapi/helper.py b/share/common/msapi/helper.py
index 48626cc9..bb07b097 100644
--- a/share/common/msapi/helper.py
+++ b/share/common/msapi/helper.py
@@ -187,7 +187,7 @@ class MultiCloudThreadHelper(object):
self.backlog = {}
# expired backlog items
self.expired_backlog = {}
- # self.lock = threading.Lock()
+ self.lock = threading.Lock()
self.state_ = 0 # 0: stopped, 1: started
self.thread = None
@@ -195,12 +195,14 @@ class MultiCloudThreadHelper(object):
return self.state_
def start(self):
+ self.lock.acquire()
if 0 == self.state_:
self.state_ = 1
self.thread = MultiCloudThreadHelper.HelperThread(self)
self.thread.start()
else:
pass
+ self.lock.release()
def stop(self):
self.state_ = 0
@@ -215,6 +217,8 @@ class MultiCloudThreadHelper(object):
backlog_item["timestamp"] = 0
# self.lock.acquire()
+ # make sure there is no identical backlog in expired backlog
+ self.expired_backlog.pop(backlog_item["id"], None)
self.backlog[backlog_item["id"]] = backlog_item
# self.lock.release()
return len(self.backlog)
@@ -222,6 +226,13 @@ class MultiCloudThreadHelper(object):
def get(self, backlog_id):
return self.backlog.get(backlog_id, None) or self.expired_backlog.get(backlog_id, None)
+ # check if the backlog item is in expired backlog
+ def expired(self, backlog_id):
+ if not self.backlog.get(backlog_id, None):
+ if self.expired_backlog.get(backlog_id, None):
+ return True
+ return False
+
def remove(self, backlog_id):
# self.lock.acquire()
self.backlog.pop(backlog_id, None)
@@ -246,18 +257,25 @@ class MultiCloudThreadHelper(object):
def run(self):
logger.debug("Start processing backlogs")
+ nexttimer = 0
while self.owner.state_ == 1 and self.owner.count() > 0:
+ if nexttimer > 1000000:
+ # sleep in case of interval > 1 second
+ time.sleep(nexttimer // 1000000)
+ nexttimer = 30*1000000 # initial interval in us to be updated:30 seconds
for backlog_id, item in self.owner.backlog.items():
# check interval for repeatable backlog item
now = MultiCloudThreadHelper.get_epoch_now_usecond()
repeat_interval = item.get("repeat", 0)
if repeat_interval > 0:
timestamp = item.get("timestamp", 0)
+ timeleft = (now - timestamp
+ if now > timestamp
+ else repeat_interval)
+ nexttimer = timeleft if nexttimer > timeleft else nexttimer
# compare interval with elapsed time.
# workaround the case of timestamp turnaround
- if repeat_interval > (now - timestamp
- if now > timestamp
- else repeat_interval):
+ if repeat_interval > timeleft:
# not time to run this backlog item yet
continue
diff --git a/share/newton_base/resource/infra_workload_helper.py b/share/newton_base/resource/infra_workload_helper.py
new file mode 100644
index 00000000..0fd7c86e
--- /dev/null
+++ b/share/newton_base/resource/infra_workload_helper.py
@@ -0,0 +1,421 @@
+# Copyright (c) 2017-2018 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import json
+
+from django.conf import settings
+from common.msapi import extsys
+from common.msapi.helper import Helper as helper
+
+from common.utils import restcall
+from newton_base.registration import registration as newton_registration
+
+logger = logging.getLogger(__name__)
+
+
+class InfraWorkloadHelper(object):
+
+ def __init__(self, multicloud_prefix, aai_base_url):
+ self.proxy_prefix = multicloud_prefix
+ self.aai_base_url = aai_base_url
+ self._logger = logger
+ super(InfraWorkloadHelper, self).__init__()
+
+ def workload_create(self, vimid, workload_data):
+ '''
+ Instantiate a stack over target cloud region (OpenStack instance)
+ :param vimid:
+ :param workload_data:
+ :return: result code, status enum, status reason
+ result code: 0-ok, otherwise error
+ status enum: "WORKLOAD_CREATE_IN_PROGRESS", "WORKLOAD_CREATE_FAIL"
+ status reason: message to explain the status enum
+ '''
+ data = workload_data
+ oof_directive = data.get("oof_directives", {})
+ template_type = data.get("template_type", None)
+ template_data = data.get("template_data", {})
+ # resp_template = None
+ if not template_type or "heat" != template_type.lower():
+ return 14, "WORKLOAD_CREATE_FAIL", \
+ "Bad parameters: template type %s is not heat" %\
+ (template_type or "")
+
+ # update heat parameters from oof_directive
+ parameters = template_data.get("parameters", {})
+
+ for directive in oof_directive.get("directives", []):
+ if directive["type"] == "vnfc":
+ for directive2 in directive.get("directives", []):
+ if directive2["type"] in ["flavor_directives",
+ "sriovNICNetwork_directives"]:
+ for attr in directive2.get("attributes", []):
+ flavor_label = attr.get("attribute_name", None)
+ flavor_value = attr.get("attribute_value", None)
+ if flavor_label in parameters:
+ parameters[flavor_label] = flavor_value
+ else:
+ self._logger.warn(
+ "There is no parameter exist: %s" %
+ flavor_label)
+
+ # update parameters
+ template_data["parameters"] = parameters
+
+ # reset to make sure "files" are empty
+ template_data["files"] = {}
+
+ # authenticate
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that
+ # the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens"
+ )
+ if retcode > 0 or not v2_token_resp_json:
+ errmsg = "authenticate fails:%s,%s, %s" %\
+ (cloud_owner, regionid, v2_token_resp_json)
+ logger.error(errmsg)
+ return (
+ retcode, "WORKLOAD_CREATE_FAIL", errmsg
+ )
+
+ # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+ service_type = "orchestration"
+ resource_uri = "/stacks"
+ self._logger.info("create stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ template_data, "POST")
+
+ if retcode == 0:
+ stack1 = content.get('stack', None)
+ # stackid = stack1["id"] if stack1 else ""
+ return 0, "WORKLOAD_CREATE_IN_PROGRESS", stack1
+ else:
+ self._logger.info("RESP with data> result:%s" % content)
+ return retcode, "WORKLOAD_CREATE_FAIL", content
+
+ def workload_update(self, vimid, stack_id, otherinfo):
+ '''
+ update heat resource to AAI for the specified cloud region and tenant
+ The resources includes: vserver, vserver/l-interface,
+ :param vimid:
+ :param stack_id: id of the created stack in OpenStack instance
+ :param otherinfo:
+ :return: result code, status enum, status reason
+ result code: 0-ok, otherwise error
+ status enum: "WORKLOAD_UPDATE_IN_PROGRESS", "WORKLOAD_UPDATE_FAIL"
+ status reason: message to explain the status enum
+ '''
+
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+ if retcode > 0:
+ errmsg = "authenticate fails:%s, %s, %s" %\
+ (cloud_owner, regionid, v2_token_resp_json)
+ logger.error(errmsg)
+ return retcode, "WORKLOAD_UPDATE_FAIL", errmsg
+
+ tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+ # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
+
+ # common prefix
+ aai_cloud_region = \
+ "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
+ % (cloud_owner, regionid, tenant_id)
+
+ # get stack resource
+ service_type = "orchestration"
+ resource_uri = "/stacks/%s/resources" % (stack_id)
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ resources = content.get('resources', []) if retcode == 0 and content else []
+ if retcode > 0:
+ errmsg = "stack:%s, query fails: %s" %\
+ (resource_uri, content)
+ logger.error(errmsg)
+ return retcode, "WORKLOAD_UPDATE_FAIL", errmsg
+
+ # find and update resources
+ # transactions = []
+ for resource in resources:
+ if resource.get('resource_status', None) != "CREATE_COMPLETE":
+ # this resource is not ready yet, just return
+ errmsg = "stack: %s, resource not ready :%s" % \
+ (resource_uri, resource)
+ logger.info(errmsg)
+ return retcode, "WORKLOAD_UPDATE_FAIL", errmsg
+ # continue
+ if resource.get('resource_type', None) == 'OS::Nova::Server':
+ # retrieve vserver details
+ service_type = "compute"
+ resource_uri = "/servers/%s" % (resource['physical_resource_id'])
+ self._logger.info("retrieve vserver detail, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ self._logger.debug(" resp data:%s" % content)
+ if retcode > 0:
+ errmsg = "stack resource:%s, query fails: %s" % \
+ (resource_uri, content)
+ logger.error(errmsg)
+ return retcode, "WORKLOAD_UPDATE_FAIL", errmsg
+ vserver_detail = content.get('server', None) if retcode == 0 and content else None
+ if vserver_detail:
+ # compose inventory entry for vserver
+ vserver_link = ""
+ for link in vserver_detail['links']:
+ if link['rel'] == 'self':
+ vserver_link = link['href']
+ break
+ pass
+
+ # note: relationship-list to flavor/image is not be update yet
+ # note: volumes is not updated yet
+ # note: relationship-list to vnf will be handled somewhere else
+ aai_resource = {
+ 'body': {
+ 'vserver-name': vserver_detail['name'],
+ 'vserver-name2': vserver_detail['name'],
+ "vserver-id": vserver_detail['id'],
+ "vserver-selflink": vserver_link,
+ "prov-status": vserver_detail['status']
+ },
+ "uri": aai_cloud_region + "/vservers/vserver/%s" % (vserver_detail['id'])
+ }
+
+ try:
+ # then update the resource
+ retcode, content, status_code = \
+ restcall.req_to_aai(aai_resource['uri'],
+ "PUT", content=aai_resource['body'])
+
+ if retcode == 0 and content:
+ content = json.JSONDecoder().decode(content)
+ self._logger.debug("AAI update %s response: %s" %
+ (aai_resource['uri'], content))
+ except Exception as e:
+ self._logger.error(e.message)
+ return retcode, "WORKLOAD_UPDATE_FAIL", e.message
+
+ # aai_resource_transactions = {"put": [aai_resource]}
+ # transactions.append(aai_resource_transactions)
+ # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
+
+ for resource in resources:
+ if resource.get('resource_status', None) != "CREATE_COMPLETE":
+ continue
+ if resource.get('resource_type', None) == 'OS::Neutron::Port':
+ # retrieve vport details
+ service_type = "network"
+ resource_uri = "/v2.0/ports/%s" % (resource['physical_resource_id'])
+ self._logger.info("retrieve vport detail, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ self._logger.debug(" resp data:%s" % content)
+ if retcode > 0:
+ errmsg = "stack resource:%s, query fails: %s" % \
+ (resource_uri, content)
+ logger.error(errmsg)
+ return retcode, "WORKLOAD_UPDATE_FAIL", errmsg
+
+ vport_detail = content.get('port', None) if retcode == 0 and content else None
+ if vport_detail:
+ # compose inventory entry for vport
+ # note: l3-interface-ipv4-address-list,
+ # l3-interface-ipv6-address-list are not updated yet
+ # note: network-name is not update yet since the detail
+ # coming with network-id
+ aai_resource = {
+ "body": {
+ "interface-name": vport_detail['name'],
+ "interface-id": vport_detail['id'],
+ "macaddr": vport_detail['mac_address']
+ },
+ 'uri':
+ aai_cloud_region + "/vservers/vserver/%s/l-interfaces/l-interface/%s"
+ % (vport_detail['device_id'], vport_detail['name'])
+ }
+ try:
+ # then update the resource
+ retcode, content, status_code = \
+ restcall.req_to_aai(aai_resource['uri'], "PUT",
+ content=aai_resource['body'])
+
+ if retcode == 0 and content:
+ content = json.JSONDecoder().decode(content)
+ self._logger.debug("AAI update %s response: %s" %
+ (aai_resource['uri'], content))
+ except Exception as e:
+ self._logger.error(e.message)
+ return retcode, "WORKLOAD_UPDATE_FAIL", e.message
+
+ # aai_resource_transactions = {"put": [aai_resource]}
+ # transactions.append(aai_resource_transactions)
+ # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
+
+ # aai_transactions = {"transactions": transactions}
+ # self._logger.debug("aai_transactions :%s" % aai_transactions)
+ return 0, "WORKLOAD_UPDATE_COMPLETE", "succeed"
+
+ def workload_delete(self, vimid, stack_id, otherinfo):
+ '''
+ remove heat resource from AAI for the specified cloud region and tenant
+ The resources includes: vserver, vserver/l-interface,
+ :param vimid:
+ :param stack_id: id of the created stack in OpenStack instance
+ :param otherinfo:
+ :return: result code, status enum, status reason
+ result code: 0-ok, otherwise error
+ status enum: "WORKLOAD_DELETE_IN_PROGRESS", "WORKLOAD_DELETE_FAIL"
+ status reason: message to explain the status enum
+ '''
+
+ # enumerate the resources
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+ if retcode > 0:
+ errmsg = "authenticate fails:%s, %s, %s" %\
+ (cloud_owner, regionid, v2_token_resp_json)
+ logger.error(errmsg)
+ return retcode, "WORKLOAD_DELETE_FAIL", errmsg
+
+ tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+ # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
+
+ # common prefix
+ aai_cloud_region = \
+ "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
+ % (cloud_owner, regionid, tenant_id)
+
+ # get stack resource
+ service_type = "orchestration"
+ resource_uri = "/stacks/%s/resources" % (stack_id)
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+ resources = content.get('resources', []) \
+ if retcode == 0 and content else []
+
+ vserver_list = [resource['physical_resource_id'] for resource in resources
+ if resource.get('resource_type', None) == 'OS::Nova::Server']
+
+ try:
+ # get list of vservers
+ vserver_list_url = aai_cloud_region + "/vservers?depth=all"
+ retcode, content, status_code = \
+ restcall.req_to_aai(vserver_list_url, "GET")
+ if retcode > 0 or not content:
+ self._logger.debug("AAI get %s response: %s" % (vserver_list_url, content))
+ return (retcode, "WORKLOAD_DELETE_FAIL", "authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+
+ content = json.JSONDecoder().decode(content)
+ vservers = content['vserver']
+ for vserver in vservers:
+ if vserver['vserver-id'] not in vserver_list:
+ continue
+
+ try:
+ # iterate vport, except will be raised if no l-interface exist
+ for vport in vserver['l-interfaces']['l-interface']:
+ # delete vport
+ vport_delete_url = \
+ aai_cloud_region + \
+ "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \
+ % (vserver['vserver-id'], vport['interface-name'],
+ vport['resource-version'])
+
+ restcall.req_to_aai(vport_delete_url, "DELETE")
+ except Exception as e:
+ # return 12, "WORKLOAD_DELETE_FAIL", e.message
+ pass
+
+ try:
+ # delete vserver
+ vserver_delete_url = \
+ aai_cloud_region + \
+ "/vservers/vserver/%s?resource-version=%s" \
+ % (vserver['vserver-id'], vserver['resource-version'])
+
+ restcall.req_to_aai(vserver_delete_url, "DELETE")
+ except Exception:
+ continue
+
+ return 0, "WORKLOAD_DELETE_COMPLETE", "succeed"
+ except Exception as e:
+ self._logger.error(e.message)
+ return 12, "WORKLOAD_DELETE_FAIL", e.message
+ pass
+
+ def workload_status(self, vimid, stack_id, otherinfo):
+ try:
+ # assume the workload_type is heat
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+
+ if retcode > 0 or not v2_token_resp_json:
+ errmsg = "authenticate fails:%s, %s, %s" % \
+ (cloud_owner, regionid, v2_token_resp_json)
+ logger.error(errmsg)
+ return retcode, "WORKLOAD_STATUS_UNKNOWN", errmsg
+
+ # get stack status
+ service_type = "orchestration"
+ resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ stacks = content.get('stacks', []) if retcode == 0 and content else []
+ stack_status = stacks[0]["stack_status"] if len(stacks) > 0 else ""
+
+ return retcode, stack_status, stacks
+ except Exception as e:
+ self._logger.error(e.message)
+ return 12, "WORKLOAD_STATUS_UNKNOWN", e.message
diff --git a/share/starlingx_base/registration/registration.py b/share/starlingx_base/registration/registration.py
index fa79e5b3..9d1846d7 100644
--- a/share/starlingx_base/registration/registration.py
+++ b/share/starlingx_base/registration/registration.py
@@ -16,8 +16,6 @@ import logging
import json
import uuid
import traceback
-import threading
-import time
from django.conf import settings
diff --git a/share/starlingx_base/resource/infra_workload.py b/share/starlingx_base/resource/infra_workload.py
index 4da0a0d0..61db8a72 100644
--- a/share/starlingx_base/resource/infra_workload.py
+++ b/share/starlingx_base/resource/infra_workload.py
@@ -13,25 +13,15 @@
# limitations under the License.
import logging
-import json
-import traceback
-import uuid
-
from django.conf import settings
-from common.exceptions import VimDriverNewtonException
-# from newton_base.util import VimDriverUtils
-from keystoneauth1.exceptions import HttpError
from rest_framework import status
from rest_framework.response import Response
-from rest_framework.views import APIView
from common.msapi import extsys
from common.msapi.helper import Helper as helper
-from common.utils import restcall
-from newton_base.registration import registration as newton_registration
from newton_base.resource import infra_workload as newton_infra_workload
-from newton_base.util import VimDriverUtils
+from newton_base.resource import infra_workload_helper as infra_workload_helper
logger = logging.getLogger(__name__)
@@ -46,28 +36,52 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
def post(self, request, vimid="", workloadid=""):
self._logger.info("vimid: %s, stackid:%s" % (vimid, workloadid))
- self._logger.info("data: %s" % (request.data))
+ self._logger.info("data: %s" % request.data)
self._logger.debug("META: %s" % request.META)
- try:
- resp_template = {}
- os_status = status.HTTP_500_INTERNAL_SERVER_ERROR
+ resp_template = {
+ "template_type": "HEAT",
+ "workload_id": workloadid,
+ "workload_status": "WORKLOAD_CREATE_FAIL",
+ "workload_status_reason": "Exception occurs"
+ }
+ try:
+ worker_self = infra_workload_helper.InfraWorkloadHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
+ )
if workloadid == "":
- return super(InfraWorkload, self).post(request, vimid)
+ resp_template["workload_status"] = "WORKLOAD_CREATE_FAIL"
+ # post to create a new stack, stack id available only after creating a stack is done
+ progress_code, progress_status, progress_msg = worker_self.workload_create(vimid, request.data)
+ resp_template["workload_status"] = progress_status
+ resp_template["workload_status_reason"] = progress_msg
+
+ if progress_code == 0:
+ # update workload_id
+ stack = progress_msg
+ stackid = stack["id"]
+ resp_template["workload_id"] = stackid
+ status_code = status.HTTP_201_CREATED
+ else:
+ status_code = status.HTTP_400_BAD_REQUEST
+
+ return Response(data=resp_template, status=status_code)
+ # return super(InfraWorkload, self).post(request, vimid)
else:
+ resp_template["workload_status"] = "WORKLOAD_UPDATE_FAIL"
# a post to heatbridge
- worker_self = InfraWorkloadHelper(
- settings.MULTICLOUD_API_V1_PREFIX,
- settings.AAI_BASE_URL
- )
backlog_item = {
"id": workloadid,
- "worker": worker_self.workload_update(),
+ "worker": worker_self.workload_update,
"payload": (worker_self, vimid, workloadid, request.data),
"repeat": 0, # one time job
# format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
- "status": (0, "AAI_UPDATE_IN_PROGRESS", "Pending to schedule")
+ "status": (
+ 0, "WORKLOAD_UPDATE_IN_PROGRESS",
+ "backlog to update workload %s pends to schedule" % workloadid
+ )
}
gInfraWorkloadThread.add(backlog_item)
if 0 == gInfraWorkloadThread.state():
@@ -77,91 +91,132 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
backlog_item = gInfraWorkloadThread.get(workloadid)
if not backlog_item:
# backlog item not found
+ resp_template["workload_status_reason"] = \
+ "backlog to update workload %s " \
+ "into AAI is not found" % workloadid
return Response(
- data={
- 'workload_status': "AAI_UPDATE_FAIL",
- "message": "AAI update failed"
- },
+ data=resp_template,
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
else:
- progress = backlog_item.get("status", "Status not found")
+ progress = backlog_item.get("status",
+ (13, "WORKLOAD_DELETE_FAIL",
+ "Unexpected:status not found in backlog item")
+ )
progress_code = progress[0]
progress_status = progress[1]
progress_msg = progress[2]
- return Response(data={'workload_status': progress_status, "message": progress_msg},
- status=status.HTTP_201_CREATED
+ resp_template["workload_status"] = progress_status
+ resp_template["workload_status_reason"] = progress_msg
+ return Response(data=resp_template,
status=status.HTTP_202_ACCEPTED
if progress_code == 0 else progress_code
)
except Exception as e:
- self._logger.error(traceback.format_exc())
- return Response(data={'error': str(e)},
+ errmsg = e.message
+ self._logger.error(errmsg)
+ resp_template["workload_status_reason"] = errmsg
+ return Response(data=resp_template,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def get(self, request, vimid="", workloadid=""):
self._logger.info("vimid, workload id: %s, %s" % (vimid, workloadid))
self._logger.debug("META: %s" % request.META)
+ resp_template = {
+ "template_type": "HEAT",
+ "workload_id": workloadid,
+ "workload_status": "WORKLOAD_GET_FAIL",
+ "workload_status_reason": "Exception occurs"
+ }
try:
if workloadid == "":
- raise VimDriverNewtonException(
- message="workload_id is not specified",
- content="workload_id must be specified to delete the workload",
- status_code = status.HTTP_400_BAD_REQUEST)
+ resp_template["workload_status_reason"] = "workload id is not found in API url"
+ return Response(
+ data=resp_template,
+ status=status.HTTP_400_BAD_REQUEST
+ )
# now query the progress
+ status_code = status.HTTP_200_OK
backlog_item = gInfraWorkloadThread.get(workloadid)
if not backlog_item:
- # backlog item not found
- return Response(
- data={
- 'workload_status': "AAI_UPDATE_FAIL",
- "message": "AAI update failed"
- },
- status=status.HTTP_500_INTERNAL_SERVER_ERROR
+ # backlog item not found, so check the stack status
+ worker_self = infra_workload_helper.InfraWorkloadHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
)
+ progress_code, progress_status, progress_msg = worker_self.workload_status(vimid, workloadid, None)
+
+ resp_template["workload_status"] = progress_status
+ resp_template["workload_status_reason"] = progress_msg
+ status_code = status.HTTP_200_OK\
+ if progress_code == 0 else progress_code
+
else:
- progress = backlog_item.get("status", "Status not found")
+ progress = backlog_item.get("status",
+ (13, "WORKLOAD_DELETE_FAIL",
+ "Unexpected:status not found in backlog item")
+ )
progress_code = progress[0]
progress_status = progress[1]
progress_msg = progress[2]
- return Response(data={'workload_status': progress_status, "message": progress_msg},
- status=status.HTTP_201_CREATED
- if progress_code == 0 else progress_code
- )
+ # if gInfraWorkloadThread.expired(workloadid):
+ # gInfraWorkloadThread.remove(workloadid)
+ resp_template["workload_status"] = progress_status
+ resp_template["workload_status_reason"] = progress_msg
+ status_code = status.HTTP_200_OK\
+ if progress_code == 0 else progress_code
+
+ return Response(data=resp_template,
+ status=status_code
+ )
except Exception as e:
- self._logger.error(traceback.format_exc())
- return Response(data={'error': str(e)},
+ self._logger.error(e.message)
+ resp_template["workload_status_reason"] = e.message
+ return Response(data=resp_template,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def delete(self, request, vimid="", workloadid=""):
self._logger.info("vimid, workload id: %s, %s" % (vimid, workloadid))
self._logger.debug("META: %s" % request.META)
+ resp_template = {
+ "template_type": "HEAT",
+ "workload_id": workloadid,
+ "workload_status": "WORKLOAD_DELETE_FAIL",
+ "workload_status_reason": "Exception occurs"
+ }
try:
- super(InfraWorkload, self).delete(request, vimid, workloadid)
-
if workloadid == "":
- raise VimDriverNewtonException(
- message="workload_id is not specified",
- content="workload_id must be specified to delete the workload",
- status_code = status.HTTP_400_BAD_REQUEST)
+ resp_template["workload_status_reason"] = "workload id is not found in API url"
+ return Response(
+ data=resp_template,
+ status=status.HTTP_400_BAD_REQUEST
+ )
- # a post to heatbridge delete
- worker_self = InfraWorkloadHelper(
+ # remove the stack object from vim
+ super(InfraWorkload, self).delete(request, vimid, workloadid)
+
+ # backlog for a post to heatbridge delete
+ worker_self = infra_workload_helper.InfraWorkloadHelper(
settings.MULTICLOUD_API_V1_PREFIX,
settings.AAI_BASE_URL
)
backlog_item = {
"id": workloadid,
- "worker": worker_self.workload_delete(),
+ "worker": worker_self.workload_delete,
"payload": (worker_self, vimid, workloadid, request.data),
"repeat": 0, # one time job
# format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
- "status": (0, "AAI_REMOVE_IN_PROGRESS", "Pending to schedule")
+ "status": (
+ 0, "WORKLOAD_DELETE_IN_PROGRESS",
+ "backlog for delete the workload %s "
+ "pends to schedule" % workloadid
+ )
}
gInfraWorkloadThread.add(backlog_item)
if 0 == gInfraWorkloadThread.state():
@@ -171,95 +226,35 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
backlog_item = gInfraWorkloadThread.get(workloadid)
if not backlog_item:
# backlog item not found
+ resp_template["workload_status_reason"] = \
+ "backlog to remove the "\
+ "workload %s is not found" % workloadid
+
return Response(
- data={
- 'workload_status': "STACK_REMOVE_FAILED",
- "message": "AAI update failed"
- },
+ data=resp_template,
status=status.HTTP_500_INTERNAL_SERVER_ERROR
)
else:
- progress = backlog_item.get("status", "Status not found")
+ progress = backlog_item.get("status",
+ (13, "WORKLOAD_DELETE_FAIL",
+ "Unexpected:status not found in backlog item")
+ )
progress_code = progress[0]
progress_status = progress[1]
progress_msg = progress[2]
- return Response(data={'workload_status': progress_status, "message": progress_msg},
- status=status.HTTP_200_OK
+ # if gInfraWorkloadThread.expired(workloadid):
+ # gInfraWorkloadThread.remove(workloadid)
+
+ resp_template["workload_status"] = progress_status
+ resp_template["workload_status_reason"] = progress_msg
+ return Response(data=resp_template,
+ status=status.HTTP_204_NO_CONTENT
if progress_code == 0 else progress_code
)
-
- # # assume the workload_type is heat
- # stack_id = workloadid
- # cloud_owner, regionid = extsys.decode_vim_id(vimid)
- # # should go via multicloud proxy so that
- # # the selflink is updated by multicloud
- # retcode, v2_token_resp_json, os_status = \
- # helper.MultiCloudIdentityHelper(
- # settings.MULTICLOUD_API_V1_PREFIX,
- # cloud_owner, regionid, "/v2.0/tokens")
- #
- # if retcode > 0 or not v2_token_resp_json:
- # logger.error("authenticate fails:%s, %s, %s" %
- # (cloud_owner, regionid, v2_token_resp_json))
- # return
- # # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
- # # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
- #
- # # get stack status
- # service_type = "orchestration"
- # resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
- # self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
- # retcode, content, os_status = \
- # helper.MultiCloudServiceHelper(cloud_owner, regionid,
- # v2_token_resp_json,
- # service_type, resource_uri,
- # None, "GET")
- #
- # stacks = content.get('stacks', []) \
- # if retcode == 0 and content else []
- # # assume there is at most 1 stack returned
- # # since it was filtered by id
- # stack1 = stacks[0] if stacks else None
- # stack_status = ""
- #
- # if stack1 and 'CREATE_COMPLETE' == stack1['stack_status']:
- # # delete the stack
- # resource_uri = "/stacks/%s/%s" % \
- # (stack1['stack_name'], stack1['id'])
- # self._logger.info("delete stack, URI:%s" % resource_uri)
- # retcode, content, os_status = \
- # helper.MultiCloudServiceHelper(cloud_owner, regionid,
- # v2_token_resp_json,
- # service_type, resource_uri,
- # None, "DELETE")
- # # if retcode == 0:
- # # stack_status = "DELETE_IN_PROCESS"
- # # # and update AAI inventory by heatbridge-delete
- # # self.heatbridge_delete(request, vimid, stack1['id'])
- #
- # # stub response
- # resp_template = {
- # "template_type": "HEAT",
- # "workload_id": stack_id,
- # "workload_status": stack_status
- # }
- #
- # if retcode > 0:
- # resp_template["workload_response"] = content
- #
- # self._logger.info("RESP with data> result:%s" % resp_template)
- # return Response(status=os_status)
- # except VimDriverNewtonException as e:
- # self._logger.error("Plugin exception> status:%s,error:%s"
- # % (e.status_code, e.content))
- # return Response(data={'error': e.content}, status=e.status_code)
- # except HttpError as e:
- # self._logger.error("HttpError: status:%s, response:%s" %
- # (e.http_status, e.response.json()))
- # return Response(data=e.response.json(), status=e.http_status)
except Exception as e:
- self._logger.error(traceback.format_exc())
- return Response(data={'error': str(e)},
+ self._logger.error(e.message)
+ resp_template["workload_status_reason"] = e.message
+ return Response(data=resp_template,
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@@ -292,328 +287,3 @@ class APIv1InfraWorkload(InfraWorkload):
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1InfraWorkload, self).delete(request, vimid, requri)
-
-class InfraWorkloadHelper(newton_registration.RegistryHelper):
-
- def __init__(self, multicloud_prefix, aai_base_url, vimid, workloadid=""):
- self.proxy_prefix = multicloud_prefix
- self.aai_base_url = aai_base_url
- self._logger = logger
- self.vimid = vimid
- self.workloadid = workloadid
- super(InfraWorkloadHelper, self).__init__()
-
- def workload_create(self, vimid, workload_data):
- data = workload_data
- oof_directive = data.get("oof_directives", {})
- template_type = data.get("template_type", None)
- template_data = data.get("template_data", {})
- resp_template = None
- if template_type and "heat" == template_type.lower():
- # update heat parameters from oof_directive
- parameters = template_data.get("parameters", {})
-
- for directive in oof_directive.get("directives", []):
- if directive["type"] == "vnfc":
- for directive2 in directive.get("directives", []):
- if directive2["type"] in ["flavor_directives",
- "sriovNICNetwork_directives"]:
- for attr in directive2.get("attributes", []):
- flavor_label = attr.get("attribute_name", None)
- flavor_value = attr.get("attribute_value", None)
- if flavor_label in parameters:
- parameters[flavor_label] = flavor_value
- else:
- self._logger.warn(
- "There is no parameter exist: %s" %
- flavor_label)
-
- # update parameters
- template_data["parameters"] = parameters
-
- # reset to make sure "files" are empty
- template_data["files"] = {}
-
- # authenticate
- cloud_owner, regionid = extsys.decode_vim_id(vimid)
- # should go via multicloud proxy so that
- # the selflink is updated by multicloud
- retcode, v2_token_resp_json, os_status = \
- helper.MultiCloudIdentityHelper(
- settings.MULTICLOUD_API_V1_PREFIX,
- cloud_owner, regionid, "/v2.0/tokens"
- )
-
- if retcode > 0 or not v2_token_resp_json:
- logger.error("authenticate fails:%s,%s, %s" %
- (cloud_owner, regionid, v2_token_resp_json))
- return (
- retcode,
- "authenticate fails:%s,%s, %s" %
- (cloud_owner, regionid, v2_token_resp_json)
- )
-
- # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
-
- service_type = "orchestration"
- resource_uri = "/stacks"
- self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
- retcode, content, os_status = \
- helper.MultiCloudServiceHelper(cloud_owner, regionid,
- v2_token_resp_json,
- service_type, resource_uri,
- template_data, "POST")
-
- stack1 = content.get('stack', None) \
- if retcode == 0 and content else None
-
- stackid = stack1["id"] if stack1 else ""
- resp_template = {
- "template_type": template_type,
- "workload_id": stackid,
- "template_response": content
- }
- self._logger.info("RESP with data> result:%s" % resp_template)
- return (0, resp_template)
-
- def workload_update(self, vimid, stack_id, otherinfo):
- '''
- update heat resource to AAI for the specified cloud region and tenant
- The resources includes: vserver, vserver/l-interface,
- '''
-
- cloud_owner, regionid = extsys.decode_vim_id(vimid)
- # should go via multicloud proxy so that the selflink is updated by multicloud
- retcode, v2_token_resp_json, os_status = \
- helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
- cloud_owner, regionid, "/v2.0/tokens")
- if retcode > 0:
- logger.error("authenticate fails:%s, %s, %s" %
- (cloud_owner, regionid, v2_token_resp_json))
-
- return (retcode, "AAI_UPDATE_FAIL", "authenticate fails:%s, %s, %s" %
- (cloud_owner, regionid, v2_token_resp_json))
- tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
- # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
-
- # common prefix
- aai_cloud_region = \
- "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
- % (cloud_owner, regionid, tenant_id)
-
- # get stack resource
- service_type = "orchestration"
- resource_uri = "/stacks/%s/resources" % (stack_id)
- self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
- retcode, content, os_status = \
- helper.MultiCloudServiceHelper(cloud_owner, regionid,
- v2_token_resp_json,
- service_type, resource_uri,
- None, "GET")
-
- resources = content.get('resources', []) if retcode == 0 and content else []
-
- # find and update resources
- transactions = []
- for resource in resources:
- if resource.get('resource_status', None) != "CREATE_COMPLETE":
- # this resource is not ready yet
- continue
- if resource.get('resource_type', None) == 'OS::Nova::Server':
- # retrieve vserver details
- service_type = "compute"
- resource_uri = "/servers/%s" % (resource['physical_resource_id'])
- self._logger.info("retrieve vserver detail, URI:%s" % resource_uri)
- retcode, content, os_status = \
- helper.MultiCloudServiceHelper(cloud_owner, regionid,
- v2_token_resp_json,
- service_type, resource_uri,
- None, "GET")
-
- self._logger.debug(" resp data:%s" % content)
- vserver_detail = content.get('server', None) if retcode == 0 and content else None
- if vserver_detail:
- # compose inventory entry for vserver
- vserver_link = ""
- for link in vserver_detail['links']:
- if link['rel'] == 'self':
- vserver_link = link['href']
- break
- pass
-
- # note: relationship-list to flavor/image is not be update yet
- # note: volumes is not updated yet
- # note: relationship-list to vnf will be handled somewhere else
- aai_resource = {
- 'body': {
- 'vserver-name': vserver_detail['name'],
- 'vserver-name2': vserver_detail['name'],
- "vserver-id": vserver_detail['id'],
- "vserver-selflink": vserver_link,
- "prov-status": vserver_detail['status']
- },
- "uri": aai_cloud_region + "/vservers/vserver/%s" % (vserver_detail['id'])
- }
-
- try:
- # then update the resource
- retcode, content, status_code = \
- restcall.req_to_aai(aai_resource['uri'],
- "PUT", content=aai_resource['body'])
-
- if retcode == 0 and content:
- content = json.JSONDecoder().decode(content)
- self._logger.debug("AAI update %s response: %s" %
- (aai_resource['uri'], content))
- except Exception:
- self._logger.error(traceback.format_exc())
- pass
-
- aai_resource_transactions = {"put": [aai_resource]}
- transactions.append(aai_resource_transactions)
- # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
- pass
-
- for resource in resources:
- if resource.get('resource_status', None) != "CREATE_COMPLETE":
- continue
- if resource.get('resource_type', None) == 'OS::Neutron::Port':
- # retrieve vport details
- service_type = "network"
- resource_uri = "/v2.0/ports/%s" % (resource['physical_resource_id'])
- self._logger.info("retrieve vport detail, URI:%s" % resource_uri)
- retcode, content, os_status = \
- helper.MultiCloudServiceHelper(cloud_owner, regionid,
- v2_token_resp_json,
- service_type, resource_uri,
- None, "GET")
-
- self._logger.debug(" resp data:%s" % content)
-
- vport_detail = content.get('port', None) if retcode == 0 and content else None
- if vport_detail:
- # compose inventory entry for vport
- # note: l3-interface-ipv4-address-list,
- # l3-interface-ipv6-address-list are not updated yet
- # note: network-name is not update yet since the detail
- # coming with network-id
- aai_resource = {
- "body": {
- "interface-name": vport_detail['name'],
- "interface-id": vport_detail['id'],
- "macaddr": vport_detail['mac_address']
- },
- 'uri':
- aai_cloud_region + "/vservers/vserver/%s/l-interfaces/l-interface/%s"
- % (vport_detail['device_id'], vport_detail['name'])
- }
- try:
- # then update the resource
- retcode, content, status_code = \
- restcall.req_to_aai(aai_resource['uri'], "PUT",
- content=aai_resource['body'])
-
- if retcode == 0 and content:
- content = json.JSONDecoder().decode(content)
- self._logger.debug("AAI update %s response: %s" %
- (aai_resource['uri'], content))
- except Exception:
- self._logger.error(traceback.format_exc())
- pass
-
- aai_resource_transactions = {"put": [aai_resource]}
- transactions.append(aai_resource_transactions)
- # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
-
- pass
-
- # aai_transactions = {"transactions": transactions}
- # self._logger.debug("aai_transactions :%s" % aai_transactions)
- return (retcode, "AAI_UPDATE_COMPLETE", "succeed")
-
- def workload_delete(self, vimid, stack_id, otherinfo):
- '''
- remove heat resource from AAI for the specified cloud region and tenant
-
- '''
-
- # enumerate the resources
- cloud_owner, regionid = extsys.decode_vim_id(vimid)
- # should go via multicloud proxy so that the selflink is updated by multicloud
- retcode, v2_token_resp_json, os_status = \
- helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
- cloud_owner, regionid, "/v2.0/tokens")
- if retcode > 0:
- logger.error("authenticate fails:%s, %s, %s" %
- (cloud_owner, regionid, v2_token_resp_json))
- return None
-
- tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
- # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
-
- # common prefix
- aai_cloud_region = \
- "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
- % (cloud_owner, regionid, tenant_id)
-
- # get stack resource
- service_type = "orchestration"
- resource_uri = "/stacks/%s/resources" % (stack_id)
- self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
- retcode, content, os_status = \
- helper.MultiCloudServiceHelper(cloud_owner, regionid,
- v2_token_resp_json,
- service_type, resource_uri,
- None, "GET")
- resources = content.get('resources', []) \
- if retcode == 0 and content else []
-
- vserver_list = [resource['physical_resource_id'] for resource in resources
- if resource.get('resource_type', None) == 'OS::Nova::Server']
-
- try:
- # get list of vservers
- vserver_list_url = aai_cloud_region + "/vservers?depth=all"
- retcode, content, status_code = \
- restcall.req_to_aai(vserver_list_url, "GET")
- if retcode > 0 or not content:
- self._logger.debug("AAI get %s response: %s" % (vserver_list_url, content))
- return (retcode, "AAI_REMOVE_FAIL", "authenticate fails:%s, %s, %s" %
- (cloud_owner, regionid, v2_token_resp_json))
-
- content = json.JSONDecoder().decode(content)
- vservers = content['vserver']
- for vserver in vservers:
- if vserver['vserver-id'] not in vserver_list:
- continue
-
- try:
- # iterate vport, except will be raised if no l-interface exist
- for vport in vserver['l-interfaces']['l-interface']:
- # delete vport
- vport_delete_url = \
- aai_cloud_region + \
- "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \
- % (vserver['vserver-id'], vport['interface-name'],
- vport['resource-version'])
-
- restcall.req_to_aai(vport_delete_url, "DELETE")
- except Exception:
- pass
-
- try:
- # delete vserver
- vserver_delete_url = \
- aai_cloud_region + \
- "/vservers/vserver/%s?resource-version=%s" \
- % (vserver['vserver-id'], vserver['resource-version'])
-
- restcall.req_to_aai(vserver_delete_url, "DELETE")
- except Exception:
- continue
-
- return (retcode, "AAI_REMOVE_COMPLETE", "succeed")
- except Exception:
- self._logger.error(traceback.format_exc())
- return None
- pass