Diffstat (limited to 'share/starlingx_base')
-rw-r--r--  share/starlingx_base/registration/registration.py  226
-rw-r--r--  share/starlingx_base/resource/__init__.py            10
-rw-r--r--  share/starlingx_base/resource/capacity.py            97
-rw-r--r--  share/starlingx_base/resource/infra_workload.py     619
4 files changed, 876 insertions, 76 deletions
diff --git a/share/starlingx_base/registration/registration.py b/share/starlingx_base/registration/registration.py
index 255b6689..fa79e5b3 100644
--- a/share/starlingx_base/registration/registration.py
+++ b/share/starlingx_base/registration/registration.py
@@ -25,14 +25,18 @@ from newton_base.registration import registration as newton_registration
from rest_framework import status
from rest_framework.response import Response
from common.msapi import extsys
+from common.msapi import helper
from keystoneauth1.exceptions import HttpError
from newton_base.util import VimDriverUtils
from common.utils import restcall
-from threading import Thread
+from django.core.cache import cache
logger = logging.getLogger(__name__)
-# DEBUG=True
+# global var: audit thread
+gAZCapAuditThread = helper.MultiCloudThreadHelper()
+
+# DEBUG=True
# APIv0 handler upgrading: leverage APIv1 handler
class APIv0Registry(newton_registration.Registry):
@@ -45,14 +49,24 @@ class APIv0Registry(newton_registration.Registry):
self._logger.info("registration with : %s" % vimid)
# vim registration will trigger the start the audit of AZ capacity
- gAZCapAuditThread.addv0(vimid)
+        worker_self = InfraResourceAuditor(
+            settings.MULTICLOUD_API_V1_PREFIX,
+            settings.AAI_BASE_URL
+        )
+        backlog_item = {
+            "id": vimid,
+            "worker": worker_self.azcap_audit,
+            "payload": (worker_self, vimid),
+            "repeat": 5 * 1000000,  # repeat every 5 seconds (interval in microseconds)
+        }
+ gAZCapAuditThread.add(backlog_item)
if 0 == gAZCapAuditThread.state():
gAZCapAuditThread.start()
return super(APIv0Registry, self).post(request, vimid)
def delete(self, request, vimid=""):
self._logger.debug("unregister cloud region: %s" % vimid)
- gAZCapAuditThread.removev0(vimid)
+ gAZCapAuditThread.remove(vimid)
return super(APIv0Registry, self).delete(request, vimid)
@@ -63,8 +77,8 @@ class Registry(APIv0Registry):
class APIv1Registry(newton_registration.Registry):
def __init__(self):
- super(APIv1Registry, self).__init__()
self.register_helper = RegistryHelper(settings.MULTICLOUD_API_V1_PREFIX, settings.AAI_BASE_URL)
+ super(APIv1Registry, self).__init__()
# self._logger = logger
def post(self, request, cloud_owner="", cloud_region_id=""):
@@ -75,7 +89,17 @@ class APIv1Registry(newton_registration.Registry):
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
# vim registration will trigger the start the audit of AZ capacity
- gAZCapAuditThread.addv0(vimid)
+ worker_self = InfraResourceAuditor(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
+ )
+ backlog_item = {
+ "id": vimid,
+ "worker": worker_self.azcap_audit,
+ "payload": (worker_self, vimid),
+            "repeat": 5 * 1000000,  # repeat every 5 seconds (interval in microseconds)
+ }
+ gAZCapAuditThread.add(backlog_item)
if 0 == gAZCapAuditThread.state():
gAZCapAuditThread.start()
@@ -96,7 +120,7 @@ class APIv1Registry(newton_registration.Registry):
% (cloud_owner, cloud_region_id))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- gAZCapAuditThread.removev0(vimid)
+ gAZCapAuditThread.remove(vimid)
return super(APIv1Registry, self).delete(request, vimid)
@@ -108,7 +132,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
super(RegistryHelper, self).__init__(multicloud_prefix, aai_base_url)
# self._logger = logger
- def registry(self, vimid=""):
+ def registryV0(self, vimid=""):
'''
extend base method
'''
@@ -133,7 +157,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
viminfo, tenant_name=viminfo['tenant'])
# discover the regions, expect it always returns a list (even empty list)
- # cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
# region_ids = self._discover_regions(cloud_owner, cloud_region_id, sess, viminfo)
region_ids = self._discover_regions(vimid, sess, viminfo)
@@ -299,8 +323,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
return 1 # unknown cloud owner,region_id
# def _discover_regions(self, cloud_owner="", cloud_region_id="",
- def _discover_regions(self, vimid="",
- session=None, viminfo=None):
+ def _discover_regions(self, vimid, session=None, viminfo=None):
try:
regions = []
# vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
@@ -336,7 +359,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
return []
-class InfraResourceAuditor(object):
+class InfraResourceAuditor(newton_registration.RegistryHelper):
def __init__(self, multicloud_prefix, aai_base_url):
self.proxy_prefix = multicloud_prefix
@@ -344,70 +367,121 @@ class InfraResourceAuditor(object):
self._logger = logger
# super(InfraResourceAuditor, self).__init__();
- def azcap_audit(self, vimid=""):
- # now retrieve the latest AZ cap info
- # TBD
-
- # store the cap info into cache
- # TBD
- pass
-
-
-class AuditorHelperThread(threading.Thread):
- '''
- thread to register infrastructure resource into AAI
- '''
-
- def __init__(self, audit_helper):
- threading.Thread.__init__(self)
- self.daemon = True
- self.duration = 0
- self.helper = audit_helper
-
- # The set of IDs of cloud regions, format:
- # v0: "owner1_regionid1"
- self.queuev0 = set()
- self.lock = threading.Lock()
- self.state_ = 0 # 0: stopped, 1: started
-
- def addv0(self, vimid=""):
- self.lock.acquire()
- self.queuev0.add(vimid)
- self.lock.release()
- return len(self.queuev0)
-
- def removev0(self, vimid):
- '''
- discard cloud region from list without raise any exception
- '''
- self.queuev0.discard(vimid)
-
- def resetv0(self):
- self.queuev0.clear()
-
- def countv0(self):
- return len(self.queuev0)
-
- def state(self):
- return self.state_
-
- def run(self):
- logger.debug("Start the Audition thread")
- self.state_ = 1
- while self.helper and self.countv0() > 0:
- for vimidv0 in self.queuev0:
- self.helper(vimidv0)
- # sleep for a while in seconds
- time.sleep(5)
+ def azcap_audit(self, vimid):
+ viminfo = VimDriverUtils.get_vim_info(vimid)
+ if not viminfo:
+ self._logger.warn("azcap_audit no valid vimid: %s" % vimid)
+ return
- self.state_ = 0
- logger.debug("Stop the Audition thread")
- # end of processing
+ session = VimDriverUtils.get_session(
+ viminfo,
+ tenant_name=viminfo['tenant']
+ )
-# global Audition thread
-gAZCapAuditThread = AuditorHelperThread(
- InfraResourceAuditor(
- settings.MULTICLOUD_API_V1_PREFIX,
- settings.AAI_BASE_URL).azcap_audit
-)
+ # now retrieve the latest AZ cap info
+ try:
+            # get details of all hypervisors
+ hypervisors = self._get_list_resources(
+ "/os-hypervisors/detail", "compute", session,
+ viminfo, vimid, "hypervisors")
+
+ hypervisors_dict = {}
+ # for h in hypervisors:
+ # if not h.get("service", None):
+ # continue
+ # if not h.get("host", None):
+ # continue
+ # hypervisors_dict[h["service"]["host"]] = h
+ for h in hypervisors:
+ if not h.get("hypervisor_hostname", None):
+ continue
+ hypervisors_dict[h["hypervisor_hostname"]] = h
+
+ vimAzCacheKey = "cap_azlist_" + vimid
+ vimAzList = []
+ # cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ for az in self._get_list_resources(
+ "/os-availability-zone/detail", "compute", session,
+ viminfo, vimid,
+ "availabilityZoneInfo"):
+ az_info = {
+ 'availability-zone-name': az['zoneName'],
+ 'operational-status': az['zoneState']['available']
+ if az.get('zoneState') else '',
+ 'hypervisor-type': '',
+ }
+ # filter out the default az: "internal" and "nova"
+ azName = az.get('zoneName', None)
+ # comment it for test the registration process only
+ # if azName == 'nova':
+ # continue
+ if azName == 'internal':
+ continue
+ vimAzList.append(azName)
+
+ # get list of host names
+            pservers_info = [k for (k, v) in (az.get('hosts') or {}).items()]
+
+ # Get current cap info of azName
+ azCapCacheKey = "cap_" + vimid + "_" + azName
+ azCapInfoCacheStr = cache.get(azCapCacheKey)
+            azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else {}
+
+ for psname in pservers_info:
+ psinfo = hypervisors_dict.get(psname, None)
+ if not psinfo:
+ # warning: the pserver info not found
+ continue
+ # get current pserver cap info
+ psCapInfoCacheKey = "cap_" + vimid + "_" + psname
+ psCapInfoCacheStr = cache.get(psCapInfoCacheKey)
+                psCapInfoCache = json.loads(psCapInfoCacheStr) if psCapInfoCacheStr else {}
+
+ # compare latest info with cached one
+ vcpu_delta = 0
+ vcpu_used_delta = 0
+ mem_delta = 0
+ mem_free_delta = 0
+ localstorage_delta = 0
+ localstorage_free_delta = 0
+ if psinfo.get("vcpus", 0) != psCapInfoCache.get("vcpus", 0):
+ vcpu_delta += psinfo.get("vcpus", 0) \
+ - psCapInfoCache.get("vcpus", 0)
+ psCapInfoCache["vcpus"] = psinfo.get("vcpus", 0)
+ if psinfo.get("memory_mb", 0) != psCapInfoCache.get("memory_mb", 0):
+ mem_delta += psinfo.get("memory_mb", 0) \
+ - psCapInfoCache.get("memory_mb", 0)
+ psCapInfoCache["memory_mb"] = psinfo.get("memory_mb", 0)
+ if psinfo.get("local_gb", 0) != psCapInfoCache.get("local_gb", 0):
+ localstorage_delta += psinfo.get("local_gb", 0) \
+ - psCapInfoCache.get("local_gb", 0)
+ psCapInfoCache["local_gb"] = psinfo.get("local_gb", 0)
+ if psinfo.get("vcpus_used", 0) != psCapInfoCache.get("vcpus_used", 0):
+ vcpu_used_delta += psinfo.get("vcpus_used", 0)\
+ - psCapInfoCache.get("vcpus_used", 0)
+ psCapInfoCache["vcpus_used"] = psinfo.get("vcpus_used", 0)
+ if psinfo.get("free_ram_mb", 0) != psCapInfoCache.get("free_ram_mb", 0):
+ mem_free_delta += psinfo.get("free_ram_mb", 0)\
+ - psCapInfoCache.get("free_ram_mb", 0)
+ psCapInfoCache["free_ram_mb"] = psinfo.get("free_ram_mb", 0)
+ if psinfo.get("free_disk_gb", 0) != psCapInfoCache.get("free_disk_gb", 0):
+ localstorage_free_delta += psinfo.get("free_disk_gb", 0)\
+ - psCapInfoCache.get("free_disk_gb", 0)
+ psCapInfoCache["free_disk_gb"] = psinfo.get("free_disk_gb", 0)
+ pass
+
+ # now apply the delta to azCapInfo
+ azCapInfoCache["vcpus"] = azCapInfoCache.get("vcpus", 0) + vcpu_delta
+ azCapInfoCache["memory_mb"] = azCapInfoCache.get("memory_mb", 0) + mem_delta
+ azCapInfoCache["local_gb"] = azCapInfoCache.get("local_gb", 0) + localstorage_delta
+ azCapInfoCache["vcpus_used"] = azCapInfoCache.get("vcpus_used", 0) + vcpu_used_delta
+ azCapInfoCache["free_ram_mb"] = azCapInfoCache.get("free_ram_mb", 0) + mem_free_delta
+ azCapInfoCache["free_disk_gb"] = azCapInfoCache.get("free_disk_gb", 0) + localstorage_free_delta
+
+ # update the cache
+ cache.set(azCapCacheKey, json.dumps(azCapInfoCache), 3600 * 24)
+            cache.set(vimAzCacheKey, json.dumps(vimAzList), 3600 * 24)
+ except Exception as e:
+ self._logger.error("azcap_audit raise exception: %s" % e)
+ pass
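
Note: the AZ capacity snapshot written by azcap_audit above is consumed through Django's cache. A minimal sketch of a consumer, assuming Django settings are configured and the audit thread has already populated the keys; the vimid value is illustrative only:

    import json
    from django.core.cache import cache

    vimid = "CloudOwner_RegionOne"  # hypothetical cloud-region id

    # "cap_azlist_<vimid>" holds a JSON list of AZ names;
    # "cap_<vimid>_<azName>" holds a JSON dict of aggregated hypervisor stats
    az_names = json.loads(cache.get("cap_azlist_" + vimid) or "[]")
    for az_name in az_names:
        az_cap = json.loads(cache.get("cap_%s_%s" % (vimid, az_name)) or "{}")
        print(az_name,
              "vcpus:", az_cap.get("vcpus", 0),
              "vcpus_used:", az_cap.get("vcpus_used", 0),
              "free_ram_mb:", az_cap.get("free_ram_mb", 0))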
diff --git a/share/starlingx_base/resource/__init__.py b/share/starlingx_base/resource/__init__.py
new file mode 100644
index 00000000..825091ff
--- /dev/null
+++ b/share/starlingx_base/resource/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) 2017-2019 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/share/starlingx_base/resource/capacity.py b/share/starlingx_base/resource/capacity.py
new file mode 100644
index 00000000..fc926d00
--- /dev/null
+++ b/share/starlingx_base/resource/capacity.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2017-2019 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import traceback
+import json
+
+from django.core.cache import cache
+
+from newton_base.resource import capacity as newton_capacity
+from common.exceptions import VimDriverNewtonException
+from newton_base.util import VimDriverUtils
+
+from keystoneauth1.exceptions import HttpError
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.views import APIView
+from common.msapi import extsys
+
+logger = logging.getLogger(__name__)
+
+
+class CapacityCheck(newton_capacity.CapacityCheck):
+ def __init__(self):
+ super(CapacityCheck, self).__init__()
+ self._logger = logger
+
+ def post(self, request, vimid=""):
+ self._logger.info("vimid, data> %s, %s" % (vimid, request.data))
+ self._logger.debug("META> %s" % request.META)
+
+ try:
+ hasEnoughResource = self.get_tenant_cap_info(vimid, request.data)
+ azCapInfo = self.get_az_cap_info(vimid)
+ self._logger.info("RESP with data> result:%s" % hasEnoughResource)
+ return Response(data={'result': hasEnoughResource, 'AZs': azCapInfo}, status=status.HTTP_200_OK)
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(data={'result': False, 'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ def get_az_cap_info(self, vimid):
+ azCapInfo = []
+ viminfo = VimDriverUtils.get_vim_info(vimid)
+ if not viminfo:
+            self._logger.warn("get_az_cap_info: no valid vimid: %s" % vimid)
+            return azCapInfo
+
+ session = VimDriverUtils.get_session(
+ viminfo,
+ tenant_name=viminfo['tenant']
+ )
+ try:
+ # get list of AZ
+ vimAzCacheKey = "cap_azlist_" + vimid
+ vimAzListCacheStr = cache.get(vimAzCacheKey)
+ vimAzListCache = json.loads(vimAzListCacheStr) if vimAzListCacheStr else []
+ for azName in vimAzListCache:
+ azCapCacheKey = "cap_" + vimid + "_" + azName
+ azCapInfoCacheStr = cache.get(azCapCacheKey)
+                azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else {}
+
+                azCapInfoItem = {
+                    "availability-zone-name": azName,
+                    "vCPUAvail": azCapInfoCache.get("vcpus", 0) - azCapInfoCache.get("vcpus_used", 0),
+                    "vCPUTotal": azCapInfoCache.get("vcpus", 0),
+                    "MemoryAvail": azCapInfoCache.get("free_ram_mb", 0),
+                    "MemoryTotal": azCapInfoCache.get("memory_mb", 0),
+                    "StorageAvail": azCapInfoCache.get("free_disk_gb", 0),
+                    "StorageTotal": azCapInfoCache.get("local_gb", 0)
+                }
+                azCapInfo.append(azCapInfoItem)
+
+            return azCapInfo
+        except Exception as e:
+            self._logger.error(traceback.format_exc())
+            return azCapInfo
+
+class APIv1CapacityCheck(CapacityCheck):
+ def __init__(self):
+ super(APIv1CapacityCheck, self).__init__()
+ # self._logger = logger
+
+ def post(self, request, cloud_owner="", cloud_region_id=""):
+ self._logger.info("vimid, data> %s,%s, %s" % (cloud_owner, cloud_region_id, request.data))
+ self._logger.debug("META> %s" % request.META)
+
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return super(APIv1CapacityCheck, self).post(request, vimid)
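
Note: a sketch of how the v1 capacity check could be exercised over HTTP. The host and route below are assumptions (consult the service's urls.py for the real path), and the payload fields follow the usual vCPU/Memory/Storage capacity-check convention rather than anything shown in this patch:

    import requests

    # hypothetical MSB endpoint and route; adjust to the deployed routing
    url = ("http://msb.example.org/api/multicloud-starlingx/v1/"
           "CloudOwner/RegionOne/capacity_check")
    payload = {"vCPU": 4, "Memory": 8192, "Storage": 200}

    resp = requests.post(url, json=payload)
    # expected shape: {"result": true, "AZs": [{"availability-zone-name": "...",
    #                  "vCPUAvail": 0, "vCPUTotal": 0, ...}, ...]}
    print(resp.status_code, resp.json())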
diff --git a/share/starlingx_base/resource/infra_workload.py b/share/starlingx_base/resource/infra_workload.py
new file mode 100644
index 00000000..4da0a0d0
--- /dev/null
+++ b/share/starlingx_base/resource/infra_workload.py
@@ -0,0 +1,619 @@
+# Copyright (c) 2017-2018 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import json
+import traceback
+import uuid
+
+from django.conf import settings
+from common.exceptions import VimDriverNewtonException
+# from newton_base.util import VimDriverUtils
+
+from keystoneauth1.exceptions import HttpError
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.views import APIView
+from common.msapi import extsys
+from common.msapi.helper import Helper as helper
+
+from common.utils import restcall
+from newton_base.registration import registration as newton_registration
+from newton_base.resource import infra_workload as newton_infra_workload
+from newton_base.util import VimDriverUtils
+
+logger = logging.getLogger(__name__)
+
+# global var: audit thread
+# the backlog item id is the workloadid, so a POST for workloadid1 followed by
+# a DELETE for workloadid1 replaces the previous backlog item
+gInfraWorkloadThread = helper.MultiCloudThreadHelper()
+
+class InfraWorkload(newton_infra_workload.InfraWorkload):
+ def __init__(self):
+ self._logger = logger
+
+ def post(self, request, vimid="", workloadid=""):
+ self._logger.info("vimid: %s, stackid:%s" % (vimid, workloadid))
+ self._logger.info("data: %s" % (request.data))
+ self._logger.debug("META: %s" % request.META)
+
+ try:
+ resp_template = {}
+ os_status = status.HTTP_500_INTERNAL_SERVER_ERROR
+
+ if workloadid == "":
+ return super(InfraWorkload, self).post(request, vimid)
+ else:
+ # a post to heatbridge
+ worker_self = InfraWorkloadHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
+ )
+ backlog_item = {
+                    "id": workloadid,
+                    "worker": worker_self.workload_update,
+                    "payload": (worker_self, vimid, workloadid, request.data),
+                    "repeat": 0,  # one-time job
+                    # status tuple: (retcode, status enum, message);
+                    # retcode 0 is OK, otherwise an HTTP error code
+                    "status": (0, "AAI_UPDATE_IN_PROGRESS", "Pending to schedule")
+ }
+ gInfraWorkloadThread.add(backlog_item)
+ if 0 == gInfraWorkloadThread.state():
+ gInfraWorkloadThread.start()
+
+ # now query the progress
+ backlog_item = gInfraWorkloadThread.get(workloadid)
+ if not backlog_item:
+ # backlog item not found
+ return Response(
+ data={
+ 'workload_status': "AAI_UPDATE_FAIL",
+ "message": "AAI update failed"
+ },
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR
+ )
+ else:
+                progress = backlog_item.get(
+                    "status",
+                    (status.HTTP_500_INTERNAL_SERVER_ERROR, "AAI_UPDATE_FAIL", "Status not found"))
+ progress_code = progress[0]
+ progress_status = progress[1]
+ progress_msg = progress[2]
+ return Response(data={'workload_status': progress_status, "message": progress_msg},
+ status=status.HTTP_201_CREATED
+ if progress_code == 0 else progress_code
+ )
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ def get(self, request, vimid="", workloadid=""):
+ self._logger.info("vimid, workload id: %s, %s" % (vimid, workloadid))
+ self._logger.debug("META: %s" % request.META)
+
+ try:
+
+ if workloadid == "":
+                raise VimDriverNewtonException(
+                    message="workload_id is not specified",
+                    content="workload_id must be specified to query the workload status",
+                    status_code=status.HTTP_400_BAD_REQUEST)
+
+ # now query the progress
+ backlog_item = gInfraWorkloadThread.get(workloadid)
+ if not backlog_item:
+ # backlog item not found
+ return Response(
+ data={
+ 'workload_status': "AAI_UPDATE_FAIL",
+ "message": "AAI update failed"
+ },
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR
+ )
+ else:
+                progress = backlog_item.get(
+                    "status",
+                    (status.HTTP_500_INTERNAL_SERVER_ERROR, "AAI_UPDATE_FAIL", "Status not found"))
+ progress_code = progress[0]
+ progress_status = progress[1]
+ progress_msg = progress[2]
+ return Response(data={'workload_status': progress_status, "message": progress_msg},
+ status=status.HTTP_201_CREATED
+ if progress_code == 0 else progress_code
+ )
+
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ def delete(self, request, vimid="", workloadid=""):
+ self._logger.info("vimid, workload id: %s, %s" % (vimid, workloadid))
+ self._logger.debug("META: %s" % request.META)
+
+        try:
+            if workloadid == "":
+                raise VimDriverNewtonException(
+                    message="workload_id is not specified",
+                    content="workload_id must be specified to delete the workload",
+                    status_code=status.HTTP_400_BAD_REQUEST)
+
+            super(InfraWorkload, self).delete(request, vimid, workloadid)
+
+ # a post to heatbridge delete
+ worker_self = InfraWorkloadHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
+ )
+ backlog_item = {
+                "id": workloadid,
+                "worker": worker_self.workload_delete,
+                "payload": (worker_self, vimid, workloadid, request.data),
+                "repeat": 0,  # one-time job
+                # status tuple: (retcode, status enum, message);
+                # retcode 0 is OK, otherwise an HTTP error code
+                "status": (0, "AAI_REMOVE_IN_PROGRESS", "Pending to schedule")
+ }
+ gInfraWorkloadThread.add(backlog_item)
+ if 0 == gInfraWorkloadThread.state():
+ gInfraWorkloadThread.start()
+
+ # now query the progress
+ backlog_item = gInfraWorkloadThread.get(workloadid)
+ if not backlog_item:
+ # backlog item not found
+ return Response(
+ data={
+ 'workload_status': "STACK_REMOVE_FAILED",
+                        "message": "AAI remove failed"
+ },
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR
+ )
+ else:
+                progress = backlog_item.get(
+                    "status",
+                    (status.HTTP_500_INTERNAL_SERVER_ERROR, "AAI_REMOVE_FAIL", "Status not found"))
+ progress_code = progress[0]
+ progress_status = progress[1]
+ progress_msg = progress[2]
+ return Response(data={'workload_status': progress_status, "message": progress_msg},
+ status=status.HTTP_200_OK
+ if progress_code == 0 else progress_code
+ )
+
+ # # assume the workload_type is heat
+ # stack_id = workloadid
+ # cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # # should go via multicloud proxy so that
+ # # the selflink is updated by multicloud
+ # retcode, v2_token_resp_json, os_status = \
+ # helper.MultiCloudIdentityHelper(
+ # settings.MULTICLOUD_API_V1_PREFIX,
+ # cloud_owner, regionid, "/v2.0/tokens")
+ #
+ # if retcode > 0 or not v2_token_resp_json:
+ # logger.error("authenticate fails:%s, %s, %s" %
+ # (cloud_owner, regionid, v2_token_resp_json))
+ # return
+ # # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+ # # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
+ #
+ # # get stack status
+ # service_type = "orchestration"
+ # resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
+ # self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ # retcode, content, os_status = \
+ # helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ # v2_token_resp_json,
+ # service_type, resource_uri,
+ # None, "GET")
+ #
+ # stacks = content.get('stacks', []) \
+ # if retcode == 0 and content else []
+ # # assume there is at most 1 stack returned
+ # # since it was filtered by id
+ # stack1 = stacks[0] if stacks else None
+ # stack_status = ""
+ #
+ # if stack1 and 'CREATE_COMPLETE' == stack1['stack_status']:
+ # # delete the stack
+ # resource_uri = "/stacks/%s/%s" % \
+ # (stack1['stack_name'], stack1['id'])
+ # self._logger.info("delete stack, URI:%s" % resource_uri)
+ # retcode, content, os_status = \
+ # helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ # v2_token_resp_json,
+ # service_type, resource_uri,
+ # None, "DELETE")
+ # # if retcode == 0:
+ # # stack_status = "DELETE_IN_PROCESS"
+ # # # and update AAI inventory by heatbridge-delete
+ # # self.heatbridge_delete(request, vimid, stack1['id'])
+ #
+ # # stub response
+ # resp_template = {
+ # "template_type": "HEAT",
+ # "workload_id": stack_id,
+ # "workload_status": stack_status
+ # }
+ #
+ # if retcode > 0:
+ # resp_template["workload_response"] = content
+ #
+ # self._logger.info("RESP with data> result:%s" % resp_template)
+ # return Response(status=os_status)
+ # except VimDriverNewtonException as e:
+ # self._logger.error("Plugin exception> status:%s,error:%s"
+ # % (e.status_code, e.content))
+ # return Response(data={'error': e.content}, status=e.status_code)
+ # except HttpError as e:
+ # self._logger.error("HttpError: status:%s, response:%s" %
+ # (e.http_status, e.response.json()))
+ # return Response(data=e.response.json(), status=e.http_status)
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+
+class APIv1InfraWorkload(InfraWorkload):
+ def __init__(self):
+ super(APIv1InfraWorkload, self).__init__()
+ # self._logger = logger
+
+ def post(self, request, cloud_owner="", cloud_region_id=""):
+ # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
+ # (cloud_owner, cloud_region_id, request.data))
+ # self._logger.debug("META: %s" % request.META)
+
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return super(APIv1InfraWorkload, self).post(request, vimid)
+
+ def get(self, request, cloud_owner="", cloud_region_id="", requri=""):
+ # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
+ # (cloud_owner, cloud_region_id, request.data))
+ # self._logger.debug("META: %s" % request.META)
+
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return super(APIv1InfraWorkload, self).get(request, vimid, requri)
+
+ def delete(self, request, cloud_owner="", cloud_region_id="", requri=""):
+ # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
+ # (cloud_owner, cloud_region_id, request.data))
+ # self._logger.debug("META: %s" % request.META)
+
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return super(APIv1InfraWorkload, self).delete(request, vimid, requri)
+
+
+class InfraWorkloadHelper(newton_registration.RegistryHelper):
+
+    def __init__(self, multicloud_prefix, aai_base_url, vimid="", workloadid=""):
+        self.proxy_prefix = multicloud_prefix
+        self.aai_base_url = aai_base_url
+        self._logger = logger
+        self.vimid = vimid
+        self.workloadid = workloadid
+        super(InfraWorkloadHelper, self).__init__(multicloud_prefix, aai_base_url)
+
+ def workload_create(self, vimid, workload_data):
+ data = workload_data
+ oof_directive = data.get("oof_directives", {})
+ template_type = data.get("template_type", None)
+ template_data = data.get("template_data", {})
+ resp_template = None
+ if template_type and "heat" == template_type.lower():
+ # update heat parameters from oof_directive
+ parameters = template_data.get("parameters", {})
+
+ for directive in oof_directive.get("directives", []):
+ if directive["type"] == "vnfc":
+ for directive2 in directive.get("directives", []):
+ if directive2["type"] in ["flavor_directives",
+ "sriovNICNetwork_directives"]:
+ for attr in directive2.get("attributes", []):
+ flavor_label = attr.get("attribute_name", None)
+ flavor_value = attr.get("attribute_value", None)
+ if flavor_label in parameters:
+ parameters[flavor_label] = flavor_value
+                        else:
+                            self._logger.warn(
+                                "parameter not found in template: %s" %
+                                flavor_label)
+
+ # update parameters
+ template_data["parameters"] = parameters
+
+ # reset to make sure "files" are empty
+ template_data["files"] = {}
+
+ # authenticate
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that
+ # the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens"
+ )
+
+ if retcode > 0 or not v2_token_resp_json:
+ logger.error("authenticate fails:%s,%s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+ return (
+ retcode,
+ "authenticate fails:%s,%s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json)
+ )
+
+ # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+
+ service_type = "orchestration"
+ resource_uri = "/stacks"
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ template_data, "POST")
+
+ stack1 = content.get('stack', None) \
+ if retcode == 0 and content else None
+
+ stackid = stack1["id"] if stack1 else ""
+ resp_template = {
+ "template_type": template_type,
+ "workload_id": stackid,
+ "template_response": content
+ }
+ self._logger.info("RESP with data> result:%s" % resp_template)
+ return (0, resp_template)
+
+ def workload_update(self, vimid, stack_id, otherinfo):
+ '''
+ update heat resource to AAI for the specified cloud region and tenant
+ The resources includes: vserver, vserver/l-interface,
+ '''
+
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+ if retcode > 0:
+ logger.error("authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+
+ return (retcode, "AAI_UPDATE_FAIL", "authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+ tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+ # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
+
+ # common prefix
+ aai_cloud_region = \
+ "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
+ % (cloud_owner, regionid, tenant_id)
+
+ # get stack resource
+ service_type = "orchestration"
+ resource_uri = "/stacks/%s/resources" % (stack_id)
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ resources = content.get('resources', []) if retcode == 0 and content else []
+
+ # find and update resources
+ transactions = []
+ for resource in resources:
+ if resource.get('resource_status', None) != "CREATE_COMPLETE":
+ # this resource is not ready yet
+ continue
+ if resource.get('resource_type', None) == 'OS::Nova::Server':
+ # retrieve vserver details
+ service_type = "compute"
+ resource_uri = "/servers/%s" % (resource['physical_resource_id'])
+ self._logger.info("retrieve vserver detail, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ self._logger.debug(" resp data:%s" % content)
+ vserver_detail = content.get('server', None) if retcode == 0 and content else None
+ if vserver_detail:
+ # compose inventory entry for vserver
+ vserver_link = ""
+ for link in vserver_detail['links']:
+ if link['rel'] == 'self':
+ vserver_link = link['href']
+ break
+ pass
+
+                    # note: relationship-list to flavor/image is not updated yet
+                    # note: volumes are not updated yet
+                    # note: relationship-list to vnf will be handled elsewhere
+ aai_resource = {
+ 'body': {
+ 'vserver-name': vserver_detail['name'],
+ 'vserver-name2': vserver_detail['name'],
+ "vserver-id": vserver_detail['id'],
+ "vserver-selflink": vserver_link,
+ "prov-status": vserver_detail['status']
+ },
+ "uri": aai_cloud_region + "/vservers/vserver/%s" % (vserver_detail['id'])
+ }
+
+ try:
+ # then update the resource
+ retcode, content, status_code = \
+ restcall.req_to_aai(aai_resource['uri'],
+ "PUT", content=aai_resource['body'])
+
+ if retcode == 0 and content:
+ content = json.JSONDecoder().decode(content)
+ self._logger.debug("AAI update %s response: %s" %
+ (aai_resource['uri'], content))
+ except Exception:
+ self._logger.error(traceback.format_exc())
+ pass
+
+ aai_resource_transactions = {"put": [aai_resource]}
+ transactions.append(aai_resource_transactions)
+ # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
+ pass
+
+ for resource in resources:
+ if resource.get('resource_status', None) != "CREATE_COMPLETE":
+ continue
+ if resource.get('resource_type', None) == 'OS::Neutron::Port':
+ # retrieve vport details
+ service_type = "network"
+ resource_uri = "/v2.0/ports/%s" % (resource['physical_resource_id'])
+ self._logger.info("retrieve vport detail, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ self._logger.debug(" resp data:%s" % content)
+
+ vport_detail = content.get('port', None) if retcode == 0 and content else None
+ if vport_detail:
+ # compose inventory entry for vport
+ # note: l3-interface-ipv4-address-list,
+ # l3-interface-ipv6-address-list are not updated yet
+                    # note: network-name is not updated yet since the detail
+                    # comes with network-id only
+ aai_resource = {
+ "body": {
+ "interface-name": vport_detail['name'],
+ "interface-id": vport_detail['id'],
+ "macaddr": vport_detail['mac_address']
+ },
+ 'uri':
+ aai_cloud_region + "/vservers/vserver/%s/l-interfaces/l-interface/%s"
+ % (vport_detail['device_id'], vport_detail['name'])
+ }
+ try:
+ # then update the resource
+ retcode, content, status_code = \
+ restcall.req_to_aai(aai_resource['uri'], "PUT",
+ content=aai_resource['body'])
+
+ if retcode == 0 and content:
+ content = json.JSONDecoder().decode(content)
+ self._logger.debug("AAI update %s response: %s" %
+ (aai_resource['uri'], content))
+ except Exception:
+ self._logger.error(traceback.format_exc())
+ pass
+
+ aai_resource_transactions = {"put": [aai_resource]}
+ transactions.append(aai_resource_transactions)
+ # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
+
+ pass
+
+ # aai_transactions = {"transactions": transactions}
+ # self._logger.debug("aai_transactions :%s" % aai_transactions)
+        return (retcode, "AAI_UPDATE_COMPLETE", "succeeded")
+
+ def workload_delete(self, vimid, stack_id, otherinfo):
+ '''
+ remove heat resource from AAI for the specified cloud region and tenant
+
+ '''
+
+ # enumerate the resources
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+        if retcode > 0:
+            logger.error("authenticate fails:%s, %s, %s" %
+                         (cloud_owner, regionid, v2_token_resp_json))
+            return (retcode, "AAI_REMOVE_FAIL", "authenticate fails:%s, %s, %s" %
+                    (cloud_owner, regionid, v2_token_resp_json))
+
+ tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+ # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
+
+ # common prefix
+ aai_cloud_region = \
+ "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
+ % (cloud_owner, regionid, tenant_id)
+
+ # get stack resource
+ service_type = "orchestration"
+ resource_uri = "/stacks/%s/resources" % (stack_id)
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+ resources = content.get('resources', []) \
+ if retcode == 0 and content else []
+
+ vserver_list = [resource['physical_resource_id'] for resource in resources
+ if resource.get('resource_type', None) == 'OS::Nova::Server']
+
+ try:
+ # get list of vservers
+ vserver_list_url = aai_cloud_region + "/vservers?depth=all"
+ retcode, content, status_code = \
+ restcall.req_to_aai(vserver_list_url, "GET")
+ if retcode > 0 or not content:
+ self._logger.debug("AAI get %s response: %s" % (vserver_list_url, content))
+ return (retcode, "AAI_REMOVE_FAIL", "authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+
+ content = json.JSONDecoder().decode(content)
+ vservers = content['vserver']
+ for vserver in vservers:
+ if vserver['vserver-id'] not in vserver_list:
+ continue
+
+ try:
+                    # iterate vports; an exception is raised if no l-interface exists
+ for vport in vserver['l-interfaces']['l-interface']:
+ # delete vport
+ vport_delete_url = \
+ aai_cloud_region + \
+ "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \
+ % (vserver['vserver-id'], vport['interface-name'],
+ vport['resource-version'])
+
+ restcall.req_to_aai(vport_delete_url, "DELETE")
+ except Exception:
+ pass
+
+ try:
+ # delete vserver
+ vserver_delete_url = \
+ aai_cloud_region + \
+ "/vservers/vserver/%s?resource-version=%s" \
+ % (vserver['vserver-id'], vserver['resource-version'])
+
+ restcall.req_to_aai(vserver_delete_url, "DELETE")
+ except Exception:
+ continue
+
+            return (retcode, "AAI_REMOVE_COMPLETE", "succeeded")
+        except Exception as e:
+            self._logger.error(traceback.format_exc())
+            return (status.HTTP_500_INTERNAL_SERVER_ERROR, "AAI_REMOVE_FAIL", str(e))
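
Note: both files rely on the backlog-item contract of helper.MultiCloudThreadHelper. A minimal sketch of that contract as used in this patch: "worker" must be a callable reference (not the result of a call, which was the bug fixed above), "id" de-duplicates items, and "repeat" is in microseconds with 0 meaning a one-shot job. How "payload" is dispatched to the worker lives inside common.msapi.helper and is assumed here rather than shown:

    from common.msapi import helper

    class DemoAuditor(object):
        def azcap_audit(self, vimid):
            print("auditing %s" % vimid)

    worker_self = DemoAuditor()
    vimid = "CloudOwner_RegionOne"  # hypothetical cloud-region id

    audit_thread = helper.MultiCloudThreadHelper()
    audit_thread.add({
        "id": vimid,                        # items with the same id replace each other
        "worker": worker_self.azcap_audit,  # a callable reference, not azcap_audit()
        "payload": (worker_self, vimid),    # mirrors the pattern used in the patch
        "repeat": 5 * 1000000,              # every 5 seconds
    })
    if 0 == audit_thread.state():           # 0: stopped, per the usage above
        audit_thread.start()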