Diffstat (limited to 'windriver')
-rw-r--r--  windriver/titanium_cloud/pub/config/log.yml | 4
-rw-r--r--  windriver/titanium_cloud/registration/tests/test_registration2.py | 11
-rw-r--r--  windriver/titanium_cloud/registration/views/__init__.py | 13
-rw-r--r--  windriver/titanium_cloud/registration/views/registration.py | 322
-rw-r--r--  windriver/titanium_cloud/resource/tests/tests_infra_workload.py | 6
-rw-r--r--  windriver/titanium_cloud/resource/views/__init__.py | 13
-rw-r--r--  windriver/titanium_cloud/resource/views/capacity.py | 159
-rw-r--r--  windriver/titanium_cloud/resource/views/infra_workload.py | 554
-rw-r--r--  windriver/titanium_cloud/urls.py | 6
-rw-r--r--  windriver/tox.ini | 2
10 files changed, 11 insertions, 1079 deletions
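
The substance of this commit: the windriver/titanium_cloud plugin drops its local registration, capacity and infra_workload view modules and reuses the shared starlingx_base / newton_base implementations; the added starlingx_base logger in log.yml and the extra *starlingx_base* entry in the tox coverage --omit list follow from that. A minimal sketch of the resulting import pattern is below (module paths are taken from the urls.py and test hunks in this diff; view class names other than InfraWorkload/APIv1InfraWorkload are assumptions carried over from the deleted files):

# Sketch only -- not part of the commit. Module paths come from the hunks below;
# class names marked "assumed" are carried over from the deleted titanium_cloud views.
from starlingx_base.registration import registration   # replaces titanium_cloud.registration.views.registration
from newton_base.resource import capacity              # replaces titanium_cloud.resource.views.capacity
from newton_base.resource import infra_workload        # replaces titanium_cloud.resource.views.infra_workload

registry_view = registration.APIv1Registry.as_view()         # assumed class name
capacity_view = capacity.APIv1CapacityCheck.as_view()        # assumed class name
workload_view = infra_workload.APIv1InfraWorkload.as_view()  # confirmed by tests_infra_workload.py
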
diff --git a/windriver/titanium_cloud/pub/config/log.yml b/windriver/titanium_cloud/pub/config/log.yml
index 5cf448d4..ca3a5497 100644
--- a/windriver/titanium_cloud/pub/config/log.yml
+++ b/windriver/titanium_cloud/pub/config/log.yml
@@ -14,6 +14,10 @@ loggers:
         handlers: [titanium_cloud_handler]
         level: "DEBUG"
         propagate: False
+    starlingx_base:
+        handlers: [titanium_cloud_handler]
+        level: "DEBUG"
+        propagate: False
 handlers:
     titanium_cloud_handler:
         level: "DEBUG"
diff --git a/windriver/titanium_cloud/registration/tests/test_registration2.py b/windriver/titanium_cloud/registration/tests/test_registration2.py
index 9f0cd4d2..840f982f 100644
--- a/windriver/titanium_cloud/registration/tests/test_registration2.py
+++ b/windriver/titanium_cloud/registration/tests/test_registration2.py
@@ -18,17 +18,8 @@ import unittest
 # import json
 from django.test import Client
 from rest_framework import status
-
-# from django.core.cache import cache
-# from common.msapi import extsys
-
 from common.utils import restcall
-# from newton_base.tests import mock_info
-# from newton_base.tests import test_base
-# from newton_base.util import VimDriverUtils
-
-# from newton_base.registration import registration as newton_registration
-from titanium_cloud.registration.views import registration
+from starlingx_base.registration import registration
 from newton_base.tests import test_base
 
 MOCK_VIM_INFO = {
diff --git a/windriver/titanium_cloud/registration/views/__init__.py b/windriver/titanium_cloud/registration/views/__init__.py
deleted file mode 100644
index ae1ce9db..00000000
--- a/windriver/titanium_cloud/registration/views/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/windriver/titanium_cloud/registration/views/registration.py b/windriver/titanium_cloud/registration/views/registration.py
deleted file mode 100644
index 17992af7..00000000
--- a/windriver/titanium_cloud/registration/views/registration.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import logging -import json -import uuid -import traceback - -from django.conf import settings - -from newton_base.registration import registration as newton_registration -# from common.exceptions import VimDriverNewtonException -from rest_framework import status -from rest_framework.response import Response -from common.msapi import extsys -from keystoneauth1.exceptions import HttpError -from newton_base.util import VimDriverUtils -from common.utils import restcall - -logger = logging.getLogger(__name__) - - -# DEBUG=True - - -class Registry(newton_registration.Registry): - def __init__(self): - super(Registry, self).__init__() - self.proxy_prefix = settings.MULTICLOUD_PREFIX - self.aai_base_url = settings.AAI_BASE_URL - # self._logger = logger - - def _get_ovsdpdk_capabilities(self, extra_specs, viminfo): - instruction_capability = {} - feature_uuid = uuid.uuid4() - - instruction_capability['hpa-capability-id'] = str(feature_uuid) - instruction_capability['hpa-feature'] = 'ovsDpdk' - instruction_capability['architecture'] = 'Intel64' - instruction_capability['hpa-version'] = 'v1' - - instruction_capability['hpa-feature-attributes'] = [] - instruction_capability['hpa-feature-attributes'].append( - {'hpa-attribute-key': 'dataProcessingAccelerationLibrary', - 'hpa-attribute-value': - '{{\"value\":\"{0}\"}}'.format("v17.02") - }) - return instruction_capability - - -class APIv1Registry(newton_registration.Registry): - def __init__(self): - super(APIv1Registry, self).__init__() - self.proxy_prefix = settings.MULTICLOUD_API_V1_PREFIX - self.aai_base_url = settings.AAI_BASE_URL - # self._logger = logger - - def _get_ovsdpdk_capabilities(self, extra_specs, viminfo): - instruction_capability = {} - feature_uuid = uuid.uuid4() - - instruction_capability['hpa-capability-id'] = str(feature_uuid) - instruction_capability['hpa-feature'] = 'ovsDpdk' - instruction_capability['architecture'] = 'Intel64' - instruction_capability['hpa-version'] = 'v1' - - instruction_capability['hpa-feature-attributes'] = [] - instruction_capability['hpa-feature-attributes'].append( - {'hpa-attribute-key': 'dataProcessingAccelerationLibrary', - 'hpa-attribute-value': - '{{\"value\":\"{0}\"}}'.format("v17.02") - }) - return instruction_capability - - def _update_cloud_region(self, cloud_owner, cloud_region_id, openstack_region_id, viminfo, session=None): - if cloud_owner and cloud_region_id: - self._logger.debug( - ("_update_cloud_region, %(cloud_owner)s" - "_%(cloud_region_id)s ") - % { - "cloud_owner": cloud_owner, - "cloud_region_id": cloud_region_id - }) - - # Note1: The intent is to populate the openstack region id into property: cloud-region.esr-system-info.openstackRegionId - # Note2: As temp solution: the openstack region id was put into AAI cloud-region["cloud-epa-caps"] - - resource_info = { - "cloud-owner": cloud_owner, - "cloud-region-id": cloud_region_id, - "cloud-type": viminfo["type"], - "cloud-region-version": viminfo["version"], - "identity-url": - self.proxy_prefix + "/%s_%s/identity/v2.0" % (cloud_owner, cloud_region_id) - if self.proxy_prefix[-3:] == "/v0" else - self.proxy_prefix + "/%s/%s/identity/v2.0" % (cloud_owner, cloud_region_id), - "complex-name": viminfo["complex-name"], - "cloud-extra-info": viminfo["cloud_extra_info"], - "cloud-epa-caps": openstack_region_id, - "esr-system-info-list": { - "esr-system-info": [ - { - "esr-system-info-id": str(uuid.uuid4()), - "service-url": viminfo["url"], - "user-name": viminfo["userName"], - "password": viminfo["password"], - "system-type": "VIM", - 
"ssl-cacert": viminfo["cacert"], - "ssl-insecure": viminfo["insecure"], - "cloud-domain": viminfo["domain"], - "default-tenant": viminfo["tenant"] - - } - ] - } - } - - # get the resource first - resource_url = ("/cloud-infrastructure/cloud-regions/" - "cloud-region/%(cloud_owner)s/%(cloud_region_id)s" - % { - "cloud_owner": cloud_owner, - "cloud_region_id": cloud_region_id - }) - - # get cloud-region - retcode, content, status_code = \ - restcall.req_to_aai(resource_url, "GET") - - # add resource-version - if retcode == 0 and content: - content = json.JSONDecoder().decode(content) - # resource_info["resource-version"] = content["resource-version"] - content.update(resource_info) - resource_info = content - - # then update the resource - retcode, content, status_code = \ - restcall.req_to_aai(resource_url, "PUT", content=resource_info) - - self._logger.debug( - ("_update_cloud_region,%(cloud_owner)s" - "_%(cloud_region_id)s , " - "return %(retcode)s, %(content)s, %(status_code)s") - % { - "cloud_owner": cloud_owner, - "cloud_region_id": cloud_region_id, - "retcode": retcode, - "content": content, - "status_code": status_code, - }) - - # wait and confirm the update has been available for next AAI calls - while True: - # get cloud-region - retcode2, content2, status_code2 = \ - restcall.req_to_aai(resource_url, "GET") - if retcode2 == 0 and content2: - content2 = json.JSONDecoder().decode(content2) - if content2.get("identity-url", None)\ - == resource_info.get("identity-url", None): - break - - return retcode - return 1 # unknown cloud owner,region_id - - def _discover_regions(self, cloud_owner="", cloud_region_id="", - session=None, viminfo=None): - try: - regions = [] - vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id) - isDistributedCloud = False - openstackregions = self._get_list_resources( - "/regions", "identity", session, viminfo, vimid, - "regions") - - for region in openstackregions: - if region['id'] == 'SystemController': - isDistributedCloud = True - break - else: - continue - - for region in openstackregions: - if region['id'] == 'SystemController': - continue - elif region['id'] == 'RegionOne' and isDistributedCloud: - continue - else: - regions.append(region['id']) - - self._logger.info("Discovered Regions :%s" % regions) - return regions - - except HttpError as e: - self._logger.error("HttpError: status:%s, response:%s" - % (e.http_status, e.response.json())) - return [] - except Exception: - self._logger.error(traceback.format_exc()) - return [] - - def post(self, request, cloud_owner="", cloud_region_id=""): - self._logger.info("registration with : %s, %s" - % (cloud_owner, cloud_region_id)) - - try: - vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id) - - viminfo = VimDriverUtils.get_vim_info(vimid) - cloud_extra_info_str = viminfo['cloud_extra_info'] - cloud_extra_info = None - try: - cloud_extra_info = json.loads(cloud_extra_info_str)\ - if cloud_extra_info_str else None - except Exception as ex: - logger.error("Can not convert cloud extra info %s %s" % ( - str(ex), cloud_extra_info_str)) - pass - - region_specified = cloud_extra_info.get( - "openstack-region-id", None) if cloud_extra_info else None - multi_region_discovery = cloud_extra_info.get( - "multi-region-discovery", None) if cloud_extra_info else None - - # set the default tenant since there is no tenant info in the VIM yet - sess = VimDriverUtils.get_session( - viminfo, tenant_name=viminfo['tenant']) - - # discover the regions, expect it always returns a list (even empty list) - region_ids = 
self._discover_regions(cloud_owner, cloud_region_id, sess, viminfo)
-
-            if len(region_ids) == 0:
-                self._logger.warn("failed to get region id")
-
-            # compare the regions with region_specified and then cloud_region_id
-            if region_specified in region_ids:
-                pass
-            elif cloud_region_id in region_ids:
-                region_specified = cloud_region_id
-                pass
-            else:
-                # assume the first region be the primary region
-                # since we have no other way to determine it.
-                region_specified = region_ids.pop(0)
-
-            # update cloud region and discover/register resource
-            if multi_region_discovery:
-                # no input for specified cloud region,
-                # so discover all cloud region
-                for regionid in region_ids:
-                    # do not update the specified region here
-                    if region_specified == regionid:
-                        continue
-
-                    # create cloud region with composed AAI cloud_region_id
-                    # except for the one onboarded externally (e.g. ESR)
-                    gen_cloud_region_id = cloud_region_id + "_" + regionid
-                    self._logger.info("create a cloud region: %s,%s,%s"
-                                      % (cloud_owner, gen_cloud_region_id, regionid))
-
-                    self._update_cloud_region(
-                        cloud_owner, gen_cloud_region_id, regionid, viminfo)
-                    new_vimid = extsys.encode_vim_id(
-                        cloud_owner, gen_cloud_region_id)
-                    super(APIv1Registry, self).post(request, new_vimid)
-
-            # update the specified region
-            self._update_cloud_region(cloud_owner, cloud_region_id,
-                                      region_specified, viminfo)
-            return super(APIv1Registry, self).post(request, vimid)
-
-        except HttpError as e:
-            self._logger.error("HttpError: status:%s, response:%s"
-                               % (e.http_status, e.response.json()))
-            return Response(data=e.response.json(), status=e.http_status)
-        except Exception as e:
-            self._logger.error(traceback.format_exc())
-            return Response(
-                data={'error': str(e)},
-                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
-    def delete(self, request, cloud_owner="", cloud_region_id=""):
-        self._logger.debug("unregister cloud region: %s, %s"
-                           % (cloud_owner, cloud_region_id))
-
-        vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-        return super(APIv1Registry, self).delete(request, vimid)
-
-
-# APIv0 handler upgrading: leverage APIv1 handler
-class APIv0Registry(APIv1Registry):
-    def __init__(self):
-        super(APIv0Registry, self).__init__()
-        self.proxy_prefix = settings.MULTICLOUD_PREFIX
-        self.aai_base_url = settings.AAI_BASE_URL
-        # self._logger = logger
-
-    def post(self, request, vimid=""):
-        self._logger.info("registration with : %s" % vimid)
-
-        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
-        return super(APIv0Registry, self).post(
-            request, cloud_owner, cloud_region_id)
-
-    def delete(self, request, vimid=""):
-        self._logger.debug("unregister cloud region: %s" % vimid)
-
-        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
-        return super(APIv0Registry, self).delete(
-            request, cloud_owner, cloud_region_id)
diff --git a/windriver/titanium_cloud/resource/tests/tests_infra_workload.py b/windriver/titanium_cloud/resource/tests/tests_infra_workload.py
index c6991837..cd8fe558 100644
--- a/windriver/titanium_cloud/resource/tests/tests_infra_workload.py
+++ b/windriver/titanium_cloud/resource/tests/tests_infra_workload.py
@@ -15,13 +15,11 @@ import mock
 import unittest
-# import json
 from rest_framework import status
-# from common.utils import restcall
 from common.msapi.helper import Helper as helper
-from titanium_cloud.resource.views.infra_workload import InfraWorkload
-from titanium_cloud.resource.views.infra_workload import APIv1InfraWorkload
+from newton_base.resource.infra_workload import InfraWorkload
+from newton_base.resource.infra_workload import APIv1InfraWorkload
 
 MOCK_TOKEN_RESPONSE = {
     "access":
diff --git a/windriver/titanium_cloud/resource/views/__init__.py b/windriver/titanium_cloud/resource/views/__init__.py
deleted file mode 100644
index ae1ce9db..00000000
--- a/windriver/titanium_cloud/resource/views/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/windriver/titanium_cloud/resource/views/capacity.py b/windriver/titanium_cloud/resource/views/capacity.py
deleted file mode 100644
index 669cb659..00000000
--- a/windriver/titanium_cloud/resource/views/capacity.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import logging -# import json -import traceback - -# from rest_framework import status - -# from django.conf import settings -from common.exceptions import VimDriverNewtonException -from newton_base.util import VimDriverUtils - -from keystoneauth1.exceptions import HttpError -from rest_framework import status -from rest_framework.response import Response -from rest_framework.views import APIView -from common.msapi import extsys - -logger = logging.getLogger(__name__) - - -class CapacityCheck(APIView): - def __init__(self): - self._logger = logger - - def post(self, request, vimid=""): - self._logger.info("vimid, data> %s, %s" % (vimid, request.data)) - self._logger.debug("META> %s" % request.META) - - hasEnoughResource = False - try: - resource_demand = request.data - - tenant_name = None - vim = VimDriverUtils.get_vim_info(vimid) - sess = VimDriverUtils.get_session(vim, tenant_name) - - # get token: - cloud_owner, regionid = extsys.decode_vim_id(vimid) - interface = 'public' - service = {'service_type': 'compute', - 'interface': interface, - 'region_name': vim['openstack_region_id'] - if vim.get('openstack_region_id') - else vim['cloud_region_id'] - } - - # get limit for this tenant - req_resouce = "/limits" - self._logger.info("check limits> URI:%s" % req_resouce) - resp = sess.get(req_resouce, endpoint_filter=service) - self._logger.info("check limits> status:%s" % resp.status_code) - content = resp.json() - compute_limits = content['limits']['absolute'] - self._logger.debug("check limits> resp data:%s" % content) - - # get total resource of this cloud region - try: - req_resouce = "/os-hypervisors/statistics" - self._logger.info("check os-hypervisors statistics> URI:%s" % req_resouce) - resp = sess.get(req_resouce, endpoint_filter=service) - self._logger.info("check os-hypervisors statistics> status:%s" % resp.status_code) - content = resp.json() - hypervisor_statistics = content['hypervisor_statistics'] - self._logger.debug("check os-hypervisors statistics> resp data:%s" % content) - except HttpError as e: - if e.http_status == status.HTTP_403_FORBIDDEN: - # Due to non administrator account cannot get hypervisor data, - # so construct enough resource data - conVCPUS = int(resource_demand['vCPU']) - conFreeRamMB = int(resource_demand['Memory']) - conFreeDiskGB = int(resource_demand['Storage']) - self._logger.info("Non administator forbidden to access hypervisor statistics data") - hypervisor_statistics = {'vcpus_used': 0, - 'vcpus': conVCPUS, - 'free_ram_mb': conFreeRamMB, - 'free_disk_gb': conFreeDiskGB} - else: - # non forbiden exeption will be redirected - raise e - - # get storage limit for this tenant - service['service_type'] = 'volumev2' - req_resouce = "/limits" - self._logger.info("check volumev2 limits> URI:%s" % req_resouce) - resp = sess.get(req_resouce, endpoint_filter=service) - self._logger.info("check volumev2> status:%s" % resp.status_code) - content = resp.json() - storage_limits = content['limits']['absolute'] - self._logger.debug("check volumev2> resp data:%s" % content) - - # compute actual available resource for this tenant - remainVCPU = compute_limits['maxTotalCores'] - compute_limits['totalCoresUsed'] - remainHypervisorVCPU = hypervisor_statistics['vcpus'] - hypervisor_statistics['vcpus_used'] - - if (remainVCPU > remainHypervisorVCPU): - remainVCPU = remainHypervisorVCPU - - remainMEM = compute_limits['maxTotalRAMSize'] - compute_limits['totalRAMUsed'] - remainHypervisorMEM = hypervisor_statistics['free_ram_mb'] - if remainMEM > remainHypervisorMEM: - 
remainMEM = remainHypervisorMEM - - remainStorage = storage_limits['maxTotalVolumeGigabytes'] - storage_limits['totalGigabytesUsed'] - remainHypervisorStorage = hypervisor_statistics['free_disk_gb'] - if (remainStorage > remainHypervisorStorage): - remainStorage = remainHypervisorStorage - - # compare resource demanded with available - if (int(resource_demand['vCPU']) > remainVCPU): - hasEnoughResource = False - elif (int(resource_demand['Memory']) > remainMEM): - hasEnoughResource = False - elif (int(resource_demand['Storage']) > remainStorage): - hasEnoughResource = False - else: - hasEnoughResource = True - - self._logger.info("RESP with data> result:%s" % hasEnoughResource) - return Response(data={'result': hasEnoughResource}, status=status.HTTP_200_OK) - except VimDriverNewtonException as e: - self._logger.error("Plugin exception> status:%s,error:%s" - % (e.status_code, e.content)) - return Response(data={'result': hasEnoughResource, - 'error': e.content}, status=e.status_code) - except HttpError as e: - self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json())) - resp = e.response.json() - resp.update({'result': hasEnoughResource}) - return Response(data=e.response.json(), status=e.http_status) - except Exception as e: - self._logger.error(traceback.format_exc()) - return Response(data={'result': hasEnoughResource, 'error': str(e)}, - status=status.HTTP_500_INTERNAL_SERVER_ERROR) - - -class APIv1CapacityCheck(CapacityCheck): - def __init__(self): - super(APIv1CapacityCheck, self).__init__() - # self._logger = logger - - def post(self, request, cloud_owner="", cloud_region_id=""): - self._logger.info("vimid, data> %s,%s, %s" % (cloud_owner, cloud_region_id, request.data)) - self._logger.debug("META> %s" % request.META) - - vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id) - return super(APIv1CapacityCheck, self).post(request, vimid) diff --git a/windriver/titanium_cloud/resource/views/infra_workload.py b/windriver/titanium_cloud/resource/views/infra_workload.py deleted file mode 100644 index 74c187fe..00000000 --- a/windriver/titanium_cloud/resource/views/infra_workload.py +++ /dev/null @@ -1,554 +0,0 @@ -# Copyright (c) 2017-2018 Wind River Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -import json -import traceback - -from django.conf import settings -from common.exceptions import VimDriverNewtonException -# from newton_base.util import VimDriverUtils - -from keystoneauth1.exceptions import HttpError -from rest_framework import status -from rest_framework.response import Response -from rest_framework.views import APIView -from common.msapi import extsys -from common.msapi.helper import Helper as helper - -from common.utils import restcall - -logger = logging.getLogger(__name__) - - -class InfraWorkload(APIView): - def __init__(self): - self._logger = logger - - def post(self, request, vimid=""): - self._logger.info("vimid: %s" % (vimid)) - self._logger.info("data: %s" % (request.data)) - self._logger.debug("META: %s" % request.META) - - try: - - data = request.data - oof_directive = data.get("oof_directives", {}) - template_type = data.get("template_type", None) - template_data = data.get("template_data", {}) - resp_template = None - if template_type and "heat" == template_type.lower(): - # update heat parameters from oof_directive - parameters = template_data.get("parameters", {}) - - for directive in oof_directive.get("directives", []): - if directive["type"] == "vnfc": - for directive2 in directive.get("directives", []): - if directive2["type"] in ["flavor_directives", - "sriovNICNetwork_directives"]: - for attr in directive2.get("attributes", []): - flavor_label = attr.get("attribute_name", None) - flavor_value = attr.get("attribute_value", None) - if flavor_label in parameters: - parameters[flavor_label] = flavor_value - else: - self._logger.warn( - "There is no parameter exist: %s" % - flavor_label) - - # update parameters - template_data["parameters"] = parameters - - # reset to make sure "files" are empty - template_data["files"] = {} - - # authenticate - cloud_owner, regionid = extsys.decode_vim_id(vimid) - # should go via multicloud proxy so that - # the selflink is updated by multicloud - retcode, v2_token_resp_json, os_status = \ - helper.MultiCloudIdentityHelper( - settings.MULTICLOUD_API_V1_PREFIX, - cloud_owner, regionid, "/v2.0/tokens") - - if retcode > 0 or not v2_token_resp_json: - logger.error("authenticate fails:%s,%s, %s" % - (cloud_owner, regionid, v2_token_resp_json)) - return - # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"] - - service_type = "orchestration" - resource_uri = "/stacks" - self._logger.info("retrieve stack resources, URI:%s" % resource_uri) - retcode, content, os_status = \ - helper.MultiCloudServiceHelper(cloud_owner, regionid, - v2_token_resp_json, - service_type, resource_uri, - template_data, "POST") - - stack1 = content.get('stack', None) \ - if retcode == 0 and content else None - - resp_template = { - "template_type": template_type, - "workload_id": stack1["id"] if stack1 else "", - "template_response": content - } - self._logger.info("RESP with data> result:%s" % resp_template) - - return Response(data=resp_template, status=os_status) - - else: - msg = "The template type %s is not supported" % (template_type) - self._logger.warn(msg) - return Response(data={"error": msg}, - status=status.HTTP_500_INTERNAL_SERVER_ERROR) - - except VimDriverNewtonException as e: - self._logger.error("Plugin exception> status:%s,error:%s" - % (e.status_code, e.content)) - return Response(data={'error': e.content}, status=e.status_code) - except HttpError as e: - self._logger.error("HttpError: status:%s, response:%s" % - (e.http_status, e.response.json())) - return Response(data=e.response.json(), 
status=e.http_status) - except Exception as e: - self._logger.error(traceback.format_exc()) - return Response(data={'error': str(e)}, - status=status.HTTP_500_INTERNAL_SERVER_ERROR) - - def get(self, request, vimid="", requri=""): - self._logger.info("vimid,requri: %s, %s" % (vimid, requri)) - self._logger.debug("META: %s" % request.META) - - try: - # assume the workload_type is heat - stack_id = requri - cloud_owner, regionid = extsys.decode_vim_id(vimid) - # should go via multicloud proxy so that the selflink is updated by multicloud - retcode, v2_token_resp_json, os_status = \ - helper.MultiCloudIdentityHelper( - settings.MULTICLOUD_API_V1_PREFIX, - cloud_owner, regionid, "/v2.0/tokens") - - if retcode > 0 or not v2_token_resp_json: - logger.error("authenticate fails:%s, %s, %s" % - (cloud_owner, regionid, v2_token_resp_json)) - return - # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"] - # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"] - - # get stack status - service_type = "orchestration" - resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks" - self._logger.info("retrieve stack resources, URI:%s" % resource_uri) - retcode, content, os_status = \ - helper.MultiCloudServiceHelper(cloud_owner, regionid, - v2_token_resp_json, - service_type, resource_uri, - None, "GET") - - stacks = content.get('stacks', []) if retcode == 0 and content else [] - stack_status = stacks[0]["stack_status"] if len(stacks) > 0 else "" - - # stub response - resp_template = { - "template_type": "HEAT", - "workload_id": stack_id, - "workload_status": stack_status - } - - if retcode > 0: - # return error messsages - resp_template['workload_response'] = content - - if ('CREATE_COMPLETE' == stack_status): - self.heatbridge_update(request, vimid, stack_id) - - self._logger.info("RESP with data> result:%s" % resp_template) - return Response(data=resp_template, status=os_status) - except VimDriverNewtonException as e: - self._logger.error("Plugin exception> status:%s,error:%s" - % (e.status_code, e.content)) - return Response(data={'error': e.content}, status=e.status_code) - except HttpError as e: - self._logger.error("HttpError: status:%s, response:%s" % - (e.http_status, e.response.json())) - return Response(data=e.response.json(), status=e.http_status) - except Exception as e: - self._logger.error(traceback.format_exc()) - return Response(data={'error': str(e)}, - status=status.HTTP_500_INTERNAL_SERVER_ERROR) - - def delete(self, request, vimid="", requri=""): - self._logger.info("vimid,requri: %s, %s" % (vimid, requri)) - self._logger.debug("META: %s" % request.META) - - try: - if requri == "": - raise VimDriverNewtonException( - message="workload_id is not specified", - content="workload_id must be specified to delete the workload", - status_code=400) - - # assume the workload_type is heat - stack_id = requri - cloud_owner, regionid = extsys.decode_vim_id(vimid) - # should go via multicloud proxy so that - # the selflink is updated by multicloud - retcode, v2_token_resp_json, os_status = \ - helper.MultiCloudIdentityHelper( - settings.MULTICLOUD_API_V1_PREFIX, - cloud_owner, regionid, "/v2.0/tokens") - - if retcode > 0 or not v2_token_resp_json: - logger.error("authenticate fails:%s, %s, %s" % - (cloud_owner, regionid, v2_token_resp_json)) - return - # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"] - # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"] - - # get stack status - service_type = "orchestration" - 
resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks" - self._logger.info("retrieve stack resources, URI:%s" % resource_uri) - retcode, content, os_status = \ - helper.MultiCloudServiceHelper(cloud_owner, regionid, - v2_token_resp_json, - service_type, resource_uri, - None, "GET") - - stacks = content.get('stacks', []) \ - if retcode == 0 and content else [] - # assume there is at most 1 stack returned - # since it was filtered by id - stack1 = stacks[0] if stacks else None - stack_status = "" - - if stack1 and 'CREATE_COMPLETE' == stack1['stack_status']: - # delete the stack - resource_uri = "/stacks/%s/%s" % \ - (stack1['stack_name'], stack1['id']) - self._logger.info("delete stack, URI:%s" % resource_uri) - retcode, content, os_status = \ - helper.MultiCloudServiceHelper(cloud_owner, regionid, - v2_token_resp_json, - service_type, resource_uri, - None, "DELETE") - if retcode == 0: - stack_status = "DELETE_IN_PROCESS" - # and update AAI inventory by heatbridge-delete - self.heatbridge_delete(request, vimid, stack1['id']) - - # stub response - resp_template = { - "template_type": "HEAT", - "workload_id": stack_id, - "workload_status": stack_status - } - - if retcode > 0: - resp_template["workload_response"] = content - - self._logger.info("RESP with data> result:%s" % resp_template) - return Response(status=os_status) - except VimDriverNewtonException as e: - self._logger.error("Plugin exception> status:%s,error:%s" - % (e.status_code, e.content)) - return Response(data={'error': e.content}, status=e.status_code) - except HttpError as e: - self._logger.error("HttpError: status:%s, response:%s" % - (e.http_status, e.response.json())) - return Response(data=e.response.json(), status=e.http_status) - except Exception as e: - self._logger.error(traceback.format_exc()) - return Response(data={'error': str(e)}, - status=status.HTTP_500_INTERNAL_SERVER_ERROR) - - def heatbridge_update(self, request, vimid, stack_id): - ''' - update heat resource to AAI for the specified cloud region and tenant - The resources includes: vserver, vserver/l-interface, - :param request: - :param vimid: - :param stack_id: - :return: - ''' - - cloud_owner, regionid = extsys.decode_vim_id(vimid) - # should go via multicloud proxy so that the selflink is updated by multicloud - retcode, v2_token_resp_json, os_status = \ - helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX, - cloud_owner, regionid, "/v2.0/tokens") - if retcode > 0: - logger.error("authenticate fails:%s, %s, %s" % - (cloud_owner, regionid, v2_token_resp_json)) - - return None - tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"] - # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"] - - # common prefix - aai_cloud_region = \ - "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \ - % (cloud_owner, regionid, tenant_id) - - # get stack resource - service_type = "orchestration" - resource_uri = "/stacks/%s/resources" % (stack_id) - self._logger.info("retrieve stack resources, URI:%s" % resource_uri) - retcode, content, os_status = \ - helper.MultiCloudServiceHelper(cloud_owner, regionid, - v2_token_resp_json, - service_type, resource_uri, - None, "GET") - - resources = content.get('resources', []) if retcode == 0 and content else [] - - # find and update resources - transactions = [] - for resource in resources: - if resource.get('resource_status', None) != "CREATE_COMPLETE": - continue - if resource.get('resource_type', None) == 'OS::Nova::Server': - # retrieve vserver 
details - service_type = "compute" - resource_uri = "/servers/%s" % (resource['physical_resource_id']) - self._logger.info("retrieve vserver detail, URI:%s" % resource_uri) - retcode, content, os_status = \ - helper.MultiCloudServiceHelper(cloud_owner, regionid, - v2_token_resp_json, - service_type, resource_uri, - None, "GET") - - self._logger.debug(" resp data:%s" % content) - vserver_detail = content.get('server', None) if retcode == 0 and content else None - if vserver_detail: - # compose inventory entry for vserver - vserver_link = "" - for link in vserver_detail['links']: - if link['rel'] == 'self': - vserver_link = link['href'] - break - pass - - # note: relationship-list to flavor/image is not be update yet - # note: volumes is not updated yet - # note: relationship-list to vnf will be handled somewhere else - aai_resource = { - 'body': { - 'vserver-name': vserver_detail['name'], - 'vserver-name2': vserver_detail['name'], - "vserver-id": vserver_detail['id'], - "vserver-selflink": vserver_link, - "prov-status": vserver_detail['status'] - }, - "uri": aai_cloud_region + "/vservers/vserver/%s" % (vserver_detail['id']) - } - - try: - # then update the resource - retcode, content, status_code = \ - restcall.req_to_aai(aai_resource['uri'], - "PUT", content=aai_resource['body']) - - if retcode == 0 and content: - content = json.JSONDecoder().decode(content) - self._logger.debug("AAI update %s response: %s" % - (aai_resource['uri'], content)) - except Exception: - self._logger.error(traceback.format_exc()) - pass - - aai_resource_transactions = {"put": [aai_resource]} - transactions.append(aai_resource_transactions) - # self._logger.debug("aai_resource :%s" % aai_resource_transactions) - pass - - for resource in resources: - if resource.get('resource_status', None) != "CREATE_COMPLETE": - continue - if resource.get('resource_type', None) == 'OS::Neutron::Port': - # retrieve vport details - service_type = "network" - resource_uri = "/v2.0/ports/%s" % (resource['physical_resource_id']) - self._logger.info("retrieve vport detail, URI:%s" % resource_uri) - retcode, content, os_status = \ - helper.MultiCloudServiceHelper(cloud_owner, regionid, - v2_token_resp_json, - service_type, resource_uri, - None, "GET") - - self._logger.debug(" resp data:%s" % content) - - vport_detail = content.get('port', None) if retcode == 0 and content else None - if vport_detail: - # compose inventory entry for vport - # note: l3-interface-ipv4-address-list, - # l3-interface-ipv6-address-list are not updated yet - # note: network-name is not update yet since the detail - # coming with network-id - aai_resource = { - "body": { - "interface-name": vport_detail['name'], - "interface-id": vport_detail['id'], - "macaddr": vport_detail['mac_address'] - }, - 'uri': - aai_cloud_region + "/vservers/vserver/%s/l-interfaces/l-interface/%s" - % (vport_detail['device_id'], vport_detail['name']) - } - try: - # then update the resource - retcode, content, status_code = \ - restcall.req_to_aai(aai_resource['uri'], "PUT", - content=aai_resource['body']) - - if retcode == 0 and content: - content = json.JSONDecoder().decode(content) - self._logger.debug("AAI update %s response: %s" % - (aai_resource['uri'], content)) - except Exception: - self._logger.error(traceback.format_exc()) - pass - - aai_resource_transactions = {"put": [aai_resource]} - transactions.append(aai_resource_transactions) - # self._logger.debug("aai_resource :%s" % aai_resource_transactions) - - pass - - aai_transactions = {"transactions": transactions} - 
self._logger.debug("aai_transactions :%s" % aai_transactions) - - return aai_transactions - - def heatbridge_delete(self, request, vimid, stack_id): - ''' - remove heat resource from AAI for the specified cloud region and tenant - The resources includes: vserver, vserver/l-interface, - :param request: - :param vimid: - :param stack_id: - :param tenant_id: - :return: - ''' - - # enumerate the resources - cloud_owner, regionid = extsys.decode_vim_id(vimid) - # should go via multicloud proxy so that the selflink is updated by multicloud - retcode, v2_token_resp_json, os_status = \ - helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX, - cloud_owner, regionid, "/v2.0/tokens") - if retcode > 0: - logger.error("authenticate fails:%s, %s, %s" % - (cloud_owner, regionid, v2_token_resp_json)) - return None - - tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"] - # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"] - - # common prefix - aai_cloud_region = \ - "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \ - % (cloud_owner, regionid, tenant_id) - - # get stack resource - service_type = "orchestration" - resource_uri = "/stacks/%s/resources" % (stack_id) - self._logger.info("retrieve stack resources, URI:%s" % resource_uri) - retcode, content, os_status = \ - helper.MultiCloudServiceHelper(cloud_owner, regionid, - v2_token_resp_json, - service_type, resource_uri, - None, "GET") - resources = content.get('resources', []) \ - if retcode == 0 and content else [] - - vserver_list = [resource['physical_resource_id'] for resource in resources - if resource.get('resource_type', None) == 'OS::Nova::Server'] - - try: - # get list of vservers - vserver_list_url = aai_cloud_region + "/vservers?depth=all" - retcode, content, status_code = \ - restcall.req_to_aai(vserver_list_url, "GET") - if retcode > 0 or not content: - self._logger.debug("AAI get %s response: %s" % (vserver_list_url, content)) - return None - content = json.JSONDecoder().decode(content) - vservers = content['vserver'] - for vserver in vservers: - if vserver['vserver-id'] not in vserver_list: - continue - - try: - # iterate vport, except will be raised if no l-interface exist - for vport in vserver['l-interfaces']['l-interface']: - # delete vport - vport_delete_url = \ - aai_cloud_region + \ - "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \ - % (vserver['vserver-id'], vport['interface-name'], - vport['resource-version']) - - restcall.req_to_aai(vport_delete_url, "DELETE") - except Exception: - pass - - try: - # delete vserver - vserver_delete_url = \ - aai_cloud_region + \ - "/vservers/vserver/%s?resource-version=%s" \ - % (vserver['vserver-id'], vserver['resource-version']) - - restcall.req_to_aai(vserver_delete_url, "DELETE") - except Exception: - continue - - except Exception: - self._logger.error(traceback.format_exc()) - return None - pass - - -class APIv1InfraWorkload(InfraWorkload): - def __init__(self): - super(APIv1InfraWorkload, self).__init__() - # self._logger = logger - - def post(self, request, cloud_owner="", cloud_region_id=""): - # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" % - # (cloud_owner, cloud_region_id, request.data)) - # self._logger.debug("META: %s" % request.META) - - vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id) - return super(APIv1InfraWorkload, self).post(request, vimid) - - def get(self, request, cloud_owner="", cloud_region_id="", requri=""): - # self._logger.info("cloud owner, 
cloud region id, data: %s,%s, %s" %
-        #                   (cloud_owner, cloud_region_id, request.data))
-        # self._logger.debug("META: %s" % request.META)
-
-        vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-        return super(APIv1InfraWorkload, self).get(request, vimid, requri)
-
-    def delete(self, request, cloud_owner="", cloud_region_id="", requri=""):
-        # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
-        #                   (cloud_owner, cloud_region_id, request.data))
-        # self._logger.debug("META: %s" % request.META)
-
-        vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-        return super(APIv1InfraWorkload, self).delete(request, vimid, requri)
diff --git a/windriver/titanium_cloud/urls.py b/windriver/titanium_cloud/urls.py
index e953140e..f4adc10b 100644
--- a/windriver/titanium_cloud/urls.py
+++ b/windriver/titanium_cloud/urls.py
@@ -14,11 +14,11 @@ from django.conf.urls import include, url
 
-from titanium_cloud.registration.views import registration
+from starlingx_base.registration import registration
 from newton_base.openoapi import tenants
-from titanium_cloud.resource.views import capacity
+from newton_base.resource import capacity
 from titanium_cloud.vesagent import vesagent_ctrl
-from titanium_cloud.resource.views import infra_workload
+from newton_base.resource import infra_workload
 
 urlpatterns = [
     url(r'^', include('titanium_cloud.swagger.urls')),
diff --git a/windriver/tox.ini b/windriver/tox.ini
index 113f9c1b..dd34e479 100644
--- a/windriver/tox.ini
+++ b/windriver/tox.ini
@@ -16,7 +16,7 @@ setenv =
 deps = -r{toxinidir}/requirements.txt
 commands = coverage run --branch manage.py test titanium_cloud
-    coverage report --omit="./venv-tox/*,./.tox/*,*tests*,*__init__.py,*newton_base*,*common*" --fail-under=30
+    coverage report --omit="./venv-tox/*,./.tox/*,*tests*,*__init__.py,*newton_base*,*common*,*starlingx_base*" --fail-under=30
 
 [testenv:pep8]
 deps=flake8