 lenovo/thinkcloud/registration/tests/test_registration.py         |   2
 lenovo/thinkcloud/registration/tests/test_registration2.py        |   8
 newton/newton/registration/tests/test_registration.py             |   2
 newton/newton/registration/tests/test_registration.py.bak         | 350
 ocata/ocata/registration/tests/test_registration.py               |   2
 ocata/ocata/registration/tests/test_registration2.py              |   8
 pike/pike/registration/tests/test_registration.py                 |   2
 share/common/msapi/helper.py                                       | 230
 share/newton_base/registration/registration.py                     | 483
 share/newton_base/resource/capacity.py                             | 183
 share/starlingx_base/registration/registration.py                  | 226
 share/starlingx_base/resource/__init__.py                          |  10
 share/starlingx_base/resource/capacity.py                          |  97
 share/starlingx_base/resource/infra_workload.py                    | 619
 starlingx/starlingx/registration/tests/test_registration.py       |   2
 starlingx/starlingx/registration/tests/test_registration2.py      |   8
 starlingx/starlingx/urls.py                                        |  39
 starlingx/tox.ini                                                  |   4
 windriver/titanium_cloud/registration/tests/test_registration.py  |   2
 windriver/titanium_cloud/registration/tests/test_registration2.py |   8
 windriver/titanium_cloud/urls.py                                   |  79
 windriver/tox.ini                                                  |   2
 22 files changed, 1539 insertions(+), 827 deletions(-)
diff --git a/lenovo/thinkcloud/registration/tests/test_registration.py b/lenovo/thinkcloud/registration/tests/test_registration.py
index c8c8e6b5..c9de46d9 100644
--- a/lenovo/thinkcloud/registration/tests/test_registration.py
+++ b/lenovo/thinkcloud/registration/tests/test_registration.py
@@ -294,7 +294,7 @@ class TestRegistration(test_base.TestRequest):
"registry"), "{}", content_type="application/json",
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
- self.assertEquals(status.HTTP_500_INTERNAL_SERVER_ERROR,
+ self.assertEquals(status.HTTP_204_NO_CONTENT,
response.status_code)
@mock.patch.object(VimDriverUtils, 'get_session')
diff --git a/lenovo/thinkcloud/registration/tests/test_registration2.py b/lenovo/thinkcloud/registration/tests/test_registration2.py
index 7abd6073..f49b023f 100644
--- a/lenovo/thinkcloud/registration/tests/test_registration2.py
+++ b/lenovo/thinkcloud/registration/tests/test_registration2.py
@@ -92,12 +92,12 @@ class TestRegistration2(unittest.TestCase):
}
)
- resp = self.view.register_helper._discover_flavors(
+ retcode, content = self.view.register_helper._discover_flavors(
vimid="lenovo-hudson-dc_RegionOne",
session=mock_session, viminfo=MOCK_VIM_INFO
)
- self.assertIsNone(resp)
+ self.assertEquals(retcode, 0)
def test_discover_flavors_w_hpa_numa(self):
restcall.req_to_aai = mock.Mock()
@@ -112,9 +112,9 @@ class TestRegistration2(unittest.TestCase):
}
),
- resp = self.view.register_helper._discover_flavors(
+ retcode, content = self.view.register_helper._discover_flavors(
vimid="lenovo-hudson-dc_RegionOne",
session=mock_session, viminfo=MOCK_VIM_INFO
)
- self.assertIsNone(resp)
+ self.assertEquals(retcode, 11)
diff --git a/newton/newton/registration/tests/test_registration.py b/newton/newton/registration/tests/test_registration.py
index cfc2cd59..5481822f 100644
--- a/newton/newton/registration/tests/test_registration.py
+++ b/newton/newton/registration/tests/test_registration.py
@@ -291,7 +291,7 @@ class TestRegistration(test_base.TestRequest):
"registry"), "{}", content_type="application/json",
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
- self.assertEquals(status.HTTP_500_INTERNAL_SERVER_ERROR,
+ self.assertEquals(status.HTTP_204_NO_CONTENT,
response.status_code)
@mock.patch.object(VimDriverUtils, 'get_session')
diff --git a/newton/newton/registration/tests/test_registration.py.bak b/newton/newton/registration/tests/test_registration.py.bak
deleted file mode 100644
index d5be81de..00000000
--- a/newton/newton/registration/tests/test_registration.py.bak
+++ /dev/null
@@ -1,350 +0,0 @@
-# Copyright (c) 2017 Intel Corporation, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from django.conf import settings
-from rest_framework import status
-
-from common.utils import restcall
-from newton_base.openoapi.flavor import Flavors
-from newton_base.tests import mock_info
-from newton_base.tests import test_base
-from newton_base.util import VimDriverUtils
-
-MOCK_GET_TENANT_RESPONSE = {
- "projects": [
- {"id": "1", "name": "project"},
- {"id": "2", "name": "project2"},
- ]
-}
-
-MOCK_GET_FLAVOR_RESPONSE = {
- "flavors": [
- {
- "id": "1", "name": "micro", "vcpus": 1, "ram": "1MB",
- "disk": "1G", "OS-FLV-EXT-DATA:ephemeral": False,
- "swap": True, "os-flavor-access:is_public": True,
- "OS-FLV-DISABLED:disabled": True, "link": [{"href": 1}]
- },
- {
- "id": "2", "name": "mini", "vcpus": 2, "ram": "2MB",
- "disk": "2G", "OS-FLV-EXT-DATA:ephemeral": True,
- "swap": False, "os-flavor-access:is_public": True,
- "OS-FLV-DISABLED:disabled": True
- },
- ]
-}
-
-MOCK_GET_EXTRA_SPECS_RESPONSE = {
- "extra_specs": {
- "hw:cpu_sockets": 4,
- "hw:cpu_cores": 4,
- "hw:cpu_policy": "dedicated",
- "hw:numa_nodes": 3,
- "hw:numa_cpus.1": [0, 1],
- "hw:numa_mem.1": 2,
- "pci_passthrough:alias": "mycrypto-8086-0443:4",
- "aggregate_instance_extra_spec:sriov-device-intel-1832-9475": "true",
- "hw:mem_page_size": "1GB"
- }
-}
-
-MOCK_HPA_RESPONSE = """{
- "basicCapabilities": {
- "info": {
- "hpa-feature": "basicCapabilities",
- "hpa-version": "v1",
- "architecture": "generic"
- },
- "hpa-attributes": {
- "vcpus": {
- "key": "numVirtualCpu",
- "unit": null
- },
- "ram": {
- "key": "virtualMemSize",
- "unit": "GB"
- }
- }
- },
- "localStorage": {
- "info": {
- "hpa-feature": "localStorage",
- "hpa-version": "v1",
- "architecture": "generic"
- },
- "hpa-attributes": {
- "disk": {
- "key": "diskSize",
- "unit": "GB"
- },
- "swap": {
- "key": "swapMemSize",
- "unit": "MB"
- }
- }
- },
- "cpuTopology": {
- "info": {
- "hpa-feature": "cpuTopology",
- "hpa-version": "v1",
- "architecture": "generic"
- },
- "hpa-attributes": {
- "hw:cpu_sockets": {
- "key": "numCpuSockets",
- "unit": null
- },
- "hw:cpu_cores": {
- "key": "numCpuCores",
- "unit": null
- },
- "hw:cpu_threads": {
- "key": "numCpuThreads",
- "unit": null
- }
- }
- },
- "cpuPinning": {
- "info": {
- "hpa-feature": "cpuPinning",
- "hpa-version": "v1",
- "architecture": "generic"
- },
- "hpa-attributes": {
- "hw:cpu_thread_policy": {
- "key": "logicalCpuThreadPinningPolicy",
- "unit": null
- },
- "hw:cpu_policy": {
- "key": "logicalCpuPinningPolicy",
- "unit": null
- }
- }
- },
- "numa": {
- "info": {
- "hpa-feature": "numa",
- "hpa-version": "v1",
- "architecture": "generic"
- },
- "hpa-attributes": {
- "hw:numa_nodes": {
- "key": "numaNodes",
- "unit": null
- },
- "hw:numa_cpus": {
- "key": "numaCpu",
- "unit": null
- },
- "hw:numa_mem": {
- "key": "numaMem",
- "unit": "GB"
- }
- }
- },
- "hugePages": {
- "info": {
- "hpa-feature": "hugePages",
- "hpa-version": "v1",
- "architecture": "generic"
- },
- "hpa-attributes": {
- "hw:mem_page_size": {
- "key": "memoryPageSize",
- "unit": null
- }
- }
- },
- "pciePassthrough": {
- "info": {
- "hpa-feature": "pciePassthrough",
- "hpa-version": "v1",
- "architecture": "generic"
- },
- "hpa-attributes": {
- "pci_count": {
- "key": "pciCount",
- "unit": null
- },
- "pci_vendor_id": {
- "key": "pciVendorId",
- "unit": null
- },
- "pci_device_id": {
- "key": "pciDeviceId",
- "unit": null
- }
- }
- }
-}"""
-
-MOCK_GET_IMAGE_RESPONSE = {
- "images": [
- {
- "id": "1", "name": "cirros", "self": "test",
- "os_distro": "CirrOS", "os_version": "0.3",
- "application": "test", "application_vendor": "ONAP",
- "application_version": 1, "architecture": "x86",
- "schema": None
- },
- {
- "id": "2", "name": "cirros", "self": "test",
- "os_distro": "CirrOS", "os_version": "0.3",
- "application": "test", "application_vendor": "ONAP",
- "application_version": 1, "architecture": "x86",
- "schema": "req_resource"
- },
- ]
-}
-
-MOCK_GET_AZ_RESPONSE = {
- "availabilityZoneInfo": [
- {
- "zoneName": "production",
- "zoneState": {"available": True},
- "hosts": {"hypervisor": "kvm"}
- },
- {
- "zoneName": "testing",
- },
- ]
-}
-
-MOCK_HYPERVISOR_RESPONSE = {
- "hypervisors": [
- {"hypervisor_type": "kvm"}
- ]
-}
-
-MOCK_GET_SNAPSHOT_RESPONSE = {
- "snapshots": [
- {
- "id": 1, "name": "test", "metadata":
- {
- "architecture": "x86", "os-distro": "clearlinux",
- "os-version": "276", "vendor": "intel", "version": 3,
- "selflink": "test", "prev-snapshot-id": "test-id"
- }
- },
- {"id": 2, "name": "test2"}
- ]
-}
-
-MOCK_GET_HYPERVISOR_RESPONSE = {
- "hypervisors": [
- {
- "hypervisor_hostname": "testing", "state": "ACTIVE",
- "id": 1, "local_gb": 256, "memory_mb": 1024,
- "hypervisor_links": "link", "host_ip": "127.0.0.1",
- "cpu_info":
- u'{"topology": {"cores": 8, "threads": 16,'
- u'"sockets": 4}}'
- },
- {
- "hypervisor_hostname": "testing2", "state": "XXX",
- "id": 1, "local_gb": 256, "memory_mb": 1024,
- "hypervisor_links": "link", "host_ip": "127.0.0.1",
- }
- ]
-}
-
-TEST_REGISTER_ENDPOINT_REQUEST = {
- "defaultTenant": "project1"
-}
-
-
-class TestFlavors(test_base.TestRequest):
- def setUp(self):
- super(TestFlavors, self).setUp()
- self.req_to_aai_backup = restcall.req_to_aai
-
- def tearDown(self):
- super(TestFlavors, self).tearDown()
- restcall.req_to_aai = self.req_to_aai_backup
-
- def _get_mock_response(self, return_value=None):
- mock_response = mock.Mock(spec=test_base.MockResponse)
- mock_response.status_code = status.HTTP_200_OK
- mock_response.json.return_value = return_value
- return mock_response
-
- @mock.patch.object(Flavors, '_get_flavor_extra_specs')
- @mock.patch.object(VimDriverUtils, 'get_session')
- @mock.patch.object(VimDriverUtils, 'get_vim_info')
- def test_register_endpoint_successfully(
- self, mock_get_vim_info, mock_get_session,
- mock_get_extra_specs):
- settings.AAI_SCHEMA_VERSION = "v13"
- restcall.req_to_aai = mock.Mock()
- restcall.req_to_aai.return_value = (0, {}, status.HTTP_200_OK)
- mock_get_vim_info.return_value = mock_info.MOCK_VIM_INFO
- mock_get_session.return_value = test_base.get_mock_session(
- ["get"], {
- "side_effect": [
- self._get_mock_response(MOCK_GET_TENANT_RESPONSE),
- self._get_mock_response(MOCK_GET_FLAVOR_RESPONSE),
- self._get_mock_response(MOCK_GET_IMAGE_RESPONSE),
- self._get_mock_response(),
- self._get_mock_response(MOCK_GET_AZ_RESPONSE),
- self._get_mock_response(MOCK_HYPERVISOR_RESPONSE),
- self._get_mock_response(
- MOCK_GET_SNAPSHOT_RESPONSE),
- self._get_mock_response(
- MOCK_GET_HYPERVISOR_RESPONSE)
- ]
- })
- mock_extra_specs_response = mock.Mock(spec=test_base.MockResponse)
- mock_extra_specs_response.status_code = status.HTTP_200_OK
- mock_extra_specs_response.json.return_value = MOCK_GET_EXTRA_SPECS_RESPONSE
- mock_get_extra_specs.return_value = mock_extra_specs_response
-
- with mock.patch('__builtin__.open', mock.mock_open(read_data=MOCK_HPA_RESPONSE)) as mock_file:
- response = self.client.post((
- "/api/%s/v0/windriver-hudson-dc_RegionOne/"
- "registry" % test_base.MULTIVIM_VERSION),
- TEST_REGISTER_ENDPOINT_REQUEST,
- HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
-
- self.assertEquals(status.HTTP_202_ACCEPTED,
- response.status_code)
-
- @mock.patch.object(VimDriverUtils, 'delete_vim_info')
- def test_unregister_endpoint_successfully(
- self, mock_delete_vim_info):
- mock_delete_vim_info.return_value = 0
-
- response = self.client.delete((
- "/api/%s/v0/windriver-hudson-dc_RegionOne/"
- "registry" % test_base.MULTIVIM_VERSION),
- "{}", content_type="application/json",
- HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
-
- self.assertEquals(status.HTTP_202_ACCEPTED,
- response.status_code)
-
- @mock.patch.object(VimDriverUtils, 'delete_vim_info')
- def test_fail_unregister_endpoint(
- self, mock_delete_vim_info):
- mock_delete_vim_info.return_value = 1
-
- response = self.client.delete((
- "/api/%s/v0/windriver-hudson-dc_RegionOne/"
- "registry" % test_base.MULTIVIM_VERSION),
- "{}", content_type="application/json",
- HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
-
- self.assertEquals(status.HTTP_500_INTERNAL_SERVER_ERROR,
- response.status_code)
diff --git a/ocata/ocata/registration/tests/test_registration.py b/ocata/ocata/registration/tests/test_registration.py
index 2daca137..540f170b 100644
--- a/ocata/ocata/registration/tests/test_registration.py
+++ b/ocata/ocata/registration/tests/test_registration.py
@@ -294,7 +294,7 @@ class TestRegistration(test_base.TestRequest):
"registry"), "{}", content_type="application/json",
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
- self.assertEquals(status.HTTP_500_INTERNAL_SERVER_ERROR,
+ self.assertEquals(status.HTTP_204_NO_CONTENT,
response.status_code)
@mock.patch.object(VimDriverUtils, 'get_session')
diff --git a/ocata/ocata/registration/tests/test_registration2.py b/ocata/ocata/registration/tests/test_registration2.py
index bfb7d7a5..13851763 100644
--- a/ocata/ocata/registration/tests/test_registration2.py
+++ b/ocata/ocata/registration/tests/test_registration2.py
@@ -92,12 +92,12 @@ class TestRegistration2(unittest.TestCase):
}
)
- resp = self.view.register_helper._discover_flavors(
+ retcode, content = self.view.register_helper._discover_flavors(
vimid="windriver-hudson-dc_RegionOne",
session=mock_session, viminfo=MOCK_VIM_INFO
)
- self.assertIsNone(resp)
+ self.assertEquals(retcode, 0)
def test_discover_flavors_w_hpa_numa(self):
restcall.req_to_aai = mock.Mock()
@@ -112,9 +112,9 @@ class TestRegistration2(unittest.TestCase):
}
),
- resp = self.view.register_helper._discover_flavors(
+ retcode, content = self.view.register_helper._discover_flavors(
vimid="windriver-hudson-dc_RegionOne",
session=mock_session, viminfo=MOCK_VIM_INFO
)
- self.assertIsNone(resp)
+ self.assertEquals(retcode, 11)
diff --git a/pike/pike/registration/tests/test_registration.py b/pike/pike/registration/tests/test_registration.py
index becbbcc1..84840cdb 100644
--- a/pike/pike/registration/tests/test_registration.py
+++ b/pike/pike/registration/tests/test_registration.py
@@ -294,7 +294,7 @@ class TestRegistration(test_base.TestRequest):
"registry"), "{}", content_type="application/json",
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
- self.assertEquals(status.HTTP_500_INTERNAL_SERVER_ERROR,
+ self.assertEquals(status.HTTP_204_NO_CONTENT,
response.status_code)
@mock.patch.object(VimDriverUtils, 'get_session')
diff --git a/share/common/msapi/helper.py b/share/common/msapi/helper.py
index 3e10c0fd..48626cc9 100644
--- a/share/common/msapi/helper.py
+++ b/share/common/msapi/helper.py
@@ -11,9 +11,14 @@
import json
import logging
-import re
+# import re
+import uuid
-from common.exceptions import VimDriverNewtonException
+import threading
+import datetime
+import time
+
+# from common.exceptions import VimDriverNewtonException
from common.utils import restcall
from rest_framework import status
@@ -21,6 +26,7 @@ from rest_framework import status
logger = logging.getLogger(__name__)
+# Helper of MultiCloud API invocation
class Helper(object):
@staticmethod
@@ -53,4 +59,222 @@ class Helper(object):
content = json.JSONDecoder().decode(ret[1])
ret[1] = content
return ret
- return [1, None, status.HTTP_404_NOT_FOUND] # return resource not found in case no type found
\ No newline at end of file
+ return [1, None, status.HTTP_404_NOT_FOUND] # return resource not found in case no type found
+
+
+# Helper of AAI resource access
+class MultiCloudAAIHelper(object):
+
+ def __init__(self, multicloud_prefix, aai_base_url):
+ self.proxy_prefix = multicloud_prefix
+ self.aai_base_url = aai_base_url
+ self._logger = logger
+ # super(MultiCloudAAIHelper, self).__init__()
+
+ def _get_list_resources(
+ self, resource_url, service_type, session, viminfo,
+ vimid, content_key):
+ service = {
+ 'service_type': service_type,
+ 'interface': 'public'
+ }
+
+ # the identity service should not be filtered by region since it might be the first call
+ # made to figure out the available region list
+ if service_type != 'identity':
+ service['region_name'] = viminfo['openstack_region_id']\
+ if viminfo.get('openstack_region_id') else viminfo['cloud_region_id']
+
+ self._logger.debug("making request with URI:%s,%s" % (resource_url, service))
+ resp = session.get(resource_url, endpoint_filter=service)
+ self._logger.debug("request returns with status %s" % resp.status_code)
+ if resp.status_code == status.HTTP_200_OK:
+ self._logger.debug("with content:%s" % resp.json())
+ content = resp.json()
+ return content.get(content_key)
+ return None # failed to discover resources
+
+ def _update_resoure(self, cloud_owner, cloud_region_id,
+ resoure_id, resource_info, resource_type):
+ if cloud_owner and cloud_region_id:
+ self._logger.debug(
+ ("_update_resoure,vimid:%(cloud_owner)s"
+ "_%(cloud_region_id)s req_to_aai: %(resoure_id)s, "
+ "%(resource_type)s, %(resource_info)s")
+ % {
+ "cloud_owner": cloud_owner,
+ "cloud_region_id": cloud_region_id,
+ "resoure_id": resoure_id,
+ "resource_type": resource_type,
+ "resource_info": resource_info,
+ })
+
+ # get the resource first
+ resource_url = ("/cloud-infrastructure/cloud-regions/"
+ "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
+ "%(resource_type)ss/%(resource_type)s/%(resoure_id)s"
+ % {
+ "cloud_owner": cloud_owner,
+ "cloud_region_id": cloud_region_id,
+ "resoure_id": resoure_id,
+ "resource_type": resource_type,
+ })
+
+ # get cloud-region
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "GET")
+
+ # add resource-version
+ if retcode == 0 and content:
+ content = json.JSONDecoder().decode(content)
+ #resource_info["resource-version"] = content["resource-version"]
+ content.update(resource_info)
+ resource_info = content
+
+ #then update the resource
+ retcode, content, status_code = \
+ restcall.req_to_aai(resource_url, "PUT", content=resource_info)
+
+ self._logger.debug(
+ ("_update_resoure,vimid:%(cloud_owner)s"
+ "_%(cloud_region_id)s req_to_aai: %(resoure_id)s, "
+ "return %(retcode)s, %(content)s, %(status_code)s")
+ % {
+ "cloud_owner": cloud_owner,
+ "cloud_region_id": cloud_region_id,
+ "resoure_id": resoure_id,
+ "retcode": retcode,
+ "content": content,
+ "status_code": status_code,
+ })
+ return retcode, content
+ # unknown cloud owner,region_id
+ return (
+ 11,
+ "Unknown Cloud Region ID: %s ,%s" %(cloud_owner, cloud_region_id)
+ )
+ pass
+
+
+# thread helper
+class MultiCloudThreadHelper(object):
+ '''
+ generic helper that runs backlog items (e.g. AAI registration workers) in a background thread
+ '''
+
+ @staticmethod
+ def get_epoch_now_usecond():
+ '''
+ get the epoch timestamp of this moment, in microseconds
+ :return:
+ '''
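+ # note: time.mktime() converts the local timetuple to seconds since
+ # the Unix epoch; the scheduler below only ever compares these
+ # values against each other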
+ now_time = datetime.datetime.now()
+ epoch_time_sec = time.mktime(now_time.timetuple())
+ return int(epoch_time_sec * 1e6 + now_time.microsecond)
+
+ def __init__(self):
+ # format of a backlog item:
+ # {
+ # "id": unique string to identify this item in backlog,
+ # "worker": pointer to helper method
+ # "payload": opaque object to pass to the worker for processing
+ # "repeat": interval in micro-seconds for repeating this worker, 0 for one time worker
+ # "timestamp": time stamp of last invocation of this worker, 0 for initial state
+ # "status": opaque object to represent the progress of the backlog processing
+ # }
+ # format of backlog:
+ # {"<id value of backlog item>": <backlog item>, ...}
+ self.backlog = {}
+ # expired backlog items
+ self.expired_backlog = {}
+ # self.lock = threading.Lock()
+ self.state_ = 0 # 0: stopped, 1: started
+ self.thread = None
+
+ def state(self):
+ return self.state_
+
+ def start(self):
+ if 0 == self.state_:
+ self.state_ = 1
+ self.thread = MultiCloudThreadHelper.HelperThread(self)
+ self.thread.start()
+ else:
+ pass
+
+ def stop(self):
+ self.state_ = 0
+
+ def add(self, backlog_item):
+ # backlog items are plain dicts, so test keys with "in" rather than hasattr()
+ if "worker" not in backlog_item:
+ return None
+ if "id" not in backlog_item:
+ backlog_item["id"] = str(uuid.uuid1())
+ if "repeat" not in backlog_item:
+ backlog_item["repeat"] = 0
+ backlog_item["timestamp"] = 0
+
+ # self.lock.acquire()
+ # dict.update() takes a mapping, not a key/value pair
+ self.backlog[backlog_item["id"]] = backlog_item
+ # self.lock.release()
+ return len(self.backlog)
+
+ def get(self, backlog_id):
+ return self.backlog.get(backlog_id, None) or self.expired_backlog.get(backlog_id, None)
+
+ def remove(self, backlog_id):
+ # self.lock.acquire()
+ self.backlog.pop(backlog_id, None)
+ self.expired_backlog.pop(backlog_id, None)
+ # self.lock.release()
+
+ def reset(self):
+ # self.lock.acquire()
+ self.backlog.clear()
+ self.expired_backlog.clear()
+ # self.lock.release()
+
+ def count(self):
+ return len(self.backlog)
+
+ class HelperThread(threading.Thread):
+ def __init__(self, owner):
+ threading.Thread.__init__(self)
+ self.daemon = True
+ self.duration = 0
+ self.owner = owner
+
+ def run(self):
+ logger.debug("Start processing backlogs")
+ while self.owner.state_ == 1 and self.owner.count() > 0:
+ # iterate over items(); in Python 2 this copies the pairs, so
+ # workers may safely remove entries from the backlog during the loop
+ for backlog_id, item in self.owner.backlog.items():
+ # check interval for repeatable backlog item
+ now = MultiCloudThreadHelper.get_epoch_now_usecond()
+ repeat_interval = item.get("repeat", 0)
+ if repeat_interval > 0:
+ timestamp = item.get("timestamp", 0)
+ # compare interval with elapsed time.
+ # workaround the case of timestamp turnaround
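+ # e.g. repeat=5*1000000: run only once (now - timestamp) exceeds
+ # 5 seconds; on turnaround (now <= timestamp) the guard compares
+ # repeat_interval with itself and the item runs immediately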
+ if repeat_interval > (now - timestamp
+ if now > timestamp
+ else repeat_interval):
+ # not time to run this backlog item yet
+ continue
+
+ worker = item.get("worker", None)
+ payload = item.get("payload", None)
+ try:
+ item["status"] = worker(payload) or 0
+ except Exception as e:
+ item["status"] = e.message
+ if item.get("repeat", 0) == 0:
+ self.owner.remove(backlog_id)
+ # keep only the id and status
+ self.owner.expired_backlog[backlog_id] = {"status": item["status"]}
+ else:
+ item["timestamp"] = now
+ pass
+ # end of loop
+ logger.debug("stop processing backlogs")
+ self.owner.state_ = 0
+ # end of processing
diff --git a/share/newton_base/registration/registration.py b/share/newton_base/registration/registration.py
index 0f6d5540..6e8f8809 100644
--- a/share/newton_base/registration/registration.py
+++ b/share/newton_base/registration/registration.py
@@ -16,7 +16,6 @@ import logging
import json
import uuid
import traceback
-import threading
from keystoneauth1.exceptions import HttpError
from rest_framework import status
@@ -25,6 +24,7 @@ from rest_framework.views import APIView
from common.exceptions import VimDriverNewtonException
from common.msapi import extsys
+from common.msapi import helper
from common.utils import restcall
from newton_base.util import VimDriverUtils
@@ -37,6 +37,10 @@ class Registry(APIView):
if not hasattr(self, "_logger"):
self._logger = logger
+ if not hasattr(self, "register_thread"):
+ # dedicate thread to offload vim registration process
+ self.register_thread = helper.MultiCloudThreadHelper()
+
if not hasattr(self, "register_helper") or not self.register_helper:
if not hasattr(self, "proxy_prefix"):
self.proxy_prefix = "multicloud"
@@ -49,11 +53,17 @@ class Registry(APIView):
self._logger.debug("with data: %s" % request.data)
try:
-
- thread1 = RegisterHelperThread(self.register_helper.registry)
- thread1.addv0(vimid)
- if 0 == thread1.state():
- thread1.start()
+ # compose the one time backlog item
+ backlog_item = {
+ "id": vimid,
+ "worker": self.register_helper.registryV0,
+ "payload": (self.register_helper, vimid),
+ "repeat": 0,
+ "status": (1, "The registration process waits to be scheduled to run")
+ }
+ self.register_thread.add(backlog_item)
+ if 0 == self.register_thread.state():
+ self.register_thread.start()
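+ # note: the helper thread exits once the backlog drains (state()
+ # returns 0 again), which is why each request may need to restart it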
return Response(status=status.HTTP_202_ACCEPTED)
@@ -68,15 +78,47 @@ class Registry(APIView):
data={'error': str(e)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+ def get(self, request, vimid):
+ try:
+ backlog_item = self.register_thread.get(vimid)
+ if backlog_item:
+ return Response(
+ data={'status': backlog_item.get("status", "Status not available, vimid: %s" % vimid)},
+ status=status.HTTP_200_OK)
+ else:
+ return Response(
+ data={
+ 'error': "Registration process for "
+ "Cloud Region not found: %s"
+ % vimid
+ },
+ status=status.HTTP_404_NOT_FOUND)
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(
+ data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
def delete(self, request, vimid=""):
self._logger.debug("Registration--delete::data> %s" % request.data)
self._logger.debug("Registration--delete::vimid > %s"% vimid)
try:
- retcode = RegistryHelper.unregistry(vimid)
+ # compose the one time backlog item
+ backlog_item = {
+ "id": vimid,
+ "worker": self.register_helper.unregistryV0,
+ "payload": (self.register_helper, vimid),
+ "repeat": 0,
+ "status": (1, "The registration process waits to be scheduled to run")
+ }
+ self.register_thread.add(backlog_item)
+ if 0 == self.register_thread.state():
+ self.register_thread.start()
- #ret_code = VimDriverUtils.delete_vim_info(vimid)
- return Response(status=status.HTTP_204_NO_CONTENT if retcode==0 else status.HTTP_500_INTERNAL_SERVER_ERROR)
+ return Response(
+ status=status.HTTP_204_NO_CONTENT
+ )
except VimDriverNewtonException as e:
return Response(data={'error': e.content}, status=e.status_code)
except HttpError as e:
@@ -88,7 +130,8 @@ class Registry(APIView):
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-class RegistryHelper(object):
+
+class RegistryHelper(helper.MultiCloudAAIHelper):
'''
Helper code to discover and register a cloud region's resource
'''
@@ -97,8 +140,15 @@ class RegistryHelper(object):
self.proxy_prefix = multicloud_prefix
self.aai_base_url = aai_base_url
self._logger = logger
+ super(RegistryHelper, self).__init__(multicloud_prefix, aai_base_url)
- def registry(self, vimid=""):
+ def registryV1(self, cloud_owner, cloud_region_id):
+ # cloud_owner = payload.get("cloud-owner", None)
+ # cloud_region_id = payload.get("cloud-region-id", None)
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return self.registryV0(vimid)
+
+ def registryV0(self, vimid):
# populate proxy identity url
self._update_proxy_identity_endpoint(vimid)
@@ -106,48 +156,85 @@ class RegistryHelper(object):
# get token:
viminfo = VimDriverUtils.get_vim_info(vimid)
if not viminfo:
- raise VimDriverNewtonException(
- "There is no cloud-region with {cloud-owner}_{cloud-region-id}=%s in AAI" % vimid)
+ return (
+ 10,
+ "Cloud Region not found in AAI: %s" % vimid
+ )
# set the default tenant since there is no tenant info in the VIM yet
sess = VimDriverUtils.get_session(
viminfo, tenant_name=viminfo['tenant'])
# step 1. discover all projects and populate into AAI
- self._discover_tenants(vimid, sess, viminfo)
+ # use "content" to avoid shadowing the imported rest_framework status module
+ retcode, content = self._discover_tenants(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return (
+ # retcode, content
+ # )
# discover all flavors
- self._discover_flavors(vimid, sess, viminfo)
+ retcode, content = self._discover_flavors(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return (
+ # retcode, content
+ # )
# discover all images
- self._discover_images(vimid, sess, viminfo)
+ retcode, content = self._discover_images(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return (
+ # retcode, content
+ # )
# discover all az
- self._discover_availability_zones(vimid, sess, viminfo)
+ retcode, content = self._discover_availability_zones(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return (
+ # retcode, content
+ # )
# discover all vg
#self._discover_volumegroups(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return (
+ # retcode, status
+ # )
# discover all snapshots
#self._discover_snapshots(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return retcode, content
# discover all server groups
#self.discover_servergroups(request, vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return retcode, content
# discover all pservers
#self._discover_pservers(vimid, sess, viminfo)
+ # if 0 != retcode:
+ # return retcode, content
- return 0
-
+ return (
+ 0,
+ "Registration finished for Cloud Region: %s" % vimid
+ )
- def unregistry(self, vimid=""):
+ def unregistryV1(self, cloud_owner, cloud_region_id):
+ # cloud_owner = payload.get("cloud-owner", None)
+ # cloud_region_id = payload.get("cloud-region-id", None)
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return self.unregistryV0(vimid)
+ def unregistryV0(self, vimid):
# prepare request resource to vim instance
# get token:
viminfo = VimDriverUtils.get_vim_info(vimid)
if not viminfo:
- raise VimDriverNewtonException(
- "There is no cloud-region with {cloud-owner}_{cloud-region-id}=%s in AAI" % vimid)
+ return (
+ 10,
+ "Cloud Region not found:" % vimid
+ )
cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
@@ -164,8 +251,14 @@ class RegistryHelper(object):
restcall.req_to_aai(resource_url, "GET")
# add resource-version
+ cloudregiondata = {}
if retcode == 0 and content:
cloudregiondata = json.JSONDecoder().decode(content)
+ else:
+ return (
+ 10,
+ "Cloud Region not found: %s, %s" % (cloud_owner, cloud_region_id)
+ )
# step 1. remove all tenants
tenants = cloudregiondata.get("tenants", None)
@@ -314,87 +407,7 @@ class RegistryHelper(object):
retcode, content, status_code = \
restcall.req_to_aai(resource_url, "DELETE")
- return retcode, content, status_code
-
-
- def _get_list_resources(
- self, resource_url, service_type, session, viminfo,
- vimid, content_key):
- service = {
- 'service_type': service_type,
- 'interface': 'public'
- }
-
- # identity service should not filtered by region since it is might be first call
- # to figure out available region list
- if service_type != 'identity':
- service['region_name'] = viminfo['openstack_region_id']\
- if viminfo.get('openstack_region_id') else viminfo['cloud_region_id']
-
- self._logger.info("making request with URI:%s,%s" % (resource_url,service))
- resp = session.get(resource_url, endpoint_filter=service)
- self._logger.info("request returns with status %s" % resp.status_code)
- if resp.status_code == status.HTTP_200_OK:
- self._logger.debug("with content:%s" % resp.json())
- content = resp.json()
- return content.get(content_key)
- return # failed to discover resources
-
- def _update_resoure(self, cloud_owner, cloud_region_id,
- resoure_id, resource_info, resource_type):
- if cloud_owner and cloud_region_id:
- self._logger.debug(
- ("_update_resoure,vimid:%(cloud_owner)s"
- "_%(cloud_region_id)s req_to_aai: %(resoure_id)s, "
- "%(resource_type)s, %(resource_info)s")
- % {
- "cloud_owner": cloud_owner,
- "cloud_region_id": cloud_region_id,
- "resoure_id": resoure_id,
- "resource_type": resource_type,
- "resource_info": resource_info,
- })
-
- #get the resource first
- resource_url = ("/cloud-infrastructure/cloud-regions/"
- "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
- "%(resource_type)ss/%(resource_type)s/%(resoure_id)s"
- % {
- "cloud_owner": cloud_owner,
- "cloud_region_id": cloud_region_id,
- "resoure_id": resoure_id,
- "resource_type": resource_type,
- })
-
- # get cloud-region
- retcode, content, status_code = \
- restcall.req_to_aai(resource_url, "GET")
-
- # add resource-version
- if retcode == 0 and content:
- content = json.JSONDecoder().decode(content)
- #resource_info["resource-version"] = content["resource-version"]
- content.update(resource_info)
- resource_info = content
-
- #then update the resource
- retcode, content, status_code = \
- restcall.req_to_aai(resource_url, "PUT", content=resource_info)
-
- self._logger.debug(
- ("_update_resoure,vimid:%(cloud_owner)s"
- "_%(cloud_region_id)s req_to_aai: %(resoure_id)s, "
- "return %(retcode)s, %(content)s, %(status_code)s")
- % {
- "cloud_owner": cloud_owner,
- "cloud_region_id": cloud_region_id,
- "resoure_id": resoure_id,
- "retcode": retcode,
- "content": content,
- "status_code": status_code,
- })
- return retcode
- return 1 # unknown cloud owner,region_id
+ return retcode, content
def _discover_tenants(self, vimid="", session=None, viminfo=None):
try:
@@ -410,10 +423,12 @@ class RegistryHelper(object):
self._update_resoure(
cloud_owner, cloud_region_id, tenant['id'],
tenant_info, "tenant")
-
+ return (0, "succeed")
except VimDriverNewtonException as e:
self._logger.error("VimDriverNewtonException: status:%s, response:%s" % (e.http_status, e.content))
- return
+ return (
+ e.http_status, e.content
+ )
except HttpError as e:
if e.http_status == status.HTTP_403_FORBIDDEN:
### get the tenant information from the token response
@@ -434,12 +449,21 @@ class RegistryHelper(object):
except Exception as ex:
self._logger.error(traceback.format_exc())
+ return (
+ 11,
+ ex.message
+ )
else:
self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
- return
+ return (
+ e.http_status, e.response.json()
+ )
except Exception as e:
self._logger.error(traceback.format_exc())
- return
+ return (
+ 11,
+ e.message
+ )
def _discover_flavors(self, vimid="", session=None, viminfo=None):
try:
@@ -472,19 +496,26 @@ class RegistryHelper(object):
hpa_capabilities = self._get_hpa_capabilities(flavor, extraResp, viminfo)
flavor_info['hpa-capabilities'] = {'hpa-capability': hpa_capabilities}
- self._update_resoure(
+ retcode, content = self._update_resoure(
cloud_owner, cloud_region_id, flavor['id'],
flavor_info, "flavor")
+ return (0, "succeed")
except VimDriverNewtonException as e:
self._logger.error("VimDriverNewtonException: status:%s, response:%s" % (e.http_status, e.content))
- return
+ return (
+ e.http_status, e.content
+ )
except HttpError as e:
self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
- return
+ return (
+ e.http_status, e.response.json()
+ )
except Exception as e:
self._logger.error(traceback.format_exc())
- return
+ return (
+ 11, e.message
+ )
def _get_hpa_capabilities(self, flavor, extra_specs, viminfo):
hpa_caps = []
@@ -571,8 +602,11 @@ class RegistryHelper(object):
'hpa-attribute-value':
'{{\"value\":\"{0}\",\"unit\":\"{1}\"}}'.format(flavor['ram'],"MB")
})
- except Exception:
+ except Exception as e:
self._logger.error(traceback.format_exc())
+ return (
+ 11, e.message
+ )
return basic_capability
@@ -927,16 +961,22 @@ class RegistryHelper(object):
# parse the schema? TBD
# self.update_image(cloud_owner, cloud_region_id, image_info)
#metadata_info = {}
-
+ return (0, "succeed")
except VimDriverNewtonException as e:
self._logger.error("VimDriverNewtonException: status:%s, response:%s" % (e.http_status, e.content))
- return
+ return (
+ e.http_status, e.content
+ )
except HttpError as e:
self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
- return
+ return (
+ e.http_status, e.response.json()
+ )
except Exception as e:
self._logger.error(traceback.format_exc())
- return
+ return (
+ 11, e.message
+ )
def _discover_availability_zones(self, vimid="", session=None,
viminfo=None):
@@ -957,9 +997,9 @@ class RegistryHelper(object):
azName = az.get('zoneName', None)
# comment it for test the registration process only
# if azName == 'nova':
- # continue;
+ # continue
if azName == 'internal':
- continue;
+ continue
# get list of host names
pservers_info = [k for (k, v) in az['hosts'].items()]
@@ -968,37 +1008,18 @@ class RegistryHelper(object):
az_info['hypervisor-type'] = 'QEMU' # default for OpenStack
- # if az.get('hosts'):
- # for (k, v) in az['hosts'].items():
- # req_resource = "/os-hypervisors/detail?hypervisor_hostname_pattern=%s" % k
- # service = {'service_type': "compute",
- # 'interface': 'public',
- # 'region_name': viminfo['openstack_region_id']
- # if viminfo.get('openstack_region_id')
- # else viminfo['cloud_region_id']
- # }
- #
- # self._logger.info("making request with URI:%s" % req_resource)
- # resp = session.get(req_resource, endpoint_filter=service)
- # self._logger.info("request returns with status %s" % resp.status_code)
- # if resp.status_code == status.HTTP_200_OK:
- # self._logger.debug("with content:%s" % resp.json())
- # pass
- # content = resp.json()
- # if resp.status_code != status.HTTP_200_OK and not content[0]:
- # continue
- # az_info['hypervisor-type'] = content['hypervisors'][0]['hypervisor_type']\
- # if len(content.get('hypervisors')) else ''
- #
- # break
- ret = self._update_resoure(
+ ret, content = self._update_resoure(
cloud_owner, cloud_region_id, az['zoneName'], az_info,
"availability-zone")
if ret != 0:
# failed to update image
self._logger.debug("failed to populate az info into AAI: %s, az name: %s, ret:%s"
% (vimid, az_info['availability-zone-name'], ret))
- return None
+ # return (
+ # ret,
+ # "fail to popluate az info into AAI:%s" % content
+ # )
+ continue
# populate pservers:
for hostname in pservers_info:
@@ -1023,17 +1044,22 @@ class RegistryHelper(object):
self._update_pserver_relation_az(cloud_owner, cloud_region_id, pinfo, azName)
self._update_pserver_relation_cloudregion(cloud_owner, cloud_region_id, pinfo)
- return az_pserver_info
-
+ return (0, az_pserver_info)
except VimDriverNewtonException as e:
self._logger.error("VimDriverNewtonException: status:%s, response:%s" % (e.http_status, e.content))
- return None
+ return (
+ e.http_status, e.content
+ )
except HttpError as e:
self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
- return None
+ return (
+ e.http_status, e.response.json()
+ )
except Exception as e:
self._logger.error(traceback.format_exc())
- return None
+ return (
+ 11, e.message
+ )
# def _discover_volumegroups(self, vimid="", session=None, viminfo=None):
# cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
@@ -1076,23 +1102,33 @@ class RegistryHelper(object):
snapshot_info['snapshot-selflink'] = ss['metadata'].get('selflink')
snapshot_info['prev-snapshot-id'] = ss['metadata'].get('prev-snapshot-id')
- ret = self._update_resoure(
+ ret, content = self._update_resoure(
cloud_owner, cloud_region_id, ss['id'], snapshot_info,
"snapshot")
if ret != 0:
# failed to update image
self._logger.debug("failed to populate snapshot info into AAI: %s, snapshot-id: %s, ret:%s"
% (vimid, snapshot_info['snapshot-id'], ret))
-
+ return (
+ ret,
+ "fail to populate snapshot into AAI:%s" % content
+ )
+ return 0, "Succeed"
except VimDriverNewtonException as e:
self._logger.error("VimDriverNewtonException: status:%s, response:%s" % (e.http_status, e.content))
- return
+ return (
+ e.http_status, e.content
+ )
except HttpError as e:
self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
- return
+ return (
+ e.http_status, e.response.json()
+ )
except Exception as e:
self._logger.error(traceback.format_exc())
- return
+ return (
+ 11, e.message
+ )
# def _discover_servergroups(self, vimid="", session=None, viminfo=None):
# for sg in self._get_list_resources(
@@ -1151,9 +1187,17 @@ class RegistryHelper(object):
cloud_owner, cloud_region_id,
pserverinfo['hostname'], retcode, content,
status_code))
-
-
- def _update_pserver_relation_cloudregion(self, cloud_owner, cloud_region_id, pserverinfo):
+ return (
+ 0,
+ "succeed"
+ )
+
+ def _update_pserver_relation_cloudregion(
+ self,
+ cloud_owner,
+ cloud_region_id,
+ pserverinfo
+ ):
related_link = ("%s/cloud-infrastructure/cloud-regions/"
"cloud-region/%s/%s"% (
self.aai_base_url, cloud_owner,
@@ -1194,6 +1238,10 @@ class RegistryHelper(object):
% (cloud_owner, cloud_region_id, cloud_owner, cloud_region_id,
pserverinfo['hostname'], retcode, content,
status_code))
+ return (
+ 0,
+ "succeed"
+ )
def _update_pserver(self, cloud_owner, cloud_region_id, pserverinfo):
'''
@@ -1254,8 +1302,13 @@ class RegistryHelper(object):
self._logger.debug("update_snapshot,vimid:%s_%s req_to_aai: %s, return %s, %s, %s"
% (cloud_owner,cloud_region_id, pserverinfo['hostname'], retcode, content, status_code))
- return retcode
- return 1 # unknown cloud owner,region_id
+ return retcode, content
+ else:
+ # unknown cloud owner,region_id
+ return (
+ 10,
+ "Cloud Region not found: %s,%s" % (cloud_owner, cloud_region_id)
+ )
def _discover_pservers(self, vimid="", session=None, viminfo=None):
try:
@@ -1287,22 +1340,30 @@ class RegistryHelper(object):
n_cpus = cputopo['cores'] * cputopo['threads'] * cputopo['sockets']
hypervisor_info['number-of-cpus'] = n_cpus
- ret = self._update_pserver(cloud_owner, cloud_region_id,
+ ret, content = self._update_pserver(cloud_owner, cloud_region_id,
hypervisor_info)
if ret != 0:
# failed to update image
self._logger.debug("failed to populate pserver info into AAI: %s, hostname: %s, ret:%s"
% (vimid, hypervisor_info['hostname'], ret))
+ return ret, "fail to update pserver to AAI:%s" % content
+ return 0, "succeed"
except VimDriverNewtonException as e:
self._logger.error("VimDriverNewtonException: status:%s, response:%s" % (e.http_status, e.content))
- return
+ return (
+ e.http_status, e.content
+ )
except HttpError as e:
self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
- return
+ return (
+ e.http_status, e.response.json()
+ )
except Exception as e:
self._logger.error(traceback.format_exc())
- return
+ return (
+ 11, e.message
+ )
def _update_proxy_identity_endpoint(self, vimid):
'''
@@ -1335,96 +1396,30 @@ class RegistryHelper(object):
self._logger.debug("update_proxy_identity_endpoint,vimid:%s req_to_aai: %s, return %s, %s, %s"
% (vimid, viminfo['identity-url'], retcode, content, status_code))
+ return 0, "succeed"
else:
self._logger.debug("failure: update_proxy_identity_endpoint,vimid:%s req_to_aai: return %s, %s, %s"
% (vimid, retcode, content, status_code))
+ return retcode, content
+ else:
+ return (
+ 10,
+ "Cloud Region not found: %s" % vimid
+ )
except VimDriverNewtonException as e:
self._logger.error("VimDriverNewtonException: status:%s, response:%s" % (e.http_status, e.content))
- return
+ return (
+ e.http_status, e.content
+ )
except HttpError as e:
self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
- return
+ return (
+ e.http_status, e.response.json()
+ )
except Exception as e:
self._logger.error(traceback.format_exc())
- return
-
-
-class RegisterHelperThread(threading.Thread):
- '''
- thread to register infrastructure resource into AAI
- '''
-
- def __init__(self, registry_helper):
- threading.Thread.__init__(self)
- self.daemon = True
- self.duration = 0
- self.helper = registry_helper
-
- # The queue of IDs of cloud regions, format:
- # v0: "owner1_regionid1"
- self.queuev0 = []
-
- # v1: {"cloud-owner": "owner1", "cloud-region-id": "regionid1"},
- self.queuev1 = []
- self.lock = threading.Lock()
-
- self.state_ = 0 # 0: stopped, 1: started
-
- def addv0(self, vimid):
- self.lock.acquire()
- self.queuev0.append(vimid)
- self.lock.release()
- return len(self.queuev0)
-
- def removev0(self, vimid):
- '''
- remove cloud region from list
- '''
- self.queuev0 = [x for x in self.queuev0 if x != vimid]
-
- def resetv0(self):
- self.queuev0 = []
-
- def countv0(self):
- return len(self.queuev0)
-
- def addv1(self, cloud_owner, cloud_region_id):
- self.lock.acquire()
- self.queuev1.append({"cloud-owner": cloud_owner, "cloud-region-id": cloud_region_id})
- self.lock.release()
- return len(self.queuev1)
-
- def removev1(self, cloud_owner, cloud_region_id):
- '''
- remove cloud region from list
- '''
- self.queuev1 = [x for x in self.queuev1 if x["cloud-owner"] != cloud_owner or x["cloud-region-id"] != cloud_region_id]
-
- def resetv1(self):
- self.queuev1 = []
-
- def countv1(self):
- return len(self.queuev1)
-
- def state(self):
- return self.state_
-
- def run(self):
- logger.debug("Starting registration thread")
- self.state_ = 1
- while self.helper and len(self.queuev0) > 0 and len(self.queuev1) > 0:
- self.lock.acquire()
- vimidv1 = self.queuev1.pop()
- self.lock.release()
- vimid = extsys.encode_vim_id(vimidv1["cloud-owner"], vimidv1["cloud-region-id"])
- self.helper(vimid)
-
- self.lock.acquire()
- vimidv0 = self.queuev0.pop()
- self.lock.release()
- self.helper(vimidv0)
-
- self.state_ = 0
- # end of processing
+ return (
+ 11, e.message
+ )
diff --git a/share/newton_base/resource/capacity.py b/share/newton_base/resource/capacity.py
index 1a08b9a5..c2fee361 100644
--- a/share/newton_base/resource/capacity.py
+++ b/share/newton_base/resource/capacity.py
@@ -35,112 +35,113 @@ class CapacityCheck(APIView):
self._logger.info("vimid, data> %s, %s" % (vimid, request.data))
self._logger.debug("META> %s" % request.META)
- hasEnoughResource = False
try:
- resource_demand = request.data
-
- tenant_name = None
- vim = VimDriverUtils.get_vim_info(vimid)
- sess = VimDriverUtils.get_session(vim, tenant_name)
-
- # get token:
- cloud_owner, regionid = extsys.decode_vim_id(vimid)
- interface = 'public'
- service = {'service_type': 'compute',
- 'interface': interface,
- 'region_name': vim['openstack_region_id']
- if vim.get('openstack_region_id')
- else vim['cloud_region_id']
- }
-
- # get limit for this tenant
- req_resouce = "/limits"
- self._logger.info("check limits> URI:%s" % req_resouce)
- resp = sess.get(req_resouce, endpoint_filter=service)
- self._logger.info("check limits> status:%s" % resp.status_code)
- content = resp.json()
- compute_limits = content['limits']['absolute']
- self._logger.debug("check limits> resp data:%s" % content)
-
- # get total resource of this cloud region
- try:
- req_resouce = "/os-hypervisors/statistics"
- self._logger.info("check os-hypervisors statistics> URI:%s" % req_resouce)
- resp = sess.get(req_resouce, endpoint_filter=service)
- self._logger.info("check os-hypervisors statistics> status:%s" % resp.status_code)
- content = resp.json()
- hypervisor_statistics = content['hypervisor_statistics']
- self._logger.debug("check os-hypervisors statistics> resp data:%s" % content)
- except HttpError as e:
- if e.http_status == status.HTTP_403_FORBIDDEN:
- # Due to non administrator account cannot get hypervisor data,
- # so construct enough resource data
- conVCPUS = int(resource_demand['vCPU'])
- conFreeRamMB = int(resource_demand['Memory'])
- conFreeDiskGB = int(resource_demand['Storage'])
- self._logger.info("Non administator forbidden to access hypervisor statistics data")
- hypervisor_statistics = {'vcpus_used': 0,
- 'vcpus': conVCPUS,
- 'free_ram_mb': conFreeRamMB,
- 'free_disk_gb': conFreeDiskGB}
- else:
- # non forbiden exeption will be redirected
- raise e
-
- # get storage limit for this tenant
- service['service_type'] = 'volumev2'
- req_resouce = "/limits"
- self._logger.info("check volumev2 limits> URI:%s" % req_resouce)
- resp = sess.get(req_resouce, endpoint_filter=service)
- self._logger.info("check volumev2> status:%s" % resp.status_code)
- content = resp.json()
- storage_limits = content['limits']['absolute']
- self._logger.debug("check volumev2> resp data:%s" % content)
-
- # compute actual available resource for this tenant
- remainVCPU = compute_limits['maxTotalCores'] - compute_limits['totalCoresUsed']
- remainHypervisorVCPU = hypervisor_statistics['vcpus'] - hypervisor_statistics['vcpus_used']
-
- if (remainVCPU > remainHypervisorVCPU):
- remainVCPU = remainHypervisorVCPU
-
- remainMEM = compute_limits['maxTotalRAMSize'] - compute_limits['totalRAMUsed']
- remainHypervisorMEM = hypervisor_statistics['free_ram_mb']
- if remainMEM > remainHypervisorMEM:
- remainMEM = remainHypervisorMEM
-
- remainStorage = storage_limits['maxTotalVolumeGigabytes'] - storage_limits['totalGigabytesUsed']
- remainHypervisorStorage = hypervisor_statistics['free_disk_gb']
- if (remainStorage > remainHypervisorStorage):
- remainStorage = remainHypervisorStorage
-
- # compare resource demanded with available
- if (int(resource_demand['vCPU']) > remainVCPU):
- hasEnoughResource = False
- elif (int(resource_demand['Memory']) > remainMEM):
- hasEnoughResource = False
- elif (int(resource_demand['Storage']) > remainStorage):
- hasEnoughResource = False
- else:
- hasEnoughResource = True
-
+ hasEnoughResource = self.get_tenant_cap_info(vimid, request.data)
self._logger.info("RESP with data> result:%s" % hasEnoughResource)
return Response(data={'result': hasEnoughResource}, status=status.HTTP_200_OK)
except VimDriverNewtonException as e:
self._logger.error("Plugin exception> status:%s,error:%s"
% (e.status_code, e.content))
- return Response(data={'result': hasEnoughResource,
+ return Response(data={'result': False,
'error': e.content}, status=e.status_code)
except HttpError as e:
self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
resp = e.response.json()
- resp.update({'result': hasEnoughResource})
+ resp.update({'result': False})
return Response(data=e.response.json(), status=e.http_status)
except Exception as e:
self._logger.error(traceback.format_exc())
- return Response(data={'result': hasEnoughResource, 'error': str(e)},
+ return Response(data={'result': False, 'error': str(e)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+ def get_tenant_cap_info(self, vimid, resource_demand):
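+ # returns True only when the demand (illustrative shape:
+ # {"vCPU": "4", "Memory": "8192", "Storage": "100"}, Memory in MB,
+ # Storage in GB) fits within every remaining tenant limit, each
+ # capped by the hypervisor headroom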
+ hasEnoughResource = False
+ tenant_name = None
+ vim = VimDriverUtils.get_vim_info(vimid)
+ sess = VimDriverUtils.get_session(vim, tenant_name)
+
+ # get token:
+ # cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ interface = 'public'
+ service = {'service_type': 'compute',
+ 'interface': interface,
+ 'region_name': vim['openstack_region_id']
+ if vim.get('openstack_region_id')
+ else vim['cloud_region_id']
+ }
+
+ # get limit for this tenant
+ req_resouce = "/limits"
+ self._logger.info("check limits> URI:%s" % req_resouce)
+ resp = sess.get(req_resouce, endpoint_filter=service)
+ self._logger.info("check limits> status:%s" % resp.status_code)
+ content = resp.json()
+ compute_limits = content['limits']['absolute']
+ self._logger.debug("check limits> resp data:%s" % content)
+
+ # get total resource of this cloud region
+ try:
+ req_resouce = "/os-hypervisors/statistics"
+ self._logger.info("check os-hypervisors statistics> URI:%s" % req_resouce)
+ resp = sess.get(req_resouce, endpoint_filter=service)
+ self._logger.info("check os-hypervisors statistics> status:%s" % resp.status_code)
+ content = resp.json()
+ hypervisor_statistics = content['hypervisor_statistics']
+ self._logger.debug("check os-hypervisors statistics> resp data:%s" % content)
+ except HttpError as e:
+ if e.http_status == status.HTTP_403_FORBIDDEN:
+ # a non-administrator account cannot read hypervisor data,
+ # so fabricate statistics that exactly satisfy the demand
+ conVCPUS = int(resource_demand['vCPU'])
+ conFreeRamMB = int(resource_demand['Memory'])
+ conFreeDiskGB = int(resource_demand['Storage'])
+ self._logger.info("Non administator forbidden to access hypervisor statistics data")
+ hypervisor_statistics = {'vcpus_used': 0,
+ 'vcpus': conVCPUS,
+ 'free_ram_mb': conFreeRamMB,
+ 'free_disk_gb': conFreeDiskGB}
+ else:
+ # non-forbidden exceptions are re-raised
+ raise e
+
+ # get storage limit for this tenant
+ service['service_type'] = 'volumev2'
+ req_resouce = "/limits"
+ self._logger.info("check volumev2 limits> URI:%s" % req_resouce)
+ resp = sess.get(req_resouce, endpoint_filter=service)
+ self._logger.info("check volumev2> status:%s" % resp.status_code)
+ content = resp.json()
+ storage_limits = content['limits']['absolute']
+ self._logger.debug("check volumev2> resp data:%s" % content)
+
+ # compute actual available resource for this tenant
+ remainVCPU = compute_limits['maxTotalCores'] - compute_limits['totalCoresUsed']
+ remainHypervisorVCPU = hypervisor_statistics['vcpus'] - hypervisor_statistics['vcpus_used']
+
+ if (remainVCPU > remainHypervisorVCPU):
+ remainVCPU = remainHypervisorVCPU
+
+ remainMEM = compute_limits['maxTotalRAMSize'] - compute_limits['totalRAMUsed']
+ remainHypervisorMEM = hypervisor_statistics['free_ram_mb']
+ if remainMEM > remainHypervisorMEM:
+ remainMEM = remainHypervisorMEM
+
+ remainStorage = storage_limits['maxTotalVolumeGigabytes'] - storage_limits['totalGigabytesUsed']
+ remainHypervisorStorage = hypervisor_statistics['free_disk_gb']
+ if (remainStorage > remainHypervisorStorage):
+ remainStorage = remainHypervisorStorage
+
+ # compare resource demanded with available
+ if (int(resource_demand['vCPU']) > remainVCPU):
+ hasEnoughResource = False
+ elif (int(resource_demand['Memory']) > remainMEM):
+ hasEnoughResource = False
+ elif (int(resource_demand['Storage']) > remainStorage):
+ hasEnoughResource = False
+ else:
+ hasEnoughResource = True
+
+ return hasEnoughResource
class APIv1CapacityCheck(CapacityCheck):
def __init__(self):
diff --git a/share/starlingx_base/registration/registration.py b/share/starlingx_base/registration/registration.py
index 255b6689..fa79e5b3 100644
--- a/share/starlingx_base/registration/registration.py
+++ b/share/starlingx_base/registration/registration.py
@@ -25,14 +25,18 @@ from newton_base.registration import registration as newton_registration
from rest_framework import status
from rest_framework.response import Response
from common.msapi import extsys
+from common.msapi import helper
from keystoneauth1.exceptions import HttpError
from newton_base.util import VimDriverUtils
from common.utils import restcall
-from threading import Thread
+from django.core.cache import cache
logger = logging.getLogger(__name__)
-# DEBUG=True
+# global var: audit thread
+gAZCapAuditThread = helper.MultiCloudThreadHelper()
+
+# DEBUG=True
# APIv0 handler upgrading: leverage APIv1 handler
class APIv0Registry(newton_registration.Registry):
@@ -45,14 +49,24 @@ class APIv0Registry(newton_registration.Registry):
self._logger.info("registration with : %s" % vimid)
# vim registration will trigger the start the audit of AZ capacity
- gAZCapAuditThread.addv0(vimid)
+ worker_self = InfraResourceAuditor(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
+ )
+ backlog_item = {
+ "id": vimid,
+ "worker": worker_self.azcap_audit,
+ "payload": (worker_self, vimid),
+ "repeat": 5*1000000, # repeat every 5 seconds
+ }
+ gAZCapAuditThread.add(backlog_item)
if 0 == gAZCapAuditThread.state():
gAZCapAuditThread.start()
return super(APIv0Registry, self).post(request, vimid)
def delete(self, request, vimid=""):
self._logger.debug("unregister cloud region: %s" % vimid)
- gAZCapAuditThread.removev0(vimid)
+ gAZCapAuditThread.remove(vimid)
return super(APIv0Registry, self).delete(request, vimid)
@@ -63,8 +77,8 @@ class Registry(APIv0Registry):
class APIv1Registry(newton_registration.Registry):
def __init__(self):
- super(APIv1Registry, self).__init__()
self.register_helper = RegistryHelper(settings.MULTICLOUD_API_V1_PREFIX, settings.AAI_BASE_URL)
+ super(APIv1Registry, self).__init__()
# self._logger = logger
def post(self, request, cloud_owner="", cloud_region_id=""):
@@ -75,7 +89,17 @@ class APIv1Registry(newton_registration.Registry):
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
# vim registration will trigger the start the audit of AZ capacity
- gAZCapAuditThread.addv0(vimid)
+ worker_self = InfraResourceAuditor(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
+ )
+ backlog_item = {
+ "id": vimid,
+ "worker": worker_self.azcap_audit,
+ "payload": (worker_self, vimid),
+ "repeat": 5 * 1000000, # repeat every 5 seconds
+ }
+ gAZCapAuditThread.add(backlog_item)
if 0 == gAZCapAuditThread.state():
gAZCapAuditThread.start()
@@ -96,7 +120,7 @@ class APIv1Registry(newton_registration.Registry):
% (cloud_owner, cloud_region_id))
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- gAZCapAuditThread.removev0(vimid)
+ gAZCapAuditThread.remove(vimid)
return super(APIv1Registry, self).delete(request, vimid)
@@ -108,7 +132,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
super(RegistryHelper, self).__init__(multicloud_prefix, aai_base_url)
# self._logger = logger
- def registry(self, vimid=""):
+ def registryV0(self, vimid=""):
'''
extend base method
'''
@@ -133,7 +157,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
viminfo, tenant_name=viminfo['tenant'])
# discover the regions, expect it always returns a list (even empty list)
- # cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
# region_ids = self._discover_regions(cloud_owner, cloud_region_id, sess, viminfo)
region_ids = self._discover_regions(vimid, sess, viminfo)
@@ -299,8 +323,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
return 1 # unknown cloud owner,region_id
# def _discover_regions(self, cloud_owner="", cloud_region_id="",
- def _discover_regions(self, vimid="",
- session=None, viminfo=None):
+ def _discover_regions(self, vimid, session=None, viminfo=None):
try:
regions = []
# vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
@@ -336,7 +359,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
return []
-class InfraResourceAuditor(object):
+class InfraResourceAuditor(newton_registration.RegistryHelper):
def __init__(self, multicloud_prefix, aai_base_url):
self.proxy_prefix = multicloud_prefix
@@ -344,70 +367,121 @@ class InfraResourceAuditor(object):
self._logger = logger
# super(InfraResourceAuditor, self).__init__();
- def azcap_audit(self, vimid=""):
- # now retrieve the latest AZ cap info
- # TBD
-
- # store the cap info into cache
- # TBD
- pass
-
-
-class AuditorHelperThread(threading.Thread):
- '''
- thread to register infrastructure resource into AAI
- '''
-
- def __init__(self, audit_helper):
- threading.Thread.__init__(self)
- self.daemon = True
- self.duration = 0
- self.helper = audit_helper
-
- # The set of IDs of cloud regions, format:
- # v0: "owner1_regionid1"
- self.queuev0 = set()
- self.lock = threading.Lock()
- self.state_ = 0 # 0: stopped, 1: started
-
- def addv0(self, vimid=""):
- self.lock.acquire()
- self.queuev0.add(vimid)
- self.lock.release()
- return len(self.queuev0)
-
- def removev0(self, vimid):
- '''
- discard cloud region from list without raise any exception
- '''
- self.queuev0.discard(vimid)
-
- def resetv0(self):
- self.queuev0.clear()
-
- def countv0(self):
- return len(self.queuev0)
-
- def state(self):
- return self.state_
-
- def run(self):
- logger.debug("Start the Audition thread")
- self.state_ = 1
- while self.helper and self.countv0() > 0:
- for vimidv0 in self.queuev0:
- self.helper(vimidv0)
- # sleep for a while in seconds
- time.sleep(5)
+ def azcap_audit(self, vimid):
+ viminfo = VimDriverUtils.get_vim_info(vimid)
+ if not viminfo:
+ self._logger.warn("azcap_audit no valid vimid: %s" % vimid)
+ return
- self.state_ = 0
- logger.debug("Stop the Audition thread")
- # end of processing
+ session = VimDriverUtils.get_session(
+ viminfo,
+ tenant_name=viminfo['tenant']
+ )
-# global Audition thread
-gAZCapAuditThread = AuditorHelperThread(
- InfraResourceAuditor(
- settings.MULTICLOUD_API_V1_PREFIX,
- settings.AAI_BASE_URL).azcap_audit
-)
+ # now retrieve the latest AZ cap info
+ try:
+ # get all hypervisor detail ?
+ hypervisors = self._get_list_resources(
+ "/os-hypervisors/detail", "compute", session,
+ viminfo, vimid, "hypervisors")
+
+ hypervisors_dict = {}
+ # for h in hypervisors:
+ # if not h.get("service", None):
+ # continue
+ # if not h.get("host", None):
+ # continue
+ # hypervisors_dict[h["service"]["host"]] = h
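+ # key the map by hypervisor_hostname so entries can be matched against
+ # the per-AZ host names retrieved below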
+ for h in hypervisors:
+ if not h.get("hypervisor_hostname", None):
+ continue
+ hypervisors_dict[h["hypervisor_hostname"]] = h
+
+ vimAzCacheKey = "cap_azlist_" + vimid
+ vimAzList = []
+ # cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
+ for az in self._get_list_resources(
+ "/os-availability-zone/detail", "compute", session,
+ viminfo, vimid,
+ "availabilityZoneInfo"):
+ az_info = {
+ 'availability-zone-name': az['zoneName'],
+ 'operational-status': az['zoneState']['available']
+ if az.get('zoneState') else '',
+ 'hypervisor-type': '',
+ }
+ # filter out the default az: "internal" and "nova"
+ azName = az.get('zoneName', None)
+ # the 'nova' filter is commented out to exercise the registration process only
+ # if azName == 'nova':
+ # continue
+ if azName == 'internal':
+ continue
+ vimAzList.append(azName)
+
+ # get list of host names
+ pservers_info = list(az['hosts'].keys())
+
+ # Get current cap info of azName
+ azCapCacheKey = "cap_" + vimid + "_" + azName
+ azCapInfoCacheStr = cache.get(azCapCacheKey)
+ azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else {}
+
+ for psname in pservers_info:
+ psinfo = hypervisors_dict.get(psname, None)
+ if not psinfo:
+ # warning: the pserver info not found
+ continue
+ # get current pserver cap info
+ psCapInfoCacheKey = "cap_" + vimid + "_" + psname
+ psCapInfoCacheStr = cache.get(psCapInfoCacheKey)
+ psCapInfoCache = json.loads(psCapInfoCacheStr) if psCapInfoCacheStr else {}
+
+ # compare latest info with cached one
+ vcpu_delta = 0
+ vcpu_used_delta = 0
+ mem_delta = 0
+ mem_free_delta = 0
+ localstorage_delta = 0
+ localstorage_free_delta = 0
+ if psinfo.get("vcpus", 0) != psCapInfoCache.get("vcpus", 0):
+ vcpu_delta += psinfo.get("vcpus", 0) \
+ - psCapInfoCache.get("vcpus", 0)
+ psCapInfoCache["vcpus"] = psinfo.get("vcpus", 0)
+ if psinfo.get("memory_mb", 0) != psCapInfoCache.get("memory_mb", 0):
+ mem_delta += psinfo.get("memory_mb", 0) \
+ - psCapInfoCache.get("memory_mb", 0)
+ psCapInfoCache["memory_mb"] = psinfo.get("memory_mb", 0)
+ if psinfo.get("local_gb", 0) != psCapInfoCache.get("local_gb", 0):
+ localstorage_delta += psinfo.get("local_gb", 0) \
+ - psCapInfoCache.get("local_gb", 0)
+ psCapInfoCache["local_gb"] = psinfo.get("local_gb", 0)
+ if psinfo.get("vcpus_used", 0) != psCapInfoCache.get("vcpus_used", 0):
+ vcpu_used_delta += psinfo.get("vcpus_used", 0)\
+ - psCapInfoCache.get("vcpus_used", 0)
+ psCapInfoCache["vcpus_used"] = psinfo.get("vcpus_used", 0)
+ if psinfo.get("free_ram_mb", 0) != psCapInfoCache.get("free_ram_mb", 0):
+ mem_free_delta += psinfo.get("free_ram_mb", 0)\
+ - psCapInfoCache.get("free_ram_mb", 0)
+ psCapInfoCache["free_ram_mb"] = psinfo.get("free_ram_mb", 0)
+ if psinfo.get("free_disk_gb", 0) != psCapInfoCache.get("free_disk_gb", 0):
+ localstorage_free_delta += psinfo.get("free_disk_gb", 0)\
+ - psCapInfoCache.get("free_disk_gb", 0)
+ psCapInfoCache["free_disk_gb"] = psinfo.get("free_disk_gb", 0)
+ pass
+
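+ # fold the per-pserver deltas into the cached AZ totals; this keeps the
+ # AZ snapshot current without recomputing it from every pserver each cycle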
+ # now apply the delta to azCapInfo
+ azCapInfoCache["vcpus"] = azCapInfoCache.get("vcpus", 0) + vcpu_delta
+ azCapInfoCache["memory_mb"] = azCapInfoCache.get("memory_mb", 0) + mem_delta
+ azCapInfoCache["local_gb"] = azCapInfoCache.get("local_gb", 0) + localstorage_delta
+ azCapInfoCache["vcpus_used"] = azCapInfoCache.get("vcpus_used", 0) + vcpu_used_delta
+ azCapInfoCache["free_ram_mb"] = azCapInfoCache.get("free_ram_mb", 0) + mem_free_delta
+ azCapInfoCache["free_disk_gb"] = azCapInfoCache.get("free_disk_gb", 0) + localstorage_free_delta
+
+ # update the cache
+ cache.set(azCapCacheKey, json.dumps(azCapInfoCache), 3600 * 24)
+ cache.set(vimAzCacheKey, json.dumps(vimAzList), 3600 * 24)
+ except Exception as e:
+ self._logger.error("azcap_audit raise exception: %s" % e)
+ pass
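MultiCloudThreadHelper itself lives in share/common/msapi/helper.py, which this patch also changes but which is not shown in this section. Judging only by the fields the backlog items above carry (id, worker, payload, repeat, status), a minimal sketch of the scheduling loop might look like the following; the microsecond repeat unit and the payload convention are inferred from the call sites, not taken from the real helper:

    # Sketch only: inferred from the backlog items built above, not the real helper.
    import time
    import threading

    class ThreadHelperSketch(threading.Thread):
        def __init__(self):
            threading.Thread.__init__(self)
            self.daemon = True
            self.backlog = {}   # id -> backlog item; same id replaces the old item
            self.due = {}       # id -> next due time (microseconds)
            self.state_ = 0     # 0: stopped, 1: running

        def add(self, item):
            self.backlog[item["id"]] = item
            self.due[item["id"]] = 0  # run as soon as possible

        def remove(self, item_id):
            self.backlog.pop(item_id, None)
            self.due.pop(item_id, None)

        def get(self, item_id):
            return self.backlog.get(item_id, None)

        def state(self):
            return self.state_

        def run(self):
            self.state_ = 1
            while self.backlog:
                now = int(time.time() * 1000000)
                for item in list(self.backlog.values()):
                    if item["id"] not in self.due or now < self.due[item["id"]]:
                        continue
                    # payload[0] duplicates the worker's owner object at the
                    # call sites above, so it is assumed to be skipped when
                    # invoking the bound method
                    result = item["worker"](*item["payload"][1:])
                    if result is not None:
                        item["status"] = result
                    if item.get("repeat", 0) > 0:
                        self.due[item["id"]] = now + item["repeat"]
                    else:
                        # one-time job: keep the item so its status can be polled
                        self.due.pop(item["id"], None)
                time.sleep(1)
            self.state_ = 0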
diff --git a/share/starlingx_base/resource/__init__.py b/share/starlingx_base/resource/__init__.py
new file mode 100644
index 00000000..825091ff
--- /dev/null
+++ b/share/starlingx_base/resource/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) 2017-2019 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
diff --git a/share/starlingx_base/resource/capacity.py b/share/starlingx_base/resource/capacity.py
new file mode 100644
index 00000000..fc926d00
--- /dev/null
+++ b/share/starlingx_base/resource/capacity.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2017-2019 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import traceback
+import json
+
+from django.core.cache import cache
+
+from newton_base.resource import capacity as newton_capacity
+from common.exceptions import VimDriverNewtonException
+from newton_base.util import VimDriverUtils
+
+from keystoneauth1.exceptions import HttpError
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.views import APIView
+from common.msapi import extsys
+
+logger = logging.getLogger(__name__)
+
+
+class CapacityCheck(newton_capacity.CapacityCheck):
+ def __init__(self):
+ super(CapacityCheck, self).__init__()
+ self._logger = logger
+
+ def post(self, request, vimid=""):
+ self._logger.info("vimid, data> %s, %s" % (vimid, request.data))
+ self._logger.debug("META> %s" % request.META)
+
+ try:
+ hasEnoughResource = self.get_tenant_cap_info(vimid, request.data)
+ azCapInfo = self.get_az_cap_info(vimid)
+ self._logger.info("RESP with data> result:%s" % hasEnoughResource)
+ return Response(data={'result': hasEnoughResource, 'AZs': azCapInfo}, status=status.HTTP_200_OK)
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(data={'result': False, 'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ def get_az_cap_info(self, vimid):
+ azCapInfo = []
+ viminfo = VimDriverUtils.get_vim_info(vimid)
+ if not viminfo:
+ self._logger.warn("azcap_audit no valid vimid: %s" % vimid)
+ return
+
+ session = VimDriverUtils.get_session(
+ viminfo,
+ tenant_name=viminfo['tenant']
+ )
+ try:
+ # get list of AZ
+ vimAzCacheKey = "cap_azlist_" + vimid
+ vimAzListCacheStr = cache.get(vimAzCacheKey)
+ vimAzListCache = json.loads(vimAzListCacheStr) if vimAzListCacheStr else []
+ for azName in vimAzListCache:
+ azCapCacheKey = "cap_" + vimid + "_" + azName
+ azCapInfoCacheStr = cache.get(azCapCacheKey)
+ azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else {}
+
+ azCapInfo["availability-zone-name"] = azName
+ azCapInfo["vCPUAvail"] = azCapInfoCache.get("vcpus", 0) + azCapInfoCache.get("vcpus_used", 0)
+ azCapInfo["vCPUTotal"] = azCapInfoCache.get("vcpus", 0)
+ azCapInfo["MemoryAvail"] = azCapInfoCache.get("vcpus", 0)
+ azCapInfo["MemoryTotal"] = azCapInfoCache.get("vcpus", 0)
+ azCapInfo["StorageAvail"] = azCapInfoCache.get("vcpus", 0)
+ azCapInfo["StorageTotal"] = azCapInfoCache.get("vcpus", 0)
+
+ return azCapInfo
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return azCapInfo
+
+class APIv1CapacityCheck(CapacityCheck):
+ def __init__(self):
+ super(APIv1CapacityCheck, self).__init__()
+ # self._logger = logger
+
+ def post(self, request, cloud_owner="", cloud_region_id=""):
+ self._logger.info("vimid, data> %s,%s, %s" % (cloud_owner, cloud_region_id, request.data))
+ self._logger.debug("META> %s" % request.META)
+
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return super(APIv1CapacityCheck, self).post(request, vimid)
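A successful POST to .../capacity_check now returns the tenant-level verdict plus the cached per-AZ snapshot. An illustrative response body follows; the values are invented, with memory in MB and storage in GB, matching the hypervisor fields cached by azcap_audit:

    # Illustrative only; field values are invented for the example.
    {
        "result": True,
        "AZs": [
            {
                "availability-zone-name": "nova",
                "vCPUAvail": 38,          # vcpus - vcpus_used
                "vCPUTotal": 64,
                "MemoryAvail": 110000,    # free_ram_mb
                "MemoryTotal": 256000,    # memory_mb
                "StorageAvail": 800,      # free_disk_gb
                "StorageTotal": 1024      # local_gb
            }
        ]
    }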
diff --git a/share/starlingx_base/resource/infra_workload.py b/share/starlingx_base/resource/infra_workload.py
new file mode 100644
index 00000000..4da0a0d0
--- /dev/null
+++ b/share/starlingx_base/resource/infra_workload.py
@@ -0,0 +1,619 @@
+# Copyright (c) 2017-2018 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import json
+import traceback
+import uuid
+
+from django.conf import settings
+from common.exceptions import VimDriverNewtonException
+# from newton_base.util import VimDriverUtils
+
+from keystoneauth1.exceptions import HttpError
+from rest_framework import status
+from rest_framework.response import Response
+from rest_framework.views import APIView
+from common.msapi import extsys
+from common.msapi.helper import Helper as helper
+
+from common.utils import restcall
+from newton_base.registration import registration as newton_registration
+from newton_base.resource import infra_workload as newton_infra_workload
+from newton_base.util import VimDriverUtils
+
+logger = logging.getLogger(__name__)
+
+# global var: audit thread
+# the backlog item id is the workload id, so a POST to workloadid1 followed by
+# a DELETE of workloadid1 replaces the previous backlog item
+gInfraWorkloadThread = helper.MultiCloudThreadHelper()
+
+class InfraWorkload(newton_infra_workload.InfraWorkload):
+ def __init__(self):
+ self._logger = logger
+
+ def post(self, request, vimid="", workloadid=""):
+ self._logger.info("vimid: %s, stackid:%s" % (vimid, workloadid))
+ self._logger.info("data: %s" % (request.data))
+ self._logger.debug("META: %s" % request.META)
+
+ try:
+ resp_template = {}
+ os_status = status.HTTP_500_INTERNAL_SERVER_ERROR
+
+ if workloadid == "":
+ return super(InfraWorkload, self).post(request, vimid)
+ else:
+ # a post to heatbridge
+ worker_self = InfraWorkloadHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
+ )
+ backlog_item = {
+ "id": workloadid,
+ "worker": worker_self.workload_update(),
+ "payload": (worker_self, vimid, workloadid, request.data),
+ "repeat": 0, # one time job
+ # status format: (retcode, status enum, message); retcode 0 means OK, otherwise an error code derived from the HTTP status
+ "status": (0, "AAI_UPDATE_IN_PROGRESS", "Pending to schedule")
+ }
+ gInfraWorkloadThread.add(backlog_item)
+ if 0 == gInfraWorkloadThread.state():
+ gInfraWorkloadThread.start()
+
+ # now query the progress
+ backlog_item = gInfraWorkloadThread.get(workloadid)
+ if not backlog_item:
+ # backlog item not found
+ return Response(
+ data={
+ 'workload_status': "AAI_UPDATE_FAIL",
+ "message": "AAI update failed"
+ },
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR
+ )
+ else:
+ progress = backlog_item.get("status", (status.HTTP_500_INTERNAL_SERVER_ERROR, "AAI_UPDATE_FAIL", "Status not found"))
+ progress_code = progress[0]
+ progress_status = progress[1]
+ progress_msg = progress[2]
+ return Response(data={'workload_status': progress_status, "message": progress_msg},
+ status=status.HTTP_201_CREATED
+ if progress_code == 0 else progress_code
+ )
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ def get(self, request, vimid="", workloadid=""):
+ self._logger.info("vimid, workload id: %s, %s" % (vimid, workloadid))
+ self._logger.debug("META: %s" % request.META)
+
+ try:
+
+ if workloadid == "":
+ raise VimDriverNewtonException(
+ message="workload_id is not specified",
+ content="workload_id must be specified to query the workload",
+ status_code=status.HTTP_400_BAD_REQUEST)
+
+ # now query the progress
+ backlog_item = gInfraWorkloadThread.get(workloadid)
+ if not backlog_item:
+ # backlog item not found
+ return Response(
+ data={
+ 'workload_status': "AAI_UPDATE_FAIL",
+ "message": "AAI update failed"
+ },
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR
+ )
+ else:
+ progress = backlog_item.get("status", (status.HTTP_500_INTERNAL_SERVER_ERROR, "AAI_UPDATE_FAIL", "Status not found"))
+ progress_code = progress[0]
+ progress_status = progress[1]
+ progress_msg = progress[2]
+ return Response(data={'workload_status': progress_status, "message": progress_msg},
+ status=status.HTTP_201_CREATED
+ if progress_code == 0 else progress_code
+ )
+
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+ def delete(self, request, vimid="", workloadid=""):
+ self._logger.info("vimid, workload id: %s, %s" % (vimid, workloadid))
+ self._logger.debug("META: %s" % request.META)
+
+ try:
+
+ if workloadid == "":
+ raise VimDriverNewtonException(
+ message="workload_id is not specified",
+ content="workload_id must be specified to delete the workload",
+ status_code=status.HTTP_400_BAD_REQUEST)
+
+ super(InfraWorkload, self).delete(request, vimid, workloadid)
+
+ # a post to heatbridge delete
+ worker_self = InfraWorkloadHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
+ )
+ backlog_item = {
+ "id": workloadid,
+ "worker": worker_self.workload_delete(),
+ "payload": (worker_self, vimid, workloadid, request.data),
+ "repeat": 0, # one time job
+ # status format: (retcode, status enum, message); retcode 0 means OK, otherwise an error code derived from the HTTP status
+ "status": (0, "AAI_REMOVE_IN_PROGRESS", "Pending to schedule")
+ }
+ gInfraWorkloadThread.add(backlog_item)
+ if 0 == gInfraWorkloadThread.state():
+ gInfraWorkloadThread.start()
+
+ # now query the progress
+ backlog_item = gInfraWorkloadThread.get(workloadid)
+ if not backlog_item:
+ # backlog item not found
+ return Response(
+ data={
+ 'workload_status': "STACK_REMOVE_FAILED",
+ "message": "AAI update failed"
+ },
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR
+ )
+ else:
+ progress = backlog_item.get("status", (status.HTTP_500_INTERNAL_SERVER_ERROR, "AAI_REMOVE_FAIL", "Status not found"))
+ progress_code = progress[0]
+ progress_status = progress[1]
+ progress_msg = progress[2]
+ return Response(data={'workload_status': progress_status, "message": progress_msg},
+ status=status.HTTP_200_OK
+ if progress_code == 0 else progress_code
+ )
+
+ # # assume the workload_type is heat
+ # stack_id = workloadid
+ # cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # # should go via multicloud proxy so that
+ # # the selflink is updated by multicloud
+ # retcode, v2_token_resp_json, os_status = \
+ # helper.MultiCloudIdentityHelper(
+ # settings.MULTICLOUD_API_V1_PREFIX,
+ # cloud_owner, regionid, "/v2.0/tokens")
+ #
+ # if retcode > 0 or not v2_token_resp_json:
+ # logger.error("authenticate fails:%s, %s, %s" %
+ # (cloud_owner, regionid, v2_token_resp_json))
+ # return
+ # # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+ # # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
+ #
+ # # get stack status
+ # service_type = "orchestration"
+ # resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
+ # self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ # retcode, content, os_status = \
+ # helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ # v2_token_resp_json,
+ # service_type, resource_uri,
+ # None, "GET")
+ #
+ # stacks = content.get('stacks', []) \
+ # if retcode == 0 and content else []
+ # # assume there is at most 1 stack returned
+ # # since it was filtered by id
+ # stack1 = stacks[0] if stacks else None
+ # stack_status = ""
+ #
+ # if stack1 and 'CREATE_COMPLETE' == stack1['stack_status']:
+ # # delete the stack
+ # resource_uri = "/stacks/%s/%s" % \
+ # (stack1['stack_name'], stack1['id'])
+ # self._logger.info("delete stack, URI:%s" % resource_uri)
+ # retcode, content, os_status = \
+ # helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ # v2_token_resp_json,
+ # service_type, resource_uri,
+ # None, "DELETE")
+ # # if retcode == 0:
+ # # stack_status = "DELETE_IN_PROCESS"
+ # # # and update AAI inventory by heatbridge-delete
+ # # self.heatbridge_delete(request, vimid, stack1['id'])
+ #
+ # # stub response
+ # resp_template = {
+ # "template_type": "HEAT",
+ # "workload_id": stack_id,
+ # "workload_status": stack_status
+ # }
+ #
+ # if retcode > 0:
+ # resp_template["workload_response"] = content
+ #
+ # self._logger.info("RESP with data> result:%s" % resp_template)
+ # return Response(status=os_status)
+ # except VimDriverNewtonException as e:
+ # self._logger.error("Plugin exception> status:%s,error:%s"
+ # % (e.status_code, e.content))
+ # return Response(data={'error': e.content}, status=e.status_code)
+ # except HttpError as e:
+ # self._logger.error("HttpError: status:%s, response:%s" %
+ # (e.http_status, e.response.json()))
+ # return Response(data=e.response.json(), status=e.http_status)
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return Response(data={'error': str(e)},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+
+class APIv1InfraWorkload(InfraWorkload):
+ def __init__(self):
+ super(APIv1InfraWorkload, self).__init__()
+ # self._logger = logger
+
+ def post(self, request, cloud_owner="", cloud_region_id=""):
+ # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
+ # (cloud_owner, cloud_region_id, request.data))
+ # self._logger.debug("META: %s" % request.META)
+
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return super(APIv1InfraWorkload, self).post(request, vimid)
+
+ def get(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
+ # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
+ # (cloud_owner, cloud_region_id, request.data))
+ # self._logger.debug("META: %s" % request.META)
+
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return super(APIv1InfraWorkload, self).get(request, vimid, workloadid)
+
+ def delete(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
+ # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
+ # (cloud_owner, cloud_region_id, request.data))
+ # self._logger.debug("META: %s" % request.META)
+
+ vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
+ return super(APIv1InfraWorkload, self).delete(request, vimid, workloadid)
+
+
+class InfraWorkloadHelper(newton_registration.RegistryHelper):
+
+ def __init__(self, multicloud_prefix, aai_base_url, vimid="", workloadid=""):
+ self.proxy_prefix = multicloud_prefix
+ self.aai_base_url = aai_base_url
+ self._logger = logger
+ self.vimid = vimid
+ self.workloadid = workloadid
+ super(InfraWorkloadHelper, self).__init__()
+
+ def workload_create(self, vimid, workload_data):
+ data = workload_data
+ oof_directive = data.get("oof_directives", {})
+ template_type = data.get("template_type", None)
+ template_data = data.get("template_data", {})
+ resp_template = None
+ if template_type and "heat" == template_type.lower():
+ # update heat parameters from oof_directive
+ parameters = template_data.get("parameters", {})
+
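+ # Illustrative (assumed) shape of oof_directives consumed by the loop below:
+ # {"directives": [
+ #     {"type": "vnfc",
+ #      "directives": [
+ #          {"type": "flavor_directives",
+ #           "attributes": [
+ #               {"attribute_name": "oof_provided_flavor",  # hypothetical name
+ #                "attribute_value": "m1.large"}]}]}]}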
+ for directive in oof_directive.get("directives", []):
+ if directive["type"] == "vnfc":
+ for directive2 in directive.get("directives", []):
+ if directive2["type"] in ["flavor_directives",
+ "sriovNICNetwork_directives"]:
+ for attr in directive2.get("attributes", []):
+ flavor_label = attr.get("attribute_name", None)
+ flavor_value = attr.get("attribute_value", None)
+ if flavor_label in parameters:
+ parameters[flavor_label] = flavor_value
+ else:
+ self._logger.warn(
+ "no such parameter exists: %s" %
+ flavor_label)
+
+ # update parameters
+ template_data["parameters"] = parameters
+
+ # reset to make sure "files" are empty
+ template_data["files"] = {}
+
+ # authenticate
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that
+ # the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens"
+ )
+
+ if retcode > 0 or not v2_token_resp_json:
+ logger.error("authenticate fails:%s,%s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+ return (
+ retcode,
+ "authenticate fails:%s,%s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json)
+ )
+
+ # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+
+ service_type = "orchestration"
+ resource_uri = "/stacks"
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ template_data, "POST")
+
+ stack1 = content.get('stack', None) \
+ if retcode == 0 and content else None
+
+ stackid = stack1["id"] if stack1 else ""
+ resp_template = {
+ "template_type": template_type,
+ "workload_id": stackid,
+ "template_response": content
+ }
+ self._logger.info("RESP with data> result:%s" % resp_template)
+ return (retcode, resp_template)
+
+ def workload_update(self, vimid, stack_id, otherinfo):
+ '''
+ update heat resources to AAI for the specified cloud region and tenant.
+ The resources include: vserver, vserver/l-interface
+ '''
+
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+ if retcode > 0:
+ logger.error("authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+
+ return (retcode, "AAI_UPDATE_FAIL", "authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+ tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+ # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
+
+ # common prefix
+ aai_cloud_region = \
+ "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
+ % (cloud_owner, regionid, tenant_id)
+
+ # get stack resource
+ service_type = "orchestration"
+ resource_uri = "/stacks/%s/resources" % (stack_id)
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ resources = content.get('resources', []) if retcode == 0 and content else []
+
+ # find and update resources
+ transactions = []
+ for resource in resources:
+ if resource.get('resource_status', None) != "CREATE_COMPLETE":
+ # this resource is not ready yet
+ continue
+ if resource.get('resource_type', None) == 'OS::Nova::Server':
+ # retrieve vserver details
+ service_type = "compute"
+ resource_uri = "/servers/%s" % (resource['physical_resource_id'])
+ self._logger.info("retrieve vserver detail, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ self._logger.debug(" resp data:%s" % content)
+ vserver_detail = content.get('server', None) if retcode == 0 and content else None
+ if vserver_detail:
+ # compose inventory entry for vserver
+ vserver_link = ""
+ for link in vserver_detail['links']:
+ if link['rel'] == 'self':
+ vserver_link = link['href']
+ break
+ pass
+
+ # note: relationship-list to flavor/image is not updated yet
+ # note: volumes are not updated yet
+ # note: relationship-list to vnf will be handled somewhere else
+ aai_resource = {
+ 'body': {
+ 'vserver-name': vserver_detail['name'],
+ 'vserver-name2': vserver_detail['name'],
+ "vserver-id": vserver_detail['id'],
+ "vserver-selflink": vserver_link,
+ "prov-status": vserver_detail['status']
+ },
+ "uri": aai_cloud_region + "/vservers/vserver/%s" % (vserver_detail['id'])
+ }
+
+ try:
+ # then update the resource
+ retcode, content, status_code = \
+ restcall.req_to_aai(aai_resource['uri'],
+ "PUT", content=aai_resource['body'])
+
+ if retcode == 0 and content:
+ content = json.JSONDecoder().decode(content)
+ self._logger.debug("AAI update %s response: %s" %
+ (aai_resource['uri'], content))
+ except Exception:
+ self._logger.error(traceback.format_exc())
+ pass
+
+ aai_resource_transactions = {"put": [aai_resource]}
+ transactions.append(aai_resource_transactions)
+ # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
+ pass
+
+ for resource in resources:
+ if resource.get('resource_status', None) != "CREATE_COMPLETE":
+ continue
+ if resource.get('resource_type', None) == 'OS::Neutron::Port':
+ # retrieve vport details
+ service_type = "network"
+ resource_uri = "/v2.0/ports/%s" % (resource['physical_resource_id'])
+ self._logger.info("retrieve vport detail, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ self._logger.debug(" resp data:%s" % content)
+
+ vport_detail = content.get('port', None) if retcode == 0 and content else None
+ if vport_detail:
+ # compose inventory entry for vport
+ # note: l3-interface-ipv4-address-list,
+ # l3-interface-ipv6-address-list are not updated yet
+ # note: network-name is not updated yet since the detail
+ # comes with network-id only
+ aai_resource = {
+ "body": {
+ "interface-name": vport_detail['name'],
+ "interface-id": vport_detail['id'],
+ "macaddr": vport_detail['mac_address']
+ },
+ 'uri':
+ aai_cloud_region + "/vservers/vserver/%s/l-interfaces/l-interface/%s"
+ % (vport_detail['device_id'], vport_detail['name'])
+ }
+ try:
+ # then update the resource
+ retcode, content, status_code = \
+ restcall.req_to_aai(aai_resource['uri'], "PUT",
+ content=aai_resource['body'])
+
+ if retcode == 0 and content:
+ content = json.JSONDecoder().decode(content)
+ self._logger.debug("AAI update %s response: %s" %
+ (aai_resource['uri'], content))
+ except Exception:
+ self._logger.error(traceback.format_exc())
+ pass
+
+ aai_resource_transactions = {"put": [aai_resource]}
+ transactions.append(aai_resource_transactions)
+ # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
+
+ pass
+
+ # aai_transactions = {"transactions": transactions}
+ # self._logger.debug("aai_transactions :%s" % aai_transactions)
+ return (retcode, "AAI_UPDATE_COMPLETE", "succeed")
+
+ def workload_delete(self, vimid, stack_id, otherinfo):
+ '''
+ remove heat resources from AAI for the specified cloud region and tenant
+ '''
+
+ # enumerate the resources
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+ if retcode > 0:
+ logger.error("authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+ return (retcode, "AAI_REMOVE_FAIL", "authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+
+ tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+ # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
+
+ # common prefix
+ aai_cloud_region = \
+ "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
+ % (cloud_owner, regionid, tenant_id)
+
+ # get stack resource
+ service_type = "orchestration"
+ resource_uri = "/stacks/%s/resources" % (stack_id)
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+ resources = content.get('resources', []) \
+ if retcode == 0 and content else []
+
+ vserver_list = [resource['physical_resource_id'] for resource in resources
+ if resource.get('resource_type', None) == 'OS::Nova::Server']
+
+ try:
+ # get list of vservers
+ vserver_list_url = aai_cloud_region + "/vservers?depth=all"
+ retcode, content, status_code = \
+ restcall.req_to_aai(vserver_list_url, "GET")
+ if retcode > 0 or not content:
+ self._logger.debug("AAI get %s response: %s" % (vserver_list_url, content))
+ return (retcode, "AAI_REMOVE_FAIL", "authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+
+ content = json.JSONDecoder().decode(content)
+ vservers = content['vserver']
+ for vserver in vservers:
+ if vserver['vserver-id'] not in vserver_list:
+ continue
+
+ try:
+ # iterate over vports; an exception is raised if no l-interface exists
+ for vport in vserver['l-interfaces']['l-interface']:
+ # delete vport
+ vport_delete_url = \
+ aai_cloud_region + \
+ "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \
+ % (vserver['vserver-id'], vport['interface-name'],
+ vport['resource-version'])
+
+ restcall.req_to_aai(vport_delete_url, "DELETE")
+ except Exception:
+ pass
+
+ try:
+ # delete vserver
+ vserver_delete_url = \
+ aai_cloud_region + \
+ "/vservers/vserver/%s?resource-version=%s" \
+ % (vserver['vserver-id'], vserver['resource-version'])
+
+ restcall.req_to_aai(vserver_delete_url, "DELETE")
+ except Exception:
+ continue
+
+ return (retcode, "AAI_REMOVE_COMPLETE", "succeed")
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ return (1, "AAI_REMOVE_FAIL", str(e))
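Since workload creation and teardown are now asynchronous, a caller is expected to poll the workload URL until the status leaves the *_IN_PROGRESS states. A rough client-side sketch follows; the host, ids and stack id are placeholders, and the path follows the v1 routes added in urls.py below:

    # Client-side polling sketch; endpoint host and ids are placeholders.
    import time
    import requests

    base = "http://multicloud.example:9005"  # placeholder host:port
    url = (base + "/api/multicloud-titaniumcloud/v1/"
           "CloudOwner/RegionOne/infra_workload/stack-id-placeholder")

    while True:
        body = requests.get(url).json()
        if body.get("workload_status") not in ("AAI_UPDATE_IN_PROGRESS",
                                               "AAI_REMOVE_IN_PROGRESS"):
            break
        time.sleep(5)
    print(body.get("workload_status"), body.get("message"))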
diff --git a/starlingx/starlingx/registration/tests/test_registration.py b/starlingx/starlingx/registration/tests/test_registration.py
index 99f5ffc5..d4af22b6 100644
--- a/starlingx/starlingx/registration/tests/test_registration.py
+++ b/starlingx/starlingx/registration/tests/test_registration.py
@@ -271,7 +271,7 @@ class TestRegistration(test_base.TestRequest):
"registry"), "{}", content_type="application/json",
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
- self.assertEquals(status.HTTP_500_INTERNAL_SERVER_ERROR,
+ self.assertEquals(status.HTTP_204_NO_CONTENT,
response.status_code)
@mock.patch.object(VimDriverUtils, 'get_session')
diff --git a/starlingx/starlingx/registration/tests/test_registration2.py b/starlingx/starlingx/registration/tests/test_registration2.py
index 6f470eea..d2084ecf 100644
--- a/starlingx/starlingx/registration/tests/test_registration2.py
+++ b/starlingx/starlingx/registration/tests/test_registration2.py
@@ -86,12 +86,12 @@ class TestRegistration2(unittest.TestCase):
["get"], {"get": {
"content": MOCK_GET_FLAVOR_RESPONSE}}),
- resp = self.view.register_helper._discover_flavors(
+ retcode, content = self.view.register_helper._discover_flavors(
vimid="starlingx_RegionOne",
session=mock_session, viminfo=MOCK_VIM_INFO
)
- self.assertIsNone(resp)
+ self.assertEquals(retcode, 11)
def test_discover_flavors_w_hpa_numa(self):
restcall.req_to_aai = mock.Mock()
@@ -103,9 +103,9 @@ class TestRegistration2(unittest.TestCase):
"content": MOCK_GET_FLAVOR_EXTRASPECS_RESPONSE_w_hpa_numa}
]}),
- resp = self.view.register_helper._discover_flavors(
+ retcode, content = self.view.register_helper._discover_flavors(
vimid="starlingx_RegionOne",
session=mock_session, viminfo=MOCK_VIM_INFO
)
- self.assertIsNone(resp)
+ self.assertEquals(retcode, 11)
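The tests above address clouds by composite vimid (e.g. "starlingx_RegionOne"), while the v1 API addresses them by cloud_owner and cloud_region_id. extsys.encode_vim_id/decode_vim_id are not part of this patch; judging from the ids used throughout, the convention appears to be a plain underscore join, roughly:

    # Rough equivalent of the assumed convention; not the actual extsys code.
    def encode_vim_id(cloud_owner, cloud_region_id):
        return "%s_%s" % (cloud_owner, cloud_region_id)

    def decode_vim_id(vimid):
        # owners such as "windriver-hudson-dc" use hyphens, so splitting on
        # the first underscore recovers both parts
        cloud_owner, _, cloud_region_id = vimid.partition("_")
        return cloud_owner, cloud_region_id

    assert decode_vim_id(encode_vim_id("starlingx", "RegionOne")) == \
        ("starlingx", "RegionOne")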
diff --git a/starlingx/starlingx/urls.py b/starlingx/starlingx/urls.py
index b64946ce..d08a2068 100644
--- a/starlingx/starlingx/urls.py
+++ b/starlingx/starlingx/urls.py
@@ -31,30 +31,45 @@ urlpatterns = [
tenants.Tenants.as_view()),
url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
'(?P<tenantid>[0-9a-zA-Z_-]{20,})/', include('starlingx.requests.urls')),
- url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/capacity_check/?$',
+ url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'capacity_check/?$',
capacity.CapacityCheck.as_view()),
- url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/infra_workload/?$',
+ url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'infra_workload/?$',
infra_workload.InfraWorkload.as_view()),
- url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/infra_workload/(?P<requri>[0-9a-zA-Z_-]*)/?$',
+ url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
infra_workload.InfraWorkload.as_view()),
url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/',
include('starlingx.proxy.urls')),
- # API v1, depreciated due to MULTICLOUD-335
- url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/registry/?$',
+ # API v1
+ url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/registry/?$',
registration.APIv1Registry.as_view()),
- url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/?$',
+ url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/?$',
registration.APIv1Registry.as_view()),
- url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
+ url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
include('starlingx.proxy.urlsV1')),
- url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/tenants/?$',
+ url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/tenants/?$',
tenants.APIv1Tenants.as_view()),
- url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/'
+ url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/'
'(?P<tenantid>[0-9a-zA-Z_-]{20,})/', include('starlingx.requests.urlsV1')),
- url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/capacity_check/?$',
+ url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/capacity_check/?$',
capacity.APIv1CapacityCheck.as_view()),
- url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/?$',
+ url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/?$',
+ infra_workload.APIv1InfraWorkload.as_view()),
+ url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/'
+ r'(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
infra_workload.APIv1InfraWorkload.as_view()),
- url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
+ url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
include('starlingx.proxy.urlsV1')),
]
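Two details are worth noting about the route list above: Django evaluates urlpatterns top-down, and the new workloadid group uses `*`, so a bare .../infra_workload/ is still captured by the preceding pattern while requests with an id segment fall through to the workloadid route. A quick resolution check, as a sketch (the import path varies across Django versions):

    # Sketch: confirm which view and kwargs a URL resolves to.
    from django.urls import resolve  # django.core.urlresolvers on older Django

    match = resolve("/api/multicloud-starlingx/v1/CloudOwner/RegionOne/"
                    "infra_workload/stack-id-placeholder/")
    print(match.func, match.kwargs)
    # expected kwargs include workloadid="stack-id-placeholder", assuming the
    # broad proxy include earlier in the list does not shadow this route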
diff --git a/starlingx/tox.ini b/starlingx/tox.ini
index 09d3a50f..78dc9675 100644
--- a/starlingx/tox.ini
+++ b/starlingx/tox.ini
@@ -32,12 +32,12 @@ deps =
-r{toxinidir}/test-requirements.txt
commands =
coverage run --branch manage.py test starlingx
- coverage report --omit="./venv-tox/*,./.tox/*,*tests*,*__init__.py,*newton_base*,*common*" --fail-under=30
+ coverage report --omit="./venv-tox/*,./.tox/*,*tests*,*__init__.py,*newton_base*,*starlingx_base*,*common*" --fail-under=30
[testenv:pep8]
deps=flake8
commands=flake8
[testenv:cov]
-commands = coverage xml --omit="./venv-tox/*,./.tox/*,*tests*,*__init__.py,*newton_base*,*common*, *site-packages*"
+commands = coverage xml --omit="./venv-tox/*,./.tox/*,*tests*,*__init__.py,*newton_base*,*starlingx_base*,*common*,*site-packages*"
diff --git a/windriver/titanium_cloud/registration/tests/test_registration.py b/windriver/titanium_cloud/registration/tests/test_registration.py
index 749fba62..49df5b23 100644
--- a/windriver/titanium_cloud/registration/tests/test_registration.py
+++ b/windriver/titanium_cloud/registration/tests/test_registration.py
@@ -271,7 +271,7 @@ class TestRegistration(test_base.TestRequest):
"registry"), "{}", content_type="application/json",
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
- self.assertEquals(status.HTTP_500_INTERNAL_SERVER_ERROR,
+ self.assertEquals(status.HTTP_204_NO_CONTENT,
response.status_code)
@mock.patch.object(VimDriverUtils, 'get_session')
diff --git a/windriver/titanium_cloud/registration/tests/test_registration2.py b/windriver/titanium_cloud/registration/tests/test_registration2.py
index 495b2126..e00c6adc 100644
--- a/windriver/titanium_cloud/registration/tests/test_registration2.py
+++ b/windriver/titanium_cloud/registration/tests/test_registration2.py
@@ -87,10 +87,10 @@ class TestRegistration2(unittest.TestCase):
["get"], {"get": {
"content": MOCK_GET_FLAVOR_RESPONSE}}),
- resp = self.view.register_helper._discover_flavors(vimid="windriver-hudson-dc_RegionOne",
+ retcode, content = self.view.register_helper._discover_flavors(vimid="windriver-hudson-dc_RegionOne",
session=mock_session, viminfo=MOCK_VIM_INFO)
- self.assertIsNone(resp)
+ self.assertEquals(retcode, 11)
def test_discover_flavors_w_hpa_numa(self):
restcall.req_to_aai = mock.Mock()
@@ -102,7 +102,7 @@ class TestRegistration2(unittest.TestCase):
"content": MOCK_GET_FLAVOR_EXTRASPECS_RESPONSE_w_hpa_numa}
]}),
- resp = self.view.register_helper._discover_flavors(vimid="windriver-hudson-dc_RegionOne",
+ retcode, content = self.view.register_helper._discover_flavors(vimid="windriver-hudson-dc_RegionOne",
session=mock_session, viminfo=MOCK_VIM_INFO)
- self.assertIsNone(resp)
+ self.assertEquals(retcode, 11)
diff --git a/windriver/titanium_cloud/urls.py b/windriver/titanium_cloud/urls.py
index f910bcc8..8b73662a 100644
--- a/windriver/titanium_cloud/urls.py
+++ b/windriver/titanium_cloud/urls.py
@@ -24,7 +24,8 @@ urlpatterns = [
url(r'^', include('titanium_cloud.samples.urls')),
# API v0, deprecated due to MULTICLOUD-335
- url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/registry/?$',
+ url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'registry/?$',
registration.Registry.as_view()),
url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/?$',
registration.Registry.as_view()),
@@ -32,72 +33,98 @@ urlpatterns = [
include('titanium_cloud.extensions.urls')),
url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/',
include('titanium_cloud.proxy.urls')),
- url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/tenants/?$',
+ url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'tenants/?$',
tenants.Tenants.as_view()),
url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
- '(?P<tenantid>[0-9a-zA-Z_-]{20,})/', include('titanium_cloud.requests.urls')),
+ '(?P<tenantid>[0-9a-zA-Z_-]{20,})/',
+ include('titanium_cloud.requests.urls')),
# CapacityCheck
- url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/capacity_check/?$',
+ url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'capacity_check/?$',
capacity.CapacityCheck.as_view()),
# API v1, deprecated due to MULTICLOUD-335
- url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/registry/?$',
+ url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/registry/?$',
registration.APIv1Registry.as_view()),
- url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/?$',
+ url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/?$',
registration.APIv1Registry.as_view()),
- url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/exten',
+ url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/exten',
include('titanium_cloud.extensions.urlsV1')),
- url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
+ url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
include('titanium_cloud.proxy.urlsV1')),
- url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/tenants/?$',
+ url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/tenants/?$',
tenants.APIv1Tenants.as_view()),
- url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/'
+ url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/'
'(?P<tenantid>[0-9a-zA-Z_-]{20,})/', include('titanium_cloud.requests.urlsV1')),
# CapacityCheck
- url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/capacity_check/?$',
+ url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/capacity_check/?$',
capacity.APIv1CapacityCheck.as_view()),
- url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/?$',
+ url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/?$',
infra_workload.APIv1InfraWorkload.as_view()),
# API v0, new namespace due to MULTICLOUD-335
- url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/registry/?$',
+ url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'registry/?$',
registration.APIv0Registry.as_view()),
url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/?$',
registration.APIv0Registry.as_view()),
url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/exten',
include('titanium_cloud.extensions.urls')),
- url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/infra_workload/?$',
+ url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'infra_workload/?$',
infra_workload.InfraWorkload.as_view()),
- url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/infra_workload/(?P<requri>[0-9a-zA-Z_-]*)/?$',
+ url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
infra_workload.InfraWorkload.as_view()),
url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/',
include('titanium_cloud.proxy.urls')),
- url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/tenants/?$',
+ url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'tenants/?$',
tenants.Tenants.as_view()),
url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
'(?P<tenantid>[0-9a-zA-Z_-]{20,})/', include('titanium_cloud.requests.urls')),
# CapacityCheck
- url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/capacity_check/?$',
+ url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
+ r'capacity_check/?$',
capacity.CapacityCheck.as_view()),
# API v1, new namespace due to MULTICLOUD-335
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/registry/?$',
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/registry/?$',
registration.APIv1Registry.as_view()),
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/?$',
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/?$',
registration.APIv1Registry.as_view()),
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/exten',
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/exten',
include('titanium_cloud.extensions.urlsV1')),
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/tenants/?$',
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/tenants/?$',
tenants.APIv1Tenants.as_view()),
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/'
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/'
'(?P<tenantid>[0-9a-zA-Z_-]{20,})/', include('titanium_cloud.requests.urlsV1')),
# CapacityCheck
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/capacity_check/?$',
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/capacity_check/?$',
capacity.APIv1CapacityCheck.as_view()),
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/?$',
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/?$',
infra_workload.APIv1InfraWorkload.as_view()),
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/(?P<requri>[0-9a-zA-Z_-]*)/?$',
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/'
+ r'(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
infra_workload.APIv1InfraWorkload.as_view()),
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
+ r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
include('titanium_cloud.proxy.urlsV1')),
]
diff --git a/windriver/tox.ini b/windriver/tox.ini
index e5a65d39..680db516 100644
--- a/windriver/tox.ini
+++ b/windriver/tox.ini
@@ -25,7 +25,7 @@ deps=flake8
commands=flake8
[testenv:cov]
-commands = coverage xml --omit="./venv-tox/*,./.tox/*,*tests*,*__init__.py,*newton_base*,*common*, *site-packages*"
+commands = coverage xml --omit="./venv-tox/*,./.tox/*,*tests*,*__init__.py,*newton_base*,*common*,*starlingx_base*,*site-packages*"
[testenv:pylint]
whitelist_externals = bash