path: root/windriver/titanium_cloud
author     Xiaohua Zhang <xiaohua.zhang@windriver.com>    2018-11-27 10:30:56 +0000
committer  Xiaohua Zhang <xiaohua.zhang@windriver.com>    2018-11-27 10:30:56 +0000
commit     78ead3c00e16cf9e4fd24535a925d3766c6f6aa8 (patch)
tree       3486a41b4717e75f07f1ce714e4a945f205c8907 /windriver/titanium_cloud
parent     0d045c0bb7ee6e6162d2c1fc3e8031a3b067f49f (diff)

Fix pep8 errors in windriver plugin

Fix errors in resource module

Change-Id: I59d43b3c62928bdacffc7fcd6f4c1c8055412e59
Issue-ID: MULTICLOUD-420
Signed-off-by: Xiaohua Zhang <xiaohua.zhang@windriver.com>
Diffstat (limited to 'windriver/titanium_cloud')
-rw-r--r--  windriver/titanium_cloud/requests/__init__.py                      1
-rw-r--r--  windriver/titanium_cloud/requests/views/__init__.py                1
-rw-r--r--  windriver/titanium_cloud/resource/__init__.py                      1
-rw-r--r--  windriver/titanium_cloud/resource/tests/__init__.py                1
-rw-r--r--  windriver/titanium_cloud/resource/tests/test_capacity.py          82
-rw-r--r--  windriver/titanium_cloud/resource/tests/tests_infra_workload.py  287
-rw-r--r--  windriver/titanium_cloud/resource/views/__init__.py                1
-rw-r--r--  windriver/titanium_cloud/resource/views/capacity.py               35
-rw-r--r--  windriver/titanium_cloud/resource/views/infra_workload.py        274
9 files changed, 374 insertions, 309 deletions
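
Note: the nine files above are touched almost entirely for style. Trailing blank lines are dropped from the __init__.py stubs, dict literals and operators get PEP 8 spacing, long helper calls are wrapped onto continuation lines, and the Python-2-only dict.has_key() test in infra_workload.py is replaced with an "in" membership check. The snippet below is a minimal before/after sketch of those recurring patterns; the names are illustrative and are not taken verbatim from the plugin.

    # Before: E231/E226-style violations plus the Python-2-only dict.has_key()
    limits = {"maxTotalRAMSize":128*1024, "totalRAMUsed":8*1024}
    # if parameters.has_key("flavor1"):   # removed in this change
    #     ...

    # After: spaces after ':' and around '*', membership tested with 'in'
    limits = {"maxTotalRAMSize": 128 * 1024, "totalRAMUsed": 8 * 1024}
    parameters = {"flavor1": "m1.heat"}
    if "flavor1" in parameters:
        parameters["flavor1"] = "m1.hpa.medium"

    # Long calls are broken over continuation lines to stay under 79 columns,
    # schematically mirroring the helper signature used throughout the diff:
    # retcode, content, os_status = \
    #     helper.MultiCloudServiceHelper(cloud_owner, regionid,
    #                                    v2_token_resp_json,
    #                                    service_type, resource_uri,
    #                                    None, "GET")
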
diff --git a/windriver/titanium_cloud/requests/__init__.py b/windriver/titanium_cloud/requests/__init__.py
index afa702d3..ae1ce9db 100644
--- a/windriver/titanium_cloud/requests/__init__.py
+++ b/windriver/titanium_cloud/requests/__init__.py
@@ -11,4 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
diff --git a/windriver/titanium_cloud/requests/views/__init__.py b/windriver/titanium_cloud/requests/views/__init__.py
index afa702d3..ae1ce9db 100644
--- a/windriver/titanium_cloud/requests/views/__init__.py
+++ b/windriver/titanium_cloud/requests/views/__init__.py
@@ -11,4 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
diff --git a/windriver/titanium_cloud/resource/__init__.py b/windriver/titanium_cloud/resource/__init__.py
index afa702d3..ae1ce9db 100644
--- a/windriver/titanium_cloud/resource/__init__.py
+++ b/windriver/titanium_cloud/resource/__init__.py
@@ -11,4 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
diff --git a/windriver/titanium_cloud/resource/tests/__init__.py b/windriver/titanium_cloud/resource/tests/__init__.py
index afa702d3..ae1ce9db 100644
--- a/windriver/titanium_cloud/resource/tests/__init__.py
+++ b/windriver/titanium_cloud/resource/tests/__init__.py
@@ -11,4 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
diff --git a/windriver/titanium_cloud/resource/tests/test_capacity.py b/windriver/titanium_cloud/resource/tests/test_capacity.py
index 51785308..baca720f 100644
--- a/windriver/titanium_cloud/resource/tests/test_capacity.py
+++ b/windriver/titanium_cloud/resource/tests/test_capacity.py
@@ -17,30 +17,30 @@ import json
from rest_framework import status
-from common.utils import restcall
+# from common.utils import restcall
from newton_base.tests import mock_info
from newton_base.tests import test_base
from newton_base.util import VimDriverUtils
MOCK_GET_TENANT_LIMIT_RESPONSE = {
- "limits" : {
- "rate" : [],
- "absolute" : {
- "maxTotalRAMSize" : 128*1024,
- "totalRAMUsed" : 8*1024,
- "totalCoresUsed" : 4,
- "maxTotalCores" : 20,
- }
- }
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxTotalRAMSize": 128 * 1024,
+ "totalRAMUsed": 8 * 1024,
+ "totalCoresUsed": 4,
+ "maxTotalCores": 20,
+ }
+ }
}
MOCK_GET_HYPER_STATATICS_RESPONSE = {
- "hypervisor_statistics" : {
- "vcpus_used" : 4,
- "free_ram_mb" : 120*1024,
- "vcpus" : 10,
- "free_disk_gb" : 300
- }
+ "hypervisor_statistics": {
+ "vcpus_used": 4,
+ "free_ram_mb": 120 * 1024,
+ "vcpus": 10,
+ "free_disk_gb": 300
+ }
}
MOCK_GET_STORAGE_RESPONSE_OOS = {
@@ -66,8 +66,8 @@ MOCK_GET_TENANT_LIMIT_RESPONSE_OUTOFRAM = {
"limits": {
"rate": [],
"absolute": {
- "maxTotalRAMSize": 128 * 1024,
- "totalRAMUsed": 1 * 1024,
+ "maxTotalRAMSize": 12 * 1024,
+ "totalRAMUsed": 10 * 1024,
"totalCoresUsed": 4,
"maxTotalCores": 20,
}
@@ -75,31 +75,31 @@ MOCK_GET_TENANT_LIMIT_RESPONSE_OUTOFRAM = {
}
MOCK_GET_HYPER_STATATICS_RESPONSE_OUTOFRAM = {
- "hypervisor_statistics" : {
- "vcpus_used" : 4,
- "free_ram_mb" : 1*1024,
- "vcpus" : 10,
- "free_disk_gb" : 300
- }
+ "hypervisor_statistics": {
+ "vcpus_used": 4,
+ "free_ram_mb": 1 * 1024,
+ "vcpus": 10,
+ "free_disk_gb": 300
+ }
}
MOCK_GET_HYPER_STATATICS_RESPONSE_OUTOFSTORAGE = {
- "hypervisor_statistics" : {
- "vcpus_used" : 4,
- "free_ram_mb" : 120*1024,
- "vcpus" : 10,
- "free_disk_gb" : 3
- }
+ "hypervisor_statistics": {
+ "vcpus_used": 4,
+ "free_ram_mb": 120 * 1024,
+ "vcpus": 10,
+ "free_disk_gb": 3
+ }
}
MOCK_GET_STORAGE_RESPONSE = {
- "limits" : {
- "rate" : [],
- "absolute" : {
- "totalGigabytesUsed" : 200,
- "maxTotalVolumeGigabytes" : 500,
- }
- }
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "totalGigabytesUsed": 200,
+ "maxTotalVolumeGigabytes": 500,
+ }
+ }
}
TEST_REQ_SUCCESS_SOURCE = {
@@ -108,13 +108,13 @@ TEST_REQ_SUCCESS_SOURCE = {
"Storage": "200"
}
-
TEST_REQ_FAILED_SOURCE = {
"vCPU": "17",
"Memory": "4096",
"Storage": "200"
}
+
class TestCapacity(test_base.TestRequest):
def setUp(self):
super(TestCapacity, self).setUp()
@@ -163,7 +163,7 @@ class TestCapacity(test_base.TestRequest):
"/api/multicloud-titaniumcloud/v0/windriver-hudson-dc_RegionOne/capacity_check",
TEST_REQ_FAILED_SOURCE,
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
-
+
self.assertEquals(status.HTTP_200_OK, response.status_code)
self.assertEqual({"result": False}, response.data)
@@ -252,6 +252,9 @@ class TestCapacity(test_base.TestRequest):
content_type='application/json',
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
+ self.assertEquals(status.HTTP_200_OK, response.status_code)
+ self.assertEqual({"result": False}, response.data)
+
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
def test_capacity_check_volume_limits_outofstorage(self, mock_get_vim_info, mock_get_session):
@@ -273,4 +276,3 @@ class TestCapacity(test_base.TestRequest):
self.assertEquals(status.HTTP_200_OK, response.status_code)
self.assertEqual({"result": False}, response.data)
-
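
Note: besides the whitespace cleanup, the hunk above adds the two assertions that were missing after the out-of-RAM POST, and the tests only ever check the boolean "result" field. The sketch below reconstructs the pass/fail behaviour implied by the MOCK_* fixtures; it is an illustration only, not the actual implementation in titanium_cloud/resource/views/capacity.py.

    # Illustrative only: demand must fit both the tenant limits and the
    # hypervisor/volume statistics for the check to pass.
    def has_enough_resource(demand, compute_limits, hypervisor_stats, volume_limits):
        vcpu_free = min(compute_limits["maxTotalCores"] - compute_limits["totalCoresUsed"],
                        hypervisor_stats["vcpus"] - hypervisor_stats["vcpus_used"])
        ram_free = min(compute_limits["maxTotalRAMSize"] - compute_limits["totalRAMUsed"],
                       hypervisor_stats["free_ram_mb"])
        disk_free = min(volume_limits["maxTotalVolumeGigabytes"] - volume_limits["totalGigabytesUsed"],
                        hypervisor_stats["free_disk_gb"])
        return (int(demand["vCPU"]) <= vcpu_free and
                int(demand["Memory"]) <= ram_free and
                int(demand["Storage"]) <= disk_free)

    # TEST_REQ_FAILED_SOURCE asks for 17 vCPUs while only 16 tenant cores
    # (20 - 4) remain, so this yields False, matching {"result": False}.
    print(has_enough_resource(
        {"vCPU": "17", "Memory": "4096", "Storage": "200"},
        {"maxTotalCores": 20, "totalCoresUsed": 4,
         "maxTotalRAMSize": 128 * 1024, "totalRAMUsed": 8 * 1024},
        {"vcpus": 10, "vcpus_used": 4, "free_ram_mb": 120 * 1024, "free_disk_gb": 300},
        {"maxTotalVolumeGigabytes": 500, "totalGigabytesUsed": 200}))
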
diff --git a/windriver/titanium_cloud/resource/tests/tests_infra_workload.py b/windriver/titanium_cloud/resource/tests/tests_infra_workload.py
index 1ace4f99..c6991837 100644
--- a/windriver/titanium_cloud/resource/tests/tests_infra_workload.py
+++ b/windriver/titanium_cloud/resource/tests/tests_infra_workload.py
@@ -15,91 +15,93 @@
import mock
import unittest
-import json
+# import json
from rest_framework import status
-from common.utils import restcall
+# from common.utils import restcall
from common.msapi.helper import Helper as helper
from titanium_cloud.resource.views.infra_workload import InfraWorkload
from titanium_cloud.resource.views.infra_workload import APIv1InfraWorkload
-MOCK_TOKEN_RESPONSE = {"access":
- {"token":
- {"issued_at":"2018-05-10T16:56:56.000000Z",
- "expires":"2018-05-10T17:56:56.000000Z",
- "id":"4a832860dd744306b3f66452933f939e",
- "tenant":{"domain":{"id":"default","name":"Default"},
- "enabled":"true","id":"0e148b76ee8c42f78d37013bf6b7b1ae",
- "name":"VIM"}},"serviceCatalog":[],
- "user":{"domain":{"id":"default","name":"Default"},
- "id":"ba76c94eb5e94bb7bec6980e5507aae2",
- "name":"demo"}}}
-
-MOCK_HEAT_CREATE_BODY1 = {
- "generic-vnf-id":"MOCK_GENERIF_VNF_ID1",
- "vf-module-id":"MOCK_VF_MODULE_ID1",
- "oof_directives":{
- "directives":[
- {
- "id":"MOCK_VNFC_ID1",
- "type": "vnfc",
- "directives":[{
- "type":"flavor_directives",
- "attributes":[
- {
- "attribute_name":"flavor1",
- "attribute_value":"m1.hpa.medium"
- }
- ]
- },
- {
- "type":"sriovNetNetwork_directives",
- "attributes":[
- {
- "attribute_name":"physnetwork_label",
- "attribute_value":"physnet1"
- }
- ]
- }
- ]
- }
- ]
- },
- "sdnc_directives":{},
- "template_type":"HEAT",
- "template_data":{
- "files":{ },
- "disable_rollback":True,
- "parameters":{
- "flavor1":"m1.heat"
- },
- "stack_name":"teststack",
- "template":{
- "heat_template_version":"2013-05-23",
- "description":"Simple template to test heat commands",
- "parameters":
- {
- "flavor":{
- "default":"m1.tiny",
- "type":"string"
- }
- },
- "resources":{
- "hello_world":{
- "type":"OS::Nova::Server",
- "properties":{
- "key_name":"heat_key",
- "flavor":{
- "get_param":"flavor"
- },
- "image":"40be8d1a-3eb9-40de-8abd-43237517384f",
- "user_data":"#!/bin/bash -xv\necho \"hello world\" &gt; /root/hello-world.txt\n"
+MOCK_TOKEN_RESPONSE = {
+ "access":
+ {"token": {"issued_at": "2018-05-10T16:56:56.000000Z",
+ "expires": "2018-05-10T17:56:56.000000Z",
+ "id": "4a832860dd744306b3f66452933f939e",
+ "tenant": {"domain": {"id": "default",
+ "name": "Default"},
+ "enabled": "true",
+ "id": "0e148b76ee8c42f78d37013bf6b7b1ae",
+ "name": "VIM"}},
+ "serviceCatalog": [],
+ "user": {"domain": {"id": "default",
+ "name": "Default"},
+ "id": "ba76c94eb5e94bb7bec6980e5507aae2",
+ "name": "demo"}
+ }
+}
+
+MOCK_HEAT_CREATE_BODY1 = {
+ "generic-vnf-id": "MOCK_GENERIF_VNF_ID1",
+ "vf-module-id": "MOCK_VF_MODULE_ID1",
+ "oof_directives": {
+ "directives": [
+ {
+ "id": "MOCK_VNFC_ID1",
+ "type": "vnfc",
+ "directives": [{
+ "type": "flavor_directives",
+ "attributes": [
+ {
+ "attribute_name": "flavor1",
+ "attribute_value": "m1.hpa.medium"
+ }
+ ]
+ },
+ {"type": "sriovNetNetwork_directives",
+ "attributes": [{
+ "attribute_name": "physnetwork_label",
+ "attribute_value": "physnet1"
+ }]
}
- }
- }
- },
- "timeout_mins":60
- }
+ ]
+ }
+ ]
+ },
+ "sdnc_directives": {},
+ "template_type": "HEAT",
+ "template_data": {
+ "files": {},
+ "disable_rollback": True,
+ "parameters": {
+ "flavor1": "m1.heat"
+ },
+ "stack_name": "teststack",
+ "template": {
+ "heat_template_version": "2013-05-23",
+ "description": "Simple template to test heat commands",
+ "parameters": {
+ "flavor": {
+ "default": "m1.tiny",
+ "type": "string"
+ }
+ },
+ "resources": {
+ "hello_world": {
+ "type": "OS::Nova::Server",
+ "properties": {
+ "key_name": "heat_key",
+ "flavor": {
+ "get_param": "flavor"
+ },
+ "image": "40be8d1a-3eb9-40de-8abd-43237517384f",
+ "user_data": "#!/bin/bash -xv\necho \"hello world\" &gt; /root/hello-world.txt\n"
+ }
+ }
+ }
+ },
+ "timeout_mins": 60
+ }
}
MOCK_HEAT_CREATE_RESPONSE1 = {
@@ -111,49 +113,47 @@ MOCK_HEAT_CREATE_RESPONSE1 = {
MOCK_HEAT_LIST_RESPONSE1 = {
'stacks': [
{
- 'stack_status':"CREATE_IN_PROCESS"
+ 'stack_status': "CREATE_IN_PROCESS"
}
]
}
-
-MOCK_HEAT_CREATE_BODY2 = {
- "generic-vnf-id":"MOCK_GENERIF_VNF_ID1",
- "vf-module-id":"MOCK_VF_MODULE_ID1",
- "template_type":"HEAT",
- "template_data":{
- "files":{ },
- "disable_rollback":True,
- "parameters":{
- "flavor1":"m1.heat"
- },
- "stack_name":"teststack",
- "template":{
- "heat_template_version":"2013-05-23",
- "description":"Simple template to test heat commands",
- "parameters":
- {
- "flavor":{
- "default":"m1.tiny",
- "type":"string"
- }
- },
- "resources":{
- "hello_world":{
- "type":"OS::Nova::Server",
- "properties":{
- "key_name":"heat_key",
- "flavor":{
- "get_param":"flavor"
- },
- "image":"40be8d1a-3eb9-40de-8abd-43237517384f",
- "user_data":"#!/bin/bash -xv\necho \"hello world\" &gt; /root/hello-world.txt\n"
- }
- }
- }
- },
- "timeout_mins":60
- }
+MOCK_HEAT_CREATE_BODY2 = {
+ "generic-vnf-id": "MOCK_GENERIF_VNF_ID1",
+ "vf-module-id": "MOCK_VF_MODULE_ID1",
+ "template_type": "HEAT",
+ "template_data": {
+ "files": {},
+ "disable_rollback": True,
+ "parameters": {
+ "flavor1": "m1.heat"
+ },
+ "stack_name": "teststack",
+ "template": {
+ "heat_template_version": "2013-05-23",
+ "description": "Simple template to test heat commands",
+ "parameters": {
+ "flavor": {
+ "default": "m1.tiny",
+ "type": "string"
+ }
+ },
+ "resources": {
+ "hello_world": {
+ "type": "OS::Nova::Server",
+ "properties": {
+ "key_name": "heat_key",
+ "flavor": {
+ "get_param": "flavor"
+ },
+ "image": "40be8d1a-3eb9-40de-8abd-43237517384f",
+ "user_data": "#!/bin/bash -xv\necho \"hello world\" &gt; /root/hello-world.txt\n"
+ }
+ }
+ }
+ },
+ "timeout_mins": 60
+ }
}
@@ -167,18 +167,18 @@ class InfraWorkloadTest(unittest.TestCase):
@mock.patch.object(helper, 'MultiCloudServiceHelper')
@mock.patch.object(helper, 'MultiCloudIdentityHelper')
- def test_post(self, mock_MultiCloudIdentityHelper, mock_MultiCloudServiceHelper):
+ def test_post(self, mock_MultiCloudIdentityHelper, mock_MultiCloudServiceHelper):
mock_request = mock.Mock()
mock_request.META = {"testkey": "testvalue"}
mock_request.data = MOCK_HEAT_CREATE_BODY1
- mock_MultiCloudIdentityHelper.side_effect= [
+ mock_MultiCloudIdentityHelper.side_effect = [
(0, MOCK_TOKEN_RESPONSE, status.HTTP_201_CREATED)
- ]
+ ]
- mock_MultiCloudServiceHelper.side_effect= [
+ mock_MultiCloudServiceHelper.side_effect = [
(0, MOCK_HEAT_CREATE_RESPONSE1, status.HTTP_201_CREATED)
- ]
+ ]
vimid = "CloudOwner_Region1"
@@ -186,21 +186,20 @@ class InfraWorkloadTest(unittest.TestCase):
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
pass
-
@mock.patch.object(helper, 'MultiCloudServiceHelper')
@mock.patch.object(helper, 'MultiCloudIdentityHelper')
- def test_post_wo_oof_directive(self, mock_MultiCloudIdentityHelper, mock_MultiCloudServiceHelper):
+ def test_post_wo_oof_directive(self, mock_MultiCloudIdentityHelper, mock_MultiCloudServiceHelper):
mock_request = mock.Mock()
mock_request.META = {"testkey": "testvalue"}
mock_request.data = MOCK_HEAT_CREATE_BODY2
- mock_MultiCloudIdentityHelper.side_effect= [
+ mock_MultiCloudIdentityHelper.side_effect = [
(0, MOCK_TOKEN_RESPONSE, status.HTTP_201_CREATED)
- ]
+ ]
- mock_MultiCloudServiceHelper.side_effect= [
+ mock_MultiCloudServiceHelper.side_effect = [
(0, MOCK_HEAT_CREATE_RESPONSE1, status.HTTP_201_CREATED)
- ]
+ ]
vimid = "CloudOwner_Region1"
@@ -210,17 +209,17 @@ class InfraWorkloadTest(unittest.TestCase):
@mock.patch.object(helper, 'MultiCloudServiceHelper')
@mock.patch.object(helper, 'MultiCloudIdentityHelper')
- def test_get(self, mock_MultiCloudIdentityHelper, mock_MultiCloudServiceHelper):
+ def test_get(self, mock_MultiCloudIdentityHelper, mock_MultiCloudServiceHelper):
mock_request = mock.Mock()
mock_request.META = {"testkey": "testvalue"}
- mock_MultiCloudIdentityHelper.side_effect= [
+ mock_MultiCloudIdentityHelper.side_effect = [
(0, MOCK_TOKEN_RESPONSE, status.HTTP_201_CREATED)
- ]
+ ]
- mock_MultiCloudServiceHelper.side_effect= [
+ mock_MultiCloudServiceHelper.side_effect = [
(0, MOCK_HEAT_LIST_RESPONSE1, status.HTTP_200_OK)
- ]
+ ]
vimid = "CloudOwner_Region1"
mock_stack_id = "MOCKED_HEAT_STACK_ID1"
@@ -240,18 +239,18 @@ class APIv1InfraWorkloadTest(unittest.TestCase):
@mock.patch.object(helper, 'MultiCloudServiceHelper')
@mock.patch.object(helper, 'MultiCloudIdentityHelper')
- def test_post(self, mock_MultiCloudIdentityHelper, mock_MultiCloudServiceHelper):
+ def test_post(self, mock_MultiCloudIdentityHelper, mock_MultiCloudServiceHelper):
mock_request = mock.Mock()
mock_request.META = {"testkey": "testvalue"}
mock_request.data = MOCK_HEAT_CREATE_BODY1
- mock_MultiCloudIdentityHelper.side_effect= [
+ mock_MultiCloudIdentityHelper.side_effect = [
(0, MOCK_TOKEN_RESPONSE, status.HTTP_201_CREATED)
- ]
+ ]
- mock_MultiCloudServiceHelper.side_effect= [
+ mock_MultiCloudServiceHelper.side_effect = [
(0, MOCK_HEAT_CREATE_RESPONSE1, status.HTTP_201_CREATED)
- ]
+ ]
cloud_owner = "CloudOwner"
cloud_region_id = "Region1"
@@ -260,21 +259,19 @@ class APIv1InfraWorkloadTest(unittest.TestCase):
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
pass
-
@mock.patch.object(helper, 'MultiCloudServiceHelper')
@mock.patch.object(helper, 'MultiCloudIdentityHelper')
- def test_get(self, mock_MultiCloudIdentityHelper, mock_MultiCloudServiceHelper):
+ def test_get(self, mock_MultiCloudIdentityHelper, mock_MultiCloudServiceHelper):
mock_request = mock.Mock()
mock_request.META = {"testkey": "testvalue"}
- mock_MultiCloudIdentityHelper.side_effect= [
+ mock_MultiCloudIdentityHelper.side_effect = [
(0, MOCK_TOKEN_RESPONSE, status.HTTP_201_CREATED)
- ]
+ ]
- mock_MultiCloudServiceHelper.side_effect= [
+ mock_MultiCloudServiceHelper.side_effect = [
(0, MOCK_HEAT_LIST_RESPONSE1, status.HTTP_200_OK)
- ]
-
+ ]
cloud_owner = "CloudOwner"
cloud_region_id = "Region1"
diff --git a/windriver/titanium_cloud/resource/views/__init__.py b/windriver/titanium_cloud/resource/views/__init__.py
index afa702d3..ae1ce9db 100644
--- a/windriver/titanium_cloud/resource/views/__init__.py
+++ b/windriver/titanium_cloud/resource/views/__init__.py
@@ -11,4 +11,3 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
diff --git a/windriver/titanium_cloud/resource/views/capacity.py b/windriver/titanium_cloud/resource/views/capacity.py
index 25b29da8..669cb659 100644
--- a/windriver/titanium_cloud/resource/views/capacity.py
+++ b/windriver/titanium_cloud/resource/views/capacity.py
@@ -13,12 +13,12 @@
# limitations under the License.
import logging
-import json
+# import json
import traceback
-from rest_framework import status
+# from rest_framework import status
-from django.conf import settings
+# from django.conf import settings
from common.exceptions import VimDriverNewtonException
from newton_base.util import VimDriverUtils
@@ -28,12 +28,10 @@ from rest_framework.response import Response
from rest_framework.views import APIView
from common.msapi import extsys
-
logger = logging.getLogger(__name__)
class CapacityCheck(APIView):
-
def __init__(self):
self._logger = logger
@@ -42,24 +40,24 @@ class CapacityCheck(APIView):
self._logger.debug("META> %s" % request.META)
hasEnoughResource = False
- try :
+ try:
resource_demand = request.data
tenant_name = None
vim = VimDriverUtils.get_vim_info(vimid)
sess = VimDriverUtils.get_session(vim, tenant_name)
- #get token:
+ # get token:
cloud_owner, regionid = extsys.decode_vim_id(vimid)
interface = 'public'
service = {'service_type': 'compute',
'interface': interface,
'region_name': vim['openstack_region_id']
- if vim.get('openstack_region_id')
- else vim['cloud_region_id']
+ if vim.get('openstack_region_id')
+ else vim['cloud_region_id']
}
- #get limit for this tenant
+ # get limit for this tenant
req_resouce = "/limits"
self._logger.info("check limits> URI:%s" % req_resouce)
resp = sess.get(req_resouce, endpoint_filter=service)
@@ -68,7 +66,7 @@ class CapacityCheck(APIView):
compute_limits = content['limits']['absolute']
self._logger.debug("check limits> resp data:%s" % content)
- #get total resource of this cloud region
+ # get total resource of this cloud region
try:
req_resouce = "/os-hypervisors/statistics"
self._logger.info("check os-hypervisors statistics> URI:%s" % req_resouce)
@@ -85,12 +83,15 @@ class CapacityCheck(APIView):
conFreeRamMB = int(resource_demand['Memory'])
conFreeDiskGB = int(resource_demand['Storage'])
self._logger.info("Non administator forbidden to access hypervisor statistics data")
- hypervisor_statistics = {'vcpus_used':0, 'vcpus':conVCPUS, 'free_ram_mb':conFreeRamMB, 'free_disk_gb':conFreeDiskGB}
+ hypervisor_statistics = {'vcpus_used': 0,
+ 'vcpus': conVCPUS,
+ 'free_ram_mb': conFreeRamMB,
+ 'free_disk_gb': conFreeDiskGB}
else:
# non forbiden exeption will be redirected
raise e
- #get storage limit for this tenant
+ # get storage limit for this tenant
service['service_type'] = 'volumev2'
req_resouce = "/limits"
self._logger.info("check volumev2 limits> URI:%s" % req_resouce)
@@ -131,8 +132,9 @@ class CapacityCheck(APIView):
return Response(data={'result': hasEnoughResource}, status=status.HTTP_200_OK)
except VimDriverNewtonException as e:
self._logger.error("Plugin exception> status:%s,error:%s"
- % (e.status_code, e.content))
- return Response(data={'result': hasEnoughResource,'error': e.content}, status=e.status_code)
+ % (e.status_code, e.content))
+ return Response(data={'result': hasEnoughResource,
+ 'error': e.content}, status=e.status_code)
except HttpError as e:
self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
resp = e.response.json()
@@ -144,9 +146,7 @@ class CapacityCheck(APIView):
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
class APIv1CapacityCheck(CapacityCheck):
-
def __init__(self):
super(APIv1CapacityCheck, self).__init__()
# self._logger = logger
@@ -157,4 +157,3 @@ class APIv1CapacityCheck(CapacityCheck):
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1CapacityCheck, self).post(request, vimid)
-
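
Note: CapacityCheck (and its APIv1 subclass above) is exercised over REST by the tests earlier in this change at /api/multicloud-titaniumcloud/v0/<vimid>/capacity_check. A client-side sketch follows; the base URL and token are placeholders for a real MultiCloud deployment and are not part of this commit.

    import requests  # third-party HTTP client, assumed to be available

    BASE_URL = "http://127.0.0.1:9005"       # placeholder multicloud endpoint
    VIMID = "windriver-hudson-dc_RegionOne"  # cloud-owner_region-id, as in the tests

    resp = requests.post(
        "%s/api/multicloud-titaniumcloud/v0/%s/capacity_check" % (BASE_URL, VIMID),
        json={"vCPU": "2", "Memory": "4096", "Storage": "200"},
        headers={"X-Auth-Token": "REPLACE_WITH_KEYSTONE_TOKEN"})

    # 200 OK with {"result": true} when the region can host the demanded
    # vCPU / Memory (MB) / Storage (GB), {"result": false} otherwise.
    print(resp.status_code, resp.json())
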
diff --git a/windriver/titanium_cloud/resource/views/infra_workload.py b/windriver/titanium_cloud/resource/views/infra_workload.py
index ec1e5744..681a7f91 100644
--- a/windriver/titanium_cloud/resource/views/infra_workload.py
+++ b/windriver/titanium_cloud/resource/views/infra_workload.py
@@ -16,11 +16,9 @@ import logging
import json
import traceback
-from rest_framework import status
-
from django.conf import settings
from common.exceptions import VimDriverNewtonException
-from newton_base.util import VimDriverUtils
+# from newton_base.util import VimDriverUtils
from keystoneauth1.exceptions import HttpError
from rest_framework import status
@@ -35,7 +33,6 @@ logger = logging.getLogger(__name__)
class InfraWorkload(APIView):
-
def __init__(self):
self._logger = logger
@@ -44,7 +41,7 @@ class InfraWorkload(APIView):
self._logger.info("data: %s" % (request.data))
self._logger.debug("META: %s" % request.META)
- try :
+ try:
data = request.data
oof_directive = data.get("oof_directives", {})
@@ -55,41 +52,54 @@ class InfraWorkload(APIView):
# update heat parameters from oof_directive
parameters = template_data.get("parameters", {})
- for directive in oof_directive.get("directives",[]):
+ for directive in oof_directive.get("directives", []):
if directive["type"] == "vnfc":
- for directive2 in directive.get("directives",[]):
- if directive2["type"] in ["flavor_directives", "sriovNICNetwork_directives"]:
- for attr in directive2.get("attributes",[]):
+ for directive2 in directive.get("directives", []):
+ if directive2["type"] in ["flavor_directives",
+ "sriovNICNetwork_directives"]:
+ for attr in directive2.get("attributes", []):
flavor_label = attr.get("attribute_name", None)
flavor_value = attr.get("attribute_value", None)
- if parameters.has_key(flavor_label):
+ if flavor_label in parameters:
parameters[flavor_label] = flavor_value
else:
- self._logger.warn("There is no parameter exist: %s" %
- flavor_label)
+ self._logger.warn(
+ "There is no parameter exist: %s" %
+ flavor_label)
# update parameters
- template_data["parameters"]=parameters
+ template_data["parameters"] = parameters
# reset to make sure "files" are empty
template_data["files"] = {}
# authenticate
cloud_owner, regionid = extsys.decode_vim_id(vimid)
- # should go via multicloud proxy so that the selflink is updated by multicloud
- retcode, v2_token_resp_json, os_status = helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
- cloud_owner, regionid, "/v2.0/tokens")
+ # should go via multicloud proxy so that
+ # the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+
if retcode > 0 or not v2_token_resp_json:
- logger.error("authenticate fails:%s,%s, %s" % (cloud_owner, regionid, v2_token_resp_json))
+ logger.error("authenticate fails:%s,%s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
return
# tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
service_type = "orchestration"
resource_uri = "/stacks"
self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
- retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
- service_type, resource_uri, template_data, "POST")
- stack1 = content.get('stack', None) if retcode == 0 and content else None
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ template_data, "POST")
+
+ stack1 = content.get('stack', None) \
+ if retcode == 0 and content else None
+
resp_template = {
"template_type": template_type,
"workload_id": stack1["id"] if stack1 else "",
@@ -102,35 +112,39 @@ class InfraWorkload(APIView):
else:
msg = "The template type %s is not supported" % (template_type)
self._logger.warn(msg)
- return Response(data={"error":msg}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+ return Response(data={"error": msg},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR)
except VimDriverNewtonException as e:
self._logger.error("Plugin exception> status:%s,error:%s"
- % (e.status_code, e.content))
+ % (e.status_code, e.content))
return Response(data={'error': e.content}, status=e.status_code)
except HttpError as e:
- self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
+ self._logger.error("HttpError: status:%s, response:%s" %
+ (e.http_status, e.response.json()))
return Response(data=e.response.json(), status=e.http_status)
except Exception as e:
self._logger.error(traceback.format_exc())
return Response(data={'error': str(e)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
def get(self, request, vimid="", requri=""):
self._logger.info("vimid,requri: %s, %s" % (vimid, requri))
self._logger.debug("META: %s" % request.META)
- try :
+ try:
# assume the workload_type is heat
stack_id = requri
cloud_owner, regionid = extsys.decode_vim_id(vimid)
# should go via multicloud proxy so that the selflink is updated by multicloud
- retcode, v2_token_resp_json, os_status = helper.MultiCloudIdentityHelper(
- settings.MULTICLOUD_API_V1_PREFIX,
- cloud_owner, regionid, "/v2.0/tokens")
- if retcode > 0 or not v2_token_resp_json:
- logger.error("authenticate fails:%s, %s, %s" % (cloud_owner, regionid, v2_token_resp_json))
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+
+ if retcode > 0 or not v2_token_resp_json:
+ logger.error("authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
return
# tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
# tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
@@ -139,8 +153,12 @@ class InfraWorkload(APIView):
service_type = "orchestration"
resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
- retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
- service_type, resource_uri, None, "GET")
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
stacks = content.get('stacks', []) if retcode == 0 and content else []
stack_status = stacks[0]["stack_status"] if len(stacks) > 0 else ""
@@ -162,10 +180,11 @@ class InfraWorkload(APIView):
return Response(data=resp_template, status=os_status)
except VimDriverNewtonException as e:
self._logger.error("Plugin exception> status:%s,error:%s"
- % (e.status_code, e.content))
+ % (e.status_code, e.content))
return Response(data={'error': e.content}, status=e.status_code)
except HttpError as e:
- self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
+ self._logger.error("HttpError: status:%s, response:%s" %
+ (e.http_status, e.response.json()))
return Response(data=e.response.json(), status=e.http_status)
except Exception as e:
self._logger.error(traceback.format_exc())
@@ -173,19 +192,23 @@ class InfraWorkload(APIView):
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def delete(self, request, vimid="", requri=""):
- self._logger.info("vimid,requri: %s, %s" % (vimid,requri))
+ self._logger.info("vimid,requri: %s, %s" % (vimid, requri))
self._logger.debug("META: %s" % request.META)
- try :
+ try:
# assume the workload_type is heat
stack_id = requri
cloud_owner, regionid = extsys.decode_vim_id(vimid)
- # should go via multicloud proxy so that the selflink is updated by multicloud
- retcode, v2_token_resp_json, os_status = helper.MultiCloudIdentityHelper(
- settings.MULTICLOUD_API_V1_PREFIX,
- cloud_owner, regionid, "/v2.0/tokens")
- if retcode > 0 or not v2_token_resp_json:
- logger.error("authenticate fails:%s, %s, %s" % (cloud_owner, regionid, v2_token_resp_json))
+ # should go via multicloud proxy so that
+ # the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+
+ if retcode > 0 or not v2_token_resp_json:
+ logger.error("authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
return
# tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
# tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
@@ -194,19 +217,29 @@ class InfraWorkload(APIView):
service_type = "orchestration"
resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
- retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
- service_type, resource_uri, None, "GET")
- stacks = content.get('stacks', []) if retcode == 0 and content else []
- # assume there is at most 1 stack returned since it was filtered by id
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ stacks = content.get('stacks', []) \
+ if retcode == 0 and content else []
+ # assume there is at most 1 stack returned
+ # since it was filtered by id
stack1 = stacks[0] if stacks else None
stack_status = ""
if stack1 and 'CREATE_COMPLETE' == stack1['stack_status']:
# delete the stack
- resource_uri = "/stacks/%s/%s" % (stack1['stack_name'], stack1['id'])
+ resource_uri = "/stacks/%s/%s" % \
+ (stack1['stack_name'], stack1['id'])
self._logger.info("delete stack, URI:%s" % resource_uri)
- retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
- service_type, resource_uri, None, "DELETE")
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "DELETE")
if retcode == 0:
stack_status = "DELETE_IN_PROCESS"
# and update AAI inventory by heatbridge-delete
@@ -226,10 +259,11 @@ class InfraWorkload(APIView):
return Response(status=os_status)
except VimDriverNewtonException as e:
self._logger.error("Plugin exception> status:%s,error:%s"
- % (e.status_code, e.content))
+ % (e.status_code, e.content))
return Response(data={'error': e.content}, status=e.status_code)
except HttpError as e:
- self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
+ self._logger.error("HttpError: status:%s, response:%s" %
+ (e.http_status, e.response.json()))
return Response(data=e.response.json(), status=e.http_status)
except Exception as e:
self._logger.error(traceback.format_exc())
@@ -248,27 +282,35 @@ class InfraWorkload(APIView):
cloud_owner, regionid = extsys.decode_vim_id(vimid)
# should go via multicloud proxy so that the selflink is updated by multicloud
- retcode, v2_token_resp_json, os_status = helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
- cloud_owner, regionid, "/v2.0/tokens")
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
if retcode > 0:
- logger.error("authenticate fails:%s, %s, %s" % (cloud_owner, regionid, v2_token_resp_json))
+ logger.error("authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
+
return None
tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
# tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
# common prefix
- aai_cloud_region = "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
- % (cloud_owner, regionid, tenant_id)
+ aai_cloud_region = \
+ "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
+ % (cloud_owner, regionid, tenant_id)
# get stack resource
service_type = "orchestration"
- resource_uri = "/stacks/%s/resources"%(stack_id)
+ resource_uri = "/stacks/%s/resources" % (stack_id)
self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
- retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
- service_type, resource_uri, None, "GET")
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
resources = content.get('resources', []) if retcode == 0 and content else []
- #find and update resources
+ # find and update resources
transactions = []
for resource in resources:
if resource.get('resource_status', None) != "CREATE_COMPLETE":
@@ -278,8 +320,12 @@ class InfraWorkload(APIView):
service_type = "compute"
resource_uri = "/servers/%s" % (resource['physical_resource_id'])
self._logger.info("retrieve vserver detail, URI:%s" % resource_uri)
- retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
- service_type, resource_uri, None, "GET")
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
self._logger.debug(" resp data:%s" % content)
vserver_detail = content.get('server', None) if retcode == 0 and content else None
if vserver_detail:
@@ -302,24 +348,26 @@ class InfraWorkload(APIView):
"vserver-selflink": vserver_link,
"prov-status": vserver_detail['status']
},
- "uri": aai_cloud_region + "/vservers/vserver/%s"\
- % ( vserver_detail['id'])}
+ "uri": aai_cloud_region + "/vservers/vserver/%s" % (vserver_detail['id'])
+ }
try:
# then update the resource
retcode, content, status_code = \
- restcall.req_to_aai(aai_resource['uri'], "PUT", content=aai_resource['body'])
+ restcall.req_to_aai(aai_resource['uri'],
+ "PUT", content=aai_resource['body'])
if retcode == 0 and content:
content = json.JSONDecoder().decode(content)
- self._logger.debug("AAI update %s response: %s" % (aai_resource['uri'], content))
- except Exception as e:
+ self._logger.debug("AAI update %s response: %s" %
+ (aai_resource['uri'], content))
+ except Exception:
self._logger.error(traceback.format_exc())
pass
aai_resource_transactions = {"put": [aai_resource]}
transactions.append(aai_resource_transactions)
- #self._logger.debug("aai_resource :%s" % aai_resource_transactions)
+ # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
pass
for resource in resources:
@@ -330,33 +378,42 @@ class InfraWorkload(APIView):
service_type = "network"
resource_uri = "/v2.0/ports/%s" % (resource['physical_resource_id'])
self._logger.info("retrieve vport detail, URI:%s" % resource_uri)
- retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
- service_type, resource_uri, None, "GET")
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
self._logger.debug(" resp data:%s" % content)
vport_detail = content.get('port', None) if retcode == 0 and content else None
if vport_detail:
# compose inventory entry for vport
- # note: l3-interface-ipv4-address-list, l3-interface-ipv6-address-list are not updated yet
- # note: network-name is not update yet since the detail coming with network-id
+ # note: l3-interface-ipv4-address-list,
+ # l3-interface-ipv6-address-list are not updated yet
+ # note: network-name is not update yet since the detail
+ # coming with network-id
aai_resource = {
"body": {
"interface-name": vport_detail['name'],
"interface-id": vport_detail['id'],
"macaddr": vport_detail['mac_address']
},
- 'uri': aai_cloud_region + "/vservers/vserver/%s/l-interfaces/l-interface/%s" \
- % (vport_detail['device_id'], vport_detail['name'])
+ 'uri':
+ aai_cloud_region + "/vservers/vserver/%s/l-interfaces/l-interface/%s"
+ % (vport_detail['device_id'], vport_detail['name'])
}
try:
# then update the resource
retcode, content, status_code = \
- restcall.req_to_aai(aai_resource['uri'], "PUT", content=aai_resource['body'])
+ restcall.req_to_aai(aai_resource['uri'], "PUT",
+ content=aai_resource['body'])
if retcode == 0 and content:
content = json.JSONDecoder().decode(content)
- self._logger.debug("AAI update %s response: %s" % (aai_resource['uri'], content))
- except Exception as e:
+ self._logger.debug("AAI update %s response: %s" %
+ (aai_resource['uri'], content))
+ except Exception:
self._logger.error(traceback.format_exc())
pass
@@ -369,7 +426,7 @@ class InfraWorkload(APIView):
aai_transactions = {"transactions": transactions}
self._logger.debug("aai_transactions :%s" % aai_transactions)
- return aai_transactions
+ return aai_transactions
def heatbridge_delete(self, request, vimid, stack_id):
'''
@@ -385,26 +442,33 @@ class InfraWorkload(APIView):
# enumerate the resources
cloud_owner, regionid = extsys.decode_vim_id(vimid)
# should go via multicloud proxy so that the selflink is updated by multicloud
- retcode, v2_token_resp_json, os_status = helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
- cloud_owner, regionid, "/v2.0/tokens")
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
if retcode > 0:
- logger.error("authenticate fails:%s, %s, %s" % (cloud_owner, regionid, v2_token_resp_json))
+ logger.error("authenticate fails:%s, %s, %s" %
+ (cloud_owner, regionid, v2_token_resp_json))
return None
tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
# tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
# common prefix
- aai_cloud_region = "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
- % (cloud_owner, regionid, tenant_id)
+ aai_cloud_region = \
+ "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
+ % (cloud_owner, regionid, tenant_id)
# get stack resource
service_type = "orchestration"
- resource_uri = "/stacks/%s/resources"%(stack_id)
+ resource_uri = "/stacks/%s/resources" % (stack_id)
self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
- retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
- service_type, resource_uri, None, "GET")
- resources = content.get('resources', []) if retcode == 0 and content else []
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+ resources = content.get('resources', []) \
+ if retcode == 0 and content else []
vserver_list = [resource['physical_resource_id'] for resource in resources
if resource.get('resource_type', None) == 'OS::Nova::Server']
@@ -427,19 +491,25 @@ class InfraWorkload(APIView):
# iterate vport, except will be raised if no l-interface exist
for vport in vserver['l-interfaces']['l-interface']:
# delete vport
- vport_delete_url = aai_cloud_region + "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \
- % (vserver['vserver-id'], vport['interface-name'],
- vport['resource-version'])
+ vport_delete_url = \
+ aai_cloud_region + \
+ "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \
+ % (vserver['vserver-id'], vport['interface-name'],
+ vport['resource-version'])
+
restcall.req_to_aai(vport_delete_url, "DELETE")
- except Exception as e:
+ except Exception:
pass
try:
# delete vserver
- vserver_delete_url = aai_cloud_region + "/vservers/vserver/%s?resource-version=%s" \
- % (vserver['vserver-id'], vserver['resource-version'])
+ vserver_delete_url = \
+ aai_cloud_region + \
+ "/vservers/vserver/%s?resource-version=%s" \
+ % (vserver['vserver-id'], vserver['resource-version'])
+
restcall.req_to_aai(vserver_delete_url, "DELETE")
- except Exception as e:
+ except Exception:
continue
except Exception:
@@ -449,28 +519,30 @@ class InfraWorkload(APIView):
class APIv1InfraWorkload(InfraWorkload):
-
def __init__(self):
super(APIv1InfraWorkload, self).__init__()
# self._logger = logger
def post(self, request, cloud_owner="", cloud_region_id=""):
- #self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" % (cloud_owner, cloud_region_id, request.data))
- #self._logger.debug("META: %s" % request.META)
+ # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
+ # (cloud_owner, cloud_region_id, request.data))
+ # self._logger.debug("META: %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1InfraWorkload, self).post(request, vimid)
def get(self, request, cloud_owner="", cloud_region_id="", requri=""):
- #self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" % (cloud_owner, cloud_region_id, request.data))
- #self._logger.debug("META: %s" % request.META)
+ # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
+ # (cloud_owner, cloud_region_id, request.data))
+ # self._logger.debug("META: %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1InfraWorkload, self).get(request, vimid, requri)
def delete(self, request, cloud_owner="", cloud_region_id="", requri=""):
- #self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" % (cloud_owner, cloud_region_id, request.data))
- #self._logger.debug("META: %s" % request.META)
+ # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
+ # (cloud_owner, cloud_region_id, request.data))
+ # self._logger.debug("META: %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
return super(APIv1InfraWorkload, self).delete(request, vimid, requri)