author     Xiaohua Zhang <xiaohua.zhang@windriver.com>   2019-04-10 08:51:19 +0000
committer  Xiaohua Zhang <xiaohua.zhang@windriver.com>   2019-04-10 09:48:26 +0000
commit     2024bb59b1d4ec20300304aed3a69132b9c082bc (patch)
tree       349a90cd5fc0c64335142c09bd76db86e3d2d7e7
parent     14132a2c06bc6b6431f5be90cecf303132d94103 (diff)
Fix bug of AZ cap check

Fix bugs in restcall module.
Update the status code of workload api.
Remove unused event api from lenovo.

Change-Id: Iaa16bc3aca42c4583408384c73802ff4debe1b19
Issue-ID: MULTICLOUD-542
Signed-off-by: Xiaohua Zhang <xiaohua.zhang@windriver.com>
-rw-r--r--  lenovo/thinkcloud/resource/tests/test_events.py           | 350
-rw-r--r--  lenovo/thinkcloud/resource/views/events.py                |  99
-rw-r--r--  lenovo/thinkcloud/urls.py                                  |   4
-rw-r--r--  share/common/msapi/helper.py                               |  10
-rw-r--r--  share/common/utils/aai_cache.py                            |  11
-rw-r--r--  share/common/utils/restcall.py                             |  11
-rw-r--r--  share/newton_base/registration/registration.py             |   8
-rw-r--r--  share/newton_base/resource/infra_workload_helper.py        |  60
-rw-r--r--  share/starlingx_base/registration/registration.py          |  31
-rw-r--r--  share/starlingx_base/resource/capacity.py                  |  19
-rw-r--r--  share/starlingx_base/resource/infra_workload.py            |  93
-rw-r--r--  starlingx/starlingx/resource/tests/test_capacity.py       |  14
-rw-r--r--  starlingx/starlingx/urls.py                                |   2
-rw-r--r--  windriver/titanium_cloud/resource/tests/test_capacity.py  |  14
-rw-r--r--  windriver/titanium_cloud/urls.py                           |   2
15 files changed, 146 insertions, 582 deletions
diff --git a/lenovo/thinkcloud/resource/tests/test_events.py b/lenovo/thinkcloud/resource/tests/test_events.py
deleted file mode 100644
index 5a363a4d..00000000
--- a/lenovo/thinkcloud/resource/tests/test_events.py
+++ /dev/null
@@ -1,350 +0,0 @@
-# Copyright (c) 2017-2018 Lenovo Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from rest_framework import status
-
-from newton_base.tests import mock_info
-from newton_base.tests import test_base
-from newton_base.util import VimDriverUtils
-
-MOCK_GET_SERVERS_DETAIL_RESPONSE = {
- "servers": [
- {
- "accessIPv4": "",
- "OS-EXT-SRV-ATTR:instance_name": "instance-0000000a",
- "OS-SRV-USG:terminated_at": "",
- "accessIPv6": "",
- "config_drive": "",
- "OS-DCF:diskConfig": "AUTO",
- "updated": "2018-03-27T02:17:12Z",
- "metadata": {},
- "id": "12f5b1d0-fe5c-469f-a7d4-b62a91134bf8",
- "flavor": {
- "id": "60edb520-5826-4ae7-9e07-709b19ba6f39",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/flavors/60edb520-5826-4ae7-9e07-709b19ba6f39"
- }
- ]
- },
- "links": [
- {
- "rel": "self",
- "href": "http://192.168.100.100:8774/v2.1/ad979139d5ea4a84b21b3620c0e4761e/servers/12f5b1d0-fe5c-469f-a7d4-b62a91134bf8"
- },
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/servers/12f5b1d0-fe5c-469f-a7d4-b62a91134bf8"
- }
- ],
- "OS-EXT-SRV-ATTR:host": "compute-0",
- "OS-EXT-AZ:availability_zone": "nova",
- "name": "test1",
- "wrs-res:pci_devices": "",
- "hostId": "b3479a460f5effda10c6fdb860e824be631026c1d09f551479180577",
- "user_id": "777155411f3042c9b7e3194188d6f85d",
- "status": "PAUSED",
- "OS-EXT-STS:power_state": 3,
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0",
- "tenant_id": "ad979139d5ea4a84b21b3620c0e4761e",
- "OS-SRV-USG:launched_at": "2018-03-27T02:16:40.000000",
- "OS-EXT-STS:vm_state": "paused",
- "wrs-if:nics": [
- {
- "nic1": {
- "mac_address": "fa:16:3e:5f:1a:76",
- "network": "mgmt",
- "port_id": "6c225c23-abe3-42a8-8909-83471503d5d4",
- "vif_model": "virtio",
- "vif_pci_address": "",
- "mtu": 9216
- }
- },
- {
- "nic2": {
- "mac_address": "fa:16:3e:7c:7b:d7",
- "network": "data0",
- "port_id": "cbea2fec-c9b8-48ec-a964-0e3e255841bc",
- "vif_model": "virtio",
- "vif_pci_address": "",
- "mtu": 9216
- }
- }
- ],
- "wrs-sg:server_group": "",
- "OS-EXT-STS:task_state": "",
- "wrs-res:topology": "node:0, 1024MB, pgsize:2M, 1s,1c,2t, vcpus:0,1, pcpus:5,21, siblings:{0,1}, pol:ded, thr:pre\nnode:1, 1024MB, pgsize:2M, 1s,1c,2t, vcpus:2,3, pcpus:8,24, siblings:{2,3}, pol:ded, thr:pre",
- "wrs-res:vcpus": [4, 4, 4],
- "key_name": "",
- "image": {
- "id": "7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/images/7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb"
- }
- ]
- },
- "created": "2018-03-27T02:16:32Z",
- "addresses": {
- "data0": [
- {
- "OS-EXT-IPS:type": "fixed",
- "version": 4,
- "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:7b:d7",
- "addr": "192.168.2.8"
- }
- ],
- "mgmt": [
- {
- "OS-EXT-IPS:type": "fixed",
- "version": 4,
- "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:5f:1a:76",
- "addr": "192.168.1.6"
- }
- ]
- },
- "os-extended-volumes:volumes_attached": []
- },
- {
- "accessIPv4": "",
- "OS-EXT-SRV-ATTR:instance_name": "instance-00000009",
- "OS-SRV-USG:terminated_at": "",
- "accessIPv6": "",
- "config_drive": "",
- "OS-DCF:diskConfig": "AUTO",
- "updated": "2018-03-27T02:12:21Z",
- "metadata": {},
- "id": "3f1b0375-a1db-4d94-b336-f32c82c0d7ec",
- "flavor": {
- "id": "0d3b1381-1626-4f6b-869b-4a4d5d42085e",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/flavors/0d3b1381-1626-4f6b-869b-4a4d5d42085e"
- }
- ]
- },
- "links": [
- {
- "rel": "self",
- "href": "http://192.168.100.100:8774/v2.1/ad979139d5ea4a84b21b3620c0e4761e/servers/3f1b0375-a1db-4d94-b336-f32c82c0d7ec"
- },
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/servers/3f1b0375-a1db-4d94-b336-f32c82c0d7ec"
- }
- ],
- "OS-EXT-SRV-ATTR:host": "compute-0",
- "OS-EXT-AZ:availability_zone": "nova",
- "name": "test2",
- "wrs-res:pci_devices": "",
- "hostId": "b3479a460f5effda10c6fdb860e824be631026c1d09f551479180577",
- "user_id": "777155411f3042c9b7e3194188d6f85d",
- "status": "ACTIVE",
- "OS-EXT-STS:power_state": 1,
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0",
- "tenant_id": "ad979139d5ea4a84b21b3620c0e4761e",
- "OS-SRV-USG:launched_at": "2018-03-27T02:12:21.000000",
- "OS-EXT-STS:vm_state": "active",
- "wrs-if:nics": [
- {
- "nic1": {
- "mac_address": "fa:16:3e:54:f8:a6",
- "network": "mgmt",
- "port_id": "30e2f51c-4473-4650-9ae9-a35e5d7ad452",
- "vif_model": "avp",
- "vif_pci_address": "",
- "mtu": 9216
- }
- }
- ],
- "wrs-sg:server_group": "",
- "OS-EXT-STS:task_state": "",
- "wrs-res:topology": "node:0, 4096MB, pgsize:2M, 1s,3c,1t, vcpus:0-2, pcpus:4,20,7, pol:ded, thr:pre",
- "progress": 0,
- "wrs-res:vcpus": [3, 3, 3],
- "key_name": "",
- "image": {
- "id": "7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/images/7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb"
- }
- ]
- },
- "created": "2018-03-27T02:10:26Z",
- "addresses": {
- "mgmt": [
- {
- "OS-EXT-IPS:type": "fixed",
- "version": 4,
- "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:54:f8:a6",
- "addr": "192.168.1.11"
- }
- ]
- },
- "os-extended-volumes:volumes_attached": []
- },
- {
- "accessIPv4": "",
- "OS-EXT-SRV-ATTR:instance_name": "instance-00000008",
- "OS-SRV-USG:terminated_at": "",
- "accessIPv6": "",
- "config_drive": "",
- "OS-DCF:diskConfig": "AUTO",
- "updated": "2018-03-27T02:12:15Z",
- "metadata": {},
- "id": "1b6f6671-b680-42cd-89e9-fc4ddd5d2e02",
- "flavor": {
- "id": "0d3b1381-1626-4f6b-869b-4a4d5d42085e",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/flavors/0d3b1381-1626-4f6b-869b-4a4d5d42085e"
- }
- ]
- },
- "links": [
- {
- "rel": "self",
- "href": "http://192.168.100.100:8774/v2.1/ad979139d5ea4a84b21b3620c0e4761e/servers/1b6f6671-b680-42cd-89e9-fc4ddd5d2e02"
- },
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/servers/1b6f6671-b680-42cd-89e9-fc4ddd5d2e02"
- }
- ],
- "OS-EXT-SRV-ATTR:host": "compute-0",
- "OS-EXT-AZ:availability_zone": "nova",
- "name": "test3",
- "wrs-res:pci_devices": "",
- "hostId": "b3479a460f5effda10c6fdb860e824be631026c1d09f551479180577",
- "user_id": "777155411f3042c9b7e3194188d6f85d",
- "status": "ACTIVE",
- "OS-EXT-STS:power_state": 1,
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0",
- "tenant_id": "ad979139d5ea4a84b21b3620c0e4761e",
- "OS-SRV-USG:launched_at": "2018-03-27T02:12:15.000000",
- "OS-EXT-STS:vm_state": "active",
- "wrs-if:nics": [
- {
- "nic1": {
- "mac_address": "fa:16:3e:4e:9b:75",
- "network": "mgmt",
- "port_id": "72d13987-1d94-4a64-aa1a-973869ae1cad",
- "vif_model": "avp",
- "vif_pci_address": "",
- "mtu": 9216
- }
- }
- ],
- "wrs-sg:server_group": "",
- "OS-EXT-STS:task_state": "",
- "wrs-res:topology": "node:0, 4096MB, pgsize:2M, 1s,3c,1t, vcpus:0-2, pcpus:19,3,22, pol:ded, thr:pre",
- "progress": 0,
- "wrs-res:vcpus": [3, 3, 3],
- "key_name": "",
- "image": {
- "id": "7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/images/7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb"
- }
- ]
- },
- "created": "2018-03-27T02:10:01Z",
- "addresses": {
- "mgmt": [
- {
- "OS-EXT-IPS:type": "fixed",
- "version": 4,
- "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:4e:9b:75",
- "addr": "192.168.1.8"
- }
- ]
- },
- "os-extended-volumes:volumes_attached": []
- }
- ]
-}
-
-SUCCESS_VMSTATE_RESPONSE = {
- 'result': [
- {
- 'name': 'test1',
- 'power_state': 3,
- 'id': '12f5b1d0-fe5c-469f-a7d4-b62a91134bf8',
- 'state': 'paused',
- 'tenant_id': 'ad979139d5ea4a84b21b3620c0e4761e',
- 'host': 'compute-0',
- 'availability_zone': 'nova',
- 'launched_at': '2018-03-27T02:16:40.000000'
- },
- {
- 'name': 'test2',
- 'power_state': 1,
- 'id': '3f1b0375-a1db-4d94-b336-f32c82c0d7ec',
- 'state': 'active',
- 'tenant_id': 'ad979139d5ea4a84b21b3620c0e4761e',
- 'host': 'compute-0',
- 'availability_zone': 'nova',
- 'launched_at': '2018-03-27T02:12:21.000000'
- },
- {
- 'name': 'test3',
- 'power_state': 1,
- 'id': '1b6f6671-b680-42cd-89e9-fc4ddd5d2e02',
- 'state': 'active',
- 'tenant_id': 'ad979139d5ea4a84b21b3620c0e4761e',
- 'host': 'compute-0',
- 'availability_zone': 'nova',
- 'launched_at': '2018-03-27T02:12:15.000000'
- }
- ]
-}
-
-
-class TestEvents(test_base.TestRequest):
- def setUp(self):
- super(TestEvents, self).setUp()
-
- def _get_mock_response(self, return_value=None):
- mock_response = mock.Mock(spec=test_base.MockResponse)
- mock_response.status_code = status.HTTP_200_OK
- mock_response.json.return_value = return_value
- return mock_response
-
- @mock.patch.object(VimDriverUtils, 'get_session')
- @mock.patch.object(VimDriverUtils, 'get_vim_info')
- def test_events_check_success(self, mock_get_vim_info, mock_get_session):
- mock_get_vim_info.return_value = mock_info.MOCK_VIM_INFO
- mock_get_session.return_value = test_base.get_mock_session(
- ["get"], {
- "side_effect": [
- self._get_mock_response(MOCK_GET_SERVERS_DETAIL_RESPONSE),
- ]
- })
-
- response = self.client.post(
- "/api/multicloud-thinkcloud/v0/lenovo-hudson-dc_RegionOne/events_check",
- HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
-
- self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual(SUCCESS_VMSTATE_RESPONSE, response.data)
diff --git a/lenovo/thinkcloud/resource/views/events.py b/lenovo/thinkcloud/resource/views/events.py
deleted file mode 100644
index ab706889..00000000
--- a/lenovo/thinkcloud/resource/views/events.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright (c) 2017-2018 Lenovo Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import traceback
-
-from common.exceptions import VimDriverNewtonException
-from newton_base.util import VimDriverUtils
-
-from keystoneauth1.exceptions import HttpError
-from rest_framework import status
-from rest_framework.response import Response
-from rest_framework.views import APIView
-from common.msapi import extsys
-
-
-logger = logging.getLogger(__name__)
-
-
-class EventsCheck(APIView):
-
- def __init__(self):
- self._logger = logger
-
- def post(self, request, vimid=""):
- self._logger.info("vimid, data> %s, %s" % (vimid, request.data))
- self._logger.debug("META> %s" % request.META)
-
- try:
- tenant_name = None
- vim = VimDriverUtils.get_vim_info(vimid)
- sess = VimDriverUtils.get_session(vim, tenant_name)
-
- # get token:
- cloud_owner, regionid = extsys.decode_vim_id(vimid)
- interface = 'public'
- service = {
- 'service_type': 'compute',
- 'interface': interface,
- 'region_name': vim['openstack_region_id']
- if vim.get('openstack_region_id')
- else vim['cloud_region_id']
- }
-
- # get servers detail info
- req_resouce = "/servers/detail"
- self._logger.info("check servers detail> URI:%s" % req_resouce)
- resp = sess.get(req_resouce, endpoint_filter=service)
- self._logger.info("check servers detail> status:%s" % resp.status_code)
- content = resp.json()
- self._logger.debug("check servers detail> resp data:%s" % content)
-
- # extract server status info
- if len(content['servers']):
- servers = content['servers']
- resp_vmstate = []
- for num in range(0, len(servers)):
- vmstate = {
- 'name': servers[num]['name'],
- 'state': servers[num]['OS-EXT-STS:vm_state'],
- 'power_state': servers[num]['OS-EXT-STS:power_state'],
- 'launched_at': servers[num]['OS-SRV-USG:launched_at'],
- 'id': servers[num]['id'],
- 'host': servers[num]['OS-EXT-SRV-ATTR:host'],
- 'availability_zone': servers[num]['OS-EXT-AZ:availability_zone'],
- 'tenant_id': servers[num]['tenant_id']
- }
-
- resp_vmstate.append(vmstate)
-
- self._logger.info("RESP with data> result:%s" % resp_vmstate)
- return Response(data={'result': resp_vmstate}, status=status.HTTP_200_OK)
-
- except VimDriverNewtonException as e:
- self._logger.error("Plugin exception> status:%s,error:%s" %
- (e.status_code, e.content))
- return Response(data={'result': resp_vmstate, 'error': e.content}, status=e.status_code)
-
- except HttpError as e:
- self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
- resp = e.response.json()
- resp.update({'result': resp_vmstate})
- return Response(data=e.response.json(), status=e.http_status)
-
- except Exception as e:
- self._logger.error(traceback.format_exc())
- return Response(data={'result': resp_vmstate, 'error': str(e)},
- status=status.HTTP_500_INTERNAL_SERVER_ERROR)
diff --git a/lenovo/thinkcloud/urls.py b/lenovo/thinkcloud/urls.py
index 1828bb33..9559b906 100644
--- a/lenovo/thinkcloud/urls.py
+++ b/lenovo/thinkcloud/urls.py
@@ -17,7 +17,6 @@ from django.conf.urls import include, url
from thinkcloud.registration.views import registration
from newton_base.openoapi import tenants
from thinkcloud.resource.views import capacity
-from thinkcloud.resource.views import events
from thinkcloud.resource.views import infra_workload
urlpatterns = [
@@ -42,9 +41,6 @@ urlpatterns = [
# CapacityCheck
url(r'^api/multicloud-thinkcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/capacity_check/?$',
capacity.CapacityCheck.as_view()),
- # events
- url(r'^api/multicloud-thinkcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/events_check/?$',
- events.EventsCheck.as_view()),
# API upgrading
url(r'^api/multicloud-thinkcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/registry$',
diff --git a/share/common/msapi/helper.py b/share/common/msapi/helper.py
index 947966c9..69b91953 100644
--- a/share/common/msapi/helper.py
+++ b/share/common/msapi/helper.py
@@ -13,12 +13,10 @@ import json
import logging
# import re
import uuid
-
import threading
import datetime
import time
-
-import traceback
+#import traceback
# from common.exceptions import VimDriverNewtonException
from common.utils import restcall
@@ -75,7 +73,7 @@ class MultiCloudAAIHelper(object):
'''
def __init__(self, multicloud_prefix, aai_base_url):
- logger.debug("MultiCloudAAIHelper __init__ traceback: %s" % traceback.format_exc())
+ # logger.debug("MultiCloudAAIHelper __init__ traceback: %s" % traceback.format_exc())
self.proxy_prefix = multicloud_prefix
self.aai_base_url = aai_base_url
self._logger = logger
@@ -203,7 +201,7 @@ class MultiCloudThreadHelper(object):
self.expired_backlog = {}
self.lock = threading.Lock()
self.state_ = 0 # 0: stopped, 1: started
- self.cache_prefix = "bi_"+name+"_"
+ self.cache_prefix = "bi_"+self.name+"_"
self.cache_expired_prefix = "biex_"+self.name+"_"
self.thread = MultiCloudThreadHelper.HelperThread(self)
@@ -317,7 +315,7 @@ class MultiCloudThreadHelper(object):
# sleep in case of interval > 1 second
time.sleep(nexttimer // 1000000)
nexttimer = 30*1000000 # initial interval in us to be updated:30 seconds
- # logger.debug("self.owner.backlog len: %s" % len(self.owner.backlog))
+ # logger.debug("self.owner.backlog: %s, len: %s" % (self.owner.name, len(self.owner.backlog)))
for backlog_id, item in self.owner.backlog.items():
# logger.debug("evaluate backlog item: %s" % item)
# check interval for repeatable backlog item
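
The cache-prefix fix above matters because each helper instance persists its backlog under per-name cache keys; building one prefix from the raw argument and the other from self.name let the two drift apart. A minimal sketch of the intended naming scheme, assuming the constructor stores the name before deriving both prefixes (simplified from the module above):

class MultiCloudThreadHelper(object):
    """Sketch: per-name cache prefixes so two helpers never collide."""
    def __init__(self, name=""):
        self.name = name or "default"
        # both prefixes derive from self.name, not the raw argument,
        # so the normalized name is used consistently
        self.cache_prefix = "bi_" + self.name + "_"
        self.cache_expired_prefix = "biex_" + self.name + "_"

# e.g. the registration view now creates a dedicated, named helper:
register_thread = MultiCloudThreadHelper("vimupdater")
assert register_thread.cache_prefix == "bi_vimupdater_"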
diff --git a/share/common/utils/aai_cache.py b/share/common/utils/aai_cache.py
index 53298bb8..41506aca 100644
--- a/share/common/utils/aai_cache.py
+++ b/share/common/utils/aai_cache.py
@@ -26,12 +26,14 @@ def flush_cache_by_url(resource_url):
def get_cache_by_url(resource_url):
try:
- if (filter_cache_by_url(resource_url)):
+ if filter_cache_by_url(resource_url):
value = cache.get("AAI_" + resource_url)
+ # logger.debug("Find cache the resource: %s, %s" %( resource_url, value))
return json.loads(value) if value else None
else:
return None
- except:
+ except Exception as e:
+ logger.error("get_cache_by_url exception: %s" % e.message)
return None
@@ -40,9 +42,10 @@ def set_cache_by_url(resource_url, resource_in_json):
# filter out unmanaged AAI resource
if filter_cache_by_url(resource_url):
# cache the resource for 24 hours
- logger.debug("Cache the resource: "+ resource_url)
+ # logger.debug("Cache the resource: "+ resource_url)
cache.set("AAI_" + resource_url, json.dumps(resource_in_json), 3600 * 24)
- except:
+ except Exception as e:
+ logger.error("get_cache_by_url exception: %s" % e.message)
pass
def filter_cache_by_url(resource_url):
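
The bare except: clauses replaced above silently swallowed every failure, including cache-backend outages. A sketch of the guarded read path, assuming Django's cache API and a module-level logger (note e.message in the diff is Python 2 only; str(e) is the portable spelling):

import json
import logging

from django.core.cache import cache

logger = logging.getLogger(__name__)

def get_cache_by_url(resource_url):
    """Return the cached AAI resource, or None on a miss or any failure."""
    try:
        value = cache.get("AAI_" + resource_url)
        return json.loads(value) if value else None
    except Exception as e:
        # log instead of a bare except so backend errors stay visible
        logger.error("get_cache_by_url exception: %s" % str(e))
        return None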
diff --git a/share/common/utils/restcall.py b/share/common/utils/restcall.py
index eb4cb008..464dd65f 100644
--- a/share/common/utils/restcall.py
+++ b/share/common/utils/restcall.py
@@ -65,9 +65,10 @@ def _call_req(base_url, user, passwd, auth_type,
headers['Authorization'] = 'Basic ' + \
base64.b64encode(tmpauthsource).decode('utf-8')
- logger.info("Making rest call with uri,method, header = %s, %s, %s" % (full_url, method.upper(), headers))
+ logger.info("Making rest call with method, uri, header = %s, %s, %s" %
+ (method.upper(), full_url, headers))
if content:
- logger.debug("with content = %s" % (content))
+ logger.debug("with content = %s" % content)
ca_certs = None
for retry_times in range(MAX_RETRY_TIME):
@@ -138,8 +139,9 @@ def req_to_aai(resource, method, content='', appid=settings.MULTICLOUD_APP_ID, n
# hook to flush cache
if method.upper() in ["PUT", "POST", "PATCH", "DELETE"]:
aai_cache.flush_cache_by_url(resource)
- elif method.upper in ["GET"] and not nocache:
+ elif method.upper() in ["GET"] and not nocache:
content = aai_cache.get_cache_by_url(resource)
+ # logger.debug("cached resource: %s, %s" % (resource, content))
if content:
return content
@@ -148,7 +150,8 @@ def req_to_aai(resource, method, content='', appid=settings.MULTICLOUD_APP_ID, n
resource, method, content=json.dumps(content), extra_headers=headers)
if method.upper() in ["GET"] and ret == 0 and not nocache:
- aai_cache.set_cache_by_url(resource, [ret, resp_body, resp_status])
+ # aai_cache.set_cache_by_url(resource, [ret, resp_body, resp_status])
+ aai_cache.set_cache_by_url(resource, (ret, resp_body, resp_status))
return [ret, resp_body, resp_status]
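
The one-character fix in req_to_aai is easy to miss: method.upper without parentheses is a bound method object, and a bound method is never an element of ["GET"], so the cache-read branch could never fire. A minimal reproduction:

method = "get"

# buggy: tests membership of the bound method object itself, always False
assert (method.upper in ["GET"]) is False

# fixed: calls the method, compares the string "GET"
assert (method.upper() in ["GET"]) is True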
diff --git a/share/newton_base/registration/registration.py b/share/newton_base/registration/registration.py
index a875cd84..fe31478d 100644
--- a/share/newton_base/registration/registration.py
+++ b/share/newton_base/registration/registration.py
@@ -41,7 +41,7 @@ class Registry(APIView):
if not hasattr(self, "register_thread"):
# dedicate thread to offload vim registration process
- self.register_thread = MultiCloudThreadHelper()
+ self.register_thread = MultiCloudThreadHelper("vimupdater")
if not hasattr(self, "register_helper") or not self.register_helper:
if not hasattr(self, "proxy_prefix"):
@@ -67,8 +67,7 @@ class Registry(APIView):
"payload": (vimid, specified_project_idorname),
"repeat": 0,
"status": (1,
- "The registration process waits to"
- " be scheduled to run")
+ "The registration is on progress")
}
self.register_thread.add(backlog_item)
if 0 == self.register_thread.state():
@@ -121,8 +120,7 @@ class Registry(APIView):
"worker": self.register_helper.unregistryV0,
"payload": (vimid),
"repeat": 0,
- "status": (1, "The registration process waits"
- " to be scheduled to run")
+ "status": (1, "The de-registration is on process")
}
self.register_thread.add(backlog_item)
if 0 == self.register_thread.state():
diff --git a/share/newton_base/resource/infra_workload_helper.py b/share/newton_base/resource/infra_workload_helper.py
index ee8291b1..13d1e18d 100644
--- a/share/newton_base/resource/infra_workload_helper.py
+++ b/share/newton_base/resource/infra_workload_helper.py
@@ -14,7 +14,7 @@
import logging
import json
-
+from rest_framework import status
from django.conf import settings
from common.msapi import extsys
from common.msapi.helper import Helper as helper
@@ -50,7 +50,7 @@ class InfraWorkloadHelper(object):
template_data = data.get("template_data", {})
# resp_template = None
if not template_type or "heat" != template_type.lower():
- return 14, "CREATE_FAILED", \
+ return status.HTTP_400_BAD_REQUEST, "CREATE_FAILED", \
"Bad parameters: template type %s is not heat" %\
template_type or ""
@@ -93,7 +93,7 @@ class InfraWorkloadHelper(object):
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
return (
- retcode, "CREATE_FAILED", errmsg
+ os_status, "CREATE_FAILED", errmsg
)
# tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
@@ -111,8 +111,8 @@ class InfraWorkloadHelper(object):
# stackid = stack1["id"] if stack1 else ""
return 0, "CREATE_IN_PROGRESS", stack1
else:
- self._logger.info("RESP with data> result:%s" % content)
- return retcode, "CREATE_FAILED", content
+ self._logger.info("workload_create fail: %s" % content)
+ return os_status, "CREATE_FAILED", content
def workload_update(self, vimid, stack_id, otherinfo=None, project_idorname=None):
'''
@@ -139,7 +139,7 @@ class InfraWorkloadHelper(object):
errmsg = "authenticate fails:%s, %s, %s" %\
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
- return retcode, "UPDATE_FAILED", errmsg
+ return os_status, "UPDATE_FAILED", errmsg
tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
# tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
@@ -164,7 +164,7 @@ class InfraWorkloadHelper(object):
errmsg = "stack:%s, query fails: %s" %\
(resource_uri, content)
logger.error(errmsg)
- return retcode, "UPDATE_FAILED", errmsg
+ return os_status, "UPDATE_FAILED", errmsg
# find and update resources
# transactions = []
@@ -174,7 +174,7 @@ class InfraWorkloadHelper(object):
errmsg = "stack: %s, resource not ready :%s" % \
(resource_uri, resource)
logger.info(errmsg)
- return retcode, "UPDATE_FAILED", errmsg
+ return status.HTTP_206_PARTIAL_CONTENT, "UPDATE_FAILED", errmsg
# continue
if resource.get('resource_type', None) == 'OS::Nova::Server':
# retrieve vserver details
@@ -192,7 +192,7 @@ class InfraWorkloadHelper(object):
errmsg = "stack resource:%s, query fails: %s" % \
(resource_uri, content)
logger.error(errmsg)
- return retcode, "UPDATE_FAILED", errmsg
+ return os_status, "UPDATE_FAILED", errmsg
vserver_detail = content.get('server', None) if retcode == 0 and content else None
if vserver_detail:
# compose inventory entry for vserver
@@ -229,7 +229,7 @@ class InfraWorkloadHelper(object):
(aai_resource['uri'], content))
except Exception as e:
self._logger.error(e.message)
- return retcode, "UPDATE_FAILED", e.message
+ return status.HTTP_500_INTERNAL_SERVER_ERROR, "UPDATE_FAILED", e.message
# aai_resource_transactions = {"put": [aai_resource]}
# transactions.append(aai_resource_transactions)
@@ -254,7 +254,7 @@ class InfraWorkloadHelper(object):
errmsg = "stack resource:%s, query fails: %s" % \
(resource_uri, content)
logger.error(errmsg)
- return retcode, "UPDATE_FAILED", errmsg
+ return os_status, "UPDATE_FAILED", errmsg
vport_detail = content.get('port', None) if retcode == 0 and content else None
if vport_detail:
@@ -285,7 +285,7 @@ class InfraWorkloadHelper(object):
(aai_resource['uri'], content))
except Exception as e:
self._logger.error(e.message)
- return retcode, "UPDATE_FAILED", e.message
+ return status.HTTP_500_INTERNAL_SERVER_ERROR, "UPDATE_FAILED", e.message
# aai_resource_transactions = {"put": [aai_resource]}
# transactions.append(aai_resource_transactions)
@@ -320,7 +320,7 @@ class InfraWorkloadHelper(object):
errmsg = "authenticate fails:%s, %s, %s" %\
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
- return retcode, "DELETE_FAILED", errmsg
+ return os_status, "DELETE_FAILED", errmsg
tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
# tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
@@ -352,7 +352,7 @@ class InfraWorkloadHelper(object):
restcall.req_to_aai(vserver_list_url, "GET")
if retcode > 0 or not content:
self._logger.debug("AAI get %s response: %s" % (vserver_list_url, content))
- return (retcode, "DELETE_FAILED", "authenticate fails:%s, %s, %s" %
+ return (status_code, "DELETE_FAILED", "authenticate fails:%s, %s, %s" %
(cloud_owner, regionid, v2_token_resp_json))
content = json.JSONDecoder().decode(content)
@@ -390,7 +390,7 @@ class InfraWorkloadHelper(object):
return 0, "DELETE_COMPLETE", "succeed"
except Exception as e:
self._logger.error(e.message)
- return 12, "DELETE_FAILED", e.message
+ return status.HTTP_500_INTERNAL_SERVER_ERROR, "DELETE_FAILED", e.message
pass
def workload_status(self, vimid, stack_id=None, stack_name=None, otherinfo=None, project_idorname=None):
@@ -416,13 +416,15 @@ class InfraWorkloadHelper(object):
errmsg = "authenticate fails:%s, %s, %s" % \
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
- return retcode, "GET_FAILED", errmsg
+ return os_status, "GET_FAILED", errmsg
# get stack status
service_type = "orchestration"
- resource_uri = "/stacks/id=%s" % stack_id if stack_id else "/stacks"
- if stack_name:
- resource_uri = "/stacks?name=%s" % stack_name if not stack_id else resource_uri
+ resource_uri = "/stacks"
+ if stack_id:
+ resource_uri = "/stacks/id=%s" % stack_id
+ elif stack_name:
+ resource_uri = "/stacks?name=%s" % stack_name
self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
retcode, content, os_status = \
@@ -434,15 +436,16 @@ class InfraWorkloadHelper(object):
if retcode > 0 or not content:
errmsg = "Stack query %s response: %s" % (resource_uri, content)
self._logger.debug(errmsg)
- return retcode, "GET_FAILED", errmsg
+ return os_status, "GET_FAILED", errmsg
stacks = content.get('stacks', []) # if retcode == 0 and content else []
- stack_status = stacks[0].get("stack_status", "GET_FAILED") if len(stacks) > 0 else "GET_FAILED"
+ # stack_status = stacks[0].get("stack_status", "GET_FAILED") if len(stacks) > 0 else "GET_FAILED"
+ workload_status = "GET_COMPLETE"
- return retcode, stack_status, content
+ return retcode, workload_status, content
except Exception as e:
self._logger.error(e.message)
- return 12, "GET_FAILED", e.message
+ return status.HTTP_500_INTERNAL_SERVER_ERROR, "GET_FAILED", e.message
def workload_detail(self, vimid, stack_id, nexturi=None, otherinfo=None, project_idorname=None):
@@ -468,7 +471,7 @@ class InfraWorkloadHelper(object):
errmsg = "authenticate fails:%s, %s, %s" % \
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
- return retcode, "GET_FAILED", errmsg
+ return os_status, "GET_FAILED", errmsg
# get stack status
service_type = "orchestration"
@@ -486,12 +489,13 @@ class InfraWorkloadHelper(object):
if retcode > 0 or not content:
errmsg = "Stack query %s response: %s" % (resource_uri, content)
self._logger.debug(errmsg)
- return retcode, "GET_FAILED", errmsg
+ return os_status, "GET_FAILED", errmsg
stack = content.get('stack', {}) # if retcode == 0 and content else []
- stack_status = stack.get("stack_status", "GET_FAILED")
+ # stack_status = stack.get("stack_status", "GET_FAILED")
+ workload_status = "GET_COMPLETE"
- return retcode, stack_status, content
+ return 0, workload_status, content
except Exception as e:
self._logger.error(e.message)
- return 12, "GET_FAILED", e.message
+ return status.HTTP_500_INTERNAL_SERVER_ERROR, "GET_FAILED", e.message
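
Two patterns recur through this file: error paths now return the OpenStack-side HTTP status (os_status) or a DRF status constant instead of magic integers like 12 and 14, and the stack-query URI is chosen by explicit precedence rather than chained conditionals. A sketch of both, assuming the Heat /stacks endpoints used above:

from rest_framework import status

def build_stack_query_uri(stack_id=None, stack_name=None):
    """Stack id wins over name; with neither, list all stacks."""
    if stack_id:
        return "/stacks/id=%s" % stack_id
    elif stack_name:
        return "/stacks?name=%s" % stack_name
    return "/stacks"

# error tuples now carry a real HTTP status instead of a bare 12/14
BAD_TEMPLATE = (status.HTTP_400_BAD_REQUEST, "CREATE_FAILED",
                "Bad parameters: template type is not heat")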
diff --git a/share/starlingx_base/registration/registration.py b/share/starlingx_base/registration/registration.py
index cf0281f7..dd71c1b4 100644
--- a/share/starlingx_base/registration/registration.py
+++ b/share/starlingx_base/registration/registration.py
@@ -165,6 +165,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
multi_region_discovery = cloud_extra_info.get(
"multi-region-discovery", None) if cloud_extra_info else None
+ sess = None
if project_idorname:
try:
# check if specified with tenant id
@@ -406,6 +407,7 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
self._logger.warn("azcap_audit no valid vimid: %s" % vimid)
return
+ sess = None
if project_idorname:
try:
# check if specified with tenant id
@@ -459,9 +461,8 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
viminfo, vimid,
"availabilityZoneInfo"):
az_info = {
- 'availability-zone-name': az['zoneName'],
- 'operational-status': az['zoneState']['available']
- if az.get('zoneState') else '',
+ 'availability-zone-name': az.get('zoneName', ""),
+ 'operational-status': az.get('zoneState', {}).get('available', ""),
'hypervisor-type': '',
}
# filter out the default az: "internal" and "nova"
@@ -480,7 +481,7 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
# Get current cap info of azName
azCapCacheKey = "cap_" + vimid + "_" + azName
azCapInfoCacheStr = cache.get(azCapCacheKey)
- azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else None
+ azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else {}
for psname in pservers_info:
psinfo = hypervisors_dict.get(psname, None)
@@ -490,7 +491,7 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
# get current pserver cap info
psCapInfoCacheKey = "cap_" + vimid + "_" + psname
psCapInfoCacheStr = cache.get(psCapInfoCacheKey)
- psCapInfoCache = json.loads(psCapInfoCacheStr) if psCapInfoCacheStr else None
+ psCapInfoCache = json.loads(psCapInfoCacheStr) if psCapInfoCacheStr else {}
# compare latest info with cached one
vcpu_delta = 0
@@ -523,19 +524,21 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
localstorage_free_delta += psinfo.get("free_disk_gb", 0)\
- psCapInfoCache.get("free_disk_gb", 0)
psCapInfoCache["free_disk_gb"] = psinfo.get("free_disk_gb", 0)
- pass
- # now apply the delta to azCapInfo
- azCapInfoCache["vcpus"] = azCapInfoCache.get("vcpus", 0) + vcpu_delta
- azCapInfoCache["memory_mb"] = azCapInfoCache.get("memory_mb", 0) + mem_delta
- azCapInfoCache["local_gb"] = azCapInfoCache.get("local_gb", 0) + localstorage_delta
- azCapInfoCache["vcpus_used"] = azCapInfoCache.get("vcpus_used", 0) + vcpu_used_delta
- azCapInfoCache["free_ram_mb"] = azCapInfoCache.get("free_ram_mb", 0) + mem_free_delta
- azCapInfoCache["free_disk_gb"] = azCapInfoCache.get("free_disk_gb", 0) + localstorage_free_delta
+ cache.set(psCapInfoCacheKey, json.dumps(psCapInfoCache), 3600 * 24)
+
+ # now apply the delta to azCapInfo
+ azCapInfoCache["vcpus"] = azCapInfoCache.get("vcpus", 0) + vcpu_delta
+ azCapInfoCache["memory_mb"] = azCapInfoCache.get("memory_mb", 0) + mem_delta
+ azCapInfoCache["local_gb"] = azCapInfoCache.get("local_gb", 0) + localstorage_delta
+ azCapInfoCache["vcpus_used"] = azCapInfoCache.get("vcpus_used", 0) + vcpu_used_delta
+ azCapInfoCache["free_ram_mb"] = azCapInfoCache.get("free_ram_mb", 0) + mem_free_delta
+ azCapInfoCache["free_disk_gb"] = azCapInfoCache.get("free_disk_gb", 0) + localstorage_free_delta
+ pass
# update the cache
cache.set(azCapCacheKey, json.dumps(azCapInfoCache), 3600 * 24)
- cache.set(vimAzCacheKey, vimAzList, 3600 * 24)
+ cache.set(vimAzCacheKey, json.dumps(vimAzList), 3600 * 24)
except Exception as e:
self._logger.error("azcap_audit raise exception: %s" % e)
pass
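
The audit fix above does two things: each pserver's capacity snapshot is written back to the cache every pass, and the accumulated deltas are applied to the AZ aggregate inside the pserver loop instead of once after it. A condensed sketch of the delta accounting, assuming the field names used above:

def apply_pserver_deltas(az_cap, ps_cap_old, ps_info):
    """Fold one pserver's capacity change into the AZ aggregate."""
    for key in ("vcpus", "memory_mb", "local_gb",
                "vcpus_used", "free_ram_mb", "free_disk_gb"):
        delta = ps_info.get(key, 0) - ps_cap_old.get(key, 0)
        az_cap[key] = az_cap.get(key, 0) + delta
        ps_cap_old[key] = ps_info.get(key, 0)  # becomes the new baseline
    # both dicts are then stored as JSON strings via json.dumps(...),
    # matching the vimAzList fix in the hunk above
    return az_cap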
diff --git a/share/starlingx_base/resource/capacity.py b/share/starlingx_base/resource/capacity.py
index cbdedaa3..861d4d50 100644
--- a/share/starlingx_base/resource/capacity.py
+++ b/share/starlingx_base/resource/capacity.py
@@ -67,27 +67,34 @@ class CapacityCheck(newton_capacity.CapacityCheck):
# get list of AZ
vimAzCacheKey = "cap_azlist_" + vimid
vimAzListCacheStr = cache.get(vimAzCacheKey)
+ self._logger.debug("Found AZ list: %s" % vimAzListCacheStr)
vimAzListCache = json.loads(vimAzListCacheStr) if vimAzListCacheStr else []
azCapInfoList = []
for azName in vimAzListCache:
azCapCacheKey = "cap_" + vimid + "_" + azName
azCapInfoCacheStr = cache.get(azCapCacheKey)
+ self._logger.debug("Found AZ info: %s, %s" % (azCapCacheKey, azCapInfoCacheStr))
if not azCapInfoCacheStr:
continue
azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else None
azCapInfo = {}
azCapInfo["availability-zone-name"] = azName
- azCapInfo["vCPUAvail"] = azCapInfoCache.get("vcpus", 0) + azCapInfoCache.get("vcpus_used", 0)
- azCapInfo["vCPUTotal"] = azCapInfoCache.get("vcpus", 0)
- azCapInfo["MemoryAvail"] = azCapInfoCache.get("vcpus", 0)
- azCapInfo["MemoryTotal"] = azCapInfoCache.get("vcpus", 0)
- azCapInfo["StorageAvail"] = azCapInfoCache.get("vcpus", 0)
- azCapInfo["StorageTotal"] = azCapInfoCache.get("vcpus", 0)
+ # vcpu ratio: cpu_allocation_ratio=16 by default
+ azCapInfo["vCPUAvail"] = \
+ (azCapInfoCache.get("vcpus", 0)
+ - azCapInfoCache.get("vcpus_used", 0)) * 16
+ azCapInfo["vCPUTotal"] = azCapInfoCache.get("vcpus", 0) * 16
+ # mem size in MB
+ azCapInfo["MemoryAvail"] = azCapInfoCache.get("free_ram_mb", 0) / 1024.0
+ azCapInfo["MemoryTotal"] = azCapInfoCache.get("memory_mb", 0) / 1024.0
+ azCapInfo["StorageAvail"] = azCapInfoCache.get("free_disk_gb", 0)
+ azCapInfo["StorageTotal"] = azCapInfoCache.get("local_gb", 0)
azCapInfoList.append(azCapInfo)
return azCapInfoList
except Exception as e:
+ self._logger.error(traceback.format_exc())
return azCapInfo
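
The corrected report derives available vCPUs from the unused physical count scaled by the default overcommit ratio (cpu_allocation_ratio=16) and converts memory from MB to GB; the old code copied the vcpus field into every metric. A worked sketch, assuming those cache field names; the ratio is the Nova default, not read from the cloud:

CPU_ALLOCATION_RATIO = 16  # Nova default; an assumption, not queried

def az_capacity(cached):
    """Translate cached hypervisor stats into the AZ capacity report."""
    return {
        "vCPUAvail": (cached.get("vcpus", 0)
                      - cached.get("vcpus_used", 0)) * CPU_ALLOCATION_RATIO,
        "vCPUTotal": cached.get("vcpus", 0) * CPU_ALLOCATION_RATIO,
        "MemoryAvail": cached.get("free_ram_mb", 0) / 1024.0,  # MB -> GB
        "MemoryTotal": cached.get("memory_mb", 0) / 1024.0,
        "StorageAvail": cached.get("free_disk_gb", 0),
        "StorageTotal": cached.get("local_gb", 0),
    }

# e.g. 8 free of 16 physical vcpus, 32768 MB RAM free:
# vCPUAvail = (16 - 8) * 16 = 128, MemoryAvail = 32.0 GB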
diff --git a/share/starlingx_base/resource/infra_workload.py b/share/starlingx_base/resource/infra_workload.py
index 409d74ed..fc6d7ef2 100644
--- a/share/starlingx_base/resource/infra_workload.py
+++ b/share/starlingx_base/resource/infra_workload.py
@@ -91,7 +91,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
# format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
"status": (
0, "UPDATE_IN_PROGRESS",
- "backlog to update workload %s pends to schedule" % workloadid
+ "backlog to update workload %s is on progress" % workloadid
)
}
gInfraWorkloadThread.add(backlog_item)
@@ -158,10 +158,10 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
# now check the query params in case of query existing of workload
querystr = request.META.get("QUERY_STRING", None)
qd = QueryDict(querystr).dict() if querystr else None
- workload_name = qd.get("name", None) if qd else None
- workload_id = qd.get("id", None) if qd else None
+ workload_query_name = qd.get("name", None) if qd else None
+ workload_query_id = qd.get("id", None) if qd else None
- if not workload_name and not workload_id:
+ if not workload_query_name and not workload_query_id:
resp_template["workload_status_reason"] =\
"workload id is not found in API url"
return Response(
@@ -177,64 +177,65 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
# now query the status of workload by name or id, id as 1st priority
progress_code, progress_status, progress_msg =\
0, "GET_FAILED", ""
- if not workload_id:
- # by name
+ if workload_query_id:
+ # by id
progress_code, progress_status, progress_msg =\
worker_self.workload_status(
- vimid, stack_name=workload_name,
+ vimid, stack_id=workload_query_id,
project_idorname=specified_project_idorname
)
else:
- # by id
+ # by name or get all stacks
progress_code, progress_status, progress_msg =\
worker_self.workload_status(
- vimid, stack_id=workloadid,
+ vimid, stack_name=workload_query_name,
project_idorname=specified_project_idorname
)
resp_template["workload_status"] = progress_status
resp_template["workload_status_reason"] = progress_msg
status_code = status.HTTP_200_OK \
- if progress_code == 0 else progress_code
+ if progress_code == 0 else status.HTTP_500_INTERNAL_SERVER_ERROR # progress_code
pass
- # now query the progress
- backlog_item = gInfraWorkloadThread.get(workloadid)
- if not backlog_item:
- # backlog item not found, so check the stack status
- worker_self = InfraWorkloadHelper(
- settings.MULTICLOUD_API_V1_PREFIX,
- settings.AAI_BASE_URL
- )
- progress_code, progress_status, progress_msg =\
- worker_self.workload_detail(
- vimid, stack_id=workloadid,
- project_idorname=specified_project_idorname)
-
- resp_template["workload_status"] = progress_status
- resp_template["workload_status_reason"] = progress_msg
- status_code = status.HTTP_200_OK\
- if progress_code == 0 else progress_code
-
else:
- progress = backlog_item.get(
- "status",
- (13, "GET_FAILED",
- "Unexpected:status not found in backlog item")
- )
- try:
- progress_code = progress[0]
- progress_status = progress[1]
- progress_msg = progress[2]
- # if gInfraWorkloadThread.expired(workloadid):
- # gInfraWorkloadThread.remove(workloadid)
+ # now query the progress
+ backlog_item = gInfraWorkloadThread.get(workloadid)
+ if not backlog_item:
+ # backlog item not found, so check the stack status
+ worker_self = InfraWorkloadHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
+ )
+ progress_code, progress_status, progress_msg =\
+ worker_self.workload_detail(
+ vimid, stack_id=workloadid,
+ project_idorname=specified_project_idorname)
+
resp_template["workload_status"] = progress_status
resp_template["workload_status_reason"] = progress_msg
status_code = status.HTTP_200_OK\
if progress_code == 0 else progress_code
- except Exception as e:
- resp_template["workload_status_reason"] = progress
+
+ else:
+ progress = backlog_item.get(
+ "status",
+ (13, "GET_FAILED",
+ "Unexpected:status not found in backlog item")
+ )
+ try:
+ progress_code = progress[0]
+ progress_status = progress[1]
+ progress_msg = progress[2]
+ # if gInfraWorkloadThread.expired(workloadid):
+ # gInfraWorkloadThread.remove(workloadid)
+ resp_template["workload_status"] = progress_status
+ resp_template["workload_status_reason"] = progress_msg
+ status_code = status.HTTP_200_OK\
+ if progress_code == 0 else progress_code
+ except Exception as e:
+ resp_template["workload_status_reason"] = progress
return Response(data=resp_template, status=status_code)
@@ -286,7 +287,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
"status": (
0, "DELETE_IN_PROGRESS",
"backlog for delete the workload %s "
- "pends to schedule" % workloadid
+ "is on progress" % workloadid
)
}
gInfraWorkloadThread.add(backlog_item)
@@ -488,7 +489,7 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
template_data = data.get("template_data", {})
# resp_template = None
if not template_type or "heat" != template_type.lower():
- return 14, "CREATE_FAILED", \
+ return status.HTTP_400_BAD_REQUEST, "CREATE_FAILED", \
"Bad parameters: template type %s is not heat" %\
template_type or ""
@@ -523,7 +524,7 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
return (
- retcode, "CREATE_FAILED", errmsg
+ os_status, "CREATE_FAILED", errmsg
)
# tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
@@ -541,5 +542,5 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
# stackid = stack1["id"] if stack1 else ""
return 0, "CREATE_IN_PROGRESS", stack1
else:
- self._logger.info("RESP with data> result:%s" % content)
- return retcode, "CREATE_FAILED", content
+ self._logger.info("workload_create fails: %s" % content)
+ return os_status, "CREATE_FAILED", content
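
The restructured GET handler above now has three clear paths: with no workload id in the URL, the query string is consulted (id beating name); with a path id, the in-flight backlog entry is preferred and a live Heat detail query is the fallback. A compact sketch of that dispatch, with project_idorname omitted for brevity and helper names borrowed from the module above:

def get_workload_status(worker, vimid, workloadid, qd, backlog):
    """qd: parsed query dict; backlog: gInfraWorkloadThread-like store."""
    if not workloadid:
        # no path id: fall back to the query string, id taking priority
        query_id = qd.get("id") if qd else None
        query_name = qd.get("name") if qd else None
        if query_id:
            return worker.workload_status(vimid, stack_id=query_id)
        return worker.workload_status(vimid, stack_name=query_name)

    # path id given: prefer the in-flight backlog entry, else query Heat
    item = backlog.get(workloadid)
    if item is None:
        return worker.workload_detail(vimid, stack_id=workloadid)
    return item.get("status", (13, "GET_FAILED",
                               "Unexpected:status not found in backlog item"))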
diff --git a/starlingx/starlingx/resource/tests/test_capacity.py b/starlingx/starlingx/resource/tests/test_capacity.py
index c48f0eff..ee30985f 100644
--- a/starlingx/starlingx/resource/tests/test_capacity.py
+++ b/starlingx/starlingx/resource/tests/test_capacity.py
@@ -143,7 +143,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": True}, response.data)
+ self.assertEqual({'AZs': [], "result": True}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -164,7 +164,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -186,7 +186,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -208,7 +208,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -230,7 +230,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -252,7 +252,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -274,4 +274,4 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
diff --git a/starlingx/starlingx/urls.py b/starlingx/starlingx/urls.py
index ca9ef240..f340b7fd 100644
--- a/starlingx/starlingx/urls.py
+++ b/starlingx/starlingx/urls.py
@@ -15,7 +15,7 @@
from django.conf.urls import include, url
from starlingx_base.registration import registration
from newton_base.openoapi import tenants
-from newton_base.resource import capacity
+from starlingx_base.resource import capacity
from starlingx_base.resource import infra_workload
urlpatterns = [
diff --git a/windriver/titanium_cloud/resource/tests/test_capacity.py b/windriver/titanium_cloud/resource/tests/test_capacity.py
index baca720f..82c453a2 100644
--- a/windriver/titanium_cloud/resource/tests/test_capacity.py
+++ b/windriver/titanium_cloud/resource/tests/test_capacity.py
@@ -144,7 +144,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": True}, response.data)
+ self.assertEqual({'AZs': [],"result": True}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -165,7 +165,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [],"result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -187,7 +187,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [],"result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -209,7 +209,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [],"result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -231,7 +231,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [],"result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -253,7 +253,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -275,4 +275,4 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
diff --git a/windriver/titanium_cloud/urls.py b/windriver/titanium_cloud/urls.py
index 7a9f6455..a0537ddc 100644
--- a/windriver/titanium_cloud/urls.py
+++ b/windriver/titanium_cloud/urls.py
@@ -16,7 +16,7 @@ from django.conf.urls import include, url
from starlingx_base.registration import registration
from newton_base.openoapi import tenants
-from newton_base.resource import capacity
+from starlingx_base.resource import capacity
from starlingx_base.resource import infra_workload
urlpatterns = [