-rw-r--r--  fcaps/run.sh                                                2
-rw-r--r--  fcaps/stop.sh                                               2
-rwxr-xr-x  lenovo/run.sh                                               2
-rwxr-xr-x  lenovo/stop.sh                                              2
-rw-r--r--  lenovo/thinkcloud/resource/tests/test_events.py           350
-rw-r--r--  lenovo/thinkcloud/resource/views/events.py                 99
-rw-r--r--  lenovo/thinkcloud/urls.py                                   4
-rwxr-xr-x  ocata/run.sh                                                2
-rwxr-xr-x  ocata/stop.sh                                               2
-rwxr-xr-x  pike/run.sh                                                 2
-rwxr-xr-x  pike/stop.sh                                                2
-rw-r--r--  share/common/msapi/helper.py                               63
-rw-r--r--  share/common/utils/aai_cache.py                            11
-rw-r--r--  share/common/utils/restcall.py                             11
-rw-r--r--  share/newton_base/registration/registration.py             13
-rw-r--r--  share/newton_base/resource/infra_workload.py                6
-rw-r--r--  share/newton_base/resource/infra_workload_helper.py       102
-rw-r--r--  share/starlingx_base/registration/registration.py          38
-rw-r--r--  share/starlingx_base/resource/capacity.py                  19
-rw-r--r--  share/starlingx_base/resource/infra_workload.py           118
-rwxr-xr-x  starlingx/run.sh                                            2
-rw-r--r--  starlingx/starlingx/resource/tests/test_capacity.py        14
-rw-r--r--  starlingx/starlingx/urls.py                                 6
-rwxr-xr-x  starlingx/stop.sh                                           2
-rw-r--r--  windriver/run.sh                                            2
-rw-r--r--  windriver/stop.sh                                           2
-rw-r--r--  windriver/titanium_cloud/resource/tests/test_capacity.py  14
-rw-r--r--  windriver/titanium_cloud/urls.py                            6

28 files changed, 261 insertions(+), 637 deletions(-)
diff --git a/fcaps/run.sh b/fcaps/run.sh
index 088a04c2..b5e1a61f 100644
--- a/fcaps/run.sh
+++ b/fcaps/run.sh
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
export PYTHONPATH=lib/share
#service rabbitmq-server restart
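
Note: the same one-flag change recurs in every plugin's run.sh and stop.sh below: dropping -u root means memcached now runs as whatever (non-root) user launches the script instead of being forced to root. A minimal liveness probe for the daemon, assuming the default host/port from the command line above (the probe itself is not part of this patch):

    import socket

    def memcached_alive(host="127.0.0.1", port=11211, timeout=2.0):
        """Return True if a memcached instance answers on host:port."""
        try:
            with socket.create_connection((host, port), timeout=timeout) as s:
                s.sendall(b"version\r\n")          # memcached text protocol
                return s.recv(64).startswith(b"VERSION")
        except OSError:
            return False

    print(memcached_alive())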
diff --git a/fcaps/stop.sh b/fcaps/stop.sh
index 56104f9f..b09ccb9f 100644
--- a/fcaps/stop.sh
+++ b/fcaps/stop.sh
@@ -15,4 +15,4 @@
#ps auxww | grep 'manage.py runserver 0.0.0.0:9011' | awk '{print $2}' | xargs kill -9
ps auxww |grep 'uwsgi --http :9011 --module fcaps.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/lenovo/run.sh b/lenovo/run.sh
index 0fa331f7..48e26cdc 100755
--- a/lenovo/run.sh
+++ b/lenovo/run.sh
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
export PYTHONPATH=lib/share
#nohup python manage.py runserver 0.0.0.0:9010 2>&1 &
diff --git a/lenovo/stop.sh b/lenovo/stop.sh
index f43fab1b..87f80ba8 100755
--- a/lenovo/stop.sh
+++ b/lenovo/stop.sh
@@ -15,4 +15,4 @@
#ps auxww | grep 'manage.py runserver 0.0.0.0:9010' | awk '{print $2}' | xargs kill -9
ps auxww |grep 'uwsgi --http :9010 --module thinkcloud.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/lenovo/thinkcloud/resource/tests/test_events.py b/lenovo/thinkcloud/resource/tests/test_events.py
deleted file mode 100644
index 5a363a4d..00000000
--- a/lenovo/thinkcloud/resource/tests/test_events.py
+++ /dev/null
@@ -1,350 +0,0 @@
-# Copyright (c) 2017-2018 Lenovo Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from rest_framework import status
-
-from newton_base.tests import mock_info
-from newton_base.tests import test_base
-from newton_base.util import VimDriverUtils
-
-MOCK_GET_SERVERS_DETAIL_RESPONSE = {
- "servers": [
- {
- "accessIPv4": "",
- "OS-EXT-SRV-ATTR:instance_name": "instance-0000000a",
- "OS-SRV-USG:terminated_at": "",
- "accessIPv6": "",
- "config_drive": "",
- "OS-DCF:diskConfig": "AUTO",
- "updated": "2018-03-27T02:17:12Z",
- "metadata": {},
- "id": "12f5b1d0-fe5c-469f-a7d4-b62a91134bf8",
- "flavor": {
- "id": "60edb520-5826-4ae7-9e07-709b19ba6f39",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/flavors/60edb520-5826-4ae7-9e07-709b19ba6f39"
- }
- ]
- },
- "links": [
- {
- "rel": "self",
- "href": "http://192.168.100.100:8774/v2.1/ad979139d5ea4a84b21b3620c0e4761e/servers/12f5b1d0-fe5c-469f-a7d4-b62a91134bf8"
- },
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/servers/12f5b1d0-fe5c-469f-a7d4-b62a91134bf8"
- }
- ],
- "OS-EXT-SRV-ATTR:host": "compute-0",
- "OS-EXT-AZ:availability_zone": "nova",
- "name": "test1",
- "wrs-res:pci_devices": "",
- "hostId": "b3479a460f5effda10c6fdb860e824be631026c1d09f551479180577",
- "user_id": "777155411f3042c9b7e3194188d6f85d",
- "status": "PAUSED",
- "OS-EXT-STS:power_state": 3,
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0",
- "tenant_id": "ad979139d5ea4a84b21b3620c0e4761e",
- "OS-SRV-USG:launched_at": "2018-03-27T02:16:40.000000",
- "OS-EXT-STS:vm_state": "paused",
- "wrs-if:nics": [
- {
- "nic1": {
- "mac_address": "fa:16:3e:5f:1a:76",
- "network": "mgmt",
- "port_id": "6c225c23-abe3-42a8-8909-83471503d5d4",
- "vif_model": "virtio",
- "vif_pci_address": "",
- "mtu": 9216
- }
- },
- {
- "nic2": {
- "mac_address": "fa:16:3e:7c:7b:d7",
- "network": "data0",
- "port_id": "cbea2fec-c9b8-48ec-a964-0e3e255841bc",
- "vif_model": "virtio",
- "vif_pci_address": "",
- "mtu": 9216
- }
- }
- ],
- "wrs-sg:server_group": "",
- "OS-EXT-STS:task_state": "",
- "wrs-res:topology": "node:0, 1024MB, pgsize:2M, 1s,1c,2t, vcpus:0,1, pcpus:5,21, siblings:{0,1}, pol:ded, thr:pre\nnode:1, 1024MB, pgsize:2M, 1s,1c,2t, vcpus:2,3, pcpus:8,24, siblings:{2,3}, pol:ded, thr:pre",
- "wrs-res:vcpus": [4, 4, 4],
- "key_name": "",
- "image": {
- "id": "7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/images/7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb"
- }
- ]
- },
- "created": "2018-03-27T02:16:32Z",
- "addresses": {
- "data0": [
- {
- "OS-EXT-IPS:type": "fixed",
- "version": 4,
- "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:7b:d7",
- "addr": "192.168.2.8"
- }
- ],
- "mgmt": [
- {
- "OS-EXT-IPS:type": "fixed",
- "version": 4,
- "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:5f:1a:76",
- "addr": "192.168.1.6"
- }
- ]
- },
- "os-extended-volumes:volumes_attached": []
- },
- {
- "accessIPv4": "",
- "OS-EXT-SRV-ATTR:instance_name": "instance-00000009",
- "OS-SRV-USG:terminated_at": "",
- "accessIPv6": "",
- "config_drive": "",
- "OS-DCF:diskConfig": "AUTO",
- "updated": "2018-03-27T02:12:21Z",
- "metadata": {},
- "id": "3f1b0375-a1db-4d94-b336-f32c82c0d7ec",
- "flavor": {
- "id": "0d3b1381-1626-4f6b-869b-4a4d5d42085e",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/flavors/0d3b1381-1626-4f6b-869b-4a4d5d42085e"
- }
- ]
- },
- "links": [
- {
- "rel": "self",
- "href": "http://192.168.100.100:8774/v2.1/ad979139d5ea4a84b21b3620c0e4761e/servers/3f1b0375-a1db-4d94-b336-f32c82c0d7ec"
- },
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/servers/3f1b0375-a1db-4d94-b336-f32c82c0d7ec"
- }
- ],
- "OS-EXT-SRV-ATTR:host": "compute-0",
- "OS-EXT-AZ:availability_zone": "nova",
- "name": "test2",
- "wrs-res:pci_devices": "",
- "hostId": "b3479a460f5effda10c6fdb860e824be631026c1d09f551479180577",
- "user_id": "777155411f3042c9b7e3194188d6f85d",
- "status": "ACTIVE",
- "OS-EXT-STS:power_state": 1,
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0",
- "tenant_id": "ad979139d5ea4a84b21b3620c0e4761e",
- "OS-SRV-USG:launched_at": "2018-03-27T02:12:21.000000",
- "OS-EXT-STS:vm_state": "active",
- "wrs-if:nics": [
- {
- "nic1": {
- "mac_address": "fa:16:3e:54:f8:a6",
- "network": "mgmt",
- "port_id": "30e2f51c-4473-4650-9ae9-a35e5d7ad452",
- "vif_model": "avp",
- "vif_pci_address": "",
- "mtu": 9216
- }
- }
- ],
- "wrs-sg:server_group": "",
- "OS-EXT-STS:task_state": "",
- "wrs-res:topology": "node:0, 4096MB, pgsize:2M, 1s,3c,1t, vcpus:0-2, pcpus:4,20,7, pol:ded, thr:pre",
- "progress": 0,
- "wrs-res:vcpus": [3, 3, 3],
- "key_name": "",
- "image": {
- "id": "7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/images/7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb"
- }
- ]
- },
- "created": "2018-03-27T02:10:26Z",
- "addresses": {
- "mgmt": [
- {
- "OS-EXT-IPS:type": "fixed",
- "version": 4,
- "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:54:f8:a6",
- "addr": "192.168.1.11"
- }
- ]
- },
- "os-extended-volumes:volumes_attached": []
- },
- {
- "accessIPv4": "",
- "OS-EXT-SRV-ATTR:instance_name": "instance-00000008",
- "OS-SRV-USG:terminated_at": "",
- "accessIPv6": "",
- "config_drive": "",
- "OS-DCF:diskConfig": "AUTO",
- "updated": "2018-03-27T02:12:15Z",
- "metadata": {},
- "id": "1b6f6671-b680-42cd-89e9-fc4ddd5d2e02",
- "flavor": {
- "id": "0d3b1381-1626-4f6b-869b-4a4d5d42085e",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/flavors/0d3b1381-1626-4f6b-869b-4a4d5d42085e"
- }
- ]
- },
- "links": [
- {
- "rel": "self",
- "href": "http://192.168.100.100:8774/v2.1/ad979139d5ea4a84b21b3620c0e4761e/servers/1b6f6671-b680-42cd-89e9-fc4ddd5d2e02"
- },
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/servers/1b6f6671-b680-42cd-89e9-fc4ddd5d2e02"
- }
- ],
- "OS-EXT-SRV-ATTR:host": "compute-0",
- "OS-EXT-AZ:availability_zone": "nova",
- "name": "test3",
- "wrs-res:pci_devices": "",
- "hostId": "b3479a460f5effda10c6fdb860e824be631026c1d09f551479180577",
- "user_id": "777155411f3042c9b7e3194188d6f85d",
- "status": "ACTIVE",
- "OS-EXT-STS:power_state": 1,
- "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute-0",
- "tenant_id": "ad979139d5ea4a84b21b3620c0e4761e",
- "OS-SRV-USG:launched_at": "2018-03-27T02:12:15.000000",
- "OS-EXT-STS:vm_state": "active",
- "wrs-if:nics": [
- {
- "nic1": {
- "mac_address": "fa:16:3e:4e:9b:75",
- "network": "mgmt",
- "port_id": "72d13987-1d94-4a64-aa1a-973869ae1cad",
- "vif_model": "avp",
- "vif_pci_address": "",
- "mtu": 9216
- }
- }
- ],
- "wrs-sg:server_group": "",
- "OS-EXT-STS:task_state": "",
- "wrs-res:topology": "node:0, 4096MB, pgsize:2M, 1s,3c,1t, vcpus:0-2, pcpus:19,3,22, pol:ded, thr:pre",
- "progress": 0,
- "wrs-res:vcpus": [3, 3, 3],
- "key_name": "",
- "image": {
- "id": "7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb",
- "links": [
- {
- "rel": "bookmark",
- "href": "http://192.168.100.100:8774/ad979139d5ea4a84b21b3620c0e4761e/images/7ba636ef-5dfd-4e67-ad32-cd23ee74e1eb"
- }
- ]
- },
- "created": "2018-03-27T02:10:01Z",
- "addresses": {
- "mgmt": [
- {
- "OS-EXT-IPS:type": "fixed",
- "version": 4,
- "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:4e:9b:75",
- "addr": "192.168.1.8"
- }
- ]
- },
- "os-extended-volumes:volumes_attached": []
- }
- ]
-}
-
-SUCCESS_VMSTATE_RESPONSE = {
- 'result': [
- {
- 'name': 'test1',
- 'power_state': 3,
- 'id': '12f5b1d0-fe5c-469f-a7d4-b62a91134bf8',
- 'state': 'paused',
- 'tenant_id': 'ad979139d5ea4a84b21b3620c0e4761e',
- 'host': 'compute-0',
- 'availability_zone': 'nova',
- 'launched_at': '2018-03-27T02:16:40.000000'
- },
- {
- 'name': 'test2',
- 'power_state': 1,
- 'id': '3f1b0375-a1db-4d94-b336-f32c82c0d7ec',
- 'state': 'active',
- 'tenant_id': 'ad979139d5ea4a84b21b3620c0e4761e',
- 'host': 'compute-0',
- 'availability_zone': 'nova',
- 'launched_at': '2018-03-27T02:12:21.000000'
- },
- {
- 'name': 'test3',
- 'power_state': 1,
- 'id': '1b6f6671-b680-42cd-89e9-fc4ddd5d2e02',
- 'state': 'active',
- 'tenant_id': 'ad979139d5ea4a84b21b3620c0e4761e',
- 'host': 'compute-0',
- 'availability_zone': 'nova',
- 'launched_at': '2018-03-27T02:12:15.000000'
- }
- ]
-}
-
-
-class TestEvents(test_base.TestRequest):
- def setUp(self):
- super(TestEvents, self).setUp()
-
- def _get_mock_response(self, return_value=None):
- mock_response = mock.Mock(spec=test_base.MockResponse)
- mock_response.status_code = status.HTTP_200_OK
- mock_response.json.return_value = return_value
- return mock_response
-
- @mock.patch.object(VimDriverUtils, 'get_session')
- @mock.patch.object(VimDriverUtils, 'get_vim_info')
- def test_events_check_success(self, mock_get_vim_info, mock_get_session):
- mock_get_vim_info.return_value = mock_info.MOCK_VIM_INFO
- mock_get_session.return_value = test_base.get_mock_session(
- ["get"], {
- "side_effect": [
- self._get_mock_response(MOCK_GET_SERVERS_DETAIL_RESPONSE),
- ]
- })
-
- response = self.client.post(
- "/api/multicloud-thinkcloud/v0/lenovo-hudson-dc_RegionOne/events_check",
- HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
-
- self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual(SUCCESS_VMSTATE_RESPONSE, response.data)
diff --git a/lenovo/thinkcloud/resource/views/events.py b/lenovo/thinkcloud/resource/views/events.py
deleted file mode 100644
index ab706889..00000000
--- a/lenovo/thinkcloud/resource/views/events.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright (c) 2017-2018 Lenovo Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import traceback
-
-from common.exceptions import VimDriverNewtonException
-from newton_base.util import VimDriverUtils
-
-from keystoneauth1.exceptions import HttpError
-from rest_framework import status
-from rest_framework.response import Response
-from rest_framework.views import APIView
-from common.msapi import extsys
-
-
-logger = logging.getLogger(__name__)
-
-
-class EventsCheck(APIView):
-
- def __init__(self):
- self._logger = logger
-
- def post(self, request, vimid=""):
- self._logger.info("vimid, data> %s, %s" % (vimid, request.data))
- self._logger.debug("META> %s" % request.META)
-
- try:
- tenant_name = None
- vim = VimDriverUtils.get_vim_info(vimid)
- sess = VimDriverUtils.get_session(vim, tenant_name)
-
- # get token:
- cloud_owner, regionid = extsys.decode_vim_id(vimid)
- interface = 'public'
- service = {
- 'service_type': 'compute',
- 'interface': interface,
- 'region_name': vim['openstack_region_id']
- if vim.get('openstack_region_id')
- else vim['cloud_region_id']
- }
-
- # get servers detail info
- req_resouce = "/servers/detail"
- self._logger.info("check servers detail> URI:%s" % req_resouce)
- resp = sess.get(req_resouce, endpoint_filter=service)
- self._logger.info("check servers detail> status:%s" % resp.status_code)
- content = resp.json()
- self._logger.debug("check servers detail> resp data:%s" % content)
-
- # extract server status info
- if len(content['servers']):
- servers = content['servers']
- resp_vmstate = []
- for num in range(0, len(servers)):
- vmstate = {
- 'name': servers[num]['name'],
- 'state': servers[num]['OS-EXT-STS:vm_state'],
- 'power_state': servers[num]['OS-EXT-STS:power_state'],
- 'launched_at': servers[num]['OS-SRV-USG:launched_at'],
- 'id': servers[num]['id'],
- 'host': servers[num]['OS-EXT-SRV-ATTR:host'],
- 'availability_zone': servers[num]['OS-EXT-AZ:availability_zone'],
- 'tenant_id': servers[num]['tenant_id']
- }
-
- resp_vmstate.append(vmstate)
-
- self._logger.info("RESP with data> result:%s" % resp_vmstate)
- return Response(data={'result': resp_vmstate}, status=status.HTTP_200_OK)
-
- except VimDriverNewtonException as e:
- self._logger.error("Plugin exception> status:%s,error:%s" %
- (e.status_code, e.content))
- return Response(data={'result': resp_vmstate, 'error': e.content}, status=e.status_code)
-
- except HttpError as e:
- self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
- resp = e.response.json()
- resp.update({'result': resp_vmstate})
- return Response(data=e.response.json(), status=e.http_status)
-
- except Exception as e:
- self._logger.error(traceback.format_exc())
- return Response(data={'result': resp_vmstate, 'error': str(e)},
- status=status.HTTP_500_INTERNAL_SERVER_ERROR)
diff --git a/lenovo/thinkcloud/urls.py b/lenovo/thinkcloud/urls.py
index 1828bb33..9559b906 100644
--- a/lenovo/thinkcloud/urls.py
+++ b/lenovo/thinkcloud/urls.py
@@ -17,7 +17,6 @@ from django.conf.urls import include, url
from thinkcloud.registration.views import registration
from newton_base.openoapi import tenants
from thinkcloud.resource.views import capacity
-from thinkcloud.resource.views import events
from thinkcloud.resource.views import infra_workload
urlpatterns = [
@@ -42,9 +41,6 @@ urlpatterns = [
# CapacityCheck
url(r'^api/multicloud-thinkcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/capacity_check/?$',
capacity.CapacityCheck.as_view()),
- # events
- url(r'^api/multicloud-thinkcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/events_check/?$',
- events.EventsCheck.as_view()),
# API upgrading
url(r'^api/multicloud-thinkcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/registry$',
diff --git a/ocata/run.sh b/ocata/run.sh
index 5d027658..a1f44ed2 100755
--- a/ocata/run.sh
+++ b/ocata/run.sh
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
export PYTHONPATH=lib/share
diff --git a/ocata/stop.sh b/ocata/stop.sh
index 1402a97c..4e46b2cd 100755
--- a/ocata/stop.sh
+++ b/ocata/stop.sh
@@ -15,4 +15,4 @@
#ps auxww | grep 'manage.py runserver 0.0.0.0:9006' | awk '{print $2}' | xargs kill -9
ps auxww |grep 'uwsgi --http :9006 --module ocata.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/pike/run.sh b/pike/run.sh
index 738ac6cf..3822c8a5 100755
--- a/pike/run.sh
+++ b/pike/run.sh
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
export PYTHONPATH=lib/share
#nohup python manage.py runserver 0.0.0.0:9007 2>&1 &
diff --git a/pike/stop.sh b/pike/stop.sh
index 9e433eb4..38d101d5 100755
--- a/pike/stop.sh
+++ b/pike/stop.sh
@@ -15,4 +15,4 @@
#ps auxww | grep 'manage.py runserver 0.0.0.0:9007' | awk '{print $2}' | xargs kill -9
ps auxww |grep 'uwsgi --http :9007 --module pike.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/share/common/msapi/helper.py b/share/common/msapi/helper.py
index b2ff1d6b..69b91953 100644
--- a/share/common/msapi/helper.py
+++ b/share/common/msapi/helper.py
@@ -13,12 +13,10 @@ import json
import logging
# import re
import uuid
-
import threading
import datetime
import time
-
-import traceback
+#import traceback
# from common.exceptions import VimDriverNewtonException
from common.utils import restcall
@@ -75,7 +73,7 @@ class MultiCloudAAIHelper(object):
'''
def __init__(self, multicloud_prefix, aai_base_url):
- logger.debug("MultiCloudAAIHelper __init__ traceback: %s" % traceback.format_exc())
+ # logger.debug("MultiCloudAAIHelper __init__ traceback: %s" % traceback.format_exc())
self.proxy_prefix = multicloud_prefix
self.aai_base_url = aai_base_url
self._logger = logger
@@ -145,18 +143,18 @@ class MultiCloudAAIHelper(object):
retcode, content, status_code = \
restcall.req_to_aai(resource_url, "PUT", content=resource_info)
- self._logger.debug(
- ("_update_resoure,vimid:%(cloud_owner)s"
- "_%(cloud_region_id)s req_to_aai: %(resoure_id)s, "
- "return %(retcode)s, %(content)s, %(status_code)s")
- % {
- "cloud_owner": cloud_owner,
- "cloud_region_id": cloud_region_id,
- "resoure_id": resoure_id,
- "retcode": retcode,
- "content": content,
- "status_code": status_code,
- })
+ # self._logger.debug(
+ # ("_update_resoure,vimid:%(cloud_owner)s"
+ # "_%(cloud_region_id)s req_to_aai: %(resoure_id)s, "
+ # "return %(retcode)s, %(content)s, %(status_code)s")
+ # % {
+ # "cloud_owner": cloud_owner,
+ # "cloud_region_id": cloud_region_id,
+ # "resoure_id": resoure_id,
+ # "retcode": retcode,
+ # "content": content,
+ # "status_code": status_code,
+ # })
return retcode, content
# unknown cloud owner,region_id
return (
@@ -197,17 +195,17 @@ class MultiCloudThreadHelper(object):
# }
# format of backlog:
# {"<id value of backlog item>": <backlog item>, ...}
+ self.name = name or "default"
self.backlog = {}
# expired backlog items
self.expired_backlog = {}
self.lock = threading.Lock()
self.state_ = 0 # 0: stopped, 1: started
- self.cache_prefix = "bi_"+name+"_"
- self.cache_expired_prefix = "biex_"+name+"_"
+ self.cache_prefix = "bi_"+self.name+"_"
+ self.cache_expired_prefix = "biex_"+self.name+"_"
self.thread = MultiCloudThreadHelper.HelperThread(self)
- self.thread.start()
-
+ # self.thread.start()
def state(self):
return self.state_
@@ -217,7 +215,7 @@ class MultiCloudThreadHelper(object):
if 0 == self.state_:
self.state_ = 1
# self.thread = MultiCloudThreadHelper.HelperThread(self)
- # self.thread.start()
+ self.thread.start()
else:
pass
self.lock.release()
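
Note: together these two hunks make thread startup lazy: __init__ now only constructs the named HelperThread, and start() launches it under the lock exactly once (state_ flips 0 to 1). A minimal sketch of the pattern, with illustrative names rather than the module's API; a real restart would need a fresh Thread object, which is why the original keeps the re-construction line commented out in start():

    import threading

    class LazyWorker(object):
        def __init__(self, name):
            self.name = name
            self.state_ = 0                       # 0: stopped, 1: started
            self.lock = threading.Lock()
            self.thread = threading.Thread(target=self._run)

        def start(self):
            with self.lock:
                if self.state_ == 0:              # idempotent start
                    self.state_ = 1
                    self.thread.start()

        def _run(self):
            print("worker %s running" % self.name)

    w = LazyWorker("vimupdater")
    w.start()
    w.start()   # no-op: the state flag guards the second call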
@@ -227,9 +225,10 @@ class MultiCloudThreadHelper(object):
def add(self, backlog_item):
cache_for_query = None
- if not hasattr(backlog_item, "worker"):
+ if not backlog_item.get("worker", None):
+ logger.warn("Fail to add backlog item: %s" % backlog_item)
return None
- if not hasattr(backlog_item, "id"):
+ if not backlog_item.get("id", None):
backlog_item["id"] = str(uuid.uuid1())
else:
cache_for_query = {
@@ -237,7 +236,7 @@ class MultiCloudThreadHelper(object):
"status": backlog_item.get("status", None)
}
- if not hasattr(backlog_item, "repeat"):
+ if not backlog_item.get("repeat", None):
backlog_item["repeat"] = 0
backlog_item["timestamp"] = 0
@@ -248,8 +247,9 @@ class MultiCloudThreadHelper(object):
json.dumps(cache_for_query), 3600 * 24)
self.expired_backlog.pop(backlog_item["id"], None)
- self.backlog.update(backlog_item["id"], backlog_item)
+ self.backlog[backlog_item["id"]] = backlog_item
# self.lock.release()
+ logger.debug("Add backlog item: %s" % backlog_item)
return len(self.backlog)
def get(self, backlog_id):
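
Note: the hasattr() to get() rewrite in add() is a genuine bug fix: backlog items are plain dicts, and hasattr() tests attributes, never keys, so every check was False. The backlog.update(key, value) call was equally broken, since dict.update() takes a mapping, not a key/value pair. A short demonstration:

    item = {"id": "vim-1", "worker": lambda: 0}

    print(hasattr(item, "worker"))   # False: "worker" is a key, not an attribute
    print("worker" in item)          # True: membership is the right test
    print(item.get("repeat", None))  # None-safe lookup, as the patched add() does

    backlog = {}
    # dict.update("key", value) raises TypeError; item assignment is correct:
    backlog[item["id"]] = item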
@@ -305,17 +305,19 @@ class MultiCloudThreadHelper(object):
self.duration = 0
self.owner = owner
# debug: dump the callstack to determine the callstack, hence the lcm
- logger.debug("HelperThread __init__ : %s" % traceback.format_exc())
+ # logger.debug("HelperThread __init__ : %s" % traceback.format_exc())
def run(self):
- logger.debug("Start processing backlogs")
+ logger.debug("Thread %s starts processing backlogs" % self.owner.name)
nexttimer = 0
while self.owner.state_ == 1: # and self.owner.count() > 0:
if nexttimer > 1000000:
# sleep in case of interval > 1 second
time.sleep(nexttimer // 1000000)
nexttimer = 30*1000000 # initial interval in us to be updated:30 seconds
- for backlog_id, item in self.owner.backlog:
+ # logger.debug("self.owner.backlog: %s, len: %s" % (self.owner.name, len(self.owner.backlog)))
+ for backlog_id, item in self.owner.backlog.items():
+ # logger.debug("evaluate backlog item: %s" % item)
# check interval for repeatable backlog item
now = MultiCloudThreadHelper.get_epoch_now_usecond()
repeat_interval = item.get("repeat", 0)
@@ -331,10 +333,11 @@ class MultiCloudThreadHelper(object):
# not time to run this backlog item yet
continue
+ # logger.debug("process backlog item: %s" % backlog_id)
worker = item.get("worker", None)
payload = item.get("payload", None)
try:
- item["status"] = worker(payload) or 0
+ item["status"] = worker(*payload) or 0
except Exception as e:
item["status"] = e.message
cache_item_for_query = {
@@ -364,6 +367,6 @@ class MultiCloudThreadHelper(object):
# while True:
# logger.debug("thread sleep for 5 seconds")
# time.sleep(5) # wait forever, testonly
- logger.debug("stop processing backlogs")
+ logger.debug("Thread %s stops processing backlogs" % self.owner.name)
self.owner.state_ = 0
# end of processing
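
Note: the run loop now iterates backlog.items() (iterating the dict itself yields only keys, so the old two-variable for loop raised ValueError) and calls worker(*payload), unpacking the payload tuple into positional arguments. That unpacking is why every payload in this patch stops carrying the helper object: the workers are bound methods, so self is already implicit. A sketch:

    class Helper(object):
        def registryV0(self, vimid, project):
            return "registered %s/%s" % (vimid, project)

    h = Helper()
    worker = h.registryV0                 # bound method: self is baked in
    payload = ("vim-1", "demo-project")   # no helper object in the tuple

    print(worker(*payload))               # registered vim-1/demo-project
    # The old payload (h, "vim-1", "demo-project") would pass h twice under
    # worker(*payload) and raise a TypeError.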
diff --git a/share/common/utils/aai_cache.py b/share/common/utils/aai_cache.py
index 53298bb8..41506aca 100644
--- a/share/common/utils/aai_cache.py
+++ b/share/common/utils/aai_cache.py
@@ -26,12 +26,14 @@ def flush_cache_by_url(resource_url):
def get_cache_by_url(resource_url):
try:
- if (filter_cache_by_url(resource_url)):
+ if filter_cache_by_url(resource_url):
value = cache.get("AAI_" + resource_url)
+ # logger.debug("Find cache the resource: %s, %s" %( resource_url, value))
return json.loads(value) if value else None
else:
return None
- except:
+ except Exception as e:
+ logger.error("get_cache_by_url exception: %s" % e.message)
return None
@@ -40,9 +42,10 @@ def set_cache_by_url(resource_url, resource_in_json):
# filter out unmanaged AAI resource
if filter_cache_by_url(resource_url):
# cache the resource for 24 hours
- logger.debug("Cache the resource: "+ resource_url)
+ # logger.debug("Cache the resource: "+ resource_url)
cache.set("AAI_" + resource_url, json.dumps(resource_in_json), 3600 * 24)
- except:
+ except Exception as e:
+ logger.error("get_cache_by_url exception: %s" % e.message)
pass
def filter_cache_by_url(resource_url):
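
Note: swapping the bare except: for except Exception as e plus a log line is an improvement, but e.message only exists on Python 2 exceptions (this codebase targets Python 2); str(e) would be the version-neutral spelling if the code ever moves to Python 3:

    import json
    import logging

    logger = logging.getLogger(__name__)

    try:
        json.loads(None)                  # provoke a TypeError for the demo
    except Exception as e:
        # str(e) works on Python 2 and 3; e.message is Python 2 only.
        logger.error("get_cache_by_url exception: %s" % str(e))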
diff --git a/share/common/utils/restcall.py b/share/common/utils/restcall.py
index eb4cb008..464dd65f 100644
--- a/share/common/utils/restcall.py
+++ b/share/common/utils/restcall.py
@@ -65,9 +65,10 @@ def _call_req(base_url, user, passwd, auth_type,
headers['Authorization'] = 'Basic ' + \
base64.b64encode(tmpauthsource).decode('utf-8')
- logger.info("Making rest call with uri,method, header = %s, %s, %s" % (full_url, method.upper(), headers))
+ logger.info("Making rest call with method, uri, header = %s, %s, %s" %
+ (method.upper(), full_url, headers))
if content:
- logger.debug("with content = %s" % (content))
+ logger.debug("with content = %s" % content)
ca_certs = None
for retry_times in range(MAX_RETRY_TIME):
@@ -138,8 +139,9 @@ def req_to_aai(resource, method, content='', appid=settings.MULTICLOUD_APP_ID, n
# hook to flush cache
if method.upper() in ["PUT", "POST", "PATCH", "DELETE"]:
aai_cache.flush_cache_by_url(resource)
- elif method.upper in ["GET"] and not nocache:
+ elif method.upper() in ["GET"] and not nocache:
content = aai_cache.get_cache_by_url(resource)
+ # logger.debug("cached resource: %s, %s" % (resource, content))
if content:
return content
@@ -148,7 +150,8 @@ def req_to_aai(resource, method, content='', appid=settings.MULTICLOUD_APP_ID, n
resource, method, content=json.dumps(content), extra_headers=headers)
if method.upper() in ["GET"] and ret == 0 and not nocache:
- aai_cache.set_cache_by_url(resource, [ret, resp_body, resp_status])
+ # aai_cache.set_cache_by_url(resource, [ret, resp_body, resp_status])
+ aai_cache.set_cache_by_url(resource, (ret, resp_body, resp_status))
return [ret, resp_body, resp_status]
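
Note: the elif fix above is a one-character bug with a large effect: method.upper without parentheses is the bound method object, and comparing it to ["GET"] is always False, so GET responses were never served from the AAI cache:

    method = "get"

    print(method.upper in ["GET"])    # False: a method object never equals "GET"
    print(method.upper() in ["GET"])  # True: call it, then compare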
diff --git a/share/newton_base/registration/registration.py b/share/newton_base/registration/registration.py
index 550c394e..fe31478d 100644
--- a/share/newton_base/registration/registration.py
+++ b/share/newton_base/registration/registration.py
@@ -41,7 +41,7 @@ class Registry(APIView):
if not hasattr(self, "register_thread"):
# dedicate thread to offload vim registration process
- self.register_thread = MultiCloudThreadHelper()
+ self.register_thread = MultiCloudThreadHelper("vimupdater")
if not hasattr(self, "register_helper") or not self.register_helper:
if not hasattr(self, "proxy_prefix"):
@@ -64,12 +64,10 @@ class Registry(APIView):
backlog_item = {
"id": vimid,
"worker": self.register_helper.registryV0,
- "payload": (self.register_helper,
- vimid, specified_project_idorname),
+ "payload": (vimid, specified_project_idorname),
"repeat": 0,
"status": (1,
- "The registration process waits to"
- " be scheduled to run")
+ "The registration is on progress")
}
self.register_thread.add(backlog_item)
if 0 == self.register_thread.state():
@@ -120,10 +118,9 @@ class Registry(APIView):
backlog_item = {
"id": vimid,
"worker": self.register_helper.unregistryV0,
- "payload": (self.register_helper, vimid),
+ "payload": (vimid),
"repeat": 0,
- "status": (1, "The registration process waits"
- " to be scheduled to run")
+ "status": (1, "The de-registration is on process")
}
self.register_thread.add(backlog_item)
if 0 == self.register_thread.state():
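
Note: both payload tuples shrink here because the workers (registryV0, unregistryV0) are bound methods; see the worker(*payload) sketch after the helper.py diff above. The trailing comma in (vimid,) matters: without it, (vimid) is just a parenthesized string, and unpacking it would splat the vimid character by character:

    vimid = "owner_region"

    payload = (vimid)      # str — *payload would pass 12 one-char arguments
    payload_ok = (vimid,)  # one-element tuple — passes vimid intact

    print(type(payload).__name__, type(payload_ok).__name__)   # str tuple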
diff --git a/share/newton_base/resource/infra_workload.py b/share/newton_base/resource/infra_workload.py
index 7bf07952..db216ce2 100644
--- a/share/newton_base/resource/infra_workload.py
+++ b/share/newton_base/resource/infra_workload.py
@@ -36,7 +36,7 @@ class InfraWorkload(APIView):
def __init__(self):
self._logger = logger
- def post(self, request, vimid=""):
+ def post(self, request, vimid="", requri=""):
self._logger.info("vimid: %s" % (vimid))
self._logger.info("data: %s" % (request.data))
self._logger.debug("META: %s" % request.META)
@@ -529,13 +529,13 @@ class APIv1InfraWorkload(InfraWorkload):
super(APIv1InfraWorkload, self).__init__()
# self._logger = logger
- def post(self, request, cloud_owner="", cloud_region_id=""):
+ def post(self, request, cloud_owner="", cloud_region_id="", requri=""):
# self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
# (cloud_owner, cloud_region_id, request.data))
# self._logger.debug("META: %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- return super(APIv1InfraWorkload, self).post(request, vimid)
+ return super(APIv1InfraWorkload, self).post(request, vimid, requri)
def get(self, request, cloud_owner="", cloud_region_id="", requri=""):
# self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
diff --git a/share/newton_base/resource/infra_workload_helper.py b/share/newton_base/resource/infra_workload_helper.py
index bfc3fc86..13d1e18d 100644
--- a/share/newton_base/resource/infra_workload_helper.py
+++ b/share/newton_base/resource/infra_workload_helper.py
@@ -14,7 +14,7 @@
import logging
import json
-
+from rest_framework import status
from django.conf import settings
from common.msapi import extsys
from common.msapi.helper import Helper as helper
@@ -50,7 +50,7 @@ class InfraWorkloadHelper(object):
template_data = data.get("template_data", {})
# resp_template = None
if not template_type or "heat" != template_type.lower():
- return 14, "CREATE_FAILED", \
+ return status.HTTP_400_BAD_REQUEST, "CREATE_FAILED", \
"Bad parameters: template type %s is not heat" %\
template_type or ""
@@ -93,7 +93,7 @@ class InfraWorkloadHelper(object):
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
return (
- retcode, "CREATE_FAILED", errmsg
+ os_status, "CREATE_FAILED", errmsg
)
# tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
@@ -111,8 +111,8 @@ class InfraWorkloadHelper(object):
# stackid = stack1["id"] if stack1 else ""
return 0, "CREATE_IN_PROGRESS", stack1
else:
- self._logger.info("RESP with data> result:%s" % content)
- return retcode, "CREATE_FAILED", content
+ self._logger.info("workload_create fail: %s" % content)
+ return os_status, "CREATE_FAILED", content
def workload_update(self, vimid, stack_id, otherinfo=None, project_idorname=None):
'''
@@ -139,7 +139,7 @@ class InfraWorkloadHelper(object):
errmsg = "authenticate fails:%s, %s, %s" %\
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
- return retcode, "UPDATE_FAILED", errmsg
+ return os_status, "UPDATE_FAILED", errmsg
tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
# tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
@@ -164,7 +164,7 @@ class InfraWorkloadHelper(object):
errmsg = "stack:%s, query fails: %s" %\
(resource_uri, content)
logger.error(errmsg)
- return retcode, "UPDATE_FAILED", errmsg
+ return os_status, "UPDATE_FAILED", errmsg
# find and update resources
# transactions = []
@@ -174,7 +174,7 @@ class InfraWorkloadHelper(object):
errmsg = "stack: %s, resource not ready :%s" % \
(resource_uri, resource)
logger.info(errmsg)
- return retcode, "UPDATE_FAILED", errmsg
+ return status.HTTP_206_PARTIAL_CONTENT, "UPDATE_FAILED", errmsg
# continue
if resource.get('resource_type', None) == 'OS::Nova::Server':
# retrieve vserver details
@@ -192,7 +192,7 @@ class InfraWorkloadHelper(object):
errmsg = "stack resource:%s, query fails: %s" % \
(resource_uri, content)
logger.error(errmsg)
- return retcode, "UPDATE_FAILED", errmsg
+ return os_status, "UPDATE_FAILED", errmsg
vserver_detail = content.get('server', None) if retcode == 0 and content else None
if vserver_detail:
# compose inventory entry for vserver
@@ -229,7 +229,7 @@ class InfraWorkloadHelper(object):
(aai_resource['uri'], content))
except Exception as e:
self._logger.error(e.message)
- return retcode, "UPDATE_FAILED", e.message
+ return status.HTTP_500_INTERNAL_SERVER_ERROR, "UPDATE_FAILED", e.message
# aai_resource_transactions = {"put": [aai_resource]}
# transactions.append(aai_resource_transactions)
@@ -254,7 +254,7 @@ class InfraWorkloadHelper(object):
errmsg = "stack resource:%s, query fails: %s" % \
(resource_uri, content)
logger.error(errmsg)
- return retcode, "UPDATE_FAILED", errmsg
+ return os_status, "UPDATE_FAILED", errmsg
vport_detail = content.get('port', None) if retcode == 0 and content else None
if vport_detail:
@@ -285,7 +285,7 @@ class InfraWorkloadHelper(object):
(aai_resource['uri'], content))
except Exception as e:
self._logger.error(e.message)
- return retcode, "UPDATE_FAILED", e.message
+ return status.HTTP_500_INTERNAL_SERVER_ERROR, "UPDATE_FAILED", e.message
# aai_resource_transactions = {"put": [aai_resource]}
# transactions.append(aai_resource_transactions)
@@ -320,7 +320,7 @@ class InfraWorkloadHelper(object):
errmsg = "authenticate fails:%s, %s, %s" %\
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
- return retcode, "DELETE_FAILED", errmsg
+ return os_status, "DELETE_FAILED", errmsg
tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
# tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
@@ -352,7 +352,7 @@ class InfraWorkloadHelper(object):
restcall.req_to_aai(vserver_list_url, "GET")
if retcode > 0 or not content:
self._logger.debug("AAI get %s response: %s" % (vserver_list_url, content))
- return (retcode, "DELETE_FAILED", "authenticate fails:%s, %s, %s" %
+ return (status_code, "DELETE_FAILED", "authenticate fails:%s, %s, %s" %
(cloud_owner, regionid, v2_token_resp_json))
content = json.JSONDecoder().decode(content)
@@ -390,7 +390,7 @@ class InfraWorkloadHelper(object):
return 0, "DELETE_COMPLETE", "succeed"
except Exception as e:
self._logger.error(e.message)
- return 12, "DELETE_FAILED", e.message
+ return status.HTTP_500_INTERNAL_SERVER_ERROR, "DELETE_FAILED", e.message
pass
def workload_status(self, vimid, stack_id=None, stack_name=None, otherinfo=None, project_idorname=None):
@@ -416,13 +416,15 @@ class InfraWorkloadHelper(object):
errmsg = "authenticate fails:%s, %s, %s" % \
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
- return retcode, "GET_FAILED", errmsg
+ return os_status, "GET_FAILED", errmsg
# get stack status
service_type = "orchestration"
- resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
- if stack_name:
- resource_uri = "/stacks?name=%s" % stack_id if not stack_id else resource_uri
+ resource_uri = "/stacks"
+ if stack_id:
+ resource_uri = "/stacks/id=%s" % stack_id
+ elif stack_name:
+ resource_uri = "/stacks?name=%s" % stack_name
self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
retcode, content, os_status = \
@@ -434,12 +436,66 @@ class InfraWorkloadHelper(object):
if retcode > 0 or not content:
errmsg = "Stack query %s response: %s" % (resource_uri, content)
self._logger.debug(errmsg)
- return retcode, "GET_FAILED", errmsg
+ return os_status, "GET_FAILED", errmsg
stacks = content.get('stacks', []) # if retcode == 0 and content else []
- stack_status = stacks[0].get("stack_status", "GET_FAILED") if len(stacks) > 0 else "GET_FAILED"
+ # stack_status = stacks[0].get("stack_status", "GET_FAILED") if len(stacks) > 0 else "GET_FAILED"
+ workload_status = "GET_COMPLETE"
+
+ return retcode, workload_status, content
+ except Exception as e:
+ self._logger.error(e.message)
+ return status.HTTP_500_INTERNAL_SERVER_ERROR, "GET_FAILED", e.message
+
+
+ def workload_detail(self, vimid, stack_id, nexturi=None, otherinfo=None, project_idorname=None):
+ '''
+ get workload status by either stack id or name
+ :param vimid:
+ :param stack_id:
+ :param nexturi: stacks/<stack id>/<nexturi>
+ :param otherinfo:
+ :return:
+ '''
+ try:
+ # assume the workload_type is heat
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that the selflink is updated by multicloud
+ retcode, v2_token_resp_json, os_status = \
+ helper.MultiCloudIdentityHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens",
+ {"Project": project_idorname})
+
+ if retcode > 0 or not v2_token_resp_json:
+ errmsg = "authenticate fails:%s, %s, %s" % \
+ (cloud_owner, regionid, v2_token_resp_json)
+ logger.error(errmsg)
+ return os_status, "GET_FAILED", errmsg
+
+ # get stack status
+ service_type = "orchestration"
+ resource_uri = "/stacks/%s" % stack_id
+ if nexturi:
+ resource_uri += "/" + nexturi
+
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ retcode, content, os_status = \
+ helper.MultiCloudServiceHelper(cloud_owner, regionid,
+ v2_token_resp_json,
+ service_type, resource_uri,
+ None, "GET")
+
+ if retcode > 0 or not content:
+ errmsg = "Stack query %s response: %s" % (resource_uri, content)
+ self._logger.debug(errmsg)
+ return os_status, "GET_FAILED", errmsg
+
+ stack = content.get('stack', {}) # if retcode == 0 and content else []
+ # stack_status = stack.get("stack_status", "GET_FAILED")
+ workload_status = "GET_COMPLETE"
- return retcode, stack_status, content
+ return 0, workload_status, content
except Exception as e:
self._logger.error(e.message)
- return 12, "GET_FAILED", e.message
+ return status.HTTP_500_INTERNAL_SERVER_ERROR, "GET_FAILED", e.message
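
Note: these hunks normalize the helpers' return convention: every workload_* method returns a (code, state, detail) triple, where code is 0 on success and otherwise an HTTP status (rest_framework.status constants or the upstream os_status) instead of magic numbers like 12 and 14. A hedged sketch of how a caller consumes the triple (report() is illustrative, not the module's API):

    from rest_framework import status

    def report(result):
        code, state, detail = result
        http_code = status.HTTP_200_OK if code == 0 else code
        return http_code, {"workload_status": state,
                           "workload_status_reason": detail}

    print(report((0, "CREATE_IN_PROGRESS", {"id": "stack-1"})))
    print(report((status.HTTP_400_BAD_REQUEST, "CREATE_FAILED",
                  "template type is not heat")))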
diff --git a/share/starlingx_base/registration/registration.py b/share/starlingx_base/registration/registration.py
index 507f0fbf..dd71c1b4 100644
--- a/share/starlingx_base/registration/registration.py
+++ b/share/starlingx_base/registration/registration.py
@@ -58,7 +58,7 @@ class APIv0Registry(newton_registration.Registry):
backlog_item = {
"id": vimid,
"worker": worker_self.azcap_audit,
- "payload": (worker_self, vimid, specified_project_idorname),
+ "payload": (vimid, specified_project_idorname),
"repeat": 10*1000000, # repeat every 10 seconds
}
gAZCapAuditThread.add(backlog_item)
@@ -88,6 +88,9 @@ class APIv1Registry(newton_registration.Registry):
% (cloud_owner, cloud_region_id))
try:
+ # Get the specified tenant id
+ specified_project_idorname = request.META.get("Project", None)
+
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
# vim registration will trigger the start the audit of AZ capacity
@@ -98,7 +101,7 @@ class APIv1Registry(newton_registration.Registry):
backlog_item = {
"id": vimid,
"worker": worker_self.azcap_audit,
- "payload": (worker_self, vimid),
+ "payload": (vimid, specified_project_idorname),
"repeat": 5 * 1000000, # repeat every 5 seconds
}
gAZCapAuditThread.add(backlog_item)
@@ -162,6 +165,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
multi_region_discovery = cloud_extra_info.get(
"multi-region-discovery", None) if cloud_extra_info else None
+ sess = None
if project_idorname:
try:
# check if specified with tenant id
@@ -403,6 +407,7 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
self._logger.warn("azcap_audit no valid vimid: %s" % vimid)
return
+ sess = None
if project_idorname:
try:
# check if specified with tenant id
@@ -456,9 +461,8 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
viminfo, vimid,
"availabilityZoneInfo"):
az_info = {
- 'availability-zone-name': az['zoneName'],
- 'operational-status': az['zoneState']['available']
- if az.get('zoneState') else '',
+ 'availability-zone-name': az.get('zoneName', ""),
+ 'operational-status': az.get('zoneState', {}).get('available', ""),
'hypervisor-type': '',
}
# filter out the default az: "internal" and "nova"
@@ -477,7 +481,7 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
# Get current cap info of azName
azCapCacheKey = "cap_" + vimid + "_" + azName
azCapInfoCacheStr = cache.get(azCapCacheKey)
- azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else None
+ azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else {}
for psname in pservers_info:
psinfo = hypervisors_dict.get(psname, None)
@@ -487,7 +491,7 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
# get current pserver cap info
psCapInfoCacheKey = "cap_" + vimid + "_" + psname
psCapInfoCacheStr = cache.get(psCapInfoCacheKey)
- psCapInfoCache = json.loads(psCapInfoCacheStr) if psCapInfoCacheStr else None
+ psCapInfoCache = json.loads(psCapInfoCacheStr) if psCapInfoCacheStr else {}
# compare latest info with cached one
vcpu_delta = 0
@@ -520,19 +524,21 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
localstorage_free_delta += psinfo.get("free_disk_gb", 0)\
- psCapInfoCache.get("free_disk_gb", 0)
psCapInfoCache["free_disk_gb"] = psinfo.get("free_disk_gb", 0)
- pass
- # now apply the delta to azCapInfo
- azCapInfoCache["vcpus"] = azCapInfoCache.get("vcpus", 0) + vcpu_delta
- azCapInfoCache["memory_mb"] = azCapInfoCache.get("memory_mb", 0) + mem_delta
- azCapInfoCache["local_gb"] = azCapInfoCache.get("local_gb", 0) + localstorage_delta
- azCapInfoCache["vcpus_used"] = azCapInfoCache.get("vcpus_used", 0) + vcpu_used_delta
- azCapInfoCache["free_ram_mb"] = azCapInfoCache.get("free_ram_mb", 0) + mem_free_delta
- azCapInfoCache["free_disk_gb"] = azCapInfoCache.get("free_disk_gb", 0) + localstorage_free_delta
+ cache.set(psCapInfoCacheKey, json.dumps(psCapInfoCache), 3600 * 24)
+
+ # now apply the delta to azCapInfo
+ azCapInfoCache["vcpus"] = azCapInfoCache.get("vcpus", 0) + vcpu_delta
+ azCapInfoCache["memory_mb"] = azCapInfoCache.get("memory_mb", 0) + mem_delta
+ azCapInfoCache["local_gb"] = azCapInfoCache.get("local_gb", 0) + localstorage_delta
+ azCapInfoCache["vcpus_used"] = azCapInfoCache.get("vcpus_used", 0) + vcpu_used_delta
+ azCapInfoCache["free_ram_mb"] = azCapInfoCache.get("free_ram_mb", 0) + mem_free_delta
+ azCapInfoCache["free_disk_gb"] = azCapInfoCache.get("free_disk_gb", 0) + localstorage_free_delta
+ pass
# update the cache
cache.set(azCapCacheKey, json.dumps(azCapInfoCache), 3600 * 24)
- cache.set(vimAzCacheKey, vimAzList, 3600 * 24)
+ cache.set(vimAzCacheKey, json.dumps(vimAzList), 3600 * 24)
except Exception as e:
self._logger.error("azcap_audit raise exception: %s" % e)
pass
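
Note: besides moving the per-pserver delta application to the correct indentation level and persisting psCapInfoCache, the audit now stores JSON strings in the cache (json.dumps on write), matching the json.loads on the read side in capacity.py; previously vimAzList went in as a raw Python list, which the reader's json.loads cannot parse. Illustrative round trip, with a plain dict standing in for the Django cache:

    import json

    cache = {}                                        # stand-in for django cache

    az_list = ["az-1", "az-2"]
    cache["cap_azlist_vim-1"] = json.dumps(az_list)   # write side (the fix)
    restored = json.loads(cache["cap_azlist_vim-1"])  # read side (capacity.py)
    assert restored == az_list
    # Storing the raw list, as before, makes the reader's json.loads raise
    # a TypeError.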
diff --git a/share/starlingx_base/resource/capacity.py b/share/starlingx_base/resource/capacity.py
index cbdedaa3..861d4d50 100644
--- a/share/starlingx_base/resource/capacity.py
+++ b/share/starlingx_base/resource/capacity.py
@@ -67,27 +67,34 @@ class CapacityCheck(newton_capacity.CapacityCheck):
# get list of AZ
vimAzCacheKey = "cap_azlist_" + vimid
vimAzListCacheStr = cache.get(vimAzCacheKey)
+ self._logger.debug("Found AZ list: %s" % vimAzListCacheStr)
vimAzListCache = json.loads(vimAzListCacheStr) if vimAzListCacheStr else []
azCapInfoList = []
for azName in vimAzListCache:
azCapCacheKey = "cap_" + vimid + "_" + azName
azCapInfoCacheStr = cache.get(azCapCacheKey)
+ self._logger.debug("Found AZ info: %s, %s" % (azCapCacheKey, azCapInfoCacheStr))
if not azCapInfoCacheStr:
continue
azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else None
azCapInfo = {}
azCapInfo["availability-zone-name"] = azName
- azCapInfo["vCPUAvail"] = azCapInfoCache.get("vcpus", 0) + azCapInfoCache.get("vcpus_used", 0)
- azCapInfo["vCPUTotal"] = azCapInfoCache.get("vcpus", 0)
- azCapInfo["MemoryAvail"] = azCapInfoCache.get("vcpus", 0)
- azCapInfo["MemoryTotal"] = azCapInfoCache.get("vcpus", 0)
- azCapInfo["StorageAvail"] = azCapInfoCache.get("vcpus", 0)
- azCapInfo["StorageTotal"] = azCapInfoCache.get("vcpus", 0)
+ # vcpu ratio: cpu_allocation_ratio=16 by default
+ azCapInfo["vCPUAvail"] = \
+ (azCapInfoCache.get("vcpus", 0)
+ - azCapInfoCache.get("vcpus_used", 0)) * 16
+ azCapInfo["vCPUTotal"] = azCapInfoCache.get("vcpus", 0) * 16
+                # memory sizes below are converted from MB to GB
+ azCapInfo["MemoryAvail"] = azCapInfoCache.get("free_ram_mb", 0) / 1024.0
+ azCapInfo["MemoryTotal"] = azCapInfoCache.get("memory_mb", 0) / 1024.0
+ azCapInfo["StorageAvail"] = azCapInfoCache.get("free_disk_gb", 0)
+ azCapInfo["StorageTotal"] = azCapInfoCache.get("local_gb", 0)
azCapInfoList.append(azCapInfo)
return azCapInfoList
except Exception as e:
+ self._logger.error(traceback.format_exc())
return azCapInfo
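
Note: the capacity report now derives real figures instead of echoing vcpus into all six fields: vCPU counts are scaled by 16, the nova cpu_allocation_ratio default hard-coded in the hunk above, and memory is converted from MB to GB. The arithmetic, with made-up sample numbers:

    az = {"vcpus": 8, "vcpus_used": 3, "free_ram_mb": 14336,
          "memory_mb": 32768, "free_disk_gb": 80, "local_gb": 100}

    cap = {
        "vCPUAvail": (az["vcpus"] - az["vcpus_used"]) * 16,   # 80
        "vCPUTotal": az["vcpus"] * 16,                        # 128
        "MemoryAvail": az["free_ram_mb"] / 1024.0,            # 14.0 GB
        "MemoryTotal": az["memory_mb"] / 1024.0,              # 32.0 GB
        "StorageAvail": az["free_disk_gb"],                   # already GB
        "StorageTotal": az["local_gb"],
    }
    print(cap)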
diff --git a/share/starlingx_base/resource/infra_workload.py b/share/starlingx_base/resource/infra_workload.py
index 6b064856..fc6d7ef2 100644
--- a/share/starlingx_base/resource/infra_workload.py
+++ b/share/starlingx_base/resource/infra_workload.py
@@ -85,19 +85,21 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
backlog_item = {
"id": workloadid,
"worker": worker_self.workload_update,
- "payload": (worker_self, vimid, workloadid,
+ "payload": (vimid, workloadid,
request.data, specified_project_idorname),
"repeat": 0, # one time job
# format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
"status": (
0, "UPDATE_IN_PROGRESS",
- "backlog to update workload %s pends to schedule" % workloadid
+ "backlog to update workload %s is on progress" % workloadid
)
}
gInfraWorkloadThread.add(backlog_item)
if 0 == gInfraWorkloadThread.state():
gInfraWorkloadThread.start()
-
+ # progress = worker_self.workload_update(
+ # vimid, workloadid,
+ # request.data, specified_project_idorname)
# now query the progress
backlog_item = gInfraWorkloadThread.get(workloadid)
if not backlog_item:
@@ -156,10 +158,10 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
# now check the query params in case of query existing of workload
querystr = request.META.get("QUERY_STRING", None)
qd = QueryDict(querystr).dict() if querystr else None
- workload_name = qd.get("name", None) if qd else None
- workload_id = qd.get("id", None) if qd else None
+ workload_query_name = qd.get("name", None) if qd else None
+ workload_query_id = qd.get("id", None) if qd else None
- if not workload_name and not workload_id:
+ if not workload_query_name and not workload_query_id:
resp_template["workload_status_reason"] =\
"workload id is not found in API url"
return Response(
@@ -175,64 +177,65 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
# now query the status of workload by name or id, id as 1st priority
progress_code, progress_status, progress_msg =\
0, "GET_FAILED", ""
- if not workload_id:
- # by name
+ if workload_query_id:
+ # by id
progress_code, progress_status, progress_msg =\
worker_self.workload_status(
- vimid, stack_name=workload_name,
+ vimid, stack_id=workload_query_id,
project_idorname=specified_project_idorname
)
else:
- # by id
+ # by name or get all stacks
progress_code, progress_status, progress_msg =\
worker_self.workload_status(
- vimid, stack_id=workloadid,
+ vimid, stack_name=workload_query_name,
project_idorname=specified_project_idorname
)
resp_template["workload_status"] = progress_status
resp_template["workload_status_reason"] = progress_msg
status_code = status.HTTP_200_OK \
- if progress_code == 0 else progress_code
+ if progress_code == 0 else status.HTTP_500_INTERNAL_SERVER_ERROR # progress_code
pass
- # now query the progress
- backlog_item = gInfraWorkloadThread.get(workloadid)
- if not backlog_item:
- # backlog item not found, so check the stack status
- worker_self = InfraWorkloadHelper(
- settings.MULTICLOUD_API_V1_PREFIX,
- settings.AAI_BASE_URL
- )
- progress_code, progress_status, progress_msg =\
- worker_self.workload_status(
- vimid, stack_id=workloadid,
- project_idorname=specified_project_idorname)
-
- resp_template["workload_status"] = progress_status
- resp_template["workload_status_reason"] = progress_msg
- status_code = status.HTTP_200_OK\
- if progress_code == 0 else progress_code
-
else:
- progress = backlog_item.get(
- "status",
- (13, "GET_FAILED",
- "Unexpected:status not found in backlog item")
- )
- try:
- progress_code = progress[0]
- progress_status = progress[1]
- progress_msg = progress[2]
- # if gInfraWorkloadThread.expired(workloadid):
- # gInfraWorkloadThread.remove(workloadid)
+ # now query the progress
+ backlog_item = gInfraWorkloadThread.get(workloadid)
+ if not backlog_item:
+ # backlog item not found, so check the stack status
+ worker_self = InfraWorkloadHelper(
+ settings.MULTICLOUD_API_V1_PREFIX,
+ settings.AAI_BASE_URL
+ )
+ progress_code, progress_status, progress_msg =\
+ worker_self.workload_detail(
+ vimid, stack_id=workloadid,
+ project_idorname=specified_project_idorname)
+
resp_template["workload_status"] = progress_status
resp_template["workload_status_reason"] = progress_msg
status_code = status.HTTP_200_OK\
if progress_code == 0 else progress_code
- except Exception as e:
- resp_template["workload_status_reason"] = progress
+
+ else:
+ progress = backlog_item.get(
+ "status",
+ (13, "GET_FAILED",
+ "Unexpected:status not found in backlog item")
+ )
+ try:
+ progress_code = progress[0]
+ progress_status = progress[1]
+ progress_msg = progress[2]
+ # if gInfraWorkloadThread.expired(workloadid):
+ # gInfraWorkloadThread.remove(workloadid)
+ resp_template["workload_status"] = progress_status
+ resp_template["workload_status_reason"] = progress_msg
+ status_code = status.HTTP_200_OK\
+ if progress_code == 0 else progress_code
+ except Exception as e:
+ resp_template["workload_status_reason"] = progress
return Response(data=resp_template, status=status_code)
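
Note: the GET flow above now splits on whether the URL carried a workloadid: without one it searches by the ?id=/?name= query string through workload_status(); with one it consults the in-memory backlog first and falls back to the new workload_detail(). Control-flow sketch only (names mirror the hunk, error handling omitted):

    def dispatch_get(workloadid, query_id, query_name, backlog):
        if not workloadid:
            # no id in the URL path: search by query string (?id= / ?name=)
            key = ("stack_id", query_id) if query_id else ("stack_name", query_name)
            return "workload_status", dict([key])
        item = backlog.get(workloadid)
        if item is None:
            # no pending backlog job: query the stack detail directly
            return "workload_detail", {"stack_id": workloadid}
        return "backlog", item.get("status")

    print(dispatch_get("", "stk-1", None, {}))
    print(dispatch_get("stk-1", None, None, {}))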
@@ -277,14 +280,14 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
backlog_item = {
"id": workloadid,
"worker": worker_self.workload_delete,
- "payload": (worker_self, vimid, workloadid, request.data,
+ "payload": (vimid, workloadid, request.data,
specified_project_idorname),
"repeat": 0, # one time job
# format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
"status": (
0, "DELETE_IN_PROGRESS",
"backlog for delete the workload %s "
- "pends to schedule" % workloadid
+ "is on progress" % workloadid
)
}
gInfraWorkloadThread.add(backlog_item)
@@ -335,29 +338,29 @@ class APIv1InfraWorkload(InfraWorkload):
super(APIv1InfraWorkload, self).__init__()
# self._logger = logger
- def post(self, request, cloud_owner="", cloud_region_id=""):
+ def post(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
# self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
# (cloud_owner, cloud_region_id, request.data))
# self._logger.debug("META: %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- return super(APIv1InfraWorkload, self).post(request, vimid)
+ return super(APIv1InfraWorkload, self).post(request, vimid, workloadid)
- def get(self, request, cloud_owner="", cloud_region_id="", requri=""):
+ def get(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
# self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
# (cloud_owner, cloud_region_id, request.data))
# self._logger.debug("META: %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- return super(APIv1InfraWorkload, self).get(request, vimid, requri)
+ return super(APIv1InfraWorkload, self).get(request, vimid, workloadid)
- def delete(self, request, cloud_owner="", cloud_region_id="", requri=""):
+ def delete(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
# self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
# (cloud_owner, cloud_region_id, request.data))
# self._logger.debug("META: %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- return super(APIv1InfraWorkload, self).delete(request, vimid, requri)
+ return super(APIv1InfraWorkload, self).delete(request, vimid, workloadid)
class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
@@ -486,7 +489,7 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
template_data = data.get("template_data", {})
# resp_template = None
if not template_type or "heat" != template_type.lower():
- return 14, "CREATE_FAILED", \
+ return status.HTTP_400_BAD_REQUEST, "CREATE_FAILED", \
"Bad parameters: template type %s is not heat" %\
template_type or ""
@@ -503,9 +506,8 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
# reset to make sure "files" are empty
template_data["files"] = {}
- template_data["stack_name"] = vf_module_id \
- if not hasattr(template_data, "stack_name")\
- else template_data["stack_name"]
+ template_data["stack_name"] =\
+ template_data.get("stack_name", vf_module_id)
# authenticate
cloud_owner, regionid = extsys.decode_vim_id(vimid)
@@ -522,7 +524,7 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
(cloud_owner, regionid, v2_token_resp_json)
logger.error(errmsg)
return (
- retcode, "CREATE_FAILED", errmsg
+ os_status, "CREATE_FAILED", errmsg
)
# tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
@@ -540,5 +542,5 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
# stackid = stack1["id"] if stack1 else ""
return 0, "CREATE_IN_PROGRESS", stack1
else:
- self._logger.info("RESP with data> result:%s" % content)
- return retcode, "CREATE_FAILED", content
+ self._logger.info("workload_create fails: %s" % content)
+ return os_status, "CREATE_FAILED", content
diff --git a/starlingx/run.sh b/starlingx/run.sh
index a2dc4a75..7b69d169 100755
--- a/starlingx/run.sh
+++ b/starlingx/run.sh
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
export PYTHONPATH=lib/share
uwsgi --http :9009 --module starlingx.wsgi --master --processes 4
diff --git a/starlingx/starlingx/resource/tests/test_capacity.py b/starlingx/starlingx/resource/tests/test_capacity.py
index c48f0eff..ee30985f 100644
--- a/starlingx/starlingx/resource/tests/test_capacity.py
+++ b/starlingx/starlingx/resource/tests/test_capacity.py
@@ -143,7 +143,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": True}, response.data)
+ self.assertEqual({'AZs': [], "result": True}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -164,7 +164,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -186,7 +186,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -208,7 +208,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -230,7 +230,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -252,7 +252,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -274,4 +274,4 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
diff --git a/starlingx/starlingx/urls.py b/starlingx/starlingx/urls.py
index 5a76b330..f340b7fd 100644
--- a/starlingx/starlingx/urls.py
+++ b/starlingx/starlingx/urls.py
@@ -15,7 +15,7 @@
from django.conf.urls import include, url
from starlingx_base.registration import registration
from newton_base.openoapi import tenants
-from newton_base.resource import capacity
+from starlingx_base.resource import capacity
from starlingx_base.resource import infra_workload
urlpatterns = [
@@ -38,7 +38,7 @@ urlpatterns = [
r'infra_workload/?$',
infra_workload.InfraWorkload.as_view()),
url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
- r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+ r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
infra_workload.InfraWorkload.as_view()),
url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/',
include('starlingx.proxy.urls')),
@@ -67,7 +67,7 @@ urlpatterns = [
infra_workload.APIv1InfraWorkload.as_view()),
url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/'
- r'(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+ r'(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
infra_workload.APIv1InfraWorkload.as_view()),
url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
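
The workloadid pattern tightens from "*" to "+". With "*", the item route also accepted an empty workload id and so matched the bare collection URL, relying on pattern order alone to route it correctly; "+" demands at least one character, so the collection URL can only hit the id-less pattern. A demonstration with the stdlib re module:

    import re

    star = re.compile(r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]*)/?$')
    plus = re.compile(r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]+)/?$')

    url = 'infra_workload/'            # collection URL, no workload id
    print(bool(star.search(url)))      # True  -- ambiguous extra match
    print(bool(plus.search(url)))      # False -- only the list route fits
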
diff --git a/starlingx/stop.sh b/starlingx/stop.sh
index 87d67ebc..0b734f38 100755
--- a/starlingx/stop.sh
+++ b/starlingx/stop.sh
@@ -15,4 +15,4 @@
#ps auxww | grep 'manage.py runserver 0.0.0.0:9009' | awk '{print $2}' | xargs kill -9
ps auxww |grep 'uwsgi --http :9009 --module starlingx.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/windriver/run.sh b/windriver/run.sh
index 74aff7fa..650c2c1b 100644
--- a/windriver/run.sh
+++ b/windriver/run.sh
@@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
export PYTHONPATH=lib/share
#nohup python manage.py runserver 0.0.0.0:9005 2>&1 &
diff --git a/windriver/stop.sh b/windriver/stop.sh
index ab8a72d5..5e7a6ac3 100644
--- a/windriver/stop.sh
+++ b/windriver/stop.sh
@@ -15,4 +15,4 @@
#ps auxww | grep 'manage.py runserver 0.0.0.0:9005' | awk '{print $2}' | xargs kill -9
ps auxww |grep 'uwsgi --http :9005 --module titanium_cloud.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/windriver/titanium_cloud/resource/tests/test_capacity.py b/windriver/titanium_cloud/resource/tests/test_capacity.py
index baca720f..82c453a2 100644
--- a/windriver/titanium_cloud/resource/tests/test_capacity.py
+++ b/windriver/titanium_cloud/resource/tests/test_capacity.py
@@ -144,7 +144,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": True}, response.data)
+ self.assertEqual({'AZs': [],"result": True}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -165,7 +165,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [],"result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -187,7 +187,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [],"result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -209,7 +209,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [],"result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -231,7 +231,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [],"result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -253,7 +253,7 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
@mock.patch.object(VimDriverUtils, 'get_session')
@mock.patch.object(VimDriverUtils, 'get_vim_info')
@@ -275,4 +275,4 @@ class TestCapacity(test_base.TestRequest):
HTTP_X_AUTH_TOKEN=mock_info.MOCK_TOKEN_ID)
self.assertEquals(status.HTTP_200_OK, response.status_code)
- self.assertEqual({"result": False}, response.data)
+ self.assertEqual({'AZs': [], "result": False}, response.data)
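
A note on the surrounding context lines: assertEquals is a long-deprecated alias of assertEqual and is removed in Python 3.12, so the untouched call sites above will eventually need the spelling the new assertions already use. Minimal form:

    import unittest

    class Example(unittest.TestCase):
        def test_status(self):
            response_status = 200          # stand-in for response.status_code
            self.assertEqual(200, response_status)  # not assertEquals(...)
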
diff --git a/windriver/titanium_cloud/urls.py b/windriver/titanium_cloud/urls.py
index 4d118dc2..a0537ddc 100644
--- a/windriver/titanium_cloud/urls.py
+++ b/windriver/titanium_cloud/urls.py
@@ -16,7 +16,7 @@ from django.conf.urls import include, url
from starlingx_base.registration import registration
from newton_base.openoapi import tenants
-from newton_base.resource import capacity
+from starlingx_base.resource import capacity
from starlingx_base.resource import infra_workload
urlpatterns = [
@@ -83,7 +83,7 @@ urlpatterns = [
r'infra_workload/?$',
infra_workload.InfraWorkload.as_view()),
url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
- r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+ r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
infra_workload.InfraWorkload.as_view()),
url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/',
include('titanium_cloud.proxy.urls')),
@@ -122,7 +122,7 @@ urlpatterns = [
infra_workload.APIv1InfraWorkload.as_view()),
url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/'
- r'(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+ r'(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
infra_workload.APIv1InfraWorkload.as_view()),
url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',