 ocata/ocata/vesagent/event_domain/fault_vm.py               |  10 +-
 ocata/ocata/vesagent/event_domain/tests_fault_vm.py         | 167 ++++++++++
 ocata/ocata/vesagent/tests.py                               |  97 +++++-
 pike/pike/resource/views/infra_workload.py                  |  70 ++++-
 share/common/msapi/helper.py                                |  59 ++++
 share/newton_base/proxy/services.py                         |  23 +-
 windriver/titanium_cloud/registration/views/registration.py |  38 ++-
 windriver/titanium_cloud/resource/views/infra_workload.py   | 174 ++++++++++-
 windriver/titanium_cloud/urls.py                            |   6 +-
 9 files changed, 594 insertions(+), 50 deletions(-)
diff --git a/ocata/ocata/vesagent/event_domain/fault_vm.py b/ocata/ocata/vesagent/event_domain/fault_vm.py
index df1ca60d..faddd25c 100644
--- a/ocata/ocata/vesagent/event_domain/fault_vm.py
+++ b/ocata/ocata/vesagent/event_domain/fault_vm.py
@@ -21,7 +21,7 @@ import time
from django.conf import settings
from ocata.vesagent.vespublish import publishAnyEventToVES
-from common.utils.restcall import _call_req
+from common.utils import restcall
import datetime
import time
@@ -61,7 +61,7 @@ def buildBacklog_fault_vm(vimid, backlog_input):
auth_api_data = { "auth":{"tenantName": tenant_name} }
base_url = settings.MULTICLOUD_PREFIX
extra_headers = ''
- ret = _call_req(base_url, "", "", 0, auth_api_url, "POST", extra_headers, json.dumps(auth_api_data))
+ ret = restcall._call_req(base_url, "", "", 0, auth_api_url, "POST", extra_headers, json.dumps(auth_api_data))
if ret[0] > 0 or ret[1] is None:
logger.critical("call url %s failed with status %s" % (auth_api_url, ret[0]))
return None
@@ -79,7 +79,7 @@ def buildBacklog_fault_vm(vimid, backlog_input):
f_server_name=server_name)
base_url = settings.MULTICLOUD_PREFIX
extra_headers = {'X-Auth-Token': token}
- ret = _call_req(base_url, "", "", 0, vserver_api_url, "GET", extra_headers, "")
+ ret = restcall._call_req(base_url, "", "", 0, vserver_api_url, "GET", extra_headers, "")
if ret[0] > 0 or ret[1] is None:
logger.critical("call url %s failed with status %s" % (vserver_api_url, ret[0]))
return None
@@ -143,7 +143,7 @@ def processBacklog_fault_vm(vesAgentConfig, vesAgentState, oneBacklog):
base_url = settings.MULTICLOUD_PREFIX
extra_headers = ''
logger.debug("authenticate with url:%s" % auth_api_url)
- ret = _call_req(base_url, "", "", 0, auth_api_url, "POST", extra_headers, json.dumps(auth_api_data))
+ ret = restcall._call_req(base_url, "", "", 0, auth_api_url, "POST", extra_headers, json.dumps(auth_api_data))
if ret[0] > 0 or ret[1] is None:
logger.critical("call url %s failed with status %s" % (auth_api_url, ret[0]))
@@ -159,7 +159,7 @@ def processBacklog_fault_vm(vesAgentConfig, vesAgentState, oneBacklog):
extra_headers = {'X-Auth-Token': token}
#which one is correct? extra_headers = {'HTTP_X_AUTH_TOKEN': token}
logger.debug("authenticate with url:%s, header:%s" % (auth_api_url,extra_headers))
- ret = _call_req(base_url, "", "", 0, api_link, method, extra_headers, data)
+ ret = restcall._call_req(base_url, "", "", 0, api_link, method, extra_headers, data)
if ret[0] > 0 or ret[1] is None:
logger.critical("call url %s failed with status %s" % (api_link, ret[0]))
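
Note: the change above from "from common.utils.restcall import _call_req" to "from common.utils import restcall" makes fault_vm resolve _call_req through the module attribute at call time, which is what lets the new tests patch it with mock.patch.object(restcall, '_call_req'). A minimal sketch of the pattern, assuming the repo's common.utils.restcall module:

    import json
    import mock
    from common.utils import restcall

    with mock.patch.object(restcall, '_call_req') as mock_call_req:
        # any code calling restcall._call_req(...) now hits the mock;
        # a "from ... import _call_req" binding would have kept the real function
        mock_call_req.return_value = (0, json.dumps({"access": {}}), "MOCKED response body")
        ret = restcall._call_req("http://base", "", "", 0, "/v2.0/tokens", "POST", "", "{}")
        assert ret[0] == 0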
diff --git a/ocata/ocata/vesagent/event_domain/tests_fault_vm.py b/ocata/ocata/vesagent/event_domain/tests_fault_vm.py
new file mode 100644
index 00000000..919988e2
--- /dev/null
+++ b/ocata/ocata/vesagent/event_domain/tests_fault_vm.py
@@ -0,0 +1,167 @@
+# Copyright (c) Intel Corporation, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+import unittest
+import json
+
+from ocata.vesagent import vespublish
+from common.utils import restcall
+from ocata.vesagent.event_domain import fault_vm
+
+MOCK_TOKEN_RESPONSE = {"access":{"token":{"issued_at":"2018-05-10T16:56:56.000000Z","expires":"2018-05-10T17:56:56.000000Z","id":"4a832860dd744306b3f66452933f939e","tenant":{"domain":{"id":"default","name":"Default"},"enabled":"true","id":"0e148b76ee8c42f78d37013bf6b7b1ae","name":"VIM"}},"serviceCatalog":[],"user":{"domain":{"id":"default","name":"Default"},"id":"ba76c94eb5e94bb7bec6980e5507aae2","name":"demo"}}}
+MOCK_SERVERS_GET_RESPONSE = {"servers": [{"id": "c4b575fa-ed85-4642-ab4b-335cb5744721", "links": [{"href": "http://10.12.25.2:8774/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721", "rel": "self"}, {"href": "http://10.12.25.2:8774/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721", "rel": "bookmark"}], "name": "onap-aaf"}]}
+MOCK_BACKLOG_INPUT = {"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
+ "source": "onap-aaf",
+ "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "domain": "fault", "type": "vm", "tenant": "VIM"}
+
+MOCK_BACKLOG_INPUT_wo_tenant_id = {"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "source": "onap-aaf",
+ "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "domain": "fault", "type": "vm", "tenant": "VIM"}
+MOCK_BACKLOG_INPUT_wo_tenant = {"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "source": "onap-aaf",
+ "domain": "fault", "type": "vm", }
+
+MOCK_BACKLOG_INPUT_wo_server_id = {"source": "onap-aaf",
+ "domain": "fault", "type": "vm", "tenant": "VIM"}
+MOCK_BACKLOG_INPUT_wo_server = {"domain": "fault", "type": "vm", "tenant": "VIM"}
+
+MOCK_SERVER_GET_RESPONSE = {"server": {"wrs-res:topology": "node:0, 4096MB, pgsize:2M, vcpus:0,1, pol:sha", "OS-EXT-STS:task_state": None, "addresses": {"oam_onap_BTHY": [{"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:6c:0d:6b", "version": 4, "addr": "10.0.13.1", "OS-EXT-IPS:type": "fixed"}, {"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:6c:0d:6b", "version": 4, "addr": "10.12.5.185", "OS-EXT-IPS:type": "floating"}]}, "links": [], "image": {"id": "6e219e86-cd94-4989-9119-def29aa10b12", "links": []}, "wrs-if:nics": [], "wrs-sg:server_group": "", "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2018-04-26T08:01:28.000000", "flavor": {}, "id": "c4b575fa-ed85-4642-ab4b-335cb5744721", "security_groups": [{"name": "onap_sg_BTHY"}], "user_id": "ba76c94eb5e94bb7bec6980e5507aae2", "OS-DCF:diskConfig": "MANUAL", "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "OS-EXT-AZ:availability_zone": "nova", "metadata": {}, "status": "ACTIVE", "updated": "2018-04-26T08:01:28Z", "hostId": "17acc9f2ae4f618c314e4cdf0c206585b895bc72a9ec57e57b254133", "OS-SRV-USG:terminated_at": None, "wrs-res:pci_devices": "", "wrs-res:vcpus": [2, 2, 2], "key_name": "onap_key_BTHY", "name": "onap-aaf", "created": "2018-04-26T08:01:20Z", "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "os-extended-volumes:volumes_attached": [], "config_drive": ""}}
+
+MOCK_SERVER_GET_RESPONSE_empty = {}
+
+MOCK_vesAgentConfig = {"backlogs": [{"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
+ "source": "onap-aaf",
+ "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "domain": "fault", "type": "vm", "tenant": "VIM"}],
+ "poll_interval_default": 10, "vimid": "windriver-hudson-dc_RegionOne",
+ "ves_subscription": {"username": "user", "password": "password",
+ "endpoint": "http://127.0.0.1:9005/sample"}}
+
+MOCK_vesAgentState = {"ce2d7597-22e1-4239-890f-bc303bd67076": {"timestamp": 1525975400}}
+MOCK_oneBacklog = {"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076", "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721", "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET", "source": "onap-aaf", "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721", "domain": "fault", "type": "vm", "tenant": "VIM"}
+
+class FaultVMTest(unittest.TestCase):
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_get_epoch_now_usecond(self):
+ epoch = fault_vm.get_epoch_now_usecond()
+ self.assertGreater(epoch, 1)
+ pass
+
+
+ @mock.patch.object(restcall, '_call_req')
+ def test_buildBacklog_fault_vm(self, mock_call_req):
+
+ mock_call_req.side_effect= [
+ (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
+ (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "MOCKED response body")
+ ]
+ backlog = fault_vm.buildBacklog_fault_vm(vimid="windriver-hudson-dc_RegionOne",
+ backlog_input = MOCK_BACKLOG_INPUT)
+ self.assertIsNotNone(backlog)
+ pass
+
+ @mock.patch.object(restcall, '_call_req')
+ def test_buildBacklog_fault_vm_wo_tenant_id(self, mock_call_req):
+
+ mock_call_req.side_effect= [
+ (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
+ (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "MOCKED response body")
+ ]
+ backlog = fault_vm.buildBacklog_fault_vm(vimid="windriver-hudson-dc_RegionOne",
+ backlog_input = MOCK_BACKLOG_INPUT_wo_tenant_id)
+ self.assertIsNotNone(backlog)
+ pass
+
+ @mock.patch.object(restcall, '_call_req')
+ def test_buildBacklog_fault_vm_wo_tenant(self, mock_call_req):
+
+ mock_call_req.side_effect= [
+ (1, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body: failed"),
+ (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "MOCKED response body")
+ ]
+ backlog = fault_vm.buildBacklog_fault_vm(vimid="windriver-hudson-dc_RegionOne",
+ backlog_input = MOCK_BACKLOG_INPUT_wo_tenant)
+ self.assertIsNone(backlog)
+ pass
+
+ @mock.patch.object(restcall, '_call_req')
+ def test_buildBacklog_fault_vm_wo_server_id(self, mock_call_req):
+
+ mock_call_req.side_effect= [
+ (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
+ (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "MOCKED response body")
+ ]
+ backlog = fault_vm.buildBacklog_fault_vm(vimid="windriver-hudson-dc_RegionOne",
+ backlog_input = MOCK_BACKLOG_INPUT_wo_server_id)
+ self.assertIsNotNone(backlog)
+ pass
+
+ @mock.patch.object(restcall, '_call_req')
+ def test_buildBacklog_fault_vm_wo_server(self, mock_call_req):
+
+ mock_call_req.side_effect= [
+ (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
+ (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "MOCKED response body")
+ ]
+ backlog = fault_vm.buildBacklog_fault_vm(vimid="windriver-hudson-dc_RegionOne",
+ backlog_input = MOCK_BACKLOG_INPUT_wo_server)
+ self.assertIsNotNone(backlog)
+ pass
+
+ @mock.patch.object(vespublish, 'publishAnyEventToVES')
+ @mock.patch.object(restcall, '_call_req')
+ def test_processBacklog_fault_vm(self, mock_call_req, mock_publishAnyEventToVES):
+
+ mock_call_req.side_effect= [
+ (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
+ (0, json.dumps(MOCK_SERVER_GET_RESPONSE), "MOCKED response body")
+ ]
+ mock_publishAnyEventToVES.return_value = "mocked return value"
+
+ result = fault_vm.processBacklog_fault_vm(vesAgentConfig=MOCK_vesAgentConfig,
+ vesAgentState=MOCK_vesAgentState,
+ oneBacklog=MOCK_oneBacklog)
+ self.assertIsNone(result)
+ pass
+
+ @mock.patch.object(vespublish, 'publishAnyEventToVES')
+ @mock.patch.object(restcall, '_call_req')
+ def test_processBacklog_fault_vm_wo_server(self, mock_call_req, mock_publishAnyEventToVES):
+
+ mock_call_req.side_effect= [
+ (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
+ (0, json.dumps(MOCK_SERVER_GET_RESPONSE_empty), "MOCKED response body")
+ ]
+ mock_publishAnyEventToVES.return_value = "mocked return value"
+
+ result = fault_vm.processBacklog_fault_vm(vesAgentConfig=MOCK_vesAgentConfig,
+ vesAgentState=MOCK_vesAgentState,
+ oneBacklog=MOCK_oneBacklog)
+ self.assertIsNone(result)
+ pass
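
Note: each test above feeds mock_call_req.side_effect a list that is consumed in call order, mirroring the two _call_req invocations inside buildBacklog_fault_vm (the token POST first, then the servers GET). A minimal self-contained sketch of the convention, with illustrative payloads:

    import mock

    mock_call_req = mock.Mock()
    mock_call_req.side_effect = [
        (0, '{"access": {}}', "MOCKED response body"),   # 1st call: POST /v2.0/tokens
        (1, '{"servers": []}', "MOCKED response body"),  # 2nd call fails: status > 0
    ]
    assert mock_call_req()[0] == 0   # token call succeeds
    assert mock_call_req()[0] == 1   # servers call fails, code under test bails out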
diff --git a/ocata/ocata/vesagent/tests.py b/ocata/ocata/vesagent/tests.py
index 9f34b3f5..242f7d19 100644
--- a/ocata/ocata/vesagent/tests.py
+++ b/ocata/ocata/vesagent/tests.py
@@ -21,7 +21,8 @@ from rest_framework import status
from django.core.cache import cache
from common.msapi import extsys
-
+from ocata.vesagent import vesagent_ctrl
+from ocata.vesagent.tasks import scheduleBacklogs
MOCK_VIM_INFO = {
@@ -45,6 +46,7 @@ MOCK_VIM_INFO = {
class VesAgentCtrlTest(unittest.TestCase):
def setUp(self):
self.client = Client()
+ self.view = vesagent_ctrl.VesAgentCtrl()
def tearDown(self):
pass
@@ -57,3 +59,96 @@ class VesAgentCtrlTest(unittest.TestCase):
response = self.client.get("/api/multicloud-ocata/v0/windriver-hudson-dc_RegionOne/vesagent")
self.assertEqual(status.HTTP_200_OK, response.status_code, response.content)
+
+ @mock.patch.object(vesagent_ctrl.VesAgentCtrl, 'buildBacklogsOneVIM')
+ @mock.patch.object(extsys, 'get_vim_by_id')
+ def test_post(self, mock_get_vim_by_id, mock_buildBacklogsOneVIM):
+ mock_get_vim_by_id.return_value = MOCK_VIM_INFO
+ mock_buildBacklogsOneVIM.return_value = "mocked vesagent_backlogs"
+ mock_request = mock.Mock()
+ mock_request.META = {"testkey":"testvalue"}
+ mock_request.data = {"testdatakey":"testdatavalue"}
+
+ response = self.view.post(request=mock_request, vimid="windriver-hudson-dc_RegionOne")
+        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
+
+ pass
+
+ @mock.patch.object(vesagent_ctrl.VesAgentCtrl, 'clearBacklogsOneVIM')
+ @mock.patch.object(extsys, 'get_vim_by_id')
+ def test_delete(self, mock_get_vim_by_id, mock_clearBacklogsOneVIM):
+ mock_get_vim_by_id.return_value = MOCK_VIM_INFO
+ mock_clearBacklogsOneVIM.return_value = "mocked vesagent_backlogs"
+ mock_request = mock.Mock()
+ mock_request.META = {"testkey": "testvalue"}
+
+ response = self.view.delete(request=mock_request, vimid="windriver-hudson-dc_RegionOne")
+        self.assertEqual(status.HTTP_200_OK, response.status_code)
+
+ pass
+
+ @mock.patch.object(cache, 'get')
+ def test_getBacklogsOneVIM(self, mock_get):
+ mock_vesagent_config = {"backlogs": [{"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076", "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721", "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET", "source": "onap-aaf", "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721", "domain": "fault", "type": "vm", "tenant": "VIM"}], "poll_interval_default": 10, "vimid": "onaplab_RegionOne", "subscription": {"username": "user", "password": "password", "endpoint": "http://127.0.0.1:9005/sample"}}
+ mock_get.return_value = json.dumps(mock_vesagent_config)
+
+ vesAgentConfig = self.view.getBacklogsOneVIM(vimid="windriver-hudson-dc_RegionOne")
+        self.assertEqual(vesAgentConfig, mock_vesagent_config)
+
+ pass
+
+ @mock.patch.object(cache, 'set')
+ @mock.patch.object(cache, 'get')
+ def test_clearBacklogsOneVIM(self, mock_get, mock_set):
+ mock_VesAgentBacklogs_vimlist = ["windriver-hudson-dc_RegionOne"]
+ mock_vesagent_config = {"backlogs": [{"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
+ "source": "onap-aaf",
+ "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "domain": "fault", "type": "vm", "tenant": "VIM"}],
+ "poll_interval_default": 10, "vimid": "onaplab_RegionOne",
+ "subscription": {"username": "user", "password": "password",
+ "endpoint": "http://127.0.0.1:9005/sample"}}
+
+ mock_get.side_effect= [
+ json.dumps(mock_VesAgentBacklogs_vimlist),
+ json.dumps(mock_vesagent_config)
+ ]
+
+
+ mock_set.return_value = "mocked cache set"
+
+ result = self.view.clearBacklogsOneVIM(vimid="windriver-hudson-dc_RegionOne")
+        self.assertEqual(0, result)
+
+
+ pass
+
+ @mock.patch.object(scheduleBacklogs, 'delay')
+ @mock.patch.object(cache, 'set')
+ @mock.patch.object(cache, 'get')
+ def test_buildBacklogsOneVIM(self, mock_get, mock_set, mock_scheduleBacklogs_delay):
+ mock_VesAgentBacklogs_vimlist = ["windriver-hudson-dc_RegionOne"]
+ mock_vesagent_config = {"backlogs": [{"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
+ "source": "onap-aaf",
+ "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "domain": "fault", "type": "vm", "tenant": "VIM"}],
+ "poll_interval_default": 10, "vimid": "windriver-hudson-dc_RegionOne",
+ "ves_subscription": {"username": "user", "password": "password",
+ "endpoint": "http://127.0.0.1:9005/sample"}}
+
+ mock_get.side_effect= [
+ json.dumps(mock_VesAgentBacklogs_vimlist),
+ ]
+
+ mock_set.return_value = "mocked cache set"
+ mock_scheduleBacklogs_delay.return_value = "mocked delay"
+
+ VesAgentBacklogsConfig = self.view.buildBacklogsOneVIM(vimid="windriver-hudson-dc_RegionOne",
+ vesagent_config = mock_vesagent_config)
+ self.assertIsNotNone(VesAgentBacklogsConfig)
+
+ pass
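
Note: the cache tests above work because vesagent_ctrl stores its per-VIM backlog config in the Django cache as JSON strings, so the mocks return json.dumps(...) values. A minimal sketch of the pattern, assuming the django.core.cache backend used by the view:

    import json
    import mock
    from django.core.cache import cache

    with mock.patch.object(cache, 'get') as mock_get, \
            mock.patch.object(cache, 'set') as mock_set:
        mock_get.return_value = json.dumps({"backlogs": [], "vimid": "windriver-hudson-dc_RegionOne"})
        mock_set.return_value = "mocked cache set"
        # code under test does json.loads(cache.get(key)) and cache.set(key, json.dumps(value))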
diff --git a/pike/pike/resource/views/infra_workload.py b/pike/pike/resource/views/infra_workload.py
index 9ef249d0..20ad67d6 100644
--- a/pike/pike/resource/views/infra_workload.py
+++ b/pike/pike/resource/views/infra_workload.py
@@ -39,24 +39,60 @@ class InfraWorkload(APIView):
self._logger.debug("META: %s" % request.META)
try :
-
- # stub response
- resp_template = {
- "template_type": "heat",
- "workload_id": "3095aefc-09fb-4bc7-b1f0-f21a304e864c",
- "template_response":
- {
- "stack": {
- "id": "3095aefc-09fb-4bc7-b1f0-f21a304e864c",
- "links": [
- {
- "href": "http://192.168.123.200:8004/v1/eb1c63a4f77141548385f113a28f0f52/stacks/teststack/3095aefc-09fb-4bc7-b1f0-f21a304e864c",
- "rel": "self"
- }
- ]
- }
+ vim = VimDriverUtils.get_vim_info(vimid)
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+
+ data = request.data
+ oof_directive = data["oof_directive"]
+ template_type = data["template_type"]
+ template_data = data["template_data"]
+
+ resp_template = None
+ if "heat" == template_type:
+ tenant_name = None
+ interface = 'public'
+ service = {'service_type': 'orchestration',
+ 'interface': interface,
+ 'region_id': vim['openstack_region_id']
+ if vim.get('openstack_region_id')
+ else vim['cloud_region_id']}
+
+            if "parameters" in template_data:
+                parameters = template_data["parameters"]
+            else:
+                self._logger.error("no parameters found in the heat template")
+                return Response(data=None, status=status.HTTP_400_BAD_REQUEST)
+
+            for directive in template_data.get("directives", []):
+                if directive["type"] == "vnfc":
+                    for directive2 in directive["directives"]:
+                        if directive2["type"] == "flavor_directive":
+                            # each directive carries a list of attributes
+                            flavor_label = directive2["attributes"][0]["attribute_name"]
+                            flavor_value = directive2["attributes"][0]["attribute_value"]
+                            if flavor_label in parameters:
+                                template_data["parameters"][flavor_label] = flavor_value
+                            else:
+                                self._logger.warn("cannot find the flavor_label: %s" %
+                                                  flavor_label)
+
+            req_resource = "/stacks"
+            req_body = template_data
+            sess = VimDriverUtils.get_session(vim, tenant_name)
+            resp = sess.post(req_resource,
+                             json=req_body,
+                             endpoint_filter=service)
+            stack_resp = resp.json()
+
+            resp_template = {
+                "template_type": template_type,
+                "workload_id": stack_resp["stack"]["id"],
+                "template_response": stack_resp
}
- }
+
+ elif "tosca" == template_type:
+ #TODO
+ self._logger.info("TBD")
+ else:
+ self._logger.warn("This template type is not supported")
+
self._logger.info("RESP with data> result:%s" % resp_template)
return Response(data=resp_template, status=status.HTTP_201_CREATED)
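
Note: the flavor-mapping loop added above walks an OOF-style directives payload inside template_data. The shape it assumes is sketched below with illustrative values; only "parameters" and "directives" are consumed by the code:

    template_data = {
        "parameters": {"oam_flavor_name": ""},
        "directives": [
            {
                "type": "vnfc",
                "directives": [
                    {
                        "type": "flavor_directive",
                        "attributes": [
                            {"attribute_name": "oam_flavor_name",
                             "attribute_value": "m1.large"}
                        ]
                    }
                ]
            }
        ]
    }
    # the loop copies attribute_value into parameters[attribute_name],
    # then POSTs the whole template to the Heat /stacks endpoint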
diff --git a/share/common/msapi/helper.py b/share/common/msapi/helper.py
new file mode 100644
index 00000000..0c27990b
--- /dev/null
+++ b/share/common/msapi/helper.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2017-2018 Wind River Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+
+import json
+import logging
+import re
+
+from common.exceptions import VimDriverNewtonException
+from common.utils import restcall
+
+
+logger = logging.getLogger(__name__)
+
+
+class Helper(object):
+
+ @staticmethod
+    def MultiCloudIdentityHelper(multicloud_api_prefix, cloud_owner, cloud_region_id, uri, data=None, header=''):
+ auth_api_url_format = "/{f_cloudowner}/{f_cloudregionid}/identity{f_uri}"
+ auth_api_url = auth_api_url_format.format(f_cloudowner=cloud_owner,
+ f_cloudregionid=cloud_region_id,
+ f_uri=uri)
+ extra_headers = header
+        ret = restcall._call_req(multicloud_api_prefix, "", "", 0, auth_api_url, "POST", extra_headers, json.dumps(data or {}))
+ if ret[0] > 0 or ret[1] is None:
+ logger.critical("call url %s failed with status %s" % (multicloud_api_prefix+auth_api_url, ret[0]))
+ return None
+
+ resp = json.JSONDecoder().decode(ret[1])
+ return resp
+
+ # The consumer of this api must be attaching to the same management network of multicloud,
+ # The constraints comes from the returned catalog endpoint url e.g. "http://10.0.14.1:80/api/multicloud-titaniumcloud/v0/pod25_RegionOne/identity/v3"
+ @staticmethod
+    def MultiCloudServiceHelper(cloud_owner, cloud_region_id, v2_token_resp_json, service_type, uri, data=None, method="GET"):
+ # get endpoint from token response
+ token = v2_token_resp_json["access"]["token"]["id"]
+ catalogs = v2_token_resp_json["access"]["serviceCatalog"]
+ for catalog in catalogs:
+ if catalog['type'] == service_type:
+ # now we have endpoint
+ endpoint_url = catalog['endpoints'][0]['publicURL']
+ extra_headers = {'X-Auth-Token': token}
+ ret = restcall._call_req(endpoint_url, "", "", 0, uri, method, extra_headers, json.dumps(data) if data else "")
+ if ret[0] > 0 or ret[1] is None:
+ logger.critical("call url %s failed with status %s" % (endpoint_url+uri, ret[0]))
+ return None
+
+ content = json.JSONDecoder().decode(ret[1])
+ return content
+ pass
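
Note: the two helpers are meant to be chained: MultiCloudIdentityHelper obtains a v2 token response through the multicloud identity proxy, and MultiCloudServiceHelper uses its serviceCatalog to reach a backend service. A minimal usage sketch (prefix and ids are illustrative):

    from common.msapi.helper import Helper as helper

    v2_token_resp_json = helper.MultiCloudIdentityHelper(
        "http://127.0.0.1:9005/api/multicloud-titaniumcloud/v0",  # illustrative prefix
        "CloudOwner", "RegionOne", "/v2.0/tokens")
    if v2_token_resp_json:
        # GET /stacks via the "orchestration" catalog entry, token attached automatically
        stacks = helper.MultiCloudServiceHelper(
            "CloudOwner", "RegionOne", v2_token_resp_json,
            "orchestration", "/stacks", None, "GET")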
diff --git a/share/newton_base/proxy/services.py b/share/newton_base/proxy/services.py
index 6aa1bbce..36ae9840 100644
--- a/share/newton_base/proxy/services.py
+++ b/share/newton_base/proxy/services.py
@@ -269,7 +269,28 @@ class GetTenants(Services):
return Response(headers={'X-Subject-Token': tmp_auth_token}, data={'tenants': content['projects'],'tenants_links':[]},
status=resp.status_code)
else:
- return resp
+ viminfo = VimDriverUtils.get_vim_info(vimid)
+ session = VimDriverUtils.get_session(
+ viminfo, tenant_name=viminfo['tenant'])
+ tmp_auth_state = VimDriverUtils.get_auth_state(session)
+ tmp_auth_info = json.loads(tmp_auth_state)
+ tmp_auth_data = tmp_auth_info['body']
+ tenant = tmp_auth_data['token']['project']
+ content = {'projects': [
+ {
+ 'is_domain': False,
+                'description': 'tenant info provisioned by VIM onboarding',
+ 'enabled': True,
+ 'domain_id': viminfo['domain'],
+ 'parent_id': 'default',
+ 'id': tenant['id'],
+ 'name': tenant['name']
+ }
+ ]}
+ return Response(headers={'X-Subject-Token': tmp_auth_token}, data={'tenants': content['projects'],'tenants_links':[]},
+ status=status.HTTP_200_OK)
+
+ # return resp
def head(self, request, vimid="", servicetype="", requri=""):
self._logger.warn("wrong request with vimid, servicetype, requri> %s,%s,%s"
diff --git a/windriver/titanium_cloud/registration/views/registration.py b/windriver/titanium_cloud/registration/views/registration.py
index 584b263f..1d560e4e 100644
--- a/windriver/titanium_cloud/registration/views/registration.py
+++ b/windriver/titanium_cloud/registration/views/registration.py
@@ -171,7 +171,7 @@ class APIv1Registry(newton_registration.Registry):
for region in openstackregions:
if (region['id'] == 'SystemController'):
isDistributedCloud = True;
- break;
+ break
else:
continue
@@ -200,9 +200,17 @@ class APIv1Registry(newton_registration.Registry):
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
viminfo = VimDriverUtils.get_vim_info(vimid)
- cloud_extra_info = viminfo['cloud_extra_info']
- region_specified = cloud_extra_info["openstack-region-id"] if cloud_extra_info else None
- multi_region_discovery = cloud_extra_info["multi-region-discovery"] if cloud_extra_info else None
+ cloud_extra_info_str = viminfo['cloud_extra_info']
+ cloud_extra_info = None
+ try:
+ cloud_extra_info = json.loads(cloud_extra_info_str) if cloud_extra_info_str else None
+ except Exception as ex:
+ logger.error("Can not convert cloud extra info %s %s" % (
+ str(ex), cloud_extra_info_str))
+ pass
+
+ region_specified = cloud_extra_info.get("openstack-region-id", None) if cloud_extra_info else None
+ multi_region_discovery = cloud_extra_info.get("multi-region-discovery", None) if cloud_extra_info else None
# set the default tenant since there is no tenant info in the VIM yet
sess = VimDriverUtils.get_session(
@@ -226,19 +234,27 @@ class APIv1Registry(newton_registration.Registry):
pass
else:
# assume the first region be the primary region since we have no other way to determine it.
- region_specified = region_ids.pop();
+            region_specified = region_ids.pop(0)
# update cloud region and discover/register resource
- if (multi_region_discovery and multi_region_discovery.upper() == "TRUE"):
+        if multi_region_discovery:
# no input for specified cloud region, so discover all cloud region?
for regionid in region_ids:
+ # do not update the specified region here
+ if region_specified == regionid:
+ continue
+
#create cloud region with composed AAI cloud_region_id except for the one onboarded externally (e.g. ESR)
- gen_cloud_region_id = cloud_region_id + "." + regionid if region_specified != regionid else cloud_region_id
+ gen_cloud_region_id = cloud_region_id + "." + regionid
+ self._logger.info("create a cloud region: %s,%s,%s" % (cloud_owner,gen_cloud_region_id,regionid))
+
self._update_cloud_region(cloud_owner, gen_cloud_region_id, regionid, viminfo)
- return super(APIv1Registry, self).post(request, vimid)
- else:
- self._update_cloud_region(cloud_owner, cloud_region_id, region_specified, viminfo)
- return super(APIv1Registry, self).post(request, vimid)
+ super(APIv1Registry, self).post(request, vimid)
+
+
+ # update the specified region
+ self._update_cloud_region(cloud_owner, cloud_region_id, region_specified, viminfo)
+ return super(APIv1Registry, self).post(request, vimid)
except HttpError as e:
self._logger.error("HttpError: status:%s, response:%s" % (e.http_status, e.response.json()))
diff --git a/windriver/titanium_cloud/resource/views/infra_workload.py b/windriver/titanium_cloud/resource/views/infra_workload.py
index 08efcfd1..dce66dec 100644
--- a/windriver/titanium_cloud/resource/views/infra_workload.py
+++ b/windriver/titanium_cloud/resource/views/infra_workload.py
@@ -27,7 +27,9 @@ from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from common.msapi import extsys
+from common.msapi.helper import Helper as helper
+from common.utils import restcall
logger = logging.getLogger(__name__)
@@ -37,8 +39,9 @@ class InfraWorkload(APIView):
def __init__(self):
self._logger = logger
- def post(self, request, vimid=""):
- self._logger.info("vimid, data: %s, %s" % (vimid, request.data))
+ def post(self, request, vimid="", requri=""):
+ self._logger.info("vimid,requri: %s, %s" % (vimid, requri))
+ self._logger.info("data: %s, %s" % (request.data))
self._logger.debug("META: %s" % request.META)
try :
@@ -76,17 +79,19 @@ class InfraWorkload(APIView):
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
- def get(self, request, vimid=""):
- self._logger.info("vimid: %s" % (vimid))
+ def get(self, request, vimid="", requri=""):
+ self._logger.info("vimid,requri: %s, %s" % (vimid, requri))
self._logger.debug("META: %s" % request.META)
try :
-
+ stack_id = requri
+ workload_data = self.heatbridge_update(request, vimid, stack_id)
# stub response
resp_template = {
"template_type": "heat",
"workload_id": "3095aefc-09fb-4bc7-b1f0-f21a304e864c",
"workload_status": "CREATE_IN_PROCESS",
+ 'workload_data': workload_data
}
self._logger.info("RESP with data> result:%s" % resp_template)
@@ -103,11 +108,12 @@ class InfraWorkload(APIView):
return Response(data={'error': str(e)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
- def delete(self, request, vimid=""):
- self._logger.info("vimid: %s" % (vimid))
+ def delete(self, request, vimid="", requri=""):
+ self._logger.info("vimid,requri: %s, %s" % (vimid,requri))
self._logger.debug("META: %s" % request.META)
try :
+ stack_id = requri
# stub response
self._logger.info("RESP with data> result:%s" % "")
@@ -124,6 +130,148 @@ class InfraWorkload(APIView):
return Response(data={'error': str(e)},
status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+ def heatbridge_update(self, request, vimid, stack_id):
+ '''
+ update heat resource to AAI for the specified cloud region and tenant
+ The resources includes: vserver, vserver/l-interface,
+ :param request:
+ :param vimid:
+ :param stack_id:
+ :return:
+ '''
+
+ cloud_owner, regionid = extsys.decode_vim_id(vimid)
+ # should go via multicloud proxy so that the selflink is updated by multicloud
+ v2_token_resp_json = helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
+ cloud_owner, regionid, "/v2.0/tokens")
+ if not v2_token_resp_json:
+ logger.error("authenticate fails:%s,%s" % (cloud_owner, regionid))
+ return
+ tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+ # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
+
+ # common prefix
+ aai_cloud_region = "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
+ % (cloud_owner, regionid, tenant_id)
+
+ # get stack resource
+ service_type = "orchestration"
+ resource_uri = "/stacks/%s/resources"%(stack_id)
+ self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+ content = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json, service_type, resource_uri, None, "GET")
+ resources = content.get('resources', []) if content else []
+
+ #find and update resources
+ transactions = []
+ for resource in resources:
+ if resource.get('resource_type', None) == 'OS::Nova::Server':
+ # retrieve vserver details
+ service_type = "compute"
+ resource_uri = "/servers/%s" % (resource['physical_resource_id'])
+ self._logger.info("retrieve vserver detail, URI:%s" % resource_uri)
+ content = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json, service_type,
+ resource_uri, None, "GET")
+ self._logger.debug(" resp data:%s" % content)
+ vserver_detail = content.get('server', None) if content else None
+ if vserver_detail:
+ # compose inventory entry for vserver
+ vserver_link = ""
+ for link in vserver_detail['links']:
+ if link['rel'] == 'self':
+ vserver_link = link['href']
+ break
+ pass
+
+                    # note: relationship-list to flavor/image is not updated yet
+ # note: volumes is not updated yet
+ # note: relationship-list to vnf will be handled somewhere else
+ aai_resource = {
+ 'body': {
+ 'vserver-name': vserver_detail['name'],
+ 'vserver-name2': vserver_detail['name'],
+ "vserver-id": vserver_detail['id'],
+ "vserver-selflink": vserver_link,
+ "prov-status": vserver_detail['status']
+ },
+ "uri": aai_cloud_region + "/vservers/vserver/%s"\
+ % ( vserver_detail['id'])}
+
+ try:
+ # then update the resource
+ retcode, content, status_code = \
+ restcall.req_to_aai(aai_resource['uri'], "PUT", content=aai_resource['body'])
+
+ if retcode == 0 and content:
+ content = json.JSONDecoder().decode(content)
+ self._logger.debug("AAI update %s response: %s" % (aai_resource['uri'], content))
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ pass
+
+ aai_resource_transactions = {"put": [aai_resource]}
+ transactions.append(aai_resource_transactions)
+ #self._logger.debug("aai_resource :%s" % aai_resource_transactions)
+ pass
+
+ for resource in resources:
+ if resource.get('resource_type', None) == 'OS::Neutron::Port':
+ # retrieve vserver details
+ service_type = "network"
+ resource_uri = "/v2.0/ports/%s" % (resource['physical_resource_id'])
+ self._logger.info("retrieve vserver detail, URI:%s" % resource_uri)
+ content = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json, service_type,
+ resource_uri, None, "GET")
+ self._logger.debug(" resp data:%s" % content)
+
+                vport_detail = content.get('port', None) if content else None
+ if vport_detail:
+ # compose inventory entry for vport
+ # note: l3-interface-ipv4-address-list, l3-interface-ipv6-address-list are not updated yet
+                    # note: network-name is not updated yet since the detail comes with network-id
+ aai_resource = {
+ "body": {
+ "interface-name": vport_detail['name'],
+ "interface-id": vport_detail['id'],
+ "macaddr": vport_detail['mac_address']
+ },
+ 'uri': aai_cloud_region + "/vservers/vserver/%s/l-interfaces/l-interface/%s" \
+ % (vport_detail['device_id'], vport_detail['name'])
+ }
+ try:
+ # then update the resource
+ retcode, content, status_code = \
+ restcall.req_to_aai(aai_resource['uri'], "PUT", content=aai_resource['body'])
+
+ if retcode == 0 and content:
+ content = json.JSONDecoder().decode(content)
+ self._logger.debug("AAI update %s response: %s" % (aai_resource['uri'], content))
+ except Exception as e:
+ self._logger.error(traceback.format_exc())
+ pass
+
+ aai_resource_transactions = {"put": [aai_resource]}
+ transactions.append(aai_resource_transactions)
+ # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
+
+ pass
+
+ aai_transactions = {"transactions": transactions}
+ self._logger.debug("aai_transactions :%s" % aai_transactions)
+
+ return aai_transactions
+
+ def heatbridge_delete(self, request, stack_id, vimid, tenant_id):
+ '''
+ remove heat resource from AAI for the specified cloud region and tenant
+ The resources includes: vserver, vserver/l-interface,
+ :param request:
+ :param stack_id:
+ :param vimid:
+ :param tenant_id:
+ :return:
+ '''
+ pass
+
class APIv1InfraWorkload(InfraWorkload):
@@ -131,23 +279,23 @@ class APIv1InfraWorkload(InfraWorkload):
super(APIv1InfraWorkload, self).__init__()
# self._logger = logger
- def post(self, request, cloud_owner="", cloud_region_id=""):
+ def post(self, request, cloud_owner="", cloud_region_id="", requri=""):
#self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" % (cloud_owner, cloud_region_id, request.data))
#self._logger.debug("META: %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- return super(APIv1InfraWorkload, self).post(request, vimid)
+ return super(APIv1InfraWorkload, self).post(request, vimid, requri)
- def get(self, request, cloud_owner="", cloud_region_id=""):
+ def get(self, request, cloud_owner="", cloud_region_id="", requri=""):
#self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" % (cloud_owner, cloud_region_id, request.data))
#self._logger.debug("META: %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- return super(APIv1InfraWorkload, self).get(request, vimid)
+ return super(APIv1InfraWorkload, self).get(request, vimid, requri)
- def delete(self, request, cloud_owner="", cloud_region_id=""):
+ def delete(self, request, cloud_owner="", cloud_region_id="", requri=""):
#self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" % (cloud_owner, cloud_region_id, request.data))
#self._logger.debug("META: %s" % request.META)
vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- return super(APIv1InfraWorkload, self).delete(request, vimid)
+ return super(APIv1InfraWorkload, self).delete(request, vimid, requri)
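
Note: heatbridge_update returns the AAI transactions it attempted, one "put" per discovered vserver or l-interface. The assumed shape of the return value is sketched below with illustrative ids taken from the mocks earlier in this change:

    aai_transactions = {
        "transactions": [
            {"put": [{
                "uri": "/cloud-infrastructure/cloud-regions/cloud-region/"
                       "CloudOwner/RegionOne/tenants/tenant/"
                       "0e148b76ee8c42f78d37013bf6b7b1ae/vservers/vserver/"
                       "c4b575fa-ed85-4642-ab4b-335cb5744721",
                "body": {
                    "vserver-name": "onap-aaf",
                    "vserver-id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
                    "prov-status": "ACTIVE"
                }
            }]}
        ]
    }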
diff --git a/windriver/titanium_cloud/urls.py b/windriver/titanium_cloud/urls.py
index bd5af421..1d54325b 100644
--- a/windriver/titanium_cloud/urls.py
+++ b/windriver/titanium_cloud/urls.py
@@ -90,8 +90,6 @@ urlpatterns = [
registration.APIv1Registry.as_view()),
url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/exten',
include('titanium_cloud.extensions.urlsV1')),
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
- include('titanium_cloud.proxy.urlsV1')),
url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/tenants/?$',
tenants.APIv1Tenants.as_view()),
url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/'
@@ -103,4 +101,8 @@ urlpatterns = [
vesagent_ctrl.APIv1VesAgentCtrl.as_view()),
url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/?$',
infra_workload.APIv1InfraWorkload.as_view()),
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/(?P<requri>[0-9a-zA-Z_-]*)/?$',
+ infra_workload.APIv1InfraWorkload.as_view()),
+ url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
+ include('titanium_cloud.proxy.urlsV1')),
]
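
Note: the urls.py change matters because Django matches urlpatterns in order: the catch-all proxy include is moved to the end so that the new, more specific infra_workload routes are tried first. The resulting ordering, sketched:

    urlpatterns = [
        # specific routes first, e.g.
        url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
            r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/?$',
            infra_workload.APIv1InfraWorkload.as_view()),
        # ... other specific routes ...
        # catch-all proxy include stays last, or it would shadow everything above
        url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
            r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
            include('titanium_cloud.proxy.urlsV1')),
    ]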