author    | Huang Haibin <haibin.huang@intel.com> | 2018-09-19 14:31:08 +0800
committer | Huang Haibin <haibin.huang@intel.com> | 2018-09-19 14:31:08 +0800
commit    | 4ad0dab6583445ffd49366944b89ceae2de76c74 (patch)
tree      | 0d28e26130cd60e825fe43117bafd0b7dd0e9f3e
parent    | d43818d4f538439c893313fd460fd79121d75cf4 (diff)
Refactor delete of infra workload
Change-Id: I37ab8a106b978db7fff94492f0075714a77fc7a4
Issue-ID: MULTICLOUD-250
Signed-off-by: Huang Haibin <haibin.huang@intel.com>
-rw-r--r-- | pike/pike/resource/tests/tests_infra_workload.py |   2
-rw-r--r-- | pike/pike/resource/views/infra_workload.py       | 108
2 files changed, 65 insertions, 45 deletions
diff --git a/pike/pike/resource/tests/tests_infra_workload.py b/pike/pike/resource/tests/tests_infra_workload.py
index 92d1b4bb..51fd0837 100644
--- a/pike/pike/resource/tests/tests_infra_workload.py
+++ b/pike/pike/resource/tests/tests_infra_workload.py
@@ -102,7 +102,7 @@ MOCK_HEAT_CREATE_RESPONSE1 = {
 MOCK_HEAT_LIST_RESPONSE1 = {
     'stacks': [
         {
-            'resource_status':"CREATE_IN_PROCESS"
+            'stack_status':"CREATE_IN_PROCESS"
         }
     ]
 }
diff --git a/pike/pike/resource/views/infra_workload.py b/pike/pike/resource/views/infra_workload.py
index 415bf0d2..80cb112e 100644
--- a/pike/pike/resource/views/infra_workload.py
+++ b/pike/pike/resource/views/infra_workload.py
@@ -122,6 +122,7 @@ class InfraWorkload(APIView):
         try :
             # assume the workload_type is heat
+            template_type = "heat"
             stack_id = requri
             cloud_owner, regionid = extsys.decode_vim_id(vimid)
             # should go via multicloud proxy so that the selflink is updated by multicloud
@@ -138,20 +139,19 @@ class InfraWorkload(APIView):
             self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
             retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
                                                                          service_type, resource_uri, None, "GET")
-            resources = content.get('stacks', []) if retcode > 0 and content else []
-
-            resource_status = resources[0]["resource_status"] if len(resources)>0 else ""
+            stacks = content.get('stacks', []) if retcode == 0 and content else []
+            stack_status = stacks[0]["stack_status"] if len(stacks)>0 else ""

             resp_template = {
-                "template_type": "HEAT",
+                "template_type": template_type,
                 "workload_id": stack_id,
-                "workload_status": resource_status
+                "workload_status": stack_status
             }

             if retcode > 0:
                 resp_template['workload_response'] = content

-            if ('CREATE_COMPLETE' == resource_status):
+            if ('CREATE_COMPLETE' == stack_status):
                 self.heatbridge_update(request, vimid, stack_id)

             self._logger.info("RESP with data> result:%s" % resp_template)
@@ -197,7 +197,7 @@ class InfraWorkload(APIView):
             resource_uri = "/stacks/%s/resources"%(stack_id)
             self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
             retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
                                                                          service_type, resource_uri, None, "GET")
-            resources = content.get('resources', []) if retcode==0 and content else []
+            resources = content.get('resources', []) if retcode == 0 and content else []

             #find and update resources
             transactions = []
@@ -307,45 +307,53 @@ class InfraWorkload(APIView):
         self._logger.debug("META: %s" % request.META)
         try :
-            # we just support heat template
-            workload_id = requri
-            tenant_name = None
-            vim = VimDriverUtils.get_vim_info(vimid)
+            # assume the workload_type is heat
+            template_type = "heat"
+            stack_id = requri
             cloud_owner, regionid = extsys.decode_vim_id(vimid)
-            v2_token_resp_json = helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
-                                                                 cloud_owner, regionid, "/v2.0/tokens")
-            if not v2_token_resp_json:
-                logger.error("authenticate fails:%s,%s" % (cloud_owner, regionid))
+            # should go via multicloud proxy so that the selflink is updated by multicloud
+            retcode, v2_token_resp_json, os_status = helper.MultiCloudIdentityHelper(
+                settings.MULTICLOUD_API_V1_PREFIX,
+                cloud_owner, regionid, "/v2.0/tokens")
+            if retcode > 0 or not v2_token_resp_json:
+                logger.error("authenticate fails:%s, %s, %s" % (cloud_owner, regionid, v2_token_resp_json))
                 return

-            tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
-            interface = 'public'
-            service = {'service_type': 'orchestration',
-                       'interface': interface,
-                       'region_id': vim['openstack_region_id']
-                       if vim.get('openstack_region_id')
-                       else vim['cloud_region_id']}
-
-            req_body = template_data
-            url_get = "/v1/%s/stacks" % (tenant_id)
-            sess = VimDriverUtils.get_session(vim, tenant_name)
-            get_resp = sess.get(url_get,
-                                data = req_body,
-                                endpoint_filter = service)
-            stack_info = get_resp.json()
-            stacks = stack_info["stacks"]
-            stack_name = ""
-            for stack in stacks:
-                if workload_id == stack["id"]:
-                    stack_name = stack["stack_name"]
-                    break
-
-            req_source = "/v1/%s/stacks/%s/%s" % (tenant_id, stack_name, workload_id)
-            resp = sess.delete(req_resource,
-                               endpoint_filter = service)
-
-            resp_status = status.HTTP_204_NO_CONTENT
-            self._logger.info("RESP with data> result:%s" % "")
-            return Response(status=resp_status)
+            # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
+            # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]
+
+            # get stack status
+            service_type = "orchestration"
+            resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
+            self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+            retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
+                                                                         service_type, resource_uri, None, "GET")
+            stacks = content.get('stacks', []) if retcode == 0 and content else []
+            # assume there is at most 1 stack returned since it was filtered by id
+            stack1 = stacks[0] if stacks else None
+            stack_status = ""
+
+            if stack1 and 'CREATE_COMPLETE' == stack1['stack_status']:
+                # delete the stack
+                resource_uri = "/stacks/%s/%s" % (stack1['stack_name'], stack1['id'])
+                self._logger.info("delete stack, URI:%s" % resource_uri)
+                retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner, regionid, v2_token_resp_json,
+                                                                             service_type, resource_uri, None, "DELETE")
+                if retcode == 0:
+                    stack_status = "DELETE_IN_PROCESS"
+                    # and update AAI inventory by heatbridge-delete
+                    self.heatbridge_delete(request, vimid, stack1['id'])
+
+            resp_template = {
+                "template_type": template_type,
+                "workload_id": stack_id,
+                "workload_status": stack_status
+            }
+
+            if retcode > 0:
+                resp_template["workload_response"] = content
+
+            self._logger.info("RESP with data> result:%s" % resp_template)
+            return Response(status=os_status)

         except VimDriverNewtonException as e:
             self._logger.error("Plugin exception> status:%s,error:%s"
                                % (e.status_code, e.content))
@@ -358,6 +366,18 @@ class InfraWorkload(APIView):
             return Response(data={'error': str(e)},
                             status=status.HTTP_500_INTERNAL_SERVER_ERROR)

+    def heatbridge_delete(self, request, stack_id, vimid):
+        '''
+        remove heat resource from AAI for the specified cloud region and tenant
+        The resources includes: vserver, vserver/l-interface,
+        :param request:
+        :param stack_id:
+        :param vimid:
+        :param tenant_id:
+        :return:
+        '''
+        pass
+

 class APIv1InfraWorkload(InfraWorkload):
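
The refactored delete path now goes entirely through MultiCloudIdentityHelper and MultiCloudServiceHelper: look up the stack by id, issue the DELETE only when it is in CREATE_COMPLETE, report DELETE_IN_PROCESS, and hand off to heatbridge_delete for the AAI clean-up. Below is a minimal, hypothetical unit-test sketch of that flow in the mock-based style of tests_infra_workload.py; the request URL prefix, the vimid value, the MOCK_TOKEN_RESPONSE fixture and the mocked payloads are assumptions for illustration and are not part of this change. Only the helper call signatures, their (retcode, body, os_status) return shape and the 'stack_status'/'CREATE_COMPLETE' handling come from the diff above.

```python
# Hypothetical test sketch (not part of this commit).
import mock
import unittest
from django.test import Client
from rest_framework import status

from pike.resource.views import infra_workload

# assumed fixtures for illustration
MOCK_TOKEN_RESPONSE = {"access": {"token": {"tenant": {"id": "tid", "name": "tname"}}}}
MOCK_HEAT_LIST_COMPLETE = {"stacks": [
    {"id": "stack-1", "stack_name": "s1", "stack_status": "CREATE_COMPLETE"}]}


class TestInfraWorkloadDelete(unittest.TestCase):
    def setUp(self):
        self.client = Client()

    @mock.patch.object(infra_workload.helper, "MultiCloudServiceHelper")
    @mock.patch.object(infra_workload.helper, "MultiCloudIdentityHelper")
    def test_delete_stack(self, mock_identity, mock_service):
        # identity helper returns (retcode, token_json, os_status)
        mock_identity.return_value = (0, MOCK_TOKEN_RESPONSE, status.HTTP_201_CREATED)
        # 1st call: GET /stacks?id=..., 2nd call: DELETE /stacks/<name>/<id>
        mock_service.side_effect = [
            (0, MOCK_HEAT_LIST_COMPLETE, status.HTTP_200_OK),
            (0, {}, status.HTTP_204_NO_CONTENT),
        ]
        # URL prefix and vimid format are assumptions; adjust to the routes in urls.py
        response = self.client.delete(
            "/api/multicloud-pike/v0/CloudOwner1_RegionOne1/infra_workload/stack-1")
        # the view returns the os_status of the DELETE helper call
        self.assertEqual(status.HTTP_204_NO_CONTENT, response.status_code)
```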
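
Note that the commit leaves heatbridge_delete as a `pass` stub, and the stub's signature lists stack_id before vimid while the call site passes (request, vimid, stack1['id']); the sketch below follows the call-site order. It is purely illustrative of one possible direction, not the commit's implementation: it assumes it sits inside InfraWorkload in infra_workload.py, where extsys, settings, helper and logger are already available (as the diff shows), and it uses only the helper signatures visible above plus the standard Heat resource fields resource_type and physical_resource_id. The AAI removal itself is left as a placeholder because its client API is outside this diff.

```python
# Illustrative sketch only -- the commit itself leaves heatbridge_delete as `pass`.
def heatbridge_delete(self, request, vimid, stack_id):
    '''sketch: remove a stack's vservers (and l-interfaces) from AAI'''
    cloud_owner, regionid = extsys.decode_vim_id(vimid)
    retcode, v2_token_resp_json, os_status = helper.MultiCloudIdentityHelper(
        settings.MULTICLOUD_API_V1_PREFIX, cloud_owner, regionid, "/v2.0/tokens")
    if retcode > 0 or not v2_token_resp_json:
        logger.error("authenticate fails:%s, %s" % (cloud_owner, regionid))
        return

    # list the stack's resources, as the update path above already does
    service_type = "orchestration"
    resource_uri = "/stacks/%s/resources" % stack_id
    retcode, content, os_status = helper.MultiCloudServiceHelper(
        cloud_owner, regionid, v2_token_resp_json, service_type, resource_uri, None, "GET")
    resources = content.get('resources', []) if retcode == 0 and content else []

    for resource in resources:
        if resource.get('resource_type') == 'OS::Nova::Server':
            server_id = resource.get('physical_resource_id')
            # placeholder: delete the matching vserver / l-interface objects
            # from AAI here; the AAI client call is outside this diff.
            self._logger.info("would remove vserver %s from AAI" % server_id)
```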