author     Xiaohua Zhang <xiaohua.zhang@windriver.com>    2019-04-09 10:30:44 +0000
committer  Xiaohua Zhang <xiaohua.zhang@windriver.com>    2019-04-09 10:30:44 +0000
commit     14132a2c06bc6b6431f5be90cecf303132d94103 (patch)
tree       1c914fa84e52cde8937e356156f16be9e14744c1 /share/starlingx_base
parent     5dc8f722d834229c27aab4ae2e8a609db09edb0d (diff)
Fix bugs for infra_workload APIs
Change-Id: Ieda8b2f7f68911bc348613cf7b18b40ea800b1e2
Issue-ID: MULTICLOUD-541
Signed-off-by: Xiaohua Zhang <xiaohua.zhang@windriver.com>
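
The payload changes in both files share one pattern: each backlog item already registers a bound method (worker_self.azcap_audit, worker_self.workload_update, worker_self.workload_delete) as its worker, so after this commit the payload tuple carries only the call arguments, not worker_self itself. A minimal sketch of that dispatch convention, assuming the backlog thread invokes worker(*payload); the AuditWorker class and run_backlog_item helper below are hypothetical stand-ins, not the actual MultiCloud thread code:

```python
# Minimal sketch (not the actual MultiCloud backlog thread) of why the
# payload tuples in this commit drop worker_self: the registered worker
# is already a bound method, so its instance travels with the callable.

class AuditWorker(object):
    def azcap_audit(self, vimid, project_idorname=None):
        # hypothetical audit body
        print("auditing %s as project %s" % (vimid, project_idorname))

def run_backlog_item(backlog_item):
    # assumed dispatch convention: call the worker with the payload tuple
    worker = backlog_item["worker"]
    payload = backlog_item["payload"]
    return worker(*payload)

worker_self = AuditWorker()
backlog_item = {
    "id": "CloudOwner_RegionOne",
    "worker": worker_self.azcap_audit,          # bound method carries the instance
    "payload": ("CloudOwner_RegionOne", None),  # fixed: no extra worker_self
    "repeat": 10 * 1000000,
}
run_backlog_item(backlog_item)
```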
Diffstat (limited to 'share/starlingx_base')
-rw-r--r--  share/starlingx_base/registration/registration.py |  7
-rw-r--r--  share/starlingx_base/resource/infra_workload.py   | 27
2 files changed, 19 insertions, 15 deletions
diff --git a/share/starlingx_base/registration/registration.py b/share/starlingx_base/registration/registration.py
index 507f0fbf..cf0281f7 100644
--- a/share/starlingx_base/registration/registration.py
+++ b/share/starlingx_base/registration/registration.py
@@ -58,7 +58,7 @@ class APIv0Registry(newton_registration.Registry):
         backlog_item = {
             "id": vimid,
             "worker": worker_self.azcap_audit,
-            "payload": (worker_self, vimid, specified_project_idorname),
+            "payload": (vimid, specified_project_idorname),
             "repeat": 10*1000000,  # repeat every 10 seconds
         }
         gAZCapAuditThread.add(backlog_item)
@@ -88,6 +88,9 @@ class APIv1Registry(newton_registration.Registry):
                           % (cloud_owner, cloud_region_id))
 
         try:
+            # Get the specified tenant id
+            specified_project_idorname = request.META.get("Project", None)
+
             vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
 
             # vim registration will trigger the start the audit of AZ capacity
@@ -98,7 +101,7 @@ class APIv1Registry(newton_registration.Registry):
             backlog_item = {
                 "id": vimid,
                 "worker": worker_self.azcap_audit,
-                "payload": (worker_self, vimid),
+                "payload": (vimid, specified_project_idorname),
                 "repeat": 5 * 1000000,  # repeat every 5 seconds
             }
             gAZCapAuditThread.add(backlog_item)
diff --git a/share/starlingx_base/resource/infra_workload.py b/share/starlingx_base/resource/infra_workload.py
index 6b064856..409d74ed 100644
--- a/share/starlingx_base/resource/infra_workload.py
+++ b/share/starlingx_base/resource/infra_workload.py
@@ -85,7 +85,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
             backlog_item = {
                 "id": workloadid,
                 "worker": worker_self.workload_update,
-                "payload": (worker_self, vimid, workloadid,
+                "payload": (vimid, workloadid,
                             request.data, specified_project_idorname),
                 "repeat": 0,  # one time job
                 # format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
@@ -97,7 +97,9 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
             gInfraWorkloadThread.add(backlog_item)
             if 0 == gInfraWorkloadThread.state():
                 gInfraWorkloadThread.start()
-
+            # progress = worker_self.workload_update(
+            #     vimid, workloadid,
+            #     request.data, specified_project_idorname)
             # now query the progress
             backlog_item = gInfraWorkloadThread.get(workloadid)
             if not backlog_item:
@@ -206,7 +208,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
                 settings.AAI_BASE_URL
             )
             progress_code, progress_status, progress_msg =\
-                worker_self.workload_status(
+                worker_self.workload_detail(
                     vimid, stack_id=workloadid,
                     project_idorname=specified_project_idorname)
 
@@ -277,7 +279,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
             backlog_item = {
                 "id": workloadid,
                 "worker": worker_self.workload_delete,
-                "payload": (worker_self, vimid, workloadid, request.data,
+                "payload": (vimid, workloadid, request.data,
                             specified_project_idorname),
                 "repeat": 0,  # one time job
                 # format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
@@ -335,29 +337,29 @@ class APIv1InfraWorkload(InfraWorkload):
         super(APIv1InfraWorkload, self).__init__()
         # self._logger = logger
 
-    def post(self, request, cloud_owner="", cloud_region_id=""):
+    def post(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #                  (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
 
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-        return super(APIv1InfraWorkload, self).post(request, vimid)
+        return super(APIv1InfraWorkload, self).post(request, vimid, workloadid)
 
-    def get(self, request, cloud_owner="", cloud_region_id="", requri=""):
+    def get(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #                  (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
 
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-        return super(APIv1InfraWorkload, self).get(request, vimid, requri)
+        return super(APIv1InfraWorkload, self).get(request, vimid, workloadid)
 
-    def delete(self, request, cloud_owner="", cloud_region_id="", requri=""):
+    def delete(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #                  (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
 
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
-        return super(APIv1InfraWorkload, self).delete(request, vimid, requri)
+        return super(APIv1InfraWorkload, self).delete(request, vimid, workloadid)
 
 
 class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
@@ -503,9 +505,8 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
         # reset to make sure "files" are empty
         template_data["files"] = {}
 
-        template_data["stack_name"] = vf_module_id \
-            if not hasattr(template_data, "stack_name")\
-            else template_data["stack_name"]
+        template_data["stack_name"] =\
+            template_data.get("stack_name", vf_module_id)
 
         # authenticate
         cloud_owner, regionid = extsys.decode_vim_id(vimid)
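
The last hunk changes behaviour beyond the payload cleanup: hasattr() tests object attributes rather than dictionary keys, so on a plain dict it is always False and the old conditional always overwrote a caller-supplied stack_name with vf_module_id; dict.get() keeps the provided value and only falls back to vf_module_id when the key is absent. A small self-contained sketch of the before/after behaviour (the sample template_data and vf_module_id values are invented for illustration):

```python
# Sketch of the stack_name fix in the last hunk: hasattr() checks object
# attributes, not dict keys, so on a plain dict it is always False and the
# old expression always discarded a caller-supplied stack_name.

template_data = {"stack_name": "my_stack", "files": {}}
vf_module_id = "vfmod-123"  # hypothetical fallback id

# old behaviour: hasattr(dict, "stack_name") is False, so this always
# picked vf_module_id and dropped "my_stack"
old_value = vf_module_id \
    if not hasattr(template_data, "stack_name") \
    else template_data["stack_name"]
assert old_value == "vfmod-123"

# fixed behaviour: keep an existing key, fall back to vf_module_id only
# when the template did not provide one
template_data["stack_name"] = template_data.get("stack_name", vf_module_id)
assert template_data["stack_name"] == "my_stack"
```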