| author | Xiaohua Zhang <xiaohua.zhang@windriver.com> | 2019-04-10 08:51:19 +0000 |
| --- | --- | --- |
| committer | Xiaohua Zhang <xiaohua.zhang@windriver.com> | 2019-04-10 09:48:26 +0000 |
| commit | 2024bb59b1d4ec20300304aed3a69132b9c082bc (patch) | |
| tree | 349a90cd5fc0c64335142c09bd76db86e3d2d7e7 | /share/starlingx_base |
| parent | 14132a2c06bc6b6431f5be90cecf303132d94103 (diff) | |
Fix bug in AZ capacity check
Fix bugs in restcall module
Update the status code of the workload API
Remove unused event API from lenovo
Change-Id: Iaa16bc3aca42c4583408384c73802ff4debe1b19
Issue-ID: MULTICLOUD-542
Signed-off-by: Xiaohua Zhang <xiaohua.zhang@windriver.com>
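
Regarding "Update the status code of the workload API": in the infra_workload.py hunks below, a non-zero internal progress code is no longer passed through as the HTTP status; it is mapped to HTTP 500 instead, and the workload_create failure paths return DRF status constants such as HTTP_400_BAD_REQUEST. A minimal sketch of that mapping, assuming Django REST Framework's `status` module as used by the project; the helper name `http_status_for` is illustrative only:

```python
# Sketch only: maps a worker's internal (retcode, state, message) retcode to
# the HTTP status the API returns. Mirrors the change in infra_workload.py,
# where a non-zero retcode now yields 500 instead of being passed through.
from rest_framework import status


def http_status_for(progress_code):
    # retcode 0 means the backlog item / stack query succeeded
    if progress_code == 0:
        return status.HTTP_200_OK
    # any other internal retcode is surfaced as a server-side error
    return status.HTTP_500_INTERNAL_SERVER_ERROR


if __name__ == "__main__":
    print(http_status_for(0))    # 200
    print(http_status_for(13))   # 500
```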
Diffstat (limited to 'share/starlingx_base')
| -rw-r--r-- | share/starlingx_base/registration/registration.py | 31 |
| -rw-r--r-- | share/starlingx_base/resource/capacity.py | 19 |
| -rw-r--r-- | share/starlingx_base/resource/infra_workload.py | 93 |
3 files changed, 77 insertions, 66 deletions
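
Before the file-by-file diff: the capacity.py hunk below replaces the placeholder values (which reused `vcpus` for every field) with a conversion of the cached hypervisor statistics, applying the default `cpu_allocation_ratio` of 16 for vCPUs and converting memory from MB to GB. A small standalone sketch of that conversion under those assumptions; the function name `az_capacity` and the sample dict are illustrative, while the field names come from the diff:

```python
# Sketch of the AZ capacity conversion introduced in capacity.py.
# azcap stands in for the JSON blob cached per availability zone by the
# azcap_audit job (nova hypervisor-statistics style fields).

CPU_ALLOCATION_RATIO = 16  # nova default, hard-coded in the patch


def az_capacity(az_name, azcap):
    return {
        "availability-zone-name": az_name,
        # vCPUs are overcommitted by the allocation ratio
        "vCPUAvail": (azcap.get("vcpus", 0)
                      - azcap.get("vcpus_used", 0)) * CPU_ALLOCATION_RATIO,
        "vCPUTotal": azcap.get("vcpus", 0) * CPU_ALLOCATION_RATIO,
        # memory is cached in MB, reported in GB
        "MemoryAvail": azcap.get("free_ram_mb", 0) / 1024.0,
        "MemoryTotal": azcap.get("memory_mb", 0) / 1024.0,
        # storage is already in GB
        "StorageAvail": azcap.get("free_disk_gb", 0),
        "StorageTotal": azcap.get("local_gb", 0),
    }


if __name__ == "__main__":
    sample = {"vcpus": 40, "vcpus_used": 10, "memory_mb": 131072,
              "free_ram_mb": 65536, "local_gb": 2000, "free_disk_gb": 1500}
    print(az_capacity("nova-az1", sample))
```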
diff --git a/share/starlingx_base/registration/registration.py b/share/starlingx_base/registration/registration.py
index cf0281f7..dd71c1b4 100644
--- a/share/starlingx_base/registration/registration.py
+++ b/share/starlingx_base/registration/registration.py
@@ -165,6 +165,7 @@ class RegistryHelper(newton_registration.RegistryHelper):
         multi_region_discovery = cloud_extra_info.get(
             "multi-region-discovery", None) if cloud_extra_info else None
 
+        sess = None
         if project_idorname:
             try:
                 # check if specified with tenant id
@@ -406,6 +407,7 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
             self._logger.warn("azcap_audit no valid vimid: %s" % vimid)
             return
 
+        sess = None
         if project_idorname:
             try:
                 # check if specified with tenant id
@@ -459,9 +461,8 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
                     viminfo, vimid,
                     "availabilityZoneInfo"):
                 az_info = {
-                    'availability-zone-name': az['zoneName'],
-                    'operational-status': az['zoneState']['available']
-                    if az.get('zoneState') else '',
+                    'availability-zone-name': az.get('zoneName', ""),
+                    'operational-status': az.get('zoneState', {}).get('available', ""),
                     'hypervisor-type': '',
                 }
                 # filter out the default az: "internal" and "nova"
@@ -480,7 +481,7 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
                 # Get current cap info of azName
                 azCapCacheKey = "cap_" + vimid + "_" + azName
                 azCapInfoCacheStr = cache.get(azCapCacheKey)
-                azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else None
+                azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else {}
 
                 for psname in pservers_info:
                     psinfo = hypervisors_dict.get(psname, None)
@@ -490,7 +491,7 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
                     # get current pserver cap info
                     psCapInfoCacheKey = "cap_" + vimid + "_" + psname
                     psCapInfoCacheStr = cache.get(psCapInfoCacheKey)
-                    psCapInfoCache = json.loads(psCapInfoCacheStr) if psCapInfoCacheStr else None
+                    psCapInfoCache = json.loads(psCapInfoCacheStr) if psCapInfoCacheStr else {}
 
                     # compare latest info with cached one
                     vcpu_delta = 0
@@ -523,19 +524,21 @@ class InfraResourceAuditor(newton_registration.RegistryHelper):
                         localstorage_free_delta += psinfo.get("free_disk_gb", 0)\
                             - psCapInfoCache.get("free_disk_gb", 0)
                         psCapInfoCache["free_disk_gb"] = psinfo.get("free_disk_gb", 0)
-                    pass
-                # now apply the delta to azCapInfo
-                azCapInfoCache["vcpus"] = azCapInfoCache.get("vcpus", 0) + vcpu_delta
-                azCapInfoCache["memory_mb"] = azCapInfoCache.get("memory_mb", 0) + mem_delta
-                azCapInfoCache["local_gb"] = azCapInfoCache.get("local_gb", 0) + localstorage_delta
-                azCapInfoCache["vcpus_used"] = azCapInfoCache.get("vcpus_used", 0) + vcpu_used_delta
-                azCapInfoCache["free_ram_mb"] = azCapInfoCache.get("free_ram_mb", 0) + mem_free_delta
-                azCapInfoCache["free_disk_gb"] = azCapInfoCache.get("free_disk_gb", 0) + localstorage_free_delta
+                    cache.set(psCapInfoCacheKey, json.dumps(psCapInfoCache), 3600 * 24)
+
+                    # now apply the delta to azCapInfo
+                    azCapInfoCache["vcpus"] = azCapInfoCache.get("vcpus", 0) + vcpu_delta
+                    azCapInfoCache["memory_mb"] = azCapInfoCache.get("memory_mb", 0) + mem_delta
+                    azCapInfoCache["local_gb"] = azCapInfoCache.get("local_gb", 0) + localstorage_delta
+                    azCapInfoCache["vcpus_used"] = azCapInfoCache.get("vcpus_used", 0) + vcpu_used_delta
+                    azCapInfoCache["free_ram_mb"] = azCapInfoCache.get("free_ram_mb", 0) + mem_free_delta
+                    azCapInfoCache["free_disk_gb"] = azCapInfoCache.get("free_disk_gb", 0) + localstorage_free_delta
+                    pass
 
                 # update the cache
                 cache.set(azCapCacheKey,
                           json.dumps(azCapInfoCache), 3600 * 24)
-            cache.set(vimAzCacheKey, vimAzList, 3600 * 24)
+            cache.set(vimAzCacheKey, json.dumps(vimAzList), 3600 * 24)
         except Exception as e:
             self._logger.error("azcap_audit raise exception: %s" % e)
             pass
diff --git a/share/starlingx_base/resource/capacity.py b/share/starlingx_base/resource/capacity.py
index cbdedaa3..861d4d50 100644
--- a/share/starlingx_base/resource/capacity.py
+++ b/share/starlingx_base/resource/capacity.py
@@ -67,27 +67,34 @@ class CapacityCheck(newton_capacity.CapacityCheck):
             # get list of AZ
             vimAzCacheKey = "cap_azlist_" + vimid
             vimAzListCacheStr = cache.get(vimAzCacheKey)
+            self._logger.debug("Found AZ list: %s" % vimAzListCacheStr)
             vimAzListCache = json.loads(vimAzListCacheStr) if vimAzListCacheStr else []
 
             azCapInfoList = []
             for azName in vimAzListCache:
                 azCapCacheKey = "cap_" + vimid + "_" + azName
                 azCapInfoCacheStr = cache.get(azCapCacheKey)
+                self._logger.debug("Found AZ info: %s, %s" % (azCapCacheKey, azCapInfoCacheStr))
                 if not azCapInfoCacheStr:
                     continue
                 azCapInfoCache = json.loads(azCapInfoCacheStr) if azCapInfoCacheStr else None
                 azCapInfo = {}
                 azCapInfo["availability-zone-name"] = azName
-                azCapInfo["vCPUAvail"] = azCapInfoCache.get("vcpus", 0) + azCapInfoCache.get("vcpus_used", 0)
-                azCapInfo["vCPUTotal"] = azCapInfoCache.get("vcpus", 0)
-                azCapInfo["MemoryAvail"] = azCapInfoCache.get("vcpus", 0)
-                azCapInfo["MemoryTotal"] = azCapInfoCache.get("vcpus", 0)
-                azCapInfo["StorageAvail"] = azCapInfoCache.get("vcpus", 0)
-                azCapInfo["StorageTotal"] = azCapInfoCache.get("vcpus", 0)
+                # vcpu ratio: cpu_allocation_ratio=16 by default
+                azCapInfo["vCPUAvail"] = \
+                    (azCapInfoCache.get("vcpus", 0)
+                     - azCapInfoCache.get("vcpus_used", 0)) * 16
+                azCapInfo["vCPUTotal"] = azCapInfoCache.get("vcpus", 0) * 16
+                # mem size in MB
+                azCapInfo["MemoryAvail"] = azCapInfoCache.get("free_ram_mb", 0) / 1024.0
+                azCapInfo["MemoryTotal"] = azCapInfoCache.get("memory_mb", 0) / 1024.0
+                azCapInfo["StorageAvail"] = azCapInfoCache.get("free_disk_gb", 0)
+                azCapInfo["StorageTotal"] = azCapInfoCache.get("local_gb", 0)
                 azCapInfoList.append(azCapInfo)
 
             return azCapInfoList
         except Exception as e:
+            self._logger.error(traceback.format_exc())
            return azCapInfo
diff --git a/share/starlingx_base/resource/infra_workload.py b/share/starlingx_base/resource/infra_workload.py
index 409d74ed..fc6d7ef2 100644
--- a/share/starlingx_base/resource/infra_workload.py
+++ b/share/starlingx_base/resource/infra_workload.py
@@ -91,7 +91,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
                 # format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
                 "status": (
                     0, "UPDATE_IN_PROGRESS",
-                    "backlog to update workload %s pends to schedule" % workloadid
+                    "backlog to update workload %s is on progress" % workloadid
                 )
             }
             gInfraWorkloadThread.add(backlog_item)
@@ -158,10 +158,10 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
             # now check the query params in case of query existing of workload
             querystr = request.META.get("QUERY_STRING", None)
             qd = QueryDict(querystr).dict() if querystr else None
-            workload_name = qd.get("name", None) if qd else None
-            workload_id = qd.get("id", None) if qd else None
+            workload_query_name = qd.get("name", None) if qd else None
+            workload_query_id = qd.get("id", None) if qd else None
 
-            if not workload_name and not workload_id:
+            if not workload_query_name and not workload_query_id:
                 resp_template["workload_status_reason"] =\
                     "workload id is not found in API url"
                 return Response(
@@ -177,64 +177,65 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
             # now query the status of workload by name or id, id as 1st priority
             progress_code, progress_status, progress_msg =\
                 0, "GET_FAILED", ""
-            if not workload_id:
-                # by name
+            if workload_query_id:
+                # by id
                 progress_code, progress_status, progress_msg =\
                     worker_self.workload_status(
-                        vimid, stack_name=workload_name,
+                        vimid, stack_id=workload_query_id,
                         project_idorname=specified_project_idorname
                     )
             else:
-                # by id
+                # by name or get all stacks
                 progress_code, progress_status, progress_msg =\
                     worker_self.workload_status(
-                        vimid, stack_id=workloadid,
+                        vimid, stack_name=workload_query_name,
                         project_idorname=specified_project_idorname
                     )
 
             resp_template["workload_status"] = progress_status
             resp_template["workload_status_reason"] = progress_msg
             status_code = status.HTTP_200_OK \
-                if progress_code == 0 else progress_code
+                if progress_code == 0 else status.HTTP_500_INTERNAL_SERVER_ERROR  # progress_code
 
             pass
-            # now query the progress
-            backlog_item = gInfraWorkloadThread.get(workloadid)
-            if not backlog_item:
-                # backlog item not found, so check the stack status
-                worker_self = InfraWorkloadHelper(
-                    settings.MULTICLOUD_API_V1_PREFIX,
-                    settings.AAI_BASE_URL
-                )
-                progress_code, progress_status, progress_msg =\
-                    worker_self.workload_detail(
-                        vimid, stack_id=workloadid,
-                        project_idorname=specified_project_idorname)
-
-                resp_template["workload_status"] = progress_status
-                resp_template["workload_status_reason"] = progress_msg
-                status_code = status.HTTP_200_OK\
-                    if progress_code == 0 else progress_code
-            else:
-                progress = backlog_item.get(
-                    "status",
-                    (13, "GET_FAILED",
-                     "Unexpected:status not found in backlog item")
-                )
-                try:
-                    progress_code = progress[0]
-                    progress_status = progress[1]
-                    progress_msg = progress[2]
-                    # if gInfraWorkloadThread.expired(workloadid):
-                    #     gInfraWorkloadThread.remove(workloadid)
+            # now query the progress
+            backlog_item = gInfraWorkloadThread.get(workloadid)
+            if not backlog_item:
+                # backlog item not found, so check the stack status
+                worker_self = InfraWorkloadHelper(
+                    settings.MULTICLOUD_API_V1_PREFIX,
+                    settings.AAI_BASE_URL
+                )
+                progress_code, progress_status, progress_msg =\
+                    worker_self.workload_detail(
+                        vimid, stack_id=workloadid,
+                        project_idorname=specified_project_idorname)
+
                 resp_template["workload_status"] = progress_status
                 resp_template["workload_status_reason"] = progress_msg
                 status_code = status.HTTP_200_OK\
                     if progress_code == 0 else progress_code
-            except Exception as e:
-                resp_template["workload_status_reason"] = progress
+
+            else:
+                progress = backlog_item.get(
+                    "status",
+                    (13, "GET_FAILED",
+                     "Unexpected:status not found in backlog item")
+                )
+                try:
+                    progress_code = progress[0]
+                    progress_status = progress[1]
+                    progress_msg = progress[2]
+                    # if gInfraWorkloadThread.expired(workloadid):
+                    #     gInfraWorkloadThread.remove(workloadid)
+                    resp_template["workload_status"] = progress_status
+                    resp_template["workload_status_reason"] = progress_msg
+                    status_code = status.HTTP_200_OK\
+                        if progress_code == 0 else progress_code
+                except Exception as e:
+                    resp_template["workload_status_reason"] = progress
 
             return Response(data=resp_template, status=status_code)
@@ -286,7 +287,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
                 "status": (
                     0, "DELETE_IN_PROGRESS",
                     "backlog for delete the workload %s "
-                    "pends to schedule" % workloadid
+                    "is on progress" % workloadid
                 )
             }
             gInfraWorkloadThread.add(backlog_item)
@@ -488,7 +489,7 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
         template_data = data.get("template_data", {})
         # resp_template = None
         if not template_type or "heat" != template_type.lower():
-            return 14, "CREATE_FAILED", \
+            return status.HTTP_400_BAD_REQUEST, "CREATE_FAILED", \
                 "Bad parameters: template type %s is not heat" %\
                 template_type or ""
 
@@ -523,7 +524,7 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
                 (cloud_owner, regionid, v2_token_resp_json)
             logger.error(errmsg)
             return (
-                retcode, "CREATE_FAILED", errmsg
+                os_status, "CREATE_FAILED", errmsg
             )
 
         # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
@@ -541,5 +542,5 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
             # stackid = stack1["id"] if stack1 else ""
             return 0, "CREATE_IN_PROGRESS", stack1
         else:
-            self._logger.info("RESP with data> result:%s" % content)
-            return retcode, "CREATE_FAILED", content
+            self._logger.info("workload_create fails: %s" % content)
+            return os_status, "CREATE_FAILED", content
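
For context on the registration.py change to azcap_audit: the cached AZ and pserver entries now default to `{}` instead of `None`, the per-pserver snapshot is written back to the cache after the deltas are computed, and the AZ list is JSON-encoded before caching. A simplified sketch of that delta-and-cache flow, using a plain dict in place of the Django cache and only the vcpus/memory_mb fields; everything except the "cap_" cache-key scheme and the 3600 * 24 timeout is illustrative:

```python
import json

# Stand-in for django.core.cache.cache in this sketch
fake_cache = {}


def cache_get(key):
    return fake_cache.get(key)


def cache_set(key, value, timeout):
    fake_cache[key] = value  # timeout ignored in the sketch


def audit_pserver(vimid, azname, psname, psinfo):
    """Apply the delta between the latest hypervisor info and the cached
    snapshot to the per-AZ aggregate, as the patched azcap_audit does."""
    az_key = "cap_" + vimid + "_" + azname
    ps_key = "cap_" + vimid + "_" + psname

    az_cap = json.loads(cache_get(az_key)) if cache_get(az_key) else {}
    ps_cap = json.loads(cache_get(ps_key)) if cache_get(ps_key) else {}

    # delta = latest value - previously cached value
    vcpu_delta = psinfo.get("vcpus", 0) - ps_cap.get("vcpus", 0)
    mem_delta = psinfo.get("memory_mb", 0) - ps_cap.get("memory_mb", 0)

    # refresh the per-pserver snapshot and persist it (the fix: the snapshot
    # is now written back, so the next audit run sees a delta of 0)
    ps_cap["vcpus"] = psinfo.get("vcpus", 0)
    ps_cap["memory_mb"] = psinfo.get("memory_mb", 0)
    cache_set(ps_key, json.dumps(ps_cap), 3600 * 24)

    # apply the delta to the AZ aggregate and persist it
    az_cap["vcpus"] = az_cap.get("vcpus", 0) + vcpu_delta
    az_cap["memory_mb"] = az_cap.get("memory_mb", 0) + mem_delta
    cache_set(az_key, json.dumps(az_cap), 3600 * 24)
    return az_cap


if __name__ == "__main__":
    # second call produces zero deltas, so the aggregate stays stable
    print(audit_pserver("vim1", "az1", "ps1", {"vcpus": 16, "memory_mb": 65536}))
    print(audit_pserver("vim1", "az1", "ps1", {"vcpus": 16, "memory_mb": 65536}))
```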