author     Xiaohua Zhang <xiaohua.zhang@windriver.com>    2019-04-09 10:30:44 +0000
committer  Xiaohua Zhang <xiaohua.zhang@windriver.com>    2019-04-09 10:30:44 +0000
commit     14132a2c06bc6b6431f5be90cecf303132d94103 (patch)
tree       1c914fa84e52cde8937e356156f16be9e14744c1
parent     5dc8f722d834229c27aab4ae2e8a609db09edb0d (diff)
Fix bugs for infra_workload APIs

Pass worker payloads without the bound helper object and invoke workers
with argument unpacking, look up backlog item fields as dict keys instead
of attributes, start the backlog thread on demand, add a workload_detail
helper to query a single Heat stack, require a non-empty workload id in
the v0/v1 URL patterns, and stop running memcached as root in the
run/stop scripts.
Change-Id: Ieda8b2f7f68911bc348613cf7b18b40ea800b1e2
Issue-ID: MULTICLOUD-541
Signed-off-by: Xiaohua Zhang <xiaohua.zhang@windriver.com>
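The central change to MultiCloudThreadHelper shows up across the helper.py and registration/infra_workload hunks below: backlog items are plain dicts probed with .get() rather than hasattr(), the worker entry is a bound method, and its payload tuple no longer carries the helper object because the dispatch loop now calls worker(*payload). The following is a minimal, self-contained sketch of that convention; BacklogRunner and Audit are hypothetical stand-ins for illustration, not the project's actual classes.

import uuid


class BacklogRunner(object):
    """Hypothetical, simplified stand-in for the backlog loop in
    share/common/msapi/helper.py (MultiCloudThreadHelper)."""

    def __init__(self):
        self.backlog = {}

    def add(self, backlog_item):
        # Backlog items are plain dicts, so fields are probed with .get()
        # rather than hasattr(), which never sees dict keys.
        if not backlog_item.get("worker", None):
            return None
        if not backlog_item.get("id", None):
            backlog_item["id"] = str(uuid.uuid1())
        self.backlog[backlog_item["id"]] = backlog_item
        return len(self.backlog)

    def run_once(self):
        for backlog_id, item in self.backlog.items():
            worker = item.get("worker", None)
            payload = item.get("payload", None)
            # The worker is already a bound method, so the payload tuple
            # carries only its arguments and is unpacked positionally.
            item["status"] = worker(*payload) or 0


class Audit(object):
    """Hypothetical worker owner standing in for the registration helper."""

    def azcap_audit(self, vimid, project_idorname=None):
        return "audited %s (project=%s)" % (vimid, project_idorname)


if __name__ == "__main__":
    runner = BacklogRunner()
    audit = Audit()
    runner.add({
        "id": "CloudOwner_RegionOne",
        "worker": audit.azcap_audit,                   # bound method
        "payload": ("CloudOwner_RegionOne", "admin"),  # no helper object here
        "repeat": 0,
    })
    runner.run_once()
    print(runner.backlog["CloudOwner_RegionOne"]["status"])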
-rw-r--r--   fcaps/run.sh                                          |  2
-rw-r--r--   fcaps/stop.sh                                         |  2
-rwxr-xr-x   lenovo/run.sh                                         |  2
-rwxr-xr-x   lenovo/stop.sh                                        |  2
-rwxr-xr-x   ocata/run.sh                                          |  2
-rwxr-xr-x   ocata/stop.sh                                         |  2
-rwxr-xr-x   pike/run.sh                                           |  2
-rwxr-xr-x   pike/stop.sh                                          |  2
-rw-r--r--   share/common/msapi/helper.py                          | 55
-rw-r--r--   share/newton_base/registration/registration.py        |  5
-rw-r--r--   share/newton_base/resource/infra_workload.py          |  6
-rw-r--r--   share/newton_base/resource/infra_workload_helper.py   | 56
-rw-r--r--   share/starlingx_base/registration/registration.py     |  7
-rw-r--r--   share/starlingx_base/resource/infra_workload.py       | 27
-rwxr-xr-x   starlingx/run.sh                                      |  2
-rw-r--r--   starlingx/starlingx/urls.py                           |  4
-rwxr-xr-x   starlingx/stop.sh                                     |  2
-rw-r--r--   windriver/run.sh                                      |  2
-rw-r--r--   windriver/stop.sh                                     |  2
-rw-r--r--   windriver/titanium_cloud/urls.py                      |  4
20 files changed, 124 insertions, 64 deletions
diff --git a/fcaps/run.sh b/fcaps/run.sh
index 51450029..0c1cd87d 100644
--- a/fcaps/run.sh
+++ b/fcaps/run.sh
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 #service rabbitmq-server restart
diff --git a/fcaps/stop.sh b/fcaps/stop.sh
index 56104f9f..b09ccb9f 100644
--- a/fcaps/stop.sh
+++ b/fcaps/stop.sh
@@ -15,4 +15,4 @@
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9011' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9011 --module fcaps.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/lenovo/run.sh b/lenovo/run.sh
index 0fa331f7..48e26cdc 100755
--- a/lenovo/run.sh
+++ b/lenovo/run.sh
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 #nohup python manage.py runserver 0.0.0.0:9010 2>&1 &
diff --git a/lenovo/stop.sh b/lenovo/stop.sh
index f43fab1b..87f80ba8 100755
--- a/lenovo/stop.sh
+++ b/lenovo/stop.sh
@@ -15,4 +15,4 @@
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9010' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9010 --module thinkcloud.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/ocata/run.sh b/ocata/run.sh
index 5d027658..a1f44ed2 100755
--- a/ocata/run.sh
+++ b/ocata/run.sh
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
diff --git a/ocata/stop.sh b/ocata/stop.sh
index 1402a97c..4e46b2cd 100755
--- a/ocata/stop.sh
+++ b/ocata/stop.sh
@@ -15,4 +15,4 @@
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9006' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9006 --module ocata.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/pike/run.sh b/pike/run.sh
index 738ac6cf..3822c8a5 100755
--- a/pike/run.sh
+++ b/pike/run.sh
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 #nohup python manage.py runserver 0.0.0.0:9007 2>&1 &
diff --git a/pike/stop.sh b/pike/stop.sh
index 9e433eb4..38d101d5 100755
--- a/pike/stop.sh
+++ b/pike/stop.sh
@@ -15,4 +15,4 @@
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9007' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9007 --module pike.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/share/common/msapi/helper.py b/share/common/msapi/helper.py
index b2ff1d6b..947966c9 100644
--- a/share/common/msapi/helper.py
+++ b/share/common/msapi/helper.py
@@ -145,18 +145,18 @@ class MultiCloudAAIHelper(object):
             retcode, content, status_code = \
                 restcall.req_to_aai(resource_url, "PUT", content=resource_info)
-            self._logger.debug(
-                ("_update_resoure,vimid:%(cloud_owner)s"
-                 "_%(cloud_region_id)s req_to_aai: %(resoure_id)s, "
-                 "return %(retcode)s, %(content)s, %(status_code)s")
-                % {
-                    "cloud_owner": cloud_owner,
-                    "cloud_region_id": cloud_region_id,
-                    "resoure_id": resoure_id,
-                    "retcode": retcode,
-                    "content": content,
-                    "status_code": status_code,
-                })
+            # self._logger.debug(
+            #     ("_update_resoure,vimid:%(cloud_owner)s"
+            #      "_%(cloud_region_id)s req_to_aai: %(resoure_id)s, "
+            #      "return %(retcode)s, %(content)s, %(status_code)s")
+            #     % {
+            #         "cloud_owner": cloud_owner,
+            #         "cloud_region_id": cloud_region_id,
+            #         "resoure_id": resoure_id,
+            #         "retcode": retcode,
+            #         "content": content,
+            #         "status_code": status_code,
+            #     })
             return retcode, content
         # unknown cloud owner,region_id
         return (
@@ -197,17 +197,17 @@ class MultiCloudThreadHelper(object):
         # }
         # format of backlog:
         # {"<id value of backlog item>": <backlog item>, ...}
+        self.name = name or "default"
         self.backlog = {}
         # expired backlog items
         self.expired_backlog = {}
         self.lock = threading.Lock()
         self.state_ = 0  # 0: stopped, 1: started
         self.cache_prefix = "bi_"+name+"_"
-        self.cache_expired_prefix = "biex_"+name+"_"
+        self.cache_expired_prefix = "biex_"+self.name+"_"
         self.thread = MultiCloudThreadHelper.HelperThread(self)
-        self.thread.start()
-
+        # self.thread.start()
 
     def state(self):
         return self.state_
@@ -217,7 +217,7 @@
         if 0 == self.state_:
             self.state_ = 1
             # self.thread = MultiCloudThreadHelper.HelperThread(self)
-            # self.thread.start()
+            self.thread.start()
         else:
             pass
         self.lock.release()
@@ -227,9 +227,10 @@ def add(self, backlog_item):
         cache_for_query = None
-        if not hasattr(backlog_item, "worker"):
+        if not backlog_item.get("worker", None):
+            logger.warn("Fail to add backlog item: %s" % backlog_item)
             return None
-        if not hasattr(backlog_item, "id"):
+        if not backlog_item.get("id", None):
            backlog_item["id"] = str(uuid.uuid1())
         else:
             cache_for_query = {
@@ -237,7 +238,7 @@
                 "id": backlog_item["id"],
                 "status": backlog_item.get("status", None)
             }
-        if not hasattr(backlog_item, "repeat"):
+        if not backlog_item.get("repeat", None):
             backlog_item["repeat"] = 0
         backlog_item["timestamp"] = 0
@@ -248,8 +249,9 @@
                            json.dumps(cache_for_query), 3600 * 24)
         self.expired_backlog.pop(backlog_item["id"], None)
-        self.backlog.update(backlog_item["id"], backlog_item)
+        self.backlog[backlog_item["id"]] = backlog_item
         # self.lock.release()
+        logger.debug("Add backlog item: %s" % backlog_item)
         return len(self.backlog)
 
     def get(self, backlog_id):
@@ -305,17 +307,19 @@
             self.duration = 0
             self.owner = owner
             # debug: dump the callstack to determine the callstack, hence the lcm
-            logger.debug("HelperThread __init__ : %s" % traceback.format_exc())
+            # logger.debug("HelperThread __init__ : %s" % traceback.format_exc())
 
         def run(self):
-            logger.debug("Start processing backlogs")
+            logger.debug("Thread %s starts processing backlogs" % self.owner.name)
             nexttimer = 0
             while self.owner.state_ == 1:  # and self.owner.count() > 0:
                 if nexttimer > 1000000:
                     # sleep in case of interval > 1 second
                     time.sleep(nexttimer // 1000000)
 
                 nexttimer = 30*1000000  # initial interval in us to be updated:30 seconds
-                for backlog_id, item in self.owner.backlog:
+                # logger.debug("self.owner.backlog len: %s" % len(self.owner.backlog))
+                for backlog_id, item in self.owner.backlog.items():
+                    # logger.debug("evaluate backlog item: %s" % item)
                     # check interval for repeatable backlog item
                     now = MultiCloudThreadHelper.get_epoch_now_usecond()
                     repeat_interval = item.get("repeat", 0)
@@ -331,10 +335,11 @@
                         # not time to run this backlog item yet
                         continue
 
+                    # logger.debug("process backlog item: %s" % backlog_id)
                     worker = item.get("worker", None)
                     payload = item.get("payload", None)
                     try:
-                        item["status"] = worker(payload) or 0
+                        item["status"] = worker(*payload) or 0
                     except Exception as e:
                         item["status"] = e.message
                     cache_item_for_query = {
@@ -364,6 +369,6 @@
             # while True:
             #     logger.debug("thread sleep for 5 seconds")
             #     time.sleep(5)  # wait forever, testonly
-            logger.debug("stop processing backlogs")
+            logger.debug("Thread %s stops processing backlogs" % self.owner.name)
             self.owner.state_ = 0
             # end of processing
diff --git a/share/newton_base/registration/registration.py b/share/newton_base/registration/registration.py
index 550c394e..a875cd84 100644
--- a/share/newton_base/registration/registration.py
+++ b/share/newton_base/registration/registration.py
@@ -64,8 +64,7 @@ class Registry(APIView):
         backlog_item = {
             "id": vimid,
             "worker": self.register_helper.registryV0,
-            "payload": (self.register_helper,
-                        vimid, specified_project_idorname),
+            "payload": (vimid, specified_project_idorname),
             "repeat": 0,
             "status": (1,
                        "The registration process waits to"
                        " be scheduled to run")
@@ -120,7 +119,7 @@
         backlog_item = {
             "id": vimid,
             "worker": self.register_helper.unregistryV0,
-            "payload": (self.register_helper, vimid),
+            "payload": (vimid),
             "repeat": 0,
             "status": (1,
                        "The registration process waits"
                        " to be scheduled to run")
diff --git a/share/newton_base/resource/infra_workload.py b/share/newton_base/resource/infra_workload.py
index 7bf07952..db216ce2 100644
--- a/share/newton_base/resource/infra_workload.py
+++ b/share/newton_base/resource/infra_workload.py
@@ -36,7 +36,7 @@ class InfraWorkload(APIView):
     def __init__(self):
         self._logger = logger
 
-    def post(self, request, vimid=""):
+    def post(self, request, vimid="", requri=""):
         self._logger.info("vimid: %s" % (vimid))
         self._logger.info("data: %s" % (request.data))
         self._logger.debug("META: %s" % request.META)
@@ -529,13 +529,13 @@ class APIv1InfraWorkload(InfraWorkload):
         super(APIv1InfraWorkload, self).__init__()
         # self._logger = logger
 
-    def post(self, request, cloud_owner="", cloud_region_id=""):
+    def post(self, request, cloud_owner="", cloud_region_id="", requri=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #                   (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
 
-        return super(APIv1InfraWorkload, self).post(request, vimid)
+        return super(APIv1InfraWorkload, self).post(request, vimid, requri)
 
     def get(self, request, cloud_owner="", cloud_region_id="", requri=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #                   (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
diff --git a/share/newton_base/resource/infra_workload_helper.py b/share/newton_base/resource/infra_workload_helper.py
index bfc3fc86..ee8291b1 100644
--- a/share/newton_base/resource/infra_workload_helper.py
+++ b/share/newton_base/resource/infra_workload_helper.py
@@ -420,9 +420,9 @@ class InfraWorkloadHelper(object):
             # get stack status
             service_type = "orchestration"
-            resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
+            resource_uri = "/stacks/id=%s" % stack_id if stack_id else "/stacks"
             if stack_name:
-                resource_uri = "/stacks?name=%s" % stack_id if not stack_id else resource_uri
+                resource_uri = "/stacks?name=%s" % stack_name if not stack_id else resource_uri
 
             self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
             retcode, content, os_status = \
@@ -443,3 +443,55 @@
         except Exception as e:
             self._logger.error(e.message)
             return 12, "GET_FAILED", e.message
+
+
+    def workload_detail(self, vimid, stack_id, nexturi=None, otherinfo=None, project_idorname=None):
+        '''
+        get workload status by either stack id or name
+        :param vimid:
+        :param stack_id:
+        :param nexturi: stacks/<stack id>/<nexturi>
+        :param otherinfo:
+        :return:
+        '''
+        try:
+            # assume the workload_type is heat
+            cloud_owner, regionid = extsys.decode_vim_id(vimid)
+            # should go via multicloud proxy so that the selflink is updated by multicloud
+            retcode, v2_token_resp_json, os_status = \
+                helper.MultiCloudIdentityHelper(
+                    settings.MULTICLOUD_API_V1_PREFIX,
+                    cloud_owner, regionid, "/v2.0/tokens",
+                    {"Project": project_idorname})
+
+            if retcode > 0 or not v2_token_resp_json:
+                errmsg = "authenticate fails:%s, %s, %s" % \
+                         (cloud_owner, regionid, v2_token_resp_json)
+                logger.error(errmsg)
+                return retcode, "GET_FAILED", errmsg
+
+            # get stack status
+            service_type = "orchestration"
+            resource_uri = "/stacks/%s" % stack_id
+            if nexturi:
+                resource_uri += "/" + nexturi
+
+            self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
+            retcode, content, os_status = \
+                helper.MultiCloudServiceHelper(cloud_owner, regionid,
+                                               v2_token_resp_json,
+                                               service_type, resource_uri,
+                                               None, "GET")
+
+            if retcode > 0 or not content:
+                errmsg = "Stack query %s response: %s" % (resource_uri, content)
+                self._logger.debug(errmsg)
+                return retcode, "GET_FAILED", errmsg
+
+            stack = content.get('stack', {})  # if retcode == 0 and content else []
+            stack_status = stack.get("stack_status", "GET_FAILED")
+
+            return retcode, stack_status, content
+        except Exception as e:
+            self._logger.error(e.message)
+            return 12, "GET_FAILED", e.message
diff --git a/share/starlingx_base/registration/registration.py b/share/starlingx_base/registration/registration.py
index 507f0fbf..cf0281f7 100644
--- a/share/starlingx_base/registration/registration.py
+++ b/share/starlingx_base/registration/registration.py
@@ -58,7 +58,7 @@ class APIv0Registry(newton_registration.Registry):
         backlog_item = {
             "id": vimid,
             "worker": worker_self.azcap_audit,
-            "payload": (worker_self, vimid, specified_project_idorname),
+            "payload": (vimid, specified_project_idorname),
             "repeat": 10*1000000,  # repeat every 10 seconds
         }
         gAZCapAuditThread.add(backlog_item)
@@ -88,6 +88,9 @@ class APIv1Registry(newton_registration.Registry):
                          % (cloud_owner, cloud_region_id))
 
         try:
+            # Get the specified tenant id
+            specified_project_idorname = request.META.get("Project", None)
+
             vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
 
             # vim registration will trigger the start the audit of AZ capacity
@@ -98,7 +101,7 @@
             backlog_item = {
                 "id": vimid,
                 "worker": worker_self.azcap_audit,
-                "payload": (worker_self, vimid),
+                "payload": (vimid, specified_project_idorname),
                 "repeat": 5 * 1000000,  # repeat every 5 seconds
             }
             gAZCapAuditThread.add(backlog_item)
diff --git a/share/starlingx_base/resource/infra_workload.py b/share/starlingx_base/resource/infra_workload.py
index 6b064856..409d74ed 100644
--- a/share/starlingx_base/resource/infra_workload.py
+++ b/share/starlingx_base/resource/infra_workload.py
@@ -85,7 +85,7 @@ class InfraWorkload(newton_infra_workload.InfraWorkload):
             backlog_item = {
                 "id": workloadid,
                 "worker": worker_self.workload_update,
-                "payload": (worker_self, vimid, workloadid,
+                "payload": (vimid, workloadid,
                             request.data, specified_project_idorname),
                 "repeat": 0,  # one time job
                 # format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
@@ -97,7 +97,9 @@
             gInfraWorkloadThread.add(backlog_item)
             if 0 == gInfraWorkloadThread.state():
                 gInfraWorkloadThread.start()
-
+            # progress = worker_self.workload_update(
+            #     vimid, workloadid,
+            #     request.data, specified_project_idorname)
             # now query the progress
             backlog_item = gInfraWorkloadThread.get(workloadid)
             if not backlog_item:
@@ -206,7 +208,7 @@
                     settings.AAI_BASE_URL
                 )
                 progress_code, progress_status, progress_msg =\
-                    worker_self.workload_status(
+                    worker_self.workload_detail(
                         vimid, stack_id=workloadid,
                         project_idorname=specified_project_idorname)
@@ -277,7 +279,7 @@
             backlog_item = {
                 "id": workloadid,
                 "worker": worker_self.workload_delete,
-                "payload": (worker_self, vimid, workloadid, request.data,
+                "payload": (vimid, workloadid, request.data,
                             specified_project_idorname),
                 "repeat": 0,  # one time job
                 # format of status: retcode:0 is ok, otherwise error code from http status, Status ENUM, Message
@@ -335,29 +337,29 @@ class APIv1InfraWorkload(InfraWorkload):
         super(APIv1InfraWorkload, self).__init__()
         # self._logger = logger
 
-    def post(self, request, cloud_owner="", cloud_region_id=""):
+    def post(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #                   (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
 
-        return super(APIv1InfraWorkload, self).post(request, vimid)
+        return super(APIv1InfraWorkload, self).post(request, vimid, workloadid)
 
-    def get(self, request, cloud_owner="", cloud_region_id="", requri=""):
+    def get(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #                   (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
 
-        return super(APIv1InfraWorkload, self).get(request, vimid, requri)
+        return super(APIv1InfraWorkload, self).get(request, vimid, workloadid)
 
-    def delete(self, request, cloud_owner="", cloud_region_id="", requri=""):
+    def delete(self, request, cloud_owner="", cloud_region_id="", workloadid=""):
         # self._logger.info("cloud owner, cloud region id, data: %s,%s, %s" %
         #                   (cloud_owner, cloud_region_id, request.data))
         # self._logger.debug("META: %s" % request.META)
         vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
 
-        return super(APIv1InfraWorkload, self).delete(request, vimid, requri)
+        return super(APIv1InfraWorkload, self).delete(request, vimid, workloadid)
 
 
 class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
@@ -503,9 +505,8 @@ class InfraWorkloadHelper(infra_workload_helper.InfraWorkloadHelper):
         # reset to make sure "files" are empty
         template_data["files"] = {}
 
-        template_data["stack_name"] = vf_module_id \
-            if not hasattr(template_data, "stack_name")\
-            else template_data["stack_name"]
+        template_data["stack_name"] =\
+            template_data.get("stack_name", vf_module_id)
 
         # authenticate
         cloud_owner, regionid = extsys.decode_vim_id(vimid)
diff --git a/starlingx/run.sh b/starlingx/run.sh
index a2dc4a75..7b69d169 100755
--- a/starlingx/run.sh
+++ b/starlingx/run.sh
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 uwsgi --http :9009 --module starlingx.wsgi --master --processes 4
diff --git a/starlingx/starlingx/urls.py b/starlingx/starlingx/urls.py
index 5a76b330..ca9ef240 100644
--- a/starlingx/starlingx/urls.py
+++ b/starlingx/starlingx/urls.py
@@ -38,7 +38,7 @@ urlpatterns = [
         r'infra_workload/?$',
         infra_workload.InfraWorkload.as_view()),
     url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
-        r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+        r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
         infra_workload.InfraWorkload.as_view()),
     url(r'^api/multicloud-starlingx/v0/(?P<vimid>[0-9a-zA-Z_-]+)/',
         include('starlingx.proxy.urls')),
@@ -67,7 +67,7 @@ urlpatterns = [
         infra_workload.APIv1InfraWorkload.as_view()),
     url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
         r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/'
-        r'(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+        r'(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
         infra_workload.APIv1InfraWorkload.as_view()),
     url(r'^api/multicloud-starlingx/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
         r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
diff --git a/starlingx/stop.sh b/starlingx/stop.sh
index 87d67ebc..0b734f38 100755
--- a/starlingx/stop.sh
+++ b/starlingx/stop.sh
@@ -15,4 +15,4 @@
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9009' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9009 --module starlingx.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/windriver/run.sh b/windriver/run.sh
index 74aff7fa..650c2c1b 100644
--- a/windriver/run.sh
+++ b/windriver/run.sh
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid
+memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid
 export PYTHONPATH=lib/share
 #nohup python manage.py runserver 0.0.0.0:9005 2>&1 &
diff --git a/windriver/stop.sh b/windriver/stop.sh
index ab8a72d5..5e7a6ac3 100644
--- a/windriver/stop.sh
+++ b/windriver/stop.sh
@@ -15,4 +15,4 @@
 #ps auxww | grep 'manage.py runserver 0.0.0.0:9005' | awk '{print $2}' | xargs kill -9
 ps auxww |grep 'uwsgi --http :9005 --module titanium_cloud.wsgi --master' |awk '{print $2}' |xargs kill -9
-ps auxww | grep 'memcached -d -m 2048 -u root -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
+ps auxww | grep 'memcached -d -m 2048 -c 1024 -p 11211 -P /tmp/memcached1.pid' | awk '{print $2}' | xargs kill -9
diff --git a/windriver/titanium_cloud/urls.py b/windriver/titanium_cloud/urls.py
index 4d118dc2..7a9f6455 100644
--- a/windriver/titanium_cloud/urls.py
+++ b/windriver/titanium_cloud/urls.py
@@ -83,7 +83,7 @@ urlpatterns = [
         r'infra_workload/?$',
         infra_workload.InfraWorkload.as_view()),
     url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/'
-        r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+        r'infra_workload/(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
         infra_workload.InfraWorkload.as_view()),
     url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/',
         include('titanium_cloud.proxy.urls')),
@@ -122,7 +122,7 @@ urlpatterns = [
         infra_workload.APIv1InfraWorkload.as_view()),
     url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
         r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/'
-        r'(?P<workloadid>[0-9a-zA-Z_-]*)/?$',
+        r'(?P<workloadid>[0-9a-zA-Z_-]+)/?$',
         infra_workload.APIv1InfraWorkload.as_view()),
     url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/'
         r'(?P<cloud_region_id>[0-9a-zA-Z_-]+)/',
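The urls.py hunks above change the workloadid capture group from [0-9a-zA-Z_-]* to [0-9a-zA-Z_-]+, so the item-level route only matches when a non-empty workload id is present. A minimal sketch of the behavioral difference, using Python's re module on a simplified tail of those patterns (the API prefix and vimid/cloud_region segments are omitted here for brevity):

import re

# Simplified tails of the real route regexes; only the quantifier differs.
OLD = r'^infra_workload/(?P<workloadid>[0-9a-zA-Z_-]*)/?$'
NEW = r'^infra_workload/(?P<workloadid>[0-9a-zA-Z_-]+)/?$'

for path in ("infra_workload/", "infra_workload/stack-123/"):
    old_m = re.match(OLD, path)
    new_m = re.match(NEW, path)
    print("%-28s old=%r  new=%r" % (
        path,
        old_m.group("workloadid") if old_m else None,
        new_m.group("workloadid") if new_m else None))

# infra_workload/              old=''           new=None
# infra_workload/stack-123/    old='stack-123'  new='stack-123'

With the old pattern a request without a workload id could still match the item-level view (capturing an empty string); with '+' it no longer matches, so only the dedicated collection route handles that path.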