author    Xiaohua Zhang <xiaohua.zhang@windriver.com>  2019-03-04 08:00:08 +0000
committer Xiaohua Zhang <xiaohua.zhang@windriver.com>  2019-03-04 08:00:08 +0000
commit    03985a2977b3934330c171d9261045193beb2725 (patch)
tree      eb0eed9c173682e958bf4372ad937751fcf6d476 /windriver/titanium_cloud
parent    2a0a977a319550dbd70e75695bddc011f261d3ab (diff)
Remove vesagent and celery

The vesagent has been moved to the fcaps module.

Change-Id: I938df03374ca887d4f455f206d398491010a9361
Issue-ID: MULTICLOUD-499
Signed-off-by: Xiaohua Zhang <xiaohua.zhang@windriver.com>
Diffstat (limited to 'windriver/titanium_cloud')
-rw-r--r--  windriver/titanium_cloud/__init__.py                             |    3
-rw-r--r--  windriver/titanium_cloud/celery.py                               |   39
-rw-r--r--  windriver/titanium_cloud/urls.py                                 |    9
-rw-r--r--  windriver/titanium_cloud/vesagent/__init__.py                    |   15
-rw-r--r--  windriver/titanium_cloud/vesagent/event_domain/__init__.py       |   15
-rw-r--r--  windriver/titanium_cloud/vesagent/event_domain/fault_vm.py       |  325
-rw-r--r--  windriver/titanium_cloud/vesagent/tasks.py                       |  197
-rw-r--r--  windriver/titanium_cloud/vesagent/tests/__init__.py              |   15
-rw-r--r--  windriver/titanium_cloud/vesagent/tests/tests_fault_vm.py        |  228
-rw-r--r--  windriver/titanium_cloud/vesagent/tests/tests_tasks.py           |  143
-rw-r--r--  windriver/titanium_cloud/vesagent/tests/tests_vesagent_ctrl.py   |  179
-rw-r--r--  windriver/titanium_cloud/vesagent/tests/tests_vespublish.py      |   54
-rw-r--r--  windriver/titanium_cloud/vesagent/vesagent_ctrl.py               |  452
-rw-r--r--  windriver/titanium_cloud/vesagent/vespublish.py                  |   53
14 files changed, 0 insertions(+), 1727 deletions(-)
diff --git a/windriver/titanium_cloud/__init__.py b/windriver/titanium_cloud/__init__.py
index f7fd66a2..d2ee8a03 100644
--- a/windriver/titanium_cloud/__init__.py
+++ b/windriver/titanium_cloud/__init__.py
@@ -16,6 +16,3 @@ from __future__ import absolute_import, unicode_literals
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
-from .celery import app as celery_app
-
-__all__ = ['celery_app']
diff --git a/windriver/titanium_cloud/celery.py b/windriver/titanium_cloud/celery.py
deleted file mode 100644
index ada8bef5..00000000
--- a/windriver/titanium_cloud/celery.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import, unicode_literals
-import os
-from celery import Celery
-import logging
-
-# set the default Django settings module for the 'celery' program.
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'titanium_cloud.settings')
-
-app = Celery('titanium_cloud')
-
-# Using a string here means the worker doesn't have to serialize
-# the configuration object to child processes.
-# - namespace='CELERY' means all celery-related configuration keys
-# should have a `CELERY_` prefix.
-app.config_from_object('django.conf:settings', namespace='CELERY')
-
-# Load task modules from all registered Django app configs.
-app.autodiscover_tasks()
-
-logger = logging.getLogger(__name__)
-
-
-@app.task(bind=True)
-def debug_task(self):
- logger.debug("self.request")
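The deleted celery.py above follows the standard Django/Celery wiring: point Celery at the Django settings module, read CELERY_-prefixed settings, and auto-discover tasks.py in every installed app. A minimal, self-contained sketch of that pattern (the project name "myproject" is a placeholder, not part of this codebase):

    import os
    from celery import Celery

    # Let Celery find the Django settings before the app is configured.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')

    app = Celery('myproject')
    # Pull all CELERY_* keys from Django settings.
    app.config_from_object('django.conf:settings', namespace='CELERY')
    # Register tasks from every installed Django app's tasks.py.
    app.autodiscover_tasks()

    @app.task(bind=True)
    def debug_task(self):
        # self.request carries task metadata (id, args, retries, ...).
        print(self.request)

    # A caller enqueues the task for a worker instead of running it inline:
    # debug_task.delay()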
diff --git a/windriver/titanium_cloud/urls.py b/windriver/titanium_cloud/urls.py
index f4adc10b..f910bcc8 100644
--- a/windriver/titanium_cloud/urls.py
+++ b/windriver/titanium_cloud/urls.py
@@ -17,7 +17,6 @@ from django.conf.urls import include, url
from starlingx_base.registration import registration
from newton_base.openoapi import tenants
from newton_base.resource import capacity
-from titanium_cloud.vesagent import vesagent_ctrl
from newton_base.resource import infra_workload
urlpatterns = [
@@ -40,8 +39,6 @@ urlpatterns = [
# CapacityCheck
url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/capacity_check/?$',
capacity.CapacityCheck.as_view()),
- url(r'^api/multicloud-titanium_cloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/vesagent/?$',
- vesagent_ctrl.VesAgentCtrl.as_view()),
# API v1, depreciated due to MULTICLOUD-335
url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/registry/?$',
@@ -59,8 +56,6 @@ urlpatterns = [
# CapacityCheck
url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/capacity_check/?$',
capacity.APIv1CapacityCheck.as_view()),
- url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/vesagent/?$',
- vesagent_ctrl.APIv1VesAgentCtrl.as_view()),
url(r'^api/multicloud-titanium_cloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/?$',
infra_workload.APIv1InfraWorkload.as_view()),
@@ -84,8 +79,6 @@ urlpatterns = [
# CapacityCheck
url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/capacity_check/?$',
capacity.CapacityCheck.as_view()),
- url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/vesagent/?$',
- vesagent_ctrl.VesAgentCtrl.as_view()),
# API v1, new namespace due to MULTICLOUD-335
url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/registry/?$',
@@ -101,8 +94,6 @@ urlpatterns = [
# CapacityCheck
url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/capacity_check/?$',
capacity.APIv1CapacityCheck.as_view()),
- url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/vesagent/?$',
- vesagent_ctrl.APIv1VesAgentCtrl.as_view()),
url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/?$',
infra_workload.APIv1InfraWorkload.as_view()),
url(r'^api/multicloud-titaniumcloud/v1/(?P<cloud_owner>[0-9a-zA-Z_-]+)/(?P<cloud_region_id>[0-9a-zA-Z_-]+)/infra_workload/(?P<requri>[0-9a-zA-Z_-]*)/?$',
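The routes removed above all follow the same Django 1.x url() pattern: a regex with a named vimid group dispatching to a Django REST Framework class-based view. A sketch with a placeholder view (ExampleCtrl and the /example path are illustrative, not part of the project):

    from django.conf.urls import url
    from rest_framework.response import Response
    from rest_framework.views import APIView

    class ExampleCtrl(APIView):
        def get(self, request, vimid=""):
            # vimid is captured from the URL by the named regex group below.
            return Response(data={"vimid": vimid})

    urlpatterns = [
        url(r'^api/multicloud-titaniumcloud/v0/(?P<vimid>[0-9a-zA-Z_-]+)/example/?$',
            ExampleCtrl.as_view()),
    ]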
diff --git a/windriver/titanium_cloud/vesagent/__init__.py b/windriver/titanium_cloud/vesagent/__init__.py
deleted file mode 100644
index 5f8b0d18..00000000
--- a/windriver/titanium_cloud/vesagent/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/windriver/titanium_cloud/vesagent/event_domain/__init__.py b/windriver/titanium_cloud/vesagent/event_domain/__init__.py
deleted file mode 100644
index 5f8b0d18..00000000
--- a/windriver/titanium_cloud/vesagent/event_domain/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/windriver/titanium_cloud/vesagent/event_domain/fault_vm.py b/windriver/titanium_cloud/vesagent/event_domain/fault_vm.py
deleted file mode 100644
index 3c93c321..00000000
--- a/windriver/titanium_cloud/vesagent/event_domain/fault_vm.py
+++ /dev/null
@@ -1,325 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import json
-import uuid
-
-from django.conf import settings
-from titanium_cloud.vesagent.vespublish import publishAnyEventToVES
-from common.utils import restcall
-# from common.msapi.helper import Helper as helper
-
-import datetime
-import time
-
-logger = logging.getLogger(__name__)
-
-
-def get_epoch_now_usecond():
- '''
- get epoch timestamp of this moment in usecond
- :return:
- '''
- now_time = datetime.datetime.now()
- epoch_time_sec = time.mktime(now_time.timetuple())
- return int(epoch_time_sec * 1e6 + now_time.microsecond)
-
-
-def buildBacklog_fault_vm(vimid, backlog_input):
- # build backlog with domain:"fault", type:"vm"
-
- logger.info("vimid: %s" % vimid)
- logger.debug("with input: %s" % backlog_input)
-
- try:
-
- # must resolve the tenant id and server id while building the backlog
- tenant_id = backlog_input.get("tenantid", None)
- server_id = backlog_input.get("sourceid", None)
- server_name = backlog_input.get("source", None)
-
- # should resolve the name to id later
- if tenant_id is None:
- tenant_name = backlog_input["tenant"]
-
- # get token
- # resolve tenant_name to tenant_id
- auth_api_url_format = "/{f_vim_id}/identity/v2.0/tokens"
- auth_api_url = auth_api_url_format.format(f_vim_id=vimid)
- auth_api_data = {"auth": {"tenantName": tenant_name}}
- base_url = settings.MULTICLOUD_PREFIX
- extra_headers = ''
- ret = restcall._call_req(base_url, "", "", 0, auth_api_url,
- "POST", extra_headers, json.dumps(auth_api_data))
- if ret[0] > 0 or ret[1] is None:
- logger.critical("call url %s failed with status %s" % (auth_api_url, ret[0]))
- return None
-
- token_resp = json.JSONDecoder().decode(ret[1])
- token = token_resp["access"]["token"]["id"]
- tenant_id = token_resp["access"]["token"]["tenant"]["id"]
-
- if server_id is None and server_name:
- # resolve server_name to server_id in case no wildcast in server_name
- vserver_api_url_format \
- = "/{f_vim_id}/compute/v2.1/{f_tenant_id}/servers?name={f_server_name}"
- vserver_api_url = vserver_api_url_format.format(f_vim_id=vimid,
- f_tenant_id=tenant_id,
- f_server_name=server_name)
- base_url = settings.MULTICLOUD_PREFIX
- extra_headers = {'X-Auth-Token': token}
- ret = restcall._call_req(base_url, "", "", 0, vserver_api_url, "GET", extra_headers, "")
- if ret[0] > 0 or ret[1] is None:
- logger.critical("call url %s failed with status %s" % (vserver_api_url, ret[0]))
- return None
-
- server_resp = json.JSONDecoder().decode(ret[1])
- # find out the server wanted
- for s in server_resp.get("servers", []):
- if s["name"] == server_name:
- server_id = s["id"]
- break
- if server_id is None:
- logger.warn("source %s cannot be found under tenant id %s "
- % (server_name, tenant_id))
- return None
-
- # m.c. proxied OpenStack API
- if server_id is None and server_name is None:
- # monitor all VMs of the specified VIMs since no server_id can be resolved
- api_url_fmt = "/{f_vim_id}/compute/v2.1/{f_tenant_id}/servers/detail"
- api_url = api_url_fmt.format(
- f_vim_id=vimid, f_tenant_id=tenant_id)
- else:
- api_url_fmt = "/{f_vim_id}/compute/v2.1/{f_tenant_id}/servers/{f_server_id}"
- api_url = api_url_fmt.format(
- f_vim_id=vimid, f_tenant_id=tenant_id, f_server_id=server_id)
-
- backlog = {
- "backlog_uuid":
- str(uuid.uuid3(uuid.NAMESPACE_URL,
- str("%s-%s-%s" % (vimid, tenant_id, server_id)))),
- "tenant_id": tenant_id,
- "server_id": server_id,
- "api_method": "GET",
- "api_link": api_url,
- }
- backlog.update(backlog_input)
- except Exception as e:
- logger.error("exception:%s" % str(e))
- return None
-
- logger.info("return")
- logger.debug("with backlog: %s" % backlog)
- return backlog
-
-
-# process backlog with domain:"fault", type:"vm"
-
-
-def processBacklog_fault_vm(vesAgentConfig, vesAgentState, oneBacklog):
- logger.debug("vesAgentConfig:%s, vesAgentState:%s, oneBacklog: %s"
- % (vesAgentConfig, vesAgentState, oneBacklog))
-
- try:
- vimid = vesAgentConfig["vimid"]
- tenant_name = oneBacklog["tenant"]
-
- # get token
- auth_api_url_format = "/{f_vim_id}/identity/v2.0/tokens"
- auth_api_url = auth_api_url_format.format(f_vim_id=vimid)
- auth_api_data = {"auth": {"tenantName": tenant_name}}
- base_url = settings.MULTICLOUD_PREFIX
- extra_headers = ''
- logger.debug("authenticate with url:%s" % auth_api_url)
- ret = restcall._call_req(base_url, "", "", 0, auth_api_url,
- "POST", extra_headers, json.dumps(auth_api_data))
- if ret[0] > 0 or ret[1] is None:
- logger.critical("call url %s failed with status %s" %
- (auth_api_url, ret[0]))
-
- token_resp = json.JSONDecoder().decode(ret[1])
- logger.debug("authenticate resp: %s" % token_resp)
- token = token_resp["access"]["token"]["id"]
-
- # collect data by issue API
- api_link = oneBacklog["api_link"]
- method = oneBacklog["api_method"]
- base_url = settings.MULTICLOUD_PREFIX
- data = ''
- extra_headers = {'X-Auth-Token': token}
- # which one is correct? extra_headers = {'HTTP_X_AUTH_TOKEN': token}
- logger.debug("authenticate with url:%s, header:%s" %
- (auth_api_url, extra_headers))
- ret = restcall._call_req(base_url, "", "", 0, api_link, method, extra_headers, data)
- if ret[0] > 0 or ret[1] is None:
- logger.critical("call url %s failed with status %s" % (api_link, ret[0]))
-
- server_resp = json.JSONDecoder().decode(ret[1])
- logger.debug("collected data: %s" % server_resp)
-
- # encode data
- backlog_uuid = oneBacklog.get("backlog_uuid", None)
- backlogState = vesAgentState.get("%s" % (backlog_uuid), None)
-
- # iterate all VMs
- all_events = []
- server_1 = server_resp.get("server", None) # in case querying single server
- for s in server_resp.get("servers", [server_1] if server_1 else []):
- server_id = s.get("id", None)
- server_name = s.get("name", None)
- if not server_id:
- continue
-
- last_event = backlogState.get("last_event_%s" % (server_id), None)
- logger.debug("last event for server name %s: %s" % (server_name, last_event))
-
- this_event = data2event_fault_vm(vimid, oneBacklog, last_event, s)
- if this_event is not None:
- logger.debug("this event: %s" % this_event)
- all_events.append(this_event.get("event", None))
- backlogState["last_event_%s" % (server_id)] = this_event
-
- # report data to VES
- if len(all_events) > 0:
- ves_subscription = vesAgentConfig.get("subscription", None)
- publishAnyEventToVES(ves_subscription, all_events)
- # store the latest data into cache, never expire
-
- except Exception as e:
- logger.error("exception:%s" % str(e))
- return
-
- logger.info("return")
- return
-
-
-def data2event_fault_vm(vimid, oneBacklog, last_event, vm_data):
- VES_EVENT_VERSION = 3.0
- VES_EVENT_FAULT_VERSION = 2.0
- VES_EVENT_FAULT_DOMAIN = "fault"
-
- try:
-
- if vm_status_is_fault(vm_data["status"]):
- if last_event is not None \
- and last_event['event']['commonEventHeader']['eventName'] == 'Fault_MultiCloud_VMFailure':
- # asserted alarm already, so no need to assert it again
- return None
-
- eventName = "Fault_MultiCloud_VMFailure"
- priority = "High"
- eventSeverity = "CRITICAL"
- alarmCondition = "Guest_Os_Failure"
- # vfStatus = "Active"
- specificProblem = "Fault_MultiCloud_VMFailure"
- eventType = ''
- reportingEntityId = vimid
- reportingEntityName = vimid
- sequence = 0
-
- startEpochMicrosec = get_epoch_now_usecond()
- lastEpochMicrosec = startEpochMicrosec
-
- eventId = str(uuid.uuid4())
- pass
- else:
- if last_event is None \
- or last_event['event']['commonEventHeader']['eventName'] != 'Fault_MultiCloud_VMFailure':
- # not assert alarm yet, so no need to clear it
- return None
-
- eventName = "Fault_MultiCloud_VMFailureCleared"
- priority = "Normal"
- eventSeverity = "NORMAL"
- alarmCondition = "Vm_Restart"
- # vfStatus = "Active"
- specificProblem = "Fault_MultiCloud_VMFailure"
- eventType = ''
- reportingEntityId = vimid
- reportingEntityName = vimid
- sequence = 1 # last_event['event']['commonEventHeader']['sequence'] + 1
-
- startEpochMicrosec = last_event['event']['commonEventHeader']['startEpochMicrosec']
- lastEpochMicrosec = get_epoch_now_usecond()
- # holmes requires that eventId must be unique for each event!
- eventId = str(uuid.uuid4())
-
- pass
-
- # now populate the event structure
- this_event = {
- 'event': {
- 'commonEventHeader': {
- 'version': VES_EVENT_VERSION,
- 'eventName': eventName,
- 'domain': VES_EVENT_FAULT_DOMAIN,
- 'eventId': eventId,
- 'eventType': eventType,
- 'sourceId': vm_data['id'],
- 'sourceName': vm_data['name'],
- 'reportingEntityId': reportingEntityId,
- 'reportingEntityName': reportingEntityName,
- 'priority': priority,
- 'startEpochMicrosec': startEpochMicrosec,
- 'lastEpochMicrosec': lastEpochMicrosec,
- 'sequence': sequence
- },
- 'faultFields': {
- 'faultFieldsVersion': VES_EVENT_FAULT_VERSION,
- 'eventSeverity': eventSeverity,
- 'eventSourceType': 'virtualMachine',
- 'alarmCondition': alarmCondition,
- 'specificProblem': specificProblem,
- 'vfStatus': 'Active',
- "alarmInterfaceA": "aaaa",
- "alarmAdditionalInformation": [
- {
- "name": "objectType",
- "value": "VIM"
- },
- {
- "name": "eventTime",
- "value": str(datetime.datetime.now())
- }
- ],
- }
-
- }
-
- }
-
- return this_event
-
- except Exception as e:
- logger.error("exception:%s" % str(e))
- return None
-
-
-def vm_status_is_fault(status):
- '''
- report VM fault when status falls into one of following state
- ['ERROR', 'DELETED', 'PAUSED', 'REBUILD', 'RESCUE',
- 'RESIZE','REVERT_RESIZE', 'SHELVED', 'SHELVED_OFFLOADED',
- 'SHUTOFF', 'SOFT_DELETED','SUSPENDED', 'UNKNOWN', 'VERIFY_RESIZE']
- :param status:
- :return:
- '''
- if status in ['BUILD', 'ACTIVE', 'HARD_REBOOT', 'REBOOT', 'MIGRATING', 'PASSWORD']:
- return False
- else:
- return True
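The core decision in the deleted data2event_fault_vm/vm_status_is_fault pair is when to raise and when to clear the VES fault. A condensed sketch of that logic, with the status values and event names copied from the code above (next_event_name is an illustrative helper, not a function from the project):

    # Statuses the removed code treats as healthy; anything else is a fault.
    HEALTHY = {'BUILD', 'ACTIVE', 'HARD_REBOOT', 'REBOOT', 'MIGRATING', 'PASSWORD'}

    def next_event_name(status, last_event_name):
        if status not in HEALTHY:
            # Fault state: assert an alarm only if one is not already asserted.
            if last_event_name == 'Fault_MultiCloud_VMFailure':
                return None
            return 'Fault_MultiCloud_VMFailure'
        # Healthy state: clear only if a fault was previously asserted.
        if last_event_name == 'Fault_MultiCloud_VMFailure':
            return 'Fault_MultiCloud_VMFailureCleared'
        return None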
diff --git a/windriver/titanium_cloud/vesagent/tasks.py b/windriver/titanium_cloud/vesagent/tasks.py
deleted file mode 100644
index d4e67c95..00000000
--- a/windriver/titanium_cloud/vesagent/tasks.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# VES agent workers
-from __future__ import absolute_import, unicode_literals
-from titanium_cloud.celery import app
-# import os
-import logging
-import json
-import time
-
-from django.core.cache import cache
-
-from titanium_cloud.vesagent.event_domain.fault_vm import processBacklog_fault_vm
-
-logger = logging.getLogger(__name__)
-
-
-@app.task(bind=True)
-def scheduleBacklogs(self, vimid):
- # make sure only one task runs here
- # cannot get vimid ? logger.info("schedule with vimid:%" % (vimid))
-
- logger.debug("scheduleBacklogs starts")
- backlog_count, next_time_slot = processBacklogs()
- logger.debug("processBacklogs return with %s, %s" % (backlog_count, next_time_slot))
-
- # sleep for next_time_slot
- while backlog_count > 0:
- time.sleep(next_time_slot)
- backlog_count, next_time_slot = processBacklogs()
-
- logger.debug("scheduleBacklogs stops")
-
-
-def processBacklogs():
- # find out count of valid backlog and the next time slot
- backlog_count = 0
- next_time_slot = 10
- try:
- # get the whole list of backlog
- VesAgentBacklogsVimListStr = cache.get("VesAgentBacklogs.vimlist")
- if VesAgentBacklogsVimListStr is None:
- logger.warn("VesAgentBacklogs.vimlist cannot be found in cache")
- return 0, next_time_slot
-
- logger.debug("VesAgentBacklogs.vimlist: %s" % VesAgentBacklogsVimListStr)
-
- backlogsAllVims = json.loads(VesAgentBacklogsVimListStr)
- if backlogsAllVims is None:
- logger.warn("VesAgentBacklogs.vimlist is empty")
- return 0, next_time_slot
-
- for vimid in backlogsAllVims:
- # iterate each backlogs
- backlog_count_tmp, next_time_slot_tmp = processBacklogsOfOneVIM(vimid)
- logger.debug("vimid:%s, backlog_count,next_time_slot:%s,%s"
- % (vimid, backlog_count_tmp, next_time_slot_tmp))
- backlog_count += backlog_count_tmp
- next_time_slot = next_time_slot_tmp if next_time_slot > next_time_slot_tmp else next_time_slot
- pass
-
- except Exception as e:
- logger.error("exception:%s" % str(e))
-
- return backlog_count, next_time_slot
-
- pass
-
-
-def processBacklogsOfOneVIM(vimid):
- '''
- process all backlogs for a VIM, return count of valid backlogs
- :param vimid:
- :return:
- '''
- backlog_count = 0
- next_time_slot = 10
-
- try:
- vesAgentConfigStr = cache.get("VesAgentBacklogs.config.%s" % vimid)
- if vesAgentConfigStr is None:
- logger.warn("VesAgentBacklogs.config.%s cannot be found in cache" % vimid)
- return 0, next_time_slot
-
- logger.debug("VesAgentBacklogs.config.%s: %s" % (vimid, vesAgentConfigStr))
-
- vesAgentConfig = json.loads(vesAgentConfigStr)
- if vesAgentConfig is None:
- logger.warn("VesAgentBacklogs.config.%s corrupts" % vimid)
- return 0, next_time_slot
-
- vesAgentStateStr = cache.get("VesAgentBacklogs.state.%s" % vimid)
- vesAgentState = json.loads(vesAgentStateStr) if vesAgentStateStr is not None else {}
-
- ves_info = vesAgentConfig.get("subscription", None)
- if ves_info is None:
- logger.warn("VesAgentBacklogs.config.%s: ves subscription corrupts:%s" % (vimid, vesAgentConfigStr))
- return 0, next_time_slot
-
- poll_interval_default = vesAgentConfig.get("poll_interval_default", None)
- if poll_interval_default is None:
- logger.warn("VesAgentBacklogs.config.%s: poll_interval_default corrupts:%s" % (vimid, vesAgentConfigStr))
- return 0, next_time_slot
-
- if poll_interval_default == 0:
- # invalid interval value
- logger.warn("VesAgentBacklogs.config.%s: poll_interval_default invalid:%s" % (vimid, vesAgentConfigStr))
- return 0, next_time_slot
-
- backlogs_list = vesAgentConfig.get("backlogs", None)
- if backlogs_list is None:
- logger.warn("VesAgentBacklogs.config.%s: backlogs corrupts:%s" % (vimid, vesAgentConfigStr))
- return 0, next_time_slot
-
- for backlog in backlogs_list:
- backlog_count_tmp, next_time_slot_tmp = \
- processOneBacklog(
- vesAgentConfig, vesAgentState, poll_interval_default, backlog)
- logger.debug("processOneBacklog return with %s,%s" % (backlog_count_tmp, next_time_slot_tmp))
- backlog_count += backlog_count_tmp
- next_time_slot = next_time_slot_tmp if next_time_slot > next_time_slot_tmp else next_time_slot
-
- pass
-
- # save back the updated backlogs state
- vesAgentStateStr = json.dumps(vesAgentState)
- cache.set("VesAgentBacklogs.state.%s" % vimid, vesAgentStateStr, None)
-
- except Exception as e:
- logger.error("exception:%s" % str(e))
-
- return backlog_count, next_time_slot
-
-
-def processOneBacklog(vesAgentConfig, vesAgentState, poll_interval_default, oneBacklog):
- logger.info("Process one backlog")
- # logger.debug("vesAgentConfig:%s, vesAgentState:%s, poll_interval_default:%s, oneBacklog: %s"
- # % (vesAgentConfig, vesAgentState, poll_interval_default, oneBacklog))
-
- backlog_count = 1
- next_time_slot = 10
- try:
- timestamp_now = int(time.time())
- backlog_uuid = oneBacklog.get("backlog_uuid", None)
- if backlog_uuid is None:
- # warning: uuid is None, omit this backlog
- logger.warn("backlog without uuid: %s" % oneBacklog)
- return 0, next_time_slot
-
- backlogState = vesAgentState.get("%s" % (backlog_uuid), None)
- if backlogState is None:
- initialBacklogState = {
- "timestamp": timestamp_now
- }
- vesAgentState["%s" % (backlog_uuid)] = initialBacklogState
- backlogState = initialBacklogState
-
- time_expiration = \
- backlogState["timestamp"] + \
- oneBacklog.get("poll_interval", poll_interval_default)
-
- # check if poll interval expires
- if timestamp_now < time_expiration:
- # not expired yet
- logger.info("return without dispatching, not expired yet")
- return backlog_count, next_time_slot
-
- logger.info("Dispatching backlog")
-
- # collect data in case of expiration
- if oneBacklog["domain"] == "fault" and oneBacklog["type"] == "vm":
- processBacklog_fault_vm(vesAgentConfig, vesAgentState, oneBacklog)
- else:
- logger.warn("Dispatching backlog fails due to unsupported backlog domain %s,type:%s"
- % (oneBacklog["domain"], oneBacklog["type"]))
- backlog_count = 0
- pass
-
- # update timestamp and internal state
- backlogState["timestamp"] = timestamp_now
- except Exception as e:
- logger.error("exception:%s" % str(e))
-
- logger.info("return")
- return backlog_count, next_time_slot
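The deleted scheduleBacklogs task is, at its core, a polling loop: process every backlog once, then sleep for the shortest reported time slot and repeat while any backlog remains. A minimal sketch of that loop (process_once stands in for processBacklogs):

    import time

    def run_polling_loop(process_once):
        # process_once() returns (remaining_backlog_count, next_time_slot_seconds).
        count, next_slot = process_once()
        while count > 0:
            time.sleep(next_slot)
            count, next_slot = process_once()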
diff --git a/windriver/titanium_cloud/vesagent/tests/__init__.py b/windriver/titanium_cloud/vesagent/tests/__init__.py
deleted file mode 100644
index 5f8b0d18..00000000
--- a/windriver/titanium_cloud/vesagent/tests/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/windriver/titanium_cloud/vesagent/tests/tests_fault_vm.py b/windriver/titanium_cloud/vesagent/tests/tests_fault_vm.py
deleted file mode 100644
index 880c1d5d..00000000
--- a/windriver/titanium_cloud/vesagent/tests/tests_fault_vm.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-import unittest
-import json
-
-from titanium_cloud.vesagent import vespublish
-from common.utils import restcall
-from titanium_cloud.vesagent.event_domain import fault_vm
-
-MOCK_TOKEN_RESPONSE = {
- "access":
- {"token": {"issued_at": "2018-05-10T16:56:56.000000Z",
- "expires": "2018-05-10T17:56:56.000000Z",
- "id": "4a832860dd744306b3f66452933f939e",
- "tenant": {"domain": {"id": "default", "name": "Default"},
- "enabled": "true", "id": "0e148b76ee8c42f78d37013bf6b7b1ae", "name": "VIM"}},
- "serviceCatalog": [], "user": {"domain": {"id": "default", "name": "Default"},
- "id": "ba76c94eb5e94bb7bec6980e5507aae2", "name": "demo"}}
-}
-
-MOCK_SERVERS_GET_RESPONSE = {
- "servers": [
- {"id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "links": [{
- "href": "http://10.12.25.2:8774/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "rel": "self"},
- {
- "href": "http://10.12.25.2:8774/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "rel": "bookmark"}],
- "name": "onap-aaf"}]
-}
-
-MOCK_BACKLOG_INPUT = {
- "backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
- "source": "onap-aaf",
- "api_link":
- "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"
-}
-
-MOCK_BACKLOG_INPUT_wo_tenant_id = {
- "backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "source": "onap-aaf",
- "api_link":
- "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"
-}
-
-MOCK_BACKLOG_INPUT_wo_tenant = {
- "backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "source": "onap-aaf",
- "domain": "fault", "type": "vm", }
-
-MOCK_BACKLOG_INPUT_wo_server_id = {
- "source": "onap-aaf",
- "domain": "fault", "type": "vm", "tenant": "VIM"}
-
-MOCK_BACKLOG_INPUT_wo_server = {"domain": "fault", "type": "vm", "tenant": "VIM"}
-
-MOCK_SERVER_GET_RESPONSE = {
- "server": {"wrs-res:topology": "node:0, 4096MB, pgsize:2M, vcpus:0,1, pol:sha",
- "OS-EXT-STS:task_state": None,
- "addresses": {
- "oam_onap_BTHY": [{"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:6c:0d:6b",
- "version": 4, "addr": "10.0.13.1", "OS-EXT-IPS:type": "fixed"},
- {"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:6c:0d:6b", "version": 4,
- "addr": "10.12.5.185", "OS-EXT-IPS:type": "floating"}]},
- "links": [], "image": {"id": "6e219e86-cd94-4989-9119-def29aa10b12", "links": []},
- "wrs-if:nics": [], "wrs-sg:server_group": "",
- "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2018-04-26T08:01:28.000000",
- "flavor": {}, "id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "security_groups": [{"name": "onap_sg_BTHY"}],
- "user_id": "ba76c94eb5e94bb7bec6980e5507aae2",
- "OS-DCF:diskConfig": "MANUAL", "accessIPv4": "",
- "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1,
- "OS-EXT-AZ:availability_zone": "nova", "metadata": {},
- "status": "ACTIVE", "updated": "2018-04-26T08:01:28Z",
- "hostId": "17acc9f2ae4f618c314e4cdf0c206585b895bc72a9ec57e57b254133",
- "OS-SRV-USG:terminated_at": None, "wrs-res:pci_devices": "",
- "wrs-res:vcpus": [2, 2, 2], "key_name": "onap_key_BTHY", "name": "onap-aaf",
- "created": "2018-04-26T08:01:20Z", "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae",
- "os-extended-volumes:volumes_attached": [], "config_drive": ""}}
-
-MOCK_SERVER_GET_RESPONSE_empty = {}
-
-MOCK_vesAgentConfig = {
- "backlogs": [
- {"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
- "source": "onap-aaf",
- "api_link":
- "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"}
- ],
- "poll_interval_default": 10, "vimid": "windriver-hudson-dc_RegionOne",
- "ves_subscription": {"username": "user", "password": "password",
- "endpoint": "http://127.0.0.1:9005/sample"}}
-
-MOCK_vesAgentState = {"ce2d7597-22e1-4239-890f-bc303bd67076": {"timestamp": 1525975400}}
-MOCK_oneBacklog = {
- "backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae",
- "api_method": "GET", "source": "onap-aaf",
- "api_link":
- "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"}
-
-
-class FaultVMTest(unittest.TestCase):
- def setUp(self):
- pass
-
- def tearDown(self):
- pass
-
- def test_get_epoch_now_usecond(self):
- epoch = fault_vm.get_epoch_now_usecond()
- self.assertGreater(epoch, 1)
-
- @mock.patch.object(restcall, '_call_req')
- def test_buildBacklog_fault_vm(self, mock_call_req):
- mock_call_req.side_effect = [
- (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
- (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "MOCKED response body")
- ]
- backlog = fault_vm.buildBacklog_fault_vm(
- vimid="windriver-hudson-dc_RegionOne",
- backlog_input=MOCK_BACKLOG_INPUT)
-
- self.assertIsNotNone(backlog)
-
- @mock.patch.object(restcall, '_call_req')
- def test_buildBacklog_fault_vm_wo_tenant_id(self, mock_call_req):
- mock_call_req.side_effect = [
- (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
- (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "MOCKED response body")
- ]
- backlog = fault_vm.buildBacklog_fault_vm(
- vimid="windriver-hudson-dc_RegionOne",
- backlog_input=MOCK_BACKLOG_INPUT_wo_tenant_id)
- self.assertIsNotNone(backlog)
-
- @mock.patch.object(restcall, '_call_req')
- def test_buildBacklog_fault_vm_wo_tenant(self, mock_call_req):
- mock_call_req.side_effect = [
- (1, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body: failed"),
- (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "MOCKED response body")
- ]
- backlog = fault_vm.buildBacklog_fault_vm(
- vimid="windriver-hudson-dc_RegionOne",
- backlog_input=MOCK_BACKLOG_INPUT_wo_tenant)
- self.assertIsNone(backlog)
-
- @mock.patch.object(restcall, '_call_req')
- def test_buildBacklog_fault_vm_wo_server_id(self, mock_call_req):
- mock_call_req.side_effect = [
- (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
- (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "MOCKED response body")
- ]
- backlog = fault_vm.buildBacklog_fault_vm(
- vimid="windriver-hudson-dc_RegionOne",
- backlog_input=MOCK_BACKLOG_INPUT_wo_server_id)
- self.assertIsNotNone(backlog)
-
- @mock.patch.object(restcall, '_call_req')
- def test_buildBacklog_fault_vm_wo_server(self, mock_call_req):
- mock_call_req.side_effect = [
- (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
- (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "MOCKED response body")
- ]
- backlog = fault_vm.buildBacklog_fault_vm(
- vimid="windriver-hudson-dc_RegionOne",
- backlog_input=MOCK_BACKLOG_INPUT_wo_server)
- self.assertIsNotNone(backlog)
-
- @mock.patch.object(vespublish, 'publishAnyEventToVES')
- @mock.patch.object(restcall, '_call_req')
- def test_processBacklog_fault_vm(
- self, mock_call_req, mock_publishAnyEventToVES):
- mock_call_req.side_effect = [
- (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
- (0, json.dumps(MOCK_SERVER_GET_RESPONSE), "MOCKED response body")
- ]
- mock_publishAnyEventToVES.return_value = "mocked return value"
-
- result = fault_vm.processBacklog_fault_vm(
- vesAgentConfig=MOCK_vesAgentConfig,
- vesAgentState=MOCK_vesAgentState,
- oneBacklog=MOCK_oneBacklog)
- self.assertIsNone(result)
- pass
-
- @mock.patch.object(vespublish, 'publishAnyEventToVES')
- @mock.patch.object(restcall, '_call_req')
- def test_processBacklog_fault_vm_wo_server(
- self, mock_call_req, mock_publishAnyEventToVES):
- mock_call_req.side_effect = [
- (0, json.dumps(MOCK_TOKEN_RESPONSE), "MOCKED response body"),
- (0, json.dumps(MOCK_SERVER_GET_RESPONSE_empty), "MOCKED response body")
- ]
- mock_publishAnyEventToVES.return_value = "mocked return value"
-
- result = fault_vm.processBacklog_fault_vm(
- vesAgentConfig=MOCK_vesAgentConfig,
- vesAgentState=MOCK_vesAgentState,
- oneBacklog=MOCK_oneBacklog)
-
- self.assertIsNone(result)
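These tests all rely on the same fixture pattern: patch restcall._call_req and feed it (return_code, body, message) tuples via side_effect, where a return code of 0 means success. A stripped-down sketch of that pattern (the payload is illustrative):

    import json
    import unittest

    import mock

    from common.utils import restcall

    class RestcallMockSketch(unittest.TestCase):
        @mock.patch.object(restcall, '_call_req')
        def test_mocked_call(self, mock_call_req):
            mock_call_req.side_effect = [
                (0, json.dumps({"ok": True}), "MOCKED response body"),
            ]
            # The code under test would issue this call internally.
            code, body, _ = restcall._call_req(
                "http://base", "", "", 0, "/path", "GET", "", "")
            self.assertEqual(0, code)
            self.assertTrue(json.loads(body)["ok"])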
diff --git a/windriver/titanium_cloud/vesagent/tests/tests_tasks.py b/windriver/titanium_cloud/vesagent/tests/tests_tasks.py
deleted file mode 100644
index 64b8d3df..00000000
--- a/windriver/titanium_cloud/vesagent/tests/tests_tasks.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-import unittest
-import json
-from django.test import Client
-# from rest_framework import status
-
-from django.core.cache import cache
-from common.msapi import extsys
-
-from titanium_cloud.vesagent import tasks
-from titanium_cloud.vesagent.event_domain import fault_vm
-
-MOCK_VIM_INFO = {
- "createTime": "2017-04-01 02:22:27",
- "domain": "Default",
- "name": "TiS_R4",
- "password": "admin",
- "tenant": "admin",
- "type": "openstack",
- "url": "http://128.224.180.14:5000/v3",
- "userName": "admin",
- "vendor": "WindRiver",
- "version": "newton",
- "vimId": "windriver-hudson-dc_RegionOne",
- 'cloud_owner': 'windriver-hudson-dc',
- 'cloud_region_id': 'RegionOne',
- 'cloud_extra_info':
- '{"vesagent_config":{"backlogs":[{"source":"onap-aaf","domain":"fault","type":"vm","tenant":"VIM"}],"poll_interval_default":10,"ves_subscription":{"username":"user","password":"password","endpoint":"http://127.0.0.1:9005/sample"}}}',
- 'insecure': 'True',
-}
-
-COUNT_TIME_SLOT1 = (1, 1)
-COUNT_TIME_SLOT2 = (0, 1)
-
-
-class VesTaskTest(unittest.TestCase):
- def setUp(self):
- self.client = Client()
-
- def tearDown(self):
- pass
-
- @mock.patch.object(tasks, 'processBacklogs')
- @mock.patch.object(extsys, 'get_vim_by_id')
- def test_tasks_scheduleBacklogs(self, mock_get_vim_by_id, mock_processBacklogs):
- mock_get_vim_by_id.return_value = MOCK_VIM_INFO
- mock_processBacklogs.side_effect = [
- COUNT_TIME_SLOT1,
- COUNT_TIME_SLOT2
- ]
- result = tasks.scheduleBacklogs(vimid="windriver-hudson-dc_RegionOne")
- self.assertEquals(None, result)
-
- @mock.patch.object(tasks, 'processBacklogsOfOneVIM')
- @mock.patch.object(cache, 'get')
- def test_tasks_processBacklogs(
- self, mock_cache_get, mock_tasks_processBacklogsOfOneVIM):
- mock_VesAgentBacklogs_vimlist = ["windriver-hudson-dc_RegionOne"]
- COUNT_TIME_SLOT_ONE_VIM = (1, 1)
- mock_tasks_processBacklogsOfOneVIM.return_value = COUNT_TIME_SLOT_ONE_VIM
- mock_cache_get.side_effect = [
- json.dumps(mock_VesAgentBacklogs_vimlist),
- ]
- result = tasks.processBacklogs()
- self.assertEquals(COUNT_TIME_SLOT_ONE_VIM, result)
-
- @mock.patch.object(tasks, 'processOneBacklog')
- @mock.patch.object(cache, 'set')
- @mock.patch.object(cache, 'get')
- def test_tasks_processBacklogsOfOneVIM(
- self, mock_cache_get, mock_cache_set, mock_tasks_processOneBacklog):
- # mock_VesAgentBacklogs_vimlist = ["windriver-hudson-dc_RegionOne"]
- mock_vesagent_config = {
- "backlogs":
- [{"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
- "source": "onap-aaf",
- "api_link":
- "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"}],
- "poll_interval_default": 10, "vimid": "onaplab_RegionOne",
- "subscription": {"username": "user", "password": "password",
- "endpoint": "http://127.0.0.1:9005/sample"}}
- mock_cache_get.side_effect = [
- json.dumps(mock_vesagent_config),
- json.dumps({})
- ]
- mock_tasks_processOneBacklog.return_value = (1, 11)
- mock_cache_set.return_value = "mocked cache set"
- result = tasks.processBacklogsOfOneVIM(vimid="windriver-hudson-dc_RegionOne")
- COUNT_TIME_SLOT = (1, 10)
- self.assertEquals(COUNT_TIME_SLOT, result)
-
- @mock.patch.object(fault_vm, 'processBacklog_fault_vm')
- def test_tasks_processOneBacklog(
- self, mock_fault_vm_processBacklog_fault_vm):
- mock_fault_vm_processBacklog_fault_vm.return_value = None
- vesagent_config = {
- "backlogs":
- [{"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
- "source": "onap-aaf",
- "api_link":
- "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"}],
- "poll_interval_default": 10, "vimid": "onaplab_RegionOne",
- "subscription": {"username": "user", "password": "password",
- "endpoint": "http://127.0.0.1:9005/sample"}}
-
- vesagent_onebacklog = {
- "backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "poll_interval": 10,
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
- "source": "onap-aaf",
- "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"
- }
-
- result = tasks.processOneBacklog(
- vesAgentConfig=vesagent_config,
- vesAgentState={},
- poll_interval_default=10,
- oneBacklog=vesagent_onebacklog)
- COUNT_TIME_SLOT = (1, 10)
- self.assertEquals(COUNT_TIME_SLOT, result)
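The cache calls mocked above mirror how the removed worker keeps its state: JSON blobs stored under per-VIM keys in the Django cache with a timeout of None, i.e. they do not expire. A sketch using the same key names (the vimid value is illustrative):

    import json
    from django.core.cache import cache

    vimid = "windriver-hudson-dc_RegionOne"
    # List of VIMs with active backlogs; cached without expiration.
    cache.set("VesAgentBacklogs.vimlist", json.dumps([vimid]), None)
    # Per-VIM worker state, keyed by backlog uuid inside the blob.
    cache.set("VesAgentBacklogs.state.%s" % vimid, json.dumps({}), None)

    vimlist = json.loads(cache.get("VesAgentBacklogs.vimlist") or "[]")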
diff --git a/windriver/titanium_cloud/vesagent/tests/tests_vesagent_ctrl.py b/windriver/titanium_cloud/vesagent/tests/tests_vesagent_ctrl.py
deleted file mode 100644
index 289e43d8..00000000
--- a/windriver/titanium_cloud/vesagent/tests/tests_vesagent_ctrl.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-import unittest
-import json
-from django.test import Client
-from rest_framework import status
-
-from django.core.cache import cache
-from common.msapi import extsys
-from titanium_cloud.vesagent import vesagent_ctrl
-from titanium_cloud.vesagent.event_domain import fault_vm
-from titanium_cloud.vesagent.tasks import scheduleBacklogs
-
-MOCK_VIM_INFO = {
- "createTime": "2017-04-01 02:22:27",
- "domain": "Default",
- "name": "TiS_R4",
- "password": "admin",
- "tenant": "admin",
- "type": "openstack",
- "url": "http://128.224.180.14:5000/v3",
- "userName": "admin",
- "vendor": "WindRiver",
- "version": "newton",
- "vimId": "windriver-hudson-dc_RegionOne",
- 'cloud_owner': 'windriver-hudson-dc',
- 'cloud_region_id': 'RegionOne',
- 'cloud_extra_info':
- '{"vesagent_config":{"backlogs":[{"source": "onap-aaf","domain": "fault","type": "vm","tenant": "VIM"}],"poll_interval_default":10,"ves_subscription":{"username": "user","password": "password","endpoint": "http://127.0.0.1:9005/sample"}}}',
- 'insecure': 'True',
-}
-
-
-class VesAgentCtrlTest(unittest.TestCase):
- def setUp(self):
- self.client = Client()
- self.view = vesagent_ctrl.VesAgentCtrl()
-
- def tearDown(self):
- pass
-
- @mock.patch.object(cache, 'get')
- @mock.patch.object(extsys, 'get_vim_by_id')
- def test_get(self, mock_get_vim_by_id, mock_get):
- mock_get_vim_by_id.return_value = MOCK_VIM_INFO
- mock_get.return_value = \
- '{"backlogs": [{"backlog_uuid": "2b8f6ff8-bc64-339b-a714-155909db937f", "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721", "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET", "source": "onap-aaf", "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721", "domain": "fault", "type": "vm", "tenant": "VIM"}], "poll_interval_default": 10, "vimid": "onaplab_RegionOne", "subscription": {"username": "user", "password": "password", "endpoint": "http://127.0.0.1:9005/sample"}}'
-
- response = self.client.get("/api/multicloud-titaniumcloud/v0/windriver-hudson-dc_RegionOne/vesagent")
- self.assertEqual(status.HTTP_200_OK, response.status_code, response.content)
-
- @mock.patch.object(vesagent_ctrl.VesAgentCtrl, 'buildBacklogsOneVIM')
- @mock.patch.object(extsys, 'get_vim_by_id')
- def test_post(self, mock_get_vim_by_id, mock_buildBacklogsOneVIM):
- mock_get_vim_by_id.return_value = MOCK_VIM_INFO
- mock_buildBacklogsOneVIM.return_value = "mocked vesagent_backlogs"
- mock_request = mock.Mock()
- mock_request.META = {"testkey": "testvalue"}
- mock_request.data = {"testdatakey": "testdatavalue"}
-
- response = self.view.post(request=mock_request, vimid="windriver-hudson-dc_RegionOne")
- self.assertEquals(status.HTTP_201_CREATED, response.status_code)
-
- @mock.patch.object(vesagent_ctrl.VesAgentCtrl, 'clearBacklogsOneVIM')
- @mock.patch.object(extsys, 'get_vim_by_id')
- def test_delete(self, mock_get_vim_by_id, mock_clearBacklogsOneVIM):
- mock_get_vim_by_id.return_value = MOCK_VIM_INFO
- mock_clearBacklogsOneVIM.return_value = "mocked vesagent_backlogs"
- mock_request = mock.Mock()
- mock_request.META = {"testkey": "testvalue"}
-
- response = self.view.delete(request=mock_request, vimid="windriver-hudson-dc_RegionOne")
- self.assertEquals(status.HTTP_200_OK, response.status_code)
-
- @mock.patch.object(cache, 'get')
- def test_getBacklogsOneVIM(self, mock_get):
- mock_vesagent_config = {"backlogs": [{"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
- "source": "onap-aaf",
- "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"}],
- "poll_interval_default": 10, "vimid": "onaplab_RegionOne",
- "subscription": {"username": "user", "password": "password",
- "endpoint": "http://127.0.0.1:9005/sample"}}
- mock_get.return_value = json.dumps(mock_vesagent_config)
-
- vesAgentConfig = self.view.getBacklogsOneVIM(vimid="windriver-hudson-dc_RegionOne")
- self.assertEquals(vesAgentConfig, mock_vesagent_config)
-
- @mock.patch.object(cache, 'set')
- @mock.patch.object(cache, 'get')
- def test_clearBacklogsOneVIM(self, mock_get, mock_set):
- mock_VesAgentBacklogs_vimlist = ["windriver-hudson-dc_RegionOne"]
- mock_vesagent_config = {
- "backlogs": [
- {"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
- "source": "onap-aaf",
- "api_link":
- "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"}
- ],
- "poll_interval_default": 10, "vimid": "onaplab_RegionOne",
- "subscription": {"username": "user", "password": "password",
- "endpoint": "http://127.0.0.1:9005/sample"}}
-
- mock_get.side_effect = [
- json.dumps(mock_VesAgentBacklogs_vimlist),
- json.dumps(mock_vesagent_config)
- ]
-
- mock_set.return_value = "mocked cache set"
-
- result = self.view.clearBacklogsOneVIM(
- vimid="windriver-hudson-dc_RegionOne")
- self.assertEquals(0, result)
-
- @mock.patch.object(scheduleBacklogs, 'delay')
- @mock.patch.object(cache, 'set')
- @mock.patch.object(cache, 'get')
- def test_buildBacklogsOneVIM(
- self, mock_get, mock_set, mock_scheduleBacklogs_delay):
- mock_VesAgentBacklogs_vimlist = ["windriver-hudson-dc_RegionOne"]
- mock_vesagent_config = {
- "backlogs": [{"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
- "source": "onap-aaf",
- "api_link": "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"}],
- "poll_interval_default": 10, "vimid": "windriver-hudson-dc_RegionOne",
- "ves_subscription": {"username": "user", "password": "password",
- "endpoint": "http://127.0.0.1:9005/sample"}}
-
- mock_get.side_effect = [
- json.dumps(mock_VesAgentBacklogs_vimlist),
- ]
-
- mock_set.return_value = "mocked cache set"
- mock_scheduleBacklogs_delay.return_value = "mocked delay"
-
- VesAgentBacklogsConfig = self.view.buildBacklogsOneVIM(
- vimid="windriver-hudson-dc_RegionOne",
- vesagent_config=mock_vesagent_config)
- self.assertIsNotNone(VesAgentBacklogsConfig)
-
- @mock.patch.object(fault_vm, 'buildBacklog_fault_vm')
- def test_buildBacklog(self, mock_buildBacklog_fault_vm):
- mock_backlog_input = {
- "backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
- "server_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
- "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
- "source": "onap-aaf",
- "api_link":
- "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
- "domain": "fault", "type": "vm", "tenant": "VIM"}
-
- mock_buildBacklog_fault_vm.return_value = "mocked buildBacklog_fault_vm"
-
- VesAgentBacklogsConfig = self.view.buildBacklog(
- vimid="windriver-hudson-dc_RegionOne",
- backlog_input=mock_backlog_input)
- self.assertIsNotNone(VesAgentBacklogsConfig)
diff --git a/windriver/titanium_cloud/vesagent/tests/tests_vespublish.py b/windriver/titanium_cloud/vesagent/tests/tests_vespublish.py
deleted file mode 100644
index 64ea7d00..00000000
--- a/windriver/titanium_cloud/vesagent/tests/tests_vespublish.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-import unittest
-# import json
-import urllib2
-
-from titanium_cloud.vesagent import vespublish
-
-MOCK_VESENDPOINT = {
- "endpoint": "MOCKED_VES_COLLECTOR_EP1",
- "username": "MOCKED_VES_COLLECTOR_USER1",
- "password": "MOCKED_VES_COLLECTOR_PASSWD1",
-}
-
-MOCK_VESPUBLISH_EVENT1 = [{"name": "event1"}]
-
-
-class VespublishTest(unittest.TestCase):
- def setUp(self):
- pass
-
- def tearDown(self):
- pass
-
- @mock.patch.object(urllib2, 'urlopen')
- @mock.patch.object(urllib2, 'Request')
- def test_publishAnyEventToVES(self, mock_Request, mock_urlopen):
- mock_request = mock.Mock()
-
- mock_Request.side_effect = [
- mock_request
- ]
-
- mock_response = mock.Mock(["read"])
- mock_response.read.return_value = "MOCKED_VESPUBLISH_RESPONSE_MESSAGE"
- mock_urlopen.side_effect = [
- mock_response
- ]
-
- vespublish.publishAnyEventToVES(MOCK_VESENDPOINT, MOCK_VESPUBLISH_EVENT1)
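The deleted test targets the Python 2 urllib2 API used by vespublish. For reference, an equivalent POST of an event batch under Python 3 would go through urllib.request; the endpoint handling and payload shape below are illustrative, not the project's actual publisher:

    import json
    import urllib.request

    def publish_events(endpoint, events):
        # Send the collected events to a VES collector as a JSON POST.
        req = urllib.request.Request(
            endpoint,
            data=json.dumps({"eventList": events}).encode("utf-8"),
            headers={"Content-Type": "application/json"},
            method="POST")
        with urllib.request.urlopen(req, timeout=10) as resp:
            return resp.read()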
diff --git a/windriver/titanium_cloud/vesagent/vesagent_ctrl.py b/windriver/titanium_cloud/vesagent/vesagent_ctrl.py
deleted file mode 100644
index 86b4ef51..00000000
--- a/windriver/titanium_cloud/vesagent/vesagent_ctrl.py
+++ /dev/null
@@ -1,452 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-# import traceback
-import json
-
-from rest_framework import status
-from rest_framework.response import Response
-from rest_framework.views import APIView
-
-from django.conf import settings
-from common.msapi import extsys
-from titanium_cloud.vesagent.tasks import scheduleBacklogs
-from titanium_cloud.vesagent.event_domain import fault_vm
-
-from django.core.cache import cache
-
-logger = logging.getLogger(__name__)
-
-
-class VesAgentCtrl(APIView):
- '''
- control plane of VesAgent
- Design tips:
- 1, vesagent are multiple processing workers
- 2, the runtime logic is simple: a vesagent worker polls the data source (vm/hypervisor/host/vim/etc.)
- and then feeds the encoded data to VES.
- 3, the vesagent workers can be distributed to different clouds while latency/throughput is concerned,
- this distributed deployment usually comes along with the distributed VES deployment.
- So it is very likely that the collected data from different VIM/Cloud instance will be fed into
- different VES endpoint, however, assuming that there will be at most one VES endpoint serving
- any single VIM/Cloud instance.
- 4, According to VES specs, the collected data can be cataloged by domain:
- domain : fault, heartbeat, measurementsForVfScaling, other, stateChange, syslog, thresholdCrossingAlert
- As far as VIM/Cloud concerned, fault, heartbeat, measurementsForVfScaling, TCAalert are relevant.
- 5, the source of the collected data can be cataloged by eventSourceType:
- eventSourceType: VNF/VNFC/VM
- As far as VIM/Cloud concerned, only VM is relevant. This eventSourceType should be extended to cover
- the data source of hypervisor, VIM, Host,Controller, PIM, etc.
-
- 6, the source of collected data should be specified explicitly,so is the domain of the collected data.
- To specify the source: eventSourceType, uuid or name of the source
- To specify the domain: domain
- the specifications above will be provisioned as a vesagent backlog entry to a VIM/Cloud instance
- to tell a vesagent worker that :
- with regarding to that VIM/Cloud instance, what kind of data to be collected from which source .
-
- 7,the VES endpoint will be also specified for a VIM/Cloud instance, so that all collected data
- will be fed into this VES endpoint
-
- 8, the vesagent backlog are stored into the respective cloud_region's property "cloud-extra-info",
- which implies that those specifications can be CRUD either by ESR portal or the RestAPIs in this view, e.g.
- "cloud-extra-info": {
- ...,
- "vesagent_config":
- {
- "ves_subscription":{
- "endpoint":"http://{VES IP}:{VES port}/{URI}",
- "username":"{VES username}",
- "password":"{VES password}",
- },
- "poll_interval_default" : "{default interval for polling}",
- "backlogs":[
- {
- "domain":"fault"
- "type":"vm",
- "tenant":"{tenant name1}",
- "source":"{VM name1}",
- "poll_interval" : "{optional, interval for polling}",
- },
- {
- "domain":"fault"
- "type":"vm",
- "tenant":"{tenant name2}",
- "source":"{VM name2}",
- "poll_interval" : "{optional, interval for polling}",
- }
- ]
- }
- }
-
- Idea: API dispatching to distributed M.C. service can be determined by Complex Object in AAI:
- cloud-region has been assoicated to a Complex Object
- M.C. plugin service instance should refer to the same Complex Object (by physical_locaton_id ?)
- So the M.C. broker/API distributor/other approach will correlate the cloud-region with
- corresponding M.C. plugin service instance.
-
-
- Backlog built in cache:
-
- maintain backlog in cache and VES agent workers
- cache objects:
- "VesAgentBacklogs.vimlist": [ list of vimid] ### will not expire forever
- "VesAgentBacklogs.state.{vimdid}":
- ### will expire eventually to eliminate the garbage, expiration duration: 1hour?
- {
- "{backlog_uuid}": {
- "timestamp": "{timestamp for last time of data collecting}",
- "api_data": [list of data to populate the format string of the API link]
- "last_event": {object, event reported to ves last time}"
- }
- }
- "VesAgentBacklogs.config.{vimdid}": ### will not expire forever
- {
- "vimid": "{vim id}",
- "subscription": {
- "endpoint": "{ves endpoint, e.g. http://ves_ip:ves_port/eventListener/v5}",
- "username": "{username}",
- "password": "{password}"
- }
- "poll_interval_default" : "{default interval for polling}",
- "backlogs":[
- {
- "backlog_uuid": "{uuid to identify the backlog}"
- "domain":"fault"
- "type":"vm",
- "tenant":"{tenant name1}",
- "source":"{VM name1}",
- "poll_interval" : "{optional, interval in second for polling}",
- "api_method": "{GET/POST/PUT/etc.}",
- "api_link":"{API link to collect data, could be format string}",
- "tenant_id": tenant_id,
- "server_id": server_id,
- },
- {
- "domain":"fault"
- "type":"vm",
- "tenant":"{tenant name2}",
- "source":"{VM name2}",
- "poll_interval" : "{optional, interval in second for polling}",
- "api_method": "{GET/POST/PUT/etc.}",
- "api_link":"{API link to collect data, could be format string}",
- "tenant_id": tenant_id,
- "server_id": server_id,
- }
- ]
- }
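-
- For illustration only (not part of this module), a hypothetical helper sketching how a worker
- could walk the cached backlogs via the keys above, assuming the Django cache used by this service:
-
-     import json
-     from django.core.cache import cache
-
-     def iterate_cached_backlogs():
-         # walk all VIM ids known to the vesagent, then each VIM's cached backlog config
-         vimlist_str = cache.get("VesAgentBacklogs.vimlist")
-         vimlist = json.loads(vimlist_str) if vimlist_str else []
-         for vimid in vimlist:
-             config_str = cache.get("VesAgentBacklogs.config.%s" % vimid)
-             if config_str is None:
-                 continue
-             config = json.loads(config_str)
-             for backlog in config.get("backlogs", []):
-                 yield vimid, backlog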
- '''
-
- def __init__(self):
- self._logger = logger
- self.proxy_prefix = settings.MULTICLOUD_PREFIX
-
- def get(self, request, vimid=""):
- '''
- get blob of vesagent-config
- :param request:
- :param vimid:
- :return:
- '''
- self._logger.info("vimid: %s" % vimid)
- self._logger.debug("with META: %s" % request.META)
- try:
- # get vesagent_config from the cloud region; guard against missing or malformed cloud_extra_info
- vesagent_config = None
- cloud_extra_info_str = ''
- try:
- viminfo = extsys.get_vim_by_id(vimid)
- cloud_extra_info_str = viminfo.get('cloud_extra_info', '')
- cloud_extra_info = json.loads(cloud_extra_info_str) if cloud_extra_info_str != '' else None
- vesagent_config = cloud_extra_info.get("vesagent_config", None) if cloud_extra_info is not None else None
- except Exception:
- # ignore this error so the backlogs can still be returned
- self._logger.warn("cloud extra info is provided with data in bad format: %s" % cloud_extra_info_str)
-
- vesagent_backlogs = self.getBacklogsOneVIM(vimid)
-
- except Exception as e:
- self._logger.error("exception:%s" % str(e))
- return Response(data={'error': str(e)},
- status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
- self._logger.info("return with %s" % status.HTTP_200_OK)
- return Response(data={"vesagent_config": vesagent_config,
- "vesagent_backlogs": vesagent_backlogs},
- status=status.HTTP_200_OK)
-
- def post(self, request, vimid=""):
- '''
- update the blob of vesagent-config, rebuild the backlog for the vesagent workers,
- and start the vesagent workers if not started yet
- Implication: a request to this API endpoint builds the backlogs locally, hence only the local VES agent workers
- will process them; this conforms to the distributed deployment of M.C. services, which includes the VES agents
- :param request:{"vesagent_config":
- {"ves_subscription":
- {"endpoint":"http://127.0.0.1:9005/sample",
- "username":"user","password":"password"},
- "poll_interval_default":10,
- "backlogs":
- [
- {"domain":"fault","type":"vm","tenant":"VIM","source":"onap-aaf"}
- ]
- }
- }
- :param vimid:
- :return:
- '''
-
- self._logger.info("vimid: %s" % vimid)
- self._logger.debug("with META: %s, with data: %s" % (request.META, request.data))
- try:
- vesagent_config = None
- if request.data is None or request.data.get("vesagent_config", None) is None:
- # Try to load the vesagent_config out of cloud_region["cloud_extra_info"]
- viminfo = extsys.get_vim_by_id(vimid)
- cloud_extra_info_str = viminfo.get('cloud_extra_info', None)
- cloud_extra_info = json.loads(cloud_extra_info_str) if cloud_extra_info_str is not None else None
- vesagent_config = cloud_extra_info.get("vesagent_config", None) if cloud_extra_info is not None else None
- else:
- vesagent_config = request.data.get("vesagent_config", None)
-
- if vesagent_config is None:
- return Response(data={'error': 'vesagent_config is not provided'},
- status=status.HTTP_400_BAD_REQUEST)
-
- vesagent_backlogs = self.buildBacklogsOneVIM(vimid, vesagent_config)
-
- # store back to cloud_extra_info
- # tbd
-
- except Exception as e:
- self._logger.error("exception:%s" % str(e))
- return Response(data={'error': str(e)},
- status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
- self._logger.info("return with %s" % status.HTTP_201_CREATED)
- return Response(data={"vesagent_config": vesagent_config,
- "vesagent_backlogs": vesagent_backlogs},
- status=status.HTTP_201_CREATED)
-
- def delete(self, request, vimid=""):
- '''
- delete the blob of vesagent-config, remove it from backlog and stop the vesagent worker if no backlog
- :param request:
- :param vimid:
- :return:
- '''
- self._logger.info("vimid: %s" % vimid)
- self._logger.debug("with META: %s" % request.META)
- try:
- # tbd
- self.clearBacklogsOneVIM(vimid)
- except Exception as e:
- self._logger.error("exception:%s" % str(e))
- return Response(data={'error': str(e)},
- status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
- self._logger.info("return with %s" % status.HTTP_200_OK)
- return Response(status=status.HTTP_200_OK)
-
- def getBacklogsOneVIM(self, vimid):
- '''
- retrieve the cached backlog config for a VIM
- :param vimid:
- :return:
- '''
- self._logger.debug("vimid: %s" % vimid)
-
- vesAgentConfig = None
- try:
- # retrieve the backlogs
- vesAgentConfigStr = cache.get("VesAgentBacklogs.config.%s" % (vimid))
- if vesAgentConfigStr is None:
- logger.warn("VesAgentBacklogs.config.%s cannot be found in cache" % (vimid))
- return None
-
- logger.debug("VesAgentBacklogs.config.%s: %s" % (vimid, vesAgentConfigStr))
-
- vesAgentConfig = json.loads(vesAgentConfigStr)
- if vesAgentConfig is None:
- logger.warn("VesAgentBacklogs.config.%s corrupts" % (vimid))
- return None
-
- except Exception as e:
- self._logger.error("exception:%s" % str(e))
- vesAgentConfig = {"error": "exception occurs"}
-
- self._logger.debug("return")
- return vesAgentConfig
-
- def clearBacklogsOneVIM(self, vimid):
- '''
- remove the backlogs of the specified VIM from the cache
- :param vimid:
- :return:
- '''
- self._logger.debug("vimid: %s" % vimid)
-
- try:
- # remove vimid from "VesAgentBacklogs.vimlist"
- VesAgentBacklogsVimListStr = cache.get("VesAgentBacklogs.vimlist")
- VesAgentBacklogsVimList = []
- if VesAgentBacklogsVimListStr is not None:
- VesAgentBacklogsVimList = json.loads(VesAgentBacklogsVimListStr)
- VesAgentBacklogsVimList = [v for v in VesAgentBacklogsVimList if v != vimid]
-
- logger.debug("VesAgentBacklogs.vimlist is %s" % VesAgentBacklogsVimList)
-
- # cache forever
- cache.set("VesAgentBacklogs.vimlist", json.dumps(VesAgentBacklogsVimList), None)
-
- # retrieve the backlogs
- vesAgentConfigStr = cache.get("VesAgentBacklogs.config.%s" % (vimid))
- if vesAgentConfigStr is None:
- logger.warn("VesAgentBacklogs.config.%s cannot be found in cache" % (vimid))
- return 0
-
- logger.debug("VesAgentBacklogs.config.%s: %s" % (vimid, vesAgentConfigStr))
-
- vesAgentConfig = json.loads(vesAgentConfigStr)
- if vesAgentConfig is None:
- logger.warn("VesAgentBacklogs.config.%s corrupts" % (vimid))
- return 0
-
- # iterate over all backlogs and remove the associated state
- # tbd
-
- # clear the whole backlogs for a VIM
- cache.set("VesAgentBacklogs.config.%s" % vimid, "deleting the backlogs", 1)
-
- except Exception as e:
- self._logger.error("exception:%s" % str(e))
-
- self._logger.debug("return")
- return 0
-
- def buildBacklogsOneVIM(self, vimid, vesagent_config=None):
- '''
- build and cache the backlogs for a specific cloud region, and spawn vesagent workers if needed
- :param vimid:
- :param vesagent_config: vesagent_config data in json object
- :return:
- '''
- self._logger.debug("vimid: %s" % vimid)
- self._logger.debug("config data: %s" % vesagent_config)
-
- VesAgentBacklogsConfig = None
- try:
- if vesagent_config:
- # now rebuild the backlog
- VesAgentBacklogsConfig = {
- "vimid": vimid,
- "poll_interval_default": vesagent_config.get("poll_interval_default", 0),
- "subscription": vesagent_config.get("ves_subscription", None),
- "backlogs": [self.buildBacklog(vimid, b) for b in vesagent_config.get("backlogs", [])]
- }
-
- # add/update the backlog into cache
- VesAgentBacklogsConfigStr = json.dumps(VesAgentBacklogsConfig)
- # cache forever
- cache.set("VesAgentBacklogs.config.%s" % vimid, VesAgentBacklogsConfigStr, None)
-
- # update list of vimid for vesagent
- # get the whole list of backlog
- VesAgentBacklogsVimListStr = cache.get("VesAgentBacklogs.vimlist")
- VesAgentBacklogsVimList = [vimid]
- if VesAgentBacklogsVimListStr is not None:
- VesAgentBacklogsVimList = json.loads(VesAgentBacklogsVimListStr)
- VesAgentBacklogsVimList = [v for v in VesAgentBacklogsVimList if v != vimid]
- VesAgentBacklogsVimList.append(vimid)
-
- logger.debug("VesAgentBacklogs.vimlist is %s" % VesAgentBacklogsVimList)
-
- # cache forever
- cache.set("VesAgentBacklogs.vimlist", json.dumps(VesAgentBacklogsVimList), None)
-
- # notify the scheduler
- scheduleBacklogs.delay(vimid)
- except Exception as e:
- self._logger.error("exception:%s" % str(e))
- VesAgentBacklogsConfig = {"error": "exception occurs during build backlogs"}
-
- self._logger.debug("return")
- return VesAgentBacklogsConfig
-
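- # Illustration only (not part of the original module): a hypothetical direct invocation of
- # buildBacklogsOneVIM with the sample config shown in the POST docstring above; the vimid
- # value is made up for the example.
- #
- #   ctrl = VesAgentCtrl()
- #   ctrl.buildBacklogsOneVIM(
- #       "windriver-hudson-dc_RegionOne",
- #       {"ves_subscription": {"endpoint": "http://127.0.0.1:9005/sample",
- #                             "username": "user", "password": "password"},
- #        "poll_interval_default": 10,
- #        "backlogs": [{"domain": "fault", "type": "vm", "tenant": "VIM", "source": "onap-aaf"}]})
-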
- def buildBacklog(self, vimid, backlog_input):
- self._logger.debug("build backlog for: %s" % vimid)
- self._logger.debug("with input: %s" % backlog_input)
-
- try:
- if backlog_input["domain"] == "fault" and backlog_input["type"] == "vm":
- return fault_vm.buildBacklog_fault_vm(vimid, backlog_input)
- else:
- self._logger.warn("return with failure: unsupported backlog domain:%s, type:%s"
- % (backlog_input["domain"], backlog_input["type"]))
- return None
- except Exception as e:
- self._logger.error("exception:%s" % str(e))
- return None
-
-
-class APIv1VesAgentCtrl(VesAgentCtrl):
-
- def __init__(self):
- super(APIv1VesAgentCtrl, self).__init__()
- # self._logger = logger
- self.proxy_prefix = settings.MULTICLOUD_API_V1_PREFIX
-
- def get(self, request, cloud_owner="", cloud_region_id=""):
- '''
- wrapper of the inherited API with VIM ID
- :param request:
- :param cloud_owner:
- :param cloud_region_id:
- :return:
- '''
- self._logger.info("cloud_owner, cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
-
- vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- return super(APIv1VesAgentCtrl, self).get(request, vimid)
-
- def post(self, request, cloud_owner="", cloud_region_id=""):
- '''
- wrapper for inherited API with VIM ID
- :param request:
- :param cloud_owner:
- :param cloud_region_id:
- :return:
- '''
- self._logger.info("cloud_owner, cloud_region_id: %s,%s" % (cloud_owner, cloud_region_id))
-
- vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- return super(APIv1VesAgentCtrl, self).post(request, vimid)
-
- def delete(self, request, cloud_owner="", cloud_region_id=""):
- '''
- wrapper of inherited API with VIM ID
- :param request:
- :param cloud_owner:
- :param cloud_region_id:
- :return:
- '''
- self._logger.info(
- "cloud_owner, cloud_region_id: %s,%s" %
- (cloud_owner, cloud_region_id))
-
- vimid = extsys.encode_vim_id(cloud_owner, cloud_region_id)
- return super(APIv1VesAgentCtrl, self).delete(request, vimid)
diff --git a/windriver/titanium_cloud/vesagent/vespublish.py b/windriver/titanium_cloud/vesagent/vespublish.py
deleted file mode 100644
index 33b971b0..00000000
--- a/windriver/titanium_cloud/vesagent/vespublish.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright (c) 2017-2018 Wind River Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import, unicode_literals
-
-import time
-import logging
-import json
-import urllib2
-
-logger = logging.getLogger(__name__)
-
-
-def publishAnyEventToVES(ves_subscription, events):
- if not events or len(events) == 0:
- return
-
- logger.info("Start to send single event to VES collector.")
- endpoint = ves_subscription.get("endpoint", None)
- # username = ves_subscription.get("username", None)
- # password = ves_subscription.get("password", None)
-
- if endpoint:
- try:
- if len(events) > 1:
- endpoint = "%s/eventBatch" % endpoint
- events = {"eventList": events}
- elif len(events) == 1:
- events = {"event": events[0]}
-
- logger.info("publish event to VES: %s" % endpoint)
- headers = {'Content-Type': 'application/json'}
- request = urllib2.Request(url=endpoint, headers=headers, data=json.dumps(events))
- time.sleep(1)
- response = urllib2.urlopen(request)
- logger.info("VES response is: %s" % response.read())
- except urllib2.URLError as e:
- logger.critical("Failed to publish to %s: %s" % (endpoint, e.reason))
- except Exception as e:
- logger.error("exception:%s" % str(e))
- else:
- logger.info("Missing VES info.")