author     Liexiang Yue <yueliexiang@chinamobile.com>    2019-03-20 10:54:24 +0800
committer  Liexiang Yue <yueliexiang@chinamobile.com>    2019-03-20 18:48:14 +0800
commit     54cb529d8452c1f235fc69ea4a647e2374d475d2 (patch)
tree       87d03355584ce17b14bf9e442bafa4b106175a7c /fcaps
parent     58693a8e82547fa29eee954c3be295734b3f5fda (diff)
Multi-Cloud Fcaps enhancement
Issue-ID: MULTICLOUD-495
Change-Id: I3d5d6eadeaa00b8597dfa318bad6a1fa1796a2ce
Signed-off-by: Liexiang Yue <yueliexiang@chinamobile.com>
Diffstat (limited to 'fcaps')
-rw-r--r--  fcaps/fcaps/settings.py                     |   4
-rw-r--r--  fcaps/fcaps/vesagent/event_domain/pm_vm.py  | 250
-rw-r--r--  fcaps/fcaps/vesagent/tasks.py               |   3
-rw-r--r--  fcaps/fcaps/vesagent/tests/tests_pm_vm.py   | 138
4 files changed, 393 insertions, 2 deletions
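For orientation: this enhancement teaches the VES agent to poll VM performance metrics (Ceilometer samples) through the multicloud proxy and publish them as VES measurementsForVfScaling events, alongside the existing fault handling. The new path is triggered by a backlog entry with domain "pm" and type "vm"; a minimal sketch of such an entry follows (field names taken from the new code and test mocks, values are placeholders):

    pm_backlog = {
        "domain": "pm",                 # dispatched to processBacklog_pm_vm() by tasks.py
        "type": "vm",
        "tenant": "onap-casablanca01",  # placeholder tenant name, resolved to tenant_id
        "source": "onap-aaf",           # placeholder VM name, resolved to server_id
    }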
diff --git a/fcaps/fcaps/settings.py b/fcaps/fcaps/settings.py
index f7306b70..7844c68f 100644
--- a/fcaps/fcaps/settings.py
+++ b/fcaps/fcaps/settings.py
@@ -97,10 +97,10 @@ MSB_SERVICE_ADDR = os.environ.get('MSB_ADDR', DEFAULT_MSB_ADDR)
MSB_SERVICE_PORT = os.environ.get('MSB_PORT', "80")
# [Multicloud]
-MULTICLOUD_PREFIX = "http://%s:%s/api/multicloud-fcaps/v0" % (
+MULTICLOUD_PREFIX = "http://%s:%s/api/multicloud/v0" % (
MSB_SERVICE_ADDR, MSB_SERVICE_PORT)
-MULTICLOUD_API_V1_PREFIX = "http://%s:%s/api/multicloud-fcaps/v1" % (
+MULTICLOUD_API_V1_PREFIX = "http://%s:%s/api/multicloud/v1" % (
MSB_SERVICE_ADDR, MSB_SERVICE_PORT)
# [A&AI]
diff --git a/fcaps/fcaps/vesagent/event_domain/pm_vm.py b/fcaps/fcaps/vesagent/event_domain/pm_vm.py
new file mode 100644
index 00000000..032655ba
--- /dev/null
+++ b/fcaps/fcaps/vesagent/event_domain/pm_vm.py
@@ -0,0 +1,250 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, CMCC Technologies Co., Ltd.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import json
+import uuid
+
+from django.conf import settings
+from fcaps.vesagent.vespublish import publishAnyEventToVES
+from common.utils import restcall
+from common.msapi import extsys
+from common.msapi.helper import Helper as helper
+
+
+import datetime
+import time
+
+logger = logging.getLogger(__name__)
+
+
+def get_epoch_now_usecond():
+ '''
+ get epoch timestamp of this moment in usecond
+ :return:
+ '''
+ now_time = datetime.datetime.now()
+ epoch_time_sec = time.mktime(now_time.timetuple())
+ return int(epoch_time_sec * 1e6 + now_time.microsecond)
+
+
+def buildBacklog_pm_vm(vimid, backlog_input):
+    # build backlog with domain:"pm", type:"vm"
+
+ logger.info("vimid: %s" % vimid)
+ logger.debug("with input: %s" % backlog_input)
+
+ try:
+
+ # must resolve the tenant id and server id while building the backlog
+ tenant_id = backlog_input.get("tenantid", None)
+ server_id = backlog_input.get("sourceid", None)
+ server_name = backlog_input.get("source", None)
+
+ # should resolve the name to id later
+ if tenant_id is None:
+ tenant_name = backlog_input["tenant"]
+
+ # resolve tenant_name to tenant_id
+ auth_api_url_format = "/{f_vim_id}/identity/v2.0/tokens"
+ auth_api_url = auth_api_url_format.format(f_vim_id=vimid)
+ auth_api_data = {"auth": {"tenantName": tenant_name}}
+ base_url = settings.MULTICLOUD_PREFIX
+ extra_headers = ''
+ ret = restcall._call_req(base_url, "", "", 0, auth_api_url,
+ "POST", extra_headers, json.dumps(auth_api_data))
+ if ret[0] > 0 or ret[1] is None:
+ logger.critical("call url %s failed with status %s" % (auth_api_url, ret[0]))
+ return None
+
+ token_resp = json.JSONDecoder().decode(ret[1])
+ token = token_resp["access"]["token"]["id"]
+ tenant_id = token_resp["access"]["token"]["tenant"]["id"]
+
+ if server_id is None and server_name:
+            # resolve server_name to server_id in case there is no wildcard in server_name
+ vserver_api_url_format \
+ = "/{f_vim_id}/compute/v2.1/{f_tenant_id}/servers?name={f_server_name}"
+ vserver_api_url = vserver_api_url_format.format(f_vim_id=vimid,
+ f_tenant_id=tenant_id,
+ f_server_name=server_name)
+ base_url = settings.MULTICLOUD_PREFIX
+ extra_headers = {'X-Auth-Token': token}
+ ret = restcall._call_req(base_url, "", "", 0, vserver_api_url, "GET", extra_headers, "")
+ if ret[0] > 0 or ret[1] is None:
+ logger.critical("call url %s failed with status %s" % (vserver_api_url, ret[0]))
+ return None
+
+ server_resp = json.JSONDecoder().decode(ret[1])
+ # find out the server wanted
+ for s in server_resp.get("servers", []):
+ if s["name"] == server_name:
+ server_id = s["id"]
+ break
+ if server_id is None:
+ logger.warn("source %s cannot be found under tenant id %s "
+ % (server_name, tenant_id))
+ return None
+
+        # multicloud-proxied OpenStack (Ceilometer) metering API
+        if server_id is None and server_name is None:
+            # no server specified: collect samples for all VMs of this VIM
+            api_url = "/v2/samples"
+        else:
+            # a specific server was resolved: filter samples by its resource_id
+            api_url_fmt = "/v2/samples?q.field=resource_id&q.op=eq&q.value={f_server_id}"
+            api_url = api_url_fmt.format(
+                f_server_id=server_id)
+
+ backlog = {
+ "backlog_uuid":
+ str(uuid.uuid3(uuid.NAMESPACE_URL,
+ str("%s-%s-%s" % (vimid, tenant_id, server_id)))),
+ "tenant_id": tenant_id,
+ "server_id": server_id,
+ "api_method": "GET",
+ "api_link": api_url,
+ }
+ backlog.update(backlog_input)
+ except Exception as e:
+ logger.error("exception:%s" % str(e))
+ return None
+
+ logger.info("return")
+ logger.debug("with backlog: %s" % backlog)
+ return backlog
+
+
+# process backlog with domain:"pm", type:"vm"
+
+
+def processBacklog_pm_vm(vesAgentConfig, vesAgentState, oneBacklog):
+ logger.debug("vesAgentConfig:%s, vesAgentState:%s, oneBacklog: %s"
+ % (vesAgentConfig, vesAgentState, oneBacklog))
+
+    try:
+        # vimid comes from the agent configuration (see MOCK_vesAgentConfig in the tests)
+        vimid = vesAgentConfig["vimid"]
+        all_events = []
+
+        # get token
+        # resolve cloud owner and region from the vim id
+        cloud_owner, regionid = extsys.decode_vim_id(vimid)
+        # should go via multicloud proxy so that the selflink is updated by multicloud
+        retcode, v2_token_resp_json, os_status = helper.MultiCloudIdentityHelper(
+            settings.MULTICLOUD_API_V1_PREFIX,
+            cloud_owner, regionid, "/v2.0/tokens")
+        if retcode > 0 or not v2_token_resp_json:
+            logger.error("authenticate fails:%s,%s, %s" %
+                         (cloud_owner, regionid, v2_token_resp_json))
+            return
+
+        service_type = "metering"
+        resource_uri = oneBacklog["api_link"]
+        template_data = ''
+        logger.info("retrieve metering resources, URI:%s" % resource_uri)
+ retcode, content, os_status = helper.MultiCloudServiceHelper(cloud_owner,
+ regionid,
+ v2_token_resp_json,
+ service_type,
+ resource_uri,
+ template_data,
+ "GET")
+        # group the raw samples by recorded_at before converting them to VES events
+        meters = partition_samples(content) if retcode == 0 and content else []
+
+        for meter in meters:
+            encodeData = data2event_pm_vm(meter)
+            if encodeData is None:
+                continue
+            encodeData['event']['commonEventHeader']['eventType'] = 'guestOS'
+            encodeData['event']['commonEventHeader']['reportingEntityId'] = vimid
+            encodeData['event']['commonEventHeader']['reportingEntityName'] = vimid
+            logger.debug("this event: %s" % encodeData)
+            all_events.append(encodeData.get("event", None))
+
+ # report data to VES
+ if len(all_events) > 0:
+ ves_subscription = vesAgentConfig.get("subscription", None)
+ publishAnyEventToVES(ves_subscription, all_events)
+ # store the latest data into cache, never expire
+
+ except Exception as e:
+ logger.error("exception:%s" % str(e))
+ return
+
+ logger.info("return")
+ return
+
+
+def data2event_pm_vm(vm_data):
+ VES_EVENT_VERSION = 3.0
+ VES_EVENT_pm_VERSION = 2.0
+ VES_EVENT_pm_DOMAIN = "measurementsForVfScaling"
+ eventId = str(uuid.uuid1())
+ eventName = 'Mfvs_' + vm_data['resource_id']
+ eventType = ''
+ sourceId = vm_data['resource_id']
+ sourceName = vm_data['resource_id']
+ reportingEntityId = ''
+ reportingEntityName = ''
+ priority = 'Normal'
+ sequence = 1
+    # VES epoch timestamps are expressed in microseconds
+    startEpochMicrosec = int(time.mktime(time.strptime(vm_data['recorded_at'], '%Y-%m-%dT%H:%M:%S')) * 1e6)
+    lastEpochMicrosec = startEpochMicrosec
+ # now populate the event structure
+ this_event = {
+ 'event': {
+ 'commonEventHeader': {
+ 'version': VES_EVENT_VERSION,
+ 'eventName': eventName,
+ 'domain': VES_EVENT_pm_DOMAIN,
+ 'eventId': eventId,
+ 'eventType': eventType,
+ 'sourceId': sourceId,
+ 'sourceName': sourceName,
+ 'reportingEntityId': reportingEntityId,
+ 'reportingEntityName': reportingEntityName,
+ 'priority': priority,
+ 'startEpochMicrosec': startEpochMicrosec,
+ 'lastEpochMicrosec': lastEpochMicrosec,
+ 'sequence': sequence
+ },
+ 'measurementsForVfScalingFields': {
+ 'measurementsForVfScalingVersion': VES_EVENT_pm_VERSION,
+ 'measurementInterval': 0,
+ 'additionalMeasurements': vm_data['additionalMeasurements']
+ }
+ }
+ }
+
+ return this_event
+
+
+def partition_samples(data):
+    # group all samples by the time at which they were recorded
+    recorded_times = []
+    for obj in data:
+        recorded_times.append(obj['recorded_at'])
+    recorded_times = list(set(recorded_times))
+    # build one VES-ready measurement group per recording time
+    meter_to_ves = []
+    for recorded_time in recorded_times:
+        additionalMeasurements = []
+        arrayOfFields = []
+        resource_id = ""
+        for obj in data:
+            if obj['recorded_at'] == recorded_time:
+                arrayOfFields.append({'name': obj['meter'], 'value': str(obj['volume'])})
+                resource_id = obj['resource_id']
+        additionalMeasurements.append({'name': resource_id, 'arrayOfFields': arrayOfFields})
+        meter_to_ves.append({'resource_id': resource_id, 'recorded_at': recorded_time,
+                             'additionalMeasurements': additionalMeasurements})
+    return meter_to_ves
\ No newline at end of file
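To illustrate the data path added above: partition_samples() groups raw metering samples by recorded_at, and data2event_pm_vm() wraps each group into a VES measurementsForVfScaling event. A minimal sketch with made-up sample values (not a real Ceilometer response):

    from fcaps.vesagent.event_domain import pm_vm

    samples = [
        {"resource_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
         "recorded_at": "2019-03-20T10:54:24", "meter": "cpu_util", "volume": 12.5},
        {"resource_id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
         "recorded_at": "2019-03-20T10:54:24", "meter": "memory.usage", "volume": 2048},
    ]

    for grouped in pm_vm.partition_samples(samples):
        event = pm_vm.data2event_pm_vm(grouped)
        # eventName is "Mfvs_" + resource_id; additionalMeasurements carries both meters
        print(event["event"]["commonEventHeader"]["eventName"])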
diff --git a/fcaps/fcaps/vesagent/tasks.py b/fcaps/fcaps/vesagent/tasks.py
index 6a24a653..169c156e 100644
--- a/fcaps/fcaps/vesagent/tasks.py
+++ b/fcaps/fcaps/vesagent/tasks.py
@@ -23,6 +23,7 @@ import time
from django.core.cache import cache
from fcaps.vesagent.event_domain.fault_vm import processBacklog_fault_vm
+from fcaps.vesagent.event_domain.pm_vm import processBacklog_pm_vm
logger = logging.getLogger(__name__)
@@ -182,6 +183,8 @@ def processOneBacklog(vesAgentConfig, vesAgentState, poll_interval_default, oneB
# collect data in case of expiration
if oneBacklog["domain"] == "fault" and oneBacklog["type"] == "vm":
processBacklog_fault_vm(vesAgentConfig, vesAgentState, oneBacklog)
+ elif oneBacklog["domain"] == "pm" and oneBacklog["type"] == "vm":
+ processBacklog_pm_vm(vesAgentConfig, vesAgentState, oneBacklog)
else:
logger.warn("Dispatching backlog fails due to unsupported backlog domain %s,type:%s"
% (oneBacklog["domain"], oneBacklog["type"]))
diff --git a/fcaps/fcaps/vesagent/tests/tests_pm_vm.py b/fcaps/fcaps/vesagent/tests/tests_pm_vm.py
new file mode 100644
index 00000000..50db3c38
--- /dev/null
+++ b/fcaps/fcaps/vesagent/tests/tests_pm_vm.py
@@ -0,0 +1,138 @@
+# Copyright (c) 2019, CMCC Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+import unittest
+import json
+
+from fcaps.vesagent import vespublish
+from common.utils import restcall
+from fcaps.vesagent.event_domain import pm_vm
+
+MOCK_TOKEN_RESPONSE = {
+ "access":
+ {"token": {"issued_at": "2018-05-10T16:56:56.000000Z",
+ "expires": "2018-05-10T17:56:56.000000Z",
+ "id": "4e481914244d4adbb755c4ea455abff7",
+ "tenant": {"domain": {"id": "default", "name": "Default"},
+ "enabled": "true", "id": "9ef561bd76254639b8e31eea4b56f179", "name": "onap-casablanca01"}},
+ "serviceCatalog": [], "user": {"domain": {"id": "default", "name": "Default"},
+ "id": "ba76c94eb5e94bb7bec6980e5507aae2", "name": "demo"}}
+}
+
+MOCK_SERVERS_GET_RESPONSE = {
+ "servers": [
+ {"id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "links": [{
+ "href": "http://10.12.25.2:8774/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "rel": "self"},
+ {
+ "href": "http://10.12.25.2:8774/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "rel": "bookmark"}],
+ "name": "onap-aaf"}]
+}
+
+MOCK_BACKLOG_INPUT = {
+ "backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "012a5f3b-1c55-48bc-a606-951421c9a998",
+ "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
+ "source": "onap-aaf",
+ "api_link":
+ "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "domain": "pm", "type": "vm", "tenant": "VIM"
+}
+
+MOCK_BACKLOG_INPUT_wo_tenant_id = {
+ "backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "012a5f3b-1c55-48bc-a606-951421c9a998",
+ "source": "onap-aaf",
+ "api_link":
+ "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "domain": "pm", "type": "vm", "tenant": "VIM"
+}
+
+MOCK_BACKLOG_INPUT_wo_tenant = {
+ "backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "012a5f3b-1c55-48bc-a606-951421c9a998",
+ "source": "onap-aaf",
+ "domain": "fault", "type": "vm", }
+
+MOCK_BACKLOG_INPUT_wo_server_id = {
+ "source": "onap-aaf",
+ "domain": "fault", "type": "vm", "tenant": "VIM"}
+
+MOCK_BACKLOG_INPUT_wo_server = {"domain": "pm", "type": "vm", "tenant": "VIM"}
+
+MOCK_SERVER_GET_RESPONSE = {
+ "server": {"wrs-res:topology": "node:0, 4096MB, pgsize:2M, vcpus:0,1, pol:sha",
+ "OS-EXT-STS:task_state": None,
+ "addresses": {
+ "oam_onap_BTHY": [{"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:6c:0d:6b",
+ "version": 4, "addr": "10.0.13.1", "OS-EXT-IPS:type": "fixed"},
+ {"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:6c:0d:6b", "version": 4,
+ "addr": "10.12.5.185", "OS-EXT-IPS:type": "floating"}]},
+ "links": [], "image": {"id": "6e219e86-cd94-4989-9119-def29aa10b12", "links": []},
+ "wrs-if:nics": [], "wrs-sg:server_group": "",
+ "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2018-04-26T08:01:28.000000",
+ "flavor": {}, "id": "c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "security_groups": [{"name": "onap_sg_BTHY"}],
+ "user_id": "ba76c94eb5e94bb7bec6980e5507aae2",
+ "OS-DCF:diskConfig": "MANUAL", "accessIPv4": "",
+ "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1,
+ "OS-EXT-AZ:availability_zone": "nova", "metadata": {},
+ "status": "ACTIVE", "updated": "2018-04-26T08:01:28Z",
+ "hostId": "17acc9f2ae4f618c314e4cdf0c206585b895bc72a9ec57e57b254133",
+ "OS-SRV-USG:terminated_at": None, "wrs-res:pci_devices": "",
+ "wrs-res:vcpus": [2, 2, 2], "key_name": "onap_key_BTHY", "name": "onap-aaf",
+ "created": "2018-04-26T08:01:20Z", "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae",
+ "os-extended-volumes:volumes_attached": [], "config_drive": ""}}
+
+MOCK_SERVER_GET_RESPONSE_empty = {}
+
+MOCK_vesAgentConfig = {
+ "backlogs": [
+ {"backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "012a5f3b-1c55-48bc-a606-951421c9a998",
+ "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae", "api_method": "GET",
+ "source": "onap-aaf",
+ "api_link":
+ "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "domain": "fault", "type": "vm", "tenant": "VIM"}
+ ],
+ "poll_interval_default": 10, "vimid": "CloudOwner3_RegionOne",
+ "ves_subscription": {"username": "user", "password": "password",
+ "endpoint": "http://127.0.0.1:9011/sample"}}
+
+MOCK_vesAgentState = {"ce2d7597-22e1-4239-890f-bc303bd67076": {"timestamp": 1525975400}}
+MOCK_oneBacklog = {
+ "backlog_uuid": "ce2d7597-22e1-4239-890f-bc303bd67076",
+ "server_id": "012a5f3b-1c55-48bc-a606-951421c9a998",
+ "tenant_id": "0e148b76ee8c42f78d37013bf6b7b1ae",
+ "api_method": "GET", "source": "onap-aaf",
+ "api_link":
+ "/onaplab_RegionOne/compute/v2.1/0e148b76ee8c42f78d37013bf6b7b1ae/servers/c4b575fa-ed85-4642-ab4b-335cb5744721",
+ "domain": "fault", "type": "vm", "tenant": "VIM"}
+
+
+class PmVMTest(unittest.TestCase):
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def test_get_epoch_now_usecond(self):
+ epoch = pm_vm.get_epoch_now_usecond()
+ self.assertGreater(epoch, 1)
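The tests shown here only cover get_epoch_now_usecond(); a possible follow-up (a sketch, not part of this change) would exercise buildBacklog_pm_vm() through its name-resolution path by stubbing restcall._call_req with the mocks defined above. The third element of each mocked tuple is illustrative; only indexes 0 and 1 are read by the code under test.

    @mock.patch.object(restcall, '_call_req')
    def test_buildBacklog_pm_vm_wo_tenant_id(self, mock_call_req):
        # first call resolves the tenant via the tokens API,
        # second call resolves the server id via the servers API
        mock_call_req.side_effect = [
            (0, json.dumps(MOCK_TOKEN_RESPONSE), "200"),
            (0, json.dumps(MOCK_SERVERS_GET_RESPONSE), "200"),
        ]
        backlog = pm_vm.buildBacklog_pm_vm(
            "onaplab_RegionOne", MOCK_BACKLOG_INPUT_wo_tenant_id)
        self.assertIsNotNone(backlog)
        self.assertEqual("pm", backlog["domain"])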