path: root/vnftest/common
Diffstat (limited to 'vnftest/common')
-rw-r--r--  vnftest/common/__init__.py          0
-rw-r--r--  vnftest/common/constants.py       147
-rw-r--r--  vnftest/common/exceptions.py       61
-rw-r--r--  vnftest/common/html_template.py   195
-rw-r--r--  vnftest/common/httpClient.py       48
-rw-r--r--  vnftest/common/openstack_utils.py 765
-rw-r--r--  vnftest/common/process.py         140
-rw-r--r--  vnftest/common/rest_client.py      62
-rwxr-xr-x  vnftest/common/task_template.py    78
-rw-r--r--  vnftest/common/template_format.py  72
-rw-r--r--  vnftest/common/utils.py           399
-rw-r--r--  vnftest/common/yaml_loader.py      35
12 files changed, 2002 insertions, 0 deletions
diff --git a/vnftest/common/__init__.py b/vnftest/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/common/__init__.py
diff --git a/vnftest/common/constants.py b/vnftest/common/constants.py
new file mode 100644
index 0000000..9da64ba
--- /dev/null
+++ b/vnftest/common/constants.py
@@ -0,0 +1,147 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/constants.py
+from __future__ import absolute_import
+import os
+import errno
+
+from functools import reduce
+
+import pkg_resources
+
+# this module must only import other modules that do
+# not require loggers to be created, so this cannot
+# include vnftest.common.utils
+from vnftest.common.yaml_loader import yaml_load
+
+dirname = os.path.dirname
+abspath = os.path.abspath
+join = os.path.join
+sep = os.path.sep
+
+CONF = {}
+CONF_FILE = None
+VNFTEST_ROOT_PATH = dirname(
+ dirname(abspath(pkg_resources.resource_filename(__name__, "")))) + sep
+
+
+def get_param(key, default=''):
+ # we have to defer this to runtime so that we can mock os.environ.get in unittests
+ default_path = os.path.join(VNFTEST_ROOT_PATH, "etc/vnftest/vnftest.yaml")
+ conf_file = os.environ.get('CONF_FILE', default_path)
+ # don't re-parse yaml for each lookup
+ if not CONF:
+ # do not use vnftest.common.utils.parse_yaml
+ # since vnftest.common.utils creates a logger
+ # and so it cannot be imported before this code
+ try:
+ with open(conf_file) as f:
+ value = yaml_load(f)
+ except IOError:
+ pass
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ else:
+ CONF.update(value)
+ try:
+ return reduce(lambda a, b: a[b], key.split('.'), CONF)
+ except KeyError:
+ if not default:
+ raise
+ return default
+
+
+try:
+ SERVER_IP = get_param('api.server_ip')
+except KeyError:
+ try:
+ from pyroute2 import IPDB
+ except ImportError:
+ SERVER_IP = '172.17.0.1'
+ else:
+ with IPDB() as ip:
+ try:
+ SERVER_IP = ip.routes['default'].gateway
+ except KeyError:
+ # during unittests ip.routes['default'] can be invalid
+ SERVER_IP = '127.0.0.1'
+
+if not SERVER_IP:
+ SERVER_IP = '127.0.0.1'
+
+
+# dir
+CONF_DIR = get_param('dir.conf', join(VNFTEST_ROOT_PATH, 'etc/vnftest'))
+CONF_FILE = join(CONF_DIR, 'vnftest.conf')
+REPOS_DIR = get_param('dir.repos', join(VNFTEST_ROOT_PATH, 'home/onap/repos/vnftest'))
+LOG_DIR = get_param('dir.log', join(VNFTEST_ROOT_PATH, 'tmp/vnftest/'))
+
+TASK_LOG_DIR = get_param('dir.tasklog', join(VNFTEST_ROOT_PATH, 'var/log/vnftest/'))
+CONF_SAMPLE_DIR = join(REPOS_DIR, 'etc/vnftest/')
+SAMPLE_CASE_DIR = join(REPOS_DIR, 'samples')
+TESTCASE_DIR = join(VNFTEST_ROOT_PATH, 'tests/onap/test_cases/')
+TESTSUITE_DIR = join(VNFTEST_ROOT_PATH, 'tests/onap/test_suites/')
+
+# file
+DEFAULT_OUTPUT_FILE = get_param('file.output_file', join(VNFTEST_ROOT_PATH, 'tmp/vnftest.out'))
+DEFAULT_HTML_FILE = get_param('file.html_file', join(VNFTEST_ROOT_PATH, 'tmp/vnftest.htm'))
+REPORTING_FILE = get_param('file.reporting_file', join(VNFTEST_ROOT_PATH, 'tmp/report.html'))
+
+# components
+AAI_IP = get_param('component.aai_ip')
+AAI_PORT = get_param('component.aai_port')
+AAI_SSL_PORT = get_param('component.aai_ssl_port')
+MSO_IP = get_param('component.mso_ip')
+SDC_IP = get_param('component.sdc_ip')
+SDC_PORT = get_param('component.sdc_port')
+SDC_CATALOG_PORT = get_param('component.sdc_catalog_port')
+SDC_DESIGNER_USER = get_param('component.sdc_designer_user')
+SDC_TESTER_USER = get_param('component.sdc_tester_user')
+SDC_GOVERNANCE_USER = get_param('component.sdc_governance_user')
+SDC_OPERATIONS_USER = get_param('component.sdc_operations_user')
+
+component_constants = {}
+component_constants['aai_ip'] = AAI_IP
+component_constants['aai_port'] = AAI_PORT
+component_constants['aai_ssl_port'] = AAI_SSL_PORT
+component_constants['mso_ip'] = MSO_IP
+component_constants['sdc_ip'] = SDC_IP
+component_constants['sdc_port'] = SDC_PORT
+component_constants['sdc_catalog_port'] = SDC_CATALOG_PORT
+component_constants['sdc_designer_user'] = SDC_DESIGNER_USER
+component_constants['sdc_tester_user'] = SDC_TESTER_USER
+component_constants['sdc_governance_user'] = SDC_GOVERNANCE_USER
+component_constants['sdc_operations_user'] = SDC_OPERATIONS_USER
+
+
+# api
+API_PORT = 5000
+DOCKER_URL = 'unix://var/run/docker.sock'
+SQLITE = 'sqlite:////tmp/vnftest.db'
+
+API_SUCCESS = 1
+API_ERROR = 2
+TASK_NOT_DONE = 0
+TASK_DONE = 1
+TASK_FAILED = 2
+
+BASE_URL = 'http://localhost:5000'
+ENV_ACTION_API = BASE_URL + '/vnftest/env/action'
+ASYNC_TASK_API = BASE_URL + '/vnftest/asynctask'
+
+# general
+TESTCASE_PRE = 'onap_vnftest_'
+TESTSUITE_PRE = 'onap_'
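A minimal usage sketch for the get_param lookup above. Importing the module already resolves the dir, file and component keys, so a vnftest.yaml defining those sections is assumed; the CONF_FILE override shown is optional.

    import os
    os.environ['CONF_FILE'] = '/etc/vnftest/vnftest.yaml'   # assumed location, overrides the default path

    from vnftest.common import constants

    # Dotted keys walk nested YAML sections; the default is returned when the key is absent.
    log_dir = constants.get_param('dir.log', '/tmp/vnftest/')
    aai_ip = constants.component_constants['aai_ip']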
diff --git a/vnftest/common/exceptions.py b/vnftest/common/exceptions.py
new file mode 100644
index 0000000..6273cd3
--- /dev/null
+++ b/vnftest/common/exceptions.py
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/exceptions.py
+
+from oslo_utils import excutils
+
+
+class ProcessExecutionError(RuntimeError):
+ def __init__(self, message, returncode):
+ super(ProcessExecutionError, self).__init__(message)
+ self.returncode = returncode
+
+
+class VnftestException(Exception):
+ """Base Vnftest Exception.
+
+ To correctly use this class, inherit from it and define
+ a 'message' property. That message will get printf'd
+ with the keyword arguments provided to the constructor.
+
+ Based on NeutronException class.
+ """
+ message = "An unknown exception occurred."
+
+ def __init__(self, **kwargs):
+ try:
+ super(VnftestException, self).__init__(self.message % kwargs)
+ self.msg = self.message % kwargs
+ except Exception: # pylint: disable=broad-except
+ with excutils.save_and_reraise_exception() as ctxt:
+ if not self.use_fatal_exceptions():
+ ctxt.reraise = False
+ # at least get the core message out if something happened
+ super(VnftestException, self).__init__(self.message)
+
+ def __str__(self):
+ return self.msg
+
+ def use_fatal_exceptions(self):
+ """Is the instance using fatal exceptions.
+
+ :returns: Always returns False.
+ """
+ return False
+
+
+class FunctionNotImplemented(VnftestException):
+ message = ('The function "%(function_name)s" is not implemented in '
+ '"%(class_name)s" class.')
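A short sketch of how a new exception would be defined on top of VnftestException; the subclass name and message below are illustrative only.

    from vnftest.common import exceptions


    class ImageNotFound(exceptions.VnftestException):  # hypothetical subclass
        message = 'Image "%(image_name)s" was not found.'


    try:
        raise ImageNotFound(image_name='ubuntu-16.04')
    except exceptions.VnftestException as exc:
        print(exc)  # Image "ubuntu-16.04" was not found.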
diff --git a/vnftest/common/html_template.py b/vnftest/common/html_template.py
new file mode 100644
index 0000000..572d47f
--- /dev/null
+++ b/vnftest/common/html_template.py
@@ -0,0 +1,195 @@
+#############################################################################
+# Copyright (c) 2017 Rajesh Kudaka
+#
+# Author: Rajesh Kudaka 4k.rajesh@gmail.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/html_template.py
+
+template = """
+<html>
+<body>
+<head>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7\
+/css/bootstrap.min.css">
+<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1\
+/jquery.min.js"></script>
+<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7\
+/js/bootstrap.min.js"></script>
+<script src="https://code.highcharts.com/highcharts.js"></script>
+<script src="jquery.min.js"></script>
+<script src="highcharts.js"></script>
+</head>
+<style>
+
+table{
+ overflow-y: scroll;
+ height: 360px;
+ display: block;
+ }
+
+ header,h3{
+ font-family:Frutiger;
+ clear: left;
+ text-align: center;
+}
+</style>
+<header class="jumbotron text-center">
+ <h1>Vnftest User Interface</h1>
+ <h4>Report of {{task_id}} Generated</h4>
+</header>
+
+<div class="container">
+ <div class="row">
+ <div class="col-md-4">
+ <div class="table-responsive" >
+ <table class="table table-hover" > </table>
+ </div>
+ </div>
+ <div class="col-md-8" >
+ <div id="container" ></div>
+ </div>
+ </div>
+</div>
+<script>
+ var arr, tab, th, tr, td, tn, row, col, thead, tbody;
+ arr={{table|safe}}
+ tab = document.getElementsByTagName('table')[0];
+ thead=document.createElement('thead');
+ tr = document.createElement('tr');
+ for(row=0;row<Object.keys(arr).length;row++)
+ {
+ th = document.createElement('th');
+ tn = document.createTextNode(Object.keys(arr).sort()[row]);
+ th.appendChild(tn);
+ tr.appendChild(th);
+ thead.appendChild(tr);
+ }
+ tab.appendChild(thead);
+ tbody=document.createElement('tbody');
+
+ for (col = 0; col < arr[Object.keys(arr)[0]].length; col++){
+ tr = document.createElement('tr');
+ for(row=0;row<Object.keys(arr).length;row++)
+ {
+ td = document.createElement('td');
+ tn = document.createTextNode(arr[Object.keys(arr).sort()[row]][col]);
+ td.appendChild(tn);
+ tr.appendChild(td);
+ }
+ tbody.appendChild(tr);
+ }
+tab.appendChild(tbody);
+
+</script>
+
+<script language="JavaScript">
+
+$(function() {
+ $('#container').highcharts({
+ title: {
+ text: 'Vnftest test results',
+ x: -20 //center
+ },
+ subtitle: {
+ text: 'Report of {{task_id}} Task Generated',
+ x: -20
+ },
+ xAxis: {
+ title: {
+ text: 'Timestamp'
+ },
+ categories:{{Timestamp|safe}}
+ },
+ yAxis: {
+
+ plotLines: [{
+ value: 0,
+ width: 1,
+ color: '#808080'
+ }]
+ },
+ tooltip: {
+ valueSuffix: ''
+ },
+ legend: {
+ layout: 'vertical',
+ align: 'right',
+ verticalAlign: 'middle',
+ borderWidth: 0
+ },
+ series: {{series|safe}}
+ });
+});
+
+</script>
+
+
+</body>
+</html>"""
+
+report_template = """
+<html>
+ <head>
+ <title>Vnftest Report</title>
+ <link href="http://cdn.static.runoob.com/libs/bootstrap/3.3.7/css\
+/bootstrap.min.css" rel="stylesheet">
+ </head>
+ <div class="content">
+ <h3>Vnftest Report </h3>
+ <hr/>
+ <div>
+
+ <div>Task ID : {{result.task_id}} </div>
+ <div style="margin-top:5px;">Criteria :
+ <font> {{result.criteria}}</font>
+ </div>
+ <hr/>
+
+ <caption>Information</caption>
+ <table class="table table-striped">
+ <tr>
+ <th>#</th>
+ <th>key</th>
+ <th>value</th>
+ </tr>
+ <tbody>
+ {% for key, value in result.info.items() %}
+ <tr>
+ <td>{{ loop.index }}</td>
+ <td>{{key}}</td>
+ <td>{{value}}</td>
+ </tr>
+ {% endfor %}
+ </tbody>
+ </table>
+ <hr/>
+
+ <caption>Test Cases</caption>
+ <table class="table table-striped">
+ <tr>
+ <th>#</th>
+ <th>key</th>
+ <th>value</th>
+ </tr>
+ <tbody>
+ {% for key, value in result.testcases.items() %}
+ <tr>
+ <td>{{ loop.index }}</td>
+ <td>{{key}}</td>
+ <td>{{value.criteria}}</td>
+ </tr>
+ {% endfor %}
+ </tbody>
+ </table>
+
+ </div>
+ </div>
+</html>
+"""
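A brief sketch of how report_template could be rendered with Jinja2; the result fields simply mirror the {{result.*}} placeholders above and the values are made up.

    import jinja2
    from vnftest.common import html_template

    result = {
        'task_id': 'demo-task-001',
        'criteria': 'PASS',
        'info': {'deploy_scenario': 'demo'},
        'testcases': {'onap_vnftest_tc001': {'criteria': 'PASS'}},
    }
    html = jinja2.Template(html_template.report_template).render(result=result)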
diff --git a/vnftest/common/httpClient.py b/vnftest/common/httpClient.py
new file mode 100644
index 0000000..e2c7937
--- /dev/null
+++ b/vnftest/common/httpClient.py
@@ -0,0 +1,48 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/httpClient.py
+
+from __future__ import absolute_import
+
+import logging
+import time
+
+from oslo_serialization import jsonutils
+import requests
+
+logger = logging.getLogger(__name__)
+
+
+class HttpClient(object):
+
+ def post(self, url, data, timeout=0):
+ data = jsonutils.dump_as_bytes(data)
+ headers = {'Content-Type': 'application/json'}
+ t_end = time.time() + timeout
+ while True:
+ try:
+ response = requests.post(url, data=data, headers=headers)
+ result = response.json()
+ logger.debug('The result is: %s', result)
+ return result
+ except Exception:
+ if time.time() > t_end:
+ logger.exception('')
+ raise
+ time.sleep(1)
+
+ def get(self, url):
+ response = requests.get(url)
+ return response.json()
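A minimal usage sketch for HttpClient; the endpoint constants come from constants.py in this diff, while the payload and task id are illustrative.

    from vnftest.common import constants
    from vnftest.common.httpClient import HttpClient

    client = HttpClient()
    # Retries once per second for up to 30 seconds if the API is not yet reachable.
    result = client.post(constants.ENV_ACTION_API, {'action': 'prepare_env'}, timeout=30)
    status = client.get(constants.BASE_URL + '/vnftest/asynctask?task_id=demo')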
diff --git a/vnftest/common/openstack_utils.py b/vnftest/common/openstack_utils.py
new file mode 100644
index 0000000..954df2e
--- /dev/null
+++ b/vnftest/common/openstack_utils.py
@@ -0,0 +1,765 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/openstack_utils.py
+
+from __future__ import absolute_import
+
+import os
+import time
+import sys
+import logging
+
+from keystoneauth1 import loading
+from keystoneauth1 import session
+from cinderclient import client as cinderclient
+from novaclient import client as novaclient
+from glanceclient import client as glanceclient
+from neutronclient.neutron import client as neutronclient
+
+log = logging.getLogger(__name__)
+
+DEFAULT_HEAT_API_VERSION = '1'
+DEFAULT_API_VERSION = '2'
+
+
+# *********************************************
+# CREDENTIALS
+# *********************************************
+def get_credentials():
+ """Returns a credentials dictionary parsed from the OS_* environment variables"""
+ creds = {}
+
+ keystone_api_version = os.getenv('OS_IDENTITY_API_VERSION')
+
+ if keystone_api_version is None or keystone_api_version == '2':
+ keystone_v3 = False
+ tenant_env = 'OS_TENANT_NAME'
+ tenant = 'tenant_name'
+ else:
+ keystone_v3 = True
+ tenant_env = 'OS_PROJECT_NAME'
+ tenant = 'project_name'
+
+ # The most common way to pass this information to the script is
+ # through environment variables.
+ creds.update({
+ "username": os.environ.get("OS_USERNAME"),
+ "password": os.environ.get("OS_PASSWORD"),
+ "auth_url": os.environ.get("OS_AUTH_URL"),
+ tenant: os.environ.get(tenant_env)
+ })
+
+ if keystone_v3:
+ if os.getenv('OS_USER_DOMAIN_NAME') is not None:
+ creds.update({
+ "user_domain_name": os.getenv('OS_USER_DOMAIN_NAME')
+ })
+ if os.getenv('OS_PROJECT_DOMAIN_NAME') is not None:
+ creds.update({
+ "project_domain_name": os.getenv('OS_PROJECT_DOMAIN_NAME')
+ })
+
+ return creds
+
+
+def get_session_auth():
+ loader = loading.get_plugin_loader('password')
+ creds = get_credentials()
+ auth = loader.load_from_options(**creds)
+ return auth
+
+
+def get_session():
+ auth = get_session_auth()
+ try:
+ cacert = os.environ['OS_CACERT']
+ except KeyError:
+ return session.Session(auth=auth)
+ else:
+ insecure = os.getenv('OS_INSECURE', '').lower() == 'true'
+ cacert = False if insecure else cacert
+ return session.Session(auth=auth, verify=cacert)
+
+
+def get_endpoint(service_type, endpoint_type='publicURL'):
+ auth = get_session_auth()
+ # for multi-region, we need to specify region
+ # when finding the endpoint
+ return get_session().get_endpoint(auth=auth,
+ service_type=service_type,
+ endpoint_type=endpoint_type,
+ region_name=os.environ.get(
+ "OS_REGION_NAME"))
+
+
+# *********************************************
+# CLIENTS
+# *********************************************
+def get_heat_api_version(): # pragma: no cover
+ try:
+ api_version = os.environ['HEAT_API_VERSION']
+ except KeyError:
+ return DEFAULT_HEAT_API_VERSION
+ else:
+ log.info("HEAT_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_cinder_client_version(): # pragma: no cover
+ try:
+ api_version = os.environ['OS_VOLUME_API_VERSION']
+ except KeyError:
+ return DEFAULT_API_VERSION
+ else:
+ log.info("OS_VOLUME_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_cinder_client(): # pragma: no cover
+ sess = get_session()
+ return cinderclient.Client(get_cinder_client_version(), session=sess)
+
+
+def get_nova_client_version(): # pragma: no cover
+ try:
+ api_version = os.environ['OS_COMPUTE_API_VERSION']
+ except KeyError:
+ return DEFAULT_API_VERSION
+ else:
+ log.info("OS_COMPUTE_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_nova_client(): # pragma: no cover
+ sess = get_session()
+ return novaclient.Client(get_nova_client_version(), session=sess)
+
+
+def get_neutron_client_version(): # pragma: no cover
+ try:
+ api_version = os.environ['OS_NETWORK_API_VERSION']
+ except KeyError:
+ return DEFAULT_API_VERSION
+ else:
+ log.info("OS_NETWORK_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_neutron_client(): # pragma: no cover
+ sess = get_session()
+ return neutronclient.Client(get_neutron_client_version(), session=sess)
+
+
+def get_glance_client_version(): # pragma: no cover
+ try:
+ api_version = os.environ['OS_IMAGE_API_VERSION']
+ except KeyError:
+ return DEFAULT_API_VERSION
+ else:
+ log.info("OS_IMAGE_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_glance_client(): # pragma: no cover
+ sess = get_session()
+ return glanceclient.Client(get_glance_client_version(), session=sess)
+
+
+# *********************************************
+# NOVA
+# *********************************************
+def get_instances(nova_client): # pragma: no cover
+ try:
+ return nova_client.servers.list(search_opts={'all_tenants': 1})
+ except Exception:
+ log.exception("Error [get_instances(nova_client)]")
+
+
+def get_instance_status(nova_client, instance): # pragma: no cover
+ try:
+ return nova_client.servers.get(instance.id).status
+ except Exception:
+ log.exception("Error [get_instance_status(nova_client)]")
+
+
+def get_instance_by_name(nova_client, instance_name): # pragma: no cover
+ try:
+ return nova_client.servers.find(name=instance_name)
+ except Exception:
+ log.exception("Error [get_instance_by_name(nova_client, '%s')]",
+ instance_name)
+
+
+def get_aggregates(nova_client): # pragma: no cover
+ try:
+ return nova_client.aggregates.list()
+ except Exception:
+ log.exception("Error [get_aggregates(nova_client)]")
+
+
+def get_availability_zones(nova_client): # pragma: no cover
+ try:
+ return nova_client.availability_zones.list()
+ except Exception:
+ log.exception("Error [get_availability_zones(nova_client)]")
+
+
+def get_availability_zone_names(nova_client): # pragma: no cover
+ try:
+ return [az.zoneName for az in get_availability_zones(nova_client)]
+ except Exception:
+ log.exception("Error [get_availability_zone_names(nova_client)]")
+
+
+def create_aggregate(nova_client, aggregate_name, av_zone): # pragma: no cover
+ try:
+ nova_client.aggregates.create(aggregate_name, av_zone)
+ except Exception:
+ log.exception("Error [create_aggregate(nova_client, %s, %s)]",
+ aggregate_name, av_zone)
+ return False
+ else:
+ return True
+
+
+def get_aggregate_id(nova_client, aggregate_name): # pragma: no cover
+ try:
+ aggregates = get_aggregates(nova_client)
+ _id = next((ag.id for ag in aggregates if ag.name == aggregate_name))
+ except Exception:
+ log.exception("Error [get_aggregate_id(nova_client, %s)]",
+ aggregate_name)
+ else:
+ return _id
+
+
+def add_host_to_aggregate(nova_client, aggregate_name,
+ compute_host): # pragma: no cover
+ try:
+ aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+ nova_client.aggregates.add_host(aggregate_id, compute_host)
+ except Exception:
+ log.exception("Error [add_host_to_aggregate(nova_client, %s, %s)]",
+ aggregate_name, compute_host)
+ return False
+ else:
+ return True
+
+
+def create_aggregate_with_host(nova_client, aggregate_name, av_zone,
+ compute_host): # pragma: no cover
+ try:
+ create_aggregate(nova_client, aggregate_name, av_zone)
+ add_host_to_aggregate(nova_client, aggregate_name, compute_host)
+ except Exception:
+ log.exception("Error [create_aggregate_with_host("
+ "nova_client, %s, %s, %s)]",
+ aggregate_name, av_zone, compute_host)
+ return False
+ else:
+ return True
+
+
+def create_keypair(nova_client, name, key_path=None): # pragma: no cover
+ try:
+ with open(key_path) as fpubkey:
+ keypair = get_nova_client().keypairs.create(name=name, public_key=fpubkey.read())
+ return keypair
+ except Exception:
+ log.exception("Error [create_keypair(nova_client)]")
+
+
+def create_instance(json_body): # pragma: no cover
+ try:
+ return get_nova_client().servers.create(**json_body)
+ except Exception:
+ log.exception("Error [create_instance(json_body)]")
+ return None
+
+
+def create_instance_and_wait_for_active(json_body): # pragma: no cover
+ SLEEP = 3
+ VM_BOOT_TIMEOUT = 180
+ nova_client = get_nova_client()
+ instance = create_instance(json_body)
+ count = VM_BOOT_TIMEOUT // SLEEP
+ for n in range(count, -1, -1):
+ status = get_instance_status(nova_client, instance)
+ if status.lower() == "active":
+ return instance
+ elif status.lower() == "error":
+ log.error("The instance went to ERROR status.")
+ return None
+ time.sleep(SLEEP)
+ log.error("Timeout booting the instance.")
+ return None
+
+
+def attach_server_volume(server_id, volume_id, device=None): # pragma: no cover
+ try:
+ get_nova_client().volumes.create_server_volume(server_id, volume_id, device)
+ except Exception:
+ log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]",
+ server_id, volume_id)
+ return False
+ else:
+ return True
+
+
+def delete_instance(nova_client, instance_id): # pragma: no cover
+ try:
+ nova_client.servers.force_delete(instance_id)
+ except Exception:
+ log.exception("Error [delete_instance(nova_client, '%s')]",
+ instance_id)
+ return False
+ else:
+ return True
+
+
+def remove_host_from_aggregate(nova_client, aggregate_name,
+ compute_host): # pragma: no cover
+ try:
+ aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+ nova_client.aggregates.remove_host(aggregate_id, compute_host)
+ except Exception:
+ log.exception("Error remove_host_from_aggregate(nova_client, %s, %s)",
+ aggregate_name, compute_host)
+ return False
+ else:
+ return True
+
+
+def remove_hosts_from_aggregate(nova_client,
+ aggregate_name): # pragma: no cover
+ aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+ hosts = nova_client.aggregates.get(aggregate_id).hosts
+ assert(
+ all(remove_host_from_aggregate(nova_client, aggregate_name, host)
+ for host in hosts))
+
+
+def delete_aggregate(nova_client, aggregate_name): # pragma: no cover
+ try:
+ remove_hosts_from_aggregate(nova_client, aggregate_name)
+ nova_client.aggregates.delete(aggregate_name)
+ except Exception:
+ log.exception("Error [delete_aggregate(nova_client, %s)]",
+ aggregate_name)
+ return False
+ else:
+ return True
+
+
+def get_server_by_name(name): # pragma: no cover
+ try:
+ return get_nova_client().servers.list(search_opts={'name': name})[0]
+ except IndexError:
+ log.exception('Failed to get nova client')
+ raise
+
+
+def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover
+ try:
+ return get_nova_client().flavors.create(name, ram, vcpus, disk, **kwargs)
+ except Exception:
+ log.exception("Error [create_flavor(nova_client, %s, %s, %s, %s, %s)]",
+ name, ram, disk, vcpus, kwargs['is_public'])
+ return None
+
+
+def get_image_by_name(name): # pragma: no cover
+ images = get_nova_client().images.list()
+ try:
+ return next((a for a in images if a.name == name))
+ except StopIteration:
+ log.exception('No image matched')
+
+
+def get_flavor_id(nova_client, flavor_name): # pragma: no cover
+ flavors = nova_client.flavors.list(detailed=True)
+ flavor_id = ''
+ for f in flavors:
+ if f.name == flavor_name:
+ flavor_id = f.id
+ break
+ return flavor_id
+
+
+def get_flavor_by_name(name): # pragma: no cover
+ flavors = get_nova_client().flavors.list()
+ try:
+ return next((a for a in flavors if a.name == name))
+ except StopIteration:
+ log.exception('No flavor matched')
+
+
+def check_status(status, name, iterations, interval): # pragma: no cover
+ for i in range(iterations):
+ try:
+ server = get_server_by_name(name)
+ except IndexError:
+ log.error('Cannot find %s server', name)
+ raise
+
+ if server.status == status:
+ return True
+
+ time.sleep(interval)
+ return False
+
+
+def delete_flavor(flavor_id): # pragma: no cover
+ try:
+ get_nova_client().flavors.delete(flavor_id)
+ except Exception:
+ log.exception("Error [delete_flavor(nova_client, %s)]", flavor_id)
+ return False
+ else:
+ return True
+
+
+def delete_keypair(nova_client, key): # pragma: no cover
+ try:
+ nova_client.keypairs.delete(key=key)
+ return True
+ except Exception:
+ log.exception("Error [delete_keypair(nova_client)]")
+ return False
+
+
+# *********************************************
+# NEUTRON
+# *********************************************
+def get_network_id(neutron_client, network_name): # pragma: no cover
+ networks = neutron_client.list_networks()['networks']
+ return next((n['id'] for n in networks if n['name'] == network_name), None)
+
+
+def get_port_id_by_ip(neutron_client, ip_address): # pragma: no cover
+ ports = neutron_client.list_ports()['ports']
+ return next((i['id'] for i in ports for j in i.get(
+ 'fixed_ips') if j['ip_address'] == ip_address), None)
+
+
+def create_neutron_net(neutron_client, json_body): # pragma: no cover
+ try:
+ network = neutron_client.create_network(body=json_body)
+ return network['network']['id']
+ except Exception:
+ log.error("Error [create_neutron_net(neutron_client)]")
+ raise Exception("operation error")
+ return None
+
+
+def delete_neutron_net(neutron_client, network_id): # pragma: no cover
+ try:
+ neutron_client.delete_network(network_id)
+ return True
+ except Exception:
+ log.error("Error [delete_neutron_net(neutron_client, '%s')]" % network_id)
+ return False
+
+
+def create_neutron_subnet(neutron_client, json_body): # pragma: no cover
+ try:
+ subnet = neutron_client.create_subnet(body=json_body)
+ return subnet['subnets'][0]['id']
+ except Exception:
+ log.error("Error [create_neutron_subnet")
+ raise Exception("operation error")
+ return None
+
+
+def create_neutron_router(neutron_client, json_body): # pragma: no cover
+ try:
+ router = neutron_client.create_router(json_body)
+ return router['router']['id']
+ except Exception:
+ log.error("Error [create_neutron_router(neutron_client)]")
+ raise Exception("operation error")
+ return None
+
+
+def delete_neutron_router(neutron_client, router_id): # pragma: no cover
+ try:
+ neutron_client.delete_router(router=router_id)
+ return True
+ except Exception:
+ log.error("Error [delete_neutron_router(neutron_client, '%s')]" % router_id)
+ return False
+
+
+def remove_gateway_router(neutron_client, router_id): # pragma: no cover
+ try:
+ neutron_client.remove_gateway_router(router_id)
+ return True
+ except Exception:
+ log.error("Error [remove_gateway_router(neutron_client, '%s')]" % router_id)
+ return False
+
+
+def remove_interface_router(neutron_client, router_id, subnet_id,
+ **json_body): # pragma: no cover
+ json_body.update({"subnet_id": subnet_id})
+ try:
+ neutron_client.remove_interface_router(router=router_id,
+ body=json_body)
+ return True
+ except Exception:
+ log.error("Error [remove_interface_router(neutron_client, '%s', "
+ "'%s')]" % (router_id, subnet_id))
+ return False
+
+
+def create_floating_ip(neutron_client, extnet_id): # pragma: no cover
+ props = {'floating_network_id': extnet_id}
+ try:
+ ip_json = neutron_client.create_floatingip({'floatingip': props})
+ fip_addr = ip_json['floatingip']['floating_ip_address']
+ fip_id = ip_json['floatingip']['id']
+ except Exception:
+ log.error("Error [create_floating_ip(neutron_client)]")
+ return None
+ return {'fip_addr': fip_addr, 'fip_id': fip_id}
+
+
+def delete_floating_ip(nova_client, floatingip_id): # pragma: no cover
+ try:
+ nova_client.floating_ips.delete(floatingip_id)
+ return True
+ except Exception:
+ log.error("Error [delete_floating_ip(nova_client, '%s')]" % floatingip_id)
+ return False
+
+
+def get_security_groups(neutron_client): # pragma: no cover
+ try:
+ security_groups = neutron_client.list_security_groups()[
+ 'security_groups']
+ return security_groups
+ except Exception:
+ log.error("Error [get_security_groups(neutron_client)]")
+ return None
+
+
+def get_security_group_id(neutron_client, sg_name): # pragma: no cover
+ security_groups = get_security_groups(neutron_client)
+ id = ''
+ for sg in security_groups:
+ if sg['name'] == sg_name:
+ id = sg['id']
+ break
+ return id
+
+
+def create_security_group(neutron_client, sg_name, sg_description): # pragma: no cover
+ json_body = {'security_group': {'name': sg_name,
+ 'description': sg_description}}
+ try:
+ secgroup = neutron_client.create_security_group(json_body)
+ return secgroup['security_group']
+ except Exception:
+ log.error("Error [create_security_group(neutron_client, '%s', "
+ "'%s')]" % (sg_name, sg_description))
+ return None
+
+
+def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
+ port_range_min=None, port_range_max=None,
+ **json_body): # pragma: no cover
+ # We create a security group rule in 2 steps:
+ # 1 - we check the format and set the json body accordingly
+ # 2 - we call the neutron client to create the security group rule
+
+ # Format check
+ json_body.update({'security_group_rule': {'direction': direction,
+ 'security_group_id': sg_id, 'protocol': protocol}})
+ # parameters may be
+ # - both None => we do nothing
+ # - both Not None => we add them to the json description
+ # but one cannot be None if the other is not None
+ if (port_range_min is not None and port_range_max is not None):
+ # add port_range in json description
+ json_body['security_group_rule']['port_range_min'] = port_range_min
+ json_body['security_group_rule']['port_range_max'] = port_range_max
+ log.debug("Security_group format set (port range included)")
+ else:
+ # either both port range values are set to None => do nothing
+ # or one is set but not the other => log it and return False
+ if port_range_min is None and port_range_max is None:
+ log.debug("Security_group format set (no port range mentioned)")
+ else:
+ log.error("Bad security group format. "
+ "One of the port range values is not properly set: "
+ "range min: {}, "
+ "range max: {}".format(port_range_min,
+ port_range_max))
+ return False
+
+ # Create security group using neutron client
+ try:
+ neutron_client.create_security_group_rule(json_body)
+ return True
+ except Exception:
+ log.exception("Impossible to create_security_group_rule, "
+ "security group rule probably already exists")
+ return False
+
+
+def create_security_group_full(neutron_client,
+ sg_name, sg_description): # pragma: no cover
+ sg_id = get_security_group_id(neutron_client, sg_name)
+ if sg_id != '':
+ log.info("Using existing security group '%s'..." % sg_name)
+ else:
+ log.info("Creating security group '%s'..." % sg_name)
+ SECGROUP = create_security_group(neutron_client,
+ sg_name,
+ sg_description)
+ if not SECGROUP:
+ log.error("Failed to create the security group...")
+ return None
+
+ sg_id = SECGROUP['id']
+
+ log.debug("Security group '%s' with ID=%s created successfully."
+ % (SECGROUP['name'], sg_id))
+
+ log.debug("Adding ICMP rules in security group '%s'..."
+ % sg_name)
+ if not create_secgroup_rule(neutron_client, sg_id,
+ 'ingress', 'icmp'):
+ log.error("Failed to create the security group rule...")
+ return None
+
+ log.debug("Adding SSH rules in security group '%s'..."
+ % sg_name)
+ if not create_secgroup_rule(
+ neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
+ log.error("Failed to create the security group rule...")
+ return None
+
+ if not create_secgroup_rule(
+ neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
+ log.error("Failed to create the security group rule...")
+ return None
+ return sg_id
+
+
+# *********************************************
+# GLANCE
+# *********************************************
+def get_image_id(glance_client, image_name): # pragma: no cover
+ images = glance_client.images.list()
+ return next((i.id for i in images if i.name == image_name), None)
+
+
+def create_image(glance_client, image_name, file_path, disk_format,
+ container_format, min_disk, min_ram, protected, tag,
+ public, **kwargs): # pragma: no cover
+ if not os.path.isfile(file_path):
+ log.error("Error: file %s does not exist." % file_path)
+ return None
+ try:
+ image_id = get_image_id(glance_client, image_name)
+ if image_id is not None:
+ log.info("Image %s already exists." % image_name)
+ else:
+ log.info("Creating image '%s' from '%s'...", image_name, file_path)
+
+ image = glance_client.images.create(name=image_name,
+ visibility=public,
+ disk_format=disk_format,
+ container_format=container_format,
+ min_disk=min_disk,
+ min_ram=min_ram,
+ tags=tag,
+ protected=protected,
+ **kwargs)
+ image_id = image.id
+ with open(file_path) as image_data:
+ glance_client.images.upload(image_id, image_data)
+ return image_id
+ except Exception:
+ log.error("Error [create_image(glance_client, '%s', '%s', '%s')]",
+ image_name, file_path, public)
+ return None
+
+
+def delete_image(glance_client, image_id): # pragma: no cover
+ try:
+ glance_client.images.delete(image_id)
+
+ except Exception:
+ log.exception("Error [delete_image(glance_client, %s)]", image_id)
+ return False
+ else:
+ return True
+
+
+# *********************************************
+# CINDER
+# *********************************************
+def get_volume_id(volume_name): # pragma: no cover
+ volumes = get_cinder_client().volumes.list()
+ return next((v.id for v in volumes if v.name == volume_name), None)
+
+
+def create_volume(cinder_client, volume_name, volume_size,
+ volume_image=False): # pragma: no cover
+ try:
+ if volume_image:
+ volume = cinder_client.volumes.create(name=volume_name,
+ size=volume_size,
+ imageRef=volume_image)
+ else:
+ volume = cinder_client.volumes.create(name=volume_name,
+ size=volume_size)
+ return volume
+ except Exception:
+ log.exception("Error [create_volume(cinder_client, %s)]",
+ (volume_name, volume_size))
+ return None
+
+
+def delete_volume(cinder_client, volume_id, forced=False): # pragma: no cover
+ try:
+ if forced:
+ try:
+ cinder_client.volumes.detach(volume_id)
+ except:
+ log.error(sys.exc_info()[0])
+ cinder_client.volumes.force_delete(volume_id)
+ else:
+ while True:
+ volume = get_cinder_client().volumes.get(volume_id)
+ if volume.status.lower() == 'available':
+ break
+ cinder_client.volumes.delete(volume_id)
+ return True
+ except Exception:
+ log.exception("Error [delete_volume(cinder_client, '%s')]" % volume_id)
+ return False
+
+
+def detach_volume(server_id, volume_id): # pragma: no cover
+ try:
+ get_nova_client().volumes.delete_server_volume(server_id, volume_id)
+ return True
+ except Exception:
+ log.exception("Error [detach_server_volume(nova_client, '%s', '%s')]",
+ server_id, volume_id)
+ return False
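A short sketch of how the client helpers above would typically be combined; it assumes the usual OS_* variables (OS_USERNAME, OS_PASSWORD, OS_AUTH_URL, ...) are already exported, and the resource names are illustrative.

    from vnftest.common import openstack_utils

    nova = openstack_utils.get_nova_client()
    neutron = openstack_utils.get_neutron_client()
    glance = openstack_utils.get_glance_client()

    image_id = openstack_utils.get_image_id(glance, 'cirros-0.4.0')
    net_id = openstack_utils.get_network_id(neutron, 'private')
    flavor_id = openstack_utils.get_flavor_id(nova, 'm1.small')

    # Boots a server and polls its status until it is ACTIVE, ERROR or a timeout hits.
    server = openstack_utils.create_instance_and_wait_for_active({
        'name': 'demo-vm', 'image': image_id, 'flavor': flavor_id,
        'nics': [{'net-id': net_id}]})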
diff --git a/vnftest/common/process.py b/vnftest/common/process.py
new file mode 100644
index 0000000..21a21ac
--- /dev/null
+++ b/vnftest/common/process.py
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/process.py
+
+import logging
+import multiprocessing
+import signal
+import subprocess
+import time
+
+import os
+from oslo_utils import encodeutils
+
+from vnftest.common import exceptions
+from vnftest.common import utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+def check_if_process_failed(proc, timeout=1):
+ if proc is not None:
+ proc.join(timeout)
+ # Only abort if the process aborted
+ if proc.exitcode is not None and proc.exitcode > 0:
+ raise RuntimeError("{} exited with status {}".format(proc.name, proc.exitcode))
+
+
+def terminate_children(timeout=3):
+ current_proccess = multiprocessing.current_process()
+ active_children = multiprocessing.active_children()
+ if not active_children:
+ LOG.debug("no children to terminate")
+ return
+ for child in active_children:
+ LOG.debug("%s %s %s, child: %s %s", current_proccess.name, current_proccess.pid,
+ os.getpid(), child, child.pid)
+ LOG.debug("joining %s", child)
+ child.join(timeout)
+ child.terminate()
+ active_children = multiprocessing.active_children()
+ if not active_children:
+ LOG.debug("no children to terminate")
+ for child in active_children:
+ LOG.debug("%s %s %s, after terminate child: %s %s", current_proccess.name,
+ current_proccess.pid, os.getpid(), child, child.pid)
+
+
+def _additional_env_args(additional_env):
+ """Build arguments for adding additional environment vars with env"""
+ if additional_env is None:
+ return []
+ return ['env'] + ['%s=%s' % pair for pair in additional_env.items()]
+
+
+def _subprocess_setup():
+ # Python installs a SIGPIPE handler by default. This is usually not what
+ # non-Python subprocesses expect.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
+def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
+ env=None, preexec_fn=_subprocess_setup, close_fds=True):
+ return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
+ stderr=stderr, preexec_fn=preexec_fn,
+ close_fds=close_fds, env=env)
+
+
+def create_process(cmd, run_as_root=False, additional_env=None):
+ """Create a process object for the given command.
+
+ The return value will be a tuple of the process object and the
+ list of command arguments used to create it.
+ """
+ if not isinstance(cmd, list):
+ cmd = [cmd]
+ cmd = list(map(str, _additional_env_args(additional_env) + cmd))
+ if run_as_root:
+ # NOTE(ralonsoh): to handle a command executed as root, using
+ # a root wrapper, instead of using "sudo".
+ pass
+ LOG.debug("Running command: %s", cmd)
+ obj = subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ return obj, cmd
+
+
+def execute(cmd, process_input=None, additional_env=None,
+ check_exit_code=True, return_stderr=False, log_fail_as_error=True,
+ extra_ok_codes=None, run_as_root=False):
+ try:
+ if process_input is not None:
+ _process_input = encodeutils.to_utf8(process_input)
+ else:
+ _process_input = None
+
+ # NOTE(ralonsoh): to handle the execution of a command as root,
+ # using a root wrapper, instead of using "sudo".
+ obj, cmd = create_process(cmd, run_as_root=run_as_root,
+ additional_env=additional_env)
+ _stdout, _stderr = obj.communicate(_process_input)
+ returncode = obj.returncode
+ obj.stdin.close()
+ _stdout = utils.safe_decode_utf8(_stdout)
+ _stderr = utils.safe_decode_utf8(_stderr)
+
+ extra_ok_codes = extra_ok_codes or []
+ if returncode and returncode not in extra_ok_codes:
+ msg = ("Exit code: %(returncode)d; "
+ "Stdin: %(stdin)s; "
+ "Stdout: %(stdout)s; "
+ "Stderr: %(stderr)s") % {'returncode': returncode,
+ 'stdin': process_input or '',
+ 'stdout': _stdout,
+ 'stderr': _stderr}
+ if log_fail_as_error:
+ LOG.error(msg)
+ if check_exit_code:
+ raise exceptions.ProcessExecutionError(msg,
+ returncode=returncode)
+
+ finally:
+ # This appears to be necessary in order for the subprocess to clean up
+ # something between calls; without it, the second process hangs when two
+ # execute calls are made in a row.
+ time.sleep(0)
+
+ return (_stdout, _stderr) if return_stderr else _stdout
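A minimal sketch for execute(); the commands are illustrative and are passed as lists because create_process() wraps, but does not split, a plain string.

    from vnftest.common import process

    stdout = process.execute(['uname', '-a'])

    # With check_exit_code=False a non-zero exit does not raise
    # exceptions.ProcessExecutionError; return_stderr=True returns both streams.
    out, err = process.execute(['ls', '/nonexistent'],
                               return_stderr=True, check_exit_code=False)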
diff --git a/vnftest/common/rest_client.py b/vnftest/common/rest_client.py
new file mode 100644
index 0000000..23a108c
--- /dev/null
+++ b/vnftest/common/rest_client.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+import json
+import urllib2
+import requests
+
+
+def post(url, headers, data, logger):
+ return call(url, 'POST', headers, data, logger)
+
+
+def call(url, method, headers, data, logger):
+ data_json = json.dumps(data)
+ f = None
+ try:
+ req = urllib2.Request(url, data=data_json, headers=headers)
+ req.get_method = lambda: method
+ f = urllib2.urlopen(req)
+ return_code = f.code
+ response_body = f.read()
+ f.close()
+ if len(str(response_body)) == 0:
+ response_body = "{}"
+ response_body = json.loads(response_body)
+ result = {'return_code': return_code, 'body': response_body}
+ return result
+
+ except Exception as e:
+ message = "Cannot read content from {}, exception: {}".format(url, e)
+ logger.exception(message)
+ raise RuntimeError(message)
+ finally:
+ if f is not None:
+ f.close()
+
+
+def upload_file(url, headers, file, logger):
+ logger.debug("Upload file. URL: {}".format(url))
+ response = None
+ try:
+ response = requests.post(url, headers=headers, files=file)
+ return {'return_code': response.status_code, 'body': response.json()}
+ except Exception as e:
+ message = "Error while uploading file to {}, exception: {}".format(url, e)
+ logger.exception(message)
+ raise RuntimeError(message)
+ finally:
+ if response is not None:
+ response.close()
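A brief usage sketch for rest_client; the URL, headers and payload are assumptions, only the call shape and the return dictionary come from the functions above.

    import logging

    from vnftest.common import rest_client

    logger = logging.getLogger(__name__)
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

    result = rest_client.post('http://sdc.example.com:8080/sdc/v1/catalog/services',
                              headers, {'name': 'demo-service'}, logger)
    if result['return_code'] in (200, 201):
        body = result['body']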
diff --git a/vnftest/common/task_template.py b/vnftest/common/task_template.py
new file mode 100755
index 0000000..7872aed
--- /dev/null
+++ b/vnftest/common/task_template.py
@@ -0,0 +1,78 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/task_template.py
+
+from __future__ import absolute_import
+import re
+import jinja2
+import jinja2.meta
+import yaml
+
+
+def finalize_for_yaml(elem):
+ """Render Jinja2 output specifically for YAML files"""
+ # Jinja2 by default converts None to 'None'; we can't allow this
+ # we could convert to empty string '', or we can convert to null, aka ~
+ if elem is None:
+ return '~'
+ # convert data structures to inline YAML
+ # match builtin types because we shouldn't be trying to render complex types
+ if isinstance(elem, (dict, list)):
+ # remove newlines because we are injecting back into YAML
+ # use block style for single line
+ return yaml.safe_dump(elem, default_flow_style=True).replace('\n', '')
+ return elem
+
+
+class TaskTemplate(object):
+
+ @classmethod
+ def render(cls, task_template, **kwargs):
+ """Render jinja2 task template to Vnftest input task.
+
+ :param task_template: string that contains template
+ :param kwargs: Dict with template arguments
+ :returns:rendered template str
+ """
+
+ from six.moves import builtins
+
+ ast = jinja2.Environment().parse(task_template)
+ required_kwargs = jinja2.meta.find_undeclared_variables(ast)
+
+ missing = set(required_kwargs) - set(kwargs) - set(dir(builtins))
+ real_missing = [mis for mis in missing
+ if is_really_missing(mis, task_template)]
+
+ if real_missing:
+ multi_msg = ("Please specify the following template task arguments: %s")
+ single_msg = ("Please specify the template task argument: %s")
+ raise TypeError((len(real_missing) > 1 and multi_msg or single_msg)
+ % ", ".join(real_missing))
+ return jinja2.Template(task_template, finalize=finalize_for_yaml).render(**kwargs)
+
+
+def is_really_missing(mis, task_template):
+ # Removing variables that have default values from
+ # missing. A construction that won't be properly
+ # checked is {% set x = x or 1}
+ if re.search(mis.join([r"{%\s*set\s+", r"\s*=\s*", r"[^\w]+"]),
+ task_template):
+ return False
+ # Also check for a default filter which can show up as
+ # a missing variable
+ if re.search(mis + r"\s*\|\s*default\(", task_template):
+ return False
+ return True
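A small sketch of TaskTemplate.render(); the template text is illustrative. Variables guarded by a default() filter or a {% set %} fallback are not treated as missing.

    from vnftest.common.task_template import TaskTemplate

    template = ("vnf_name: {{ vnf_name }}\n"
                "flavor: {{ flavor|default('m1.small') }}\n")
    rendered = TaskTemplate.render(template, vnf_name='demo-vnf')
    # Omitting vnf_name here would raise TypeError naming the missing argument.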
diff --git a/vnftest/common/template_format.py b/vnftest/common/template_format.py
new file mode 100644
index 0000000..5cfc2f2
--- /dev/null
+++ b/vnftest/common/template_format.py
@@ -0,0 +1,72 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/template_format.py
+
+from __future__ import absolute_import
+
+import yaml
+from oslo_serialization import jsonutils
+
+if hasattr(yaml, 'CSafeLoader'):
+ # make a dynamic subclass so we don't override global yaml Loader
+ yaml_loader = type('HeatYamlLoader', (yaml.CSafeLoader,), {})
+else:
+ yaml_loader = type('HeatYamlLoader', (yaml.SafeLoader,), {})
+
+if hasattr(yaml, 'CSafeDumper'):
+ yaml_dumper = yaml.CSafeDumper
+else:
+ yaml_dumper = yaml.SafeDumper
+
+
+# This breaks NetworkServiceTestCase yaml loading, because we need to convert to
+# native Python str() objects, since we use Trex and Trex has broken unicode handling
+def _construct_yaml_str(self, node):
+ # Override the default string handling function
+ # to always return unicode objects
+ return self.construct_scalar(node)
+
+yaml_loader.add_constructor(u'tag:yaml.org,2002:str', _construct_yaml_str)
+# Unquoted dates like 2013-05-23 in yaml files get loaded as objects of type
+# datetime.data which causes problems in API layer when being processed by
+# openstack.common.jsonutils. Therefore, make unicode string out of timestamps
+# until jsonutils can handle dates.
+yaml_loader.add_constructor(u'tag:yaml.org,2002:timestamp',
+ _construct_yaml_str)
+
+
+def parse(tmpl_str):
+ """Takes a string and returns a dict containing the parsed structure.
+
+ This includes determination of whether the string is using the
+ JSON or YAML format.
+ """
+ if tmpl_str.startswith('{'):
+ tpl = jsonutils.loads(tmpl_str)
+ else:
+ try:
+ # we already use SafeLoader when constructing special Heat YAML loader class
+ tpl = yaml.load(tmpl_str, Loader=yaml_loader)
+ except yaml.YAMLError as yea:
+ raise ValueError(yea)
+ else:
+ if tpl is None:
+ tpl = {}
+ # Looking for supported version keys in the loaded template
+ if not ('HeatTemplateFormatVersion' in tpl or
+ 'heat_template_version' in tpl or
+ 'AWSTemplateFormatVersion' in tpl):
+ raise ValueError("Template format version not found.")
+ return tpl
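A short sketch of parse(); the HOT snippet is illustrative. Strings starting with '{' are treated as JSON, everything else as YAML, and a missing version key raises ValueError.

    from vnftest.common import template_format

    hot = ("heat_template_version: 2016-04-08\n"
           "resources:\n"
           "  demo_net:\n"
           "    type: OS::Neutron::Net\n")
    template = template_format.parse(hot)
    assert 'resources' in template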
diff --git a/vnftest/common/utils.py b/vnftest/common/utils.py
new file mode 100644
index 0000000..e62b5db
--- /dev/null
+++ b/vnftest/common/utils.py
@@ -0,0 +1,399 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/utils.py
+
+import collections
+from contextlib import closing
+import datetime
+import errno
+import importlib
+import ipaddress
+import logging
+import os
+import random
+import socket
+import subprocess
+import sys
+
+import six
+from flask import jsonify
+from six.moves import configparser
+from oslo_serialization import jsonutils
+
+import vnftest
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+# Decorator for cli-args
+def cliargs(*args, **kwargs):
+ def _decorator(func):
+ func.__dict__.setdefault('arguments', []).insert(0, (args, kwargs))
+ return func
+ return _decorator
+
+
+def itersubclasses(cls, _seen=None):
+ """Generator over all subclasses of a given class in depth first order."""
+
+ if not isinstance(cls, type):
+ raise TypeError("itersubclasses must be called with "
+ "new-style classes, not %.100r" % cls)
+ _seen = _seen or set()
+ try:
+ subs = cls.__subclasses__()
+ except TypeError: # fails only when cls is type
+ subs = cls.__subclasses__(cls)
+ for sub in subs:
+ if sub not in _seen:
+ _seen.add(sub)
+ yield sub
+ for sub in itersubclasses(sub, _seen):
+ yield sub
+
+
+def import_modules_from_package(package):
+ """Import modules given a package name
+
+ :param: package - Full package name. For example: rally.deploy.engines
+ """
+ vnftest_root = os.path.dirname(os.path.dirname(vnftest.__file__))
+ path = os.path.join(vnftest_root, *package.split('.'))
+ for root, _, files in os.walk(path):
+ matches = (filename for filename in files if filename.endswith('.py')
+ and not filename.startswith('__'))
+ new_package = os.path.relpath(root, vnftest_root).replace(os.sep,
+ '.')
+ module_names = set(
+ '{}.{}'.format(new_package, filename.rsplit('.py', 1)[0])
+ for filename in matches)
+ # Find modules which haven't already been imported
+ missing_modules = module_names.difference(sys.modules)
+ logger.debug('Importing modules: %s', missing_modules)
+ for module_name in missing_modules:
+ try:
+ importlib.import_module(module_name)
+ except (ImportError, SyntaxError):
+ logger.exception('Unable to import module %s', module_name)
+
+
+def makedirs(d):
+ try:
+ os.makedirs(d)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def remove_file(path):
+ try:
+ os.remove(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+
+def execute_command(cmd):
+ exec_msg = "Executing command: '%s'" % cmd
+ logger.debug(exec_msg)
+
+ output = subprocess.check_output(cmd.split()).split(os.linesep)
+
+ return output
+
+
+def source_env(env_file):
+ p = subprocess.Popen(". %s; env" % env_file, stdout=subprocess.PIPE,
+ shell=True)
+ output = p.communicate()[0]
+ env = dict(line.split('=', 1) for line in output.splitlines() if '=' in line)
+ os.environ.update(env)
+ return env
+
+
+def read_json_from_file(path):
+ with open(path, 'r') as f:
+ j = f.read()
+ # don't use jsonutils.load() it conflicts with already decoded input
+ return jsonutils.loads(j)
+
+
+def write_json_to_file(path, data, mode='w'):
+ with open(path, mode) as f:
+ jsonutils.dump(data, f)
+
+
+def write_file(path, data, mode='w'):
+ with open(path, mode) as f:
+ f.write(data)
+
+
+def parse_ini_file(path):
+ parser = configparser.ConfigParser()
+
+ try:
+ files = parser.read(path)
+ except configparser.MissingSectionHeaderError:
+ logger.exception('invalid file type')
+ raise
+ else:
+ if not files:
+ raise RuntimeError('file does not exist')
+
+ try:
+ default = {k: v for k, v in parser.items('DEFAULT')}
+ except configparser.NoSectionError:
+ default = {}
+
+ config = dict(DEFAULT=default,
+ **{s: {k: v for k, v in parser.items(
+ s)} for s in parser.sections()})
+
+ return config
+
+
+def get_port_mac(sshclient, port):
+ cmd = "ifconfig |grep HWaddr |grep %s |awk '{print $5}' " % port
+ status, stdout, stderr = sshclient.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+ return stdout.rstrip()
+
+
+def get_port_ip(sshclient, port):
+ cmd = "ifconfig %s |grep 'inet addr' |awk '{print $2}' " \
+ "|cut -d ':' -f2 " % port
+ status, stdout, stderr = sshclient.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+ return stdout.rstrip()
+
+
+def flatten_dict_key(data):
+ next_data = {}
+
+ # use list, because iterable is too generic
+ if not any(isinstance(v, (collections.Mapping, list))
+ for v in data.values()):
+ return data
+
+ for k, v in data.items():
+ if isinstance(v, collections.Mapping):
+ for n_k, n_v in v.items():
+ next_data["%s.%s" % (k, n_k)] = n_v
+ # use list because iterable is too generic
+ elif isinstance(v, collections.Iterable) and not isinstance(v, six.string_types):
+ for index, item in enumerate(v):
+ next_data["%s%d" % (k, index)] = item
+ else:
+ next_data[k] = v
+
+ return flatten_dict_key(next_data)
+
+
+def translate_to_str(obj):
+ if isinstance(obj, collections.Mapping):
+ return {str(k): translate_to_str(v) for k, v in obj.items()}
+ elif isinstance(obj, list):
+ return [translate_to_str(ele) for ele in obj]
+ elif isinstance(obj, six.text_type):
+ return str(obj)
+ return obj
+
+
+def result_handler(status, data):
+ result = {
+ 'status': status,
+ 'result': data
+ }
+ return jsonify(result)
+
+
+def change_obj_to_dict(obj):
+ dic = {}
+ for k, v in vars(obj).items():
+ try:
+ vars(v)
+ except TypeError:
+ dic.update({k: v})
+ return dic
+
+
+def set_dict_value(dic, keys, value):
+ return_dic = dic
+
+ for key in keys.split('.'):
+ return_dic.setdefault(key, {})
+ if key == keys.split('.')[-1]:
+ return_dic[key] = value
+ else:
+ return_dic = return_dic[key]
+ return dic
+
+
+def get_free_port(ip):
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+ port = random.randint(5000, 10000)
+ while s.connect_ex((ip, port)) == 0:
+ port = random.randint(5000, 10000)
+ return port
+
+
+def mac_address_to_hex_list(mac):
+ octets = ["0x{:02x}".format(int(elem, 16)) for elem in mac.split(':')]
+ assert len(octets) == 6 and all(len(octet) == 4 for octet in octets)
+ return octets
+
+
+def safe_ip_address(ip_addr):
+ """ get ip address version v6 or v4 """
+ try:
+ return ipaddress.ip_address(six.text_type(ip_addr))
+ except ValueError:
+ logging.error("%s is not valid", ip_addr)
+ return None
+
+
+def get_ip_version(ip_addr):
+ """ get ip address version v6 or v4 """
+ try:
+ address = ipaddress.ip_address(six.text_type(ip_addr))
+ except ValueError:
+ logging.error("%s is not valid", ip_addr)
+ return None
+ else:
+ return address.version
+
+
+def ip_to_hex(ip_addr, separator=''):
+ try:
+ address = ipaddress.ip_address(six.text_type(ip_addr))
+ except ValueError:
+ logging.error("%s is not valid", ip_addr)
+ return ip_addr
+
+ if address.version != 4:
+ return ip_addr
+
+ if not separator:
+ return '{:08x}'.format(int(address))
+
+ return separator.join('{:02x}'.format(octet) for octet in address.packed)
+
+
+def try_int(s, *args):
+ """Convert to integer if possible."""
+ try:
+ return int(s)
+ except (TypeError, ValueError):
+ return args[0] if args else s
+
+
+class SocketTopology(dict):
+
+ @classmethod
+ def parse_cpuinfo(cls, cpuinfo):
+ socket_map = {}
+
+ lines = cpuinfo.splitlines()
+
+ core_details = []
+ core_lines = {}
+ for line in lines:
+ if line.strip():
+ name, value = line.split(":", 1)
+ core_lines[name.strip()] = try_int(value.strip())
+ else:
+ core_details.append(core_lines)
+ core_lines = {}
+
+ for core in core_details:
+ socket_map.setdefault(core["physical id"], {}).setdefault(
+ core["core id"], {})[core["processor"]] = (
+ core["processor"], core["core id"], core["physical id"])
+
+ return cls(socket_map)
+
+ def sockets(self):
+ return sorted(self.keys())
+
+ def cores(self):
+ return sorted(core for cores in self.values() for core in cores)
+
+ def processors(self):
+ return sorted(
+ proc for cores in self.values() for procs in cores.values() for
+ proc in procs)
+
+
+def config_to_dict(config):
+ return {section: dict(config.items(section)) for section in
+ config.sections()}
+
+
+def validate_non_string_sequence(value, default=None, raise_exc=None):
+ # NOTE(ralonsoh): refactor this function to check if raise_exc is an
+ # Exception. Remove duplicate code, this function is duplicated in this
+ # repository.
+ if isinstance(value, collections.Sequence) and not isinstance(value, six.string_types):
+ return value
+ if raise_exc:
+ raise raise_exc # pylint: disable=raising-bad-type
+ return default
+
+
+def join_non_strings(separator, *non_strings):
+ try:
+ non_strings = validate_non_string_sequence(non_strings[0], raise_exc=RuntimeError)
+ except (IndexError, RuntimeError):
+ pass
+ return str(separator).join(str(non_string) for non_string in non_strings)
+
+
+def safe_decode_utf8(s):
+ """Safely decode a str from UTF-8"""
+ if six.PY3 and isinstance(s, bytes):
+ return s.decode('utf-8', 'surrogateescape')
+ return s
+
+
+class ErrorClass(object):
+
+ def __init__(self, *args, **kwargs):
+ if 'test' not in kwargs:
+ raise RuntimeError
+
+ def __getattr__(self, item):
+ raise AttributeError
+
+
+class Timer(object):
+ def __init__(self):
+ super(Timer, self).__init__()
+ self.start = self.delta = None
+
+ def __enter__(self):
+ self.start = datetime.datetime.now()
+ return self
+
+ def __exit__(self, *_):
+ self.delta = datetime.datetime.now() - self.start
+
+ def __getattr__(self, item):
+ return getattr(self.delta, item)
+
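Two small sketches for helpers defined above (flatten_dict_key and Timer); the input data and path are illustrative.

    from vnftest.common import utils

    flat = utils.flatten_dict_key({'vnf': {'name': 'demo', 'ports': [80, 443]}})
    # -> {'vnf.name': 'demo', 'vnf.ports0': 80, 'vnf.ports1': 443}

    with utils.Timer() as timer:
        utils.makedirs('/tmp/vnftest-demo')   # idempotent mkdir -p
    elapsed = timer.total_seconds()           # attribute access is delegated to the timedelta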
diff --git a/vnftest/common/yaml_loader.py b/vnftest/common/yaml_loader.py
new file mode 100644
index 0000000..4f93e62
--- /dev/null
+++ b/vnftest/common/yaml_loader.py
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/yaml_loader.py
+
+from __future__ import absolute_import
+
+import yaml
+
+
+if hasattr(yaml, 'CSafeLoader'):
+ # make a dynamic subclass so we don't override global yaml Loader
+ yaml_loader = type('CustomLoader', (yaml.CSafeLoader,), {})
+else:
+ yaml_loader = type('CustomLoader', (yaml.SafeLoader,), {})
+
+if hasattr(yaml, 'CSafeDumper'):
+ yaml_dumper = yaml.CSafeDumper
+else:
+ yaml_dumper = yaml.SafeDumper
+
+
+def yaml_load(tmpl_str):
+ return yaml.load(tmpl_str, Loader=yaml_loader)
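A one-line sketch for yaml_load(); it accepts either a string or an open stream, and the path shown is the same default location used by constants.get_param above (assumed, relative to the vnftest root).

    from vnftest.common.yaml_loader import yaml_load

    with open('etc/vnftest/vnftest.yaml') as f:   # assumed path
        conf = yaml_load(f)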