author     Moshe <moshehoa@amdocs.com>    2018-02-26 13:39:57 +0200
committer  Moshe <moshehoa@amdocs.com>    2018-03-04 14:24:35 +0200
commit     0bb532c41e89568966ca2bfae259737e8830249b (patch)
tree       daad9e5debb95f292de880cfad18d2d0dce29239 /vnftest
parent     c1731afc800b3f7aaefd7c06dbe887ff057fa0f8 (diff)
Initial VNFTEST fw
Issue-ID: VNFSDK-195
Change-Id: I5abf0dd033e76e5225bb8271c0afaea325d741d9
Signed-off-by: Moshe <moshehoa@amdocs.com>

docker build
Issue-ID: VNFSDK-195
Change-Id: I25eb933504c0201e6c26477b540626fd515d2887
Signed-off-by: Moshe <moshehoa@amdocs.com>

fix requirements
Issue-ID: VNFSDK-195
Change-Id: I5907fa102bfbf9cb81d42e491c133b4fdbb0d6fd
Signed-off-by: Moshe <moshehoa@amdocs.com>

rm netifaces
Issue-ID: VNFSDK-195
Change-Id: I349d0c738442edfef256c90b06cbaeb446c1db13
Signed-off-by: Moshe <moshehoa@amdocs.com>

fix tox config
IssueID: VNFTEST-195
Change-Id: I5c0b0e0ab96cad1bdc56ab63860d794bfd15b5eb
Signed-off-by: Moshe <moshehoa@amdocs.com>

Add unit test
IssueID: VNFTEST-195
Change-Id: I08c9ba53721306aff4b74720181f8c853c4ccabe
Signed-off-by: Moshe <moshehoa@amdocs.com>

fix setup.py
Issue-ID: VNFSDK-195
Change-Id: I72bd93e4977edf5ef0b46c72fe47165b805aab7b
Signed-off-by: Moshe <moshehoa@amdocs.com>

fix test execution
Issue-ID: VNFSDK-195
Change-Id: I488a6226d2562229f0e7fa6c1d20f0c43882bc3b
Signed-off-by: Moshe <moshehoa@amdocs.com>
Diffstat (limited to 'vnftest')
-rw-r--r--  vnftest/__init__.py  61
-rwxr-xr-x  vnftest/cmd/NSBperf.py  228
-rw-r--r--  vnftest/cmd/__init__.py  0
-rw-r--r--  vnftest/cmd/cli.py  184
-rw-r--r--  vnftest/cmd/commands/__init__.py  17
-rw-r--r--  vnftest/cmd/commands/env.py  95
-rw-r--r--  vnftest/cmd/commands/plugin.py  44
-rw-r--r--  vnftest/cmd/commands/report.py  34
-rw-r--r--  vnftest/cmd/commands/runner.py  41
-rw-r--r--  vnftest/cmd/commands/step.py  40
-rw-r--r--  vnftest/cmd/commands/task.py  70
-rw-r--r--  vnftest/cmd/commands/testcase.py  49
-rw-r--r--  vnftest/common/__init__.py  0
-rw-r--r--  vnftest/common/constants.py  147
-rw-r--r--  vnftest/common/exceptions.py  61
-rw-r--r--  vnftest/common/html_template.py  195
-rw-r--r--  vnftest/common/httpClient.py  48
-rw-r--r--  vnftest/common/openstack_utils.py  765
-rw-r--r--  vnftest/common/process.py  140
-rw-r--r--  vnftest/common/rest_client.py  62
-rwxr-xr-x  vnftest/common/task_template.py  78
-rw-r--r--  vnftest/common/template_format.py  72
-rw-r--r--  vnftest/common/utils.py  399
-rw-r--r--  vnftest/common/yaml_loader.py  35
-rw-r--r--  vnftest/dispatcher/__init__.py  30
-rw-r--r--  vnftest/dispatcher/base.py  50
-rw-r--r--  vnftest/dispatcher/file.py  36
-rw-r--r--  vnftest/dispatcher/http.py  94
-rwxr-xr-x  vnftest/main.py  57
-rw-r--r--  vnftest/onap/__init__.py  20
-rw-r--r--  vnftest/onap/contexts/__init__.py  0
-rw-r--r--  vnftest/onap/contexts/base.py  64
-rw-r--r--  vnftest/onap/contexts/csar.py  44
-rw-r--r--  vnftest/onap/contexts/dummy.py  41
-rw-r--r--  vnftest/onap/core/__init__.py  45
-rw-r--r--  vnftest/onap/core/plugin.py  175
-rw-r--r--  vnftest/onap/core/report.py  128
-rw-r--r--  vnftest/onap/core/runner.py  44
-rw-r--r--  vnftest/onap/core/step.py  44
-rw-r--r--  vnftest/onap/core/task.py  605
-rw-r--r--  vnftest/onap/core/testcase.py  113
-rw-r--r--  vnftest/onap/core/testsuite.py  49
-rw-r--r--  vnftest/onap/runners/__init__.py  0
-rwxr-xr-x  vnftest/onap/runners/base.py  250
-rw-r--r--  vnftest/onap/runners/duration.py  145
-rwxr-xr-x  vnftest/onap/runners/dynamictp.py  179
-rw-r--r--  vnftest/onap/runners/iteration.py  160
-rw-r--r--  vnftest/onap/runners/search.py  180
-rw-r--r--  vnftest/onap/runners/sequence.py  149
-rw-r--r--  vnftest/onap/steps/__init__.py  0
-rw-r--r--  vnftest/onap/steps/base.py  89
-rw-r--r--  vnftest/onap/steps/dummy/__init__.py  0
-rw-r--r--  vnftest/onap/steps/dummy/dummy.py  42
-rw-r--r--  vnftest/onap/steps/onap_api_call.py  145
-rw-r--r--  vnftest/onap/steps/onboard/__init__.py  0
-rw-r--r--  vnftest/onap/steps/onboard/create_vlm.yaml  28
-rw-r--r--  vnftest/ssh.py  497
57 files changed, 6368 insertions, 0 deletions
diff --git a/vnftest/__init__.py b/vnftest/__init__.py
new file mode 100644
index 0000000..212c153
--- /dev/null
+++ b/vnftest/__init__.py
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+from __future__ import absolute_import
+import logging
+import os
+import errno
+
+# this module must only import other modules that do
+# not require loggers to be created, so this cannot
+# include vnftest.common.utils
+from vnftest.common import constants
+
+try:
+ # do not use vnftest.common.utils.makedirs
+ # since vnftest.common.utils creates a logger
+ # and so it cannot be imported before this code
+ os.makedirs(constants.LOG_DIR)
+except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+LOG_FILE = os.path.join(constants.LOG_DIR, 'vnftest.log')
+LOG_FORMATTER = '%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d %(message)s'
+
+_LOG_FORMATTER = logging.Formatter(LOG_FORMATTER)
+_LOG_STREAM_HDLR = logging.StreamHandler()
+_LOG_FILE_HDLR = logging.FileHandler(LOG_FILE)
+
+LOG = logging.getLogger(__name__)
+
+
+def _init_logging():
+
+ LOG.setLevel(logging.DEBUG)
+
+ _LOG_STREAM_HDLR.setFormatter(_LOG_FORMATTER)
+ if os.environ.get('CI_DEBUG', '').lower() in {'1', 'y', "yes", "true"}:
+ _LOG_STREAM_HDLR.setLevel(logging.DEBUG)
+ else:
+ _LOG_STREAM_HDLR.setLevel(logging.INFO)
+
+ # don't append to log file, clobber
+ _LOG_FILE_HDLR.setFormatter(_LOG_FORMATTER)
+ _LOG_FILE_HDLR.setLevel(logging.DEBUG)
+
+ del logging.root.handlers[:]
+ logging.root.addHandler(_LOG_STREAM_HDLR)
+ logging.root.addHandler(_LOG_FILE_HDLR)
+ logging.debug("logging.root.handlers = %s", logging.root.handlers)
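For reference, a minimal sketch of how this bootstrap is consumed (assuming the vnftest package is importable; the logger name below is illustrative). CI_DEBUG only needs to be set before _init_logging() runs, because the console level is read at call time:

    import logging
    import os

    from vnftest import _init_logging

    os.environ['CI_DEBUG'] = 'true'              # illustrative: raise console output to DEBUG
    _init_logging()                              # installs the stream and file handlers on the root logger

    LOG = logging.getLogger('vnftest.example')   # hypothetical module name
    LOG.debug('recorded on the console and in vnftest.log under constants.LOG_DIR')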
diff --git a/vnftest/cmd/NSBperf.py b/vnftest/cmd/NSBperf.py
new file mode 100755
index 0000000..40a157b
--- /dev/null
+++ b/vnftest/cmd/NSBperf.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/NSBperf.py
+
+"""NSBPERF main script.
+"""
+
+from __future__ import absolute_import
+from __future__ import print_function
+import os
+import argparse
+import json
+import subprocess
+import signal
+from oslo_serialization import jsonutils
+
+from six.moves import input
+
+CLI_PATH = os.path.dirname(os.path.realpath(__file__))
+REPO_PATH = os.path.abspath(os.path.join(CLI_PATH, os.pardir))
+
+
+def sigint_handler(*args, **kwargs):
+ """ Capture ctrl+c and exit cli """
+ subprocess.call(["pkill", "-9", "vnftest"])
+ raise SystemExit(1)
+
+
+class VnftestNSCli(object):
+ """ This class handles vnftest network service testing """
+
+ def __init__(self):
+ super(VnftestNSCli, self).__init__()
+
+ @classmethod
+ def validate_input(cls, choice, choice_len):
+ """ Validate user inputs """
+ if not str(choice):
+ return 1
+
+ choice = int(choice)
+ if not 1 <= choice <= choice_len:
+ print("\nInvalid choice...")
+ input("Press Enter to continue...")
+ return 1
+ subprocess.call(['clear'])
+ return 0
+
+ @classmethod
+ def parse_arguments(cls):
+ """
+ Parse command line arguments.
+ """
+ parser = \
+ argparse.ArgumentParser(
+ prog=__file__,
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('--version', action='version',
+ version='%(prog)s 0.1')
+ parser.add_argument('--list', '--list-tests', action='store_true',
+ help='list all tests and exit')
+ parser.add_argument('--list-vnfs', action='store_true',
+ help='list all system vnfs and exit')
+
+ group = parser.add_argument_group('test selection options')
+ group.add_argument('--vnf', help='vnf to use')
+ group.add_argument('--test', help='test in use')
+
+ args = vars(parser.parse_args())
+
+ return args
+
+ @classmethod
+ def generate_kpi_results(cls, tkey, tgen):
+ """ Generate report for vnf & traffic generator kpis """
+ if tgen:
+ print("\n%s stats" % tkey)
+ print("----------------------------")
+ for key, value in tgen.items():
+ if key != "collect_stats":
+ print(json.dumps({key: value}, indent=2))
+
+ @classmethod
+ def generate_nfvi_results(cls, nfvi):
+ """ Generate report for NFVi kpis """
+ if nfvi:
+ nfvi_kpi = {k: v for k, v in nfvi.items() if k == 'collect_stats'}
+ if nfvi_kpi:
+ print("\nNFVi stats")
+ print("----------------------------")
+ for key, value in nfvi_kpi.items():
+ print(json.dumps({key: value}, indent=2))
+
+ def generate_final_report(self, test_case):
+ """ Check whether partial test results are available
+ and generate the final report in rst format.
+ """
+
+ tc_name = os.path.splitext(test_case)[0]
+ report_caption = '{}\n{} ({})\n{}\n\n'.format(
+ '================================================================',
+ 'Performance report for', tc_name.upper(),
+ '================================================================')
+ print(report_caption)
+ if os.path.isfile("/tmp/vnftest.out"):
+ lines = []
+ with open("/tmp/vnftest.out") as infile:
+ lines = jsonutils.load(infile)
+
+ if lines:
+ lines = \
+ lines['result']["testcases"][tc_name]["tc_data"]
+ tc_res = lines.pop(len(lines) - 1)
+ for key, value in tc_res["data"].items():
+ self.generate_kpi_results(key, value)
+ self.generate_nfvi_results(value)
+
+ @classmethod
+ def handle_list_options(cls, args, test_path):
+ """ Process --list cli arguments if needed
+
+ :param args: A dictionary with all CLI arguments
+ """
+ if args['list_vnfs']:
+ vnfs = os.listdir(test_path)
+ print("VNF :")
+ print("================")
+ for index, vnf in enumerate(vnfs, 1):
+ print((' %-2s %s' % ('%s:' % str(index), vnf)))
+ raise SystemExit(0)
+
+ if args['list']:
+ vnfs = os.listdir(test_path)
+
+ print("Available Tests:")
+ print("*****************")
+ for vnf in vnfs:
+ testcases = os.listdir(test_path + vnf)
+ print(("VNF :(%s)" % vnf))
+ print("================")
+ test_cases = [tc for tc in testcases if "tc_" in tc and "template" not in tc]
+
+ print("\tBareMetal Testcase:")
+ print("\t===================")
+ for testcase in [tc for tc in test_cases if "baremetal" in tc]:
+ print("\t%s" % testcase)
+
+ print(os.linesep)
+ print("\tStandalone Virtualization Testcase:")
+ print("\t===================================")
+ for testcase in [tc for tc in test_cases if "ovs" in tc or "sriov" in tc]:
+ print("\t%s" % testcase)
+
+ print(os.linesep)
+ print("\tOpenstack Testcase:")
+ print("\t===================")
+ for testcase in [tc for tc in test_cases if "heat" in tc]:
+ print("\t%s" % testcase)
+ print(os.linesep)
+ raise SystemExit(0)
+
+ @classmethod
+ def terminate_if_less_options(cls, args):
+ """ Terminate the CLI if the mandatory cmdline options are missing """
+ if not (args["vnf"] and args["test"]):
+ print("CLI needs options, make sure to pass --vnf and --test")
+ print("eg: NSBperf.py --vnf <vnf under test> --test <test yaml>")
+ raise SystemExit(1)
+
+ def run_test(self, args, test_path):
+ """ run requested test """
+ try:
+ vnf = args.get("vnf", "")
+ test = args.get("test", "")
+
+ vnf_dir = test_path + os.sep + vnf
+ if not os.path.exists(vnf_dir):
+ raise ValueError("'%s', vnf not supported." % vnf)
+
+ testcases = [tc for tc in os.listdir(vnf_dir) if "tc" in tc]
+ subtest = set([test]).issubset(testcases)
+ if not subtest:
+ raise ValueError("'%s', testcase not supported." % test)
+
+ os.chdir(vnf_dir)
+ # fixme: Use REST APIs to initiate testcases
+ subprocess.check_output(["vnftest", "--debug",
+ "task", "start", test])
+ self.generate_final_report(test)
+ except (IOError, ValueError):
+ print("Value/I/O error...")
+ except BaseException:
+ print("Test failed. Please verify test inputs & re-run the test.")
+ print("eg: NSBperf.py --vnf <vnf under test> --test <test yaml>")
+
+ def main(self):
+ """Main function.
+ """
+ test_path = os.path.join(REPO_PATH, "../samples/vnf_samples/nsut/")
+ os.chdir(os.path.join(REPO_PATH, "../"))
+ args = self.parse_arguments()
+
+ # if required, handle list-* operations
+ self.handle_list_options(args, test_path)
+
+ # check for input params
+ self.terminate_if_less_options(args)
+
+ # run test
+ self.run_test(args, test_path)
+
+if __name__ == "__main__":
+ signal.signal(signal.SIGINT, sigint_handler)
+ NS_CLI = VnftestNSCli()
+ NS_CLI.main()
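generate_final_report() above expects /tmp/vnftest.out to hold JSON whose result/testcases/<tc_name>/tc_data list ends with a record carrying the KPI data; tc_name is the test file name without its extension. A hedged sketch of that shape (every name other than result, testcases, tc_data, data and collect_stats is invented):

    vnftest_out = {
        "result": {
            "testcases": {
                "tc_sample": {                              # tc_name for a file called tc_sample.yaml
                    "tc_data": [
                        {"data": {
                            "vnf__0": {                       # hypothetical KPI source
                                "packets_in": 1000,           # printed by generate_kpi_results()
                                "collect_stats": {"cpu": 42}  # printed by generate_nfvi_results()
                            }
                        }}
                    ]
                }
            }
        }
    }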
diff --git a/vnftest/cmd/__init__.py b/vnftest/cmd/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/cmd/__init__.py
diff --git a/vnftest/cmd/cli.py b/vnftest/cmd/cli.py
new file mode 100644
index 0000000..9bffe56
--- /dev/null
+++ b/vnftest/cmd/cli.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/cli.py
+"""
+Command-line interface to vnftest
+"""
+
+from __future__ import absolute_import
+import logging
+import os
+import sys
+
+from pkg_resources import get_distribution
+from argparse import RawDescriptionHelpFormatter
+from oslo_config import cfg
+
+from vnftest import _init_logging, _LOG_STREAM_HDLR
+from vnftest.cmd.commands import task
+from vnftest.cmd.commands import runner
+from vnftest.cmd.commands import step
+from vnftest.cmd.commands import testcase
+from vnftest.cmd.commands import plugin
+from vnftest.cmd.commands import env
+from vnftest.cmd.commands import report
+
+CONF = cfg.CONF
+cli_opts = [
+ cfg.BoolOpt('debug',
+ short='d',
+ default=False,
+ help='increase output verbosity to debug')
+]
+CONF.register_cli_opts(cli_opts)
+
+CONFIG_SEARCH_PATHS = [sys.prefix + "/etc/vnftest",
+ "~/.vnftest",
+ "/etc/vnftest"]
+
+
+def find_config_files(path_list):
+ for path in path_list:
+ abspath = os.path.abspath(os.path.expanduser(path))
+ confname = abspath + "/vnftest.conf"
+ if os.path.isfile(confname):
+ return [confname]
+
+ return None
+
+
+class VnftestCLI(): # pragma: no cover
+ """Command-line interface to vnftest"""
+
+ # Command categories
+ categories = {
+ 'task': task.TaskCommands,
+ 'runner': runner.RunnerCommands,
+ 'step': step.StepCommands,
+ 'testcase': testcase.TestcaseCommands,
+ 'plugin': plugin.PluginCommands,
+ 'env': env.EnvCommand,
+ 'report': report.ReportCommands
+ }
+
+ def __init__(self):
+ self.opts = []
+ self._version = 'vnftest version %s ' % \
+ get_distribution('vnftest').version
+
+ def _find_actions(self, subparsers, actions_module):
+ """find action methods"""
+ # Find action methods inside actions_module and
+ # add them to the command parser.
+ # The 'actions_module' argument may be a class
+ # or module. Action methods start with 'do_'
+ for attr in (a for a in dir(actions_module) if a.startswith('do_')):
+ command = attr[3:].replace('_', '-')
+ callback = getattr(actions_module, attr)
+ desc = callback.__doc__ or ''
+ arguments = getattr(callback, 'arguments', [])
+ subparser = subparsers.add_parser(
+ command,
+ description=desc
+ )
+ for (args, kwargs) in arguments:
+ subparser.add_argument(*args, **kwargs)
+ subparser.set_defaults(func=callback)
+
+ def _add_command_parsers(self, categories, subparsers):
+ """add commands to command-line parser"""
+ for category in categories:
+ command_object = categories[category]()
+ desc = command_object.__doc__ or ''
+ subparser = subparsers.add_parser(
+ category, description=desc,
+ formatter_class=RawDescriptionHelpFormatter
+ )
+ subparser.set_defaults(command_object=command_object)
+ cmd_subparsers = subparser.add_subparsers(title='subcommands')
+ self._find_actions(cmd_subparsers, command_object)
+
+ def _register_cli_opt(self):
+
+ # register subcommands to parse additional command line arguments
+ def parser(subparsers):
+ self._add_command_parsers(VnftestCLI.categories, subparsers)
+
+ category_opt = cfg.SubCommandOpt("category",
+ title="Command categories",
+ help="Available categories",
+ handler=parser)
+ self._register_opt(category_opt)
+
+ def _register_opt(self, opt):
+
+ CONF.register_cli_opt(opt)
+ self.opts.append(opt)
+
+ def _load_cli_config(self, argv):
+
+ # load CLI args and config files
+ CONF(argv, project="vnftest", version=self._version,
+ default_config_files=find_config_files(CONFIG_SEARCH_PATHS))
+
+ def _handle_global_opts(self):
+
+ _init_logging()
+ if CONF.debug:
+ _LOG_STREAM_HDLR.setLevel(logging.DEBUG)
+
+ def _dispatch_func_notask(self):
+
+ # dispatch to category parser
+ func = CONF.category.func
+ func(CONF.category)
+
+ def _dispatch_func_task(self, task_id):
+
+ # dispatch to category parser
+ func = CONF.category.func
+ func(CONF.category, task_id=task_id)
+
+ def _clear_config_opts(self):
+
+ CONF.clear()
+ CONF.unregister_opts(self.opts)
+
+ def main(self, argv): # pragma: no cover
+ """run the command line interface"""
+ try:
+ self._register_cli_opt()
+
+ self._load_cli_config(argv)
+
+ self._handle_global_opts()
+
+ self._dispatch_func_notask()
+ finally:
+ self._clear_config_opts()
+
+ def api(self, argv, task_id): # pragma: no cover
+ """run the api interface"""
+ try:
+ self._register_cli_opt()
+
+ self._load_cli_config(argv)
+
+ self._handle_global_opts()
+
+ self._dispatch_func_task(task_id)
+ finally:
+ self._clear_config_opts()
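New command categories plug into this CLI purely by convention: _find_actions() turns every do_* method into a subcommand and reads the 'arguments' list that the cliargs decorator from vnftest.common.utils attaches. A hedged sketch of what a new category class could look like (the class, subcommand and flag names are invented):

    from vnftest.common.utils import cliargs


    class DemoCommands(object):   # would be registered as e.g. 'demo': DemoCommands in categories
        """Demo commands."""

        @cliargs("name", type=str, help="who to greet", nargs=1)
        @cliargs("--shout", help="print in upper case", action="store_true")
        def do_hello(self, args):
            """becomes the 'hello' subcommand; args is the parsed 'category' namespace"""
            msg = "hello %s" % args.name[0]
            print(msg.upper() if args.shout else msg)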
diff --git a/vnftest/cmd/commands/__init__.py b/vnftest/cmd/commands/__init__.py
new file mode 100644
index 0000000..8c46b47
--- /dev/null
+++ b/vnftest/cmd/commands/__init__.py
@@ -0,0 +1,17 @@
+from __future__ import absolute_import
+from vnftest.onap.core import Param
+
+
+def change_osloobj_to_paras(args):
+ param = Param({})
+ for k in vars(param):
+ if hasattr(args, k):
+ setattr(param, k, getattr(args, k))
+ return param
+
+
+class Commands(object):
+
+ def _change_to_dict(self, args):
+ p = Param({})
+ return {k: getattr(args, k) for k in vars(p) if hasattr(args, k)}
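change_osloobj_to_paras() copies onto a fresh Param object only the attributes that Param({}) itself defines, dropping anything else found on the parsed namespace. A rough illustration, assuming output_file is one of those Param attributes (TaskCommands.do_start further down relies on it):

    class FakeArgs(object):                  # stand-in for the oslo.config namespace
        output_file = '/tmp/vnftest.out'
        unrelated = 'ignored'                # not a Param attribute, so it is not copied

    param = change_osloobj_to_paras(FakeArgs())
    print(param.output_file)                 # -> /tmp/vnftest.out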
diff --git a/vnftest/cmd/commands/env.py b/vnftest/cmd/commands/env.py
new file mode 100644
index 0000000..55f0ebb
--- /dev/null
+++ b/vnftest/cmd/commands/env.py
@@ -0,0 +1,95 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/env.py
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+import sys
+import time
+
+from six.moves import range
+
+from vnftest.common import constants as consts
+from vnftest.common.httpClient import HttpClient
+
+
+class EnvCommand(object):
+ """
+
+ Set of commands to prepare environment
+ """
+
+ def do_influxdb(self, args):
+ data = {'action': 'create_influxdb'}
+ task_id = self._start_async_task(data)
+
+ start = '* creating influxDB'
+ self._check_status(task_id, start)
+
+ def do_grafana(self, args):
+ data = {'action': 'create_grafana'}
+ task_id = self._start_async_task(data)
+
+ start = '* creating grafana'
+ self._check_status(task_id, start)
+
+ def do_prepare(self, args):
+ data = {'action': 'prepare_env'}
+ task_id = self._start_async_task(data)
+
+ start = '* preparing vnftest environment'
+ self._check_status(task_id, start)
+
+ def _start_async_task(self, data):
+ url = consts.ENV_ACTION_API
+ return HttpClient().post(url, data)['result']['task_id']
+
+ def _check_status(self, task_id, start):
+ self._print_status(start, '[]\r')
+ url = '{}?task_id={}'.format(consts.ASYNC_TASK_API, task_id)
+
+ CHECK_STATUS_RETRY = 20
+ CHECK_STATUS_DELAY = 5
+
+ for retry in range(CHECK_STATUS_RETRY):
+ response = HttpClient().get(url)
+ status = response['status']
+
+ if status:
+ break
+
+ # wait until the async task finished
+ time.sleep(CHECK_STATUS_DELAY * (retry + 1))
+
+ switcher = {
+ 0: 'Timeout',
+ 1: 'Finished',
+ 2: 'Error'
+ }
+ self._print_status(start, '[{}]'.format(switcher[status]))
+ if status == 2:
+ print(response['result'])
+ sys.stdout.flush()
+ return status
+
+ def _print_status(self, s, e):
+ try:
+ columns = int(os.popen('stty size', 'r').read().split()[1])
+ word = '{}{}{}'.format(s, ' ' * (columns - len(s) - len(e)), e)
+ sys.stdout.write(word)
+ sys.stdout.flush()
+ except IndexError:
+ pass
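_check_status() polls the async-task API with a linearly growing delay: the wait after attempt N is CHECK_STATUS_DELAY * (N + 1) seconds, so with the constants above an env command gives up after at most 5 * (1 + 2 + ... + 20) = 1050 seconds. A quick check of that bound:

    CHECK_STATUS_RETRY = 20
    CHECK_STATUS_DELAY = 5
    worst_case = sum(CHECK_STATUS_DELAY * (retry + 1) for retry in range(CHECK_STATUS_RETRY))
    print(worst_case)   # 1050 seconds, i.e. 17.5 minutes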
diff --git a/vnftest/cmd/commands/plugin.py b/vnftest/cmd/commands/plugin.py
new file mode 100644
index 0000000..e05130a
--- /dev/null
+++ b/vnftest/cmd/commands/plugin.py
@@ -0,0 +1,44 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/plugin.py
+""" Handler for vnftest command 'plugin' """
+
+from __future__ import print_function
+
+from __future__ import absolute_import
+from vnftest.onap.core.plugin import Plugin
+from vnftest.common.utils import cliargs
+from vnftest.cmd.commands import change_osloobj_to_paras
+
+
+class PluginCommands(object): # pragma: no cover
+ """Plugin commands.
+
+ Set of commands to manage plugins.
+ """
+
+ @cliargs("input_file", type=str, help="path to plugin configuration file",
+ nargs=1)
+ def do_install(self, args):
+ """Install a plugin."""
+ param = change_osloobj_to_paras(args)
+ Plugin().install(param)
+
+ @cliargs("input_file", type=str, help="path to plugin configuration file",
+ nargs=1)
+ def do_remove(self, args):
+ """Remove a plugin."""
+ param = change_osloobj_to_paras(args)
+ Plugin().remove(param)
diff --git a/vnftest/cmd/commands/report.py b/vnftest/cmd/commands/report.py
new file mode 100644
index 0000000..05b9249
--- /dev/null
+++ b/vnftest/cmd/commands/report.py
@@ -0,0 +1,34 @@
+##############################################################################
+# Copyright (c) 2017 Rajesh Kudaka.
+#
+# Author: Rajesh Kudaka (4k.rajesh@gmail.com)
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/report.py
+""" Handler for vnftest command 'report' """
+
+from __future__ import print_function
+
+from __future__ import absolute_import
+
+from vnftest.onap.core.report import Report
+from vnftest.cmd.commands import change_osloobj_to_paras
+from vnftest.common.utils import cliargs
+
+
+class ReportCommands(object): # pragma: no cover
+ """Report commands.
+
+ Set of commands to manage benchmark tasks.
+ """
+
+ @cliargs("task_id", type=str, help=" task id", nargs=1)
+ @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
+ def do_generate(self, args):
+ """Start a benchmark step."""
+ param = change_osloobj_to_paras(args)
+ Report().generate(param)
diff --git a/vnftest/cmd/commands/runner.py b/vnftest/cmd/commands/runner.py
new file mode 100644
index 0000000..557f58f
--- /dev/null
+++ b/vnftest/cmd/commands/runner.py
@@ -0,0 +1,41 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/runner.py
+""" Handler for vnftest command 'runner' """
+
+from __future__ import print_function
+
+from __future__ import absolute_import
+from vnftest.onap.core.runner import Runners
+from vnftest.common.utils import cliargs
+from vnftest.cmd.commands import change_osloobj_to_paras
+
+
+class RunnerCommands(object): # pragma: no cover
+ """Runner commands.
+
+ Set of commands to discover and display runner types.
+ """
+
+ def do_list(self, args):
+ """List existing runner types"""
+ param = change_osloobj_to_paras(args)
+ Runners().list_all(param)
+
+ @cliargs("type", type=str, help="runner type", nargs=1)
+ def do_show(self, args):
+ """Show details of a specific runner type"""
+ param = change_osloobj_to_paras(args)
+ Runners().show(param)
diff --git a/vnftest/cmd/commands/step.py b/vnftest/cmd/commands/step.py
new file mode 100644
index 0000000..10ae913
--- /dev/null
+++ b/vnftest/cmd/commands/step.py
@@ -0,0 +1,40 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/step.py
+""" Handler for vnftest command 'step' """
+
+from __future__ import print_function
+from __future__ import absolute_import
+from vnftest.onap.core.step import Steps
+from vnftest.common.utils import cliargs
+from vnftest.cmd.commands import change_osloobj_to_paras
+
+
+class StepCommands(object): # pragma: no cover
+ """Step commands.
+
+ Set of commands to discover and display step types.
+ """
+
+ def do_list(self, args):
+ """List existing step types"""
+ param = change_osloobj_to_paras(args)
+ Steps().list_all(param)
+
+ @cliargs("type", type=str, help="step type", nargs=1)
+ def do_show(self, args):
+ """Show details of a specific step type"""
+ param = change_osloobj_to_paras(args)
+ Steps().show(param)
diff --git a/vnftest/cmd/commands/task.py b/vnftest/cmd/commands/task.py
new file mode 100644
index 0000000..cc77bb8
--- /dev/null
+++ b/vnftest/cmd/commands/task.py
@@ -0,0 +1,70 @@
+#############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/task.py
+""" Handler for vnftest command 'task' """
+from __future__ import print_function
+from __future__ import absolute_import
+
+import logging
+
+from vnftest.onap.core.task import Task
+from vnftest.common.utils import cliargs
+from vnftest.common.utils import write_json_to_file
+from vnftest.cmd.commands import change_osloobj_to_paras
+
+output_file_default = "/tmp/vnftest.out"
+
+
+LOG = logging.getLogger(__name__)
+
+
+class TaskCommands(object): # pragma: no cover
+ """Task commands.
+
+ Set of commands to manage benchmark tasks.
+ """
+
+ @cliargs("inputfile", type=str, help="path to task or suite file", nargs=1)
+ @cliargs("--task-args", dest="task_args",
+ help="Input task args (dict in json). These args are used "
+ "to render the input task, which is a jinja2 template.")
+ @cliargs("--task-args-file", dest="task_args_file",
+ help="Path to the file with input task args (dict in "
+ "json/yaml). These args are used to render the input "
+ "task, which is a jinja2 template.")
+ @cliargs("--keep-deploy", help="keep context deployed in cloud",
+ action="store_true")
+ @cliargs("--parse-only", help="parse the config file and exit",
+ action="store_true")
+ @cliargs("--output-file", help="file where output is stored, default %s" %
+ output_file_default, default=output_file_default)
+ @cliargs("--suite", help="process test suite file instead of a task file",
+ action="store_true")
+ def do_start(self, args, **kwargs):
+ param = change_osloobj_to_paras(args)
+ self.output_file = param.output_file
+
+ result = {}
+ LOG.info('Task START')
+ try:
+ result = Task().start(param, **kwargs)
+ except Exception as e:
+ self._write_error_data(e)
+ LOG.exception("")
+
+ if result.get('result', {}).get('criteria') == 'PASS':
+ LOG.info('Task SUCCESS')
+ else:
+ LOG.info('Task FAILED')
+ raise RuntimeError('Task Failed')
+
+ def _write_error_data(self, error):
+ data = {'status': 2, 'result': str(error)}
+ write_json_to_file(self.output_file, data)
diff --git a/vnftest/cmd/commands/testcase.py b/vnftest/cmd/commands/testcase.py
new file mode 100644
index 0000000..518df2d
--- /dev/null
+++ b/vnftest/cmd/commands/testcase.py
@@ -0,0 +1,49 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/cmd/commands/testcase.py
+""" Handler for vnftest command 'testcase' """
+from __future__ import absolute_import
+
+import prettytable
+
+from vnftest.onap.core.testcase import Testcase
+from vnftest.common.utils import cliargs
+from vnftest.cmd.commands import change_osloobj_to_paras
+from vnftest.cmd.commands import Commands
+
+
+class TestcaseCommands(Commands):
+ """Testcase commands.
+
+ Set of commands to discover and display test cases.
+ """
+
+ def do_list(self, *args):
+ testcase_list = ""
+ self._format_print(testcase_list)
+
+ @cliargs("casename", type=str, help="test case name", nargs=1)
+ def do_show(self, args):
+ """Show details of a specific test case"""
+ param = change_osloobj_to_paras(args)
+ Testcase().show(param)
+
+ def _format_print(self, testcase_list):
+ """format output"""
+ case_table = prettytable.PrettyTable(['Testcase Name', 'Description'])
+ case_table.align = 'l'
+ for testcase_record in testcase_list:
+ case_table.add_row([testcase_record['Name'], testcase_record['Description']])
+ print(case_table)
diff --git a/vnftest/common/__init__.py b/vnftest/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/common/__init__.py
diff --git a/vnftest/common/constants.py b/vnftest/common/constants.py
new file mode 100644
index 0000000..9da64ba
--- /dev/null
+++ b/vnftest/common/constants.py
@@ -0,0 +1,147 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/constants.py
+from __future__ import absolute_import
+import os
+import errno
+
+from functools import reduce
+
+import pkg_resources
+
+# this module must only import other modules that do
+# not require loggers to be created, so this cannot
+# include vnftest.common.utils
+from vnftest.common.yaml_loader import yaml_load
+
+dirname = os.path.dirname
+abspath = os.path.abspath
+join = os.path.join
+sep = os.path.sep
+
+CONF = {}
+CONF_FILE = None
+VNFTEST_ROOT_PATH = dirname(
+ dirname(abspath(pkg_resources.resource_filename(__name__, "")))) + sep
+
+
+def get_param(key, default=''):
+ # we have to defer this to runtime so that we can mock os.environ.get in unittests
+ default_path = os.path.join(VNFTEST_ROOT_PATH, "etc/vnftest/vnftest.yaml")
+ conf_file = os.environ.get('CONF_FILE', default_path)
+ # don't re-parse yaml for each lookup
+ if not CONF:
+ # do not use vnftest.common.utils.parse_yaml
+ # since vnftest.common.utils creates a logger
+ # and so it cannot be imported before this code
+ try:
+ with open(conf_file) as f:
+ value = yaml_load(f)
+ except IOError:
+ pass
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ else:
+ CONF.update(value)
+ try:
+ return reduce(lambda a, b: a[b], key.split('.'), CONF)
+ except KeyError:
+ if not default:
+ raise
+ return default
+
+
+try:
+ SERVER_IP = get_param('api.server_ip')
+except KeyError:
+ try:
+ from pyroute2 import IPDB
+ except ImportError:
+ SERVER_IP = '172.17.0.1'
+ else:
+ with IPDB() as ip:
+ try:
+ SERVER_IP = ip.routes['default'].gateway
+ except KeyError:
+ # during unittests ip.routes['default'] can be invalid
+ SERVER_IP = '127.0.0.1'
+
+if not SERVER_IP:
+ SERVER_IP = '127.0.0.1'
+
+
+# dir
+CONF_DIR = get_param('dir.conf', join(VNFTEST_ROOT_PATH, 'etc/vnftest'))
+CONF_FILE = join(CONF_DIR, 'vnftest.conf')
+REPOS_DIR = get_param('dir.repos', join(VNFTEST_ROOT_PATH, 'home/onap/repos/vnftest'))
+LOG_DIR = get_param('dir.log', join(VNFTEST_ROOT_PATH, 'tmp/vnftest/'))
+
+TASK_LOG_DIR = get_param('dir.tasklog', join(VNFTEST_ROOT_PATH, 'var/log/vnftest/'))
+CONF_SAMPLE_DIR = join(REPOS_DIR, 'etc/vnftest/')
+SAMPLE_CASE_DIR = join(REPOS_DIR, 'samples')
+TESTCASE_DIR = join(VNFTEST_ROOT_PATH, 'tests/onap/test_cases/')
+TESTSUITE_DIR = join(VNFTEST_ROOT_PATH, 'tests/onap/test_suites/')
+
+# file
+DEFAULT_OUTPUT_FILE = get_param('file.output_file', join(VNFTEST_ROOT_PATH, 'tmp/vnftest.out'))
+DEFAULT_HTML_FILE = get_param('file.html_file', join(VNFTEST_ROOT_PATH, 'tmp/vnftest.htm'))
+REPORTING_FILE = get_param('file.reporting_file', join(VNFTEST_ROOT_PATH, 'tmp/report.html'))
+
+# components
+AAI_IP = get_param('component.aai_ip')
+AAI_PORT = get_param('component.aai_port')
+AAI_SSL_PORT = get_param('component.aai_ssl_port')
+MSO_IP = get_param('component.mso_ip')
+SDC_IP = get_param('component.sdc_ip')
+SDC_PORT = get_param('component.sdc_port')
+SDC_CATALOG_PORT = get_param('component.sdc_catalog_port')
+SDC_DESIGNER_USER = get_param('component.sdc_designer_user')
+SDC_TESTER_USER = get_param('component.sdc_tester_user')
+SDC_GOVERNANCE_USER = get_param('component.sdc_governance_user')
+SDC_OPERATIONS_USER = get_param('component.sdc_operations_user')
+
+component_constants = {}
+component_constants['aai_ip'] = AAI_IP
+component_constants['aai_port'] = AAI_PORT
+component_constants['aai_ssl_port'] = AAI_SSL_PORT
+component_constants['mso_ip'] = MSO_IP
+component_constants['sdc_ip'] = SDC_IP
+component_constants['sdc_port'] = SDC_PORT
+component_constants['sdc_catalog_port'] = SDC_CATALOG_PORT
+component_constants['sdc_designer_user'] = SDC_DESIGNER_USER
+component_constants['sdc_tester_user'] = SDC_TESTER_USER
+component_constants['sdc_governance_user'] = SDC_GOVERNANCE_USER
+component_constants['sdc_operations_user'] = SDC_OPERATIONS_USER
+
+
+# api
+API_PORT = 5000
+DOCKER_URL = 'unix://var/run/docker.sock'
+SQLITE = 'sqlite:////tmp/vnftest.db'
+
+API_SUCCESS = 1
+API_ERROR = 2
+TASK_NOT_DONE = 0
+TASK_DONE = 1
+TASK_FAILED = 2
+
+BASE_URL = 'http://localhost:5000'
+ENV_ACTION_API = BASE_URL + '/vnftest/env/action'
+ASYNC_TASK_API = BASE_URL + '/vnftest/asynctask'
+
+# general
+TESTCASE_PRE = 'onap_vnftest_'
+TESTSUITE_PRE = 'onap_'
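get_param() resolves dotted keys by folding each path segment into the dictionary parsed from vnftest.yaml, so 'component.sdc_ip' walks CONF['component']['sdc_ip'], the supplied default is used when a segment is missing, and the KeyError is re-raised when no default is given. A minimal sketch of that lookup with invented YAML content:

    from functools import reduce

    CONF = {'component': {'sdc_ip': '10.0.0.5'}}     # as if parsed from vnftest.yaml

    def lookup(key, default=''):
        try:
            return reduce(lambda a, b: a[b], key.split('.'), CONF)
        except KeyError:
            if not default:
                raise
            return default

    print(lookup('component.sdc_ip'))                # -> 10.0.0.5
    print(lookup('component.sdc_port', '8080'))      # -> 8080, the supplied default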
diff --git a/vnftest/common/exceptions.py b/vnftest/common/exceptions.py
new file mode 100644
index 0000000..6273cd3
--- /dev/null
+++ b/vnftest/common/exceptions.py
@@ -0,0 +1,61 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/exceptions.py
+
+from oslo_utils import excutils
+
+
+class ProcessExecutionError(RuntimeError):
+ def __init__(self, message, returncode):
+ super(ProcessExecutionError, self).__init__(message)
+ self.returncode = returncode
+
+
+class VnftestException(Exception):
+ """Base Vnftest Exception.
+
+ To correctly use this class, inherit from it and define
+ a 'message' property. That message will get printf'd
+ with the keyword arguments provided to the constructor.
+
+ Based on NeutronException class.
+ """
+ message = "An unknown exception occurred."
+
+ def __init__(self, **kwargs):
+ try:
+ super(VnftestException, self).__init__(self.message % kwargs)
+ self.msg = self.message % kwargs
+ except Exception: # pylint: disable=broad-except
+ with excutils.save_and_reraise_exception() as ctxt:
+ if not self.use_fatal_exceptions():
+ ctxt.reraise = False
+ # at least get the core message out if something happened
+ super(VnftestException, self).__init__(self.message)
+
+ def __str__(self):
+ return self.msg
+
+ def use_fatal_exceptions(self):
+ """Is the instance using fatal exceptions.
+
+ :returns: Always returns False.
+ """
+ return False
+
+
+class FunctionNotImplemented(VnftestException):
+ message = ('The function "%(function_name)s" is not implemented in '
+ '"%(class_name)s" class.')
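Concrete errors are declared the same way as FunctionNotImplemented above: subclass VnftestException, provide a printf-style message, and pass the substitution values as keyword arguments when raising. A short sketch with an invented exception class:

    class StepNotFound(VnftestException):            # hypothetical subclass
        message = 'Step "%(step_name)s" was not found.'

    try:
        raise StepNotFound(step_name='create_vlm')
    except VnftestException as exc:
        print(exc)                                   # Step "create_vlm" was not found.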
diff --git a/vnftest/common/html_template.py b/vnftest/common/html_template.py
new file mode 100644
index 0000000..572d47f
--- /dev/null
+++ b/vnftest/common/html_template.py
@@ -0,0 +1,195 @@
+#############################################################################
+# Copyright (c) 2017 Rajesh Kudaka
+#
+# Author: Rajesh Kudaka 4k.rajesh@gmail.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/html_template.py
+
+template = """
+<html>
+<body>
+<head>
+<meta charset="utf-8">
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7\
+/css/bootstrap.min.css">
+<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1\
+/jquery.min.js"></script>
+<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7\
+/js/bootstrap.min.js"></script>
+<script src="https://code.highcharts.com/highcharts.js"></script>
+<script src="jquery.min.js"></script>
+<script src="highcharts.js"></script>
+</head>
+<style>
+
+table{
+ overflow-y: scroll;
+ height: 360px;
+ display: block;
+ }
+
+ header,h3{
+ font-family:Frutiger;
+ clear: left;
+ text-align: center;
+}
+</style>
+<header class="jumbotron text-center">
+ <h1>Vnftest User Interface</h1>
+ <h4>Report of {{task_id}} Generated</h4>
+</header>
+
+<div class="container">
+ <div class="row">
+ <div class="col-md-4">
+ <div class="table-responsive" >
+ <table class="table table-hover" > </table>
+ </div>
+ </div>
+ <div class="col-md-8" >
+ <div id="container" ></div>
+ </div>
+ </div>
+</div>
+<script>
+ var arr, tab, th, tr, td, tn, row, col, thead, tbody;
+ arr={{table|safe}}
+ tab = document.getElementsByTagName('table')[0];
+ thead=document.createElement('thead');
+ tr = document.createElement('tr');
+ for(row=0;row<Object.keys(arr).length;row++)
+ {
+ th = document.createElement('th');
+ tn = document.createTextNode(Object.keys(arr).sort()[row]);
+ th.appendChild(tn);
+ tr.appendChild(th);
+ thead.appendChild(tr);
+ }
+ tab.appendChild(thead);
+ tbody=document.createElement('tbody');
+
+ for (col = 0; col < arr[Object.keys(arr)[0]].length; col++){
+ tr = document.createElement('tr');
+ for(row=0;row<Object.keys(arr).length;row++)
+ {
+ td = document.createElement('td');
+ tn = document.createTextNode(arr[Object.keys(arr).sort()[row]][col]);
+ td.appendChild(tn);
+ tr.appendChild(td);
+ }
+ tbody.appendChild(tr);
+ }
+tab.appendChild(tbody);
+
+</script>
+
+<script language="JavaScript">
+
+$(function() {
+ $('#container').highcharts({
+ title: {
+ text: 'Vnftest test results',
+ x: -20 //center
+ },
+ subtitle: {
+ text: 'Report of {{task_id}} Task Generated',
+ x: -20
+ },
+ xAxis: {
+ title: {
+ text: 'Timestamp'
+ },
+ categories:{{Timestamp|safe}}
+ },
+ yAxis: {
+
+ plotLines: [{
+ value: 0,
+ width: 1,
+ color: '#808080'
+ }]
+ },
+ tooltip: {
+ valueSuffix: ''
+ },
+ legend: {
+ layout: 'vertical',
+ align: 'right',
+ verticalAlign: 'middle',
+ borderWidth: 0
+ },
+ series: {{series|safe}}
+ });
+});
+
+</script>
+
+
+</body>
+</html>"""
+
+report_template = """
+<html>
+ <head>
+ <title>Vnftest Report</title>
+ <link href="http://cdn.static.runoob.com/libs/bootstrap/3.3.7/css\
+/bootstrap.min.css" rel="stylesheet">
+ </head>
+ <div class="content">
+ <h3>Vnftest Report </h3>
+ <hr/>
+ <div>
+
+ <div>Task ID : {{result.task_id}} </div>
+ <div style="margin-top:5px;">Criteria :
+ <font> {{result.criteria}}</font>
+ </div>
+ <hr/>
+
+ <caption>Information</caption>
+ <table class="table table-striped">
+ <tr>
+ <th>#</th>
+ <th>key</th>
+ <th>value</th>
+ </tr>
+ <tbody>
+ {% for key, value in result.info.items() %}
+ <tr>
+ <td>{{ loop.index }}</td>
+ <td>{{key}}</td>
+ <td>{{value}}</td>
+ </tr>
+ {% endfor %}
+ </tbody>
+ </table>
+ <hr/>
+
+ <caption>Test Cases</caption>
+ <table class="table table-striped">
+ <tr>
+ <th>#</th>
+ <th>key</th>
+ <th>value</th>
+ </tr>
+ <tbody>
+ {% for key, value in result.testcases.items() %}
+ <tr>
+ <td>{{ loop.index }}</td>
+ <td>{{key}}</td>
+ <td>{{value.criteria}}</td>
+ </tr>
+ {% endfor %}
+ </tbody>
+ </table>
+
+ </div>
+ </div>
+</html>
+"""
diff --git a/vnftest/common/httpClient.py b/vnftest/common/httpClient.py
new file mode 100644
index 0000000..e2c7937
--- /dev/null
+++ b/vnftest/common/httpClient.py
@@ -0,0 +1,48 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/httpClient.py
+
+from __future__ import absolute_import
+
+import logging
+import time
+
+from oslo_serialization import jsonutils
+import requests
+
+logger = logging.getLogger(__name__)
+
+
+class HttpClient(object):
+
+ def post(self, url, data, timeout=0):
+ data = jsonutils.dump_as_bytes(data)
+ headers = {'Content-Type': 'application/json'}
+ t_end = time.time() + timeout
+ while True:
+ try:
+ response = requests.post(url, data=data, headers=headers)
+ result = response.json()
+ logger.debug('The result is: %s', result)
+ return result
+ except Exception:
+ if time.time() > t_end:
+ logger.exception('')
+ raise
+ time.sleep(1)
+
+ def get(self, url):
+ response = requests.get(url)
+ return response.json()
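HttpClient.post() retries once per second until the request succeeds or the timeout window passes, which is how the env commands shown earlier wait for the API to become reachable. A hedged usage sketch (the payload is the one env.py sends; the 60-second window is arbitrary):

    from vnftest.common import constants as consts
    from vnftest.common.httpClient import HttpClient

    # keep retrying for up to 60 seconds before re-raising the last error
    result = HttpClient().post(consts.ENV_ACTION_API,
                               {'action': 'prepare_env'},
                               timeout=60)
    print(result)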
diff --git a/vnftest/common/openstack_utils.py b/vnftest/common/openstack_utils.py
new file mode 100644
index 0000000..954df2e
--- /dev/null
+++ b/vnftest/common/openstack_utils.py
@@ -0,0 +1,765 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/openstack_utils.py
+
+from __future__ import absolute_import
+
+import os
+import time
+import sys
+import logging
+
+from keystoneauth1 import loading
+from keystoneauth1 import session
+from cinderclient import client as cinderclient
+from novaclient import client as novaclient
+from glanceclient import client as glanceclient
+from neutronclient.neutron import client as neutronclient
+
+log = logging.getLogger(__name__)
+
+DEFAULT_HEAT_API_VERSION = '1'
+DEFAULT_API_VERSION = '2'
+
+
+# *********************************************
+# CREDENTIALS
+# *********************************************
+def get_credentials():
+ """Returns a creds dictionary filled with values parsed from the environment"""
+ creds = {}
+
+ keystone_api_version = os.getenv('OS_IDENTITY_API_VERSION')
+
+ if keystone_api_version is None or keystone_api_version == '2':
+ keystone_v3 = False
+ tenant_env = 'OS_TENANT_NAME'
+ tenant = 'tenant_name'
+ else:
+ keystone_v3 = True
+ tenant_env = 'OS_PROJECT_NAME'
+ tenant = 'project_name'
+
+ # The most common way to pass this information to the script is to do it
+ # through environment variables.
+ creds.update({
+ "username": os.environ.get("OS_USERNAME"),
+ "password": os.environ.get("OS_PASSWORD"),
+ "auth_url": os.environ.get("OS_AUTH_URL"),
+ tenant: os.environ.get(tenant_env)
+ })
+
+ if keystone_v3:
+ if os.getenv('OS_USER_DOMAIN_NAME') is not None:
+ creds.update({
+ "user_domain_name": os.getenv('OS_USER_DOMAIN_NAME')
+ })
+ if os.getenv('OS_PROJECT_DOMAIN_NAME') is not None:
+ creds.update({
+ "project_domain_name": os.getenv('OS_PROJECT_DOMAIN_NAME')
+ })
+
+ return creds
+
+
+def get_session_auth():
+ loader = loading.get_plugin_loader('password')
+ creds = get_credentials()
+ auth = loader.load_from_options(**creds)
+ return auth
+
+
+def get_session():
+ auth = get_session_auth()
+ try:
+ cacert = os.environ['OS_CACERT']
+ except KeyError:
+ return session.Session(auth=auth)
+ else:
+ insecure = os.getenv('OS_INSECURE', '').lower() == 'true'
+ cacert = False if insecure else cacert
+ return session.Session(auth=auth, verify=cacert)
+
+
+def get_endpoint(service_type, endpoint_type='publicURL'):
+ auth = get_session_auth()
+ # for multi-region, we need to specify region
+ # when finding the endpoint
+ return get_session().get_endpoint(auth=auth,
+ service_type=service_type,
+ endpoint_type=endpoint_type,
+ region_name=os.environ.get(
+ "OS_REGION_NAME"))
+
+
+# *********************************************
+# CLIENTS
+# *********************************************
+def get_heat_api_version(): # pragma: no cover
+ try:
+ api_version = os.environ['HEAT_API_VERSION']
+ except KeyError:
+ return DEFAULT_HEAT_API_VERSION
+ else:
+ log.info("HEAT_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_cinder_client_version(): # pragma: no cover
+ try:
+ api_version = os.environ['OS_VOLUME_API_VERSION']
+ except KeyError:
+ return DEFAULT_API_VERSION
+ else:
+ log.info("OS_VOLUME_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_cinder_client(): # pragma: no cover
+ sess = get_session()
+ return cinderclient.Client(get_cinder_client_version(), session=sess)
+
+
+def get_nova_client_version(): # pragma: no cover
+ try:
+ api_version = os.environ['OS_COMPUTE_API_VERSION']
+ except KeyError:
+ return DEFAULT_API_VERSION
+ else:
+ log.info("OS_COMPUTE_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_nova_client(): # pragma: no cover
+ sess = get_session()
+ return novaclient.Client(get_nova_client_version(), session=sess)
+
+
+def get_neutron_client_version(): # pragma: no cover
+ try:
+ api_version = os.environ['OS_NETWORK_API_VERSION']
+ except KeyError:
+ return DEFAULT_API_VERSION
+ else:
+ log.info("OS_NETWORK_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_neutron_client(): # pragma: no cover
+ sess = get_session()
+ return neutronclient.Client(get_neutron_client_version(), session=sess)
+
+
+def get_glance_client_version(): # pragma: no cover
+ try:
+ api_version = os.environ['OS_IMAGE_API_VERSION']
+ except KeyError:
+ return DEFAULT_API_VERSION
+ else:
+ log.info("OS_IMAGE_API_VERSION is set in env as '%s'", api_version)
+ return api_version
+
+
+def get_glance_client(): # pragma: no cover
+ sess = get_session()
+ return glanceclient.Client(get_glance_client_version(), session=sess)
+
+
+# *********************************************
+# NOVA
+# *********************************************
+def get_instances(nova_client): # pragma: no cover
+ try:
+ return nova_client.servers.list(search_opts={'all_tenants': 1})
+ except Exception:
+ log.exception("Error [get_instances(nova_client)]")
+
+
+def get_instance_status(nova_client, instance): # pragma: no cover
+ try:
+ return nova_client.servers.get(instance.id).status
+ except Exception:
+ log.exception("Error [get_instance_status(nova_client)]")
+
+
+def get_instance_by_name(nova_client, instance_name): # pragma: no cover
+ try:
+ return nova_client.servers.find(name=instance_name)
+ except Exception:
+ log.exception("Error [get_instance_by_name(nova_client, '%s')]",
+ instance_name)
+
+
+def get_aggregates(nova_client): # pragma: no cover
+ try:
+ return nova_client.aggregates.list()
+ except Exception:
+ log.exception("Error [get_aggregates(nova_client)]")
+
+
+def get_availability_zones(nova_client): # pragma: no cover
+ try:
+ return nova_client.availability_zones.list()
+ except Exception:
+ log.exception("Error [get_availability_zones(nova_client)]")
+
+
+def get_availability_zone_names(nova_client): # pragma: no cover
+ try:
+ return [az.zoneName for az in get_availability_zones(nova_client)]
+ except Exception:
+ log.exception("Error [get_availability_zone_names(nova_client)]")
+
+
+def create_aggregate(nova_client, aggregate_name, av_zone): # pragma: no cover
+ try:
+ nova_client.aggregates.create(aggregate_name, av_zone)
+ except Exception:
+ log.exception("Error [create_aggregate(nova_client, %s, %s)]",
+ aggregate_name, av_zone)
+ return False
+ else:
+ return True
+
+
+def get_aggregate_id(nova_client, aggregate_name): # pragma: no cover
+ try:
+ aggregates = get_aggregates(nova_client)
+ _id = next((ag.id for ag in aggregates if ag.name == aggregate_name))
+ except Exception:
+ log.exception("Error [get_aggregate_id(nova_client, %s)]",
+ aggregate_name)
+ else:
+ return _id
+
+
+def add_host_to_aggregate(nova_client, aggregate_name,
+ compute_host): # pragma: no cover
+ try:
+ aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+ nova_client.aggregates.add_host(aggregate_id, compute_host)
+ except Exception:
+ log.exception("Error [add_host_to_aggregate(nova_client, %s, %s)]",
+ aggregate_name, compute_host)
+ return False
+ else:
+ return True
+
+
+def create_aggregate_with_host(nova_client, aggregate_name, av_zone,
+ compute_host): # pragma: no cover
+ try:
+ create_aggregate(nova_client, aggregate_name, av_zone)
+ add_host_to_aggregate(nova_client, aggregate_name, compute_host)
+ except Exception:
+ log.exception("Error [create_aggregate_with_host("
+ "nova_client, %s, %s, %s)]",
+ aggregate_name, av_zone, compute_host)
+ return False
+ else:
+ return True
+
+
+def create_keypair(nova_client, name, key_path=None): # pragma: no cover
+ try:
+ with open(key_path) as fpubkey:
+ keypair = get_nova_client().keypairs.create(name=name, public_key=fpubkey.read())
+ return keypair
+ except Exception:
+ log.exception("Error [create_keypair(nova_client)]")
+
+
+def create_instance(json_body): # pragma: no cover
+ try:
+ return get_nova_client().servers.create(**json_body)
+ except Exception:
+ log.exception("Error [create_instance(json_body)]")
+ return None
+
+
+def create_instance_and_wait_for_active(json_body): # pragma: no cover
+ SLEEP = 3
+ VM_BOOT_TIMEOUT = 180
+ nova_client = get_nova_client()
+ instance = create_instance(json_body)
+ count = VM_BOOT_TIMEOUT // SLEEP # integer division so range() receives an int on Python 3
+ for n in range(count, -1, -1):
+ status = get_instance_status(nova_client, instance)
+ if status.lower() == "active":
+ return instance
+ elif status.lower() == "error":
+ log.error("The instance went to ERROR status.")
+ return None
+ time.sleep(SLEEP)
+ log.error("Timeout booting the instance.")
+ return None
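+
+
+# Illustrative sketch only (values are hypothetical): create_instance() passes
+# json_body straight to novaclient's servers.create(), so a minimal body is
+# expected to look like:
+#
+#   json_body = {'name': 'vnf-under-test',
+#                'image': image_id,
+#                'flavor': flavor_id}
+#   instance = create_instance_and_wait_for_active(json_body)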
+
+
+def attach_server_volume(server_id, volume_id, device=None): # pragma: no cover
+ try:
+ get_nova_client().volumes.create_server_volume(server_id, volume_id, device)
+ except Exception:
+ log.exception("Error [attach_server_volume(nova_client, '%s', '%s')]",
+ server_id, volume_id)
+ return False
+ else:
+ return True
+
+
+def delete_instance(nova_client, instance_id): # pragma: no cover
+ try:
+ nova_client.servers.force_delete(instance_id)
+ except Exception:
+ log.exception("Error [delete_instance(nova_client, '%s')]",
+ instance_id)
+ return False
+ else:
+ return True
+
+
+def remove_host_from_aggregate(nova_client, aggregate_name,
+ compute_host): # pragma: no cover
+ try:
+ aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+ nova_client.aggregates.remove_host(aggregate_id, compute_host)
+ except Exception:
+ log.exception("Error remove_host_from_aggregate(nova_client, %s, %s)",
+ aggregate_name, compute_host)
+ return False
+ else:
+ return True
+
+
+def remove_hosts_from_aggregate(nova_client,
+ aggregate_name): # pragma: no cover
+ aggregate_id = get_aggregate_id(nova_client, aggregate_name)
+ hosts = nova_client.aggregates.get(aggregate_id).hosts
+ assert(
+ all(remove_host_from_aggregate(nova_client, aggregate_name, host)
+ for host in hosts))
+
+
+def delete_aggregate(nova_client, aggregate_name): # pragma: no cover
+ try:
+ remove_hosts_from_aggregate(nova_client, aggregate_name)
+ nova_client.aggregates.delete(aggregate_name)
+ except Exception:
+ log.exception("Error [delete_aggregate(nova_client, %s)]",
+ aggregate_name)
+ return False
+ else:
+ return True
+
+
+def get_server_by_name(name): # pragma: no cover
+ try:
+ return get_nova_client().servers.list(search_opts={'name': name})[0]
+ except IndexError:
+        log.exception("No server found with name '%s'", name)
+ raise
+
+
+def create_flavor(name, ram, vcpus, disk, **kwargs): # pragma: no cover
+ try:
+ return get_nova_client().flavors.create(name, ram, vcpus, disk, **kwargs)
+ except Exception:
+        log.exception("Error [create_flavor(%s, %s, %s, %s, %s)]",
+                      name, ram, vcpus, disk, kwargs.get('is_public'))
+ return None
+
+
+def get_image_by_name(name): # pragma: no cover
+ images = get_nova_client().images.list()
+ try:
+ return next((a for a in images if a.name == name))
+ except StopIteration:
+ log.exception('No image matched')
+
+
+def get_flavor_id(nova_client, flavor_name): # pragma: no cover
+ flavors = nova_client.flavors.list(detailed=True)
+ flavor_id = ''
+ for f in flavors:
+ if f.name == flavor_name:
+ flavor_id = f.id
+ break
+ return flavor_id
+
+
+def get_flavor_by_name(name): # pragma: no cover
+ flavors = get_nova_client().flavors.list()
+ try:
+ return next((a for a in flavors if a.name == name))
+ except StopIteration:
+ log.exception('No flavor matched')
+
+
+def check_status(status, name, iterations, interval): # pragma: no cover
+ for i in range(iterations):
+ try:
+ server = get_server_by_name(name)
+ except IndexError:
+            log.error('Cannot find server %s', name)
+ raise
+
+ if server.status == status:
+ return True
+
+ time.sleep(interval)
+ return False
+
+
+def delete_flavor(flavor_id): # pragma: no cover
+ try:
+ get_nova_client().flavors.delete(flavor_id)
+ except Exception:
+ log.exception("Error [delete_flavor(nova_client, %s)]", flavor_id)
+ return False
+ else:
+ return True
+
+
+def delete_keypair(nova_client, key): # pragma: no cover
+ try:
+ nova_client.keypairs.delete(key=key)
+ return True
+ except Exception:
+ log.exception("Error [delete_keypair(nova_client)]")
+ return False
+
+
+# *********************************************
+# NEUTRON
+# *********************************************
+def get_network_id(neutron_client, network_name): # pragma: no cover
+ networks = neutron_client.list_networks()['networks']
+ return next((n['id'] for n in networks if n['name'] == network_name), None)
+
+
+def get_port_id_by_ip(neutron_client, ip_address): # pragma: no cover
+ ports = neutron_client.list_ports()['ports']
+ return next((i['id'] for i in ports for j in i.get(
+ 'fixed_ips') if j['ip_address'] == ip_address), None)
+
+
+def create_neutron_net(neutron_client, json_body): # pragma: no cover
+ try:
+ network = neutron_client.create_network(body=json_body)
+ return network['network']['id']
+ except Exception:
+        log.exception("Error [create_neutron_net(neutron_client)]")
+        raise Exception("operation error")
+
+
+def delete_neutron_net(neutron_client, network_id): # pragma: no cover
+ try:
+ neutron_client.delete_network(network_id)
+ return True
+ except Exception:
+ log.error("Error [delete_neutron_net(neutron_client, '%s')]" % network_id)
+ return False
+
+
+def create_neutron_subnet(neutron_client, json_body): # pragma: no cover
+ try:
+ subnet = neutron_client.create_subnet(body=json_body)
+ return subnet['subnets'][0]['id']
+ except Exception:
+        log.exception("Error [create_neutron_subnet(neutron_client)]")
+        raise Exception("operation error")
+
+
+def create_neutron_router(neutron_client, json_body): # pragma: no cover
+ try:
+ router = neutron_client.create_router(json_body)
+ return router['router']['id']
+ except Exception:
+        log.exception("Error [create_neutron_router(neutron_client)]")
+        raise Exception("operation error")
+
+
+def delete_neutron_router(neutron_client, router_id): # pragma: no cover
+ try:
+ neutron_client.delete_router(router=router_id)
+ return True
+ except Exception:
+ log.error("Error [delete_neutron_router(neutron_client, '%s')]" % router_id)
+ return False
+
+
+def remove_gateway_router(neutron_client, router_id): # pragma: no cover
+ try:
+ neutron_client.remove_gateway_router(router_id)
+ return True
+ except Exception:
+ log.error("Error [remove_gateway_router(neutron_client, '%s')]" % router_id)
+ return False
+
+
+def remove_interface_router(neutron_client, router_id, subnet_id,
+ **json_body): # pragma: no cover
+ json_body.update({"subnet_id": subnet_id})
+ try:
+ neutron_client.remove_interface_router(router=router_id,
+ body=json_body)
+ return True
+ except Exception:
+ log.error("Error [remove_interface_router(neutron_client, '%s', "
+ "'%s')]" % (router_id, subnet_id))
+ return False
+
+
+def create_floating_ip(neutron_client, extnet_id): # pragma: no cover
+ props = {'floating_network_id': extnet_id}
+ try:
+ ip_json = neutron_client.create_floatingip({'floatingip': props})
+ fip_addr = ip_json['floatingip']['floating_ip_address']
+ fip_id = ip_json['floatingip']['id']
+ except Exception:
+ log.error("Error [create_floating_ip(neutron_client)]")
+ return None
+ return {'fip_addr': fip_addr, 'fip_id': fip_id}
+
+
+def delete_floating_ip(nova_client, floatingip_id): # pragma: no cover
+ try:
+ nova_client.floating_ips.delete(floatingip_id)
+ return True
+ except Exception:
+ log.error("Error [delete_floating_ip(nova_client, '%s')]" % floatingip_id)
+ return False
+
+
+def get_security_groups(neutron_client): # pragma: no cover
+ try:
+ security_groups = neutron_client.list_security_groups()[
+ 'security_groups']
+ return security_groups
+ except Exception:
+ log.error("Error [get_security_groups(neutron_client)]")
+ return None
+
+
+def get_security_group_id(neutron_client, sg_name): # pragma: no cover
+ security_groups = get_security_groups(neutron_client)
+    sg_id = ''
+    for sg in security_groups:
+        if sg['name'] == sg_name:
+            sg_id = sg['id']
+            break
+    return sg_id
+
+
+def create_security_group(neutron_client, sg_name, sg_description): # pragma: no cover
+ json_body = {'security_group': {'name': sg_name,
+ 'description': sg_description}}
+ try:
+ secgroup = neutron_client.create_security_group(json_body)
+ return secgroup['security_group']
+ except Exception:
+ log.error("Error [create_security_group(neutron_client, '%s', "
+ "'%s')]" % (sg_name, sg_description))
+ return None
+
+
+def create_secgroup_rule(neutron_client, sg_id, direction, protocol,
+ port_range_min=None, port_range_max=None,
+ **json_body): # pragma: no cover
+    # We create a security group rule in 2 steps:
+    # 1 - we check the format and set the json body accordingly
+    # 2 - we call the neutron client to create the security group rule
+
+ # Format check
+ json_body.update({'security_group_rule': {'direction': direction,
+ 'security_group_id': sg_id, 'protocol': protocol}})
+ # parameters may be
+ # - both None => we do nothing
+ # - both Not None => we add them to the json description
+    # but one cannot be None if the other is not None
+ if (port_range_min is not None and port_range_max is not None):
+ # add port_range in json description
+ json_body['security_group_rule']['port_range_min'] = port_range_min
+ json_body['security_group_rule']['port_range_max'] = port_range_max
+ log.debug("Security_group format set (port range included)")
+ else:
+        # either both port range bounds are set to None => do nothing
+ # or one is set but not the other => log it and return False
+ if port_range_min is None and port_range_max is None:
+ log.debug("Security_group format set (no port range mentioned)")
+ else:
+            log.error("Bad security group rule format. "
+                      "One of the port range bounds is not properly set: "
+                      "range min: {}, "
+                      "range max: {}".format(port_range_min,
+                                             port_range_max))
+ return False
+
+ # Create security group using neutron client
+ try:
+ neutron_client.create_security_group_rule(json_body)
+ return True
+ except Exception:
+        log.exception("Failed to create_security_group_rule; "
+                      "the security group rule probably already exists")
+ return False
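+
+
+# For reference, a call such as create_secgroup_rule(neutron_client, sg_id,
+# 'ingress', 'tcp', '22', '22') builds the following json_body before handing
+# it to the neutron client (illustrative sketch, not exercised here):
+#
+#   {'security_group_rule': {'direction': 'ingress',
+#                            'security_group_id': sg_id,
+#                            'protocol': 'tcp',
+#                            'port_range_min': '22',
+#                            'port_range_max': '22'}}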
+
+
+def create_security_group_full(neutron_client,
+ sg_name, sg_description): # pragma: no cover
+ sg_id = get_security_group_id(neutron_client, sg_name)
+ if sg_id != '':
+ log.info("Using existing security group '%s'..." % sg_name)
+ else:
+ log.info("Creating security group '%s'..." % sg_name)
+ SECGROUP = create_security_group(neutron_client,
+ sg_name,
+ sg_description)
+ if not SECGROUP:
+ log.error("Failed to create the security group...")
+ return None
+
+ sg_id = SECGROUP['id']
+
+ log.debug("Security group '%s' with ID=%s created successfully."
+ % (SECGROUP['name'], sg_id))
+
+ log.debug("Adding ICMP rules in security group '%s'..."
+ % sg_name)
+ if not create_secgroup_rule(neutron_client, sg_id,
+ 'ingress', 'icmp'):
+ log.error("Failed to create the security group rule...")
+ return None
+
+ log.debug("Adding SSH rules in security group '%s'..."
+ % sg_name)
+ if not create_secgroup_rule(
+ neutron_client, sg_id, 'ingress', 'tcp', '22', '22'):
+ log.error("Failed to create the security group rule...")
+ return None
+
+ if not create_secgroup_rule(
+ neutron_client, sg_id, 'egress', 'tcp', '22', '22'):
+ log.error("Failed to create the security group rule...")
+ return None
+ return sg_id
+
+
+# *********************************************
+# GLANCE
+# *********************************************
+def get_image_id(glance_client, image_name): # pragma: no cover
+ images = glance_client.images.list()
+ return next((i.id for i in images if i.name == image_name), None)
+
+
+def create_image(glance_client, image_name, file_path, disk_format,
+ container_format, min_disk, min_ram, protected, tag,
+ public, **kwargs): # pragma: no cover
+ if not os.path.isfile(file_path):
+ log.error("Error: file %s does not exist." % file_path)
+ return None
+ try:
+ image_id = get_image_id(glance_client, image_name)
+ if image_id is not None:
+ log.info("Image %s already exists." % image_name)
+ else:
+ log.info("Creating image '%s' from '%s'...", image_name, file_path)
+
+ image = glance_client.images.create(name=image_name,
+ visibility=public,
+ disk_format=disk_format,
+ container_format=container_format,
+ min_disk=min_disk,
+ min_ram=min_ram,
+ tags=tag,
+ protected=protected,
+ **kwargs)
+ image_id = image.id
+            with open(file_path, 'rb') as image_data:
+ glance_client.images.upload(image_id, image_data)
+ return image_id
+ except Exception:
+        log.error("Error [create_image(glance_client, '%s', '%s', '%s')]",
+ image_name, file_path, public)
+ return None
+
+
+def delete_image(glance_client, image_id): # pragma: no cover
+ try:
+ glance_client.images.delete(image_id)
+
+ except Exception:
+        log.exception("Error [delete_image(glance_client, %s)]", image_id)
+ return False
+ else:
+ return True
+
+
+# *********************************************
+# CINDER
+# *********************************************
+def get_volume_id(volume_name): # pragma: no cover
+ volumes = get_cinder_client().volumes.list()
+ return next((v.id for v in volumes if v.name == volume_name), None)
+
+
+def create_volume(cinder_client, volume_name, volume_size,
+ volume_image=False): # pragma: no cover
+ try:
+ if volume_image:
+ volume = cinder_client.volumes.create(name=volume_name,
+ size=volume_size,
+ imageRef=volume_image)
+ else:
+ volume = cinder_client.volumes.create(name=volume_name,
+ size=volume_size)
+ return volume
+ except Exception:
+ log.exception("Error [create_volume(cinder_client, %s)]",
+ (volume_name, volume_size))
+ return None
+
+
+def delete_volume(cinder_client, volume_id, forced=False): # pragma: no cover
+ try:
+ if forced:
+ try:
+ cinder_client.volumes.detach(volume_id)
+            except Exception:
+ log.error(sys.exc_info()[0])
+ cinder_client.volumes.force_delete(volume_id)
+ else:
+            while True:
+                volume = get_cinder_client().volumes.get(volume_id)
+                if volume.status.lower() == 'available':
+                    break
+                time.sleep(1)
+ cinder_client.volumes.delete(volume_id)
+ return True
+ except Exception:
+ log.exception("Error [delete_volume(cinder_client, '%s')]" % volume_id)
+ return False
+
+
+def detach_volume(server_id, volume_id): # pragma: no cover
+ try:
+ get_nova_client().volumes.delete_server_volume(server_id, volume_id)
+ return True
+ except Exception:
+        log.exception("Error [detach_volume('%s', '%s')]",
+                      server_id, volume_id)
+ return False
diff --git a/vnftest/common/process.py b/vnftest/common/process.py
new file mode 100644
index 0000000..21a21ac
--- /dev/null
+++ b/vnftest/common/process.py
@@ -0,0 +1,140 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/process.py
+
+import logging
+import multiprocessing
+import signal
+import subprocess
+import time
+
+import os
+from oslo_utils import encodeutils
+
+from vnftest.common import exceptions
+from vnftest.common import utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+def check_if_process_failed(proc, timeout=1):
+ if proc is not None:
+ proc.join(timeout)
+ # Only abort if the process aborted
+ if proc.exitcode is not None and proc.exitcode > 0:
+ raise RuntimeError("{} exited with status {}".format(proc.name, proc.exitcode))
+
+
+def terminate_children(timeout=3):
+    current_process = multiprocessing.current_process()
+ active_children = multiprocessing.active_children()
+ if not active_children:
+ LOG.debug("no children to terminate")
+ return
+ for child in active_children:
+        LOG.debug("%s %s %s, child: %s %s", current_process.name, current_process.pid,
+                  os.getpid(), child, child.pid)
+ LOG.debug("joining %s", child)
+ child.join(timeout)
+ child.terminate()
+ active_children = multiprocessing.active_children()
+ if not active_children:
+ LOG.debug("no children to terminate")
+ for child in active_children:
+        LOG.debug("%s %s %s, after terminate child: %s %s", current_process.name,
+                  current_process.pid, os.getpid(), child, child.pid)
+
+
+def _additional_env_args(additional_env):
+ """Build arguments for adding additional environment vars with env"""
+ if additional_env is None:
+ return []
+ return ['env'] + ['%s=%s' % pair for pair in additional_env.items()]
+
+
+def _subprocess_setup():
+ # Python installs a SIGPIPE handler by default. This is usually not what
+ # non-Python subprocesses expect.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
+def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
+ env=None, preexec_fn=_subprocess_setup, close_fds=True):
+ return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
+ stderr=stderr, preexec_fn=preexec_fn,
+ close_fds=close_fds, env=env)
+
+
+def create_process(cmd, run_as_root=False, additional_env=None):
+ """Create a process object for the given command.
+
+ The return value will be a tuple of the process object and the
+ list of command arguments used to create it.
+ """
+ if not isinstance(cmd, list):
+ cmd = [cmd]
+ cmd = list(map(str, _additional_env_args(additional_env) + cmd))
+ if run_as_root:
+ # NOTE(ralonsoh): to handle a command executed as root, using
+ # a root wrapper, instead of using "sudo".
+ pass
+ LOG.debug("Running command: %s", cmd)
+ obj = subprocess_popen(cmd, shell=False, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ return obj, cmd
+
+
+def execute(cmd, process_input=None, additional_env=None,
+ check_exit_code=True, return_stderr=False, log_fail_as_error=True,
+ extra_ok_codes=None, run_as_root=False):
+ try:
+ if process_input is not None:
+ _process_input = encodeutils.to_utf8(process_input)
+ else:
+ _process_input = None
+
+ # NOTE(ralonsoh): to handle the execution of a command as root,
+ # using a root wrapper, instead of using "sudo".
+ obj, cmd = create_process(cmd, run_as_root=run_as_root,
+ additional_env=additional_env)
+ _stdout, _stderr = obj.communicate(_process_input)
+ returncode = obj.returncode
+ obj.stdin.close()
+ _stdout = utils.safe_decode_utf8(_stdout)
+ _stderr = utils.safe_decode_utf8(_stderr)
+
+ extra_ok_codes = extra_ok_codes or []
+ if returncode and returncode not in extra_ok_codes:
+ msg = ("Exit code: %(returncode)d; "
+ "Stdin: %(stdin)s; "
+ "Stdout: %(stdout)s; "
+ "Stderr: %(stderr)s") % {'returncode': returncode,
+ 'stdin': process_input or '',
+ 'stdout': _stdout,
+ 'stderr': _stderr}
+ if log_fail_as_error:
+ LOG.error(msg)
+ if check_exit_code:
+ raise exceptions.ProcessExecutionError(msg,
+ returncode=returncode)
+
+ finally:
+ # This appears to be necessary in order for the subprocess to clean up
+        # something between calls; without it, the second process hangs when two
+ # execute calls are made in a row.
+ time.sleep(0)
+
+ return (_stdout, _stderr) if return_stderr else _stdout
diff --git a/vnftest/common/rest_client.py b/vnftest/common/rest_client.py
new file mode 100644
index 0000000..23a108c
--- /dev/null
+++ b/vnftest/common/rest_client.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+import json
+import urllib2
+import requests
+
+
+def post(url, headers, data, logger):
+ return call(url, 'POST', headers, data, logger)
+
+
+def call(url, method, headers, data, logger):
+ data_json = json.dumps(data)
+ f = None
+ try:
+ req = urllib2.Request(url, data=data_json, headers=headers)
+ req.get_method = lambda: method
+ f = urllib2.urlopen(req)
+ return_code = f.code
+ response_body = f.read()
+ f.close()
+        if not response_body:
+ response_body = "{}"
+ response_body = json.loads(response_body)
+ result = {'return_code': return_code, 'body': response_body}
+ return result
+
+ except Exception as e:
+ message = "Cannot read content from {}, exception: {}".format(url, e)
+ logger.exception(message)
+ raise RuntimeError(message)
+ finally:
+ if f is not None:
+ f.close()
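+
+
+# Illustrative usage sketch (URL and payload are hypothetical):
+#
+#   headers = {'Content-Type': 'application/json'}
+#   result = post('http://127.0.0.1:8080/onapapi/resource', headers,
+#                 {'name': 'demo'}, logger)
+#   # result == {'return_code': 200, 'body': <parsed JSON response>}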
+
+
+def upload_file(url, headers, file, logger):
+ logger.debug("Upload file. URL: {}".format(url))
+ response = None
+ try:
+ response = requests.post(url, headers=headers, files=file)
+ return {'return_code': response.status_code, 'body': response.json()}
+ except Exception as e:
+ message = "Error while uploading file to {}, exception: {}".format(url, e)
+ logger.exception(message)
+ raise RuntimeError(message)
+ finally:
+ if response is not None:
+ response.close()
diff --git a/vnftest/common/task_template.py b/vnftest/common/task_template.py
new file mode 100755
index 0000000..7872aed
--- /dev/null
+++ b/vnftest/common/task_template.py
@@ -0,0 +1,78 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/task_template.py
+
+from __future__ import absolute_import
+import re
+import jinja2
+import jinja2.meta
+import yaml
+
+
+def finalize_for_yaml(elem):
+ """Render Jinja2 output specifically for YAML files"""
+    # Jinja2 by default converts None to 'None'; we can't allow this
+ # we could convert to empty string '', or we can convert to null, aka ~
+ if elem is None:
+ return '~'
+ # convert data structures to inline YAML
+ # match builtin types because we shouldn't be trying to render complex types
+ if isinstance(elem, (dict, list)):
+ # remove newlines because we are injecting back into YAML
+ # use block style for single line
+ return yaml.safe_dump(elem, default_flow_style=True).replace('\n', '')
+ return elem
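+
+
+# Expected behaviour of the finalize hook (illustrative):
+#
+#   >>> finalize_for_yaml(None)
+#   '~'
+#   >>> finalize_for_yaml({'a': 1})
+#   '{a: 1}'
+#   >>> finalize_for_yaml([1, 2])
+#   '[1, 2]'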
+
+
+class TaskTemplate(object):
+
+ @classmethod
+ def render(cls, task_template, **kwargs):
+ """Render jinja2 task template to Vnftest input task.
+
+ :param task_template: string that contains template
+ :param kwargs: Dict with template arguments
+        :returns: rendered template str
+ """
+
+ from six.moves import builtins
+
+ ast = jinja2.Environment().parse(task_template)
+ required_kwargs = jinja2.meta.find_undeclared_variables(ast)
+
+ missing = set(required_kwargs) - set(kwargs) - set(dir(builtins))
+ real_missing = [mis for mis in missing
+ if is_really_missing(mis, task_template)]
+
+ if real_missing:
+            multi_msg = "Please specify these template task arguments: %s"
+            single_msg = "Please specify the template task argument: %s"
+ raise TypeError((len(real_missing) > 1 and multi_msg or single_msg)
+ % ", ".join(real_missing))
+ return jinja2.Template(task_template, finalize=finalize_for_yaml).render(**kwargs)
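+
+
+# Rendering sketch (template and argument are hypothetical):
+#
+#   >>> TaskTemplate.render("host: {{ host_name }}", host_name="vnf-host")
+#   'host: vnf-host'
+#
+# Omitting host_name above would raise TypeError, since the variable has no
+# default and is reported as missing.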
+
+
+def is_really_missing(mis, task_template):
+ # Removing variables that have default values from
+ # missing. Construction that won't be properly
+    # checked is {% set x = x or 1}
+    if re.search(mis.join([r"{%\s*set\s+", r"\s*=\s*", r"[^\w]+"]),
+ task_template):
+ return False
+ # Also check for a default filter which can show up as
+ # a missing variable
+ if re.search(mis + r"\s*\|\s*default\(", task_template):
+ return False
+ return True
diff --git a/vnftest/common/template_format.py b/vnftest/common/template_format.py
new file mode 100644
index 0000000..5cfc2f2
--- /dev/null
+++ b/vnftest/common/template_format.py
@@ -0,0 +1,72 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/template_format.py
+
+from __future__ import absolute_import
+
+import yaml
+from oslo_serialization import jsonutils
+
+if hasattr(yaml, 'CSafeLoader'):
+ # make a dynamic subclass so we don't override global yaml Loader
+ yaml_loader = type('HeatYamlLoader', (yaml.CSafeLoader,), {})
+else:
+ yaml_loader = type('HeatYamlLoader', (yaml.SafeLoader,), {})
+
+if hasattr(yaml, 'CSafeDumper'):
+ yaml_dumper = yaml.CSafeDumper
+else:
+ yaml_dumper = yaml.SafeDumper
+
+
+# This breaks NetworkServiceTestCase YAML loading, because we need to convert to
+# native Python str() objects: we use Trex, and Trex has broken unicode handling.
+def _construct_yaml_str(self, node):
+ # Override the default string handling function
+ # to always return unicode objects
+ return self.construct_scalar(node)
+
+yaml_loader.add_constructor(u'tag:yaml.org,2002:str', _construct_yaml_str)
+# Unquoted dates like 2013-05-23 in yaml files get loaded as objects of type
+# datetime.data which causes problems in API layer when being processed by
+# openstack.common.jsonutils. Therefore, make unicode string out of timestamps
+# until jsonutils can handle dates.
+yaml_loader.add_constructor(u'tag:yaml.org,2002:timestamp',
+ _construct_yaml_str)
+
+
+def parse(tmpl_str):
+ """Takes a string and returns a dict containing the parsed structure.
+
+ This includes determination of whether the string is using the
+ JSON or YAML format.
+ """
+ if tmpl_str.startswith('{'):
+ tpl = jsonutils.loads(tmpl_str)
+ else:
+ try:
+ # we already use SafeLoader when constructing special Heat YAML loader class
+ tpl = yaml.load(tmpl_str, Loader=yaml_loader)
+ except yaml.YAMLError as yea:
+ raise ValueError(yea)
+ else:
+ if tpl is None:
+ tpl = {}
+ # Looking for supported version keys in the loaded template
+ if not ('HeatTemplateFormatVersion' in tpl or
+ 'heat_template_version' in tpl or
+ 'AWSTemplateFormatVersion' in tpl):
+ raise ValueError("Template format version not found.")
+ return tpl
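+
+
+# Both input formats are accepted (illustrative):
+#
+#   parse('{"heat_template_version": "2013-05-23"}')    # JSON branch
+#   parse('heat_template_version: 2013-05-23\n')        # YAML branch
+#
+# Both return a dict mapping 'heat_template_version' to the string
+# '2013-05-23'; the YAML value stays a string (not a date) because of the
+# timestamp constructor registered above.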
diff --git a/vnftest/common/utils.py b/vnftest/common/utils.py
new file mode 100644
index 0000000..e62b5db
--- /dev/null
+++ b/vnftest/common/utils.py
@@ -0,0 +1,399 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/utils.py
+
+import collections
+from contextlib import closing
+import datetime
+import errno
+import importlib
+import ipaddress
+import logging
+import os
+import random
+import socket
+import subprocess
+import sys
+
+import six
+from flask import jsonify
+from six.moves import configparser
+from oslo_serialization import jsonutils
+
+import vnftest
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+
+# Decorator for cli-args
+def cliargs(*args, **kwargs):
+ def _decorator(func):
+ func.__dict__.setdefault('arguments', []).insert(0, (args, kwargs))
+ return func
+ return _decorator
+
+
+def itersubclasses(cls, _seen=None):
+ """Generator over all subclasses of a given class in depth first order."""
+
+ if not isinstance(cls, type):
+ raise TypeError("itersubclasses must be called with "
+ "new-style classes, not %.100r" % cls)
+ _seen = _seen or set()
+ try:
+ subs = cls.__subclasses__()
+ except TypeError: # fails only when cls is type
+ subs = cls.__subclasses__(cls)
+ for sub in subs:
+ if sub not in _seen:
+ _seen.add(sub)
+ yield sub
+ for sub in itersubclasses(sub, _seen):
+ yield sub
+
+
+def import_modules_from_package(package):
+ """Import modules given a package name
+
+    :param package: Full package name. For example: vnftest.onap.runners
+ """
+ vnftest_root = os.path.dirname(os.path.dirname(vnftest.__file__))
+ path = os.path.join(vnftest_root, *package.split('.'))
+ for root, _, files in os.walk(path):
+ matches = (filename for filename in files if filename.endswith('.py')
+ and not filename.startswith('__'))
+ new_package = os.path.relpath(root, vnftest_root).replace(os.sep,
+ '.')
+ module_names = set(
+ '{}.{}'.format(new_package, filename.rsplit('.py', 1)[0])
+ for filename in matches)
+ # Find modules which haven't already been imported
+ missing_modules = module_names.difference(sys.modules)
+ logger.debug('Importing modules: %s', missing_modules)
+ for module_name in missing_modules:
+ try:
+ importlib.import_module(module_name)
+ except (ImportError, SyntaxError):
+ logger.exception('Unable to import module %s', module_name)
+
+
+def makedirs(d):
+ try:
+ os.makedirs(d)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def remove_file(path):
+ try:
+ os.remove(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+
+def execute_command(cmd):
+ exec_msg = "Executing command: '%s'" % cmd
+ logger.debug(exec_msg)
+
+ output = subprocess.check_output(cmd.split()).split(os.linesep)
+
+ return output
+
+
+def source_env(env_file):
+ p = subprocess.Popen(". %s; env" % env_file, stdout=subprocess.PIPE,
+ shell=True)
+ output = p.communicate()[0]
+ env = dict(line.split('=', 1) for line in output.splitlines() if '=' in line)
+ os.environ.update(env)
+ return env
+
+
+def read_json_from_file(path):
+ with open(path, 'r') as f:
+ j = f.read()
+ # don't use jsonutils.load() it conflicts with already decoded input
+ return jsonutils.loads(j)
+
+
+def write_json_to_file(path, data, mode='w'):
+ with open(path, mode) as f:
+ jsonutils.dump(data, f)
+
+
+def write_file(path, data, mode='w'):
+ with open(path, mode) as f:
+ f.write(data)
+
+
+def parse_ini_file(path):
+ parser = configparser.ConfigParser()
+
+ try:
+ files = parser.read(path)
+ except configparser.MissingSectionHeaderError:
+ logger.exception('invalid file type')
+ raise
+ else:
+ if not files:
+            raise RuntimeError('file does not exist: %s' % path)
+
+ try:
+ default = {k: v for k, v in parser.items('DEFAULT')}
+ except configparser.NoSectionError:
+ default = {}
+
+ config = dict(DEFAULT=default,
+ **{s: {k: v for k, v in parser.items(
+ s)} for s in parser.sections()})
+
+ return config
+
+
+def get_port_mac(sshclient, port):
+ cmd = "ifconfig |grep HWaddr |grep %s |awk '{print $5}' " % port
+ status, stdout, stderr = sshclient.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+ return stdout.rstrip()
+
+
+def get_port_ip(sshclient, port):
+ cmd = "ifconfig %s |grep 'inet addr' |awk '{print $2}' " \
+ "|cut -d ':' -f2 " % port
+ status, stdout, stderr = sshclient.execute(cmd)
+
+ if status:
+ raise RuntimeError(stderr)
+ return stdout.rstrip()
+
+
+def flatten_dict_key(data):
+ next_data = {}
+
+ # use list, because iterable is too generic
+ if not any(isinstance(v, (collections.Mapping, list))
+ for v in data.values()):
+ return data
+
+ for k, v in data.items():
+ if isinstance(v, collections.Mapping):
+ for n_k, n_v in v.items():
+ next_data["%s.%s" % (k, n_k)] = n_v
+ # use list because iterable is too generic
+ elif isinstance(v, collections.Iterable) and not isinstance(v, six.string_types):
+ for index, item in enumerate(v):
+ next_data["%s%d" % (k, index)] = item
+ else:
+ next_data[k] = v
+
+ return flatten_dict_key(next_data)
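+
+
+# Flattening sketch (hypothetical input): nested mappings become dotted keys
+# and sequences become indexed keys, e.g.
+#
+#   flatten_dict_key({'vnf': {'name': 'demo'}, 'ports': [80, 443]})
+#   # -> {'vnf.name': 'demo', 'ports0': 80, 'ports1': 443}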
+
+
+def translate_to_str(obj):
+ if isinstance(obj, collections.Mapping):
+ return {str(k): translate_to_str(v) for k, v in obj.items()}
+ elif isinstance(obj, list):
+ return [translate_to_str(ele) for ele in obj]
+ elif isinstance(obj, six.text_type):
+ return str(obj)
+ return obj
+
+
+def result_handler(status, data):
+ result = {
+ 'status': status,
+ 'result': data
+ }
+ return jsonify(result)
+
+
+def change_obj_to_dict(obj):
+ dic = {}
+ for k, v in vars(obj).items():
+ try:
+ vars(v)
+ except TypeError:
+ dic.update({k: v})
+ return dic
+
+
+def set_dict_value(dic, keys, value):
+ return_dic = dic
+
+ for key in keys.split('.'):
+ return_dic.setdefault(key, {})
+ if key == keys.split('.')[-1]:
+ return_dic[key] = value
+ else:
+ return_dic = return_dic[key]
+ return dic
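+
+
+# Sketch of the dotted-key behaviour (hypothetical values):
+#
+#   set_dict_value({}, 'outputs.vnf.status', 'ACTIVE')
+#   # -> {'outputs': {'vnf': {'status': 'ACTIVE'}}}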
+
+
+def get_free_port(ip):
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+ port = random.randint(5000, 10000)
+ while s.connect_ex((ip, port)) == 0:
+ port = random.randint(5000, 10000)
+ return port
+
+
+def mac_address_to_hex_list(mac):
+ octets = ["0x{:02x}".format(int(elem, 16)) for elem in mac.split(':')]
+ assert len(octets) == 6 and all(len(octet) == 4 for octet in octets)
+ return octets
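+
+
+# Example (illustrative): mac_address_to_hex_list('fa:16:3e:00:01:02')
+# returns ['0xfa', '0x16', '0x3e', '0x00', '0x01', '0x02'].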
+
+
+def safe_ip_address(ip_addr):
+    """Return an ip_address object, or None if the address is invalid."""
+ try:
+ return ipaddress.ip_address(six.text_type(ip_addr))
+ except ValueError:
+ logging.error("%s is not valid", ip_addr)
+ return None
+
+
+def get_ip_version(ip_addr):
+ """ get ip address version v6 or v4 """
+ try:
+ address = ipaddress.ip_address(six.text_type(ip_addr))
+ except ValueError:
+ logging.error("%s is not valid", ip_addr)
+ return None
+ else:
+ return address.version
+
+
+def ip_to_hex(ip_addr, separator=''):
+ try:
+ address = ipaddress.ip_address(six.text_type(ip_addr))
+ except ValueError:
+ logging.error("%s is not valid", ip_addr)
+ return ip_addr
+
+ if address.version != 4:
+ return ip_addr
+
+ if not separator:
+ return '{:08x}'.format(int(address))
+
+ return separator.join('{:02x}'.format(octet) for octet in address.packed)
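+
+
+# Example (illustrative, hypothetical address):
+#   ip_to_hex('192.168.10.1')                 # -> 'c0a80a01'
+#   ip_to_hex('192.168.10.1', separator='.')  # -> 'c0.a8.0a.01'
+# IPv6 and unparsable inputs are returned unchanged.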
+
+
+def try_int(s, *args):
+ """Convert to integer if possible."""
+ try:
+ return int(s)
+ except (TypeError, ValueError):
+ return args[0] if args else s
+
+
+class SocketTopology(dict):
+
+ @classmethod
+ def parse_cpuinfo(cls, cpuinfo):
+ socket_map = {}
+
+ lines = cpuinfo.splitlines()
+
+ core_details = []
+ core_lines = {}
+ for line in lines:
+ if line.strip():
+ name, value = line.split(":", 1)
+ core_lines[name.strip()] = try_int(value.strip())
+ else:
+ core_details.append(core_lines)
+ core_lines = {}
+
+ for core in core_details:
+ socket_map.setdefault(core["physical id"], {}).setdefault(
+ core["core id"], {})[core["processor"]] = (
+ core["processor"], core["core id"], core["physical id"])
+
+ return cls(socket_map)
+
+ def sockets(self):
+ return sorted(self.keys())
+
+ def cores(self):
+ return sorted(core for cores in self.values() for core in cores)
+
+ def processors(self):
+ return sorted(
+ proc for cores in self.values() for procs in cores.values() for
+ proc in procs)
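+
+
+# Parsing sketch for a hypothetical two-core /proc/cpuinfo (note the blank
+# line after every processor block, which is what closes each record):
+#
+#   cpuinfo = ("processor\t: 0\nphysical id\t: 0\ncore id\t: 0\n\n"
+#              "processor\t: 1\nphysical id\t: 0\ncore id\t: 1\n\n")
+#   topo = SocketTopology.parse_cpuinfo(cpuinfo)
+#   topo.sockets()     # -> [0]
+#   topo.cores()       # -> [0, 1]
+#   topo.processors()  # -> [0, 1]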
+
+
+def config_to_dict(config):
+ return {section: dict(config.items(section)) for section in
+ config.sections()}
+
+
+def validate_non_string_sequence(value, default=None, raise_exc=None):
+ # NOTE(ralonsoh): refactor this function to check if raise_exc is an
+ # Exception. Remove duplicate code, this function is duplicated in this
+ # repository.
+ if isinstance(value, collections.Sequence) and not isinstance(value, six.string_types):
+ return value
+ if raise_exc:
+ raise raise_exc # pylint: disable=raising-bad-type
+ return default
+
+
+def join_non_strings(separator, *non_strings):
+ try:
+ non_strings = validate_non_string_sequence(non_strings[0], raise_exc=RuntimeError)
+ except (IndexError, RuntimeError):
+ pass
+ return str(separator).join(str(non_string) for non_string in non_strings)
+
+
+def safe_decode_utf8(s):
+ """Safe decode a str from UTF"""
+ if six.PY3 and isinstance(s, bytes):
+ return s.decode('utf-8', 'surrogateescape')
+ return s
+
+
+class ErrorClass(object):
+
+ def __init__(self, *args, **kwargs):
+ if 'test' not in kwargs:
+ raise RuntimeError
+
+ def __getattr__(self, item):
+ raise AttributeError
+
+
+class Timer(object):
+ def __init__(self):
+ super(Timer, self).__init__()
+ self.start = self.delta = None
+
+ def __enter__(self):
+ self.start = datetime.datetime.now()
+ return self
+
+ def __exit__(self, *_):
+ self.delta = datetime.datetime.now() - self.start
+
+ def __getattr__(self, item):
+ return getattr(self.delta, item)
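+
+
+# Usage sketch: the timer delegates attribute access to the measured
+# datetime.timedelta once the block exits.
+#
+#   with Timer() as t:
+#       do_work()            # hypothetical callable
+#   elapsed = t.total_seconds()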
+
diff --git a/vnftest/common/yaml_loader.py b/vnftest/common/yaml_loader.py
new file mode 100644
index 0000000..4f93e62
--- /dev/null
+++ b/vnftest/common/yaml_loader.py
@@ -0,0 +1,35 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/common/yaml_loader.py
+
+from __future__ import absolute_import
+
+import yaml
+
+
+if hasattr(yaml, 'CSafeLoader'):
+ # make a dynamic subclass so we don't override global yaml Loader
+ yaml_loader = type('CustomLoader', (yaml.CSafeLoader,), {})
+else:
+ yaml_loader = type('CustomLoader', (yaml.SafeLoader,), {})
+
+if hasattr(yaml, 'CSafeDumper'):
+ yaml_dumper = yaml.CSafeDumper
+else:
+ yaml_dumper = yaml.SafeDumper
+
+
+def yaml_load(tmpl_str):
+ return yaml.load(tmpl_str, Loader=yaml_loader)
diff --git a/vnftest/dispatcher/__init__.py b/vnftest/dispatcher/__init__.py
new file mode 100644
index 0000000..232233f
--- /dev/null
+++ b/vnftest/dispatcher/__init__.py
@@ -0,0 +1,30 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/dispatcher/__init__.py
+
+from __future__ import absolute_import
+from oslo_config import cfg
+
+import vnftest.common.utils as utils
+
+utils.import_modules_from_package("vnftest.dispatcher")
+
+CONF = cfg.CONF
+OPTS = [
+ cfg.StrOpt('dispatcher',
+ default='file',
+ help='Dispatcher to store data.'),
+]
+CONF.register_opts(OPTS)
diff --git a/vnftest/dispatcher/base.py b/vnftest/dispatcher/base.py
new file mode 100644
index 0000000..133b792
--- /dev/null
+++ b/vnftest/dispatcher/base.py
@@ -0,0 +1,50 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/dispatcher/base.py
+
+from __future__ import absolute_import
+import abc
+import six
+
+import vnftest.common.utils as utils
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Base(object):
+
+ def __init__(self, conf):
+ self.conf = conf
+
+ @staticmethod
+ def get_cls(dispatcher_type):
+ """Return class of specified type."""
+ for dispatcher in utils.itersubclasses(Base):
+ if dispatcher_type == dispatcher.__dispatcher_type__:
+ return dispatcher
+ raise RuntimeError("No such dispatcher_type %s" % dispatcher_type)
+
+ @staticmethod
+ def get(config):
+ """Returns instance of a dispatcher for dispatcher type.
+ """
+ list_dispatcher = \
+ [Base.get_cls(out_type.capitalize())(config)
+ for out_type in config['DEFAULT']['dispatcher']]
+
+ return list_dispatcher
+
+ @abc.abstractmethod
+ def flush_result_data(self, data):
+ """Flush result data into permanent storage media interface."""
diff --git a/vnftest/dispatcher/file.py b/vnftest/dispatcher/file.py
new file mode 100644
index 0000000..83d0fee
--- /dev/null
+++ b/vnftest/dispatcher/file.py
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/dispatcher/file.py
+
+from __future__ import absolute_import
+
+from vnftest.dispatcher.base import Base as DispatchBase
+from vnftest.common import constants as consts
+from vnftest.common import utils
+
+
+class FileDispatcher(DispatchBase):
+ """Dispatcher class for recording data to a file.
+ """
+
+ __dispatcher_type__ = "File"
+
+ def __init__(self, conf):
+ super(FileDispatcher, self).__init__(conf)
+ self.target = conf['dispatcher_file'].get('file_path',
+ consts.DEFAULT_OUTPUT_FILE)
+
+ def flush_result_data(self, data):
+ utils.write_json_to_file(self.target, data)
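+
+
+# Configuration sketch (paths are hypothetical): DispatchBase.get()
+# instantiates one dispatcher per entry listed under DEFAULT/dispatcher, so
+#
+#   conf = {'DEFAULT': {'dispatcher': ['file']},
+#           'dispatcher_file': {'file_path': '/tmp/vnftest.out'}}
+#
+# would yield a single FileDispatcher that writes results to /tmp/vnftest.out.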
diff --git a/vnftest/dispatcher/http.py b/vnftest/dispatcher/http.py
new file mode 100644
index 0000000..da66c90
--- /dev/null
+++ b/vnftest/dispatcher/http.py
@@ -0,0 +1,94 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/dispatcher/http.py
+
+from __future__ import absolute_import
+
+import logging
+import os
+from datetime import datetime
+
+from oslo_serialization import jsonutils
+import requests
+
+from vnftest.dispatcher.base import Base as DispatchBase
+
+LOG = logging.getLogger(__name__)
+
+
+class HttpDispatcher(DispatchBase):
+ """Dispatcher class for posting data into a http target.
+ """
+
+ __dispatcher_type__ = "Http"
+
+ def __init__(self, conf):
+ super(HttpDispatcher, self).__init__(conf)
+ http_conf = conf['dispatcher_http']
+ self.headers = {'Content-type': 'application/json'}
+ self.timeout = int(http_conf.get('timeout', 5))
+ self.target = http_conf.get('target', 'http://127.0.0.1:8000/results')
+
+ def flush_result_data(self, data):
+ if self.target == '':
+ # if the target was not set, do not do anything
+            LOG.error('Dispatcher target was not set, no data will '
+                      'be posted.')
+ return
+
+ result = data['result']
+ self.info = result['info']
+ self.task_id = result['task_id']
+ self.criteria = result['criteria']
+ testcases = result['testcases']
+
+ for case, data in testcases.items():
+ self._upload_case_result(case, data)
+
+ def _upload_case_result(self, case, data):
+ try:
+ step_data = data.get('tc_data', [])[0]
+ except IndexError:
+ current_time = datetime.now()
+ else:
+ timestamp = float(step_data.get('timestamp', 0.0))
+ current_time = datetime.fromtimestamp(timestamp)
+
+ result = {
+ "project_name": "vnftest",
+ "case_name": case,
+ "description": "vnftest ci step status",
+ "step": self.info.get('deploy_step'),
+ "version": self.info.get('version'),
+ "pod_name": self.info.get('pod_name'),
+ "installer": self.info.get('installer'),
+ "build_tag": os.environ.get('BUILD_TAG'),
+ "criteria": data.get('criteria'),
+ "start_date": current_time.strftime('%Y-%m-%d %H:%M:%S'),
+ "stop_date": current_time.strftime('%Y-%m-%d %H:%M:%S'),
+ "trust_indicator": "",
+ "details": ""
+ }
+
+ try:
+ LOG.debug('Test result : %s', result)
+ res = requests.post(self.target,
+ data=jsonutils.dump_as_bytes(result),
+ headers=self.headers,
+ timeout=self.timeout)
+ LOG.debug('Test result posting finished with status code'
+ ' %d.' % res.status_code)
+ except Exception as err:
+ LOG.exception('Failed to record result data: %s', err)
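+
+
+# Configuration sketch (target URL is hypothetical):
+#
+#   conf = {'dispatcher_http': {'timeout': '10',
+#                               'target': 'http://results.example.com/api'}}
+#
+# flush_result_data() then expects data['result'] to carry 'info', 'task_id',
+# 'criteria' and a 'testcases' mapping, and posts one record per test case.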
diff --git a/vnftest/main.py b/vnftest/main.py
new file mode 100755
index 0000000..308867c
--- /dev/null
+++ b/vnftest/main.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/tests/main.py
+""" vnftest - command line tool for managing benchmarks
+
+ Example invocation:
+ $ vnftest task start samples/ping.yaml
+
+ Servers are the same as VMs (Nova calls them servers in the API)
+
+ Many tests use a client/server architecture. A test client is configured
+    to use a specific test server, e.g. via an IP address. This is true, for
+    example, for iperf. In some cases the test server is included in the kernel
+ (ping, pktgen) and no additional software is needed on the server. In other
+ cases (iperf) a server process needs to be installed and started.
+
+ One server is required to host the test client program (such as ping or
+ iperf). In the task file this server is called host.
+
+ A server can be the _target_ of a test client (think ping destination
+ argument). A target server is optional but needed in most test steps.
+ In the task file this server is called target. This is probably the same
+ as DUT in existing terminology.
+
+ Existing terminology:
+ https://www.ietf.org/rfc/rfc1242.txt (throughput/latency)
+ https://www.ietf.org/rfc/rfc2285.txt (DUT/SUT)
+
+ New terminology:
+ NFV TST
+
+"""
+from __future__ import absolute_import
+import sys
+
+from vnftest.cmd.cli import VnftestCLI
+
+
+def main():
+ """vnftest main"""
+ VnftestCLI().main(sys.argv[1:])
+
+if __name__ == '__main__':
+ main()
diff --git a/vnftest/onap/__init__.py b/vnftest/onap/__init__.py
new file mode 100644
index 0000000..7382128
--- /dev/null
+++ b/vnftest/onap/__init__.py
@@ -0,0 +1,20 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+from __future__ import absolute_import
+import vnftest.common.utils as utils
+
+utils.import_modules_from_package("vnftest.onap.contexts")
+utils.import_modules_from_package("vnftest.onap.runners")
+utils.import_modules_from_package("vnftest.onap.steps")
diff --git a/vnftest/onap/contexts/__init__.py b/vnftest/onap/contexts/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/onap/contexts/__init__.py
diff --git a/vnftest/onap/contexts/base.py b/vnftest/onap/contexts/base.py
new file mode 100644
index 0000000..03c3e1f
--- /dev/null
+++ b/vnftest/onap/contexts/base.py
@@ -0,0 +1,64 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+import abc
+import six
+import vnftest.common.utils as utils
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Context(object):
+ """Class that represents a context in the logical model"""
+ list = []
+
+ @staticmethod
+ def split_name(name, sep='.'):
+ try:
+ name_iter = iter(name.split(sep))
+ except AttributeError:
+ # name is not a string
+ return None, None
+ return next(name_iter), next(name_iter, None)
+
+ def __init__(self):
+ Context.list.append(self)
+
+ @abc.abstractmethod
+ def init(self, attrs):
+ """Initiate context."""
+
+ @staticmethod
+ def get_cls(context_type):
+ """Return class of specified type."""
+ for context in utils.itersubclasses(Context):
+ if context_type == context.__context_type__:
+ return context
+ raise RuntimeError("No such context_type %s" % context_type)
+
+ @staticmethod
+ def get(context_type):
+ """Returns instance of a context for context type.
+ """
+ return Context.get_cls(context_type)()
+
+ def _delete_context(self):
+ Context.list.remove(self)
+
+ @abc.abstractmethod
+ def deploy(self):
+ """Deploy context."""
+
+ @abc.abstractmethod
+ def undeploy(self):
+ """Undeploy context."""
+ self._delete_context()
diff --git a/vnftest/onap/contexts/csar.py b/vnftest/onap/contexts/csar.py
new file mode 100644
index 0000000..8d89467
--- /dev/null
+++ b/vnftest/onap/contexts/csar.py
@@ -0,0 +1,44 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+import logging
+from vnftest.onap.contexts.base import Context
+
+LOG = logging.getLogger(__name__)
+
+
+class CSARContext(Context):
+    """Class that handles SDC info"""
+
+ __context_type__ = "CSAR"
+
+ def __init__(self):
+ self.csar_name = None
+ self.csar_id = None
+ self.csar_package_location = None
+ super(CSARContext, self).__init__()
+
+ def init(self, attrs):
+ """initializes itself from the supplied arguments"""
+ self.csar_name = attrs.get("csar_name")
+ self.csar_id = attrs.get("csar_id")
+ self.csar_package_location = attrs.get("csar_package_location")
+
+ def deploy(self):
+ """no need to deploy"""
+ pass
+
+ def undeploy(self):
+ """no need to undeploy"""
+ super(CSARContext, self).undeploy()
diff --git a/vnftest/onap/contexts/dummy.py b/vnftest/onap/contexts/dummy.py
new file mode 100644
index 0000000..b61d55e
--- /dev/null
+++ b/vnftest/onap/contexts/dummy.py
@@ -0,0 +1,41 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+from __future__ import absolute_import
+import logging
+
+from vnftest.onap.contexts.base import Context
+
+
+LOG = logging.getLogger(__name__)
+
+
+class DummyContext(Context):
+    """Class that handles dummy info"""
+
+ __context_type__ = "Dummy"
+
+ def __init__(self):
+ super(DummyContext, self).__init__()
+
+ def init(self, attrs):
+ pass
+
+ def deploy(self):
+ """don't need to deploy"""
+ pass
+
+ def undeploy(self):
+ """don't need to undeploy"""
+ super(DummyContext, self).undeploy()
diff --git a/vnftest/onap/core/__init__.py b/vnftest/onap/core/__init__.py
new file mode 100644
index 0000000..c204f9d
--- /dev/null
+++ b/vnftest/onap/core/__init__.py
@@ -0,0 +1,45 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/init.py
+"""
+Vnftest core.
+"""
+
+from __future__ import print_function
+
+
+class Param(object):
+ """This class converts a parameter dictionary to an object."""
+
+ def __init__(self, kwargs):
+ # list
+ self.inputfile = kwargs.get('inputfile')
+ self.task_args = kwargs.get('task-args')
+ self.task_args_file = kwargs.get('task-args-file')
+ self.keep_deploy = kwargs.get('keep-deploy')
+ self.parse_only = kwargs.get('parse-only')
+ self.output_file = kwargs.get('output-file', '/tmp/vnftest.out')
+ self.suite = kwargs.get('suite')
+ self.task_id = kwargs.get('task_id')
+ self.yaml_name = kwargs.get('yaml_name')
+
+ # list
+ self.input_file = kwargs.get('input_file')
+
+ # list
+ self.casename = kwargs.get('casename')
+
+ # list
+ self.type = kwargs.get('type')
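
A small sketch (not part of the patch) of how Param flattens the CLI keyword dictionary: hyphenated argument names such as 'task-args' become underscore attributes, and missing keys default to None. The task file path is a hypothetical placeholder.

from vnftest.onap.core import Param

kwargs = {
    'inputfile': ['tests/onap/test_cases/sample_task.yaml'],  # hypothetical task file
    'task-args': None,
    'output-file': '/tmp/vnftest.out',
}
param = Param(kwargs)
print(param.inputfile, param.task_args, param.output_file)
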
diff --git a/vnftest/onap/core/plugin.py b/vnftest/onap/core/plugin.py
new file mode 100644
index 0000000..90b3a7e
--- /dev/null
+++ b/vnftest/onap/core/plugin.py
@@ -0,0 +1,175 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/plugin.py
+""" Handler for vnftest command 'plugin' """
+
+from __future__ import print_function
+from __future__ import absolute_import
+import os
+import sys
+import time
+import logging
+import pkg_resources
+import vnftest.ssh as ssh
+
+from vnftest.common.task_template import TaskTemplate
+from vnftest.common.yaml_loader import yaml_load
+
+LOG = logging.getLogger(__name__)
+
+
+class Plugin(object):
+ """Plugin commands.
+
+ Set of commands to manage plugins.
+ """
+
+ def install(self, args):
+ """Install a plugin."""
+
+ total_start_time = time.time()
+ parser = PluginParser(args.input_file[0])
+
+ plugins, deployment = parser.parse_plugin()
+ plugin_name = plugins.get("name")
+ LOG.info("Installing plugin: %s", plugin_name)
+
+ LOG.debug("Executing _install_setup()")
+ self._install_setup(plugin_name, deployment)
+
+ LOG.debug("Executing _run()")
+ self._run(plugin_name)
+
+ total_end_time = time.time()
+ LOG.info("Total finished in %d secs",
+ total_end_time - total_start_time)
+
+ LOG.info("Plugin %s Done, exiting", plugin_name)
+
+ def remove(self, args):
+ """Remove a plugin."""
+
+ total_start_time = time.time()
+ parser = PluginParser(args.input_file[0])
+
+ plugins, deployment = parser.parse_plugin()
+ plugin_name = plugins.get("name")
+ print("Removing plugin: %s" % plugin_name)
+
+ LOG.info("Executing _remove_setup()")
+ self._remove_setup(plugin_name, deployment)
+
+ LOG.info("Executing _run()")
+ self._run(plugin_name)
+
+ total_end_time = time.time()
+ LOG.info("total finished in %d secs",
+ total_end_time - total_start_time)
+
+ print("Done, exiting")
+
+ def _install_setup(self, plugin_name, deployment):
+ """Deployment environment setup"""
+ target_script = plugin_name + ".bash"
+ self.script = pkg_resources.resource_filename(
+ 'vnftest.resources', 'scripts/install/' + target_script)
+
+ deployment_ip = deployment.get("ip", None)
+
+ if deployment_ip == "local":
+ self.client = ssh.SSH.from_node(deployment, overrides={
+ # host can't be None, fail if no JUMP_HOST_IP
+ 'ip': os.environ["JUMP_HOST_IP"],
+ })
+ else:
+ self.client = ssh.SSH.from_node(deployment)
+ self.client.wait(timeout=600)
+
+ # copy script to host
+ remotepath = '~/%s.sh' % plugin_name
+
+ LOG.info("copying script to host: %s", remotepath)
+ self.client._put_file_shell(self.script, remotepath)
+
+ def _remove_setup(self, plugin_name, deployment):
+ """Deployment environment setup"""
+ target_script = plugin_name + ".bash"
+ self.script = pkg_resources.resource_filename(
+ 'vnftest.resources', 'scripts/remove/' + target_script)
+
+ deployment_ip = deployment.get("ip", None)
+
+ if deployment_ip == "local":
+ self.client = ssh.SSH.from_node(deployment, overrides={
+ # host can't be None, fail if no JUMP_HOST_IP
+ 'ip': os.environ["JUMP_HOST_IP"],
+ })
+ else:
+ self.client = ssh.SSH.from_node(deployment)
+ self.client.wait(timeout=600)
+
+ # copy script to host
+ remotepath = '~/%s.sh' % plugin_name
+
+ LOG.info("copying script to host: %s", remotepath)
+ self.client._put_file_shell(self.script, remotepath)
+
+ def _run(self, plugin_name):
+ """Run installation script """
+ cmd = "sudo bash %s" % plugin_name + ".sh"
+
+ LOG.info("Executing command: %s", cmd)
+ self.client.execute(cmd)
+
+
+class PluginParser(object):
+ """Parser for plugin configration files in yaml format"""
+
+ def __init__(self, path):
+ self.path = path
+
+ def parse_plugin(self):
+ """parses the plugin file and return a plugins instance
+ and a deployment instance
+ """
+
+ print("Parsing plugin config:", self.path)
+
+ try:
+ kw = {}
+ with open(self.path) as f:
+ try:
+ input_plugin = f.read()
+ rendered_plugin = TaskTemplate.render(input_plugin, **kw)
+ except Exception as e:
+ print("Failed to render template:\n%(plugin)s\n%(err)s\n"
+ % {"plugin": input_plugin, "err": e})
+ raise e
+ print("Input plugin is:\n%s\n" % rendered_plugin)
+
+ cfg = yaml_load(rendered_plugin)
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ self._check_schema(cfg["schema"], "plugin")
+
+ return cfg["plugins"], cfg["deployment"]
+
+ def _check_schema(self, cfg_schema, schema_type):
+ """Check if configration file is using the correct schema type"""
+
+ if cfg_schema != "vnftest:" + schema_type + ":0.1":
+ sys.exit("error: file %s has unknown schema %s" % (self.path,
+ cfg_schema))
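
For reference (not part of the patch), a minimal plugin configuration accepted by PluginParser.parse_plugin() above: the schema string and the top-level schema/plugins/deployment keys are required by the parser, while the plugin name and the deployment credentials are hypothetical.

import tempfile

from vnftest.onap.core.plugin import PluginParser

PLUGIN_YAML = """\
schema: "vnftest:plugin:0.1"
plugins:
  name: sample_plugin        # hypothetical plugin name
deployment:
  ip: 192.168.0.10           # hypothetical target host
  user: ubuntu               # hypothetical SSH credentials
  password: secret
"""

with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as cfg_file:
    cfg_file.write(PLUGIN_YAML)

plugins, deployment = PluginParser(cfg_file.name).parse_plugin()
print(plugins["name"], deployment["ip"])
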
diff --git a/vnftest/onap/core/report.py b/vnftest/onap/core/report.py
new file mode 100644
index 0000000..bb791dc
--- /dev/null
+++ b/vnftest/onap/core/report.py
@@ -0,0 +1,128 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/report.py
+""" Handler for vnftest command 'report' """
+
+from __future__ import absolute_import
+from __future__ import print_function
+
+import ast
+import re
+import uuid
+
+from django.conf import settings
+from django.template import Context
+from django.template import Template
+from oslo_utils import encodeutils
+from oslo_utils import uuidutils
+
+from vnftest.common import constants as consts
+from vnftest.common.html_template import template
+from vnftest.common.utils import cliargs
+
+settings.configure()
+
+
+class Report(object):
+ """Report commands.
+
+ Set of commands to manage benchmark tasks.
+ """
+
+ def __init__(self):
+ self.Timestamp = []
+ self.yaml_name = ""
+ self.task_id = ""
+
+ def _validate(self, yaml_name, task_id):
+ if re.match("^[a-z0-9_-]+$", yaml_name):
+ self.yaml_name = yaml_name
+ else:
+ raise ValueError("invalid yaml_name", yaml_name)
+
+ if uuidutils.is_uuid_like(task_id):
+ task_id = '{' + task_id + '}'
+ task_uuid = (uuid.UUID(task_id))
+ self.task_id = task_uuid
+ else:
+ raise ValueError("invalid task_id", task_id)
+
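+ # NOTE: generate() below relies on _get_fieldkeys() and _get_tasks(); their
+ # implementations are commented out because no metrics backend ("influx") is
+ # wired into vnftest yet, so report generation cannot run end to end.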
+ # def _get_fieldkeys(self):
+ # fieldkeys_cmd = "show field keys from \"%s\""
+ # fieldkeys_query = fieldkeys_cmd % (self.yaml_name)
+ # query_exec = influx.query(fieldkeys_query)
+ # if query_exec:
+ # return query_exec
+ # else:
+ # raise KeyError("Task ID or Test case not found..")
+
+ #def _get_tasks(self):
+ # task_cmd = "select * from \"%s\" where task_id= '%s'"
+ # task_query = task_cmd % (self.yaml_name, self.task_id)
+ # query_exec = influx.query(task_query)
+ # if query_exec:
+ # return query_exec
+ # else:
+ # raise KeyError("Task ID or Test case not found..")
+
+ @cliargs("task_id", type=str, help=" task id", nargs=1)
+ @cliargs("yaml_name", type=str, help=" Yaml file Name", nargs=1)
+ def generate(self, args):
+ """Start report generation."""
+ self._validate(args.yaml_name[0], args.task_id[0])
+
+ self.db_fieldkeys = self._get_fieldkeys()
+
+ self.db_task = self._get_tasks()
+
+ field_keys = []
+ temp_series = []
+ table_vals = {}
+
+ field_keys = [encodeutils.to_utf8(field['fieldKey'])
+ for field in self.db_fieldkeys]
+
+ for key in field_keys:
+ self.Timestamp = []
+ series = {}
+ values = []
+ for task in self.db_task:
+ task_time = encodeutils.to_utf8(task['time'])
+ if not isinstance(task_time, str):
+ task_time = str(task_time, 'utf8')
+ key = str(key, 'utf8')
+ task_time = task_time[11:]
+ head, sep, tail = task_time.partition('.')
+ task_time = head + "." + tail[:6]
+ self.Timestamp.append(task_time)
+ if isinstance(task[key], float) is True:
+ values.append(task[key])
+ else:
+ values.append(ast.literal_eval(task[key]))
+ table_vals['Timestamp'] = self.Timestamp
+ table_vals[key] = values
+ series['name'] = key
+ series['data'] = values
+ temp_series.append(series)
+
+ Template_html = Template(template)
+ Context_html = Context({"series": temp_series,
+ "Timestamp": self.Timestamp,
+ "task_id": self.task_id,
+ "table": table_vals})
+ with open(consts.DEFAULT_HTML_FILE, "w") as file_open:
+ file_open.write(Template_html.render(Context_html))
+
+ print("Report generated. View /tmp/vnftest.htm")
diff --git a/vnftest/onap/core/runner.py b/vnftest/onap/core/runner.py
new file mode 100644
index 0000000..32ec6e9
--- /dev/null
+++ b/vnftest/onap/core/runner.py
@@ -0,0 +1,44 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/runner.py
+""" Handler for vnftest command 'runner' """
+
+from __future__ import absolute_import
+
+import prettytable
+
+from vnftest.onap.runners.base import Runner
+
+
+class Runners(object): # pragma: no cover
+ """Runner commands.
+
+ Set of commands to discover and display runner types.
+ """
+
+ def list_all(self, *args):
+ """List existing runner types"""
+ types = Runner.get_types()
+ runner_table = prettytable.PrettyTable(['Type', 'Description'])
+ runner_table.align = 'l'
+ for rtype in types:
+ runner_table.add_row([rtype.__execution_type__,
+ rtype.__doc__.split("\n")[0]])
+ print(runner_table)
+
+ def show(self, args):
+ """Show details of a specific runner type"""
+ rtype = Runner.get_cls(args.type[0])
+ print(rtype.__doc__)
diff --git a/vnftest/onap/core/step.py b/vnftest/onap/core/step.py
new file mode 100644
index 0000000..4411780
--- /dev/null
+++ b/vnftest/onap/core/step.py
@@ -0,0 +1,44 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/step.py
+
+""" Handler for vnftest command 'step' """
+
+from __future__ import absolute_import
+import prettytable
+
+from vnftest.onap.steps.base import Step
+
+
+class Steps(object): # pragma: no cover
+ """Step commands.
+
+ Set of commands to discover and display step types.
+ """
+
+ def list_all(self, *args):
+ """List existing step types"""
+ types = Step.get_types()
+ step_table = prettytable.PrettyTable(['Type', 'Description'])
+ step_table.align = 'l'
+ for step_class in types:
+ step_table.add_row([step_class.get_step_type(),
+ step_class.get_description()])
+ print(step_table)
+
+ def show(self, args):
+ """Show details of a specific step type"""
+ stype = Step.get_cls(args.type[0])
+ print(stype.__doc__)
diff --git a/vnftest/onap/core/task.py b/vnftest/onap/core/task.py
new file mode 100644
index 0000000..2d3033a
--- /dev/null
+++ b/vnftest/onap/core/task.py
@@ -0,0 +1,605 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/task.py
+
+""" Handler for vnftest command 'task' """
+
+from __future__ import absolute_import
+from __future__ import print_function
+import sys
+import os
+from collections import OrderedDict
+
+import yaml
+import atexit
+import ipaddress
+import time
+import logging
+import uuid
+import collections
+
+from six.moves import filter
+from jinja2 import Environment
+
+from vnftest.onap.contexts.base import Context
+from vnftest.onap.contexts.csar import CSARContext
+from vnftest.onap.runners import base as base_runner
+from vnftest.onap.runners.duration import DurationRunner
+from vnftest.common.constants import CONF_FILE
+from vnftest.common.yaml_loader import yaml_load
+from vnftest.dispatcher.base import Base as DispatcherBase
+from vnftest.common.task_template import TaskTemplate
+from vnftest.common import utils
+from vnftest.common import constants
+from vnftest.common.html_template import report_template
+
+output_file_default = "/tmp/vnftest.out"
+test_cases_dir_default = "tests/onap/test_cases/"
+LOG = logging.getLogger(__name__)
+
+
+class Task(object): # pragma: no cover
+ """Task commands.
+
+ Set of commands to manage benchmark tasks.
+ """
+
+ def __init__(self):
+ self.context = None
+ self.outputs = {}
+
+ def _set_dispatchers(self, output_config):
+ dispatchers = output_config.get('DEFAULT', {}).get('dispatcher',
+ 'file')
+ out_types = [s.strip() for s in dispatchers.split(',')]
+ output_config['DEFAULT']['dispatcher'] = out_types
+
+ def start(self, args, **kwargs):
+ """Start a vnf step."""
+
+ atexit.register(self.atexit_handler)
+
+ task_id = getattr(args, 'task_id')
+ self.task_id = task_id if task_id else str(uuid.uuid4())
+
+ self._set_log()
+
+ try:
+ output_config = utils.parse_ini_file(CONF_FILE)
+ except Exception:
+ # all errors are ignored; fall back to the default value {}
+ output_config = {}
+
+ self._init_output_config(output_config)
+ self._set_output_config(output_config, args.output_file)
+ LOG.debug('Output configuration is: %s', output_config)
+
+ self._set_dispatchers(output_config)
+
+ # update dispatcher list
+ if 'file' in output_config['DEFAULT']['dispatcher']:
+ result = {'status': 0, 'result': {}}
+ utils.write_json_to_file(args.output_file, result)
+
+ total_start_time = time.time()
+ parser = TaskParser(args.inputfile[0])
+
+ if args.suite:
+ # 1.parse suite, return suite_params info
+ task_files, task_args, task_args_fnames = \
+ parser.parse_suite()
+ else:
+ task_files = [parser.path]
+ task_args = [args.task_args]
+ task_args_fnames = [args.task_args_file]
+
+ LOG.debug("task_files:%s, task_args:%s, task_args_fnames:%s",
+ task_files, task_args, task_args_fnames)
+
+ if args.parse_only:
+ sys.exit(0)
+
+ testcases = {}
+ # parse task_files
+ for i in range(0, len(task_files)):
+ one_task_start_time = time.time()
+ parser.path = task_files[i]
+ steps, run_in_parallel, meet_precondition, ret_context = \
+ parser.parse_task(self.task_id, task_args[i],
+ task_args_fnames[i])
+
+ self.context = ret_context
+
+ if not meet_precondition:
+ LOG.info("meet_precondition is %s, please check envrionment",
+ meet_precondition)
+ continue
+
+ case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
+ try:
+ data = self._run(steps, run_in_parallel, args.output_file)
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True)
+ testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
+ else:
+ LOG.info('Testcase: "%s" SUCCESS!!!', case_name)
+ testcases[case_name] = {'criteria': 'PASS', 'tc_data': data}
+
+ if args.keep_deploy:
+ # keep deployment, forget about stack
+ # (hide it for exit handler)
+ self.context = None
+ else:
+ self.context.undeploy()
+ self.context = None
+ one_task_end_time = time.time()
+ LOG.info("Task %s finished in %d secs", task_files[i],
+ one_task_end_time - one_task_start_time)
+
+ result = self._get_format_result(testcases)
+
+ self._do_output(output_config, result)
+ self._generate_reporting(result)
+
+ total_end_time = time.time()
+ LOG.info("Total finished in %d secs",
+ total_end_time - total_start_time)
+
+ step = steps[0]
+ LOG.info("To generate report, execute command "
+ "'vnftest report generate %(task_id)s %(tc)s'", step)
+ LOG.info("Task ALL DONE, exiting")
+ return result
+
+ def _generate_reporting(self, result):
+ env = Environment()
+ with open(constants.REPORTING_FILE, 'w') as f:
+ f.write(env.from_string(report_template).render(result))
+
+ LOG.info("Report can be found in '%s'", constants.REPORTING_FILE)
+
+ def _set_log(self):
+ log_format = '%(asctime)s %(name)s %(filename)s:%(lineno)d %(levelname)s %(message)s'
+ log_formatter = logging.Formatter(log_format)
+
+ utils.makedirs(constants.TASK_LOG_DIR)
+ log_path = os.path.join(constants.TASK_LOG_DIR, '{}.log'.format(self.task_id))
+ log_handler = logging.FileHandler(log_path)
+ log_handler.setFormatter(log_formatter)
+ log_handler.setLevel(logging.DEBUG)
+
+ logging.root.addHandler(log_handler)
+
+ def _init_output_config(self, output_config):
+ output_config.setdefault('DEFAULT', {})
+ output_config.setdefault('dispatcher_http', {})
+ output_config.setdefault('dispatcher_file', {})
+ output_config.setdefault('dispatcher_influxdb', {})
+ output_config.setdefault('nsb', {})
+
+ def _set_output_config(self, output_config, file_path):
+ try:
+ out_type = os.environ['DISPATCHER']
+ except KeyError:
+ output_config['DEFAULT'].setdefault('dispatcher', 'file')
+ else:
+ output_config['DEFAULT']['dispatcher'] = out_type
+
+ output_config['dispatcher_file']['file_path'] = file_path
+
+ try:
+ target = os.environ['TARGET']
+ except KeyError:
+ pass
+ else:
+ k = 'dispatcher_{}'.format(output_config['DEFAULT']['dispatcher'])
+ output_config[k]['target'] = target
+
+ def _get_format_result(self, testcases):
+ criteria = self._get_task_criteria(testcases)
+
+ info = {
+ 'deploy_step': os.environ.get('DEPLOY_STEP', 'unknown'),
+ 'installer': os.environ.get('INSTALLER_TYPE', 'unknown'),
+ 'pod_name': os.environ.get('NODE_NAME', 'unknown'),
+ 'version': os.environ.get('VNFTEST_BRANCH', 'unknown')
+ }
+
+ result = {
+ 'status': 1,
+ 'result': {
+ 'criteria': criteria,
+ 'task_id': self.task_id,
+ 'info': info,
+ 'testcases': testcases
+ }
+ }
+
+ return result
+
+ def _get_task_criteria(self, testcases):
+ criteria = any(t.get('criteria') != 'PASS' for t in testcases.values())
+ if criteria:
+ return 'FAIL'
+ else:
+ return 'PASS'
+
+ def _do_output(self, output_config, result):
+ dispatchers = DispatcherBase.get(output_config)
+
+ for dispatcher in dispatchers:
+ dispatcher.flush_result_data(result)
+
+ def _run(self, steps, run_in_parallel, output_file):
+ """Deploys context and calls runners"""
+ self.context.deploy()
+ background_runners = []
+
+ result = []
+ # Start all background steps
+ for step in filter(_is_background_step, steps):
+ step["runner"] = dict(type="Duration", duration=1000000000)
+ runner = self.run_one_step(step, output_file)
+ background_runners.append(runner)
+
+ runners = []
+ if run_in_parallel:
+ for step in steps:
+ if not _is_background_step(step):
+ runner = self.run_one_step(step, output_file)
+ runners.append(runner)
+
+ # Wait for runners to finish
+ for runner in runners:
+ status = runner_join(runner, background_runners, self.outputs, result)
+ if status != 0:
+ raise RuntimeError(
+ "{0} runner status {1}".format(runner.__execution_type__, status))
+ LOG.info("Runner ended, output in %s", output_file)
+ else:
+ # run serially
+ for step in steps:
+ if not _is_background_step(step):
+ runner = self.run_one_step(step, output_file)
+ status = runner_join(runner, background_runners, self.outputs, result)
+ if status != 0:
+ LOG.error('Step NO.%s: "%s" ERROR!',
+ steps.index(step) + 1,
+ step.get('type'))
+ raise RuntimeError(
+ "{0} runner status {1}".format(runner.__execution_type__, status))
+ LOG.info("Runner ended, output in %s", output_file)
+
+ # Abort background runners
+ for runner in background_runners:
+ runner.abort()
+
+ # Wait for background runners to finish
+ for runner in background_runners:
+ status = runner.join(self.outputs, result)
+ if status is None:
+ # Nuke if it did not stop nicely
+ base_runner.Runner.terminate(runner)
+ runner.join(self.outputs, result)
+ base_runner.Runner.release(runner)
+
+ print("Background task ended")
+ return result
+
+ def atexit_handler(self):
+ """handler for process termination"""
+ base_runner.Runner.terminate_all()
+
+ if self.context:
+ LOG.info("Undeploying context")
+ self.context.undeploy()
+
+ def _parse_options(self, op):
+ if isinstance(op, dict):
+ return {k: self._parse_options(v) for k, v in op.items()}
+ elif isinstance(op, list):
+ return [self._parse_options(v) for v in op]
+ elif isinstance(op, str):
+ return self.outputs.get(op[1:]) if op.startswith('$') else op
+ else:
+ return op
+
+ def run_one_step(self, step_cfg, output_file):
+ """run one step using context"""
+ # default runner is Duration
+ if 'runner' not in step_cfg:
+ step_cfg['runner'] = dict(type="Duration", duration=1000000000)
+ runner_cfg = step_cfg['runner']
+ runner_cfg['output_filename'] = output_file
+ options = step_cfg.get('options', {})
+ step_cfg['options'] = self._parse_options(options)
+ runner = base_runner.Runner.get(runner_cfg)
+
+ LOG.info("Starting runner of type '%s'", runner_cfg["type"])
+ runner.run(step_cfg, self.context)
+ return runner
+
+
+class TaskParser(object): # pragma: no cover
+ """Parser for task config files in yaml format"""
+
+ def __init__(self, path):
+ self.path = path
+
+ def _meet_constraint(self, task, cur_pod, cur_installer):
+ if "constraint" in task:
+ constraint = task.get('constraint', None)
+ if constraint is not None:
+ tc_fit_pod = constraint.get('pod', None)
+ tc_fit_installer = constraint.get('installer', None)
+ LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
+ cur_pod, cur_installer, constraint)
+ if (cur_pod is None) or (tc_fit_pod and cur_pod not in tc_fit_pod):
+ return False
+ if (cur_installer is None) or (tc_fit_installer and cur_installer
+ not in tc_fit_installer):
+ return False
+ return True
+
+ def _get_task_para(self, task, cur_pod):
+ task_args = task.get('task_args', None)
+ if task_args is not None:
+ task_args = task_args.get(cur_pod, task_args.get('default'))
+ task_args_fnames = task.get('task_args_fnames', None)
+ if task_args_fnames is not None:
+ task_args_fnames = task_args_fnames.get(cur_pod, None)
+ return task_args, task_args_fnames
+
+ def parse_suite(self):
+ """parse the suite file and return a list of task config file paths
+ and lists of optional parameters if present"""
+ LOG.info("\nParsing suite file:%s", self.path)
+
+ try:
+ with open(self.path) as stream:
+ cfg = yaml_load(stream)
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ self._check_schema(cfg["schema"], "suite")
+ LOG.info("\nStarting step:%s", cfg["name"])
+
+ test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
+ test_cases_dir = os.path.join(constants.VNFTEST_ROOT_PATH,
+ test_cases_dir)
+ if test_cases_dir[-1] != os.sep:
+ test_cases_dir += os.sep
+
+ cur_pod = os.environ.get('NODE_NAME', None)
+ cur_installer = os.environ.get('INSTALLER_TYPE', None)
+
+ valid_task_files = []
+ valid_task_args = []
+ valid_task_args_fnames = []
+
+ for task in cfg["test_cases"]:
+ # 1.check file_name
+ if "file_name" in task:
+ task_fname = task.get('file_name', None)
+ if task_fname is None:
+ continue
+ else:
+ continue
+ # 2.check constraint
+ if self._meet_constraint(task, cur_pod, cur_installer):
+ valid_task_files.append(test_cases_dir + task_fname)
+ else:
+ continue
+ # 3.fetch task parameters
+ task_args, task_args_fnames = self._get_task_para(task, cur_pod)
+ valid_task_args.append(task_args)
+ valid_task_args_fnames.append(task_args_fnames)
+
+ return valid_task_files, valid_task_args, valid_task_args_fnames
+
+ def parse_task(self, task_id, task_args=None, task_args_file=None):
+ """parses the task file and return an context and step instances"""
+ LOG.info("Parsing task config: %s", self.path)
+
+ try:
+ kw = {}
+ if task_args_file:
+ with open(task_args_file) as f:
+ kw.update(parse_task_args("task_args_file", f.read()))
+ kw.update(parse_task_args("task_args", task_args))
+ except TypeError:
+ raise TypeError()
+
+ try:
+ with open(self.path) as f:
+ try:
+ input_task = f.read()
+ rendered_task = TaskTemplate.render(input_task, **kw)
+ except Exception as e:
+ LOG.exception('Failed to render template:\n%s\n', input_task)
+ raise e
+ LOG.debug("Input task is:\n%s\n", rendered_task)
+
+ cfg = yaml_load(rendered_task)
+ except IOError as ioerror:
+ sys.exit(ioerror)
+
+ self._check_schema(cfg["schema"], "task")
+ meet_precondition = self._check_precondition(cfg)
+
+ if "context" in cfg:
+ context_cfg = cfg["context"]
+ else:
+ context_cfg = {"type": "Dummy"}
+
+ name_suffix = '-{}'.format(task_id[:8])
+ try:
+ context_cfg['name'] = '{}{}'.format(context_cfg['name'],
+ name_suffix)
+ except KeyError:
+ pass
+ # default to CSAR context
+ context_type = context_cfg.get("type", "CSAR")
+ context = Context.get(context_type)
+ context.init(context_cfg)
+
+ run_in_parallel = cfg.get("run_in_parallel", False)
+
+ # add tc and task id for influxdb extended tags
+ for step in cfg["steps"]:
+ task_name = os.path.splitext(os.path.basename(self.path))[0]
+ step["tc"] = task_name
+ step["task_id"] = task_id
+ # embed task path into step so we can load other files
+ # relative to task path
+ step["task_path"] = os.path.dirname(self.path)
+
+ # TODO we need something better here, a class that represent the file
+ return cfg["steps"], run_in_parallel, meet_precondition, context
+
+ def _check_schema(self, cfg_schema, schema_type):
+ """Check if config file is using the correct schema type"""
+
+ if cfg_schema != "vnftest:" + schema_type + ":0.1":
+ sys.exit("error: file %s has unknown schema %s" % (self.path,
+ cfg_schema))
+
+ def _check_precondition(self, cfg):
+ """Check if the environment meet the precondition"""
+
+ if "precondition" in cfg:
+ precondition = cfg["precondition"]
+ installer_type = precondition.get("installer_type", None)
+ deploy_steps = precondition.get("deploy_steps", None)
+ tc_fit_pods = precondition.get("pod_name", None)
+ installer_type_env = os.environ.get('INSTALL_TYPE', None)
+ deploy_step_env = os.environ.get('DEPLOY_STEP', None)
+ pod_name_env = os.environ.get('NODE_NAME', None)
+
+ LOG.info("installer_type: %s, installer_type_env: %s",
+ installer_type, installer_type_env)
+ LOG.info("deploy_steps: %s, deploy_step_env: %s",
+ deploy_steps, deploy_step_env)
+ LOG.info("tc_fit_pods: %s, pod_name_env: %s",
+ tc_fit_pods, pod_name_env)
+ if installer_type and installer_type_env:
+ if installer_type_env not in installer_type:
+ return False
+ if deploy_steps and deploy_step_env:
+ deploy_steps_list = deploy_steps.split(',')
+ for deploy_step in deploy_steps_list:
+ if deploy_step_env.startswith(deploy_step):
+ return True
+ return False
+ if tc_fit_pods and pod_name_env:
+ if pod_name_env not in tc_fit_pods:
+ return False
+ return True
+
+
+def is_ip_addr(addr):
+ """check if string addr is an IP address"""
+ try:
+ addr = addr.get('public_ip_attr', addr.get('private_ip_attr'))
+ except AttributeError:
+ pass
+
+ try:
+ ipaddress.ip_address(addr.encode('utf-8'))
+ except ValueError:
+ return False
+ else:
+ return True
+
+
+def _is_background_step(step):
+ if "run_in_background" in step:
+ return step["run_in_background"]
+ else:
+ return False
+
+
+def parse_nodes_with_context(step_cfg):
+ """parse the 'nodes' fields in step """
+ # ensure consistency in node instantiation order
+ return OrderedDict((nodename, Context.get_server(step_cfg["nodes"][nodename]))
+ for nodename in sorted(step_cfg["nodes"]))
+
+
+def get_networks_from_nodes(nodes):
+ """parse the 'nodes' fields in step """
+ networks = {}
+ for node in nodes.values():
+ if not node:
+ continue
+ interfaces = node.get('interfaces', {})
+ for interface in interfaces.values():
+ # vld_id is network_name
+ network_name = interface.get('network_name')
+ if not network_name:
+ continue
+ network = Context.get_network(network_name)
+ if network:
+ networks[network['name']] = network
+ return networks
+
+
+def runner_join(runner, background_runners, outputs, result):
+ """join (wait for) a runner, exit process at runner failure
+ :param background_runners:
+ :type background_runners:
+ :param outputs:
+ :type outputs: dict
+ :param result:
+ :type result: list
+ """
+ while runner.poll() is None:
+ outputs.update(runner.get_output())
+ result.extend(runner.get_result())
+ # drain all the background runner queues
+ for background in background_runners:
+ outputs.update(background.get_output())
+ result.extend(background.get_result())
+ status = runner.join(outputs, result)
+ base_runner.Runner.release(runner)
+ return status
+
+
+def print_invalid_header(source_name, args):
+ print("Invalid %(source)s passed:\n\n %(args)s\n"
+ % {"source": source_name, "args": args})
+
+
+def parse_task_args(src_name, args):
+ if isinstance(args, collections.Mapping):
+ return args
+
+ try:
+ kw = args and yaml_load(args)
+ kw = {} if kw is None else kw
+ except yaml.parser.ParserError as e:
+ print_invalid_header(src_name, args)
+ print("%(source)s has to be YAML. Details:\n\n%(err)s\n"
+ % {"source": src_name, "err": e})
+ raise TypeError()
+
+ if not isinstance(kw, dict):
+ print_invalid_header(src_name, args)
+ print("%(src)s had to be dict, actually %(src_type)s\n"
+ % {"src": src_name, "src_type": type(kw)})
+ raise TypeError()
+ return kw \ No newline at end of file
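
For orientation (not part of the patch), the smallest task file shape that TaskParser.parse_task() above accepts: a schema string, an optional context section and a list of steps. The step type and its options are hypothetical placeholders; note that a string option beginning with '$' is resolved against the outputs of previous steps by Task._parse_options().

MINIMAL_TASK_YAML = """\
schema: "vnftest:task:0.1"

context:
  type: Dummy

steps:
- type: SomeStepType              # hypothetical step type registered in steps.base
  options:
    vnf_name: sample_vnf          # hypothetical literal option
    package_id: $csar_id          # resolved from a previous step's output
  runner:
    type: Iteration
    iterations: 1
"""
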
diff --git a/vnftest/onap/core/testcase.py b/vnftest/onap/core/testcase.py
new file mode 100644
index 0000000..ef3e535
--- /dev/null
+++ b/vnftest/onap/core/testcase.py
@@ -0,0 +1,113 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/testcase.py
+
+""" Handler for vnftest command 'testcase' """
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+import logging
+
+from vnftest.common.task_template import TaskTemplate
+from vnftest.common import constants as consts
+from vnftest.common.yaml_loader import yaml_load
+
+LOG = logging.getLogger(__name__)
+
+
+class Testcase(object):
+ """Testcase commands.
+
+ Set of commands to discover and display test cases.
+ """
+
+ def list_all(self, args):
+ """List existing test cases"""
+
+ testcase_files = self._get_testcase_file_list()
+ testcase_list = [self._get_record(f) for f in testcase_files]
+
+ return testcase_list
+
+ def _get_testcase_file_list(self):
+ try:
+ testcase_files = sorted(os.listdir(consts.TESTCASE_DIR))
+ except OSError:
+ LOG.exception('Failed to list dir:\n%s\n', consts.TESTCASE_DIR)
+ raise
+
+ return testcase_files
+
+ def _get_record(self, testcase_file):
+
+ file_path = os.path.join(consts.TESTCASE_DIR, testcase_file)
+ with open(file_path) as f:
+ try:
+ testcase_info = f.read()
+ except IOError:
+ LOG.exception('Failed to load test case:\n%s\n', testcase_file)
+ raise
+
+ description, installer, deploy_steps = self._parse_testcase(
+ testcase_info)
+
+ record = {
+ 'Name': testcase_file.split(".")[0],
+ 'Description': description,
+ 'installer': installer,
+ 'deploy_steps': deploy_steps
+ }
+
+ return record
+
+ def _parse_testcase(self, testcase_info):
+
+ rendered_testcase = TaskTemplate.render(testcase_info)
+ testcase_cfg = yaml_load(rendered_testcase)
+
+ test_precondition = testcase_cfg.get('precondition', {})
+ installer_type = test_precondition.get('installer_type', 'all')
+ deploy_steps = test_precondition.get('deploy_steps', 'all')
+
+ description = self._get_description(testcase_cfg)
+
+ return description, installer_type, deploy_steps
+
+ def _get_description(self, testcase_cfg):
+ try:
+ description_list = testcase_cfg['description'].split(';')
+ except KeyError:
+ return ''
+ else:
+ try:
+ return description_list[1].replace(os.linesep, '').strip()
+ except IndexError:
+ return description_list[0].replace(os.linesep, '').strip()
+
+ def show(self, args):
+ """Show details of a specific test case"""
+ testcase_name = args.casename[0]
+ testcase_path = os.path.join(consts.TESTCASE_DIR,
+ testcase_name + ".yaml")
+ with open(testcase_path) as f:
+ try:
+ testcase_info = f.read()
+ except IOError:
+ LOG.exception('Failed to load test case:\n%s\n', testcase_path)
+ raise
+
+ print(testcase_info)
+ return True
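
A small sketch (not part of the patch) of the description convention handled by _get_description() above: when a test case description contains a ';', only the text after the first ';' is shown in listings. The description text here is hypothetical.

from vnftest.onap.core.testcase import Testcase

cfg = {'description': 'sample_tc; verify VNF package onboarding'}
print(Testcase()._get_description(cfg))   # -> 'verify VNF package onboarding'
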
diff --git a/vnftest/onap/core/testsuite.py b/vnftest/onap/core/testsuite.py
new file mode 100644
index 0000000..986982a
--- /dev/null
+++ b/vnftest/onap/core/testsuite.py
@@ -0,0 +1,49 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# yardstick/benchmark/core/testsuite.py
+
+""" Handler for vnftest command 'testcase' """
+from __future__ import absolute_import
+from __future__ import print_function
+
+import os
+import logging
+
+from vnftest.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+
+
+class Testsuite(object):
+ """Testcase commands.
+
+ Set of commands to discover and display test cases.
+ """
+
+ def list_all(self, args):
+ """List existing test cases"""
+
+ testsuite_list = self._get_testsuite_file_list()
+
+ return testsuite_list
+
+ def _get_testsuite_file_list(self):
+ try:
+ testsuite_files = sorted(os.listdir(consts.TESTSUITE_DIR))
+ except OSError:
+ LOG.exception('Failed to list dir:\n%s\n', consts.TESTSUITE_DIR)
+ raise
+
+ return testsuite_files
diff --git a/vnftest/onap/runners/__init__.py b/vnftest/onap/runners/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/onap/runners/__init__.py
diff --git a/vnftest/onap/runners/base.py b/vnftest/onap/runners/base.py
new file mode 100755
index 0000000..5170bbe
--- /dev/null
+++ b/vnftest/onap/runners/base.py
@@ -0,0 +1,250 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/runners/base.py
+
+from __future__ import absolute_import
+import logging
+import multiprocessing
+import subprocess
+import time
+import traceback
+import importlib
+from six.moves.queue import Empty
+
+import vnftest.common.utils as utils
+from vnftest.onap.steps import base as base_step
+from vnftest.onap.steps.onap_api_call import OnapApiCall
+
+log = logging.getLogger(__name__)
+
+
+def _execute_shell_command(command):
+ """execute shell script with error handling"""
+ exitcode = 0
+ try:
+ output = subprocess.check_output(command, shell=True)
+ except Exception:
+ exitcode = -1
+ output = traceback.format_exc()
+ log.error("exec command '%s' error:\n ", command)
+ log.error(traceback.format_exc())
+
+ return exitcode, output
+
+
+def _single_action(seconds, command, queue):
+ """entrypoint for the single action process"""
+ log.debug("single action, fires after %d seconds (from now)", seconds)
+ time.sleep(seconds)
+ log.debug("single action: executing command: '%s'", command)
+ ret_code, data = _execute_shell_command(command)
+ if ret_code < 0:
+ log.error("single action error! command:%s", command)
+ queue.put({'single-action-data': data})
+ return
+ log.debug("single action data: \n%s", data)
+ queue.put({'single-action-data': data})
+
+
+def _periodic_action(interval, command, queue):
+ """entrypoint for the periodic action process"""
+ log.debug("periodic action, fires every: %d seconds", interval)
+ time_spent = 0
+ while True:
+ time.sleep(interval)
+ time_spent += interval
+ log.debug("periodic action, executing command: '%s'", command)
+ ret_code, data = _execute_shell_command(command)
+ if ret_code < 0:
+ log.error("periodic action error! command:%s", command)
+ queue.put({'periodic-action-data': data})
+ break
+ log.debug("periodic action data: \n%s", data)
+ queue.put({'periodic-action-data': data})
+
+
+class Runner(object):
+ runners = []
+
+ @staticmethod
+ def get_cls(runner_type):
+ """return class of specified type"""
+ for runner in utils.itersubclasses(Runner):
+ if runner_type == runner.__execution_type__:
+ return runner
+ raise RuntimeError("No such runner_type %s" % runner_type)
+
+ @staticmethod
+ def get_types():
+ """return a list of known runner type (class) names"""
+ types = []
+ for runner in utils.itersubclasses(Runner):
+ types.append(runner)
+ return types
+
+ @staticmethod
+ def get(runner_cfg):
+ """Returns instance of a step runner for execution type.
+ """
+ return Runner.get_cls(runner_cfg["type"])(runner_cfg)
+
+ @staticmethod
+ def release(runner):
+ """Release the runner"""
+ if runner in Runner.runners:
+ Runner.runners.remove(runner)
+
+ @staticmethod
+ def terminate(runner):
+ """Terminate the runner"""
+ if runner.process and runner.process.is_alive():
+ runner.process.terminate()
+
+ @staticmethod
+ def terminate_all():
+ """Terminate all runners (subprocesses)"""
+ log.debug("Terminating all runners", exc_info=True)
+
+ # nothing to terminate if an error occurred before any runner was created
+ if not Runner.runners:
+ return
+
+ for runner in Runner.runners:
+ log.debug("Terminating runner: %s", runner)
+ if runner.process:
+ runner.process.terminate()
+ runner.process.join()
+ if runner.periodic_action_process:
+ log.debug("Terminating periodic action process")
+ runner.periodic_action_process.terminate()
+ runner.periodic_action_process = None
+ Runner.release(runner)
+
+ def __init__(self, config):
+ self.config = config
+ self.periodic_action_process = None
+ self.output_queue = multiprocessing.Queue()
+ self.result_queue = multiprocessing.Queue()
+ self.process = None
+ self.aborted = multiprocessing.Event()
+ Runner.runners.append(self)
+
+ def run_post_stop_action(self):
+ """run a potentially configured post-stop action"""
+ if "post-stop-action" in self.config:
+ command = self.config["post-stop-action"]["command"]
+ log.debug("post stop action: command: '%s'", command)
+ ret_code, data = _execute_shell_command(command)
+ if ret_code < 0:
+ log.error("post action error! command:%s", command)
+ self.result_queue.put({'post-stop-action-data': data})
+ return
+ log.debug("post-stop data: \n%s", data)
+ self.result_queue.put({'post-stop-action-data': data})
+
+ def _run_step(self, cls, method_name, step_cfg, context_cfg):
+ raise NotImplementedError
+
+ def run(self, step_cfg, context_cfg):
+ step_type = step_cfg["type"]
+ class_name = base_step.Step.get(step_type)
+ path_split = class_name.split(".")
+ module_path = ".".join(path_split[:-1])
+ module = importlib.import_module(module_path)
+ cls = getattr(module, path_split[-1])
+
+ self.config['object'] = class_name
+ self.aborted.clear()
+
+ # run a potentially configured pre-start action
+ if "pre-start-action" in self.config:
+ command = self.config["pre-start-action"]["command"]
+ log.debug("pre start action: command: '%s'", command)
+ ret_code, data = _execute_shell_command(command)
+ if ret_code < 0:
+ log.error("pre-start action error! command:%s", command)
+ self.result_queue.put({'pre-start-action-data': data})
+ return
+ log.debug("pre-start data: \n%s", data)
+ self.result_queue.put({'pre-start-action-data': data})
+
+ if "single-shot-action" in self.config:
+ single_action_process = multiprocessing.Process(
+ target=_single_action,
+ name="single-shot-action",
+ args=(self.config["single-shot-action"]["after"],
+ self.config["single-shot-action"]["command"],
+ self.result_queue))
+ single_action_process.start()
+
+ if "periodic-action" in self.config:
+ self.periodic_action_process = multiprocessing.Process(
+ target=_periodic_action,
+ name="periodic-action",
+ args=(self.config["periodic-action"]["interval"],
+ self.config["periodic-action"]["command"],
+ self.result_queue))
+ self.periodic_action_process.start()
+
+ self._run_step(cls, "run", step_cfg, context_cfg)
+
+ def abort(self):
+ """Abort the execution of a step"""
+ self.aborted.set()
+
+ QUEUE_JOIN_INTERVAL = 5
+
+ def poll(self, timeout=QUEUE_JOIN_INTERVAL):
+ self.process.join(timeout)
+ return self.process.exitcode
+
+ def join(self, outputs, result, interval=QUEUE_JOIN_INTERVAL):
+ while self.process.exitcode is None:
+ # drain the queue while we are running otherwise we won't terminate
+ outputs.update(self.get_output())
+ result.extend(self.get_result())
+ self.process.join(interval)
+ # drain after the process has exited
+ outputs.update(self.get_output())
+ result.extend(self.get_result())
+
+ self.process.terminate()
+ if self.periodic_action_process:
+ self.periodic_action_process.join(1)
+ self.periodic_action_process.terminate()
+ self.periodic_action_process = None
+
+ self.run_post_stop_action()
+ return self.process.exitcode
+
+ def get_output(self):
+ result = {}
+ while not self.output_queue.empty():
+ log.debug("output_queue size %s", self.output_queue.qsize())
+ try:
+ result.update(self.output_queue.get(True, 1))
+ except Empty:
+ pass
+ return result
+
+ def get_result(self):
+ result = []
+ while not self.result_queue.empty():
+ log.debug("result_queue size %s", self.result_queue.qsize())
+ try:
+ result.append(self.result_queue.get(True, 1))
+ except Empty:
+ pass
+ return result
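
As a usage sketch (not part of the patch), a runner configuration dictionary is resolved to a concrete runner through Runner.get(); the optional 'pre-start-action' hook shown here is read by Runner.run() above, and its command is hypothetical.

from vnftest.onap.runners.base import Runner
from vnftest.onap.runners.duration import DurationRunner  # noqa: imported so the Duration type is discoverable

runner_cfg = {
    'type': 'Duration',
    'duration': 30,
    'interval': 1,
    'output_filename': '/tmp/vnftest.out',
    'pre-start-action': {'command': 'echo preparing'},   # hypothetical shell hook
}
runner = Runner.get(runner_cfg)       # returns a DurationRunner instance
print(runner.__execution_type__)      # -> 'Duration'
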
diff --git a/vnftest/onap/runners/duration.py b/vnftest/onap/runners/duration.py
new file mode 100644
index 0000000..7e539e5
--- /dev/null
+++ b/vnftest/onap/runners/duration.py
@@ -0,0 +1,145 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/runners/duration.py
+
+"""A runner that runs a specific time before it returns
+"""
+
+from __future__ import absolute_import
+import os
+import multiprocessing
+import logging
+import traceback
+import time
+
+from vnftest.onap.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+QUEUE_PUT_TIMEOUT = 10
+
+
+def _worker_process(queue, cls, method_name, step_cfg,
+ context_cfg, aborted, output_queue):
+
+ sequence = 1
+
+ runner_cfg = step_cfg['runner']
+
+ interval = runner_cfg.get("interval", 1)
+ duration = runner_cfg.get("duration", 60)
+ LOG.info("Worker START, duration is %ds", duration)
+ LOG.debug("class is %s", cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ step = cls(step_cfg, context_cfg)
+ step.setup()
+ method = getattr(step, method_name)
+
+ sla_action = None
+ if "sla" in step_cfg:
+ sla_action = step_cfg["sla"].get("action", "assert")
+
+ start = time.time()
+ timeout = start + duration
+ while True:
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
+ {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ try:
+ result = method(data)
+ except AssertionError as assertion:
+ # SLA validation failed in the step, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s", assertion.args)
+ errors = assertion.args
+ # catch all exceptions because with multiprocessing we can have un-picklable exception
+ # problems https://bugs.python.org/issue9400
+ except Exception:
+ errors = traceback.format_exc()
+ LOG.exception("")
+ else:
+ if result:
+ # add timeout for put so we don't block test
+ # if we do timeout we don't care about dropping individual KPIs
+ output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
+
+ time.sleep(interval)
+
+ step_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ queue.put(step_output, True, QUEUE_PUT_TIMEOUT)
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END",
+ {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+ sequence += 1
+
+ if (errors and sla_action is None) or time.time() > timeout or aborted.is_set():
+ LOG.info("Worker END")
+ break
+
+ try:
+ step.teardown()
+ except Exception:
+ # catch any exception in teardown and convert to simple exception
+ # never pass exceptions back to multiprocessing, because some exceptions can
+ # be unpicklable
+ # https://bugs.python.org/issue9400
+ LOG.exception("")
+ raise SystemExit(1)
+
+ LOG.debug("queue.qsize() = %s", queue.qsize())
+ LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+
+
+class DurationRunner(base.Runner):
+ """Run a scenario for a certain amount of time
+
+If the scenario ends before the time has elapsed, it will be started again.
+
+ Parameters
+ duration - amount of time the scenario will be run for
+ type: int
+ unit: seconds
+ default: 1 sec
+ interval - time to wait between each scenario invocation
+ type: int
+ unit: seconds
+ default: 1 sec
+ """
+ __execution_type__ = 'Duration'
+
+ def _run_step(self, cls, method, step_cfg, context_cfg):
+ name = "{}-{}-{}".format(self.__execution_type__, step_cfg.get("type"), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, step_cfg,
+ context_cfg, self.aborted, self.output_queue))
+ self.process.start()
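
For reference (not part of the patch), the step-level configuration consumed by the Duration worker above; 'duration' and 'interval' default to 60 and 1 seconds when omitted, the 'sla' section is optional, and the step type and options are hypothetical.

step_cfg = {
    'type': 'SomeStepType',            # hypothetical step type
    'options': {},
    'runner': {
        'type': 'Duration',
        'duration': 120,               # repeat the step for 120 seconds
        'interval': 5,                 # sleep 5 seconds between invocations
    },
    'sla': {'action': 'monitor'},      # log SLA failures instead of re-raising
}
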
diff --git a/vnftest/onap/runners/dynamictp.py b/vnftest/onap/runners/dynamictp.py
new file mode 100755
index 0000000..5ea0910
--- /dev/null
+++ b/vnftest/onap/runners/dynamictp.py
@@ -0,0 +1,179 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/step/runners/dynamictp.py
+
+"""A runner that searches for the max throughput with binary search
+"""
+
+import logging
+import multiprocessing
+import time
+import traceback
+
+import os
+
+from vnftest.onap.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, step_cfg,
+ context_cfg, aborted): # pragma: no cover
+
+ runner_cfg = step_cfg['runner']
+ iterations = runner_cfg.get("iterations", 1)
+ interval = runner_cfg.get("interval", 1)
+ run_step = runner_cfg.get("run_step", "setup,run,teardown")
+ delta = runner_cfg.get("delta", 1000)
+ options_cfg = step_cfg['options']
+ initial_rate = options_cfg.get("pps", 1000000)
+ LOG.info("worker START, class %s", cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ step = cls(step_cfg, context_cfg)
+ if "setup" in run_step:
+ step.setup()
+
+ method = getattr(step, method_name)
+
+ queue.put({'runner_id': runner_cfg['runner_id'],
+ 'step_cfg': step_cfg,
+ 'context_cfg': context_cfg})
+
+ if "run" in run_step:
+ iterator = 0
+ search_max = initial_rate
+ search_min = 0
+ while iterator < iterations:
+ search_min = int(search_min / 2)
+ step_cfg['options']['pps'] = search_max
+ search_max_found = False
+ max_throughput_found = False
+ sequence = 0
+
+ last_min_data = {'packets_per_second': 0}
+
+ while True:
+ sequence += 1
+
+ data = {}
+ errors = ""
+ too_high = False
+
+ LOG.debug("sequence: %s search_min: %s search_max: %s",
+ sequence, search_min, search_max)
+
+ try:
+ method(data)
+ except AssertionError as assertion:
+ LOG.warning("SLA validation failed: %s" % assertion.args)
+ too_high = True
+ except Exception as e:
+ errors = traceback.format_exc()
+ LOG.exception(e)
+
+ actual_pps = data['packets_per_second']
+
+ if too_high:
+ search_max = actual_pps
+
+ if not search_max_found:
+ search_max_found = True
+ else:
+ last_min_data = data
+ search_min = actual_pps
+
+ # Check if the actual rate is well below the asked rate
+ if step_cfg['options']['pps'] > actual_pps * 1.5:
+ search_max = actual_pps
+ LOG.debug("Sender reached max tput: %s", search_max)
+ elif not search_max_found:
+ search_max = int(actual_pps * 1.5)
+
+ if ((search_max - search_min) < delta) or \
+ (search_max <= search_min) or (10 <= sequence):
+ if last_min_data['packets_per_second'] > 0:
+ data = last_min_data
+
+ step_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ record = {
+ 'runner_id': runner_cfg['runner_id'],
+ 'step': step_output
+ }
+
+ queue.put(record)
+ max_throughput_found = True
+
+ if errors or aborted.is_set() or max_throughput_found:
+ LOG.info("worker END")
+ break
+
+ if not search_max_found:
+ step_cfg['options']['pps'] = search_max
+ else:
+ step_cfg['options']['pps'] = \
+ (search_max - search_min) / 2 + search_min
+
+ time.sleep(interval)
+
+ iterator += 1
+ LOG.debug("iterator: %s iterations: %s", iterator, iterations)
+
+ if "teardown" in run_step:
+ try:
+ step.teardown()
+ except Exception:
+ # catch any exception in teardown and convert to simple exception
+ # never pass exceptions back to multiprocessing, because some exceptions can
+ # be unpicklable
+ # https://bugs.python.org/issue9400
+ LOG.exception("")
+ raise SystemExit(1)
+
+ LOG.debug("queue.qsize() = %s", queue.qsize())
+
+
+class IterationRunner(base.Runner):
+ """Run a step to find the max throughput
+
+If the step ends before the time has elapsed, it will be started again.
+
+ Parameters
+ interval - time to wait between each step invocation
+ type: int
+ unit: seconds
+ default: 1 sec
+ delta - stop condition for the search.
+ type: int
+ unit: pps
+ default: 1000 pps
+ """
+ __execution_type__ = 'Dynamictp'
+
+ def _run_step(self, cls, method, step_cfg, context_cfg):
+ name = "{}-{}-{}".format(self.__execution_type__, step_cfg.get("type"), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, step_cfg,
+ context_cfg, self.aborted))
+ self.process.start()
diff --git a/vnftest/onap/runners/iteration.py b/vnftest/onap/runners/iteration.py
new file mode 100644
index 0000000..9bac92e
--- /dev/null
+++ b/vnftest/onap/runners/iteration.py
@@ -0,0 +1,160 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/runners/iteration.py
+
+"""A runner that runs a configurable number of times before it returns
+"""
+
+from __future__ import absolute_import
+
+import logging
+import multiprocessing
+import time
+import traceback
+
+import os
+
+from vnftest.onap.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+QUEUE_PUT_TIMEOUT = 10
+
+
+def _worker_process(queue, cls, method_name, step_cfg,
+ context_cfg, aborted, output_queue):
+
+ sequence = 1
+
+ runner_cfg = step_cfg['runner']
+
+ interval = runner_cfg.get("interval", 1)
+ iterations = runner_cfg.get("iterations", 1)
+ run_step = runner_cfg.get("run_step", "setup,run,teardown")
+
+ delta = runner_cfg.get("delta", 2)
+ LOG.info("worker START, iterations %d times, class %s", iterations, cls)
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ step = cls(step_cfg, context_cfg)
+ if "setup" in run_step:
+ step.setup()
+
+ method = getattr(step, method_name)
+
+ sla_action = None
+ if "sla" in step_cfg:
+ sla_action = step_cfg["sla"].get("action", "assert")
+ if "run" in run_step:
+ while True:
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
+ {"runner": runner_cfg["runner_id"],
+ "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ try:
+ result = method(data)
+ except AssertionError as assertion:
+ # SLA validation failed in step, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s", assertion.args)
+ errors = assertion.args
+ elif sla_action == "rate-control":
+                    options = step_cfg.setdefault('options', {})
+                    options.setdefault('rate', 100)
+                    options['rate'] -= delta
+ sequence = 1
+ continue
+ except Exception:
+ errors = traceback.format_exc()
+ LOG.exception("")
+ else:
+ if result:
+ # add timeout for put so we don't block test
+ # if we do timeout we don't care about dropping individual KPIs
+ output_queue.put(result, True, QUEUE_PUT_TIMEOUT)
+
+ time.sleep(interval)
+
+ step_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ queue.put(step_output, True, QUEUE_PUT_TIMEOUT)
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END",
+ {"runner": runner_cfg["runner_id"],
+ "sequence": sequence})
+
+ sequence += 1
+
+ if (errors and sla_action is None) or \
+ (sequence > iterations or aborted.is_set()):
+ LOG.info("worker END")
+ break
+ if "teardown" in run_step:
+ try:
+ step.teardown()
+ except Exception:
+ # catch any exception in teardown and convert to simple exception
+ # never pass exceptions back to multiprocessing, because some exceptions can
+ # be unpicklable
+ # https://bugs.python.org/issue9400
+ LOG.exception("")
+ raise SystemExit(1)
+
+ LOG.debug("queue.qsize() = %s", queue.qsize())
+ LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+
+
+class IterationRunner(base.Runner):
+ """Run a step for a configurable number of times
+
+    The step is invoked the configured number of times, waiting the given
+    interval between invocations.
+
+ Parameters
+    iterations - number of times the step will be run
+ type: int
+ unit: na
+ default: 1
+ interval - time to wait between each step invocation
+ type: int
+ unit: seconds
+ default: 1 sec
+ """
+ __execution_type__ = 'Iteration'
+
+ def _run_step(self, cls, method, step_cfg, context_cfg):
+ name = "{}-{}-{}".format(self.__execution_type__, step_cfg.get("type"), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, step_cfg,
+ context_cfg, self.aborted, self.output_queue))
+ self.process.start()
diff --git a/vnftest/onap/runners/search.py b/vnftest/onap/runners/search.py
new file mode 100644
index 0000000..d5bd417
--- /dev/null
+++ b/vnftest/onap/runners/search.py
@@ -0,0 +1,180 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/runners/search.py
+
+"""A runner that runs a specific time before it returns
+"""
+
+from __future__ import absolute_import
+
+import logging
+import multiprocessing
+import time
+import traceback
+from contextlib import contextmanager
+from itertools import takewhile
+
+import os
+from collections import Mapping
+from six.moves import zip
+
+from vnftest.onap.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+class SearchRunnerHelper(object):
+
+ def __init__(self, cls, method_name, step_cfg, context_cfg, aborted):
+ super(SearchRunnerHelper, self).__init__()
+ self.cls = cls
+ self.method_name = method_name
+ self.step_cfg = step_cfg
+ self.context_cfg = context_cfg
+ self.aborted = aborted
+ self.runner_cfg = step_cfg['runner']
+ self.run_step = self.runner_cfg.get("run_step", "setup,run,teardown")
+ self.timeout = self.runner_cfg.get("timeout", 60)
+ self.interval = self.runner_cfg.get("interval", 1)
+ self.step = None
+ self.method = None
+
+ def __call__(self, *args, **kwargs):
+ if self.method is None:
+            raise RuntimeError("step method not set; use get_step_instance() first")
+ return self.method(*args, **kwargs)
+
+ @contextmanager
+ def get_step_instance(self):
+ self.step = self.cls(self.step_cfg, self.context_cfg)
+
+ if 'setup' in self.run_step:
+ self.step.setup()
+
+ self.method = getattr(self.step, self.method_name)
+ LOG.info("worker START, timeout %d sec, class %s", self.timeout, self.cls)
+ try:
+ yield self
+ finally:
+ if 'teardown' in self.run_step:
+ self.step.teardown()
+
+ def is_not_done(self):
+ if 'run' not in self.run_step:
+            # 'return' ends the generator cleanly; raising StopIteration
+            # inside a generator is deprecated (PEP 479)
+            return
+
+ max_time = time.time() + self.timeout
+
+ abort_iter = iter(self.aborted.is_set, True)
+ time_iter = takewhile(lambda t_now: t_now <= max_time, iter(time.time, -1))
+
+ for seq, _ in enumerate(zip(abort_iter, time_iter), 1):
+ yield seq
+ time.sleep(self.interval)
+
+
+class SearchRunner(base.Runner):
+ """Run a step for a certain amount of time
+
+If the step ends before the time has elapsed, it will be started again.
+
+ Parameters
+ timeout - amount of time the step will be run for
+ type: int
+ unit: seconds
+        default: 60 sec
+ interval - time to wait between each step invocation
+ type: int
+ unit: seconds
+ default: 1 sec
+ """
+ __execution_type__ = 'Search'
+
+ def __init__(self, config):
+ super(SearchRunner, self).__init__(config)
+ self.runner_cfg = None
+ self.runner_id = None
+ self.sla_action = None
+ self.worker_helper = None
+
+ def _worker_run_once(self, sequence):
+ LOG.debug("runner=%s seq=%s START", self.runner_id, sequence)
+
+ data = {}
+ errors = ""
+
+ try:
+ self.worker_helper(data)
+ except AssertionError as assertion:
+ # SLA validation failed in step, determine what to do now
+ if self.sla_action == "assert":
+ raise
+ elif self.sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s", assertion.args)
+ errors = assertion.args
+ except Exception as e:
+ errors = traceback.format_exc()
+ LOG.exception(e)
+
+ record = {
+ 'runner_id': self.runner_id,
+ 'step': {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors,
+ },
+ }
+
+ self.result_queue.put(record)
+
+ LOG.debug("runner=%s seq=%s END", self.runner_id, sequence)
+
+ # Have to search through all the VNF KPIs
+ kpi_done = any(kpi.get('done') for kpi in data.values() if isinstance(kpi, Mapping))
+
+ return kpi_done or (errors and self.sla_action is None)
+
+ def _worker_run(self, cls, method_name, step_cfg, context_cfg):
+ self.runner_cfg = step_cfg['runner']
+ self.runner_id = self.runner_cfg['runner_id'] = os.getpid()
+
+ self.worker_helper = SearchRunnerHelper(cls, method_name, step_cfg,
+ context_cfg, self.aborted)
+
+ try:
+ self.sla_action = step_cfg['sla'].get('action', 'assert')
+ except KeyError:
+ self.sla_action = None
+
+ self.result_queue.put({
+ 'runner_id': self.runner_id,
+ 'step_cfg': step_cfg,
+ 'context_cfg': context_cfg
+ })
+
+ with self.worker_helper.get_step_instance():
+ for sequence in self.worker_helper.is_not_done():
+ if self._worker_run_once(sequence):
+ LOG.info("worker END")
+ break
+
+ def _run_step(self, cls, method, step_cfg, context_cfg):
+ name = "{}-{}-{}".format(self.__execution_type__, step_cfg.get("type"), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=self._worker_run,
+ args=(cls, method, step_cfg, context_cfg))
+ self.process.start()
diff --git a/vnftest/onap/runners/sequence.py b/vnftest/onap/runners/sequence.py
new file mode 100644
index 0000000..b341495
--- /dev/null
+++ b/vnftest/onap/runners/sequence.py
@@ -0,0 +1,149 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/runners/sequence.py
+
+"""A runner that every run changes a specified input value to the step.
+The input value in the sequence is specified in a list in the input file.
+"""
+
+from __future__ import absolute_import
+
+import logging
+import multiprocessing
+import time
+import traceback
+
+import os
+
+from vnftest.onap.runners import base
+
+LOG = logging.getLogger(__name__)
+
+
+def _worker_process(queue, cls, method_name, step_cfg,
+ context_cfg, aborted, output_queue):
+
+ sequence = 1
+
+ runner_cfg = step_cfg['runner']
+
+ interval = runner_cfg.get("interval", 1)
+ arg_name = runner_cfg.get('step_option_name')
+ sequence_values = runner_cfg.get('sequence')
+
+ if 'options' not in step_cfg:
+ step_cfg['options'] = {}
+
+ options = step_cfg['options']
+
+ runner_cfg['runner_id'] = os.getpid()
+
+ LOG.info("worker START, sequence_values(%s, %s), class %s",
+ arg_name, sequence_values, cls)
+
+ step = cls(step_cfg, context_cfg)
+ step.setup()
+ method = getattr(step, method_name)
+
+ sla_action = None
+ if "sla" in step_cfg:
+ sla_action = step_cfg["sla"].get("action", "assert")
+
+ for value in sequence_values:
+ options[arg_name] = value
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s START",
+ {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+ data = {}
+ errors = ""
+
+ try:
+ result = method(data)
+ except AssertionError as assertion:
+ # SLA validation failed in step, determine what to do now
+ if sla_action == "assert":
+ raise
+ elif sla_action == "monitor":
+ LOG.warning("SLA validation failed: %s", assertion.args)
+ errors = assertion.args
+ except Exception as e:
+ errors = traceback.format_exc()
+ LOG.exception(e)
+ else:
+ if result:
+ output_queue.put(result)
+
+ time.sleep(interval)
+
+ step_output = {
+ 'timestamp': time.time(),
+ 'sequence': sequence,
+ 'data': data,
+ 'errors': errors
+ }
+
+ queue.put(step_output)
+
+ LOG.debug("runner=%(runner)s seq=%(sequence)s END",
+ {"runner": runner_cfg["runner_id"], "sequence": sequence})
+
+ sequence += 1
+
+ if (errors and sla_action is None) or aborted.is_set():
+ break
+
+ try:
+ step.teardown()
+ except Exception:
+ # catch any exception in teardown and convert to simple exception
+ # never pass exceptions back to multiprocessing, because some exceptions can
+ # be unpicklable
+ # https://bugs.python.org/issue9400
+ LOG.exception("")
+ raise SystemExit(1)
+ LOG.info("worker END")
+ LOG.debug("queue.qsize() = %s", queue.qsize())
+ LOG.debug("output_queue.qsize() = %s", output_queue.qsize())
+
+
+class SequenceRunner(base.Runner):
+ """Run a step by changing an input value defined in a list
+
+ Parameters
+ interval - time to wait between each step invocation
+ type: int
+ unit: seconds
+ default: 1 sec
+    step_option_name - name of the step option that is set on each invocation
+ type: string
+ unit: na
+ default: none
+    sequence - list of values assigned to the option, one value per invocation
+ type: [int]
+ unit: na
+ default: none
+ """
+
+ __execution_type__ = 'Sequence'
+
+ def _run_step(self, cls, method, step_cfg, context_cfg):
+ name = "{}-{}-{}".format(self.__execution_type__, step_cfg.get("type"), os.getpid())
+ self.process = multiprocessing.Process(
+ name=name,
+ target=_worker_process,
+ args=(self.result_queue, cls, method, step_cfg,
+ context_cfg, self.aborted, self.output_queue))
+ self.process.start()
diff --git a/vnftest/onap/steps/__init__.py b/vnftest/onap/steps/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/onap/steps/__init__.py
diff --git a/vnftest/onap/steps/base.py b/vnftest/onap/steps/base.py
new file mode 100644
index 0000000..d5c606a
--- /dev/null
+++ b/vnftest/onap/steps/base.py
@@ -0,0 +1,89 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of
+# rally/rally/benchmark/steps/base.py
+
+""" Step base class
+"""
+
+from __future__ import absolute_import
+import vnftest.common.utils as utils
+
+
+class Step(object):
+
+ def setup(self):
+ """ default impl for step setup """
+ pass
+
+ def run(self, args):
+ """ catcher for not implemented run methods in subclasses """
+ raise RuntimeError("run method not implemented")
+
+ def teardown(self):
+ """ default impl for step teardown """
+ pass
+
+ @staticmethod
+ def get_types():
+ """return a list of known runner type (class) names"""
+ steps = []
+ for step in utils.itersubclasses(Step):
+ steps.append(step)
+ return steps
+
+ @staticmethod
+ def get_cls(step_type):
+ """return class of specified type"""
+ for step in utils.itersubclasses(Step):
+ if step_type == step.__step_type__:
+ return step
+
+ raise RuntimeError("No such step type %s" % step_type)
+
+ @staticmethod
+ def get(step_type):
+ """Returns instance of a step runner for execution type.
+ """
+ for step in utils.itersubclasses(Step):
+ if step_type == step.__step_type__:
+ return step.__module__ + "." + step.__name__
+
+ raise RuntimeError("No such step type %s" % step_type)
+
+ @classmethod
+ def get_step_type(cls):
+ """Return a string with the step type, if defined"""
+ return str(getattr(cls, '__step_type__', None))
+
+ @classmethod
+ def get_description(cls):
+ """Return a single line string with the class description
+
+ This function will retrieve the class docstring and return the first
+ line, or 'None' if it's empty.
+ """
+ return cls.__doc__.splitlines()[0] if cls.__doc__ else str(None)
+
+ def _push_to_outputs(self, keys, values):
+ return dict(zip(keys, values))
+
+ def _change_obj_to_dict(self, obj):
+ dic = {}
+ for k, v in vars(obj).items():
+ try:
+ vars(v)
+ except TypeError:
+ dic[k] = v
+ return dic
diff --git a/vnftest/onap/steps/dummy/__init__.py b/vnftest/onap/steps/dummy/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/onap/steps/dummy/__init__.py
diff --git a/vnftest/onap/steps/dummy/dummy.py b/vnftest/onap/steps/dummy/dummy.py
new file mode 100644
index 0000000..27e9a32
--- /dev/null
+++ b/vnftest/onap/steps/dummy/dummy.py
@@ -0,0 +1,42 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+from __future__ import absolute_import
+import logging
+
+from vnftest.onap.steps import base
+
+LOG = logging.getLogger(__name__)
+
+
+class Dummy(base.Step):
+ """Execute Dummy echo
+ """
+ __step_type__ = "Dummy"
+
+ def __init__(self, step_cfg, context_cfg):
+ self.step_cfg = step_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+
+ def setup(self):
+ """step setup"""
+ self.setup_done = True
+
+ def run(self, result):
+ """execute the benchmark"""
+ if not self.setup_done:
+ self.setup()
+
+ result["hello"] = "vnftest"
+ LOG.info("Dummy echo hello vnftest!")
diff --git a/vnftest/onap/steps/onap_api_call.py b/vnftest/onap/steps/onap_api_call.py
new file mode 100644
index 0000000..ecb2ce3
--- /dev/null
+++ b/vnftest/onap/steps/onap_api_call.py
@@ -0,0 +1,145 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+from __future__ import absolute_import
+
+import logging
+import time
+import os
+import yaml
+import copy
+from vnftest.onap.steps import base
+from vnftest.common import rest_client
+from vnftest.common import constants as consts
+
+LOG = logging.getLogger(__name__)
+
+
+class OnapApiCall(base.Step):
+
+ __step_type__ = "OnapApiCall"
+
+ def __init__(self, step_cfg, context_cfg):
+ self.step_cfg = step_cfg
+ self.context_cfg = context_cfg
+ self.input = None
+ self.output = None
+ self.rest_def_file = None
+ self.setup_done = False
+ self.curr_path = os.path.dirname(os.path.abspath(__file__))
+
+ def setup(self):
+ options = self.step_cfg['options']
+ self.rest_def_file = options.get("file")
+ self.input = options.get("input")
+ self.output = options.get("output")
+ self.setup_done = True
+
+ def run(self, args):
+ if not self.setup_done:
+ self.setup()
+ params = copy.deepcopy(consts.component_constants)
+ for input_parameter in self.input:
+ param_name = input_parameter['parameter_name']
+ param_value = input_parameter['value']
+ params[param_name] = param_value
+ result = self.execute_operation(params)
+ result_body = result['body']
+ for output_parameter in self.output:
+ param_name = output_parameter['parameter_name']
+ param_path = output_parameter['path']
+ path_list = param_path.split("|")
+ param_value = result_body
+ for path_element in path_list:
+ param_value = param_value[path_element]
+ self.context_cfg[param_name] = param_value
+
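+    # A sketch of the step options consumed above (the parameter names,
+    # values and the response path are hypothetical; 'file' points at a
+    # REST definition such as onboard/create_vlm.yaml, 'input' fills its
+    # {placeholders} and 'output' extracts values from the response body
+    # along a "|"-separated path):
+    #
+    #     options:
+    #       file: onboard/create_vlm.yaml
+    #       input:
+    #         - parameter_name: vendor_name
+    #           value: test_vendor
+    #       output:
+    #         - parameter_name: vlm_id
+    #           path: itemId
+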
+ def execute_operation(self, params, attempt=0):
+ try:
+ return self.execute_operation_impl(params)
+ except Exception as e:
+ LOG.info(str(e))
+ if attempt < 2:
+ time.sleep(15)
+ LOG.info("############# retry operation ##########")
+ attempt = attempt + 1
+ return self.execute_operation(params, attempt)
+ else:
+ raise e
+
+ def execute_operation_impl(self, params):
+ input_yaml = self.rest_def_file
+ LOG.info("########## processing " + input_yaml + "##########")
+ yaml_path = os.path.join(self.curr_path, input_yaml)
+ with open(yaml_path) as info:
+ operation = yaml.load(info)
+ operation = self.format(operation, params)
+ url = operation['url']
+ headers = operation['headers']
+ body = {}
+ if 'body' in operation:
+ body = operation['body']
+ LOG.info(url)
+ LOG.info(headers)
+ LOG.info(body)
+ if 'file' in operation:
+ file_path = operation['file']
+ LOG.info(file_path)
+            # close the upload handle once the request has completed
+            with open(file_path) as upload_file:
+                files = {'upload': upload_file}
+                result = rest_client.upload_file(url, headers, files, LOG)
+ else:
+ result = rest_client.call(url,
+ operation['method'],
+ headers,
+ body,
+ LOG)
+ if result['return_code'] >= 300:
+ raise RuntimeError(
+ "Operation failed. return_code:{}, message:{}".format(result['return_code'], result['body']))
+ LOG.info("Results: " + str(result))
+ return result
+
+    def format(self, d, params):
+        # recursively substitute {placeholders} in strings; scalar values
+        # that are not strings (numbers, booleans, None) are returned as-is
+        if isinstance(d, dict):
+            return {k: self.format(v, params) for k, v in d.iteritems()}
+        if isinstance(d, list):
+            return [self.format(v, params) for v in d]
+        if isinstance(d, basestring):
+            return self.format_string(d, params)
+        return d
+
+    def format_string(self, st, params):
+        # substitute placeholders; if a parameter is missing, default it to
+        # an empty string and retry so the template can still be rendered
+        while True:
+            try:
+                return st.format(**params)
+            except KeyError as e:
+                missing = e.args[0]
+                LOG.info("missing parameter %s, defaulting to empty string", missing)
+                params[missing] = ""
+
+
diff --git a/vnftest/onap/steps/onboard/__init__.py b/vnftest/onap/steps/onboard/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vnftest/onap/steps/onboard/__init__.py
diff --git a/vnftest/onap/steps/onboard/create_vlm.yaml b/vnftest/onap/steps/onboard/create_vlm.yaml
new file mode 100644
index 0000000..dce110a
--- /dev/null
+++ b/vnftest/onap/steps/onboard/create_vlm.yaml
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+
+---
+method: "POST"
+url: "http://{sdc_ip}:{sdc_port}/onboarding-api/v1.0/vendor-license-models"
+headers: {
+ "Content-Type": "application/json",
+ "Authorization": "Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA==",
+ "USER_ID": "{sdc_designer_user}",
+ "Accept": "application/json"
+ }
+body: {
+ "vendorName": "{vendor_name}",
+ "description": "vlm via dovetail",
+ "iconRef": "icon"
+ } \ No newline at end of file
diff --git a/vnftest/ssh.py b/vnftest/ssh.py
new file mode 100644
index 0000000..cca0c2c
--- /dev/null
+++ b/vnftest/ssh.py
@@ -0,0 +1,497 @@
+##############################################################################
+# Copyright 2018 EuropeanSoftwareMarketingLtd.
+# ===================================================================
+# Licensed under the ApacheLicense, Version2.0 (the"License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# software distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and limitations under
+# the License
+##############################################################################
+# vnftest comment: this is a modified copy of rally/rally/common/sshutils.py
+
+"""High level ssh library.
+
+Usage examples:
+
+Execute command and get output:
+
+ ssh = sshclient.SSH("root", "example.com", port=33)
+ status, stdout, stderr = ssh.execute("ps ax")
+ if status:
+ raise Exception("Command failed with non-zero status.")
+ print(stdout.splitlines())
+
+Execute command with huge output:
+
+ class PseudoFile(io.RawIOBase):
+ def write(chunk):
+ if "error" in chunk:
+ email_admin(chunk)
+
+ ssh = SSH("root", "example.com")
+ with PseudoFile() as p:
+ ssh.run("tail -f /var/log/syslog", stdout=p, timeout=False)
+
+Execute local script on remote side:
+
+ ssh = sshclient.SSH("user", "example.com")
+
+ with open("~/myscript.sh", "r") as stdin_file:
+ status, out, err = ssh.execute('/bin/sh -s "arg1" "arg2"',
+ stdin=stdin_file)
+
+Upload file:
+
+ ssh = SSH("user", "example.com")
+ # use rb for binary files
+ with open("/store/file.gz", "rb") as stdin_file:
+ ssh.run("cat > ~/upload/file.gz", stdin=stdin_file)
+
+Eventlet:
+
+ eventlet.monkey_patch(select=True, time=True)
+ or
+ eventlet.monkey_patch()
+ or
+ sshclient = eventlet.import_patched("vnftest.ssh")
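+
+Create a client from a node dictionary (illustrative values; key names
+follow SSH.args_from_node):
+
+    node = {"user": "ubuntu", "ip": "10.0.0.5", "ssh_port": 22,
+            "password": "secret", "name": "vnf-node"}
+    ssh = SSH.from_node(node)
+    status, out, err = ssh.execute("uname -a")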
+
+"""
+from __future__ import absolute_import
+import os
+import io
+import select
+import socket
+import time
+import re
+
+import logging
+
+import paramiko
+from chainmap import ChainMap
+from oslo_utils import encodeutils
+from scp import SCPClient
+import six
+from vnftest.common.utils import try_int
+
+
+def convert_key_to_str(key):
+ if not isinstance(key, (paramiko.RSAKey, paramiko.DSSKey)):
+ return key
+ k = io.StringIO()
+ key.write_private_key(k)
+ return k.getvalue()
+
+
+class SSHError(Exception):
+ pass
+
+
+class SSHTimeout(SSHError):
+ pass
+
+
+class SSH(object):
+ """Represent ssh connection."""
+
+ SSH_PORT = paramiko.config.SSH_PORT
+
+ @staticmethod
+ def gen_keys(key_filename, bit_count=2048):
+ rsa_key = paramiko.RSAKey.generate(bits=bit_count, progress_func=None)
+ rsa_key.write_private_key_file(key_filename)
+ print("Writing %s ..." % key_filename)
+ with open('.'.join([key_filename, "pub"]), "w") as pubkey_file:
+ pubkey_file.write(rsa_key.get_name())
+ pubkey_file.write(' ')
+ pubkey_file.write(rsa_key.get_base64())
+ pubkey_file.write('\n')
+
+ @staticmethod
+ def get_class():
+ # must return static class name, anything else refers to the calling class
+ # i.e. the subclass, not the superclass
+ return SSH
+
+ def __init__(self, user, host, port=None, pkey=None,
+ key_filename=None, password=None, name=None):
+ """Initialize SSH client.
+
+ :param user: ssh username
+ :param host: hostname or ip address of remote ssh server
+ :param port: remote ssh port
+ :param pkey: RSA or DSS private key string or file object
+ :param key_filename: private key filename
+        :param password: password
+        :param name: optional client name, used to tag log messages
+        """
+ self.name = name
+ if name:
+ self.log = logging.getLogger(__name__ + '.' + self.name)
+ else:
+ self.log = logging.getLogger(__name__)
+
+ self.user = user
+ self.host = host
+ # everybody wants to debug this in the caller, do it here instead
+ self.log.debug("user:%s host:%s", user, host)
+
+ # we may get text port from YAML, convert to int
+ self.port = try_int(port, self.SSH_PORT)
+ self.pkey = self._get_pkey(pkey) if pkey else None
+ self.password = password
+ self.key_filename = key_filename
+ self._client = False
+        # paramiko loglevel debug will output ssh protocol debug
+ # we don't ever really want that unless we are debugging paramiko
+ # ssh issues
+ if os.environ.get("PARAMIKO_DEBUG", "").lower() == "true":
+ logging.getLogger("paramiko").setLevel(logging.DEBUG)
+ else:
+ logging.getLogger("paramiko").setLevel(logging.WARN)
+
+ @classmethod
+ def args_from_node(cls, node, overrides=None, defaults=None):
+ if overrides is None:
+ overrides = {}
+ if defaults is None:
+ defaults = {}
+ params = ChainMap(overrides, node, defaults)
+ return {
+ 'user': params['user'],
+ 'host': params['ip'],
+ 'port': params.get('ssh_port', cls.SSH_PORT),
+ 'pkey': params.get('pkey'),
+ 'key_filename': params.get('key_filename'),
+ 'password': params.get('password'),
+ 'name': params.get('name'),
+ }
+
+ @classmethod
+ def from_node(cls, node, overrides=None, defaults=None):
+ return cls(**cls.args_from_node(node, overrides, defaults))
+
+ def _get_pkey(self, key):
+ if isinstance(key, six.string_types):
+ key = six.moves.StringIO(key)
+ errors = []
+ for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
+ try:
+ return key_class.from_private_key(key)
+ except paramiko.SSHException as e:
+ errors.append(e)
+ raise SSHError("Invalid pkey: %s" % (errors))
+
+ @property
+ def is_connected(self):
+ return bool(self._client)
+
+ def _get_client(self):
+ if self.is_connected:
+ return self._client
+ try:
+ self._client = paramiko.SSHClient()
+ self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self._client.connect(self.host, username=self.user,
+ port=self.port, pkey=self.pkey,
+ key_filename=self.key_filename,
+ password=self.password,
+ allow_agent=False, look_for_keys=False,
+ timeout=1)
+ return self._client
+ except Exception as e:
+ message = ("Exception %(exception_type)s was raised "
+ "during connect. Exception value is: %(exception)r")
+ self._client = False
+ raise SSHError(message % {"exception": e,
+ "exception_type": type(e)})
+
+ def _make_dict(self):
+ return {
+ 'user': self.user,
+ 'host': self.host,
+ 'port': self.port,
+ 'pkey': self.pkey,
+ 'key_filename': self.key_filename,
+ 'password': self.password,
+ 'name': self.name,
+ }
+
+ def copy(self):
+ return self.get_class()(**self._make_dict())
+
+ def close(self):
+ if self._client:
+ self._client.close()
+ self._client = False
+
+ def run(self, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+ """Execute specified command on the server.
+
+ :param cmd: Command to be executed.
+ :type cmd: str
+ :param stdin: Open file or string to pass to stdin.
+ :param stdout: Open file to connect to stdout.
+ :param stderr: Open file to connect to stderr.
+        :param raise_on_error: If False then the exit code will be returned.
+            If True then an exception will be raised on a non-zero exit code.
+ :param timeout: Timeout in seconds for command execution.
+ Default 1 hour. No timeout if set to 0.
+ :param keep_stdin_open: don't close stdin on empty reads
+ :type keep_stdin_open: bool
+ :param pty: Request a pseudo terminal for this connection.
+ This allows passing control characters.
+ Default False.
+ :type pty: bool
+ """
+
+ client = self._get_client()
+
+ if isinstance(stdin, six.string_types):
+ stdin = six.moves.StringIO(stdin)
+
+ return self._run(client, cmd, stdin=stdin, stdout=stdout,
+ stderr=stderr, raise_on_error=raise_on_error,
+ timeout=timeout,
+ keep_stdin_open=keep_stdin_open, pty=pty)
+
+ def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+
+ transport = client.get_transport()
+ session = transport.open_session()
+ if pty:
+ session.get_pty()
+ session.exec_command(cmd)
+ start_time = time.time()
+
+ # encode on transmit, decode on receive
+ data_to_send = encodeutils.safe_encode("", incoming='utf-8')
+ stderr_data = None
+
+ # If we have data to be sent to stdin then `select' should also
+ # check for stdin availability.
+ if stdin and not stdin.closed:
+ writes = [session]
+ else:
+ writes = []
+
+ while True:
+ # Block until data can be read/write.
+ r, w, e = select.select([session], writes, [session], 1)
+
+ if session.recv_ready():
+ data = encodeutils.safe_decode(session.recv(4096), 'utf-8')
+ self.log.debug("stdout: %r", data)
+ if stdout is not None:
+ stdout.write(data)
+ continue
+
+ if session.recv_stderr_ready():
+ stderr_data = encodeutils.safe_decode(
+ session.recv_stderr(4096), 'utf-8')
+ self.log.debug("stderr: %r", stderr_data)
+ if stderr is not None:
+ stderr.write(stderr_data)
+ continue
+
+ if session.send_ready():
+ if stdin is not None and not stdin.closed:
+ if not data_to_send:
+ stdin_txt = stdin.read(4096)
+ if stdin_txt is None:
+ stdin_txt = ''
+ data_to_send = encodeutils.safe_encode(
+ stdin_txt, incoming='utf-8')
+ if not data_to_send:
+ # we may need to keep stdin open
+ if not keep_stdin_open:
+ stdin.close()
+ session.shutdown_write()
+ writes = []
+ if data_to_send:
+ sent_bytes = session.send(data_to_send)
+ # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
+ data_to_send = data_to_send[sent_bytes:]
+
+ if session.exit_status_ready():
+ break
+
+ if timeout and (time.time() - timeout) > start_time:
+ args = {"cmd": cmd, "host": self.host}
+ raise SSHTimeout("Timeout executing command "
+ "'%(cmd)s' on host %(host)s" % args)
+ if e:
+ raise SSHError("Socket error.")
+
+ exit_status = session.recv_exit_status()
+ if exit_status != 0 and raise_on_error:
+ fmt = "Command '%(cmd)s' failed with exit_status %(status)d."
+ details = fmt % {"cmd": cmd, "status": exit_status}
+ if stderr_data:
+ details += " Last stderr data: '%s'." % stderr_data
+ raise SSHError(details)
+ return exit_status
+
+ def execute(self, cmd, stdin=None, timeout=3600):
+ """Execute the specified command on the server.
+
+ :param cmd: Command to be executed.
+ :param stdin: Open file to be sent on process stdin.
+ :param timeout: Timeout for execution of the command.
+
+ :returns: tuple (exit_status, stdout, stderr)
+ """
+ stdout = six.moves.StringIO()
+ stderr = six.moves.StringIO()
+
+ exit_status = self.run(cmd, stderr=stderr,
+ stdout=stdout, stdin=stdin,
+ timeout=timeout, raise_on_error=False)
+ stdout.seek(0)
+ stderr.seek(0)
+ return exit_status, stdout.read(), stderr.read()
+
+ def wait(self, timeout=120, interval=1):
+ """Wait for the host will be available via ssh."""
+ start_time = time.time()
+ while True:
+ try:
+ return self.execute("uname")
+ except (socket.error, SSHError) as e:
+ self.log.debug("Ssh is still unavailable: %r", e)
+ time.sleep(interval)
+ if time.time() > (start_time + timeout):
+                    raise SSHTimeout("Timeout waiting for '%s'" % self.host)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.put(files, remote_path, recursive)
+
+ def get(self, remote_path, local_path='/tmp/', recursive=True):
+ client = self._get_client()
+
+ with SCPClient(client.get_transport()) as scp:
+ scp.get(remote_path, local_path, recursive)
+
+ # keep shell running in the background, e.g. screen
+ def send_command(self, command):
+ client = self._get_client()
+ client.exec_command(command, get_pty=True)
+
+ def _put_file_sftp(self, localpath, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.put(localpath, remotepath)
+ if mode is None:
+ mode = 0o777 & os.stat(localpath).st_mode
+ sftp.chmod(remotepath, mode)
+
+ TILDE_EXPANSIONS_RE = re.compile("(^~[^/]*/)?(.*)")
+
+ def _put_file_shell(self, localpath, remotepath, mode=None):
+        # quote to stop word splitting
+ tilde, remotepath = self.TILDE_EXPANSIONS_RE.match(remotepath).groups()
+ if not tilde:
+ tilde = ''
+ cmd = ['cat > %s"%s"' % (tilde, remotepath)]
+ if mode is not None:
+ # use -- so no options
+ cmd.append('chmod -- 0%o %s"%s"' % (mode, tilde, remotepath))
+
+ with open(localpath, "rb") as localfile:
+ # only chmod on successful cat
+ self.run("&& ".join(cmd), stdin=localfile)
+
+ def put_file(self, localpath, remotepath, mode=None):
+ """Copy specified local file to the server.
+
+ :param localpath: Local filename.
+ :param remotepath: Remote filename.
+ :param mode: Permissions to set after upload
+ """
+ try:
+ self._put_file_sftp(localpath, remotepath, mode=mode)
+ except (paramiko.SSHException, socket.error):
+ self._put_file_shell(localpath, remotepath, mode=mode)
+
+ def put_file_obj(self, file_obj, remotepath, mode=None):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.putfo(file_obj, remotepath)
+ if mode is not None:
+ sftp.chmod(remotepath, mode)
+
+ def get_file_obj(self, remotepath, file_obj):
+ client = self._get_client()
+
+ with client.open_sftp() as sftp:
+ sftp.getfo(remotepath, file_obj)
+
+
+class AutoConnectSSH(SSH):
+
+ # always wait or we will get OpenStack SSH errors
+ def __init__(self, user, host, port=None, pkey=None,
+ key_filename=None, password=None, name=None, wait=True):
+ super(AutoConnectSSH, self).__init__(user, host, port, pkey, key_filename, password, name)
+ self._wait = wait
+
+ def _make_dict(self):
+ data = super(AutoConnectSSH, self)._make_dict()
+ data.update({
+ 'wait': self._wait
+ })
+ return data
+
+ def _connect(self):
+ if not self.is_connected:
+ self._get_client()
+ if self._wait:
+ self.wait()
+
+ def drop_connection(self):
+ """ Don't close anything, just force creation of a new client """
+ self._client = False
+
+ def execute(self, cmd, stdin=None, timeout=3600):
+ self._connect()
+ return super(AutoConnectSSH, self).execute(cmd, stdin, timeout)
+
+ def run(self, cmd, stdin=None, stdout=None, stderr=None,
+ raise_on_error=True, timeout=3600,
+ keep_stdin_open=False, pty=False):
+ self._connect()
+ return super(AutoConnectSSH, self).run(cmd, stdin, stdout, stderr, raise_on_error,
+ timeout, keep_stdin_open, pty)
+
+ def put(self, files, remote_path=b'.', recursive=False):
+ self._connect()
+ return super(AutoConnectSSH, self).put(files, remote_path, recursive)
+
+ def put_file(self, local_path, remote_path, mode=None):
+ self._connect()
+ return super(AutoConnectSSH, self).put_file(local_path, remote_path, mode)
+
+ def put_file_obj(self, file_obj, remote_path, mode=None):
+ self._connect()
+ return super(AutoConnectSSH, self).put_file_obj(file_obj, remote_path, mode)
+
+ def get_file_obj(self, remote_path, file_obj):
+ self._connect()
+ return super(AutoConnectSSH, self).get_file_obj(remote_path, file_obj)
+
+ @staticmethod
+ def get_class():
+ # must return static class name, anything else refers to the calling class
+ # i.e. the subclass, not the superclass
+ return AutoConnectSSH