author     Moshe <moshehoa@amdocs.com>  2019-01-17 18:04:54 +0200
committer  Moshe <moshehoa@amdocs.com>  2019-01-17 18:05:01 +0200
commit     6ed81bdf24927a0900d4857d7652ae48cc8c78b1 (patch)
tree       821fff9c7ef18e7ce57ce353e75420900c896948
parent     1112c6e3e6eae3aa10680b2d7b0d653de1a4bc0b (diff)
Refactor test reporting
Issue-ID: VNFSDK-350
Change-Id: I66a82ab56dd6702903e4d1edf776a6d29cb4e836
Signed-off-by: Moshe <moshehoa@amdocs.com>
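
The most visible API change in this refactor is that Task now receives the parsed CLI arguments in its constructor, start() takes no arguments, and the overall verdict is exposed as a top-level 'criteria' key of the returned dict. A minimal sketch of the new call pattern, assuming a `param` object like the one change_osloobj_to_paras() produces in vnftest/cmd/commands/task.py:

```python
# Sketch of the refactored entry point (see vnftest/cmd/commands/task.py below).
# `param` is assumed to carry the usual CLI attributes (inputfile, output_file,
# suite, task_args, task_args_file, parse_only, keep_deploy, task_id).
from vnftest.core.task import Task

def run(param):
    result = Task(param).start()            # arguments move to the constructor
    if result.get('criteria') == 'PASS':    # verdict is now a top-level key
        print('Task SUCCESS')
    else:
        print('Task FAILED')
    return result
```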
-rw-r--r--  etc/vnftest/vnftest.yaml                     6
-rw-r--r--  tools/vnftest_test_config.yaml               6
-rw-r--r--  vnftest/cmd/commands/task.py                 6
-rw-r--r--  vnftest/common/constants.py                  6
-rw-r--r--  vnftest/common/html_template.py             13
-rw-r--r--  vnftest/core/report.py                       7
-rw-r--r--  vnftest/core/task.py                       403
-rw-r--r--  vnftest/core/testcase.py                    10
-rw-r--r--  vnftest/dispatcher/base.py                   2
-rw-r--r--  vnftest/dispatcher/file.py                  10
-rw-r--r--  vnftest/dispatcher/http.py                  21
-rw-r--r--  vnftest/tests/unit/core/test_task.py        15
-rw-r--r--  vnftest/tests/unit/onap/test_rest_call.py    5
13 files changed, 266 insertions, 244 deletions
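
Before the per-file hunks, it helps to see the result layout that the new TaskInfo helper builds and that the HTML template, the report command and the dispatchers now consume. The sketch below is reconstructed from the TaskInfo class added to vnftest/core/task.py in this change; the concrete values are illustrative only.

```python
# Shape of TaskInfo.result() after the refactor (values are made up).
task_result = {
    'task_id': '9f6b3c2e-0000-0000-0000-000000000000',  # hypothetical UUID
    'status': 'FINISHED',        # IN_PROGRESS until task_end()/task_fatal()
    'criteria': 'PASS',          # PASS, FAIL or FATAL
    'info': {},                  # free-form task metadata
    'testcases': [
        {
            'name': 'dummy_case',
            'status': 'FINISHED',
            'criteria': 'PASS',
            'steps': [
                {
                    'name': 'Dummy Step',
                    'status': 'FINISHED',
                    'criteria': 'PASS',
                    'results': [],  # per-iteration {'timestamp', 'sequence', 'data', 'errors'} entries
                },
            ],
        },
    ],
}
```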
diff --git a/etc/vnftest/vnftest.yaml b/etc/vnftest/vnftest.yaml
index cd5b961..f1b7750 100644
--- a/etc/vnftest/vnftest.yaml
+++ b/etc/vnftest/vnftest.yaml
@@ -16,8 +16,4 @@ dir:
   conf: /etc/nvftest
   repos: /home/onap/repos/vnftest
   log: /var/log/vnftest
-
-file:
-  output_file: /tmp/vnftest.out
-  html_file: /tmp/vnftest.htm
-  reporting_file: /tmp/report.html
+  report: /tmp
\ No newline at end of file
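
The dedicated file: section is gone; both configuration files now set only dir.report, which constants.py exposes as REPORT_DIR, and every artifact name embeds the task id. A sketch of the resulting paths, assuming the defaults from this change and a made-up task id:

```python
# How the per-task output paths are built after this change, following
# constants.py, core/report.py, core/task.py and dispatcher/file.py below.
import os

REPORT_DIR = '/tmp'   # get_param('dir.report', '/tmp/') in constants.py
task_id = '1234'      # hypothetical task id

html_report = os.path.join(REPORT_DIR, 'vnftest_' + task_id + '.htm')   # vnftest report generate
task_report = os.path.join(REPORT_DIR, 'report_' + task_id + '.html')   # Task._generate_reporting()
json_output = os.path.join(REPORT_DIR, 'vnftest_' + task_id + '.out')   # FileDispatcher default
```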
diff --git a/tools/vnftest_test_config.yaml b/tools/vnftest_test_config.yaml
index cb2aa4b..ca72694 100644
--- a/tools/vnftest_test_config.yaml
+++ b/tools/vnftest_test_config.yaml
@@ -16,8 +16,4 @@ dir:
   conf: ./
   repos: ./
   log: ./
-
-file:
-  output_file: /tmp/vnftest.out
-  html_file: /tmp/vnftest.htm
-  reporting_file: /tmp/report.html
+  report: /tmp
\ No newline at end of file diff --git a/vnftest/cmd/commands/task.py b/vnftest/cmd/commands/task.py index e10ac30..b839c8a 100644 --- a/vnftest/cmd/commands/task.py +++ b/vnftest/cmd/commands/task.py @@ -45,19 +45,19 @@ class TaskCommands(object): # pragma: no cover output_file_default, default=output_file_default) @cliargs("--suite", help="process test suite file instead of a task file", action="store_true") - def do_start(self, args, **kwargs): + def do_start(self, args): param = change_osloobj_to_paras(args) self.output_file = param.output_file result = {} LOG.info('Task START') try: - result = Task().start(param, **kwargs) + result = Task(param).start() except Exception as e: self._write_error_data(e) LOG.exception("") - if result.get('result', {}).get('criteria') == 'PASS': + if result.get('criteria') == 'PASS': LOG.info('Task SUCCESS') else: LOG.info('Task FAILED') diff --git a/vnftest/common/constants.py b/vnftest/common/constants.py index 9996ca6..9634708 100644 --- a/vnftest/common/constants.py +++ b/vnftest/common/constants.py @@ -88,6 +88,7 @@ PACKAGE_DIR = get_param('dir.packages', join(VNFTEST_ROOT_PATH, '../../packages/ CONF_FILE = join(CONF_DIR, 'vnftest.conf') REPOS_DIR = get_param('dir.repos', join(VNFTEST_ROOT_PATH, '..')) LOG_DIR = get_param('dir.log', '/var/log/vnftest/') +REPORT_DIR = get_param('dir.report', '/tmp/') TASK_LOG_DIR = get_param('dir.tasklog', '/var/log/vnftest/') CONF_SAMPLE_DIR = join(REPOS_DIR, 'etc/vnftest/') @@ -95,11 +96,6 @@ SAMPLE_CASE_DIR = join(REPOS_DIR, 'samples') TESTCASE_DIR = join(VNFTEST_ROOT_PATH, 'vnftest/test_config/onap/test_cases/') TESTSUITE_DIR = join(VNFTEST_ROOT_PATH, 'vnftest/test_config/onap/test_suites/') -# file -DEFAULT_OUTPUT_FILE = get_param('file.output_file', '/tmp/vnftest.out') -DEFAULT_HTML_FILE = get_param('file.html_file', '/tmp/vnftest.htm') -REPORTING_FILE = get_param('file.reporting_file', '/tmp/report.html') - # api API_PORT = 5000 DOCKER_URL = 'unix://var/run/docker.sock' diff --git a/vnftest/common/html_template.py b/vnftest/common/html_template.py index 7a91781..0196ba3 100644 --- a/vnftest/common/html_template.py +++ b/vnftest/common/html_template.py @@ -146,9 +146,9 @@ report_template = """ <hr/> <div> - <div>Task ID : {{result.task_id}} </div> + <div>Task ID : {{task_id}} </div> <div style="margin-top:5px;">Criteria : - <font> {{result.criteria}}</font> + <font> {{criteria}}</font> </div> <hr/> @@ -160,7 +160,7 @@ report_template = """ <th>value</th> </tr> <tbody> - {% for key, value in result.info.items() %} + {% for key, value in info.items() %} <tr> <td>{{ loop.index }}</td> <td>{{key}}</td> @@ -179,12 +179,11 @@ report_template = """ <th>value</th> </tr> <tbody> - {% for key, value in result.testcases.items() %} + {% for testcase in testcases %} <tr> <td>{{ loop.index }}</td> - <td>{{key}}</td> - <td>{{value.criteria}}</td> - <td>{{value.output}}</td> + <td>{{testcase.name}}</td> + <td>{{testcase.criteria}}</td> </tr> {% endfor %} </tbody> diff --git a/vnftest/core/report.py b/vnftest/core/report.py index b8f8bb7..c648848 100644 --- a/vnftest/core/report.py +++ b/vnftest/core/report.py @@ -19,6 +19,7 @@ from __future__ import absolute_import from __future__ import print_function # import ast +import os import re import uuid @@ -120,7 +121,9 @@ class Report(object): "Timestamp": self.Timestamp, "task_id": self.task_id, "table": table_vals}) - with open(consts.DEFAULT_HTML_FILE, "w") as file_open: + file_name = 'vnftest_' + str(self.task_id) + '.htm' + report_file = os.path.join(consts.REPORT_DIR, 
file_name) + with open(report_file, "w") as file_open: file_open.write(Template_html.render(Context_html)) - print("Report generated. View /tmp/vnftest.htm") + print("Report generated. View " + report_file) diff --git a/vnftest/core/task.py b/vnftest/core/task.py index 41756af..a919b1f 100644 --- a/vnftest/core/task.py +++ b/vnftest/core/task.py @@ -26,7 +26,6 @@ import logging import sys import time import uuid -from collections import OrderedDict import ipaddress import os @@ -61,9 +60,13 @@ class Task(object): # pragma: no cover Set of commands to manage benchmark tasks. """ - def __init__(self): + def __init__(self, args): self.context = None self.outputs = {} + self.args = args or {} + task_id = getattr(args, 'task_id', None) + self.task_id = task_id if task_id is not None else str(uuid.uuid4()) + self.task_info = TaskInfo(task_id) def _set_dispatchers(self, output_config): dispatchers = output_config.get('DEFAULT', {}).get('dispatcher', @@ -71,14 +74,9 @@ class Task(object): # pragma: no cover out_types = [s.strip() for s in dispatchers.split(',')] output_config['DEFAULT']['dispatcher'] = out_types - def start(self, args, **kwargs): + def start(self): atexit.register(self.atexit_handler) - - task_id = getattr(args, 'task_id') - self.task_id = task_id if task_id else str(uuid.uuid4()) - self._set_log() - try: output_config = utils.parse_ini_file(CONF_FILE) except Exception: @@ -86,7 +84,7 @@ class Task(object): # pragma: no cover output_config = {} self._init_output_config(output_config) - self._set_output_config(output_config, args.output_file) + self._set_output_config(output_config, self.args.output_file) LOG.debug('Output configuration is: %s', output_config) self._set_dispatchers(output_config) @@ -94,79 +92,77 @@ class Task(object): # pragma: no cover # update dispatcher list if 'file' in output_config['DEFAULT']['dispatcher']: result = {'status': 0, 'result': {}} - utils.write_json_to_file(args.output_file, result) + utils.write_json_to_file(self.args.output_file, result) total_start_time = time.time() - parser = TaskParser(args.inputfile) + parser = TaskParser(self.args.inputfile) - if args.suite: + if self.args.suite: # 1.parse suite, return suite_params info task_files, task_args_list, task_args_fnames = \ parser.parse_suite() else: task_files = [parser.path] - task_args_list = [args.task_args] - task_args_fnames = [args.task_args_file] + task_args_list = [self.args.task_args] + task_args_fnames = [self.args.task_args_file] LOG.debug("task_files:%s, task_args_list:%s, task_args_fnames:%s", task_files, task_args_list, task_args_fnames) - if args.parse_only: + if self.args.parse_only: sys.exit(0) - testcases = {} - # parse task_files - for i in range(0, len(task_files)): - one_task_start_time = time.time() - # the output of the previous task is the input of the new task - inputs = copy.deepcopy(self.outputs) - task_args_file = task_args_fnames[i] - task_args = task_args_list[i] - try: - if task_args_file: - with utils.load_resource(task_args_file) as f: - inputs.update(parse_task_args("task_args_file", f.read())) - inputs.update(parse_task_args("task_args", task_args)) - except TypeError: - raise TypeError() - parser.path = task_files[i] - steps, run_in_parallel, meet_precondition, ret_context = \ - parser.parse_task(self.task_id, inputs) - - self.context = ret_context - - if not meet_precondition: - LOG.info("meet_precondition is %s, please check envrionment", - meet_precondition) - continue - - case_name = os.path.splitext(os.path.basename(task_files[i]))[0] - try: - data = 
self._run(steps, run_in_parallel, args.output_file, inputs) - except KeyboardInterrupt: - raise - except Exception: - LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True) - testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []} - else: - criteria = self.evaluate_task_criteria(data) - testcases[case_name] = {'criteria': criteria, 'tc_data': data, 'output': self.outputs} - - if args.keep_deploy: - # keep deployment, forget about stack - # (hide it for exit handler) - self.context = None - else: - self.context.undeploy() - self.context = None - one_task_end_time = time.time() - LOG.info("Task %s finished in %d secs", task_files[i], - one_task_end_time - one_task_start_time) - - result = self._get_format_result(testcases) + try: + for i in range(0, len(task_files)): + one_task_start_time = time.time() + # the output of the previous task is the input of the new task + inputs = copy.deepcopy(self.outputs) + task_args_file = task_args_fnames[i] + task_args = task_args_list[i] + try: + if task_args_file: + with utils.load_resource(task_args_file) as f: + inputs.update(parse_task_args("task_args_file", f.read())) + inputs.update(parse_task_args("task_args", task_args)) + except TypeError: + raise TypeError() + parser.path = task_files[i] + steps, run_in_parallel, meet_precondition, ret_context = \ + parser.parse_task(self.task_id, inputs) + + self.context = ret_context + + if not meet_precondition: + LOG.info("meet_precondition is %s, please check envrionment", + meet_precondition) + continue - self._do_output(output_config, result) - self._generate_reporting(result) + case_name = os.path.splitext(os.path.basename(task_files[i]))[0] + try: + self._run(steps, case_name, run_in_parallel, self.args.output_file, inputs) + except KeyboardInterrupt: + raise + except Exception: + LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True) + + if self.args.keep_deploy: + # keep deployment, forget about stack + # (hide it for exit handler) + self.context = None + else: + self.context.undeploy() + self.context = None + one_task_end_time = time.time() + LOG.info("Task %s finished in %d secs", task_files[i], + one_task_end_time - one_task_start_time) + except Exception as e: + LOG.error("Task fatal error", e) + self.task_info.task_fatal() + finally: + self.task_info.task_end() + + self._do_output(output_config) + self._generate_reporting() total_end_time = time.time() LOG.info("Total finished in %d secs", @@ -176,14 +172,16 @@ class Task(object): # pragma: no cover LOG.info("To generate report, execute command " "'vnftest report generate %(task_id)s %(tc)s'", step) LOG.info("Task ALL DONE, exiting") - return result + return self.task_info.result() - def _generate_reporting(self, result): + def _generate_reporting(self): env = Environment() - with open(constants.REPORTING_FILE, 'w') as f: - f.write(env.from_string(report_template).render(result)) + file_name = 'report_' + str(self.task_id) + '.html' + report_file = os.path.join(constants.REPORT_DIR, file_name) + with open(report_file, 'w') as f: + f.write(env.from_string(report_template).render(self.task_info.result())) - LOG.info("Report can be found in '%s'", constants.REPORTING_FILE) + LOG.info("Report can be found in '%s'", report_file) def _set_log(self): log_format = '%(asctime)s %(name)s %(filename)s:%(lineno)d %(levelname)s %(message)s' @@ -221,104 +219,61 @@ class Task(object): # pragma: no cover k = 'dispatcher_{}'.format(output_config['DEFAULT']['dispatcher']) output_config[k]['target'] = target - def _get_format_result(self, 
testcases): - criteria = self._get_task_criteria(testcases) - - info = { - 'deploy_step': os.environ.get('DEPLOY_STEP', 'unknown'), - 'installer': os.environ.get('INSTALLER_TYPE', 'unknown'), - 'pod_name': os.environ.get('NODE_NAME', 'unknown'), - 'version': os.environ.get('VNFTEST_BRANCH', 'unknown') - } - - result = { - 'status': 1, - 'result': { - 'criteria': criteria, - 'task_id': self.task_id, - 'info': info, - 'testcases': testcases - } - } - - return result - - def _get_task_criteria(self, testcases): - criteria = any(t.get('criteria') != 'PASS' for t in testcases.values()) - if criteria: - return 'FAIL' - else: - return 'PASS' - - def evaluate_task_criteria(self, steps_result_list): - for step_result in steps_result_list: - errors_list = step_result['errors'] - if errors_list is not None and len(errors_list) > 0: - return 'FAIL' - return 'PASS' - - def _do_output(self, output_config, result): + def _do_output(self, output_config): dispatchers = DispatcherBase.get(output_config) for dispatcher in dispatchers: - dispatcher.flush_result_data(result) + dispatcher.flush_result_data(self.task_id, self.task_info.result()) - def _run(self, steps, run_in_parallel, output_file, inputs): + def _run(self, steps, case_name, run_in_parallel, output_file, inputs): """Deploys context and calls runners""" if self.context: self.context.deploy() - background_runners = [] - - result = [] - # Start all background steps - for step in filter(_is_background_step, steps): - step["runner"] = dict(type="Duration", duration=1000000000) - runner = self.run_one_step(step, output_file, inputs) - background_runners.append(runner) - - runners = [] - if run_in_parallel: - for step in steps: - if not _is_background_step(step): - runner = self.run_one_step(step, output_file, inputs) - runners.append(runner) - - # Wait for runners to finish - for runner in runners: - status = runner_join(runner, background_runners, self.outputs, result) - if status != 0: - raise RuntimeError( - "{0} runner status {1}".format(runner.__execution_type__, status)) - LOG.info("Runner ended, output in %s", output_file) - else: - # run serially + try: + self.task_info.testcase_start(case_name) for step in steps: - if not _is_background_step(step): - runner = self.run_one_step(step, output_file, inputs) - status = runner_join(runner, background_runners, self.outputs, result) - if status != 0: - LOG.error('Step NO.%s: "%s" ERROR!', - steps.index(step) + 1, - step.get('type')) - raise RuntimeError( - "{0} runner status {1}".format(runner.__execution_type__, status)) - LOG.info("Runner ended, output in %s", output_file) - - # Abort background runners - for runner in background_runners: - runner.abort() - - # Wait for background runners to finish - for runner in background_runners: - status = runner.join(self.outputs, result) - if status is None: - # Nuke if it did not stop nicely - base_runner.Runner.terminate(runner) - runner.join(self.outputs, result) - base_runner.Runner.release(runner) - - print("Background task ended") - return result + step_unique_id = self.task_info.step_add(case_name, step['name']) + step['step_unique_id'] = step_unique_id + + background_runners = [] + result = [] + # Start all background steps + for step in filter(_is_background_step, steps): + step["runner"] = dict(type="Duration", duration=1000000000) + self.task_info.step_start(step['step_unique_id']) + runner = self.run_one_step(step, output_file, inputs) + background_runners.append([step, runner]) + + runners = [] + if run_in_parallel: + for step in steps: + if not 
_is_background_step(step): + self.task_info.step_start(step['step_unique_id']) + runner = self.run_one_step(step, output_file, inputs) + runners.append([step, runner]) + + # Wait for runners to finish + for runner_item in runners: + self.finalize_step(runner_item[0], runner_item[1], result) + else: + # run serially + for step in steps: + if not _is_background_step(step): + self.task_info.step_start(step['step_unique_id']) + runner = self.run_one_step(step, output_file, inputs) + self.finalize_step(step, runner, result) + + # Abort background runners + for runner_item in background_runners: + runner_item[1].abort() + + # Wait for background runners to finish + for runner_item in background_runners: + runner = runner_item[1] + self.finalize_step(step, runner, result) + return result + finally: + self.task_info.testcase_end(case_name) def atexit_handler(self): """handler for process termination""" @@ -355,6 +310,16 @@ class Task(object): # pragma: no cover runner.run(step_cfg, self.context, inputs) return runner + def finalize_step(self, step, runner, result): + step_result_list = [] + status = runner_join(runner, self.outputs, step_result_list) + self.task_info.step_end(step['step_unique_id'], step_result_list) + if status != 0: + raise RuntimeError( + "{0} runner status {1}".format(runner.__execution_type__, status)) + LOG.info("Runner ended") + result.extend(step_result_list) + class TaskParser(object): # pragma: no cover """Parser for task config files in yaml format""" @@ -492,26 +457,17 @@ class TaskParser(object): # pragma: no cover if "precondition" in cfg: precondition = cfg["precondition"] installer_type = precondition.get("installer_type", None) - deploy_steps = precondition.get("deploy_steps", None) tc_fit_pods = precondition.get("pod_name", None) installer_type_env = os.environ.get('INSTALL_TYPE', None) - deploy_step_env = os.environ.get('DEPLOY_STEP', None) pod_name_env = os.environ.get('NODE_NAME', None) LOG.info("installer_type: %s, installer_type_env: %s", installer_type, installer_type_env) - LOG.info("deploy_steps: %s, deploy_step_env: %s", - deploy_steps, deploy_step_env) LOG.info("tc_fit_pods: %s, pod_name_env: %s", tc_fit_pods, pod_name_env) if installer_type and installer_type_env: if installer_type_env not in installer_type: return False - if deploy_steps and deploy_step_env: - deploy_steps_list = deploy_steps.split(',') - for deploy_step in deploy_steps_list: - if deploy_step_env.startswith(deploy_step): - return True return False if tc_fit_pods and pod_name_env: if pod_name_env not in tc_fit_pods: @@ -519,6 +475,97 @@ class TaskParser(object): # pragma: no cover return True +class TaskInfo(object): + info_dict = {} + info_dict['status'] = 'IN_PROGRESS' + info_dict['criteria'] = 'N/A' + result_info_dict = {} + info_dict['info'] = result_info_dict + test_cases_list = [] + info_dict['testcases'] = test_cases_list + helper_test_cases_dict = {} + helper_test_steps_dict = {} + step_id_helper = 0 + + def __init__(self, task_id): + self.info_dict['task_id'] = task_id + + def task_end(self): + if self.info_dict['criteria'] == 'N/A': + criteria = 'PASS' + for testcase in self.info_dict['testcases']: + if testcase['criteria'] == 'FAIL': + criteria = 'FAIL' + break + self.info_dict['criteria'] = criteria + self.info_dict['status'] = 'FINISHED' + + def task_fatal(self): + self.info_dict['criteria'] = 'FATAL' + self.info_dict['status'] = 'FINISHED' + + def testcase_start(self, testcase_name): + testcase_dict = {} + testcase_dict['name'] = testcase_name + testcase_dict['criteria'] 
= 'N/A' + testcase_dict['status'] = 'IN_PROGRESS' + testcase_dict['steps'] = [] + self.test_cases_list.append(testcase_dict) + self.helper_test_cases_dict[testcase_name] = testcase_dict + + def testcase_end(self, testcase_name): + testcase_dict = self.helper_test_cases_dict[testcase_name] + criteria = 'PASS' + for step in testcase_dict['steps']: + if step['criteria'] == 'FAIL': + criteria = 'FAIL' + break + testcase_dict['criteria'] = criteria + testcase_dict['status'] = 'FINISHED' + + def step_add(self, testcase_name, step_name): + step_dict = {} + step_dict['name'] = step_name + step_dict['criteria'] = 'N/A' + step_dict['status'] = 'NOT_STARTED' + step_dict['results'] = [] + testcase_dict = self.helper_test_cases_dict[testcase_name] + testcase_dict['steps'].append(step_dict) + self.step_id_helper += 1 + step_unique_id = step_name + '_' + str(self.step_id_helper) + self.helper_test_steps_dict[step_unique_id] = step_dict + return step_unique_id + + def step_start(self, step_unique_id): + step_dict = self.helper_test_steps_dict[step_unique_id] + step_dict['status'] = 'IN_PROGRESS' + + def step_end(self, step_unique_id, result_list): + step_dict = self.helper_test_steps_dict[step_unique_id] + errors_count = 0 + for result in result_list: + result_item = { + 'timestamp': result['timestamp'], + 'sequence': result['sequence'], + 'data': [], + 'errors': [] + } + for k, v in result['data'].items(): + result_item['data'].append({'type': 'String', 'key': k, 'value': str(v)}) + + for error in result['errors']: + result_item['errors'].append({'type': 'String', 'key': 'error', 'value': str(error)}) + errors_count += 1 + step_dict['results'].append(result_item) + if errors_count > 0: + step_dict['criteria'] = 'FAIL' + else: + step_dict['criteria'] = 'PASS' + step_dict['status'] = 'FINISHED' + + def result(self): + return self.info_dict + def is_ip_addr(addr): """check if string addr is an IP address""" try: @@ -541,22 +588,10 @@ def _is_background_step(step): return False -def runner_join(runner, background_runners, outputs, result): - """join (wait for) a runner, exit process at runner failure - :param background_runners: - :type background_runners: - :param outputs: - :type outputs: dict - :param result: - :type result: list - """ +def runner_join(runner, outputs, result): while runner.poll() is None: outputs.update(runner.get_output()) result.extend(runner.get_result()) - # drain all the background runner queues - for background in background_runners: - outputs.update(background.get_output()) - result.extend(background.get_result()) status = runner.join(outputs, result) base_runner.Runner.release(runner) return status diff --git a/vnftest/core/testcase.py b/vnftest/core/testcase.py index 2c5c4b4..df86dd6 100644 --- a/vnftest/core/testcase.py +++ b/vnftest/core/testcase.py @@ -63,14 +63,13 @@ class Testcase(object): LOG.exception('Failed to load test case:\n%s\n', testcase_file) raise - description, installer, deploy_steps = self._parse_testcase( + description, installer = self._parse_testcase( testcase_info) record = { 'Name': testcase_file.split(".")[0], 'Description': description, - 'installer': installer, - 'deploy_steps': deploy_steps + 'installer': installer } return record @@ -82,11 +81,8 @@ class Testcase(object): test_precondition = testcase_cfg.get('precondition', {}) installer_type = test_precondition.get('installer_type', 'all') - deploy_steps = test_precondition.get('deploy_steps', 'all') - description = self._get_description(testcase_cfg) - - return description, installer_type, 
deploy_steps + return description, installer_type def _get_description(self, testcase_cfg): try: diff --git a/vnftest/dispatcher/base.py b/vnftest/dispatcher/base.py index e53dd96..932b8ef 100644 --- a/vnftest/dispatcher/base.py +++ b/vnftest/dispatcher/base.py @@ -46,5 +46,5 @@ class Base(object): return list_dispatcher @abc.abstractmethod - def flush_result_data(self, data): + def flush_result_data(self, id, data): """Flush result data into permanent storage media interface.""" diff --git a/vnftest/dispatcher/file.py b/vnftest/dispatcher/file.py index 83d0fee..2504457 100644 --- a/vnftest/dispatcher/file.py +++ b/vnftest/dispatcher/file.py @@ -16,6 +16,8 @@ from __future__ import absolute_import +import os + from vnftest.dispatcher.base import Base as DispatchBase from vnftest.common import constants as consts from vnftest.common import utils @@ -29,8 +31,8 @@ class FileDispatcher(DispatchBase): def __init__(self, conf): super(FileDispatcher, self).__init__(conf) - self.target = conf['dispatcher_file'].get('file_path', - consts.DEFAULT_OUTPUT_FILE) - def flush_result_data(self, data): - utils.write_json_to_file(self.target, data) + def flush_result_data(self, id, data): + file_name = 'vnftest_' + str(id) + '.out' + target = self.conf['dispatcher_file'].get('file_path', os.path.join(consts.REPORT_DIR, file_name)) + utils.write_json_to_file(target, data) diff --git a/vnftest/dispatcher/http.py b/vnftest/dispatcher/http.py index da66c90..900792a 100644 --- a/vnftest/dispatcher/http.py +++ b/vnftest/dispatcher/http.py @@ -41,7 +41,7 @@ class HttpDispatcher(DispatchBase): self.timeout = int(http_conf.get('timeout', 5)) self.target = http_conf.get('target', 'http://127.0.0.1:8000/results') - def flush_result_data(self, data): + def flush_result_data(self, id, data): if self.target == '': # if the target was not set, do not do anything LOG.error('Dispatcher target was not set, no data will' @@ -54,28 +54,25 @@ class HttpDispatcher(DispatchBase): self.criteria = result['criteria'] testcases = result['testcases'] - for case, data in testcases.items(): - self._upload_case_result(case, data) + for testcase in testcases: + self._upload_case_result(testcase) - def _upload_case_result(self, case, data): + def _upload_case_result(self, testcase): try: - step_data = data.get('tc_data', [])[0] + step_data = testcase.get('steps', [])[0] + step_result = step_data['results'][0] except IndexError: current_time = datetime.now() else: - timestamp = float(step_data.get('timestamp', 0.0)) + timestamp = float(step_result.get('timestamp', 0.0)) current_time = datetime.fromtimestamp(timestamp) result = { "project_name": "vnftest", - "case_name": case, + "case_name": testcase['name'], "description": "vnftest ci step status", - "step": self.info.get('deploy_step'), - "version": self.info.get('version'), - "pod_name": self.info.get('pod_name'), - "installer": self.info.get('installer'), "build_tag": os.environ.get('BUILD_TAG'), - "criteria": data.get('criteria'), + "criteria": testcase.get('criteria'), "start_date": current_time.strftime('%Y-%m-%d %H:%M:%S'), "stop_date": current_time.strftime('%Y-%m-%d %H:%M:%S'), "trust_indicator": "", diff --git a/vnftest/tests/unit/core/test_task.py b/vnftest/tests/unit/core/test_task.py index d554951..7f26d61 100644 --- a/vnftest/tests/unit/core/test_task.py +++ b/vnftest/tests/unit/core/test_task.py @@ -26,14 +26,14 @@ from vnftest.core import task class TaskTestCase(unittest.TestCase): def test_set_dispatchers(self): - t = task.Task() + t = task.Task({}) output_config = 
{"DEFAULT": {"dispatcher": "file, http"}} t._set_dispatchers(output_config) self.assertEqual(output_config, output_config) @mock.patch.object(task, 'DispatcherBase') def test__do_output(self, mock_dispatcher): - t = task.Task() + t = task.Task({}) output_config = {"DEFAULT": {"dispatcher": "file, http"}} dispatcher1 = mock.MagicMock() @@ -44,7 +44,7 @@ class TaskTestCase(unittest.TestCase): mock_dispatcher.get = mock.MagicMock(return_value=[dispatcher1, dispatcher2]) - self.assertIsNone(t._do_output(output_config, {})) + self.assertIsNone(t._do_output(output_config)) @mock.patch.object(task, 'Context') @mock.patch.object(task, 'base_runner') @@ -55,16 +55,17 @@ class TaskTestCase(unittest.TestCase): 'interval': 1, 'type': 'Duration' }, - 'type': 'Dummy' + 'type': 'Dummy', + 'name': 'Dummy Step' } - t = task.Task() + t = task.Task({}) runner = mock.Mock() runner.join.return_value = 0 runner.get_output.return_value = {} runner.get_result.return_value = [] mock_base_runner.Runner.get.return_value = runner - t._run([step], False, "vnftest.out", {}) + t._run([step], 'dummy_case', False, "vnftest.out", {}) self.assertTrue(runner.run.called) def test_parse_suite_no_constraint_no_args(self): @@ -121,7 +122,7 @@ class TaskTestCase(unittest.TestCase): @mock.patch.object(task, 'utils') @mock.patch('logging.root') def test_set_log(self, mock_logging_root, *args): - task_obj = task.Task() + task_obj = task.Task({}) task_obj.task_id = 'task_id' task_obj._set_log() mock_logging_root.addHandler.assert_called() diff --git a/vnftest/tests/unit/onap/test_rest_call.py b/vnftest/tests/unit/onap/test_rest_call.py index 13aced7..b100ecb 100644 --- a/vnftest/tests/unit/onap/test_rest_call.py +++ b/vnftest/tests/unit/onap/test_rest_call.py @@ -21,6 +21,7 @@ from vnftest.core import task class RestCallTestCase(testtools.TestCase): step = { + 'name': 'DummyName', 'type': 'RestCall', 'options': { 'file': "dummy.yaml", @@ -49,10 +50,10 @@ class RestCallTestCase(testtools.TestCase): @mock.patch('vnftest.steps.rest_call.RestCall.execute_operation_impl') def test_run(self, mock_execute_operation): mock_execute_operation.return_value = {'body': {'value': 'output1'}} - t = task.Task() + t = task.Task({}) context_cfg = {} context = Context.get("CSAR") context.init(context_cfg) t.context = context - output = t._run([RestCallTestCase.step], False, "vnftest.out", {}) + output = t._run([RestCallTestCase.step], 'dummy_case', False, "vnftest.out", {}) self.assertEquals(output[0]['data']['output1'], 'output1') |