| author | Moshe <moshehoa@amdocs.com> | 2019-01-17 18:04:54 +0200 |
|---|---|---|
| committer | Moshe <moshehoa@amdocs.com> | 2019-01-17 18:05:01 +0200 |
| commit | 6ed81bdf24927a0900d4857d7652ae48cc8c78b1 (patch) | |
| tree | 821fff9c7ef18e7ce57ce353e75420900c896948 /vnftest/core | |
| parent | 1112c6e3e6eae3aa10680b2d7b0d653de1a4bc0b (diff) | |
Refactor test reporting
Issue-ID: VNFSDK-350
Change-Id: I66a82ab56dd6702903e4d1edf776a6d29cb4e836
Signed-off-by: Moshe <moshehoa@amdocs.com>
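The refactor drops the ad-hoc result dict that `_get_format_result()` used to assemble and instead streams task, testcase, and step status into a new `TaskInfo` accumulator. As an orientation aid, here is a sketch of the shape `TaskInfo.result()` returns; the key names are taken from the class added in `vnftest/core/task.py` below, while every value is invented for illustration:

```python
# Illustrative only: keys mirror the TaskInfo class added in the diff
# below; all values are made up.
task_result = {
    'task_id': 'demo-task-id',
    'status': 'FINISHED',        # 'IN_PROGRESS' until task_end()/task_fatal()
    'criteria': 'PASS',          # 'PASS', 'FAIL', or 'FATAL'
    'info': {},
    'testcases': [{
        'name': 'tc001',
        'status': 'FINISHED',
        'criteria': 'PASS',
        'steps': [{
            'name': 'onboard',
            'status': 'FINISHED',    # NOT_STARTED -> IN_PROGRESS -> FINISHED
            'criteria': 'PASS',      # FAIL if any result carries errors
            'results': [{
                'timestamp': 1547741094,
                'sequence': 1,
                'data': [{'type': 'String', 'key': 'status', 'value': 'SUCCESS'}],
                'errors': [],
            }],
        }],
    }],
}
```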
Diffstat (limited to 'vnftest/core')
| -rw-r--r-- | vnftest/core/report.py | 7 |
| -rw-r--r-- | vnftest/core/task.py | 403 |
| -rw-r--r-- | vnftest/core/testcase.py | 10 |
3 files changed, 227 insertions, 193 deletions
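The calling convention changes as well: `Task` now receives its parsed CLI arguments in `__init__`, and `start()` takes no parameters. A minimal driver sketch, assuming an argparse-style namespace whose attribute names are exactly the ones `task.py` reads from `self.args` in the full diff below (the concrete values are placeholders):

```python
from argparse import Namespace

from vnftest.core.task import Task

args = Namespace(
    task_id=None,              # None -> Task falls back to str(uuid.uuid4())
    inputfile='tc001.yaml',    # placeholder task file
    output_file='/tmp/vnftest.out',
    task_args=None,
    task_args_file=None,
    suite=False,
    parse_only=False,
    keep_deploy=False,
)

task = Task(args)              # args moved from start() into __init__
result = task.start()          # returns task.task_info.result()
```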
diff --git a/vnftest/core/report.py b/vnftest/core/report.py
index b8f8bb7..c648848 100644
--- a/vnftest/core/report.py
+++ b/vnftest/core/report.py
@@ -19,6 +19,7 @@ from __future__ import absolute_import
 from __future__ import print_function
 
 # import ast
+import os
 import re
 import uuid
 
@@ -120,7 +121,9 @@ class Report(object):
             "Timestamp": self.Timestamp,
             "task_id": self.task_id,
             "table": table_vals})
-        with open(consts.DEFAULT_HTML_FILE, "w") as file_open:
+        file_name = 'vnftest_' + str(self.task_id) + '.htm'
+        report_file = os.path.join(consts.REPORT_DIR, file_name)
+        with open(report_file, "w") as file_open:
             file_open.write(Template_html.render(Context_html))
 
-        print("Report generated. View /tmp/vnftest.htm")
+        print("Report generated. View " + report_file)
diff --git a/vnftest/core/task.py b/vnftest/core/task.py
index 41756af..a919b1f 100644
--- a/vnftest/core/task.py
+++ b/vnftest/core/task.py
@@ -26,7 +26,6 @@ import logging
 import sys
 import time
 import uuid
-from collections import OrderedDict
 
 import ipaddress
 import os
@@ -61,9 +60,13 @@ class Task(object):  # pragma: no cover
     Set of commands to manage benchmark tasks.
     """
 
-    def __init__(self):
+    def __init__(self, args):
         self.context = None
         self.outputs = {}
+        self.args = args or {}
+        task_id = getattr(args, 'task_id', None)
+        self.task_id = task_id if task_id is not None else str(uuid.uuid4())
+        self.task_info = TaskInfo(task_id)
 
     def _set_dispatchers(self, output_config):
         dispatchers = output_config.get('DEFAULT', {}).get('dispatcher',
@@ -71,14 +74,9 @@ class Task(object):  # pragma: no cover
         out_types = [s.strip() for s in dispatchers.split(',')]
         output_config['DEFAULT']['dispatcher'] = out_types
 
-    def start(self, args, **kwargs):
+    def start(self):
         atexit.register(self.atexit_handler)
-
-        task_id = getattr(args, 'task_id')
-        self.task_id = task_id if task_id else str(uuid.uuid4())
-
         self._set_log()
-
         try:
             output_config = utils.parse_ini_file(CONF_FILE)
         except Exception:
@@ -86,7 +84,7 @@ class Task(object):  # pragma: no cover
             output_config = {}
 
         self._init_output_config(output_config)
-        self._set_output_config(output_config, args.output_file)
+        self._set_output_config(output_config, self.args.output_file)
         LOG.debug('Output configuration is: %s', output_config)
 
         self._set_dispatchers(output_config)
@@ -94,79 +92,77 @@ class Task(object):  # pragma: no cover
         # update dispatcher list
         if 'file' in output_config['DEFAULT']['dispatcher']:
             result = {'status': 0, 'result': {}}
-            utils.write_json_to_file(args.output_file, result)
+            utils.write_json_to_file(self.args.output_file, result)
 
         total_start_time = time.time()
 
-        parser = TaskParser(args.inputfile)
+        parser = TaskParser(self.args.inputfile)
 
-        if args.suite:
+        if self.args.suite:
             # 1.parse suite, return suite_params info
             task_files, task_args_list, task_args_fnames = \
                 parser.parse_suite()
         else:
             task_files = [parser.path]
-            task_args_list = [args.task_args]
-            task_args_fnames = [args.task_args_file]
+            task_args_list = [self.args.task_args]
+            task_args_fnames = [self.args.task_args_file]
 
         LOG.debug("task_files:%s, task_args_list:%s, task_args_fnames:%s",
                   task_files, task_args_list, task_args_fnames)
 
-        if args.parse_only:
+        if self.args.parse_only:
             sys.exit(0)
 
-        testcases = {}
-        # parse task_files
-        for i in range(0, len(task_files)):
-            one_task_start_time = time.time()
-            # the output of the previous task is the input of the new task
-            inputs = copy.deepcopy(self.outputs)
-            task_args_file = task_args_fnames[i]
-            task_args = task_args_list[i]
-            try:
-                if task_args_file:
-                    with utils.load_resource(task_args_file) as f:
-                        inputs.update(parse_task_args("task_args_file", f.read()))
-                inputs.update(parse_task_args("task_args", task_args))
-            except TypeError:
-                raise TypeError()
-            parser.path = task_files[i]
-            steps, run_in_parallel, meet_precondition, ret_context = \
-                parser.parse_task(self.task_id, inputs)
-
-            self.context = ret_context
-
-            if not meet_precondition:
-                LOG.info("meet_precondition is %s, please check envrionment",
-                         meet_precondition)
-                continue
-
-            case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
-            try:
-                data = self._run(steps, run_in_parallel, args.output_file, inputs)
-            except KeyboardInterrupt:
-                raise
-            except Exception:
-                LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True)
-                testcases[case_name] = {'criteria': 'FAIL', 'tc_data': []}
-            else:
-                criteria = self.evaluate_task_criteria(data)
-                testcases[case_name] = {'criteria': criteria, 'tc_data': data, 'output': self.outputs}
-
-            if args.keep_deploy:
-                # keep deployment, forget about stack
-                # (hide it for exit handler)
-                self.context = None
-            else:
-                self.context.undeploy()
-                self.context = None
-            one_task_end_time = time.time()
-            LOG.info("Task %s finished in %d secs", task_files[i],
-                     one_task_end_time - one_task_start_time)
-
-        result = self._get_format_result(testcases)
+        try:
+            for i in range(0, len(task_files)):
+                one_task_start_time = time.time()
+                # the output of the previous task is the input of the new task
+                inputs = copy.deepcopy(self.outputs)
+                task_args_file = task_args_fnames[i]
+                task_args = task_args_list[i]
+                try:
+                    if task_args_file:
+                        with utils.load_resource(task_args_file) as f:
+                            inputs.update(parse_task_args("task_args_file", f.read()))
+                    inputs.update(parse_task_args("task_args", task_args))
+                except TypeError:
+                    raise TypeError()
+                parser.path = task_files[i]
+                steps, run_in_parallel, meet_precondition, ret_context = \
+                    parser.parse_task(self.task_id, inputs)
+
+                self.context = ret_context
+
+                if not meet_precondition:
+                    LOG.info("meet_precondition is %s, please check envrionment",
+                             meet_precondition)
+                    continue
 
-        self._do_output(output_config, result)
-        self._generate_reporting(result)
+                case_name = os.path.splitext(os.path.basename(task_files[i]))[0]
+                try:
+                    self._run(steps, case_name, run_in_parallel, self.args.output_file, inputs)
+                except KeyboardInterrupt:
+                    raise
+                except Exception:
+                    LOG.error('Testcase: "%s" FAILED!!!', case_name, exc_info=True)
+
+                if self.args.keep_deploy:
+                    # keep deployment, forget about stack
+                    # (hide it for exit handler)
+                    self.context = None
+                else:
+                    self.context.undeploy()
+                    self.context = None
+                one_task_end_time = time.time()
+                LOG.info("Task %s finished in %d secs", task_files[i],
+                         one_task_end_time - one_task_start_time)
+        except Exception as e:
+            LOG.error("Task fatal error", e)
+            self.task_info.task_fatal()
+        finally:
+            self.task_info.task_end()
+
+        self._do_output(output_config)
+        self._generate_reporting()
 
         total_end_time = time.time()
         LOG.info("Total finished in %d secs",
@@ -176,14 +172,16 @@ class Task(object):  # pragma: no cover
             LOG.info("To generate report, execute command "
                      "'vnftest report generate %(task_id)s %(tc)s'", step)
         LOG.info("Task ALL DONE, exiting")
-        return result
+        return self.task_info.result()
 
-    def _generate_reporting(self, result):
+    def _generate_reporting(self):
         env = Environment()
-        with open(constants.REPORTING_FILE, 'w') as f:
-            f.write(env.from_string(report_template).render(result))
+        file_name = 'report_' + str(self.task_id) + '.html'
+        report_file = os.path.join(constants.REPORT_DIR, file_name)
+        with open(report_file, 'w') as f:
+            f.write(env.from_string(report_template).render(self.task_info.result()))
 
-        LOG.info("Report can be found in '%s'", constants.REPORTING_FILE)
+        LOG.info("Report can be found in '%s'", report_file)
 
     def _set_log(self):
         log_format = '%(asctime)s %(name)s %(filename)s:%(lineno)d %(levelname)s %(message)s'
@@ -221,104 +219,61 @@ class Task(object):  # pragma: no cover
             k = 'dispatcher_{}'.format(output_config['DEFAULT']['dispatcher'])
             output_config[k]['target'] = target
 
-    def _get_format_result(self, testcases):
-        criteria = self._get_task_criteria(testcases)
-
-        info = {
-            'deploy_step': os.environ.get('DEPLOY_STEP', 'unknown'),
-            'installer': os.environ.get('INSTALLER_TYPE', 'unknown'),
-            'pod_name': os.environ.get('NODE_NAME', 'unknown'),
-            'version': os.environ.get('VNFTEST_BRANCH', 'unknown')
-        }
-
-        result = {
-            'status': 1,
-            'result': {
-                'criteria': criteria,
-                'task_id': self.task_id,
-                'info': info,
-                'testcases': testcases
-            }
-        }
-
-        return result
-
-    def _get_task_criteria(self, testcases):
-        criteria = any(t.get('criteria') != 'PASS' for t in testcases.values())
-        if criteria:
-            return 'FAIL'
-        else:
-            return 'PASS'
-
-    def evaluate_task_criteria(self, steps_result_list):
-        for step_result in steps_result_list:
-            errors_list = step_result['errors']
-            if errors_list is not None and len(errors_list) > 0:
-                return 'FAIL'
-        return 'PASS'
-
-    def _do_output(self, output_config, result):
+    def _do_output(self, output_config):
         dispatchers = DispatcherBase.get(output_config)
 
         for dispatcher in dispatchers:
-            dispatcher.flush_result_data(result)
+            dispatcher.flush_result_data(self.task_id, self.task_info.result())
 
-    def _run(self, steps, run_in_parallel, output_file, inputs):
+    def _run(self, steps, case_name, run_in_parallel, output_file, inputs):
         """Deploys context and calls runners"""
         if self.context:
             self.context.deploy()
 
-        background_runners = []
-
-        result = []
-        # Start all background steps
-        for step in filter(_is_background_step, steps):
-            step["runner"] = dict(type="Duration", duration=1000000000)
-            runner = self.run_one_step(step, output_file, inputs)
-            background_runners.append(runner)
-
-        runners = []
-        if run_in_parallel:
-            for step in steps:
-                if not _is_background_step(step):
-                    runner = self.run_one_step(step, output_file, inputs)
-                    runners.append(runner)
-
-            # Wait for runners to finish
-            for runner in runners:
-                status = runner_join(runner, background_runners, self.outputs, result)
-                if status != 0:
-                    raise RuntimeError(
-                        "{0} runner status {1}".format(runner.__execution_type__, status))
-                LOG.info("Runner ended, output in %s", output_file)
-        else:
-            # run serially
+        try:
+            self.task_info.testcase_start(case_name)
             for step in steps:
-                if not _is_background_step(step):
-                    runner = self.run_one_step(step, output_file, inputs)
-                    status = runner_join(runner, background_runners, self.outputs, result)
-                    if status != 0:
-                        LOG.error('Step NO.%s: "%s" ERROR!',
-                                  steps.index(step) + 1,
-                                  step.get('type'))
-                        raise RuntimeError(
-                            "{0} runner status {1}".format(runner.__execution_type__, status))
-                    LOG.info("Runner ended, output in %s", output_file)
-
-        # Abort background runners
-        for runner in background_runners:
-            runner.abort()
-
-        # Wait for background runners to finish
-        for runner in background_runners:
-            status = runner.join(self.outputs, result)
-            if status is None:
-                # Nuke if it did not stop nicely
-                base_runner.Runner.terminate(runner)
-                runner.join(self.outputs, result)
-            base_runner.Runner.release(runner)
-
-        print("Background task ended")
-        return result
+                step_unique_id = self.task_info.step_add(case_name, step['name'])
+                step['step_unique_id'] = step_unique_id
+
+            background_runners = []
+            result = []
+            # Start all background steps
+            for step in filter(_is_background_step, steps):
+                step["runner"] = dict(type="Duration", duration=1000000000)
+                self.task_info.step_start(step['step_unique_id'])
+                runner = self.run_one_step(step, output_file, inputs)
+                background_runners.append([step, runner])
+
+            runners = []
+            if run_in_parallel:
+                for step in steps:
+                    if not _is_background_step(step):
+                        self.task_info.step_start(step['step_unique_id'])
+                        runner = self.run_one_step(step, output_file, inputs)
+                        runners.append([step, runner])
+
+                # Wait for runners to finish
+                for runner_item in runners:
+                    self.finalize_step(runner_item[0], runner_item[1], result)
+            else:
+                # run serially
+                for step in steps:
+                    if not _is_background_step(step):
+                        self.task_info.step_start(step['step_unique_id'])
+                        runner = self.run_one_step(step, output_file, inputs)
+                        self.finalize_step(step, runner, result)
+
+            # Abort background runners
+            for runner_item in background_runners:
+                runner_item[1].abort()
+
+            # Wait for background runners to finish
+            for runner_item in background_runners:
+                runner = runner_item[1]
+                self.finalize_step(step, runner, result)
+            return result
+        finally:
+            self.task_info.testcase_end(case_name)
 
     def atexit_handler(self):
         """handler for process termination"""
@@ -355,6 +310,16 @@ class Task(object):  # pragma: no cover
         runner.run(step_cfg, self.context, inputs)
 
         return runner
 
+    def finalize_step(self, step, runner, result):
+        step_result_list = []
+        status = runner_join(runner, self.outputs, step_result_list)
+        self.task_info.step_end(step['step_unique_id'], step_result_list)
+        if status != 0:
+            raise RuntimeError(
+                "{0} runner status {1}".format(runner.__execution_type__, status))
+        LOG.info("Runner ended")
+        result.extend(step_result_list)
+
 
 class TaskParser(object):  # pragma: no cover
     """Parser for task config files in yaml format"""
@@ -492,26 +457,17 @@ class TaskParser(object):  # pragma: no cover
         if "precondition" in cfg:
             precondition = cfg["precondition"]
             installer_type = precondition.get("installer_type", None)
-            deploy_steps = precondition.get("deploy_steps", None)
             tc_fit_pods = precondition.get("pod_name", None)
 
             installer_type_env = os.environ.get('INSTALL_TYPE', None)
-            deploy_step_env = os.environ.get('DEPLOY_STEP', None)
             pod_name_env = os.environ.get('NODE_NAME', None)
 
             LOG.info("installer_type: %s, installer_type_env: %s",
                      installer_type, installer_type_env)
-            LOG.info("deploy_steps: %s, deploy_step_env: %s",
-                     deploy_steps, deploy_step_env)
             LOG.info("tc_fit_pods: %s, pod_name_env: %s",
                      tc_fit_pods, pod_name_env)
             if installer_type and installer_type_env:
                 if installer_type_env not in installer_type:
                     return False
-                if deploy_steps and deploy_step_env:
-                    deploy_steps_list = deploy_steps.split(',')
-                    for deploy_step in deploy_steps_list:
-                        if deploy_step_env.startswith(deploy_step):
-                            return True
                     return False
             if tc_fit_pods and pod_name_env:
                 if pod_name_env not in tc_fit_pods:
@@ -519,6 +475,97 @@
         return True
 
 
+class TaskInfo(object):
+    info_dict = {}
+    info_dict['status'] = 'IN_PROGRESS'
+    info_dict['criteria'] = 'N/A'
+    result_info_dict = {}
+    info_dict['info'] = result_info_dict
+    test_cases_list = []
+    info_dict['testcases'] = test_cases_list
+    helper_test_cases_dict = {}
+    helper_test_steps_dict = {}
+    step_id_helper = 0
+
+    def __init__(self, task_id):
+        self.info_dict['task_id'] = task_id
+
+    def task_end(self):
+        if self.info_dict['criteria'] == 'N/A':
+            criteria = 'PASS'
+            for testcase in self.info_dict['testcases']:
+                if testcase['criteria'] == 'FAIL':
+                    criteria = 'FAIL'
+                    break
+            self.info_dict['criteria'] = criteria
+        self.info_dict['status'] = 'FINISHED'
+
+    def task_fatal(self):
+        self.info_dict['criteria'] = 'FATAL'
+        self.info_dict['status'] = 'FINISHED'
+
+    def testcase_start(self, testcase_name):
+        testcase_dict = {}
+        testcase_dict['name'] = testcase_name
+        testcase_dict['criteria'] = 'N/A'
+        testcase_dict['status'] = 'IN_PROGRESS'
+        testcase_dict['steps'] = []
+        self.test_cases_list.append(testcase_dict)
+        self.helper_test_cases_dict[testcase_name] = testcase_dict
+
+    def testcase_end(self, testcase_name):
+        testcase_dict = self.helper_test_cases_dict[testcase_name]
+        criteria = 'PASS'
+        for step in testcase_dict['steps']:
+            if step['criteria'] == 'FAIL':
+                criteria = 'FAIL'
+                break
+        testcase_dict['criteria'] = criteria
+        testcase_dict['status'] = 'FINISHED'
+
+    def step_add(self, testcase_name, step_name):
+        step_dict = {}
+        step_dict['name'] = step_name
+        step_dict['criteria'] = 'N/A'
+        step_dict['status'] = 'NOT_STARTED'
+        step_dict['results'] = []
+        testcase_dict = self.helper_test_cases_dict[testcase_name]
+        testcase_dict['steps'].append(step_dict)
+        self.step_id_helper += 1
+        step_unique_id = step_name + '_' + str(self.step_id_helper)
+        self.helper_test_steps_dict[step_unique_id] = step_dict
+        return step_unique_id
+
+    def step_start(self, step_unique_id):
+        step_dict = self.helper_test_steps_dict[step_unique_id]
+        step_dict['status'] = 'IN_PROGRESS'
+
+    def step_end(self, step_unique_id, result_list):
+        step_dict = self.helper_test_steps_dict[step_unique_id]
+        errors_count = 0
+        for result in result_list:
+            result_item = {
+                'timestamp': result['timestamp'],
+                'sequence': result['sequence'],
+                'data': [],
+                'errors': []
+            }
+            for k, v in result['data'].items():
+                result_item['data'].append({'type': 'String', 'key': k, 'value': str(v)})
+
+            for error in result['errors']:
+                result_item['errors'].append({'type': 'String', 'key': 'error', 'value': str(error)})
+                errors_count += 1
+            step_dict['results'].append(result_item)
+        if errors_count > 0:
+            step_dict['criteria'] = 'FAIL'
+        else:
+            step_dict['criteria'] = 'PASS'
+        step_dict['status'] = 'FINISHED'
+
+    def result(self):
+        return self.info_dict
+
+
 def is_ip_addr(addr):
     """check if string addr is an IP address"""
     try:
@@ -541,22 +588,10 @@ def _is_background_step(step):
     return False
 
 
-def runner_join(runner, background_runners, outputs, result):
-    """join (wait for) a runner, exit process at runner failure
-    :param background_runners:
-    :type background_runners:
-    :param outputs:
-    :type outputs: dict
-    :param result:
-    :type result: list
-    """
+def runner_join(runner, outputs, result):
     while runner.poll() is None:
         outputs.update(runner.get_output())
         result.extend(runner.get_result())
-        # drain all the background runner queues
-        for background in background_runners:
-            outputs.update(background.get_output())
-            result.extend(background.get_result())
     status = runner.join(outputs, result)
     base_runner.Runner.release(runner)
     return status
diff --git a/vnftest/core/testcase.py b/vnftest/core/testcase.py
index 2c5c4b4..df86dd6 100644
--- a/vnftest/core/testcase.py
+++ b/vnftest/core/testcase.py
@@ -63,14 +63,13 @@ class Testcase(object):
             LOG.exception('Failed to load test case:\n%s\n', testcase_file)
             raise
 
-        description, installer, deploy_steps = self._parse_testcase(
+        description, installer = self._parse_testcase(
             testcase_info)
 
         record = {
            'Name': testcase_file.split(".")[0],
            'Description': description,
-           'installer': installer,
-           'deploy_steps': deploy_steps
+           'installer': installer
        }
 
         return record
@@ -82,11 +81,8 @@ class Testcase(object):
 
         test_precondition = testcase_cfg.get('precondition', {})
         installer_type = test_precondition.get('installer_type', 'all')
-        deploy_steps = test_precondition.get('deploy_steps', 'all')
-
         description = self._get_description(testcase_cfg)
-
-        return description, installer_type, deploy_steps
+        return description, installer_type
 
     def _get_description(self, testcase_cfg):
         try:
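To summarize the new bookkeeping, here is a standalone walk-through of the `TaskInfo` lifecycle that the refactored `start()`/`_run()` drive, in call order. It assumes only the `TaskInfo` class exactly as added above; the testcase and step names are invented:

```python
from vnftest.core.task import TaskInfo

info = TaskInfo('demo-task-id')
info.testcase_start('tc001')
step_id = info.step_add('tc001', 'onboard')   # -> 'onboard_1' (counter suffix)
info.step_start(step_id)
info.step_end(step_id, [{          # one runner result, as step_end() expects
    'timestamp': 1547741094,
    'sequence': 1,
    'data': {'status': 'SUCCESS'},  # flattened into typed key/value records
    'errors': [],                   # any entry here flips the step to FAIL
}])
info.testcase_end('tc001')  # testcase criteria rolled up from its steps
info.task_end()             # task criteria rolled up from its testcases
assert info.result()['criteria'] == 'PASS'
```

One review note: in the background-runner wait at the end of `_run()`, `self.finalize_step(step, runner, result)` reuses the leftover loop variable `step` rather than `runner_item[0]`, so a background step's results may be attributed to the wrong `step_unique_id`.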