| field | value | date |
|---|---|---|
| author | Steven Wright <sw3588@att.com> | 2019-01-30 19:24:08 +0000 |
| committer | Gerrit Code Review <gerrit@onap.org> | 2019-01-30 19:24:08 +0000 |
| commit | ed9d46a261ec2e4c98a10001dbf42b5da0e03689 (patch) | |
| tree | cc36152dc4b8c3e3e207698ff3cf265cfd03bc14 /ice_validator/tests | |
| parent | 71227245f4bb9fb0fa2f7a14dcee0404b8d34c0b (diff) | |
| parent | 350d9dcefe7f69d3e6a3923d2670fa2d140f2ba1 (diff) | |
Merge "[VVP] Add new columns to traceability report"
Diffstat (limited to 'ice_validator/tests')
-rw-r--r-- | ice_validator/tests/conftest.py | 97 |
1 file changed, 51 insertions(+), 46 deletions(-)
```diff
diff --git a/ice_validator/tests/conftest.py b/ice_validator/tests/conftest.py
index 1a47e63..807ac71 100644
--- a/ice_validator/tests/conftest.py
+++ b/ice_validator/tests/conftest.py
@@ -42,7 +42,6 @@ import io
 import json
 import os
 import re
-import sys
 import time
 from collections import defaultdict
 from itertools import chain
@@ -86,6 +85,11 @@ ALL_RESULTS = []
 
 
 def get_output_dir(config):
+    """
+    Retrieve the output directory for the reports and create it if necessary
+    :param config: pytest configuration
+    :return: output directory as string
+    """
     output_dir = config.option.output_dir or DEFAULT_OUTPUT_DIR
     if not os.path.exists(output_dir):
         os.makedirs(output_dir, exist_ok=True)
@@ -996,6 +1000,11 @@ def pytest_generate_tests(metafunc):
 
 
 def hash_directory(path):
+    """
+    Create md5 hash using the contents of all files under ``path``
+    :param path: string directory containing files
+    :return: string MD5 hash code (hex)
+    """
     md5 = hashlib.md5()
     for dir_path, sub_dirs, filenames in os.walk(path):
         for filename in filenames:
@@ -1013,45 +1022,20 @@ def load_current_requirements():
     return data["versions"][version]["needs"]
 
 
-def compat_open(path):
-    """Invokes open correctly depending on the Python version"""
-    if sys.version_info.major < 3:
-        return open(path, "wb")
-    else:
-        return open(path, "w", newline="")
-
-
-def unicode_writerow(writer, row):
-    if sys.version_info.major < 3:
-        row = [s.encode("utf8") for s in row]
-    writer.writerow(row)
-
-
-def parse_heat_requirements(reqs):
-    """Takes requirements and returns list of only Heat requirements"""
-    data = json.loads(reqs)
-    for key, values in list(data.items()):
-        if "Heat" in (values["docname"]):
-            if "MUST" not in (values["keyword"]):
-                del data[key]
-            else:
-                if "none" in (values["validation_mode"]):
-                    del data[key]
-        else:
-            del data[key]
-    return data
+def select_heat_requirements(reqs):
+    """Filters dict requirements to only those requirements pertaining to Heat"""
+    return {k: v for k, v in reqs.items() if "Heat" in v["docname"]}
 
 
 # noinspection PyUnusedLocal
 def pytest_report_collectionfinish(config, startdir, items):
     """Generates a simple traceability report to output/traceability.csv"""
-    traceability_path = os.path.join(__path__[0], "../output/traceability.csv")
+    traceability_path = os.path.join(get_output_dir(config), "traceability.csv")
     output_dir = os.path.split(traceability_path)[0]
     if not os.path.exists(output_dir):
         os.makedirs(output_dir)
     reqs = load_current_requirements()
-    reqs = json.dumps(reqs)
-    requirements = parse_heat_requirements(reqs)
+    requirements = select_heat_requirements(reqs)
     unmapped, mapped = partition(
         lambda i: hasattr(i.function, "requirement_ids"), items
     )
@@ -1067,38 +1051,59 @@
                 (req_id, item.function.__module__, item.function.__name__)
             )
 
-    mapping_error_path = os.path.join(__path__[0], "../output/mapping_errors.csv")
-    with compat_open(mapping_error_path) as f:
+    mapping_error_path = os.path.join(get_output_dir(config), "mapping_errors.csv")
+    with open(mapping_error_path, "w", newline="") as f:
         writer = csv.writer(f)
         for err in mapping_errors:
-            unicode_writerow(writer, err)
+            writer.writerow(err)
 
-    with compat_open(traceability_path) as f:
+    with open(traceability_path, "w", newline="") as f:
         out = csv.writer(f)
-        unicode_writerow(
-            out,
-            ("Requirement ID", "Requirement", "Section", "Test Module", "Test Name"),
+        out.writerow(
+            ("Requirement ID", "Requirement", "Section",
+             "Keyword", "Validation Mode", "Is Testable",
+             "Test Module", "Test Name"),
         )
         for req_id, metadata in requirements.items():
+            keyword = metadata["keyword"].upper()
+            mode = metadata["validation_mode"].lower()
+            testable = keyword in {"MUST", "MUST NOT"} and mode != "none"
             if req_to_test[req_id]:
                 for item in req_to_test[req_id]:
-                    unicode_writerow(
-                        out,
+                    out.writerow(
                         (
                             req_id,
                             metadata["description"],
                             metadata["section_name"],
+                            keyword,
+                            mode,
+                            "TRUE" if testable else "FALSE",
                             item.function.__module__,
                             item.function.__name__,
                         ),
                     )
             else:
-                unicode_writerow(
-                    out,
-                    (req_id, metadata["description"], metadata["section_name"], "", ""),
+                out.writerow(
+                    (req_id,
+                     metadata["description"],
+                     metadata["section_name"],
+                     keyword,
+                     mode,
+                     "TRUE" if testable else "FALSE",
+                     "",  # test module
+                     ""),  # test function
                )
 
     # now write out any test methods that weren't mapped to requirements
-    for item in unmapped:
-        unicode_writerow(
-            out, ("", "", "", item.function.__module__, item.function.__name__)
+    unmapped_tests = {(item.function.__module__, item.function.__name__)
+                      for item in unmapped}
+    for test_module, test_name in unmapped_tests:
+        out.writerow(
+            ("",  # req ID
+             "",  # description
+             "",  # section name
+             "",  # keyword
+             "static",  # validation mode
+             "TRUE",  # testable
+             test_module,
+             test_name)
         )
```