Diffstat (limited to 'ice_validator/tests/conftest.py')
-rw-r--r--   ice_validator/tests/conftest.py | 343
1 file changed, 201 insertions(+), 142 deletions(-)
diff --git a/ice_validator/tests/conftest.py b/ice_validator/tests/conftest.py
index f5f25a3..a08b99a 100644
--- a/ice_validator/tests/conftest.py
+++ b/ice_validator/tests/conftest.py
@@ -45,8 +45,10 @@ import os
import sys
import time
import requests
+import traceback
import docutils.core
+import jinja2
import pytest
from more_itertools import partition
from six import string_types
@@ -55,9 +57,7 @@ import xlsxwriter
__path__ = [os.path.dirname(os.path.abspath(__file__))]
resolution_steps_file = "resolution_steps.json"
-requirements_file = "requirements.json"
-
-FAILURE_DATA = {}
+heat_requirements_file = "heat_requirements.json"
report_columns = [
("Input File", "file"),
@@ -69,6 +69,12 @@ report_columns = [
]
report = collections.OrderedDict(report_columns)
+COLLECTION_FAILURES = []
+COLLECTION_FAILURE_WARNING = """WARNING: The following unexpected errors occurred
+while preparing to validate the input files. Some validations may not have been
+executed. Please refer these issues to the VNF Validation Tool team.
+"""
+
def extract_error_msg(rep):
try:
@@ -92,6 +98,11 @@ def pytest_runtest_makereport(item, call):
if rep.outcome == "failed":
if not os.path.exists(output_dir):
os.mkdir(output_dir)
+ if os.path.exists("{}/failures".format(output_dir)):
+ with open("{}/failures".format(output_dir), "r") as o:
+ jdata = json.loads(o.read())
+ else:
+ jdata = {}
if hasattr(item.function, "requirement_ids"):
requirement_ids = item.function.requirement_ids
@@ -113,17 +124,31 @@ def pytest_runtest_makereport(item, call):
else:
resolved_pair = rep.nodeid.split("[")[1][:-1]
- FAILURE_DATA[len(FAILURE_DATA)] = {
+ markers = set(m.name for m in item.iter_markers())
+ base_test = "base" in markers
+
+ msg = extract_error_msg(rep)
+ if base_test:
+ msg = "!!Base Test Failure!! Halting test suite execution...\n{}".format(
+ msg
+ )
+
+ jdata[len(jdata)] = {
"file": resolved_pair,
"vnfrqts": requirement_ids,
"test": item.function.__name__,
"test_file": item.function.__module__.split(".")[-1],
"raw_output": str(rep.longrepr),
- "message": extract_error_msg(rep),
+ "message": msg,
}
with open("{}/failures".format(output_dir), "w") as f:
- json.dump(FAILURE_DATA, f, indent=4)
+ json.dump(jdata, f, indent=4)
+
+ if not item.config.option.continue_on_failure and base_test:
+ pytest.exit(
+ "{}\n{}\n{}".format(msg, resolved_pair, item.function.__name__)
+ )
def make_timestamp():
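
The hunk above replaces the module-level FAILURE_DATA dict with a read-merge-write cycle against output/failures, so failure records persist even when a failing base test aborts the session through pytest.exit(). A minimal sketch of that pattern, assuming the same file layout (the helper name is illustrative, not part of the patch):

    import json
    import os

    def record_failure(output_dir, entry):
        path = os.path.join(output_dir, "failures")
        if os.path.exists(path):
            with open(path, "r") as f:
                jdata = json.loads(f.read())   # keys come back as strings ("0", "1", ...)
        else:
            jdata = {}
        jdata[len(jdata)] = entry              # len() still yields the next free index
        with open(path, "w") as f:
            json.dump(jdata, f, indent=4)

Unless --continue-on-failure is passed, a failure in a test marked base still writes its entry this way before the run is halted.
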
@@ -144,13 +169,31 @@ def pytest_sessionfinish(session, exitstatus):
)
-def pytest_runtest_setup(item):
- profile = item.session.config.option.validation_profile
- markers = set(m.name for m in item.iter_markers())
- if not profile and markers and "xfail" not in markers:
- pytest.skip("No validation profile selected. Skipping tests with marks.")
- if profile and markers and profile not in markers and "xfail" not in markers:
- pytest.skip("Doesn't match selection validation profile")
+def pytest_collection_modifyitems(session, config, items):
+ allowed_marks = ["xfail", "base"]
+ profile = config.option.validation_profile
+
+ for item in items:
+ markers = set(m.name for m in item.iter_markers())
+ if not profile and markers and set(markers).isdisjoint(allowed_marks):
+ item.add_marker(
+ pytest.mark.skip(
+ reason="No validation profile selected. Skipping tests with marks."
+ )
+ )
+ if (
+ profile
+ and markers
+ and profile not in markers
+ and set(markers).isdisjoint(allowed_marks)
+ ):
+ item.add_marker(
+ pytest.mark.skip(reason="Doesn't match selected validation profile")
+ )
+
+ items.sort(
+ key=lambda item: 0 if "base" in set(m.name for m in item.iter_markers()) else 1
+ )
def make_href(path):
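
Moving the profile filtering from pytest_runtest_setup into pytest_collection_modifyitems lets the plugin both attach skip markers and reorder the collected items so that base tests run first. The ordering is just a stable sort with a two-valued key; a small stand-alone illustration (test names are hypothetical):

    collected = ["test_vm_names", "test_schema[base]", "test_env_pairs[base]"]
    base_tests = {"test_schema[base]", "test_env_pairs[base]"}

    # Python's sort is stable, so items within each group keep their collected order.
    collected.sort(key=lambda name: 0 if name in base_tests else 1)
    # -> ['test_schema[base]', 'test_env_pairs[base]', 'test_vm_names']
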
@@ -158,10 +201,10 @@ def make_href(path):
links = []
for p in paths:
abs_path = os.path.abspath(p)
- filename = os.path.split(abs_path)[1]
+ name = abs_path if os.path.isdir(abs_path) else os.path.split(abs_path)[1]
links.append(
- "<a href='file://{abs_path}' target='_blank'>{filename}</a>".format(
- abs_path=abs_path, filename=filename
+ "<a href='file://{abs_path}' target='_blank'>{name}</a>".format(
+ abs_path=abs_path, name=name
)
)
return "<br/>".join(links)
@@ -184,7 +227,7 @@ def generate_report(outpath, template_path, profile_name, output_format):
with open(resolution_steps, "r") as f:
rdata = json.loads(f.read())
- heat_requirements = "{}/../{}".format(__path__[0], requirements_file)
+ heat_requirements = "{}/../{}".format(__path__[0], heat_requirements_file)
if os.path.exists(heat_requirements):
with open(heat_requirements, "r") as f:
hdata = json.loads(f.read())
@@ -203,6 +246,7 @@ def generate_report(outpath, template_path, profile_name, output_format):
# mapping resolution steps to module and test name
for k, v in faildata.items():
+ # resolution_step = ""
faildata[k]["resolution_steps"] = ""
for rs in rdata:
if v["test_file"] == rs["module"] and v["test"] == rs["function"]:
@@ -221,21 +265,33 @@ def generate_report(outpath, template_path, profile_name, output_format):
def generate_csv_report(output_dir, profile_name, template_path, faildata):
- rows = []
- rows.append(["Validation Failures"])
+ rows = [["Validation Failures"]]
headers = [
("Profile Selected:", profile_name),
("Report Generated At:", make_timestamp()),
("Directory Validated:", template_path),
("Checksum:", hash_directory(template_path)),
- ("Total Errors:", len(faildata)),
+ ("Total Errors:", len(faildata) + len(COLLECTION_FAILURES)),
]
-
rows.append([])
for header in headers:
rows.append(header)
rows.append([])
+ if COLLECTION_FAILURES:
+ rows.append([COLLECTION_FAILURE_WARNING])
+ rows.append(["Validation File", "Test", "Fixtures", "Error"])
+ for failure in COLLECTION_FAILURES:
+ rows.append(
+ [
+ failure["module"],
+ failure["test"],
+ ";".join(failure["fixtures"]),
+ failure["error"],
+ ]
+ )
+ rows.append([])
+
# table header
rows.append([col for col, _ in report_columns])
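
When any collection failures were recorded, the CSV report now inserts a warning banner and a four-column table of those errors ahead of the normal failure table. A hedged sketch of how such rows serialize with the standard csv module (the production code writes them through a helper elsewhere in this file, and the sample data below is invented):

    import csv
    import io

    warning = "WARNING: unexpected errors occurred while preparing to validate the input files"
    collection_rows = [
        [warning],  # stands in for COLLECTION_FAILURE_WARNING
        ["Validation File", "Test", "Fixtures", "Error"],
        ["test_environment_file_parameters", "test_parameters_defined",
         "yaml_file;env_file", "Traceback (most recent call last): ..."],
        [],
    ]
    buf = io.StringIO()
    csv.writer(buf).writerows(collection_rows)
    print(buf.getvalue())
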
@@ -274,7 +330,7 @@ def generate_excel_report(output_dir, profile_name, template_path, faildata):
("Report Generated At:", make_timestamp()),
("Directory Validated:", template_path),
("Checksum:", hash_directory(template_path)),
- ("Total Errors:", len(faildata)),
+ ("Total Errors:", len(faildata) + len(COLLECTION_FAILURES)),
]
for row, (header, value) in enumerate(headers, start=2):
worksheet.write(row, 0, header, bold)
@@ -283,13 +339,26 @@ def generate_excel_report(output_dir, profile_name, template_path, faildata):
worksheet.set_column(0, len(headers) - 1, 40)
worksheet.set_column(len(headers), len(headers), 80)
+ if COLLECTION_FAILURES:
+ collection_failures_start = 2 + len(headers) + 2
+ worksheet.write(collection_failures_start, 0, COLLECTION_FAILURE_WARNING, bold)
+ collection_failure_headers = ["Validation File", "Test", "Fixtures", "Error"]
+ for col_num, col_name in enumerate(collection_failure_headers):
+ worksheet.write(collection_failures_start + 1, col_num, col_name, bold)
+ for row, data in enumerate(COLLECTION_FAILURES, collection_failures_start + 2):
+ worksheet.write(row, 0, data["module"])
+ worksheet.write(row, 1, data["test"])
+ worksheet.write(row, 2, ",".join(data["fixtures"]))
+ worksheet.write(row, 3, data["error"], code)
+
# table header
- start_error_table_row = 2 + len(headers) + 2
+ start_error_table_row = 2 + len(headers) + len(COLLECTION_FAILURES) + 4
+ worksheet.write(start_error_table_row, 0, "Validation Failures", bold)
for col_num, (col_name, _) in enumerate(report_columns):
- worksheet.write(start_error_table_row, col_num, col_name, bold)
+ worksheet.write(start_error_table_row + 1, col_num, col_name, bold)
# table content
- for row, data in enumerate(faildata.values(), start=start_error_table_row + 1):
+ for row, data in enumerate(faildata.values(), start=start_error_table_row + 2):
for col, key in enumerate(report.values()):
if key == "file":
paths = (
@@ -306,70 +375,35 @@ def generate_excel_report(output_dir, profile_name, template_path, faildata):
def generate_html_report(outpath, profile_name, template_path, faildata):
- with open("{}/report.html".format(outpath), "w") as of:
- body_begin = """
- <style type="text/css">
- h1, li {{
- font-family:Arial, sans-serif;
- }}
- .tg {{border-collapse:collapse;border-spacing:0;}}
- .tg td{{font-family:Arial, sans-serif;font-size:8px;padding:10px 5px;
- border-style:solid;border-width:1px;overflow:hidden;word-break:normal;
- border-color:black;}}
- .tg th{{font-family:Arial, sans-serif;font-size:14px;font-weight:normal;
- padding:10px 5px;border-style:solid;border-width:1px;overflow:hidden;
- word-break:normal;border-color:black;}}
- .tg .tg-rwj1{{font-size:10px;font-family:Arial, Helvetica,
- sans-serif !important;;border-color:inherit;vertical-align:top}}</style>
- <h1>Validation Failures</h1>
- <ul>
- <li><b>Profile Selected: </b> <tt>{profile}</tt></li>
- <li><b>Report Generated At:</b> <tt>{timestamp}</tt></li>
- <li><b>Directory Validated:</b> <tt>{template_dir}</tt></li>
- <li><b>Checksum:</b> <tt>{checksum}</tt></li>
- <li><b>Total Errors:</b> {num_failures}</li>
- </ul>
- """.format(
- profile=profile_name,
- timestamp=make_timestamp(),
+ failures = []
+ for data in faildata.values():
+ failures.append(
+ {
+ "file_links": make_href(data["file"]),
+ "test_id": data["test_file"],
+ "error_message": data["message"],
+ "raw_output": data["raw_output"],
+ "requirements": docutils.core.publish_parts(
+ writer_name="html", source=data["req_description"]
+ )["body"],
+ "resolution_steps": data["resolution_steps"],
+ }
+ )
+ pkg_dir = os.path.split(__file__)[0]
+ j2_template_path = os.path.join(pkg_dir, "report.html.jinja2")
+ with open(j2_template_path, "r") as f:
+ report_template = jinja2.Template(f.read())
+ contents = report_template.render(
+ num_failures=len(failures) + len(COLLECTION_FAILURES),
+ profile_name=profile_name,
+ template_dir=make_href(template_path),
checksum=hash_directory(template_path),
- template_dir=template_path,
- num_failures=len(faildata),
+ timestamp=make_timestamp(),
+ failures=failures,
+ collection_failures=COLLECTION_FAILURES,
)
- of.write(body_begin)
-
- if len(faildata) == 0:
- of.write("<p>Success! No validation failures detected.</p>")
- return
-
- table_begin = '<table class="tg">'
- of.write(table_begin)
-
- # table headers
- of.write("<tr>")
- for k, v in report.items():
- of.write('<th class="tg-rwj1">{}</th>'.format(k))
- of.write("</tr>")
-
- # table content
- for k, v in faildata.items():
- of.write("<tr>")
- for rk, rv in report.items():
- if rv == "file":
- value = make_href(v[rv])
- elif rv == "raw_output":
- value = "<pre>{}</pre>".format(v[rv])
- elif rv == "req_description":
- parts = docutils.core.publish_parts(
- writer_name="html", source=v[rv]
- )
- value = parts["body"]
- else:
- value = v[rv].replace("\n", "<br />")
- of.write(" <td>{}</td>".format(value))
- of.write("</tr>")
-
- of.write("</table>")
+ with open(os.path.join(outpath, "report.html"), "w") as f:
+ f.write(contents)
def pytest_addoption(parser):
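
The HTML report is no longer assembled by hand from string concatenation and inline CSS; the failure and collection-failure records are handed to a Jinja2 template (report.html.jinja2, expected to ship next to this module). A minimal sketch of the rendering flow, using an inline template instead of the real file:

    import jinja2

    template = jinja2.Template(
        "<h1>Validation Failures ({{ num_failures }})</h1>"
        "{% for f in failures %}<p>{{ f.error_message }}</p>{% endfor %}"
    )
    html = template.render(
        num_failures=2,
        failures=[
            {"error_message": "missing required parameter"},
            {"error_message": "invalid resource id"},
        ],
    )
    print(html)
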
@@ -411,6 +445,13 @@ def pytest_addoption(parser):
help="Format of output report (html, csv, excel)",
)
+ parser.addoption(
+ "--continue-on-failure",
+ dest="continue_on_failure",
+ action="store_true",
+ help="Continue validation even when structural errors exist in input files",
+ )
+
def pytest_configure(config):
"""
@@ -420,9 +461,9 @@ def pytest_configure(config):
if config.getoption("template_dir") and config.getoption("self_test"):
raise Exception('"--template-dir", and "--self-test"' " are mutually exclusive")
if not (
- config.getoption("template_dir") or
- config.getoption("self_test") or
- config.getoption("help")
+ config.getoption("template_dir")
+ or config.getoption("self_test")
+ or config.getoption("help")
):
raise Exception('One of "--template-dir" or' ' "--self-test" must be specified')
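
A usage example of the new flag alongside the existing --template-dir option; the test path and template directory are illustrative, not taken from this patch:

    pytest ice_validator/tests --template-dir=/path/to/heat/templates --continue-on-failure

Without --continue-on-failure, the first failing base test still writes its failure record and then stops the session; with it, validation proceeds so the report can list every problem at once.
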
@@ -435,90 +476,107 @@ def pytest_generate_tests(metafunc):
is not specified on the CLI, the fixtures associated with this
test name.
"""
- if "filename" in metafunc.fixturenames:
- from .parametrizers import parametrize_filename
- parametrize_filename(metafunc)
+ # noinspection PyBroadException
+ try:
+ if "filename" in metafunc.fixturenames:
+ from .parametrizers import parametrize_filename
+
+ parametrize_filename(metafunc)
- if "filenames" in metafunc.fixturenames:
- from .parametrizers import parametrize_filenames
+ if "filenames" in metafunc.fixturenames:
+ from .parametrizers import parametrize_filenames
- parametrize_filenames(metafunc)
+ parametrize_filenames(metafunc)
- if "template_dir" in metafunc.fixturenames:
- from .parametrizers import parametrize_template_dir
+ if "template_dir" in metafunc.fixturenames:
+ from .parametrizers import parametrize_template_dir
- parametrize_template_dir(metafunc)
+ parametrize_template_dir(metafunc)
- if "environment_pair" in metafunc.fixturenames:
- from .parametrizers import parametrize_environment_pair
+ if "environment_pair" in metafunc.fixturenames:
+ from .parametrizers import parametrize_environment_pair
- parametrize_environment_pair(metafunc)
+ parametrize_environment_pair(metafunc)
- if "heat_volume_pair" in metafunc.fixturenames:
- from .parametrizers import parametrize_heat_volume_pair
+ if "heat_volume_pair" in metafunc.fixturenames:
+ from .parametrizers import parametrize_heat_volume_pair
- parametrize_heat_volume_pair(metafunc)
+ parametrize_heat_volume_pair(metafunc)
- if "yaml_files" in metafunc.fixturenames:
- from .parametrizers import parametrize_yaml_files
+ if "yaml_files" in metafunc.fixturenames:
+ from .parametrizers import parametrize_yaml_files
- parametrize_yaml_files(metafunc)
+ parametrize_yaml_files(metafunc)
- if "env_files" in metafunc.fixturenames:
- from .parametrizers import parametrize_environment_files
+ if "env_files" in metafunc.fixturenames:
+ from .parametrizers import parametrize_environment_files
- parametrize_environment_files(metafunc)
+ parametrize_environment_files(metafunc)
- if "yaml_file" in metafunc.fixturenames:
- from .parametrizers import parametrize_yaml_file
+ if "yaml_file" in metafunc.fixturenames:
+ from .parametrizers import parametrize_yaml_file
- parametrize_yaml_file(metafunc)
+ parametrize_yaml_file(metafunc)
- if "env_file" in metafunc.fixturenames:
- from .parametrizers import parametrize_environment_file
+ if "env_file" in metafunc.fixturenames:
+ from .parametrizers import parametrize_environment_file
- parametrize_environment_file(metafunc)
+ parametrize_environment_file(metafunc)
- if "parsed_yaml_file" in metafunc.fixturenames:
- from .parametrizers import parametrize_parsed_yaml_file
+ if "parsed_yaml_file" in metafunc.fixturenames:
+ from .parametrizers import parametrize_parsed_yaml_file
- parametrize_parsed_yaml_file(metafunc)
+ parametrize_parsed_yaml_file(metafunc)
- if "parsed_environment_file" in metafunc.fixturenames:
- from .parametrizers import parametrize_parsed_environment_file
+ if "parsed_environment_file" in metafunc.fixturenames:
+ from .parametrizers import parametrize_parsed_environment_file
- parametrize_parsed_environment_file(metafunc)
+ parametrize_parsed_environment_file(metafunc)
- if "heat_template" in metafunc.fixturenames:
- from .parametrizers import parametrize_heat_template
+ if "heat_template" in metafunc.fixturenames:
+ from .parametrizers import parametrize_heat_template
- parametrize_heat_template(metafunc)
+ parametrize_heat_template(metafunc)
- if "heat_templates" in metafunc.fixturenames:
- from .parametrizers import parametrize_heat_templates
+ if "heat_templates" in metafunc.fixturenames:
+ from .parametrizers import parametrize_heat_templates
- parametrize_heat_templates(metafunc)
+ parametrize_heat_templates(metafunc)
- if "volume_template" in metafunc.fixturenames:
- from .parametrizers import parametrize_volume_template
+ if "volume_template" in metafunc.fixturenames:
+ from .parametrizers import parametrize_volume_template
- parametrize_volume_template(metafunc)
+ parametrize_volume_template(metafunc)
- if "volume_templates" in metafunc.fixturenames:
- from .parametrizers import parametrize_volume_templates
+ if "volume_templates" in metafunc.fixturenames:
+ from .parametrizers import parametrize_volume_templates
- parametrize_volume_templates(metafunc)
+ parametrize_volume_templates(metafunc)
- if "template" in metafunc.fixturenames:
- from .parametrizers import parametrize_template
+ if "template" in metafunc.fixturenames:
+ from .parametrizers import parametrize_template
- parametrize_template(metafunc)
+ parametrize_template(metafunc)
- if "templates" in metafunc.fixturenames:
- from .parametrizers import parametrize_templates
+ if "templates" in metafunc.fixturenames:
+ from .parametrizers import parametrize_templates
- parametrize_templates(metafunc)
+ parametrize_templates(metafunc)
+ except Exception as e:
+ # If an error occurs during the collection phase, it won't be logged as a
+ # normal test failure. Failures could then occur without appearing in the
+ # report, producing a false positive success message. These errors are
+ # stored in COLLECTION_FAILURES and reported separately.
+ COLLECTION_FAILURES.append(
+ {
+ "module": metafunc.module.__name__,
+ "test": metafunc.function.__name__,
+ "fixtures": metafunc.fixturenames,
+ "error": traceback.format_exc(),
+ }
+ )
+ raise e
def hash_directory(path):
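
Wrapping the whole parametrization dispatch in a single try/except means any error raised by a parametrizer is captured into COLLECTION_FAILURES (and re-raised, so pytest still surfaces the collection error) instead of silently yielding an empty, all-green report. A stand-alone sketch of that capture pattern, with an artificial error standing in for a real parametrizer failure:

    import traceback

    COLLECTION_FAILURES = []

    def record_collection_failure(module, test, fixtures):
        # Must be called inside an except block so format_exc() sees the active exception.
        COLLECTION_FAILURES.append(
            {
                "module": module,
                "test": test,
                "fixtures": fixtures,
                "error": traceback.format_exc(),
            }
        )

    try:
        raise ValueError("bad heat template")  # stand-in for a parametrizer error
    except Exception:
        record_collection_failure("test_example_module", "test_case", ["yaml_file"])
        # the real hook re-raises here so the error still appears during collection

    print(COLLECTION_FAILURES[0]["error"])
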
@@ -538,7 +596,7 @@ def load_current_requirements():
r = requests.get(url)
with open('requirements.json', 'wb') as needs:
needs.write(r.content)
- path = "requirements.json"
+ path = "heat_requirements.json"
if not os.path.exists(path):
return {}
with io.open(path, encoding="utf8", mode="r") as f:
@@ -562,6 +620,7 @@ def unicode_writerow(writer, row):
def pytest_report_collectionfinish(config, startdir, items):
+
"""Generates a simple traceability report to output/traceability.csv"""
traceability_path = os.path.join(__path__[0], "../output/traceability.csv")
output_dir = os.path.split(traceability_path)[0]