nrtest/compare.py: 40 changes (35 additions, 5 deletions)
@@ -4,6 +4,7 @@
 import os
 import json
 import logging
+from datetime import datetime

 # third-party imports
 import six
@@ -13,6 +14,11 @@
 from .plugin import find_unique_function


+cwidth = 4
+nwidth = 35
+fill = '.'
+
+
 class CompareException(Exception):
     pass

@@ -47,15 +53,29 @@ def compare_testsuite(ts_sut, ts_ref, rtol, atol, outfile):

     # compare all tests and return False if any are incompatible
     compatible = True
-    for name in sorted(common_test_names):
+    pass_count = 0
+
+    start_time = datetime.now()
+
+    for test_count, name in enumerate(sorted(common_test_names), start=1):
         test_sut = tests_sut[name]
         test_ref = tests_ref[name]

-        comparison = compare_test(test_sut, test_ref, rtol, atol)
+        comparison = compare_test(test_count, test_sut, test_ref, rtol, atol)
         receipt['Tests'].append(comparison)

         if not comparison['passed']:
             compatible = False
+        else:
+            pass_count += 1
+
+    duration = (datetime.now() - start_time).total_seconds()
+
+    prct = int(100*pass_count/test_count)
+    fail_count = test_count - pass_count
+    logging.info("")
+    logging.info(f"{prct}% tests passed, {fail_count} tests failed of {test_count}")
+    logging.info(f"Total test time (real) = {duration:3.2f} sec")

     if outfile:
         with open(outfile, 'w') as f:
@@ -65,7 +85,7 @@ def compare_testsuite(ts_sut, ts_ref, rtol, atol, outfile):
     return compatible


-def compare_test(test_sut, test_ref, rtol, atol):
+def compare_test(count, test_sut, test_ref, rtol, atol):
     """Compare the results of a single test against a benchmark.

     Args:
@@ -75,6 +95,10 @@ def compare_test(test_sut, test_ref, rtol, atol):

     Returns: boolean compatibility
     """
+
+    ccount = f"#{count}"
+    name = f"{test_sut.name} "
+
     logger = logging.getLogger(test_sut.name)

     comparison = {
@@ -96,6 +120,7 @@

         # compare result files
         # return False immediately if any are incompatible
+        start_time = datetime.now()
         for fname, ftype in six.iteritems(test_sut.output_files):

             ftype = ftype.lower()
@@ -110,21 +135,26 @@
             compare_file = find_unique_function('compare', ftype)
             try:
                 compatible = compare_file(path_sut, path_ref, rtol, atol)
+
             except Exception as e:
                 raise CompareException('Exception raised during diff: %s' % e)

             if not compatible:
                 raise CompareException('%s: diff failed' % fname)

+        duration = (datetime.now() - start_time).total_seconds()
+
     except CompareException as e:
         comparison['passed'] = False
         comparison['error_msg'] = str(e)
-        logger.info(color('fail', 'r'))
+        status = color('fail', 'r')
+        logging.info(f"Test {ccount:>{cwidth}}: {name:{fill}<{nwidth}} {status}")
         logger.info(str(e))

     else:
         comparison['passed'] = compatible
-        logger.info(color('pass', 'g'))
+        status = color('pass', 'g')
+        logging.info(f"Test {ccount:>{cwidth}}: {name:{fill}<{nwidth}} {status} {duration:3.2f} sec")

     return comparison

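The per-test status lines added above rely on Python's nested format specs, pulling the fill character, alignment and width from the new module-level cwidth, nwidth and fill constants. A minimal standalone sketch of how such a line renders; the values of ccount, name, status and duration below are invented for illustration, not taken from a real run:

# Illustration only, not part of the diff.
cwidth, nwidth, fill = 4, 35, '.'
ccount = "#3"            # test number, right-aligned in a cwidth-wide field
name = "example_test "   # trailing space keeps the fill dots off the name
status = "pass"          # stands in for color('pass', 'g'), which adds ANSI codes
duration = 1.2345
print(f"Test {ccount:>{cwidth}}: {name:{fill}<{nwidth}} {status} {duration:3.2f} sec")
# prints something like: Test   #3: example_test .............. pass 1.23 sec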
nrtest/execute.py: 41 changes (33 additions, 8 deletions)
@@ -5,7 +5,7 @@
 import logging
 import tempfile
 import json
-import datetime
+from datetime import datetime

 # third-party imports
 from packaging import version
@@ -15,20 +15,42 @@
 from .utility import color, copy_file_and_path, rmtree, which


+cwidth = 4
+nwidth = 35
+fill = '.'
+
+
 class TestFailure(Exception):
     pass


 def execute_testsuite(ts):
     success = True
-    for test in ts.tests:
-        if not execute_test(test, ts.app):
+    pass_count = 0
+
+    start_time = datetime.now()
+
+    for test_count, test in enumerate(ts.tests, start=1):
+        if not execute_test(test_count, test, ts.app):
             success = False
+        else:
+            pass_count += 1
+
+    duration = (datetime.now() - start_time).total_seconds()
+
+    prct = int(100*pass_count/test_count)
+    fail_count = test_count - pass_count
+    logging.info("")
+    logging.info(f"{prct}% tests passed, {fail_count} tests failed of {test_count}")
+    logging.info(f"Total test time (real) = {duration:3.2f} sec")

     return success


-def execute_test(test, app):
+def execute_test(count, test, app):
+    tcount = f"#{count}"
+    name = f"{test.name} "
+
     logger = logging.getLogger(test.name)

     if not os.path.exists(test.output_dir):
@@ -37,17 +59,20 @@ def execute_test(test, app):
     try:
         duration = _execute(test, app)
         _postcheck(test)
+
     except TestFailure as e:
         test.passed = False
         test.error_msg = str(e)
-        logger.info(color('fail', 'r'))
+        status = color('fail', 'r')
+        logging.info(f"Test {tcount:>{cwidth}}: {name:{fill}<{nwidth}} {status}")
         logger.info(test.error_msg)
     else:
         test.passed = True
         test.error_msg = None
-        logger.info(color('pass', 'g'))
-        dur_str = str(datetime.timedelta(seconds=duration)).split('.')[0]
-        logger.debug('Duration {0}'.format(dur_str))
+        status = color('pass', 'g')
+        logging.info(f"Test {tcount:>{cwidth}}: {name:{fill}<{nwidth}} {status} {duration:3.2f} sec")
+        # dur_str = str(datetime.timedelta(seconds=duration)).split('.')[0]
+        # logger.debug('Duration {0}'.format(dur_str))

     return test.passed

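Both execute_testsuite and compare_testsuite now end with the same summary arithmetic: an integer pass percentage, a fail count, and the wall-clock duration measured with datetime.now(). A small self-contained sketch of that calculation, assuming at least one test ran (with an empty test list, enumerate never binds test_count and the percentage line would fail); the results list below is invented for illustration:

# Illustration only, not part of the diff.
from datetime import datetime

start_time = datetime.now()
results = [True, True, False]            # hypothetical pass/fail outcomes
pass_count = 0
for test_count, passed in enumerate(results, start=1):
    if passed:
        pass_count += 1

duration = (datetime.now() - start_time).total_seconds()
prct = int(100*pass_count/test_count)    # 66, integer truncation as in the PR
fail_count = test_count - pass_count     # 1
print(f"{prct}% tests passed, {fail_count} tests failed of {test_count}")
print(f"Total test time (real) = {duration:3.2f} sec")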