gpu: nvgpu: unit: add test levels to required test tracking

Add tracking of test levels to the required test mechanism in the unit
test framework. The JSON output will now include the test level. This
allows runs with just level 0 tests to correctly verify that the right
tests ran.

JIRA NVGPU-3200

Change-Id: Ifc7c7ad5b605487945e0406d387f54ba04f1680d
Signed-off-by: Philip Elcan <pelcan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2103515
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Philip Elcan, 2019-04-23 11:05:34 -04:00
Committed by: mobile promotions
parent 73bd8e4646
commit bb61fc110b
4 changed files with 347 additions and 40 deletions
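
For context, the regenerated required-tests list now carries a test level
per entry. A minimal sketch of the entry shape regen() builds after this
change (the unit and test names below are made up for illustration):

    # Hypothetical entry as built by regen(); names and level are illustrative.
    entry = {"unit": "some_unit", "test": "some_test", "test_level": 0}
    # uid, vc and req are only attached when the test has a JAMA uid:
    #   entry['uid'], entry['vc'], entry['req']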

(File diff suppressed because it is too large.)


@@ -131,7 +131,8 @@ static void dump_test_record(FILE *logfile, struct unit_test_record *rec,
 	fprintf(logfile, "\"uid\": \"%s\", ", rec->test->jama.unique_id);
 	fprintf(logfile, "\"vc\": \"%s\", ",
 		rec->test->jama.verification_criteria);
-	fprintf(logfile, "\"req\": \"%s\"", rec->test->jama.requirement);
+	fprintf(logfile, "\"req\": \"%s\", ", rec->test->jama.requirement);
+	fprintf(logfile, "\"test_level\": %d", rec->test->test_lvl);
 	fprintf(logfile, "}");
 }
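
With the extra fprintf, each record in the JSON results now ends with the
test level. A sketch of consuming such a record (field values here are
placeholders, not from a real run):

    import json

    # Placeholder record mirroring the fprintf sequence above.
    rec = json.loads('{"uid": "U-1", "vc": "VC-1", "req": "R-1", "test_level": 0}')
    assert rec["test_level"] == 0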


@@ -101,7 +101,7 @@ def regen():
     test_count = 0
     for unit, tests in sorted(test_dict.items(), key=lambda kv: kv[0], reverse=False):
         for test in sorted(tests.items()):
-            entry = {"unit": unit, "test": test[0]}
+            entry = {"unit": unit, "test": test[0], 'test_level': test[1]['test_level']}
             if test[1]['uid'] != "":
                 entry['uid'] = test[1]['uid']
                 entry['vc'] = test[1]['vc']
@@ -137,7 +137,7 @@ def format_html_test(unit, test, status, error, uid, req, vc):
     ret += "</tr>\n"
     return ret

-def check(html = False):
+def check(test_level, html = False):
     #Check that tests in results.json cover required_tests.json
     with open(RESULTS_FILE) as results_file:
         results = json.loads(results_file.read())
@@ -158,14 +158,18 @@ def check(html = False):
         test = reqd_test['test']
         error = ""
         status = False
+        skipped = False
         if unit not in test_dict:
             error = ("ERROR: Required unit %s is not in test results.\n" % unit)
             log += error
             errors += 1
         elif test not in test_dict[unit]:
-            error = ("ERROR: Required test %s - %s is not in test results.\n" % (unit, test))
-            log += error
-            errors += 1
+            if req_dict[unit][test]['test_level'] <= test_level:
+                error = ("ERROR: Required test %s - %s is not in test results.\n" % (unit, test))
+                log += error
+                errors += 1
+            else:
+                skipped = True
         elif test_dict[unit][test]['status'] is False:
             log += ("ERROR: Required test %s - %s FAILED.\n" % (unit, test))
             error = "FAILED"
@@ -173,6 +177,7 @@ def check(html = False):
         else:
             status = True
             error = "PASS"
-        html += format_html_test(unit, test, status, error, reqd_test.get('uid', ''), reqd_test.get('req', ''), reqd_test.get('vc', ''))
-        test_count += 1
+        if not skipped:
+            html += format_html_test(unit, test, status, error, reqd_test.get('uid', ''), reqd_test.get('req', ''), reqd_test.get('vc', ''))
+            test_count += 1
@@ -208,12 +213,13 @@ def check(html = False):
     print("PASS: All %d tests found in result log." % test_count)
     return 0

-def html():
-    return check(html = True)
+def html(test_level):
+    return check(test_level, html = True)

 parser = argparse.ArgumentParser()
 parser.add_argument("--regen", help="Regenerate list of expected test cases.", action="store_true")
 parser.add_argument("--check", help="Make sure all expected test cases were run.", action="store_true")
+parser.add_argument("--test-level", "-t", help="Test level used for checking results. Default=0", type=int, default=0)
 parser.add_argument("--html", help="Perform --check and export results in an HTML file.", action="store_true")

 args = parser.parse_args()
@@ -221,9 +227,9 @@ if args.regen:
     regen()
     exit(0)

 if args.check:
-    exit(check())
+    exit(check(args.test_level))
 if args.html:
-    exit(html())
+    exit(html(args.test_level))
 else:
     parser.print_help()
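
The core of the new behavior is the level comparison: a required test that
is missing from the results only counts as an error when its level is at
or below the level the run was executed with. A minimal sketch of that
rule, with hypothetical level values:

    def is_required(entry_level, run_level):
        # Missing tests above the run's level are skipped, not errors.
        return entry_level <= run_level

    assert is_required(0, 0) is True    # level-0 test must appear in a level-0 run
    assert is_required(1, 0) is False   # level-1 test is skipped at level 0

With the new flag, checking a level-0 run is ./testlist.py --check (the
default --test-level is 0), while --test-level 1 also requires level-1 tests.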


@@ -26,6 +26,8 @@
 # that unit tests are found and nvgpu-drv is found.
 #

+options=$(getopt -o t: --long test-level: -- "$@")
+
 this_script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )"
 pushd $this_script_dir
@@ -48,8 +50,24 @@ echo "$ $NVGPU_UNIT $*"
 $NVGPU_UNIT $*
 rc=$?
 if [ $rc -eq "0" ]; then
+	#get the test level, if passed into this script
+	eval set -- $options
+	while true; do
+		case "$1" in
+		-t|--test-level)
+			shift;
+			testlevelparam="-t $1"
+			;;
+		--)
+			shift;
+			break;
+			;;
+		esac
+		shift
+	done
+	echo $testlevelparam
 	echo "Checking executed tests against list of required tests:"
-	./testlist.py --html
+	./testlist.py --html $testlevelparam
 	rc=$?
 fi
 popd
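
For comparison, the option extraction this wrapper performs with the
getopt utility can be sketched with Python's getopt module (the argv
values below are hypothetical):

    import getopt

    # Same spec as the shell call: -t / --test-level with a required argument.
    argv = ["-t", "1", "--"]
    opts, _ = getopt.getopt(argv, "t:", ["test-level="])

    testlevelparam = ""
    for opt, val in opts:
        if opt in ("-t", "--test-level"):
            testlevelparam = "-t %s" % val  # forwarded to testlist.py

    print(testlevelparam)  # -> -t 1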