Add test cases for the requirements of "gc-aot" feature (#3399)
This commit is contained in:
215
tests/wamr-test-suites/requirement-engineering-test-script/run_requirement.py
Executable file
215
tests/wamr-test-suites/requirement-engineering-test-script/run_requirement.py
Executable file
@ -0,0 +1,215 @@
|
||||
#!/usr/bin/python3
|
||||
#
|
||||
# Copyright (C) 2019 Intel Corporation. All rights reserved.
|
||||
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
from typing import Dict, Tuple, List
|
||||
import importlib
|
||||
import inspect
|
||||
import csv
|
||||
|
||||
REQUIREMENT_TESTS_DIR = "../../requirement-engineering"
|
||||
SUBREQUIREMENT_DESCRIPTIONS = {}
|
||||
|
||||
|
||||
# To use this empty function to do signature check
|
||||
def expected_build_func_template(verbose: bool) -> None:
    """Signature template: a requirement's build() must match this exactly."""
|
||||
|
||||
|
||||
# To use this empty function to do signature check
|
||||
# The actual implementation of the return value should has following information:
|
||||
#
|
||||
# Signature template used to validate a requirement's run() function.
# A real implementation returns a mapping of
#   subrequirement id -> {(test case name, test case description): passed?}
def expected_run_func_template(
    output_dir: str, subrequirement_ids: List[int]
) -> Dict[int, Dict[Tuple[str, str], bool]]:
    """Empty stand-in; only its signature is compared via inspect.signature."""
|
||||
|
||||
|
||||
def dynamic_import(requirement_dir: str):
    """Import and validate the build()/run() entry points of a requirement.

    Side effects: appends requirement_dir to sys.path, chdirs into it, and
    populates the module-level SUBREQUIREMENT_DESCRIPTIONS from run.py.

    Args:
        requirement_dir: directory containing the requirement's build.py and run.py.

    Returns:
        A (build_function, run_function) tuple after signature validation.

    Raises:
        ImportError: if the expected attributes are missing from build.py/run.py.
        AssertionError: if either function's signature differs from its template.
        TypeError: if SUBREQUIREMENT_DESCRIPTIONS is not Dict[int, Tuple[str, str]].
    """
    # Declare that we intend to modify the global variable
    global SUBREQUIREMENT_DESCRIPTIONS
    sys.path.append(requirement_dir)
    os.chdir(requirement_dir)

    try:
        build_module = importlib.import_module("build")
        build_function = getattr(build_module, "build")
    except AttributeError as exc:
        # Chain the original error so the root cause stays visible in tracebacks.
        raise ImportError(
            "'build' function not found in the specified build.py file."
        ) from exc

    try:
        run_module = importlib.import_module("run")
        run_function = getattr(run_module, "run")
        SUBREQUIREMENT_DESCRIPTIONS = getattr(run_module, "SUBREQUIREMENT_DESCRIPTIONS")
    except AttributeError as exc:
        raise ImportError(
            "'run' function or 'SUBREQUIREMENT_DESCRIPTIONS' not found in the specified run.py file."
        ) from exc

    # Do signature check: the imported functions must match the templates above.
    expected_signature = inspect.signature(expected_build_func_template)
    actual_signature = inspect.signature(build_function)
    assert (
        actual_signature == expected_signature
    ), "The build function doesn't have the expected signature"

    expected_signature = inspect.signature(expected_run_func_template)
    actual_signature = inspect.signature(run_function)
    assert (
        actual_signature == expected_signature
    ), "The run function doesn't have the expected signature"

    # Check if the variable is a dictionary
    if not isinstance(SUBREQUIREMENT_DESCRIPTIONS, dict):
        raise TypeError("SUBREQUIREMENT_DESCRIPTIONS is not a dictionary")

    # Check the types of keys and values in the dictionary
    for key, value in SUBREQUIREMENT_DESCRIPTIONS.items():
        if not isinstance(key, int):
            raise TypeError("Key in SUBREQUIREMENT_DESCRIPTIONS is not an int")
        if not (
            isinstance(value, tuple)
            and len(value) == 2
            and all(isinstance(elem, str) for elem in value)
        ):
            raise TypeError(
                "Value in SUBREQUIREMENT_DESCRIPTIONS is not a Tuple[str, str]"
            )

    return build_function, run_function
|
||||
|
||||
|
||||
def cmd_line_summary(
    requirement_name: str, result_dict: dict, subrequirement_descriptions: dict
):
    """Print a per-subrequirement pass/fail summary to stdout.

    Args:
        requirement_name: name shown in the summary banner.
        result_dict: {subrequirement_id: {(case name, case description): passed}}.
        subrequirement_descriptions: {subrequirement_id: (issue number, description)}.
    """
    # command line summary
    total, total_pass_nums, total_fail_nums = 0, 0, 0
    print(f"\n============ Start: Summary of {requirement_name} test ============")
    for subrequirement_id, case_results in result_dict.items():
        sub_total = len(case_results)
        pass_nums = sum(1 for result in case_results.values() if result)
        fail_nums = sub_total - pass_nums
        # Default to an empty pair so a missing description entry does not
        # raise ValueError during tuple unpacking.
        issue_number, subrequirement_description = subrequirement_descriptions.get(
            subrequirement_id, ("", "")
        )

        print(f"\nTest Sub-requirement id: {subrequirement_id}")
        print(f"Issue Number: {issue_number}")
        print(f"Sub-requirement description: {subrequirement_description}")
        print(f"Number of test cases: {sub_total}")
        print(f"Pass: {pass_nums}")
        print(f"Fail: {fail_nums}\n")
        print(
            "----------------------------------------------------------------------------"
        )
        total += sub_total
        total_pass_nums += pass_nums
        total_fail_nums += fail_nums

    print(f"\nTotal Number of test cases: {total}")
    print(f"Pass: {total_pass_nums}")
    print(f"Fail: {total_fail_nums}\n")

    print(f"============= End: Summary of {requirement_name} test =============\n")
|
||||
|
||||
|
||||
def generate_report(
    output_filename: str, result_dict: dict, subrequirement_descriptions: dict = None
):
    """Write the test results to '<output_filename>.csv'.

    Args:
        output_filename: report path without the '.csv' extension.
        result_dict: {subrequirement_id: {(case name, case description): passed}}.
        subrequirement_descriptions: optional {id: (issue number, description)};
            falls back to the module-level SUBREQUIREMENT_DESCRIPTIONS so
            existing two-argument callers keep working.
    """
    if subrequirement_descriptions is None:
        subrequirement_descriptions = SUBREQUIREMENT_DESCRIPTIONS

    # create a list of column names
    column_names = [
        "subrequirement id",
        "issue number",
        "subrequirement description",
        "running mode",
        "test case name",
        "test case description",
        "test case executing result",
    ]

    # newline="" is required by the csv module to avoid blank rows on Windows
    with open(output_filename + ".csv", "w", newline="") as output_file:
        csv_writer = csv.writer(output_file)
        # write the column names as the first row
        csv_writer.writerow(column_names)
        for subrequirement_id, test_cases in result_dict.items():
            # Empty-pair default avoids ValueError when unpacking a missing entry
            issue_number, subrequirement_description = subrequirement_descriptions.get(
                subrequirement_id, ("", "")
            )
            for test_case, result in test_cases.items():
                # unpack the test case name and description from the tuple
                test_case_name, test_case_description = test_case
                csv_writer.writerow(
                    [
                        subrequirement_id,
                        issue_number,
                        subrequirement_description,
                        "AOT",
                        test_case_name,
                        test_case_description,
                        # convert the result to pass or fail
                        "pass" if result else "fail",
                    ]
                )
|
||||
|
||||
|
||||
def run_requirement(
    requirement_name: str, output_dir: str, subrequirement_ids: List[int]
):
    """Build and execute one requirement's tests, then summarize and report.

    Exits the process with status 1 when the requirement directory is missing.
    """
    req_dir = os.path.join(REQUIREMENT_TESTS_DIR, requirement_name)
    if not os.path.isdir(req_dir):
        print(f"No such requirement in directory {req_dir} exists")
        sys.exit(1)

    report_path = os.path.join(output_dir, requirement_name)

    # Load the requirement's build()/run() entry points dynamically.
    build_fn, run_fn = dynamic_import(req_dir)

    build_fn(verbose=False)
    results = run_fn(report_path, subrequirement_ids)

    # Emit both the console summary and the CSV report.
    cmd_line_summary(requirement_name, results, SUBREQUIREMENT_DESCRIPTIONS)
    generate_report(report_path, results)
|
||||
|
||||
|
||||
def main():
    """Parse command line options and dispatch to run_requirement()."""
    parser = argparse.ArgumentParser(description="Process command line options.")

    # '-o': where the report is written; '-r': which requirement to run.
    parser.add_argument(
        "-o", "--output_directory", required=True, help="Report output directory"
    )
    parser.add_argument(
        "-r", "--requirement_name", required=True, help="Requirement name"
    )
    # Zero or more subrequirement IDs; empty means "run all".
    parser.add_argument(
        "subrequirement_ids", nargs="*", type=int, help="Subrequirement IDs (optional)"
    )

    options = parser.parse_args()
    run_requirement(
        options.requirement_name,
        options.output_directory,
        list(options.subrequirement_ids),
    )
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@ -28,6 +28,7 @@ function help()
|
||||
echo "-e enable exception handling"
|
||||
echo "-x test SGX"
|
||||
echo "-w enable WASI threads"
|
||||
echo "-a test all runtimes in sightglass suite"
|
||||
echo "-b use the wabt binary release package instead of compiling from the source code"
|
||||
echo "-g build iwasm with debug version"
|
||||
echo "-v enable GC heap verification"
|
||||
@ -37,6 +38,10 @@ function help()
|
||||
echo "-C enable code coverage collect"
|
||||
echo "-j set the platform to test"
|
||||
echo "-T set sanitizer to use in tests(ubsan|tsan|asan)"
|
||||
echo "-r [requirement name] [N [N ...]] specify a requirement name followed by one or more"
|
||||
echo " subrequirement IDs, if no subrequirement is specificed,"
|
||||
echo " it will run all subrequirements. When this optin is used,"
|
||||
echo " only run requirement tests"
|
||||
}
|
||||
|
||||
OPT_PARSED=""
|
||||
@ -73,8 +78,11 @@ QEMU_FIRMWARE=""
|
||||
WASI_TESTSUITE_COMMIT="ee807fc551978490bf1c277059aabfa1e589a6c2"
|
||||
TARGET_LIST=("AARCH64" "AARCH64_VFP" "ARMV7" "ARMV7_VFP" "THUMBV7" "THUMBV7_VFP" \
|
||||
"RISCV32" "RISCV32_ILP32F" "RISCV32_ILP32D" "RISCV64" "RISCV64_LP64F" "RISCV64_LP64D")
|
||||
REQUIREMENT_NAME=""
|
||||
# Initialize an empty array for subrequirement IDs
|
||||
SUBREQUIREMENT_IDS=()
|
||||
|
||||
while getopts ":s:cabgvt:m:MCpSXexwWPGQF:j:T:" opt
|
||||
while getopts ":s:cabgvt:m:MCpSXexwWPGQF:j:T:r:" opt
|
||||
do
|
||||
OPT_PARSED="TRUE"
|
||||
case $opt in
|
||||
@ -192,6 +200,19 @@ do
|
||||
echo "sanitizer is " ${OPTARG}
|
||||
WAMR_BUILD_SANITIZER=${OPTARG}
|
||||
;;
|
||||
r)
|
||||
REQUIREMENT_NAME=$OPTARG
|
||||
# get next arg if there are multiple values after -r
|
||||
eval "nxarg=\${$((OPTIND))}"
|
||||
# loop until the next symbol '-' or the end of arguments
|
||||
while [[ "${nxarg}" =~ ^[0-9]+$ ]]; do
|
||||
SUBREQUIREMENT_IDS+=("$nxarg")
|
||||
OPTIND=$((OPTIND+1))
|
||||
eval "nxarg=\${$((OPTIND))}"
|
||||
done
|
||||
echo "Only Test requirement name: ${REQUIREMENT_NAME}"
|
||||
[[ ${#SUBREQUIREMENT_IDS[@]} -ne 0 ]] && echo "Choose subrequirement IDs: ${SUBREQUIREMENT_IDS[@]}"
|
||||
;;
|
||||
?)
|
||||
help
|
||||
exit 1
|
||||
@ -219,6 +240,7 @@ readonly REPORT_DIR=${WORK_DIR}/report/${DATE}
|
||||
mkdir -p ${REPORT_DIR}
|
||||
|
||||
readonly WAMR_DIR=${WORK_DIR}/../../..
|
||||
readonly REQUIREMENT_SCRIPT_DIR=${WORK_DIR}/../requirement-engineering-test-script
|
||||
|
||||
if [[ ${SGX_OPT} == "--sgx" ]];then
|
||||
readonly IWASM_LINUX_ROOT_DIR="${WAMR_DIR}/product-mini/platforms/linux-sgx"
|
||||
@ -442,17 +464,17 @@ function spec_test()
|
||||
popd
|
||||
if [ ! -d "exception-handling" ];then
|
||||
echo "exception-handling not exist, clone it from github"
|
||||
git clone -b master --single-branch https://github.com/WebAssembly/exception-handling
|
||||
git clone -b master --single-branch https://github.com/WebAssembly/exception-handling
|
||||
fi
|
||||
pushd exception-handling
|
||||
|
||||
# restore and clean everything
|
||||
git reset --hard 51c721661b671bb7dc4b3a3acb9e079b49778d36
|
||||
|
||||
|
||||
if [[ ${ENABLE_MULTI_MODULE} == 0 ]]; then
|
||||
git apply ../../spec-test-script/exception_handling.patch
|
||||
fi
|
||||
|
||||
|
||||
popd
|
||||
echo $(pwd)
|
||||
fi
|
||||
@ -731,6 +753,8 @@ function collect_standalone()
|
||||
./collect_coverage.sh "${CODE_COV_FILE}" "${STANDALONE_DIR}/test-running-modes/c-embed/build"
|
||||
echo "Collect code coverage of standalone test-ts2"
|
||||
./collect_coverage.sh "${CODE_COV_FILE}" "${STANDALONE_DIR}/test-ts2/build"
|
||||
echo "Collect code coverage of standalone test-module-malloc"
|
||||
./collect_coverage.sh "${CODE_COV_FILE}" "${STANDALONE_DIR}/test-module-malloc/build"
|
||||
|
||||
popd > /dev/null 2>&1
|
||||
fi
|
||||
@ -858,6 +882,13 @@ function collect_coverage()
|
||||
|
||||
function trigger()
|
||||
{
|
||||
# Check if REQUIREMENT_NAME is set, if set, only calling requirement test and early return
|
||||
if [[ -n $REQUIREMENT_NAME ]]; then
|
||||
python ${REQUIREMENT_SCRIPT_DIR}/run_requirement.py -o ${REPORT_DIR}/ -r "$REQUIREMENT_NAME" "${SUBREQUIREMENT_IDS[@]}"
|
||||
# early return with the python script exit status
|
||||
return $?
|
||||
fi
|
||||
|
||||
local EXTRA_COMPILE_FLAGS=""
|
||||
# default enabled features
|
||||
EXTRA_COMPILE_FLAGS+=" -DWAMR_BUILD_BULK_MEMORY=1"
|
||||
@ -907,6 +938,7 @@ function trigger()
|
||||
EXTRA_COMPILE_FLAGS+=" -DWAMR_BUILD_EXCE_HANDLING=1"
|
||||
EXTRA_COMPILE_FLAGS+=" -DWAMR_BUILD_TAIL_CALL=1"
|
||||
fi
|
||||
|
||||
echo "SANITIZER IS" $WAMR_BUILD_SANITIZER
|
||||
|
||||
if [[ "$WAMR_BUILD_SANITIZER" == "ubsan" ]]; then
|
||||
|
||||
Reference in New Issue
Block a user