diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1ab4cacd..390c41bf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,6 +3,30 @@ name: Tests on: [push, pull_request] jobs: + python-style: + name: Python Checks + runs-on: ubuntu-24.04 + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Install Black, PyLint and PyTest + run: | + python -m pip install --upgrade pip + pip install black pylint pytest + + - name: Run Black Check + run: | + black ./IntegrationTests --check --diff + + - name: Run PyLint + run: | + pylint ./IntegrationTests + + - name: Run PyTest + run: | + pytest -m tester ./IntegrationTests + test_soms: runs-on: ubuntu-24.04 # ubuntu-latest continue-on-error: ${{ matrix.not-up-to-date }} @@ -167,6 +191,16 @@ jobs: echo "${{ matrix.som }} $SOM_TESTS" eval "${{ matrix.som }} $SOM_TESTS" + - name: Run Integration Tests + if: ${{ matrix.som != 'spec' }} + run: | + python -m pip install --upgrade pip + pip install pytest + export VM="som-vm/${{ matrix.som }}" + export CLASSPATH=Smalltalk + export TEST_EXCEPTIONS=som-vm/integration-tests.yml + pytest IntegrationTests + # We currently test SomSom only on TruffleSOM - name: Test SomSom on TruffleSOM if: ${{ matrix.repo == 'TruffleSOM.git' }} diff --git a/IntegrationTests/README.md b/IntegrationTests/README.md new file mode 100644 index 00000000..07a4a148 --- /dev/null +++ b/IntegrationTests/README.md @@ -0,0 +1,186 @@ +# SOM Integration Tests + +Most of the tests for the integration testing come from lang_tests of [yksom](https://github.com/softdevteam/yksom/tree/master/lang_tests). Tests are identified by their path from core-lib to test.som, this ensures there can be multiple tests named test.som in different directories. + +This integration test does not replace the unit tests located in TestSuite but acts as a compliment to it. These integration tests can test more than unit tests can: +- SOM level errors that would cause the VM to exit +- Multiple different classpaths + +## Running the Integration Tests +The tests can be run using pytest by simply running pytest in the base directory of any SOM implementation that includes a version of the core-library with IntegrationTests. It requires multiple python modules installed and environment variables set. + +### Simple Test Run +``` +VM=./path-to-build CLASSPATH=./core-lib/Smalltalk python3 -m pytest +``` + +### Optionals +A set of optionals have been created for this test suite which can be added. + +## Environment variables +The environment variables are split into required and optional. Some optionals may be required for different implementations of SOM. + +#### VM +This is the path from the current working directory to the executable VM of SOM. +#### CLASSPATH +The exact classpath required by SOM to find the Object class etc. +#### TEST_EXCEPTIONS +A yaml file which details the tags of tests. Specifically it labels tests that are expected to fail for one reason or another. **Give the whole path to the file from CWD**. +#### GENERATE_REPORT +Generates a yaml file which can be used as a **TEST_EXCEPTIONS** file. It will also include additional information about how many tests passed, which tests passed that were not expected to and which tests failed. **Give a full path from CWD to where it should be saved including .yaml**. 
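For example, a run that uses both optional variables could look like this (the paths and file names here are illustrative; adjust them to your checkout and VM build):

```
VM=./som-vm/som CLASSPATH=./core-lib/Smalltalk \
TEST_EXCEPTIONS=./integration-tests.yml \
GENERATE_REPORT=./report.yaml \
python3 -m pytest IntegrationTests
```
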
#### ARBITRARY ENVVARS
When a test sets a custom_classpath, environment variables can be used to replace tags in that classpath. Set them alongside the other variables described above. See custom_classpath below for more information on runtime classpaths.

## TEST_EXCEPTIONS (How to write a file)
Four tags are currently supported by the SOM integration tests. Tagged tests are still run (except those under do_not_run), but they do not fail the run when they fail; a tagged test only causes the run to fail when it passes unexpectedly. See the example file IntegrationTests/test_tags.yaml.

To give a test a tag, list its location path under that tag:
```
known_failures:
  core-lib/IntegrationTests/Tests/test.som
```

### known_failures
Any test listed under this tag is assumed to fail. It should only be used when no more suitable tag is available.

### failing_as_unspecified
Any test listed under this tag fails because SOM does not specify the behaviour in this situation, so each implementation may treat it differently. *Example: dividing by 0.*

### unsupported
Any test listed here uses a feature which is not supported in this SOM.

### do_not_run
A test listed here should never be run, because it causes an error in the Python-level code. The test may also cause a SOM-level error, but does not have to. (*This does not include Unicode errors; they are handled at runtime.*)

## How to write a new test
For a test to be collected by pytest it has to start with a comment, and the comment should be structured with the expected output for either stderr or stdout.

```
"
VM:
    status: error
    custom_classpath: @AWFY:example/classpath:@CLASSPATH
    case_sensitive: False
    stdout:
        1000
        ...
        2 is an integer
    stderr:
        ...
        ERROR MESSAGE
"
```

**When structuring a test, all options must come before stderr and stdout.**

### Tags for structuring a test
Below is a list of tags which structure how a test works.

#### VM:
This is required as the base of the test structure and is what allows the file to be identified as an integration test.

#### custom_classpath:
This allows a custom classpath to be specified. It is useful for loading different versions of classes with the same name, e.g. the AWFY Vector instead of the core-lib Vector. **The path to ./Smalltalk must still be specified afterwards so that the Object class can be loaded.**

Tags can be used to specify different classpaths at runtime; this is generally recommended, as tests would otherwise be directory dependent. A tag is written as ```@tag```, where tag is the **exact** spelling and capitalisation of the matching environment variable. Currently, to run the tests, ```@AWFY``` must be specified alongside ```@CLASSPATH```.

#### case_sensitive
By default the tests are case insensitive (all outputs and expected values are converted to lower case), but by specifying True in case_sensitive a test can be checked case-sensitively.

#### stderr or stdout:
This is your expected output; each new line is a new item to check for. Writing ... signifies a gap in the checking: the output does not have to contain this gap, but it may. Another option available in stdout and stderr checking is ***, which signifies a word of arbitrary precision.

**Please note that *** cannot be used on the same line as ...**
```python
# not accepted
... 1.11***11 ...

# accepted
... this line ...
Hel***lo
... another ...
line
```
### Understanding how the "***" works in test_runner
A word is loosely defined here as any connected string of characters; it can contain both digits and letters. Placing *** in a word means the following:
1. All characters before the *** must be present.
2. Not all characters after the *** have to be present, but any that are present must match exactly.
3. There cannot be more characters than the expected output specifies.

This allows different SOM implementations to pass tests at different levels of precision, while no SOM will pass on an incorrect result.

#### Example
```python
# Expected
stdout:
    1.111***123

# Accepted outputs

1.111
1.1111
1.11112
1.111123

# Not accepted
1.1
1.11
1.111124
1.1111234
```

### Understanding how the "..." works in test_runner
There are situations where ... is necessary for your output. Below are some example use cases, when they may be necessary, and how to write tests for them. As a preface: check_output compares a line as a whole, so writing ... allows for a gap; a more precise check can be made by including as much of the expected output as possible.

#### Possible evaluations of "..."
```
stdout:
    Hello, World
    ...
    Goodbye
```
This would be true for:
```
Hello, World
Today is a Monday
Goodbye

/

Hello, World
Goodbye
```

In the expected stdout below, line 1 says: match a whole line that contains Hello, then some other text as a gap, then the word sample, then whatever comes after on that line. Line 2 specifies that the line must contain is and end with this line. Line 3 says that the word little must appear somewhere in the line.

#### Stdout
```
This is SOM++
Hello, this is some sample output
There is some more on this line
And a little more here
```

#### Expected
```
VM:
    status: success
    case_sensitive: False
    stdout:
        Hello, ... sample ...
        ... is ... this line
        ... little ...
```

### When not to use "..."
- When the word you are searching for is at the end of the line, do not write "*word* ...".
- When the word you are searching for is at the beginning of the line, do not write "... *word*".

## Developing the test_runner
To support future development of the test_runner, another set of tests is included that tests the test_runner itself: they make sure that parsing a test file, output checking, and setting dynamic classpaths all work as expected. A minimal example of such a test is shown below.

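Such a test is an ordinary pytest function marked `tester` that calls a `test_runner` helper directly. The sketch below (the function name and sample data are illustrative, not part of the suite) exercises the `...` gap handling of `check_output_matches`:

```python
import pytest

from test_runner import check_output_matches


@pytest.mark.tester
def test_gap_matches_with_intermediate_line():
    # Illustrative data: "..." lets the unrelated middle line be skipped.
    # Output and expectations are lower-cased, mirroring the default
    # case-insensitive comparison used by the runner.
    given = "Hello, World\nToday is a Monday\nGoodbye".lower().split("\n")
    expected = ["hello, world", "...", "goodbye"]
    assert check_output_matches(given, expected)
```
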
+ + +#### Run this command below to execute those tests +``` +pytest -m tester +``` \ No newline at end of file diff --git a/IntegrationTests/Tests/vector_awfy.som b/IntegrationTests/Tests/vector_awfy.som index 277eb986..9e696c9a 100644 --- a/IntegrationTests/Tests/vector_awfy.som +++ b/IntegrationTests/Tests/vector_awfy.som @@ -1,7 +1,7 @@ " VM: status: success - custom_classpath: ./core-lib/Examples/AreWeFastYet/Core:./core-lib/Smalltalk + custom_classpath: @AWFY:@CLASSPATH stdout: nil diff --git a/IntegrationTests/Tests/vector_awfy2.som b/IntegrationTests/Tests/vector_awfy2.som index 0a214f08..c062158a 100644 --- a/IntegrationTests/Tests/vector_awfy2.som +++ b/IntegrationTests/Tests/vector_awfy2.som @@ -1,7 +1,7 @@ " VM: status: success - custom_classpath: ./core-lib/Examples/AreWeFastYet/Core:./core-lib/Smalltalk + custom_classpath: @AWFY:@CLASSPATH stdout: nil diff --git a/IntegrationTests/Tests/vector_awfy_capacity.som b/IntegrationTests/Tests/vector_awfy_capacity.som index 32c44249..5f19f8ad 100644 --- a/IntegrationTests/Tests/vector_awfy_capacity.som +++ b/IntegrationTests/Tests/vector_awfy_capacity.som @@ -1,7 +1,7 @@ " VM: status: success - custom_classpath: ./core-lib/Examples/AreWeFastYet/Core:./core-lib/Smalltalk + custom_classpath: @AWFY:@CLASSPATH stdout: 50 100 diff --git a/IntegrationTests/conftest.py b/IntegrationTests/conftest.py new file mode 100644 index 00000000..a1e157dd --- /dev/null +++ b/IntegrationTests/conftest.py @@ -0,0 +1,132 @@ +""" +Defines variables that are required for a report to be generated. +""" + +import yaml + +# Report Generate Logic +tests_failed_unexpectedly = [] +tests_passed_unexpectedly = [] +tests_passed = 0 # pylint: disable=invalid-name +tests_failed = 0 # pylint: disable=invalid-name +total_tests = 0 # pylint: disable=invalid-name +tests_skipped = 0 # pylint: disable=invalid-name + +# Lists containing references to each exception test +known_failures = [] +failing_as_unspecified = [] +unsupported = [] +do_not_run = [] + +# Environment variables +CLASSPATH = "" +VM = "" +TEST_EXCEPTIONS = "" +GENERATE_REPORT = "" + + +def pytest_configure(config): + """ + Add a marker to pytest + """ + config.addinivalue_line("markers", "tester: test the testing framework") + + +def pytest_collection_modifyitems(config, items): + """ + Make sure the correct tests are being selected for the mode that is running + """ + # Check if "-m tester" was specified + marker_expr = config.getoption("-m") + run_tester_selected = False + + if marker_expr: + # Simplistic check: if "tester" is anywhere in the -m expression + # (You can improve parsing if needed) + run_tester_selected = "tester" in marker_expr.split(" or ") + + if not run_tester_selected: + deselected = [item for item in items if "tester" in item.keywords] + if deselected: + for item in deselected: + items.remove(item) + config.hook.pytest_deselected(items=deselected) + + +# Log data +def pytest_runtest_logreport(report): + """ + Increment the counters for what action was performed + """ + # Global required here to access counters + # Not ideal but without the counters wouldn't work + global total_tests, tests_passed, tests_failed, tests_skipped # pylint: disable=global-statement + if report.when == "call": # only count test function execution, not setup/teardown + total_tests += 1 + if report.passed: + tests_passed += 1 + elif report.failed: + tests_failed += 1 + elif report.skipped: + tests_skipped += 1 + + +# Run after all tests completed, Generate a report of failing and passing tests +def 
pytest_sessionfinish(exitstatus): + """ + Generate report based on test run + """ + if GENERATE_REPORT: + # To make the report useful it will add the tests which have failed- + # -unexpectedly to known_failures + # It will also remove those that have passed from any of those lists + + for test_path in tests_passed_unexpectedly: + test = str(test_path) + if test in known_failures: + known_failures.remove(test) + if test in unsupported: + unsupported.remove(test) + if test in failing_as_unspecified: + failing_as_unspecified.remove(test) + + if len(tests_failed_unexpectedly) != 0: + for test in tests_failed_unexpectedly: + # Remove the part of the path that is incompatible with multiple directory running + known_failures.append( + "Tests/" + str(test).rsplit("Tests/", maxsplit=1)[-1] + ) + + # Generate a report_message to save + report_data = { + "summary": { + "tests_total": total_tests, + "tests_passed": tests_passed, + "tests_failed": tests_failed, + "tests_skipped": tests_skipped, + "pytest_exitstatus": str(exitstatus), + "note": "Totals include expected failures", + }, + "unexpected": { + "passed": [ + "Tests/" + str(test).rsplit("Tests/", maxsplit=1)[-1] + for test in tests_passed_unexpectedly + ], + "failed": [ + "Tests/" + str(test).rsplit("Tests/", maxsplit=1)[-1] + for test in tests_failed_unexpectedly + ], + }, + "environment": { + "virtual machine": VM, + "classpath": CLASSPATH, + "test_exceptions": TEST_EXCEPTIONS, + "generate_report_location": GENERATE_REPORT, + }, + "known_failures": known_failures, + "failing_as_unspecified": failing_as_unspecified, + "unsupported": unsupported, + "do_not_run": do_not_run, + } + with open(f"{GENERATE_REPORT}", "w", encoding="utf-8") as f: + yaml.dump(report_data, f, default_flow_style=False, sort_keys=False) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py new file mode 100644 index 00000000..1a7a76ae --- /dev/null +++ b/IntegrationTests/test_runner.py @@ -0,0 +1,541 @@ +# pylint: disable=missing-function-docstring, missing-class-docstring, too-many-arguments, too-many-positional-arguments, too-few-public-methods +""" +This is the SOM integration test runner file. Pytest automatically discovers +this file and will find all .som test files in the below directories. +""" + +import subprocess +from pathlib import Path +from difflib import ndiff +import os +import pytest +import yaml +import conftest as external_vars + + +class Definition: + def __init__( + self, + name: str, + stdout: list[str], + stderr: list[str], + custom_classpath: str | None, + case_sensitive: bool, + definition_fail_msg: str | None = None, + ): + self.name = name + self.stdout = stdout + self.stderr = stderr + self.custom_classpath = custom_classpath + self.case_sensitive = case_sensitive + self.definition_fail_msg = definition_fail_msg + + +class ParseError(Exception): + """ + Exception raised when a test file cannot be parsed correctly. + This is used to fail the test in the test runner. + """ + + def __init__(self, message): + super().__init__(message) + self.message = message + + +def is_som_test(path): + if path.suffix == ".som": + with open(path, "r", encoding="utf-8") as f: + contents = f.read() + if "VM:" in contents: + return True + return False + + +def discover_test_files(path, test_files): + """ + Recursively read the directory tree and add all .som test files to `test_files`. 
+ """ + for element in Path(path).iterdir(): + if element.is_dir(): + discover_test_files(element, test_files) + elif element.is_file() and is_som_test(element): + test_files.append(str(element)) + + +def collect_tests(test_files) -> list[Definition]: + """ + Assemble a dictionary of + name: the name of the test file + stdout/stderr: the expected output of the test + """ + tests = [] + for file_path in test_files: + test_dict = parse_test_file(file_path) + if test_dict is None: + continue + tests.append(test_dict) + + return tests + + +# pylint: disable=too-many-nested-blocks +def parse_custom_classpath(comment) -> str | None: + """ + Based on the comment will calculate the custom_classpath + for the current test + + Return: The custom classpath + """ + comment_lines = comment.split("\n") + for line in comment_lines: + if "custom_classpath" in line: + classpath = line.split("custom_classpath:")[1].strip() + + classpath_t = classpath + + # Now check our custom classpath for any tags + # Tags are defined as @tag in the classpath + # Will then assign the EXACT value of the + # Environment variable to that spot + + if classpath_t.find("@") >= 0: + + classpath_joined = "" + # Does the classpath have a splitter ":" + if ":" in classpath_t: + split_list = classpath_t.split(":") + for tag in split_list: + if tag.find("@") >= 0: + tag = tag.replace("@", "") + if tag in os.environ: + classpath_joined += os.environ[tag] + ":" + continue + raise ParseError(f"Environment variable {tag} not set") + # Add a normal classpath inside of tags + classpath_joined += tag + ":" + + else: + classpath_t = classpath_t.replace("@", "") + if classpath_t in os.environ: + classpath_joined += os.environ[classpath_t] + else: + raise ParseError(f"Environment variable {classpath_t} not set") + + # Remove the final ":" + classpath = classpath_joined[:-1] + + return classpath + return None + + +def parse_case_sensitive(comment): + """ + Based on a comment decide whether a case_sensitive is requried + """ + comment_lines = comment.split("\n") + for line in comment_lines: + if "case_sensitive" in line: + return bool(line.split("case_sensitive:")[1].strip().lower() == "true") + + return False + + +def parse_stdout(comment): + """ + Based on a comment parse the expected stdout + """ + std_out = comment.split("stdout:")[1] + if "stderr" in std_out: + std_err_inx = std_out.index("stderr:") + std_out = std_out[:std_err_inx] + std_err_l = std_out.split("\n") + std_err_l = [line.strip() for line in std_err_l if line.strip()] + return std_err_l + + +def parse_stderr(comment): + """ + Based on a comment parse the expected stderr + """ + std_err = comment.split("stderr:")[1] + if "stdout" in std_err: + std_out_inx = std_err.index("stdout:") + std_err = std_err[:std_out_inx] + std_err_l = std_err.split("\n") + std_err_l = [line.strip() for line in std_err_l if line.strip()] + return std_err_l + + +def parse_test_file(test_file) -> Definition: + """ + parse the test file to extract the important information + """ + name = test_file + stdout = [] + stderr = [] + custom_classpath = None + case_sensitive = False + + try: + with open(test_file, "r", encoding="utf-8") as open_file: + contents = open_file.read() + comment = contents.split('"')[1] + + # Make sure if using a custom test classpath that it is above + # Stdout and Stderr + if "custom_classpath" in comment: + custom_classpath = parse_custom_classpath(comment) + + if "case_sensitive" in comment: + case_sensitive = parse_case_sensitive(comment) + + if "stdout" in comment: + stdout = 
parse_stdout(comment) + + if "stderr" in comment: + stderr = parse_stderr(comment) + + if not case_sensitive: + stdout = [s.lower() for s in stdout] + stderr = [s.lower() for s in stderr] + except ParseError as e: + return Definition(name, [], [], None, False, e.message) + + return Definition(name, stdout, stderr, custom_classpath, case_sensitive) + + +def make_a_diff(expected, given): + """ + Creates a string that represents the difference between two + lists of Strings. + """ + diff_string = "" + for diff in ndiff(expected, given): + diff_string += f"\n{str(diff)}" + + return diff_string + + +# pylint: disable=too-many-positional-arguments +# pylint: disable=too-many-arguments +def build_error_message( + stdout, stderr, exp_stdout, exp_stderr, command, case_sensitive +): + """ + Build an error message for the test runner + """ + + error_message = f"""\n +Command: {command} +Case Sensitive: {case_sensitive} + """ + + if stdout.strip() != "": + error_message += "\nstdout diff with stdout expected\n" + error_message += make_a_diff(exp_stdout, stdout.strip().split("\n")) + + if stderr.strip() != "": + error_message += "\nstderr diff with stderr expected\n" + error_message += make_a_diff(exp_stderr, stderr.strip().split("\n")) + + return error_message + + +def check_partial_word(word, exp_word): + """ + Check a partial expected String against a line + + returns True if the line matches + """ + + # Creates a list of words that are expected + exp_word_needed = exp_word.split("***")[0] + exp_word_optional = exp_word.split("***")[1] + + if exp_word_needed in word: + where = word.find(exp_word_needed) + len(exp_word_needed) + counter = 0 + for character in exp_word_optional: + + if counter + where > len(word) - 1: + return True + + if word[counter + where] == character: + counter += 1 + continue + return False + else: + return False + + if counter + where < len(word): + return False + + return True + + +def check_output_matches(given, expected): + """ + Check if the expected output is contained in the given output + + given: list of strings representing the actual output + expected: list of strings representing the expected output + """ + # Check if the stdout matches the expected stdout + exp_std_inx = 0 + for g_out in given: + # Check that checks don't pass before out of outputs + if exp_std_inx >= len(expected): + return True + + if expected[exp_std_inx] == "...": + # If the expected output is '...' then we skip this line + exp_std_inx += 1 + continue + + # This is incompaptible with ... for line skipping + if "***" in expected[exp_std_inx]: + # Now do some partial checking + partial_output = check_partial_word(g_out, expected[exp_std_inx]) + if partial_output: + exp_std_inx += 1 + continue + + if g_out.strip() != expected[exp_std_inx].strip(): + # Check if expected has ... + if "..." 
in expected[exp_std_inx]: + # If it does then we need to remove it and check for that line containing string + without_gap = expected[exp_std_inx].split("...") + if all(without_gap in g_out for without_gap in without_gap): + exp_std_inx += 1 + continue + + # If the output does not match, continue without incrementing + continue + + exp_std_inx += 1 + + if exp_std_inx != len(expected): + # It is not all contained in the output + return False + + return True + + +def check_output(test_outputs, expected_std_out, expected_std_err): + """ + check if the output of the test matches the expected output + test_outputs: The object returned by subprocess.run + expected_std_out: The expected standard output + expected_std_err: The expected standard error output + Returns: Boolean indicating if result matches expected output + + note: This method does not directly error, just checks conditions + + stdout and stderr do not match in all SOMs + stderr checked against stdout and stderr + stdout checked against stdout and stderr + + This is relatively robust for most test cases + """ + given_std_out = test_outputs.stdout.split("\n") + given_std_err = test_outputs.stderr.split("\n") + + return check_output_matches( + given_std_out, expected_std_out + ) and check_output_matches(given_std_err, expected_std_err) + + +# Read the test exceptions file and set the variables correctly +# pylint: disable=too-many-branches +def read_test_exceptions(filename): + """ + Read a TEST_EXCEPTIONS file and extract the core information + Filename should be either a relative path from CWD to file + or an absolute path. + """ + if not filename: + return + + with open(f"{filename}", "r", encoding="utf-8") as file: + yaml_file = yaml.safe_load(file) + + if yaml_file is not None: + external_vars.known_failures = yaml_file.get("known_failures", []) or [] + external_vars.failing_as_unspecified = ( + yaml_file.get("failing_as_unspecified", []) or [] + ) + external_vars.unsupported = yaml_file.get("unsupported", []) or [] + external_vars.do_not_run = yaml_file.get("do_not_run", []) or [] + + path = os.path.relpath(os.path.dirname(__file__)) + if path == ".": + path = "" + + external_vars.known_failures = [ + os.path.join(path, test) + for test in external_vars.known_failures + if test is not None + ] + external_vars.failing_as_unspecified = [ + os.path.join(path, test) + for test in external_vars.failing_as_unspecified + if test is not None + ] + external_vars.unsupported = [ + os.path.join(path, test) + for test in external_vars.unsupported + if test is not None + ] + external_vars.do_not_run = [ + os.path.join(path, test) + for test in external_vars.do_not_run + if test is not None + ] + + +def prepare_tests(): + location = os.path.relpath(os.path.dirname(__file__)) + if not os.path.exists(location + "/Tests"): + return [ + Definition( + "test-setup", + [], + [], + None, + False, + "`Tests` directory not found. 
Please make sure the lang_tests are installed", + ) + ] + + # Work out settings for the application (They are labelled REQUIRED or OPTIONAL) + if "CLASSPATH" not in os.environ: # REQUIRED + return [ + Definition( + "test-setup", + [], + [], + None, + False, + "Please set the CLASSPATH environment variable", + ) + ] + + if "VM" not in os.environ: # REQUIRED + return [ + Definition( + "test-setup", + [], + [], + None, + False, + "Please set the VM environment variable", + ) + ] + + if "TEST_EXCEPTIONS" in os.environ: # OPTIONAL + external_vars.TEST_EXCEPTIONS = os.environ["TEST_EXCEPTIONS"] + + if "GENERATE_REPORT" in os.environ: # OPTIONAL + external_vars.GENERATE_REPORT = os.environ["GENERATE_REPORT"] + + external_vars.CLASSPATH = os.environ["CLASSPATH"] + external_vars.VM = os.environ["VM"] + + read_test_exceptions(external_vars.TEST_EXCEPTIONS) + + test_files = [] + discover_test_files(location + "/Tests", test_files) + test_files = sorted(test_files) + return collect_tests(test_files) + + +def get_test_id(test): + print(test) + return "Tests/" + test.name.split("Tests/")[-1] + + +@pytest.mark.parametrize( + "test", + prepare_tests(), + ids=get_test_id, +) +# pylint: disable=too-many-branches +def tests_runner(test): + """ + Take an array of dictionaries with test file names and expected output + Run all of the tests and check the output + Cleanup the build directory if required + """ + + # Check if a test should not be ran + if test.name in external_vars.do_not_run: + pytest.skip("Test included in do_not_run") + + if test.definition_fail_msg: + pytest.fail(test.definition_fail_msg) + + if test.custom_classpath is not None: + command = f"{external_vars.VM} -cp {test.custom_classpath} {test.name}" + else: + command = f"{external_vars.VM} -cp {external_vars.CLASSPATH} {test.name}" + + try: + result = subprocess.run( + command, capture_output=True, text=True, shell=True, check=False + ) + except UnicodeDecodeError: + pytest.skip( + "Test output could not be decoded SOM may not support " + "full Unicode. Result object not generated." 
+ ) + + # lower-case comparisons unless specified otherwise + if test.case_sensitive is False: + result.stdout = str(result.stdout).lower() + result.stderr = str(result.stderr).lower() + + # Produce potential error messages now and then run assertion + error_message = build_error_message( + result.stdout, + result.stderr, + test.stdout, + test.stderr, + command, + test.case_sensitive, + ) + + # Related to above line (Rather than change how stdout and stderr are + # represented just joining and then splitting again) + + if result.returncode != 0: + error_message += f"Command failed with return code: {result.returncode}\n" + + test_pass_bool = check_output(result, test.stdout, test.stderr) + + # Check if we have any unexpectedly passing tests + if ( + test.name in external_vars.known_failures and test_pass_bool + ): # Test passed when it is not expected to + external_vars.tests_passed_unexpectedly.append(test.name) + assert False, f"Test {test.name} is in known_failures but passed" + + if test.name in external_vars.failing_as_unspecified and test_pass_bool: + # Test passed when it is not expected tp + external_vars.tests_passed_unexpectedly.append(test.name) + assert False, f"Test {test.name} is in failing_as_unspecified but passed" + + if test.name in external_vars.unsupported and test_pass_bool: + # Test passed when it is not expected tp + external_vars.tests_passed_unexpectedly.append(test.name) + assert False, f"Test {test.name} is in unsupported but passed" + + if ( + test.name not in external_vars.unsupported + and test.name not in external_vars.known_failures + and test.name not in external_vars.failing_as_unspecified + ): + if not test_pass_bool: + external_vars.tests_failed_unexpectedly.append(test.name) + assert ( + test_pass_bool + ), f"Error on test, {test.name} expected to pass: {error_message}" diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_1.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_1.som new file mode 100644 index 00000000..c7d9379a --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_1.som @@ -0,0 +1,23 @@ +" +VM: + status: success + stdout: + 1 + 2 + 3 + 4 + 5 + ... + 10 + stderr: + THIS IS AN ERROR + ... + Hello, world +" + +som_test_1 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. + ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_2.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_2.som new file mode 100644 index 00000000..ffce47e7 --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_2.som @@ -0,0 +1,20 @@ +" +VM: + status: success + case_sensitive: True + stdout: + I AM cAsE sensitiVe + ... + Dots/inTest + stderr: + CaSE sensitive ErrOr + ... + TestCaseSensitivity +" + +som_test_2 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. 
+ ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_3.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_3.som new file mode 100644 index 00000000..457ae003 --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_3.som @@ -0,0 +1,17 @@ +" +VM: + status: success + case_sensitive: False + custom_classpath: core-lib/AreWeFastYet/Core + stdout: + ... + stderr: + ... +" + +som_test_3 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. + ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_4.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_4.som new file mode 100644 index 00000000..48845925 --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_4.som @@ -0,0 +1,17 @@ +" +VM: + status: success + case_sensitive: False + custom_classpath: @AWFYtest:@experimental:@oneWord + stdout: + ... + stderr: + ... +" + +som_test_4 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. + ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_5.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_5.som new file mode 100644 index 00000000..dbfdba1f --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_5.som @@ -0,0 +1,17 @@ +" +VM: + status: success + case_sensitive: False + custom_classpath: one/the/outside:@AWFYtest:then/another/one + stdout: + ... + stderr: + ... +" + +som_test_5 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. + ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_6.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_6.som new file mode 100644 index 00000000..4785f622 --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_6.som @@ -0,0 +1,17 @@ +" +VM: + status: success + case_sensitive: False + custom_classpath: @IDontExist + stdout: + ... + stderr: + ... +" + +som_test_6 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. 
+ ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/yaml_for_testing/missing_all_tags.yaml b/IntegrationTests/test_runner_tests/yaml_for_testing/missing_all_tags.yaml new file mode 100644 index 00000000..e69de29b diff --git a/IntegrationTests/test_runner_tests/yaml_for_testing/missing_known_declaration.yaml b/IntegrationTests/test_runner_tests/yaml_for_testing/missing_known_declaration.yaml new file mode 100644 index 00000000..ad648289 --- /dev/null +++ b/IntegrationTests/test_runner_tests/yaml_for_testing/missing_known_declaration.yaml @@ -0,0 +1,7 @@ +known_failures: + +failing_as_unspecified: + +unsupported: + +do_not_run: diff --git a/IntegrationTests/test_runner_tests/yaml_for_testing/set_to_be_null.yaml b/IntegrationTests/test_runner_tests/yaml_for_testing/set_to_be_null.yaml new file mode 100644 index 00000000..2d57be68 --- /dev/null +++ b/IntegrationTests/test_runner_tests/yaml_for_testing/set_to_be_null.yaml @@ -0,0 +1,8 @@ +known_failures: + - null +failing_as_unspecified: + - null +unsupported: + - null +do_not_run: + - null \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/yaml_for_testing/tests_in_each.yaml b/IntegrationTests/test_runner_tests/yaml_for_testing/tests_in_each.yaml new file mode 100644 index 00000000..b24be382 --- /dev/null +++ b/IntegrationTests/test_runner_tests/yaml_for_testing/tests_in_each.yaml @@ -0,0 +1,8 @@ +known_failures: + - Tests/mutate_superclass_method/test.som +failing_as_unspecified: + - Tests/mutate_superclass_method/test.som +unsupported: + - Tests/mutate_superclass_method/test.som +do_not_run: + - Tests/mutate_superclass_method/test.som diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py new file mode 100644 index 00000000..e44fccb2 --- /dev/null +++ b/IntegrationTests/test_tester.py @@ -0,0 +1,404 @@ +# pylint: disable=missing-function-docstring,redefined-outer-name +""" +Tests for the tester functionality itself. 
+""" + +import os +import pytest + +from test_runner import ( + parse_test_file, + discover_test_files, + check_output_matches, + read_test_exceptions, + check_partial_word, + parse_custom_classpath, + parse_case_sensitive, + parse_stdout, + parse_stderr, + ParseError, +) +import conftest as external_vars + + +@pytest.mark.parametrize( + "word", + [ + "1.111111111", + "1.1111111111", + "1.11111111111", + "1.1111111111111", + "1.11111111111111", + "1.111111111111111", + "1.1111111111111111", + "1.11111111111111111", + "1.111111111111111111", + "1.1111111111111111111", + ], +) +@pytest.mark.tester +def test_check_partial_word_matches(word): + assert check_partial_word(word, "1.111111111***1111111111") + + +@pytest.mark.parametrize( + "word", + [ + "", + "1", + "1.", + "1.1", + "1.11", + "1.111", + "1.1111", + "1.11111", + "1.111111", + "1.1111111", + "1.11111111", + "1.1111111112", + "1.211111111111111", + "1.11111111111111111111", + "1.11111111111111111112", + ], +) +@pytest.mark.tester +def test_check_partial_word_does_not_match(word): + assert not check_partial_word(word, "1.111111111***1111111111") + + +@pytest.fixture +def soms_for_testing_location(): + return os.path.relpath( + os.path.dirname(__file__) + "/test_runner_tests/soms_for_testing" + ) + + +@pytest.mark.parametrize( + "test_file, exp_stdout, exp_stderr, custom_classpath, case_sensitive", + [ + ( + "/som_test_1.som", + ["1", "2", "3", "4", "5", "...", "10"], + ["this is an error", "...", "hello, world"], + None, + False, + ), + ( + "/som_test_2.som", + ["I AM cAsE sensitiVe", "...", "Dots/inTest"], + ["CaSE sensitive ErrOr", "...", "TestCaseSensitivity"], + None, + True, + ), + ("/som_test_3.som", ["..."], ["..."], "core-lib/AreWeFastYet/Core", False), + ], +) +@pytest.mark.tester +def test_parse_file( # pylint: disable=too-many-arguments,too-many-positional-arguments + test_file, + exp_stdout, + exp_stderr, + custom_classpath, + case_sensitive, + soms_for_testing_location, +): + result = parse_test_file(soms_for_testing_location + test_file) + assert result.stdout == exp_stdout + assert result.stderr == exp_stderr + assert result.custom_classpath == custom_classpath + assert result.case_sensitive is case_sensitive + + +@pytest.mark.tester +def test_parse_file_correctly_using_envvars(soms_for_testing_location): + custom_classpath = "core-lib/AreWeFastYet/Core:experiments/Classpath:anotherOne" + os.environ["AWFYtest"] = "core-lib/AreWeFastYet/Core" + os.environ["experimental"] = "experiments/Classpath" + os.environ["oneWord"] = "anotherOne" + + result = parse_test_file(soms_for_testing_location + "/som_test_4.som") + assert result.custom_classpath == custom_classpath + + # Now test the ability to interleave regular classpaths + custom_classpath = "one/the/outside:core-lib/AreWeFastYet/Core:then/another/one" + result = parse_test_file(soms_for_testing_location + "/som_test_5.som") + assert result.custom_classpath == custom_classpath + + +@pytest.mark.tester +def test_parse_file_failing_because_of_envvar_not_being_set(soms_for_testing_location): + result = parse_test_file(soms_for_testing_location + "/som_test_6.som") + assert result.definition_fail_msg == "Environment variable IDontExist not set" + + +@pytest.mark.tester +def test_discover_test_files(): + test_runner_tests_location = os.path.relpath( + os.path.dirname(__file__) + "/test_runner_tests" + ) + tests = [] + discover_test_files(test_runner_tests_location, tests) + tests = sorted(tests) + + expected_tests = [ + 
f"{test_runner_tests_location}/soms_for_testing/som_test_{i}.som" + # add missing tests here, or make the test independent of the actual test files + for i in range(1, 7) + ] + + assert tests == expected_tests + + +@pytest.mark.parametrize( + "out, expected", + [ + ( + "Hello World\nSome other output in the Middle\nThis is a test\n".lower(), + ["hello world", "...", "this is a test"], + ), + ( + """This is SOM++ +Hello Rhys this is some sample output +1\n2\n3\n4\n4\n56\n6\n7\n7\n8\n9\n9 +1010101\n10101\n1010101 +1010101010101010100101010101010010101 +Rhys Walker +Moving on +Extra text +more Numbers +NUMBER NUMBER NUMBER NUMBER +""", + [ + "Hello ... this is ... sample output", + "Rhys Walker", + "... on", + "more ...", + "... NUMBER ... NUMBER", + ], + ), + ( + """This is SOM++ +Hello, this is some sample output +There is some more on this line +And a little more here +""", + [ + "Hello, ... sample ...", + "... is ... this line", + "... little ...", + ], + ), + ( + "Some output, as an example\nExtra Line\nReallyLongWord", + ["...", "Really***LongWord"], + ), + ( + "Some output, as an example\nExtra Line\nReally", + ["...", "Really***LongWord"], + ), + ( + "Some output, as an example\nExtra Line\nReallyLong", + ["...", "Really***LongWord"], + ), + ( + "Some output, as an example\nExtra Line\nReallyLo", + ["...", "Really***LongWord"], + ), + ], +) +@pytest.mark.tester +def test_check_output_matches(out, expected): + assert check_output_matches(out.split("\n"), expected) + + +@pytest.mark.parametrize( + "out, expected", + [ + ( + "Hello World\nSome other output in the Middle\nThis is a test\n", + ["hello world", "...", "this is a test"], + ), + ( + "Some output, as an example\nExtra Line\nReallyLongTestFunction", + ["...", "Really***LongWord"], + ), + ( + "Some output, as an example\nExtra Line\nReallyLongWordExtra", + ["...", "Really***LongWord"], + ), + ], +) +@pytest.mark.tester +def test_check_output_does_not_match(out, expected): + assert not check_output_matches(out.split("\n"), expected) + + +@pytest.mark.tester +def test_different_yaml(): + """ + Test different yaml files which may be missing some information + Or be malformed + """ + + # First, save the variables that will change in external_vars + temp_known = external_vars.known_failures + temp_unspecified = external_vars.failing_as_unspecified + temp_unsupported = external_vars.unsupported + temp_do_not_run = external_vars.do_not_run + + yaml_for_testing_location = os.path.relpath( + os.path.dirname(__file__) + "/test_runner_tests/yaml_for_testing" + ) + full_path_from_cwd = os.path.relpath(os.path.dirname(__file__)) + if full_path_from_cwd == ".": + full_path_from_cwd = "" + + # Read a yaml file with nothing after tag (Should all be empty lists) + read_test_exceptions(yaml_for_testing_location + "/missing_known_declaration.yaml") + assert external_vars.known_failures == [] + assert external_vars.failing_as_unspecified == [] + assert external_vars.unsupported == [] + assert external_vars.do_not_run == [] + + # Read a yaml file with null after each tag (Should all be []) + read_test_exceptions(yaml_for_testing_location + "/set_to_be_null.yaml") + assert external_vars.known_failures == [] + assert external_vars.failing_as_unspecified == [] + assert external_vars.unsupported == [] + assert external_vars.do_not_run == [] + + # Read a yaml file where the yamlFile object will evaluate to None type (Should be all []) + read_test_exceptions(yaml_for_testing_location + "/missing_all_tags.yaml") + assert external_vars.known_failures == [] + 
assert external_vars.failing_as_unspecified == [] + assert external_vars.unsupported == [] + assert external_vars.do_not_run == [] + + # Read a yaml file where each tag has one test included + # [core-lib/IntegrationTests/Tests/mutate_superclass_method/test.som] + read_test_exceptions(yaml_for_testing_location + "/tests_in_each.yaml") + test_list = [f"{str(full_path_from_cwd)}Tests/mutate_superclass_method/test.som"] + assert external_vars.known_failures == test_list + assert external_vars.failing_as_unspecified == test_list + assert external_vars.unsupported == test_list + assert external_vars.do_not_run == test_list + + # Reset external vars after test + external_vars.known_failures = temp_known + external_vars.failing_as_unspecified = temp_unspecified + external_vars.unsupported = temp_unsupported + external_vars.do_not_run = temp_do_not_run + + +# ######################################### # +# ALL TEST BELOW HERE SHARE THESE COMMENTS # +# ######################################### # + +COMMENT_TESTERS = """ +VM: + status: success + case_sensitive: True + custom_classpath: @custom_1:./some/other/one:@custom_2 + stdout: + Some random output + ... some other output + even more output ... + ... + the last bit std + stderr: + Some random error + ... some other error + even more error ... + ... + the last bit of error +""" + +# Causes fail on parse_custom_classpath +# False in case_sensitive +COMMENT_TESTERS_2 = """ +VM: + status: success + case_sensitive: False + custom_classpath: @no_exist_1:./some/other/one:@no_exist_2 + stdout: + ... + stderr: + ... +""" + + +@pytest.mark.tester +def test_custom_classpath(): + """ + Test parsing a custom_classpath + """ + os.environ["custom_1"] = "classpath_1" + os.environ["custom_2"] = "classpath_2" + + expected = "classpath_1:./some/other/one:classpath_2" + + assert expected == parse_custom_classpath(COMMENT_TESTERS) + + # Now assert a failure on a classpath envvar that hasnt been set + with pytest.raises(ParseError, match=r"Environment variable no_exist_1 not set"): + parse_custom_classpath(COMMENT_TESTERS_2) + + os.environ["no_exist_1"] = "exists_1" + + # Now assert we fail on the second + with pytest.raises(ParseError, match=r"Environment variable no_exist_2 not set"): + parse_custom_classpath(COMMENT_TESTERS_2) + + os.environ["no_exist_2"] = "exists_2" + + # Now we should pass + expected = "exists_1:./some/other/one:exists_2" + assert expected == parse_custom_classpath(COMMENT_TESTERS_2) + + +@pytest.mark.tester +def test_case_sensitive(): + """ + Test that parsing case_sensitive generates the correct values + """ + assert parse_case_sensitive(COMMENT_TESTERS) + assert not parse_case_sensitive(COMMENT_TESTERS_2) + + +# THESE BELOW MUST BE DIFFERENT EVEN THOUGH THE FUNCTIONS DO ESSENTIALLY THE SAME THING + + +@pytest.mark.tester +def test_parse_stdout(): + """ + Check that parsing the test comment generates the correct output + """ + comment_testers_expected_1 = [ + "Some random output", + "... some other output", + "even more output ...", + "...", + "the last bit std", + ] + comment_testers_expected_2 = ["..."] + + assert comment_testers_expected_1 == parse_stdout(COMMENT_TESTERS) + assert comment_testers_expected_2 == parse_stdout(COMMENT_TESTERS_2) + + +@pytest.mark.tester +def test_parse_stderr(): + """ + Check that parsing the test comment generates the correct output + """ + comment_testers_expected_1 = [ + "Some random error", + "... 
some other error", + "even more error ...", + "...", + "the last bit of error", + ] + comment_testers_expected_2 = ["..."] + + assert comment_testers_expected_1 == parse_stderr(COMMENT_TESTERS) + assert comment_testers_expected_2 == parse_stderr(COMMENT_TESTERS_2)