From b3aeffa31017ed496aad4074d5335a4d421054b0 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Wed, 18 Jun 2025 17:26:20 +0100 Subject: [PATCH 01/38] Implement test_runner.py, being mostly lang_tester compatible This is a PyTest-based runner for the integration tests of SOM. - Test expectations can be set through a tagging system. Uses a yaml file which can be specified through TEST_EXCEPTIONS envvar --- IntegrationTests/conftest.py | 82 ++++++++++ IntegrationTests/test_runner.py | 280 ++++++++++++++++++++++++++++++++ 2 files changed, 362 insertions(+) create mode 100644 IntegrationTests/conftest.py create mode 100644 IntegrationTests/test_runner.py diff --git a/IntegrationTests/conftest.py b/IntegrationTests/conftest.py new file mode 100644 index 00000000..64ca7da3 --- /dev/null +++ b/IntegrationTests/conftest.py @@ -0,0 +1,82 @@ +import pytest +import os + +# Report Generate Logic +failedUnexpectedly = [] +passedUnexpectedly = [] +tests_passed = 0 +tests_failed = 0 +total_tests = 0 +tests_skipped = 0 + +# Lists containing references to each exception test +known_failures = [] +failing_as_unspecified = [] +unsupported = [] +do_not_run = [] + +# Environment variables +DEBUG = False +CLASSPATH = "" +EXECUTABLE = "" +GENERATE_REPORT_LOCATION = "" +TEST_EXCEPTIONS = "" +GENERATE_REPORT = False + +# Log data +def pytest_runtest_logreport(report): + global total_tests, tests_passed, tests_failed, tests_skipped + if report.when == "call": # only count test function execution, not setup/teardown + total_tests += 1 + if report.passed: + tests_passed += 1 + elif report.failed: + tests_failed += 1 + elif report.skipped: + tests_skipped += 1 + +# Run after all tests completed, Generate a report of failing and passing tests +def pytest_sessionfinish(session, exitstatus): + print("Running this method") + if GENERATE_REPORT: + os.makedirs(GENERATE_REPORT_LOCATION, exist_ok=True) + report_message = f""" +Pytest Completed with {tests_passed}/{total_tests} passing: + +Test_total: {total_tests} ** This includes those that we expect to fail ** +Tests_passed: {tests_passed} ** This includes those that we expect to fail ** +Tests_failed: {tests_failed} +Tests_skipped: {tests_skipped} + +Tests that passed unexpectedly: +{'\n'.join(f"{test}" for test in passedUnexpectedly)} + +Tests that failed unexpectedly: +{'\n'.join(f"{test}" for test in failedUnexpectedly)} + +## ENVIRONMENT VARIABLES USED ## + +Executable: {EXECUTABLE} +Classpath: {CLASSPATH} +Test Exceptions: {TEST_EXCEPTIONS} +Debug: {DEBUG} +Generage Report: {GENERATE_REPORT_LOCATION} + +## TAGGED TESTS FILE ## + +Known_failures: +{'\n'.join(f"{test}" for test in known_failures)} + +Failing_as_unspecified: +{'\n'.join(f"{test}" for test in failing_as_unspecified)} + +Unsupported: +{'\n'.join(f"{test}" for test in unsupported)} + +Do_not_run: +{'\n'.join(f"{test}" for test in do_not_run)} +""" + print(f"Report location {GENERATE_REPORT_LOCATION}/report.txt") + with open(f"{GENERATE_REPORT_LOCATION}/report.txt", "w") as f: + f.write(report_message) + f.close() \ No newline at end of file diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py new file mode 100644 index 00000000..3453bd68 --- /dev/null +++ b/IntegrationTests/test_runner.py @@ -0,0 +1,280 @@ +import subprocess +from pathlib import Path +import os +import sys +import pytest +import yaml +import conftest as vars + +def debug(message): + """ + Take a string as a mesasage and output if DEBUG is true + """ + if vars.DEBUG is True: + print(message) + + +def 
debugList(messageList, prefix="", postfix=""): + """ + Take a list of messages and output if DEBUG is true, with a prefix and a postfix + """ + if vars.DEBUG is True: + for message in messageList: + print(prefix + str(message) + postfix) + + +def locateTests(path, testFiles): + """ + Locate all test files that exist in the given directory + Ignore any tests which are in the ignoredTests directory + Return a list of paths to the test files + """ + # To ID a file will be opened and at the top there should be a comment which starts with VM: + for file in Path(path).glob("*.som"): + # Check if the file is in the ignored tests (Check via path, multiple tests named test.som) + with open(file, "r") as f: + contents = f.read() + if "VM" in contents: + testFiles.append(file) + + return testFiles + + +def readDirectory(path, testFiles): + """ + Recursively read all sub directories + Path is the directory we are currently in + testFiles is the list of test files we are building up + """ + for directory in Path(path).iterdir(): + if directory.is_dir(): + readDirectory(directory, testFiles) + else: + continue + + locateTests(path, testFiles) + + +def assembleTestDictionary(testFiles): + """ + Assemble a dictionary of + name: the name of the test file + stdout/stderr: the expected output of the test + """ + tests = [] + for file in testFiles: + testDict = parseTestFile(file) + if testDict is None: + continue + tests.append(testDict) + + return tests + + +def parseTestFile(testFile): + """ + parse the test file to extract the important information + """ + testDict = {"name": testFile, "stdout": [], "stderr": [], "customCP": "NaN"} + with open(testFile, "r") as f: + contents = f.read() + comment = contents.split('"')[1] + + # Make sure if using a custom test classpath that it is above + # Stdout and Stderr + if "customCP" in comment: + commentLines = comment.split("\n") + for line in commentLines: + if "customCP" in line: + testDict["customCP"] = line.split("customCP:")[1].strip() + continue + + if "stdout" in comment: + stdOut = comment.split("stdout:")[1] + if "stderr" in stdOut: + stdErrInx = stdOut.index("stderr:") + stdOut = stdOut[:stdErrInx] + stdOut = stdOut.replace("...", "") + stdOutL = stdOut.split("\n") + stdOutL = [line.strip() for line in stdOutL if line.strip()] + testDict["stdout"] = stdOutL + + if "stderr" in comment: + stdErr = comment.split("stderr:")[1] + if "stdout" in stdErr: + stdOutInx = stdErr.index("stdout:") + stdErr = stdErr[:stdOutInx] + stdErr = stdErr.replace("...", "") + stdErrL = stdErr.split("\n") + stdErrL = [line.strip() for line in stdErrL if line.strip()] + testDict["stderr"] = stdErrL + + testTuple = ( + testDict["name"], + testDict["stdout"], + testDict["stderr"], + testDict["customCP"], + ) + + return testTuple + + +def checkOut(result, expstd, experr, errorMessage): + """ + check if the output of the test matches the expected output + result: The object returned by subprocess.run + expstd: The expected standard output + experr: The expected standard error output + errorMessage: The pregenerated error message to be used in case of failure + Returns: Boolean indicating if result matches expected output + """ + + # Tests borrowed from lang_tests and stderr and atdout will not directly match that of all SOMs + # Order of the output is important + + stdout = result.stdout.splitlines() + stderr = result.stderr.splitlines() + + # Check if each line in stdout and stderr is in the expected output + for line in expstd: + if not any(line in out_line for out_line in stdout): 
+ return False + if line in stdout: + stdout.remove(line) + if line in stderr: + stderr.remove(line) + + for line in experr: + if not any(line in err_line for err_line in stderr): + return False + if line in stdout: + stdout.remove(line) + if line in stderr: + stderr.remove(line) + + # If we made it this far then the test passed + return True + +# Code below here runs before pytest finds it's methods + +location = os.path.relpath(os.path.dirname(__file__) + "/Tests") + +# Work out settings for the application (They are labelled REQUIRED or OPTIONAL) +if "DEBUG" in os.environ: # OPTIONAL + vars.DEBUG = os.environ["DEBUG"].lower() == "true" + +if "CLASSPATH" not in os.environ: # REQUIRED + sys.exit("Please set the CLASSPATH environment variable") + +if "EXECUTABLE" not in os.environ: # REQUIRED + sys.exit("Please set the EXECUTABLE environment variable") + +if "TEST_EXCEPTIONS" in os.environ: # OPTIONAL + vars.TEST_EXCEPTIONS = os.environ["TEST_EXCEPTIONS"] + +vars.GENERATE_REPORT = False +if "GENERATE_REPORT" in os.environ: # OPTIONAL + # Value is the location + # Its prescense in env variables signifies intent to save + vars.GENERATE_REPORT_LOCATION = os.environ["GENERATE_REPORT"] + vars.GENERATE_REPORT = True + +vars.CLASSPATH = os.environ["CLASSPATH"] +vars.EXECUTABLE = os.environ["EXECUTABLE"] + +debug( + f""" +\n\nWelcome to SOM Integration Testing +\nDEBUG is set to: {vars.DEBUG} +CLASSPATH is set to: {vars.CLASSPATH} +EXECUTABLE is set to: {vars.EXECUTABLE} +TEST_EXCEPTIONS is set to: {vars.TEST_EXCEPTIONS} +GENERATE_REPORT is set to: {vars.GENERATE_REPORT} +GENERATE_REPORT_LOCATION is set to: {vars.GENERATE_REPORT_LOCATION} +""" +) + +debug(f"Opening test_tags") +if vars.TEST_EXCEPTIONS: + with open(f"{vars.TEST_EXCEPTIONS}", "r") as f: + yamlFile = yaml.safe_load(f) + vars.known_failures = (yamlFile["known_failures"]) + vars.failing_as_unspecified = (yamlFile["failing_as_unspecified"]) + vars.unsupported = (yamlFile["unsupported"]) + vars.do_not_run = yamlFile["do_not_run"] # Tests here do not fail at a SOM level but at a python level (e.g. 
Invalud UTF-8 characters) + +debugList(vars.known_failures, prefix="Failure expected from: ") +debugList(vars.failing_as_unspecified, prefix="Failure expected through undefined behaviour: ") +debugList(vars.unsupported, prefix="Test that fails through unsupported bahaviour: ") +debugList(vars.do_not_run, prefix="Test that will not run through python breaking logic: ") + +testFiles = [] +readDirectory(location, testFiles) +TESTS_LIST = assembleTestDictionary(testFiles) + +@pytest.mark.parametrize( + "name,stdout,stderr,customCP", + TESTS_LIST, + ids=[str(test_args[0]) for test_args in TESTS_LIST], +) +def tests_runner(name, stdout, stderr, customCP): + """ + Take an array of dictionaries with test file names and expected output + Run all of the tests and check the output + Cleanup the build directory if required + """ + + # Check if a test shoudld not be ran + if (str(name) in vars.do_not_run): + debug(f"Not running test {name}") + pytest.skip("Test included in do_not_run") + + if customCP != "NaN": + debug(f"Using custom classpath: {customCP}") + command = f"{vars.EXECUTABLE} -cp {customCP} {name}" + else: + command = f"{vars.EXECUTABLE} -cp {vars.CLASSPATH} {name}" + + debug(f"Running test: {name}") + + result = subprocess.run(command, capture_output=True, text=True, shell=True) + + # Produce potential error messages now and then run assertion + errMsg = f""" +Test failed for: {name} +Expected stdout: {stdout} +Given stdout : {result.stdout} +Expected stderr: {stderr} +Given stderr : {result.stderr} +Command used : {command} +""" + + if result.returncode != 0: + errMsg += f"Command failed with return code: {result.returncode}\n" + + # SOM level errors will be raised in stdout only SOM++ errors are in stderr (Most tests are for SOM level errors) + testPassed = checkOut(result, stdout, stderr, errMsg) + + # Check if we have any unexpectedly passing tests + if (str(name) in vars.known_failures and testPassed): # Test passed when it is not expected tp + vars.passedUnexpectedly.append(name) + assert(False), f"Test {name} is in known_failures but passed" + elif (str(name) in vars.known_failures and testPassed is False): # Test failed as expected + assert(True) + + if (str(name) in vars.failing_as_unspecified and testPassed): # Test passed when it is not expected tp + vars.passedUnexpectedly.append(name) + assert(False), f"Test {name} is in failing_as_unspecified but passed" + elif (str(name) in vars.failing_as_unspecified and testPassed is False): # Test failed as expected + assert(True) + + if (str(name) in vars.unsupported and testPassed): # Test passed when it is not expected tp + vars.passedUnexpectedly.append(name) + assert(False), f"Test {name} is in unsupported but passed" + elif (str(name) in vars.unsupported and testPassed is False): # Test failed as expected + assert(True) + + if (str(name) not in vars.unsupported and str(name) not in vars.known_failures and str(name) not in vars.failing_as_unspecified): + if (not testPassed): + vars.failedUnexpectedly.append(name) + assert(testPassed), f"Error on test, {name} expected to pass: {errMsg}" From a4df60522e63020400fffd18c7a9908a591f0e74 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Fri, 4 Jul 2025 14:00:34 +0100 Subject: [PATCH 02/38] Updated CI to run Black and PyLint, and integration tests --- .github/workflows/ci.yml | 29 +++ IntegrationTests/conftest.py | 41 +++-- IntegrationTests/test_runner.py | 302 ++++++++++++++++++-------------- 3 files changed, 226 insertions(+), 146 deletions(-) diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml index 1ab4cacd..1f20d5ff 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,6 +3,26 @@ name: Tests on: [push, pull_request] jobs: + python-style: + name: Python Style Checks + runs-on: ubuntu-24.04 + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Install Black, PyLint, PyYaml and Pytest + run: | + python -m pip install --upgrade pip + pip install black pylint pyyaml pytest + + - name: Run Black Check + run: | + black ./IntegrationTests --check --diff + + - name: Run PyLint + run: | + pylint ./IntegrationTests/test_runner.py ./IntegrationTests/conftest.py + test_soms: runs-on: ubuntu-24.04 # ubuntu-latest continue-on-error: ${{ matrix.not-up-to-date }} @@ -166,6 +186,15 @@ jobs: echo "${{ matrix.som }} $SOM_TESTS" eval "${{ matrix.som }} $SOM_TESTS" + + - name: Run Integration Tests + if: ${{ matrix.som != 'spec' }} + run: | + python -m pip install --upgrade pip + pip install pytest + export EXECUTABLE="som-vm/${{ matrix.som }}" + export CLASSPATH=Smalltalk + pytest IntegrationTests # We currently test SomSom only on TruffleSOM - name: Test SomSom on TruffleSOM diff --git a/IntegrationTests/conftest.py b/IntegrationTests/conftest.py index 64ca7da3..c8b3012b 100644 --- a/IntegrationTests/conftest.py +++ b/IntegrationTests/conftest.py @@ -1,13 +1,16 @@ -import pytest +""" +Defines variables that are required for a report to be generated. +""" + import os # Report Generate Logic -failedUnexpectedly = [] -passedUnexpectedly = [] -tests_passed = 0 -tests_failed = 0 -total_tests = 0 -tests_skipped = 0 +tests_failed_unexpectedly = [] +tests_passed_unexpectedly = [] +tests_passed = 0 # pylint: disable=invalid-name +tests_failed = 0 # pylint: disable=invalid-name +total_tests = 0 # pylint: disable=invalid-name +tests_skipped = 0 # pylint: disable=invalid-name # Lists containing references to each exception test known_failures = [] @@ -23,9 +26,15 @@ TEST_EXCEPTIONS = "" GENERATE_REPORT = False + # Log data def pytest_runtest_logreport(report): - global total_tests, tests_passed, tests_failed, tests_skipped + """ + Increment the counters for what action was performed + """ + # Global required here to access counters + # Not ideal but without the counters wouldn't work + global total_tests, tests_passed, tests_failed, tests_skipped # pylint: disable=global-statement if report.when == "call": # only count test function execution, not setup/teardown total_tests += 1 if report.passed: @@ -35,11 +44,17 @@ def pytest_runtest_logreport(report): elif report.skipped: tests_skipped += 1 + # Run after all tests completed, Generate a report of failing and passing tests -def pytest_sessionfinish(session, exitstatus): +def pytest_sessionfinish(): + """ + Generate report based on test run + """ print("Running this method") if GENERATE_REPORT: os.makedirs(GENERATE_REPORT_LOCATION, exist_ok=True) + + # Generate a report_message to save report_message = f""" Pytest Completed with {tests_passed}/{total_tests} passing: @@ -49,10 +64,10 @@ def pytest_sessionfinish(session, exitstatus): Tests_skipped: {tests_skipped} Tests that passed unexpectedly: -{'\n'.join(f"{test}" for test in passedUnexpectedly)} +{'\n'.join(f"{test}" for test in tests_passed_unexpectedly)} Tests that failed unexpectedly: -{'\n'.join(f"{test}" for test in failedUnexpectedly)} +{'\n'.join(f"{test}" for test in tests_failed_unexpectedly)} ## ENVIRONMENT VARIABLES USED ## @@ -77,6 +92,6 @@ def pytest_sessionfinish(session, exitstatus): {'\n'.join(f"{test}" for test in 
do_not_run)} """ print(f"Report location {GENERATE_REPORT_LOCATION}/report.txt") - with open(f"{GENERATE_REPORT_LOCATION}/report.txt", "w") as f: + with open(f"{GENERATE_REPORT_LOCATION}/report.txt", "w", encoding="utf-8") as f: f.write(report_message) - f.close() \ No newline at end of file + f.close() diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 3453bd68..8a4dff85 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -1,46 +1,52 @@ +""" +This is the SOM integration test runner file. Pytest automatically discovers +this file and will find all .som test files in the below directories. +""" + import subprocess from pathlib import Path import os import sys import pytest import yaml -import conftest as vars +import conftest as external_vars + def debug(message): """ Take a string as a mesasage and output if DEBUG is true """ - if vars.DEBUG is True: + if external_vars.DEBUG is True: print(message) -def debugList(messageList, prefix="", postfix=""): +def debug_list(message_list, prefix="", postfix=""): """ Take a list of messages and output if DEBUG is true, with a prefix and a postfix """ - if vars.DEBUG is True: - for message in messageList: + if external_vars.DEBUG is True: + for message in message_list: print(prefix + str(message) + postfix) -def locateTests(path, testFiles): +def locate_tests(path, test_files): """ Locate all test files that exist in the given directory Ignore any tests which are in the ignoredTests directory Return a list of paths to the test files """ # To ID a file will be opened and at the top there should be a comment which starts with VM: - for file in Path(path).glob("*.som"): + for file_path in Path(path).glob("*.som"): # Check if the file is in the ignored tests (Check via path, multiple tests named test.som) - with open(file, "r") as f: + with open(file_path, "r", encoding="utf-8") as f: contents = f.read() if "VM" in contents: - testFiles.append(file) + test_files.append(file_path) - return testFiles + return test_files -def readDirectory(path, testFiles): +def read_directory(path, test_files): """ Recursively read all sub directories Path is the directory we are currently in @@ -48,176 +54,187 @@ def readDirectory(path, testFiles): """ for directory in Path(path).iterdir(): if directory.is_dir(): - readDirectory(directory, testFiles) + read_directory(directory, test_files) else: continue - locateTests(path, testFiles) + locate_tests(path, test_files) -def assembleTestDictionary(testFiles): +def assemble_test_dictionary(test_files): """ Assemble a dictionary of name: the name of the test file stdout/stderr: the expected output of the test """ tests = [] - for file in testFiles: - testDict = parseTestFile(file) - if testDict is None: + for file_path in test_files: + test_dict = parse_test_file(file_path) + if test_dict is None: continue - tests.append(testDict) + tests.append(test_dict) return tests -def parseTestFile(testFile): +def parse_test_file(test_file): """ parse the test file to extract the important information """ - testDict = {"name": testFile, "stdout": [], "stderr": [], "customCP": "NaN"} - with open(testFile, "r") as f: - contents = f.read() + test_info_dict = {"name": test_file, "stdout": [], "stderr": [], "customCP": "NaN"} + with open(test_file, "r", encoding="utf-8") as open_file: + contents = open_file.read() comment = contents.split('"')[1] # Make sure if using a custom test classpath that it is above # Stdout and Stderr - if "customCP" in comment: - commentLines = 
comment.split("\n") - for line in commentLines: - if "customCP" in line: - testDict["customCP"] = line.split("customCP:")[1].strip() + if "custom_classpath" in comment: + comment_lines = comment.split("\n") + for line in comment_lines: + if "custom_classpath" in line: + test_info_dict["customCP"] = line.split("custom_classpath:")[1].strip() continue if "stdout" in comment: - stdOut = comment.split("stdout:")[1] - if "stderr" in stdOut: - stdErrInx = stdOut.index("stderr:") - stdOut = stdOut[:stdErrInx] - stdOut = stdOut.replace("...", "") - stdOutL = stdOut.split("\n") - stdOutL = [line.strip() for line in stdOutL if line.strip()] - testDict["stdout"] = stdOutL + std_out = comment.split("stdout:")[1] + if "stderr" in std_out: + std_err_inx = std_out.index("stderr:") + std_out = std_out[:std_err_inx] + std_out = std_out.replace("...", "") + std_err_l = std_out.split("\n") + std_err_l = [line.strip() for line in std_err_l if line.strip()] + test_info_dict["stdout"] = std_err_l if "stderr" in comment: - stdErr = comment.split("stderr:")[1] - if "stdout" in stdErr: - stdOutInx = stdErr.index("stdout:") - stdErr = stdErr[:stdOutInx] - stdErr = stdErr.replace("...", "") - stdErrL = stdErr.split("\n") - stdErrL = [line.strip() for line in stdErrL if line.strip()] - testDict["stderr"] = stdErrL - - testTuple = ( - testDict["name"], - testDict["stdout"], - testDict["stderr"], - testDict["customCP"], + std_err = comment.split("stderr:")[1] + if "stdout" in std_err: + std_out_inx = std_err.index("stdout:") + std_err = std_err[:std_out_inx] + std_err = std_err.replace("...", "") + std_err_l = std_err.split("\n") + std_err_l = [line.strip() for line in std_err_l if line.strip()] + test_info_dict["stderr"] = std_err_l + + test_tuple = ( + test_info_dict["name"], + test_info_dict["stdout"], + test_info_dict["stderr"], + test_info_dict["customCP"], ) - return testTuple + return test_tuple -def checkOut(result, expstd, experr, errorMessage): +def check_out(test_outputs, expected_std_out, expected_std_err): """ check if the output of the test matches the expected output result: The object returned by subprocess.run expstd: The expected standard output experr: The expected standard error output - errorMessage: The pregenerated error message to be used in case of failure Returns: Boolean indicating if result matches expected output + + note: This method does not directly error, just checks conditions """ # Tests borrowed from lang_tests and stderr and atdout will not directly match that of all SOMs # Order of the output is important - stdout = result.stdout.splitlines() - stderr = result.stderr.splitlines() + std_out = test_outputs.stdout.splitlines() + std_err = test_outputs.stderr.splitlines() # Check if each line in stdout and stderr is in the expected output - for line in expstd: - if not any(line in out_line for out_line in stdout): + for line in expected_std_out: + if not any(line in out_line for out_line in std_out): return False - if line in stdout: - stdout.remove(line) - if line in stderr: - stderr.remove(line) + if line in std_out: + std_out.remove(line) + if line in std_err: + std_err.remove(line) - for line in experr: - if not any(line in err_line for err_line in stderr): + for line in expected_std_err: + if not any(line in err_line for err_line in std_err): return False - if line in stdout: - stdout.remove(line) - if line in stderr: - stderr.remove(line) + if line in std_out: + std_out.remove(line) + if line in std_err: + std_err.remove(line) # If we made it this far then the test passed return 
True + # Code below here runs before pytest finds it's methods location = os.path.relpath(os.path.dirname(__file__) + "/Tests") # Work out settings for the application (They are labelled REQUIRED or OPTIONAL) -if "DEBUG" in os.environ: # OPTIONAL - vars.DEBUG = os.environ["DEBUG"].lower() == "true" +if "DEBUG" in os.environ: # OPTIONAL + external_vars.DEBUG = os.environ["DEBUG"].lower() == "true" -if "CLASSPATH" not in os.environ: # REQUIRED +if "CLASSPATH" not in os.environ: # REQUIRED sys.exit("Please set the CLASSPATH environment variable") -if "EXECUTABLE" not in os.environ: # REQUIRED +if "EXECUTABLE" not in os.environ: # REQUIRED sys.exit("Please set the EXECUTABLE environment variable") -if "TEST_EXCEPTIONS" in os.environ: # OPTIONAL - vars.TEST_EXCEPTIONS = os.environ["TEST_EXCEPTIONS"] +if "TEST_EXCEPTIONS" in os.environ: # OPTIONAL + external_vars.TEST_EXCEPTIONS = os.environ["TEST_EXCEPTIONS"] -vars.GENERATE_REPORT = False -if "GENERATE_REPORT" in os.environ: # OPTIONAL +if "GENERATE_REPORT" in os.environ: # OPTIONAL # Value is the location # Its prescense in env variables signifies intent to save - vars.GENERATE_REPORT_LOCATION = os.environ["GENERATE_REPORT"] - vars.GENERATE_REPORT = True + external_vars.GENERATE_REPORT_LOCATION = os.environ["GENERATE_REPORT"] + external_vars.GENERATE_REPORT = True -vars.CLASSPATH = os.environ["CLASSPATH"] -vars.EXECUTABLE = os.environ["EXECUTABLE"] +external_vars.CLASSPATH = os.environ["CLASSPATH"] +external_vars.EXECUTABLE = os.environ["EXECUTABLE"] debug( f""" \n\nWelcome to SOM Integration Testing -\nDEBUG is set to: {vars.DEBUG} -CLASSPATH is set to: {vars.CLASSPATH} -EXECUTABLE is set to: {vars.EXECUTABLE} -TEST_EXCEPTIONS is set to: {vars.TEST_EXCEPTIONS} -GENERATE_REPORT is set to: {vars.GENERATE_REPORT} -GENERATE_REPORT_LOCATION is set to: {vars.GENERATE_REPORT_LOCATION} +\nDEBUG is set to: {external_vars.DEBUG} +CLASSPATH is set to: {external_vars.CLASSPATH} +EXECUTABLE is set to: {external_vars.EXECUTABLE} +TEST_EXCEPTIONS is set to: {external_vars.TEST_EXCEPTIONS} +GENERATE_REPORT is set to: {external_vars.GENERATE_REPORT} +GENERATE_REPORT_LOCATION is set to: {external_vars.GENERATE_REPORT_LOCATION} """ ) -debug(f"Opening test_tags") -if vars.TEST_EXCEPTIONS: - with open(f"{vars.TEST_EXCEPTIONS}", "r") as f: - yamlFile = yaml.safe_load(f) - vars.known_failures = (yamlFile["known_failures"]) - vars.failing_as_unspecified = (yamlFile["failing_as_unspecified"]) - vars.unsupported = (yamlFile["unsupported"]) - vars.do_not_run = yamlFile["do_not_run"] # Tests here do not fail at a SOM level but at a python level (e.g. 
Invalud UTF-8 characters) - -debugList(vars.known_failures, prefix="Failure expected from: ") -debugList(vars.failing_as_unspecified, prefix="Failure expected through undefined behaviour: ") -debugList(vars.unsupported, prefix="Test that fails through unsupported bahaviour: ") -debugList(vars.do_not_run, prefix="Test that will not run through python breaking logic: ") +debug("Opening test_tags") +if external_vars.TEST_EXCEPTIONS: + with open(f"{external_vars.TEST_EXCEPTIONS}", "r", encoding="utf-8") as file: + yamlFile = yaml.safe_load(file) + external_vars.known_failures = yamlFile["known_failures"] + external_vars.failing_as_unspecified = yamlFile["failing_as_unspecified"] + external_vars.unsupported = yamlFile["unsupported"] + # Tests here do not fail at a SOM level but at a python level + external_vars.do_not_run = yamlFile["do_not_run"] + +debug_list(external_vars.known_failures, prefix="Failure expected from: ") +debug_list( + external_vars.failing_as_unspecified, + prefix="Failure expected through undefined behaviour: ", +) +debug_list( + external_vars.unsupported, prefix="Test that fails through unsupported bahaviour: " +) +debug_list( + external_vars.do_not_run, + prefix="Test that will not run through python breaking logic: ", +) testFiles = [] -readDirectory(location, testFiles) -TESTS_LIST = assembleTestDictionary(testFiles) +read_directory(location, testFiles) +TESTS_LIST = assemble_test_dictionary(testFiles) + @pytest.mark.parametrize( - "name,stdout,stderr,customCP", + "name,stdout,stderr,custom_classpath", TESTS_LIST, ids=[str(test_args[0]) for test_args in TESTS_LIST], ) -def tests_runner(name, stdout, stderr, customCP): +def tests_runner(name, stdout, stderr, custom_classpath): """ Take an array of dictionaries with test file names and expected output Run all of the tests and check the output @@ -225,22 +242,24 @@ def tests_runner(name, stdout, stderr, customCP): """ # Check if a test shoudld not be ran - if (str(name) in vars.do_not_run): + if str(name) in external_vars.do_not_run: debug(f"Not running test {name}") pytest.skip("Test included in do_not_run") - if customCP != "NaN": - debug(f"Using custom classpath: {customCP}") - command = f"{vars.EXECUTABLE} -cp {customCP} {name}" + if custom_classpath != "NaN": + debug(f"Using custom classpath: {custom_classpath}") + command = f"{external_vars.EXECUTABLE} -cp {custom_classpath} {name}" else: - command = f"{vars.EXECUTABLE} -cp {vars.CLASSPATH} {name}" + command = f"{external_vars.EXECUTABLE} -cp {external_vars.CLASSPATH} {name}" debug(f"Running test: {name}") - result = subprocess.run(command, capture_output=True, text=True, shell=True) + result = subprocess.run( + command, capture_output=True, text=True, shell=True, check=False + ) # Produce potential error messages now and then run assertion - errMsg = f""" + error_message = f""" Test failed for: {name} Expected stdout: {stdout} Given stdout : {result.stdout} @@ -250,31 +269,48 @@ def tests_runner(name, stdout, stderr, customCP): """ if result.returncode != 0: - errMsg += f"Command failed with return code: {result.returncode}\n" + error_message += f"Command failed with return code: {result.returncode}\n" - # SOM level errors will be raised in stdout only SOM++ errors are in stderr (Most tests are for SOM level errors) - testPassed = checkOut(result, stdout, stderr, errMsg) + test_pass_bool = check_out(result, stdout, stderr) # Check if we have any unexpectedly passing tests - if (str(name) in vars.known_failures and testPassed): # Test passed when it is not expected 
tp - vars.passedUnexpectedly.append(name) - assert(False), f"Test {name} is in known_failures but passed" - elif (str(name) in vars.known_failures and testPassed is False): # Test failed as expected - assert(True) - - if (str(name) in vars.failing_as_unspecified and testPassed): # Test passed when it is not expected tp - vars.passedUnexpectedly.append(name) - assert(False), f"Test {name} is in failing_as_unspecified but passed" - elif (str(name) in vars.failing_as_unspecified and testPassed is False): # Test failed as expected - assert(True) - - if (str(name) in vars.unsupported and testPassed): # Test passed when it is not expected tp - vars.passedUnexpectedly.append(name) - assert(False), f"Test {name} is in unsupported but passed" - elif (str(name) in vars.unsupported and testPassed is False): # Test failed as expected - assert(True) - - if (str(name) not in vars.unsupported and str(name) not in vars.known_failures and str(name) not in vars.failing_as_unspecified): - if (not testPassed): - vars.failedUnexpectedly.append(name) - assert(testPassed), f"Error on test, {name} expected to pass: {errMsg}" + if ( + str(name) in external_vars.known_failures and test_pass_bool + ): # Test passed when it is not expected tp + external_vars.tests_passed_unexpectedly.append(name) + assert False, f"Test {name} is in known_failures but passed" + elif ( + str(name) in external_vars.known_failures and test_pass_bool is False + ): # Test failed as expected + assert True + + if ( + str(name) in external_vars.failing_as_unspecified and test_pass_bool + ): # Test passed when it is not expected tp + external_vars.tests_passed_unexpectedly.append(name) + assert False, f"Test {name} is in failing_as_unspecified but passed" + elif ( + str(name) in external_vars.failing_as_unspecified and test_pass_bool is False + ): # Test failed as expected + assert True + + if ( + str(name) in external_vars.unsupported and test_pass_bool + ): # Test passed when it is not expected tp + external_vars.tests_passed_unexpectedly.append(name) + assert False, f"Test {name} is in unsupported but passed" + elif ( + str(name) in external_vars.unsupported and test_pass_bool is False + ): # Test failed as expected + assert True + + if ( + str(name) not in external_vars.unsupported + and str(name) not in external_vars.known_failures + and str(name) not in external_vars.failing_as_unspecified + ): + if not test_pass_bool: + external_vars.tests_failed_unexpectedly.append(name) + assert ( + test_pass_bool + ), f"Error on test, {name} expected to pass: {error_message}" From fe5fd3df7979c69a47079a980ed0051c25e62799 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Wed, 9 Jul 2025 14:59:12 +0100 Subject: [PATCH 03/38] Added README --- IntegrationTests/README.md | 94 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 IntegrationTests/README.md diff --git a/IntegrationTests/README.md b/IntegrationTests/README.md new file mode 100644 index 00000000..8a0605d1 --- /dev/null +++ b/IntegrationTests/README.md @@ -0,0 +1,94 @@ +# SOM Integration Tests + +Most of the tests for the integration testing come from lang_tests of [yksom](https://github.com/softdevteam/yksom/tree/master/lang_tests). Tests are identified by their path from core-lib to test.som, this ensures there can be multiple tests named test.som in different directories. Different directories should only be used when necessary for subclass testing. 
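+For example (the subdirectory names below are only illustrative), two files that are both named test.som are treated as distinct tests, because each one is identified by its full path:
+
+```
+core-lib/IntegrationTests/Tests/vector/test.som
+core-lib/IntegrationTests/Tests/string/test.som
+```
+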
+## Running the Integration Tests
+The tests can be run using pytest by simply running pytest in the base directory of any SOM implementation that includes a version of the core-library with IntegrationTests. It requires a few Python modules to be installed and a number of environment variables to be set.
+
+### Simple Test Run
+```
+EXECUTABLE=./path-to-build CLASSPATH=./core-lib/Smalltalk pytest
+```
+
+## Prerequisites
+The integration tests require two python modules to be installed:
+
+- pytest
+- PyYAML (for reading TEST_EXCEPTIONS files)
+
+## Environment variables
+The environment variables are split into required and optional. Some optional ones may be required by particular SOM implementations.
+
+#### EXECUTABLE
+This is the path from the current working directory to the SOM executable.
+#### CLASSPATH
+The exact classpath required by SOM to find the Object class etc.
+#### TEST_EXCEPTIONS
+A yaml file which details the tags of tests. Specifically, it labels tests that are expected to fail for one reason or another.
+#### GENERATE_REPORT
+Generates a yaml file which can be used as a **TEST_EXCEPTIONS** file. It also includes additional information such as how many tests passed, which tests passed unexpectedly, and which tests failed.
+#### DEBUG
+Allows the printing of detailed run information. Shows currently running test and the configuration before all tests are ran. Must run pytest with the -s flag when using DEBUG.
+
+## TEST_EXCEPTIONS (How to write a file)
+There are four tags that are currently supported by the SOM integration tests. All tags will run the tests still, other than do_not_run, but will not fail on test failure, a tagged test will cause the run to fail only when it passes unexpectedly.
+
+For a test to be given that tag specify it's location path like this:
+```
+core-lib/IntegrationTests/Tests/test.som
+```
+
+### known_failures
+Any test located in this tag is assumed to fail; it should only be used when another, more suitable tag is not available.
+
+### failing_as_unspecified
+Any test located in this tag fails because SOM does not specify the behaviour in this instance, which means that each implementation may treat the situation differently. *Example: dividing by 0.*
+
+### unsupported
+Any test located here uses a feature which is not supported in this SOM.
+
+### do_not_run
+A test with this tag should never be run, as it causes an error in the Python-level code. The test may also cause a SOM-level error, but does not have to. An example is invalid UTF-8 characters inside a test.
+
+## How to write a new test
+For a test to be collected by pytest it has to start with a comment; the comment describes the expected output on stderr and/or stdout.
+
+```
+"
+VM:
+  status: error
+  custom_classpath: ./core-lib/Examples/AreWeFastYet/Core:./core-lib/Smalltalk
+  case_sensitive: False
+  stdout:
+    1000
+    ...
+    2 is an integer
+  stderr:
+    ...
+    ERROR MESSAGE
+"
+```
+
+**When structuring a test, all options must come before stderr and stdout**
+
+### Tags for structuring a test
+Below is a list of tags which structure how a test works.
+
+#### VM:
+This is required as the base of the test structure and is what allows the file to be identified as an integration test.
+
+#### custom_classpath:
+This allows for the specification of a custom classpath to be used. This is useful for loading different versions of classes with the same name. I.e. AWFY Vector instead of core-lib Vector.
**The path to ./Smalltalk must still be specified after so that the Object class can be loaded** + +#### case_sensitive (Still a WiP) +By default the tests are case insensitive (All outputs and expecteds are converted to be lower case) but by specifying True in case_sensitive that test can be checked as case_sensitive. + +#### stderr or stdout: +This is your expected output, each new line will be a new "thing" to check for. So each line is checked as a whole order is not checked. Writing ... will be ignored by the checker. + +Due to the differences between SOM implementations not all will output to stderr for errors, some SOMs will output errors to stdout instead. Thus the **expected outputs are checked against both stderr or stdout**. + +### Good test practice +To make it clear to others how the test works please follow these guidelines. + +- Any error regardless of where it is output (stderr/stdout) should be under stderr to make it clear. +- Expected outputs should be written in the order they are being expected regardless of the lack of order checking. +- Use ... to show a break in output checking. \ No newline at end of file From 974a088ea72fd92814033309d0ed555a2dbb1165 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Thu, 10 Jul 2025 12:41:15 +0100 Subject: [PATCH 04/38] Further improve implementation - The report generated is now in yaml format. It can function as a TEST_EXCEPTIONS file with failing tests and passing tests, added and removed as needed. It should not be blindly followed but makes the generation of a tags file very easy. - It comes with additional information like number passed/skipped and which ones exactly have been changed. - Test runner is now case insensitive unless instructed not to be through case_sensitive: True in the tests. - It is also possible to now in the test level expect a test to fail. This allows for testing of test_runner features. Not ideal but makes sure case_sensitivity works correctly. - adjusted the reading of filepaths to require only 1 str() call - Tests now sorted / Name of check_out changed. Comments updated to reflect variable naming - Diff is now shown in error message --- .github/workflows/ci.yml | 4 +- IntegrationTests/README.md | 4 +- IntegrationTests/Tests/case_insensitive.som | 20 ++ IntegrationTests/conftest.py | 91 +++--- IntegrationTests/test_runner.py | 313 +++++++++++--------- 5 files changed, 251 insertions(+), 181 deletions(-) create mode 100644 IntegrationTests/Tests/case_insensitive.som diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1f20d5ff..98e95791 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,10 +10,10 @@ jobs: - name: Checkout Repository uses: actions/checkout@v4 - - name: Install Black, PyLint, PyYaml and Pytest + - name: Install Black, PyLint and Pytest run: | python -m pip install --upgrade pip - pip install black pylint pyyaml pytest + pip install black pylint pytest - name: Run Black Check run: | diff --git a/IntegrationTests/README.md b/IntegrationTests/README.md index 8a0605d1..efc0fa89 100644 --- a/IntegrationTests/README.md +++ b/IntegrationTests/README.md @@ -29,7 +29,7 @@ Generates a yaml file which can be used as a **TEST_EXCEPTIONS** file. It will a Allows the printing of detailed run information. Shows currently running test and the configuration before all tests are ran. Must run pytest with the -s flag when using DEBUG. ## TEST_EXCEPTIONS (How to write a file) -There are four tags that are currently supported by the SOM integration tests. 
All tags will run the tests still, other than do_not_run, but will not fail on test failure, a tagged test will cause the run to fail only when it passes unexpectedly. +There are four tags that are currently supported by the SOM integration tests. All tags will run the tests still, other than do_not_run, but will not fail on test failure, a tagged test will cause the run to fail only when it passes unexpectedly. Check for example file IntegrationTests/test_tags.yaml. For a test to be given that tag specify it's location path like this: ``` @@ -78,7 +78,7 @@ This is required as the base of the test structure and what allows the tests to #### custom_classpath: This allows for the specification of a custom classpath to be used. This is useful for loading different versions of classes with the same name. I.e. AWFY Vector instead of core-lib Vector. **The path to ./Smalltalk must still be specified after so that the Object class can be loaded** -#### case_sensitive (Still a WiP) +#### case_sensitive By default the tests are case insensitive (All outputs and expecteds are converted to be lower case) but by specifying True in case_sensitive that test can be checked as case_sensitive. #### stderr or stdout: diff --git a/IntegrationTests/Tests/case_insensitive.som b/IntegrationTests/Tests/case_insensitive.som new file mode 100644 index 00000000..97729a7e --- /dev/null +++ b/IntegrationTests/Tests/case_insensitive.som @@ -0,0 +1,20 @@ +" +VM: + status: success + stdout: + ... + A + b + #c + #D + +" + +case_insensitive = ( + run = ( + 'a' println. + 'B' println. + #C println. + #d println. + ) +) \ No newline at end of file diff --git a/IntegrationTests/conftest.py b/IntegrationTests/conftest.py index c8b3012b..905585b8 100644 --- a/IntegrationTests/conftest.py +++ b/IntegrationTests/conftest.py @@ -2,7 +2,7 @@ Defines variables that are required for a report to be generated. 
""" -import os +import yaml # Report Generate Logic tests_failed_unexpectedly = [] @@ -19,12 +19,10 @@ do_not_run = [] # Environment variables -DEBUG = False CLASSPATH = "" EXECUTABLE = "" -GENERATE_REPORT_LOCATION = "" TEST_EXCEPTIONS = "" -GENERATE_REPORT = False +GENERATE_REPORT = "" # Log data @@ -46,52 +44,55 @@ def pytest_runtest_logreport(report): # Run after all tests completed, Generate a report of failing and passing tests -def pytest_sessionfinish(): +def pytest_sessionfinish(exitstatus): """ Generate report based on test run """ - print("Running this method") if GENERATE_REPORT: - os.makedirs(GENERATE_REPORT_LOCATION, exist_ok=True) + print("Generating report for the test run") - # Generate a report_message to save - report_message = f""" -Pytest Completed with {tests_passed}/{total_tests} passing: - -Test_total: {total_tests} ** This includes those that we expect to fail ** -Tests_passed: {tests_passed} ** This includes those that we expect to fail ** -Tests_failed: {tests_failed} -Tests_skipped: {tests_skipped} - -Tests that passed unexpectedly: -{'\n'.join(f"{test}" for test in tests_passed_unexpectedly)} - -Tests that failed unexpectedly: -{'\n'.join(f"{test}" for test in tests_failed_unexpectedly)} - -## ENVIRONMENT VARIABLES USED ## + # To make the report useful it will add the tests which have failed- + # -unexpectedly to known_failures + # It will also remove those that have passed from any of those lists -Executable: {EXECUTABLE} -Classpath: {CLASSPATH} -Test Exceptions: {TEST_EXCEPTIONS} -Debug: {DEBUG} -Generage Report: {GENERATE_REPORT_LOCATION} + for test_path in tests_passed_unexpectedly: + test = str(test_path) + if test in known_failures: + known_failures.remove(test) + if test in unsupported: + unsupported.remove(test) + if test in failing_as_unspecified: + failing_as_unspecified.remove(test) -## TAGGED TESTS FILE ## + if len(tests_failed_unexpectedly) != 0: + for test in tests_failed_unexpectedly: + known_failures.append(str(test)) -Known_failures: -{'\n'.join(f"{test}" for test in known_failures)} - -Failing_as_unspecified: -{'\n'.join(f"{test}" for test in failing_as_unspecified)} - -Unsupported: -{'\n'.join(f"{test}" for test in unsupported)} - -Do_not_run: -{'\n'.join(f"{test}" for test in do_not_run)} -""" - print(f"Report location {GENERATE_REPORT_LOCATION}/report.txt") - with open(f"{GENERATE_REPORT_LOCATION}/report.txt", "w", encoding="utf-8") as f: - f.write(report_message) - f.close() + # Generate a report_message to save + report_data = { + "summary": { + "tests_total": total_tests, + "tests_passed": tests_passed, + "tests_failed": tests_failed, + "tests_skipped": tests_skipped, + "pytest_exitstatus": exitstatus, + "note": "Totals include expected failures", + }, + "unexpected": { + "passed": [str(test) for test in tests_passed_unexpectedly], + "failed": [str(test) for test in tests_failed_unexpectedly], + }, + "environment": { + "executable": EXECUTABLE, + "classpath": CLASSPATH, + "test_exceptions": TEST_EXCEPTIONS, + "generate_report_location": GENERATE_REPORT, + }, + "known_failures": known_failures, + "failing_as_unspecified": failing_as_unspecified, + "unsupported": unsupported, + "do_not_run": do_not_run, + } + print(f"Report location {GENERATE_REPORT}") + with open(f"{GENERATE_REPORT}", "w", encoding="utf-8") as f: + yaml.dump(report_data, f, default_flow_style=False, sort_keys=False) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 8a4dff85..22ea2a1d 100644 --- a/IntegrationTests/test_runner.py +++ 
b/IntegrationTests/test_runner.py @@ -5,6 +5,7 @@ import subprocess from pathlib import Path +from difflib import ndiff import os import sys import pytest @@ -12,36 +13,18 @@ import conftest as external_vars -def debug(message): - """ - Take a string as a mesasage and output if DEBUG is true - """ - if external_vars.DEBUG is True: - print(message) - - -def debug_list(message_list, prefix="", postfix=""): - """ - Take a list of messages and output if DEBUG is true, with a prefix and a postfix - """ - if external_vars.DEBUG is True: - for message in message_list: - print(prefix + str(message) + postfix) - - def locate_tests(path, test_files): """ - Locate all test files that exist in the given directory - Ignore any tests which are in the ignoredTests directory - Return a list of paths to the test files + Locate all tests which are in the current directory. + Add them to the list test_files and return + A check if made on if the file has VM: in it's content """ # To ID a file will be opened and at the top there should be a comment which starts with VM: for file_path in Path(path).glob("*.som"): - # Check if the file is in the ignored tests (Check via path, multiple tests named test.som) with open(file_path, "r", encoding="utf-8") as f: contents = f.read() - if "VM" in contents: - test_files.append(file_path) + if "VM:" in contents: + test_files.append(str(file_path)) return test_files @@ -50,7 +33,7 @@ def read_directory(path, test_files): """ Recursively read all sub directories Path is the directory we are currently in - testFiles is the list of test files we are building up + test_files is the list of test files we are building up """ for directory in Path(path).iterdir(): if directory.is_dir(): @@ -61,7 +44,7 @@ def read_directory(path, test_files): locate_tests(path, test_files) -def assemble_test_dictionary(test_files): +def collect_tests(test_files): """ Assemble a dictionary of name: the name of the test file @@ -81,7 +64,13 @@ def parse_test_file(test_file): """ parse the test file to extract the important information """ - test_info_dict = {"name": test_file, "stdout": [], "stderr": [], "customCP": "NaN"} + test_info_dict = { + "name": test_file, + "stdout": [], + "stderr": [], + "custom_classpath": "None", + "case_sensitive": False, + } with open(test_file, "r", encoding="utf-8") as open_file: contents = open_file.read() comment = contents.split('"')[1] @@ -92,15 +81,25 @@ def parse_test_file(test_file): comment_lines = comment.split("\n") for line in comment_lines: if "custom_classpath" in line: - test_info_dict["customCP"] = line.split("custom_classpath:")[1].strip() + test_info_dict["custom_classpath"] = line.split( + "custom_classpath:" + )[1].strip() continue + # Check if we are case sensitive (has to be toggled on) + if "case_sensitive" in comment: + comment_lines = comment.split("\n") + for line in comment_lines: + if "case_sensitive" in line: + test_info_dict["case_sensitive"] = bool( + line.split("case_sensitive")[1].strip() + ) + if "stdout" in comment: std_out = comment.split("stdout:")[1] if "stderr" in std_out: std_err_inx = std_out.index("stderr:") std_out = std_out[:std_err_inx] - std_out = std_out.replace("...", "") std_err_l = std_out.split("\n") std_err_l = [line.strip() for line in std_err_l if line.strip()] test_info_dict["stdout"] = std_err_l @@ -110,57 +109,102 @@ def parse_test_file(test_file): if "stdout" in std_err: std_out_inx = std_err.index("stdout:") std_err = std_err[:std_out_inx] - std_err = std_err.replace("...", "") std_err_l = std_err.split("\n") 
std_err_l = [line.strip() for line in std_err_l if line.strip()] test_info_dict["stderr"] = std_err_l + if test_info_dict["case_sensitive"]: + test_tuple = ( + test_info_dict["name"], + test_info_dict["stdout"], + test_info_dict["stderr"], + test_info_dict["custom_classpath"], + test_info_dict["case_sensitive"], + ) + return test_tuple + test_tuple = ( test_info_dict["name"], - test_info_dict["stdout"], - test_info_dict["stderr"], - test_info_dict["customCP"], + [s.lower() for s in test_info_dict["stdout"]], + [s.lower() for s in test_info_dict["stderr"]], + test_info_dict["custom_classpath"], + test_info_dict["case_sensitive"], ) return test_tuple -def check_out(test_outputs, expected_std_out, expected_std_err): +def check_exp_given(given, expected): + """ + Check if the expected output is contained in the given output + + given: list of strings representing some kind of SOM output + expected: list of strings representing the expected output + + return: 1 if success 0 if failure + """ + # Check if the stdout matches the expected stdout + exp_std_inx = 0 + for g_out in given: + # Check that checks don't pass before out of outputs + if exp_std_inx >= len(expected): + return 1 + + if expected[exp_std_inx] == "...": + # If the expected output is '...' then we skip this line + exp_std_inx += 1 + continue + + if g_out.strip() != expected[exp_std_inx].strip(): + # Check if expected has ... + if "..." in expected[exp_std_inx]: + # If it does then we need to remove it and check for that line containing string + without_gap = expected[exp_std_inx].split("...") + if all(without_gap in g_out for without_gap in without_gap): + exp_std_inx += 1 + # If the output does not match, continue without incrementing + continue + + exp_std_inx += 1 + + if exp_std_inx != len(expected): + # It is not all contained in the output + return 0 + + return 1 + + +def check_output(test_outputs, expected_std_out, expected_std_err): """ check if the output of the test matches the expected output - result: The object returned by subprocess.run - expstd: The expected standard output - experr: The expected standard error output + test_outputs: The object returned by subprocess.run + expected_std_out: The expected standard output + expected_std_err: The expected standard error output Returns: Boolean indicating if result matches expected output note: This method does not directly error, just checks conditions - """ - # Tests borrowed from lang_tests and stderr and atdout will not directly match that of all SOMs - # Order of the output is important + stdout and stderr do not match in all SOMs + stderr checked against stdout and stderr + stdout checked against stdout and stderr + + This is relatively robust for most test cases + """ + given_std_out = test_outputs.stdout.split("\n") + given_std_err = test_outputs.stderr.split("\n") - std_out = test_outputs.stdout.splitlines() - std_err = test_outputs.stderr.splitlines() + passing = 0 - # Check if each line in stdout and stderr is in the expected output - for line in expected_std_out: - if not any(line in out_line for out_line in std_out): - return False - if line in std_out: - std_out.remove(line) - if line in std_err: - std_err.remove(line) + passing += check_exp_given(given_std_out, expected_std_out) + passing += check_exp_given(given_std_err, expected_std_err) + passing += check_exp_given(given_std_out, expected_std_err) + passing += check_exp_given(given_std_err, expected_std_out) - for line in expected_std_err: - if not any(line in err_line for err_line in std_err): - return 
False - if line in std_out: - std_out.remove(line) - if line in std_err: - std_err.remove(line) + if passing >= 3: + # If we have at least 2 then a pass has succeeded on at least both so should be ok + return True - # If we made it this far then the test passed - return True + return False # Code below here runs before pytest finds it's methods @@ -168,9 +212,6 @@ def check_out(test_outputs, expected_std_out, expected_std_err): location = os.path.relpath(os.path.dirname(__file__) + "/Tests") # Work out settings for the application (They are labelled REQUIRED or OPTIONAL) -if "DEBUG" in os.environ: # OPTIONAL - external_vars.DEBUG = os.environ["DEBUG"].lower() == "true" - if "CLASSPATH" not in os.environ: # REQUIRED sys.exit("Please set the CLASSPATH environment variable") @@ -183,58 +224,62 @@ def check_out(test_outputs, expected_std_out, expected_std_err): if "GENERATE_REPORT" in os.environ: # OPTIONAL # Value is the location # Its prescense in env variables signifies intent to save - external_vars.GENERATE_REPORT_LOCATION = os.environ["GENERATE_REPORT"] - external_vars.GENERATE_REPORT = True + external_vars.GENERATE_REPORT = os.environ["GENERATE_REPORT"] external_vars.CLASSPATH = os.environ["CLASSPATH"] external_vars.EXECUTABLE = os.environ["EXECUTABLE"] -debug( - f""" -\n\nWelcome to SOM Integration Testing -\nDEBUG is set to: {external_vars.DEBUG} -CLASSPATH is set to: {external_vars.CLASSPATH} -EXECUTABLE is set to: {external_vars.EXECUTABLE} -TEST_EXCEPTIONS is set to: {external_vars.TEST_EXCEPTIONS} -GENERATE_REPORT is set to: {external_vars.GENERATE_REPORT} -GENERATE_REPORT_LOCATION is set to: {external_vars.GENERATE_REPORT_LOCATION} -""" -) - -debug("Opening test_tags") if external_vars.TEST_EXCEPTIONS: with open(f"{external_vars.TEST_EXCEPTIONS}", "r", encoding="utf-8") as file: yamlFile = yaml.safe_load(file) - external_vars.known_failures = yamlFile["known_failures"] - external_vars.failing_as_unspecified = yamlFile["failing_as_unspecified"] - external_vars.unsupported = yamlFile["unsupported"] - # Tests here do not fail at a SOM level but at a python level - external_vars.do_not_run = yamlFile["do_not_run"] - -debug_list(external_vars.known_failures, prefix="Failure expected from: ") -debug_list( - external_vars.failing_as_unspecified, - prefix="Failure expected through undefined behaviour: ", -) -debug_list( - external_vars.unsupported, prefix="Test that fails through unsupported bahaviour: " -) -debug_list( - external_vars.do_not_run, - prefix="Test that will not run through python breaking logic: ", -) -testFiles = [] -read_directory(location, testFiles) -TESTS_LIST = assemble_test_dictionary(testFiles) + if "known_failures" in yamlFile.keys(): + external_vars.known_failures = yamlFile["known_failures"] + if external_vars.known_failures is None: + external_vars.known_failures = [] + else: + external_vars.known_failures = [] + + if "failing_as_unspecified" in yamlFile.keys(): + external_vars.failing_as_unspecified = yamlFile["failing_as_unspecified"] + if external_vars.failing_as_unspecified is None: + external_vars.failing_as_unspecified = [] + else: + external_vars.failing_as_unspecified = [] + + if "unsupported" in yamlFile.keys(): + external_vars.unsupported = yamlFile["unsupported"] + if external_vars.unsupported is None: + external_vars.unsupported = [] + else: + external_vars.unsupported = [] + + if "do_not_run" in yamlFile.keys(): + external_vars.do_not_run = yamlFile["do_not_run"] + if external_vars.do_not_run is None: + external_vars.do_not_run = [] + else: + 
external_vars.do_not_run = [] + + +def prepare_tests(): + """ + Prepare all of the tests and their relevent information into a dictionary + so that the test runner understands each test + """ + test_files = [] + read_directory(location, test_files) + test_files = sorted(test_files) + return collect_tests(test_files) @pytest.mark.parametrize( - "name,stdout,stderr,custom_classpath", - TESTS_LIST, - ids=[str(test_args[0]) for test_args in TESTS_LIST], + "name,stdout,stderr,custom_classpath,case_sensitive", + prepare_tests(), + ids=[str(test_args[0]) for test_args in prepare_tests()], ) -def tests_runner(name, stdout, stderr, custom_classpath): +# pylint: disable=too-many-branches +def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): """ Take an array of dictionaries with test file names and expected output Run all of the tests and check the output @@ -243,71 +288,75 @@ def tests_runner(name, stdout, stderr, custom_classpath): # Check if a test shoudld not be ran if str(name) in external_vars.do_not_run: - debug(f"Not running test {name}") pytest.skip("Test included in do_not_run") - if custom_classpath != "NaN": - debug(f"Using custom classpath: {custom_classpath}") + if custom_classpath != "None": command = f"{external_vars.EXECUTABLE} -cp {custom_classpath} {name}" else: command = f"{external_vars.EXECUTABLE} -cp {external_vars.CLASSPATH} {name}" - debug(f"Running test: {name}") + print(f"Running test: {name}") result = subprocess.run( command, capture_output=True, text=True, shell=True, check=False ) + # lower-case comparisons unless specified otherwise + if case_sensitive is False: + result.stdout = str(result.stdout).lower() + result.stderr = str(result.stderr).lower() # Produce potential error messages now and then run assertion error_message = f""" -Test failed for: {name} -Expected stdout: {stdout} -Given stdout : {result.stdout} -Expected stderr: {stderr} -Given stderr : {result.stderr} +Expected stdout: \n{"\n".join(f"{i + 1}| {line}" for i, line in enumerate(stdout))} +Given stdout : \n{"\n" + .join(f"{i + 1}| {line}" for i, line in enumerate( + result.stdout.split("\n")))} +Expected stderr: \n{"\n".join(f"{i + 1}| {line}" for i, line in enumerate(stderr))} +Given stderr : \n{"\n" + .join(f"{i + 1}| {line}" for i, line in enumerate( + result.stderr.split("\n")))} Command used : {command} +Case sensitive : {case_sensitive} +Stdout diff : \n{''.join(ndiff("\n" + .join(stdout).splitlines(keepends=True), + result.stdout.splitlines(keepends=True)))} +Stderr diff : \n{''.join(ndiff("\n" + .join(stderr).splitlines(keepends=True), + result.stderr.splitlines(keepends=True)))} """ + # Related to above line (Rather than change how stdout and stderr are + # represented just joining and then splitting again) if result.returncode != 0: error_message += f"Command failed with return code: {result.returncode}\n" - test_pass_bool = check_out(result, stdout, stderr) + test_pass_bool = check_output(result, stdout, stderr) # Check if we have any unexpectedly passing tests if ( - str(name) in external_vars.known_failures and test_pass_bool - ): # Test passed when it is not expected tp + name in external_vars.known_failures and test_pass_bool + ): # Test passed when it is not expected to external_vars.tests_passed_unexpectedly.append(name) - assert False, f"Test {name} is in known_failures but passed" - elif ( - str(name) in external_vars.known_failures and test_pass_bool is False - ): # Test failed as expected - assert True + assert False, f"Test {name} is in known_failures but 
passed \n{error_message}" if ( str(name) in external_vars.failing_as_unspecified and test_pass_bool ): # Test passed when it is not expected tp external_vars.tests_passed_unexpectedly.append(name) - assert False, f"Test {name} is in failing_as_unspecified but passed" - elif ( - str(name) in external_vars.failing_as_unspecified and test_pass_bool is False - ): # Test failed as expected - assert True + assert ( + False + ), f"Test {name} is in failing_as_unspecified but passed \n{error_message}" if ( - str(name) in external_vars.unsupported and test_pass_bool + name in external_vars.unsupported and test_pass_bool ): # Test passed when it is not expected tp external_vars.tests_passed_unexpectedly.append(name) - assert False, f"Test {name} is in unsupported but passed" - elif ( - str(name) in external_vars.unsupported and test_pass_bool is False - ): # Test failed as expected - assert True + assert False, f"Test {name} is in unsupported but passed \n{error_message}" if ( - str(name) not in external_vars.unsupported - and str(name) not in external_vars.known_failures - and str(name) not in external_vars.failing_as_unspecified + name not in external_vars.unsupported + and name not in external_vars.known_failures + and name not in external_vars.failing_as_unspecified ): if not test_pass_bool: external_vars.tests_failed_unexpectedly.append(name) From 4883d65d0b0a25e0dabd2fb4bc9aa5b2655f89fe Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Fri, 11 Jul 2025 14:54:41 +0100 Subject: [PATCH 05/38] SOM UnicodeDecodeError: now more robust against a failure in decode a string. - skip test on Unicode Failure --- IntegrationTests/test_runner.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 22ea2a1d..55de627f 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -297,9 +297,17 @@ def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): print(f"Running test: {name}") - result = subprocess.run( - command, capture_output=True, text=True, shell=True, check=False - ) + try: + result = subprocess.run( + command, capture_output=True, text=True, shell=True, check=False + ) + except UnicodeDecodeError as e: + print(f"Error decoding output for test {name}: {e}") + pytest.skip( + "Test output could not be decoded SOM may not support " + "full Unicode. Result object not generated." 
+        )
+
     # lower-case comparisons unless specified otherwise
     if case_sensitive is False:
         result.stdout = str(result.stdout).lower()

From d15bf59e97d22e28f18acb01dbb2c35c1311a7f5 Mon Sep 17 00:00:00 2001
From: Rhys Walker
Date: Fri, 11 Jul 2025 15:16:17 +0100
Subject: [PATCH 06/38] Further improvements

- Removed leftover print statements
- Updated error handling to just show the diff unless no diff is available
- EXECUTABLE renamed to VM
- Added some basic test_runner tests
- make_a_diff reverted to producing a plain diff without line numbers
- Be strict on inclusion in stdout/stderr
- Removed the lang_tests and adjusted the test_runner logic to print a proper error when the /Tests folder is not found
- Fix conftest to output a string rather than an object for exitstatus
- Fixed an error with missing or empty tag files; added a test to assert that the yaml files are working correctly
- Added an additional test to check the robustness of read_test_exceptions
- Fixed an error where specifying case_sensitive: False would not register as False but True instead
- Added a new test for the discovery of lang_tester-valid tests inside a directory
- Pathname for TEST_EXCEPTIONS now just takes a filename if it is located within IntegrationTests.
- Adjusted test_test_discovery to use relative path names rather than hard-coded ones
- There are now two options for running pytest: normally, which executes just the SOM tests, or with the argument -m tester, which runs the test_runner tests located in test_tester
- Added a deselect message when tests are deselected due to runner choice
- Filepaths must now be given from IntegrationTests onwards, e.g. Tests/test.som
- Updated to use relative paths rather than hardcoded ones for TEST_EXCEPTIONS
---
 IntegrationTests/README.md                         |  84 ++++--
 IntegrationTests/Tests/case_insensitive.som        |  20 --
 IntegrationTests/conftest.py                       |  37 ++-
 IntegrationTests/test_runner.py                    | 219 ++++++++++-----
 .../soms_for_testing/som_test_1.som                |  23 ++
 .../soms_for_testing/som_test_2.som                |  20 ++
 .../soms_for_testing/som_test_3.som                |  17 ++
 .../yaml_for_testing/missing_all_tags.yaml         |   0
 .../missing_known_declaration.yaml                 |   7 +
 .../yaml_for_testing/set_to_be_null.yaml           |   8 +
 .../yaml_for_testing/tests_in_each.yaml            |   8 +
 IntegrationTests/test_tester.py                    | 256 ++++++++++++++++++
 12 files changed, 580 insertions(+), 119 deletions(-)
 delete mode 100644 IntegrationTests/Tests/case_insensitive.som
 create mode 100644 IntegrationTests/test_runner_tests/soms_for_testing/som_test_1.som
 create mode 100644 IntegrationTests/test_runner_tests/soms_for_testing/som_test_2.som
 create mode 100644 IntegrationTests/test_runner_tests/soms_for_testing/som_test_3.som
 create mode 100644 IntegrationTests/test_runner_tests/yaml_for_testing/missing_all_tags.yaml
 create mode 100644 IntegrationTests/test_runner_tests/yaml_for_testing/missing_known_declaration.yaml
 create mode 100644 IntegrationTests/test_runner_tests/yaml_for_testing/set_to_be_null.yaml
 create mode 100644 IntegrationTests/test_runner_tests/yaml_for_testing/tests_in_each.yaml
 create mode 100644 IntegrationTests/test_tester.py

diff --git a/IntegrationTests/README.md b/IntegrationTests/README.md
index efc0fa89..c4e27e3b 100644
--- a/IntegrationTests/README.md
+++ b/IntegrationTests/README.md
@@ -1,39 +1,48 @@
 # SOM Integration Tests
-Most of the tests for the integration testing come from lang_tests of [yksom](https://github.com/softdevteam/yksom/tree/master/lang_tests).
Tests are identified by their path from core-lib to test.som, this ensures there can be multiple tests named test.som in different directories. Different directories should only be used when necessary for subclass testing.
+Most of the tests for the integration testing come from lang_tests of [yksom](https://github.com/softdevteam/yksom/tree/master/lang_tests). Tests are identified by their path from core-lib to test.som, which ensures there can be multiple tests named test.som in different directories.
 
 ## Running the Integration Tests
 The tests can be run using pytest by simply running pytest in the base directory of any SOM implementation that includes a version of the core-library with IntegrationTests. It requires multiple python modules installed and environment variables set.
 
 ### Simple Test Run
 ```
-EXECUTABLE=./path-to-build CLASSPATH=./core-lib/Smalltalk pytest
+VM=./path-to-build CLASSPATH=./core-lib/Smalltalk python3 -m pytest
+```
+
+### Optionals
+A set of optionals has been created for this test suite which can be added.
+
+#### Executing the test_runner tests
+This optional flag will not execute any normal SOM tests but will instead test the test_runner itself.
+```
+VM=./path-to-build CLASSPATH=./core-lib/Smalltalk python3 -m pytest -m tester
 ```
 
 ## Prerequisites
-The integration tests require two python modules to be installed:
+- [PyYaml](https://pypi.org/project/PyYAML/)
+- [Pytest](https://pypi.org/project/pytest/)
 
 ## Environment variables
 The environment variables are split into required and optional. Some optionals may be required for different implementations of SOM.
 
-#### EXECUTABLE
-This is the path from the current working directory to the executable of SOM.
+#### VM
+This is the path from the current working directory to the executable VM of SOM.
 
 #### CLASSPATH
 The exact classpath required by SOM to find the Object class etc.
 
 #### TEST_EXCEPTIONS
-A yaml file which details the tags of tests. Specifically it labels tests that are expected to fail for one reason or another.
+A yaml file which details the tags of tests. Specifically, it labels tests that are expected to fail for one reason or another. **Give the whole path to the file**.
 
 #### GENERATE_REPORT
-Generates a yaml file which can be used as a **TEST_EXCEPTIONS** file. It will also include additional information about how many tests passed, which tests passed that were not expected to and which tests failed.
-#### DEBUG
-Allows the printing of detailed run information. Shows currently running test and the configuration before all tests are ran. Must run pytest with the -s flag when using DEBUG.
+Generates a yaml file which can be used as a **TEST_EXCEPTIONS** file. It will also include additional information about how many tests passed, which tests passed that were not expected to, and which tests failed. **Give a full path from CWD to where it should be saved, including .yaml**.
 
 ## TEST_EXCEPTIONS (How to write a file)
 There are four tags that are currently supported by the SOM integration tests. All tags will run the tests still, other than do_not_run, but will not fail on test failure, a tagged test will cause the run to fail only when it passes unexpectedly. Check for example file IntegrationTests/test_tags.yaml.
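For illustration, a complete exceptions file with all four tags might look roughly like this (the tag names are the ones the runner reads; the listed test paths are placeholders only):

```
known_failures:
  - core-lib/IntegrationTests/Tests/test.som
failing_as_unspecified:
unsupported:
  - core-lib/IntegrationTests/Tests/test.som
do_not_run:
```

A tag may also be left empty, as shown above for failing_as_unspecified and do_not_run.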
-For a test to be given that tag specify it's location path like this:
+For a test to be given a tag, specify its location path like this:
 ```
-core-lib/IntegrationTests/Tests/test.som
+known_failures:
+  core-lib/IntegrationTests/Tests/test.som
 ```
 
 ### known_failures
@@ -46,7 +55,7 @@ Any test located in this tag failed because SOM does not specify behaviour in th
 Any test located here has a feature which is not supported in this SOM.
 
 ### do_not_run
-This test should not be ran ever as it causes an error in the python level code. The test may also cause a SOM level error but does not have to. For example invalid UTF-8 characters inside of a test.
+This test should never be run, as it causes an error in the Python-level code. The test may also cause a SOM-level error, but does not have to. (*This does not include Unicode errors; they are handled at runtime.*)
 
 ## How to write a new test
 For a test to be collected by Pytest it has to start with a comment, the comment should be structured with the expected output for either stderr or stdout.
@@ -82,13 +91,50 @@ This allows for the specification of a custom classpath to be used. This is usef
 By default the tests are case insensitive (All outputs and expecteds are converted to be lower case) but by specifying True in case_sensitive that test can be checked as case_sensitive.
 
 #### stderr or stdout:
-This is your expected output, each new line will be a new "thing" to check for. So each line is checked as a whole order is not checked. Writing ... will be ignored by the checker.
+This is your expected output; each new line is a new "thing" to check for. Writing ... signifies a gap in the checking: the output does not have to feature this gap, but it may. For example:
+
+```
+stdout:
+    Hello, World
+    ...
+    Goodbye
+```
+This would be true for:
+```
+Hello, World
+Today is a Monday
+Goodbye
+
+/
+
+Hello, World
+Goodbye
+```
+
+### Understanding how the "..." works in test_runner
+There are situations where the ... is necessary for your output. Here are some example use cases, when they may be necessary, and how to write the tests for them. As a preface: check_output checks a line as a whole, so writing ... allows for a gap; a more precise check can be made by including as much of the expected output as possible.
+
+Line 1 in the expected stdout below says: match a whole line which contains Hello, then some other text as a gap, then the word sample, then whatever comes after on that line. Line 2 specifies that the line must end with the word line, whilst line 3 says that somewhere in this line the word little must appear.
-Due to the differences between SOM implementations not all will output to stderr for errors, some SOMs will output errors to stdout instead. Thus the **expected outputs are checked against both stderr or stdout**.
+#### Stdout
+```
+This is SOM++
+Hello, this is some sample output
+There is some more on this line
+And a little more here
+```
-### Good test practice
-To make it clear to others how the test works please follow these guidelines.
+#### Expected
+```
+VM:
+    status: success
+    case_sensitive: False
+    stdout:
+        Hello, ... sample ...
+        ... is ... this line
+        ... little ...
+```
-- Any error regardless of where it is output (stderr/stdout) should be under stderr to make it clear.
-- Expected outputs should be written in the order they are being expected regardless of the lack of order checking.
-- Use ... to show a break in output checking.
\ No newline at end of file
+### When not to use "..."
+- When the word you are searching for is the end of the line do not do this "*word* ...". +- When the word you are searching for is at the beginning of the line do not do this "... *word*" \ No newline at end of file diff --git a/IntegrationTests/Tests/case_insensitive.som b/IntegrationTests/Tests/case_insensitive.som deleted file mode 100644 index 97729a7e..00000000 --- a/IntegrationTests/Tests/case_insensitive.som +++ /dev/null @@ -1,20 +0,0 @@ -" -VM: - status: success - stdout: - ... - A - b - #c - #D - -" - -case_insensitive = ( - run = ( - 'a' println. - 'B' println. - #C println. - #d println. - ) -) \ No newline at end of file diff --git a/IntegrationTests/conftest.py b/IntegrationTests/conftest.py index 905585b8..c8e85803 100644 --- a/IntegrationTests/conftest.py +++ b/IntegrationTests/conftest.py @@ -20,11 +20,39 @@ # Environment variables CLASSPATH = "" -EXECUTABLE = "" +VM = "" TEST_EXCEPTIONS = "" GENERATE_REPORT = "" +def pytest_configure(config): + """ + Add a marker to pytest + """ + config.addinivalue_line("markers", "tester: test the testing framework") + + +def pytest_collection_modifyitems(config, items): + """ + Make sure the correct tests are being selected for the mode that is running + """ + # Check if "-m tester" was specified + marker_expr = config.getoption("-m") + run_tester_selected = False + + if marker_expr: + # Simplistic check: if "tester" is anywhere in the -m expression + # (You can improve parsing if needed) + run_tester_selected = "tester" in marker_expr.split(" or ") + + if not run_tester_selected: + deselected = [item for item in items if "tester" in item.keywords] + if deselected: + for item in deselected: + items.remove(item) + config.hook.pytest_deselected(items=deselected) + + # Log data def pytest_runtest_logreport(report): """ @@ -49,8 +77,6 @@ def pytest_sessionfinish(exitstatus): Generate report based on test run """ if GENERATE_REPORT: - print("Generating report for the test run") - # To make the report useful it will add the tests which have failed- # -unexpectedly to known_failures # It will also remove those that have passed from any of those lists @@ -75,7 +101,7 @@ def pytest_sessionfinish(exitstatus): "tests_passed": tests_passed, "tests_failed": tests_failed, "tests_skipped": tests_skipped, - "pytest_exitstatus": exitstatus, + "pytest_exitstatus": str(exitstatus), "note": "Totals include expected failures", }, "unexpected": { @@ -83,7 +109,7 @@ def pytest_sessionfinish(exitstatus): "failed": [str(test) for test in tests_failed_unexpectedly], }, "environment": { - "executable": EXECUTABLE, + "virtual machine": VM, "classpath": CLASSPATH, "test_exceptions": TEST_EXCEPTIONS, "generate_report_location": GENERATE_REPORT, @@ -93,6 +119,5 @@ def pytest_sessionfinish(exitstatus): "unsupported": unsupported, "do_not_run": do_not_run, } - print(f"Report location {GENERATE_REPORT}") with open(f"{GENERATE_REPORT}", "w", encoding="utf-8") as f: yaml.dump(report_data, f, default_flow_style=False, sort_keys=False) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 55de627f..9116463f 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -68,7 +68,7 @@ def parse_test_file(test_file): "name": test_file, "stdout": [], "stderr": [], - "custom_classpath": "None", + "custom_classpath": None, "case_sensitive": False, } with open(test_file, "r", encoding="utf-8") as open_file: @@ -92,7 +92,7 @@ def parse_test_file(test_file): for line in comment_lines: if "case_sensitive" in line: 
test_info_dict["case_sensitive"] = bool( - line.split("case_sensitive")[1].strip() + line.split("case_sensitive:")[1].strip().lower() == "true" ) if "stdout" in comment: @@ -134,6 +134,43 @@ def parse_test_file(test_file): return test_tuple +def make_a_diff(expected, given): + """ + Creates a string that represents the difference between two + lists of Strings. + """ + diff_string = "" + for diff in ndiff(expected, given): + diff_string += f"\n{str(diff)}" + + return diff_string + + +# pylint: disable=too-many-positional-arguments +# pylint: disable=too-many-arguments +def build_error_message( + stdout, stderr, exp_stdout, exp_stderr, command, case_sensitive +): + """ + Build an error message for the test runner + """ + + error_message = f"""\n +Command: {command} +Case Sensitive: {case_sensitive} + """ + + if stdout.strip() != "": + error_message += "\nstdout diff with stdout expected\n" + error_message += make_a_diff(exp_stdout, stdout.strip().split("\n")) + + if stderr.strip() != "": + error_message += "\nstderr diff with stderr expected\n" + error_message += make_a_diff(exp_stderr, stderr.strip().split("\n")) + + return error_message + + def check_exp_given(given, expected): """ Check if the expected output is contained in the given output @@ -197,29 +234,112 @@ def check_output(test_outputs, expected_std_out, expected_std_err): passing += check_exp_given(given_std_out, expected_std_out) passing += check_exp_given(given_std_err, expected_std_err) - passing += check_exp_given(given_std_out, expected_std_err) - passing += check_exp_given(given_std_err, expected_std_out) - if passing >= 3: - # If we have at least 2 then a pass has succeeded on at least both so should be ok + if passing == 2: + # If we have two passing then we know we have what we expect return True return False -# Code below here runs before pytest finds it's methods +# Read the test exceptions file and set the variables correctly +# pylint: disable=too-many-branches +def read_test_exceptions(filename): + """ + Read a TEST_EXCEPTIONS file and extract the core information + Filename should be either a relative path from CWD to file + or an abolute path. 
+ """ + if filename: + path = os.path.relpath(os.path.dirname(__file__)) + + with open(f"{filename}", "r", encoding="utf-8") as file: + yaml_file = yaml.safe_load(file) + + if yaml_file is not None: + if "known_failures" in yaml_file: + external_vars.known_failures = yaml_file["known_failures"] + if external_vars.known_failures is None: + external_vars.known_failures = [] + + else: + external_vars.known_failures = [] + + if "failing_as_unspecified" in yaml_file: + external_vars.failing_as_unspecified = yaml_file[ + "failing_as_unspecified" + ] + if external_vars.failing_as_unspecified is None: + external_vars.failing_as_unspecified = [] + + else: + external_vars.failing_as_unspecified = [] + + if "unsupported" in yaml_file: + external_vars.unsupported = yaml_file["unsupported"] + if external_vars.unsupported is None: + external_vars.unsupported = [] + + else: + external_vars.unsupported = [] + + if "do_not_run" in yaml_file: + external_vars.do_not_run = yaml_file["do_not_run"] + if external_vars.do_not_run is None: + external_vars.do_not_run = [] + + else: + external_vars.do_not_run = [] + else: + external_vars.known_failures = [] + external_vars.failing_as_unspecified = [] + external_vars.unsupported = [] + external_vars.do_not_run = [] -location = os.path.relpath(os.path.dirname(__file__) + "/Tests") + if ( + external_vars.known_failures is not None + and external_vars.known_failures != [None] + ): + external_vars.known_failures = [ + os.path.join(path, test) for test in external_vars.known_failures + ] + if ( + external_vars.failing_as_unspecified is not None + and external_vars.failing_as_unspecified != [None] + ): + external_vars.failing_as_unspecified = [ + os.path.join(path, test) + for test in external_vars.failing_as_unspecified + ] + if external_vars.unsupported is not None and external_vars.unsupported != [ + None + ]: + external_vars.unsupported = [ + os.path.join(path, test) for test in external_vars.unsupported + ] + if external_vars.do_not_run is not None and external_vars.do_not_run != [None]: + external_vars.do_not_run = [ + os.path.join(path, test) for test in external_vars.do_not_run + ] + + +# START, ENTRY, BEGIN, MAIN +# Code below here runs before pytest finds it's methods +location = os.path.relpath(os.path.dirname(__file__)) +if not os.path.exists(location + "/Tests"): + pytest.exit( + "/Tests directory not found. 
Please make sure the lang_tests are installed" + ) # Work out settings for the application (They are labelled REQUIRED or OPTIONAL) if "CLASSPATH" not in os.environ: # REQUIRED sys.exit("Please set the CLASSPATH environment variable") -if "EXECUTABLE" not in os.environ: # REQUIRED - sys.exit("Please set the EXECUTABLE environment variable") +if "VM" not in os.environ: # REQUIRED + sys.exit("Please set the VM environment variable") if "TEST_EXCEPTIONS" in os.environ: # OPTIONAL - external_vars.TEST_EXCEPTIONS = os.environ["TEST_EXCEPTIONS"] + external_vars.TEST_EXCEPTIONS = location + "/" + os.environ["TEST_EXCEPTIONS"] if "GENERATE_REPORT" in os.environ: # OPTIONAL # Value is the location @@ -227,39 +347,9 @@ def check_output(test_outputs, expected_std_out, expected_std_err): external_vars.GENERATE_REPORT = os.environ["GENERATE_REPORT"] external_vars.CLASSPATH = os.environ["CLASSPATH"] -external_vars.EXECUTABLE = os.environ["EXECUTABLE"] +external_vars.VM = os.environ["VM"] -if external_vars.TEST_EXCEPTIONS: - with open(f"{external_vars.TEST_EXCEPTIONS}", "r", encoding="utf-8") as file: - yamlFile = yaml.safe_load(file) - - if "known_failures" in yamlFile.keys(): - external_vars.known_failures = yamlFile["known_failures"] - if external_vars.known_failures is None: - external_vars.known_failures = [] - else: - external_vars.known_failures = [] - - if "failing_as_unspecified" in yamlFile.keys(): - external_vars.failing_as_unspecified = yamlFile["failing_as_unspecified"] - if external_vars.failing_as_unspecified is None: - external_vars.failing_as_unspecified = [] - else: - external_vars.failing_as_unspecified = [] - - if "unsupported" in yamlFile.keys(): - external_vars.unsupported = yamlFile["unsupported"] - if external_vars.unsupported is None: - external_vars.unsupported = [] - else: - external_vars.unsupported = [] - - if "do_not_run" in yamlFile.keys(): - external_vars.do_not_run = yamlFile["do_not_run"] - if external_vars.do_not_run is None: - external_vars.do_not_run = [] - else: - external_vars.do_not_run = [] +read_test_exceptions(external_vars.TEST_EXCEPTIONS) def prepare_tests(): @@ -268,7 +358,7 @@ def prepare_tests(): so that the test runner understands each test """ test_files = [] - read_directory(location, test_files) + read_directory(location + "/Tests", test_files) test_files = sorted(test_files) return collect_tests(test_files) @@ -290,19 +380,16 @@ def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): if str(name) in external_vars.do_not_run: pytest.skip("Test included in do_not_run") - if custom_classpath != "None": - command = f"{external_vars.EXECUTABLE} -cp {custom_classpath} {name}" + if custom_classpath is not None: + command = f"{external_vars.VM} -cp {custom_classpath} {name}" else: - command = f"{external_vars.EXECUTABLE} -cp {external_vars.CLASSPATH} {name}" - - print(f"Running test: {name}") + command = f"{external_vars.VM} -cp {external_vars.CLASSPATH} {name}" try: result = subprocess.run( command, capture_output=True, text=True, shell=True, check=False ) - except UnicodeDecodeError as e: - print(f"Error decoding output for test {name}: {e}") + except UnicodeDecodeError: pytest.skip( "Test output could not be decoded SOM may not support " "full Unicode. Result object not generated." 
@@ -314,24 +401,10 @@ def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): result.stderr = str(result.stderr).lower() # Produce potential error messages now and then run assertion - error_message = f""" -Expected stdout: \n{"\n".join(f"{i + 1}| {line}" for i, line in enumerate(stdout))} -Given stdout : \n{"\n" - .join(f"{i + 1}| {line}" for i, line in enumerate( - result.stdout.split("\n")))} -Expected stderr: \n{"\n".join(f"{i + 1}| {line}" for i, line in enumerate(stderr))} -Given stderr : \n{"\n" - .join(f"{i + 1}| {line}" for i, line in enumerate( - result.stderr.split("\n")))} -Command used : {command} -Case sensitive : {case_sensitive} -Stdout diff : \n{''.join(ndiff("\n" - .join(stdout).splitlines(keepends=True), - result.stdout.splitlines(keepends=True)))} -Stderr diff : \n{''.join(ndiff("\n" - .join(stderr).splitlines(keepends=True), - result.stderr.splitlines(keepends=True)))} -""" + error_message = build_error_message( + result.stdout, result.stderr, stdout, stderr, command, case_sensitive + ) + # Related to above line (Rather than change how stdout and stderr are # represented just joining and then splitting again) @@ -345,21 +418,19 @@ def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): name in external_vars.known_failures and test_pass_bool ): # Test passed when it is not expected to external_vars.tests_passed_unexpectedly.append(name) - assert False, f"Test {name} is in known_failures but passed \n{error_message}" + assert False, f"Test {name} is in known_failures but passed" if ( str(name) in external_vars.failing_as_unspecified and test_pass_bool ): # Test passed when it is not expected tp external_vars.tests_passed_unexpectedly.append(name) - assert ( - False - ), f"Test {name} is in failing_as_unspecified but passed \n{error_message}" + assert False, f"Test {name} is in failing_as_unspecified but passed" if ( name in external_vars.unsupported and test_pass_bool ): # Test passed when it is not expected tp external_vars.tests_passed_unexpectedly.append(name) - assert False, f"Test {name} is in unsupported but passed \n{error_message}" + assert False, f"Test {name} is in unsupported but passed" if ( name not in external_vars.unsupported diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_1.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_1.som new file mode 100644 index 00000000..c7d9379a --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_1.som @@ -0,0 +1,23 @@ +" +VM: + status: success + stdout: + 1 + 2 + 3 + 4 + 5 + ... + 10 + stderr: + THIS IS AN ERROR + ... + Hello, world +" + +som_test_1 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. + ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_2.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_2.som new file mode 100644 index 00000000..ffce47e7 --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_2.som @@ -0,0 +1,20 @@ +" +VM: + status: success + case_sensitive: True + stdout: + I AM cAsE sensitiVe + ... + Dots/inTest + stderr: + CaSE sensitive ErrOr + ... + TestCaseSensitivity +" + +som_test_2 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. 
+ ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_3.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_3.som new file mode 100644 index 00000000..457ae003 --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_3.som @@ -0,0 +1,17 @@ +" +VM: + status: success + case_sensitive: False + custom_classpath: core-lib/AreWeFastYet/Core + stdout: + ... + stderr: + ... +" + +som_test_3 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. + ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/yaml_for_testing/missing_all_tags.yaml b/IntegrationTests/test_runner_tests/yaml_for_testing/missing_all_tags.yaml new file mode 100644 index 00000000..e69de29b diff --git a/IntegrationTests/test_runner_tests/yaml_for_testing/missing_known_declaration.yaml b/IntegrationTests/test_runner_tests/yaml_for_testing/missing_known_declaration.yaml new file mode 100644 index 00000000..ad648289 --- /dev/null +++ b/IntegrationTests/test_runner_tests/yaml_for_testing/missing_known_declaration.yaml @@ -0,0 +1,7 @@ +known_failures: + +failing_as_unspecified: + +unsupported: + +do_not_run: diff --git a/IntegrationTests/test_runner_tests/yaml_for_testing/set_to_be_null.yaml b/IntegrationTests/test_runner_tests/yaml_for_testing/set_to_be_null.yaml new file mode 100644 index 00000000..2d57be68 --- /dev/null +++ b/IntegrationTests/test_runner_tests/yaml_for_testing/set_to_be_null.yaml @@ -0,0 +1,8 @@ +known_failures: + - null +failing_as_unspecified: + - null +unsupported: + - null +do_not_run: + - null \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/yaml_for_testing/tests_in_each.yaml b/IntegrationTests/test_runner_tests/yaml_for_testing/tests_in_each.yaml new file mode 100644 index 00000000..b24be382 --- /dev/null +++ b/IntegrationTests/test_runner_tests/yaml_for_testing/tests_in_each.yaml @@ -0,0 +1,8 @@ +known_failures: + - Tests/mutate_superclass_method/test.som +failing_as_unspecified: + - Tests/mutate_superclass_method/test.som +unsupported: + - Tests/mutate_superclass_method/test.som +do_not_run: + - Tests/mutate_superclass_method/test.som diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py new file mode 100644 index 00000000..476c7d11 --- /dev/null +++ b/IntegrationTests/test_tester.py @@ -0,0 +1,256 @@ +""" +Tests that will check if the test_runner is wokring ok +""" + +import os +import pytest +from test_runner import ( + parse_test_file, + read_directory, + check_exp_given, + read_test_exceptions, +) +import conftest as external_vars + + +@pytest.mark.tester +def test_parse_file(): + """ + Test that the test_runner can parse a file correctly. + Expected output should be lower case + """ + + # EXAMPLE TUPLE + # 0. test_info_dict["name"], + # 1. test_info_dict["stdout"], + # 2. test_info_dict["stderr"], + # 3. test_info_dict["custom_classpath"], + # 4. 
test_info_dict["case_sensitive"], + + soms_for_testing_location = os.path.relpath( + os.path.dirname(__file__) + "/test_runner_tests/soms_for_testing" + ) + + # Firstly assign what is expected from parsing som_test_1.som + exp_stdout = ["1", "2", "3", "4", "5", "...", "10"] + exp_stderr = ["this is an error", "...", "hello, world"] + custom_classpath = None + case_sensitive = False + + # Parse test and assert values are as above + result_tuple = parse_test_file(soms_for_testing_location + "/som_test_1.som") + assert result_tuple[1] == exp_stdout, "som_test_1.som stdout is not correct" + assert result_tuple[2] == exp_stderr, "som_test_1.som stderr is not correct" + assert ( + result_tuple[3] is custom_classpath + ), "som_test_1.som custom_classpath should be None" + assert ( + result_tuple[4] is case_sensitive + ), "som_test_1.som case_sensitive shoudl be False" + + # Firstly assign what is expected from parsing som_test_2.som + exp_stdout = ["I AM cAsE sensitiVe", "...", "Dots/inTest"] + exp_stderr = ["CaSE sensitive ErrOr", "...", "TestCaseSensitivity"] + custom_classpath = None + case_sensitive = True + + # Parse test and assert values are as above + result_tuple = parse_test_file(soms_for_testing_location + "/som_test_2.som") + assert result_tuple[1] == exp_stdout, "som_test_2.som stdout is not correct" + assert result_tuple[2] == exp_stderr, "som_test_2.som stderr is not correct" + assert ( + result_tuple[3] is custom_classpath + ), "som_test_2.som custom_classpath should be None" + assert ( + result_tuple[4] is case_sensitive + ), "som_test_2.som case_sensitive shoudl be True" + + # Firstly assign what is expected from parsing som_test_3.som + exp_stdout = ["..."] + exp_stderr = ["..."] + custom_classpath = "core-lib/AreWeFastYet/Core" + case_sensitive = False + + # Parse test and assert values are as above + result_tuple = parse_test_file(soms_for_testing_location + "/som_test_3.som") + assert result_tuple[1] == exp_stdout, "som_test_3.som stdout is not correct" + assert result_tuple[2] == exp_stderr, "som_test_3.som stderr is not correct" + assert ( + result_tuple[3] == custom_classpath + ), f"som_test_3.som custom_classpath should be {custom_classpath}" + assert ( + result_tuple[4] is case_sensitive + ), "som_test_3.som case_sensitive shoudl be False" + + +@pytest.mark.tester +def test_test_discovery(): + """ + Test the som test discovery methods in the test_runner_tests directory + Three tests should be located, Update this method if more tests are added + """ + # Locate all SOM tests + test_runner_tests_location = os.path.relpath( + os.path.dirname(__file__) + "/test_runner_tests" + ) + tests = [] + read_directory(test_runner_tests_location, tests) + tests = sorted(tests) + + expected_tests = [ + f"{str(test_runner_tests_location)}/soms_for_testing/som_test_1.som", + f"{str(test_runner_tests_location)}/soms_for_testing/som_test_2.som", + f"{str(test_runner_tests_location)}/soms_for_testing/som_test_3.som", + ] + + assert ( + tests == expected_tests + ), "Some expected tests not found in tests_runner_tests, discovery could be incorrect" + + +@pytest.mark.tester +def test_check_output(): + """ + Test that the check_output function complies with the expected output. + """ + + stdout = "Hello World\nSome other output in the Middle\nThis is a test\n" + + expected_stdout = ["hello world", "...", "this is a test"] + + # For checking case sensitivity all are converted to lower at differnt parts of the program + # This just simulates that. 
It is very difficult to actually run this + + # Check case sensitive + assert ( + check_exp_given(stdout.split("\n"), expected_stdout) == 0 + ), "Output here should evaluate to False, currently case_sensitive" + # Check case insensitive + assert ( + check_exp_given(stdout.lower().split("\n"), expected_stdout) == 1 + ), "Output here should evaluate to True, currently case_insensitive" + + # Check large output file with ... used inline at beginning and at end + stdout = """This is SOM++ +Hello Rhys this is some sample output +1\n2\n3\n4\n4\n56\n6\n7\n7\n8\n9\n9 +1010101\n10101\n1010101 +1010101010101010100101010101010010101 +Rhys Walker +Moving on +Extra text +more Numbers +NUMBER NUMBER NUMBER NUMBER +""" + expected_stdout = [ + "Hello ... this is ... sample output", + "Rhys Walker", + "... on", + "more ...", + "... NUMBER ... NUMBER", + ] + + assert ( + check_exp_given(stdout.split("\n"), expected_stdout) == 1 + ), "Evaluation should have been successfull" + + stdout = """This is SOM++ +Hello, this is some sample output +There is some more on this line +And a little more here +""" + expected_stdout = [ + "Hello, ... sample ...", + "... is ... this line", + "... little ...", + ] + + assert ( + check_exp_given(stdout.split("\n"), expected_stdout) == 1 + ), "Evaluation should have been successfull" + + +@pytest.mark.tester +def test_different_yaml(): + """ + Test different yaml files which may be missing some information + Or be malformed + """ + + # First save the variables that will change in external_vars + temp_known = external_vars.known_failures + temp_unspecified = external_vars.failing_as_unspecified + temp_unsuported = external_vars.unsupported + temp_do_not_run = external_vars.do_not_run + + yaml_for_testing_location = os.path.relpath( + os.path.dirname(__file__) + "/test_runner_tests/yaml_for_testing" + ) + + # Read a yaml file with nothing after tag (Should all be empty lists) + read_test_exceptions(yaml_for_testing_location + "/missing_known_declaration.yaml") + assert ( + external_vars.known_failures == [] + ), "known_failures was not [] in missing_known_declaration.yaml" + assert ( + external_vars.failing_as_unspecified == [] + ), "failing_as_unspecified was not [] in missing_known_declaration.yaml" + assert ( + external_vars.unsupported == [] + ), "unsupported was not [] in missing_known_declaration.yaml" + assert ( + external_vars.do_not_run == [] + ), "do_not_run was not [] in missing_known_declaration.yaml" + + # Read a yaml file with null after each tag (Should all be []) + read_test_exceptions(yaml_for_testing_location + "/set_to_be_null.yaml") + assert external_vars.known_failures == [ + None + ], "known_failures was not [] in set_to_be_null.yaml" + assert external_vars.failing_as_unspecified == [ + None + ], "failing_as_unspecified was not [] in set_to_be_null.yaml" + assert external_vars.unsupported == [ + None + ], "unsupported was not [] in set_to_be_null.yaml" + assert external_vars.do_not_run == [ + None + ], "do_not_run was not [] in set_to_be_null.yaml" + + # Read a yaml file where the yamlFile object will evaluate to None type (Should be all []) + read_test_exceptions(yaml_for_testing_location + "/missing_all_tags.yaml") + assert ( + external_vars.known_failures == [] + ), "known_failures was not [] in missing_all_tags.yaml" + assert ( + external_vars.failing_as_unspecified == [] + ), "failing_as_unspecified was not [] in missing_all_tags.yaml" + assert ( + external_vars.unsupported == [] + ), "unsupported was not [] in missing_all_tags.yaml" + assert ( + 
external_vars.do_not_run == [] + ), "do_not_run was not [] in missing_all_tags.yaml" + + # Read a yaml file where each tag has one test included + # [core-lib/IntegrationTests/Tests/mutate_superclass_method/test.som] + read_test_exceptions(yaml_for_testing_location + "/tests_in_each.yaml") + test_list = ["core-lib/IntegrationTests/Tests/mutate_superclass_method/test.som"] + assert ( + external_vars.known_failures == test_list + ), f"known_failures was not {test_list} in missing_all_tags.yaml" + assert ( + external_vars.failing_as_unspecified == test_list + ), f"failing_as_unspecified was not {test_list} in missing_all_tags.yaml" + assert ( + external_vars.unsupported == test_list + ), f"unsupported was not {test_list} in missing_all_tags.yaml" + assert ( + external_vars.do_not_run == test_list + ), f"do_not_run was not {test_list} in missing_all_tags.yaml" + + # Reset external vars after test + external_vars.known_failures = temp_known + external_vars.failing_as_unspecified = temp_unspecified + external_vars.unsupported = temp_unsuported + external_vars.do_not_run = temp_do_not_run From affcc0832940b2a0c942e69301bef7e8a96f4b6a Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Wed, 16 Jul 2025 15:12:11 +0100 Subject: [PATCH 07/38] Added ability to specify *** to require part of word on left and check if there for word on right. --- IntegrationTests/test_runner.py | 44 +++++++++++++++++++++++++++++++++ IntegrationTests/test_tester.py | 32 ++++++++++++++++++++++++ 2 files changed, 76 insertions(+) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 9116463f..8fcf120f 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -170,6 +170,38 @@ def build_error_message( return error_message +def check_partial_word(word, exp_word): + """ + Check a partial expected String against a line + + returns True if the line matches + """ + + # Creates a list of words that are expected + exp_word_needed = exp_word.split("***")[0] + exp_word_optional = exp_word.split("***")[1] + + if exp_word_needed in word: + where = word.find(exp_word_needed)+len(exp_word_needed) + counter = 0 + for character in exp_word_optional: + + if counter+where > len(word)-1: + return True + + if word[counter+where] == character: + counter += 1 + continue + else: + return False + else: + return False + + if counter+where < len(word): + return False + + return True + def check_exp_given(given, expected): """ @@ -192,6 +224,16 @@ def check_exp_given(given, expected): exp_std_inx += 1 continue + # This is incompaptible with ... for line skipping + if "***" in expected[exp_std_inx]: + # Now do some partial checking + partial_output = check_partial_word(g_out, expected[exp_std_inx]) + if partial_output is True: + exp_std_inx += 1 + continue + else: + continue + if g_out.strip() != expected[exp_std_inx].strip(): # Check if expected has ... if "..." 
in expected[exp_std_inx]: @@ -199,6 +241,8 @@ def check_exp_given(given, expected): without_gap = expected[exp_std_inx].split("...") if all(without_gap in g_out for without_gap in without_gap): exp_std_inx += 1 + continue + # If the output does not match, continue without incrementing continue diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index 476c7d11..bb87a0d0 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -169,6 +169,38 @@ def test_check_output(): check_exp_given(stdout.split("\n"), expected_stdout) == 1 ), "Evaluation should have been successfull" + # Now check some outputs with *** + # A couple of assertions are run on this expacted + expected = ["...", "Really***LongWord"] + + stdout = "Some output, as an example\nExtra Line\nReallyLongWord" + assert ( + check_exp_given(stdout.split("\n"), expected) + ), "Evaluation should've been successfull" + stdout = "Some output, as an example\nExtra Line\nReally" + assert ( + check_exp_given(stdout.split("\n"), expected) + ), "Evaluation should've been successfull" + stdout = "Some output, as an example\nExtra Line\nReallyLong" + assert ( + check_exp_given(stdout.split("\n"), expected) + ), "Evaluation should've been successfull" + stdout = "Some output, as an example\nExtra Line\nReallyLo" + assert ( + check_exp_given(stdout.split("\n"), expected) + ), "Evaluation should've been successfull" + + # Now assert some failures to test when it should fail + stdout = "Some output, as an example\nExtra Line\nReallyLongTestFunction" + assert ( + not check_exp_given(stdout.split("\n"), expected) + ), "Evaluation should've been successfull" + + # This one should fail as there is still more word than expected + stdout = "Some output, as an example\nExtra Line\nReallyLongWordExtra" + assert ( + not check_exp_given(stdout.split("\n"), expected) + ), "Evaluation should've been successfull" @pytest.mark.tester def test_different_yaml(): From 4546e22813e1153a21c0a0d1bf5fd4f20a0fa4ed Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Wed, 16 Jul 2025 23:20:20 +0100 Subject: [PATCH 08/38] Fix PyLint issues and apply Black formatting Signed-off-by: Stefan Marr --- IntegrationTests/test_runner.py | 22 ++++++++++------------ IntegrationTests/test_tester.py | 25 +++++++++++++------------ 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 8fcf120f..8e3bc846 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -170,6 +170,7 @@ def build_error_message( return error_message + def check_partial_word(word, exp_word): """ Check a partial expected String against a line @@ -182,26 +183,25 @@ def check_partial_word(word, exp_word): exp_word_optional = exp_word.split("***")[1] if exp_word_needed in word: - where = word.find(exp_word_needed)+len(exp_word_needed) + where = word.find(exp_word_needed) + len(exp_word_needed) counter = 0 for character in exp_word_optional: - if counter+where > len(word)-1: + if counter + where > len(word) - 1: return True - if word[counter+where] == character: + if word[counter + where] == character: counter += 1 continue - else: - return False + return False else: return False - - if counter+where < len(word): + + if counter + where < len(word): return False - + return True - + def check_exp_given(given, expected): """ @@ -230,9 +230,7 @@ def check_exp_given(given, expected): partial_output = check_partial_word(g_out, expected[exp_std_inx]) if partial_output is 
True: exp_std_inx += 1 - continue - else: - continue + continue if g_out.strip() != expected[exp_std_inx].strip(): # Check if expected has ... diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index bb87a0d0..71b272e1 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -174,34 +174,35 @@ def test_check_output(): expected = ["...", "Really***LongWord"] stdout = "Some output, as an example\nExtra Line\nReallyLongWord" - assert ( - check_exp_given(stdout.split("\n"), expected) + assert check_exp_given( + stdout.split("\n"), expected ), "Evaluation should've been successfull" stdout = "Some output, as an example\nExtra Line\nReally" - assert ( - check_exp_given(stdout.split("\n"), expected) + assert check_exp_given( + stdout.split("\n"), expected ), "Evaluation should've been successfull" stdout = "Some output, as an example\nExtra Line\nReallyLong" - assert ( - check_exp_given(stdout.split("\n"), expected) + assert check_exp_given( + stdout.split("\n"), expected ), "Evaluation should've been successfull" stdout = "Some output, as an example\nExtra Line\nReallyLo" - assert ( - check_exp_given(stdout.split("\n"), expected) + assert check_exp_given( + stdout.split("\n"), expected ), "Evaluation should've been successfull" # Now assert some failures to test when it should fail stdout = "Some output, as an example\nExtra Line\nReallyLongTestFunction" - assert ( - not check_exp_given(stdout.split("\n"), expected) + assert not check_exp_given( + stdout.split("\n"), expected ), "Evaluation should've been successfull" # This one should fail as there is still more word than expected stdout = "Some output, as an example\nExtra Line\nReallyLongWordExtra" - assert ( - not check_exp_given(stdout.split("\n"), expected) + assert not check_exp_given( + stdout.split("\n"), expected ), "Evaluation should've been successfull" + @pytest.mark.tester def test_different_yaml(): """ From 79a727c12fe1e4c4e99cc87a465e6a1e280fd8dc Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Wed, 16 Jul 2025 23:21:58 +0100 Subject: [PATCH 09/38] Update GHA to use VM and integration-tests.yml Signed-off-by: Stefan Marr --- .github/workflows/ci.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 98e95791..3538d02b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,7 +18,7 @@ jobs: - name: Run Black Check run: | black ./IntegrationTests --check --diff - + - name: Run PyLint run: | pylint ./IntegrationTests/test_runner.py ./IntegrationTests/conftest.py @@ -186,14 +186,15 @@ jobs: echo "${{ matrix.som }} $SOM_TESTS" eval "${{ matrix.som }} $SOM_TESTS" - + - name: Run Integration Tests if: ${{ matrix.som != 'spec' }} run: | python -m pip install --upgrade pip pip install pytest - export EXECUTABLE="som-vm/${{ matrix.som }}" + export VM="som-vm/${{ matrix.som }}" export CLASSPATH=Smalltalk + export TEST_EXCEPTIONS=som-vm/integration-tests.yml pytest IntegrationTests # We currently test SomSom only on TruffleSOM From f0cdac4be348bb92ef99dee190c83ef5531a082e Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Wed, 16 Jul 2025 23:55:49 +0100 Subject: [PATCH 10/38] Fix typos Signed-off-by: Stefan Marr --- IntegrationTests/test_runner.py | 3 +-- IntegrationTests/test_tester.py | 8 ++++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 8e3bc846..14b32973 100644 --- 
a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -7,7 +7,6 @@ from pathlib import Path from difflib import ndiff import os -import sys import pytest import yaml import conftest as external_vars @@ -290,7 +289,7 @@ def read_test_exceptions(filename): """ Read a TEST_EXCEPTIONS file and extract the core information Filename should be either a relative path from CWD to file - or an abolute path. + or an absolute path. """ if filename: path = os.path.relpath(os.path.dirname(__file__)) diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index 71b272e1..d1c56e82 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -17,7 +17,7 @@ def test_parse_file(): """ Test that the test_runner can parse a file correctly. - Expected output should be lower case + Expected output should be lower-case """ # EXAMPLE TUPLE @@ -210,10 +210,10 @@ def test_different_yaml(): Or be malformed """ - # First save the variables that will change in external_vars + # First, save the variables that will change in external_vars temp_known = external_vars.known_failures temp_unspecified = external_vars.failing_as_unspecified - temp_unsuported = external_vars.unsupported + temp_unsupported = external_vars.unsupported temp_do_not_run = external_vars.do_not_run yaml_for_testing_location = os.path.relpath( @@ -285,5 +285,5 @@ def test_different_yaml(): # Reset external vars after test external_vars.known_failures = temp_known external_vars.failing_as_unspecified = temp_unspecified - external_vars.unsupported = temp_unsuported + external_vars.unsupported = temp_unsupported external_vars.do_not_run = temp_do_not_run From f8709634cc35b6cfedd7074020cb4474176fd870 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Wed, 16 Jul 2025 23:56:29 +0100 Subject: [PATCH 11/38] Run PyTest tests for the tester Signed-off-by: Stefan Marr --- .github/workflows/ci.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3538d02b..390c41bf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,13 +4,13 @@ on: [push, pull_request] jobs: python-style: - name: Python Style Checks + name: Python Checks runs-on: ubuntu-24.04 steps: - name: Checkout Repository uses: actions/checkout@v4 - - name: Install Black, PyLint and Pytest + - name: Install Black, PyLint and PyTest run: | python -m pip install --upgrade pip pip install black pylint pytest @@ -21,7 +21,11 @@ jobs: - name: Run PyLint run: | - pylint ./IntegrationTests/test_runner.py ./IntegrationTests/conftest.py + pylint ./IntegrationTests + + - name: Run PyTest + run: | + pytest -m tester ./IntegrationTests test_soms: runs-on: ubuntu-24.04 # ubuntu-latest From 92d9a0e60d3cdb4b74e5f110b01e73a9ce4ab896 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Thu, 17 Jul 2025 00:15:22 +0100 Subject: [PATCH 12/38] Simplify code Signed-off-by: Stefan Marr --- IntegrationTests/test_runner.py | 87 ++++++++++----------------------- 1 file changed, 26 insertions(+), 61 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 14b32973..165df52f 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -291,76 +291,41 @@ def read_test_exceptions(filename): Filename should be either a relative path from CWD to file or an absolute path. 
""" - if filename: - path = os.path.relpath(os.path.dirname(__file__)) - - with open(f"{filename}", "r", encoding="utf-8") as file: - yaml_file = yaml.safe_load(file) - - if yaml_file is not None: - if "known_failures" in yaml_file: - external_vars.known_failures = yaml_file["known_failures"] - if external_vars.known_failures is None: - external_vars.known_failures = [] - - else: - external_vars.known_failures = [] - - if "failing_as_unspecified" in yaml_file: - external_vars.failing_as_unspecified = yaml_file[ - "failing_as_unspecified" - ] - if external_vars.failing_as_unspecified is None: - external_vars.failing_as_unspecified = [] - - else: - external_vars.failing_as_unspecified = [] - - if "unsupported" in yaml_file: - external_vars.unsupported = yaml_file["unsupported"] - if external_vars.unsupported is None: - external_vars.unsupported = [] - - else: - external_vars.unsupported = [] - - if "do_not_run" in yaml_file: - external_vars.do_not_run = yaml_file["do_not_run"] - if external_vars.do_not_run is None: - external_vars.do_not_run = [] - - else: - external_vars.do_not_run = [] - else: - external_vars.known_failures = [] - external_vars.failing_as_unspecified = [] - external_vars.unsupported = [] - external_vars.do_not_run = [] - - if ( - external_vars.known_failures is not None - and external_vars.known_failures != [None] - ): + if not filename: + return + + with open(f"{filename}", "r", encoding="utf-8") as file: + yaml_file = yaml.safe_load(file) + + if yaml_file is not None: + external_vars.known_failures = yaml_file.get("known_failures", []) or [] + external_vars.failing_as_unspecified = ( + yaml_file.get("failing_as_unspecified", []) or [] + ) + external_vars.unsupported = yaml_file.get("unsupported", []) or [] + external_vars.do_not_run = yaml_file.get("do_not_run", []) or [] + + path = os.path.relpath(os.path.dirname(__file__)) + external_vars.known_failures = [ - os.path.join(path, test) for test in external_vars.known_failures + os.path.join(path, test) + for test in external_vars.known_failures + if test is not None ] - if ( - external_vars.failing_as_unspecified is not None - and external_vars.failing_as_unspecified != [None] - ): external_vars.failing_as_unspecified = [ os.path.join(path, test) for test in external_vars.failing_as_unspecified + if test is not None ] - if external_vars.unsupported is not None and external_vars.unsupported != [ - None - ]: external_vars.unsupported = [ - os.path.join(path, test) for test in external_vars.unsupported + os.path.join(path, test) + for test in external_vars.unsupported + if test is not None ] - if external_vars.do_not_run is not None and external_vars.do_not_run != [None]: external_vars.do_not_run = [ - os.path.join(path, test) for test in external_vars.do_not_run + os.path.join(path, test) + for test in external_vars.do_not_run + if test is not None ] From f25126fb01a563827e4b20a6e774339640e5a49c Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Thu, 17 Jul 2025 00:15:47 +0100 Subject: [PATCH 13/38] Simplify assertions, pytest already shows that info Signed-off-by: Stefan Marr --- IntegrationTests/test_tester.py | 50 +++++++++------------------------ 1 file changed, 13 insertions(+), 37 deletions(-) diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index d1c56e82..546b1c74 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -237,50 +237,26 @@ def test_different_yaml(): # Read a yaml file with null after each tag (Should all be []) 
read_test_exceptions(yaml_for_testing_location + "/set_to_be_null.yaml") - assert external_vars.known_failures == [ - None - ], "known_failures was not [] in set_to_be_null.yaml" - assert external_vars.failing_as_unspecified == [ - None - ], "failing_as_unspecified was not [] in set_to_be_null.yaml" - assert external_vars.unsupported == [ - None - ], "unsupported was not [] in set_to_be_null.yaml" - assert external_vars.do_not_run == [ - None - ], "do_not_run was not [] in set_to_be_null.yaml" + assert external_vars.known_failures == [] + assert external_vars.failing_as_unspecified == [] + assert external_vars.unsupported == [] + assert external_vars.do_not_run == [] # Read a yaml file where the yamlFile object will evaluate to None type (Should be all []) read_test_exceptions(yaml_for_testing_location + "/missing_all_tags.yaml") - assert ( - external_vars.known_failures == [] - ), "known_failures was not [] in missing_all_tags.yaml" - assert ( - external_vars.failing_as_unspecified == [] - ), "failing_as_unspecified was not [] in missing_all_tags.yaml" - assert ( - external_vars.unsupported == [] - ), "unsupported was not [] in missing_all_tags.yaml" - assert ( - external_vars.do_not_run == [] - ), "do_not_run was not [] in missing_all_tags.yaml" + assert external_vars.known_failures == [] + assert external_vars.failing_as_unspecified == [] + assert external_vars.unsupported == [] + assert external_vars.do_not_run == [] # Read a yaml file where each tag has one test included # [core-lib/IntegrationTests/Tests/mutate_superclass_method/test.som] read_test_exceptions(yaml_for_testing_location + "/tests_in_each.yaml") - test_list = ["core-lib/IntegrationTests/Tests/mutate_superclass_method/test.som"] - assert ( - external_vars.known_failures == test_list - ), f"known_failures was not {test_list} in missing_all_tags.yaml" - assert ( - external_vars.failing_as_unspecified == test_list - ), f"failing_as_unspecified was not {test_list} in missing_all_tags.yaml" - assert ( - external_vars.unsupported == test_list - ), f"unsupported was not {test_list} in missing_all_tags.yaml" - assert ( - external_vars.do_not_run == test_list - ), f"do_not_run was not {test_list} in missing_all_tags.yaml" + test_list = ["./Tests/mutate_superclass_method/test.som"] + assert external_vars.known_failures == test_list + assert external_vars.failing_as_unspecified == test_list + assert external_vars.unsupported == test_list + assert external_vars.do_not_run == test_list # Reset external vars after test external_vars.known_failures = temp_known From 59e82e4f5af6e9415898907279adbdc9b64bc579 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Thu, 17 Jul 2025 00:16:48 +0100 Subject: [PATCH 14/38] Make failures pytest.fail, and move reading of envvars into prepare_tests() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It’s only needed when the tests are prepared. Signed-off-by: Stefan Marr --- IntegrationTests/test_runner.py | 46 ++++++++++++++------------------- 1 file changed, 19 insertions(+), 27 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 165df52f..6c1054ca 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -1,3 +1,4 @@ +# pylint: disable=missing-function-docstring """ This is the SOM integration test runner file. Pytest automatically discovers this file and will find all .som test files in the below directories. 
@@ -329,40 +330,31 @@ def read_test_exceptions(filename): ] -# START, ENTRY, BEGIN, MAIN -# Code below here runs before pytest finds it's methods -location = os.path.relpath(os.path.dirname(__file__)) -if not os.path.exists(location + "/Tests"): - pytest.exit( - "/Tests directory not found. Please make sure the lang_tests are installed" - ) - -# Work out settings for the application (They are labelled REQUIRED or OPTIONAL) -if "CLASSPATH" not in os.environ: # REQUIRED - sys.exit("Please set the CLASSPATH environment variable") +def prepare_tests(): + location = os.path.relpath(os.path.dirname(__file__)) + if not os.path.exists(location + "/Tests"): + pytest.fail( + "`Tests` directory not found. Please make sure the lang_tests are installed" + ) -if "VM" not in os.environ: # REQUIRED - sys.exit("Please set the VM environment variable") + # Work out settings for the application (They are labelled REQUIRED or OPTIONAL) + if "CLASSPATH" not in os.environ: # REQUIRED + pytest.fail("Please set the CLASSPATH environment variable") -if "TEST_EXCEPTIONS" in os.environ: # OPTIONAL - external_vars.TEST_EXCEPTIONS = location + "/" + os.environ["TEST_EXCEPTIONS"] + if "VM" not in os.environ: # REQUIRED + pytest.fail("Please set the VM environment variable") -if "GENERATE_REPORT" in os.environ: # OPTIONAL - # Value is the location - # Its prescense in env variables signifies intent to save - external_vars.GENERATE_REPORT = os.environ["GENERATE_REPORT"] + if "TEST_EXCEPTIONS" in os.environ: # OPTIONAL + external_vars.TEST_EXCEPTIONS = location + "/" + os.environ["TEST_EXCEPTIONS"] -external_vars.CLASSPATH = os.environ["CLASSPATH"] -external_vars.VM = os.environ["VM"] + if "GENERATE_REPORT" in os.environ: # OPTIONAL + external_vars.GENERATE_REPORT = os.environ["GENERATE_REPORT"] -read_test_exceptions(external_vars.TEST_EXCEPTIONS) + external_vars.CLASSPATH = os.environ["CLASSPATH"] + external_vars.VM = os.environ["VM"] + read_test_exceptions(external_vars.TEST_EXCEPTIONS) -def prepare_tests(): - """ - Prepare all of the tests and their relevent information into a dictionary - so that the test runner understands each test - """ test_files = [] read_directory(location + "/Tests", test_files) test_files = sorted(test_files) From 1c9f4bc054cc894633ab84720676c798c7f2690d Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Thu, 17 Jul 2025 09:51:38 +0100 Subject: [PATCH 15/38] Updated to now feature relative paths from CWD for testing --- IntegrationTests/test_tester.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index 546b1c74..f8e1fcbd 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -219,6 +219,9 @@ def test_different_yaml(): yaml_for_testing_location = os.path.relpath( os.path.dirname(__file__) + "/test_runner_tests/yaml_for_testing" ) + full_path_from_CWD = os.path.relpath( + os.path.dirname(__file__) + ) # Read a yaml file with nothing after tag (Should all be empty lists) read_test_exceptions(yaml_for_testing_location + "/missing_known_declaration.yaml") @@ -252,7 +255,7 @@ def test_different_yaml(): # Read a yaml file where each tag has one test included # [core-lib/IntegrationTests/Tests/mutate_superclass_method/test.som] read_test_exceptions(yaml_for_testing_location + "/tests_in_each.yaml") - test_list = ["./Tests/mutate_superclass_method/test.som"] + test_list = [f"{str(full_path_from_CWD)}/Tests/mutate_superclass_method/test.som"] assert external_vars.known_failures == 
test_list assert external_vars.failing_as_unspecified == test_list assert external_vars.unsupported == test_list From 8759095ee2b72221acc1f6d5b7b4f5eb1f88933b Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Thu, 17 Jul 2025 10:30:03 +0100 Subject: [PATCH 16/38] Path now checks for current directory and removes "." from its name so that passing.yaml can be recognised --- IntegrationTests/test_runner.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 6c1054ca..e73d4024 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -307,6 +307,8 @@ def read_test_exceptions(filename): external_vars.do_not_run = yaml_file.get("do_not_run", []) or [] path = os.path.relpath(os.path.dirname(__file__)) + if path == ".": + path = "" external_vars.known_failures = [ os.path.join(path, test) @@ -329,7 +331,6 @@ def read_test_exceptions(filename): if test is not None ] - def prepare_tests(): location = os.path.relpath(os.path.dirname(__file__)) if not os.path.exists(location + "/Tests"): From 1f17fc31d4d64bd5e11b20ca27fc89e865b7a6a7 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Thu, 17 Jul 2025 10:34:42 +0100 Subject: [PATCH 17/38] GENERATE_REPORT is now compatible with multiple directories. It will remove anything in the path from before Tests/ --- IntegrationTests/conftest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/IntegrationTests/conftest.py b/IntegrationTests/conftest.py index c8e85803..944f2cec 100644 --- a/IntegrationTests/conftest.py +++ b/IntegrationTests/conftest.py @@ -92,7 +92,8 @@ def pytest_sessionfinish(exitstatus): if len(tests_failed_unexpectedly) != 0: for test in tests_failed_unexpectedly: - known_failures.append(str(test)) + # Remove the part of the path that is incompatible with multiple directory running + known_failures.append("Tests/"+str(test).split("Tests/")[len(test.split("Tests/"))-1]) # Generate a report_message to save report_data = { From d3c24729b87bfa79aaec1905f04d96c7987f5258 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Thu, 17 Jul 2025 10:52:31 +0100 Subject: [PATCH 18/38] Text exceptions no longer alters path to yaml file --- IntegrationTests/test_runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index e73d4024..c3fbba75 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -346,7 +346,7 @@ def prepare_tests(): pytest.fail("Please set the VM environment variable") if "TEST_EXCEPTIONS" in os.environ: # OPTIONAL - external_vars.TEST_EXCEPTIONS = location + "/" + os.environ["TEST_EXCEPTIONS"] + external_vars.TEST_EXCEPTIONS = os.environ["TEST_EXCEPTIONS"] if "GENERATE_REPORT" in os.environ: # OPTIONAL external_vars.GENERATE_REPORT = os.environ["GENERATE_REPORT"] From 400dbb590278c1ca9152988b06968f709048126c Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Thu, 17 Jul 2025 14:35:02 +0100 Subject: [PATCH 19/38] Can now specify @tag_name in custom_classpath to load an environment variable by that name. 
Useful for custom classpath loading --- IntegrationTests/Tests/vector_awfy.som | 2 +- IntegrationTests/Tests/vector_awfy2.som | 2 +- .../Tests/vector_awfy_capacity.som | 2 +- IntegrationTests/test_runner.py | 27 +++++++++++++++++++ 4 files changed, 30 insertions(+), 3 deletions(-) diff --git a/IntegrationTests/Tests/vector_awfy.som b/IntegrationTests/Tests/vector_awfy.som index 277eb986..9e696c9a 100644 --- a/IntegrationTests/Tests/vector_awfy.som +++ b/IntegrationTests/Tests/vector_awfy.som @@ -1,7 +1,7 @@ " VM: status: success - custom_classpath: ./core-lib/Examples/AreWeFastYet/Core:./core-lib/Smalltalk + custom_classpath: @AWFY:@CLASSPATH stdout: nil diff --git a/IntegrationTests/Tests/vector_awfy2.som b/IntegrationTests/Tests/vector_awfy2.som index 0a214f08..c062158a 100644 --- a/IntegrationTests/Tests/vector_awfy2.som +++ b/IntegrationTests/Tests/vector_awfy2.som @@ -1,7 +1,7 @@ " VM: status: success - custom_classpath: ./core-lib/Examples/AreWeFastYet/Core:./core-lib/Smalltalk + custom_classpath: @AWFY:@CLASSPATH stdout: nil diff --git a/IntegrationTests/Tests/vector_awfy_capacity.som b/IntegrationTests/Tests/vector_awfy_capacity.som index 32c44249..5f19f8ad 100644 --- a/IntegrationTests/Tests/vector_awfy_capacity.som +++ b/IntegrationTests/Tests/vector_awfy_capacity.som @@ -1,7 +1,7 @@ " VM: status: success - custom_classpath: ./core-lib/Examples/AreWeFastYet/Core:./core-lib/Smalltalk + custom_classpath: @AWFY:@CLASSPATH stdout: 50 100 diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index c3fbba75..09b2110f 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -84,6 +84,33 @@ def parse_test_file(test_file): test_info_dict["custom_classpath"] = line.split( "custom_classpath:" )[1].strip() + + classpath_t = test_info_dict["custom_classpath"] + + # Now check our custom classpath for any tags + # Tags are defined as @tag in the classpath + # Will then assign the EXACT value of the + # Environment variable to that spot + + if "@" in classpath_t: + + classpath_joined = "" + # Does the classpath have a splitter ":" + if ":" in classpath_t: + split_list = classpath_t.split(":") + for tag in split_list: + if "@" in tag: + tag = tag.replace("@", "") + classpath_joined += os.environ[tag] + ":" + continue + classpath_joined += tag + ":" + else: + classpath_t = classpath_t.replace("@", "") + classpath_joined += os.environ[classpath_t] + + test_info_dict["custom_classpath"] = classpath_joined + + continue # Check if we are case sensitive (has to be toggled on) From 81e34557714d51af2b0b139be7e8c1da209bef99 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Thu, 17 Jul 2025 14:56:35 +0100 Subject: [PATCH 20/38] Updated pytest parameterize to not run prepare_tests twice and IDs now match what is expected --- IntegrationTests/test_runner.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 09b2110f..afd383ce 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -388,11 +388,25 @@ def prepare_tests(): test_files = sorted(test_files) return collect_tests(test_files) +def assign_ids(tests): + """ + Assign test IDs the same way as the names are treated + """ + test_ids = [] + for test in tests: + test_name = test[0] + test_t = "Tests/"+test_name.split("Tests/")[-1] + test_ids.append(test_t) + + return test_ids + +# Stops prepare_tests() being called twice +TEST_FILES = prepare_tests() 
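The `custom_classpath` handling added above replaces `@`-prefixed entries with the value of the environment variable of the same name. A condensed sketch of that idea (the function `expand_classpath` is illustrative, not the exact code in `parse_test_file`):

```python
import os


def expand_classpath(spec):
    """Expand "@NAME" entries in a ":"-separated classpath from the environment."""
    parts = []
    for entry in spec.split(":"):
        if entry.startswith("@"):
            parts.append(os.environ[entry[1:]])  # KeyError if the variable is unset
        else:
            parts.append(entry)
    return ":".join(parts)


# With AWFY and CLASSPATH exported, "@AWFY:@CLASSPATH" becomes
# "<value of AWFY>:<value of CLASSPATH>".
```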
@pytest.mark.parametrize( "name,stdout,stderr,custom_classpath,case_sensitive", - prepare_tests(), - ids=[str(test_args[0]) for test_args in prepare_tests()], + TEST_FILES, + ids=assign_ids(TEST_FILES), ) # pylint: disable=too-many-branches def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): From bc2d1ce9672f61f3d7581ac5938527f7a61eba6f Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Thu, 17 Jul 2025 15:00:29 +0100 Subject: [PATCH 21/38] Updated GENERATE_REPORT to have all tests with a consistent name from Tests/ So all tests will be Tests/test.som --- IntegrationTests/conftest.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/IntegrationTests/conftest.py b/IntegrationTests/conftest.py index 944f2cec..0c7af2c5 100644 --- a/IntegrationTests/conftest.py +++ b/IntegrationTests/conftest.py @@ -93,7 +93,7 @@ def pytest_sessionfinish(exitstatus): if len(tests_failed_unexpectedly) != 0: for test in tests_failed_unexpectedly: # Remove the part of the path that is incompatible with multiple directory running - known_failures.append("Tests/"+str(test).split("Tests/")[len(test.split("Tests/"))-1]) + known_failures.append("Tests/"+str(test).split("Tests/")[-1]) # Generate a report_message to save report_data = { @@ -106,8 +106,8 @@ def pytest_sessionfinish(exitstatus): "note": "Totals include expected failures", }, "unexpected": { - "passed": [str(test) for test in tests_passed_unexpectedly], - "failed": [str(test) for test in tests_failed_unexpectedly], + "passed": ["Tests/"+str(test).split("Tests/")[-1] for test in tests_passed_unexpectedly], + "failed": ["Tests/"+str(test).split("Tests/")[-1] for test in tests_failed_unexpectedly], }, "environment": { "virtual machine": VM, From 5c34d6ffde5ed74cbb831e588ec530cb81267b7a Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Thu, 17 Jul 2025 22:57:35 +0100 Subject: [PATCH 22/38] Apply black Signed-off-by: Stefan Marr --- IntegrationTests/conftest.py | 12 +++++++++--- IntegrationTests/test_runner.py | 9 ++++++--- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/IntegrationTests/conftest.py b/IntegrationTests/conftest.py index 0c7af2c5..90364311 100644 --- a/IntegrationTests/conftest.py +++ b/IntegrationTests/conftest.py @@ -93,7 +93,7 @@ def pytest_sessionfinish(exitstatus): if len(tests_failed_unexpectedly) != 0: for test in tests_failed_unexpectedly: # Remove the part of the path that is incompatible with multiple directory running - known_failures.append("Tests/"+str(test).split("Tests/")[-1]) + known_failures.append("Tests/" + str(test).split("Tests/")[-1]) # Generate a report_message to save report_data = { @@ -106,8 +106,14 @@ def pytest_sessionfinish(exitstatus): "note": "Totals include expected failures", }, "unexpected": { - "passed": ["Tests/"+str(test).split("Tests/")[-1] for test in tests_passed_unexpectedly], - "failed": ["Tests/"+str(test).split("Tests/")[-1] for test in tests_failed_unexpectedly], + "passed": [ + "Tests/" + str(test).split("Tests/")[-1] + for test in tests_passed_unexpectedly + ], + "failed": [ + "Tests/" + str(test).split("Tests/")[-1] + for test in tests_failed_unexpectedly + ], }, "environment": { "virtual machine": VM, diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index afd383ce..0beab093 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -109,8 +109,7 @@ def parse_test_file(test_file): classpath_joined += os.environ[classpath_t] test_info_dict["custom_classpath"] = classpath_joined - - + 
continue # Check if we are case sensitive (has to be toggled on) @@ -358,6 +357,7 @@ def read_test_exceptions(filename): if test is not None ] + def prepare_tests(): location = os.path.relpath(os.path.dirname(__file__)) if not os.path.exists(location + "/Tests"): @@ -388,6 +388,7 @@ def prepare_tests(): test_files = sorted(test_files) return collect_tests(test_files) + def assign_ids(tests): """ Assign test IDs the same way as the names are treated @@ -395,14 +396,16 @@ def assign_ids(tests): test_ids = [] for test in tests: test_name = test[0] - test_t = "Tests/"+test_name.split("Tests/")[-1] + test_t = "Tests/" + test_name.split("Tests/")[-1] test_ids.append(test_t) return test_ids + # Stops prepare_tests() being called twice TEST_FILES = prepare_tests() + @pytest.mark.parametrize( "name,stdout,stderr,custom_classpath,case_sensitive", TEST_FILES, From d068996a61d9d1c10c7d638c19f46ae61fda6e20 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Thu, 17 Jul 2025 22:57:59 +0100 Subject: [PATCH 23/38] Remove redundant assert messages Signed-off-by: Stefan Marr --- IntegrationTests/test_tester.py | 109 ++++++++++---------------------- 1 file changed, 34 insertions(+), 75 deletions(-) diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index f8e1fcbd..d68412e4 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -1,3 +1,4 @@ +# pylint: disable=missing-function-docstring """ Tests that will check if the test_runner is wokring ok """ @@ -39,14 +40,10 @@ def test_parse_file(): # Parse test and assert values are as above result_tuple = parse_test_file(soms_for_testing_location + "/som_test_1.som") - assert result_tuple[1] == exp_stdout, "som_test_1.som stdout is not correct" - assert result_tuple[2] == exp_stderr, "som_test_1.som stderr is not correct" - assert ( - result_tuple[3] is custom_classpath - ), "som_test_1.som custom_classpath should be None" - assert ( - result_tuple[4] is case_sensitive - ), "som_test_1.som case_sensitive shoudl be False" + assert result_tuple[1] == exp_stdout + assert result_tuple[2] == exp_stderr + assert result_tuple[3] is custom_classpath + assert result_tuple[4] is case_sensitive # Firstly assign what is expected from parsing som_test_2.som exp_stdout = ["I AM cAsE sensitiVe", "...", "Dots/inTest"] @@ -56,14 +53,10 @@ def test_parse_file(): # Parse test and assert values are as above result_tuple = parse_test_file(soms_for_testing_location + "/som_test_2.som") - assert result_tuple[1] == exp_stdout, "som_test_2.som stdout is not correct" - assert result_tuple[2] == exp_stderr, "som_test_2.som stderr is not correct" - assert ( - result_tuple[3] is custom_classpath - ), "som_test_2.som custom_classpath should be None" - assert ( - result_tuple[4] is case_sensitive - ), "som_test_2.som case_sensitive shoudl be True" + assert result_tuple[1] == exp_stdout + assert result_tuple[2] == exp_stderr + assert result_tuple[3] is custom_classpath + assert result_tuple[4] is case_sensitive # Firstly assign what is expected from parsing som_test_3.som exp_stdout = ["..."] @@ -73,14 +66,10 @@ def test_parse_file(): # Parse test and assert values are as above result_tuple = parse_test_file(soms_for_testing_location + "/som_test_3.som") - assert result_tuple[1] == exp_stdout, "som_test_3.som stdout is not correct" - assert result_tuple[2] == exp_stderr, "som_test_3.som stderr is not correct" - assert ( - result_tuple[3] == custom_classpath - ), f"som_test_3.som custom_classpath should be {custom_classpath}" - assert ( - 
result_tuple[4] is case_sensitive - ), "som_test_3.som case_sensitive shoudl be False" + assert result_tuple[1] == exp_stdout + assert result_tuple[2] == exp_stderr + assert result_tuple[3] == custom_classpath + assert result_tuple[4] is case_sensitive @pytest.mark.tester @@ -103,9 +92,7 @@ def test_test_discovery(): f"{str(test_runner_tests_location)}/soms_for_testing/som_test_3.som", ] - assert ( - tests == expected_tests - ), "Some expected tests not found in tests_runner_tests, discovery could be incorrect" + assert tests == expected_tests @pytest.mark.tester @@ -122,13 +109,10 @@ def test_check_output(): # This just simulates that. It is very difficult to actually run this # Check case sensitive - assert ( - check_exp_given(stdout.split("\n"), expected_stdout) == 0 - ), "Output here should evaluate to False, currently case_sensitive" + assert check_exp_given(stdout.split("\n"), expected_stdout) == 0 + # Check case insensitive - assert ( - check_exp_given(stdout.lower().split("\n"), expected_stdout) == 1 - ), "Output here should evaluate to True, currently case_insensitive" + assert check_exp_given(stdout.lower().split("\n"), expected_stdout) == 1 # Check large output file with ... used inline at beginning and at end stdout = """This is SOM++ @@ -150,9 +134,7 @@ def test_check_output(): "... NUMBER ... NUMBER", ] - assert ( - check_exp_given(stdout.split("\n"), expected_stdout) == 1 - ), "Evaluation should have been successfull" + assert check_exp_given(stdout.split("\n"), expected_stdout) == 1 stdout = """This is SOM++ Hello, this is some sample output @@ -165,42 +147,29 @@ def test_check_output(): "... little ...", ] - assert ( - check_exp_given(stdout.split("\n"), expected_stdout) == 1 - ), "Evaluation should have been successfull" + assert check_exp_given(stdout.split("\n"), expected_stdout) == 1 - # Now check some outputs with *** - # A couple of assertions are run on this expacted expected = ["...", "Really***LongWord"] stdout = "Some output, as an example\nExtra Line\nReallyLongWord" - assert check_exp_given( - stdout.split("\n"), expected - ), "Evaluation should've been successfull" + assert check_exp_given(stdout.split("\n"), expected) + stdout = "Some output, as an example\nExtra Line\nReally" - assert check_exp_given( - stdout.split("\n"), expected - ), "Evaluation should've been successfull" + assert check_exp_given(stdout.split("\n"), expected) + stdout = "Some output, as an example\nExtra Line\nReallyLong" - assert check_exp_given( - stdout.split("\n"), expected - ), "Evaluation should've been successfull" + assert check_exp_given(stdout.split("\n"), expected) + stdout = "Some output, as an example\nExtra Line\nReallyLo" - assert check_exp_given( - stdout.split("\n"), expected - ), "Evaluation should've been successfull" + assert check_exp_given(stdout.split("\n"), expected) # Now assert some failures to test when it should fail stdout = "Some output, as an example\nExtra Line\nReallyLongTestFunction" - assert not check_exp_given( - stdout.split("\n"), expected - ), "Evaluation should've been successfull" + assert not check_exp_given(stdout.split("\n"), expected) # This one should fail as there is still more word than expected stdout = "Some output, as an example\nExtra Line\nReallyLongWordExtra" - assert not check_exp_given( - stdout.split("\n"), expected - ), "Evaluation should've been successfull" + assert not check_exp_given(stdout.split("\n"), expected) @pytest.mark.tester @@ -219,24 +188,14 @@ def test_different_yaml(): yaml_for_testing_location = os.path.relpath( 
os.path.dirname(__file__) + "/test_runner_tests/yaml_for_testing" ) - full_path_from_CWD = os.path.relpath( - os.path.dirname(__file__) - ) + full_path_from_cwd = os.path.relpath(os.path.dirname(__file__)) # Read a yaml file with nothing after tag (Should all be empty lists) read_test_exceptions(yaml_for_testing_location + "/missing_known_declaration.yaml") - assert ( - external_vars.known_failures == [] - ), "known_failures was not [] in missing_known_declaration.yaml" - assert ( - external_vars.failing_as_unspecified == [] - ), "failing_as_unspecified was not [] in missing_known_declaration.yaml" - assert ( - external_vars.unsupported == [] - ), "unsupported was not [] in missing_known_declaration.yaml" - assert ( - external_vars.do_not_run == [] - ), "do_not_run was not [] in missing_known_declaration.yaml" + assert external_vars.known_failures == [] + assert external_vars.failing_as_unspecified == [] + assert external_vars.unsupported == [] + assert external_vars.do_not_run == [] # Read a yaml file with null after each tag (Should all be []) read_test_exceptions(yaml_for_testing_location + "/set_to_be_null.yaml") @@ -255,7 +214,7 @@ def test_different_yaml(): # Read a yaml file where each tag has one test included # [core-lib/IntegrationTests/Tests/mutate_superclass_method/test.som] read_test_exceptions(yaml_for_testing_location + "/tests_in_each.yaml") - test_list = [f"{str(full_path_from_CWD)}/Tests/mutate_superclass_method/test.som"] + test_list = [f"{str(full_path_from_cwd)}/Tests/mutate_superclass_method/test.som"] assert external_vars.known_failures == test_list assert external_vars.failing_as_unspecified == test_list assert external_vars.unsupported == test_list From 7a024e72e93a3f6ec033c8a710323f58b59c3289 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Fri, 18 Jul 2025 09:29:08 +0100 Subject: [PATCH 24/38] Updated test_runner to feature more robust @tag classpath swapping. 
Updated parse_test_file to feature methods rather than one large method, Allows for more fine grained testing --- IntegrationTests/test_runner.py | 142 ++++++++++++++++++++------------ 1 file changed, 89 insertions(+), 53 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 0beab093..16f627f9 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -60,6 +60,91 @@ def collect_tests(test_files): return tests +# pylint: disable=too-many-nested-blocks +def parse_custom_classpath(comment): + """ + Based on the comment will calculate the custom_classpath + for the current test + + Return: The custom classpath + """ + comment_lines = comment.split("\n") + for line in comment_lines: + if "custom_classpath" in line: + classpath = line.split("custom_classpath:")[1].strip() + + classpath_t = classpath + + # Now check our custom classpath for any tags + # Tags are defined as @tag in the classpath + # Will then assign the EXACT value of the + # Environment variable to that spot + + if classpath_t.find("@") >= 0: + + classpath_joined = "" + # Does the classpath have a splitter ":" + if ":" in classpath_t: + split_list = classpath_t.split(":") + for tag in split_list: + if tag.find("@") >= 0: + tag = tag.replace("@", "") + if tag in os.environ: + classpath_joined += os.environ[tag] + ":" + classpath_joined += tag + ":" + continue + pytest.fail(f"Environment variable {tag} not set") + else: + classpath_t = classpath_t.replace("@", "") + if classpath_t in os.environ: + classpath_joined += os.environ[classpath_t] + else: + pytest.fail(f"Environment variable {classpath_t} should be set") + + classpath = classpath_joined + + return classpath + return None + + +def parse_case_sensitive(comment): + """ + Based on a comment decide whether a case_sensitive is requried + """ + comment_lines = comment.split("\n") + for line in comment_lines: + if "case_sensitive" in line: + return bool(line.split("case_sensitive:")[1].strip().lower() == "true") + + return False + + +def parse_stdout(comment): + """ + Based on a comment parse the expected stdout + """ + std_out = comment.split("stdout:")[1] + if "stderr" in std_out: + std_err_inx = std_out.index("stderr:") + std_out = std_out[:std_err_inx] + std_err_l = std_out.split("\n") + std_err_l = [line.strip() for line in std_err_l if line.strip()] + return std_err_l + + +def parse_stderr(comment): + """ + Based on a comment parse the expected stderr + """ + std_err = comment.split("stderr:")[1] + if "stdout" in std_err: + std_out_inx = std_err.index("stdout:") + std_err = std_err[:std_out_inx] + std_err_l = std_err.split("\n") + std_err_l = [line.strip() for line in std_err_l if line.strip()] + return std_err_l + + def parse_test_file(test_file): """ parse the test file to extract the important information @@ -78,66 +163,17 @@ def parse_test_file(test_file): # Make sure if using a custom test classpath that it is above # Stdout and Stderr if "custom_classpath" in comment: - comment_lines = comment.split("\n") - for line in comment_lines: - if "custom_classpath" in line: - test_info_dict["custom_classpath"] = line.split( - "custom_classpath:" - )[1].strip() - - classpath_t = test_info_dict["custom_classpath"] - - # Now check our custom classpath for any tags - # Tags are defined as @tag in the classpath - # Will then assign the EXACT value of the - # Environment variable to that spot - - if "@" in classpath_t: - - classpath_joined = "" - # Does the classpath have a splitter ":" - if ":" in classpath_t: 
- split_list = classpath_t.split(":") - for tag in split_list: - if "@" in tag: - tag = tag.replace("@", "") - classpath_joined += os.environ[tag] + ":" - continue - classpath_joined += tag + ":" - else: - classpath_t = classpath_t.replace("@", "") - classpath_joined += os.environ[classpath_t] - - test_info_dict["custom_classpath"] = classpath_joined - - continue + test_info_dict["custom_classpath"] = parse_custom_classpath(comment) # Check if we are case sensitive (has to be toggled on) if "case_sensitive" in comment: - comment_lines = comment.split("\n") - for line in comment_lines: - if "case_sensitive" in line: - test_info_dict["case_sensitive"] = bool( - line.split("case_sensitive:")[1].strip().lower() == "true" - ) + test_info_dict["case_sensitive"] = parse_case_sensitive(comment) if "stdout" in comment: - std_out = comment.split("stdout:")[1] - if "stderr" in std_out: - std_err_inx = std_out.index("stderr:") - std_out = std_out[:std_err_inx] - std_err_l = std_out.split("\n") - std_err_l = [line.strip() for line in std_err_l if line.strip()] - test_info_dict["stdout"] = std_err_l + test_info_dict["stdout"] = parse_stdout(comment) if "stderr" in comment: - std_err = comment.split("stderr:")[1] - if "stdout" in std_err: - std_out_inx = std_err.index("stdout:") - std_err = std_err[:std_out_inx] - std_err_l = std_err.split("\n") - std_err_l = [line.strip() for line in std_err_l if line.strip()] - test_info_dict["stderr"] = std_err_l + test_info_dict["stderr"] = parse_stderr(comment) if test_info_dict["case_sensitive"]: test_tuple = ( From a760cf258b05c10e87ca61d6c85e4f12fcd7f710 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Fri, 18 Jul 2025 10:03:23 +0100 Subject: [PATCH 25/38] Updated test_tester to feature more rigorous tests for parsing a test file --- IntegrationTests/test_runner.py | 7 ++-- .../soms_for_testing/som_test_4.som | 17 ++++++++++ .../soms_for_testing/som_test_5.som | 17 ++++++++++ .../soms_for_testing/som_test_6.som | 17 ++++++++++ IntegrationTests/test_tester.py | 33 ++++++++++++++++++- 5 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 IntegrationTests/test_runner_tests/soms_for_testing/som_test_4.som create mode 100644 IntegrationTests/test_runner_tests/soms_for_testing/som_test_5.som create mode 100644 IntegrationTests/test_runner_tests/soms_for_testing/som_test_6.som diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 16f627f9..4f95aa88 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -91,9 +91,11 @@ def parse_custom_classpath(comment): tag = tag.replace("@", "") if tag in os.environ: classpath_joined += os.environ[tag] + ":" - classpath_joined += tag + ":" continue pytest.fail(f"Environment variable {tag} not set") + # Add a normal classpath inside of tags + classpath_joined += tag + ":" + else: classpath_t = classpath_t.replace("@", "") if classpath_t in os.environ: @@ -101,7 +103,8 @@ def parse_custom_classpath(comment): else: pytest.fail(f"Environment variable {classpath_t} should be set") - classpath = classpath_joined + # Remove the final ":" + classpath = classpath_joined[:-1] return classpath return None diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_4.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_4.som new file mode 100644 index 00000000..48845925 --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_4.som @@ -0,0 +1,17 @@ +" +VM: + status: success + case_sensitive: False + custom_classpath: 
@AWFYtest:@experimental:@oneWord + stdout: + ... + stderr: + ... +" + +som_test_4 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. + ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_5.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_5.som new file mode 100644 index 00000000..dbfdba1f --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_5.som @@ -0,0 +1,17 @@ +" +VM: + status: success + case_sensitive: False + custom_classpath: one/the/outside:@AWFYtest:then/another/one + stdout: + ... + stderr: + ... +" + +som_test_5 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. + ) +) \ No newline at end of file diff --git a/IntegrationTests/test_runner_tests/soms_for_testing/som_test_6.som b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_6.som new file mode 100644 index 00000000..4785f622 --- /dev/null +++ b/IntegrationTests/test_runner_tests/soms_for_testing/som_test_6.som @@ -0,0 +1,17 @@ +" +VM: + status: success + case_sensitive: False + custom_classpath: @IDontExist + stdout: + ... + stderr: + ... +" + +som_test_6 = ( + run = ( + 'this is just for parsing / testing of som features within test_runner' println. + 'this should not be run as part of the testing framework' println. + ) +) \ No newline at end of file diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index d68412e4..a2853053 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -5,6 +5,8 @@ import os import pytest +from _pytest.outcomes import Failed + from test_runner import ( parse_test_file, read_directory, @@ -71,6 +73,30 @@ def test_parse_file(): assert result_tuple[3] == custom_classpath assert result_tuple[4] is case_sensitive + # Now test the ability to parse a test file which contains a + # @tag classpath object som_test_4.som + custom_classpath = "core-lib/AreWeFastYet/Core:experiments/Classpath:anotherOne" + os.environ["AWFYtest"] = "core-lib/AreWeFastYet/Core" + os.environ["experimental"] = "experiments/Classpath" + os.environ["oneWord"] = "anotherOne" + + result_tuple = parse_test_file(soms_for_testing_location + "/som_test_4.som") + assert result_tuple[1] == exp_stdout + assert result_tuple[2] == exp_stderr + assert result_tuple[3] == custom_classpath + assert result_tuple[4] is case_sensitive + + # Now test the ability to interleave regular classpaths + custom_classpath = "one/the/outside:core-lib/AreWeFastYet/Core:then/another/one" + result_tuple = parse_test_file(soms_for_testing_location + "/som_test_5.som") + assert result_tuple[1] == exp_stdout + assert result_tuple[2] == exp_stderr + assert result_tuple[3] == custom_classpath + assert result_tuple[4] is case_sensitive + + # Now assert a failure on a classpath envvar that hasnt been set + with pytest.raises(Failed, match=r"Environment variable IDontExist should be set"): + parse_test_file(soms_for_testing_location + "/som_test_6.som") @pytest.mark.tester def test_test_discovery(): @@ -90,6 +116,9 @@ def test_test_discovery(): f"{str(test_runner_tests_location)}/soms_for_testing/som_test_1.som", f"{str(test_runner_tests_location)}/soms_for_testing/som_test_2.som", f"{str(test_runner_tests_location)}/soms_for_testing/som_test_3.som", + 
f"{str(test_runner_tests_location)}/soms_for_testing/som_test_4.som", + f"{str(test_runner_tests_location)}/soms_for_testing/som_test_5.som", + f"{str(test_runner_tests_location)}/soms_for_testing/som_test_6.som", ] assert tests == expected_tests @@ -189,6 +218,8 @@ def test_different_yaml(): os.path.dirname(__file__) + "/test_runner_tests/yaml_for_testing" ) full_path_from_cwd = os.path.relpath(os.path.dirname(__file__)) + if full_path_from_cwd == ".": + full_path_from_cwd = "" # Read a yaml file with nothing after tag (Should all be empty lists) read_test_exceptions(yaml_for_testing_location + "/missing_known_declaration.yaml") @@ -214,7 +245,7 @@ def test_different_yaml(): # Read a yaml file where each tag has one test included # [core-lib/IntegrationTests/Tests/mutate_superclass_method/test.som] read_test_exceptions(yaml_for_testing_location + "/tests_in_each.yaml") - test_list = [f"{str(full_path_from_cwd)}/Tests/mutate_superclass_method/test.som"] + test_list = [f"{str(full_path_from_cwd)}Tests/mutate_superclass_method/test.som"] assert external_vars.known_failures == test_list assert external_vars.failing_as_unspecified == test_list assert external_vars.unsupported == test_list From d33082f0da5c677c03e37b6caad488817ba99d19 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Fri, 18 Jul 2025 10:43:46 +0100 Subject: [PATCH 26/38] Updated README to better represent the project --- IntegrationTests/README.md | 82 +++++++++++++++++++++++++++++--------- 1 file changed, 64 insertions(+), 18 deletions(-) diff --git a/IntegrationTests/README.md b/IntegrationTests/README.md index c4e27e3b..07a4a148 100644 --- a/IntegrationTests/README.md +++ b/IntegrationTests/README.md @@ -2,6 +2,10 @@ Most of the tests for the integration testing come from lang_tests of [yksom](https://github.com/softdevteam/yksom/tree/master/lang_tests). Tests are identified by their path from core-lib to test.som, this ensures there can be multiple tests named test.som in different directories. +This integration test does not replace the unit tests located in TestSuite but acts as a compliment to it. These integration tests can test more than unit tests can: +- SOM level errors that would cause the VM to exit +- Multiple different classpaths + ## Running the Integration Tests The tests can be run using pytest by simply running pytest in the base directory of any SOM implementation that includes a version of the core-library with IntegrationTests. It requires multiple python modules installed and environment variables set. @@ -13,17 +17,6 @@ VM=./path-to-build CLASSPATH=./core-lib/Smalltalk python3 -m pytest ### Optionals A set of optionals have been created for this test suite which can be added. -#### Executing the test_runner tests -This optional flag will not execute any normal SOM tests but instead test the test_runner. -``` -VM=./path-to-build CLASSPATH=./core-lib/Smalltalk python3 -m pytest -m tester -``` - -## Prerequisites -- [PyYaml](https://pypi.org/project/PyYAML/) -- [Pytest](https://pypi.org/project/pytest/) - - ## Environment variables The environment variables are split into required and optional. Some optionals may be required for different implementations of SOM. @@ -32,9 +25,11 @@ This is the path from the current working directory to the executable VM of SOM. #### CLASSPATH The exact classpath required by SOM to find the Object class etc. #### TEST_EXCEPTIONS -A yaml file which details the tags of tests. Specifically it labels tests that are expected to fail for one reason or another. 
**Give the whole path to the file**. +A yaml file which details the tags of tests. Specifically it labels tests that are expected to fail for one reason or another. **Give the whole path to the file from CWD**. #### GENERATE_REPORT Generates a yaml file which can be used as a **TEST_EXCEPTIONS** file. It will also include additional information about how many tests passed, which tests passed that were not expected to and which tests failed. **Give a full path from CWD to where it should be saved including .yaml**. +#### ARBITRARY ENVVARS +When setting custom_classpaths in a test environment variables can be specified to replace tags in those tests, specify those along with all the other variables being specified. Check custom_classpath for more information on runtime classpaths. ## TEST_EXCEPTIONS (How to write a file) There are four tags that are currently supported by the SOM integration tests. All tags will run the tests still, other than do_not_run, but will not fail on test failure, a tagged test will cause the run to fail only when it passes unexpectedly. Check for example file IntegrationTests/test_tags.yaml. @@ -64,7 +59,7 @@ For a test to be collected by Pytest it has to start with a comment, the comment " VM: status: error - custom_classpath: ./core-lib/Examples/AreWeFastYet/Core:./core-lib/Smalltalk + custom_classpath: @AWFY:example/classpath:@CLASSPATH case_sensitive: False stdout: 1000 @@ -87,12 +82,57 @@ This is required as the base of the test structure and what allows the tests to #### custom_classpath: This allows for the specification of a custom classpath to be used. This is useful for loading different versions of classes with the same name. I.e. AWFY Vector instead of core-lib Vector. **The path to ./Smalltalk must still be specified after so that the Object class can be loaded** +Tags can be used to specify a different classpaths at runtime, this is generally recommended otherwise tests would be directory dependent. These tags can be specified with ```@tag``` where tag is the **exact** spelling and caputalisation of the environment variable that matches. Currently to run the tests ```@AWFY``` must be specified alongside ```@CLASSPATH```. + #### case_sensitive By default the tests are case insensitive (All outputs and expecteds are converted to be lower case) but by specifying True in case_sensitive that test can be checked as case_sensitive. #### stderr or stdout: -This is your expected output, each new line will be a new "thing" to check for. Writing ... signifies a gap in checking, the output does not have to feature this gap but may do. For example: +This is your expected output, each new line will be a new "thing" to check for. Writing ... signifies a gap in checking, the output does not have to feature this gap but may do. Another option that is featured in stdout, stderr checking is *** which signifies an arbitrary precision "word". + +**Please note that *** is not compatible in the same line as ...** +```python +# not accpeted +... 1.11***11 ... + +# accpeted +... this line ... +Hel***lo +... another ... line +``` + +### Understanding how the "***" works in test_runner +A word is loosely defined as any connected string of characters in this instance, it can be both numbers or letters. What placing the *** in the word does is as follows: +1. All characters before the *** must be present +2. Not all characters after the *** have to be present, but if they are present must match exactly. +3. There cannot be more characters than the stdout specifies. 
+ +This allows for different SOM implementations to pass tests on different levels of precision. But no SOM will pass on an incorrect result. + +#### Example +```python +# Expected +stdout: + 1.111***123 +# Accepted outputs + +1.111 +1.1111 +1.11112 +1.111123 + +# Not accepted +1.1 +1.11 +1.111124 +1.1111234 +``` + +### Understanding how the "..." works in test_runner +There are situations where the ... is necessary for your output. Here are some example use cases, when they may be necessary and how to write the tests for it. As a preface the check_output will check a line as a whole so writing ... allows for a gap, a more precise check can be made by including as much of the expected output as possible. + +#### Possible evaluations of "..." ``` stdout: Hello, World @@ -111,9 +151,6 @@ Hello, World Goodbye ``` -### Understanding how the "..." works in test_runner -There are situations where the ... is necessary for your output. Here are some example use cases, when they may be necessary and how to write the tests for it. As a preface the check_output will check a line as a whole so writing ... allows for a gap, a more precise check can be made by including as much of the expected output as possible. - Line 1 in the below expected stdout says match on a whole line which has Hello, some other text as a gap then the word sample then whatever comes after on that line. Line 2 specifies that we must end with the word line. Whilst line 3 says somewhere in this line the word little must appear. #### Stdout @@ -137,4 +174,13 @@ VM: ### When not to use "..." - When the word you are searching for is the end of the line do not do this "*word* ...". -- When the word you are searching for is at the beginning of the line do not do this "... *word*" \ No newline at end of file +- When the word you are searching for is at the beginning of the line do not do this "... *word*" + +## Developing the test_runner +For development of the test_runner with more features in the future I have created another set of tests that can be run. These tests test the test_runner itself, they make sure parsing a test file works, output checking works and setting dynamic classpaths works as expected. 
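Pulling the stdout/stderr matching rules described above together, here is a compact sketch of line-level checking with "..." gaps. It is illustrative only and simplified; the runner's actual `check_exp_given` may behave differently in edge cases:

```python
def output_matches(output_lines, expected_lines):
    """Expected lines must appear in order as substrings of the output.

    A line that is just "..." documents a gap; in this simplified sketch gaps
    are tolerated everywhere, so "..." only records the author's intent.
    """
    pos = 0
    for exp in expected_lines:
        if exp == "...":
            continue  # gap marker: nothing to match here
        while pos < len(output_lines) and exp not in output_lines[pos]:
            pos += 1
        if pos == len(output_lines):
            return False  # expected line never found after the previous match
        pos += 1
    return True


# output_matches(["Hello, World", "Hey", "Goodbye"],
#                ["Hello, World", "...", "Goodbye"])  -> True
```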
+ + +#### Run this command below to execute those tests +``` +pytest -m tester +``` \ No newline at end of file From 25ba5d3bf74c9f28b018ecb02006110789f68279 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Fri, 18 Jul 2025 12:27:00 +0100 Subject: [PATCH 27/38] Added a new test for check_partial_word --- IntegrationTests/test_tester.py | 35 +++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index a2853053..a9237100 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -12,9 +12,43 @@ read_directory, check_exp_given, read_test_exceptions, + check_partial_word, ) import conftest as external_vars +@pytest.mark.tester +def test_check_partial_word(): + """ + Test whether checking a partial word works correctly + """ + + expected = "1.111111111***1111111111" + assert not check_partial_word("", expected) + assert not check_partial_word("1", expected) + assert not check_partial_word("1.", expected) + assert not check_partial_word("1.1", expected) + assert not check_partial_word("1.11", expected) + assert not check_partial_word("1.111", expected) + assert not check_partial_word("1.1111", expected) + assert not check_partial_word("1.11111", expected) + assert not check_partial_word("1.111111", expected) + assert not check_partial_word("1.1111111", expected) + assert not check_partial_word("1.11111111", expected) + assert check_partial_word("1.111111111", expected) + assert check_partial_word("1.1111111111", expected) + assert not check_partial_word("1.1111111112", expected) + assert check_partial_word("1.11111111111", expected) + assert check_partial_word("1.111111111111", expected) + assert check_partial_word("1.1111111111111", expected) + assert check_partial_word("1.11111111111111", expected) + assert check_partial_word("1.111111111111111", expected) + assert not check_partial_word("1.211111111111111", expected) + assert check_partial_word("1.1111111111111111", expected) + assert check_partial_word("1.11111111111111111", expected) + assert check_partial_word("1.111111111111111111", expected) + assert check_partial_word("1.1111111111111111111", expected) + assert not check_partial_word("1.11111111111111111111", expected) + assert not check_partial_word("1.11111111111111111112", expected) @pytest.mark.tester def test_parse_file(): @@ -112,6 +146,7 @@ def test_test_discovery(): read_directory(test_runner_tests_location, tests) tests = sorted(tests) + # If a new tests is added to soms_for_testing then please add it here expected_tests = [ f"{str(test_runner_tests_location)}/soms_for_testing/som_test_1.som", f"{str(test_runner_tests_location)}/soms_for_testing/som_test_2.som", From 98b1d9c19b215ecdb8a38de663e19ccbe36f1b06 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Fri, 18 Jul 2025 12:27:21 +0100 Subject: [PATCH 28/38] Ran black --- IntegrationTests/test_tester.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index a9237100..265cdf1c 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -16,6 +16,7 @@ ) import conftest as external_vars + @pytest.mark.tester def test_check_partial_word(): """ @@ -50,6 +51,7 @@ def test_check_partial_word(): assert not check_partial_word("1.11111111111111111111", expected) assert not check_partial_word("1.11111111111111111112", expected) + @pytest.mark.tester def test_parse_file(): """ @@ -132,6 +134,7 @@ def test_parse_file(): with 
pytest.raises(Failed, match=r"Environment variable IDontExist should be set"): parse_test_file(soms_for_testing_location + "/som_test_6.som") + @pytest.mark.tester def test_test_discovery(): """ From 4a46a6e3be87b61114a3d27f954faca078bae4e8 Mon Sep 17 00:00:00 2001 From: Rhys Walker Date: Fri, 18 Jul 2025 12:53:07 +0100 Subject: [PATCH 29/38] Added new tests and updated formatting and style checks accordingly --- IntegrationTests/conftest.py | 8 ++- IntegrationTests/test_runner.py | 2 +- IntegrationTests/test_tester.py | 120 +++++++++++++++++++++++++++++++- 3 files changed, 125 insertions(+), 5 deletions(-) diff --git a/IntegrationTests/conftest.py b/IntegrationTests/conftest.py index 90364311..a1e157dd 100644 --- a/IntegrationTests/conftest.py +++ b/IntegrationTests/conftest.py @@ -93,7 +93,9 @@ def pytest_sessionfinish(exitstatus): if len(tests_failed_unexpectedly) != 0: for test in tests_failed_unexpectedly: # Remove the part of the path that is incompatible with multiple directory running - known_failures.append("Tests/" + str(test).split("Tests/")[-1]) + known_failures.append( + "Tests/" + str(test).rsplit("Tests/", maxsplit=1)[-1] + ) # Generate a report_message to save report_data = { @@ -107,11 +109,11 @@ def pytest_sessionfinish(exitstatus): }, "unexpected": { "passed": [ - "Tests/" + str(test).split("Tests/")[-1] + "Tests/" + str(test).rsplit("Tests/", maxsplit=1)[-1] for test in tests_passed_unexpectedly ], "failed": [ - "Tests/" + str(test).split("Tests/")[-1] + "Tests/" + str(test).rsplit("Tests/", maxsplit=1)[-1] for test in tests_failed_unexpectedly ], }, diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 4f95aa88..892af8c5 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -101,7 +101,7 @@ def parse_custom_classpath(comment): if classpath_t in os.environ: classpath_joined += os.environ[classpath_t] else: - pytest.fail(f"Environment variable {classpath_t} should be set") + pytest.fail(f"Environment variable {classpath_t} not set") # Remove the final ":" classpath = classpath_joined[:-1] diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index 265cdf1c..5f21c427 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -13,6 +13,10 @@ check_exp_given, read_test_exceptions, check_partial_word, + parse_custom_classpath, + parse_case_sensitive, + parse_stdout, + parse_stderr, ) import conftest as external_vars @@ -131,7 +135,7 @@ def test_parse_file(): assert result_tuple[4] is case_sensitive # Now assert a failure on a classpath envvar that hasnt been set - with pytest.raises(Failed, match=r"Environment variable IDontExist should be set"): + with pytest.raises(Failed, match=r"Environment variable IDontExist not set"): parse_test_file(soms_for_testing_location + "/som_test_6.som") @@ -294,3 +298,117 @@ def test_different_yaml(): external_vars.failing_as_unspecified = temp_unspecified external_vars.unsupported = temp_unsupported external_vars.do_not_run = temp_do_not_run + + +# ######################################### # +# ALL TEST BELOW HERE SHARE THESE COMMENTS # +# ######################################### # + +COMMENT_TESTERS = """ +VM: + status: success + case_sensitive: True + custom_classpath: @custom_1:./some/other/one:@custom_2 + stdout: + Some random output + ... some other output + even more output ... + ... + the last bit std + stderr: + Some random error + ... some other error + even more error ... + ... 
+ the last bit of error +""" + +# Causes fail on parse_custom_classpath +# False in case_sensitive +COMMENT_TESTERS_2 = """ +VM: + status: success + case_sensitive: False + custom_classpath: @no_exist_1:./some/other/one:@no_exist_2 + stdout: + ... + stderr: + ... +""" + + +@pytest.mark.tester +def test_custom_classpath(): + """ + Test parsing a custom_classpath + """ + os.environ["custom_1"] = "classpath_1" + os.environ["custom_2"] = "classpath_2" + + expected = "classpath_1:./some/other/one:classpath_2" + + assert expected == parse_custom_classpath(COMMENT_TESTERS) + + # Now assert a failure on a classpath envvar that hasnt been set + with pytest.raises(Failed, match=r"Environment variable no_exist_1 not set"): + parse_custom_classpath(COMMENT_TESTERS_2) + + os.environ["no_exist_1"] = "exists_1" + + # Now assert we fail on the second + with pytest.raises(Failed, match=r"Environment variable no_exist_2 not set"): + parse_custom_classpath(COMMENT_TESTERS_2) + + os.environ["no_exist_2"] = "exists_2" + + # Now we should pass + expected = "exists_1:./some/other/one:exists_2" + assert expected == parse_custom_classpath(COMMENT_TESTERS_2) + + +@pytest.mark.tester +def test_case_sensitive(): + """ + Test that parsing case_sensitive generates the correct values + """ + assert parse_case_sensitive(COMMENT_TESTERS) + assert not parse_case_sensitive(COMMENT_TESTERS_2) + + +# THESE BELOW MUST BE DIFFERENT EVEN THOUGH THE FUNCTIONS DO ESSENTIALLY THE SAME THING + + +@pytest.mark.tester +def test_parse_stdout(): + """ + Check that parsing the test comment generates the correct output + """ + comment_testers_expected_1 = [ + "Some random output", + "... some other output", + "even more output ...", + "...", + "the last bit std", + ] + comment_testers_expected_2 = ["..."] + + assert comment_testers_expected_1 == parse_stdout(COMMENT_TESTERS) + assert comment_testers_expected_2 == parse_stdout(COMMENT_TESTERS_2) + + +@pytest.mark.tester +def test_parse_stderr(): + """ + Check that parsing the test comment generates the correct output + """ + comment_testers_expected_1 = [ + "Some random error", + "... 
some other error", + "even more error ...", + "...", + "the last bit of error", + ] + comment_testers_expected_2 = ["..."] + + assert comment_testers_expected_1 == parse_stderr(COMMENT_TESTERS) + assert comment_testers_expected_2 == parse_stderr(COMMENT_TESTERS_2) From bd513b51985d97cb88dc609134fe01b549da7515 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Fri, 18 Jul 2025 22:56:17 +0100 Subject: [PATCH 30/38] Fix typo Signed-off-by: Stefan Marr --- IntegrationTests/test_runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 892af8c5..075f616f 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -458,7 +458,7 @@ def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): Cleanup the build directory if required """ - # Check if a test shoudld not be ran + # Check if a test should not be ran if str(name) in external_vars.do_not_run: pytest.skip("Test included in do_not_run") From 1790a5596d43195397b275e7b9130840ac899959 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Fri, 18 Jul 2025 22:56:59 +0100 Subject: [PATCH 31/38] =?UTF-8?q?Turn=20test=20into=20two=20parameterized?= =?UTF-8?q?=20ones,=20which=20is=20more=20explicit=20about=20what=E2=80=99?= =?UTF-8?q?s=20going=20on?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stefan Marr --- IntegrationTests/test_tester.py | 76 +++++++++++++++++++-------------- 1 file changed, 43 insertions(+), 33 deletions(-) diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index 5f21c427..1ae7581d 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -1,6 +1,6 @@ # pylint: disable=missing-function-docstring """ -Tests that will check if the test_runner is wokring ok +Tests for the tester functionality itself. 
""" import os @@ -21,39 +21,49 @@ import conftest as external_vars +@pytest.mark.parametrize( + "word", + [ + "1.111111111", + "1.1111111111", + "1.11111111111", + "1.1111111111111", + "1.11111111111111", + "1.111111111111111", + "1.1111111111111111", + "1.11111111111111111", + "1.111111111111111111", + "1.1111111111111111111", + ], +) @pytest.mark.tester -def test_check_partial_word(): - """ - Test whether checking a partial word works correctly - """ - - expected = "1.111111111***1111111111" - assert not check_partial_word("", expected) - assert not check_partial_word("1", expected) - assert not check_partial_word("1.", expected) - assert not check_partial_word("1.1", expected) - assert not check_partial_word("1.11", expected) - assert not check_partial_word("1.111", expected) - assert not check_partial_word("1.1111", expected) - assert not check_partial_word("1.11111", expected) - assert not check_partial_word("1.111111", expected) - assert not check_partial_word("1.1111111", expected) - assert not check_partial_word("1.11111111", expected) - assert check_partial_word("1.111111111", expected) - assert check_partial_word("1.1111111111", expected) - assert not check_partial_word("1.1111111112", expected) - assert check_partial_word("1.11111111111", expected) - assert check_partial_word("1.111111111111", expected) - assert check_partial_word("1.1111111111111", expected) - assert check_partial_word("1.11111111111111", expected) - assert check_partial_word("1.111111111111111", expected) - assert not check_partial_word("1.211111111111111", expected) - assert check_partial_word("1.1111111111111111", expected) - assert check_partial_word("1.11111111111111111", expected) - assert check_partial_word("1.111111111111111111", expected) - assert check_partial_word("1.1111111111111111111", expected) - assert not check_partial_word("1.11111111111111111111", expected) - assert not check_partial_word("1.11111111111111111112", expected) +def test_check_partial_word_matches(word): + assert check_partial_word(word, "1.111111111***1111111111") + + +@pytest.mark.parametrize( + "word", + [ + "", + "1", + "1.", + "1.1", + "1.11", + "1.111", + "1.1111", + "1.11111", + "1.111111", + "1.1111111", + "1.11111111", + "1.1111111112", + "1.211111111111111", + "1.11111111111111111111", + "1.11111111111111111112", + ], +) +@pytest.mark.tester +def test_check_partial_word_does_not_match(word): + assert not check_partial_word(word, "1.111111111***1111111111") @pytest.mark.tester From d05311043be2e4a1c6504e3f5978416d89178d9d Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Fri, 18 Jul 2025 23:10:09 +0100 Subject: [PATCH 32/38] Use more parameterized tests, and split up different scenarios Signed-off-by: Stefan Marr --- IntegrationTests/test_tester.py | 96 ++++++++++++++------------------- 1 file changed, 39 insertions(+), 57 deletions(-) diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index 1ae7581d..03de5ead 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -66,85 +66,67 @@ def test_check_partial_word_does_not_match(word): assert not check_partial_word(word, "1.111111111***1111111111") -@pytest.mark.tester -def test_parse_file(): - """ - Test that the test_runner can parse a file correctly. - Expected output should be lower-case - """ - - # EXAMPLE TUPLE - # 0. test_info_dict["name"], - # 1. test_info_dict["stdout"], - # 2. test_info_dict["stderr"], - # 3. test_info_dict["custom_classpath"], - # 4. 
test_info_dict["case_sensitive"], - - soms_for_testing_location = os.path.relpath( +@pytest.fixture +def soms_for_testing_location(): + return os.path.relpath( os.path.dirname(__file__) + "/test_runner_tests/soms_for_testing" ) - # Firstly assign what is expected from parsing som_test_1.som - exp_stdout = ["1", "2", "3", "4", "5", "...", "10"] - exp_stderr = ["this is an error", "...", "hello, world"] - custom_classpath = None - case_sensitive = False - - # Parse test and assert values are as above - result_tuple = parse_test_file(soms_for_testing_location + "/som_test_1.som") - assert result_tuple[1] == exp_stdout - assert result_tuple[2] == exp_stderr - assert result_tuple[3] is custom_classpath - assert result_tuple[4] is case_sensitive - - # Firstly assign what is expected from parsing som_test_2.som - exp_stdout = ["I AM cAsE sensitiVe", "...", "Dots/inTest"] - exp_stderr = ["CaSE sensitive ErrOr", "...", "TestCaseSensitivity"] - custom_classpath = None - case_sensitive = True - - # Parse test and assert values are as above - result_tuple = parse_test_file(soms_for_testing_location + "/som_test_2.som") - assert result_tuple[1] == exp_stdout - assert result_tuple[2] == exp_stderr - assert result_tuple[3] is custom_classpath - assert result_tuple[4] is case_sensitive - - # Firstly assign what is expected from parsing som_test_3.som - exp_stdout = ["..."] - exp_stderr = ["..."] - custom_classpath = "core-lib/AreWeFastYet/Core" - case_sensitive = False - # Parse test and assert values are as above - result_tuple = parse_test_file(soms_for_testing_location + "/som_test_3.som") +@pytest.mark.parametrize( + "test_file, exp_stdout, exp_stderr, custom_classpath, case_sensitive", + [ + ( + "/som_test_1.som", + ["1", "2", "3", "4", "5", "...", "10"], + ["this is an error", "...", "hello, world"], + None, + False, + ), + ( + "/som_test_2.som", + ["I AM cAsE sensitiVe", "...", "Dots/inTest"], + ["CaSE sensitive ErrOr", "...", "TestCaseSensitivity"], + None, + True, + ), + ("/som_test_3.som", ["..."], ["..."], "core-lib/AreWeFastYet/Core", False), + ], +) +@pytest.mark.tester +def test_parse_file( + test_file, + exp_stdout, + exp_stderr, + custom_classpath, + case_sensitive, + soms_for_testing_location, +): + result_tuple = parse_test_file(soms_for_testing_location + test_file) assert result_tuple[1] == exp_stdout assert result_tuple[2] == exp_stderr assert result_tuple[3] == custom_classpath assert result_tuple[4] is case_sensitive - # Now test the ability to parse a test file which contains a - # @tag classpath object som_test_4.som + +@pytest.mark.tester +def test_parse_file_correctly_using_envvars(soms_for_testing_location): custom_classpath = "core-lib/AreWeFastYet/Core:experiments/Classpath:anotherOne" os.environ["AWFYtest"] = "core-lib/AreWeFastYet/Core" os.environ["experimental"] = "experiments/Classpath" os.environ["oneWord"] = "anotherOne" result_tuple = parse_test_file(soms_for_testing_location + "/som_test_4.som") - assert result_tuple[1] == exp_stdout - assert result_tuple[2] == exp_stderr assert result_tuple[3] == custom_classpath - assert result_tuple[4] is case_sensitive # Now test the ability to interleave regular classpaths custom_classpath = "one/the/outside:core-lib/AreWeFastYet/Core:then/another/one" result_tuple = parse_test_file(soms_for_testing_location + "/som_test_5.som") - assert result_tuple[1] == exp_stdout - assert result_tuple[2] == exp_stderr assert result_tuple[3] == custom_classpath - assert result_tuple[4] is case_sensitive - # Now assert a failure on a classpath 
envvar that hasnt been set + +@pytest.mark.tester +def test_parse_file_failing_because_of_envvar_not_being_set(soms_for_testing_location): with pytest.raises(Failed, match=r"Environment variable IDontExist not set"): parse_test_file(soms_for_testing_location + "/som_test_6.som") From 1a0066ae02397b7944cb812ca62a8f6ffcd2cff0 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Fri, 18 Jul 2025 23:17:29 +0100 Subject: [PATCH 33/38] Rename function to be more explicit, and make code less verbose Signed-off-by: Stefan Marr --- IntegrationTests/test_runner.py | 6 +++--- IntegrationTests/test_tester.py | 21 ++++++--------------- 2 files changed, 9 insertions(+), 18 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 075f616f..f178e417 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -29,7 +29,7 @@ def locate_tests(path, test_files): return test_files -def read_directory(path, test_files): +def discover_test_files(path, test_files): """ Recursively read all sub directories Path is the directory we are currently in @@ -37,7 +37,7 @@ def read_directory(path, test_files): """ for directory in Path(path).iterdir(): if directory.is_dir(): - read_directory(directory, test_files) + discover_test_files(directory, test_files) else: continue @@ -423,7 +423,7 @@ def prepare_tests(): read_test_exceptions(external_vars.TEST_EXCEPTIONS) test_files = [] - read_directory(location + "/Tests", test_files) + discover_test_files(location + "/Tests", test_files) test_files = sorted(test_files) return collect_tests(test_files) diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index 03de5ead..1bcbc530 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -9,7 +9,7 @@ from test_runner import ( parse_test_file, - read_directory, + discover_test_files, check_exp_given, read_test_exceptions, check_partial_word, @@ -132,27 +132,18 @@ def test_parse_file_failing_because_of_envvar_not_being_set(soms_for_testing_loc @pytest.mark.tester -def test_test_discovery(): - """ - Test the som test discovery methods in the test_runner_tests directory - Three tests should be located, Update this method if more tests are added - """ - # Locate all SOM tests +def test_discover_test_files(): test_runner_tests_location = os.path.relpath( os.path.dirname(__file__) + "/test_runner_tests" ) tests = [] - read_directory(test_runner_tests_location, tests) + discover_test_files(test_runner_tests_location, tests) tests = sorted(tests) - # If a new tests is added to soms_for_testing then please add it here expected_tests = [ - f"{str(test_runner_tests_location)}/soms_for_testing/som_test_1.som", - f"{str(test_runner_tests_location)}/soms_for_testing/som_test_2.som", - f"{str(test_runner_tests_location)}/soms_for_testing/som_test_3.som", - f"{str(test_runner_tests_location)}/soms_for_testing/som_test_4.som", - f"{str(test_runner_tests_location)}/soms_for_testing/som_test_5.som", - f"{str(test_runner_tests_location)}/soms_for_testing/som_test_6.som", + f"{test_runner_tests_location}/soms_for_testing/som_test_{i}.som" + # add missing tests here, or make the test independent of the actual test files + for i in range(1, 7) ] assert tests == expected_tests From ff4eb90c6b32a8991bebbdecdc13c05b447db03c Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Fri, 18 Jul 2025 23:17:36 +0100 Subject: [PATCH 34/38] Pylint Signed-off-by: Stefan Marr --- IntegrationTests/test_tester.py | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index 1bcbc530..ff290282 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -1,4 +1,4 @@ -# pylint: disable=missing-function-docstring +# pylint: disable=missing-function-docstring,redefined-outer-name """ Tests for the tester functionality itself. """ @@ -94,7 +94,7 @@ def soms_for_testing_location(): ], ) @pytest.mark.tester -def test_parse_file( +def test_parse_file( # pylint: disable=too-many-arguments,too-many-positional-arguments test_file, exp_stdout, exp_stderr, From 173a46bfae3ef71f2f48b9c272aa04a4a029fe88 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sat, 19 Jul 2025 10:22:51 +0100 Subject: [PATCH 35/38] Rename to check_output_matches and return boolean Signed-off-by: Stefan Marr --- IntegrationTests/test_runner.py | 27 +++---- IntegrationTests/test_tester.py | 131 +++++++++++++++++--------------- 2 files changed, 78 insertions(+), 80 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index f178e417..fb47f0fa 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -268,21 +268,19 @@ def check_partial_word(word, exp_word): return True -def check_exp_given(given, expected): +def check_output_matches(given, expected): """ Check if the expected output is contained in the given output - given: list of strings representing some kind of SOM output + given: list of strings representing the actual output expected: list of strings representing the expected output - - return: 1 if success 0 if failure """ # Check if the stdout matches the expected stdout exp_std_inx = 0 for g_out in given: # Check that checks don't pass before out of outputs if exp_std_inx >= len(expected): - return 1 + return True if expected[exp_std_inx] == "...": # If the expected output is '...' 
then we skip this line @@ -293,7 +291,7 @@ def check_exp_given(given, expected): if "***" in expected[exp_std_inx]: # Now do some partial checking partial_output = check_partial_word(g_out, expected[exp_std_inx]) - if partial_output is True: + if partial_output: exp_std_inx += 1 continue @@ -313,9 +311,9 @@ def check_exp_given(given, expected): if exp_std_inx != len(expected): # It is not all contained in the output - return 0 + return False - return 1 + return True def check_output(test_outputs, expected_std_out, expected_std_err): @@ -337,16 +335,9 @@ def check_output(test_outputs, expected_std_out, expected_std_err): given_std_out = test_outputs.stdout.split("\n") given_std_err = test_outputs.stderr.split("\n") - passing = 0 - - passing += check_exp_given(given_std_out, expected_std_out) - passing += check_exp_given(given_std_err, expected_std_err) - - if passing == 2: - # If we have two passing then we know we have what we expect - return True - - return False + return check_output_matches( + given_std_out, expected_std_out + ) and check_output_matches(given_std_err, expected_std_err) # Read the test exceptions file and set the variables correctly diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index ff290282..3fe487f5 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -10,7 +10,7 @@ from test_runner import ( parse_test_file, discover_test_files, - check_exp_given, + check_output_matches, read_test_exceptions, check_partial_word, parse_custom_classpath, @@ -149,27 +149,15 @@ def test_discover_test_files(): assert tests == expected_tests -@pytest.mark.tester -def test_check_output(): - """ - Test that the check_output function complies with the expected output. - """ - - stdout = "Hello World\nSome other output in the Middle\nThis is a test\n" - - expected_stdout = ["hello world", "...", "this is a test"] - - # For checking case sensitivity all are converted to lower at differnt parts of the program - # This just simulates that. It is very difficult to actually run this - - # Check case sensitive - assert check_exp_given(stdout.split("\n"), expected_stdout) == 0 - - # Check case insensitive - assert check_exp_given(stdout.lower().split("\n"), expected_stdout) == 1 - - # Check large output file with ... used inline at beginning and at end - stdout = """This is SOM++ +@pytest.mark.parametrize( + "out, expected", + [ + ( + "Hello World\nSome other output in the Middle\nThis is a test\n".lower(), + ["hello world", "...", "this is a test"], + ), + ( + """This is SOM++ Hello Rhys this is some sample output 1\n2\n3\n4\n4\n56\n6\n7\n7\n8\n9\n9 1010101\n10101\n1010101 @@ -179,51 +167,70 @@ def test_check_output(): Extra text more Numbers NUMBER NUMBER NUMBER NUMBER -""" - expected_stdout = [ - "Hello ... this is ... sample output", - "Rhys Walker", - "... on", - "more ...", - "... NUMBER ... NUMBER", - ] - - assert check_exp_given(stdout.split("\n"), expected_stdout) == 1 - - stdout = """This is SOM++ +""", + [ + "Hello ... this is ... sample output", + "Rhys Walker", + "... on", + "more ...", + "... NUMBER ... NUMBER", + ], + ), + ( + """This is SOM++ Hello, this is some sample output There is some more on this line And a little more here -""" - expected_stdout = [ - "Hello, ... sample ...", - "... is ... this line", - "... 
little ...", - ] - - assert check_exp_given(stdout.split("\n"), expected_stdout) == 1 - - expected = ["...", "Really***LongWord"] - - stdout = "Some output, as an example\nExtra Line\nReallyLongWord" - assert check_exp_given(stdout.split("\n"), expected) - - stdout = "Some output, as an example\nExtra Line\nReally" - assert check_exp_given(stdout.split("\n"), expected) - - stdout = "Some output, as an example\nExtra Line\nReallyLong" - assert check_exp_given(stdout.split("\n"), expected) - - stdout = "Some output, as an example\nExtra Line\nReallyLo" - assert check_exp_given(stdout.split("\n"), expected) +""", + [ + "Hello, ... sample ...", + "... is ... this line", + "... little ...", + ], + ), + ( + "Some output, as an example\nExtra Line\nReallyLongWord", + ["...", "Really***LongWord"], + ), + ( + "Some output, as an example\nExtra Line\nReally", + ["...", "Really***LongWord"], + ), + ( + "Some output, as an example\nExtra Line\nReallyLong", + ["...", "Really***LongWord"], + ), + ( + "Some output, as an example\nExtra Line\nReallyLo", + ["...", "Really***LongWord"], + ), + ], +) +@pytest.mark.tester +def test_check_output_matches(out, expected): + assert check_output_matches(out.split("\n"), expected) - # Now assert some failures to test when it should fail - stdout = "Some output, as an example\nExtra Line\nReallyLongTestFunction" - assert not check_exp_given(stdout.split("\n"), expected) - # This one should fail as there is still more word than expected - stdout = "Some output, as an example\nExtra Line\nReallyLongWordExtra" - assert not check_exp_given(stdout.split("\n"), expected) +@pytest.mark.parametrize( + "out, expected", + [ + ( + "Hello World\nSome other output in the Middle\nThis is a test\n", + ["hello world", "...", "this is a test"], + ), + ( + "Some output, as an example\nExtra Line\nReallyLongTestFunction", + ["...", "Really***LongWord"], + ), + ( + "Some output, as an example\nExtra Line\nReallyLongWordExtra", + ["...", "Really***LongWord"], + ), + ], +) +@pytest.mark.tester +def test_check_output_does_not_match(out, expected): + assert not check_output_matches(out.split("\n"), expected) @pytest.mark.tester From 5bc336f788577964df615e5aa241e7cc92e72a84 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sat, 19 Jul 2025 10:41:14 +0100 Subject: [PATCH 36/38] Simplify discover_test_files() Signed-off-by: Stefan Marr --- IntegrationTests/test_runner.py | 33 +++++++++++---------------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index fb47f0fa..5094f510 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -13,35 +13,24 @@ import conftest as external_vars -def locate_tests(path, test_files): - """ - Locate all tests which are in the current directory. 
- Add them to the list test_files and return - A check if made on if the file has VM: in it's content - """ - # To ID a file will be opened and at the top there should be a comment which starts with VM: - for file_path in Path(path).glob("*.som"): - with open(file_path, "r", encoding="utf-8") as f: +def is_som_test(path): + if path.suffix == ".som": + with open(path, "r", encoding="utf-8") as f: contents = f.read() if "VM:" in contents: - test_files.append(str(file_path)) - - return test_files + return True + return False def discover_test_files(path, test_files): """ - Recursively read all sub directories - Path is the directory we are currently in - test_files is the list of test files we are building up + Recursively read the directory tree and add all .som test files to `test_files`. """ - for directory in Path(path).iterdir(): - if directory.is_dir(): - discover_test_files(directory, test_files) - else: - continue - - locate_tests(path, test_files) + for element in Path(path).iterdir(): + if element.is_dir(): + discover_test_files(element, test_files) + elif element.is_file() and is_som_test(element): + test_files.append(str(element)) def collect_tests(test_files): From 3b13e749ff94220ccb98f0f6a61f8f41e94c3c90 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sat, 19 Jul 2025 10:41:45 +0100 Subject: [PATCH 37/38] Simplify parse_test_file Signed-off-by: Stefan Marr --- IntegrationTests/test_runner.py | 46 +++++++++++---------------------- 1 file changed, 15 insertions(+), 31 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 5094f510..515c248e 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -141,13 +141,12 @@ def parse_test_file(test_file): """ parse the test file to extract the important information """ - test_info_dict = { - "name": test_file, - "stdout": [], - "stderr": [], - "custom_classpath": None, - "case_sensitive": False, - } + name = test_file + stdout = [] + stderr = [] + custom_classpath = None + case_sensitive = False + with open(test_file, "r", encoding="utf-8") as open_file: contents = open_file.read() comment = contents.split('"')[1] @@ -155,37 +154,22 @@ def parse_test_file(test_file): # Make sure if using a custom test classpath that it is above # Stdout and Stderr if "custom_classpath" in comment: - test_info_dict["custom_classpath"] = parse_custom_classpath(comment) + custom_classpath = parse_custom_classpath(comment) - # Check if we are case sensitive (has to be toggled on) if "case_sensitive" in comment: - test_info_dict["case_sensitive"] = parse_case_sensitive(comment) + case_sensitive = parse_case_sensitive(comment) if "stdout" in comment: - test_info_dict["stdout"] = parse_stdout(comment) + stdout = parse_stdout(comment) if "stderr" in comment: - test_info_dict["stderr"] = parse_stderr(comment) - - if test_info_dict["case_sensitive"]: - test_tuple = ( - test_info_dict["name"], - test_info_dict["stdout"], - test_info_dict["stderr"], - test_info_dict["custom_classpath"], - test_info_dict["case_sensitive"], - ) - return test_tuple - - test_tuple = ( - test_info_dict["name"], - [s.lower() for s in test_info_dict["stdout"]], - [s.lower() for s in test_info_dict["stderr"]], - test_info_dict["custom_classpath"], - test_info_dict["case_sensitive"], - ) + stderr = parse_stderr(comment) + + if not case_sensitive: + stdout = [s.lower() for s in stdout] + stderr = [s.lower() for s in stderr] - return test_tuple + return name, stdout, stderr, custom_classpath, case_sensitive def 
make_a_diff(expected, given): From db4a1cc765acdbca16a58ae317e5db7f47d60513 Mon Sep 17 00:00:00 2001 From: Stefan Marr Date: Sat, 19 Jul 2025 23:42:36 +0100 Subject: [PATCH 38/38] Reify test definition as object, and make env var failures test definitions that fail as a test Signed-off-by: Stefan Marr --- IntegrationTests/test_runner.py | 195 ++++++++++++++++++++------------ IntegrationTests/test_tester.py | 28 ++--- 2 files changed, 137 insertions(+), 86 deletions(-) diff --git a/IntegrationTests/test_runner.py b/IntegrationTests/test_runner.py index 515c248e..1a7a76ae 100644 --- a/IntegrationTests/test_runner.py +++ b/IntegrationTests/test_runner.py @@ -1,4 +1,4 @@ -# pylint: disable=missing-function-docstring +# pylint: disable=missing-function-docstring, missing-class-docstring, too-many-arguments, too-many-positional-arguments, too-few-public-methods """ This is the SOM integration test runner file. Pytest automatically discovers this file and will find all .som test files in the below directories. @@ -13,6 +13,35 @@ import conftest as external_vars +class Definition: + def __init__( + self, + name: str, + stdout: list[str], + stderr: list[str], + custom_classpath: str | None, + case_sensitive: bool, + definition_fail_msg: str | None = None, + ): + self.name = name + self.stdout = stdout + self.stderr = stderr + self.custom_classpath = custom_classpath + self.case_sensitive = case_sensitive + self.definition_fail_msg = definition_fail_msg + + +class ParseError(Exception): + """ + Exception raised when a test file cannot be parsed correctly. + This is used to fail the test in the test runner. + """ + + def __init__(self, message): + super().__init__(message) + self.message = message + + def is_som_test(path): if path.suffix == ".som": with open(path, "r", encoding="utf-8") as f: @@ -33,7 +62,7 @@ def discover_test_files(path, test_files): test_files.append(str(element)) -def collect_tests(test_files): +def collect_tests(test_files) -> list[Definition]: """ Assemble a dictionary of name: the name of the test file @@ -50,7 +79,7 @@ def collect_tests(test_files): # pylint: disable=too-many-nested-blocks -def parse_custom_classpath(comment): +def parse_custom_classpath(comment) -> str | None: """ Based on the comment will calculate the custom_classpath for the current test @@ -81,7 +110,7 @@ def parse_custom_classpath(comment): if tag in os.environ: classpath_joined += os.environ[tag] + ":" continue - pytest.fail(f"Environment variable {tag} not set") + raise ParseError(f"Environment variable {tag} not set") # Add a normal classpath inside of tags classpath_joined += tag + ":" @@ -90,7 +119,7 @@ def parse_custom_classpath(comment): if classpath_t in os.environ: classpath_joined += os.environ[classpath_t] else: - pytest.fail(f"Environment variable {classpath_t} not set") + raise ParseError(f"Environment variable {classpath_t} not set") # Remove the final ":" classpath = classpath_joined[:-1] @@ -137,7 +166,7 @@ def parse_stderr(comment): return std_err_l -def parse_test_file(test_file): +def parse_test_file(test_file) -> Definition: """ parse the test file to extract the important information """ @@ -147,29 +176,32 @@ def parse_test_file(test_file): custom_classpath = None case_sensitive = False - with open(test_file, "r", encoding="utf-8") as open_file: - contents = open_file.read() - comment = contents.split('"')[1] + try: + with open(test_file, "r", encoding="utf-8") as open_file: + contents = open_file.read() + comment = contents.split('"')[1] - # Make sure if using a custom test 
classpath that it is above - # Stdout and Stderr - if "custom_classpath" in comment: - custom_classpath = parse_custom_classpath(comment) + # Make sure if using a custom test classpath that it is above + # Stdout and Stderr + if "custom_classpath" in comment: + custom_classpath = parse_custom_classpath(comment) - if "case_sensitive" in comment: - case_sensitive = parse_case_sensitive(comment) + if "case_sensitive" in comment: + case_sensitive = parse_case_sensitive(comment) - if "stdout" in comment: - stdout = parse_stdout(comment) + if "stdout" in comment: + stdout = parse_stdout(comment) - if "stderr" in comment: - stderr = parse_stderr(comment) + if "stderr" in comment: + stderr = parse_stderr(comment) - if not case_sensitive: - stdout = [s.lower() for s in stdout] - stderr = [s.lower() for s in stderr] + if not case_sensitive: + stdout = [s.lower() for s in stdout] + stderr = [s.lower() for s in stderr] + except ParseError as e: + return Definition(name, [], [], None, False, e.message) - return name, stdout, stderr, custom_classpath, case_sensitive + return Definition(name, stdout, stderr, custom_classpath, case_sensitive) def make_a_diff(expected, given): @@ -364,16 +396,41 @@ def read_test_exceptions(filename): def prepare_tests(): location = os.path.relpath(os.path.dirname(__file__)) if not os.path.exists(location + "/Tests"): - pytest.fail( - "`Tests` directory not found. Please make sure the lang_tests are installed" - ) + return [ + Definition( + "test-setup", + [], + [], + None, + False, + "`Tests` directory not found. Please make sure the lang_tests are installed", + ) + ] # Work out settings for the application (They are labelled REQUIRED or OPTIONAL) if "CLASSPATH" not in os.environ: # REQUIRED - pytest.fail("Please set the CLASSPATH environment variable") + return [ + Definition( + "test-setup", + [], + [], + None, + False, + "Please set the CLASSPATH environment variable", + ) + ] if "VM" not in os.environ: # REQUIRED - pytest.fail("Please set the VM environment variable") + return [ + Definition( + "test-setup", + [], + [], + None, + False, + "Please set the VM environment variable", + ) + ] if "TEST_EXCEPTIONS" in os.environ: # OPTIONAL external_vars.TEST_EXCEPTIONS = os.environ["TEST_EXCEPTIONS"] @@ -392,30 +449,18 @@ def prepare_tests(): return collect_tests(test_files) -def assign_ids(tests): - """ - Assign test IDs the same way as the names are treated - """ - test_ids = [] - for test in tests: - test_name = test[0] - test_t = "Tests/" + test_name.split("Tests/")[-1] - test_ids.append(test_t) - - return test_ids - - -# Stops prepare_tests() being called twice -TEST_FILES = prepare_tests() +def get_test_id(test): + print(test) + return "Tests/" + test.name.split("Tests/")[-1] @pytest.mark.parametrize( - "name,stdout,stderr,custom_classpath,case_sensitive", - TEST_FILES, - ids=assign_ids(TEST_FILES), + "test", + prepare_tests(), + ids=get_test_id, ) # pylint: disable=too-many-branches -def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): +def tests_runner(test): """ Take an array of dictionaries with test file names and expected output Run all of the tests and check the output @@ -423,13 +468,16 @@ def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): """ # Check if a test should not be ran - if str(name) in external_vars.do_not_run: + if test.name in external_vars.do_not_run: pytest.skip("Test included in do_not_run") - if custom_classpath is not None: - command = f"{external_vars.VM} -cp {custom_classpath} {name}" + if 
test.definition_fail_msg: + pytest.fail(test.definition_fail_msg) + + if test.custom_classpath is not None: + command = f"{external_vars.VM} -cp {test.custom_classpath} {test.name}" else: - command = f"{external_vars.VM} -cp {external_vars.CLASSPATH} {name}" + command = f"{external_vars.VM} -cp {external_vars.CLASSPATH} {test.name}" try: result = subprocess.run( @@ -442,13 +490,18 @@ def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): ) # lower-case comparisons unless specified otherwise - if case_sensitive is False: + if test.case_sensitive is False: result.stdout = str(result.stdout).lower() result.stderr = str(result.stderr).lower() # Produce potential error messages now and then run assertion error_message = build_error_message( - result.stdout, result.stderr, stdout, stderr, command, case_sensitive + result.stdout, + result.stderr, + test.stdout, + test.stderr, + command, + test.case_sensitive, ) # Related to above line (Rather than change how stdout and stderr are @@ -457,34 +510,32 @@ def tests_runner(name, stdout, stderr, custom_classpath, case_sensitive): if result.returncode != 0: error_message += f"Command failed with return code: {result.returncode}\n" - test_pass_bool = check_output(result, stdout, stderr) + test_pass_bool = check_output(result, test.stdout, test.stderr) # Check if we have any unexpectedly passing tests if ( - name in external_vars.known_failures and test_pass_bool + test.name in external_vars.known_failures and test_pass_bool ): # Test passed when it is not expected to - external_vars.tests_passed_unexpectedly.append(name) - assert False, f"Test {name} is in known_failures but passed" + external_vars.tests_passed_unexpectedly.append(test.name) + assert False, f"Test {test.name} is in known_failures but passed" - if ( - str(name) in external_vars.failing_as_unspecified and test_pass_bool - ): # Test passed when it is not expected tp - external_vars.tests_passed_unexpectedly.append(name) - assert False, f"Test {name} is in failing_as_unspecified but passed" + if test.name in external_vars.failing_as_unspecified and test_pass_bool: + # Test passed when it is not expected tp + external_vars.tests_passed_unexpectedly.append(test.name) + assert False, f"Test {test.name} is in failing_as_unspecified but passed" - if ( - name in external_vars.unsupported and test_pass_bool - ): # Test passed when it is not expected tp - external_vars.tests_passed_unexpectedly.append(name) - assert False, f"Test {name} is in unsupported but passed" + if test.name in external_vars.unsupported and test_pass_bool: + # Test passed when it is not expected tp + external_vars.tests_passed_unexpectedly.append(test.name) + assert False, f"Test {test.name} is in unsupported but passed" if ( - name not in external_vars.unsupported - and name not in external_vars.known_failures - and name not in external_vars.failing_as_unspecified + test.name not in external_vars.unsupported + and test.name not in external_vars.known_failures + and test.name not in external_vars.failing_as_unspecified ): if not test_pass_bool: - external_vars.tests_failed_unexpectedly.append(name) + external_vars.tests_failed_unexpectedly.append(test.name) assert ( test_pass_bool - ), f"Error on test, {name} expected to pass: {error_message}" + ), f"Error on test, {test.name} expected to pass: {error_message}" diff --git a/IntegrationTests/test_tester.py b/IntegrationTests/test_tester.py index 3fe487f5..e44fccb2 100644 --- a/IntegrationTests/test_tester.py +++ b/IntegrationTests/test_tester.py @@ -5,7 +5,6 
@@ import os import pytest -from _pytest.outcomes import Failed from test_runner import ( parse_test_file, @@ -17,6 +16,7 @@ parse_case_sensitive, parse_stdout, parse_stderr, + ParseError, ) import conftest as external_vars @@ -102,11 +102,11 @@ def test_parse_file( # pylint: disable=too-many-arguments,too-many-positional-a case_sensitive, soms_for_testing_location, ): - result_tuple = parse_test_file(soms_for_testing_location + test_file) - assert result_tuple[1] == exp_stdout - assert result_tuple[2] == exp_stderr - assert result_tuple[3] == custom_classpath - assert result_tuple[4] is case_sensitive + result = parse_test_file(soms_for_testing_location + test_file) + assert result.stdout == exp_stdout + assert result.stderr == exp_stderr + assert result.custom_classpath == custom_classpath + assert result.case_sensitive is case_sensitive @pytest.mark.tester @@ -116,19 +116,19 @@ def test_parse_file_correctly_using_envvars(soms_for_testing_location): os.environ["experimental"] = "experiments/Classpath" os.environ["oneWord"] = "anotherOne" - result_tuple = parse_test_file(soms_for_testing_location + "/som_test_4.som") - assert result_tuple[3] == custom_classpath + result = parse_test_file(soms_for_testing_location + "/som_test_4.som") + assert result.custom_classpath == custom_classpath # Now test the ability to interleave regular classpaths custom_classpath = "one/the/outside:core-lib/AreWeFastYet/Core:then/another/one" - result_tuple = parse_test_file(soms_for_testing_location + "/som_test_5.som") - assert result_tuple[3] == custom_classpath + result = parse_test_file(soms_for_testing_location + "/som_test_5.som") + assert result.custom_classpath == custom_classpath @pytest.mark.tester def test_parse_file_failing_because_of_envvar_not_being_set(soms_for_testing_location): - with pytest.raises(Failed, match=r"Environment variable IDontExist not set"): - parse_test_file(soms_for_testing_location + "/som_test_6.som") + result = parse_test_file(soms_for_testing_location + "/som_test_6.som") + assert result.definition_fail_msg == "Environment variable IDontExist not set" @pytest.mark.tester @@ -340,13 +340,13 @@ def test_custom_classpath(): assert expected == parse_custom_classpath(COMMENT_TESTERS) # Now assert a failure on a classpath envvar that hasnt been set - with pytest.raises(Failed, match=r"Environment variable no_exist_1 not set"): + with pytest.raises(ParseError, match=r"Environment variable no_exist_1 not set"): parse_custom_classpath(COMMENT_TESTERS_2) os.environ["no_exist_1"] = "exists_1" # Now assert we fail on the second - with pytest.raises(Failed, match=r"Environment variable no_exist_2 not set"): + with pytest.raises(ParseError, match=r"Environment variable no_exist_2 not set"): parse_custom_classpath(COMMENT_TESTERS_2) os.environ["no_exist_2"] = "exists_2"
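For reviewers, a quick illustration of the matching rules that the parameterized tests above pin down: "***" marks the point after which the expected word may be cut short, and "..." skips any number of intervening output lines. This is only a sketch, not part of the patches themselves; it assumes it is run from the IntegrationTests/ directory of this series, with pytest and PyYAML available so that test_runner imports cleanly.

    # Illustration only; exercises the helpers added/renamed in this series.
    from test_runner import check_output_matches, check_partial_word

    # The prefix before "***" is required; the tail after it may be truncated.
    assert check_partial_word("1.1111111111", "1.111111111***1111111111")
    assert not check_partial_word("1.11111111", "1.111111111***1111111111")

    # "..." in the expectation skips lines of the captured (lower-cased) output.
    given = "Hello World\nSome other output in the Middle\nThis is a test\n".lower()
    assert check_output_matches(
        given.split("\n"), ["hello world", "...", "this is a test"]
    )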