diff --git a/.github/workflows/Arch.yaml b/.github/workflows/Arch.yaml index 98d751a8..e2784a19 100644 --- a/.github/workflows/Arch.yaml +++ b/.github/workflows/Arch.yaml @@ -9,7 +9,7 @@ jobs: runs-on: self-hosted strategy: matrix: - python-version: ["3.9", "3.13.1"] + python-version: ["3.10", "3.13.1"] name: pytest-arch-python-${{ matrix.python-version }} container: image: archlinux:latest diff --git a/.github/workflows/GithubRunner.yaml b/.github/workflows/GithubRunner.yaml index 78b2b43e..c8607718 100644 --- a/.github/workflows/GithubRunner.yaml +++ b/.github/workflows/GithubRunner.yaml @@ -8,7 +8,7 @@ jobs: pytest: strategy: matrix: - python-version: ["3.9", "3.13"] + python-version: ["3.10", "3.13"] name: pytest-github-runner-python-${{ matrix.python-version }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/Ubuntu.yaml b/.github/workflows/Ubuntu.yaml index 115358bf..130f78b2 100644 --- a/.github/workflows/Ubuntu.yaml +++ b/.github/workflows/Ubuntu.yaml @@ -9,7 +9,7 @@ jobs: runs-on: self-hosted strategy: matrix: - python-version: ["3.9", "3.13"] + python-version: ["3.10", "3.13"] name: pytest-ubuntu-python-${{ matrix.python-version }} container: image: ubuntu:latest diff --git a/setup.cfg b/setup.cfg index 8003957a..1324c232 100644 --- a/setup.cfg +++ b/setup.cfg @@ -20,7 +20,7 @@ classifiers = packages = find_namespace: packages_dir = src include_package_data = True -python_requires = >=3.9 +python_requires = >=3.10 install_requires = argparse argcomplete @@ -30,6 +30,7 @@ install_requires = importlib-resources psutil packaging + sio3pack==1.0.0.dev5 [options.packages.find] where = src diff --git a/src/sinol_make/__init__.py b/src/sinol_make/__init__.py index 5a936997..2b9cd2f5 100644 --- a/src/sinol_make/__init__.py +++ b/src/sinol_make/__init__.py @@ -5,14 +5,17 @@ import argcomplete from sinol_make import util, sio2jail -from sinol_make.helpers import cache, oicompare +from sinol_make.helpers import cache, oicompare, paths # Required for side effects from sinol_make.task_type.normal import NormalTaskType # noqa from sinol_make.task_type.interactive import InteractiveTaskType # noqa +# SIO3Pack +from sio3pack.exceptions import SIO3PackException -__version__ = "1.9.8" + +__version__ = "2.0.0.dev1" def configure_parsers(): @@ -102,6 +105,16 @@ def main(): util.exit_with_error(err) except SystemExit as err: exit(err.code) + except SIO3PackException as err: + print(util.color_red("Short description of the error:")) + print(err.message) + print() + print(util.color_red("Full description:")) + print(err.full_message) + print() + with open(paths.get_cache_path('traceback'), 'w') as f: + traceback.print_exc(file=f) + print(util.warning('Full traceback saved to .cache/traceback')) except Exception: print(traceback.format_exc()) util.exit_with_error('An error occurred while running the command.\n' diff --git a/src/sinol_make/commands/chkwer/__init__.py b/src/sinol_make/commands/chkwer/__init__.py index 55d3e1ca..670875be 100644 --- a/src/sinol_make/commands/chkwer/__init__.py +++ b/src/sinol_make/commands/chkwer/__init__.py @@ -55,22 +55,21 @@ def run_test(self, execution: ChkwerExecution) -> RunResult: """ Verifies a test and returns the result of chkwer on this test. 
""" - output_file = paths.get_chkwer_path(os.path.basename(execution.out_test_path)) - with open(execution.in_test_path, 'r') as inf, open(output_file, 'w') as outf: + output_file = paths.get_chkwer_path(os.path.basename(execution.test.out_file.path)) + with open(execution.test.in_file.path, 'r') as inf, open(output_file, 'w') as outf: process = subprocess.Popen([execution.model_exe], stdin=inf, stdout=outf) process.wait() - ok, points, comment = self.task_type.check_output(execution.in_test_path, output_file, execution.out_test_path) + ok, points, comment = self.task_type.check_output(execution.test.in_file.path, output_file, execution.test.out_file.path) - return RunResult(execution.in_test_path, ok, int(points), comment) + return RunResult(execution.test, ok, int(points), comment) def run_and_print_table(self) -> Dict[str, TestResult]: results = {} - sorted_tests = sorted(self.tests, key=lambda test: package_util.get_group(test, self.task_id)) + sorted_tests = sorted(self.tests, key=lambda test: test.group) executions: List[ChkwerExecution] = [] for test in sorted_tests: - results[test] = TestResult(test, self.task_id) - executions.append(ChkwerExecution(test, results[test].test_name, package_util.get_out_from_in(test), - self.checker_executable, self.model_executable)) + results[test.test_name] = TestResult(test) + executions.append(ChkwerExecution(test, self.checker_executable, self.model_executable)) has_terminal, terminal_width, terminal_height = util.get_terminal_size() table_data = TableData(results, 0, self.task_id, self.contest_type.max_score_per_test()) @@ -84,7 +83,7 @@ def run_and_print_table(self) -> Dict[str, TestResult]: try: with mp.Pool(self.cpus) as pool: for i, result in enumerate(pool.imap(self.run_test, executions)): - table_data.results[result.test_path].set_results(result.points, result.ok, result.comment) + table_data.results[result.test.test_name].set_results(result.points, result.ok, result.comment) table_data.i = i except KeyboardInterrupt: keyboard_interrupt = True @@ -108,12 +107,12 @@ def run(self, args): util.exit_with_error("chkwer can be run only for normal tasks.") self.cpus = args.cpus or util.default_cpu_count() - self.tests = package_util.get_tests(self.task_id, args.tests) + self.tests = package_util.get_tests(args.tests) if len(self.tests) == 0: util.exit_with_error("No tests found.") else: - print('Will run on tests: ' + util.bold(', '.join(self.tests))) + print('Will run on tests: ' + util.bold(', '.join([test.test_name for test in self.tests]))) util.change_stack_size_to_unlimited() additional_files = self.task_type.additional_files_to_compile() @@ -122,10 +121,10 @@ def run(self, args): if len(additional_files) != 1: util.exit_with_error("More than one file to compile found. 
How is that possible?") checker_info = additional_files[0] - model_solution = outgen_util.get_correct_solution(self.task_id) + model_solution = package_util.get_correct_solution() self.checker_executable = self.compile(checker_info[0], checker_info[1], args, "checker", args.compile_mode) - self.model_executable = self.compile(model_solution, package_util.get_executable(model_solution), args, + self.model_executable = self.compile(model_solution.path, package_util.get_executable(model_solution.path), args, "model solution", args.compile_mode) print() diff --git a/src/sinol_make/commands/chkwer/chkwer_util.py b/src/sinol_make/commands/chkwer/chkwer_util.py index c4708eb1..00d587b0 100644 --- a/src/sinol_make/commands/chkwer/chkwer_util.py +++ b/src/sinol_make/commands/chkwer/chkwer_util.py @@ -19,9 +19,9 @@ def print_view(term_width, term_height, table_data: TableData): column_lengths = [0, len('Points') + 1, 0] tests = [] for result in results.values(): - column_lengths[0] = max(column_lengths[0], len(result.test_name)) - tests.append(result.test_path) - tests = sort_tests(tests, table_data.task_id) + column_lengths[0] = max(column_lengths[0], len(result.test.test_name)) + tests.append(result.test) + tests = sort_tests(tests) # 6 is for " | " between columns, 3 for margin. column_lengths[2] = max(10, term_width - column_lengths[0] - column_lengths[1] - 6 - 3) @@ -38,12 +38,12 @@ def print_line_separator(): print_line_separator() last_group = None - for test_path in tests: - result = results[test_path] - if last_group is not None and last_group != result.test_group: + for test in tests: + result = results[test.test_name] + if last_group is not None and last_group != result.test.group: print_line_separator() - last_group = result.test_group - print(margin + result.test_name.ljust(column_lengths[0]) + " | ", end='') + last_group = result.test.group + print(margin + test.test_name.ljust(column_lengths[0]) + " | ", end='') if result.run: if result.ok: diff --git a/src/sinol_make/commands/doc/__init__.py b/src/sinol_make/commands/doc/__init__.py index b647dc11..b2e19348 100644 --- a/src/sinol_make/commands/doc/__init__.py +++ b/src/sinol_make/commands/doc/__init__.py @@ -104,13 +104,13 @@ def run(self, args: argparse.Namespace): elif args.latex_compiler == 'auto': self.compilation_method = 'pdflatex' for extension in ['ps', 'eps']: - if glob.glob(os.path.join(os.getcwd(), 'doc', f'*.{extension}')) != []: + if glob.glob(os.path.join(os.getcwd(), 'doc', f'*.{extension}')): #TODO: SIO3Pack? self.compilation_method = 'latex_dvi' else: util.exit_with_error("Unrecognized latex compiler") if args.files == []: - self.files = glob.glob(os.path.join(os.getcwd(), 'doc', '*.tex')) + self.files = glob.glob(os.path.join(os.getcwd(), 'doc', '*.tex')) #TODO: SIO3Pack? 
else: self.files = [] for file in args.files: diff --git a/src/sinol_make/commands/export/__init__.py b/src/sinol_make/commands/export/__init__.py index 7eeb367d..c6665d44 100644 --- a/src/sinol_make/commands/export/__init__.py +++ b/src/sinol_make/commands/export/__init__.py @@ -7,12 +7,13 @@ import argparse from sinol_make import util, contest_types -from sinol_make.commands.ingen.ingen_util import get_ingen, compile_ingen, run_ingen, ingen_exists +from sinol_make.commands.ingen.ingen_util import get_ingen_path, compile_ingen, run_ingen, ingen_exists from sinol_make.helpers import package_util, parsers, paths from sinol_make.interfaces.BaseCommand import BaseCommand -from sinol_make.commands.outgen import Command as OutgenCommand, compile_correct_solution, get_correct_solution +from sinol_make.commands.outgen import Command as OutgenCommand, compile_correct_solution from sinol_make.commands.doc import Command as DocCommand from sinol_make.interfaces.Errors import UnknownContestType +from sinol_make.sio3pack.package import SIO3Package class Command(BaseCommand): @@ -53,8 +54,8 @@ def generate_input_tests(self): if os.path.exists(os.path.join(os.getcwd(), 'prog')): shutil.copytree(os.path.join(os.getcwd(), 'prog'), prog_dir) - if ingen_exists(self.task_id): - ingen_path = get_ingen(self.task_id) + if ingen_exists(): + ingen_path = get_ingen_path() ingen_path = os.path.join(prog_dir, os.path.basename(ingen_path)) ingen_exe = compile_ingen(ingen_path, self.args, self.args.compile_mode) if not run_ingen(ingen_exe, in_dir): @@ -75,7 +76,7 @@ def generate_output_files(self): outputs.append(os.path.join(out_dir, os.path.basename(test).replace('.in', '.out'))) if len(outputs) > 0: outgen = OutgenCommand() - correct_solution_exe = compile_correct_solution(get_correct_solution(self.task_id), self.args, + correct_solution_exe = compile_correct_solution(package_util.get_correct_solution().path, self.args, self.args.compile_mode) outgen.args = self.args outgen.correct_solution_exe = correct_solution_exe @@ -86,12 +87,12 @@ def get_generated_tests(self): Returns list of generated tests. Executes ingen to check what tests are generated. 
""" - if not ingen_exists(self.task_id): + if not ingen_exists(): return [] in_dir = paths.get_cache_path('export', 'tests', 'in') tests = glob.glob(os.path.join(in_dir, f'{self.task_id}*.in')) - return [package_util.extract_test_id(test, self.task_id) for test in tests] + return [SIO3Package().get_test_id_from_filename(os.path.basename(test)) for test in tests] def create_ocen(self, target_dir: str): """ @@ -172,7 +173,7 @@ def copy_package_required_files(self, target_dir: str): tests_to_copy = [] for ext in ['in', 'out']: for test in glob.glob(os.path.join(os.getcwd(), ext, f'{self.task_id}*.{ext}')): - if package_util.extract_test_id(test, self.task_id) not in generated_tests: + if SIO3Package().get_test_id_from_filename(os.path.basename(test)) not in generated_tests: tests_to_copy.append((ext, test)) cache_test_dir = paths.get_cache_path('export', 'tests') @@ -269,7 +270,6 @@ def run(self, args: argparse.Namespace): self.task_id = package_util.get_task_id() self.export_name = self.task_id self.task_type_cls = package_util.get_task_type_cls() - package_util.validate_test_names(self.task_id) try: self.contest = contest_types.get_contest_type() except UnknownContestType as e: diff --git a/src/sinol_make/commands/ingen/__init__.py b/src/sinol_make/commands/ingen/__init__.py index 6f45acaa..1b0b0ce6 100644 --- a/src/sinol_make/commands/ingen/__init__.py +++ b/src/sinol_make/commands/ingen/__init__.py @@ -3,7 +3,7 @@ import os from sinol_make import util -from sinol_make.commands.ingen.ingen_util import get_ingen, compile_ingen, run_ingen +from sinol_make.commands.ingen.ingen_util import get_ingen_path, compile_ingen, run_ingen from sinol_make.helpers import parsers, package_util, paths from sinol_make.interfaces.BaseCommand import BaseCommand @@ -65,9 +65,9 @@ def run(self, args: argparse.Namespace): self.task_id = package_util.get_task_id() util.change_stack_size_to_unlimited() - self.ingen = get_ingen(self.task_id, args.ingen_path) - print(f'Using ingen file {os.path.basename(self.ingen)}') - self.ingen_exe = compile_ingen(self.ingen, self.args, self.args.compile_mode, self.args.fsanitize) + self.ingen_path = get_ingen_path(args.ingen_path) + print(f'Using ingen file {os.path.basename(self.ingen_path)}') + self.ingen_exe = compile_ingen(self.ingen_path, self.args, self.args.compile_mode, self.args.fsanitize) previous_tests = [] try: @@ -86,10 +86,11 @@ def run(self, args: argparse.Namespace): util.exit_with_error('Failed to generate input files.') self.delete_dangling_files(dates) + package_util.reload_tests() with open(paths.get_cache_path("input_tests"), "w") as f: - f.write("\n".join(glob.glob(os.path.join(os.getcwd(), "in", f"{self.task_id}*.in")))) + f.write("\n".join(glob.glob(os.path.join(os.getcwd(), "in", f"{self.task_id}*.in")))) # TODO: refactor if not self.args.no_validate: - tests = sorted(glob.glob(os.path.join(os.getcwd(), "in", f"{self.task_id}*.in"))) + tests = sorted(glob.glob(os.path.join(os.getcwd(), "in", f"{self.task_id}*.in"))) # TODO: refactor package_util.validate_tests(tests, self.args.cpus, 'input') diff --git a/src/sinol_make/commands/ingen/ingen_util.py b/src/sinol_make/commands/ingen/ingen_util.py index 2c6eecb5..496d64ad 100644 --- a/src/sinol_make/commands/ingen/ingen_util.py +++ b/src/sinol_make/commands/ingen/ingen_util.py @@ -8,43 +8,43 @@ from sinol_make.helpers import package_util, compiler, compile -def ingen_exists(task_id): +def ingen_exists(): """ Checks if ingen source file exists. 
- :param task_id: task id, for example abc :return: True if exists, False otherwise """ - return package_util.any_files_matching_pattern(task_id, f'{task_id}ingen.*') + task_id = package_util.get_task_id() + return package_util.any_files_matching_pattern(f'{task_id}ingen.*') -def get_ingen(task_id, ingen_path=None): +def get_ingen_path(ingen_path=None) -> str: """ Find ingen source file in `prog/` directory. If `ingen_path` is specified, then it will be used (if exists). - :param task_id: task id, for example abc. :param ingen_path: path to ingen source file :return: path to ingen source file or None if not found """ + task_id = package_util.get_task_id() if ingen_path is not None: if os.path.exists(ingen_path): return ingen_path else: util.exit_with_error(f'Ingen source file {ingen_path} does not exist.') - ingen = package_util.get_files_matching_pattern(task_id, f'{task_id}ingen.*') + ingen = package_util.get_files_matching_pattern(f'{task_id}ingen.*') if len(ingen) == 0: util.exit_with_error(f'Ingen source file for task {task_id} does not exist.') # Sio2 first chooses shell scripts, then non-shell source codes. correct_ingen = None for i in ingen: - if os.path.splitext(i)[1] == '.sh': + if os.path.splitext(i.path)[1] == '.sh': correct_ingen = i break if correct_ingen is None: correct_ingen = ingen[0] - return correct_ingen + return correct_ingen.path def compile_ingen(ingen_path: str, args: argparse.Namespace, compilation_flags='default', use_fsanitize=False): diff --git a/src/sinol_make/commands/inwer/__init__.py b/src/sinol_make/commands/inwer/__init__.py index a050a759..0d170ae8 100644 --- a/src/sinol_make/commands/inwer/__init__.py +++ b/src/sinol_make/commands/inwer/__init__.py @@ -8,6 +8,8 @@ from functools import cmp_to_key from typing import Dict, List +from sio3pack.test import Test + from sinol_make import util, contest_types from sinol_make.structs.inwer_structs import TestResult, InwerExecution, VerificationResult, TableData from sinol_make.helpers import package_util, printer, paths, parsers @@ -49,37 +51,37 @@ def verify_test(execution: InwerExecution) -> VerificationResult: """ Verifies a test and returns the result of inwer on this test. """ - output_dir = paths.get_executables_path(execution.test_name) + output_dir = paths.get_executables_path(execution.test.test_name) os.makedirs(output_dir, exist_ok=True) - command = [execution.inwer_exe_path, os.path.basename(execution.test_path)] - with open(execution.test_path, 'r') as test: + command = [execution.inwer_exe_path, execution.test.in_file.filename] + with open(execution.test.in_file.path, 'r') as test: process = subprocess.Popen(command, stdin=test, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) process.wait() exit_code = process.returncode out, _ = process.communicate() return VerificationResult( - execution.test_path, + execution.test, exit_code == 0, out.decode('utf-8') ) - def verify_and_print_table(self) -> Dict[str, TestResult]: + def verify_and_print_table(self) -> Dict[Test, TestResult]: """ Verifies all tests and prints the results in a table. 
:return: dictionary of TestResult objects """ results = {} - sorted_tests = sorted(self.tests, key=lambda test: package_util.get_group(test, self.task_id)) + sorted_tests = sorted(self.tests, key=lambda test: test.group) executions: List[InwerExecution] = [] for test in sorted_tests: - results[test] = TestResult(test, self.task_id) - executions.append(InwerExecution(test, results[test].test_name, self.inwer_executable)) + results[test.test_name] = TestResult(test) + executions.append(InwerExecution(test, self.inwer_executable)) has_terminal, terminal_width, terminal_height = util.get_terminal_size() - table_data = TableData(results, 0, self.task_id) + table_data = TableData(results, 0) if has_terminal: run_event = threading.Event() run_event.set() @@ -91,7 +93,7 @@ def verify_and_print_table(self) -> Dict[str, TestResult]: try: with mp.Pool(self.cpus) as pool: for i, result in enumerate(pool.imap(self.verify_test, executions)): - table_data.results[result.test_path].set_results(result.valid, result.output) + table_data.results[result.test.test_name].set_results(result.valid, result.output) table_data.i = i if util.has_sanitizer_error(result.output, 0 if result.valid else 1): sanitizer_error = True @@ -117,21 +119,17 @@ def verify_tests_order(self): """ Verifies if tests are in correct order. """ - def get_id(test, func=str.isalpha): - basename = os.path.basename(os.path.splitext(test)[0]) - return "".join(filter(func, basename[len(self.task_id):])) - ocen = sorted([test for test in self.tests if test.endswith('ocen.in')], - key=lambda test: int("".join(filter(str.isdigit, get_id(test, str.isdigit))))) + ocen = sorted([test for test in self.tests if test.in_file.path.endswith('ocen.in')], + key=lambda test: int(test.group)) tests = list(set(self.tests) - set(ocen)) last_id = None last_test = None for test in ocen: - basename = os.path.basename(os.path.splitext(test)[0]) - test_id = int("".join(filter(str.isdigit, basename))) + test_id = int("".join(filter(str.isdigit, test.test_id))) if last_id is not None and test_id != last_id + 1: - util.exit_with_error(f'Test {os.path.basename(test)} is in wrong order. ' - f'Last test was {os.path.basename(last_test)}.') + util.exit_with_error(f'Test {test.test_id} is in wrong order. ' + f'Last test was {last_test.test_id}.') last_id = test_id last_test = test @@ -152,9 +150,9 @@ def is_next(last, curr): last = 'a' + last return last == curr - def compare_id(test1, test2): - id1 = get_id(test1) - id2 = get_id(test2) + def compare_id(test1: Test, test2: Test): + id1 = test1.test_id + id2 = test2.test_id if id1 == id2: return 0 if len(id1) == len(id2): @@ -166,17 +164,17 @@ def compare_id(test1, test2): return 1 groups = {} - for group in package_util.get_groups(self.tests, self.task_id): - groups[group] = sorted([test for test in tests if package_util.get_group(test, self.task_id) == group], + for group in package_util.get_groups(self.tests): + groups[group] = sorted([test for test in tests if int(test.group) == group], key=cmp_to_key(compare_id)) for group, group_tests in groups.items(): last_id = None last_test = None for test in group_tests: - test_id = get_id(test) + test_id = "".join([c for c in test.test_id if not c.isdigit()]) if last_id is not None and not is_next(last_id, test_id): - util.exit_with_error(f'Test {os.path.basename(test)} is in wrong order. ' - f'Last test was {os.path.basename(last_test)}.') + util.exit_with_error(f'Test {test.test_id} is in wrong order. 
' + f'Last test was {last_test.test_id}.') last_id = test_id last_test = test @@ -184,8 +182,7 @@ def run(self, args: argparse.Namespace): args = util.init_package_command(args) self.task_id = package_util.get_task_id() - package_util.validate_test_names(self.task_id) - self.inwer = inwer_util.get_inwer_path(self.task_id, args.inwer_path) + self.inwer = inwer_util.get_inwer_path(args.inwer_path) if self.inwer is None: if args.inwer_path is None: util.exit_with_error('No inwer found in `prog/` directory.') @@ -195,23 +192,23 @@ def run(self, args: argparse.Namespace): print(f'Verifying with inwer {util.bold(relative_path)}') self.cpus = args.cpus or util.default_cpu_count() - self.tests = package_util.get_tests(self.task_id, args.tests) + self.tests = package_util.get_tests(args.tests) self.contest_type = contest_types.get_contest_type() if len(self.tests) == 0: util.exit_with_error('No tests found.') else: - print('Verifying tests: ' + util.bold(', '.join(self.tests))) + print('Verifying tests: ' + util.bold(', '.join([test.in_file.filename for test in self.tests]))) util.change_stack_size_to_unlimited() self.inwer_executable = inwer_util.compile_inwer(self.inwer, args, args.compile_mode, args.fsanitize) - results: Dict[str, TestResult] = self.verify_and_print_table() + results: Dict[Test, TestResult] = self.verify_and_print_table() print('') failed_tests = [] for result in results.values(): if not result.valid: - failed_tests.append(result.test_name) + failed_tests.append(result.test.in_file.filename) if len(failed_tests) > 0: util.exit_with_error(f'Verification failed for tests: {", ".join(failed_tests)}') diff --git a/src/sinol_make/commands/inwer/inwer_util.py b/src/sinol_make/commands/inwer/inwer_util.py index b8cc31cd..203a1e11 100644 --- a/src/sinol_make/commands/inwer/inwer_util.py +++ b/src/sinol_make/commands/inwer/inwer_util.py @@ -2,10 +2,12 @@ import os import sys from io import StringIO -from typing import Union +from typing import Union, List import argparse +from sio3pack.test import Test + from sinol_make import util from sinol_make.commands.inwer import TestResult, TableData from sinol_make.helpers import compile, package_util @@ -13,15 +15,16 @@ from sinol_make.interfaces.Errors import CompilationError -def get_inwer_path(task_id: str, path=None) -> Union[str, None]: +def get_inwer_path(path=None) -> Union[str, None]: """ Returns path to inwer executable for given task or None if no inwer was found. """ + task_id = package_util.get_task_id() if path is None: - inwers = package_util.get_files_matching_pattern(task_id, f'{task_id}inwer.*') + inwers = package_util.get_files_matching_pattern(f'{task_id}inwer.*') if len(inwers) == 0: return None - return inwers[0] + return inwers[0].path else: inwer = os.path.join(os.getcwd(), path) if os.path.exists(inwer): @@ -46,9 +49,9 @@ def compile_inwer(inwer_path: str, args: argparse.Namespace, compilation_flags=' return inwer_exe -def sort_tests(tests, task_id): +def sort_tests(tests: List[Test]) -> List[Test]: # First sort by group, then by test name. 
- tests.sort(key=lambda test: [package_util.get_group(test, task_id), test]) + tests.sort(key=lambda test: [int(test.group), test.test_id]) return tests @@ -65,10 +68,10 @@ def print_view(term_width, term_height, table_data: TableData): column_lengths = [0, len('Group') + 1, len('Status') + 1, 0] tests = [] for result in results.values(): - column_lengths[0] = max(column_lengths[0], len(result.test_name)) - column_lengths[1] = max(column_lengths[1], len(result.test_group)) - tests.append(result.test_path) - tests = sort_tests(tests, table_data.task_id) + column_lengths[0] = max(column_lengths[0], len(result.test.test_name)) + column_lengths[1] = max(column_lengths[1], len(result.test.group)) + tests.append(result.test) + tests = sort_tests(tests) column_lengths[3] = max(10, term_width - column_lengths[0] - column_lengths[1] - column_lengths[ 2] - 9 - 3) # 9 is for " | " between columns, 3 for margin. @@ -87,13 +90,13 @@ def print_line_separator(): print_line_separator() last_group = None - for test_path in tests: - result = results[test_path] - if last_group is not None and last_group != result.test_group: + for test in tests: + result = results[test.test_name] + if last_group is not None and last_group != result.test.group: print_line_separator() - last_group = result.test_group - print(margin + result.test_name.ljust(column_lengths[0]) + " | ", end='') - print(result.test_group.ljust(column_lengths[1] - 1) + " | ", end='') + last_group = result.test.group + print(margin + test.test_name.ljust(column_lengths[0]) + " | ", end='') + print(result.test.group.ljust(column_lengths[1] - 1) + " | ", end='') if result.verified: if result.valid: diff --git a/src/sinol_make/commands/outgen/__init__.py b/src/sinol_make/commands/outgen/__init__.py index fc71abf8..337017dc 100644 --- a/src/sinol_make/commands/outgen/__init__.py +++ b/src/sinol_make/commands/outgen/__init__.py @@ -1,12 +1,16 @@ import argparse import glob import os +from typing import List + import yaml import multiprocessing as mp +from sio3pack.test import Test + from sinol_make import util -from sinol_make.commands.outgen.outgen_util import get_correct_solution, compile_correct_solution, generate_output +from sinol_make.commands.outgen.outgen_util import compile_correct_solution, generate_output from sinol_make.structs.gen_structs import OutputGenerationArguments from sinol_make.helpers import parsers, package_util, cache, paths from sinol_make.interfaces.BaseCommand import BaseCommand @@ -62,7 +66,7 @@ def calculate_md5_sums(self, tests=None): list of input tests based on which the output tests will be generated) """ if tests is None: - tests = glob.glob(os.path.join(os.getcwd(), 'in', '*.in')) + tests = package_util.get_tests() old_md5_sums = None try: @@ -76,27 +80,27 @@ def calculate_md5_sums(self, tests=None): md5_sums = {} outputs_to_generate = [] from_inputs = [] - for file in tests: - basename = os.path.basename(file) + for test in tests: + basename = os.path.basename(test.in_file.path) output_basename = os.path.splitext(os.path.basename(basename))[0] + '.out' output_path = os.path.join(os.getcwd(), 'out', output_basename) - md5_sums[basename] = util.get_file_md5(file) + md5_sums[basename] = util.get_file_md5(test.in_file.path) if old_md5_sums is None or old_md5_sums.get(basename, '') != md5_sums[basename]: outputs_to_generate.append(output_path) - from_inputs.append(file) + from_inputs.append(test) elif not os.path.exists(output_path): # If output file does not exist, generate it. 
outputs_to_generate.append(output_path) - from_inputs.append(file) + from_inputs.append(test) return md5_sums, outputs_to_generate, from_inputs - def clean_cache(self, inputs): + def clean_cache(self, tests: List[Test]): """ Cleans cache for the given input files. """ - md5_sums = [util.get_file_md5(file) for file in inputs] + md5_sums = [util.get_file_md5(file.in_file.path) for file in tests] for solution in glob.glob(paths.get_cache_path("md5sums", "*")): sol_cache = cache.get_cache_file(solution) for input in md5_sums: @@ -112,19 +116,19 @@ def run(self, args: argparse.Namespace): self.task_type = package_util.get_task_type_cls() if not self.task_type.run_outgen(): util.exit_with_error('Output generation is not supported for this task type.') - package_util.validate_test_names(self.task_id) util.change_stack_size_to_unlimited() - cache.check_correct_solution(self.task_id) - self.correct_solution = get_correct_solution(self.task_id) + cache.check_correct_solution() + self.correct_solution = package_util.get_correct_solution() md5_sums, outputs_to_generate, from_inputs = self.calculate_md5_sums() if len(outputs_to_generate) == 0: print(util.info('All output files are up to date.')) else: self.clean_cache(from_inputs) - self.correct_solution_exe = compile_correct_solution(self.correct_solution, self.args, + self.correct_solution_exe = compile_correct_solution(self.correct_solution.path, self.args, self.args.compile_mode) self.generate_outputs(outputs_to_generate) + package_util.reload_tests() with open(os.path.join(os.getcwd(), 'in', '.md5sums'), 'w') as f: yaml.dump(md5_sums, f) diff --git a/src/sinol_make/commands/outgen/outgen_util.py b/src/sinol_make/commands/outgen/outgen_util.py index 8557345b..a89543dc 100644 --- a/src/sinol_make/commands/outgen/outgen_util.py +++ b/src/sinol_make/commands/outgen/outgen_util.py @@ -9,18 +9,6 @@ from sinol_make.helpers import package_util, compiler, compile -def get_correct_solution(task_id): - """ - Returns path to correct solution for given task. - :param task_id: task id, for example abc - :return: path to correct solution or None if not found - """ - correct_solution = package_util.get_files_matching_pattern(task_id, f'{task_id}.*') - if len(correct_solution) == 0: - util.exit_with_error(f'Correct solution for task {task_id} does not exist.') - return correct_solution[0] - - def compile_correct_solution(solution_path: str, args: argparse.Namespace, compilation_flags='default'): """ Compiles correct solution and returns path to compiled executable. 
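Note: the `get_correct_solution` helper removed above is superseded by `package_util.get_correct_solution()`, which resolves the model solution through SIO3Package and returns a sio3pack `LocalFile` rather than a plain path string. A minimal sketch of the new call-site pattern (illustrative only; it assumes the current working directory is a task package, and relies only on the `.path` and `.filename` attributes used elsewhere in this diff):

    from sinol_make.helpers import package_util

    # get_correct_solution() now returns a sio3pack LocalFile, not a str,
    # so callers that need a filesystem path read its .path attribute.
    solution = package_util.get_correct_solution()
    source_path = solution.path      # absolute path, e.g. .../prog/abc.cpp
    source_name = solution.filename  # basename, e.g. abc.cpp
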
diff --git a/src/sinol_make/commands/run/__init__.py b/src/sinol_make/commands/run/__init__.py index 53f38bec..0e51e913 100644 --- a/src/sinol_make/commands/run/__init__.py +++ b/src/sinol_make/commands/run/__init__.py @@ -13,7 +13,12 @@ from io import StringIO from typing import Dict +from sio3pack.files import LocalFile +from sio3pack.test import Test + from sinol_make import contest_types, util, sio2jail +from sinol_make.commands.inwer.inwer_util import sort_tests +from sinol_make.sio3pack.package import SIO3Package from sinol_make.structs.run_structs import ExecutionData, PrintData from sinol_make.structs.cache_structs import CacheTest, CacheFile from sinol_make.interfaces.BaseCommand import BaseCommand @@ -88,10 +93,10 @@ def print_view(term_width, term_height, task_id, program_groups_scores, all_resu program_memory = collections.defaultdict(lambda: (-1, 0)) time_sum = 0 - for solution in names: - lang = package_util.get_file_lang(solution) + for name in names: + lang = package_util.get_file_lang(name) for test in tests: - time_sum += package_util.get_time_limit(test, config, lang, task_id, args) + time_sum += package_util.get_time_limit(test, lang, args) time_remaining = (len(executions) - print_data.i - 1) * 2 * time_sum / cpus / 1000.0 title = 'Done %4d/%4d. Time remaining (in the worst case): %5d seconds.' \ @@ -151,18 +156,16 @@ def print_table_end(): status = results[test].Status if results[test].Time is not None: if program_times[program][0] < results[test].Time: - program_times[program] = (results[test].Time, package_util.get_time_limit(test, config, - lang, task_id, args)) + program_times[program] = (results[test].Time, package_util.get_time_limit(test, lang, args)) elif status == Status.TL: - program_times[program] = (2 * package_util.get_time_limit(test, config, lang, task_id, args), - package_util.get_time_limit(test, config, lang, task_id, args)) + program_times[program] = (2 * package_util.get_time_limit(test, lang, args), + package_util.get_time_limit(test, lang, args)) if results[test].Memory is not None: if program_memory[program][0] < results[test].Memory: - program_memory[program] = (results[test].Memory, package_util.get_memory_limit(test, config, - lang, task_id, args)) + program_memory[program] = (results[test].Memory, package_util.get_memory_limit(test, lang, args)) elif status == Status.ML: - program_memory[program] = (2 * package_util.get_memory_limit(test, config, lang, task_id, args), - package_util.get_memory_limit(test, config, lang, task_id, args)) + program_memory[program] = (2 * package_util.get_memory_limit(test, lang, args), + package_util.get_memory_limit(test, lang, args)) if status == Status.PENDING: group_status = Status.PENDING else: @@ -220,34 +223,34 @@ def print_group_seperator(): last_group = None for test in tests: - group = package_util.get_group(test, task_id) + group = int(test.group) if last_group != group: if last_group is not None: print_group_seperator() last_group = group - print(margin + "%6s" % package_util.extract_test_id(test, task_id), end=" | ") + print(margin + "%6s" % test.test_id, end=" | ") for program in program_group: lang = package_util.get_file_lang(program) - result = all_results[program][package_util.get_group(test, task_id)][test] + result = all_results[program][int(test.group)][test] status = result.Status if status == Status.PENDING: print(13 * ' ', end=" | ") else: print("%3s" % colorize_status(status), - ("%20s" % color_time(result.Time, package_util.get_time_limit(test, config, lang, task_id, args))) + ("%20s" % 
color_time(result.Time, package_util.get_time_limit(test, lang, args))) if result.Time is not None else 10*" ", end=" | ") print() if not hide_memory: print(8*" ", end=" | ") for program in program_group: lang = package_util.get_file_lang(program) - result = all_results[program][package_util.get_group(test, task_id)][test] + result = all_results[program][int(test.group)][test] if result.Status != Status.PENDING: print(colorize_points(int(result.Points), contest.min_score_per_test(), contest.max_score_per_test()).ljust(13), end="") else: print(3*" ", end="") - print(("%20s" % color_memory(result.Memory, package_util.get_memory_limit(test, config, lang, task_id, args))) + print(("%20s" % color_memory(result.Memory, package_util.get_memory_limit(test, lang, args))) if result.Memory is not None else 10*" ", end=" | ") print() @@ -304,12 +307,6 @@ def configure_subparser(self, subparser): parsers.add_compilation_arguments(parser) return parser - def extract_file_name(self, file_path): - return os.path.split(file_path)[1] - - def get_group(self, test_path): - return package_util.get_group(test_path, self.ID) - def get_solution_from_exe(self, executable): file = os.path.splitext(executable)[0] for ext in self.SOURCE_EXTENSIONS: @@ -323,24 +320,21 @@ def get_possible_score(self, groups): possible_score += self.scores[group] return possible_score - def get_groups(self, tests): - return sorted(list(set([self.get_group(test) for test in tests]))) - def compile_solutions(self, solutions): print("Compiling %d solutions..." % len(solutions)) - args = [(solution, None, True, False, None) for solution in solutions] + args = [(solution.filename, None, True, False, None) for solution in solutions] with mp.Pool(self.cpus) as pool: compilation_results = pool.starmap(self.compile, args) return compilation_results def compile(self, solution, dest=None, use_extras=False, clear_cache=False, name=None): - compile_log_file = paths.get_compilation_log_path("%s.compile_log" % package_util.get_file_name(solution)) + compile_log_file = paths.get_compilation_log_path("%s.compile_log" % os.path.basename(solution)) source_file = os.path.join(os.getcwd(), "prog", self.get_solution_from_exe(solution)) if dest: output = dest else: output = paths.get_executables_path(package_util.get_executable(solution)) - name = name or "file " + package_util.get_file_name(solution) + name = name or "file " + os.path.basename(solution) extra_compilation_args = [] extra_compilation_files = [] @@ -375,14 +369,15 @@ def run_solution(self, data_for_execution: ExecutionData): Run an execution and return the result as ExecutionResult object. 
""" - (name, executable, test, time_limit, memory_limit, timetool_path, execution_dir) = data_for_execution - file_no_ext = paths.get_executions_path(name, package_util.extract_test_id(test, self.ID)) + (file, executable, test, time_limit, memory_limit, timetool_path, execution_dir) = data_for_execution + name = file.filename + file_no_ext = paths.get_executions_path(name, test.test_id) output_file = file_no_ext + ".out" result_file = file_no_ext + ".res" hard_time_limit = math.ceil(2 * time_limit / 1000.0) - return self.task_type.run(time_limit, hard_time_limit, memory_limit, test, output_file, - package_util.get_out_from_in(test), result_file, executable, execution_dir) + return self.task_type.run(time_limit, hard_time_limit, memory_limit, test.in_file.path, output_file, + getattr(test.out_file, "path", ""), result_file, executable, execution_dir) def run_solutions(self, compiled_commands, names, solutions, executables_dir): """ @@ -401,35 +396,37 @@ def run_solutions(self, compiled_commands, names, solutions, executables_dir): for file in glob.glob(os.path.join(os.getcwd(), "prog", f"_{self.ID}lib.so")): shutil.copy(file, executables_dir) - for (name, executable, result) in compiled_commands: + for (file, executable, result) in compiled_commands: + name = file.filename lang = package_util.get_file_lang(name) solution_cache = cache.get_cache_file(os.path.join(os.getcwd(), "prog", name)) all_cache_files[name] = solution_cache if result: for test in self.tests: - test_time_limit = package_util.get_time_limit(test, self.config, lang, self.ID, self.args) - test_memory_limit = package_util.get_memory_limit(test, self.config, lang, self.ID, self.args) + test_time_limit = package_util.get_time_limit(test, lang, self.args) + test_memory_limit = package_util.get_memory_limit(test, lang, self.args) - test_result: CacheTest = solution_cache.tests.get(self.test_md5sums[os.path.basename(test)], None) + test_result: CacheTest = solution_cache.tests.get(self.test_md5sums[test.in_file.filename], None) if test_result is not None and test_result.time_limit == test_time_limit and \ test_result.memory_limit == test_memory_limit and \ test_result.time_tool == self.timetool_name: - all_results[name][self.get_group(test)][test] = test_result.result + all_results[name][int(test.group)][test] = test_result.result else: - executions.append((name, executable, test, test_time_limit, test_memory_limit, + executions.append((file, executable, test, test_time_limit, test_memory_limit, self.timetool_path, os.path.dirname(executable))) - all_results[name][self.get_group(test)][test] = ExecutionResult(Status.PENDING) + all_results[name][int(test.group)][test] = ExecutionResult(Status.PENDING) os.makedirs(paths.get_executions_path(name), exist_ok=True) else: for test in self.tests: - all_results[name][self.get_group(test)][test] = ExecutionResult(Status.CE) + all_results[name][int(test.group)][test] = ExecutionResult(Status.CE) print() - executions.sort(key = lambda x: (package_util.get_executable_key(x[1], self.ID), x[2])) + executions.sort(key = lambda x: (package_util.get_executable_key(x[1]), x[2].test_name)) program_groups_scores = collections.defaultdict(dict) print_data = PrintData(0) has_terminal, terminal_width, terminal_height = util.get_terminal_size() + # has_terminal = False if has_terminal: run_event = threading.Event() @@ -443,18 +440,22 @@ def run_solutions(self, compiled_commands, names, solutions, executables_dir): pool = mp.Pool(self.cpus) keyboard_interrupt = False try: + print(executions) for i, result 
in enumerate(pool.imap(self.run_solution, executions)): - (name, executable, test, time_limit, memory_limit) = executions[i][:5] + (file, executable, test, time_limit, memory_limit) = executions[i][:5] + name = file.filename contest_points = self.contest.get_test_score(result, time_limit, memory_limit) result.Points = contest_points - all_results[name][self.get_group(test)][test] = result + all_results[name][int(test.group)][test] = result print_data.i = i + print(name, test) + # We store the result in dictionary to write it to cache files later. - lang = package_util.get_file_lang(name) - test_time_limit = package_util.get_time_limit(test, self.config, lang, self.ID, self.args) - test_memory_limit = package_util.get_memory_limit(test, self.config, lang, self.ID, self.args) - all_cache_files[name].tests[self.test_md5sums[os.path.basename(test)]] = CacheTest( + lang = package_util.get_file_lang(file.path) + test_time_limit = package_util.get_time_limit(test, lang, self.args) + test_memory_limit = package_util.get_memory_limit(test, lang, self.args) + all_cache_files[name].tests[self.test_md5sums[test.in_file.filename]] = CacheTest( time_limit=test_time_limit, memory_limit=test_memory_limit, time_tool=self.timetool_name, @@ -474,8 +475,8 @@ def run_solutions(self, compiled_commands, names, solutions, executables_dir): self.cpus, self.args.hide_memory, self.config, self.contest, self.args)[0])) # Write cache files. - for solution, cache_data in all_cache_files.items(): - cache_data.save(os.path.join(os.getcwd(), "prog", solution)) + for name, cache_data in all_cache_files.items(): + cache_data.save(os.path.join(os.getcwd(), "prog", name)) if keyboard_interrupt: util.exit_with_error("Stopped due to keyboard interrupt.") @@ -487,9 +488,9 @@ def compile_and_run(self, solutions): for i in range(len(solutions)): if not compilation_results[i]: self.failed_compilations.append(solutions[i]) - executables = [paths.get_executables_path(package_util.get_executable(solution)) for solution in solutions] + executables = [paths.get_executables_path(package_util.get_executable(solution.filename)) for solution in solutions] compiled_commands = zip(solutions, executables, compilation_results) - names = solutions + names = [solution.filename for solution in solutions] return self.run_solutions(compiled_commands, names, solutions, paths.get_executables_path()) def convert_status_to_string(self, dictionary): @@ -512,15 +513,15 @@ def get_whole_groups(self): Returns a list of groups for which all tests were run. 
""" group_sizes = {} - for test in package_util.get_tests(self.ID): - group = package_util.get_group(test, self.ID) + for test in package_util.get_tests(): + group = int(test.group) if group not in group_sizes: group_sizes[group] = 0 group_sizes[group] += 1 run_group_sizes = {} for test in self.tests: - group = package_util.get_group(test, self.ID) + group = int(test.group) if group not in run_group_sizes: run_group_sizes[group] = 0 run_group_sizes[group] += 1 @@ -757,7 +758,7 @@ def set_constants(self): self.SOLUTIONS_RE = package_util.get_solutions_re(self.ID) def validate_arguments(self, args): - compilers = compiler.verify_compilers(args, package_util.get_solutions(self.ID, None)) + compilers = compiler.verify_compilers(args, [solution.path for solution in package_util.get_solutions()]) def use_sio2jail(): timetool_path = None @@ -816,7 +817,7 @@ def exit(self): cnt=len(self.failed_compilations), letter='' if len(self.failed_compilations) == 1 else 's')) def set_scores(self): - self.groups = self.get_groups(self.tests) + self.groups = package_util.get_groups(self.tests) self.scores = collections.defaultdict(int) if 'scores' not in self.config.keys(): @@ -833,35 +834,33 @@ def set_scores(self): self.possible_score = self.contest.get_possible_score(self.groups, self.scores) - def get_valid_input_files(self): + def get_valid_tests(self): """ Returns list of input files that have corresponding output file. """ - output_tests = glob.glob(os.path.join(os.getcwd(), "out", "*.out")) - output_tests_ids = [package_util.extract_test_id(test, self.ID) for test in output_tests] - valid_input_files = [] - for test in self.tests: - if package_util.extract_test_id(test, self.ID) in output_tests_ids: - valid_input_files.append(test) - return valid_input_files + valid_tests = [] + for test in SIO3Package().get_tests_with_inputs(self.tests): + if test.in_file and test.out_file: + valid_tests.append(test) + return valid_tests def validate_existence_of_outputs(self): """ Checks if all input files have corresponding output files. """ - valid_input_files = self.get_valid_input_files() + valid_input_files = self.get_valid_tests() if len(valid_input_files) != len(self.tests): missing_tests = list(set(self.tests) - set(valid_input_files)) - missing_tests.sort() + missing_tests = sort_tests(missing_tests) print(util.warning('Missing output files for tests: ' + ', '.join( - [self.extract_file_name(test) for test in missing_tests]))) + [(test.in_file or test.out_file).filename for test in missing_tests]))) if not self.args.allow_no_outputs: util.exit_with_error('There are tests without outputs. \n' 'Run outgen to fix this issue or add the --no-outputs flag to ignore the issue.') print(util.warning('Running only on tests with output files.')) self.tests = valid_input_files - self.groups = self.get_groups(self.tests) + self.groups = package_util.get_groups(self.tests) if len(self.groups) < 1: util.exit_with_error('No tests with valid outputs.') @@ -871,9 +870,9 @@ def check_are_any_tests_to_run(self): if all input files have corresponding output files. 
""" if len(self.tests) > 0: - print(util.bold('Tests that will be run:'), ' '.join([self.extract_file_name(test) for test in self.tests])) + print(util.bold('Tests that will be run:'), ' '.join([test.test_name for test in self.tests])) - example_tests = [test for test in self.tests if self.get_group(test) == 0] + example_tests = [test for test in self.tests if int(test.group) == 0] if len(example_tests) == len(self.tests): print(util.warning('Running only on example tests.')) @@ -882,7 +881,7 @@ def check_are_any_tests_to_run(self): else: util.exit_with_error('There are no tests to run.') - def check_errors(self, results: Dict[str, Dict[str, Dict[str, ExecutionResult]]]): + def check_errors(self, results: Dict[LocalFile, Dict[str, Dict[Test, ExecutionResult]]]): """ Checks if there were any errors during execution and exits if there were. :param results: Dictionary of results. @@ -894,7 +893,7 @@ def check_errors(self, results: Dict[str, Dict[str, Dict[str, ExecutionResult]]] for group in results[solution]: for test in results[solution][group]: if results[solution][group][test].Error is not None: - error_msg += (f'Solution {solution} had an error on test {test}: ' + error_msg += (f'Solution {solution} had an error on test {test.test_id}: ' f'{results[solution][group][test].Error}') if results[solution][group][test].Stderr != ['']: error_msg += f' Stderr:\n{results[solution][group][test].Stderr}' @@ -936,7 +935,6 @@ def run(self, args): args = util.init_package_command(args) self.set_constants() - package_util.validate_test_names(self.ID) self.args = args self.config = package_util.get_config() try: @@ -959,23 +957,24 @@ def run(self, args): self.set_task_type(self.timetool_name, self.timetool_path) self.compile_additional_files() - lib = package_util.get_files_matching_pattern(self.ID, f'{self.ID}lib.*') + lib = package_util.get_files_matching_pattern(f'{self.ID}lib.*') self.has_lib = len(lib) != 0 - self.tests = package_util.get_tests(self.ID, self.args.tests) - self.test_md5sums = {os.path.basename(test): util.get_file_md5(test) for test in self.tests} + self.tests = package_util.get_tests(self.args.tests) + self.test_md5sums = {os.path.basename(test.in_file.path): util.get_file_md5(test.in_file.path) for test in self.tests if test.in_file} self.check_are_any_tests_to_run() self.set_scores() self.failed_compilations = [] - solutions = package_util.get_solutions(self.ID, self.args.solutions) + solutions = package_util.get_solutions(self.args.solutions) util.change_stack_size_to_unlimited() for solution in solutions: - lang = package_util.get_file_lang(solution) + lang = package_util.get_file_lang(solution.path) for test in self.tests: # The functions will exit if the limits are not set - _ = package_util.get_time_limit(test, self.config, lang, self.ID, self.args) - _ = package_util.get_memory_limit(test, self.config, lang, self.ID, self.args) + _ = package_util.get_time_limit(test, lang, self.args) + _ = package_util.get_memory_limit(test, lang, self.args) + results, all_results = self.compile_and_run(solutions) self.check_errors(all_results) diff --git a/src/sinol_make/commands/verify/__init__.py b/src/sinol_make/commands/verify/__init__.py index 76c4b4cc..d84f979c 100644 --- a/src/sinol_make/commands/verify/__init__.py +++ b/src/sinol_make/commands/verify/__init__.py @@ -154,7 +154,7 @@ def run(self, args: argparse.Namespace): print(util.bold(' Generating tests '.center(util.get_terminal_size()[1], '='))) gen = GenCommand() gen.run(self.prepare_args(gen)) - 
self.verify_scores(package_util.get_groups(package_util.get_all_inputs(self.task_id), self.task_id)) + self.verify_scores(package_util.get_groups()) # Generate problem statements print(util.bold(' Generating problem statements '.center(util.get_terminal_size()[1], '='))) @@ -162,7 +162,7 @@ def run(self, args: argparse.Namespace): doc.run(self.prepare_args(doc)) # Run inwer - if inwer_util.get_inwer_path(self.task_id) is None: + if inwer_util.get_inwer_path() is None: print(util.warning("Package doesn't have inwer.")) else: print(util.bold(' Running inwer '.center(util.get_terminal_size()[1], '='))) diff --git a/src/sinol_make/contest_types/__init__.py b/src/sinol_make/contest_types/__init__.py index 8f3a72a6..75b43d02 100644 --- a/src/sinol_make/contest_types/__init__.py +++ b/src/sinol_make/contest_types/__init__.py @@ -2,14 +2,14 @@ from sinol_make.contest_types.icpc import ICPCContest from sinol_make.contest_types.oi import OIContest from sinol_make.contest_types.oij import OIJContest +from sinol_make.helpers import package_util from sinol_make.helpers.func_cache import cache_result -from sinol_make.helpers.package_util import get_config from sinol_make.interfaces.Errors import UnknownContestType @cache_result(cwd=True) def get_contest_type(): - config = get_config() + config = package_util.get_config() contest_type = config.get("sinol_contest_type", "default").lower() if contest_type == "default": diff --git a/src/sinol_make/contest_types/default.py b/src/sinol_make/contest_types/default.py index 35108449..efe935f5 100644 --- a/src/sinol_make/contest_types/default.py +++ b/src/sinol_make/contest_types/default.py @@ -109,7 +109,7 @@ def get_global_score(self, groups_scores: Dict[int, Dict], global_max_score) -> def verify_config(self): """ - Used for verifing contest specific config.yml settings + Used for verifying contest specific config.yml settings """ pass diff --git a/src/sinol_make/contest_types/oi.py b/src/sinol_make/contest_types/oi.py index e0520741..2fa7eeff 100644 --- a/src/sinol_make/contest_types/oi.py +++ b/src/sinol_make/contest_types/oi.py @@ -2,6 +2,7 @@ from sinol_make import util from sinol_make.helpers import package_util +from sinol_make.sio3pack.package import SIO3Package from sinol_make.structs.status_structs import ExecutionResult from sinol_make.contest_types.default import DefaultContest @@ -52,3 +53,20 @@ def allow_per_test_limits(self): def verify_tests_order(self): return True + + def verify_config(self): + """ + Checks if all tests in groups have the same time limits, unless + `sinol_undocumented_test_limits` is set to True in config. + """ + config = package_util.get_config() + if 'sinol_undocumented_test_limits' in config and config['sinol_undocumented_test_limits']: + return + + time_limits = {} + for test in SIO3Package().tests: + tl = SIO3Package().get_time_limit_for_test(test, "cpp") + if test.group in time_limits and time_limits[test.group] != tl: + util.exit_with_error(f"{test.test_name}: Specifying limit for a single test is not allowed for OI packages.") + if test.group not in time_limits: + time_limits[test.group] = tl diff --git a/src/sinol_make/helpers/cache.py b/src/sinol_make/helpers/cache.py index 5c3923eb..d6a2b385 100644 --- a/src/sinol_make/helpers/cache.py +++ b/src/sinol_make/helpers/cache.py @@ -163,17 +163,16 @@ def has_file_changed(file_path: str) -> bool: return True -def check_correct_solution(task_id: str): +def check_correct_solution(): """ Checks if correct solution has changed. If it did, removes cache for input files. 
-    :param task_id: Task id """ try: -        solution = package_util.get_correct_solution(task_id) +        solution = package_util.get_correct_solution() except FileNotFoundError: return -    if has_file_changed(solution) and os.path.exists(os.path.join(os.getcwd(), 'in', '.md5sums')): +    if has_file_changed(solution.path) and os.path.exists(os.path.join(os.getcwd(), 'in', '.md5sums')): os.unlink(os.path.join(os.getcwd(), 'in', '.md5sums')) diff --git a/src/sinol_make/helpers/package_util.py b/src/sinol_make/helpers/package_util.py index f9d46d35..e6ffad62 100644 --- a/src/sinol_make/helpers/package_util.py +++ b/src/sinol_make/helpers/package_util.py @@ -1,66 +1,30 @@ import os import re -import yaml -import glob import fnmatch import multiprocessing as mp -from enum import Enum -from typing import List, Union, Dict, Any, Tuple, Type +from typing import List, Union, Tuple, Type -from sinol_make.helpers.func_cache import cache_result -from sinol_make import util, contest_types +from sio3pack.files import LocalFile +from sio3pack.test import Test + +from sinol_make.commands.inwer.inwer_util import sort_tests +from sinol_make import util from sinol_make.helpers import paths from sinol_make.task_type import BaseTaskType +from sinol_make.sio3pack.package import SIO3Package -@cache_result(cwd=True) def get_task_id() -> str: -    config = get_config() -    if "sinol_task_id" in config: -        return config["sinol_task_id"] -    else: -        print(util.warning("sinol_task_id not specified in config.yml. Using task id from directory name.")) -        task_id = os.path.split(os.getcwd())[-1] -        if len(task_id) == 3: -            return task_id -        else: -            util.exit_with_error("Invalid task id. Task id should be 3 characters long.") - - -def extract_test_id(test_path, task_id): -    """ -    Extracts test group and number from test path. -    For example for test abc1a.in it returns 1a. -    :param test_path: Path to test file. -    :param task_id: Task id. -    :return: Test group and number. -    """ -    return os.path.split(os.path.splitext(test_path)[0])[1][len(task_id):] +    return SIO3Package().short_name  - -def get_group(test_path, task_id): -    if extract_test_id(test_path, task_id).endswith("ocen"): -        return 0 -    return int("".join(re.search(r'\d+', extract_test_id(test_path, task_id)).group())) - - -def get_groups(tests, task_id): -    return sorted(list(set([get_group(test, task_id) for test in tests]))) - - -def get_test_key(test, task_id): -    return get_group(test, task_id), test +def get_groups(tests: List[Test] = None): +    if tests is None: +        tests = SIO3Package().tests +    return sorted(list(set([int(test.group) for test in tests])))   def get_config(): -    try: -        with open(os.path.join(os.getcwd(), "config.yml"), "r") as config_file: -            return yaml.load(config_file, Loader=yaml.FullLoader) or {} -    except FileNotFoundError: -        # Potentially redundant with util:exit_if_not_package -        util.exit_with_error("You are not in a package directory (couldn't find config.yml in current directory).") -    except yaml.YAMLError as e: -        util.exit_with_error("config.yml is not a valid YAML. 
Fix it before continuing:\n" + str(e)) +    return SIO3Package().config  def get_solutions_re(task_id: str) -> re.Pattern: @@ -71,9 +35,9 @@ return re.compile(r"^%s[bs]?[0-9]*(_.*)?\.(c|cpp|cc|py)$" % task_id) -def get_executable_key(executable, task_id): -    name = get_file_name(executable) -    task_id_len = len(task_id) +def get_executable_key(path_to_exe): +    name = os.path.basename(path_to_exe) +    task_id_len = len(get_task_id()) value = [0, 0] if name[task_id_len] == 's': value[0] = 1 @@ -94,181 +58,135 @@ return tuple(value) -def get_files_matching(patterns: List[str], directory: str) -> List[str]: +def get_matching_tests(tests: List[Test], patterns: List[str]) -> List[Test]: """ -    Returns list of files matching given patterns. -    If pattern is absolute path, it is returned as is. -    If pattern is relative path, it is searched in current directory and in directory specified as argument. +    Returns list of tests matching given path patterns. +    :param tests: List of all tests available. :param patterns: List of patterns to match. -    :param directory: Directory to search in. -    :return: List of files matching given patterns. -    """ -    files_matching = set() -    for solution in patterns: -        if os.path.isabs(solution): -            files_matching.add(solution) -        else: -            # If solution already has `/` prefix: -            files_matching.update(glob.glob(os.path.join(os.getcwd(), solution))) -            # If solution does not have `/` prefix: -            files_matching.update(glob.glob(os.path.join(os.getcwd(), directory, solution))) - -    return list(files_matching) - - -def get_tests(task_id: str, arg_tests: Union[List[str], None] = None) -> List[str]: +    :return: List of tests with paths matching given path patterns. +    """ +    matching_tests = set() +    for pattern in patterns: +        matched_to_pattern = set() +        for test in tests: +            # if absolute path is given, match it directly +            if os.path.isabs(pattern) and fnmatch.fnmatch(test.in_file.path, pattern): +                matched_to_pattern.add(test) +            else: +                # if relative path is given, match it with current working directory +                pattern_relative = os.path.join(os.getcwd(), pattern) +                if fnmatch.fnmatch(test.in_file.path, pattern_relative): +                    matched_to_pattern.add(test) +                else: +                    # if pattern is given, match it with tests filename +                    if fnmatch.fnmatch(os.path.basename(test.in_file.path), pattern): +                        matched_to_pattern.add(test) +        if len(matched_to_pattern) == 0: +            print(util.warning("Test %s does not exist" % pattern)) +        matching_tests.update(matched_to_pattern) + +    return list(matching_tests) + +def get_matching_files(files: List[LocalFile], patterns: List[str]) -> List[LocalFile]: +    """ +    Returns list of files matching given path patterns. +    :param files: List of all files available. +    :param patterns: List of patterns to match. +    :return: List of files with paths matching given path patterns. 
+    """
+    matching_files = set()
+    for pattern in patterns:
+        matched_to_pattern = set()
+        for file in files:
+            # if absolute path is given, match it directly
+            if os.path.isabs(pattern) and fnmatch.fnmatch(file.path, pattern):
+                matched_to_pattern.add(file)
+            else:
+                # if relative path is given, match it with current working directory
+                pattern_relative = os.path.join(os.getcwd(), pattern)
+                if fnmatch.fnmatch(file.path, pattern_relative):
+                    matched_to_pattern.add(file)
+                else:
+                    # if pattern is given, match it with filename
+                    if fnmatch.fnmatch(os.path.basename(file.path), pattern):
+                        matched_to_pattern.add(file)
+        if len(matched_to_pattern) == 0:
+            util.exit_with_error("File %s does not exist" % pattern)
+        matching_files.update(matched_to_pattern)
+
+    return list(matching_files)
+
+
+def get_tests(arg_tests: Union[List[str], None] = None) -> List[Test]:  # NOTE: previously returned .in paths
     """
     Returns list of tests to run.
-    :param task_id: Task id.
     :param arg_tests: Tests specified in command line arguments. If None, all tests are returned.
     :return: List of tests to run.
     """
+    tests = SIO3Package().tests
     if arg_tests is None:
-        all_tests = ["in/%s" % test for test in os.listdir("in/")
-                     if test[-3:] == ".in"]
-        return sorted(all_tests, key=lambda test: get_test_key(test, task_id))
+        return sort_tests(tests)
     else:
-        existing_tests = []
-        for test in get_files_matching(arg_tests, "in"):
-            if not os.path.isfile(test):
-                util.exit_with_error("Test %s does not exist" % test)
-            if os.path.splitext(test)[1] == ".in":
-                existing_tests.append(os.path.join("in", os.path.basename(test)))
-        return sorted(existing_tests, key=lambda test: get_test_key(test, task_id))
+        matching_tests = get_matching_tests(tests, arg_tests)
+        return sort_tests(matching_tests)
 
 
-def get_solutions(task_id: str, args_solutions: Union[List[str], None] = None) -> List[str]:
+def get_solutions(args_solutions: Union[List[str], None] = None) -> List[LocalFile]:
     """
     Returns list of solutions to run.
-    :param task_id: Task id.
     :param args_solutions: Solutions specified in command line arguments. If None, all solutions are returned.
-    :return: List of solutions to run.
+    :return: List of solution files to run.
     """
-    solutions_re = get_solutions_re(task_id)
+    solutions = [s.get('file') for s in SIO3Package().model_solutions]
     if args_solutions is None:
-        solutions = [solution for solution in os.listdir("prog/")
-                     if solutions_re.match(solution)]
-        return sorted(solutions, key=lambda solution: get_executable_key(solution, task_id))
+        return sorted(solutions, key=lambda solution: get_executable_key(solution.path))
     else:
-        solutions = []
-        for solution in get_files_matching(args_solutions, "prog"):
-            if not os.path.isfile(solution):
-                util.exit_with_error("Solution %s does not exist" % solution)
-            if solutions_re.match(os.path.basename(solution)) is not None:
-                solutions.append(os.path.basename(solution))
-
-        return sorted(solutions, key=lambda solution: get_executable_key(solution, task_id))
+        matching_solutions = get_matching_files(solutions, args_solutions)
+        return sorted(matching_solutions, key=lambda solution: get_executable_key(solution.path))
 
 
-def get_correct_solution(task_id: str) -> str:
+def get_correct_solution() -> LocalFile:
     """
     Returns path to correct solution.
-    :param task_id: Task id.
     :return: Path to correct solution.
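+    :raises FileNotFoundError: if no solution matches `<task_id>.*`.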
""" - correct_solution = get_solutions(task_id, [f'{task_id}.*']) + task_id = get_task_id() + correct_solution = get_solutions([f'{task_id}.*']) if len(correct_solution) == 0: raise FileNotFoundError("Correct solution not found.") - return os.path.join(os.getcwd(), "prog", correct_solution[0]) - - -def get_file_name(file_path): - return os.path.split(file_path)[1] + return correct_solution[0] def get_file_name_without_extension(file_path): - return os.path.splitext(get_file_name(file_path))[0] + return os.path.splitext(os.path.basename(file_path))[0] def get_executable(file_path): return os.path.basename(file_path) + ".e" -def get_executable_path(solution: str) -> str: - """ - Returns path to compiled executable for given solution. - """ - return paths.get_executables_path(get_executable(solution)) - - def get_file_lang(file_path): return os.path.splitext(file_path)[1][1:].lower() -class LimitTypes(Enum): - TIME_LIMIT = 1 - MEMORY_LIMIT = 2 - - -def _get_limit_from_dict(dict: Dict[str, Any], limit_type: LimitTypes, test_id: str, test_group: str, test_path: str, - allow_test_limit: bool = False): - if limit_type == LimitTypes.TIME_LIMIT: - limit_name = "time_limit" - plural_limit_name = "time_limits" - elif limit_type == LimitTypes.MEMORY_LIMIT: - limit_name = "memory_limit" - plural_limit_name = "memory_limits" - else: - raise ValueError("Invalid limit type.") - - if plural_limit_name in dict: - if test_id in dict[plural_limit_name] and test_id != "0": - if allow_test_limit: - return dict[plural_limit_name][test_id] - else: - util.exit_with_error( - f'{os.path.basename(test_path)}: Specifying limit for a single test is not allowed in sinol-make.') - elif test_group in dict[plural_limit_name]: - return dict[plural_limit_name][test_group] - if limit_name in dict: - return dict[limit_name] - else: - return None - - -def _get_limit(limit_type: LimitTypes, test_path: str, config: Dict[str, Any], lang: str, task_id: str): - test_id = extract_test_id(test_path, task_id) - test_group = str(get_group(test_path, task_id)) - contest_type = contest_types.get_contest_type() - allow_test_limit = config.get("sinol_undocumented_test_limits", False) or contest_type.allow_per_test_limits() - global_limit = _get_limit_from_dict(config, limit_type, test_id, test_group, test_path, allow_test_limit) - override_limits_dict = config.get("override_limits", {}).get(lang, {}) - overriden_limit = _get_limit_from_dict(override_limits_dict, limit_type, test_id, test_group, test_path, - allow_test_limit) - if overriden_limit is not None: - return overriden_limit - else: - if global_limit is not None: - return global_limit - else: - if limit_type == LimitTypes.TIME_LIMIT: - util.exit_with_error( - f'Time limit was not defined for test {os.path.basename(test_path)} in config.yml.') - elif limit_type == LimitTypes.MEMORY_LIMIT: - util.exit_with_error( - f'Memory limit was not defined for test {os.path.basename(test_path)} in config.yml.') - - -def get_time_limit(test_path, config, lang, task_id, args=None): +def get_time_limit(test: Test, lang: str, args=None): """ Returns time limit for given test. 
""" if args is not None and hasattr(args, "tl") and args.tl is not None: return args.tl * 1000 - str_config = util.stringify_keys(config) - return _get_limit(LimitTypes.TIME_LIMIT, test_path, str_config, lang, task_id) + return SIO3Package().get_time_limit_for_test(test, lang) -def get_memory_limit(test_path, config, lang, task_id, args=None): +def get_memory_limit(test: Test, lang: str, args=None): """ Returns memory limit for given test. """ if args is not None and hasattr(args, "ml") and args.ml is not None: return int(args.ml * 1024) - str_config = util.stringify_keys(config) - return _get_limit(LimitTypes.MEMORY_LIMIT, test_path, str_config, lang, task_id) + return SIO3Package().get_memory_limit_for_test(test, lang) def get_in_tests_re(task_id: str) -> re.Pattern: @@ -279,60 +198,31 @@ def get_out_tests_re(task_id: str) -> re.Pattern: return re.compile(r'^%s(([0-9]+)([a-z]?[a-z0-9]*))\.out$' % re.escape(task_id)) -def validate_test_names(task_id): - """ - Checks if all files in the package have valid names. - """ - - def get_invalid_files(path, pattern): - invalid_files = [] - for file in glob.glob(os.path.join(os.getcwd(), path)): - if not pattern.match(os.path.basename(file)): - invalid_files.append(os.path.basename(file)) - return invalid_files - - in_test_re = get_in_tests_re(task_id) - invalid_in_tests = get_invalid_files(os.path.join("in", "*.in"), in_test_re) - if len(invalid_in_tests) > 0: - util.exit_with_error(f'Input tests with invalid names: {", ".join(invalid_in_tests)}.') - - out_test_re = get_out_tests_re(task_id) - invalid_out_tests = get_invalid_files(os.path.join("out", "*.out"), out_test_re) - if len(invalid_out_tests) > 0: - util.exit_with_error(f'Output tests with invalid names: {", ".join(invalid_out_tests)}.') - - -def get_all_code_files(task_id: str) -> List[str]: +def get_all_code_files() -> List[LocalFile]: """ Returns all code files in package. - :param task_id: Task id. :return: List of code files. """ - result = glob.glob(os.path.join(os.getcwd(), "prog", f"{task_id}ingen.sh")) - for ext in ["c", "cpp", "py", "java"]: - result += glob.glob(os.path.join(os.getcwd(), f"prog/{task_id}*.{ext}")) - return result + return [sol["file"] for sol in SIO3Package().model_solutions] + SIO3Package().additional_files -def get_files_matching_pattern(task_id: str, pattern: str) -> List[str]: +def get_files_matching_pattern(pattern: str) -> List[LocalFile]: """ Returns all files in package matching given pattern. - :param task_id: Task id. :param pattern: Pattern to match. :return: List of files matching the pattern. """ - all_files = get_all_code_files(task_id) - return [file for file in all_files if fnmatch.fnmatch(os.path.basename(file), pattern)] + all_files = get_all_code_files() + return [file for file in all_files if fnmatch.fnmatch(os.path.basename(file.path), pattern)] -def any_files_matching_pattern(task_id: str, pattern: str) -> bool: +def any_files_matching_pattern(pattern: str) -> bool: """ Returns True if any file in package matches given pattern. - :param task_id: Task id. :param pattern: Pattern to match. :return: True if any file in package matches given pattern. 
""" - return len(get_files_matching_pattern(task_id, pattern)) > 0 + return len(get_files_matching_pattern(pattern)) > 0 def check_if_contest_type_changed(contest_type): @@ -412,13 +302,8 @@ def validate_tests(tests: List[str], cpus: int, type: str = 'input'): print(util.info(f'All {type} tests are valid!')) -def get_all_inputs(task_id): - in_test_re = get_in_tests_re(task_id) - inputs = [] - for file in glob.glob(os.path.join(os.getcwd(), "in", "*.in")): - if in_test_re.match(os.path.basename(file)): - inputs.append(file) - return inputs +def get_all_inputs() -> List[LocalFile]: + return [file.in_file for file in SIO3Package().get_tests()] def get_task_type_cls() -> Type[BaseTaskType]: @@ -430,8 +315,14 @@ def get_task_type(timetool_name, timetool_path) -> BaseTaskType: return task_type_cls(timetool_name, timetool_path) -def get_out_from_in(test) -> str: +def get_out_from_in(test) -> str: #TODO not needed? """ Returns path to output file corresponding to given input file. """ return os.path.join("out", os.path.splitext(os.path.basename(test))[0] + ".out") + +def reload_tests(): + """ + Reloads tests from package. + """ + SIO3Package().reload_tests() diff --git a/src/sinol_make/sio2jail/__init__.py b/src/sinol_make/sio2jail/__init__.py index eadf770d..f512d0ae 100644 --- a/src/sinol_make/sio2jail/__init__.py +++ b/src/sinol_make/sio2jail/__init__.py @@ -11,7 +11,12 @@ from sinol_make.structs.status_structs import Status def sio2jail_supported(): - return util.is_linux() + if not util.is_linux(): + return False + try: + check_perf_counters_enabled() + except: + return False def get_default_sio2jail_path(): diff --git a/src/sinol_make/sio3pack/__init__.py b/src/sinol_make/sio3pack/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/sinol_make/sio3pack/package.py b/src/sinol_make/sio3pack/package.py new file mode 100644 index 00000000..b3aa9880 --- /dev/null +++ b/src/sinol_make/sio3pack/package.py @@ -0,0 +1,26 @@ +import os + +from sio3pack.packages.sinolpack import Sinolpack +from sio3pack.packages.package import Package +from sio3pack.files import LocalFile + + +def _get_local_file(): + return LocalFile(os.getcwd()) + + +class SIO3Package: + """ + Singleton class for package base class. 
+ """ + + _instance = None + + def __new__(cls) -> Sinolpack: + if cls._instance is None: + cls._instance = Package.from_file(_get_local_file()) + return cls._instance + + @classmethod + def reset(cls): + cls._instance = None diff --git a/src/sinol_make/structs/chkwer_structs.py b/src/sinol_make/structs/chkwer_structs.py index 2f339511..10e8908b 100644 --- a/src/sinol_make/structs/chkwer_structs.py +++ b/src/sinol_make/structs/chkwer_structs.py @@ -2,23 +2,19 @@ from dataclasses import dataclass from typing import Dict -from sinol_make.helpers import package_util +from sio3pack.test import Test @dataclass class TestResult: - test_path: str - test_name: str - test_group: str + test: Test run: bool points: int ok: bool comment: str - def __init__(self, test_path, task_id): - self.test_path = test_path - self.test_name = os.path.split(test_path)[-1] - self.test_group = str(package_util.get_group(self.test_path, task_id)) + def __init__(self, test: Test): + self.test = test self.comment = "" self.points = 0 @@ -53,16 +49,14 @@ class TableData: @dataclass class ChkwerExecution: - in_test_path: str - in_test_name: str - out_test_path: str + test: Test checker_exe: str model_exe: str @dataclass class RunResult: - test_path: str + test: Test ok: bool points: int comment: str diff --git a/src/sinol_make/structs/inwer_structs.py b/src/sinol_make/structs/inwer_structs.py index 8329ba9c..631fb228 100644 --- a/src/sinol_make/structs/inwer_structs.py +++ b/src/sinol_make/structs/inwer_structs.py @@ -2,22 +2,20 @@ from dataclasses import dataclass from typing import Dict +from sio3pack.test import Test + from sinol_make.helpers import package_util @dataclass class TestResult: - test_path: str - test_name: str - test_group: str + test: Test verified: bool valid: bool output: str - def __init__(self, test_path, task_id): - self.test_path = test_path - self.test_name = os.path.split(test_path)[-1] - self.test_group = str(package_util.get_group(self.test_path, task_id)) + def __init__(self, test: Test): + self.test = test self.verified = False self.valid = False @@ -34,23 +32,19 @@ class TableData: Data used for printing table with verification results. """ - # Dictionary with test path as key and verification result as value. - results: Dict[str, TestResult] + # Dictionary with test as key and verification result as value. + results: Dict[Test, TestResult] # Number of executions finished i: int - # Task id - task_id: str - @dataclass class InwerExecution: - test_path: str - test_name: str + test: Test inwer_exe_path: str @dataclass class VerificationResult: - test_path: str + test: Test valid: bool output: str diff --git a/src/sinol_make/structs/run_structs.py b/src/sinol_make/structs/run_structs.py index 2488b1ee..99f505cd 100644 --- a/src/sinol_make/structs/run_structs.py +++ b/src/sinol_make/structs/run_structs.py @@ -1,17 +1,19 @@ from dataclasses import dataclass +from sio3pack.files import LocalFile +from sio3pack.test import Test @dataclass class ExecutionData: """ Represents data for execution of a solution on a specified test. 
""" - # Name of the solution - name: str + # The solution + solution: LocalFile # Filename of the executable executable: str # Filename of the test - test: str + test: Test # Time limit for this test in milliseconds time_limit: int # Memory limit in KB diff --git a/src/sinol_make/task_type/__init__.py b/src/sinol_make/task_type/__init__.py index 428be58c..ba02edda 100644 --- a/src/sinol_make/task_type/__init__.py +++ b/src/sinol_make/task_type/__init__.py @@ -97,10 +97,10 @@ def additional_files_to_compile(self) -> List[Tuple[str, str, str, bool, bool]]: """ ret = [] task_id = package_util.get_task_id() - checker = package_util.get_files_matching_pattern(task_id, f'{task_id}chk.*') + checker = package_util.get_files_matching_pattern(f'{task_id}chk.*') if len(checker) > 0: self.has_checker = True - checker = checker[0] + checker = checker[0].path checker_basename = os.path.basename(checker) self.checker_path = paths.get_executables_path(checker_basename + ".e") ret += [(checker, self.checker_path, "checker", True, True)] diff --git a/src/sinol_make/task_type/interactive.py b/src/sinol_make/task_type/interactive.py index 5e746cd1..4f1c6d59 100644 --- a/src/sinol_make/task_type/interactive.py +++ b/src/sinol_make/task_type/interactive.py @@ -81,9 +81,9 @@ def __init__(self, timetool, sio2jail_path): def additional_files_to_compile(self) -> List[Tuple[str, str, str, bool, bool]]: ret = [] task_id = package_util.get_task_id() - interactor = package_util.get_files_matching_pattern(task_id, f'{task_id}soc.*') + interactor = package_util.get_files_matching_pattern(f'{task_id}soc.*') if len(interactor) > 0: - interactor = interactor[0] + interactor = interactor[0].path interactor_basename = os.path.basename(interactor) self.interactor = paths.get_executables_path(interactor_basename + ".e") ret += [(interactor, self.interactor, "interactor", True, True)] diff --git a/src/sinol_make/util.py b/src/sinol_make/util.py index fbb058bf..3a9eb096 100644 --- a/src/sinol_make/util.py +++ b/src/sinol_make/util.py @@ -12,7 +12,7 @@ from sinol_make.helpers import paths, cache from sinol_make.helpers.func_cache import cache_result from sinol_make.structs.status_structs import Status - +from sinol_make.sio3pack.package import SIO3Package @cache_result() def get_commands(): @@ -55,12 +55,20 @@ def find_and_chdir_package(): return False +def instantiate_package(): + """ + Function to instantiate package from current directory. + """ + SIO3Package() + + def init_package_command(args): """ Updates arguments with contest specific overrides for commands that require being in package directory """ exit_if_not_package() + instantiate_package() contest = get_contest_type() contest.verify_config() return contest.argument_overrides(args) @@ -102,6 +110,7 @@ def save_config(config): "time_limits", "override_limits", "scores", + "extra_files", { "key": "extra_compilation_files", "default_flow_style": None @@ -137,6 +146,7 @@ def save_config(config): print(warning("Found unknown fields in config.yml: " + ", ".join([str(x) for x in config]))) # All remaining non-considered fields are appended to the end of the file. 
yaml.dump(config, config_file, allow_unicode=True) + SIO3Package().reload_config() def import_importlib_resources(): diff --git a/tests/commands/export/test_unit.py b/tests/commands/export/test_unit.py index 8a170a8b..e07d7105 100644 --- a/tests/commands/export/test_unit.py +++ b/tests/commands/export/test_unit.py @@ -2,12 +2,14 @@ import shutil import tempfile +from sinol_make.sio3pack.package import SIO3Package from tests import util from .util import * def _create_package(tmpdir, path): package_path = os.path.join(tmpdir, os.path.basename(path)) + SIO3Package.reset() shutil.copytree(path, package_path) os.chdir(package_path) command = get_command() @@ -71,6 +73,7 @@ def test_create_makefile_in(): """ def _create_package(path): + SIO3Package.reset() os.chdir(path) with open(os.path.join(os.getcwd(), "config.yml"), "r") as config_file: config = yaml.load(config_file, Loader=yaml.FullLoader) diff --git a/tests/commands/gen/test_integration.py b/tests/commands/gen/test_integration.py index bc47eb1b..bd2aba87 100644 --- a/tests/commands/gen/test_integration.py +++ b/tests/commands/gen/test_integration.py @@ -7,7 +7,7 @@ from sinol_make import util as sm_util from sinol_make.commands.gen import Command from sinol_make.commands.ingen import Command as IngenCommand -from sinol_make.commands.ingen.ingen_util import get_ingen +from sinol_make.commands.ingen.ingen_util import get_ingen_path from sinol_make.commands.outgen import Command as OutgenCommand from sinol_make.commands.run import Command as RunCommand from sinol_make.helpers import package_util, paths, cache @@ -58,9 +58,8 @@ def test_correct_inputs(capsys, create_package): """ Test `ingen` command with all unchanged inputs. """ - task_id = package_util.get_task_id() - correct_solution = package_util.get_correct_solution(task_id) - cache.save_compiled(correct_solution, "exe", "default", False) + correct_solution = package_util.get_correct_solution() + cache.save_compiled(correct_solution.path, "exe", "default", False) simple_run() md5_sums = get_md5_sums(create_package) @@ -77,9 +76,8 @@ def test_changed_inputs(capsys, create_package): """ Test `ingen` command with changed inputs. 
""" - task_id = package_util.get_task_id() - correct_solution = package_util.get_correct_solution(task_id) - cache.save_compiled(correct_solution, "exe", "default", False) + correct_solution = package_util.get_correct_solution() + cache.save_compiled(correct_solution.path, "exe", "default", False) simple_run() md5_sums = get_md5_sums(create_package) correct_md5 = md5_sums.copy() @@ -109,7 +107,7 @@ def test_shell_ingen_unchanged(create_package): """ package_path = create_package task_id = package_util.get_task_id() - shell_ingen_path = get_ingen(task_id) + shell_ingen_path = get_ingen_path() assert os.path.splitext(shell_ingen_path)[1] == ".sh" edited_time = os.path.getmtime(shell_ingen_path) simple_run() @@ -156,6 +154,7 @@ def test_only_outputs_flag(create_package): in1 = ins[0] for file in ins[1:]: os.unlink(file) + package_util.reload_tests() assert len(outs) == 0 def in_to_out(file): return os.path.join(create_package, "out", os.path.basename(file).replace(".in", ".out")) @@ -180,6 +179,7 @@ def test_outgen(create_package): in1 = ins[0] for file in ins[1:]: os.unlink(file) + package_util.reload_tests() assert len(outs) == 0 def in_to_out(file): return os.path.join(create_package, "out", os.path.basename(file).replace(".in", ".out")) @@ -218,7 +218,6 @@ def test_correct_solution_changed(create_package): Test if `.md5sums` is deleted when correct solution is changed. """ package_path = create_package - task_id = package_util.get_task_id() md5sums = os.path.join(package_path, "in", ".md5sums") simple_run() assert os.path.exists(md5sums) @@ -226,10 +225,10 @@ def test_correct_solution_changed(create_package): for output in glob.glob(os.path.join(package_path, "out", "*.out")): outputs[os.path.basename(output)] = sm_util.get_file_md5(output) - solution = package_util.get_correct_solution(task_id) - with open(os.path.join(solution), "w") as f: + solution = package_util.get_correct_solution() + with open(os.path.join(solution.path), "w") as f: f.write("int main() {}") - cache.check_correct_solution(task_id) + cache.check_correct_solution() assert not os.path.exists(md5sums) simple_run() assert os.path.exists(md5sums) diff --git a/tests/commands/gen/test_unit.py b/tests/commands/gen/test_unit.py index 16ab4736..02861d73 100644 --- a/tests/commands/gen/test_unit.py +++ b/tests/commands/gen/test_unit.py @@ -1,7 +1,7 @@ import glob -from sinol_make.commands.ingen.ingen_util import get_ingen, compile_ingen, run_ingen -from sinol_make.commands.outgen.outgen_util import get_correct_solution, compile_correct_solution, generate_output +from sinol_make.commands.ingen.ingen_util import get_ingen_path, compile_ingen, run_ingen +from sinol_make.commands.outgen.outgen_util import compile_correct_solution, generate_output from sinol_make.structs.gen_structs import OutputGenerationArguments from sinol_make.helpers import package_util, compiler from tests import util @@ -15,33 +15,35 @@ def test_get_ingen(): simple_package_path = util.get_simple_package_path() gen_package_path = util.get_shell_ingen_pack_path() with tempfile.TemporaryDirectory() as tmpdir: - shutil.copytree(simple_package_path, os.path.join(tmpdir, 'simple')) - os.chdir(os.path.join(tmpdir, 'simple')) + SIO3Package.reset() + shutil.copytree(simple_package_path, os.path.join(tmpdir, 'abc')) + os.chdir(os.path.join(tmpdir, 'abc')) - ingen_path = get_ingen("abc") + ingen_path = get_ingen_path() assert os.path.basename(ingen_path) == "abcingen.cpp" - ingen_path = get_ingen("abc", "prog/abcingen.cpp") + ingen_path = 
get_ingen_path("prog/abcingen.cpp")
         assert os.path.basename(ingen_path) == "abcingen.cpp"
         with pytest.raises(SystemExit) as e:
-            get_ingen("abc", "prog/abcingen.c")
+            get_ingen_path("prog/abcingen.c")
         assert e.type == SystemExit
         assert e.value.code == 1
 
+        SIO3Package.reset()
         shutil.copytree(gen_package_path, os.path.join(tmpdir, 'gen'))
         os.chdir(os.path.join(tmpdir, 'gen'))
-        ingen_path = get_ingen("gen")
+        ingen_path = get_ingen_path()
         assert os.path.basename(ingen_path) == "geningen.sh"
         with pytest.raises(SystemExit) as e:
-            get_ingen("gen", "prog/geningen.cpp")
+            get_ingen_path("prog/geningen.cpp")
         assert e.type == SystemExit
         assert e.value.code == 1
 
         os.rename("prog/gen_helper.cpp", "prog/geningen.cpp")
-        ingen_path = get_ingen("gen")
+        ingen_path = get_ingen_path()
         assert os.path.basename(ingen_path) == "geningen.sh"
 
@@ -50,8 +52,7 @@
     """
     Test compilation of ingen.
     """
-    task_id = package_util.get_task_id()
-    ingen_path = get_ingen(task_id)
+    ingen_path = get_ingen_path()
     args = compiler.get_default_compilers()
     executable = compile_ingen(ingen_path, args)
     assert os.path.exists(executable)
@@ -62,8 +63,7 @@
     """
     Test getting correct solution.
     """
-    task_id = package_util.get_task_id()
-    correct_solution_path = get_correct_solution(task_id)
+    correct_solution_path = package_util.get_correct_solution().path
     assert os.path.basename(correct_solution_path) == "abc.cpp"
 
@@ -72,8 +72,7 @@
     """
     Test compilation of correct solution.
     """
-    task_id = package_util.get_task_id()
-    correct_solution_path = get_correct_solution(task_id)
+    correct_solution_path = package_util.get_correct_solution().path
     args = compiler.get_default_compilers()
     executable = compile_correct_solution(correct_solution_path, args)
     assert os.path.exists(executable)
@@ -85,8 +84,7 @@
     Test running ingen.
     """
     package_path = create_package
-    task_id = package_util.get_task_id()
-    ingen_path = get_ingen(task_id)
+    ingen_path = get_ingen_path()
     args = compiler.get_default_compilers()
     executable = compile_ingen(ingen_path, args)
 
@@ -102,12 +100,11 @@
     Test generating outputs.
     """
     package_path = create_package
-    task_id = package_util.get_task_id()
-    ingen_path = get_ingen(task_id)
+    ingen_path = get_ingen_path()
     args = compiler.get_default_compilers()
     ingen_exe = compile_ingen(ingen_path, args)
 
-    correct_solution = get_correct_solution(task_id)
+    correct_solution = package_util.get_correct_solution().path
     correct_sol_exe = compile_correct_solution(correct_solution, args)
 
     run_ingen(ingen_exe)
@@ -121,8 +118,7 @@
     Test validating test contents.
     """
     package_path = create_package
-    task_id = package_util.get_task_id()
-    ingen_path = get_ingen(task_id)
+    ingen_path = get_ingen_path()
     args = compiler.get_default_compilers()
     ingen_exe = compile_ingen(ingen_path, args)
     run_ingen(ingen_exe)
diff --git a/tests/commands/inwer/test_unit.py b/tests/commands/inwer/test_unit.py
index cdb76a90..3bb52a50 100644
--- a/tests/commands/inwer/test_unit.py
+++ b/tests/commands/inwer/test_unit.py
@@ -7,15 +7,17 @@
 from sinol_make.commands.inwer import inwer_util, InwerExecution
 from sinol_make.commands.inwer import Command
+from sinol_make.sio3pack.package import SIO3Package
+from sio3pack.files import LocalFile
+from sio3pack.test import Test
 
 
 def test_get_inwer_path():
     """
     Test getting default and custom inwer.
""" + SIO3Package.reset() os.chdir(util.get_inwer_package_path()) - task_id = package_util.get_task_id() - assert inwer_util.get_inwer_path(task_id) is not None - assert inwer_util.get_inwer_path(task_id, 'prog/werinwer2.cpp') == os.path.join(os.getcwd(), 'prog', 'werinwer2.cpp') + assert inwer_util.get_inwer_path() is not None + assert inwer_util.get_inwer_path('prog/werinwer2.cpp') == os.path.join(os.getcwd(), 'prog', 'werinwer2.cpp') @pytest.mark.parametrize("create_package", [util.get_inwer_package_path()], indirect=True) @@ -24,7 +26,7 @@ def test_compile_inwer(create_package): Test compilation of inwer. """ task_id = package_util.get_task_id() - inwer_path = inwer_util.get_inwer_path(task_id) + inwer_path = inwer_util.get_inwer_path() args = compiler.get_default_compilers() executable = inwer_util.compile_inwer(inwer_path, args) assert os.path.exists(executable) @@ -49,8 +51,7 @@ def test_asserting_inwer(create_package): execution = InwerExecution( inwer_exe_path=executable, - test_name='wer2a.in', - test_path=os.path.join(os.getcwd(), 'in', 'wer2a.in'), + test=Test('wer2a', '2a', LocalFile(os.path.join(os.getcwd(), 'in', 'wer2a.in')), None, '2'), ) res = Command.verify_test(execution) @@ -63,51 +64,51 @@ def test_asserting_inwer(create_package): def test_tests_comparator(): for ti in ["abc", "long_task_id", ""]: - assert inwer_util.sort_tests([f"{ti}2a.in", f"{ti}1a.in"], ti) == [f"{ti}1a.in", f"{ti}2a.in"] - assert inwer_util.sort_tests([f"{ti}2a.in", f"{ti}1a.in", f"{ti}1b.in"], ti) == \ - [f"{ti}1a.in", f"{ti}1b.in", f"{ti}2a.in"] - assert inwer_util.sort_tests([f"{ti}2a.in", f"{ti}1a.in", f"{ti}1b.in", f"{ti}10a.in"], ti) == \ - [f"{ti}1a.in", f"{ti}1b.in", f"{ti}2a.in", f"{ti}10a.in"] - assert inwer_util.sort_tests([f"{ti}2a.in", f"{ti}1a.in", f"{ti}1b.in", f"{ti}10a.in", f"{ti}10b.in"], ti) == \ - [f"{ti}1a.in", f"{ti}1b.in", f"{ti}2a.in", f"{ti}10a.in", f"{ti}10b.in"] + assert [test.test_name for test in inwer_util.sort_tests(util.from_test_names(ti, [f"{ti}2a", f"{ti}1a"]))] == [f"{ti}1a", f"{ti}2a"] + assert [test.test_name for test in inwer_util.sort_tests(util.from_test_names(ti, [f"{ti}2a", f"{ti}1a", f"{ti}1b"]))] == \ + [f"{ti}1a", f"{ti}1b", f"{ti}2a"] + assert [test.test_name for test in inwer_util.sort_tests(util.from_test_names(ti, [f"{ti}2a", f"{ti}1a", f"{ti}1b", f"{ti}10a"]))] == \ + [f"{ti}1a", f"{ti}1b", f"{ti}2a", f"{ti}10a"] + assert [test.test_name for test in inwer_util.sort_tests(util.from_test_names(ti, [f"{ti}2a", f"{ti}1a", f"{ti}1b", f"{ti}10a", f"{ti}10b"]))] == \ + [f"{ti}1a", f"{ti}1b", f"{ti}2a", f"{ti}10a", f"{ti}10b"] def test_verify_tests_order(): command = Command() command.task_id = "abc" - command.tests = ["abc1ocen.in", "abc2ocen.in", "abc3ocen.in", - "abc1a.in", "abc1b.in", "abc1c.in", "abc1d.in", - "abc2z.in", "abc2aa.in", "abc2ab.in", "abc2ac.in"] + command.tests = util.from_test_names("abc", ["abc1ocen", "abc2ocen", "abc3ocen", + "abc1a", "abc1b", "abc1c", "abc1d", + "abc2z", "abc2aa", "abc2ab", "abc2ac"]) command.verify_tests_order() - command.tests.remove("abc2ocen.in") + command.tests = util.from_test_names("abc", ["abc1ocen", "abc3ocen", + "abc1a", "abc1b", "abc1c", "abc1d", + "abc2z", "abc2aa", "abc2ab", "abc2ac"]) with pytest.raises(SystemExit): command.verify_tests_order() - command.tests.append("abc2ocen.in") - command.tests.remove("abc1c.in") + command.tests = util.from_test_names("abc", ["abc1ocen", "abc2ocen", "abc3ocen", + "abc1a", "abc1b", "abc1d", + "abc2z", "abc2aa", "abc2ab", "abc2ac"]) with pytest.raises(SystemExit): 
command.verify_tests_order() - command.tests.append("abc1c.in") - command.tests.remove("abc2aa.in") + command.tests = util.from_test_names("abc", ["abc1ocen", "abc2ocen", "abc3ocen", + "abc1a", "abc1b", "abc1c", "abc1d", + "abc2z", "abc2ab", "abc2ac"]) with pytest.raises(SystemExit): command.verify_tests_order() - command.tests.append("abc2aa.in") - command.tests.remove("abc1ocen.in") - command.tests.remove("abc2ocen.in") - command.tests.remove("abc3ocen.in") - command.tests.append("abc9ocen.in") - command.tests.append("abc10ocen.in") - command.tests.append("abc11ocen.in") - + command.tests = util.from_test_names("abc", ["abc9ocen", "abc10ocen", "abc11ocen", + "abc1a", "abc1b", "abc1c", "abc1d", + "abc2z", "abc2aa", "abc2ab", "abc2ac"]) command.verify_tests_order() - command.tests = ["abc0.in", "abc0a.in", "abc0b.in", - "abc1.in", "abc1a.in", "abc1b.in"] + command.tests = util.from_test_names("abc", ["abc0", "abc0a", "abc0b", + "abc1", "abc1a", "abc1b"]) command.verify_tests_order() - command.tests.remove("abc0a.in") + command.tests = util.from_test_names("abc", ["abc0", "abc0b", + "abc1", "abc1a", "abc1b"]) with pytest.raises(SystemExit): command.verify_tests_order() diff --git a/tests/commands/run/test_integration.py b/tests/commands/run/test_integration.py index 237ec7f1..d8e595aa 100644 --- a/tests/commands/run/test_integration.py +++ b/tests/commands/run/test_integration.py @@ -90,6 +90,7 @@ def test_no_expected_scores(capsys, create_package, time_tool): del config["sinol_expected_scores"] with open(config_path, "w") as config_file: config_file.write(yaml.dump(config)) + SIO3Package().reload_config() parser = configure_parsers() args = parser.parse_args(["run", "--time-tool", time_tool]) @@ -103,8 +104,8 @@ def test_no_expected_scores(capsys, create_package, time_tool): out = capsys.readouterr().out assert "Solutions were added:" in out assert "There was an unknown change in expected scores." not in out - solution = package_util.get_files_matching_pattern(command.ID, f"{command.ID}.*")[0] - assert os.path.basename(solution) in out + solution = package_util.get_files_matching_pattern(f"{command.ID}.*")[0] + assert solution.filename in out @pytest.mark.parametrize("create_package", [get_simple_package_path(), get_verify_status_package_path(), @@ -129,6 +130,7 @@ def test_apply_suggestions(create_package, time_tool, capsys): del config["sinol_expected_scores"] with open(config_path, "w") as config_file: config_file.write(yaml.dump(config)) + SIO3Package().reload_config() parser = configure_parsers() args = parser.parse_args(["run", "--apply-suggestions", "--time-tool", time_tool]) @@ -136,7 +138,7 @@ def test_apply_suggestions(create_package, time_tool, capsys): command.run(args) out = capsys.readouterr().out - assert "There was an unknown change in expected scores." + assert "There was an unknown change in expected scores." 
not in out with open(config_path, "r") as config_file: config = yaml.load(config_file, Loader=yaml.SafeLoader) assert config["sinol_expected_scores"] == expected_scores @@ -158,6 +160,7 @@ def test_incorrect_expected_scores(capsys, create_package, time_tool): config["sinol_expected_scores"]["abc.cpp"]["points"] = 75 with open(config_path, "w") as config_file: config_file.write(yaml.dump(config)) + SIO3Package().reload_config() parser = configure_parsers() args = parser.parse_args(["run", "--time-tool", time_tool]) @@ -194,7 +197,7 @@ def test_flag_tests(create_package, time_tool): except SystemExit: pass - assert command.tests == [os.path.join("in", os.path.basename(test))] + assert [test.in_file.path for test in command.tests] == [test] @pytest.mark.parametrize("create_package", [get_simple_package_path(), get_verify_status_package_path(), @@ -208,16 +211,16 @@ def test_flag_solutions(capsys, create_package, time_tool): create_ins_outs(package_path) task_id = package_util.get_task_id() - solutions = package_util.get_files_matching_pattern(task_id, f'{task_id}?.*') + solutions = package_util.get_files_matching_pattern(f'{task_id}?.*') parser = configure_parsers() - args = parser.parse_args(["run", "--solutions", solutions[0], "--time-tool", time_tool]) + args = parser.parse_args(["run", "--solutions", solutions[0].filename, "--time-tool", time_tool]) command = Command() command.run(args) out = capsys.readouterr().out - assert os.path.basename(solutions[0]) in out - assert os.path.basename(solutions[1]) not in out + assert solutions[0].filename in out + assert solutions[1].filename not in out @pytest.mark.parametrize("create_package", [get_simple_package_path(), get_verify_status_package_path(), @@ -230,21 +233,18 @@ def test_flag_solutions_multiple(capsys, create_package, time_tool): create_ins_outs(package_path) task_id = package_util.get_task_id() - solutions = [ - os.path.basename(file) - for file in package_util.get_files_matching_pattern(task_id, f'{task_id}?.*') - ] + solutions = package_util.get_files_matching_pattern(f'{task_id}?.*') parser = configure_parsers() - args = parser.parse_args(["run", "--solutions", solutions[0], os.path.join("prog", solutions[1]), + args = parser.parse_args(["run", "--solutions", solutions[0].filename, os.path.join("prog", solutions[1].filename), "--time-tool", time_tool]) command = Command() command.run(args) out = capsys.readouterr().out - assert os.path.basename(solutions[0]) in out - assert os.path.basename(solutions[1]) in out - assert os.path.basename(solutions[2]) not in out + assert solutions[0].filename in out + assert solutions[1].filename in out + assert solutions[2].filename not in out @pytest.mark.parametrize("create_package", [get_weak_compilation_flags_package_path()], indirect=True) @@ -302,6 +302,7 @@ def test_no_scores(capsys, create_package, time_tool): del config["scores"] with open(config_path, "w") as config_file: config_file.write(yaml.dump(config)) + SIO3Package().reload_config() parser = configure_parsers() args = parser.parse_args(["run", "--time-tool", time_tool]) @@ -325,8 +326,9 @@ def test_missing_output_files(capsys, create_package): outs.sort() os.unlink(outs[0]) os.unlink(outs[1]) - out1 = command.extract_file_name(outs[0]).replace(".out", ".in") - out2 = command.extract_file_name(outs[1]).replace(".out", ".in") + package_util.reload_tests() + out1 = os.path.basename(outs[0]).replace(".out", ".in") + out2 = os.path.basename(outs[1]).replace(".out", ".in") parser = configure_parsers() args = parser.parse_args(["run", 
"--time-tool", "time"]) @@ -353,6 +355,7 @@ def test_missing_output_files_allow_missing(capsys, create_package): outs = glob.glob(os.path.join(package_path, "out", "*.out")) for i in outs: os.unlink(i) + package_util.reload_tests() parser = configure_parsers() args = parser.parse_args(["run", "--time-tool", "time", "--no-outputs"]) @@ -383,6 +386,7 @@ def test_no_limits_in_config(capsys, create_package, time_tool): del config["memory_limits"] with open(config_path, "w") as config_file: config_file.write(yaml.dump(config)) + SIO3Package().reload_config() parser = configure_parsers() args = parser.parse_args(["run", "--time-tool", time_tool]) @@ -452,6 +456,7 @@ def test_override_limits(create_package, time_tool): del config["override_limits"] with open(config_file_path, "w") as config_file: config_file.write(yaml.dump(config)) + SIO3Package().reload_config() parser = configure_parsers() args = parser.parse_args(["run", "--apply-suggestions", "--time-tool", time_tool]) @@ -475,6 +480,7 @@ def test_override_limits(create_package, time_tool): config["memory_limit"] = 256 with open(config_file_path, "w") as config_file: config_file.write(yaml.dump(config)) + SIO3Package().reload_config() command = Command() command.run(args) @@ -557,6 +563,7 @@ def test_undocumented_test_limits_option(create_package, capsys): del config["sinol_undocumented_test_limits"] with open(os.path.join(os.getcwd(), "config.yml"), "w") as config_file: config_file.write(yaml.dump(config)) + SIO3Package().reload_config() command = Command() with pytest.raises(SystemExit) as e: @@ -564,7 +571,7 @@ def test_undocumented_test_limits_option(create_package, capsys): assert e.value.code == 1 out = capsys.readouterr().out - assert "und1a.in: Specifying limit for a single test is not allowed in sinol-make." in out + assert "und1b: Specifying limit for a single test is not allowed for OI packages." in out @pytest.mark.parametrize("create_package", [get_simple_package_path(), get_example_tests_package_path()], indirect=True) @@ -644,16 +651,15 @@ def run(): end_time = time.time() - start_time assert end_time - start_time < length / 2 - task_id = package_util.get_task_id() - solutions = package_util.get_solutions(task_id, None) + solutions = package_util.get_solutions() for solution in solutions: - cache_file: CacheFile = cache.get_cache_file(solution) + cache_file: CacheFile = cache.get_cache_file(solution.filename) for test in command.tests: - assert util.get_file_md5(test) in cache_file.tests - test_cache = cache_file.tests[util.get_file_md5(test)] - lang = package_util.get_file_lang(solution) - assert test_cache.time_limit == package_util.get_time_limit(test, command.config, lang, command.ID) - assert test_cache.memory_limit == package_util.get_memory_limit(test, command.config, lang, command.ID) + assert util.get_file_md5(test.in_file.path) in cache_file.tests + test_cache = cache_file.tests[util.get_file_md5(test.in_file.path)] + lang = package_util.get_file_lang(solution.filename) + assert test_cache.time_limit == package_util.get_time_limit(test, lang) + assert test_cache.memory_limit == package_util.get_memory_limit(test, lang) assert cache_file is not None assert cache_file.tests != {} @@ -681,10 +687,9 @@ def test_results_caching_checker_changed(create_package, time_tool): # Compile checker check if test results are removed. 
command.compile_additional_files() - task_id = package_util.get_task_id() - solutions = package_util.get_solutions(task_id, None) + solutions = package_util.get_solutions() for solution in solutions: - cache_file: CacheFile = cache.get_cache_file(solution) + cache_file: CacheFile = cache.get_cache_file(solution.filename) assert cache_file.tests == {} @@ -716,13 +721,12 @@ def test(file_to_change, lang, comment_character, extra_compilation_files=True): cache.process_extra_compilation_files(command.config.get("extra_compilation_files", []), command.ID) else: cache.process_extra_execution_files(command.config.get("extra_execution_files", {}), command.ID) - task_id = package_util.get_task_id() - solutions = package_util.get_solutions(task_id, None) + solutions = package_util.get_solutions() for solution in solutions: - if package_util.get_file_lang(solution) == lang: + if package_util.get_file_lang(solution.filename) == lang: print(file_to_change, solution) - assert not os.path.exists(paths.get_cache_path("md5sums", solution)) - info = cache.get_cache_file(solution) + assert not os.path.exists(paths.get_cache_path("md5sums", solution.filename)) + info = cache.get_cache_file(solution.filename) assert info == CacheFile() test("liblib.cpp", "cpp", "//") @@ -751,19 +755,20 @@ def test_contest_type_change(create_package, time_tool): config["sinol_contest_type"] = "oi" with open(config_path, "w") as f: f.write(yaml.dump(config)) + SIO3Package().reload_config() # Compile checker check if test results are removed. command = Command() # We remove tests, so that `run()` exits before creating new cached test results. for test in glob.glob("in/*.in"): os.unlink(test) + package_util.reload_tests() with pytest.raises(SystemExit): command.run(args) - task_id = package_util.get_task_id() - solutions = package_util.get_solutions(task_id, None) + solutions = package_util.get_solutions() for solution in solutions: - cache_file: CacheFile = cache.get_cache_file(solution) + cache_file: CacheFile = cache.get_cache_file(solution.filename) assert cache_file.tests == {} diff --git a/tests/commands/run/test_unit.py b/tests/commands/run/test_unit.py index 2ce6a818..7bffd8c9 100644 --- a/tests/commands/run/test_unit.py +++ b/tests/commands/run/test_unit.py @@ -18,7 +18,7 @@ def test_get_output_file(): def test_compile_solutions(create_package): package_path = create_package command = get_command(package_path) - solutions = package_util.get_solutions("abc", None) + solutions = package_util.get_solutions() result = command.compile_solutions(solutions) assert result == [True for _ in solutions] @@ -29,18 +29,18 @@ def test_execution(create_package, time_tool): command.args.time_tool = time_tool command.timetool_name = time_tool command.task_type = NormalTaskType(timetool=time_tool, sio2jail_path=sio2jail.get_default_sio2jail_path()) - solution = "abc.cpp" - executable = package_util.get_executable(solution) + solution = LocalFile(os.path.join(package_path, "prog", "abc.cpp")) + executable = package_util.get_executable(solution.filename) result = command.compile_solutions([solution]) assert result == [True] create_ins_outs(package_path) - test = package_util.get_tests("abc", None)[0] + test = package_util.get_tests()[0] with open(os.path.join(package_path, "config.yml"), "r") as config_file: config = yaml.load(config_file, Loader=yaml.FullLoader) - os.makedirs(paths.get_executions_path(solution), exist_ok=True) + os.makedirs(paths.get_executions_path(solution.filename), exist_ok=True) result = command.run_solution((solution, 
paths.get_executables_path(executable), test, config['time_limit'], config['memory_limit'], sio2jail.get_default_sio2jail_path(), paths.get_executions_path())) assert result.Status == Status.OK @@ -52,9 +52,9 @@ def test_run_solutions(create_package, time_tool): command.args = argparse.Namespace(solutions_report=False, time_tool=time_tool, compile_mode='default', hide_memory=False) create_ins_outs(package_path) - command.tests = package_util.get_tests("abc", None) - command.test_md5sums = {os.path.basename(test): util.get_file_md5(test) for test in command.tests} - command.groups = list(sorted(set([command.get_group(test) for test in command.tests]))) + command.tests = package_util.get_tests() + command.test_md5sums = {test.in_file.filename: util.get_file_md5(test.in_file.path) for test in command.tests} + command.groups = package_util.get_groups(command.tests) command.scores = command.config["scores"] command.possible_score = command.get_possible_score(command.groups) command.memory_limit = command.config["memory_limit"] @@ -69,8 +69,10 @@ def flatten_results(results): for group, group_result in results[solution].items()) return new_results - assert flatten_results(command.compile_and_run(["abc.cpp"])[0]) == {"abc.cpp": {1: Status.OK, 2: Status.OK, 3: Status.OK, 4: Status.OK}} - assert flatten_results(command.compile_and_run(["abc.cpp", "abc4.cpp"])[0]) == { + solutions = [LocalFile(os.path.join(package_path, "prog", "abc.cpp"))] + assert flatten_results(command.compile_and_run(solutions)[0]) == {"abc.cpp": {1: Status.OK, 2: Status.OK, 3: Status.OK, 4: Status.OK}} + solutions = [LocalFile(os.path.join(package_path, "prog", "abc.cpp")), LocalFile(os.path.join(package_path, "prog", "abc4.cpp"))] + assert flatten_results(command.compile_and_run(solutions)[0]) == { "abc.cpp": {1: Status.OK, 2: Status.OK, 3: Status.OK, 4: Status.OK}, "abc4.cpp": {1: Status.OK, 2: Status.OK, 3: "WA", 4: "RE"} } @@ -80,8 +82,8 @@ def test_validate_expected_scores_success(): os.chdir(get_simple_package_path()) command = get_command() command.scores = command.config["scores"] - command.tests = ["in/abc1a.in", "in/abc2a.in", "in/abc3a.in", "in/abc4a.in"] - command.groups = command.get_groups(command.tests) + command.tests = from_test_names("abc", ["abc1a", "abc2a", "abc3a", "abc4a"]) + command.groups = package_util.get_groups(command.tests) command.possible_score = command.contest.get_possible_score(command.groups, command.scores) # Test with correct expected scores. @@ -433,8 +435,8 @@ def test_set_scores(create_package): """ package_path = create_package command = get_command(package_path) - command.tests = ["in/abc0a.in", "in/abc1a.in", "in/abc2a.in", "in/abc3a.in", "in/abc4a.in", - "in/abc5a.in", "in/abc6a.in"] + command.tests = from_test_names("abc", ["abc0a", "abc1a", "abc2a", "abc3a", "abc4a", + "abc5a", "abc6a"]) del command.config["scores"] command.set_scores() assert command.scores == { @@ -448,24 +450,6 @@ def test_set_scores(create_package): } -@pytest.mark.parametrize("create_package", [get_simple_package_path(), get_verify_status_package_path()], indirect=True) -def test_get_valid_input_files(create_package): - """ - Test get_valid_input_files function. 
-    """
-    package_path = create_package
-    command = get_command(package_path)
-    create_ins_outs(package_path)
-    command.tests = package_util.get_tests(command.ID, None)
-
-    outputs = glob.glob(os.path.join(package_path, "out", "*.out"))
-    os.unlink(outputs[0])
-    valid_inputs = command.get_valid_input_files()
-    assert len(valid_inputs) == len(outputs) - 1
-    assert "in/" + os.path.basename(outputs[0].replace(".out", ".in")) not in valid_inputs
-    assert "in/" + os.path.basename(outputs[1].replace(".out", ".in")) in valid_inputs
-
-
 def test_update_group_status():
     from sinol_make.commands.run import update_group_status
     assert update_group_status(Status.OK, Status.WA) == Status.WA
diff --git a/tests/commands/run/util.py b/tests/commands/run/util.py
index 26f9b18e..0df4151d 100644
--- a/tests/commands/run/util.py
+++ b/tests/commands/run/util.py
@@ -16,6 +16,7 @@ def get_command(path = None):
     """
     if path is None:
         path = get_simple_package_path()
+    SIO3Package.reset()
     command = Command()
     command.set_constants()
     command.cpus = mp.cpu_count()
diff --git a/tests/fixtures.py b/tests/fixtures.py
index b1cba902..8cbb0083 100644
--- a/tests/fixtures.py
+++ b/tests/fixtures.py
@@ -1,4 +1,6 @@
 import pytest, tempfile, os, shutil
+
+from sinol_make.sio3pack.package import SIO3Package
 from .util import get_simple_package_path
 
 
@@ -19,6 +21,9 @@ def create_package(request):
     shutil.copytree(path, package_path)
     os.chdir(package_path)
 
+    # Reset the package singleton so each test starts from a fresh instance.
+    SIO3Package.reset()
+
     yield package_path
 
     tmpdir.cleanup()
diff --git a/tests/helpers/test_package_util.py b/tests/helpers/test_package_util.py
index ae27c69f..a894e19d 100644
--- a/tests/helpers/test_package_util.py
+++ b/tests/helpers/test_package_util.py
@@ -1,4 +1,6 @@
-import pytest
+import os.path
+
+from sio3pack.exceptions import ProcessPackageError
 
 from ..commands.run.util import create_ins
 from ..fixtures import *
@@ -6,43 +8,24 @@
 from sinol_make.helpers import package_util, func_cache
 
 
-@pytest.mark.parametrize("create_package", [util.get_long_name_package_path()], indirect=True)
-def test_get_task_id(create_package):
-    package_path = create_package
-    assert package_util.get_task_id() == "lpn"
-    with open(os.path.join(package_path, "config.yml"), "w") as config_file:
-        config_file.write("title: Long package name\n")
-    func_cache.clear_cache()
-    with pytest.raises(SystemExit):
-        package_util.get_task_id()
-
-
-def test_extract_test_id():
-    assert package_util.extract_test_id("in/abc1a.in", "abc") == "1a"
-    assert package_util.extract_test_id("in/abc10a.in", "abc") == "10a"
-    assert package_util.extract_test_id("in/abc12ca.in", "abc") == "12ca"
-    assert package_util.extract_test_id("in/abc0ocen.in", "abc") == "0ocen"
-    assert package_util.extract_test_id("in/long_task_id2bc.in", "long_task_id") == "2bc"
-
-
-def test_get_group():
-    assert package_util.get_group("in/abc1a.in", "abc") == 1
-    assert package_util.get_group("in/long_name2ocen.in", "long_name") == 0
-
-
 def test_get_tests(create_package):
+    SIO3Package.reset()
     os.chdir(create_package)
     task_id = package_util.get_task_id()
     create_ins(create_package, task_id)
-    tests = package_util.get_tests("abc", None)
-    assert tests == ["in/abc1a.in", "in/abc2a.in", "in/abc3a.in", "in/abc4a.in"]
+    tests = package_util.get_tests()
+    assert list(map(lambda t: t.test_name, tests)) == ["abc1a", "abc2a", "abc3a", "abc4a"]
 
     with tempfile.TemporaryDirectory() as tmpdir:
+        tmpdir = os.path.realpath(tmpdir)
         def create_file(name):
             with open(os.path.join(tmpdir, "in", name), "w") as f:
                 f.write("")
 
+        
SIO3Package.reset() os.chdir(tmpdir) + with open(os.path.join(tmpdir, "config.yml"), "w") as f: + f.write("sinol_task_id: abc") os.mkdir("in") create_file("abc0.in") create_file("abc0a.in") @@ -51,221 +34,84 @@ def create_file(name): create_file("abc1a.in") create_file("abc1b.in") create_file("abc2a.in") + os.mkdir("out") + package_util.reload_tests() - assert set(package_util.get_tests("abc", None)) == \ - {"in/abc0.in", "in/abc0a.in", "in/abc1a.in", "in/abc1b.in", "in/abc1ocen.in", "in/abc2a.in", "in/abc2ocen.in"} - assert package_util.get_tests("abc", ["in/abc1a.in"]) == ["in/abc1a.in"] - assert package_util.get_tests("abc", ["in/abc??.in"]) == \ - ["in/abc0a.in", "in/abc1a.in", "in/abc1b.in", "in/abc2a.in"] - assert package_util.get_tests("abc", ["abc1a.in"]) == ["in/abc1a.in"] - assert package_util.get_tests("abc", ["abc?ocen.in", "abc0.in"]) == ["in/abc0.in", "in/abc1ocen.in", "in/abc2ocen.in"] - assert package_util.get_tests("abc", [os.path.join(tmpdir, "in", "abc1a.in")]) == ["in/abc1a.in"] - - -def test_extract_file_name(): - assert package_util.get_file_name("in/abc1a.in") == "abc1a.in" + assert set(map(lambda t: t.test_name, package_util.get_tests())) == \ + {"abc0", "abc0a", "abc1a", "abc1b", "abc1ocen", "abc2a", "abc2ocen"} + assert list(map(lambda t: t.test_name, package_util.get_tests(["in/abc1a.in"]))) == ["abc1a"] + assert list(map(lambda t: t.test_name, package_util.get_tests(["in/abc??.in"]))) == \ + ["abc0a", "abc1a", "abc1b", "abc2a"] + assert list(map(lambda t: t.test_name, package_util.get_tests(["abc1a.in"]))) == ["abc1a"] + assert list(map(lambda t: t.test_name, package_util.get_tests(["abc?ocen.in", "abc0.in"]))) == ["abc0", "abc1ocen", "abc2ocen"] + assert list(map(lambda t: t.test_name, package_util.get_tests([os.path.join(tmpdir, "in", "abc1a.in")]))) == ["abc1a"] def test_get_executable(): assert package_util.get_executable("abc.cpp") == "abc.cpp.e" -def test_get_time_limit(): - config = { - "time_limit": 1000, - "time_limits": { - "0": 5000, - "2": 2000, - }, - "override_limits": { - "py": { - "time_limit": 2000, - "time_limits": { - "0": 6000, - "2": 3000, - }, - } - } - } - - with tempfile.TemporaryDirectory() as tmpdir: - os.chdir(tmpdir) - with open("config.yml", "w") as f: - f.write("") - assert package_util.get_time_limit("in/abc1a.in", config, "cpp", "abc") == 1000 - assert package_util.get_time_limit("in/abc2a.in", config, "cpp", "abc") == 2000 - assert package_util.get_time_limit("in/abc2b.in", config, "cpp", "abc") == 2000 - assert package_util.get_time_limit("in/abc3a.in", config, "cpp", "abc") == 1000 - assert package_util.get_time_limit("in/abc3ocen.in", config, "cpp", "abc") == 5000 - - assert package_util.get_time_limit("in/abc1a.in", config, "py", "abc") == 2000 - assert package_util.get_time_limit("in/abc2a.in", config, "py", "abc") == 3000 - assert package_util.get_time_limit("in/abc2b.in", config, "py", "abc") == 3000 - assert package_util.get_time_limit("in/abc3a.in", config, "py", "abc") == 2000 - assert package_util.get_time_limit("in/abc3ocen.in", config, "py", "abc") == 6000 - - # Test getting default time limit. 
- config = { - "time_limits": { - "1": 1000, - }, - "override_limits": { - "py": { - "time_limits": { - "1": 2000, - } - } - } - } - assert package_util.get_time_limit("in/abc1a.in", config, "cpp", "abc") == 1000 - assert package_util.get_time_limit("in/abc1a.in", config, "py", "abc") == 2000 - with pytest.raises(SystemExit): - package_util.get_time_limit("in/abc2a.in", config, "cpp", "abc") - with pytest.raises(SystemExit): - package_util.get_time_limit("in/abc2a.in", config, "py", "abc") - - config = { - "time_limits": { - "1": 1000, - }, - "override_limits": { - "py": { - "time_limit": 500, - "time_limits": { - "1": 1000, - } - } - } - } - assert package_util.get_time_limit("in/abc1a.in", config, "cpp", "abc") == 1000 - with pytest.raises(SystemExit): - package_util.get_time_limit("in/abc2a.in", config, "cpp", "abc") - assert package_util.get_time_limit("in/abc1a.in", config, "py", "abc") == 1000 - assert package_util.get_time_limit("in/abc2a.in", config, "py", "abc") == 500 - - -def test_get_memory_limit(): - config = { - "memory_limit": 256, - "memory_limits": { - "0": 128, - "2": 512, - }, - "override_limits": { - "py": { - "memory_limit": 512, - "memory_limits": { - "0": 256, - "2": 1024, - }, - } - } - } - - with tempfile.TemporaryDirectory() as tmpdir: - os.chdir(tmpdir) - with open("config.yml", "w") as f: - f.write("") - assert package_util.get_memory_limit("in/abc1a.in", config, "cpp", "abc") == 256 - assert package_util.get_memory_limit("in/abc2a.in", config, "cpp", "abc") == 512 - assert package_util.get_memory_limit("in/abc2b.in", config, "cpp", "abc") == 512 - assert package_util.get_memory_limit("in/abc3ocen.in", config, "cpp", "abc") == 128 - - assert package_util.get_memory_limit("in/abc1a.in", config, "py", "abc") == 512 - assert package_util.get_memory_limit("in/abc2a.in", config, "py", "abc") == 1024 - assert package_util.get_memory_limit("in/abc2b.in", config, "py", "abc") == 1024 - assert package_util.get_memory_limit("in/abc3ocen.in", config, "py", "abc") == 256 - - # Test getting default memory limit. 
-    config = {
-        "memory_limits": {
-            "1": 1024,
-        },
-        "override_limits": {
-            "py": {
-                "memory_limits": {
-                    "1": 2048,
-                }
-            }
-        }
-    }
-    assert package_util.get_memory_limit("in/abc1a.in", config, "cpp", "abc") == 1024
-    assert package_util.get_memory_limit("in/abc1a.in", config, "py", "abc") == 2048
-    with pytest.raises(SystemExit):
-        package_util.get_memory_limit("in/abc2a.in", config, "cpp", "abc")
-    with pytest.raises(SystemExit):
-        package_util.get_memory_limit("in/abc2a.in", config, "py", "abc")
-
-    config = {
-        "memory_limits": {
-            "1": 1024,
-        },
-        "override_limits": {
-            "py": {
-                "memory_limit": 512,
-                "memory_limits": {
-                    "1": 1024,
-                }
-            }
-        }
-    }
-    assert package_util.get_memory_limit("in/abc1a.in", config, "cpp", "abc") == 1024
-    with pytest.raises(SystemExit):
-        package_util.get_memory_limit("in/abc2a.in", config, "cpp", "abc")
-    assert package_util.get_memory_limit("in/abc1a.in", config, "py", "abc") == 1024
-    assert package_util.get_memory_limit("in/abc2a.in", config, "py", "abc") == 512
-
-
 @pytest.mark.parametrize("create_package", [util.get_simple_package_path()], indirect=True)
 def test_validate_files(create_package, capsys):
     package_path = create_package
     util.create_ins_outs(package_path)
     task_id = package_util.get_task_id()
     assert task_id == "abc"
-    package_util.validate_test_names(task_id)
 
     os.rename(os.path.join(package_path, "in", "abc1a.in"), os.path.join(package_path, "in", "def1a.in"))
-    with pytest.raises(SystemExit):
-        package_util.validate_test_names(task_id)
-    out = capsys.readouterr().out
-    assert "def1a.in" in out
+
+    with pytest.raises(ProcessPackageError) as e:
+        package_util.reload_tests()
+    assert "def1a.in" in e.value.full_message
 
     os.rename(os.path.join(package_path, "in", "def1a.in"), os.path.join(package_path, "in", "abc1a.in"))
     os.rename(os.path.join(package_path, "out", "abc1a.out"), os.path.join(package_path, "out", "def1a.out"))
-    with pytest.raises(SystemExit):
-        package_util.validate_test_names(task_id)
-    out = capsys.readouterr().out
-    assert "def1a.out" in out
+    with pytest.raises(ProcessPackageError) as e:
+        package_util.reload_tests()
+    assert "def1a.out" in e.value.full_message
 
 
-def test_get_executable_key():
-    os.chdir(get_simple_package_path())
-    for task_id in ["abc", "long_task_id", "", "x"]:
-        assert package_util.get_executable_key(f"{task_id}1.cpp.e", task_id) == (0, 1)
-        assert package_util.get_executable_key(f"{task_id}2.cpp.e", task_id) == (0, 2)
-        assert package_util.get_executable_key(f"{task_id}s20.cpp.e", task_id) == (1, 20)
-        assert package_util.get_executable_key(f"{task_id}s21.cpp.e", task_id) == (1, 21)
-        assert package_util.get_executable_key(f"{task_id}b100.cpp.e", task_id) == (2, 100)
-        assert package_util.get_executable_key(f"{task_id}b101.cpp.e", task_id) == (2, 101)
-        assert package_util.get_executable_key(f"{task_id}x1000.cpp.e", task_id) == (0, 0)
+@pytest.mark.parametrize("create_package", [util.get_simple_package_path()], indirect=True)
+def test_get_executable_key(create_package):
+    for task_id in ["abc", "long_task_id", "x"]:
+        with open(os.path.join(create_package, "config.yml"), "w") as f:
+            f.write(f"sinol_task_id: {task_id}")
+        SIO3Package().reload_config()
+        assert package_util.get_executable_key(f"{task_id}1.cpp.e") == (0, 1)
+        assert package_util.get_executable_key(f"{task_id}2.cpp.e") == (0, 2)
+        assert package_util.get_executable_key(f"{task_id}s20.cpp.e") == (1, 20)
+        assert package_util.get_executable_key(f"{task_id}s21.cpp.e") == (1, 21)
+        assert package_util.get_executable_key(f"{task_id}b100.cpp.e") == (2, 100)
+        assert package_util.get_executable_key(f"{task_id}b101.cpp.e") == (2, 101)
+        assert package_util.get_executable_key(f"{task_id}x1000.cpp.e") == (0, 0)
 
 
 def test_get_solutions():
+    SIO3Package.reset()
     os.chdir(get_simple_package_path())
 
-    solutions = package_util.get_solutions("abc", None)
+    def to_filenames(solutions):
+        return [sol.filename for sol in solutions]
+
+    solutions = to_filenames(package_util.get_solutions())
     assert solutions == ["abc.cpp", "abc1.cpp", "abc2.cpp", "abc3.cpp", "abc4.cpp"]
-    solutions = package_util.get_solutions("abc", ["prog/abc.cpp"])
+    solutions = to_filenames(package_util.get_solutions(["prog/abc.cpp"]))
     assert solutions == ["abc.cpp"]
     assert "abc1.cpp" not in solutions
 
     with tempfile.TemporaryDirectory() as tmpdir:
+        tmpdir = os.path.realpath(tmpdir)
         def create_file(name):
             with open(os.path.join(tmpdir, "prog", name), "w") as f:
                 f.write("")
 
+        SIO3Package.reset()
         os.chdir(tmpdir)
+        os.mkdir("in")
+        os.mkdir("out")
         os.mkdir("prog")
+        with open(os.path.join(tmpdir, "config.yml"), "w") as f:
+            f.write("sinol_task_id: abc")
 
         create_file("abc.cpp")
         create_file("abc1.cpp")
@@ -273,14 +119,14 @@ def create_file(name):
         create_file("abcs1.cpp")
         create_file("abcs2.cpp")
 
-        assert package_util.get_solutions("abc", None) == ["abc.cpp", "abc1.cpp", "abc2.cpp", "abcs1.cpp", "abcs2.cpp"]
-        assert package_util.get_solutions("abc", ["prog/abc.cpp"]) == ["abc.cpp"]
-        assert package_util.get_solutions("abc", ["abc.cpp"]) == ["abc.cpp"]
-        assert package_util.get_solutions("abc", [os.path.join(tmpdir, "prog", "abc.cpp")]) == ["abc.cpp"]
-        assert package_util.get_solutions("abc", ["prog/abc?.cpp"]) == ["abc1.cpp", "abc2.cpp"]
-        assert package_util.get_solutions("abc", ["abc?.cpp"]) == ["abc1.cpp", "abc2.cpp"]
-        assert package_util.get_solutions("abc", ["prog/abc*.cpp"]) == ["abc.cpp", "abc1.cpp", "abc2.cpp", "abcs1.cpp", "abcs2.cpp"]
-        assert package_util.get_solutions("abc", ["abc*.cpp"]) == ["abc.cpp", "abc1.cpp", "abc2.cpp", "abcs1.cpp", "abcs2.cpp"]
-        assert package_util.get_solutions("abc", ["prog/abc.cpp", "abc1.cpp"]) == ["abc.cpp", "abc1.cpp"]
-        assert package_util.get_solutions("abc", ["prog/abc.cpp", "abc?.cpp"]) == ["abc.cpp", "abc1.cpp", "abc2.cpp"]
-        assert package_util.get_solutions("abc", ["abc.cpp", "abc2.cpp", "abcs2.cpp"]) == ["abc.cpp", "abc2.cpp", "abcs2.cpp"]
+        assert to_filenames(package_util.get_solutions()) == ["abc.cpp", "abc1.cpp", "abc2.cpp", "abcs1.cpp", "abcs2.cpp"]
+        assert to_filenames(package_util.get_solutions(["prog/abc.cpp"])) == ["abc.cpp"]
+        assert to_filenames(package_util.get_solutions(["abc.cpp"])) == ["abc.cpp"]
+        assert to_filenames(package_util.get_solutions([os.path.join(tmpdir, "prog", "abc.cpp")])) == ["abc.cpp"]
+        assert to_filenames(package_util.get_solutions(["prog/abc?.cpp"])) == ["abc1.cpp", "abc2.cpp"]
+        assert to_filenames(package_util.get_solutions(["abc?.cpp"])) == ["abc1.cpp", "abc2.cpp"]
+        assert to_filenames(package_util.get_solutions(["prog/abc*.cpp"])) == ["abc.cpp", "abc1.cpp", "abc2.cpp", "abcs1.cpp", "abcs2.cpp"]
+        assert to_filenames(package_util.get_solutions(["abc*.cpp"])) == ["abc.cpp", "abc1.cpp", "abc2.cpp", "abcs1.cpp", "abcs2.cpp"]
+        assert to_filenames(package_util.get_solutions(["prog/abc.cpp", "abc1.cpp"])) == ["abc.cpp", "abc1.cpp"]
+        assert to_filenames(package_util.get_solutions(["prog/abc.cpp", "abc?.cpp"])) == ["abc.cpp", "abc1.cpp", "abc2.cpp"]
+        assert to_filenames(package_util.get_solutions(["abc.cpp", "abc2.cpp", "abcs2.cpp"])) == ["abc.cpp", "abc2.cpp", "abcs2.cpp"]
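
Review note on the two tests above: both depend on the cached SIO3Package
singleton being reset (SIO3Package.reset()) or reloaded
(SIO3Package().reload_config()) whenever the package on disk changes, or
package_util would keep answering from stale state. A minimal sketch of that
pattern, reusing only calls that appear in this diff; the helper name
set_task_id is hypothetical:

    import os
    from sinol_make.sio3pack.package import SIO3Package

    def set_task_id(package_path: str, task_id: str) -> None:
        # Point config.yml at the new task id...
        with open(os.path.join(package_path, "config.yml"), "w") as f:
            f.write(f"sinol_task_id: {task_id}")
        # ...then refresh the cached package so later queries see it.
        SIO3Package().reload_config()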
["abc.cpp", "abc2.cpp", "abcs2.cpp"] diff --git a/tests/util.py b/tests/util.py index be399e31..8e51478c 100644 --- a/tests/util.py +++ b/tests/util.py @@ -1,8 +1,13 @@ import os import glob +import re import subprocess from sinol_make.helpers import compile, paths, package_util +from sinol_make.sio3pack.package import SIO3Package + +from sio3pack.files import LocalFile +from sio3pack.test import Test def get_simple_package_path(): @@ -203,42 +208,61 @@ def create_ins(package_path, task_id): """ Create .in files for package. """ - all_ingens = package_util.get_files_matching_pattern(task_id, f'{task_id}ingen.*') + all_ingens = package_util.get_files_matching_pattern(f'{task_id}ingen.*') if len(all_ingens) == 0: return ingen = all_ingens[0] ingen_executable = paths.get_executables_path("ingen.e") os.makedirs(paths.get_executables_path(), exist_ok=True) - assert compile.compile(ingen, ingen_executable) + assert compile.compile(ingen.path, ingen_executable) os.chdir(os.path.join(package_path, "in")) os.system("../.cache/executables/ingen.e") os.chdir(package_path) + package_util.reload_tests() def create_outs(package_path, task_id): """ Create .out files for package. """ - solution = package_util.get_files_matching_pattern(task_id, f'{task_id}.*')[0] + solution = package_util.get_files_matching_pattern(f'{task_id}.*')[0] solution_executable = paths.get_executables_path("solution.e") os.makedirs(paths.get_executables_path(), exist_ok=True) - assert compile.compile(solution, solution_executable) + assert compile.compile(solution.path, solution_executable) os.chdir(os.path.join(package_path, "in")) for file in glob.glob("*.in"): with open(file, "r") as in_file, open(os.path.join("../out", file.replace(".in", ".out")), "w") as out_file: subprocess.Popen([os.path.join(package_path, ".cache", "executables", "solution.e")], stdin=in_file, stdout=out_file).wait() os.chdir(package_path) + package_util.reload_tests() def create_ins_outs(package_path): """ Create .in and .out files for package. """ + SIO3Package.reset() os.chdir(package_path) task_id = package_util.get_task_id() task_type = package_util.get_task_type_cls() create_ins(package_path, task_id) - has_lib = package_util.any_files_matching_pattern(task_id, f"{task_id}lib.*") + has_lib = package_util.any_files_matching_pattern(f"{task_id}lib.*") if not has_lib and task_type.run_outgen(): create_outs(package_path, task_id) + + +def from_test_names(task_id: str, names: list[str]): + def create_test(name: str): + test_id = name.removeprefix(task_id) + gr_match = re.match(r"^\d+", test_id) + if gr_match: + group = gr_match.group(0) + else: + group = None + return Test(name, test_id, LocalFile(os.path.join(task_id, "in", name + ".in"), False), LocalFile(os.path.join(task_id, "out", name + ".out"), False), group) + + tests = [] + for name in names: + tests.append(create_test(name)) + return tests