Skip to content

Commit 68463e8

Browse files
Add checks if output files are missing (#55)

* Add checks for missing output files
* Add tests
* Split code into functions
* Add tests
* Fix tests
* Add more comments and split into smaller functions
* Update src/sinol_make/commands/run/__init__.py

Co-authored-by: Tomasz Nowak <[email protected]>

* Fix spelling mistake
* Fix tests

---------

Co-authored-by: Tomasz Nowak <[email protected]>
1 parent 89a4970 commit 68463e8

File tree

3 files changed

+95
-11
lines changed

3 files changed

+95
-11
lines changed

src/sinol_make/commands/run/__init__.py

+50-10
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
# Author of the original code: Bartosz Kostka <[email protected]>
33
# Version 0.6 (2021-08-29)
44
import subprocess
5+
import glob
56

67
from sinol_make.commands.run.structs import ExecutionResult, ResultChange, ValidationResult, ExecutionData, PointsChange
78
from sinol_make.helpers.parsers import add_compilation_arguments
@@ -155,6 +156,10 @@ def get_output_file(self, test_path):
155156
return os.path.join("out", os.path.split(os.path.splitext(test_path)[0])[1]) + ".out"
156157

157158

159+
def get_groups(self, tests):
160+
return sorted(list(set([self.get_group(test) for test in tests])))
161+
162+
158163
def compile_solutions(self, solutions):
159164
os.makedirs(self.COMPILATION_DIR, exist_ok=True)
160165
os.makedirs(self.EXECUTABLES_DIR, exist_ok=True)
@@ -808,7 +813,7 @@ def exit(self):
808813

809814
def set_scores(self):
810815
self.tests = package_util.get_tests(self.args.tests)
811-
self.groups = list(sorted(set([self.get_group(test) for test in self.tests])))
816+
self.groups = self.get_groups(self.tests)
812817
self.scores = collections.defaultdict(int)
813818

814819
if 'scores' not in self.config.keys():
@@ -846,6 +851,49 @@ def set_scores(self):
846851

847852
self.possible_score = self.get_possible_score(self.groups)
848853

854+
def get_valid_input_files(self):
855+
"""
856+
Returns list of input files that have corresponding output file.
857+
"""
858+
output_tests = glob.glob(os.path.join(os.getcwd(), "out", "*.out"))
859+
output_tests_ids = [self.extract_test_id(test) for test in output_tests]
860+
valid_input_files = []
861+
for test in self.tests:
862+
if self.extract_test_id(test) in output_tests_ids:
863+
valid_input_files.append(test)
864+
return valid_input_files
865+
866+
def validate_existence_of_outputs(self):
867+
"""
868+
Checks if all input files have corresponding output files.
869+
"""
870+
valid_input_files = self.get_valid_input_files()
871+
if len(valid_input_files) != len(self.tests):
872+
missing_tests = list(set(self.tests) - set(valid_input_files))
873+
missing_tests.sort()
874+
875+
print(util.warning('Missing output files for tests: ' + ', '.join(
876+
[self.extract_file_name(test) for test in missing_tests])))
877+
print(util.warning('Running only on tests with output files.'))
878+
self.tests = valid_input_files
879+
self.groups = self.get_groups(self.tests)
880+
881+
def check_are_any_tests_to_run(self):
882+
"""
883+
Checks if there are any tests to run and prints them and checks
884+
if all input files have corresponding output files.
885+
"""
886+
if len(self.tests) > 0:
887+
print(util.bold('Tests that will be run:'), ' '.join([self.extract_file_name(test) for test in self.tests]))
888+
889+
example_tests = [test for test in self.tests if self.get_group(test) == 0]
890+
if len(example_tests) == len(self.tests):
891+
print(util.warning('Running only on example tests.'))
892+
893+
self.validate_existence_of_outputs()
894+
else:
895+
print(util.warning('There are no tests to run.'))
896+
849897
def check_errors(self, results: dict[str, dict[str, dict[str, ExecutionResult]]]):
850898
error_msg = ""
851899
for solution in results:
@@ -909,15 +957,7 @@ def run(self, args):
909957
self.checker = None
910958

911959
self.set_scores()
912-
913-
if len(self.tests) > 0:
914-
print(util.bold('Tests that will be run:'), ' '.join([self.extract_file_name(test) for test in self.tests]))
915-
916-
example_tests = [test for test in self.tests if self.get_group(test) == 0]
917-
if len(example_tests) == len(self.tests):
918-
print(util.warning('Running only on example tests.'))
919-
else:
920-
print(util.warning('There are no tests to run.'))
960+
self.check_are_any_tests_to_run()
921961

922962
self.failed_compilations = []
923963
solutions = self.get_solutions(self.args.solutions)

tests/commands/run/test_integration.py

+26
Original file line numberDiff line numberDiff line change
@@ -189,3 +189,29 @@ def test_no_scores(capsys, create_package, time_tool):
189189

190190
out = capsys.readouterr().out
191191
assert "Scores are not defined in config.yml. Points will be assigned equally to all groups." in out
192+
193+
194+
@pytest.mark.parametrize("create_package", [get_simple_package_path(), get_verify_status_package_path()], indirect=True)
195+
def test_missing_output_files(capsys, create_package):
196+
"""
197+
Test with missing output files.
198+
"""
199+
package_path = create_package
200+
command = get_command()
201+
create_ins_outs(package_path)
202+
203+
outs = glob.glob(os.path.join(package_path, "out", "*.out"))
204+
outs.sort()
205+
os.unlink(outs[0])
206+
os.unlink(outs[1])
207+
out1 = command.extract_file_name(outs[0]).replace(".out", ".in")
208+
out2 = command.extract_file_name(outs[1]).replace(".out", ".in")
209+
210+
parser = configure_parsers()
211+
args = parser.parse_args(["run", "--time_tool", "time"])
212+
command = Command()
213+
with pytest.raises(SystemExit):
214+
command.run(args)
215+
216+
out = capsys.readouterr().out
217+
assert f'Missing output files for tests: {out1}, {out2}' in out

tests/commands/run/test_unit.py

+19-1
Original file line numberDiff line numberDiff line change
@@ -403,7 +403,7 @@ def test_print_expected_scores_diff(capsys, create_package):
403403
}
404404

405405

406-
@pytest.mark.parametrize("create_package", [get_simple_package_path()], indirect=["create_package"])
406+
@pytest.mark.parametrize("create_package", [get_simple_package_path()], indirect=True)
407407
def test_set_scores(create_package):
408408
"""
409409
Test set_scores function.
@@ -423,3 +423,21 @@ def test_set_scores(create_package):
423423
5: 16,
424424
6: 20
425425
}
426+
427+
428+
@pytest.mark.parametrize("create_package", [get_simple_package_path(), get_verify_status_package_path()], indirect=True)
429+
def test_get_valid_input_files(create_package):
430+
"""
431+
Test get_valid_input_files function.
432+
"""
433+
package_path = create_package
434+
command = get_command(package_path)
435+
create_ins_outs(package_path)
436+
command.tests = package_util.get_tests(None)
437+
438+
outputs = glob.glob(os.path.join(package_path, "out", "*.out"))
439+
os.unlink(outputs[0])
440+
valid_inputs = command.get_valid_input_files()
441+
assert len(valid_inputs) == len(outputs) - 1
442+
assert "in/" + os.path.basename(outputs[0].replace(".out", ".in")) not in valid_inputs
443+
assert "in/" + os.path.basename(outputs[1].replace(".out", ".in")) in valid_inputs

0 commit comments

Comments (0)