This repository was archived by the owner on Sep 9, 2022. It is now read-only.

Commit d94db24

Support having sample subtask on output only (#19)
* Support having sample subtask on output only
* Add error alert if samples subtask index is non-zero
* Add a comment explaining why tests_map uses list
1 parent 72b9f2c commit d94db24

3 files changed (+27, -9 lines)
scripts/internal/gen_data_parser.py (+6, -3)
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ def parse_data_or_throw(gen_data, task_data, visitor):
3939
line_number = 0
4040

4141
testset_index, testset_name = -1, None
42-
subtask_index, subtask_counter = -1, -1
42+
subtask_index, subtask_counter = -1, 0
4343
test_index, test_offset = 1, -1
4444
defined_testsets = set()
4545

@@ -62,8 +62,11 @@ def parse_data_or_throw(gen_data, task_data, visitor):
6262
visitor.on_testset(testset_name, line_number)
6363
if command == "@subtask":
6464
visitor.on_subtask(testset_name, line_number)
65-
subtask_counter += 1
66-
subtask_index = subtask_counter
65+
if testset_name != "samples":
66+
subtask_counter += 1
67+
subtask_index = subtask_counter
68+
else:
69+
subtask_index = 0
6770
else:
6871
subtask_index = -1
6972
elif command == "@include":
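
The effect of this parser change, as a minimal standalone sketch (the subtask names other than "samples" are made up): the samples subtask is pinned to index 0 and no longer consumes a counter value, so regular subtasks still start at 1.

# Minimal sketch of the new indexing rule; "small"/"large" are hypothetical names.
subtask_counter = 0
for name in ["samples", "small", "large"]:
    if name != "samples":
        subtask_counter += 1
        subtask_index = subtask_counter
    else:
        subtask_index = 0
    print(name, "->", subtask_index)
# Prints: samples -> 0, small -> 1, large -> 2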

scripts/templates/test_name.py (+1, -1)
Original file line numberDiff line numberDiff line change
@@ -2,5 +2,5 @@
22
def get_test_name(task_data, testset_name, testset_index, subtask_index, test_index, test_offset, gen_line): #pylint: disable=too-many-arguments
33
#pylint: disable=unused-argument
44
if task_data['type'] == "OutputOnly":
5-
return "%02d" % test_index
5+
return "%02d" % subtask_index
66
return (testset_name if subtask_index < 0 else str(subtask_index)) + "-%02d" % test_offset
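
A quick sanity check of the rename, using a standalone copy of the new function (the argument values and the "Batch" task type are hypothetical):

# Standalone copy of the new function, for illustration only.
def get_test_name(task_data, testset_name, testset_index, subtask_index,
                  test_index, test_offset, gen_line):
    if task_data['type'] == "OutputOnly":
        return "%02d" % subtask_index
    return (testset_name if subtask_index < 0 else str(subtask_index)) + "-%02d" % test_offset

print(get_test_name({'type': "OutputOnly"}, "samples", 0, 0, 1, -1, None))  # "00"
print(get_test_name({'type': "Batch"}, "samples", 0, 0, 1, 3, None))        # "0-03"
# Old code returned "01" on the first call (from test_index); now it returns
# "00" (from subtask_index), so each OutputOnly subtask maps to one fixed filename.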

scripts/verify.py (+20, -5)
@@ -371,6 +371,8 @@ def check_validator_key(parent, key, name, parName=None):
         if Verification.problem['type'] != 'OutputOnly':
             check_keys(subtasks, ['samples'])
             hasSamples = True
+        else:
+            hasSamples = 'samples' in subtasks
     except KeyError:
         pass

@@ -394,6 +396,8 @@ def check_validator_key(parent, key, name, parName=None):
         elif name == 'samples':
             if data['score'] != 0:
                 error('samples subtask score is non-zero')
+            if data['index'] != 0:
+                error('samples subtask index is non-zero')
         else:
             score_sum += data['score']
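
A hypothetical 'samples' entry showing what the added check rejects (field names follow the data['score'] / data['index'] accesses above; print stands in for the repo's error helper):

# Hypothetical subtask data, for illustration only.
data = {'index': 1, 'score': 0}
if data['score'] != 0:
    print('samples subtask score is non-zero')
if data['index'] != 0:
    print('samples subtask index is non-zero')  # now triggers: samples must sit at index 0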

@@ -407,8 +411,9 @@ def check_validator_key(parent, key, name, parName=None):
         error('sum of scores is {}'.format(score_sum))

     for i in range(len(subtasks)):
-        if i+(0 if hasSamples else 1) not in indexes:
-            error('missing index {} in subtask indexes'.format(i))
+        index = i+(0 if hasSamples else 1)
+        if index not in indexes:
+            error('missing index {} in subtask indexes'.format(index))

     return subtasks
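
The old error message had an off-by-one whenever there is no samples subtask, since expected indexes then start at 1. A small sketch with made-up data:

# Hypothetical: three subtasks, no samples, so expected indexes are 1..3.
indexes, hasSamples = {1, 3}, False
for i in range(3):
    index = i + (0 if hasSamples else 1)
    if index not in indexes:
        print('missing index {} in subtask indexes'.format(index))
# The old code formatted i and reported "missing index 1"; the fixed
# code reports the actual missing index, 2.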

@@ -424,18 +429,20 @@ def __init__(self):

     def on_testset(self, testset_name, line_number):
         self.testsets.append(testset_name)
-        self.tests_map[testset_name] = set()
+        # We use list instead of set here to detect if there is more than
+        # one test in each subtask (since they will have the same filename).
+        self.tests_map[testset_name] = []

     def on_subtask(self, subtask_name, line_number):
         self.subtasks.append(subtask_name)
         self.used_testsets.add(subtask_name)

     def on_include(self, testset_name, included_testset, line_number):
         self.used_testsets.add(included_testset)
-        self.tests_map[testset_name] |= self.tests_map[included_testset]
+        self.tests_map[testset_name] += self.tests_map[included_testset]

     def on_test(self, testset_name, test_name, line, line_number):
-        self.tests_map[testset_name].add(test_name)
+        self.tests_map[testset_name].append(test_name)


 gen_data = GenDataVisitor()
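
The switch from set to list matters because OutputOnly test names are now derived from the subtask index, so two tests in one subtask collide on the same filename; a set would silently deduplicate them, as this small sketch (hypothetical names) shows:

# A set silently collapses a duplicated test name:
tests = set()
tests.add("00")
tests.add("00")
print(len(tests))   # 1 -- the duplicate is invisible

# A list keeps both entries, so the later single-test check can fire:
tests = []
tests.append("00")
tests.append("00")
print(len(tests))   # 2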

@@ -484,6 +491,14 @@ def on_test(self, testset_name, test_name, line, line_number):
     else:
         warning("testset '{}' has no tests".format(testset))

+    #Checking for multiple tests in one subtask for output only:
+    if Verification.problem['type'] == 'OutputOnly':
+        for testset in gen_data.testsets:
+            if testset not in gen_subtasks:
+                error('testset {} must not be defined in output only'.format(testset))
+            elif len(gen_data.tests_map[testset]) != 1:
+                error('subtask {} has more than one test'.format(testset))
+
     #Checking if a testset is defined but not used:
     for ts in set(gen_data.testsets)-set(gen_data.used_testsets):
         warning("testset '{}' is not used anywhere".format(ts))
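
End to end, the new OutputOnly verification behaves roughly like this sketch (the visitor state and the 'sub1' name are made up; print stands in for the error helper):

# Hypothetical parser output for an OutputOnly problem.
gen_subtasks = {'samples', 'sub1'}
testsets = ['samples', 'sub1']
tests_map = {'samples': ['00'], 'sub1': ['01', '01']}

for testset in testsets:
    if testset not in gen_subtasks:
        print('testset {} must not be defined in output only'.format(testset))
    elif len(tests_map[testset]) != 1:
        print('subtask {} has more than one test'.format(testset))
# Flags 'sub1': both of its tests would be written to the same file "01".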
