
Commit

Reducing lots of noise from debug statements and unneeded detail in logs.
sven-oly committed Nov 13, 2024
1 parent 734ef2c commit 5ec198a
Showing 11 changed files with 138 additions and 151 deletions.
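The recurring change across these files is the same pattern: direct calls to the root logger (logging.debug, logging.info, and so on) become calls on a module-level named logger, and routine progress detail drops from info to debug. Below is a minimal sketch of the resulting shape, condensed from the check_generated_data.py hunks that follow (the logger name and messages come from that file; the surrounding logic is trimmed):

    import logging
    import logging.config

    # A named, module-level logger replaces direct root-logger calls.
    logger = logging.Logger("Checking Test Data vs. Schemas LOGGER")
    logger.setLevel(logging.INFO)


    def main(args):
        # Handlers and formatters still come from the shared config file.
        logging.config.fileConfig("../logging.conf")

        if len(args) <= 1:
            logger.error('Please specify the path to test data directory')
            return
        test_data_path = args[1]

        # Progress detail is logged at debug level, so it stays quiet at the
        # default INFO level set above.
        logger.debug('TEST DATA PATH = %s', test_data_path)
        logger.info('+++ Test Generated test data vs. schemas files')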
24 changes: 12 additions & 12 deletions schema/check_generated_data.py
@@ -14,20 +14,20 @@
import schema_validator
from schema_files import ALL_TEST_TYPES

logger = logging.Logger("Checking Test Data vs. Schemas LOGGER")
logger.setLevel(logging.INFO)


def main(args):
logging.config.fileConfig("../logging.conf")

if len(args) <= 1:
logging.error('Please specify the path to test data directory')
logger.error('Please specify the path to test data directory')
return
else:
test_data_path = args[1]

logging.debug('TEST DATA PATH = %s', test_data_path)

logger = logging.Logger("Checking Test Data vs. Schemas LOGGER")
logger.setLevel(logging.INFO)
logger.info('+++ Test Generated test data vs. schemas files')

# TODO: get ICU versions
@@ -38,8 +38,8 @@ def main(args):
for dir_name in icu_dirs:
icu_versions.append(os.path.basename(dir_name))

logging.debug('ICU directories = %s', icu_versions)
logging.debug('test types = %s', ALL_TEST_TYPES)
logger.debug('ICU directories = %s', icu_versions)
logger.debug('test types = %s', ALL_TEST_TYPES)

validator = schema_validator.ConformanceSchemaValidator()

@@ -51,7 +51,7 @@ def main(args):
validator.debug = 1

all_results = validator.validate_test_data_with_schema()
logging.info(' %d results for generated test data', len(all_results))
logger.debug(' %d results for generated test data', len(all_results))

schema_errors = []
failed_validations = []
@@ -77,7 +77,7 @@ def main(args):
try:
summary_data = json.dumps(summary_json)
except BaseException as error:
logging.error('json.dumps Summary data problem: %s at %s', error, error)
logger.error('json.dumps Summary data problem: %s at %s', error, error)
exit(1)

output_filename = os.path.join(test_data_path, 'test_data_validation_summary.json')
@@ -87,17 +87,17 @@ def main(args):
file_out.close()
except BaseException as error:
schema_errors.append(output_filename)
logging.fatal('Error: %s. Cannot save validation summary in file %s', error, output_filename)
logger.fatal('Error: %s. Cannot save validation summary in file %s', error, output_filename)
exit(1)

if schema_errors:
logging.critical('Test data files: %d fail out of %d:',
logger.critical('Test data files: %d fail out of %d:',
len(schema_errors), schema_count)
for failure in schema_errors:
logging.critical(' %s', failure)
logger.critical(' %s', failure)
exit(1)
else:
logging.info("All %d generated test data files match with schema", schema_count)
logger.info("All %d generated test data files match with schema", schema_count)
exit(0)
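The summary-writing step in the hunks above is easy to lose in the churn; here is a condensed sketch of just that step, keeping the same output file name but using a hypothetical write_summary(test_data_path, summary_json, logger) helper and a with block in place of the explicit close:

    import json
    import os


    def write_summary(test_data_path, summary_json, logger):
        # Serialize first, so a non-serializable value fails before any file is touched.
        try:
            summary_data = json.dumps(summary_json)
        except TypeError as error:
            logger.error('json.dumps Summary data problem: %s', error)
            raise SystemExit(1)

        output_filename = os.path.join(test_data_path, 'test_data_validation_summary.json')
        try:
            with open(output_filename, 'w', encoding='utf-8') as file_out:
                file_out.write(summary_data)
        except OSError as error:
            logger.fatal('Error: %s. Cannot save validation summary in file %s',
                         error, output_filename)
            raise SystemExit(1)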


22 changes: 12 additions & 10 deletions schema/check_test_output.py
@@ -24,10 +24,12 @@ def main(args):
else:
test_output_path = args[1]

logging.debug('TEST OUTPUT PATH = %s', test_output_path)

logger = logging.Logger("Checking Test Data vs. Schemas LOGGER")
logger.setLevel(logging.INFO)

logger.debug('TEST OUTPUT PATH = %s', test_output_path)

logger.info('+++ Test Generated test data vs. schemas files')

# TODO: get ICU versions
@@ -54,14 +56,14 @@
test_type = schema_files.TEST_FILE_TO_TEST_TYPE_MAP[test_file_prefix]
test_type_set.add(test_type)
except BaseException as err:
logging.debug('No file (%s) during schema check output: %s', file, err
logger.debug('No file (%s) during schema check output: %s', file, err
)
for dir_name in icu_dirs:
icu_version_set.add(os.path.basename(dir_name))

icu_versions = sorted(list(icu_version_set))
logging.debug('ICU directories = %s', icu_versions)
logging.debug('test types = %s', ALL_TEST_TYPES)
logger.debug('ICU directories = %s', icu_versions)
logger.debug('test types = %s', ALL_TEST_TYPES)

validator = schema_validator.ConformanceSchemaValidator()
# Todo: use setters to initialize validator
@@ -74,7 +76,7 @@ def main(args):
validator.debug = 1

all_results, test_validation_plans = validator.validate_test_output_with_schema()
logging.info(' %d results for test output', len(all_results))
logger.info(' %d results for test output', len(all_results))

# Check if any files in the expected list were not validated.
test_paths = set()
@@ -83,7 +85,7 @@

for json_file in json_files:
if json_file not in test_paths:
logging.fatal('JSON file %s was not verified against a schema', json_file)
logger.fatal('JSON file %s was not verified against a schema', json_file)
# Bail out right away!
exit(1)

@@ -108,14 +110,14 @@ def main(args):
}
}
except BaseException as error:
logging.fatal('Cannot create summary_json %s', error)
logger.fatal('Cannot create summary_json %s', error)
exit(1)

# Create outputs from these results.
try:
summary_data = json.dumps(summary_json)
except TypeError as err:
logging.fatal('Error: %s\n Cannot dump JSON for %s', err, summary_json)
logger.fatal('Error: %s\n Cannot dump JSON for %s', err, summary_json)
exit(1)

output_filename = os.path.join(test_output_path, 'test_output_validation_summary.json')
@@ -124,11 +126,11 @@ def main(args):
file_out.write(summary_data)
file_out.close()
except BaseException as error:
logging.fatal('Error: %s. Cannot save validation summary in file %s', error, output_filename)
logger.fatal('Error: %s. Cannot save validation summary in file %s', error, output_filename)
# Don't continue after this problem.
exit(1)

logging.info("All %d test output files match with schema", schema_count)
logger.info("All %d test output files match with schema", schema_count)
return


22 changes: 10 additions & 12 deletions schema/schema_validator.py
@@ -45,7 +45,6 @@ def __init__(self):
self.test_types = schema_files.ALL_TEST_TYPES
self.executors = []
self.icu_versions = []
self.debug_leve = 0

logging.config.fileConfig("../logging.conf")

@@ -145,7 +144,6 @@ def validate_test_data_with_schema(self):
schema_test_info.append(file_path_pair)
else:
test_data_files_not_found.append([icu_version, test_type])
logging.debug('No data test file %s for %s, %s', file_path_pair, test_type, icu_version)
pass

if test_data_files_not_found:
@@ -161,7 +159,8 @@ def validate_test_data_with_schema(self):
logging.warning('FAIL: Test data %s, %s. MSG=%s',
result_data['test_type'], result_data['icu_version'], result_data['err_info'])
else:
logging.debug('Test data validated: %s %s', result_data['test_type'], result_data['icu_version'])
pass

all_results.append(result_data)
return all_results

@@ -217,7 +216,7 @@ def check_test_data_against_schema(self, schema_info):

def check_test_data_schema(self, icu_version, test_type):
# Check the generated test data for structure against the schema
logging.debug('Validating %s with %s', test_type, icu_version)
logging.info('Validating %s with %s', test_type, icu_version)

# Check test output vs. the test data schema
schema_verify_file = os.path.join(self.schema_base, test_type, 'test_schema.json')
@@ -247,7 +246,7 @@ def check_test_data_schema(self, icu_version, test_type):

results['result'] = result
if result:
logging.debug('Test data %s validated successfully, with ICU %s', test_type, icu_version)
logging.info('Test data %s validated with ICU %s', test_type, icu_version)
else:
logging.error('Test data %s FAILED with ICU %s: %s', test_type, icu_version, err_info)

@@ -278,7 +277,7 @@ def get_test_output_schema_plan(self, icu_version, test_type, executor):

def check_test_output_schema(self, icu_version, test_type, executor):
# Check the output of the tests for structure against the schema
logging.debug('Validating test output: %s %s %s', executor, test_type, icu_version)
logging.info('Validating test output: %s %s %s', executor, test_type, icu_version)

# Check test output vs. the schema
schema_file_name = SCHEMA_FILE_MAP[test_type]['result_data']['schema_file']
@@ -333,7 +332,6 @@ def validate_schema_file(self, schema_file_path):
logging.fatal('%s for %s. Cannot get test_type value', err, schema_file_path, test_type)
return [False, err, schema_file_path, test_type]

logging.info('Checking schema %s', schema_file_path)
try:
# With just a schema, it validates the schema.
# However Validator.check_schema doesn't fail as expected.
@@ -461,20 +459,20 @@ def main(args):
'icu76']
schema_validator.executors = ['node', 'rust', 'dart_web', 'dart_native', 'icu4j']

logging.info('Checking test outputs')
logger.info('Checking test outputs')
all_test_out_results = schema_validator.validate_test_output_with_schema()

# Check all schema files for correctness.
schema_errors = schema_validator.check_schema_files()
if schema_errors:
logging.error('INVALID SCHEMA: %s', schema_errors)
logger.error('INVALID SCHEMA: %s', schema_errors)
else:
logging.info('All schemas are valid: %s', schema_errors)
logger.info('All schemas are valid: %s', schema_errors)

logging.info('Checking generated data')
logger.info('Checking generated data')
all_test_data_results = schema_validator.validate_test_data_with_schema()

logging.info('Checking test outputs')
logger.info('Checking test outputs')
all_test_out_results = schema_validator.validate_test_output_with_schema()

return
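ConformanceSchemaValidator itself is only partly visible in this commit. For orientation, here is a minimal sketch of the kind of per-file check such a validator typically performs, assuming the jsonschema package and Draft 7 schemas; neither assumption is confirmed by this diff, and the function name below is illustrative only:

    import json

    from jsonschema import Draft7Validator
    from jsonschema.exceptions import SchemaError, ValidationError


    def check_file_against_schema(data_path, schema_path):
        """Return (result, err_info) for one JSON file checked against one schema."""
        with open(schema_path, encoding='utf-8') as schema_file:
            schema = json.load(schema_file)
        with open(data_path, encoding='utf-8') as data_file:
            data = json.load(data_file)

        try:
            Draft7Validator.check_schema(schema)    # is the schema itself well formed?
            Draft7Validator(schema).validate(data)  # does the data conform to it?
            return True, None
        except SchemaError as err:
            return False, 'Schema problem: %s' % err
        except ValidationError as err:
            return False, 'Validation failed: %s' % err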
1 change: 1 addition & 0 deletions testdriver/datasets.py
@@ -472,6 +472,7 @@ def printCldrIcuMap():
except KeyError:
logging.debug(' %s not in map', name)


def main(args):

logging.config.fileConfig("../logging.conf")
37 changes: 15 additions & 22 deletions testdriver/testdriver.py
@@ -18,8 +18,9 @@

from testplan import TestPlan

logger = logging.Logger("TEST DRIVER LOGGER")
logger.setLevel(logging.INFO)

# TODO: Separate TestPlan into another module.

class TestDriver:
def __init__(self):
@@ -46,20 +47,19 @@ def set_args(self, arg_options):
for test_type in arg_options.test_type:

if test_type not in ddt_data.testDatasets:
logging.warning('**** WARNING: test_type %s not in testDatasets', test_type)
logger.warning('**** WARNING: test_type %s not in testDatasets', test_type)
else:
# Create a test plan based on data and options
test_data_info = ddt_data.testDatasets[test_type]
if self.debug:
logging.debug('$$$$$ test_type = %s test_data_info = %s',
test_type, test_data_info.testDataFilename)
logger.debug('$$$$$ test_type = %s test_data_info = %s',
test_type, test_data_info.testDataFilename)

for executor in arg_options.exec:
if not ddt_data.allExecutors.has(executor):
# Run a non-specified executor. Compatibility of versions
# between test data and the executor should be done by the test executor
# program itself.
logging.error('No executable command configured for executor platform: %s', executor)
logger.error('No executable command configured for executor platform: %s', executor)
exec_command = {'path': executor}
else:
# Set details for execution from ExecutorInfo
@@ -76,7 +76,7 @@ def set_args(self, arg_options):
test_data = ddt_data.testDatasets[test_type]
new_plan.set_test_data(test_data)
except KeyError as err:
logging.warning('!!! %s: No test data filename for %s', err, test_type)
logger.warning('!!! %s: No test data filename for %s', err, test_type)

if not new_plan.ignore:
self.test_plans.append(new_plan)
@@ -91,7 +91,7 @@ def parse_args(self, args):

# Get all the arguments
argparse = ddtargs.DdtArgs(args)
logging.debug('TestDriver OPTIONS: %s', argparse.getOptions())
logger.debug('TestDriver OPTIONS: %s', argparse.getOptions())

# Now use the argparse.options to set the values in the driver
self.set_args(argparse.getOptions())
@@ -104,7 +104,7 @@ def run_plans(self):
plan.run_plan()

def run_one(self, plan):
logging.debug("Parallel of %s %s %s" % (plan.test_lang, plan.test_type, plan.icu_version))
logger.debug("Parallel of %s %s %s" % (plan.test_lang, plan.test_type, plan.icu_version))
plan.run_plan()

def run_plans_parallel(self):
@@ -114,36 +114,29 @@ def run_plans_parallel(self):
num_processors = mp.cpu_count()

plan_info = '%s, %s' % (self.test_plans[0].test_type, self.test_plans[0].exec_command)
logging.info('TestDriver: %s processors for %s plans. %s' %
logger.info('TestDriver: %s processors for %s plans. %s' %
(num_processors, len(self.test_plans), plan_info))

processor_pool = mp.Pool(num_processors)
with processor_pool as p:
p.map(self.run_one, self.test_plans)



# Run the test with command line arguments
def main(args):
driver = TestDriver()
# print('ARGS = %s' % (args))
driver.parse_args(args[1:])

logger = logging.Logger("TEST DRIVER LOGGER")
logger.setLevel(logging.INFO)

driver = TestDriver()

logger.debug('ARGS = %s', args)
driver.parse_args(args[1:])

if driver.run_serial:
driver.run_plans()
else:
driver.run_plans_parallel()

# if len(args)> 2:
# Set limit on number to run
# numberToRun = int(args[2])
# driver.runLimit = numberToRun

# driver.initExecutor()


if __name__ == "__main__":
main(sys.argv)
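The parallel path above fans the test plans out over a multiprocessing pool, one worker per processor. A stripped-down sketch of that fan-out, using standalone functions instead of the TestDriver methods shown in the diff:

    import multiprocessing as mp


    def run_one(plan):
        # Each worker process runs a single test plan to completion.
        plan.run_plan()


    def run_plans_parallel(test_plans):
        if not test_plans:
            return
        num_processors = mp.cpu_count()
        # One worker per processor; map blocks until every plan has finished.
        with mp.Pool(num_processors) as pool:
            pool.map(run_one, test_plans)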
