Reduce standard output printing (#136)
echeran authored Nov 27, 2023
1 parent c202ef1 commit 2a9f9bf
Showing 15 changed files with 192 additions and 147 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -31,4 +31,6 @@ testgen/*.json

TEMP_DATA/*

# log files (including logrotate state and file backups)
debug.log*
logrotate.state
4 changes: 4 additions & 0 deletions README.md
@@ -252,6 +252,10 @@ Requirements to run Data Driven Testing code locally:
```
rustup update
```
- Install `logrotate`
```
sudo apt-get install logrotate
```

# History

45 changes: 32 additions & 13 deletions generateDataAndRun.sh
@@ -5,28 +5,26 @@
# Save the results
set -e

# Rotate log files
logrotate -s logrotate.state logrotate.conf

##########
# Setup (generate) test data & expected values
##########

# Enable setting the version of NodeJS
export NVM_DIR=$HOME/.nvm;
source $NVM_DIR/nvm.sh;

#
# Setup
#

export TEMP_DIR=TEMP_DATA
rm -rf $TEMP_DIR

# Clear out old data, then create new directory and copy test / verify data there
mkdir -p $TEMP_DIR/testData

#
# Setup (generate) test data & expected values
#

source_file=${1:-'run_config.json'}


# Generates all new test data
source_file=${1:-'run_config.json'}
pushd testgen
all_icu_versions=$(jq '.[].run.icu_version' ../$source_file | jq -s '.' | jq 'unique' | jq -r 'join(" ")')
python3 testdata_gen.py --icu_versions $all_icu_versions
@@ -41,7 +39,10 @@ python3 check_schemas.py $pwd
python3 check_generated_data.py ../$TEMP_DIR/testData
popd

all_execs_json=$(jq '.[].run.exec' $source_file | jq -s '.' | jq 'unique')
##########
# Run tests using per-platform executors
##########

#
# Run test data tests through all executors
#
@@ -54,6 +55,15 @@ all_execs_json=$(jq '.[].run.exec' $source_file | jq -s '.' | jq 'unique')
# popd
# fi

#
# Run Dart executors in a custom way
#

# TODO(?): Figure out why datasets.py can't support running multiple CLI commands,
# if that is the reason why Dart needs custom handling in this end-to-end script

all_execs_json=$(jq '.[].run.exec' $source_file | jq -s '.' | jq 'unique')

if jq -e 'index("dart_native")' <<< $all_execs_json > /dev/null
then
pushd executors/dart_native/
@@ -73,12 +83,17 @@ fi
# Executes all tests on that new data in the new directory
mkdir -p $TEMP_DIR/testOutput

#
# Invoke all tests on all platforms
#

# Change to directory of `testdriver` (which will be used to invoke each platform executor)
pushd testdriver

# Set to use NVM
source "$HOME/.nvm/nvm.sh"

# Invoke all tests
jq -c '.[]' ../$source_file | while read i; do
if jq -e 'has("prereq")' <<< $i > /dev/null
then
@@ -97,9 +112,9 @@ done
# Done with test execution
popd

#
##########
# Run verifier
#
##########

# Verify that test output matches schema.
pushd schema
@@ -116,6 +131,10 @@ python3 verifier.py --file_base ../$TEMP_DIR --exec $all_execs --test_type $all_

popd

##########
# Finish and clean up
##########

#
# Push test results and test reports to Cloud Storage
# TODO
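
The jq pipelines above (`.[].run.icu_version`, `.[].run.exec`, and the per-entry `has("prereq")` check) imply that `run_config.json` is a JSON array of objects, each with a `run` block and an optional `prereq` block. The actual file is not part of this diff, so the sketch below is only an illustration of that assumed shape, showing a Python equivalent of the two "unique" queries.

```
import json

# Assumed shape of run_config.json, inferred from the jq queries in
# generateDataAndRun.sh; the entries and field values here are illustrative.
example_config = [
    {
        "prereq": {"name": "build_executor"},   # hypothetical prereq block
        "run": {"icu_version": "icu73", "exec": "rust"},
    },
    {"run": {"icu_version": "icu74", "exec": "node"}},
]

# jq '.[].run.icu_version' run_config.json | jq -s '.' | jq 'unique' | jq -r 'join(" ")'
all_icu_versions = " ".join(sorted({entry["run"]["icu_version"] for entry in example_config}))

# jq '.[].run.exec' run_config.json | jq -s '.' | jq 'unique'
all_execs = sorted({entry["run"]["exec"] for entry in example_config})

print(all_icu_versions)  # "icu73 icu74"
print(all_execs)         # ['node', 'rust']
```
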
7 changes: 2 additions & 5 deletions logging.conf
@@ -18,13 +18,10 @@ format=[%(asctime)s,%(module)s:%(lineno)d] [%(levelname)s] %(message)s
keys=file,screen

[handler_file]
class=handlers.TimedRotatingFileHandler
interval=midnight
backupCount=5
class=handlers.RotatingFileHandler
formatter=complex
level=DEBUG
# equivalent to: 'debug.log', when='S', interval=10, backupCount=5
args=('debug.log', 'S', 10, 5)
args=('debug.log', 'a')

[handler_screen]
class=StreamHandler
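
The `[handler_file]` change swaps the self-rotating `TimedRotatingFileHandler` (rotate every 10 seconds, keep 5 backups) for a plain `RotatingFileHandler` opened in append mode, leaving rotation to the external `logrotate` job added above. A minimal programmatic sketch of what the new section amounts to, assuming the format string in the hunk context is the `complex` formatter:

```
import logging
from logging.handlers import RotatingFileHandler

# Equivalent of the new [handler_file] section: args=('debug.log', 'a') maps to
# (filename, mode); with the default maxBytes=0 the handler never rotates itself.
file_handler = RotatingFileHandler('debug.log', 'a')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(
    '[%(asctime)s,%(module)s:%(lineno)d] [%(levelname)s] %(message)s'))

root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(file_handler)
root.debug('written to debug.log; rotation is handled externally by logrotate')
```
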
19 changes: 19 additions & 0 deletions logrotate.conf
@@ -0,0 +1,19 @@
"testdriver/debug.log" {
rotate 3
missingok
}

"verifier/debug.log" {
rotate 3
missingok
}

"schema/debug.log" {
rotate 3
missingok
}

"testgen/debug.log" {
rotate 3
missingok
}
18 changes: 9 additions & 9 deletions schema/check_generated_data.py
@@ -25,7 +25,7 @@ def main(args):
else:
test_data_path = args[1]

print('TEST DATA PATH = %s' % test_data_path)
logging.debug('TEST DATA PATH = %s', test_data_path)

logger = logging.Logger("Checking Test Data vs. Schemas LOGGER")
logger.setLevel(logging.INFO)
@@ -37,12 +37,12 @@ def main(args):
if os.path.exists(test_data_path):
check_path = os.path.join(test_data_path, 'icu*')
icu_dirs = glob.glob(check_path)
print('ICU DIRECTORIES = %s' % icu_dirs)
logging.debug('ICU DIRECTORIES = %s', icu_dirs)
for dir in icu_dirs:
icu_versions.append(os.path.basename(dir))

print('ICU directories = %s' % icu_versions)
print('test types = %s' % ALL_TEST_TYPES)
logging.debug('ICU directories = %s', icu_versions)
logging.debug('test types = %s', ALL_TEST_TYPES)

validator = schema_validator.ConformanceSchemaValidator()
# Todo: use setters to initialize validator
@@ -56,7 +56,7 @@ def main(args):
schema_count = 0

all_results = validator.validate_test_data_with_schema()
print(' %d results for generated test data' % (len(all_results)))
logging.info(' %d results for generated test data', len(all_results))

schema_errors = 0
failed_validations = []
@@ -92,13 +92,13 @@ def main(args):


if schema_errors:
print('Test data file files: %d fail out of %d:' % (
len(schema_errors, schema_count)))
logging.critical('Test data files: %d fail out of %d:',
len(schema_errors), schema_count)
for failure in schema_errors:
print(' %s' % failure)
logging.critical(' %s', failure)
exit(1)
else:
print("All %d generated test data files match with schema" % schema_count)
logging.info("All %d generated test data files match with schema", schema_count)
exit(0)

if __name__ == "__main__":
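
The print-to-logging conversions in this and the following scripts rely on logging's lazy %-style argument passing: the message template and its arguments are handed over separately, and interpolation only happens if a handler actually emits the record. A small self-contained sketch (not code from the repository):

```
import logging

logging.basicConfig(level=logging.INFO)  # records below INFO are filtered out

icu_dirs = ['icu73', 'icu74']

# print always builds the string and always writes to stdout:
#   print('ICU DIRECTORIES = %s' % icu_dirs)
# logging defers formatting and routes output through the configured handlers:
logging.debug('ICU DIRECTORIES = %s', icu_dirs)                     # dropped at INFO
logging.info('%d results for generated test data', len(icu_dirs))   # emitted
```
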
10 changes: 5 additions & 5 deletions schema/check_schemas.py
@@ -24,7 +24,7 @@ def save_schema_validation_summary(self, validation_status):
failed_validations = []
passed_validations = []
for result in validation_status:
print(result)
logging.debug(result)
if result['result']:
passed_validations.append(result)
else:
@@ -99,13 +99,13 @@ def main(args):
ok = val_schema.save_schema_validation_summary(validation_status)

if schema_errors:
print('SCHEMA: %d fail out of %d:' % (
len(schema_errors), schema_count))
logging.error('SCHEMA: %d fail out of %d:',
len(schema_errors), schema_count)
for failure in schema_errors:
print(' %s' % failure)
logging.error(' %s', failure)
exit(1)
else:
print("All %d schema are valid" % schema_count)
logging.info("All %d schema are valid", schema_count)
exit(0)


18 changes: 9 additions & 9 deletions schema/check_test_output.py
@@ -26,7 +26,7 @@ def main(args):
else:
test_output_path = args[1]

print('TEST OUTPUT PATH = %s' % test_output_path)
logging.debug('TEST OUTPUT PATH = %s', test_output_path)

logger = logging.Logger("Checking Test Data vs. Schemas LOGGER")
logger.setLevel(logging.INFO)
@@ -61,8 +61,8 @@ def main(args):
icu_version_set.add(os.path.basename(dir))

icu_versions = sorted(list(icu_version_set))
print('ICU directories = %s' % icu_versions)
print('test types = %s' % ALL_TEST_TYPES)
logging.debug('ICU directories = %s', icu_versions)
logging.debug('test types = %s', ALL_TEST_TYPES)

validator = schema_validator.ConformanceSchemaValidator()
# Todo: use setters to initialize validator
@@ -78,14 +78,14 @@ def main(args):
schema_count = 0

all_results = validator.validate_test_output_with_schema()
print(' %d results for generated test data' % (len(all_results)))
logging.debug(' %d results for generated test data', len(all_results))

schema_errors = 0
failed_validations = []
passed_validations = []
schema_count = len(all_results)
for result in all_results:
print(result)
logging.debug(result)
if result['result']:
passed_validations.append(result)
else:
@@ -114,13 +114,13 @@


if schema_errors:
print('Test data file files: %d fail out of %d:' % (
len(schema_errors, schema_count)))
logging.error('Test data files: %d fail out of %d:',
len(schema_errors), schema_count)
for failure in schema_errors:
print(' %s' % failure)
logging.error(' %s', failure)
exit(1)
else:
print("All %d test output files match with schema" % schema_count)
logging.info("All %d test output files match with schema", schema_count)
exit(0)

if __name__ == "__main__":
22 changes: 11 additions & 11 deletions schema/schema_validator.py
@@ -93,10 +93,10 @@ def validate_test_data_with_schema(self):
for test_type in self.test_types:
for icu_version in self.icu_versions:
if self.debug > 0:
logging.debug('Checking test data %s, %s', test_type, icu_version)
logging.info('Checking test data %s, %s', test_type, icu_version)
logging.info('Checking %s, %s', test_type, icu_version)
result_data = self.check_test_data_schema(icu_version, test_type)
print(result_data)
logging.debug('test result data = %s', result_data)
msg = result_data['err_info']
if not result_data['data_file_name']:
# This is not an error but simply a test that wasn't run.
@@ -105,7 +105,7 @@
logging.warning('VALIDATION FAILS: %s %s. MSG=%s',
test_type, icu_version, result_data['err_info'])
else:
logging.warning('VALIDATION WORKS: %s %s', test_type, icu_version)
logging.info('VALIDATION WORKS: %s %s', test_type, icu_version)
all_results.append(result_data)
return all_results

@@ -244,7 +244,7 @@ def validate_test_output_with_schema(self):
logging.warning('VALIDATION FAILS: %s %s %s. MSG=%s',
test_type, icu_version, executor, results['err_info'])
else:
logging.warning('VALIDATION WORKS: %s %s %s', test_type, icu_version, executor)
logging.info('VALIDATION WORKS: %s %s %s', test_type, icu_version, executor)
all_results.append(results)
return all_results

Expand All @@ -266,7 +266,7 @@ def process_args(args):
# Directory for test result files
# Get name of test and type
if len(args) < 2:
print('you gotta give me something...')
logging.error('Not enough arguments provided')
return

base_folder = args[1]
@@ -327,10 +327,10 @@ def main(args):
schema_validator.icu_versions = ['icu71', 'icu72', 'icu73', 'icu74']
schema_validator.executors = ['node', 'rust', 'dart_web']

print('Checking test outputs')
logging.info('Checking test outputs')
all_test_out_results = schema_validator.validate_test_output_with_schema()
for result in all_test_out_results:
print(' %s' % result)
logging.debug(' %s', result)

# Check all schema files for correctness.
schema_errors = schema_validator.check_schema_files()
@@ -342,16 +342,16 @@
icu_versions = ['icu71', 'icu72', 'icu73', 'icu74']
executor_list = ['node', 'rust', 'dart_web']

print('Checking generated data')
logging.info('Checking generated data')
all_test_data_results = schema_validator.validate_test_data_with_schema()
for result in all_test_data_results:

print(' %s' % result)
logging.debug(' %s', result)

print('Checking test outputs')
logging.info('Checking test outputs')
all_test_out_results = schema_validator.validate_test_output_with_schema()
for result in all_test_out_results:
print(' %s' % result)
logging.debug(' %s', result)
return

if __name__ == "__main__":
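
Taken together, the changes in these Python files demote routine progress output from print (or warning) to debug/info, while keeping warning/error/critical for genuine problems. With logging.conf sending records to both a DEBUG-level file handler and a screen StreamHandler, the screen handler's threshold, which is not visible in this excerpt, decides how much still reaches standard output. A hedged sketch of that split, with assumed levels:

```
import logging

logger = logging.getLogger('conformance')   # hypothetical logger name
logger.setLevel(logging.DEBUG)

# File handler keeps everything, mirroring level=DEBUG in [handler_file].
file_handler = logging.FileHandler('debug.log')
file_handler.setLevel(logging.DEBUG)

# The screen threshold below is an assumption; [handler_screen]'s level is cut
# off in this diff. Anything under the threshold stays out of standard output.
screen_handler = logging.StreamHandler()
screen_handler.setLevel(logging.WARNING)

logger.addHandler(file_handler)
logger.addHandler(screen_handler)

logger.debug('per-result details')       # debug.log only
logger.info('Checking test outputs')     # debug.log only
logger.warning('VALIDATION FAILS: ...')  # debug.log and the screen
```
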