
Merge pull request #191 from rowingdude/rowingdude-patch-1 #36

Workflow file for this run
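# Release Testing: exercises the full analyzeMFT feature surface whenever a
# release is published, a v* tag is pushed, or the workflow is started by hand.
# A manual run can be started with the GitHub CLI, e.g.:
#   gh workflow run release-test.yml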

name: Release Testing

on:
  release:
    types: [published]
  push:
    tags:
      - 'v*'
  workflow_dispatch:

jobs:
  comprehensive-test:
    name: Comprehensive Release Testing
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
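    # The matrix above fans out to 15 combinations (3 operating systems x 5
    # Python versions); every step below runs on each of them.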
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e .
          pip install pytest pytest-asyncio PyYAML
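      # Synthetic MFT images keep the suite self-contained, with no dependency
      # on real forensic evidence. The 'anomaly' type presumably generates
      # malformed or suspicious records to exercise the parser's error paths.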
      - name: Generate comprehensive test data
        run: |
          python analyzeMFT.py --generate-test-mft small_normal.mft --test-records 25 --test-type normal
          python analyzeMFT.py --generate-test-mft medium_normal.mft --test-records 500 --test-type normal
          python analyzeMFT.py --generate-test-mft small_anomaly.mft --test-records 25 --test-type anomaly
          python analyzeMFT.py --generate-test-mft medium_anomaly.mft --test-records 200 --test-type anomaly
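      # One invocation per exporter against the same input, so a failure
      # pinpoints the broken format.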
      - name: Test all export formats
        run: |
          echo "Testing CSV export..."
          python analyzeMFT.py -f small_normal.mft -o release_test.csv --csv -v
          echo "Testing JSON export..."
          python analyzeMFT.py -f small_normal.mft -o release_test.json --json -v
          echo "Testing XML export..."
          python analyzeMFT.py -f small_normal.mft -o release_test.xml --xml -v
          echo "Testing SQLite export..."
          python analyzeMFT.py -f small_normal.mft -o release_test.db --sqlite -v
          echo "Testing Body file export..."
          python analyzeMFT.py -f small_normal.mft -o release_test.body --body -v
          echo "Testing Timeline export..."
          python analyzeMFT.py -f small_normal.mft -o release_test.timeline --timeline -v
          echo "Testing TSK export..."
          python analyzeMFT.py -f small_normal.mft -o release_test.tsk --tsk -v
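      # Profiles are named option presets; the performance profile gets the
      # larger image, presumably because throughput is what it tunes for.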
      - name: Test analysis profiles
        run: |
          echo "Testing profile listing..."
          python analyzeMFT.py --list-profiles
          echo "Testing default profile..."
          python analyzeMFT.py -f small_normal.mft -o profile_default.csv --profile default -v
          echo "Testing quick profile..."
          python analyzeMFT.py -f small_normal.mft -o profile_quick.csv --profile quick -v
          echo "Testing forensic profile..."
          python analyzeMFT.py -f small_normal.mft -o profile_forensic.csv --profile forensic -v
          echo "Testing performance profile..."
          python analyzeMFT.py -f medium_normal.mft -o profile_performance.csv --profile performance -v
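      # --create-config writes a starter JSON config which is then fed back in
      # via --config, proving the round trip works.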
      - name: Test configuration features
        run: |
          echo "Testing config file creation..."
          python analyzeMFT.py --create-config test_config.json
          echo "Testing config file usage..."
          python analyzeMFT.py -f small_normal.mft -o config_test.csv --config test_config.json -v
      - name: Test chunked processing with different sizes
        run: |
          echo "Testing small chunks..."
          python analyzeMFT.py -f medium_normal.mft -o chunked_small.csv --chunk-size 10 -v
          echo "Testing medium chunks..."
          python analyzeMFT.py -f medium_normal.mft -o chunked_medium.csv --chunk-size 100 -v
          echo "Testing large chunks..."
          python analyzeMFT.py -f medium_normal.mft -o chunked_large.csv --chunk-size 250 -v
      - name: Test hash computation
        run: |
          echo "Testing hash computation..."
          python analyzeMFT.py -f small_normal.mft -o hashes_test.csv --hash -v
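      # Negative tests: analyzeMFT is expected to exit non-zero on bad input,
      # so '|| echo' keeps the step green while still logging what happened.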
      - name: Test error handling with invalid files
        run: |
          echo "Testing with non-existent file..."
          python analyzeMFT.py -f nonexistent.mft -o error_test.csv -v || echo "Expected error for non-existent file"
          echo "Testing with invalid MFT file..."
          echo "This is not an MFT file" > invalid.mft
          python analyzeMFT.py -f invalid.mft -o invalid_test.csv -v || echo "Expected error for invalid MFT file"
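      # Inline Python sanity-checks the artifacts' contents rather than merely
      # asserting that the files exist.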
      - name: Validate output file integrity
        run: |
          echo "Validating CSV output..."
          python -c "
          import csv
          with open('release_test.csv', 'r') as f:
              reader = csv.reader(f)
              headers = next(reader)
              rows = list(reader)
          print(f'CSV has {len(headers)} columns and {len(rows)} data rows')
          assert len(rows) > 0, 'CSV should have data rows'
          assert 'Record Number' in headers, 'CSV should have Record Number column'
          "
          echo "Validating JSON output..."
          python -c "
          import json
          with open('release_test.json', 'r') as f:
              data = json.load(f)
          print(f'JSON has {len(data)} records')
          assert len(data) > 0, 'JSON should have records'
          assert isinstance(data, list), 'JSON should be a list'
          "
          echo "Validating SQLite output..."
          python -c "
          import sqlite3
          conn = sqlite3.connect('release_test.db')
          cursor = conn.cursor()
          cursor.execute('SELECT name FROM sqlite_master WHERE type=\"table\"')
          tables = [row[0] for row in cursor.fetchall()]
          print(f'SQLite has tables: {tables}')
          cursor.execute('SELECT COUNT(*) FROM mft_records')
          count = cursor.fetchone()[0]
          print(f'SQLite has {count} records')
          assert count > 0, 'SQLite should have records'
          assert 'mft_records' in tables, 'SQLite should have mft_records table'
          conn.close()
          "
      - name: Test memory efficiency with larger files
        if: matrix.python-version == '3.11' && matrix.os == 'ubuntu-latest'
        run: |
          echo "Testing memory efficiency with larger dataset..."
          python analyzeMFT.py --generate-test-mft large_test.mft --test-records 2000 --test-type normal
          python analyzeMFT.py -f large_test.mft -o large_test.csv --chunk-size 50 -v
          python analyzeMFT.py -f large_test.mft -o large_test.db --sqlite --chunk-size 100 -v
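      # Rough wall-clock benchmark; the hard-coded 500 matches the record
      # count of medium_normal.mft generated earlier.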
      - name: Performance benchmark
        if: matrix.python-version == '3.11' && matrix.os == 'ubuntu-latest'
        run: |
          echo "Running performance benchmark..."
          python -c "
          import time
          import subprocess
          import os

          start_time = time.time()
          result = subprocess.run(['python', 'analyzeMFT.py', '-f', 'medium_normal.mft', '-o', 'benchmark.csv', '--csv'],
                                  capture_output=True, text=True)
          end_time = time.time()
          processing_time = end_time - start_time
          file_size = os.path.getsize('medium_normal.mft')
          records_per_second = 500 / processing_time
          print(f'Processed {file_size} bytes in {processing_time:.2f} seconds')
          print(f'Processing rate: {records_per_second:.1f} records/second')

          assert processing_time < 30, f'Processing took too long: {processing_time}s'
          assert records_per_second > 10, f'Processing too slow: {records_per_second} rec/s'
          "
      - name: Cross-platform path handling test
        run: |
          echo "Testing cross-platform compatibility..."
          python analyzeMFT.py -f small_normal.mft -o "path test/output.csv" --csv -v || echo "Path handling test completed"
      - name: Archive test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: release-test-results-${{ matrix.os }}-py${{ matrix.python-version }}
          path: |
            *.csv
            *.json
            *.xml
            *.db
            *.body
            *.timeline
            *.tsk
            *.mft
          retention-days: 7
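  # A separate job checks that a plain 'pip install .' (no editable install,
  # no dev extras) produces a working package on the oldest and newest
  # supported Python versions.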
  package-test:
    name: Test Package Installation
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.8', '3.12']
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Test pip installation
        run: |
          python -m pip install --upgrade pip
          pip install .
          python -c "import analyzeMFT; print('Package imported successfully')"
      - name: Test installed script functionality
        run: |
          python analyzeMFT.py --generate-test-mft package_test.mft --test-records 50
          python analyzeMFT.py -f package_test.mft -o package_test.csv -v
          [ -f package_test.csv ] && echo "Package installation test successful" || exit 1
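  # Smoke-tests the command lines shown in the docs so the README cannot
  # silently drift from the actual CLI.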
  documentation-test:
    name: Test Documentation Examples
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e .
      - name: Test README examples
        run: |
          echo "Testing examples from documentation..."
          python analyzeMFT.py --generate-test-mft example.mft --test-records 100
          python analyzeMFT.py -f example.mft -o example_output.csv -v
          python analyzeMFT.py -f example.mft -o example.db --sqlite --hash -v
          python analyzeMFT.py -f example.mft -o example.json --json --profile forensic -v
      - name: Validate help documentation
        run: |
          echo "Testing help documentation..."
          python analyzeMFT.py --help | grep -q "MFT file to analyze" || exit 1
          python analyzeMFT.py --list-profiles | grep -q "Available profiles" || exit 1
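  # Gate job: 'needs' makes this run only after every matrix leg, the package
  # install check, and the documentation check have all succeeded.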
  final-validation:
    name: Final Release Validation
    runs-on: ubuntu-latest
    needs: [comprehensive-test, package-test, documentation-test]
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
      - name: Install and test
        run: |
          python -m pip install --upgrade pip
          pip install -e .
      - name: Final functionality verification
        run: |
          echo "🧪 Running final verification tests..."
          python analyzeMFT.py --generate-test-mft final_test.mft --test-records 100 --test-type normal
          python analyzeMFT.py -f final_test.mft -o final.csv --csv -v
          python analyzeMFT.py -f final_test.mft -o final.json --json -v
          python analyzeMFT.py -f final_test.mft -o final.db --sqlite -v
          [ -s final.csv ] && echo "[[ Success ]] CSV export working"
          [ -s final.json ] && echo "[[ Success ]] JSON export working"
          [ -s final.db ] && echo "[[ Success ]] SQLite export working"
          python -c "
          import sqlite3
          conn = sqlite3.connect('final.db')
          cursor = conn.cursor()
          cursor.execute('SELECT COUNT(*) FROM mft_records')
          count = cursor.fetchone()[0]
          print(f'[[ Success ]] SQLite database contains {count} records')
          conn.close()
          "
          echo "🎉 All release tests passed!"