Better GH Actions, test 1 #1

Workflow file for this run

name: Test and Build

on:
  push:
    branches: [ master, main, develop ]
  pull_request:
    branches: [ master, main, develop ]

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
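
      # The cache key below is derived from a hash of the dependency manifests
      # (requirements*.txt, setup.py, pyproject.toml), so the pip cache is reused
      # until one of those files changes; restore-keys falls back to the most
      # recent older cache for the same OS when no exact key matches.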
      - name: Cache pip dependencies
        uses: actions/cache@v3
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements*.txt', '**/setup.py', '**/pyproject.toml') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest pytest-cov pytest-asyncio
          pip install -e .
          # Install optional dependencies for full feature support
          pip install PyYAML || echo "PyYAML not available"

      - name: Lint with flake8 (optional)
        run: |
          pip install flake8 || echo "flake8 not available"
          # Check for Python syntax errors and undefined names (non-blocking: failures fall through to the trailing echo)
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics || echo "Lint warnings found"
          # Exit-zero treats all errors as warnings
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics || echo "Lint check completed"
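
      # The two synthetic MFT images generated here (100 "normal" records and
      # 50 "anomaly" records) are the inputs for every export and processing
      # test that follows in this job.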
      - name: Generate test MFT files
        run: |
          python analyzeMFT.py --generate-test-mft test_normal.mft --test-records 100 --test-type normal
          python analyzeMFT.py --generate-test-mft test_anomaly.mft --test-records 50 --test-type anomaly

      - name: Test CSV export
        run: |
          python analyzeMFT.py -f test_normal.mft -o test_output.csv --csv -v
          [ -f test_output.csv ] && echo "CSV export successful" || exit 1

      - name: Test JSON export
        run: |
          python analyzeMFT.py -f test_normal.mft -o test_output.json --json -v
          [ -f test_output.json ] && echo "JSON export successful" || exit 1
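
      # The sanity query in the step below assumes the SQLite exporter writes
      # parsed entries to a table named mft_records; adjust the table name here
      # if the exporter's schema differs.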
      - name: Test SQLite export
        run: |
          python analyzeMFT.py -f test_normal.mft -o test_output.db --sqlite -v
          [ -f test_output.db ] && echo "SQLite export successful" || exit 1
          # Verify SQLite database is readable
          python -c "import sqlite3; conn=sqlite3.connect('test_output.db'); cursor=conn.cursor(); cursor.execute('SELECT COUNT(*) FROM mft_records'); print(f'SQLite contains {cursor.fetchone()[0]} records'); conn.close()"

      - name: Test XML export
        run: |
          python analyzeMFT.py -f test_normal.mft -o test_output.xml --xml -v
          [ -f test_output.xml ] && echo "XML export successful" || exit 1

      - name: Test Excel export
        run: |
          python analyzeMFT.py -f test_normal.mft -o test_output.xlsx --excel -v || echo "Excel export may not be available"

      - name: Test configuration profiles
        run: |
          python analyzeMFT.py --list-profiles
          python analyzeMFT.py -f test_normal.mft -o test_quick.csv --profile quick -v
          python analyzeMFT.py -f test_normal.mft -o test_forensic.csv --profile forensic -v

      - name: Test chunked processing
        run: |
          python analyzeMFT.py -f test_normal.mft -o test_chunked.csv --chunk-size 10 -v

      - name: Test anomaly MFT
        run: |
          python analyzeMFT.py -f test_anomaly.mft -o test_anomaly_output.csv -v

      - name: Test configuration file creation
        run: |
          python analyzeMFT.py --create-config sample_config.json
          [ -f sample_config.json ] && echo "Config file creation successful" || exit 1
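
      # If the repository ships a tests/ directory, the suite runs under coverage;
      # the resulting coverage.xml is what the Codecov upload step at the end of
      # this job picks up.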
      - name: Run unit tests (if available)
        run: |
          if [ -d "tests" ]; then
            pytest tests/ -v --cov=src --cov-report=xml || echo "Some tests failed"
          else
            echo "No tests directory found, skipping unit tests"
          fi

      - name: Test with hash computation
        run: |
          python analyzeMFT.py -f test_normal.mft -o test_hashes.csv --hash -v

      - name: Validate output files
        run: |
          # Check that output files are not empty and contain expected content
          [ -s test_output.csv ] && echo "CSV file has content" || exit 1
          [ -s test_output.json ] && echo "JSON file has content" || exit 1
          [ -s test_output.db ] && echo "SQLite file has content" || exit 1
          # Basic content validation
          grep -q "Record Number" test_output.csv && echo "CSV has header" || exit 1
          python -c "import json; data=json.load(open('test_output.json')); print(f'JSON has {len(data)} records')" || exit 1
          # List tables via a parameterised query (avoids backslashes in an f-string, which are a SyntaxError before Python 3.12)
          python -c "import sqlite3; conn=sqlite3.connect('test_output.db'); names=[row[0] for row in conn.execute('SELECT name FROM sqlite_master WHERE type=?', ('table',))]; print('SQLite tables:', names)" || exit 1
      - name: Upload coverage to Codecov (if tests ran)
        if: matrix.python-version == '3.11'
        uses: codecov/codecov-action@v3
        with:
          file: ./coverage.xml
          flags: unittests
          name: codecov-umbrella
          fail_ci_if_error: false
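
  # Lightweight cross-platform smoke test: 3 operating systems x 2 Python
  # versions (6 jobs), exercising only the basic parse/export path.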
  compatibility-test:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        python-version: ['3.9', '3.11']
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e .

      - name: Basic functionality test
        run: |
          python analyzeMFT.py --generate-test-mft test.mft --test-records 50
          python analyzeMFT.py -f test.mft -o output.csv -v

      - name: Test profiles
        run: |
          python analyzeMFT.py --list-profiles
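
  # Static security checks: Bandit scans the project's own source for common
  # Python security issues, while Safety checks installed dependencies against
  # a database of known vulnerabilities. Both are advisory here, since failures
  # fall through to an echo and do not break the build.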
  security-scan:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Install security scanning tools
        run: |
          python -m pip install --upgrade pip
          pip install bandit safety

      - name: Run Bandit security scan
        run: |
          bandit -r src/ -f json -o bandit-report.json || echo "Bandit scan completed with warnings"

      - name: Run Safety check
        run: |
          safety check || echo "Safety check completed with warnings"
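
      # Only the Bandit JSON report is published as an artifact; Safety output
      # is visible only in the job log.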
      - name: Upload security scan results
        uses: actions/upload-artifact@v3
        if: always()
        with:
          name: security-scan-results
          path: bandit-report.json