Add Skia-based augmentation backend for faster data pipeline #1590
Workflow file for this run
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# CI workflow: lints and tests sleap-nn on every PR that touches source,
# tests, this workflow, or the project metadata. All jobs run CPU-only
# (CUDA explicitly disabled via env) using uv-managed environments.
name: CI

on:
  pull_request:
    types: [opened, reopened, synchronize]
    paths:
      - "sleap_nn/**"
      - "tests/**"
      - ".github/workflows/ci.yml"
      - "pyproject.toml"

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    env:
      # Hide any GPUs so torch import paths stay CPU-only.
      CUDA_VISIBLE_DEVICES: ""
      USE_CUDA: "0"
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
      - name: Set up uv
        uses: astral-sh/setup-uv@v5
        with:
          enable-cache: false
      - name: Set up Python
        run: uv python install 3.13
      - name: Install dev dependencies and torch
        run: uv sync --extra torch-cpu
      - name: Run Black
        run: uv run --frozen --extra torch-cpu black --check sleap_nn tests
      - name: Run Ruff
        run: uv run --frozen --extra torch-cpu ruff check sleap_nn/

  tests:
    timeout-minutes: 30
    env:
      # Hide any GPUs so torch import paths stay CPU-only.
      CUDA_VISIBLE_DEVICES: ""
      USE_CUDA: "0"
      UV_FROZEN: "1"
    strategy:
      fail-fast: false
      matrix:
        os: ["ubuntu", "windows", "mac"]  # "self-hosted-gpu" temporarily disabled
        include:
          - os: ubuntu
            runs-on: ubuntu-latest
          - os: windows
            runs-on: windows-latest
          - os: mac
            runs-on: macos-14
          # - os: self-hosted-gpu
          #   runs-on: [self-hosted, puma, gpu, 2xgpu]
        # Quoted to avoid YAML float parsing (e.g. 3.10 -> 3.1).
        python: ["3.13"]
    name: Tests (${{ matrix.os }}, Python ${{ matrix.python }})
    runs-on: ${{ matrix.runs-on }}
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
      - name: Set up uv
        uses: astral-sh/setup-uv@v5
        with:
          enable-cache: false
      - name: Set up Python (non-self-hosted GPU)
        if: matrix.os != 'self-hosted-gpu'
        run: uv python install ${{ matrix.python }}
      - name: Install dev dependencies and torch (self-hosted GPU)
        if: matrix.os == 'self-hosted-gpu'
        run: uv sync --python 3.13 --extra torch-cuda128
      - name: Install dev dependencies and torch (non-self-hosted GPU)
        if: matrix.os != 'self-hosted-gpu'
        run: uv sync --extra torch-cpu
      - name: Print environment info
        run: |
          echo "=== UV Environment ==="
          uv run --frozen --extra torch-cpu python --version
          uv run --frozen --extra torch-cpu python -c "import sys; print('Python executable:', sys.executable)"
          echo "=== UV Environment NumPy Check ==="
          uv run --frozen --extra torch-cpu python -c "import numpy; print('NumPy version:', numpy.__version__); print('NumPy location:', numpy.__file__)" || echo "NumPy import failed in uv environment"
          echo "=== CUDA Availability Check ==="
          uv run --frozen --extra torch-cpu python -c "
          import torch
          print(f'PyTorch version: {torch.__version__}')
          print(f'CUDA available: {torch.cuda.is_available()}')
          print(f'CUDA device count: {torch.cuda.device_count()}')
          if torch.cuda.is_available():
              print(f'CUDA version: {torch.version.cuda}')
              print(f'Current device: {torch.cuda.current_device()}')
              print(f'Device name: {torch.cuda.get_device_name(0)}')
          else:
              print('CUDA is not available')
          " || echo "CUDA check failed"
          echo "=== Import Test ==="
          uv run --frozen --extra torch-cpu python -c "import torch; import lightning; import kornia; print('All imports successful')" || echo "Import test failed"
      - name: Check MPS backend (macOS only)
        if: runner.os == 'macOS'
        run: |
          echo "=== macOS MPS Backend Check ==="
          uv run --frozen --extra torch-cpu python -c "
          import torch
          print(f'PyTorch version: {torch.__version__}')
          print(f'MPS available: {torch.backends.mps.is_available()}')
          print(f'MPS built: {torch.backends.mps.is_built()}')
          if torch.backends.mps.is_available():
              print('MPS backend is available and ready to use!')
              device = torch.device('mps')
              test_tensor = torch.randn(3, 3).to(device)
              print(f'Test tensor on MPS: {test_tensor.device}')
          else:
              print('MPS backend is not available on this macOS system')
          "
      - name: Run pytest
        run: |
          echo "=== Final environment check before tests ==="
          uv run --frozen --extra torch-cpu python -c "import numpy, torch, lightning, kornia; print(f'All packages available: numpy={numpy.__version__}, torch={torch.__version__}')"
          echo "=== Running pytest ==="
          uv run --frozen --extra torch-cpu pytest --cov=sleap_nn --cov-report=xml --durations=-1 tests/
      - name: Upload coverage
        uses: codecov/codecov-action@v5
        with:
          fail_ci_if_error: true
          verbose: false
          token: ${{ secrets.CODECOV_TOKEN }}