# Source: GitHub Actions "Workflow file for this run" view, captured from
# PR #5 ("feat: add langchain v1.0"). The page-chrome lines from the web UI
# were turned into comments so this file parses as valid YAML.

name: Test Suite
on:
push:
branches: [main, pre/beta, dev]
pull_request:
branches: [main, pre/beta]
workflow_dispatch:
jobs:
unit-tests:
name: Unit Tests (Python ${{ matrix.python-version }})
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: ['3.10', '3.11', '3.12']
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install uv
uses: astral-sh/setup-uv@v4
- name: Install dependencies
run: |
uv sync
- name: Install Playwright browsers
run: |
uv run playwright install chromium
- name: Run unit tests
run: |
uv run pytest tests/ -m "unit or not integration" --cov --cov-report=xml --cov-report=term
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
file: ./coverage.xml
flags: unittests
name: codecov-${{ matrix.os }}-py${{ matrix.python-version }}
token: ${{ secrets.CODECOV_TOKEN }}
if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'
integration-tests:
name: Integration Tests
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
test-group: [smart-scraper, multi-graph, file-formats]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install uv
uses: astral-sh/setup-uv@v4
- name: Install dependencies
run: |
uv sync
- name: Install Playwright browsers
run: |
uv run playwright install chromium
- name: Run integration tests
env:
OPENAI_APIKEY: ${{ secrets.OPENAI_APIKEY }}
ANTHROPIC_APIKEY: ${{ secrets.ANTHROPIC_APIKEY }}
GROQ_APIKEY: ${{ secrets.GROQ_APIKEY }}
run: |
uv run pytest tests/integration/ -m integration --integration -v
- name: Upload test results
uses: actions/upload-artifact@v4
if: always()
with:
name: integration-test-results-${{ matrix.test-group }}
path: |
htmlcov/
benchmark_results/
benchmark-tests:
name: Performance Benchmarks
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install uv
uses: astral-sh/setup-uv@v4
- name: Install dependencies
run: |
uv sync
- name: Install Playwright browsers
run: |
uv run playwright install chromium
- name: Run performance benchmarks
env:
OPENAI_APIKEY: ${{ secrets.OPENAI_APIKEY }}
run: |
uv run pytest tests/ -m benchmark --benchmark -v
- name: Upload benchmark results
uses: actions/upload-artifact@v4
with:
name: benchmark-results
path: benchmark_results/
- name: Compare with baseline
if: github.event_name == 'pull_request'
run: |
# Download baseline from main branch
# Compare and comment on PR if regression detected
echo "Benchmark comparison would run here"
code-quality:
name: Code Quality Checks
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install uv
uses: astral-sh/setup-uv@v4
- name: Install dependencies
run: |
uv sync
- name: Run Ruff linting
run: |
uv run ruff check scrapegraphai/ tests/
- name: Run Black formatting check
run: |
uv run black --check scrapegraphai/ tests/
- name: Run isort check
run: |
uv run isort --check-only scrapegraphai/ tests/
- name: Run type checking with mypy
run: |
uv run mypy scrapegraphai/
continue-on-error: true
test-coverage-report:
name: Test Coverage Report
needs: [unit-tests, integration-tests]
runs-on: ubuntu-latest
if: always()
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Download coverage artifacts
uses: actions/download-artifact@v4
- name: Generate coverage report
run: |
echo "Coverage report generation would run here"
- name: Comment coverage on PR
if: github.event_name == 'pull_request'
uses: py-cov-action/python-coverage-comment-action@v3
with:
GITHUB_TOKEN: ${{ github.token }}
test-summary:
name: Test Summary
needs: [unit-tests, integration-tests, code-quality]
runs-on: ubuntu-latest
if: always()
steps:
- name: Check test results
run: |
echo "All test jobs completed"
echo "Unit tests: ${{ needs.unit-tests.result }}"
echo "Integration tests: ${{ needs.integration-tests.result }}"
echo "Code quality: ${{ needs.code-quality.result }}"