Skip to content

Commit

Permalink
Merge branch 'main' of https://github.com/PrefectHQ/prefect into jean…
Browse files Browse the repository at this point in the history
…/cloud-565-task-run-instrumentation
  • Loading branch information
jeanluciano committed Nov 11, 2024
2 parents f3ae6b3 + 7a3e50f commit c03c73c
Show file tree
Hide file tree
Showing 202 changed files with 35,572 additions and 1,172 deletions.
7 changes: 5 additions & 2 deletions .github/CODEOWNERS
Validating CODEOWNERS rules …
Original file line number Diff line number Diff line change
Expand Up @@ -7,5 +7,8 @@
# documentation
/docs @discdiver @cicdw @desertaxle @zzstoatzz
# imports
/src/prefect/__init__.py @aaazzam @chrisguidry @cicdw @desertaxle @zzstoatzz
/src/prefect/main.py @aaazzam @chrisguidry @cicdw @desertaxle @zzstoatzz
/src/prefect/__init__.py @aaazzam @chrisguidry @cicdw @desertaxle @zzstoatzz
/src/prefect/main.py @aaazzam @chrisguidry @cicdw @desertaxle @zzstoatzz

# UI Replatform
/ui-v2 @aaazzam @cicdw @desertaxle @zzstoatzz
3 changes: 2 additions & 1 deletion .github/workflows/codspeed-benchmarks.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,8 @@ jobs:
- name: Install packages
run: |
python -m pip install -U uv
uv pip install --upgrade --system .[dev]
uv pip install --upgrade --system .[dev] pytest-codspeed
uv pip uninstall --system pytest-benchmark
- name: Start server
run: |
Expand Down
41 changes: 35 additions & 6 deletions .github/workflows/integration-tests.yaml
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
# TODO: Replace `wait-for-server` with dedicated command
# https://github.com/PrefectHQ/prefect/issues/6990

name: Integration tests
on:
pull_request:
Expand Down Expand Up @@ -85,9 +88,6 @@ jobs:
./scripts/wait-for-server.py
# TODO: Replace `wait-for-server` with dedicated command
# https://github.com/PrefectHQ/prefect/issues/6990
- name: Start server
if: ${{ matrix.server-version.version == 'main' }}
env:
Expand All @@ -98,9 +98,6 @@ jobs:
./scripts/wait-for-server.py
# TODO: Replace `wait-for-server` with dedicated command
# https://github.com/PrefectHQ/prefect/issues/6990
- name: Run integration flows
env:
PREFECT_API_URL: http://127.0.0.1:4200/api
Expand All @@ -113,3 +110,35 @@ jobs:
run: |
cat server.log || echo "No logs available"
docker logs prefect-server || echo "No logs available"
sqlite-3-24-0:
name: Test SQLite 3.24.0 Compatibility
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Test with SQLite 3.24.0
run: >
docker build -t prefect-server-old-sqlite \
--build-arg SQLITE_VERSION=3240000 \
--build-arg SQLITE_YEAR=2018 \
-f old-sqlite.Dockerfile . &&
docker run prefect-server-old-sqlite sh -c "prefect server database downgrade --yes -r base && prefect server database upgrade --yes"
sqlite-3-31-1:
name: Test SQLite 3.31.1 Compatibility
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Test with SQLite 3.31.1
run: >
docker build -t prefect-server-new-sqlite \
--build-arg SQLITE_VERSION=3310100 \
--build-arg SQLITE_YEAR=2020 \
-f old-sqlite.Dockerfile . &&
docker run prefect-server-new-sqlite sh -c "prefect server database downgrade --yes -r base && prefect server database upgrade --yes"
1 change: 1 addition & 0 deletions .github/workflows/markdown-tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@ jobs:
python -m pip install -U uv
uv pip install --upgrade --system -e '.[dev]'
uv pip install --upgrade --system -r requirements-markdown-tests.txt
uv pip uninstall --system pytest-benchmark
- name: Start server
run: |
Expand Down
10 changes: 10 additions & 0 deletions .github/workflows/python-tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -159,6 +159,11 @@ jobs:
echo "COVERAGE_FILE=${COVERAGE_FILE}" >> $GITHUB_ENV
echo "artifact_name=coverage-data-${sanitized_test_type}-${{ matrix.python-version }}-${sanitized_database}" >> $GITHUB_OUTPUT
- name: Set coverage core
if: ${{ matrix.python-version == '3.12' }}
run: |
echo "COVERAGE_CORE=sysmon" >> $GITHUB_ENV
- name: Run tests
run: |
echo "Using COVERAGE_FILE=$COVERAGE_FILE"
Expand Down Expand Up @@ -344,6 +349,11 @@ jobs:
echo "COVERAGE_FILE=${COVERAGE_FILE}" >> $GITHUB_ENV
echo "artifact_name=coverage-data-docker-${{ matrix.python-version }}-${sanitized_database}" >> $GITHUB_OUTPUT
- name: Set coverage core
if: ${{ matrix.python-version == '3.12' }}
run: |
echo "COVERAGE_CORE=sysmon" >> $GITHUB_ENV
- name: Run tests
run: |
echo "Using COVERAGE_FILE=$COVERAGE_FILE"
Expand Down
56 changes: 56 additions & 0 deletions .github/workflows/ui-v2-checks.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
name: UI v2 Checks

on:
pull_request:
paths:
- .github/workflows/ui-v2-checks.yml
- ui-v2/**
- .nvmrc
push:
branches:
- main

permissions:
contents: read

# Limit concurrency by workflow/branch combination.
#
# For pull request builds, pushing additional changes to the
# branch will cancel prior in-progress and pending builds.
#
# For builds triggered on a branch push, additional changes
# will wait for prior builds to complete before starting.
#
# https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}

jobs:
build-ui:
name: Build ui
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4

- uses: actions/setup-node@v4
with:
node-version-file: ".nvmrc"
cache-dependency-path: "**/package-lock.json"

- name: Install UI dependencies
working-directory: ./ui-v2
        run: npm ci

- name: Check formatting
working-directory: ./ui-v2
run: npm run format:check

- name: Lint
working-directory: ./ui-v2
run: npm run lint

- name: Build UI
working-directory: ./ui-v2
run: npm run build
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ dask-worker-space/
!ui/.vscode/

# Prefect files
prefect.toml
prefect.yaml

# Deployment recipes
Expand Down
14 changes: 14 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -59,3 +59,17 @@ repos:
src/prefect/settings/models/.*|
scripts/generate_settings_schema.py
)$
- repo: local
hooks:
- id: generate-settings-ref
name: Generating Settings Reference
language: system
entry: uv run --with 'pydantic>=2.9.0' ./scripts/generate_settings_ref.py
pass_filenames: false
files: |
(?x)^(
.pre-commit-config.yaml|
src/prefect/settings/models/.*|
scripts/generate_settings_ref.py
)$
22 changes: 13 additions & 9 deletions benches/bench_flows.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,16 @@
TODO: Add benches for higher number of tasks; blocked by engine deadlocks in CI.
"""

from typing import TYPE_CHECKING

import anyio
import pytest
from pytest_benchmark.fixture import BenchmarkFixture

from prefect import flow, task

if TYPE_CHECKING:
from pytest_benchmark.fixture import BenchmarkFixture


def noop_function():
pass
Expand All @@ -17,12 +21,12 @@ async def anoop_function():
pass


def bench_flow_decorator(benchmark: BenchmarkFixture):
def bench_flow_decorator(benchmark: "BenchmarkFixture"):
benchmark(flow, noop_function)


@pytest.mark.parametrize("options", [{}, {"timeout_seconds": 10}])
def bench_flow_call(benchmark: BenchmarkFixture, options):
def bench_flow_call(benchmark: "BenchmarkFixture", options):
noop_flow = flow(**options)(noop_function)
benchmark(noop_flow)

Expand All @@ -35,7 +39,7 @@ def bench_flow_call(benchmark: BenchmarkFixture, options):


@pytest.mark.parametrize("num_tasks", [10, 50, 100])
def bench_flow_with_submitted_tasks(benchmark: BenchmarkFixture, num_tasks: int):
def bench_flow_with_submitted_tasks(benchmark: "BenchmarkFixture", num_tasks: int):
test_task = task(noop_function)

@flow
Expand All @@ -47,7 +51,7 @@ def benchmark_flow():


@pytest.mark.parametrize("num_tasks", [10, 50, 100, 250])
def bench_flow_with_called_tasks(benchmark: BenchmarkFixture, num_tasks: int):
def bench_flow_with_called_tasks(benchmark: "BenchmarkFixture", num_tasks: int):
test_task = task(noop_function)

@flow
Expand All @@ -62,7 +66,7 @@ def benchmark_flow():


@pytest.mark.parametrize("num_tasks", [10, 50, 100, 250])
def bench_async_flow_with_async_tasks(benchmark: BenchmarkFixture, num_tasks: int):
def bench_async_flow_with_async_tasks(benchmark: "BenchmarkFixture", num_tasks: int):
test_task = task(anoop_function)

@flow
Expand All @@ -78,7 +82,7 @@ async def benchmark_flow():


@pytest.mark.parametrize("num_flows", [5, 10, 20])
def bench_flow_with_subflows(benchmark: BenchmarkFixture, num_flows: int):
def bench_flow_with_subflows(benchmark: "BenchmarkFixture", num_flows: int):
test_flow = flow(noop_function)

@flow
Expand All @@ -91,7 +95,7 @@ def benchmark_flow():

@pytest.mark.parametrize("num_flows", [5, 10, 20])
def bench_async_flow_with_sequential_subflows(
benchmark: BenchmarkFixture, num_flows: int
benchmark: "BenchmarkFixture", num_flows: int
):
test_flow = flow(anoop_function)

Expand All @@ -105,7 +109,7 @@ async def benchmark_flow():

@pytest.mark.parametrize("num_flows", [5, 10, 20])
def bench_async_flow_with_concurrent_subflows(
benchmark: BenchmarkFixture, num_flows: int
benchmark: "BenchmarkFixture", num_flows: int
):
test_flow = flow(anoop_function)

Expand Down
9 changes: 6 additions & 3 deletions benches/bench_import.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
import importlib
import sys
from typing import TYPE_CHECKING

import pytest
from prometheus_client import REGISTRY
from pytest_benchmark.fixture import BenchmarkFixture

if TYPE_CHECKING:
from pytest_benchmark.fixture import BenchmarkFixture


def reset_imports():
Expand All @@ -21,7 +24,7 @@ def reset_imports():


@pytest.mark.benchmark(group="imports")
def bench_import_prefect(benchmark: BenchmarkFixture):
def bench_import_prefect(benchmark: "BenchmarkFixture"):
def import_prefect():
reset_imports()

Expand All @@ -32,7 +35,7 @@ def import_prefect():

@pytest.mark.timeout(180)
@pytest.mark.benchmark(group="imports")
def bench_import_prefect_flow(benchmark: BenchmarkFixture):
def bench_import_prefect_flow(benchmark: "BenchmarkFixture"):
def import_prefect_flow():
reset_imports()

Expand Down
11 changes: 7 additions & 4 deletions benches/bench_tasks.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
from pytest_benchmark.fixture import BenchmarkFixture
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from pytest_benchmark.fixture import BenchmarkFixture

from prefect import flow, task

Expand All @@ -7,11 +10,11 @@ def noop_function():
pass


def bench_task_decorator(benchmark: BenchmarkFixture):
def bench_task_decorator(benchmark: "BenchmarkFixture"):
benchmark(task, noop_function)


def bench_task_call(benchmark: BenchmarkFixture):
def bench_task_call(benchmark: "BenchmarkFixture"):
noop_task = task(noop_function)

@flow
Expand All @@ -21,7 +24,7 @@ def benchmark_flow():
benchmark_flow()


def bench_task_submit(benchmark: BenchmarkFixture):
def bench_task_submit(benchmark: "BenchmarkFixture"):
noop_task = task(noop_function)

# The benchmark occurs within the flow to measure _submission_ time without
Expand Down
22 changes: 0 additions & 22 deletions benches/conftest.py

This file was deleted.

2 changes: 1 addition & 1 deletion compat-tests
Loading

0 comments on commit c03c73c

Please sign in to comment.