36 commits
9f50507  update contributing page (VeckoTheGecko, Nov 12, 2024)
0f5fa2a  Update dev environment to use py3.10 (VeckoTheGecko, Nov 12, 2024)
48ef818  Add initial asv config (VeckoTheGecko, Nov 12, 2024)
fcf2ff3  patch patch asv config and add docs (VeckoTheGecko, Nov 13, 2024)
f8a37ef  patch benchmarks ci (VeckoTheGecko, Nov 13, 2024)
6d25f40  Revert "Update dev environment to use py3.10" (VeckoTheGecko, Nov 13, 2024)
6a6b8bf  Update docs (VeckoTheGecko, Nov 13, 2024)
e659f40  Add 3d advection benchmark (willirath, Feb 11, 2025)
60c7d55  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 11, 2025)
84b8c26  Add integration test benchmarks (willirath, Feb 12, 2025)
5c41d41  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 12, 2025)
6541d28  Add particle execution timings (willirath, Feb 12, 2025)
ce2a540  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 12, 2025)
c5d0f2f  ArgoFloat benchmark (Feb 18, 2025)
51bf80a  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 18, 2025)
7bbdcd6  Argo fixed (Feb 26, 2025)
62cb3be  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 26, 2025)
1e08f59  adapt runtime (Feb 26, 2025)
7d3348f  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 26, 2025)
579a105  nemo start (danliba, Feb 26, 2025)
4559980  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 26, 2025)
1f9c6c9  nemo curvilinear (danliba, Feb 28, 2025)
28782df  nemo curvilinear (danliba, Feb 28, 2025)
50fee82  nemo curvilinear (danliba, Feb 28, 2025)
0177534  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Feb 28, 2025)
61f93a5  Fix benchmark workflow (willirath, Jul 2, 2025)
e9b34e3  Bump runner image (willirath, Jul 2, 2025)
886af3d  Fix multistep execution (willirath, Jul 2, 2025)
bf33c82  Also fix Scipy multi step execution (willirath, Jul 2, 2025)
0ad59a0  Make asv verbose and skip bottleneck (willirath, Jul 2, 2025)
bda0028  Set large timeout (willirath, Jul 2, 2025)
5bd49d4  [pre-commit.ci] auto fixes from pre-commit.com hooks (pre-commit-ci[bot], Jul 2, 2025)
0a271e6  Try avoiding git describe issue (willirath, Jul 2, 2025)
0e9a85b  Don't fail by grepping (willirath, Jul 2, 2025)
557ea92  Be less verbose in benchmark (willirath, Jul 2, 2025)
68d8d96  Make repo url explicit (willirath, Jul 6, 2025)
78 changes: 78 additions & 0 deletions .github/workflows/benchmarks.yml
@@ -0,0 +1,78 @@
# This workflow was adapted from xarray. See licenses/XARRAY_LICENSE for license details.
name: Benchmark

on:
  pull_request:
    types: [opened, reopened, synchronize, labeled]
  workflow_dispatch:

env:
  PR_HEAD_LABEL: ${{ github.event.pull_request.head.label }}

jobs:
  benchmark:
    if: ${{ contains( github.event.pull_request.labels.*.name, 'run-benchmark') && github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' }}
    name: Linux
    runs-on: ubuntu-24.04
    env:
      ASV_DIR: "./asv_bench"
      CONDA_ENV_FILE: environment.yml

    steps:
      # We need the full repo to avoid this issue
      # https://github.com/actions/checkout/issues/23
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up conda environment
        uses: mamba-org/setup-micromamba@v2
        with:
          micromamba-version: "1.5.10-0"
          environment-file: ${{env.CONDA_ENV_FILE}}
          environment-name: parcels-dev
          cache-environment: true
          cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}-benchmark"
          # add "build" because of https://github.com/airspeed-velocity/asv/issues/1385
          create-args: >-
            python-build
            asv
            mamba<=1.5.10

      - name: Run benchmarks
        shell: bash -l {0}
        id: benchmark
        env:
          OPENBLAS_NUM_THREADS: 1
          MKL_NUM_THREADS: 1
          OMP_NUM_THREADS: 1
          ASV_FACTOR: 1.5
          ASV_SKIP_SLOW: 1
        run: |
          set -x
          # ID this runner
          asv machine --yes
          echo "Baseline: ${{ github.event.pull_request.base.sha }} (${{ github.event.pull_request.base.label }})"
          echo "Contender: ${GITHUB_SHA} ($PR_HEAD_LABEL)"
          # Run benchmarks for current commit against base
          ASV_OPTIONS="--split --factor $ASV_FACTOR"
          asv continuous $ASV_OPTIONS ${{ github.event.pull_request.base.sha }} ${GITHUB_SHA} \
              | sed "/Traceback \|failed$\|PERFORMANCE DECREASED/ s/^/::error::/" \
              | tee benchmarks.log
          # # Report and export results for subsequent steps
          # if grep "Traceback \|failed\|PERFORMANCE DECREASED" benchmarks.log > /dev/null ; then
          #   exit 1
          # fi
        working-directory: ${{ env.ASV_DIR }}

      - name: Add instructions to artifact
        if: always()
        run: |
          cp benchmarks/README_CI.md benchmarks.log .asv/results/
        working-directory: ${{ env.ASV_DIR }}

      - uses: actions/upload-artifact@v4
        if: always()
        with:
          name: asv-benchmark-results-${{ runner.os }}
          path: ${{ env.ASV_DIR }}/.asv/results
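
The job above amounts to a single `asv continuous` comparison between the PR base and head. A minimal sketch of the equivalent local run, assuming a full clone with `main` available and commands issued from `asv_bench/`:

    # Record this machine's hardware profile once (non-interactive).
    asv machine --yes

    # Compare HEAD against main. --split groups results into improved/
    # unchanged/worsened tables; --factor 1.5 matches ASV_FACTOR in CI.
    asv continuous --split --factor 1.5 main HEAD | tee benchmarks.log
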
4 changes: 4 additions & 0 deletions .gitignore
@@ -6,6 +6,10 @@ lib/
bin/
parcels_examples

# asv environments
asv_bench/.asv
asv_bench/pkgs

*.so
*.log
*.nc
1 change: 1 addition & 0 deletions .pre-commit-config.yaml
@@ -9,6 +9,7 @@ repos:
      - id: check-json
        types: [text]
        files: \.(json|ipynb)$
        exclude: ^asv_bench/asv\.conf\.json$
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.7.2
    hooks:
206 changes: 206 additions & 0 deletions asv_bench/asv.conf.json
@@ -0,0 +1,206 @@
{
    // The version of the config file format. Do not change, unless
    // you know what you are doing.
    "version": 1,

    // The name of the project being benchmarked
    "project": "parcels",

    // The project's homepage
    "project_url": "http://docs.oceanparcels.org/",

    // The URL or local path of the source code repository for the
    // project being benchmarked
    "repo": "https://github.com/oceanparcels/parcels.git",

    // The Python project's subdirectory in your repo. If missing or
    // the empty string, the project is assumed to be located at the root
    // of the repository.
    // "repo_subdir": "",

    // Customizable commands for building, installing, and
    // uninstalling the project. See asv.conf.json documentation.
    //
    // "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
    // "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
    // fix for bad builds
    // https://github.com/airspeed-velocity/asv/issues/1389#issuecomment-2076131185
    "build_command": [
        "python -m build",
        "python -mpip wheel --no-deps --no-build-isolation --no-index -w {build_cache_dir} {build_dir}"
    ],
    // List of branches to benchmark. If not provided, defaults to "master"
    // (for git) or "default" (for mercurial).
    "branches": ["main"], // for git
    // "branches": ["default"], // for mercurial

    // The DVCS being used. If not set, it will be automatically
    // determined from "repo" by looking at the protocol in the URL
    // (if remote), or by looking for special directories, such as
    // ".git" (if local).
    "dvcs": "git",

    // The tool to use to create environments. May be "conda",
    // "virtualenv" or other value depending on the plugins in use.
    // If missing or the empty string, the tool will be automatically
    // determined by looking for tools on the PATH environment
    // variable.
    "environment_type": "mamba",
    "conda_channels": ["conda-forge"],

    // timeout in seconds for installing any dependencies in environment
    // defaults to 10 min
    "install_timeout": 600,

    // the base URL to show a commit for the project.
    "show_commit_url": "https://github.com/oceanparcels/parcels/commit/",

    // The Pythons you'd like to test against. If not provided, defaults
    // to the current version of Python used to run `asv`.
    "pythons": ["3.11"],

    // The list of conda channel names to be searched for benchmark
    // dependency packages in the specified order
    // "conda_channels": ["conda-forge", "defaults"],

    // A conda environment file that is used for environment creation.
    // "conda_environment_file": "environment.yml",

    // The matrix of dependencies to test. Each key of the "req"
    // requirements dictionary is the name of a package (in PyPI) and
    // the values are version numbers. An empty list or empty string
    // indicates to just test against the default (latest)
    // version. null indicates that the package is to not be
    // installed. If the package to be tested is only available from
    // PyPI, and the 'environment_type' is conda, then you can preface
    // the package name by 'pip+', and the package will be installed
    // via pip (with all the conda available packages installed first,
    // followed by the pip installed packages).
    //
    // The ``@env`` and ``@env_nobuild`` keys contain the matrix of
    // environment variables to pass to build and benchmark commands.
    // An environment will be created for every combination of the
    // cartesian product of the "@env" variables in this matrix.
    // Variables in "@env_nobuild" will be passed to every environment
    // during the benchmark phase, but will not trigger creation of
    // new environments. A value of ``null`` means that the variable
    // will not be set for the current combination.
    //
    // "matrix": {
    //     "req": {
    //         "numpy": ["1.6", "1.7"],
    //         "six": ["", null], // test with and without six installed
    //         "pip+emcee": [""] // emcee is only available for install with pip.
    //     },
    //     "env": {"ENV_VAR_1": ["val1", "val2"]},
    //     "env_nobuild": {"ENV_VAR_2": ["val3", null]},
    // },

    // using dependencies listed in parcels feedstock
    "matrix": {
        "setuptools_scm": [""],
        "cftime": [""],
        "cgen": [""],
        "dask": [""],
        "matplotlib-base": [""],
        "netcdf4": [""],
        "numpy": [""],
        "platformdirs": [""],
        "psutil": [""],
        "pymbolic": [""],
        "pytest": [""],
        "scipy": [""],
        // "trajan": [""],
        "tqdm": [""],
        "xarray": [""],
        "zarr": [""]
    },

    // Combinations of libraries/python versions can be excluded/included
    // from the set to test. Each entry is a dictionary containing additional
    // key-value pairs to include/exclude.
    //
    // An exclude entry excludes entries where all values match. The
    // values are regexps that should match the whole string.
    //
    // An include entry adds an environment. Only the packages listed
    // are installed. The 'python' key is required. The exclude rules
    // do not apply to includes.
    //
    // In addition to package names, the following keys are available:
    //
    // - python
    //     Python version, as in the *pythons* variable above.
    // - environment_type
    //     Environment type, as above.
    // - sys_platform
    //     Platform, as in sys.platform. Possible values for the common
    //     cases: 'linux2', 'win32', 'cygwin', 'darwin'.
    // - req
    //     Required packages
    // - env
    //     Environment variables
    // - env_nobuild
    //     Non-build environment variables
    //
    // "exclude": [
    //     {"python": "3.2", "sys_platform": "win32"}, // skip py3.2 on windows
    //     {"environment_type": "conda", "req": {"six": null}}, // don't run without six on conda
    //     {"env": {"ENV_VAR_1": "val2"}}, // skip val2 for ENV_VAR_1
    // ],
    //
    // "include": [
    //     // additional env for python2.7
    //     {"python": "2.7", "req": {"numpy": "1.8"}, "env_nobuild": {"FOO": "123"}},
    //     // additional env if run on windows+conda
    //     {"platform": "win32", "environment_type": "conda", "python": "2.7", "req": {"libpython": ""}},
    // ],

    // The directory (relative to the current directory) that benchmarks are
    // stored in. If not provided, defaults to "benchmarks"
    // "benchmark_dir": "benchmarks",

    // The directory (relative to the current directory) to cache the Python
    // environments in. If not provided, defaults to "env"
    "env_dir": ".asv/env",

    // The directory (relative to the current directory) that raw benchmark
    // results are stored in. If not provided, defaults to "results".
    "results_dir": ".asv/results",

    // The directory (relative to the current directory) that the html tree
    // should be written to. If not provided, defaults to "html".
    "html_dir": ".asv/html"

    // The number of characters to retain in the commit hashes.
    // "hash_length": 8,

    // `asv` will cache results of the recent builds in each
    // environment, making them faster to install next time. This is
    // the number of builds to keep, per environment.
    // "build_cache_size": 2,

    // The commits after which the regression search in `asv publish`
    // should start looking for regressions. Dictionary whose keys are
    // regexps matching to benchmark names, and values corresponding to
    // the commit (exclusive) after which to start looking for
    // regressions. The default is to start from the first commit
    // with results. If the commit is `null`, regression detection is
    // skipped for the matching benchmark.
    //
    // "regressions_first_commits": {
    //     "some_benchmark": "352cdf", // Consider regressions only after this commit
    //     "another_benchmark": null, // Skip regression detection altogether
    // },

    // The thresholds for relative change in results, after which `asv
    // publish` starts reporting regressions. Dictionary of the same
    // form as in ``regressions_first_commits``, with values
    // indicating the thresholds. If multiple entries match, the
    // maximum is taken. If no entry matches, the default is 5%.
    //
    // "regressions_thresholds": {
    //     "some_benchmark": 0.01, // Threshold of 1%
    //     "another_benchmark": 0.5, // Threshold of 50%
    // },
}
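
With this config, `asv` builds parcels wheels via `python -m build`, creates mamba environments from the dependency matrix above, and discovers benchmark modules in the default `benchmarks/` directory next to the config. A quick local smoke test, assuming commands are run from `asv_bench/`:

    # Run every benchmark once, without timing statistics (catches
    # import/setup errors cheaply; runs against the head of "main").
    asv run --quick

    # Render accumulated results from .asv/results into a static HTML
    # report in .asv/html, then serve it locally for inspection.
    asv publish
    asv preview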