diff --git a/docs/documentation/index.md b/docs/documentation/index.md index b9f99adf9..d57007514 100644 --- a/docs/documentation/index.md +++ b/docs/documentation/index.md @@ -34,7 +34,7 @@ The tutorials written for Parcels v3 are currently being updated for Parcels v4. :caption: Creating ParticleSets :name: tutorial-particlesets - +../examples/tutorial_delaystart.ipynb ``` ```{nbgallery} @@ -43,6 +43,8 @@ The tutorials written for Parcels v3 are currently being updated for Parcels v4. ../examples/tutorial_sampling.ipynb +../examples/tutorial_Argofloats.ipynb +../examples/tutorial_gsw_density.ipynb @@ -66,7 +68,7 @@ The tutorials written for Parcels v3 are currently being updated for Parcels v4. :caption: Worked examples :name: tutorial-examples -../examples/tutorial_Argofloats.ipynb + ``` diff --git a/docs/examples/tutorial_Argofloats.ipynb b/docs/examples/tutorial_Argofloats.ipynb index 11927c44b..883340fcb 100644 --- a/docs/examples/tutorial_Argofloats.ipynb +++ b/docs/examples/tutorial_Argofloats.ipynb @@ -274,7 +274,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.11" + "version": "3.11.0" } }, "nbformat": 4, diff --git a/docs/examples/tutorial_delaystart.ipynb b/docs/examples/tutorial_delaystart.ipynb new file mode 100644 index 000000000..23e446eb6 --- /dev/null +++ b/docs/examples/tutorial_delaystart.ipynb @@ -0,0 +1,452 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Delayed starts\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In many applications, it is needed to 'delay' the start of particle advection. For example because particles need to be released at different times throughout an experiment. Or because particles need to be released at a constant rate from the same set of locations.\n", + "\n", + "This tutorial will show how this can be done. We start with importing the relevant modules.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from datetime import timedelta\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import xarray as xr\n", + "from IPython.display import HTML\n", + "from matplotlib.animation import FuncAnimation\n", + "\n", + "import parcels" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First import a `FieldSet` (from the Argo example, in this case)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load the CopernicusMarine data in the Agulhas region from the example_datasets\n", + "example_dataset_folder = parcels.download_example_dataset(\n", + " \"CopernicusMarine_data_for_Argo_tutorial\"\n", + ")\n", + "\n", + "ds = xr.open_mfdataset(f\"{example_dataset_folder}/*.nc\", combine=\"by_coords\")\n", + "ds.load() # load the dataset into memory\n", + "\n", + "fieldset = parcels.FieldSet.from_copernicusmarine(ds)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining initial `time` as particle release\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Defining the initial times of particles is done when the `ParticleSet` is defined. 
Although `time` and `z` are optional arguments (with FieldSet t=0 and z=0 as defaults), it is good practice to define them explicitly to ensure expected behavior. The simplest way to delay the start of a particle is to use the `time` argument for each particle.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "npart = 10 # number of particles to be released\n", + "lon = 32 * np.ones(npart)\n", + "lat = np.linspace(-31.5, -30.5, npart, dtype=np.float32)\n", + "# release every particle one hour later from the initial fieldset time\n", + "time = ds.time.values[0] + np.arange(0, npart) * np.timedelta64(1, \"h\")\n", + "z = np.repeat(ds.depth.values[0], npart)\n", + "\n", + "pset = parcels.ParticleSet(\n", + " fieldset=fieldset, pclass=parcels.Particle, lon=lon, lat=lat, time=time, z=z\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then we can execute the `pset` as usual\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "output_file = parcels.ParticleFile(\n", + " \"DelayParticle_time.zarr\", outputdt=np.timedelta64(1, \"h\")\n", + ")\n", + "\n", + "pset.execute(\n", + " parcels.kernels.AdvectionRK4,\n", + " runtime=np.timedelta64(24, \"h\"),\n", + " dt=np.timedelta64(5, \"m\"),\n", + " output_file=output_file,\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And then finally, we can show a movie of the particles. Note that the southern-most particles start to move first.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "\n", + "ds_out = xr.open_zarr(\"DelayParticle_time.zarr\")\n", + "\n", + "fig = plt.figure(figsize=(7, 5), constrained_layout=True)\n", + "ax = fig.add_subplot()\n", + "\n", + "ax.set_ylabel(\"Latitude [deg N]\")\n", + "ax.set_xlabel(\"Longitude [deg E]\")\n", + "ax.set_xlim(31, 33)\n", + "ax.set_ylim(-32, -30)\n", + "\n", + "timerange = np.unique(ds_out[\"time\"].values[np.isfinite(ds_out[\"time\"])])\n", + "\n", + "# Indices of the data where time = 0\n", + "time_id = np.where(ds_out[\"time\"] == timerange[0])\n", + "\n", + "sc = ax.scatter(ds_out[\"lon\"].values[time_id], ds_out[\"lat\"].values[time_id])\n", + "\n", + "t = timerange[0].astype(\"datetime64[h]\")\n", + "title = ax.set_title(f\"Particles at t = {t}\")\n", + "\n", + "\n", + "def animate(i):\n", + " t = timerange[i].astype(\"datetime64[h]\")\n", + " title.set_text(f\"Particles at t = {t}\")\n", + "\n", + " time_id = np.where(ds_out[\"time\"] == timerange[i])\n", + " sc.set_offsets(np.c_[ds_out[\"lon\"].values[time_id], ds_out[\"lat\"].values[time_id]])\n", + "\n", + "\n", + "anim = FuncAnimation(fig, animate, frames=len(timerange), interval=100)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "HTML(anim.to_jshtml())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Release particles repeatedly at a set frequency using `np.broadcast_to`\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When you want to repeatedly release particles from the same set of locations, you can expand the initial array of particle release locations. 
Here we show how to release particles at a set frequency `repeatdt`, for a fixed number of releases `nrepeat`. The total number of particles released is then **nrepeat** x **npart**." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "\n", + "Note that the `repeatdt` argument in `parcels.ParticleSet` used in previous versions of parcels is no longer supported as of v4\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "npart = 10 # number of particles to be released\n", + "\n", + "lon_i = 32 * np.ones(npart)\n", + "lat_i = np.linspace(-31.5, -30.5, npart, dtype=np.float32)\n", + "time_i = np.repeat(ds.time.values[0], npart)\n", + "z_i = np.repeat(ds.depth.values[0], npart)\n", + "\n", + "# repeat release at frequency `repeatdt` for `nrepeat` different releases\n", + "repeatdt = np.timedelta64(6, \"h\")\n", + "nrepeat = 3\n", + "\n", + "lon = np.broadcast_to(lon_i, (nrepeat, npart))\n", + "lat = np.broadcast_to(lat_i, (nrepeat, npart))\n", + "time = (\n", + " np.broadcast_to(time_i, (nrepeat, npart))\n", + " + np.arange(0, nrepeat)[:, np.newaxis] * repeatdt\n", + ")\n", + "print(f\"pset.time shape = {time.shape}\")\n", + "z = np.broadcast_to(z_i, (nrepeat, npart))\n", + "\n", + "pset = parcels.ParticleSet(\n", + " fieldset=fieldset, pclass=parcels.Particle, lon=lon, lat=lat, time=time, z=z\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we again define an output file and execute the `pset` as usual.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "output_file = parcels.ParticleFile(\n", + " \"DelayParticle_releasedt.zarr\", outputdt=timedelta(hours=1)\n", + ")\n", + "\n", + "pset.execute(\n", + " parcels.kernels.AdvectionRK4,\n", + " runtime=timedelta(hours=24),\n", + " dt=timedelta(minutes=5),\n", + " output_file=output_file,\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And we get an animation where a new particle is released every 6 hours from each start location\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "\n", + "ds_out = xr.open_zarr(\"DelayParticle_releasedt.zarr\")\n", + "\n", + "fig = plt.figure(figsize=(7, 5), constrained_layout=True)\n", + "ax = fig.add_subplot()\n", + "\n", + "ax.set_ylabel(\"Latitude [deg N]\")\n", + "ax.set_xlabel(\"Longitude [deg E]\")\n", + "ax.set_xlim(31, 33)\n", + "ax.set_ylim(-32, -30)\n", + "\n", + "timerange = np.unique(ds_out[\"time\"].values[np.isfinite(ds_out[\"time\"])])\n", + "\n", + "# Indices of the data where time = 0\n", + "time_id = np.where(ds_out[\"time\"] == timerange[0])\n", + "\n", + "sc = ax.scatter(ds_out[\"lon\"].values[time_id], ds_out[\"lat\"].values[time_id])\n", + "\n", + "t = timerange[0].astype(\"datetime64[h]\")\n", + "title = ax.set_title(f\"Particles at t = {t}\")\n", + "\n", + "\n", + "def animate(i):\n", + " t = timerange[i].astype(\"datetime64[h]\")\n", + " title.set_text(f\"Particles at t = {t}\")\n", + "\n", + " time_id = np.where(ds_out[\"time\"] == timerange[i])\n", + " sc.set_offsets(np.c_[ds_out[\"lon\"].values[time_id], ds_out[\"lat\"].values[time_id]])\n", + "\n", + "\n", + "anim = FuncAnimation(fig, animate, frames=len(timerange), interval=100)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "HTML(anim.to_jshtml())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Synced `time` in the output file\n", + "\n", + "Note that, because the `outputdt` variable controls the Kernel-loop, all particles are written _at the same time_, even when they start at a non-multiple of `outputdt`.\n", + "\n", + "For example, if your particles 
start at `time=[0, 1, 2]` and `outputdt=2`, then the times written (for `dt=1` and `endtime=4`) will be\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "outtime_expected = np.array(\n", " [[0, 2, 4], [2, 4, np.datetime64(\"NaT\")], [2, 4, np.datetime64(\"NaT\")]],\n", " dtype=\"timedelta64[h]\",\n", ")\n", "print(outtime_expected)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "outfilepath = \"DelayParticle_nonmatchingtime.zarr\"\n", "\n", "pset = parcels.ParticleSet(\n", " fieldset=fieldset,\n", " pclass=parcels.Particle,\n", " lat=[-31] * 3,\n", " lon=[32] * 3,\n", " time=ds.time.values[0] + np.arange(3) * np.timedelta64(1, \"h\"),\n", " z=[0.5] * 3,\n", ")\n", "\n", "output_file = parcels.ParticleFile(outfilepath, outputdt=np.timedelta64(2, \"h\"))\n", "pset.execute(\n", " parcels.kernels.AdvectionRK4,\n", " endtime=ds.time.values[0] + np.timedelta64(4, \"h\"),\n", " dt=np.timedelta64(1, \"h\"),\n", " output_file=output_file,\n", ")" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "And indeed, the `time` values in the zarr output file are as expected\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "outtime_infile = (\n", " xr.open_zarr(outfilepath).time.values[:] - ds.time.values[0]\n", ") # subtract initial time to convert from datetime64 to timedelta64\n", "print(outtime_infile.astype(\"timedelta64[h]\"))\n", "\n", "assert (\n", " outtime_expected[np.isfinite(outtime_expected)]\n", " == outtime_infile[np.isfinite(outtime_infile)]\n", ").all()" ] }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "Now, for some applications, this behavior may be undesirable; for example when particles need to be analyzed at the same age (instead of at the same time). In that case, we recommend either changing `outputdt` so that it is a common divisor of all start times; or doing multiple Parcels runs with subsets of the original `ParticleSet` (e.g., in the example above, one run with the Particles that start at `time=[0, 2]` and one with the Particle at `time=[1]`). 
In that case, you will get two files:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for times in np.array([[0, 2], [1, 3]]).astype(\"timedelta64[h]\"):\n", + " pset = parcels.ParticleSet(\n", + " fieldset=fieldset,\n", + " pclass=parcels.Particle,\n", + " lat=[-31] * len(times),\n", + " lon=[32] * len(times),\n", + " time=ds.time.values[0] + times,\n", + " z=[0.5] * len(times),\n", + " )\n", + " output_file = parcels.ParticleFile(outfilepath, outputdt=np.timedelta64(2, \"h\"))\n", + " pset.execute(\n", + " parcels.kernels.AdvectionRK4,\n", + " runtime=np.timedelta64(4, \"h\"),\n", + " dt=np.timedelta64(1, \"h\"),\n", + " output_file=output_file,\n", + " )\n", + " outtime_infile = xr.open_zarr(outfilepath).time.values[:] - ds.time.values[0]\n", + " print(outtime_infile.astype(\"timedelta64[h]\"))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "test-notebooks", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/docs/examples_v3/tutorial_output.ipynb b/docs/examples/tutorial_output.ipynb similarity index 62% rename from docs/examples_v3/tutorial_output.ipynb rename to docs/examples/tutorial_output.ipynb index f562f4e90..13778feca 100644 --- a/docs/examples_v3/tutorial_output.ipynb +++ b/docs/examples/tutorial_output.ipynb @@ -21,7 +21,7 @@ "- [**Plotting**](#Plotting)\n", "- [**Animations**](#Animations)\n", "\n", - "First we need to create some parcels output to analyze. We simulate a set of particles using the setup described in the [Delay start tutorial](https://docs.oceanparcels.org/en/latest/examples/tutorial_delaystart.html). We will also add some user defined metadata to the output file.\n" + "For more advanced reading and tutorials on the analysis of Lagrangian trajectories, we recommend checking out the [Lagrangian Diagnostics](https://lagrangian-diags.readthedocs.io/en/latest/index.html) project. The [TrajAn package]() can be used to read and plot datasets of Lagrangian trajectories." ] }, { @@ -33,47 +33,60 @@ "from datetime import datetime, timedelta\n", "\n", "import numpy as np\n", + "import xarray as xr\n", "\n", "import parcels" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First we need to create some parcels output to analyze. We simulate a set of particles using the setup described in the [Delay start tutorial](https://docs.oceanparcels.org/en/latest/examples/tutorial_delaystart.html). We will also add some user defined metadata to the output file." 
+ ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "example_dataset_folder = parcels.download_example_dataset(\"Peninsula_data\")\n", - "example_dataset_folder = parcels.download_example_dataset(\"Peninsula_data\")\n", - "filenames = {\n", - " \"U\": str(example_dataset_folder / \"peninsulaU.nc\"),\n", - " \"V\": str(example_dataset_folder / \"peninsulaV.nc\"),\n", - "}\n", - "variables = {\"U\": \"vozocrtx\", \"V\": \"vomecrty\"}\n", - "dimensions = {\"lon\": \"nav_lon\", \"lat\": \"nav_lat\", \"time\": \"time_counter\"}\n", - "fieldset = parcels.FieldSet.from_netcdf(\n", - " filenames, variables, dimensions, allow_time_extrapolation=True\n", + "# Load the CopernicusMarine data in the Agulhas region from the example_datasets\n", + "example_dataset_folder = parcels.download_example_dataset(\n", + " \"CopernicusMarine_data_for_Argo_tutorial\"\n", ")\n", "\n", - "npart = 10 # number of particles to be released\n", - "lon = 3e3 * np.ones(npart)\n", - "lat = np.linspace(3e3, 45e3, npart, dtype=np.float32)\n", + "ds = xr.open_mfdataset(f\"{example_dataset_folder}/*.nc\", combine=\"by_coords\")\n", + "ds.load() # load the dataset into memory\n", "\n", - "# release every particle two hours later\n", - "time = np.arange(npart) * timedelta(hours=2).total_seconds()\n", + "fieldset = parcels.FieldSet.from_copernicusmarine(ds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Particle locations and initial time\n", + "npart = 10 # number of particles to be released\n", + "lon = 32 * np.ones(npart)\n", + "lat = np.linspace(-32.5, -30.5, npart, dtype=np.float32)\n", + "time = ds.time.values[0] + np.arange(0, npart) * np.timedelta64(2, \"h\")\n", + "z = np.repeat(ds.depth.values[0], npart)\n", "\n", "pset = parcels.ParticleSet(\n", - " fieldset=fieldset, pclass=parcels.Particle, lon=lon, lat=lat, time=time\n", + " fieldset=fieldset, pclass=parcels.Particle, lon=lon, lat=lat, time=time, z=z\n", ")\n", "\n", - "output_file = pset.ParticleFile(name=\"Output.zarr\", outputdt=timedelta(hours=2))" + "output_file = parcels.ParticleFile(\"Output.zarr\", outputdt=np.timedelta64(2, \"h\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Parcels saves some metadata in the output file with every simulation (Parcels version, CF convention information, etc.). This metadata is just a dictionary which is propogated to `xr.Dataset(attrs=...)` and is stored in the `.metadata` attribute. The user is free to manipulate this dictionary to add any custom, xarray-compatible metadata relevant to their simulation. Here we add a custom metadata field `date_created` to the output file." + "Parcels saves some metadata in the output file with every simulation (Parcels version, CF convention information, etc.). This metadata is just a dictionary which is propagated to `xr.Dataset(attrs=...)` and is stored in the `.metadata` attribute. We are free to manipulate this dictionary to add any custom, xarray-compatible metadata relevant to our simulation. Here we add a custom metadata field `date_created` to the output file." 
] }, { @@ -82,7 +95,15 @@ "metadata": {}, "outputs": [], "source": [ - "output_file.metadata[\"date_created\"] = datetime.now().isoformat()" + "output_file.metadata[\"date_created\"] = datetime.now().isoformat()\n", + "output_file.metadata" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To write the metadata to the output_file, we need to add it before running `pset.execute()` which writes the particleset including the metadata to the output_file." ] }, { @@ -93,8 +114,8 @@ "source": [ "pset.execute(\n", " parcels.kernels.AdvectionRK4,\n", - " runtime=timedelta(hours=24),\n", - " dt=timedelta(minutes=5),\n", + " runtime=np.timedelta64(48, \"h\"),\n", + " dt=np.timedelta64(5, \"m\"),\n", " output_file=output_file,\n", ")" ] @@ -106,7 +127,7 @@ "source": [ "## Reading the output file\n", "\n", - "Parcels exports output trajectories in `zarr` [format](https://zarr.readthedocs.io/en/stable/). Files in `zarr` are typically _much_ smaller in size than netcdf, although may be slightly more challenging to handle (but `xarray` has a fairly seamless `open_zarr()` method). Note when when we display the dataset we cam see our custom metadata field `date_created`.\n" + "Parcels exports output trajectories in `zarr` [format](https://zarr.readthedocs.io/en/stable/). Files in `zarr` are typically _much_ smaller in size than netcdf, although may be slightly more challenging to handle (but `xarray` has a fairly seamless `open_zarr()` method). Note when we display the dataset we can see our custom metadata field `date_created`.\n" ] }, { @@ -115,21 +136,11 @@ "metadata": {}, "outputs": [], "source": [ - "import xarray as xr\n", - "\n", "data_xarray = xr.open_zarr(\"Output.zarr\")\n", + "\n", "print(data_xarray)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(data_xarray[\"trajectory\"])" - ] - }, { "attachments": {}, "cell_type": "markdown", @@ -160,10 +171,9 @@ "source": [ "np.set_printoptions(linewidth=160)\n", "one_hour = np.timedelta64(1, \"h\") # Define timedelta object to help with conversion\n", + "time_from_start = data_xarray[\"time\"].values - fieldset.time_interval.left\n", "\n", - "print(\n", - " data_xarray[\"time\"].data.compute() / one_hour\n", - ") # timedelta / timedelta -> float number of hours" + "print(time_from_start / one_hour) # timedelta / timedelta -> float number of hours" ] }, { @@ -198,9 +208,9 @@ " np.sqrt(np.square(np.diff(x)) + np.square(np.diff(y))), axis=1\n", ") # d = (dx^2 + dy^2)^(1/2)\n", "\n", - "real_time = data_xarray[\"time\"] / one_hour # convert time to hours\n", + "real_time = time_from_start / one_hour # convert time to hours\n", "time_since_release = (\n", - " real_time.values.transpose() - real_time.values[:, 0]\n", + " real_time.transpose() - real_time[:, 0]\n", ") # substract the initial time from each timeseries" ] }, @@ -361,7 +371,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Trajectory plots like the ones above can become very cluttered for large sets of particles. To better see patterns, it's a good idea to create an animation in time and space. To do this, matplotlib offers an [animation package](https://matplotlib.org/stable/api/animation_api.html). 
Here we show how to use the [**FuncAnimation**](https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.animation.FuncAnimation.html#matplotlib.animation.FuncAnimation) class to animate parcels trajectory data.\n", + "Trajectory plots like the ones above can become very cluttered for large sets of particles. To better see patterns, it's a good idea to create an animation in time and space. To do this, matplotlib offers an [animation package](https://matplotlib.org/stable/api/animation_api.html). Here we show how to use the [**FuncAnimation**](https://matplotlib.org/3.3.2/api/_as_gen/matplotlib.animation.FuncAnimation.html#matplotlib.animation.FuncAnimation) class to animate Parcels trajectory data, based on [this visualisation tutorial](https://github.com/Parcels-code/10year-anniversary-session5/blob/eaf7ac35f43c222280fa5577858be81dc346c06b/animations_tutorial.ipynb) from the Parcels 10-year anniversary.\n", "\n", "To correctly reveal the patterns in time we must remember that the `obs` dimension does not necessarily correspond to the `time` variable ([see the section of Trajectory data structure above](#Trajectory-data-structure)). In the animation of the particles, we usually want to draw the points at each consecutive moment in time, not necessarily at each moment since the start of the trajectory. To do this we must [select the correct data](#Conditional-selection) in each rendering.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "from IPython.display import HTML\n", - "from matplotlib.animation import FuncAnimation\n", - "\n", - "outputdt = timedelta(hours=2)\n", - "\n", - "# timerange in nanoseconds\n", - "timerange = np.arange(\n", - " np.nanmin(data_xarray[\"time\"].values),\n", - " np.nanmax(data_xarray[\"time\"].values) + np.timedelta64(outputdt),\n", - " outputdt,\n", - ")" + "import cartopy.crs as ccrs\n", + "import cartopy.feature as cfeature\n", + "import matplotlib\n", + "from matplotlib.animation import FuncAnimation" ] }, { @@ -391,38 +394,149 @@ "metadata": {}, "outputs": [], "source": [ - "%%capture\n", - "fig = plt.figure(figsize=(5, 5), constrained_layout=True)\n", - "ax = fig.add_subplot()\n", - "\n", - "ax.set_ylabel(\"Meridional distance [m]\")\n", - "ax.set_xlabel(\"Zonal distance [m]\")\n", - "ax.set_xlim(0, 90000)\n", - "ax.set_ylim(0, 50000)\n", - "plt.xticks(rotation=45)\n", - "\n", - "# Indices of the data where time = 0\n", - "time_id = np.where(data_xarray[\"time\"] == timerange[0])\n", - "\n", - "scatter = ax.scatter(\n", - " data_xarray[\"lon\"].values[time_id], data_xarray[\"lat\"].values[time_id]\n", - ")\n", - "\n", - "t = str(timerange[0].astype(\"timedelta64[h]\"))\n", - "title = ax.set_title(\"Particles at t = \" + t)\n", - "\n", - "\n", - "def animate(i):\n", - " t = str(timerange[i].astype(\"timedelta64[h]\"))\n", - " title.set_text(\"Particles at t = \" + t)\n", - "\n", - " time_id = np.where(data_xarray[\"time\"] == timerange[i])\n", - " scatter.set_offsets(\n", - " np.c_[data_xarray[\"lon\"].values[time_id], data_xarray[\"lat\"].values[time_id]]\n", - " )\n", - "\n", - "\n", - "anim = FuncAnimation(fig, animate, frames=len(timerange), interval=100)" + "# for interactive display of animation\n", + "plt.rcParams[\"animation.html\"] = \"jshtml\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Number of timesteps to animate\n", + "nframes = 25 # use fewer frames for testing purposes\n", + "nreducedtrails = 1 # every nth particle will have a trail (if 1, all particles have trails. 
Adjust for faster performance)\n", + "\n", + "\n", + "# Set up the colors and associated trajectories:\n", + "# get release times for each particle (first valid obs for each trajectory)\n", + "release_times = data_xarray[\"time\"].min(dim=\"obs\", skipna=True).values\n", + "\n", + "# get unique release times and assign colors\n", + "unique_release_times = np.unique(release_times[~np.isnat(release_times)])\n", + "n_release_times = len(unique_release_times)\n", + "print(f\"Number of unique release times: {n_release_times}\")\n", + "\n", + "# choose a continuous colormap\n", + "colormap = matplotlib.colormaps[\"tab20b\"]\n", + "\n", + "# set up a unique color for each release time\n", + "release_time_to_color = {}\n", + "for i, release_time in enumerate(unique_release_times):\n", + " release_time_to_color[release_time] = colormap(i / max(n_release_times - 1, 1))\n", + "\n", + "\n", + "# --> Define the output times to animate (all unique, finite times in the output)\n", + "timerange = np.unique(data_xarray[\"time\"].values[np.isfinite(data_xarray[\"time\"])])\n", + "\n", + "# --> Store data for all timeframes (this is needed for faster performance)\n", + "print(\"Pre-computing all particle positions...\")\n", + "all_particles_data = []\n", + "for i, target_time in enumerate(timerange):\n", + " time_id = np.where(data_xarray[\"time\"] == target_time)\n", + " lons = data_xarray[\"lon\"].values[time_id]\n", + " lats = data_xarray[\"lat\"].values[time_id]\n", + " particle_indices = time_id[0]\n", + " valid = ~np.isnan(lons) & ~np.isnan(lats)\n", + "\n", + " all_particles_data.append(\n", + " {\n", + " \"lons\": lons[valid],\n", + " \"lats\": lats[valid],\n", + " \"particle_indices\": particle_indices[valid],\n", + " \"valid_count\": np.sum(valid),\n", + " }\n", + " )\n", + "\n", + "\n", + "# figure setup\n", + "fig, ax = plt.subplots(figsize=(6, 5), subplot_kw={\"projection\": ccrs.PlateCarree()})\n", + "ax.set_xlim(30, 33)\n", + "ax.set_xticks(np.arange(30, 33.5, 0.5))\n", + "ax.set_xlabel(\"Longitude (deg E)\")\n", + "ax.set_ylim(-33, -30)\n", + "ax.set_yticks(ticks=np.arange(-33, -29.5, 0.5))\n", + "ax.set_yticklabels(np.arange(33, 29.5, -0.5).astype(str))\n", + "ax.set_ylabel(\"Latitude (deg S)\")\n", + "ax.coastlines(color=\"saddlebrown\")\n", + "ax.add_feature(cfeature.LAND, alpha=0.5, facecolor=\"saddlebrown\")\n", + "\n", + "# --> Use pre-computed data for initial setup\n", + "initial_data = all_particles_data[0]\n", + "initial_colors = []\n", + "for particle_idx in initial_data[\"particle_indices\"]:\n", + " rt = release_times[particle_idx]\n", + " if rt in release_time_to_color:\n", + " initial_colors.append(release_time_to_color[rt])\n", + " else:\n", + " initial_colors.append(\"blue\")\n", + "\n", + "# --> plot first timestep\n", + "scatter = ax.scatter(initial_data[\"lons\"], initial_data[\"lats\"], s=10, c=initial_colors)\n", + "\n", + "# --> initialize trails\n", + "trail_plot = []\n", + "\n", + "# Set initial title\n", + "t_str = str(timerange[0])[:19] # Format datetime nicely\n", + "title = ax.set_title(f\"Particles at t = {t_str}\")\n", + "\n", + "\n", + "# loop over for animation\n", + "def animate(i):\n", + " print(f\"Animating frame {i + 1}/{len(timerange)} at time {timerange[i]}\")\n", + " t_str = str(timerange[i])[:19]\n", + " title.set_text(f\"Particles at t = {t_str}\")\n", + "\n", + " # Find particles at current time\n", + " current_data = all_particles_data[i]\n", + "\n", + " if current_data[\"valid_count\"] > 0:\n", + " current_colors = []\n", + " for particle_idx in current_data[\"particle_indices\"]:\n", + " rt = release_times[particle_idx]\n", + " 
current_colors.append(release_time_to_color[rt])\n", + "\n", + " scatter.set_offsets(np.c_[current_data[\"lons\"], current_data[\"lats\"]])\n", + " scatter.set_color(current_colors)\n", + "\n", + " # --> add trails\n", + "\n", + " for trail in trail_plot:\n", + " trail.remove()\n", + " trail_plot.clear()\n", + "\n", + " trail_length = min(10, i) # trails will have max length of 10 time steps\n", + "\n", + " if trail_length > 0:\n", + " sampled_particles = current_data[\"particle_indices\"][\n", + " ::nreducedtrails\n", + " ] # use all or sample if you want faster computation\n", + "\n", + " for particle_idx in sampled_particles:\n", + " trail_lons = []\n", + " trail_lats = []\n", + " for j in range(i - trail_length, i + 1):\n", + " past_data = all_particles_data[j]\n", + " if particle_idx in past_data[\"particle_indices\"]:\n", + " idx = np.where(past_data[\"particle_indices\"] == particle_idx)[\n", + " 0\n", + " ][0]\n", + " trail_lons.append(past_data[\"lons\"][idx])\n", + " trail_lats.append(past_data[\"lats\"][idx])\n", + " if len(trail_lons) > 1:\n", + " rt = release_times[particle_idx]\n", + " color = release_time_to_color[rt]\n", + " (trail,) = ax.plot(\n", + " trail_lons, trail_lats, color=color, linewidth=0.6, alpha=0.6\n", + " )\n", + " trail_plot.append(trail)\n", + "\n", + " else:\n", + " scatter.set_offsets(np.empty((0, 2)))\n", + "\n", + "\n", + "# Create animation\n", + "anim = FuncAnimation(fig, animate, frames=nframes, interval=100)\n", + "anim" ] }, { @@ -430,15 +544,13 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [ - "HTML(anim.to_jshtml())" - ] + "source": [] } ], "metadata": { "celltoolbar": "Metagegevens bewerken", "kernelspec": { - "display_name": "parcels", + "display_name": "test-notebooks", "language": "python", "name": "python3" }, @@ -452,7 +564,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.3" + "version": "3.11.0" } }, "nbformat": 4, diff --git a/docs/examples_v3/tutorial_delaystart.ipynb b/docs/examples_v3/tutorial_delaystart.ipynb deleted file mode 100644 index 6584dfc15..000000000 --- a/docs/examples_v3/tutorial_delaystart.ipynb +++ /dev/null @@ -1,619 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Delayed starts\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In many applications, it is needed to 'delay' the start of particle advection. For example because particles need to be released at different times throughout an experiment. Or because particles need to be released at a constant rate from the same set of locations.\n", - "\n", - "This tutorial will show how this can be done. 
We start with importing the relevant modules.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from datetime import timedelta\n", - "\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "import xarray as xr\n", - "from IPython.display import HTML\n", - "from matplotlib.animation import FuncAnimation\n", - "\n", - "import parcels" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First import a `FieldSet` (from the Peninsula example, in this case)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "example_dataset_folder = parcels.download_example_dataset(\"Peninsula_data\")\n", - "filenames = {\n", - " \"U\": str(example_dataset_folder / \"peninsulaU.nc\"),\n", - " \"V\": str(example_dataset_folder / \"peninsulaV.nc\"),\n", - "}\n", - "variables = {\"U\": \"vozocrtx\", \"V\": \"vomecrty\"}\n", - "dimensions = {\"lon\": \"nav_lon\", \"lat\": \"nav_lat\", \"time\": \"time_counter\"}\n", - "fieldset = parcels.FieldSet.from_netcdf(\n", - " filenames, variables, dimensions, allow_time_extrapolation=True\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, there are two ways to delay the start of particles. Either by defining the whole `ParticleSet` at initialisation and giving each particle its own `time`. Or by using the `repeatdt` argument. We will show both options here\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Assigning each particle its own `time`\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The simplest way to delay the start of a particle is to use the `time` argument for each particle\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "npart = 10 # number of particles to be released\n", - "lon = 3e3 * np.ones(npart)\n", - "lat = np.linspace(3e3, 45e3, npart, dtype=np.float32)\n", - "\n", - "# release every particle one hour later\n", - "time = np.arange(0, npart) * timedelta(hours=1).total_seconds()\n", - "\n", - "pset = parcels.ParticleSet(\n", - " fieldset=fieldset, pclass=parcels.Particle, lon=lon, lat=lat, time=time\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then we can execute the `pset` as usual\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "output_file = pset.ParticleFile(\n", - " name=\"DelayParticle_time.zarr\", outputdt=timedelta(hours=1)\n", - ")\n", - "\n", - "pset.execute(\n", - " parcels.kernels.AdvectionRK4,\n", - " runtime=timedelta(hours=24),\n", - " dt=timedelta(minutes=5),\n", - " output_file=output_file,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And then finally, we can show a movie of the particles. 
Note that the southern-most particles start to move first.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "\n", - "ds = xr.open_zarr(\"DelayParticle_time.zarr\")\n", - "\n", - "fig = plt.figure(figsize=(7, 5), constrained_layout=True)\n", - "ax = fig.add_subplot()\n", - "\n", - "ax.set_ylabel(\"Meridional distance [m]\")\n", - "ax.set_xlabel(\"Zonal distance [m]\")\n", - "ax.set_xlim(0, 9e4)\n", - "ax.set_ylim(0, 5e4)\n", - "\n", - "timerange = np.unique(ds[\"time\"].values[np.isfinite(ds[\"time\"])])\n", - "\n", - "# Indices of the data where time = 0\n", - "time_id = np.where(ds[\"time\"] == timerange[0])\n", - "\n", - "sc = ax.scatter(ds[\"lon\"].values[time_id], ds[\"lat\"].values[time_id])\n", - "\n", - "t = str(timerange[0].astype(\"timedelta64[h]\"))\n", - "title = ax.set_title(f\"Particles at t = {t}\")\n", - "\n", - "\n", - "def animate(i):\n", - " t = str(timerange[i].astype(\"timedelta64[h]\"))\n", - " title.set_text(f\"Particles at t = {t}\")\n", - "\n", - " time_id = np.where(ds[\"time\"] == timerange[i])\n", - " sc.set_offsets(np.c_[ds[\"lon\"].values[time_id], ds[\"lat\"].values[time_id]])\n", - "\n", - "\n", - "anim = FuncAnimation(fig, animate, frames=len(timerange), interval=100)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "HTML(anim.to_jshtml())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Using the `repeatdt` argument\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The second method to delay the start of particle releases is to use the `repeatdt` argument when constructing a `ParticleSet`. 
This is especially useful if you want to repeatedly release particles from the same set of locations.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "npart = 10 # number of particles to be released\n", - "lon = 3e3 * np.ones(npart)\n", - "lat = np.linspace(3e3, 45e3, npart, dtype=np.float32)\n", - "repeatdt = timedelta(hours=3) # release from the same set of locations every 3h\n", - "\n", - "pset = parcels.ParticleSet(\n", - " fieldset=fieldset, pclass=parcels.Particle, lon=lon, lat=lat, repeatdt=repeatdt\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we again define an output file and execute the `pset` as usual.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "output_file = pset.ParticleFile(\n", - " name=\"DelayParticle_releasedt\", outputdt=timedelta(hours=1)\n", - ")\n", - "\n", - "pset.execute(\n", - " parcels.kernels.AdvectionRK4,\n", - " runtime=timedelta(hours=24),\n", - " dt=timedelta(minutes=5),\n", - " output_file=output_file,\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And we get an animation where a new particle is released every 3 hours from each start location\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "\n", - "ds = xr.open_zarr(\"DelayParticle_releasedt.zarr\")\n", - "\n", - "fig = plt.figure(figsize=(7, 5), constrained_layout=True)\n", - "ax = fig.add_subplot()\n", - "\n", - "ax.set_ylabel(\"Meridional distance [m]\")\n", - "ax.set_xlabel(\"Zonal distance [m]\")\n", - "ax.set_xlim(0, 9e4)\n", - "ax.set_ylim(0, 5e4)\n", - "\n", - "timerange = np.unique(ds[\"time\"].values[np.isfinite(ds[\"time\"])])\n", - "\n", - "# Indices of the data where time = 0\n", - "time_id = np.where(ds[\"time\"] == timerange[0])\n", - "\n", - "sc = ax.scatter(ds[\"lon\"].values[time_id], ds[\"lat\"].values[time_id])\n", - "\n", - "t = str(timerange[0].astype(\"timedelta64[h]\"))\n", - "title = ax.set_title(f\"Particles at t = {t}\")\n", - "\n", - "\n", - "def animate(i):\n", - " t = str(timerange[i].astype(\"timedelta64[h]\"))\n", - " title.set_text(f\"Particles at t = {t}\")\n", - "\n", - " time_id = np.where(ds[\"time\"] == timerange[i])\n", - " sc.set_offsets(np.c_[ds[\"lon\"].values[time_id], ds[\"lat\"].values[time_id]])\n", - "\n", - "\n", - "anim = FuncAnimation(fig, animate, frames=len(timerange), interval=100)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "HTML(anim.to_jshtml())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that, if you want to if you want to at some point stop the repeatdt, the easiest implementation is to use two calls to `pset.execute()`. 
For example, if in the above example you only want four releases of the pset, you could do the following\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pset = parcels.ParticleSet(\n", - " fieldset=fieldset, pclass=parcels.Particle, lon=lon, lat=lat, repeatdt=repeatdt\n", - ")\n", - "output_file = pset.ParticleFile(\n", - " name=\"DelayParticle_releasedt_9hrs\", outputdt=timedelta(hours=1)\n", - ")\n", - "\n", - "# first run for 3 * 3 hrs\n", - "pset.execute(\n", - " parcels.kernels.AdvectionRK4,\n", - " runtime=timedelta(hours=9),\n", - " dt=timedelta(minutes=5),\n", - " output_file=output_file,\n", - ")\n", - "\n", - "# now stop the repeated release\n", - "pset.repeatdt = None\n", - "\n", - "# now continue running for the remaining 15 hours\n", - "pset.execute(\n", - " parcels.kernels.AdvectionRK4,\n", - " runtime=timedelta(hours=15),\n", - " dt=timedelta(minutes=5),\n", - " output_file=output_file,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "%%capture\n", - "\n", - "ds = xr.open_zarr(\"DelayParticle_releasedt_9hrs.zarr\")\n", - "\n", - "fig = plt.figure(figsize=(7, 5), constrained_layout=True)\n", - "ax = fig.add_subplot()\n", - "\n", - "ax.set_ylabel(\"Meridional distance [m]\")\n", - "ax.set_xlabel(\"Zonal distance [m]\")\n", - "ax.set_xlim(0, 9e4)\n", - "ax.set_ylim(0, 5e4)\n", - "\n", - "timerange = np.unique(ds[\"time\"].values[np.isfinite(ds[\"time\"])])\n", - "\n", - "# Indices of the data where time = 0\n", - "time_id = np.where(ds[\"time\"] == timerange[0])\n", - "\n", - "sc = ax.scatter(ds[\"lon\"].values[time_id], ds[\"lat\"].values[time_id])\n", - "\n", - "t = str(timerange[0].astype(\"timedelta64[h]\"))\n", - "title = ax.set_title(f\"Particles at t = {t}\")\n", - "\n", - "\n", - "def animate(i):\n", - " t = str(timerange[i].astype(\"timedelta64[h]\"))\n", - " title.set_text(f\"Particles at t = {t}\")\n", - "\n", - " time_id = np.where(ds[\"time\"] == timerange[i])\n", - " sc.set_offsets(np.c_[ds[\"lon\"].values[time_id], ds[\"lat\"].values[time_id]])\n", - "\n", - "\n", - "anim = FuncAnimation(fig, animate, frames=len(timerange), interval=100)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "HTML(anim.to_jshtml())" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Synced `time` in the output file\n", - "\n", - "Note that, because the `outputdt` variable controls the Kernel-loop, all particles are written _at the same time_, even when they start at a non-multiple of `outputdt`.\n", - "\n", - "For example, if your particles start at `time=[0, 1, 2]` and `outputdt=2`, then the times written (for `dt=1` and `endtime=4`) will be\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "outtime_expected = np.array(\n", - " [[0, 2, 4], [2, 4, np.datetime64(\"NaT\")], [2, 4, np.datetime64(\"NaT\")]],\n", - " dtype=\"timedelta64[s]\",\n", - ")\n", - "print(outtime_expected)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "outfilepath = \"DelayParticle_nonmatchingtime.zarr\"\n", - "\n", - "pset = parcels.ParticleSet(\n", - " fieldset=fieldset,\n", - " pclass=parcels.Particle,\n", - " lat=[3e3] * 3,\n", - " lon=[3e3] * 3,\n", - " time=[0, 1, 2],\n", - ")\n", - "\n", - "output_file = 
pset.ParticleFile(name=outfilepath, outputdt=2)\n", - "pset.execute(\n", - " parcels.kernels.AdvectionRK4,\n", - " endtime=4,\n", - " dt=1,\n", - " output_file=output_file,\n", - ")\n", - "\n", - "# Note that we also need to write the final time to the file\n", - "output_file.write_latest_locations(pset, 4)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And indeed, the `time` values in the NetCDF output file are as expected\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "outtime_infile = xr.open_zarr(outfilepath).time.values[:]\n", - "print(outtime_infile.astype(\"timedelta64[s]\"))\n", - "\n", - "assert (\n", - " outtime_expected[np.isfinite(outtime_expected)]\n", - " == outtime_infile[np.isfinite(outtime_infile)]\n", - ").all()" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, for some applications, this behavior may be undesirable; for example when particles need to be analyzed at a same age (instead of at a same time). In that case, we recommend either changing `outputdt` so that it is a common divisor of all start times; or doing multiple Parcels runs with subsets of the original `ParticleSet` (e.g., in the example above, one run with the Particles that start at `time=[0, 2]` and one with the Particle at `time=[1]`). In that case, you will get two files:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for times in [[0, 2], [1]]:\n", - " pset = parcels.ParticleSet(\n", - " fieldset=fieldset,\n", - " pclass=parcels.Particle,\n", - " lat=[3e3] * len(times),\n", - " lon=[3e3] * len(times),\n", - " time=times,\n", - " )\n", - " output_file = pset.ParticleFile(name=outfilepath, outputdt=2)\n", - " pset.execute(\n", - " parcels.kernels.AdvectionRK4,\n", - " endtime=4,\n", - " dt=1,\n", - " output_file=output_file,\n", - " )\n", - " # Note that we also need to write the final time to the file\n", - " output_file.write_latest_locations(pset, 4)\n", - " print(xr.open_zarr(outfilepath).time.values[:].astype(\"timedelta64[s]\"))" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Adding new particles to a ParticleSet during runtime\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "In the examples above, all particles were defined at the start of the simulation. There are use-cases, though, where it is important to be able to add particles 'on-the-fly', during the runtime of a Parcels simulation.\n", - "\n", - "Unfortuantely, Parcels does not (yet) support adding new particles _within_ a `Kernel`. 
A workaround is to temporarily leave the `execution()` call to add particles via the `ParticleSet.add()` method, before continuing with `execution()`.\n", - "\n", - "See the example below, where we add 'mass' to a particle each timestep, based on a probablistic condition, and then split the particle once its 'mass' is larger than 5\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "GrowingParticle = parcels.Particle.add_variables(\n", - " [\n", - " parcels.Variable(\"mass\", initial=0),\n", - " parcels.Variable(\"splittime\", initial=-1),\n", - " parcels.Variable(\"splitmass\", initial=0),\n", - " ]\n", - ")\n", - "\n", - "\n", - "def GrowParticles(particle, fieldset, time):\n", - " import random\n", - "\n", - " # 25% chance per timestep for particle to grow\n", - " if random.random() < 0.25:\n", - " particle.mass += 1.0\n", - " if (particle.mass >= 5.0) and (particle.splittime < 0):\n", - " particle.splittime = time\n", - " particle.splitmass = particle.mass / 2.0\n", - " particle.mass = particle.mass / 2.0\n", - "\n", - "\n", - "pset = parcels.ParticleSet(fieldset=fieldset, pclass=GrowingParticle, lon=0, lat=0)\n", - "outfile = parcels.ParticleFile(\"growingparticles.zarr\", pset, outputdt=1)\n", - "\n", - "for t in range(40):\n", - " pset.execute(\n", - " GrowParticles, runtime=1, dt=1, output_file=outfile, verbose_progress=False\n", - " )\n", - " for p in pset:\n", - " if p.splittime > 0:\n", - " pset.add(\n", - " parcels.ParticleSet(\n", - " fieldset=fieldset,\n", - " pclass=GrowingParticle,\n", - " lon=0,\n", - " lat=0,\n", - " time=p.splittime,\n", - " mass=p.splitmass,\n", - " )\n", - " )\n", - " p.splittime = -1 # reset splittime" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The 'trick' is that we place the `pset.execute()` call in a for-loop, so that we leave the Kernel-loop and can add Particles to the ParticleSet.\n", - "\n", - "Indeed, if we plot the mass of particles as a function of time, we see that new particles are created every time a particle reaches a mass of 5.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ds = xr.open_zarr(\"growingparticles.zarr\")\n", - "plt.plot(ds.time.values[:].astype(\"timedelta64[s]\").T, ds.mass.T)\n", - "plt.grid()\n", - "plt.xlabel(\"Time\")\n", - "plt.ylabel(\"Particle 'mass'\")\n", - "plt.show()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "parcels", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.3" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/docs/examples_v3/tutorial_kernelloop.ipynb b/docs/examples_v3/tutorial_kernelloop.ipynb index 3b88f54b1..e10d2c410 100644 --- a/docs/examples_v3/tutorial_kernelloop.ipynb +++ b/docs/examples_v3/tutorial_kernelloop.ipynb @@ -26,7 +26,7 @@ "\n", "When you run a Parcels simulation (i.e. a call to `pset.execute()`), the Kernel loop is the main part of the code that is executed. 
This part of the code loops through all particles and executes the Kernels that are defined for each particle.\n", "\n", - "In order to make sure that the displacements of a particle in the different Kernels can be summed, all Kernels add to a _change_ in position (`particles.dlon`, `particles.dlat`, and `particles.ddepth`). This is important, because there are situations where movement kernels would otherwise not commute. Take the example of advecting particles by currents _and_ winds. If the particle would first be moved by the currents and then by the winds, the result could be different from first moving by the winds and then by the currents. Instead, by adding the changes in position, the ordering of the Kernels has no consequence on the particle displacement." + "In order to make sure that the displacements of a particle in the different Kernels can be summed, all Kernels add to a _change_ in position (`particles.dlon`, `particles.dlat`, and `particles.dz`). This is important, because there are situations where movement kernels would otherwise not commute. Take the example of advecting particles by currents _and_ winds. If the particle would first be moved by the currents and then by the winds, the result could be different from first moving by the winds and then by the currents. Instead, by adding the changes in position, the ordering of the Kernels has no consequence on the particle displacement." ] }, { @@ -40,17 +40,17 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Below is a structured overview of the Kernel loop is implemented. Note that this is for longitude only, but the same process is applied for latitude and depth.\n", + "Below is a structured overview of how the Kernel loop is implemented. Note that this is for `lon` only, but the same process is applied for `lat` and `z`.\n", "\n", - "1. Initialise an extra Variable `particles.lon=0` and `particles.time_nextloop = particles.time`\n", + "1. Initialise an extra Variable `particles.dlon=0`\n", "\n", "2. Within the Kernel loop, for each particle:
\n", "\n", " 1. Update `particles.lon += particles.dlon`
\n", "\n", - " 2. Set variable `particles.dlon = 0`
\n", + " 2. Update `particles.time += particles.dt` (except for on the first iteration of the Kernel loop)
\n", "\n", - " 3. Update `particles.time = particles.time_nextloop`\n", + " 3. Set variable `particles.dlon = 0`
\n", "\n", " 4. For each Kernel in the list of Kernels:\n", " \n", @@ -58,11 +58,9 @@ " \n", " 2. Update `particles.dlon` by adding the change in longitude, if needed
\n", "\n", - " 5. Update `particles.time_nextloop += particles.dt`
\n", + " 5. If `outputdt` is a multiple of `particle.time`, write `particle.lon` and `particle.time` to zarr output file
\n", "\n", - " 6. If `outputdt` is a multiple of `particle.time`, write `particle.lon` and `particle.time` to zarr output file
\n", - "\n", - "Besides having commutable Kernels, the main advantage of this implementation is that, when using Field Sampling with e.g. `particle.temp = fieldset.Temp[particle.time, particle.depth, particle.lat, particle.lon]`, the particle location stays the same throughout the entire Kernel loop. Additionally, this implementation ensures that the particle location is the same as the location of the sampled field in the output file." + "Besides having commutable Kernels, the main advantage of this implementation is that, when using Field Sampling with e.g. `particle.temp = fieldset.Temp[particle.time, particle.z, particle.lat, particle.lon]`, the particle location stays the same throughout the entire Kernel loop. Additionally, this implementation ensures that the particle location is the same as the location of the sampled field in the output file." ] }, { @@ -155,10 +153,10 @@ "source": [ "def wind_kernel(particle, fieldset, time):\n", " particle_dlon += (\n", - " fieldset.UWind[time, particle.depth, particle.lat, particle.lon] * particle.dt\n", + " fieldset.UWind[time, particle.z, particle.lat, particle.lon] * particle.dt\n", " )\n", " particle_dlat += (\n", - " fieldset.VWind[time, particle.depth, particle.lat, particle.lon] * particle.dt\n", + " fieldset.VWind[time, particle.z, particle.lat, particle.lon] * particle.dt\n", " )" ] }, @@ -242,30 +240,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Caveats" + "## Caveat: Avoid updating particle locations directly in Kernels" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "There are a few important considerations to take into account when writing Kernels\n", - "\n", - "### 1. Avoid updating particle locations directly in Kernels\n", "It is better not to update `particle.lon` directly in a Kernel, as it can interfere with the loop above. Assigning a value to `particle.lon` in a Kernel will throw a warning. \n", "\n", - "Instead, update the local variable `particle.dlon`.\n", - "\n", - "### 2. Be careful with updating particle variables that do not depend on Fields.\n", - "While assigning the interpolated value of a `Field` to a Particle goes well in the loop above, this is not necessarily so for assigning other attributes. For example, a line like `particle.age += particle.dt` is executed directly so may result in the age being `dt` at `time = 0` in the output file. \n", - "\n", - "A workaround is to either initialise the age to `-dt`, or to increase the `age` only when `particle.time > 0` (using an `np.where` statement).\n", - "\n", - "\n", - "### 3. The last time is not written to file\n", - "Because the location at the start of the loop is written at the end of the Kernel loop, the last `particle.time` of the particle is not written to file. This is similar behaviour to e.g. `np.arange(start, stop)`, which also doesn't include the `stop` value itself. \n", - "\n", - "If you do want to write the last time to file, you can increase the `runtime` or `endtime` by `dt` (although this may cause an OutsideTimeInterval if your run was to the end of the available hydrodynamic data), or you can call `pfile.write_latest_locations(pset, time=pset[0].time_nextloop)`. Note that in the latter case, the particle locations (longitude, latitude and depth) will be updated, but other variables will _not_ be updated as the Kernels are not run again." + "Instead, update the local variable `particle.dlon`." 
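As a minimal sketch of this caveat (with `u_drift` a hypothetical constant eastward speed, not part of the tutorial data, in the units expected by the mesh):

```python
u_drift = 1e-5  # hypothetical eastward drift speed (degrees per second)


def BadEastwardDrift(particle, fieldset, time):
    particle.lon += u_drift * particle.dt  # DON'T: assigning particle.lon throws a warning


def GoodEastwardDrift(particle, fieldset, time):
    particle_dlon += u_drift * particle.dt  # DO: accumulate into the change in position
```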
] }, { @@ -355,7 +339,7 @@ "source": [ "def KeepInOcean(particle, fieldset, time):\n", " if particle.state == StatusCode.ErrorThroughSurface:\n", - " particle_ddepth = 0.0\n", + " particle_dz = 0.0\n", " particle.state = StatusCode.Success" ] }, diff --git a/docs/examples_v3/tutorial_splitparticles.ipynb b/docs/examples_v3/tutorial_splitparticles.ipynb new file mode 100644 index 000000000..02a0d26c9 --- /dev/null +++ b/docs/examples_v3/tutorial_splitparticles.ipynb @@ -0,0 +1,158 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Adding new particles to a ParticleSet during runtime\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There are use cases where it is important to be able to add particles 'on-the-fly', during the runtime of a Parcels simulation.\n", + "\n", + "Unfortunately, Parcels does not (yet) support adding new particles _within_ a `Kernel`. A workaround is to temporarily leave the `execution()` call to add particles via the `ParticleSet.add()` method, before continuing with `execution()`.\n", + "\n", + "See the example below, where we add 'mass' to a particle each timestep, based on a probabilistic condition, and then split the particle once its 'mass' is larger than 5\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from datetime import timedelta\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import xarray as xr\n", + "from IPython.display import HTML\n", + "from matplotlib.animation import FuncAnimation\n", + "\n", + "import parcels" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load the CopernicusMarine data in the Agulhas region from the example_datasets\n", + "example_dataset_folder = parcels.download_example_dataset(\n", + " \"CopernicusMarine_data_for_Argo_tutorial\"\n", + ")\n", + "\n", + "ds = xr.open_mfdataset(f\"{example_dataset_folder}/*.nc\", combine=\"by_coords\")\n", + "ds.load() # load the dataset into memory\n", + "\n", + "fieldset = parcels.FieldSet.from_copernicusmarine(ds)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "GrowingParticle = parcels.Particle.add_variable([parcels.Variable(\"mass\", initial=0)])\n", + "\n", + "\n", + "def GrowParticles(particles, fieldset):\n", + " # 25% chance per timestep for particle to grow\n", + " growing_particles = np.random.random_sample(len(particles)) < 0.25\n", + " particles[growing_particles].mass += 1.0\n", + "\n", + "\n", + "def SplitParticles(particles, fieldset):\n", + " splitting_particles = particles.mass >= 5.0\n", + " particles[splitting_particles].mass = particles[splitting_particles].mass / 2.0\n", + "\n", + "\n", + "pset = parcels.ParticleSet(\n", + " fieldset=fieldset,\n", + " pclass=GrowingParticle,\n", + " lon=0,\n", + " lat=0,\n", + " time=fieldset.time_interval.left,\n", + ")\n", + "outfile = parcels.ParticleFile(\"growingparticles.zarr\", outputdt=np.timedelta64(1, \"h\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pset.execute(\n", + " GrowParticles,\n", + " runtime=np.timedelta64(40, \"h\"),\n", + " dt=np.timedelta64(1, \"h\"),\n", + " output_file=outfile,\n", + " verbose_progress=False,\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + 
"source": [ + "The 'trick' is that we place the `pset.execute()` call in a for-loop, so that we leave the Kernel-loop and can add Particles to the ParticleSet.\n", + "\n", + "Indeed, if we plot the mass of particles as a function of time, we see that new particles are created every time a particle reaches a mass of 5.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_out = xr.open_zarr(\"growingparticles.zarr\")\n", + "plt.plot(\n", + " (ds_out.time.values[:].T - ds.time.values[0]).astype(\"timedelta64[h]\"),\n", + " ds_out.mass.T,\n", + ")\n", + "plt.grid()\n", + "plt.xlabel(\"Time [h]\")\n", + "plt.ylabel(\"Particle 'mass'\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "test-notebooks", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/src/parcels/_core/field.py b/src/parcels/_core/field.py index c03dec06c..094056604 100644 --- a/src/parcels/_core/field.py +++ b/src/parcels/_core/field.py @@ -316,18 +316,20 @@ def eval(self, time: datetime, z, y, x, particles=None, applyConversion=True): w = self.W._interp_method(self.W, ti, position, tau, time, z, y, x) else: w = 0.0 - else: - (u, v, w) = self._vector_interp_method(self, ti, position, tau, time, z, y, x) - if applyConversion: - u = self.U.units.to_target(u, z, y, x) - v = self.V.units.to_target(v, z, y, x) - if "3D" in self.vector_type: - w = self.W.units.to_target(w, z, y, x) + if applyConversion: + u = self.U.units.to_target(u, z, y, x) + v = self.V.units.to_target(v, z, y, x) + + else: + (u, v, w) = self._vector_interp_method(self, ti, position, tau, time, z, y, x, applyConversion) for vel in (u, v, w): _update_particle_states_interp_value(particles, vel) + if applyConversion and ("3D" in self.vector_type): + w = self.W.units.to_target(w, z, y, x) if self.W else 0.0 + if "3D" in self.vector_type: return (u, v, w) else: diff --git a/src/parcels/_core/kernel.py b/src/parcels/_core/kernel.py index 14b6f853e..a1404dc08 100644 --- a/src/parcels/_core/kernel.py +++ b/src/parcels/_core/kernel.py @@ -75,7 +75,7 @@ def __init__( self._fieldset = fieldset self._ptype = ptype - self._positionupdate_kernels_added = False + self._positionupdate_kernel_added = False for f in pyfuncs: self.check_fieldsets_in_kernels(f) @@ -111,23 +111,19 @@ def remove_deleted(self, pset): if len(indices) > 0: pset.remove_indices(indices) - def add_positionupdate_kernels(self): + def add_positionupdate_kernel(self): # Adding kernels that set and update the coordinate changes - def Setcoords(particles, fieldset): # pragma: no cover + def PositionUpdate(particles, fieldset): # pragma: no cover particles.lon += particles.dlon particles.lat += particles.dlat particles.z += particles.dz + particles.time += particles.dt particles.dlon = 0 particles.dlat = 0 particles.dz = 0 - particles.time = particles.time_nextloop - - def UpdateTime(particles, fieldset): # pragma: no cover - particles.time_nextloop = particles.time + particles.dt - - self._pyfuncs = (Setcoords + self + UpdateTime)._pyfuncs + self._pyfuncs = (PositionUpdate + self)._pyfuncs def 
check_fieldsets_in_kernels(self, pyfunc): # TODO v4: this can go into another method? assert_is_compatible()? """ @@ -234,14 +230,13 @@ def execute(self, pset, endtime, dt): pset._data["state"][:] = StatusCode.Evaluate - if not self._positionupdate_kernels_added: - self.add_positionupdate_kernels() - self._positionupdate_kernels_added = True - while (len(pset) > 0) and np.any(np.isin(pset.state, [StatusCode.Evaluate, StatusCode.Repeat])): - time_to_endtime = compute_time_direction * (endtime - pset.time_nextloop) + time_to_endtime = compute_time_direction * (endtime - pset.time) - if all(time_to_endtime <= 0): + evaluate_particles = (np.isin(pset.state, [StatusCode.Success, StatusCode.Evaluate])) & ( + time_to_endtime >= 0 + ) + if not np.any(evaluate_particles): return StatusCode.Success # adapt dt to end exactly on endtime @@ -251,7 +246,6 @@ def execute(self, pset, endtime, dt): pset.dt = np.minimum(np.maximum(pset.dt, -time_to_endtime), 0) # run kernels for all particles that need to be evaluated - evaluate_particles = (pset.state == StatusCode.Evaluate) & (pset.dt != 0) for f in self._pyfuncs: f(pset[evaluate_particles], self._fieldset) @@ -265,9 +259,9 @@ def execute(self, pset, endtime, dt): if not hasattr(self.fieldset, "RK45_tol"): pset._data["dt"][:] = dt - # Reset particle state for particles that signalled success and have not reached endtime yet - particles_to_evaluate = (pset.state == StatusCode.Success) & (time_to_endtime > 0) - pset[particles_to_evaluate].state = StatusCode.Evaluate + # Set particle state for particles that reached endtime + particles_endofloop = (pset.state == StatusCode.Evaluate) & (pset.time == endtime) + pset[particles_endofloop].state = StatusCode.EndofLoop # delete particles that signalled deletion self.remove_deleted(pset) @@ -284,4 +278,9 @@ def execute(self, pset, endtime, dt): else: error_func(pset[inds].z, pset[inds].lat, pset[inds].lon) + # Only add PositionUpdate kernel at the end of the first execute call to avoid adding dt to time too early + if not self._positionupdate_kernel_added: + self.add_positionupdate_kernel() + self._positionupdate_kernel_added = True + return pset diff --git a/src/parcels/_core/particle.py b/src/parcels/_core/particle.py index 5bfcd0878..cf2727d2c 100644 --- a/src/parcels/_core/particle.py +++ b/src/parcels/_core/particle.py @@ -180,7 +180,6 @@ def get_default_particle(spatial_dtype: np.float32 | np.float64) -> ParticleClas dtype=_SAME_AS_FIELDSET_TIME_INTERVAL.VALUE, attrs={"standard_name": "time", "units": "seconds", "axis": "T"}, ), - Variable("time_nextloop", dtype=_SAME_AS_FIELDSET_TIME_INTERVAL.VALUE, to_write=False), Variable( "trajectory", dtype=np.int64, diff --git a/src/parcels/_core/particlefile.py b/src/parcels/_core/particlefile.py index 7e5093071..123efd438 100644 --- a/src/parcels/_core/particlefile.py +++ b/src/parcels/_core/particlefile.py @@ -71,6 +71,9 @@ def __init__(self, store, outputdt, chunks=None, create_new_zarrfile=True): if not isinstance(outputdt, np.timedelta64): raise ValueError(f"Expected outputdt to be a np.timedelta64 or datetime.timedelta, got {type(outputdt)}") + if outputdt <= np.timedelta64(0, "s"): + raise ValueError(f"outputdt must be a positive non-zero timedelta. 
Got {outputdt=!r}") + self._outputdt = outputdt _assert_valid_chunks_tuple(chunks) @@ -242,23 +245,6 @@ def _write_particle_data(self, *, particle_data, pclass, time_interval, time, in particle_data["obs_written"][indices_to_write] = obs + 1 - def write_latest_locations(self, pset, time): - """Write the current (latest) particle locations to zarr file. - This can be useful at the end of a pset.execute(), when the last locations are not written yet. - Note that this only updates the locations, not any of the other Variables. Therefore, use with care. - - Parameters - ---------- - pset : - ParticleSet object to write - time : - Time at which to write ParticleSet. Note that typically this would be pset.time_nextloop - """ - for var in ["lon", "lat", "z"]: - pset._data[f"{var}"] += pset._data[f"d{var}"] - pset._data["time"] = pset._data["time_nextloop"] - self.write(pset, time) - def _get_store_from_pathlike(path: Path | str) -> DirectoryStore: path = str(Path(path)) # Ensure valid path, and convert to string @@ -301,21 +287,21 @@ def _to_write_particles(particle_data, time): ( np.less_equal( time - np.abs(particle_data["dt"] / 2), - particle_data["time_nextloop"], - where=np.isfinite(particle_data["time_nextloop"]), + particle_data["time"], + where=np.isfinite(particle_data["time"]), ) & np.greater_equal( time + np.abs(particle_data["dt"] / 2), - particle_data["time_nextloop"], - where=np.isfinite(particle_data["time_nextloop"]), + particle_data["time"], + where=np.isfinite(particle_data["time"]), ) # check time - dt/2 <= particle_data["time"] <= time + dt/2 | ( (np.isnan(particle_data["dt"])) - & np.equal(time, particle_data["time_nextloop"], where=np.isfinite(particle_data["time_nextloop"])) + & np.equal(time, particle_data["time"], where=np.isfinite(particle_data["time"])) ) # or dt is NaN and time matches particle_data["time"] ) & (np.isfinite(particle_data["trajectory"])) - & (np.isfinite(particle_data["time_nextloop"])) + & (np.isfinite(particle_data["time"])) )[0] @@ -324,9 +310,6 @@ def _convert_particle_data_time_to_float_seconds(particle_data, time_interval): particle_data = particle_data.copy() particle_data["time"] = ((particle_data["time"] - time_interval.left) / np.timedelta64(1, "s")).astype(np.float64) - particle_data["time_nextloop"] = ( - (particle_data["time_nextloop"] - time_interval.left) / np.timedelta64(1, "s") - ).astype(np.float64) particle_data["dt"] = (particle_data["dt"] / np.timedelta64(1, "s")).astype(np.float64) return particle_data diff --git a/src/parcels/_core/particleset.py b/src/parcels/_core/particleset.py index ace99e973..e98d202d7 100644 --- a/src/parcels/_core/particleset.py +++ b/src/parcels/_core/particleset.py @@ -124,7 +124,6 @@ def __init__( lat=lat, z=z, time=time, - time_nextloop=time, trajectory=trajectory_ids, ), ) @@ -421,7 +420,7 @@ def update_dt_dtype(self, dt_dtype: np.dtype): dt_dtype : np.dtype New dtype for dt. """ - if dt_dtype not in [np.timedelta64, "timedelta64[ns]", "timedelta64[ms]", "timedelta64[s]"]: + if dt_dtype not in [np.timedelta64, "timedelta64[ns]", "timedelta64[μs]", "timedelta64[ms]", "timedelta64[s]"]: raise ValueError(f"dt_dtype must be a numpy timedelta64 dtype. Got {dt_dtype=!r}") self._data["dt"] = self._data["dt"].astype(dt_dtype) @@ -516,15 +515,16 @@ def execute( raise ValueError( f"The runtime must be a datetime.timedelta or np.timedelta64 object. Got {type(runtime)}" ) from e + if runtime < np.timedelta64(0, "s"): + raise ValueError(f"The runtime must be a non-negative timedelta. 
Got {runtime=!r}") start_time, end_time = _get_simulation_start_and_end_times( - self.fieldset.time_interval, self._data["time_nextloop"], runtime, endtime, sign_dt + self.fieldset.time_interval, self._data["time"], runtime, endtime, sign_dt ) # Set the time of the particles if it hadn't been set on initialisation if np.isnat(self._data["time"]).any(): self._data["time"][:] = start_time - self._data["time_nextloop"][:] = start_time outputdt = output_file.outputdt if output_file else None @@ -536,7 +536,7 @@ def execute( pbar = tqdm(total=(end_time - start_time) / np.timedelta64(1, "s"), file=sys.stdout) pbar.set_description("Integration time: " + str(start_time)) - next_output = start_time + sign_dt * outputdt if output_file else None + next_output = start_time if output_file else None time = start_time while sign_dt * (time - end_time) < 0: @@ -553,7 +553,7 @@ def execute( if output_file: output_file.write(self, next_output) if np.isfinite(outputdt): - next_output += outputdt + next_output += outputdt * sign_dt if verbose_progress: pbar.set_description("Integration time: " + str(time)) diff --git a/src/parcels/_core/statuscodes.py b/src/parcels/_core/statuscodes.py index 9b8849aba..22d0d1788 100644 --- a/src/parcels/_core/statuscodes.py +++ b/src/parcels/_core/statuscodes.py @@ -20,6 +20,7 @@ class StatusCode: """Class defining the status codes for particles.state.""" Success = 0 + EndofLoop = 1 Evaluate = 10 Repeat = 20 Delete = 30 diff --git a/src/parcels/interpolators.py b/src/parcels/interpolators.py index eba5bc802..ec57f847e 100644 --- a/src/parcels/interpolators.py +++ b/src/parcels/interpolators.py @@ -52,6 +52,7 @@ def ZeroInterpolator_Vector( z: np.float32 | np.float64, y: np.float32 | np.float64, x: np.float32 | np.float64, + applyConversion: bool, ) -> np.float32 | np.float64: """Template function used for the signature check of the interpolation methods for velocity fields.""" return 0.0 @@ -157,6 +158,7 @@ def CGrid_Velocity( z: np.float32 | np.float64, y: np.float32 | np.float64, x: np.float32 | np.float64, + applyConversion: bool, ): """ Interpolation kernel for velocity fields on a C-Grid. 
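Custom velocity interpolators must match this updated signature, since `eval` now passes `applyConversion` through to `self._vector_interp_method`. A minimal sketch of a conforming interpolator (the name `ZeroVelocity` is hypothetical, for illustration only):

```python
def ZeroVelocity(vectorfield, ti, position, tau, t, z, y, x, applyConversion: bool):
    # Matches the updated vector-interpolator signature. A zero velocity field
    # needs no unit conversion, so applyConversion is accepted but unused here.
    return 0.0, 0.0, 0.0
```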
@@ -274,7 +276,11 @@ def CGrid_Velocity( U = (1 - xsi) * U0 + xsi * U1 V = (1 - eta) * V0 + eta * V1 - meshJac = 1852 * 60.0 if grid._mesh == "spherical" else 1 + deg2m = 1852 * 60.0 + if applyConversion: + meshJac = (deg2m * deg2m * np.cos(np.deg2rad(y))) if grid._mesh == "spherical" else 1 + else: + meshJac = deg2m if grid._mesh == "spherical" else 1 jac = i_u._compute_jacobian_determinant(py, px, eta, xsi) * meshJac @@ -527,6 +533,7 @@ def XFreeslip( z: np.float32 | np.float64, y: np.float32 | np.float64, x: np.float32 | np.float64, + applyConversion: bool, ): """Free-slip boundary condition interpolation for velocity fields.""" return _Spatialslip(vectorfield, ti, position, tau, t, z, y, x, a=1.0, b=0.0) @@ -541,6 +548,7 @@ def XPartialslip( z: np.float32 | np.float64, y: np.float32 | np.float64, x: np.float32 | np.float64, + applyConversion: bool, ): """Partial-slip boundary condition interpolation for velocity fields.""" return _Spatialslip(vectorfield, ti, position, tau, t, z, y, x, a=0.5, b=0.5) diff --git a/tests/test_advection.py b/tests/test_advection.py index cb4651492..00055f10b 100644 --- a/tests/test_advection.py +++ b/tests/test_advection.py @@ -102,8 +102,8 @@ def test_advection_zonal_periodic(): startlon = np.array([0.5, 0.4]) pset = ParticleSet(fieldset, pclass=PeriodicParticle, lon=startlon, lat=[0.5, 0.5]) pset.execute([AdvectionEE, periodicBC], runtime=np.timedelta64(40, "s"), dt=np.timedelta64(1, "s")) - np.testing.assert_allclose(pset.total_dlon, 4, atol=1e-5) - np.testing.assert_allclose(pset.lon + pset.dlon, startlon, atol=1e-5) + np.testing.assert_allclose(pset.total_dlon, 4.1, atol=1e-5) + np.testing.assert_allclose(pset.lon, startlon, atol=1e-5) np.testing.assert_allclose(pset.lat, 0.5, atol=1e-5) @@ -165,7 +165,7 @@ def SubmergeParticle(particles, fieldset): # pragma: no cover kernels.append(DeleteParticle) pset = ParticleSet(fieldset=fieldset, lon=0.5, lat=0.5, z=0.9) - pset.execute(kernels, runtime=np.timedelta64(11, "s"), dt=np.timedelta64(1, "s")) + pset.execute(kernels, runtime=np.timedelta64(10, "s"), dt=np.timedelta64(1, "s")) if direction == "up" and wErrorThroughSurface: np.testing.assert_allclose(pset.lon[0], 0.6, atol=1e-5) @@ -222,7 +222,7 @@ def test_length1dimensions(u, v, w): # TODO: Refactor this test to be more read x0, y0, z0 = 2, 8, -4 pset = ParticleSet(fieldset, lon=x0, lat=y0, z=z0) kernel = AdvectionRK4 if w is None else AdvectionRK4_3D - pset.execute(kernel, runtime=np.timedelta64(5, "s"), dt=np.timedelta64(1, "s")) + pset.execute(kernel, runtime=np.timedelta64(4, "s"), dt=np.timedelta64(1, "s")) assert len(pset.lon) == len([p.lon for p in pset]) np.testing.assert_allclose(np.array([p.lon - x0 for p in pset]), 4 * u, atol=1e-6) @@ -332,7 +332,7 @@ def test_decaying_moving_eddy(method, rtol): fieldset.add_constant("RK45_min_dt", 10 * 60) pset = ParticleSet(fieldset, lon=start_lon, lat=start_lat, time=np.timedelta64(0, "s")) - pset.execute(kernel[method], dt=dt, endtime=np.timedelta64(1, "D")) + pset.execute(kernel[method], dt=dt, endtime=np.timedelta64(23, "h")) def truth_moving(x_0, y_0, t): t /= np.timedelta64(1, "s") @@ -412,7 +412,7 @@ def test_peninsula_fieldset(method, rtol, grid_type): fieldset = FieldSet([U, V, P, UV]) dt = np.timedelta64(30, "m") - runtime = np.timedelta64(1, "D") + runtime = np.timedelta64(23, "h") start_lat = np.linspace(3e3, 47e3, npart) start_lon = 3e3 * np.ones_like(start_lat) @@ -457,7 +457,7 @@ def test_nemo_curvilinear_fieldset(): U = parcels.Field("U", ds["U"], grid) V = parcels.Field("V", ds["V"], 
grid) U.units = parcels.GeographicPolar() - V.units = parcels.GeographicPolar() # U and V need GoegraphicPolar for C-Grid interpolation to work correctly + V.units = parcels.Geographic() UV = parcels.VectorField("UV", U, V, vector_interp_method=CGrid_Velocity) fieldset = parcels.FieldSet([U, V, UV]) @@ -543,7 +543,7 @@ def test_nemo_3D_curvilinear_fieldset(method): V = parcels.Field("V", ds["V"], grid) W = parcels.Field("W", ds["W"], grid) U.units = parcels.GeographicPolar() - V.units = parcels.GeographicPolar() # U and V need GoegraphicPolar for C-Grid interpolation to work correctly + V.units = parcels.Geographic() UV = parcels.VectorField("UV", U, V, vector_interp_method=CGrid_Velocity) UVW = parcels.VectorField("UVW", U, V, W, vector_interp_method=CGrid_Velocity) fieldset = parcels.FieldSet([U, V, W, UV, UVW]) @@ -553,7 +553,7 @@ def test_nemo_3D_curvilinear_fieldset(method): lats = np.linspace(52.5, 51.6, npart) pset = parcels.ParticleSet(fieldset, lon=lons, lat=lats, z=np.ones_like(lons)) - pset.execute(kernel[method], runtime=np.timedelta64(4, "D"), dt=np.timedelta64(6, "h")) + pset.execute(kernel[method], runtime=np.timedelta64(3, "D") + np.timedelta64(18, "h"), dt=np.timedelta64(6, "h")) if method == "RK4": np.testing.assert_equal(round_and_hash_float_array([p.lon for p in pset], decimals=5), 29977383852960156017546) diff --git a/tests/test_diffusion.py b/tests/test_diffusion.py index 495a7ac2c..d0091c7d6 100644 --- a/tests/test_diffusion.py +++ b/tests/test_diffusion.py @@ -77,7 +77,7 @@ def test_fieldKh_SpatiallyVaryingDiffusion(mesh, kernel): np.random.seed(1636) pset = ParticleSet(fieldset=fieldset, lon=np.zeros(npart), lat=np.zeros(npart)) - pset.execute(pset.Kernel(kernel), runtime=np.timedelta64(4, "h"), dt=np.timedelta64(1, "h")) + pset.execute(pset.Kernel(kernel), runtime=np.timedelta64(3, "h"), dt=np.timedelta64(1, "h")) tol = 2000 * mesh_conversion # effectively 2000 m errors (because of low numbers of particles) assert np.allclose(np.mean(pset.lon), 0, atol=tol) diff --git a/tests/test_field.py b/tests/test_field.py index 0f47f2524..b35076971 100644 --- a/tests/test_field.py +++ b/tests/test_field.py @@ -159,7 +159,7 @@ def test_vectorfield_invalid_interpolator(): ds = datasets_structured["ds_2d_left"] grid = XGrid.from_dataset(ds) - def invalid_interpolator_wrong_signature(self, ti, position, tau, t, z, y, invalid): + def invalid_interpolator_wrong_signature(self, ti, position, tau, t, z, y, applyConversion, invalid): return 0.0 # Create component fields diff --git a/tests/test_particlefile.py b/tests/test_particlefile.py index 136804616..6cf4ebc07 100755 --- a/tests/test_particlefile.py +++ b/tests/test_particlefile.py @@ -7,8 +7,18 @@ import xarray as xr from zarr.storage import MemoryStore -import parcels -from parcels import Field, FieldSet, Particle, ParticleFile, ParticleSet, StatusCode, Variable, VectorField, XGrid +from parcels import ( + Field, + FieldSet, + Particle, + ParticleFile, + ParticleSet, + StatusCode, + Variable, + VectorField, + XGrid, + download_example_dataset, +) from parcels._core.particle import Particle, create_particle_data, get_default_particle from parcels._core.utils.time import TimeInterval from parcels._datasets.structured.generic import datasets @@ -69,12 +79,10 @@ def test_pfile_array_remove_particles(fieldset, tmp_zarrfile): ) pfile = ParticleFile(tmp_zarrfile, outputdt=np.timedelta64(1, "s")) pset._data["time"][:] = fieldset.time_interval.left - pset._data["time_nextloop"][:] = fieldset.time_interval.left pfile.write(pset, 
time=fieldset.time_interval.left) pset.remove_indices(3) new_time = fieldset.time_interval.left + np.timedelta64(1, "D") pset._data["time"][:] = new_time - pset._data["time_nextloop"][:] = new_time pfile.write(pset, new_time) ds = xr.open_zarr(tmp_zarrfile) timearr = ds["time"][:] @@ -99,11 +107,8 @@ def test_pfile_array_remove_all_particles(fieldset, chunks_obs, tmp_zarrfile): pfile.write(pset, fieldset.time_interval.left + np.timedelta64(1, "D")) pfile.write(pset, fieldset.time_interval.left + np.timedelta64(2, "D")) - ds = xr.open_zarr(tmp_zarrfile, decode_cf=False).load() - pytest.skip( - "TODO v4: Set decode_cf=True, which will mean that missing values get decoded to NaT rather than fill value" - ) - assert np.allclose(ds["time"][:, 0], np.timedelta64(0, "s"), atol=np.timedelta64(1, "ms")) + ds = xr.open_zarr(tmp_zarrfile) + np.testing.assert_allclose(ds["time"][:, 0] - fieldset.time_interval.left, np.timedelta64(0, "s")) if chunks_obs is not None: assert ds["time"][:].shape == chunks else: @@ -111,20 +116,16 @@ def test_pfile_array_remove_all_particles(fieldset, chunks_obs, tmp_zarrfile): assert np.all(np.isnan(ds["time"][:, 1:])) -@pytest.mark.skip(reason="TODO v4: stuck in infinite loop") def test_variable_write_double(fieldset, tmp_zarrfile): def Update_lon(particles, fieldset): # pragma: no cover particles.dlon += 0.1 + dt = np.timedelta64(1, "us") particle = get_default_particle(np.float64) pset = ParticleSet(fieldset, pclass=particle, lon=[0], lat=[0]) - ofile = ParticleFile(tmp_zarrfile, outputdt=np.timedelta64(10, "us")) - pset.execute( - pset.Kernel(Update_lon), - runtime=np.timedelta64(1, "ms"), - dt=np.timedelta64(10, "us"), - output_file=ofile, - ) + pset.update_dt_dtype(dt.dtype) + ofile = ParticleFile(tmp_zarrfile, outputdt=dt) + pset.execute(Update_lon, runtime=np.timedelta64(10, "us"), dt=dt, output_file=ofile) ds = xr.open_zarr(tmp_zarrfile) lons = ds["lon"][:] @@ -165,20 +166,12 @@ def test_variable_written_once(): ... 
-@pytest.mark.parametrize( - "dt", - [ - pytest.param(-np.timedelta64(1, "s"), marks=pytest.mark.xfail(reason="need to fix backwards in time")), - np.timedelta64(1, "s"), - ], -) +@pytest.mark.parametrize("dt", [-np.timedelta64(1, "s"), np.timedelta64(1, "s")]) @pytest.mark.parametrize("maxvar", [2, 4, 10]) def test_pset_repeated_release_delayed_adding_deleting(fieldset, tmp_zarrfile, dt, maxvar): """Tests that if particles are released and deleted based on age that resulting output file is correct.""" npart = 10 - runtime = np.timedelta64(npart, "s") fieldset.add_constant("maxvar", maxvar) - pset = None MyParticle = Particle.add_variable( [Variable("sample_var", initial=0.0), Variable("v_once", dtype=np.float64, initial=0.0, to_write="once")] @@ -189,7 +182,7 @@ def test_pset_repeated_release_delayed_adding_deleting(fieldset, tmp_zarrfile, d lon=np.zeros(npart), lat=np.zeros(npart), pclass=MyParticle, - time=fieldset.time_interval.left + np.array([np.timedelta64(i, "s") for i in range(npart)]), + time=fieldset.time_interval.left + [np.timedelta64(i + 1, "s") for i in range(npart)], ) pfile = ParticleFile(tmp_zarrfile, outputdt=abs(dt), chunks=(1, 1)) @@ -204,12 +197,9 @@ def IncrLon(particles, fieldset): # pragma: no cover for _ in range(npart): pset.execute(IncrLon, dt=dt, runtime=np.timedelta64(1, "s"), output_file=pfile) - ds = xr.open_zarr(tmp_zarrfile, decode_cf=False) - pytest.skip( - "TODO v4: Set decode_cf=True, which will mean that missing values get decoded to NaT rather than fill value" - ) + ds = xr.open_zarr(tmp_zarrfile) samplevar = ds["sample_var"][:] - assert samplevar.shape == (runtime, min(maxvar + 1, runtime)) + assert samplevar.shape == (npart, min(maxvar, npart + 1)) # test whether samplevar[:, k] = k for k in range(samplevar.shape[1]): assert np.allclose([p for p in samplevar[:, k] if np.isfinite(p)], k + 1) @@ -217,25 +207,21 @@ def IncrLon(particles, fieldset): # pragma: no cover assert filesize < 1024 * 65 # test that chunking leads to filesize less than 65KB -@pytest.mark.xfail(reason="need to fix backwards in time") def test_write_timebackward(fieldset, tmp_zarrfile): - def Update_lon(particles, fieldset): # pragma: no cover - dt = particles.dt / np.timedelta64(1, "s") - particles.dlon -= 0.1 * dt - - pset = ParticleSet( - fieldset, - pclass=Particle, - lat=np.linspace(0, 1, 3), - lon=[0, 0, 0], - time=np.array([np.datetime64("2000-01-01") for _ in range(3)]), - ) + release_time = fieldset.time_interval.left + [np.timedelta64(i + 1, "s") for i in range(3)] + pset = ParticleSet(fieldset, lat=[0, 1, 2], lon=[0, 0, 0], time=release_time) pfile = ParticleFile(tmp_zarrfile, outputdt=np.timedelta64(1, "s")) - pset.execute(pset.Kernel(Update_lon), runtime=np.timedelta64(1, "s"), dt=-np.timedelta64(1, "s"), output_file=pfile) + pset.execute(DoNothing, runtime=np.timedelta64(3, "s"), dt=-np.timedelta64(1, "s"), output_file=pfile) + ds = xr.open_zarr(tmp_zarrfile) trajs = ds["trajectory"][:] + + output_time = ds["time"][:].values + assert trajs.values.dtype == "int64" assert np.all(np.diff(trajs.values) < 0) # all particles written in order of release + doutput_time = np.diff(output_time, axis=1) + assert np.all(doutput_time[~np.isnan(doutput_time)] < 0) # all times written in decreasing order @pytest.mark.xfail @@ -291,31 +277,52 @@ def SampleP(particles, fieldset): # pragma: no cover assert fieldset.U.grid.lat[yi] <= lat < fieldset.U.grid.lat[yi + 1] -@pytest.mark.skip -@pytest.mark.v4alpha +@pytest.mark.parametrize("outputdt", [np.timedelta64(1, "s"), np.timedelta64(2, 
"s"), np.timedelta64(3, "s")]) +def test_time_is_age(fieldset, tmp_zarrfile, outputdt): + # Test that particle age is same as time - initial_time + npart = 10 + + AgeParticle = get_default_particle(np.float64).add_variable(Variable("age", initial=0.0)) + + def IncreaseAge(particles, fieldset): # pragma: no cover + particles.age += particles.dt / np.timedelta64(1, "s") + + time = fieldset.time_interval.left + np.arange(npart) * np.timedelta64(1, "s") + pset = ParticleSet(fieldset, pclass=AgeParticle, lon=npart * [0], lat=npart * [0], time=time) + ofile = ParticleFile(tmp_zarrfile, outputdt=outputdt) + + pset.execute(IncreaseAge, runtime=np.timedelta64(npart * 2, "s"), dt=np.timedelta64(1, "s"), output_file=ofile) + + ds = xr.open_zarr(tmp_zarrfile) + age = ds["age"][:].values + ds_timediff = np.zeros_like(age) + for i in range(npart): + ds_timediff[i, :] = (ds.time.values[i, :] - time[i]) / np.timedelta64(1, "s") + np.testing.assert_equal(age, ds_timediff) + + def test_reset_dt(fieldset, tmp_zarrfile): # Assert that p.dt gets reset when a write_time is not a multiple of dt # for p.dt=0.02 to reach outputdt=0.05 and endtime=0.1, the steps should be [0.2, 0.2, 0.1, 0.2, 0.2, 0.1], resulting in 6 kernel executions + dt = np.timedelta64(20, "ms") def Update_lon(particles, fieldset): # pragma: no cover particles.dlon += 0.1 particle = get_default_particle(np.float64) pset = ParticleSet(fieldset, pclass=particle, lon=[0], lat=[0]) + pset.update_dt_dtype(dt.dtype) ofile = ParticleFile(tmp_zarrfile, outputdt=np.timedelta64(50, "ms")) - dt = np.timedelta64(20, "ms") - pset.execute(pset.Kernel(Update_lon), runtime=6 * dt, dt=dt, output_file=ofile) + pset.execute(pset.Kernel(Update_lon), runtime=5 * dt, dt=dt, output_file=ofile) assert np.allclose(pset.lon, 0.6) -@pytest.mark.v4alpha -@pytest.mark.xfail def test_correct_misaligned_outputdt_dt(fieldset, tmp_zarrfile): """Testing that outputdt does not need to be a multiple of dt.""" def Update_lon(particles, fieldset): # pragma: no cover - particles.dlon += particles.dt / np.timedelta64(1, "s") + particles.lon += particles.dt / np.timedelta64(1, "s") particle = get_default_particle(np.float64) pset = ParticleSet(fieldset, pclass=particle, lon=[0], lat=[0]) @@ -324,9 +331,7 @@ def Update_lon(particles, fieldset): # pragma: no cover ds = xr.open_zarr(tmp_zarrfile) assert np.allclose(ds.lon.values, [0, 3, 6, 9]) - assert np.allclose( - ds.time.values[0, :], [np.timedelta64(t, "s") for t in [0, 3, 6, 9]], atol=np.timedelta64(1, "ns") - ) + assert np.allclose((ds.time.values - ds.time.values[0, 0]) / np.timedelta64(1, "s"), [0, 3, 6, 9]) def setup_pset_execute(*, fieldset: FieldSet, outputdt: timedelta, execute_kwargs, particle_class=Particle): @@ -360,7 +365,20 @@ def test_pset_execute_outputdt_forwards(fieldset): assert np.all(ds.isel(trajectory=0).time.diff(dim="obs").values == np.timedelta64(outputdt)) -@pytest.mark.skip(reason="backwards in time not yet working") +def test_pset_execute_output_time_forwards(fieldset): + """Testing output times start at initial time and end at initial time + runtime.""" + outputdt = np.timedelta64(1, "h") + runtime = np.timedelta64(5, "h") + dt = np.timedelta64(5, "m") + + ds = setup_pset_execute(fieldset=fieldset, outputdt=outputdt, execute_kwargs=dict(runtime=runtime, dt=dt)) + + assert ( + ds.time[0, 0].values == fieldset.time_interval.left + and ds.time[0, -1].values == fieldset.time_interval.left + runtime + ) + + def test_pset_execute_outputdt_backwards(fieldset): """Testing output data dt matches outputdt in 
backwards time.""" outputdt = timedelta(hours=1) @@ -372,7 +390,6 @@ def test_pset_execute_outputdt_backwards(fieldset): assert np.all(file_outputdt == np.timedelta64(-outputdt)) -@pytest.mark.xfail(reason="TODO v4: Update dataset loading") def test_pset_execute_outputdt_backwards_fieldset_timevarying(): """test_pset_execute_outputdt_backwards() still passed despite #1722 as it doesn't account for time-varying fields, which for some reason #1722 @@ -382,14 +399,9 @@ def test_pset_execute_outputdt_backwards_fieldset_timevarying(): dt = -timedelta(minutes=5) # TODO: Not ideal using the `download_example_dataset` here, but I'm struggling to recreate this error using the test suite fieldsets we have - example_dataset_folder = parcels.download_example_dataset("MovingEddies_data") - filenames = { - "U": str(example_dataset_folder / "moving_eddiesU.nc"), - "V": str(example_dataset_folder / "moving_eddiesV.nc"), - } - variables = {"U": "vozocrtx", "V": "vomecrty"} - dimensions = {"lon": "nav_lon", "lat": "nav_lat", "time": "time_counter"} - fieldset = parcels.FieldSet.from_netcdf(filenames, variables, dimensions) + example_dataset_folder = download_example_dataset("CopernicusMarine_data_for_Argo_tutorial") + ds_in = xr.open_mfdataset(f"{example_dataset_folder}/*.nc", combine="by_coords") + fieldset = FieldSet.from_copernicusmarine(ds_in) ds = setup_pset_execute(outputdt=outputdt, execute_kwargs=dict(runtime=runtime, dt=dt), fieldset=fieldset) file_outputdt = ds.isel(trajectory=0).time.diff(dim="obs").values @@ -429,7 +441,6 @@ def test_particlefile_write_particle_data(tmp_store): time_interval=time_interval, initial={ "time": np.full(nparticles, fill_value=left), - "time_nextloop": np.full(nparticles, fill_value=left), "lon": initial_lon, "dt": np.full(nparticles, fill_value=1.0), "trajectory": np.arange(nparticles), diff --git a/tests/test_particleset.py b/tests/test_particleset.py index 06a8dd09d..081e6966b 100644 --- a/tests/test_particleset.py +++ b/tests/test_particleset.py @@ -124,7 +124,7 @@ def Addlon(particles, fieldset): # pragma: no cover particles.dlon += particles.dt / np.timedelta64(1, "s") pset.execute(Addlon, dt=np.timedelta64(2, "s"), runtime=np.timedelta64(8, "s"), verbose_progress=False) - assert np.allclose([p.lon + p.dlon for p in pset], [8 - t for t in times]) + assert np.allclose([p.lon + p.dlon for p in pset], [10 - t for t in times]) def test_populate_indices(fieldset): diff --git a/tests/test_particleset_execute.py b/tests/test_particleset_execute.py index fa4591986..d76b92499 100644 --- a/tests/test_particleset_execute.py +++ b/tests/test_particleset_execute.py @@ -162,7 +162,7 @@ def AddDt(particles, fieldset): # pragma: no cover pset = ParticleSet(fieldset, pclass=pclass, lon=0, lat=0) pset.update_dt_dtype(dt.dtype) pset.execute(AddDt, runtime=dt * 10, dt=dt) - np.testing.assert_allclose(pset[0].added_dt, 10.0 * dt / np.timedelta64(1, "s"), atol=1e-5) + np.testing.assert_allclose(pset[0].added_dt, 11.0 * dt / np.timedelta64(1, "s"), atol=1e-5) def test_pset_execute_subsecond_dt_error(fieldset): @@ -207,7 +207,7 @@ def AddLat(particles, fieldset): # pragma: no cover particles.dlat += 0.1 k_add = pset.Kernel(AddLat) - for _ in range(n + 1): + for _ in range(n): pset.execute(k_add, runtime=np.timedelta64(1, "s"), dt=np.timedelta64(1, "s")) if with_delete: pset.remove_indices(len(pset) - 1) @@ -227,7 +227,7 @@ def test_execution_endtime(fieldset, starttime, endtime, dt): dt = np.timedelta64(dt, "s") pset = ParticleSet(fieldset, time=starttime, lon=0, lat=0) 
pset.execute(DoNothing, endtime=endtime, dt=dt) - assert abs(pset.time_nextloop - endtime) < np.timedelta64(1, "ms") + assert abs(pset.time - endtime) < np.timedelta64(1, "ms") def test_dont_run_particles_outside_starttime(fieldset): @@ -241,9 +241,9 @@ def AddLon(particles, fieldset): # pragma: no cover pset = ParticleSet(fieldset, lon=np.zeros(len(start_times)), lat=np.zeros(len(start_times)), time=start_times) pset.execute(AddLon, dt=np.timedelta64(1, "s"), endtime=endtime) - np.testing.assert_array_equal(pset.lon, [8, 6, 0]) - assert pset.time_nextloop[0:1] == endtime - assert pset.time_nextloop[2] == start_times[2] # this particle has not been executed + np.testing.assert_array_equal(pset.lon, [9, 7, 0]) + assert pset.time[0:1] == endtime + assert pset.time[2] == start_times[2] # this particle has not been executed # Test backward in time (note third particle is outside endtime) start_times = [fieldset.time_interval.right - np.timedelta64(t, "s") for t in [0, 2, 10]] @@ -252,9 +252,9 @@ def AddLon(particles, fieldset): # pragma: no cover pset = ParticleSet(fieldset, lon=np.zeros(len(start_times)), lat=np.zeros(len(start_times)), time=start_times) pset.execute(AddLon, dt=-np.timedelta64(1, "s"), endtime=endtime) - np.testing.assert_array_equal(pset.lon, [8, 6, 0]) - assert pset.time_nextloop[0:1] == endtime - assert pset.time_nextloop[2] == start_times[2] # this particle has not been executed + np.testing.assert_array_equal(pset.lon, [9, 7, 0]) + assert pset.time[0:1] == endtime + assert pset.time[2] == start_times[2] # this particle has not been executed def test_some_particles_throw_outofbounds(zonal_flow_fieldset): @@ -336,7 +336,7 @@ def MoveLeft(particles, fieldset): # pragma: no cover lon = np.linspace(0.05, 6.95, npart) lat = np.linspace(1, 0, npart) pset = ParticleSet(fieldset, lon=lon, lat=lat) - pset.execute([MoveRight, MoveLeft], runtime=np.timedelta64(61, "s"), dt=np.timedelta64(1, "s")) + pset.execute([MoveRight, MoveLeft], runtime=np.timedelta64(60, "s"), dt=np.timedelta64(1, "s")) assert len(pset) == npart np.testing.assert_allclose(pset.lon, [6.05, 5.95], rtol=1e-5) np.testing.assert_allclose(pset.lat, lat, rtol=1e-5) @@ -354,7 +354,7 @@ def test_execution_runtime(fieldset, starttime, runtime, dt, npart): dt = np.timedelta64(dt, "s") pset = ParticleSet(fieldset, time=starttime, lon=np.zeros(npart), lat=np.zeros(npart)) pset.execute(DoNothing, runtime=runtime, dt=dt) - assert all([abs(p.time_nextloop - starttime - runtime * sign_dt) < np.timedelta64(1, "ms") for p in pset]) + assert all([abs(p.time - starttime - runtime * sign_dt) < np.timedelta64(1, "ms") for p in pset]) def test_changing_dt_in_kernel(fieldset): @@ -363,9 +363,9 @@ def KernelCounter(particles, fieldset): # pragma: no cover pset = ParticleSet(fieldset, lon=np.zeros(1), lat=np.zeros(1)) pset.execute(KernelCounter, dt=np.timedelta64(2, "s"), runtime=np.timedelta64(5, "s")) - assert pset.lon == 3 - print(pset.dt) + assert pset.lon == 4 assert pset.dt == np.timedelta64(2, "s") + assert pset.time == fieldset.time_interval.left + np.timedelta64(5, "s") @pytest.mark.parametrize("npart", [1, 100])