diff --git a/.github/scripts/setup_devbranch.py b/.github/scripts/setup_devbranch.py index 96ab60dbb..001390fa0 100644 --- a/.github/scripts/setup_devbranch.py +++ b/.github/scripts/setup_devbranch.py @@ -104,8 +104,9 @@ def setup_devbranch(): Just changes files, all `git` commands are in the setup_devbranch.sh file. """ main_version = get_last_version().strip('v') - - dev_version = f"{main_version}-dev" + semver = main_version.split(".") + semver[-1] = f"{int(semver[-1]) + 1}-dev" + dev_version = ".".join(semver) update_setup(dev_version) update_version(dev_version) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..686ec1e9a --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,70 @@ +name: GitHub CI + +# Execute this for every push +on: [push] + +# Use bash explicitly for being able to enter the conda environment +defaults: + run: + shell: bash -l {0} + +jobs: + build-and-test: + name: Build Env, Install, Unit Tests + runs-on: ubuntu-latest + permissions: + # For publishing results + checks: write + + # Run this test for different Python versions + strategy: + # Do not abort other tests if only a single one fails + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11"] + + steps: + - + name: Checkout Repo + uses: actions/checkout@v3 + - + # Store the current date to use it as cache key for the environment + name: Get current date + id: date + run: echo "date=$(date +%Y-%m-%d)" >> "${GITHUB_OUTPUT}" + - + name: Create Environment with Mamba + uses: mamba-org/setup-micromamba@v1 + with: + environment-name: climada_env_${{ matrix.python-version }} + environment-file: requirements/env_climada.yml + create-args: >- + python=${{ matrix.python-version }} + make + init-shell: >- + bash + # Persist environment for branch, Python version, single day + cache-environment-key: env-${{ github.ref }}-${{ matrix.python-version }}-${{ steps.date.outputs.date }} + - + name: Install CLIMADA + run: | + python -m pip install ".[test]" + - + name: Run Unit Tests + run: | + make unit_test + - + name: Publish Test Results + uses: EnricoMi/publish-unit-test-result-action@v2 + if: always() + with: + junit_files: tests_xml/tests.xml + check_name: "Unit Test Results Python ${{ matrix.python-version }}" + comment_mode: "off" + - + name: Upload Coverage Reports + if: always() + uses: actions/upload-artifact@v3 + with: + name: coverage-report-unittests-py${{ matrix.python-version }} + path: coverage/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c270940c..9d60ac6bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,8 @@ Code freeze date: YYYY-MM-DD ### Changed +- Rearranged file-system structure: `data` directory moved into `climada` package directory. 
[#781](https://github.com/CLIMADA-project/climada_python/pull/781) + ### Fixed ### Deprecated diff --git a/Makefile b/Makefile index 7364ab949..57c5a7035 100644 --- a/Makefile +++ b/Makefile @@ -29,11 +29,11 @@ install_test : ## Test installation was successful .PHONY : data_test data_test : ## Test data APIs - python test_data_api.py + python script/jenkins/test_data_api.py .PHONY : notebook_test notebook_test : ## Test notebooks in doc/tutorial - python test_notebooks.py + python script/jenkins/test_notebooks.py report .PHONY : integ_test integ_test : ## Integration tests execution with xml reports diff --git a/README.md b/README.md index 908af1486..56f37a948 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,8 @@ CLIMADA stands for **CLIM**ate **ADA**ptation and is a probabilistic natural catastrophe impact model, that also calculates averted damage (benefit) thanks to adaptation measures of any kind (from grey to green infrastructure, behavioural, etc.). -As of today, CLIMADA provides global coverage of major climate-related extreme-weather hazards at high resolution via a [data API](https://climada.ethz.ch/data-api/v1/docs), namely (i) tropical cyclones, (ii) river flood, (iii) agro drought and (iv) European winter storms, all at 4km spatial resolution - wildfire to be added soon. For all hazards, historic and probabilistic event sets exist, for some also under select climate forcing scenarios (RCPs) at distinct time horizons (e.g. 2040). See also [papers](https://github.com/CLIMADA-project/climada_papers) for details. +As of today, CLIMADA provides global coverage of major climate-related extreme-weather hazards at high resolution (4x4km) via a [data API](https://climada.ethz.ch/data-api/v1/docs). For select hazards, historic and probabilistic event sets exist for past, present and future climate at distinct time horizons. +You will find a repository containing scientific peer-reviewed articles that explain software components implemented in CLIMADA [here](https://github.com/CLIMADA-project/climada_papers). CLIMADA is divided into two parts (two repositories): @@ -15,30 +16,32 @@ CLIMADA is divided into two parts (two repositories): It is recommend for new users to begin with the core (1) and the [tutorials](https://github.com/CLIMADA-project/climada_python/tree/main/doc/tutorial) therein. -This is the Python (3.8+) version of CLIMADA - please see https://github.com/davidnbresch/climada for backward compatibility (MATLAB). +This is the Python (3.9+) version of CLIMADA - please see [here](https://github.com/davidnbresch/climada) for backward compatibility with the MATLAB version. ## Getting started CLIMADA runs on Windows, macOS and Linux. The released versions of the CLIMADA core can be installed directly through Anaconda: + ```shell conda install -c conda-forge climada ``` + It is **highly recommended** to install CLIMADA into a **separate** Anaconda environment. See the [installation guide](https://climada-python.readthedocs.io/en/latest/guide/install.html) for further information. -Follow the [tutorial](https://climada-python.readthedocs.io/en/latest/tutorial/1_main_climada.html) `climada_python-x.y.z/doc/tutorial/1_main_climada.ipynb` in a Jupyter Notebook to see what can be done with CLIMADA and how. +Follow the [tutorials](https://climada-python.readthedocs.io/en/stable/tutorial/1_main_climada.html) in a Jupyter Notebook to see what can be done with CLIMADA and how. 
## Documentation -Documentation is available on Read the Docs: +The online documentation is available on [Read the Docs](https://climada-python.readthedocs.io/en/stable/). The documentation of each release version of CLIMADA can be accessed separately through the drop-down menu at the bottom of the left sidebar. Additionally, the version 'stable' refers to the most recent release (installed via `conda`), and 'latest' refers to the latest unstable development version (the `develop` branch). -Note that all the documentations has two versions,'latest' and 'stable', and explicit version numbers, such as 'v3.1.1', in the url path. 'latest' is created from the 'develop' branch and has the latest changes by developers, 'stable' from the latest release. For more details about documentation versions, please have a look at [here](https://readthedocs.org/projects/climada-python/versions/). CLIMADA python: * [online (recommended)](https://climada-python.readthedocs.io/en/latest/) * [PDF file](https://climada-python.readthedocs.io/_/downloads/en/stable/pdf/) +* [core Tutorials on GitHub](https://github.com/CLIMADA-project/climada_python/tree/main/doc/tutorial) CLIMADA petals: @@ -50,23 +53,12 @@ The documentation can also be [built locally](https://climada-python.readthedocs ## Citing CLIMADA -If you use CLIMADA please cite (in general, in particular for academic work) : - -The [used version](https://zenodo.org/search?page=1&size=20&q=climada) - -and/or the following published articles: +See the [Citation Guide](https://climada-python.readthedocs.io/en/latest/misc/citation.html). -Aznar-Siguan, G. and Bresch, D. N., 2019: CLIMADA v1: a global weather and climate risk assessment platform, Geosci. Model Dev., 12, 3085–3097, https://doi.org/10.5194/gmd-12-3085-2019 +Please use the following logo if you are presenting results obtained with or through CLIMADA: -Bresch, D. N. and Aznar-Siguan, G., 2021: CLIMADA v1.4.1: towards a globally consistent adaptation options appraisal tool, Geosci. Model Dev., 14, 351-363, https://doi.org/10.5194/gmd-14-351-2021 - -Please see all CLIMADA-related scientific publications in our [repository of scientific publications](https://github.com/CLIMADA-project/climada_papers) and cite according to your use of select features, be it hazard set(s), exposure(s) ... - -In presentations or other graphical material, as well as in reports etc., where applicable, please add the logo as follows:\ ![https://github.com/CLIMADA-project/climada_python/blob/main/doc/guide/img/CLIMADA_logo_QR.png](https://github.com/CLIMADA-project/climada_python/blob/main/doc/guide/img/CLIMADA_logo_QR.png?raw=true) -As key link, please use https://wcr.ethz.ch/research/climada.html, as it will last and provides a bit of an intro, especially for those not familiar with GitHub - plus a nice CLIMADA infographic towards the bottom of the page - ## Contributing See the [Contribution Guide](CONTRIBUTING.md). 
diff --git a/climada/__init__.py b/climada/__init__.py index f3f7245dc..8fc4b8764 100755 --- a/climada/__init__.py +++ b/climada/__init__.py @@ -28,7 +28,7 @@ GSDP_DIR = SYSTEM_DIR.joinpath('GSDP') REPO_DATA = { - 'data/system': [ + 'climada/data/system': [ ISIMIP_GPWV3_NATID_150AS, GLB_CENTROIDS_MAT, ENT_TEMPLATE_XLS, @@ -44,12 +44,12 @@ SYSTEM_DIR.joinpath('tc_impf_cal_v01_EDR.csv'), SYSTEM_DIR.joinpath('tc_impf_cal_v01_RMSF.csv'), ], - 'data/system/GSDP': [ + 'climada/data/system/GSDP': [ GSDP_DIR.joinpath(f'{cc}_GSDP.xls') for cc in ['AUS', 'BRA', 'CAN', 'CHE', 'CHN', 'DEU', 'FRA', 'IDN', 'IND', 'JPN', 'MEX', 'TUR', 'USA', 'ZAF'] ], - 'data/demo': [ + 'climada/data/demo': [ ENT_DEMO_TODAY, ENT_DEMO_FUTURE, EXP_DEMO_H5, diff --git a/data/demo/SC22000_VE__M1.grd.gz b/climada/data/demo/SC22000_VE__M1.grd.gz old mode 100755 new mode 100644 similarity index 100% rename from data/demo/SC22000_VE__M1.grd.gz rename to climada/data/demo/SC22000_VE__M1.grd.gz diff --git a/data/demo/atl_prob_nonames.mat b/climada/data/demo/atl_prob_nonames.mat similarity index 100% rename from data/demo/atl_prob_nonames.mat rename to climada/data/demo/atl_prob_nonames.mat diff --git a/data/demo/demo_emdat_impact_data_2020.csv b/climada/data/demo/demo_emdat_impact_data_2020.csv similarity index 100% rename from data/demo/demo_emdat_impact_data_2020.csv rename to climada/data/demo/demo_emdat_impact_data_2020.csv diff --git a/data/demo/demo_future_TEST.xlsx b/climada/data/demo/demo_future_TEST.xlsx similarity index 100% rename from data/demo/demo_future_TEST.xlsx rename to climada/data/demo/demo_future_TEST.xlsx diff --git a/data/demo/demo_today.xlsx b/climada/data/demo/demo_today.xlsx similarity index 100% rename from data/demo/demo_today.xlsx rename to climada/data/demo/demo_today.xlsx diff --git a/data/demo/earth_engine/dresden.tif b/climada/data/demo/earth_engine/dresden.tif old mode 100755 new mode 100644 similarity index 100% rename from data/demo/earth_engine/dresden.tif rename to climada/data/demo/earth_engine/dresden.tif diff --git a/data/demo/earth_engine/population-density_median.tif b/climada/data/demo/earth_engine/population-density_median.tif old mode 100755 new mode 100644 similarity index 100% rename from data/demo/earth_engine/population-density_median.tif rename to climada/data/demo/earth_engine/population-density_median.tif diff --git a/data/demo/exp_demo_today.h5 b/climada/data/demo/exp_demo_today.h5 similarity index 100% rename from data/demo/exp_demo_today.h5 rename to climada/data/demo/exp_demo_today.h5 diff --git a/data/demo/fp_lothar_crop-test.nc b/climada/data/demo/fp_lothar_crop-test.nc similarity index 100% rename from data/demo/fp_lothar_crop-test.nc rename to climada/data/demo/fp_lothar_crop-test.nc diff --git a/data/demo/fp_xynthia_crop-test.nc b/climada/data/demo/fp_xynthia_crop-test.nc similarity index 100% rename from data/demo/fp_xynthia_crop-test.nc rename to climada/data/demo/fp_xynthia_crop-test.nc diff --git a/data/demo/ibtracs_global_intp-None_1992230N11325.csv b/climada/data/demo/ibtracs_global_intp-None_1992230N11325.csv similarity index 100% rename from data/demo/ibtracs_global_intp-None_1992230N11325.csv rename to climada/data/demo/ibtracs_global_intp-None_1992230N11325.csv diff --git a/data/demo/nl_rails.gpkg b/climada/data/demo/nl_rails.gpkg similarity index 100% rename from data/demo/nl_rails.gpkg rename to climada/data/demo/nl_rails.gpkg diff --git a/data/demo/tc_fl_1990_2004.h5 b/climada/data/demo/tc_fl_1990_2004.h5 similarity index 100% rename from 
data/demo/tc_fl_1990_2004.h5 rename to climada/data/demo/tc_fl_1990_2004.h5 diff --git a/data/system/FAOSTAT_data_country_codes.csv b/climada/data/system/FAOSTAT_data_country_codes.csv similarity index 100% rename from data/system/FAOSTAT_data_country_codes.csv rename to climada/data/system/FAOSTAT_data_country_codes.csv diff --git a/data/system/GDP_TWN_IMF_WEO_data.csv b/climada/data/system/GDP_TWN_IMF_WEO_data.csv similarity index 100% rename from data/system/GDP_TWN_IMF_WEO_data.csv rename to climada/data/system/GDP_TWN_IMF_WEO_data.csv diff --git a/data/system/GLB_NatID_grid_0360as_adv_2.mat b/climada/data/system/GLB_NatID_grid_0360as_adv_2.mat similarity index 100% rename from data/system/GLB_NatID_grid_0360as_adv_2.mat rename to climada/data/system/GLB_NatID_grid_0360as_adv_2.mat diff --git a/data/system/GSDP/AUS_GSDP.xls b/climada/data/system/GSDP/AUS_GSDP.xls similarity index 100% rename from data/system/GSDP/AUS_GSDP.xls rename to climada/data/system/GSDP/AUS_GSDP.xls diff --git a/data/system/GSDP/BRA_GSDP.xls b/climada/data/system/GSDP/BRA_GSDP.xls similarity index 100% rename from data/system/GSDP/BRA_GSDP.xls rename to climada/data/system/GSDP/BRA_GSDP.xls diff --git a/data/system/GSDP/CAN_GSDP.xls b/climada/data/system/GSDP/CAN_GSDP.xls similarity index 100% rename from data/system/GSDP/CAN_GSDP.xls rename to climada/data/system/GSDP/CAN_GSDP.xls diff --git a/data/system/GSDP/CHE_GSDP.xls b/climada/data/system/GSDP/CHE_GSDP.xls similarity index 100% rename from data/system/GSDP/CHE_GSDP.xls rename to climada/data/system/GSDP/CHE_GSDP.xls diff --git a/data/system/GSDP/CHN_GSDP.xls b/climada/data/system/GSDP/CHN_GSDP.xls similarity index 100% rename from data/system/GSDP/CHN_GSDP.xls rename to climada/data/system/GSDP/CHN_GSDP.xls diff --git a/data/system/GSDP/DEU_GSDP.xls b/climada/data/system/GSDP/DEU_GSDP.xls similarity index 100% rename from data/system/GSDP/DEU_GSDP.xls rename to climada/data/system/GSDP/DEU_GSDP.xls diff --git a/data/system/GSDP/FRA_GSDP.xls b/climada/data/system/GSDP/FRA_GSDP.xls similarity index 100% rename from data/system/GSDP/FRA_GSDP.xls rename to climada/data/system/GSDP/FRA_GSDP.xls diff --git a/data/system/GSDP/IDN_GSDP.xls b/climada/data/system/GSDP/IDN_GSDP.xls similarity index 100% rename from data/system/GSDP/IDN_GSDP.xls rename to climada/data/system/GSDP/IDN_GSDP.xls diff --git a/data/system/GSDP/IND_GSDP.xls b/climada/data/system/GSDP/IND_GSDP.xls similarity index 100% rename from data/system/GSDP/IND_GSDP.xls rename to climada/data/system/GSDP/IND_GSDP.xls diff --git a/data/system/GSDP/JPN_GSDP.xls b/climada/data/system/GSDP/JPN_GSDP.xls similarity index 100% rename from data/system/GSDP/JPN_GSDP.xls rename to climada/data/system/GSDP/JPN_GSDP.xls diff --git a/data/system/GSDP/MEX_GSDP.xls b/climada/data/system/GSDP/MEX_GSDP.xls similarity index 100% rename from data/system/GSDP/MEX_GSDP.xls rename to climada/data/system/GSDP/MEX_GSDP.xls diff --git a/data/system/GSDP/TUR_GSDP.xls b/climada/data/system/GSDP/TUR_GSDP.xls similarity index 100% rename from data/system/GSDP/TUR_GSDP.xls rename to climada/data/system/GSDP/TUR_GSDP.xls diff --git a/data/system/GSDP/USA_GSDP.xls b/climada/data/system/GSDP/USA_GSDP.xls similarity index 100% rename from data/system/GSDP/USA_GSDP.xls rename to climada/data/system/GSDP/USA_GSDP.xls diff --git a/data/system/GSDP/ZAF_GSDP.xls b/climada/data/system/GSDP/ZAF_GSDP.xls similarity index 100% rename from data/system/GSDP/ZAF_GSDP.xls rename to climada/data/system/GSDP/ZAF_GSDP.xls diff --git 
a/data/system/NatEarth_Centroids_150as.hdf5 b/climada/data/system/NatEarth_Centroids_150as.hdf5 similarity index 100% rename from data/system/NatEarth_Centroids_150as.hdf5 rename to climada/data/system/NatEarth_Centroids_150as.hdf5 diff --git a/data/system/NatEarth_Centroids_360as.hdf5 b/climada/data/system/NatEarth_Centroids_360as.hdf5 similarity index 100% rename from data/system/NatEarth_Centroids_360as.hdf5 rename to climada/data/system/NatEarth_Centroids_360as.hdf5 diff --git a/data/system/NatID_grid_0150as.nc b/climada/data/system/NatID_grid_0150as.nc old mode 100755 new mode 100644 similarity index 100% rename from data/system/NatID_grid_0150as.nc rename to climada/data/system/NatID_grid_0150as.nc diff --git a/data/system/NatRegIDs.csv b/climada/data/system/NatRegIDs.csv similarity index 100% rename from data/system/NatRegIDs.csv rename to climada/data/system/NatRegIDs.csv diff --git a/data/system/WEALTH2GDP_factors_CRI_2016.csv b/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv similarity index 100% rename from data/system/WEALTH2GDP_factors_CRI_2016.csv rename to climada/data/system/WEALTH2GDP_factors_CRI_2016.csv diff --git a/data/system/entity_template.xlsx b/climada/data/system/entity_template.xlsx similarity index 100% rename from data/system/entity_template.xlsx rename to climada/data/system/entity_template.xlsx diff --git a/data/system/hazard_template.xlsx b/climada/data/system/hazard_template.xlsx similarity index 100% rename from data/system/hazard_template.xlsx rename to climada/data/system/hazard_template.xlsx diff --git a/data/system/rcp_db.xls b/climada/data/system/rcp_db.xls similarity index 100% rename from data/system/rcp_db.xls rename to climada/data/system/rcp_db.xls diff --git a/data/system/tc_impf_cal_v01_EDR.csv b/climada/data/system/tc_impf_cal_v01_EDR.csv similarity index 100% rename from data/system/tc_impf_cal_v01_EDR.csv rename to climada/data/system/tc_impf_cal_v01_EDR.csv diff --git a/data/system/tc_impf_cal_v01_RMSF.csv b/climada/data/system/tc_impf_cal_v01_RMSF.csv similarity index 100% rename from data/system/tc_impf_cal_v01_RMSF.csv rename to climada/data/system/tc_impf_cal_v01_RMSF.csv diff --git a/data/system/tc_impf_cal_v01_TDR1.0.csv b/climada/data/system/tc_impf_cal_v01_TDR1.0.csv similarity index 100% rename from data/system/tc_impf_cal_v01_TDR1.0.csv rename to climada/data/system/tc_impf_cal_v01_TDR1.0.csv diff --git a/data/system/tc_impf_cal_v01_TDR1.5.csv b/climada/data/system/tc_impf_cal_v01_TDR1.5.csv similarity index 100% rename from data/system/tc_impf_cal_v01_TDR1.5.csv rename to climada/data/system/tc_impf_cal_v01_TDR1.5.csv diff --git a/climada/engine/impact.py b/climada/engine/impact.py index c1cbb8fc2..68033641f 100644 --- a/climada/engine/impact.py +++ b/climada/engine/impact.py @@ -21,7 +21,7 @@ __all__ = ['ImpactFreqCurve', 'Impact'] -from dataclasses import dataclass +from dataclasses import dataclass, field import logging import copy import csv @@ -1785,10 +1785,10 @@ class ImpactFreqCurve(): """Impact exceedence frequency curve. 
""" - return_per : np.array = np.array([]) + return_per : np.ndarray = field(default_factory=lambda: np.empty(0)) """return period""" - impact : np.array = np.array([]) + impact : np.ndarray = field(default_factory=lambda: np.empty(0)) """impact exceeding frequency""" unit : str = '' diff --git a/climada/engine/impact_data.py b/climada/engine/impact_data.py index f8cde2838..805773220 100644 --- a/climada/engine/impact_data.py +++ b/climada/engine/impact_data.py @@ -802,30 +802,46 @@ def emdat_impact_yearlysum(emdat_file_csv, countries=None, hazard=None, year_ran df_data[imp_str + " scaled"] = scale_impact2refyear(df_data[imp_str].values, df_data.Year.values, df_data.ISO.values, reference_year=reference_year) - out = pd.DataFrame(columns=['ISO', 'region_id', 'year', 'impact', - 'impact_scaled', 'reference_year']) - for country in df_data.ISO.unique(): - country = u_coord.country_to_iso(country, "alpha3") - if not df_data.loc[df_data.ISO == country].size: - continue - all_years = np.arange(min(df_data.Year), max(df_data.Year) + 1) - data_out = pd.DataFrame(index=np.arange(0, len(all_years)), - columns=out.columns) - df_country = df_data.loc[df_data.ISO == country] - for cnt, year in enumerate(all_years): - data_out.loc[cnt, 'year'] = year - data_out.loc[cnt, 'reference_year'] = reference_year - data_out.loc[cnt, 'ISO'] = country - data_out.loc[cnt, 'region_id'] = u_coord.country_to_iso(country, "numeric") - data_out.loc[cnt, 'impact'] = \ - np.nansum(df_country[df_country.Year.isin([year])][imp_str]) - data_out.loc[cnt, 'impact_scaled'] = \ - np.nansum(df_country[df_country.Year.isin([year])][imp_str + " scaled"]) - if '000 US' in imp_str: # EM-DAT damages provided in '000 USD - data_out.loc[cnt, 'impact'] = data_out.loc[cnt, 'impact'] * 1e3 - data_out.loc[cnt, 'impact_scaled'] = data_out.loc[cnt, 'impact_scaled'] * 1e3 - out = pd.concat([out, data_out]) - out = out.reset_index(drop=True) + + def country_df(df_data): + for data_iso in df_data.ISO.unique(): + country = u_coord.country_to_iso(data_iso, "alpha3") + + df_country = df_data.loc[df_data.ISO == country] + if not df_country.size: + continue + + # Retrieve impact data for all years + all_years = np.arange(min(df_data.Year), max(df_data.Year) + 1) + data_out = pd.DataFrame.from_records( + [ + ( + year, + np.nansum(df_country[df_country.Year.isin([year])][imp_str]), + np.nansum( + df_country[df_country.Year.isin([year])][ + imp_str + " scaled" + ] + ), + ) + for year in all_years + ], + columns=["year", "impact", "impact_scaled"] + ) + + # Add static data + data_out["reference_year"] = reference_year + data_out["ISO"] = country + data_out["region_id"] = u_coord.country_to_iso(country, "numeric") + + # EMDAT provides damage data in 1000 USD + if "000 US" in imp_str: + data_out["impact"] = data_out["impact"] * 1e3 + data_out["impact_scaled"] = data_out["impact_scaled"] * 1e3 + + yield data_out + + out = pd.concat(list(country_df(df_data)), ignore_index=True) return out diff --git a/climada/engine/test/test_impact_data.py b/climada/engine/test/test_impact_data.py index 65738c501..ccb3d966a 100644 --- a/climada/engine/test/test_impact_data.py +++ b/climada/engine/test/test_impact_data.py @@ -144,7 +144,7 @@ def test_emdat_impact_event_2020(self): self.assertEqual(2000, df['reference_year'].min()) def test_emdat_impact_yearlysum_no_futurewarning(self): - """Ensure that no FutureWarning is issued""" + """Ensure that no FutureWarning about `DataFrame.append` being deprecated is issued""" with warnings.catch_warnings(): # Make sure that 
FutureWarning will cause an error warnings.simplefilter("error", category=FutureWarning) diff --git a/climada/entity/exposures/test/test_litpop.py b/climada/entity/exposures/test/test_litpop.py index 851c910fb..d8ec001cd 100644 --- a/climada/entity/exposures/test/test_litpop.py +++ b/climada/entity/exposures/test/test_litpop.py @@ -317,7 +317,7 @@ def test_gridpoints_core_calc_offsets_exp_rescale(self): self.assertEqual(result_array.shape, results_check.shape) self.assertAlmostEqual(result_array.sum(), tot) self.assertEqual(result_array[1,2], results_check[1,2]) - np.testing.assert_array_almost_equal_nulp(result_array, results_check) + np.testing.assert_allclose(result_array, results_check) def test_grp_read_pass(self): """test _grp_read() to pass and return either dict with admin1 values or None""" diff --git a/climada/entity/exposures/test/test_nightlight.py b/climada/entity/exposures/test/test_nightlight.py index f7158ac77..f7b83b6a4 100644 --- a/climada/entity/exposures/test/test_nightlight.py +++ b/climada/entity/exposures/test/test_nightlight.py @@ -56,22 +56,6 @@ def test_required_files(self): self.assertRaises(ValueError, nightlight.get_required_nl_files, (-90, 90)) - def test_check_files_exist(self): - """Test check_nightlight_local_file_exists""" - # If invalid directory is supplied it has to fail - try: - nightlight.check_nl_local_file_exists( - np.ones(np.count_nonzero(BM_FILENAMES)), 'Invalid/path')[0] - raise Exception("if the path is not valid, check_nl_local_file_exists should fail") - except ValueError: - pass - files_exist = nightlight.check_nl_local_file_exists( - np.ones(np.count_nonzero(BM_FILENAMES)), SYSTEM_DIR) - self.assertTrue( - files_exist.sum() > 0, - f'{files_exist} {BM_FILENAMES}' - ) - def test_download_nightlight_files(self): """Test check_nightlight_local_file_exists""" # Not the same length of arguments @@ -118,42 +102,6 @@ def test_get_required_nl_files(self): bool = np.array_equal(np.array([0, 0, 0, 0, 0, 0, 1, 0]), req_files) self.assertTrue(bool) - def test_check_nl_local_file_exists(self): - """ Test that an array with the correct number of already existing files - is produced, the LOGGER messages logged and the ValueError raised. """ - - # check logger messages by giving a to short req_file - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='WARNING') as cm: - nightlight.check_nl_local_file_exists(required_files = np.array([0, 0, 1, 1])) - self.assertIn('The parameter \'required_files\' was too short and is ignored', - cm.output[0]) - - # check logger message: not all files are available - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: - nightlight.check_nl_local_file_exists() - self.assertIn('Not all satellite files available. 
Found ', cm.output[0]) - self.assertIn(f' out of 8 required files in {Path(SYSTEM_DIR)}', cm.output[0]) - - # check logger message: no files found in checkpath - check_path = Path('climada/entity/exposures') - with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='INFO') as cm: - # using a random path where no files are stored - nightlight.check_nl_local_file_exists(check_path=check_path) - self.assertIn(f'No satellite files found locally in {check_path}', - cm.output[0]) - - # test raises with wrong path - check_path = Path('/random/wrong/path') - with self.assertRaises(ValueError) as cm: - nightlight.check_nl_local_file_exists(check_path=check_path) - self.assertEqual(f'The given path does not exist: {check_path}', - str(cm.exception)) - - # test that files_exist is correct - files_exist = nightlight.check_nl_local_file_exists() - self.assertGreaterEqual(int(sum(files_exist)), 3) - self.assertLessEqual(int(sum(files_exist)), 8) - # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestNightLight) diff --git a/climada/entity/impact_funcs/test/test_tc.py b/climada/entity/impact_funcs/test/test_tc.py index c469b12a1..e2db9e609 100644 --- a/climada/entity/impact_funcs/test/test_tc.py +++ b/climada/entity/impact_funcs/test/test_tc.py @@ -39,21 +39,30 @@ def test_default_values_pass(self): self.assertTrue(np.array_equal(imp_fun.intensity, np.arange(0, 121, 5))) self.assertTrue(np.array_equal(imp_fun.paa, np.ones((25,)))) self.assertTrue(np.array_equal(imp_fun.mdd[0:6], np.zeros((6,)))) - self.assertTrue(np.array_equal(imp_fun.mdd[6:10], - np.array([0.0006753419543492556, 0.006790495604105169, - 0.02425254393374475, 0.05758706257339458]))) - self.assertTrue(np.array_equal(imp_fun.mdd[10:15], - np.array([0.10870556455111065, 0.1761433569521351, - 0.2553983618763961, 0.34033822528795565, - 0.4249447743109498]))) - self.assertTrue(np.array_equal(imp_fun.mdd[15:20], - np.array([0.5045777092933046, 0.576424302849412, - 0.6393091739184916, 0.6932203123193963, - 0.7388256596555696]))) - self.assertTrue(np.array_equal(imp_fun.mdd[20:25], - np.array([0.777104531116526, 0.8091124649261859, - 0.8358522190681132, 0.8582150905529946, - 0.8769633232141456]))) + np.testing.assert_allclose( + imp_fun.mdd[6:25], + [ + 0.0006753419543492556, + 0.006790495604105169, + 0.02425254393374475, + 0.05758706257339458, + 0.10870556455111065, + 0.1761433569521351, + 0.2553983618763961, + 0.34033822528795565, + 0.4249447743109498, + 0.5045777092933046, + 0.576424302849412, + 0.6393091739184916, + 0.6932203123193963, + 0.7388256596555696, + 0.777104531116526, + 0.8091124649261859, + 0.8358522190681132, + 0.8582150905529946, + 0.8769633232141456, + ], + ) def test_values_pass(self): """Compute mdr interpolating values.""" diff --git a/climada/test/test_nightlight.py b/climada/test/test_nightlight.py index ce571cef2..caa05820d 100644 --- a/climada/test/test_nightlight.py +++ b/climada/test/test_nightlight.py @@ -254,6 +254,58 @@ def test_untar_noaa_stable_nighlight(self): self.assertIn('found more than one potential intensity file in', cm.output[0]) path_tar.unlink() + def test_check_nl_local_file_exists(self): + """ Test that an array with the correct number of already existing files + is produced, the LOGGER messages logged and the ValueError raised. 
""" + + # check logger messages by giving a to short req_file + with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='WARNING') as cm: + nightlight.check_nl_local_file_exists(required_files = np.array([0, 0, 1, 1])) + self.assertIn('The parameter \'required_files\' was too short and is ignored', + cm.output[0]) + + # check logger message: not all files are available + with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='DEBUG') as cm: + nightlight.check_nl_local_file_exists() + self.assertIn('Not all satellite files available. Found ', cm.output[0]) + self.assertIn(f' out of 8 required files in {Path(SYSTEM_DIR)}', cm.output[0]) + + # check logger message: no files found in checkpath + check_path = Path('climada/entity/exposures') + with self.assertLogs('climada.entity.exposures.litpop.nightlight', level='INFO') as cm: + # using a random path where no files are stored + nightlight.check_nl_local_file_exists(check_path=check_path) + self.assertIn(f'No satellite files found locally in {check_path}', + cm.output[0]) + + # test raises with wrong path + check_path = Path('/random/wrong/path') + with self.assertRaises(ValueError) as cm: + nightlight.check_nl_local_file_exists(check_path=check_path) + self.assertEqual(f'The given path does not exist: {check_path}', + str(cm.exception)) + + # test that files_exist is correct + files_exist = nightlight.check_nl_local_file_exists() + self.assertGreaterEqual(int(sum(files_exist)), 3) + self.assertLessEqual(int(sum(files_exist)), 8) + + def test_check_files_exist(self): + """Test check_nightlight_local_file_exists""" + # If invalid directory is supplied it has to fail + try: + nightlight.check_nl_local_file_exists( + np.ones(np.count_nonzero(BM_FILENAMES)), 'Invalid/path')[0] + raise Exception("if the path is not valid, check_nl_local_file_exists should fail") + except ValueError: + pass + files_exist = nightlight.check_nl_local_file_exists( + np.ones(np.count_nonzero(BM_FILENAMES)), SYSTEM_DIR) + self.assertTrue( + files_exist.sum() > 0, + f'{files_exist} {BM_FILENAMES}' + ) + # Execute Tests if __name__ == "__main__": TESTS = unittest.TestLoader().loadTestsFromTestCase(TestNightlight) diff --git a/doc/guide/Guide_Continuous_Integration_and_Testing.ipynb b/doc/guide/Guide_Continuous_Integration_and_Testing.ipynb index fa6383471..ce1800d50 100644 --- a/doc/guide/Guide_Continuous_Integration_and_Testing.ipynb +++ b/doc/guide/Guide_Continuous_Integration_and_Testing.ipynb @@ -299,7 +299,12 @@ "\n", "- All tests must pass before submitting a pull request.\n", "- Integration tests don't run on feature branches in Jenkins, therefore developers are requested to run them locally.\n", - "- After a pull request was accepted and the changes are merged to the develop branch, integration tests may still fail there and have to be addressed." + "- After a pull request was accepted and the changes are merged to the develop branch, integration tests may still fail there and have to be addressed.\n", + "\n", + "#### GitHub Actions\n", + "\n", + "We adopted test automation via GitHub Actions in an experimental state.\n", + "See [GitHub Actions CI](github-actions.rst) for details." 
] }, { diff --git a/doc/guide/Guide_Euler.ipynb b/doc/guide/Guide_Euler.ipynb index db3bff221..ee6ddccd9 100644 --- a/doc/guide/Guide_Euler.ipynb +++ b/doc/guide/Guide_Euler.ipynb @@ -111,7 +111,7 @@ "{\n", " \"local_data\": {\n", " \"system\": \"/cluster/work/climate/USERNAME/climada/data\",\n", - " \"demo\": \"/cluster/project/climate/USERNAME/climada_python/data/demo\",\n", + " \"demo\": \"/cluster/project/climate/USERNAME/climada/data/demo\",\n", " \"save_dir\": \"/cluster/work/climate/USERNAME/climada/results\"\n", " }\n", "}\n", diff --git a/doc/guide/github-actions.rst b/doc/guide/github-actions.rst new file mode 100644 index 000000000..efaddc276 --- /dev/null +++ b/doc/guide/github-actions.rst @@ -0,0 +1,29 @@ +================= +GitHub Actions CI +================= + +CLIMADA has been using a private Jenkins instance for automated testing (Continuous Integration, CI), see :doc:`Guide_Continuous_Integration_and_Testing`. +We recently adopted `GitHub Actions `_ for automated unit testing. +GitHub Actions is a service provided by GitHub, which lets you configure CI/CD pipelines based on YAML configuration files. +GitHub provides servers with ample computational resources to create software environments, install software, test it, and deploy it. +See the `GitHub Actions Overview `_ for a technical introduction, and the `Workflow Syntax `_ as a reference for the pipeline definitions. + +The CI results for each pull request can be inspected in the "Checks" tab. +For GitHub Actions, users can inspect the logs of every step for every job. + +.. note:: + + As of CLIMADA v4.0, the default CI technology remains Jenkins. + GitHub Actions CI is currently considered experimental for CLIMADA development. + +--------------------- +Unit Testing Pipeline +--------------------- + +This pipeline is defined by the ``.github/workflows/ci.yml`` file. +It contains a single job which will create a CLIMADA environment with Mamba for multiple Python versions, install CLIMADA, run the unit tests, and report the test coverage as well as the simplified test results. +The job has a `strategy `_ which runs it multiple times for different Python versions. +This way, we make sure that CLIMADA is compatible with all currently supported versions of Python. + +The coverage reports in HTML format will be uploaded as job artifacts and can be downloaded as ZIP files. +The test results are simple testing summaries that will appear as individual checks/jobs after the respective job has completed. diff --git a/doc/index.rst b/doc/index.rst index d68e16dcf..171ecbba7 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -10,6 +10,8 @@ CLIMADA stands for CLIMate ADAptation and is a probabilistic natural catastrophe CLIMADA is primarily developed and maintained by the `Weather and Climate Risks Group `_ at `ETH Zürich `_. +If you use CLIMADA for your own scientific work, please reference the appropriate publications according to the :doc:`misc/citation`. + This is the documentation of the CLIMADA core module which contains all functionalities necessary for performing climate risk analysis and appraisal of adaptation options. Modules for generating different types of hazards and other specialized applications can be found in the `CLIMADA Petals `_ module. Jump right in: @@ -102,6 +104,7 @@ Jump right in: Performance and Best Practices Coding Conventions Building the Documentation + guide/github-actions .. 
toctree:: @@ -112,3 +115,4 @@ Jump right in: Changelog List of Authors Contribution Guide + misc/citation diff --git a/doc/misc/README.md b/doc/misc/README.md index 369cecb58..d39bb6700 100644 --- a/doc/misc/README.md +++ b/doc/misc/README.md @@ -2,7 +2,8 @@ CLIMADA stands for **CLIM**ate **ADA**ptation and is a probabilistic natural catastrophe impact model, that also calculates averted damage (benefit) thanks to adaptation measures of any kind (from grey to green infrastructure, behavioural, etc.). -As of today, CLIMADA provides global coverage of major climate-related extreme-weather hazards at high resolution via a [data API](https://climada.ethz.ch/data-api/v1/docs), namely (i) tropical cyclones, (ii) river flood, (iii) agro drought and (iv) European winter storms, all at 4km spatial resolution - wildfire to be added soon. For all hazards, historic and probabilistic event sets exist, for some also under select climate forcing scenarios (RCPs) at distinct time horizons (e.g. 2040). See also [papers](https://github.com/CLIMADA-project/climada_papers) for details. +As of today, CLIMADA provides global coverage of major climate-related extreme-weather hazards at high resolution (4x4km) via a [data API](https://climada.ethz.ch/data-api/v1/docs). For select hazards, historic and probabilistic event sets exist for past, present and future climate at distinct time horizons. +You will find a repository containing scientific peer-reviewed articles that explain software components implemented in CLIMADA [here](https://github.com/CLIMADA-project/climada_papers). CLIMADA is divided into two parts (two repositories): @@ -11,30 +12,32 @@ CLIMADA is divided into two parts (two repositories): It is recommend for new users to begin with the core (1) and the [tutorials](https://github.com/CLIMADA-project/climada_python/tree/main/doc/tutorial) therein. -This is the Python (3.8+) version of CLIMADA - please see https://github.com/davidnbresch/climada for backward compatibility (MATLAB). +This is the Python (3.9+) version of CLIMADA - please see [here](https://github.com/davidnbresch/climada) for backward compatibility with the MATLAB version. ## Getting started CLIMADA runs on Windows, macOS and Linux. The released versions of the CLIMADA core can be installed directly through Anaconda: + ```shell conda install -c conda-forge climada ``` + It is **highly recommended** to install CLIMADA into a **separate** Anaconda environment. See the [installation guide](https://climada-python.readthedocs.io/en/latest/guide/install.html) for further information. -Follow the [tutorial](https://climada-python.readthedocs.io/en/latest/tutorial/1_main_climada.html) `climada_python-x.y.z/doc/tutorial/1_main_climada.ipynb` in a Jupyter Notebook to see what can be done with CLIMADA and how. +Follow the [tutorials](https://climada-python.readthedocs.io/en/stable/tutorial/1_main_climada.html) in a Jupyter Notebook to see what can be done with CLIMADA and how. ## Documentation -Documentation is available on Read the Docs: +The online documentation is available on [Read the Docs](https://climada-python.readthedocs.io/en/stable/). The documentation of each release version of CLIMADA can be accessed separately through the drop-down menu at the bottom of the left sidebar. Additionally, the version 'stable' refers to the most recent release (installed via `conda`), and 'latest' refers to the latest unstable development version (the `develop` branch). 
-Note that all the documentations has two versions,'latest' and 'stable', and explicit version numbers, such as 'v3.1.1', in the url path. 'latest' is created from the 'develop' branch and has the latest changes by developers, 'stable' from the latest release. For more details about documentation versions, please have a look at [here](https://readthedocs.org/projects/climada-python/versions/). CLIMADA python: * [online (recommended)](https://climada-python.readthedocs.io/en/latest/) * [PDF file](https://climada-python.readthedocs.io/_/downloads/en/stable/pdf/) +* [core Tutorials on GitHub](https://github.com/CLIMADA-project/climada_python/tree/main/doc/tutorial) CLIMADA petals: @@ -46,23 +49,12 @@ The documentation can also be [built locally](https://climada-python.readthedocs ## Citing CLIMADA -If you use CLIMADA please cite (in general, in particular for academic work) : - -The [used version](https://zenodo.org/search?page=1&size=20&q=climada) - -and/or the following published articles: +See the [Citation Guide](https://climada-python.readthedocs.io/en/latest/misc/citation.html). -Aznar-Siguan, G. and Bresch, D. N., 2019: CLIMADA v1: a global weather and climate risk assessment platform, Geosci. Model Dev., 12, 3085–3097, https://doi.org/10.5194/gmd-12-3085-2019 +Please use the following logo if you are presenting results obtained with or through CLIMADA: -Bresch, D. N. and Aznar-Siguan, G., 2021: CLIMADA v1.4.1: towards a globally consistent adaptation options appraisal tool, Geosci. Model Dev., 14, 351-363, https://doi.org/10.5194/gmd-14-351-2021 - -Please see all CLIMADA-related scientific publications in our [repository of scientific publications](https://github.com/CLIMADA-project/climada_papers) and cite according to your use of select features, be it hazard set(s), exposure(s) ... - -In presentations or other graphical material, as well as in reports etc., where applicable, please add the logo as follows:\ ![https://github.com/CLIMADA-project/climada_python/blob/main/doc/guide/img/CLIMADA_logo_QR.png](https://github.com/CLIMADA-project/climada_python/blob/main/doc/guide/img/CLIMADA_logo_QR.png?raw=true) -As key link, please use https://wcr.ethz.ch/research/climada.html, as it will last and provides a bit of an intro, especially for those not familiar with GitHub - plus a nice CLIMADA infographic towards the bottom of the page - ## Contributing See the [Contribution Guide](CONTRIBUTING.md). diff --git a/doc/misc/citation.rst b/doc/misc/citation.rst new file mode 100644 index 000000000..cfc7d6650 --- /dev/null +++ b/doc/misc/citation.rst @@ -0,0 +1,42 @@ +============== +Citation Guide +============== + +If you use CLIMADA for your work, please cite the appropriate publications. +A list of all CLIMADA code-related articles is available on `Zotero `_ and can be downloaded as a single BibTeX file: :download:`climada_publications.bib` + + +Publications by Module +---------------------- + +If you use specific tools and modules of CLIMADA, please cite the appropriate publications presenting these modules according to the following table: + +.. list-table:: + :widths: 1 3 + :header-rows: 1 + + * - Module or tool used + - Publication to cite + * - *Any* + - The `Zenodo archive `_ of the CLIMADA version you are using + * - :doc:`Impact calculations ` + - Aznar-Siguan, G. and Bresch, D. N. (2019): CLIMADA v1: A global weather and climate risk assessment platform, Geosci. Model Dev., 12, 3085–3097, https://doi.org/10.5194/gmd-12-3085-2019 + * - :doc:`Cost-benefit analysis ` + - Bresch, D. N. 
and Aznar-Siguan, G. (2021): CLIMADA v1.4.1: Towards a globally consistent adaptation options appraisal tool, Geosci. Model Dev., 14, 351–363, https://doi.org/10.5194/gmd-14-351-2021 + * - :doc:`Uncertainty and sensitivity analysis ` + - Kropf, C. M. et al. (2022): Uncertainty and sensitivity analysis for probabilistic weather and climate-risk modelling: an implementation in CLIMADA v.3.1.0. Geosci. Model Dev. 15, 7177–7201, https://doi.org/10.5194/gmd-15-7177-2022 + * - :doc:`Lines and polygons exposures ` *or* `Open Street Map exposures `_ + - Mühlhofer, E., et al. (2023): OpenStreetMap for Multi-Faceted Climate Risk Assessments, https://eartharxiv.org/repository/view/5615/ + * - :doc:`LitPop exposures ` + - Eberenz, S., et al. (2020): Asset exposure data for global physical risk assessment. Earth System Science Data 12, 817–833, https://doi.org/10.3929/ethz-b-000409595 + +Please find the code to reproduce selected CLIMADA-related scientific publications in our `repository of scientific publications `_. + +Links and Logo +-------------- + +In presentations or other graphical material, as well as in reports etc., where applicable, please add the following logo: :download:`climada_logo_QR.png `: + +.. image:: https://github.com/CLIMADA-project/climada_python/blob/main/doc/guide/img/CLIMADA_logo_QR.png?raw=true + +As key link, please use https://wcr.ethz.ch/research/climada.html, as it provides a brief introduction especially for those not familiar with GitHub. diff --git a/doc/misc/climada_publications.bib b/doc/misc/climada_publications.bib new file mode 100644 index 000000000..f00b9cc16 --- /dev/null +++ b/doc/misc/climada_publications.bib @@ -0,0 +1,73 @@ +@article{Aznar-Siguan2019, + title = {{{CLIMADA}} v1: A Global Weather and Climate Risk Assessment Platform}, + shorttitle = {{{CLIMADA}} V1}, + author = {{Aznar-Siguan}, Gabriela and Bresch, David N.}, + year = {2019}, + journal = {Geoscientific Model Development}, + volume = {12}, + number = {7}, + pages = {3085--3097}, + publisher = {{Copernicus GmbH}}, + issn = {1991-959X}, + doi = {10.5194/gmd-12-3085-2019} +} + +@article{Bresch2021, + title = {{{CLIMADA}} v1.4.1: Towards a Globally Consistent Adaptation Options Appraisal Tool}, + shorttitle = {{{CLIMADA}} v1.4.1}, + author = {Bresch, David N. and {Aznar-Siguan}, Gabriela}, + year = {2021}, + journal = {Geoscientific Model Development}, + volume = {14}, + number = {1}, + pages = {351--363}, + publisher = {{Copernicus GmbH}}, + issn = {1991-959X}, + doi = {10.5194/gmd-14-351-2021} +} + +@article{Eberenz2020, + title = {Asset Exposure Data for Global Physical Risk Assessment}, + author = {Eberenz, Samuel and Stocker, Dario and R{\"o}{\"o}sli, Thomas and Bresch, David N.}, + year = {2020}, + journal = {Earth System Science Data}, + volume = {12}, + number = {2}, + pages = {817--833}, + publisher = {{Copernicus GmbH}}, + issn = {1866-3508}, + doi = {10.5194/essd-12-817-2020} +} + +@article{Kropf2022c, + title = {Uncertainty and Sensitivity Analysis for Probabilistic Weather and Climate-Risk Modelling: An Implementation in {{CLIMADA}} v.3.1.0}, + shorttitle = {Uncertainty and Sensitivity Analysis for Probabilistic Weather and Climate-Risk Modelling}, + author = {Kropf, Chahan M. and Ciullo, Alessio and Otth, Laura and Meiler, Simona and Rana, Arun and Schmid, Emanuel and McCaughey, Jamie W. 
and Bresch, David N.}, + year = {2022}, + journal = {Geoscientific Model Development}, + volume = {15}, + number = {18}, + pages = {7177--7201}, + publisher = {{Copernicus GmbH}}, + issn = {1991-959X}, + doi = {10.5194/gmd-15-7177-2022} +} + +@article{Muhlhofer2023b, + title = {{{OpenStreetMap}} for {{Multi-Faceted Climate Risk Assessments}}}, + author = {M{\"u}hlhofer, Evelyn and Kropf, Chahan M. and Bresch, David N. and Koks, Elco E.}, + year = {2023}, + publisher = {{EarthArXiv}}, + url = {https://eartharxiv.org/repository/view/5615/} +} + +@article{Muhlhofer2023c, + title = {A Generalized Natural Hazard Risk Modelling Framework for Infrastructure Failure Cascades}, + author = {M{\"u}hlhofer, Evelyn and Koks, Elco E. and Kropf, Chahan M. and Sansavini, Giovanni and Bresch, David N.}, + year = {2023}, + journal = {Reliability Engineering \& System Safety}, + volume = {234}, + pages = {109194}, + issn = {0951-8320}, + doi = {10.1016/j.ress.2023.109194} +} diff --git a/doc/tutorial/1_main_climada.ipynb b/doc/tutorial/1_main_climada.ipynb index c8a8b269d..8c78b96fa 100644 --- a/doc/tutorial/1_main_climada.ipynb +++ b/doc/tutorial/1_main_climada.ipynb @@ -480,7 +480,7 @@ "\n", "The entity class is a container class that stores exposures and impact functions (vulnerability curves) needed for a risk calculation, and the discount rates and adaptation measures for an adaptation cost-benefit analysis.\n", "\n", - "As with Hazard objects, Entities can be read from files or created through code. The Excel template can be found in `climada_python/data/system/entity_template.xlsx`.\n", + "As with Hazard objects, Entities can be read from files or created through code. The Excel template can be found in `climada_python/climada/data/system/entity_template.xlsx`.\n", "\n", "In this tutorial we will create an Exposure object using the LitPop economic exposure module, and load a pre-defined wind damage function." ] diff --git a/doc/tutorial/climada_entity_DiscRates.ipynb b/doc/tutorial/climada_entity_DiscRates.ipynb index c88cdc18f..3d33797d1 100644 --- a/doc/tutorial/climada_entity_DiscRates.ipynb +++ b/doc/tutorial/climada_entity_DiscRates.ipynb @@ -87,7 +87,7 @@ "source": [ "## Read discount rates of an Excel file\n", "\n", - "Discount rates defined in an excel file following the template provided in sheet `discount` of `climada_python/data/system/entity_template.xlsx` can be ingested directly using the method `from_excel()`." + "Discount rates defined in an excel file following the template provided in sheet `discount` of `climada_python/climada/data/system/entity_template.xlsx` can be ingested directly using the method `from_excel()`." ] }, { diff --git a/doc/tutorial/climada_entity_Exposures.ipynb b/doc/tutorial/climada_entity_Exposures.ipynb index c2c3990dc..ee6dac22f 100644 --- a/doc/tutorial/climada_entity_Exposures.ipynb +++ b/doc/tutorial/climada_entity_Exposures.ipynb @@ -883,7 +883,7 @@ "### Exposures from an excel file\n", "\n", "If you manually collect exposure data, Excel may be your preferred option. \n", - "In this case, it is easiest if you format your data according to the structure provided in the template `climada_python/data/system/entity_template.xlsx`, in the sheet `assets`." + "In this case, it is easiest if you format your data according to the structure provided in the template `climada_python/climada/data/system/entity_template.xlsx`, in the sheet `assets`." 
] }, { diff --git a/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb b/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb index e4c004d9f..4adad4439 100644 --- a/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb +++ b/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb @@ -509,7 +509,7 @@ ], "source": [ "# define hazard\n", - "storms = StormEurope.from_footprints(WS_DEMO_NC, description='test_description')\n", + "storms = StormEurope.from_footprints(WS_DEMO_NC)\n", "# define impact function\n", "impf = ImpfStormEurope.from_welker()\n", "impf_set = ImpactFuncSet([impf])" diff --git a/doc/tutorial/climada_entity_ImpactFuncSet.ipynb b/doc/tutorial/climada_entity_ImpactFuncSet.ipynb index 05989b913..b59aead9f 100644 --- a/doc/tutorial/climada_entity_ImpactFuncSet.ipynb +++ b/doc/tutorial/climada_entity_ImpactFuncSet.ipynb @@ -442,7 +442,7 @@ "source": [ "#### Reading impact functions from an Excel file\n", "\n", - "Impact functions defined in an excel file following the template provided in sheet `impact_functions` of `climada_python/data/system/entity_template.xlsx` can be ingested directly using the method `from_excel()`." + "Impact functions defined in an excel file following the template provided in sheet `impact_functions` of `climada_python/climada/data/system/entity_template.xlsx` can be ingested directly using the method `from_excel()`." ] }, { diff --git a/doc/tutorial/climada_entity_MeasureSet.ipynb b/doc/tutorial/climada_entity_MeasureSet.ipynb index 5170f02c4..aae3aaeac 100644 --- a/doc/tutorial/climada_entity_MeasureSet.ipynb +++ b/doc/tutorial/climada_entity_MeasureSet.ipynb @@ -21,7 +21,7 @@ " * name (str): name of the action\n", " * haz_type (str): related hazard type (peril), e.g. TC\n", " * color_rgb (np.array): integer array of size 3. Gives color code of this measure in RGB\n", - " * cost (float): discounted cost (in same units as assets). Needs to be provided by the user. See the example provided in `climada_python/data/system/entity_template.xlsx` sheets `_measures_details` and `_discounting_sheet` to see how the discounting is done.\n", + " * cost (float): discounted cost (in same units as assets). Needs to be provided by the user. See the example provided in `climada_python/climada/data/system/entity_template.xlsx` sheets `_measures_details` and `_discounting_sheet` to see how the discounting is done.\n", " \n", "Related to a measure's impact:\n", " * hazard_set (str): file name of hazard to use\n", diff --git a/doc/tutorial/climada_hazard_Hazard.ipynb b/doc/tutorial/climada_hazard_Hazard.ipynb index 9c250c4dd..2375b5d97 100644 --- a/doc/tutorial/climada_hazard_Hazard.ipynb +++ b/doc/tutorial/climada_hazard_Hazard.ipynb @@ -224,7 +224,7 @@ " \n", "## Part 2: Read hazards from other data\n", "\n", - "- excel: Hazards can be read from Excel files following the template in `climada_python/data/system/hazard_template.xlsx` using the `from_excel()` method. \n", + "- excel: Hazards can be read from Excel files following the template in `climada_python/climada/data/system/hazard_template.xlsx` using the `from_excel()` method. \n", "- MATLAB: Hazards generated with CLIMADA's MATLAB version (.mat format) can be read using `from_mat()`.\n", "- vector data: Use `Hazard`'s `from_vector`-constructor to read shape data (all formats supported by [fiona](https://fiona.readthedocs.io/en/latest/manual.html)).\n", "- hdf5: Hazards generated with the CLIMADA in Python (.h5 format) can be read using `from_hdf5()`." 
diff --git a/doc/tutorial/climada_hazard_TropCyclone.ipynb b/doc/tutorial/climada_hazard_TropCyclone.ipynb index e6c18d0e9..f7940b866 100644 --- a/doc/tutorial/climada_hazard_TropCyclone.ipynb +++ b/doc/tutorial/climada_hazard_TropCyclone.ipynb @@ -99,9 +99,9 @@ " \n", "## a) Load TC tracks from historical records\n", "\n", - "The best-track historical data from the International Best Track Archive for Climate Stewardship ([IBTrACS](https://www.ncdc.noaa.gov/ibtracs/)) can easily be loaded into CLIMADA to study the historical records of TC events. The constructor `from_ibtracs_netcdf()` generates the `Datasets` for tracks selected by [IBTrACS](https://www.ncdc.noaa.gov/ibtracs/) id, or by basin and year range. To achieve this, it downloads the first time the [IBTrACS data v4 in netcdf format](https://www.ncei.noaa.gov/data/international-best-track-archive-for-climate-stewardship-ibtracs/v04r00/access/netcdf/) and stores it in `climada_python/data/system`. The tracks can be accessed later either using the attribute `data` or using `get_track()`, which allows to select tracks by its name or id. Use the method `append()` to extend the `data` list.\n", + "The best-track historical data from the International Best Track Archive for Climate Stewardship ([IBTrACS](https://www.ncdc.noaa.gov/ibtracs/)) can easily be loaded into CLIMADA to study the historical records of TC events. The constructor `from_ibtracs_netcdf()` generates the `Datasets` for tracks selected by [IBTrACS](https://www.ncdc.noaa.gov/ibtracs/) id, or by basin and year range. To achieve this, it downloads the first time the [IBTrACS data v4 in netcdf format](https://www.ncei.noaa.gov/data/international-best-track-archive-for-climate-stewardship-ibtracs/v04r00/access/netcdf/) and stores it in `~/climada/data/`. The tracks can be accessed later either using the attribute `data` or using `get_track()`, which allows to select tracks by its name or id. 
Use the method `append()` to extend the `data` list.\n", "\n", - "If you get an error downloading the IBTrACS data, try to manually access [https://www.ncei.noaa.gov/data/international-best-track-archive-for-climate-stewardship-ibtracs/v04r00/access/netcdf/](https://www.ncei.noaa.gov/data/international-best-track-archive-for-climate-stewardship-ibtracs/v04r00/access/netcdf/), click on the file `IBTrACS.ALL.v04r00.nc` and copy it to `climada_python/data/system`.\n", + "If you get an error downloading the IBTrACS data, try to manually access [https://www.ncei.noaa.gov/data/international-best-track-archive-for-climate-stewardship-ibtracs/v04r00/access/netcdf/](https://www.ncei.noaa.gov/data/international-best-track-archive-for-climate-stewardship-ibtracs/v04r00/access/netcdf/), click on the file `IBTrACS.ALL.v04r00.nc` and copy it to `~/climada/data/`.\n", "\n", "To visualize the tracks use `plot()`.\n" ] diff --git a/requirements/env_climada.yml b/requirements/env_climada.yml index ec1f11ce4..6f1726f4f 100644 --- a/requirements/env_climada.yml +++ b/requirements/env_climada.yml @@ -17,7 +17,7 @@ dependencies: - netcdf4>=1.6 - numba>=0.57 - openpyxl>=3.1 - - pandas>=1.5,<2.0 # 2.0 removed append and iteritems from DataFrame + - pandas>=1.5 - pandas-datareader>=0.10 - pathos>=0.3 - pint>=0.22 @@ -26,7 +26,7 @@ dependencies: - pycountry>=22.3 - pyepsg>=0.4 - pytables>=3.7 - - python=3.9 + - python>=3.9,<3.12 - pyxlsb>=1.0 - rasterio>=1.3 - requests>=2.31 diff --git a/test_data_api.py b/script/jenkins/test_data_api.py similarity index 96% rename from test_data_api.py rename to script/jenkins/test_data_api.py index ed047a739..42e910374 100644 --- a/test_data_api.py +++ b/script/jenkins/test_data_api.py @@ -122,4 +122,6 @@ def test_icon_centroids_download(self): # Execute Tests if __name__ == '__main__': TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDataAvail) - xmlrunner.XMLTestRunner(output=str(Path(__file__).parent.joinpath('tests_xml'))).run(TESTS) + from sys import argv + outputdir = argv[1] if len(argv) > 1 else str(Path.cwd().joinpath('tests_xml')) + xmlrunner.XMLTestRunner(output=outputdir).run(TESTS) diff --git a/test_notebooks.py b/script/jenkins/test_notebooks.py similarity index 83% rename from test_notebooks.py rename to script/jenkins/test_notebooks.py index 1f89fce34..bb0420194 100644 --- a/test_notebooks.py +++ b/script/jenkins/test_notebooks.py @@ -10,8 +10,6 @@ import climada -NOTEBOOK_DIR = Path(__file__).parent.joinpath('doc', 'tutorial') -'''The path to the notebook directories.''' BOUND_TO_FAIL = '# Note: execution of this cell will fail' '''Cells containing this line will not be executed in the test''' @@ -19,6 +17,7 @@ EXCLUDED_FROM_NOTEBOOK_TEST = ['climada_installation_step_by_step.ipynb'] '''These notebooks are excluded from being tested''' + class NotebookTest(unittest.TestCase): '''Generic TestCase for testing the executability of notebooks @@ -93,7 +92,7 @@ def test_notebook(self): and not ln.startswith('ask_ok(') and not ln.startswith('pool') # by convention Pool objects are called pool and not ln.strip().endswith('?') - and not 'Pool(' in ln # prevent Pool object creation + and not re.search(r'(\W|^)Pool\(', ln) # prevent Pool object creation ]) # execute the python code @@ -117,10 +116,17 @@ def test_notebook(self): os.chdir(cwd) -def main(): +def main(install_dir): + import xmlrunner + + sys.path.append(str(install_dir)) + + notebook_dir = install_dir.joinpath('doc', 'tutorial') + '''The path to the notebook directories.''' + # list notebooks in the 
NOTEBOOK_DIR notebooks = [f.absolute() - for f in sorted(NOTEBOOK_DIR.iterdir()) + for f in sorted(notebook_dir.iterdir()) if os.path.splitext(f)[1] == ('.ipynb') and not f.name in EXCLUDED_FROM_NOTEBOOK_TEST] @@ -132,22 +138,16 @@ class NBTest(NotebookTest): pass setattr(NBTest, test_name, NBTest.test_notebook) suite.addTest(NBTest(test_name, notebook.parent, notebook.name)) - # run the tests depending on the first input argument: None or 'report'. - # write xml reports for 'report' - if sys.argv[1:]: - arg = sys.argv[1] - if arg == 'report': - import xmlrunner - outdirstr = str(Path(__file__).parent.joinpath('tests_xml')) - xmlrunner.XMLTestRunner(output=outdirstr).run(suite) - else: - jd, nb = os.path.split(arg) - unittest.TextTestRunner(verbosity=2).run(NotebookTest('test_notebook', jd, nb)) - # with no argument just run the test - else: - unittest.TextTestRunner(verbosity=2).run(suite) + # run the tests and write xml reports to tests_xml + output_dir = install_dir.joinpath('tests_xml') + xmlrunner.XMLTestRunner(output=str(output_dir)).run(suite) if __name__ == '__main__': - sys.path.append(str(Path.cwd())) - main() + if sys.argv[1] == 'report': + install_dir = Path(sys.argv[2]) if len(sys.argv) > 2 else Path.cwd() + main(install_dir) + + else: + jd, nb = os.path.split(sys.argv[1]) + unittest.TextTestRunner(verbosity=2).run(NotebookTest('test_notebook', jd, nb)) diff --git a/setup.py b/setup.py index eb2e29637..7cccbdad9 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ """ from pathlib import Path -from setuptools import find_packages, setup +from setuptools import setup, find_namespace_packages here = Path(__file__).parent.absolute() @@ -33,7 +33,7 @@ setup( name='climada', - version='4.0.0-dev', + version='4.0.1-dev', description='CLIMADA in Python', @@ -48,9 +48,6 @@ license='OSI Approved :: GNU Lesser General Public License v3 (GPLv3)', classifiers=[ - # 3 - Alpha - # 4 - Beta - # 5 - Production/Stable 'Development Status :: 4 - Beta', 'Programming Language :: Python :: 3.9', 'Topic :: Scientific/Engineering :: Atmospheric Science', @@ -60,7 +57,7 @@ keywords='climate adaptation', - packages=find_packages() + ['data'], + python_requires=">=3.9,<3.12", install_requires=[ 'bottleneck', @@ -104,5 +101,8 @@ "dev": DEPS_DOC + DEPS_TEST }, - include_package_data=True + packages=find_namespace_packages(include=['climada*']), + + setup_requires=['setuptools_scm'], + include_package_data=True, )
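A side note on the `ImpactFreqCurve` change in `climada/engine/impact.py` above: a class-level `np.array([])` default in a dataclass is created once at class-definition time and shared by every instance (and recent Python versions reject such unhashable defaults outright), whereas `field(default_factory=...)` builds a fresh array for each instance. A minimal sketch of the pattern, assuming only NumPy and the standard library (the `Curve` class below is illustrative only, not CLIMADA code):

```python
from dataclasses import dataclass, field

import numpy as np


@dataclass
class Curve:
    """Illustrative stand-in for a dataclass with array-valued attributes."""

    # default_factory is called for each new instance, so every Curve gets its
    # own empty array instead of sharing a single module-level default object
    return_per: np.ndarray = field(default_factory=lambda: np.empty(0))
    impact: np.ndarray = field(default_factory=lambda: np.empty(0))


a, b = Curve(), Curve()
assert a.return_per is not b.return_per  # independent default arrays
```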