diff --git a/climada/test/common_test_fixtures.py b/climada/test/common_test_fixtures.py
deleted file mode 100644
index a78e34a4bd..0000000000
--- a/climada/test/common_test_fixtures.py
+++ /dev/null
@@ -1,205 +0,0 @@
-"""
-This file is part of CLIMADA.
-
-Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS.
-
-CLIMADA is free software: you can redistribute it and/or modify it under the
-terms of the GNU General Public License as published by the Free
-Software Foundation, version 3.
-
-CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along
-with CLIMADA. If not, see .
----
-
-A set of reusable objects for testing purpose.
-
-The objective of this file is to provide minimalistic, understandable and consistent
-default objects for unit and integration testing.
-
-"""
-
-import geopandas as gpd
-import numpy as np
-from scipy.sparse import csr_matrix
-from shapely.geometry import Point
-
-from climada.entity import Exposures, ImpactFunc, ImpactFuncSet
-from climada.hazard import Centroids, Hazard
-from climada.trajectories.snapshot import Snapshot
-
-# ---------------------------------------------------------------------------
-# Coordinate system and metadata
-# ---------------------------------------------------------------------------
-CRS_WGS84 = "EPSG:4326"
-
-# ---------------------------------------------------------------------------
-# Exposure attributes
-# ---------------------------------------------------------------------------
-EXP_DESC = "Test exposure dataset"
-EXP_DESC_LATLON = "Test exposure dataset (lat/lon)"
-EXPOSURE_REF_YEAR = 2020
-EXPOSURE_VALUE_UNIT = "USD"
-VALUES = np.array([0, 1000, 2000, 3000])
-REGIONS = np.array(["A", "A", "B", "B"])
-CATEGORIES = np.array([1, 1, 2, 1])
-
-# Exposure coordinates
-EXP_LONS = np.array([4, 4.5, 4, 4.5])
-EXP_LATS = np.array([45, 45, 45.5, 45.5])
-
-# ---------------------------------------------------------------------------
-# Hazard definition
-# ---------------------------------------------------------------------------
-HAZARD_TYPE = "TEST_HAZARD_TYPE"
-HAZARD_UNIT = "TEST_HAZARD_UNIT"
-
-# Hazard centroid positions
-HAZ_JITTER = 0.1 # To test centroid matching
-HAZ_LONS = EXP_LONS + HAZ_JITTER
-HAZ_LATS = EXP_LATS + HAZ_JITTER
-
-# Hazard events
-EVENT_IDS = np.array([1, 2, 3, 4])
-EVENT_NAMES = ["ev1", "ev2", "ev3", "ev4"]
-DATES = np.array([1, 2, 3, 4])
-
-# Frequency are choosen so that they cumulate nicely
-# to correspond to 100, 50, and 20y return periods (for impacts)
-FREQUENCY = np.array([0.1, 0.03, 0.01, 0.01])
-FREQUENCY_UNIT = "1/year"
-
-# Hazard maximum intensity
-# 100 to match 0 to 100% idea
-# also in line with linear 1:1 impact function
-# for easy mental calculus
-HAZARD_MAX_INTENSITY = 100
-
-# ---------------------------------------------------------------------------
-# Impact function
-# ---------------------------------------------------------------------------
-IMPF_ID = 1
-IMPF_NAME = "IMPF_1"
-
-# ---------------------------------------------------------------------------
-# Future years
-# ---------------------------------------------------------------------------
-EXPOSURE_FUTURE_YEAR = 2040
-
-
-def reusable_minimal_exposures(
- values=VALUES,
- regions=REGIONS,
- group_id=None,
- lon=EXP_LONS,
- lat=EXP_LATS,
- crs=CRS_WGS84,
- desc=EXP_DESC,
- ref_year=EXPOSURE_REF_YEAR,
- value_unit=EXPOSURE_VALUE_UNIT,
- assign_impf=IMPF_ID,
- increase_value_factor=1,
-) -> Exposures:
- data = gpd.GeoDataFrame(
- {
- "value": values * increase_value_factor,
- "region_id": regions,
- f"impf_{HAZARD_TYPE}": assign_impf,
- "geometry": [Point(lon, lat) for lon, lat in zip(lon, lat)],
- },
- crs=crs,
- )
- if group_id is not None:
- data["group_id"] = group_id
- return Exposures(
- data=data,
- description=desc,
- ref_year=ref_year,
- value_unit=value_unit,
- )
-
-
-def reusable_intensity_mat(max_intensity=HAZARD_MAX_INTENSITY):
- # Choosen such that:
- # - 1st event has 0 intensity
- # - 2nd event has max intensity in first exposure point (defaulting to 0 value)
- # - 3rd event has 1/2* of max intensity in second centroid
- # - 4th event has 1/4* of max intensity everywhere
- # *: So that you can double intensity of the hazard and expect double impacts
- return csr_matrix(
- [
- [0, 0, 0, 0],
- [max_intensity, 0, 0, 0],
- [0, max_intensity / 2, 0, 0],
- [
- max_intensity / 4,
- max_intensity / 4,
- max_intensity / 4,
- max_intensity / 4,
- ],
- ]
- )
-
-
-def reusable_minimal_hazard(
- haz_type=HAZARD_TYPE,
- units=HAZARD_UNIT,
- lat=HAZ_LATS,
- lon=HAZ_LONS,
- crs=CRS_WGS84,
- event_id=EVENT_IDS,
- event_name=EVENT_NAMES,
- date=DATES,
- frequency=FREQUENCY,
- frequency_unit=FREQUENCY_UNIT,
- intensity=None,
- intensity_factor=1,
-) -> Hazard:
- intensity = reusable_intensity_mat() if intensity is None else intensity
- intensity *= intensity_factor
- return Hazard(
- haz_type=haz_type,
- units=units,
- centroids=Centroids(lat=lat, lon=lon, crs=crs),
- event_id=event_id,
- event_name=event_name,
- date=date,
- frequency=frequency,
- frequency_unit=frequency_unit,
- intensity=intensity,
- )
-
-
-def reusable_minimal_impfset(
- hazard=None, name=IMPF_NAME, impf_id=IMPF_ID, max_intensity=HAZARD_MAX_INTENSITY
-):
- hazard = reusable_minimal_hazard() if hazard is None else hazard
- return ImpactFuncSet(
- [
- ImpactFunc(
- haz_type=hazard.haz_type,
- intensity_unit=hazard.units,
- name=name,
- intensity=np.array([0, max_intensity / 2, max_intensity]),
- mdd=np.array([0, 0.5, 1]),
- paa=np.array([1, 1, 1]),
- id=impf_id,
- )
- ]
- )
-
-
-def reusable_snapshot(
- hazard_intensity_increase_factor=1,
- exposure_value_increase_factor=1,
- date=EXPOSURE_REF_YEAR,
-):
- exposures = reusable_minimal_exposures(
- increase_value_factor=exposure_value_increase_factor
- )
- hazard = reusable_minimal_hazard(intensity_factor=hazard_intensity_increase_factor)
- impfset = reusable_minimal_impfset()
- return Snapshot(exposure=exposures, hazard=hazard, impfset=impfset, date=str(date))
diff --git a/climada/test/conftest.py b/climada/test/conftest.py
new file mode 100644
index 0000000000..57181b8217
--- /dev/null
+++ b/climada/test/conftest.py
@@ -0,0 +1,328 @@
+"""
+This file is part of CLIMADA.
+
+Copyright (C) 2017 ETH Zurich, CLIMADA contributors listed in AUTHORS.
+
+CLIMADA is free software: you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free
+Software Foundation, version 3.
+
+CLIMADA is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with CLIMADA. If not, see .
+---
+
+A set of reusable fixtures for testing purpose.
+
+The objective of this file is to provide minimalistic, understandable and consistent
+default objects for unit and integration testing.
+
+Values are chosen such that:
+ - Exposure value of the first point is 0. (First location should always have 0 impacts)
+ - Category / Group id of points is 1, except the third point (category 2, value 2000) and the sixth (category 3, value 5000)
+ - Hazard centroids are the exposure centroids shifted by `HAZ_JITTER` on both lon and lat.
+ - There are 5 events, with frequencies == 0.03, 0.01, 0.006, 0.004, 0,
+ such that impacts for RP250, 100 and 50 and 20 are at_event,
+ (freq sorted cumulate to 1/250, 1/100, 1/50 and 1/20).
+ - Hazard intensity is:
+ * Event 1: zero everywhere (always no impact)
+ * Event 2: max intensity at first centroid (also always no impact (first centroid is 0))
+ * Event 3: half max intensity at second centroid (impact == half second centroid)
+ * Event 4: quarter max intensity everywhere (impact == 1/4 total value)
+ * Event 5: max intensity everywhere (but zero frequency)
+ With max intensity set at 100
+ - Impact function is the "identity function", x intensity is x% damages
+ - Impact values should be:
+ * AAI = 18 = 1000*1/2*0.006+(1000+2000+3000+4000+5000)*0.25*0.004
+ * RP20 = event1 = 0
+ * RP50 = event2 = 0
+ * RP100 = event3 = 500 = 1000*1/2
+ * RP250 = event4 = 3750 = (1000+2000+3000+4000+5000)*0.25
+
+"""
+
+import geopandas as gpd
+import numpy as np
+import pytest
+from scipy.sparse import csr_matrix
+from shapely.geometry import Point
+
+from climada.entity import Exposures, ImpactFunc, ImpactFuncSet
+from climada.hazard import Centroids, Hazard
+
+# ---------------------------------------------------------------------------
+# Coordinate system and metadata
+# ---------------------------------------------------------------------------
+CRS_WGS84 = "EPSG:4326"
+
+# ---------------------------------------------------------------------------
+# Exposure attributes
+# ---------------------------------------------------------------------------
+EXP_DESC = "Test exposure dataset"
+EXPOSURE_REF_YEAR = 2020
+EXPOSURE_VALUE_UNIT = "USD"
+VALUES = np.array([0, 1000, 2000, 3000, 4000, 5000])
+CATEGORIES = np.array([1, 1, 2, 1, 1, 3])
+
+# Exposure coordinates
+EXP_LONS = np.array([4, 4.25, 4.5, 4, 4.25, 4.5])
+EXP_LATS = np.array([45, 45, 45, 45.25, 45.25, 45.25])
+
+# ---------------------------------------------------------------------------
+# Hazard definition
+# ---------------------------------------------------------------------------
+HAZARD_TYPE = "TEST_HAZARD_TYPE"
+HAZARD_UNIT = "TEST_HAZARD_UNIT"
+
+# Hazard centroid positions
+HAZ_JITTER = 0.1 # To test centroid matching
+HAZ_LONS = EXP_LONS + HAZ_JITTER
+HAZ_LATS = EXP_LATS + HAZ_JITTER
+
+# Hazard events
+EVENT_IDS = np.array([1, 2, 3, 4, 5])
+EVENT_NAMES = ["ev1", "ev2", "ev3", "ev4", "ev5"]
+DATES = np.array([1, 2, 3, 4, 5])
+
+# Frequencies are chosen so that they cumulate nicely
+# to correspond to 250, 100, 50, and 20y return periods (for impacts)
+FREQUENCY = np.array([0.03, 0.01, 0.006, 0.004, 0.0])
+FREQUENCY_UNIT = "1/year"
+
+# Hazard maximum intensity
+# 100 to match 0 to 100% idea
+# also in line with linear 1:1 impact function
+# for easy mental calculus
+HAZARD_MAX_INTENSITY = 100
+
+# ---------------------------------------------------------------------------
+# Impact function
+# ---------------------------------------------------------------------------
+IMPF_ID = 1
+IMPF_NAME = "IMPF_1"
+
+# Sanity checks
+for const in [VALUES, CATEGORIES, EXP_LONS, EXP_LATS]:
+ assert len(const) == len(
+ VALUES
+    ), "VALUES, CATEGORIES, EXP_LONS, EXP_LATS should all have the same lengths."
+
+for const in [EVENT_IDS, EVENT_NAMES, DATES, FREQUENCY]:
+ assert len(const) == len(
+ EVENT_IDS
+ ), "EVENT_IDS, EVENT_NAMES, DATES, FREQUENCY should all have the same lengths."
+
+
+@pytest.fixture(scope="session")
+def exposure_values():
+ return VALUES.copy()
+
+
+@pytest.fixture(scope="session")
+def categories():
+ return CATEGORIES.copy()
+
+
+@pytest.fixture(scope="session")
+def exposure_geometry():
+ return [Point(lon, lat) for lon, lat in zip(EXP_LONS, EXP_LATS)]
+
+
+@pytest.fixture(scope="session")
+def exposures_factory(
+ exposure_values,
+ exposure_geometry,
+):
+ def _make_exposures(
+ value_factor=1.0,
+ ref_year=EXPOSURE_REF_YEAR,
+ hazard_type=HAZARD_TYPE,
+ group_id=None,
+ ):
+ gdf = gpd.GeoDataFrame(
+ {
+ "value": exposure_values * value_factor,
+ f"impf_{hazard_type}": IMPF_ID,
+ "geometry": exposure_geometry,
+ },
+ crs=CRS_WGS84,
+ )
+ if group_id is not None:
+ gdf["group_id"] = group_id
+
+ return Exposures(
+ data=gdf,
+ description=EXP_DESC,
+ ref_year=ref_year,
+ value_unit=EXPOSURE_VALUE_UNIT,
+ )
+
+ return _make_exposures
+
+
+@pytest.fixture(scope="session")
+def exposures(exposures_factory):
+ return exposures_factory()
+
+
+@pytest.fixture(scope="session")
+def hazard_frequency_factory():
+ base = FREQUENCY
+
+ def _make_frequency(scale=1.0):
+ return base * scale
+
+ return _make_frequency
+
+
+@pytest.fixture(scope="session")
+def hazard_frequency(hazard_frequency_factory):
+ return hazard_frequency_factory()
+
+
+@pytest.fixture(scope="session")
+def hazard_intensity_factory():
+ """
+ Intensity matrix designed for analytical expectations:
+ - Event 1: zero
+ - Event 2: max intensity at first centroid
+ - Event 3: half max intensity at second centroid
+    - Event 4: quarter max intensity everywhere; Event 5: max intensity everywhere
+ """
+ base = csr_matrix(
+ [
+ [0, 0, 0, 0, 0, 0],
+ [HAZARD_MAX_INTENSITY, 0, 0, 0, 0, 0],
+ [0, HAZARD_MAX_INTENSITY / 2, 0, 0, 0, 0],
+ [
+ HAZARD_MAX_INTENSITY / 4,
+ HAZARD_MAX_INTENSITY / 4,
+ HAZARD_MAX_INTENSITY / 4,
+ HAZARD_MAX_INTENSITY / 4,
+ HAZARD_MAX_INTENSITY / 4,
+ HAZARD_MAX_INTENSITY / 4,
+ ],
+ [
+ HAZARD_MAX_INTENSITY,
+ HAZARD_MAX_INTENSITY,
+ HAZARD_MAX_INTENSITY,
+ HAZARD_MAX_INTENSITY,
+ HAZARD_MAX_INTENSITY,
+ HAZARD_MAX_INTENSITY,
+ ],
+ ]
+ )
+
+ def _make_intensity(scale=1.0):
+ return base * scale
+
+ return _make_intensity
+
+
+@pytest.fixture(scope="session")
+def hazard_intensity_matrix(hazard_intensity_factory):
+ return hazard_intensity_factory()
+
+
+@pytest.fixture(scope="session")
+def centroids():
+ return Centroids(lat=HAZ_LATS, lon=HAZ_LONS, crs=CRS_WGS84)
+
+
+@pytest.fixture(scope="session")
+def hazard_factory(
+ hazard_intensity_factory,
+ hazard_frequency_factory,
+ centroids,
+):
+ def _make_hazard(
+ intensity_scale=1.0,
+ frequency_scale=1.0,
+ hazard_type=HAZARD_TYPE,
+ hazard_unit=HAZARD_UNIT,
+ ):
+ return Hazard(
+ haz_type=hazard_type,
+ units=hazard_unit,
+ centroids=centroids,
+ event_id=EVENT_IDS,
+ event_name=EVENT_NAMES,
+ date=DATES,
+ frequency=hazard_frequency_factory(scale=frequency_scale),
+ frequency_unit=FREQUENCY_UNIT,
+ intensity=hazard_intensity_factory(scale=intensity_scale),
+ )
+
+ return _make_hazard
+
+
+@pytest.fixture(scope="session")
+def hazard(hazard_factory):
+ return hazard_factory()
+
+
+@pytest.fixture(scope="session")
+def impf_factory():
+ def _make_impf(
+ paa_scale=1.0,
+ max_intensity=HAZARD_MAX_INTENSITY,
+ hazard_type=HAZARD_TYPE,
+ hazard_unit=HAZARD_UNIT,
+ impf_id=IMPF_ID,
+ negative_intensities=False,
+ ):
+ intensity = np.array([0, max_intensity / 2, max_intensity])
+ mdd = np.array([0, 0.5, 1])
+ if negative_intensities:
+ intensity = np.flip(intensity) * -1
+ mdd = np.flip(mdd)
+ return ImpactFunc(
+ haz_type=hazard_type,
+ intensity_unit=hazard_unit,
+ name=IMPF_NAME,
+ intensity=intensity,
+ mdd=mdd,
+ paa=np.array([1, 1, 1]) * paa_scale,
+ id=impf_id,
+ )
+
+ return _make_impf
+
+
+@pytest.fixture(scope="session")
+def linear_impact_function(impf_factory):
+ return impf_factory()
+
+
+@pytest.fixture(scope="session")
+def impfset_factory(impf_factory):
+ def _make_impfset(
+ paa_scale=1.0,
+ max_intensity=HAZARD_MAX_INTENSITY,
+ hazard_type=HAZARD_TYPE,
+ hazard_unit=HAZARD_UNIT,
+ impf_id=IMPF_ID,
+ negative_intensities=False,
+ ):
+ return ImpactFuncSet(
+ [
+ impf_factory(
+ paa_scale,
+ max_intensity,
+ hazard_type,
+ hazard_unit,
+ impf_id,
+ negative_intensities,
+ )
+ ]
+ )
+
+ return _make_impfset
+
+
+@pytest.fixture(scope="session")
+def impfset(impfset_factory):
+ return impfset_factory()
diff --git a/doc/development/Guide_Testing.ipynb b/doc/development/Guide_Testing.ipynb
index 12f59efb3f..aeba22d902 100644
--- a/doc/development/Guide_Testing.ipynb
+++ b/doc/development/Guide_Testing.ipynb
@@ -20,7 +20,9 @@
"Writing tests is work. As a matter of facts, it can be a _lot_ of work, depending on the program often more than writing the original code.\\\n",
"Luckily, it essentially follows always the same basic procedure and a there are a lot of tools and frameworks available to facilitate this work.\n",
"\n",
- "In CLIMADA we use the Python in-built _test runner_ [pytest](https://docs.pytest.org/en/7.1.x/index.html) for execution of the tests."
+ "In CLIMADA we use the Python in-built _test runner_ [pytest](https://docs.pytest.org/en/7.1.x/index.html) for execution of the tests.\n",
+ "\n",
+ "We now leverage `pytest` fixtures for defining the data and context used by tests. Please read [how to use fixtures for testing in CLIMADA](Guide_test_fixtures.ipynb)."
]
},
{
@@ -53,7 +55,6 @@
},
{
"cell_type": "markdown",
- "id": "5819e8c6",
"metadata": {},
"source": [
"### Basic Test Procedure\n",
@@ -277,7 +278,9 @@
"source": [
"### Test Configuration\n",
"\n",
- "Use the configuration file `climada.config` in the installation directory to define file paths and external resources used during tests (see the [Constants and Configuration Guide](./Guide_Configuration.ipynb))."
+ "Integration tests should make use of the fixtures defined in `climada/test/conftest.py`. Learn how to do that in [how to use fixtures for testing in CLIMADA](Guide_test_fixtures.ipynb).\n",
+ "\n",
+ "Test data can also use paths defined in the configuration file `climada.config` (in the installation directory) which define file paths and external resources to be used during tests (see the [Constants and Configuration Guide](./Guide_Configuration.ipynb))."
]
},
{
@@ -334,9 +337,9 @@
],
"metadata": {
"kernelspec": {
- "display_name": "Python 3 (ipykernel)",
+ "display_name": "Python [conda env:climada_env_dev]",
"language": "python",
- "name": "python3"
+ "name": "conda-env-climada_env_dev-py"
},
"language_info": {
"codemirror_mode": {
@@ -348,7 +351,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.12.6"
+ "version": "3.11.15"
},
"vscode": {
"interpreter": {
diff --git a/doc/development/Guide_test_fixtures.ipynb b/doc/development/Guide_test_fixtures.ipynb
new file mode 100644
index 0000000000..88b1b750c1
--- /dev/null
+++ b/doc/development/Guide_test_fixtures.ipynb
@@ -0,0 +1,175 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "359d0d32-68c5-4e8d-8b34-2c5b2a0cad21",
+ "metadata": {},
+ "source": [
+ "# Testing with pytest Fixtures\n",
+ "\n",
+ "**Fixtures** are pytest's way of setting up the data and objects that tests need. Think of them as reusable \"preparation steps\" that run before your tests.\n",
+ "\n",
+ "Instead of copy-pasting setup code into every test function, you define a fixture once and let pytest inject it automatically in tests:\n",
+ "```python\n",
+ "import pytest\n",
+ "\n",
+ "@pytest.fixture\n",
+ "def sample_user():\n",
+ " return {\"name\": \"Alice\", \"role\": \"admin\"}\n",
+ "\n",
+ "def test_user_has_name(sample_user):\n",
+ " assert sample_user[\"name\"] == \"Alice\"\n",
+ "```\n",
+ "\n",
+ "Fixtures can also handle **teardown** (cleanup after a test), be **scoped** to run once per module or session, and be **parametrized** to run the same test against multiple inputs. They aim at keeping the test suite easy to maintain.\n",
+ "\n",
+    "## Useful references\n",
+ "- [pytest fixtures — official docs](https://docs.pytest.org/en/stable/reference/fixtures.html)\n",
+ "- [How to use fixtures — pytest how-to guide](https://docs.pytest.org/en/stable/how-to/fixtures.html)\n",
+ "- [A Complete Guide to Pytest Fixtures — Better Stack](https://betterstack.com/community/guides/testing/pytest-fixtures-guide/#step-6-parametrizing-fixtures) *(includes parametrizing fixtures)*"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "62247b28-05e5-46a5-8aca-c35b94d64216",
+ "metadata": {},
+ "source": [
+ "## The `conftest.py` File\n",
+ "\n",
+ "Pytest has a special file called `conftest.py` that acts as a **shared fixture library**. Any fixture defined there is automatically available to every test in the same directory and all its subdirectories.\n",
+ "\n",
+ "### How it works\n",
+ "```\n",
+ "climada/\n",
+ "├── test/\n",
+ "| ├── conftest.py ← fixtures here are available to all tests in climada/test/\n",
+ "| ├── test_engine.py\n",
+ "| └── ...\n",
+ "├── entity/exposures/test/\n",
+ " |── conftest.py ← fixtures here are available only within climada/entity/exposures/test/\n",
+ " └── test_exposures.py\n",
+ "...\n",
+ "```\n",
+ "\n",
+ "Define your fixture in the corresponding `conftest.py` or in your test file directly depending on its specificity:\n",
+ "```python\n",
+ "import pytest\n",
+    "from climada.entity.exposures import Exposures\n",
+ "\n",
+ "@pytest.fixture\n",
+ "def empty_exposures():\n",
+    "    return Exposures()\n",
+ "```\n",
+ "\n",
+ "Then just use it by name in any test that requires it.\n",
+ "```python\n",
+ "def test_empty_exposures(empty_exposures): # pytest injects the fixture automatically\n",
+    "    assert empty_exposures.gdf.empty\n",
+ "```\n",
+ "\n",
+ "> **Never import fixtures directly.** pytest's injection mechanism won't work properly with imported fixtures, and you may get confusing errors. Just use the fixture name as a function argument and let pytest handle the rest.\n",
+ "\n",
+ "### When to use `conftest.py`\n",
+ "\n",
+ "| Put fixtures in `conftest.py` when... | Keep fixtures in the test file when... |\n",
+ "|---|---|\n",
+ "| Multiple test files need them | Only one test file uses them |\n",
+ "| They set up shared resources | They are very specific to one test scenario |"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "550e12a9-2ab0-4f27-9276-ebc412a78c0b",
+ "metadata": {},
+ "source": [
+ "## How to use `climada/test/conftest.py` for integration tests.\n",
+ "\n",
+ "Our integration test `conftest.py` defines a set of **ready-made CLIMADA objects** (exposures, hazard, impact functions) with values chosen so that expected results are rather easy to compute by hand and minimalistic. All fixtures are `session`-scoped, meaning they are created once and shared across the entire test session.\n",
+ "\n",
+ "Here is what is available at a glance:\n",
+ "\n",
+ "| Fixture | What it gives you |\n",
+ "|---|---|\n",
+ "| `exposures` | An `Exposures` object with 6 points and values `[0, 1000, 2000, 3000, 4000, 5000]` |\n",
+ "| `hazard` | A `Hazard` with 5 events and \"helpful\" intensities and frequencies (see below) |\n",
+ "| `linear_impact_function` | An `ImpactFunc` where intensity % == damage % (identity) |\n",
+ "| `impfset` | An `ImpactFuncSet` wrapping the linear impact function |\n",
+ "| `centroids` | Hazard centroids (slightly offset from exposure points (`+0.1°`)) |\n",
+ "\n",
+ "### Using fixtures directly for simple assertions\n",
+ "\n",
+ "When you just need a standard object to test against, request the fixture by name:\n",
+ "```python\n",
+ "def test_impact_aai(exposures, hazard, impfset):\n",
+ " impact = ImpactCalc(exposures, impfset, hazard).impact()\n",
+ " assert impact.aai_agg == pytest.approx(18) # analytically known value\n",
+ "```\n",
+ "\n",
+    "Values of the default fixtures were chosen such that impacts are rather easy to compute by hand.\n",
+ "This is documented at the top of `conftest.py`, but here are some key design choices:\n",
+ "\n",
+    "- There are 5 events, with frequencies == 0.03, 0.01, 0.006, 0.004, 0,\n",
+ " such that impacts for RP250, 100 and 50 and 20 correspond to `at_event`,\n",
+ " (sorted frequencies cumulate to 1/250, 1/100, 1/50 and 1/20).\n",
+ "- Hazard intensity is:\n",
+ " * Event 1: zero everywhere (always no impact)\n",
+ " * Event 2: max intensity (100) at first centroid (also always no impact (first centroid is 0))\n",
+ " * Event 3: half max intensity at second centroid (impact == half second centroid)\n",
+ " * Event 4: quarter max intensity everywhere (impact == 1/4 total value)\n",
+ " * Event 5: max intensity everywhere (but zero frequency)\n",
+ "\n",
+ "This results in the following expected values:\n",
+ "\n",
+ "| Metric | Expected value | Why |\n",
+ "|---|---|---|\n",
+ "| AAI | `18` | Events 3 & 4 weighted by frequency |\n",
+ "| RP 20 & 50 | `0` | Events 1 & 2 produce zero impact |\n",
+ "| RP 100 | `500` | Event 3: half intensity on second point (value 1000) |\n",
+ "| RP 250 | `3750` | Event 4: quarter intensity on all points |\n",
+ "\n",
+ "> Note: The overview above reflects the `conftest.py` file at the time of writing. If you notice any discrepancy, the docstring at the top of `conftest.py` is the authoritative source.\n",
+ "\n",
+ "### Using factories for custom scenarios\n",
+ "\n",
+ "When your test needs a variation (e.g. scaled intensity, a different hazard type, group IDs), you can make use of the `_factory` fixtures. Each factory is a callable that accepts keyword arguments:\n",
+ "\n",
+ "```python\n",
+ "def test_scaled_hazard(hazard_factory):\n",
+ " stronger_hazard = hazard_factory(intensity_scale=1.5)\n",
+    "    # intensities are scaled by 1.5, so expected impacts scale accordingly\n",
+ "\n",
+ "def test_grouped_exposure(exposures_factory):\n",
+ " exp = exposures_factory(group_id=np.array([1, 1, 2, 1, 1, 3]))\n",
+ " # exposure with group_id column populated\n",
+ "\n",
+ "def test_custom_impf(impf_factory):\n",
+ " impf = impf_factory(paa_scale=0.5)\n",
+ " # PAA halved\n",
+ "```\n",
+ "\n",
+ "> **Tip:** Prefer the direct fixtures (`exposures`, `hazard`, …) when the default setup is sufficient. Reach for factories only when your test specifically targets behaviour that depends on a variation — this keeps tests focused and their intent clear."
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python [conda env:climada_env_dev]",
+ "language": "python",
+ "name": "conda-env-climada_env_dev-py"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.15"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/doc/development/index.rst b/doc/development/index.rst
index a5e5f90c68..0249d35eba 100644
--- a/doc/development/index.rst
+++ b/doc/development/index.rst
@@ -25,6 +25,7 @@ If you are interested in contributing to CLIMADA, we recommand you to start with
CLIMADA Configuration convention
Documenting your code
Writing tests for your code
+ Using fixtures for writing tests
Guide_Review
Guide_Euler
Authors <../misc/AUTHORS>