From b901f096ffb247d9cf10d89fe111fe76d9470940 Mon Sep 17 00:00:00 2001 From: Andres Perez Hortal <16256571+aperezhortal@users.noreply.github.com> Date: Mon, 3 Jan 2022 21:25:55 -0500 Subject: [PATCH 1/9] Pull sphinx-gallery from external repository --- ...te_tutorial_notebooks_in_external_repo.yml | 88 ++ .gitignore | 3 + doc/requirements.txt | 1 - doc/source/conf.py | 178 ++-- doc/source/user_guide/index.rst | 2 +- examples/LK_buffer_mask.py | 239 ----- examples/README.txt | 10 - examples/advection_correction.py | 152 ---- examples/anvil_nowcast.py | 217 ----- examples/data_transformations.py | 215 ----- examples/linda_nowcasts.py | 185 ---- examples/my_first_nowcast.ipynb | 832 ------------------ examples/optical_flow_methods_convergence.py | 375 -------- examples/plot_cascade_decomposition.py | 143 --- examples/plot_ensemble_verification.py | 191 ---- examples/plot_extrapolation_nowcast.py | 139 --- examples/plot_noise_generators.py | 174 ---- examples/plot_optical_flow.py | 144 --- examples/plot_steps_nowcast.py | 197 ----- examples/probability_forecast.py | 148 ---- examples/rainfarm_downscale.py | 139 --- .../thunderstorm_detection_and_tracking.py | 166 ---- 22 files changed, 152 insertions(+), 3786 deletions(-) create mode 100644 .github/workflows/execute_tutorial_notebooks_in_external_repo.yml delete mode 100644 examples/LK_buffer_mask.py delete mode 100644 examples/README.txt delete mode 100644 examples/advection_correction.py delete mode 100644 examples/anvil_nowcast.py delete mode 100644 examples/data_transformations.py delete mode 100644 examples/linda_nowcasts.py delete mode 100644 examples/my_first_nowcast.ipynb delete mode 100644 examples/optical_flow_methods_convergence.py delete mode 100644 examples/plot_cascade_decomposition.py delete mode 100644 examples/plot_ensemble_verification.py delete mode 100644 examples/plot_extrapolation_nowcast.py delete mode 100644 examples/plot_noise_generators.py delete mode 100644 examples/plot_optical_flow.py 
delete mode 100644 examples/plot_steps_nowcast.py delete mode 100644 examples/probability_forecast.py delete mode 100644 examples/rainfarm_downscale.py delete mode 100644 examples/thunderstorm_detection_and_tracking.py diff --git a/.github/workflows/execute_tutorial_notebooks_in_external_repo.yml b/.github/workflows/execute_tutorial_notebooks_in_external_repo.yml new file mode 100644 index 000000000..229e4f666 --- /dev/null +++ b/.github/workflows/execute_tutorial_notebooks_in_external_repo.yml @@ -0,0 +1,88 @@ +######################################################################################## +# Trigger the notebooks' execution in the external repository. +# +# If the "main" branch triggered this event, then the nb execution is triggered in the +# "main" branch, otherwise in the dev branch. +# +# Naming convention used in this workflow: +# +# "main repository": Project repository with the python package and the documentation. +# "linked repository": Repository where the example gallery of jupyter notebooks is +# stored. +######################################################################################## +name: Trigger notebook execution in the external repository + +env: + # URL for the external repository linked with the notebooks in this project. + LINKED_REPO: pysteps/pysteps_tutorials + THIS_REPO: pysteps/pysteps + +on: + push: + branches: [ main, dev ] + pull_request: + branches: [ main, dev ] + +jobs: + trigger_nb_execution: + name: Trigger the execution in the external repository with the notebooks + # The triggering is done by pushing an empty commit to the linked repository. + # The commit message contains the Hash of the main repository's commit that + # trigger the event. 
+ runs-on: "ubuntu-latest" + + defaults: + run: + shell: bash -l {0} + + steps: + - name: Find the branch that trigger the event + id: get_dest_branch + run: | + if [[ "${GITHUB_EVENT_NAME}" == "push" ]]; then + event_branch=$(echo ${GITHUB_REF##*/}) + elif [[ "${GITHUB_EVENT_NAME}" == "pull_request" ]]; then + event_branch=$(echo $GITHUB_BASE_REF) + else + event_branch=unknown + fi + echo "::set-output name=event_branch::${event_branch}" + + # We only push to latest or to dev. + if [[ "${event_branch}" == "main" ]]; then + echo "::set-output name=dest_branch::main" + else + echo "::set-output name=dest_branch::dev" + fi + + - name: Print debug information + env: + DEST_BRANCH: ${{steps.get_dest_branch.outputs.dest_branch}} + EVENT_BRANCH: ${{steps.get_dest_branch.outputs.event_branch}} + run: | + echo "EVENT_BRANCH=${EVENT_BRANCH}" + echo "GITHUB_SHA=${GITHUB_SHA}" + echo "DEST_BRANCH=${DEST_BRANCH}" + + - name: Clone linked repository + uses: actions/checkout@v2 + with: + persist-credentials: false # Avoid using the GITHUB_TOKEN instead of the personal access token + fetch-depth: 0 # Avoid errors pushing refs to the destination repository. 
+        repository: ${{ env.LINKED_REPO }} +        ref: ${{steps.get_dest_branch.outputs.dest_branch}} + +    - name: Create empty commit in linked repo +      run: | +        git config user.name 'github-actions[bot]' +        git config user.email 'github-actions[bot]@users.noreply.github.com' +        git commit --allow-empty \ +          -m "Triggering nb execution from ${GITHUB_SHA::8}" \ +          -m "https://github.com/${THIS_REPO}/commit/${GITHUB_SHA}" + +    - name: Push the empty commit to trigger the workflow +      uses: ad-m/github-push-action@master +      with: +        repository: ${{ env.LINKED_REPO }} +        github_token: ${{ secrets.LINKED_TOKEN }} +        branch: ${{steps.get_dest_branch.outputs.dest_branch}} diff --git a/.gitignore b/.gitignore index f26dba4ad..db6214500 100644 --- a/.gitignore +++ b/.gitignore @@ -85,3 +85,6 @@ venv.bak/ # mypy .mypy_cache/ +/doc/source/user_guide/examples_gallery/ +/doc/source/examples_gallery/ +/examples/ diff --git a/doc/requirements.txt b/doc/requirements.txt index 42eda9fad..ecf9c4591 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -6,4 +6,3 @@ sphinx_rtd_theme sphinx_gallery scikit-image pandas - diff --git a/doc/source/conf.py b/doc/source/conf.py index b863a47ef..8fdec6a2a 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,30 +1,21 @@ -# -*- coding: utf-8 -*- - -# All configuration values have a default; values that are commented out -# serve to show the default. - +import json import os +import shutil import subprocess import sys +import tempfile +from pathlib import Path -import json from jsmin import jsmin -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here.
-if "READTHEDOCS" not in os.environ: - sys.path.insert(1, os.path.abspath("../../")) - -# -- General configuration ------------------------------------------------ +# -- Global variables ------------------------------------------------ +DOCS_DIR = Path(__file__).parent / ".." +PROJECT_ROOT_DIR = DOCS_DIR / ".." +EXAMPLES_GALLERY_REPOSITORY = "https://github.com/pySTEPS/pysteps_tutorials.git" -# If your documentation needs a minimal Sphinx version, state it here. -# -needs_sphinx = "1.6" +sys.path.insert(1, os.path.abspath(str(PROJECT_ROOT_DIR))) -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. +# -- General configuration ------------------------------------------------ extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", @@ -36,73 +27,41 @@ ] bibtex_bibfiles = ["references.bib"] - -# numpydoc_show_class_members = False -# Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] source_suffix = ".rst" - -# The master toctree document. master_doc = "index" - -# General information about the project. +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] +todo_include_todos = False project = "pysteps" copyright = "2020, PySteps developers" author = "PySteps developers" -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# def get_version(): - """ - Returns project version as string from 'git describe' command. 
- """ - + """Returns project version as string from 'git describe' command.""" from subprocess import check_output _version = check_output(["git", "describe", "--tags", "--always"]) - if _version: - return _version.decode("utf-8") + _version = _version.decode("utf-8") else: - return "X.Y" - + _version = "X.Y" -# The short X.Y version. -version = get_version().lstrip("v").rstrip().split("-")[0] + return _version.lstrip("v").rstrip().split("-")[0] -# The full version, including alpha/beta/rc tags. -release = version -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None +release = get_version() -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# If true, `todo` and `todoList` produce output, else they produce nothing. 
-todo_include_todos = False +# Sphinx-Gallery options +sphinx_gallery_conf = { + "examples_dirs": "../../examples", # path to your example scripts + "gallery_dirs": "user_guide/examples_gallery", # path where to save gallery generated examples + "filename_pattern": r"/*\.py", # Include all the files in the examples dir + 'plot_gallery': False, # Do not execute the examples +} # -- Read the Docs build -------------------------------------------------- - - def set_root(): fn = os.path.abspath(os.path.join("..", "..", "pysteps", "pystepsrc")) with open(fn, "r") as f: @@ -119,56 +78,52 @@ def set_root(): json.dump(rcparams, f, indent=4) -if "READTHEDOCS" in os.environ: - repourl = "https://github.com/pySTEPS/pysteps-data.git" - dir = os.path.join(os.getcwd(), "..", "..", "pysteps-data") - dir = os.path.abspath(dir) - subprocess.check_call(["rm", "-rf", dir]) - subprocess.check_call(["git", "clone", repourl, dir]) - set_root() - pystepsrc = os.path.abspath(os.path.join("..", "..", "pystepsrc.rtd")) - os.environ["PYSTEPSRC"] = pystepsrc +def pull_example_gallery_from_external_repo(): + global EXAMPLES_GALLERY_REPOSITORY -# -- Options for HTML output ---------------------------------------------- + # Default to the "latest" branch, containing the latest rendered notebooks. + rtd_version = os.environ.get("READTHEDOCS_VERSION", "latest") -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -# html_theme = 'alabaster' -# html_theme = 'classic' -html_theme = "sphinx_rtd_theme" + if rtd_version == "stable": + try: + rtd_version = subprocess.check_output( + ["git", "describe", "--abbrev=0", "--tags"], universal_newlines=True + ).strip() + except subprocess.CalledProcessError: + rtd_version = "latest" -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. 
-# -# html_theme_options = {} + print(f"\nRTD Version: {rtd_version}\n") -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["../_static"] -html_css_files = ["../_static/pysteps.css"] + with tempfile.TemporaryDirectory() as work_dir: + cmd = ( + f"git clone {EXAMPLES_GALLERY_REPOSITORY} --depth 1 --branch {rtd_version} {work_dir}" + ) + subprocess.check_output(cmd.split(" ")) -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# This is required for the alabaster theme -# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars -html_sidebars = { - "**": [ - "relations.html", # needs 'show_related': True theme option to display - "searchbox.html", - ] -} + examples_gallery_dir = DOCS_DIR / "source/examples_gallery" + shutil.rmtree(examples_gallery_dir, ignore_errors=True) -html_domain_indices = True + examples_dir = PROJECT_ROOT_DIR / "examples" + shutil.rmtree(examples_dir, ignore_errors=True) + work_dir = Path(work_dir) + example_gallery_dir_in_remote = work_dir / "docs/examples_gallery" + shutil.copytree(example_gallery_dir_in_remote, examples_gallery_dir) + + examples_dir_in_remote = work_dir / "examples" + shutil.copytree(examples_dir_in_remote, examples_dir) + + +pull_example_gallery_from_external_repo() + +# -- Options for HTML output ---------------------------------------------- +html_theme = "sphinx_rtd_theme" +html_static_path = ["../_static"] +html_css_files = ["../_static/pysteps.css"] +html_domain_indices = True autosummary_generate = True # -- Options for HTMLHelp output ------------------------------------------ - -# Output file base name for HTML help builder. 
htmlhelp_basename = "pystepsdoc" # -- Options for LaTeX output --------------------------------------------- @@ -200,9 +155,6 @@ def set_root(): "papersize": "a4paper", "pointsize": "10pt", "preamble": latex_preamble - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', } latex_domain_indices = False @@ -236,13 +188,3 @@ def set_root(): "Miscellaneous", ), ] - -# -- Options for Sphinx-Gallery ------------------------------------------- - -# The configuration dictionary for Sphinx-Gallery - -sphinx_gallery_conf = { - "examples_dirs": "../../examples", # path to your example scripts - "gallery_dirs": "auto_examples", # path where to save gallery generated examples - "filename_pattern": r"/*\.py", # Include all the files in the examples dir -} diff --git a/doc/source/user_guide/index.rst b/doc/source/user_guide/index.rst index 634ecdb62..79a23112a 100644 --- a/doc/source/user_guide/index.rst +++ b/doc/source/user_guide/index.rst @@ -16,6 +16,6 @@ the package, see the :ref:`pysteps-reference`. install_pysteps example_data set_pystepsrc - ../auto_examples/index + ../examples_gallery/index machine_learning_pysteps diff --git a/examples/LK_buffer_mask.py b/examples/LK_buffer_mask.py deleted file mode 100644 index 35efd5297..000000000 --- a/examples/LK_buffer_mask.py +++ /dev/null @@ -1,239 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Handling of no-data in Lucas-Kanade -=================================== - -Areas of missing data in radar images are typically caused by visibility limits -such as beam blockage and the radar coverage itself. These artifacts can mislead -the echo tracking algorithms. For instance, precipitation leaving the domain -might be erroneously detected as having nearly stationary velocity. 
- -This example shows how the Lucas-Kanade algorithm can be tuned to avoid the -erroneous interpretation of velocities near the maximum range of the radars by -buffering the no-data mask in the radar image in order to exclude all vectors -detected nearby no-data areas. -""" - -from datetime import datetime -from matplotlib import cm, colors - -import matplotlib.pyplot as plt -import numpy as np - -from pysteps import io, motion, nowcasts, rcparams, verification -from pysteps.utils import conversion, transformation -from pysteps.visualization import plot_precip_field, quiver - -################################################################################ -# Read the radar input images -# --------------------------- -# -# First, we will import the sequence of radar composites. -# You need the pysteps-data archive downloaded and the pystepsrc file -# configured with the data_source paths pointing to data folders. - -# Selected case -date = datetime.strptime("201607112100", "%Y%m%d%H%M") -data_source = rcparams.data_sources["mch"] - -############################################################################### -# Load the data from the archive -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -root_path = data_source["root_path"] -path_fmt = data_source["path_fmt"] -fn_pattern = data_source["fn_pattern"] -fn_ext = data_source["fn_ext"] -importer_name = data_source["importer"] -importer_kwargs = data_source["importer_kwargs"] -timestep = data_source["timestep"] - -# Find the two input files from the archive -fns = io.archive.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=1 -) - -# Read the radar composites -importer = io.get_method(importer_name, "importer") -R, quality, metadata = io.read_timeseries(fns, importer, **importer_kwargs) - -del quality # Not used - -############################################################################### -# Preprocess the data -# ~~~~~~~~~~~~~~~~~~~ - -# Convert to mm/h -R, metadata = 
conversion.to_rainrate(R, metadata) - -# Keep the reference frame in mm/h and its mask (for plotting purposes) -ref_mm = R[0, :, :].copy() -mask = np.ones(ref_mm.shape) -mask[~np.isnan(ref_mm)] = np.nan - -# Log-transform the data [dBR] -R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0) - -# Keep the reference frame in dBR (for plotting purposes) -ref_dbr = R[0].copy() -ref_dbr[ref_dbr < -10] = np.nan - -# Plot the reference field -plot_precip_field(ref_mm, title="Reference field") -circle = plt.Circle((620, 400), 100, color="b", clip_on=False, fill=False) -plt.gca().add_artist(circle) -plt.show() - -############################################################################### -# Notice the "half-in, half-out" precipitation area within the blue circle. -# As we are going to show next, the tracking algorithm can erroneously interpret -# precipitation leaving the domain as stationary motion. -# -# Also note that the radar image includes NaNs in areas of missing data. -# These are used by the optical flow algorithm to define the radar mask. -# -# Sparse Lucas-Kanade -# ------------------- -# -# By setting the optional argument ``dense=False`` in ``xy, uv = dense_lucaskanade(...)``, -# the LK algorithm returns the motion vectors detected by the Lucas-Kanade scheme -# without interpolating them on the grid. -# This allows us to better identify the presence of wrongly detected -# stationary motion in areas where precipitation is leaving the domain (look -# for the red dots within the blue circle in the figure below). 
- -# Get Lucas-Kanade optical flow method -dense_lucaskanade = motion.get_method("LK") - -# Mask invalid values -R = np.ma.masked_invalid(R) - -# Use no buffering of the radar mask -fd_kwargs1 = {"buffer_mask": 0} -xy, uv = dense_lucaskanade(R, dense=False, fd_kwargs=fd_kwargs1) -plt.imshow(ref_dbr, cmap=plt.get_cmap("Greys")) -plt.imshow(mask, cmap=colors.ListedColormap(["black"]), alpha=0.5) -plt.quiver( - xy[:, 0], - xy[:, 1], - uv[:, 0], - uv[:, 1], - color="red", - angles="xy", - scale_units="xy", - scale=0.2, -) -circle = plt.Circle((620, 245), 100, color="b", clip_on=False, fill=False) -plt.gca().add_artist(circle) -plt.title("buffer_mask = 0") -plt.show() - -################################################################################ -# The LK algorithm cannot distinguish missing values from no precipitation, that is, -# no-data are the same as no-echoes. As a result, the fixed boundaries produced -# by precipitation in contact with no-data areas are interpreted as stationary motion. -# One way to mitigate this effect of the boundaries is to introduce a slight buffer -# of the no-data mask so that the algorithm will ignore all the portions of the -# radar domain that are nearby no-data areas. -# This buffer can be set by the keyword argument ``buffer_mask`` within the -# feature detection optional arguments ``fd_kwargs``. -# Note that by default ``dense_lucaskanade`` uses a 5-pixel buffer. 
- -# with buffer -buffer = 10 -fd_kwargs2 = {"buffer_mask": buffer} -xy, uv = dense_lucaskanade(R, dense=False, fd_kwargs=fd_kwargs2) -plt.imshow(ref_dbr, cmap=plt.get_cmap("Greys")) -plt.imshow(mask, cmap=colors.ListedColormap(["black"]), alpha=0.5) -plt.quiver( - xy[:, 0], - xy[:, 1], - uv[:, 0], - uv[:, 1], - color="red", - angles="xy", - scale_units="xy", - scale=0.2, -) -circle = plt.Circle((620, 245), 100, color="b", clip_on=False, fill=False) -plt.gca().add_artist(circle) -plt.title("buffer_mask = %i" % buffer) -plt.show() - -################################################################################ -# Dense Lucas-Kanade -# ------------------ -# -# The above displacement vectors produced by the Lucas-Kanade method are now -# interpolated to produce a full field of motion (i.e., ``dense=True``). -# By comparing the velocity of the motion fields, we can easily notice -# the negative bias that is introduced by the the erroneous interpretation of -# velocities near the maximum range of the radars. - -UV1 = dense_lucaskanade(R, dense=True, fd_kwargs=fd_kwargs1) -UV2 = dense_lucaskanade(R, dense=True, fd_kwargs=fd_kwargs2) - -V1 = np.sqrt(UV1[0] ** 2 + UV1[1] ** 2) -V2 = np.sqrt(UV2[0] ** 2 + UV2[1] ** 2) - -plt.imshow((V1 - V2) / V2, cmap=cm.RdBu_r, vmin=-0.5, vmax=0.5) -plt.colorbar(fraction=0.04, pad=0.04) -plt.title("Relative difference in motion speed") -plt.show() - -################################################################################ -# Notice how the presence of erroneous velocity vectors produces a significantly -# slower motion field near the right edge of the domain. -# -# Forecast skill -# -------------- -# -# We are now going to evaluate the benefit of buffering the radar mask by computing -# the forecast skill in terms of the Spearman correlation coefficient. -# The extrapolation forecasts are computed using the dense UV motion fields -# estimated above. 
- -# Get the advection routine and extrapolate the last radar frame by 12 time steps -# (i.e., 1 hour lead time) -extrapolate = nowcasts.get_method("extrapolation") -R[~np.isfinite(R)] = metadata["zerovalue"] -R_f1 = extrapolate(R[-1], UV1, 12) -R_f2 = extrapolate(R[-1], UV2, 12) - -# Back-transform to rain rate -R_f1 = transformation.dB_transform(R_f1, threshold=-10.0, inverse=True)[0] -R_f2 = transformation.dB_transform(R_f2, threshold=-10.0, inverse=True)[0] - -# Find the veriyfing observations in the archive -fns = io.archive.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_next_files=12 -) - -# Read and convert the radar composites -R_o, _, metadata_o = io.read_timeseries(fns, importer, **importer_kwargs) -R_o, metadata_o = conversion.to_rainrate(R_o, metadata_o) - -# Compute Spearman correlation -skill = verification.get_method("corr_s") -score_1 = [] -score_2 = [] -for i in range(12): - score_1.append(skill(R_f1[i, :, :], R_o[i + 1, :, :])["corr_s"]) - score_2.append(skill(R_f2[i, :, :], R_o[i + 1, :, :])["corr_s"]) - -x = (np.arange(12) + 1) * 5 # [min] -plt.plot(x, score_1, label="buffer_mask = 0") -plt.plot(x, score_2, label="buffer_mask = %i" % buffer) -plt.legend() -plt.xlabel("Lead time [min]") -plt.ylabel("Corr. coeff. []") -plt.title("Spearman correlation") - -plt.tight_layout() -plt.show() - -################################################################################ -# As expected, the corrected motion field produces better forecast skill already -# within the first hour into the nowcast. - -# sphinx_gallery_thumbnail_number = 2 diff --git a/examples/README.txt b/examples/README.txt deleted file mode 100644 index 2fe8a648a..000000000 --- a/examples/README.txt +++ /dev/null @@ -1,10 +0,0 @@ -.. _example_gallery: - -pySTEPS examples gallery -======================== - -Below is a collection of example scripts and tutorials to illustrate the usage -of pysteps. - -These scripts require the pySTEPS example data. 
-See the installation instructions in the :ref:`example_data` section. \ No newline at end of file diff --git a/examples/advection_correction.py b/examples/advection_correction.py deleted file mode 100644 index 5bab1d9bd..000000000 --- a/examples/advection_correction.py +++ /dev/null @@ -1,152 +0,0 @@ -""" -Advection correction -==================== - -This tutorial shows how to use the optical flow routines of pysteps to implement -the advection correction procedure described in Anagnostou and Krajewski (1999). - -Advection correction is a temporal interpolation procedure that is often used -when estimating rainfall accumulations to correct for the shift of rainfall patterns -between consecutive radar rainfall maps. This shift becomes particularly -significant for long radar scanning cycles and in presence of fast moving -precipitation features. - -.. note:: The code for the advection correction using pysteps was originally - written by `Daniel Wolfensberger `_. - -""" - -from datetime import datetime -import matplotlib.pyplot as plt -import numpy as np - -from pysteps import io, motion, rcparams -from pysteps.utils import conversion, dimension -from pysteps.visualization import plot_precip_field -from scipy.ndimage import map_coordinates - -################################################################################ -# Read the radar input images -# --------------------------- -# -# First, we import a sequence of 36 images of 5-minute radar composites -# that we will use to produce a 3-hour rainfall accumulation map. -# We will keep only one frame every 10 minutes, to simulate a longer scanning -# cycle and thus better highlight the need for advection correction. -# -# You need the pysteps-data archive downloaded and the pystepsrc file -# configured with the data_source paths pointing to data folders. 
- -# Selected case -date = datetime.strptime("201607112100", "%Y%m%d%H%M") -data_source = rcparams.data_sources["mch"] - -############################################################################### -# Load the data from the archive -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -root_path = data_source["root_path"] -path_fmt = data_source["path_fmt"] -fn_pattern = data_source["fn_pattern"] -fn_ext = data_source["fn_ext"] -importer_name = data_source["importer"] -importer_kwargs = data_source["importer_kwargs"] -timestep = data_source["timestep"] - -# Find the input files from the archive -fns = io.archive.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_next_files=35 -) - -# Read the radar composites -importer = io.get_method(importer_name, "importer") -R, __, metadata = io.read_timeseries(fns, importer, **importer_kwargs) - -# Convert to mm/h -R, metadata = conversion.to_rainrate(R, metadata) - -# Upscale to 2 km (simply to reduce the memory demand) -R, metadata = dimension.aggregate_fields_space(R, metadata, 2000) - -# Keep only one frame every 10 minutes (i.e., every 2 timesteps) -# (to highlight the need for advection correction) -R = R[::2] - -################################################################################ -# Advection correction -# -------------------- -# -# Now we need to implement the advection correction for a pair of successive -# radar images. The procedure is based on the algorithm described in Anagnostou -# and Krajewski (Appendix A, 1999). -# -# To evaluate the advection occurred between two successive radar images, we are -# going to use the Lucas-Kanade optical flow routine available in pysteps. 
- - -def advection_correction(R, T=5, t=1): - """ - R = np.array([qpe_previous, qpe_current]) - T = time between two observations (5 min) - t = interpolation timestep (1 min) - """ - - # Evaluate advection - oflow_method = motion.get_method("LK") - fd_kwargs = {"buffer_mask": 10} # avoid edge effects - V = oflow_method(np.log(R), fd_kwargs=fd_kwargs) - - # Perform temporal interpolation - Rd = np.zeros((R[0].shape)) - x, y = np.meshgrid( - np.arange(R[0].shape[1], dtype=float), np.arange(R[0].shape[0], dtype=float) - ) - for i in range(t, T + t, t): - - pos1 = (y - i / T * V[1], x - i / T * V[0]) - R1 = map_coordinates(R[0], pos1, order=1) - - pos2 = (y + (T - i) / T * V[1], x + (T - i) / T * V[0]) - R2 = map_coordinates(R[1], pos2, order=1) - - Rd += (T - i) * R1 + i * R2 - - return t / T ** 2 * Rd - - -############################################################################### -# Finally, we apply the advection correction to the whole sequence of radar -# images and produce the rainfall accumulation map. - -R_ac = R[0].copy() -for i in range(R.shape[0] - 1): - R_ac += advection_correction(R[i : (i + 2)], T=10, t=1) -R_ac /= R.shape[0] - -############################################################################### -# Results -# ------- -# -# We compare the two accumulation maps. The first map on the left is -# computed without advection correction and we can therefore see that the shift -# between successive images 10 minutes apart produces irregular accumulations. -# Conversely, the rainfall accumulation of the right is produced using advection -# correction to account for this spatial shift. The final result is a smoother -# rainfall accumulation map. 
- -plt.figure(figsize=(9, 4)) -plt.subplot(121) -plot_precip_field(R.mean(axis=0), title="3-h rainfall accumulation") -plt.subplot(122) -plot_precip_field(R_ac, title="Same with advection correction") -plt.tight_layout() -plt.show() - -################################################################################ -# Reference -# ~~~~~~~~~ -# -# Anagnostou, E. N., and W. F. Krajewski. 1999. "Real-Time Radar Rainfall -# Estimation. Part I: Algorithm Formulation." Journal of Atmospheric and -# Oceanic Technology 16: 189–97. -# https://doi.org/10.1175/1520-0426(1999)016<0189:RTRREP>2.0.CO;2 diff --git a/examples/anvil_nowcast.py b/examples/anvil_nowcast.py deleted file mode 100644 index 57802ad4e..000000000 --- a/examples/anvil_nowcast.py +++ /dev/null @@ -1,217 +0,0 @@ -# coding: utf-8 - -""" -ANVIL nowcast -============= - -This example demonstrates how to use ANVIL and the advantages compared to -extrapolation nowcast and S-PROG. - -Load the libraries. -""" -from datetime import datetime, timedelta -import warnings - -warnings.simplefilter("ignore") -import matplotlib.pyplot as plt -import numpy as np -from pysteps import motion, io, rcparams, utils -from pysteps.nowcasts import anvil, extrapolation, sprog -from pysteps.utils import transformation -from pysteps.visualization import plot_precip_field - -############################################################################### -# Read the input data -# ------------------- -# -# ANVIL was originally developed to use vertically integrated liquid (VIL) as -# the input data, but the model allows using any two-dimensional input fields. -# Here we use a composite of rain rates. 
- -date = datetime.strptime("201505151620", "%Y%m%d%H%M") - -# Read the data source information from rcparams -data_source = rcparams.data_sources["mch"] - -root_path = data_source["root_path"] -path_fmt = data_source["path_fmt"] -fn_pattern = data_source["fn_pattern"] -fn_ext = data_source["fn_ext"] -importer_name = data_source["importer"] -importer_kwargs = data_source["importer_kwargs"] - -# Find the input files in the archive. Use history length of 5 timesteps -filenames = io.archive.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=5 -) - -# Read the input time series -importer = io.get_method(importer_name, "importer") -rainrate_field, quality, metadata = io.read_timeseries( - filenames, importer, **importer_kwargs -) - -# Convert to rain rate (mm/h) -rainrate_field, metadata = utils.to_rainrate(rainrate_field, metadata) - -################################################################################ -# Compute the advection field -# --------------------------- -# -# Apply the Lucas-Kanade method with the parameters given in Pulkkinen et al. -# (2020) to compute the advection field. 
- -fd_kwargs = {} -fd_kwargs["max_corners"] = 1000 -fd_kwargs["quality_level"] = 0.01 -fd_kwargs["min_distance"] = 2 -fd_kwargs["block_size"] = 8 - -lk_kwargs = {} -lk_kwargs["winsize"] = (15, 15) - -oflow_kwargs = {} -oflow_kwargs["fd_kwargs"] = fd_kwargs -oflow_kwargs["lk_kwargs"] = lk_kwargs -oflow_kwargs["decl_scale"] = 10 - -oflow = motion.get_method("lucaskanade") - -# transform the input data to logarithmic scale -rainrate_field_log, _ = utils.transformation.dB_transform( - rainrate_field, metadata=metadata -) -velocity = oflow(rainrate_field_log, **oflow_kwargs) - -############################################################################### -# Compute the nowcasts and threshold rain rates below 0.5 mm/h -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -forecast_extrap = extrapolation.forecast( - rainrate_field[-1], velocity, 3, extrap_kwargs={"allow_nonfinite_values": True} -) -forecast_extrap[forecast_extrap < 0.5] = 0.0 - -# log-transform the data and the threshold value to dBR units for S-PROG -rainrate_field_db, _ = transformation.dB_transform( - rainrate_field, metadata, threshold=0.1, zerovalue=-15.0 -) -rainrate_thr, _ = transformation.dB_transform( - np.array([0.5]), metadata, threshold=0.1, zerovalue=-15.0 -) -forecast_sprog = sprog.forecast( - rainrate_field_db[-3:], velocity, 3, n_cascade_levels=8, R_thr=rainrate_thr[0] -) -forecast_sprog, _ = transformation.dB_transform( - forecast_sprog, threshold=-10.0, inverse=True -) -forecast_sprog[forecast_sprog < 0.5] = 0.0 - -forecast_anvil = anvil.forecast( - rainrate_field[-4:], velocity, 3, ar_window_radius=25, ar_order=2 -) -forecast_anvil[forecast_anvil < 0.5] = 0.0 - -############################################################################### -# Read the reference observation field and threshold rain rates below 0.5 mm/h -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -filenames = io.archive.find_by_date( - date, root_path, path_fmt, 
fn_pattern, fn_ext, timestep=5, num_next_files=3 -) - -refobs_field, quality, metadata = io.read_timeseries( - filenames, importer, **importer_kwargs -) - -refobs_field, metadata = utils.to_rainrate(refobs_field[-1], metadata) -refobs_field[refobs_field < 0.5] = 0.0 - -############################################################################### -# Plot the extrapolation, S-PROG and ANVIL nowcasts. -# -------------------------------------------------- -# -# For comparison, the observed rain rate fields are also plotted. Growth and -# decay areas are marked with red and blue circles, respectively. -def plot_growth_decay_circles(ax): - circle = plt.Circle( - (360, 300), 25, color="b", clip_on=False, fill=False, zorder=1e9 - ) - ax.add_artist(circle) - circle = plt.Circle( - (420, 350), 30, color="b", clip_on=False, fill=False, zorder=1e9 - ) - ax.add_artist(circle) - circle = plt.Circle( - (405, 380), 30, color="b", clip_on=False, fill=False, zorder=1e9 - ) - ax.add_artist(circle) - circle = plt.Circle( - (420, 500), 25, color="b", clip_on=False, fill=False, zorder=1e9 - ) - ax.add_artist(circle) - circle = plt.Circle( - (480, 535), 30, color="b", clip_on=False, fill=False, zorder=1e9 - ) - ax.add_artist(circle) - circle = plt.Circle( - (330, 470), 35, color="b", clip_on=False, fill=False, zorder=1e9 - ) - ax.add_artist(circle) - circle = plt.Circle( - (505, 205), 30, color="b", clip_on=False, fill=False, zorder=1e9 - ) - ax.add_artist(circle) - circle = plt.Circle( - (440, 180), 30, color="r", clip_on=False, fill=False, zorder=1e9 - ) - ax.add_artist(circle) - circle = plt.Circle( - (590, 240), 30, color="r", clip_on=False, fill=False, zorder=1e9 - ) - ax.add_artist(circle) - circle = plt.Circle( - (585, 160), 15, color="r", clip_on=False, fill=False, zorder=1e9 - ) - ax.add_artist(circle) - - -fig = plt.figure(figsize=(10, 13)) - -ax = fig.add_subplot(321) -rainrate_field[-1][rainrate_field[-1] < 0.5] = 0.0 -plot_precip_field(rainrate_field[-1]) 
-plot_growth_decay_circles(ax) -ax.set_title("Obs. %s" % str(date)) - -ax = fig.add_subplot(322) -plot_precip_field(refobs_field) -plot_growth_decay_circles(ax) -ax.set_title("Obs. %s" % str(date + timedelta(minutes=15))) - -ax = fig.add_subplot(323) -plot_precip_field(forecast_extrap[-1]) -plot_growth_decay_circles(ax) -ax.set_title("Extrapolation +15 minutes") - -ax = fig.add_subplot(324) -plot_precip_field(forecast_sprog[-1]) -plot_growth_decay_circles(ax) -ax.set_title("S-PROG (with post-processing)\n +15 minutes") - -ax = fig.add_subplot(325) -plot_precip_field(forecast_anvil[-1]) -plot_growth_decay_circles(ax) -ax.set_title("ANVIL +15 minutes") - -plt.show() - -############################################################################### -# Remarks -# ------- -# -# The extrapolation nowcast is static, i.e. it does not predict any growth or -# decay. While S-PROG is to some extent able to predict growth and decay, this -# this comes with loss of small-scale features. In addition, statistical -# post-processing needs to be applied to correct the bias and incorrect wet-area -# ratio introduced by the autoregressive process. ANVIL is able to do both: -# predict growth and decay and preserve the small-scale structure in a way that -# post-processing is not necessary. diff --git a/examples/data_transformations.py b/examples/data_transformations.py deleted file mode 100644 index 24962ec6f..000000000 --- a/examples/data_transformations.py +++ /dev/null @@ -1,215 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Data transformations -==================== - -The statistics of intermittent precipitation rates are particularly non-Gaussian -and display an asymmetric distribution bounded at zero. -Such properties restrict the usage of well-established statistical methods that -assume symmetric or Gaussian data. - -A common workaround is to introduce a suitable data transformation to approximate -a normal distribution. 
- -In this example, we test the data transformation methods available in pysteps -in order to obtain a more symmetric distribution of the precipitation data -(excluding the zeros). -The currently available transformations include the Box-Cox, dB, square-root and -normal quantile transforms. - -""" - -from datetime import datetime -import matplotlib.pyplot as plt -import numpy as np -from pysteps import io, rcparams -from pysteps.utils import conversion, transformation -from scipy.stats import skew - -############################################################################### -# Read the radar input images -# --------------------------- -# -# First, we will import the sequence of radar composites. -# You need the pysteps-data archive downloaded and the pystepsrc file -# configured with the data_source paths pointing to data folders. - -# Selected case -date = datetime.strptime("201609281600", "%Y%m%d%H%M") -data_source = rcparams.data_sources["fmi"] - - -############################################################################### -# Load the data from the archive -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -root_path = data_source["root_path"] -path_fmt = data_source["path_fmt"] -fn_pattern = data_source["fn_pattern"] -fn_ext = data_source["fn_ext"] -importer_name = data_source["importer"] -importer_kwargs = data_source["importer_kwargs"] -timestep = data_source["timestep"] - -# Get 1 hour of observations in the data archive -fns = io.archive.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_next_files=11 -) - -# Read the radar composites -importer = io.get_method(importer_name, "importer") -Z, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs) - -# Keep only positive rainfall values -Z = Z[Z > metadata["zerovalue"]].flatten() - -# Convert to rain rate -R, metadata = conversion.to_rainrate(Z, metadata) - -############################################################################### -# Test data transformations -# 
------------------------- - -# Define method to visualize the data distribution with boxplots and plot the -# corresponding skewness -def plot_distribution(data, labels, skw): - - N = len(data) - fig, ax1 = plt.subplots() - ax2 = ax1.twinx() - - ax2.plot(np.arange(N + 2), np.zeros(N + 2), ":r") - ax1.boxplot(data, labels=labels, sym="", medianprops={"color": "k"}) - - ymax = [] - for i in range(N): - y = skw[i] - x = i + 1 - ax2.plot(x, y, "*r", ms=10, markeredgecolor="k") - ymax.append(np.max(data[i])) - - # ylims - ylims = np.percentile(ymax, 50) - ax1.set_ylim((-1 * ylims, ylims)) - ylims = np.max(np.abs(skw)) - ax2.set_ylim((-1.1 * ylims, 1.1 * ylims)) - - # labels - ax1.set_ylabel(r"Standardized values [$\sigma$]") - ax2.set_ylabel(r"Skewness []", color="r") - ax2.tick_params(axis="y", labelcolor="r") - - -############################################################################### -# Box-Cox transform -# ~~~~~~~~~~~~~~~~~ -# The Box-Cox transform is a well-known power transformation introduced by -# `Box and Cox (1964)`_. In its one-parameter version, the Box-Cox transform -# takes the form T(x) = ln(x) for lambda = 0, or T(x) = (x**lambda - 1)/lambda -# otherwise. -# -# To find a suitable lambda, we will experiment with a range of values -# and select the one that produces the most symmetric distribution, i.e., the -# lambda associated with a value of skewness closest to zero. -# To visually compare the results, the transformed data are standardized. -# -# .. 
_`Box and Cox (1964)`: https://doi.org/10.1111/j.2517-6161.1964.tb00553.x - -data = [] -labels = [] -skw = [] - -# Test a range of values for the transformation parameter Lambda -Lambdas = np.linspace(-0.4, 0.4, 11) -for i, Lambda in enumerate(Lambdas): - R_, _ = transformation.boxcox_transform(R, metadata, Lambda) - R_ = (R_ - np.mean(R_)) / np.std(R_) - data.append(R_) - labels.append("{0:.2f}".format(Lambda)) - skw.append(skew(R_)) # skewness - -# Plot the transformed data distribution as a function of lambda -plot_distribution(data, labels, skw) -plt.title("Box-Cox transform") -plt.tight_layout() -plt.show() - -# Best lambda -idx_best = np.argmin(np.abs(skw)) -Lambda = Lambdas[idx_best] - -print("Best parameter lambda: %.2f\n(skewness = %.2f)" % (Lambda, skw[idx_best])) - -############################################################################### -# Compare data transformations -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -data = [] -labels = [] -skw = [] - -############################################################################### -# Rain rates -# ~~~~~~~~~~ -# First, let's have a look at the original rain rate values. 
- -data.append((R - np.mean(R)) / np.std(R)) -labels.append("R") -skw.append(skew(R)) - -############################################################################### -# dB transform -# ~~~~~~~~~~~~ -# We transform the rainfall data into dB units: 10*log(R) - -R_, _ = transformation.dB_transform(R, metadata) -data.append((R_ - np.mean(R_)) / np.std(R_)) -labels.append("dB") -skw.append(skew(R_)) - -############################################################################### -# Square-root transform -# ~~~~~~~~~~~~~~~~~~~~~ -# Transform the data using the square-root: sqrt(R) - -R_, _ = transformation.sqrt_transform(R, metadata) -data.append((R_ - np.mean(R_)) / np.std(R_)) -labels.append("sqrt") -skw.append(skew(R_)) - -############################################################################### -# Box-Cox transform -# ~~~~~~~~~~~~~~~~~ -# We now apply the Box-Cox transform using the best parameter lambda found above. - -R_, _ = transformation.boxcox_transform(R, metadata, Lambda) -data.append((R_ - np.mean(R_)) / np.std(R_)) -labels.append("Box-Cox\n($\lambda=$%.2f)" % Lambda) -skw.append(skew(R_)) - -############################################################################### -# Normal quantile transform -# ~~~~~~~~~~~~~~~~~~~~~~~~~ -# At last, we apply the empirical normal quantile (NQ) transform as described in -# `Bogner et al (2012)`_. -# -# .. _`Bogner et al (2012)`: http://dx.doi.org/10.5194/hess-16-1085-2012 - -R_, _ = transformation.NQ_transform(R, metadata) -data.append((R_ - np.mean(R_)) / np.std(R_)) -labels.append("NQ") -skw.append(skew(R_)) - -############################################################################### -# By plotting all the results, we can notice first of all the strongly asymmetric -# distribution of the original data (R) and that all transformations manage to -# reduce its skewness. 
Among these, the Box-Cox transform (using the best parameter -# lambda) and the normal quantile (NQ) transform provide the best correction. -# Despite not producing a perfectly symmetric distribution, the square-root (sqrt) -# transform has the strong advantage of being defined for zeros, too, while all -# other transformations need an arbitrary rule for non-positive values. - -plot_distribution(data, labels, skw) -plt.title("Data transforms") -plt.tight_layout() -plt.show() diff --git a/examples/linda_nowcasts.py b/examples/linda_nowcasts.py deleted file mode 100644 index 991bade6c..000000000 --- a/examples/linda_nowcasts.py +++ /dev/null @@ -1,185 +0,0 @@ -#!/bin/env python -""" -LINDA nowcasts -============== - -This example shows how to compute and plot a deterministic and ensemble LINDA -nowcasts using Swiss radar data. - -""" - -from datetime import datetime -import warnings - -warnings.simplefilter("ignore") - -import matplotlib.pyplot as plt - -from pysteps import io, rcparams -from pysteps.motion.lucaskanade import dense_lucaskanade -from pysteps.nowcasts import linda, sprog, steps -from pysteps.utils import conversion, dimension, transformation -from pysteps.visualization import plot_precip_field - -############################################################################### -# Read the input rain rate fields -# ------------------------------- - -date = datetime.strptime("201701311200", "%Y%m%d%H%M") -data_source = "mch" - -# Read the data source information from rcparams -datasource_params = rcparams.data_sources[data_source] - -# Find the radar files in the archive -fns = io.find_by_date( - date, - datasource_params["root_path"], - datasource_params["path_fmt"], - datasource_params["fn_pattern"], - datasource_params["fn_ext"], - datasource_params["timestep"], - num_prev_files=2, -) - -# Read the data from the archive -importer = io.get_method(datasource_params["importer"], "importer") -reflectivity, _, metadata = io.read_timeseries( - fns, importer, 
**datasource_params["importer_kwargs"] -) - -# Convert reflectivity to rain rate -rainrate, metadata = conversion.to_rainrate(reflectivity, metadata) - -# Upscale data to 2 km to reduce computation time -rainrate, metadata = dimension.aggregate_fields_space(rainrate, metadata, 2000) - -# Plot the most recent rain rate field -plt.figure() -plot_precip_field(rainrate[-1, :, :]) -plt.show() - -############################################################################### -# Estimate the advection field -# ---------------------------- - -# The advection field is estimated using the Lucas-Kanade optical flow -advection = dense_lucaskanade(rainrate, verbose=True) - -############################################################################### -# Deterministic nowcast -# --------------------- - -# Compute 30-minute LINDA nowcast with 8 parallel workers -# Restrict the number of features to 15 to reduce computation time -nowcast_linda = linda.forecast( - rainrate, - advection, - 6, - max_num_features=15, - add_perturbations=False, - num_workers=8, - measure_time=True, -)[0] - -# Compute S-PROG nowcast for comparison -rainrate_db, _ = transformation.dB_transform( - rainrate, metadata, threshold=0.1, zerovalue=-15.0 -) -nowcast_sprog = sprog.forecast( - rainrate_db[-3:, :, :], - advection, - 6, - n_cascade_levels=6, - R_thr=-10.0, -) - -# Convert reflectivity nowcast to rain rate -nowcast_sprog = transformation.dB_transform( - nowcast_sprog, threshold=-10.0, inverse=True -)[0] - -# Plot the nowcasts -fig = plt.figure(figsize=(9, 4)) -ax = fig.add_subplot(1, 2, 1) -plot_precip_field( - nowcast_linda[-1, :, :], - title="LINDA (+ 30 min)", -) - -ax = fig.add_subplot(1, 2, 2) -plot_precip_field( - nowcast_sprog[-1, :, :], - title="S-PROG (+ 30 min)", -) - -plt.show() - -############################################################################### -# The above figure shows that the filtering scheme implemented in LINDA preserves -# small-scale and band-shaped features better 
than S-PROG. This is because the -# former uses a localized elliptical convolution kernel instead of the -# cascade-based autoregressive process, where the parameters are estimated over -# the whole domain. - -############################################################################### -# Probabilistic nowcast -# --------------------- - -# Compute 30-minute LINDA nowcast ensemble with 40 members and 8 parallel workers -nowcast_linda = linda.forecast( - rainrate, - advection, - 6, - max_num_features=15, - add_perturbations=True, - num_ens_members=40, - num_workers=8, - measure_time=True, -)[0] - -# Compute 40-member STEPS nowcast for comparison -nowcast_steps = steps.forecast( - rainrate_db[-3:, :, :], - advection, - 6, - 40, - n_cascade_levels=6, - R_thr=-10.0, - mask_method="incremental", - kmperpixel=2.0, - timestep=datasource_params["timestep"], - vel_pert_method=None, -) - -# Convert reflectivity nowcast to rain rate -nowcast_steps = transformation.dB_transform( - nowcast_steps, threshold=-10.0, inverse=True -)[0] - -# Plot two ensemble members of both nowcasts -fig = plt.figure() -for i in range(2): - ax = fig.add_subplot(2, 2, i + 1) - ax = plot_precip_field( - nowcast_linda[i, -1, :, :], geodata=metadata, colorbar=False, axis="off" - ) - ax.set_title(f"LINDA Member {i+1}") - -for i in range(2): - ax = fig.add_subplot(2, 2, 3 + i) - ax = plot_precip_field( - nowcast_steps[i, -1, :, :], geodata=metadata, colorbar=False, axis="off" - ) - ax.set_title(f"STEPS Member {i+1}") - -############################################################################### -# The above figure shows the main difference between LINDA and STEPS. In -# addition to the convolution kernel, another improvement in LINDA is a -# localized perturbation generator using the short-space Fourier transform -# (SSFT) and a spatially variable marginal distribution. As a result, the -# LINDA ensemble members preserve the anisotropic and small-scale structures -# considerably better than STEPS. 
- -plt.tight_layout() -plt.show() diff --git a/examples/my_first_nowcast.ipynb b/examples/my_first_nowcast.ipynb deleted file mode 100644 index 3113c4977..000000000 --- a/examples/my_first_nowcast.ipynb +++ /dev/null @@ -1,832 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "L_dntwSQBnbK" - }, - "source": [ - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/pySTEPS/pysteps/blob/master/examples/my_first_nowcast.ipynb)\n", - "\n", - "# My first precipitation nowcast\n", - "\n", - "In this example, we will use pysteps to compute and plot an extrapolation nowcast using the NSSL's Multi-Radar/Multi-Sensor System\n", - "([MRMS](https://www.nssl.noaa.gov/projects/mrms/)) rain rate product.\n", - "\n", - "The MRMS precipitation product is available every 2 minutes, over the contiguous US. \n", - "Each precipitation composite has 3500 x 7000 grid points, separated 1 km from each other.\n", - "\n", - "## Set-up Colab environment\n", - "\n", - "**Important**: In colab, execute this section one cell at a time. Trying to excecute all the cells at once may results in cells being skipped and some dependencies not being installed.\n", - "\n", - "First, let's set up our working environment. Note that these steps are only needed to work with google colab. \n", - "\n", - "To install pysteps locally, you can follow [these instructions](https://pysteps.readthedocs.io/en/latest/user_guide/install_pysteps.html).\n", - "\n", - "First, let's install the latest Pysteps version from the Python Package Index (PyPI) using pip. This will also install the minimal dependencies needed to run pysteps. 
\n", - "\n", - "#### Install optional dependencies\n", - "\n", - "Now, let's install the optional dependendies that will allow us to plot and read the example data.\n", - "- pygrib: to read the MRMS data grib format\n", - "- pyproj: needed by pygrib\n", - "\n", - "**NOTE:** Do not import pysteps in this notebook until the following optional dependencies are loaded. Otherwise, pysteps will assume that they are not installed and some of its functionalities won't work." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "mFx4hq_DBtp-" - }, - "outputs": [], - "source": [ - "# These libraries are needed for the pygrib library in Colab. \n", - "# Note that is needed if you install pygrib using pip.\n", - "# If you use conda, the libraries will be installed automatically.\n", - "! apt-get install libeccodes-dev libproj-dev\n", - "\n", - "# Install the python packages\n", - "! pip install pyproj\n", - "! pip install pygrib" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "6BF2paxnTuGB" - }, - "outputs": [], - "source": [ - "# Uninstall existing shapely\n", - "# We will re-install shapely in the next step by ignoring the binary\n", - "# wheels to make it compatible with other modules that depend on \n", - "# GEOS, such as Cartopy (used here).\n", - "!pip uninstall --yes shapely" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "7x8Hx_4hE_BU" - }, - "outputs": [], - "source": [ - "# To install cartopy in Colab using pip, we need to install the library \n", - "# dependencies first.\n", - "\n", - "!apt-get install -qq libgdal-dev libgeos-dev\n", - "!pip install shapely --no-binary shapely\n", - "!pip install cartopy" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "ybD55ZJhmdYa" - }, - "source": [ - "#### 
Install pysteps\n", - "\n", - "Now that all dependencies are installed, we can install pysteps." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "VA7zp3nRmhfF" - }, - "outputs": [], - "source": [ - "# ! pip install git+https://github.com/pySTEPS/pysteps\n", - "! pip install pysteps" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "-AkfR6JSBujn" - }, - "source": [ - "## Getting the example data\n", - "\n", - "Now that we have the environment ready, let's install the example data and configure the pysteps's default parameters by following [this tutorial](https://pysteps.readthedocs.io/en/latest/user_guide/example_data.html).\n", - "\n", - "First, we will use the [pysteps.datasets.download_pysteps_data()](https://pysteps.readthedocs.io/en/latest/generated/pysteps.datasets.download_pysteps_data.html) function to download the data.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "vri-R_ZVGihj" - }, - "outputs": [], - "source": [ - "# Import the helper functions\n", - "from pysteps.datasets import download_pysteps_data, create_default_pystepsrc\n", - "\n", - "# Download the pysteps data in the \"pysteps_data\"\n", - "download_pysteps_data(\"pysteps_data\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "wdKfjliCKXhx" - }, - "source": [ - "Next, we need to create a default configuration file that points to the downloaded data. \n", - "By default, pysteps will place the configuration file in `$HOME/.pysteps` (unix and Mac OS X) or `$USERPROFILE/pysteps` (windows).\n", - "To quickly create a configuration file, we will use the [pysteps.datasets.create_default_pystepsrc()](https://pysteps.readthedocs.io/en/latest/generated/pysteps.datasets.create_default_pystepsrc.html#pysteps.datasets.create_default_pystepsrc) helper function." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "pGdKHa36H5JX" - }, - "outputs": [], - "source": [ - "# If the configuration file is placed in one of the default locations \n", - "# (https://pysteps.readthedocs.io/en/latest/user_guide/set_pystepsrc.html#configuration-file-lookup) \n", - "# it will be loaded automatically when pysteps is imported. \n", - "config_file_path = create_default_pystepsrc(\"pysteps_data\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "DAFUJgR5K1CS" - }, - "source": [ - "Since pysteps was already initialized in this notebook, we need to load the new configuration file and update the default configuration." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "tMIbQLPAK42h" - }, - "outputs": [], - "source": [ - "# Import pysteps and load the new configuration file\n", - "import pysteps\n", - "_ = pysteps.load_config_file(config_file_path, verbose=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "SzSqp1DFJ0M9" - }, - "source": [ - "Let's see what the default parameters look like (these are stored in the\n", - "[pystepsrc file](https://pysteps.readthedocs.io/en/latest/user_guide/set_pystepsrc.html)). We will be using them to load the MRMS data set." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "6Gr65nH4BnbP" - }, - "outputs": [], - "source": [ - "# The default parameters are stored in pysteps.rcparams.\n", - "from pprint import pprint\n", - "pprint(pysteps.rcparams.data_sources['mrms'])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "9M_buv7WBnbf" - }, - "source": [ - "This should have printed the following lines:\n", - "\n", - "- `fn_ext`: 'grib2' -- The file extension\n", - "- `fn_pattern`: 'PrecipRate_00.00_%Y%m%d-%H%M%S' -- The file naming convention of the MRMS data.\n", - "- `importer`: 'mrms_grib' -- The name of the importer for the MRMS data.\n", - "- `importer_kwargs`: {} -- Extra options provided to the importer. None in this example.\n", - "- `path_fmt`: '%Y/%m/%d' -- The folder structure in which the files are stored. Here, year/month/day/filename.\n", - "- `root_path`: '/content/pysteps_data/mrms' -- The root path of the MRMS-data.\n", - "- `timestep`: 2 -- The temporal interval of the (radar) rainfall data\n", - "\n", - "Note that the default `timestep` parameter is 2 minutes, which corresponds to the time interval at which the MRMS product is available.\n", - "\n", - "## Load the MRMS example data\n", - "\n", - "Now that we have installed the example data, let's import the example MRMS dataset using the [load_dataset()](https://pysteps.readthedocs.io/en/latest/generated/pysteps.datasets.load_dataset.html) helper function from the `pysteps.datasets` module.\n", - "\n", - "We import 1 hour and 10 minutes of data, which corresponds to a sequence of 35 frames of 2-D precipitation composites.\n", - "Note that importing the data takes approximately 30 seconds." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "-8Q4e58VBnbl" - }, - "outputs": [], - "source": [ - "from pysteps.datasets import load_dataset\n", - "\n", - "# We'll import the time module to measure the time the importer needed\n", - "import time\n", - "\n", - "start_time = time.time()\n", - "\n", - "# Import the data\n", - "precipitation, metadata, timestep = load_dataset('mrms',frames=35) # precipitation in mm/h\n", - "\n", - "end_time = time.time()\n", - "\n", - "print(\"Precipitation data imported\")\n", - "print(\"Importing the data took \", (end_time - start_time), \" seconds\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "btiTxYYMBnby" - }, - "source": [ - "Let's have a look at the values returned by the `load_dataset()` function. \n", - "\n", - "- `precipitation`: A numpy array with (time, latitude, longitude) dimensions.\n", - "- `metadata`: A dictionary with additional information (pixel sizes, map projections, etc.).\n", - "- `timestep`: Time separation between each sample (in minutes)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "WqUHbJ_qBnb3" - }, - "outputs": [], - "source": [ - "# Let's inspect the shape of the imported data array\n", - "precipitation.shape" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xa8woT0ABncD" - }, - "source": [ - "Note that the shape of the precipitation is 4 times smaller than the raw MRMS data (3500 x 7000).\n", - "The `load_dataset()` function uses the default parameters from `importers` to read the data. By default, the MRMS importer upscales the data 4x. That is, from ~1km resolution to ~4km. 
It also uses single precision to reduce the memory requirements.\n", - "Thanks to the upscaling, the memory footprint of this example dataset is ~200Mb instead of the 3.1Gb of the raw (3500 x 7000) data. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "22O2YXrfBncG" - }, - "outputs": [], - "source": [ - "timestep # In minutes" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "J8_4hwcXBncT" - }, - "outputs": [], - "source": [ - "pprint(metadata)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "uQREORtJBnch" - }, - "source": [ - "# Time to make a nowcast\n", - "\n", - "So far, we have 1 hour and 10 minutes of precipitation images, separated 2 minutes from each other.\n", - "But, how do we use that data to run a precipitation forecast? \n", - "\n", - "A simple way is by extrapolating the precipitation field, assuming it will continue to move as observed in the recent past, and without changes in intensity. This is commonly known as *Lagrangian persistence*.\n", - "\n", - "The first step to run our nowcast based on Lagrangian persistence, is the estimation of the motion field from a sequence of past precipitation observations.\n", - "We use the Lucas-Kanade (LK) optical flow method implemented in pysteps.\n", - "This method follows a local tracking approach that relies on the OpenCV package.\n", - "Local features are tracked in a sequence of two or more radar images.\n", - "The scheme includes a final interpolation step to produce a smooth field of motion vectors.\n", - "Other optical flow methods are also available in pysteps. 
\n", - "Check the full list [here](https://pysteps.readthedocs.io/en/latest/pysteps_reference/motion.html).\n", - "\n", - "Now let's use the first 5 precipitation images (10 min) to estimate the motion field of the radar pattern and the remaining 30 images (1h) to evaluate the quality of our forecast." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "jcb2Sf6xBnck" - }, - "outputs": [], - "source": [ - "# precipitation[0:5] -> Used to find motion (past data). Let's call it training precip.\n", - "train_precip = precipitation[0:5]\n", - "\n", - "# precipitation[5:] -> Used to evaluate forecasts (future data, not available in \"real\" forecast situation)\n", - "# Let's call it observed precipitation because we will use it to compare our forecast with the actual observations.\n", - "observed_precip = precipitation[3:]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xt1TbB0RBncu" - }, - "source": [ - "Let's see what this 'training' precipitation event looks like using the [pysteps.visualization.plot_precip_field](https://pysteps.readthedocs.io/en/latest/generated/pysteps.visualization.precipfields.plot_precip_field.html) function." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "bmNYLo1jBncw" - }, - "outputs": [], - "source": [ - "from matplotlib import pyplot as plt\n", - "from pysteps.visualization import plot_precip_field\n", - "\n", - "# Set a figure size that looks nice ;)\n", - "plt.figure(figsize=(9, 5), dpi=100)\n", - "\n", - "# Plot the last rainfall field in the \"training\" data.\n", - "# train_precip[-1] -> Last available composite for nowcasting.\n", - "plot_precip_field(train_precip[-1], geodata=metadata, axis=\"off\")\n", - "plt.show() # (This line is actually not needed if you are using jupyter notebooks)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "NVRfJm11Bnc7" - }, - "source": [ - "Did you note the **shaded grey** regions? Those are the regions were no valid observations where available to estimate the precipitation (e.g., due to ground clutter, no radar coverage, or radar beam blockage).\n", - "Those regions need to be handled with care when we run our nowcast.\n", - "\n", - "### Data exploration\n", - "\n", - "Before we produce a forecast, let's explore the precipitation data. In particular, let's see how the distribution of the rain rate values looks." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "WER6RttPBnc9" - }, - "outputs": [], - "source": [ - "import numpy as np\n", - "\n", - "# Let's define some plotting default parameters for the next plots\n", - "# Note: This is not strictly needed.\n", - "plt.rc('figure', figsize=(4,4))\n", - "plt.rc('figure', dpi=100)\n", - "plt.rc('font', size=14) # controls default text sizes\n", - "plt.rc('axes', titlesize=14) # fontsize of the axes title\n", - "plt.rc('axes', labelsize=14) # fontsize of the x and y labels\n", - "plt.rc('xtick', labelsize=14) # fontsize of the tick labels\n", - "plt.rc('ytick', labelsize=14) # fontsize of the tick labels\n", - "\n", - "# Let's use the last available composite for nowcasting from the \"training\" data (train_precip[-1])\n", - "# Also, we will discard any invalid value.\n", - "valid_precip_values = train_precip[-1][~np.isnan(train_precip[-1])]\n", - "\n", - "# Plot the histogram\n", - "bins= np.concatenate( ([-0.01,0.01], np.linspace(1,40,39)))\n", - "plt.hist(valid_precip_values,bins=bins,log=True, edgecolor='black')\n", - "plt.autoscale(tight=True, axis='x')\n", - "plt.xlabel(\"Rainfall intensity [mm/h]\")\n", - "plt.ylabel(\"Counts\")\n", - "plt.title('Precipitation rain rate histogram in mm/h units')\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "O6TvIXS3BndH" - }, - "source": [ - "The histogram shows that rain rate values have a non-Gaussian and asymmetric distribution that is bounded at zero. 
Also, the probability of occurrence decays extremely fast with increasing rain rate values (note the logarithmic y-axis).\n", - "\n", - "\n", - "For better performance of the motion estimation algorithms, we can convert the rain rate values (in mm/h) to a more log-normal distribution of rain rates by applying the following logarithmic transformation:\n", - "\n", - "\\begin{equation}\n", - "R\\rightarrow\n", - "\\begin{cases}\n", - " 10\\log_{10}R, & \\text{if } R\\geq 0.1\\text{mm h$^{-1}$} \\\\\n", - " -15, & \\text{otherwise}\n", - "\\end{cases}\n", - "\\end{equation}\n", - "\n", - "The transformed precipitation corresponds to logarithmic rain rates in units of dBR. The value of −15 dBR is equivalent to assigning a rain rate of approximately 0.03 mm h$^{−1}$ to the zeros. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "hgA4PeapBndK" - }, - "outputs": [], - "source": [ - "from pysteps.utils import transformation\n", - "\n", - "# Log-transform the data to dBR. \n", - "# The threshold of 0.1 mm/h sets the fill value to -15 dBR.\n", - "train_precip_dbr, metadata_dbr = transformation.dB_transform(train_precip, metadata, \n", - " threshold=0.1, \n", - " zerovalue=-15.0)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Nx3VESBlBndU" - }, - "source": [ - "Let's inspect the resulting **transformed precipitation** distribution." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "rYS5hBIGBndX" - }, - "outputs": [], - "source": [ - "# Only use the valid data!\n", - "valid_precip_dbr = train_precip_dbr[-1][~np.isnan(train_precip_dbr[-1])]\n", - "\n", - "plt.figure(figsize=(4, 4), dpi=100)\n", - "\n", - "# Plot the histogram\n", - "counts, bins, _ = plt.hist(valid_precip_dbr, bins=40, log=True, edgecolor=\"black\")\n", - "plt.autoscale(tight=True, axis=\"x\")\n", - "plt.xlabel(\"Rainfall intensity [dB]\")\n", - "plt.ylabel(\"Counts\")\n", - "plt.title(\"Precipitation rain rate histogram in dB units\")\n", - "\n", - "# Let's add a lognormal distribution that fits that data to the plot.\n", - "import scipy\n", - "\n", - "bin_center = (bins[1:] + bins[:-1]) * 0.5\n", - "bin_width = np.diff(bins)\n", - "\n", - "# We will only use one composite to fit the function to speed up things.\n", - "# First, remove the no precip areas.\"\n", - "precip_to_fit = valid_precip_dbr[valid_precip_dbr > -15] \n", - "\n", - "fit_params = scipy.stats.lognorm.fit(precip_to_fit)\n", - "\n", - "fitted_pdf = scipy.stats.lognorm.pdf(bin_center, *fit_params)\n", - "\n", - "# Multiply pdf by the bin width and the total number of grid points: pdf -> total counts per bin.\n", - "fitted_pdf = fitted_pdf * bin_width * precip_to_fit.size\n", - "\n", - "# Plot the log-normal fit\n", - "plt.plot(bin_center, fitted_pdf, label=\"Fitted log-normal\")\n", - "plt.legend()\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "ZocO5zqUBndg" - }, - "source": [ - "That looks more like a log-normal distribution. Note the large peak at -15dB. That peak corresponds to \"zero\" (below threshold) precipitation. The jump with no data in between -15 and -10 dB is caused by the precision of the data, which we had set to 1 decimal. 
Hence, the lowest precipitation intensities (above zero) are 0.1 mm/h (= -10 dB).\n", - "\n", - "## Compute the nowcast\n", - "\n", - "These are the minimal steps to compute a short-term forecast using Lagrangian extrapolation of the precipitation patterns:\n", - " \n", - " 1. Estimate the precipitation motion field.\n", - " 1. Use the motion field to advect the most recent radar rainfall field and produce an extrapolation forecast.\n", - "\n", - "### Estimate the motion field\n", - "\n", - "Now we can estimate the motion field. Here we use a local feature-tracking approach (Lucas-Kanade).\n", - "However, check the other methods available in the [pysteps.motion](https://pysteps.readthedocs.io/en/latest/pysteps_reference/motion.html) module." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "mnACmX_0Bndi" - }, - "outputs": [], - "source": [ - "# Estimate the motion field with Lucas-Kanade\n", - "from pysteps import motion\n", - "from pysteps.visualization import plot_precip_field, quiver\n", - "\n", - "# Import the Lucas-Kanade optical flow algorithm\n", - "oflow_method = motion.get_method(\"LK\")\n", - "\n", - "# Estimate the motion field from the training data (in dBR)\n", - "motion_field = oflow_method(train_precip_dbr)\n", - "\n", - "## Plot the motion field.\n", - "# Use a figure size that looks nice ;)\n", - "plt.figure(figsize=(9, 5), dpi=100)\n", - "plt.title(\"Estimated motion field with the Lukas-Kanade algorithm\")\n", - "\n", - "# Plot the last rainfall field in the \"training\" data.\n", - "# Remember to use the mm/h precipitation data since plot_precip_field assumes \n", - "# mm/h by default. 
You can change this behavior using the \"units\" keyword.\n", - "plot_precip_field(train_precip[-1], geodata=metadata, axis=\"off\")\n", - "\n", - "# Plot the motion field vectors\n", - "quiver(motion_field, geodata=metadata, step=40)\n", - "\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "YObddRFCBnd1" - }, - "source": [ - "### Extrapolate the observations\n", - "\n", - "We have all ingredients to make an extrapolation nowcast now. \n", - "The final step is to advect the most recent radar rainfall field along the estimated motion field, producing an extrapolation forecast." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "erSLAzvNBnd3" - }, - "outputs": [], - "source": [ - "from pysteps import nowcasts\n", - "\n", - "start = time.time()\n", - "\n", - "# Extrapolate the last radar observation\n", - "extrapolate = nowcasts.get_method(\"extrapolation\")\n", - "\n", - "# You can use the precipitation observations directly in mm/h for this step.\n", - "last_observation = train_precip[-1]\n", - "\n", - "last_observation[~np.isfinite(last_observation)] = metadata[\"zerovalue\"]\n", - "\n", - "# We set the number of leadtimes (the length of the forecast horizon) to the \n", - "# length of the observed/verification preipitation data. 
In this way, we'll get\n", - "# a forecast that covers these time intervals.\n", - "n_leadtimes = observed_precip.shape[0]\n", - "\n", - "# Advect the most recent radar rainfall field and make the nowcast.\n", - "precip_forecast = extrapolate(train_precip[-1], motion_field, n_leadtimes)\n", - "\n", - "# This shows the shape of the resulting array with [time intervals, rows, cols]\n", - "print(\"The shape of the resulting array is: \", precip_forecast.shape)\n", - "\n", - "end = time.time()\n", - "print(\"Advecting the radar rainfall fields took \", (end - start), \" seconds\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "csy5s-yRBneB" - }, - "source": [ - "Let's inspect the last forecast time (hence this is the forecast rainfall an hour ahead)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "MUiS5-HPBneD" - }, - "outputs": [], - "source": [ - "# Plot precipitation at the end of the forecast period.\n", - "plt.figure(figsize=(9, 5), dpi=100)\n", - "plot_precip_field(precip_forecast[-1], geodata=metadata, axis=\"off\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "mQEseXvhBneI" - }, - "source": [ - "## Evaluate the forecast quality\n", - "\n", - "Many verification methods are already present in pysteps (see a complete list [here](https://pysteps.readthedocs.io/en/latest/pysteps_reference/verification.html)). We just have to import them. \n", - "\n", - "Here, we will evaluate our forecast using the Fractions Skill Score (FSS). \n", - "This metric provides an intuitive assessment of the dependency of forecast skill on spatial scale and intensity. 
This makes the FSS an ideal skill score for high-resolution precipitation forecasts.\n", - "\n", - "More precisely, the FSS is a neighborhood spatial verification method that directly compares the fractional coverage of events in windows surrounding the observations and forecasts.\n", - "The FSS varies from 0 (total mismatch) to 1 (perfect forecast).\n", - "For most situations, an FSS value of > 0.5 serves as a good indicator of a useful forecast ([Roberts and Lean, 2008](https://journals.ametsoc.org/doi/full/10.1175/2007MWR2123.1) and [Skok and Roberts, 2016](https://rmets.onlinelibrary.wiley.com/doi/full/10.1002/qj.2849)). " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "No3qBjqSBneK" - }, - "outputs": [], - "source": [ - "from pysteps import verification\n", - "\n", - "fss = verification.get_method(\"FSS\")\n", - "\n", - "# Compute fractions skill score (FSS) for all lead times for different scales using a 1 mm/h detection threshold.\n", - "scales = [\n", - " 2,\n", - " 4,\n", - " 8,\n", - " 16,\n", - " 32,\n", - " 64,\n", - "] # In grid points.\n", - "\n", - "scales_in_km = np.array(scales)*4\n", - "\n", - "# Set the threshold\n", - "thr = 1.0 # in mm/h\n", - "\n", - "score = []\n", - "\n", - "# Calculate the FSS for every lead time and all predefined scales.\n", - "for i in range(n_leadtimes):\n", - " score_ = []\n", - " for scale in scales:\n", - " score_.append(\n", - " fss(precip_forecast[i, :, :], observed_precip[i, :, :], thr, scale)\n", - " )\n", - " score.append(score_)\n", - "\n", - "# Now plot it\n", - "plt.figure()\n", - "x = np.arange(1, n_leadtimes+1) * timestep\n", - "plt.plot(x, score, lw=2.0)\n", - "plt.xlabel(\"Lead time [min]\")\n", - "plt.ylabel(\"FSS ( > 1.0 mm/h ) \")\n", - "plt.title(\"Fractions Skill Score\")\n", - "plt.legend(\n", - " scales_in_km, \n", - " title=\"Scale [km]\",\n", - " loc=\"center left\",\n", - " bbox_to_anchor=(1.01, 0.5),\n", - " 
bbox_transform=plt.gca().transAxes,\n", - ")\n", - "plt.autoscale(axis=\"x\", tight=True)\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As you can see, the FSS decreases with increasing lead time.\n", - "This is expected, as the forecasting quality slowly decreases when we forecast further ahead.\n", - "Upscaling the forecast, however, clearly leads to higher skill (up to longer ahead) compared to the forecast on the highest resolutions.\n", - "\n", - "## Concluding remarks\n", - "Congratulations, you have successfully made your first nowcast using the pysteps library!\n", - "This was a simple extrapolation-based nowcast and a lot more advanced options are possible too, see [the pysteps examples gallery](https://pysteps.readthedocs.io/en/latest/auto_examples/index.html) for some nice examples." - ] - } - ], - "metadata": { - "colab": { - "collapsed_sections": [], - "name": "my_first_nowcast.ipynb", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.2" - }, - "pycharm": { - "stem_cell": { - "cell_type": "raw", - "metadata": { - "collapsed": false - }, - "source": [] - } - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} \ No newline at end of file diff --git a/examples/optical_flow_methods_convergence.py b/examples/optical_flow_methods_convergence.py deleted file mode 100644 index 8dd7522fc..000000000 --- a/examples/optical_flow_methods_convergence.py +++ /dev/null @@ -1,375 +0,0 @@ -# coding: utf-8 - -""" -Optical flow methods convergence -================================ - -In this example we test the convergence of the optical flow methods available in -pySTEPS using idealized motion fields. 
- -To test the convergence, using an example precipitation field we will: - -- Read precipitation field from a file -- Morph the precipitation field using a given motion field (linear or rotor) to - generate a sequence of moving precipitation patterns. -- Using the available optical flow methods, retrieve the motion field from the - precipitation time sequence (synthetic precipitation observations). - -Let's first load the libraries that we will use. -""" -from datetime import datetime -import time - -import matplotlib.pyplot as plt -import numpy as np -from matplotlib.cm import get_cmap -from scipy.ndimage import uniform_filter - -import pysteps as stp -from pysteps import motion, io, rcparams -from pysteps.motion.vet import morph -from pysteps.visualization import plot_precip_field, quiver - -################################################################################ -# Load the reference precipitation data -# ------------------------------------- -# -# First, we will import a radar composite from the archive. -# You need the pysteps-data archive downloaded and the pystepsrc file -# configured with the data_source paths pointing to data folders. 
- - -# Selected case -date = datetime.strptime("201505151630", "%Y%m%d%H%M") -data_source = rcparams.data_sources["mch"] - -############################################################################### -# Load the data from the archive -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -root_path = data_source["root_path"] -path_fmt = data_source["path_fmt"] -fn_pattern = data_source["fn_pattern"] -fn_ext = data_source["fn_ext"] -importer_name = data_source["importer"] -importer_kwargs = data_source["importer_kwargs"] - -# Find the reference field in the archive -fns = io.archive.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=0 -) - -# Read the reference radar composite -importer = io.get_method(importer_name, "importer") -reference_field, quality, metadata = io.read_timeseries( - fns, importer, **importer_kwargs -) - -del quality # Not used - -reference_field = np.squeeze(reference_field) # Remove time dimension - -############################################################################### -# Preprocess the data -# ~~~~~~~~~~~~~~~~~~~ - -# Convert to mm/h -reference_field, metadata = stp.utils.to_rainrate(reference_field, metadata) - -# Mask invalid values -reference_field = np.ma.masked_invalid(reference_field) - -# Plot the reference precipitation -plot_precip_field(reference_field, title="Reference field") -plt.show() - -# Log-transform the data [dBR] -reference_field, metadata = stp.utils.dB_transform( - reference_field, metadata, threshold=0.1, zerovalue=-15.0 -) - -print("Precip. 
pattern shape: " + str(reference_field.shape)) - -# This suppress nan conversion warnings in plot functions -reference_field.data[reference_field.mask] = np.nan - - -################################################################################ -# Synthetic precipitation observations -# ------------------------------------ -# -# Now we need to create a series of precipitation fields by applying the ideal -# motion field to the reference precipitation field "n" times. -# -# To evaluate the accuracy of the computed_motion vectors, we will use -# a relative RMSE measure. -# Relative MSE = <(expected_motion - computed_motion)^2> / - -# Relative RMSE = Rel_RMSE = sqrt(Relative MSE) -# -# - Rel_RMSE = 0%: no error -# - Rel_RMSE = 100%: The retrieved motion field has an average error equal in -# magnitude to the motion field. -# -# Relative RMSE is computed over a region surrounding the precipitation -# field, were there is enough information to retrieve the motion field. -# The "precipitation region" includes the precipitation pattern plus a margin of -# approximately 20 grid points. - -################################################################################ -# Let's create a function to construct different motion fields. -def create_motion_field(input_precip, motion_type): - """ - Create idealized motion fields to be applied to the reference image. - - Parameters - ---------- - - input_precip: numpy array (lat, lon) - - motion_type: str - The supported motion fields are: - - - linear_x: (u=2, v=0) - - linear_y: (u=0, v=2) - - rotor: rotor field - - Returns - ------- - ideal_motion : numpy array (u, v) - """ - - # Create an imaginary grid on the image and create a motion field to be - # applied to the image. 
- ny, nx = input_precip.shape - - x_pos = np.arange(nx) - y_pos = np.arange(ny) - x, y = np.meshgrid(x_pos, y_pos, indexing="ij") - - ideal_motion = np.zeros((2, nx, ny)) - - if motion_type == "linear_x": - ideal_motion[0, :] = 2 # Motion along x - elif motion_type == "linear_y": - ideal_motion[1, :] = 2 # Motion along y - elif motion_type == "rotor": - x_mean = x.mean() - y_mean = y.mean() - norm = np.sqrt(x * x + y * y) - mask = norm != 0 - ideal_motion[0, mask] = 2 * (y - y_mean)[mask] / norm[mask] - ideal_motion[1, mask] = -2 * (x - x_mean)[mask] / norm[mask] - else: - raise ValueError("motion_type not supported.") - - # We need to swap the axes because the optical flow methods expect - # (lat, lon) or (y,x) indexing convention. - ideal_motion = ideal_motion.swapaxes(1, 2) - return ideal_motion - - -################################################################################ -# Let's create another function that construct the temporal series of -# precipitation observations. -def create_observations(input_precip, motion_type, num_times=9): - """ - Create synthetic precipitation observations by displacing the input field - using an ideal motion field. - - Parameters - ---------- - - input_precip: numpy array (lat, lon) - Input precipitation field. - - motion_type: str - The supported motion fields are: - - - linear_x: (u=2, v=0) - - linear_y: (u=0, v=2) - - rotor: rotor field - - num_times: int, optional - Length of the observations sequence. - - - Returns - ------- - synthetic_observations: numpy array - Sequence of observations - """ - - ideal_motion = create_motion_field(input_precip, motion_type) - - # The morph function expects (lon, lat) or (x, y) dimensions. - # Hence, we need to swap the lat,lon axes. - - # NOTE: The motion field passed to the morph function can't have any NaNs. - # Otherwise, it can result in a segmentation fault. 
- morphed_field, mask = morph( - input_precip.swapaxes(0, 1), ideal_motion.swapaxes(1, 2) - ) - - mask = np.array(mask, dtype=bool) - - synthetic_observations = np.ma.MaskedArray(morphed_field, mask=mask) - synthetic_observations = synthetic_observations[np.newaxis, :] - - for t in range(1, num_times): - morphed_field, mask = morph( - synthetic_observations[t - 1], ideal_motion.swapaxes(1, 2) - ) - mask = np.array(mask, dtype=bool) - - morphed_field = np.ma.MaskedArray( - morphed_field[np.newaxis, :], mask=mask[np.newaxis, :] - ) - - synthetic_observations = np.ma.concatenate( - [synthetic_observations, morphed_field], axis=0 - ) - - # Swap back to (lat, lon) - synthetic_observations = synthetic_observations.swapaxes(1, 2) - - synthetic_observations = np.ma.masked_invalid(synthetic_observations) - - synthetic_observations.data[np.ma.getmaskarray(synthetic_observations)] = 0 - - return ideal_motion, synthetic_observations - - -def plot_optflow_method_convergence(input_precip, optflow_method_name, motion_type): - """ - Test the convergence to the actual solution of the optical flow method used. - - Parameters - ---------- - - input_precip: numpy array (lat, lon) - Input precipitation field. 
- - optflow_method_name: str - Optical flow method name - - motion_type: str - The supported motion fields are: - - - linear_x: (u=2, v=0) - - linear_y: (u=0, v=2) - - rotor: rotor field - """ - - if optflow_method_name.lower() != "darts": - num_times = 2 - else: - num_times = 9 - - ideal_motion, precip_obs = create_observations( - input_precip, motion_type, num_times=num_times - ) - - oflow_method = motion.get_method(optflow_method_name) - - elapsed_time = time.perf_counter() - - computed_motion = oflow_method(precip_obs, verbose=False) - - print( - f"{optflow_method_name} computation time: " - f"{(time.perf_counter() - elapsed_time):.1f} [s]" - ) - - precip_obs, _ = stp.utils.dB_transform(precip_obs, inverse=True) - - precip_data = precip_obs.max(axis=0) - precip_data.data[precip_data.mask] = 0 - - precip_mask = (uniform_filter(precip_data, size=20) > 0.1) & ~precip_obs.mask.any( - axis=0 - ) - - cmap = get_cmap("jet").copy() - cmap.set_under("grey", alpha=0.25) - cmap.set_over("none") - - # Compare retrieved motion field with the ideal one - plt.figure(figsize=(9, 4)) - plt.subplot(1, 2, 1) - ax = plot_precip_field(precip_obs[0], title="Reference motion") - quiver(ideal_motion, step=25, ax=ax) - - plt.subplot(1, 2, 2) - ax = plot_precip_field(precip_obs[0], title="Retrieved motion") - quiver(computed_motion, step=25, ax=ax) - - # To evaluate the accuracy of the computed_motion vectors, we will use - # a relative RMSE measure. 
- # Relative MSE = < (expected_motion - computed_motion)^2 > / - # Relative RMSE = sqrt(Relative MSE) - - mse = ((ideal_motion - computed_motion)[:, precip_mask] ** 2).mean() - - rel_mse = mse / (ideal_motion[:, precip_mask] ** 2).mean() - plt.suptitle( - f"{optflow_method_name} " f"Relative RMSE: {np.sqrt(rel_mse) * 100:.2f}%" - ) - plt.show() - - -################################################################################ -# Lucas-Kanade -# ------------ -# -# Constant motion x-direction -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -plot_optflow_method_convergence(reference_field, "LucasKanade", "linear_x") - -################################################################################ -# Constant motion y-direction -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -plot_optflow_method_convergence(reference_field, "LucasKanade", "linear_y") - -################################################################################ -# Rotational motion -# ~~~~~~~~~~~~~~~~~ -plot_optflow_method_convergence(reference_field, "LucasKanade", "rotor") - -################################################################################ -# Variational Echo Tracking (VET) -# ------------------------------- -# -# Constant motion x-direction -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -plot_optflow_method_convergence(reference_field, "VET", "linear_x") - -################################################################################ -# Constant motion y-direction -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -plot_optflow_method_convergence(reference_field, "VET", "linear_y") - -################################################################################ -# Rotational motion -# ~~~~~~~~~~~~~~~~~ -plot_optflow_method_convergence(reference_field, "VET", "rotor") - -################################################################################ -# DARTS -# ----- -# -# Constant motion x-direction -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -plot_optflow_method_convergence(reference_field, "DARTS", "linear_x") - 
-################################################################################ -# Constant motion y-direction -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -plot_optflow_method_convergence(reference_field, "DARTS", "linear_y") - -################################################################################ -# Rotational motion -# ~~~~~~~~~~~~~~~~~ -plot_optflow_method_convergence(reference_field, "DARTS", "rotor") - -# sphinx_gallery_thumbnail_number = 5 diff --git a/examples/plot_cascade_decomposition.py b/examples/plot_cascade_decomposition.py deleted file mode 100644 index 2149716d0..000000000 --- a/examples/plot_cascade_decomposition.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/bin/env python -""" -Cascade decomposition -===================== - -This example script shows how to compute and plot the cascade decompositon of -a single radar precipitation field in pysteps. - -""" - -from matplotlib import cm, pyplot as plt -import numpy as np -import os -from pprint import pprint -from pysteps.cascade.bandpass_filters import filter_gaussian -from pysteps import io, rcparams -from pysteps.cascade.decomposition import decomposition_fft -from pysteps.utils import conversion, transformation -from pysteps.visualization import plot_precip_field - -############################################################################### -# Read precipitation field -# ------------------------ -# -# First thing, the radar composite is imported and transformed in units -# of dB. 
- -# Import the example radar composite -root_path = rcparams.data_sources["fmi"]["root_path"] -filename = os.path.join( - root_path, "20160928", "201609281600_fmi.radar.composite.lowest_FIN_SUOMI1.pgm.gz" -) -R, _, metadata = io.import_fmi_pgm(filename, gzipped=True) - -# Convert to rain rate -R, metadata = conversion.to_rainrate(R, metadata) - -# Nicely print the metadata -pprint(metadata) - -# Plot the rainfall field -plot_precip_field(R, geodata=metadata) -plt.show() - -# Log-transform the data -R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0) - -############################################################################### -# 2D Fourier spectrum -# -------------------- -# -# Compute and plot the 2D Fourier power spectrum of the precipitaton field. - -# Set Nans as the fill value -R[~np.isfinite(R)] = metadata["zerovalue"] - -# Compute the Fourier transform of the input field -F = abs(np.fft.fftshift(np.fft.fft2(R))) - -# Plot the power spectrum -M, N = F.shape -fig, ax = plt.subplots() -im = ax.imshow( - np.log(F ** 2), vmin=4, vmax=24, cmap=cm.jet, extent=(-N / 2, N / 2, -M / 2, M / 2) -) -cb = fig.colorbar(im) -ax.set_xlabel("Wavenumber $k_x$") -ax.set_ylabel("Wavenumber $k_y$") -ax.set_title("Log-power spectrum of R") -plt.show() - -############################################################################### -# Cascade decomposition -# --------------------- -# -# First, construct a set of Gaussian bandpass filters and plot the corresponding -# 1D filters. 
- -num_cascade_levels = 7 - -# Construct the Gaussian bandpass filters -filter = filter_gaussian(R.shape, num_cascade_levels) - -# Plot the bandpass filter weights -L = max(N, M) -fig, ax = plt.subplots() -for k in range(num_cascade_levels): - ax.semilogx( - np.linspace(0, L / 2, len(filter["weights_1d"][k, :])), - filter["weights_1d"][k, :], - "k-", - base=pow(0.5 * L / 3, 1.0 / (num_cascade_levels - 2)), - ) -ax.set_xlim(1, L / 2) -ax.set_ylim(0, 1) -xt = np.hstack([[1.0], filter["central_wavenumbers"][1:]]) -ax.set_xticks(xt) -ax.set_xticklabels(["%.2f" % cf for cf in filter["central_wavenumbers"]]) -ax.set_xlabel("Radial wavenumber $|\mathbf{k}|$") -ax.set_ylabel("Normalized weight") -ax.set_title("Bandpass filter weights") -plt.show() - -############################################################################### -# Finally, apply the 2D Gaussian filters to decompose the radar rainfall field -# into a set of cascade levels of decreasing spatial scale and plot them. - -decomp = decomposition_fft(R, filter, compute_stats=True) - -# Plot the normalized cascade levels -for i in range(num_cascade_levels): - mu = decomp["means"][i] - sigma = decomp["stds"][i] - decomp["cascade_levels"][i] = (decomp["cascade_levels"][i] - mu) / sigma - -fig, ax = plt.subplots(nrows=2, ncols=4) - -ax[0, 0].imshow(R, cmap=cm.RdBu_r, vmin=-5, vmax=5) -ax[0, 1].imshow(decomp["cascade_levels"][0], cmap=cm.RdBu_r, vmin=-3, vmax=3) -ax[0, 2].imshow(decomp["cascade_levels"][1], cmap=cm.RdBu_r, vmin=-3, vmax=3) -ax[0, 3].imshow(decomp["cascade_levels"][2], cmap=cm.RdBu_r, vmin=-3, vmax=3) -ax[1, 0].imshow(decomp["cascade_levels"][3], cmap=cm.RdBu_r, vmin=-3, vmax=3) -ax[1, 1].imshow(decomp["cascade_levels"][4], cmap=cm.RdBu_r, vmin=-3, vmax=3) -ax[1, 2].imshow(decomp["cascade_levels"][5], cmap=cm.RdBu_r, vmin=-3, vmax=3) -ax[1, 3].imshow(decomp["cascade_levels"][6], cmap=cm.RdBu_r, vmin=-3, vmax=3) - -ax[0, 0].set_title("Observed") -ax[0, 1].set_title("Level 1") -ax[0, 2].set_title("Level 
2") -ax[0, 3].set_title("Level 3") -ax[1, 0].set_title("Level 4") -ax[1, 1].set_title("Level 5") -ax[1, 2].set_title("Level 6") -ax[1, 3].set_title("Level 7") - -for i in range(2): - for j in range(4): - ax[i, j].set_xticks([]) - ax[i, j].set_yticks([]) -plt.tight_layout() -plt.show() - -# sphinx_gallery_thumbnail_number = 4 diff --git a/examples/plot_ensemble_verification.py b/examples/plot_ensemble_verification.py deleted file mode 100644 index 54d2500c8..000000000 --- a/examples/plot_ensemble_verification.py +++ /dev/null @@ -1,191 +0,0 @@ -#!/bin/env python -""" -Ensemble verification -===================== - -In this tutorial we perform a verification of a probabilistic extrapolation nowcast -using MeteoSwiss radar data. - -""" - -from datetime import datetime -import matplotlib.pyplot as plt -import numpy as np -from pprint import pprint -from pysteps import io, nowcasts, rcparams, verification -from pysteps.motion.lucaskanade import dense_lucaskanade -from pysteps.postprocessing import ensemblestats -from pysteps.utils import conversion, dimension, transformation -from pysteps.visualization import plot_precip_field - - -############################################################################### -# Read precipitation field -# ------------------------ -# -# First, we will import the sequence of MeteoSwiss ("mch") radar composites. -# You need the pysteps-data archive downloaded and the pystepsrc file -# configured with the data_source paths pointing to data folders. - -# Selected case -date = datetime.strptime("201607112100", "%Y%m%d%H%M") -data_source = rcparams.data_sources["mch"] -n_ens_members = 20 -n_leadtimes = 6 -seed = 24 - -############################################################################### -# Load the data from the archive -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# -# The data are upscaled to 2 km resolution to limit the memory usage and thus -# be able to afford a larger number of ensemble members. 
- -root_path = data_source["root_path"] -path_fmt = data_source["path_fmt"] -fn_pattern = data_source["fn_pattern"] -fn_ext = data_source["fn_ext"] -importer_name = data_source["importer"] -importer_kwargs = data_source["importer_kwargs"] -timestep = data_source["timestep"] - -# Find the radar files in the archive -fns = io.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_prev_files=2 -) - -# Read the data from the archive -importer = io.get_method(importer_name, "importer") -R, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs) - -# Convert to rain rate -R, metadata = conversion.to_rainrate(R, metadata) - -# Upscale data to 2 km -R, metadata = dimension.aggregate_fields_space(R, metadata, 2000) - -# Plot the rainfall field -plot_precip_field(R[-1, :, :], geodata=metadata) -plt.show() - -# Log-transform the data to unit of dBR, set the threshold to 0.1 mm/h, -# set the fill value to -15 dBR -R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0) - -# Set missing values with the fill value -R[~np.isfinite(R)] = -15.0 - -# Nicely print the metadata -pprint(metadata) - -############################################################################### -# Forecast -# -------- -# -# We use the STEPS approach to produce a ensemble nowcast of precipitation fields. 
- -# Estimate the motion field -V = dense_lucaskanade(R) - -# Perform the ensemble nowcast with STEPS -nowcast_method = nowcasts.get_method("steps") -R_f = nowcast_method( - R[-3:, :, :], - V, - n_leadtimes, - n_ens_members, - n_cascade_levels=6, - R_thr=-10.0, - kmperpixel=2.0, - timestep=timestep, - decomp_method="fft", - bandpass_filter_method="gaussian", - noise_method="nonparametric", - vel_pert_method="bps", - mask_method="incremental", - seed=seed, -) - -# Back-transform to rain rates -R_f = transformation.dB_transform(R_f, threshold=-10.0, inverse=True)[0] - -# Plot some of the realizations -fig = plt.figure() -for i in range(4): - ax = fig.add_subplot(221 + i) - ax.set_title("Member %02d" % i) - plot_precip_field(R_f[i, -1, :, :], geodata=metadata, colorbar=False, axis="off") -plt.tight_layout() -plt.show() - -############################################################################### -# Verification -# ------------ -# -# Pysteps includes a number of verification metrics to help users to analyze -# the general characteristics of the nowcasts in terms of consistency and -# quality (or goodness). -# Here, we will verify our probabilistic forecasts using the ROC curve, -# reliability diagrams, and rank histograms, as implemented in the verification -# module of pysteps. 
- -# Find the files containing the verifying observations -fns = io.archive.find_by_date( - date, - root_path, - path_fmt, - fn_pattern, - fn_ext, - timestep, - 0, - num_next_files=n_leadtimes, -) - -# Read the observations -R_o, _, metadata_o = io.read_timeseries(fns, importer, **importer_kwargs) - -# Convert to mm/h -R_o, metadata_o = conversion.to_rainrate(R_o, metadata_o) - -# Upscale data to 2 km -R_o, metadata_o = dimension.aggregate_fields_space(R_o, metadata_o, 2000) - -# Compute the verification for the last lead time - -# compute the exceedance probability of 0.1 mm/h from the ensemble -P_f = ensemblestats.excprob(R_f[:, -1, :, :], 0.1, ignore_nan=True) - -############################################################################### -# ROC curve -# ~~~~~~~~~ - -roc = verification.ROC_curve_init(0.1, n_prob_thrs=10) -verification.ROC_curve_accum(roc, P_f, R_o[-1, :, :]) -fig, ax = plt.subplots() -verification.plot_ROC(roc, ax, opt_prob_thr=True) -ax.set_title("ROC curve (+%i min)" % (n_leadtimes * timestep)) -plt.show() - -############################################################################### -# Reliability diagram -# ~~~~~~~~~~~~~~~~~~~ - -reldiag = verification.reldiag_init(0.1) -verification.reldiag_accum(reldiag, P_f, R_o[-1, :, :]) -fig, ax = plt.subplots() -verification.plot_reldiag(reldiag, ax) -ax.set_title("Reliability diagram (+%i min)" % (n_leadtimes * timestep)) -plt.show() - -############################################################################### -# Rank histogram -# ~~~~~~~~~~~~~~ - -rankhist = verification.rankhist_init(R_f.shape[0], 0.1) -verification.rankhist_accum(rankhist, R_f[:, -1, :, :], R_o[-1, :, :]) -fig, ax = plt.subplots() -verification.plot_rankhist(rankhist, ax) -ax.set_title("Rank histogram (+%i min)" % (n_leadtimes * timestep)) -plt.show() - -# sphinx_gallery_thumbnail_number = 5 diff --git a/examples/plot_extrapolation_nowcast.py b/examples/plot_extrapolation_nowcast.py deleted file mode 100644 index 
203a5909d..000000000 --- a/examples/plot_extrapolation_nowcast.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/env python -""" -Extrapolation nowcast -===================== - -This tutorial shows how to compute and plot an extrapolation nowcast using -Finnish radar data. - -""" - -from datetime import datetime -import matplotlib.pyplot as plt -import numpy as np -from pprint import pprint -from pysteps import io, motion, nowcasts, rcparams, verification -from pysteps.utils import conversion, transformation -from pysteps.visualization import plot_precip_field, quiver - -############################################################################### -# Read the radar input images -# --------------------------- -# -# First, we will import the sequence of radar composites. -# You need the pysteps-data archive downloaded and the pystepsrc file -# configured with the data_source paths pointing to data folders. - -# Selected case -date = datetime.strptime("201609281600", "%Y%m%d%H%M") -data_source = rcparams.data_sources["fmi"] -n_leadtimes = 12 - -############################################################################### -# Load the data from the archive -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -root_path = data_source["root_path"] -path_fmt = data_source["path_fmt"] -fn_pattern = data_source["fn_pattern"] -fn_ext = data_source["fn_ext"] -importer_name = data_source["importer"] -importer_kwargs = data_source["importer_kwargs"] -timestep = data_source["timestep"] - -# Find the input files from the archive -fns = io.archive.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_prev_files=2 -) - -# Read the radar composites -importer = io.get_method(importer_name, "importer") -Z, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs) - -# Convert to rain rate -R, metadata = conversion.to_rainrate(Z, metadata) - -# Plot the rainfall field -plot_precip_field(R[-1, :, :], geodata=metadata) -plt.show() - -# Store the last frame for plotting it 
later later -R_ = R[-1, :, :].copy() - -# Log-transform the data to unit of dBR, set the threshold to 0.1 mm/h, -# set the fill value to -15 dBR -R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0) - -# Nicely print the metadata -pprint(metadata) - -############################################################################### -# Compute the nowcast -# ------------------- -# -# The extrapolation nowcast is based on the estimation of the motion field, -# which is here performed using a local tracking approach (Lucas-Kanade). -# The most recent radar rainfall field is then simply advected along this motion -# field in oder to produce an extrapolation forecast. - -# Estimate the motion field with Lucas-Kanade -oflow_method = motion.get_method("LK") -V = oflow_method(R[-3:, :, :]) - -# Extrapolate the last radar observation -extrapolate = nowcasts.get_method("extrapolation") -R[~np.isfinite(R)] = metadata["zerovalue"] -R_f = extrapolate(R[-1, :, :], V, n_leadtimes) - -# Back-transform to rain rate -R_f = transformation.dB_transform(R_f, threshold=-10.0, inverse=True)[0] - -# Plot the motion field -plot_precip_field(R_, geodata=metadata) -quiver(V, geodata=metadata, step=50) -plt.show() - -############################################################################### -# Verify with FSS -# --------------- -# -# The fractions skill score (FSS) provides an intuitive assessment of the -# dependency of skill on spatial scale and intensity, which makes it an ideal -# skill score for high-resolution precipitation forecasts. 
- -# Find observations in the data archive -fns = io.archive.find_by_date( - date, - root_path, - path_fmt, - fn_pattern, - fn_ext, - timestep, - num_prev_files=0, - num_next_files=n_leadtimes, -) -# Read the radar composites -R_o, _, metadata_o = io.read_timeseries(fns, importer, **importer_kwargs) -R_o, metadata_o = conversion.to_rainrate(R_o, metadata_o, 223.0, 1.53) - -# Compute fractions skill score (FSS) for all lead times, a set of scales and 1 mm/h -fss = verification.get_method("FSS") -scales = [2, 4, 8, 16, 32, 64, 128, 256, 512] -thr = 1.0 -score = [] -for i in range(n_leadtimes): - score_ = [] - for scale in scales: - score_.append(fss(R_f[i, :, :], R_o[i + 1, :, :], thr, scale)) - score.append(score_) - -plt.figure() -x = np.arange(1, n_leadtimes + 1) * timestep -plt.plot(x, score) -plt.legend(scales, title="Scale [km]") -plt.xlabel("Lead time [min]") -plt.ylabel("FSS ( > 1.0 mm/h ) ") -plt.title("Fractions skill score") -plt.show() - -# sphinx_gallery_thumbnail_number = 3 diff --git a/examples/plot_noise_generators.py b/examples/plot_noise_generators.py deleted file mode 100644 index dd392ca4f..000000000 --- a/examples/plot_noise_generators.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/bin/env python -""" -Generation of stochastic noise -============================== - -This example script shows how to run the stochastic noise field generators -included in pysteps. - -These noise fields are used as perturbation terms during an extrapolation -nowcast in order to represent the uncertainty in the evolution of the rainfall -field. 
-""" - -from matplotlib import cm, pyplot as plt -import numpy as np -import os -from pprint import pprint -from pysteps import io, rcparams -from pysteps.noise.fftgenerators import initialize_param_2d_fft_filter -from pysteps.noise.fftgenerators import initialize_nonparam_2d_fft_filter -from pysteps.noise.fftgenerators import generate_noise_2d_fft_filter -from pysteps.utils import conversion, rapsd, transformation -from pysteps.visualization import plot_precip_field, plot_spectrum1d - -############################################################################### -# Read precipitation field -# ------------------------ -# -# First thing, the radar composite is imported and transformed in units -# of dB. -# This image will be used to train the Fourier filters that are necessary to -# produce the fields of spatially correlated noise. - -# Import the example radar composite -root_path = rcparams.data_sources["mch"]["root_path"] -filename = os.path.join(root_path, "20160711", "AQC161932100V_00005.801.gif") -R, _, metadata = io.import_mch_gif(filename, product="AQC", unit="mm", accutime=5.0) - -# Convert to mm/h -R, metadata = conversion.to_rainrate(R, metadata) - -# Nicely print the metadata -pprint(metadata) - -# Plot the rainfall field -plot_precip_field(R, geodata=metadata) -plt.show() - -# Log-transform the data -R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0) - -# Assign the fill value to all the Nans -R[~np.isfinite(R)] = metadata["zerovalue"] - -############################################################################### -# Parametric filter -# ----------------- -# -# In the parametric approach, a power-law model is used to approximate the power -# spectral density (PSD) of a given rainfall field. 
-# -# The parametric model uses a piece-wise linear function with two spectral -# slopes (beta1 and beta2) and one breaking point - -# Fit the parametric PSD to the observation -Fp = initialize_param_2d_fft_filter(R) - -# Compute the observed and fitted 1D PSD -L = np.max(Fp["input_shape"]) -if L % 2 == 1: - wn = np.arange(0, int(L / 2) + 1) -else: - wn = np.arange(0, int(L / 2)) -R_, freq = rapsd(R, fft_method=np.fft, return_freq=True) -f = np.exp(Fp["model"](np.log(wn), *Fp["pars"])) - -# Extract the scaling break in km, beta1 and beta2 -w0 = L / np.exp(Fp["pars"][0]) -b1 = Fp["pars"][2] -b2 = Fp["pars"][3] - -# Plot the observed power spectrum and the model -fig, ax = plt.subplots() -plot_scales = [512, 256, 128, 64, 32, 16, 8, 4] -plot_spectrum1d( - freq, - R_, - x_units="km", - y_units="dBR", - color="k", - ax=ax, - label="Observed", - wavelength_ticks=plot_scales, -) -plot_spectrum1d( - freq, - f, - x_units="km", - y_units="dBR", - color="r", - ax=ax, - label="Fit", - wavelength_ticks=plot_scales, -) -plt.legend() -ax.set_title( - "Radially averaged log-power spectrum of R\n" - r"$\omega_0=%.0f km, \beta_1=%.1f, \beta_2=%.1f$" % (w0, b1, b2) -) -plt.show() - -############################################################################### -# Nonparametric filter -# -------------------- -# -# In the nonparametric approach, the Fourier filter is obtained directly -# from the power spectrum of the observed precipitation field R. - -Fnp = initialize_nonparam_2d_fft_filter(R) - -############################################################################### -# Noise generator -# --------------- -# -# The parametric and nonparametric filters obtained above can now be used -# to produce N realizations of random fields of prescribed power spectrum, -# hence with the same correlation structure as the initial rainfall field. 
- -seed = 42 -num_realizations = 3 - -# Generate noise -Np = [] -Nnp = [] -for k in range(num_realizations): - Np.append(generate_noise_2d_fft_filter(Fp, seed=seed + k)) - Nnp.append(generate_noise_2d_fft_filter(Fnp, seed=seed + k)) - -# Plot the generated noise fields - -fig, ax = plt.subplots(nrows=2, ncols=3) - -# parametric noise -ax[0, 0].imshow(Np[0], cmap=cm.RdBu_r, vmin=-3, vmax=3) -ax[0, 1].imshow(Np[1], cmap=cm.RdBu_r, vmin=-3, vmax=3) -ax[0, 2].imshow(Np[2], cmap=cm.RdBu_r, vmin=-3, vmax=3) - -# nonparametric noise -ax[1, 0].imshow(Nnp[0], cmap=cm.RdBu_r, vmin=-3, vmax=3) -ax[1, 1].imshow(Nnp[1], cmap=cm.RdBu_r, vmin=-3, vmax=3) -ax[1, 2].imshow(Nnp[2], cmap=cm.RdBu_r, vmin=-3, vmax=3) - -for i in range(2): - for j in range(3): - ax[i, j].set_xticks([]) - ax[i, j].set_yticks([]) -plt.tight_layout() -plt.show() - -############################################################################### -# The above figure highlights the main limitation of the parametric approach -# (top row), that is, the assumption of an isotropic power law scaling -# relationship, meaning that anisotropic structures such as rainfall bands -# cannot be represented. -# -# Instead, the nonparametric approach (bottom row) allows generating -# perturbation fields with anisotropic structures, but it also requires a -# larger sample size and is sensitive to the quality of the input data, e.g. -# the presence of residual clutter in the radar image. -# -# In addition, both techniques assume spatial stationarity of the covariance -# structure of the field. 
- -# sphinx_gallery_thumbnail_number = 3 diff --git a/examples/plot_optical_flow.py b/examples/plot_optical_flow.py deleted file mode 100644 index e1bc6035b..000000000 --- a/examples/plot_optical_flow.py +++ /dev/null @@ -1,144 +0,0 @@ -""" -Optical flow -============ - -This tutorial offers a short overview of the optical flow routines available in -pysteps and it will cover how to compute and plot the motion field from a -sequence of radar images. -""" - -from datetime import datetime -from pprint import pprint -import matplotlib.pyplot as plt -import numpy as np - -from pysteps import io, motion, rcparams -from pysteps.utils import conversion, transformation -from pysteps.visualization import plot_precip_field, quiver - -################################################################################ -# Read the radar input images -# --------------------------- -# -# First, we will import the sequence of radar composites. -# You need the pysteps-data archive downloaded and the pystepsrc file -# configured with the data_source paths pointing to data folders. 
- -# Selected case -date = datetime.strptime("201505151630", "%Y%m%d%H%M") -data_source = rcparams.data_sources["mch"] - -############################################################################### -# Load the data from the archive -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -root_path = data_source["root_path"] -path_fmt = data_source["path_fmt"] -fn_pattern = data_source["fn_pattern"] -fn_ext = data_source["fn_ext"] -importer_name = data_source["importer"] -importer_kwargs = data_source["importer_kwargs"] -timestep = data_source["timestep"] - -# Find the input files from the archive -fns = io.archive.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=9 -) - -# Read the radar composites -importer = io.get_method(importer_name, "importer") -R, quality, metadata = io.read_timeseries(fns, importer, **importer_kwargs) - -del quality # Not used - -############################################################################### -# Preprocess the data -# ~~~~~~~~~~~~~~~~~~~ - -# Convert to mm/h -R, metadata = conversion.to_rainrate(R, metadata) - -# Store the reference frame -R_ = R[-1, :, :].copy() - -# Log-transform the data [dBR] -R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0) - -# Nicely print the metadata -pprint(metadata) - -################################################################################ -# Lucas-Kanade (LK) -# ----------------- -# -# The Lucas-Kanade optical flow method implemented in pysteps is a local -# tracking approach that relies on the OpenCV package. -# Local features are tracked in a sequence of two or more radar images. The -# scheme includes a final interpolation step in order to produce a smooth -# field of motion vectors. 
- -oflow_method = motion.get_method("LK") -V1 = oflow_method(R[-3:, :, :]) - -# Plot the motion field on top of the reference frame -plot_precip_field(R_, geodata=metadata, title="LK") -quiver(V1, geodata=metadata, step=25) -plt.show() - -################################################################################ -# Variational echo tracking (VET) -# ------------------------------- -# -# This module implements the VET algorithm presented -# by Laroche and Zawadzki (1995) and used in the McGill Algorithm for -# Prediction by Lagrangian Extrapolation (MAPLE) described in -# Germann and Zawadzki (2002). -# The approach essentially consists of a global optimization routine that seeks -# at minimizing a cost function between the displaced and the reference image. - -oflow_method = motion.get_method("VET") -V2 = oflow_method(R[-3:, :, :]) - -# Plot the motion field -plot_precip_field(R_, geodata=metadata, title="VET") -quiver(V2, geodata=metadata, step=25) -plt.show() - -################################################################################ -# Dynamic and adaptive radar tracking of storms (DARTS) -# ----------------------------------------------------- -# -# DARTS uses a spectral approach to optical flow that is based on the discrete -# Fourier transform (DFT) of a temporal sequence of radar fields. -# The level of truncation of the DFT coefficients controls the degree of -# smoothness of the estimated motion field, allowing for an efficient -# motion estimation. DARTS requires a longer sequence of radar fields for -# estimating the motion, here we are going to use all the available 10 fields. 
- -oflow_method = motion.get_method("DARTS") -R[~np.isfinite(R)] = metadata["zerovalue"] -V3 = oflow_method(R) # needs longer training sequence - -# Plot the motion field -plot_precip_field(R_, geodata=metadata, title="DARTS") -quiver(V3, geodata=metadata, step=25) -plt.show() - -################################################################################ -# Anisotropic diffusion method (Proesmans et al 1994) -# --------------------------------------------------- -# -# This module implements the anisotropic diffusion method presented in Proesmans -# et al. (1994), a robust optical flow technique which employs the notion of -# inconsitency during the solution of the optical flow equations. - -oflow_method = motion.get_method("proesmans") -R[~np.isfinite(R)] = metadata["zerovalue"] -V4 = oflow_method(R[-2:, :, :]) - -# Plot the motion field -plot_precip_field(R_, geodata=metadata, title="Proesmans") -quiver(V4, geodata=metadata, step=25) -plt.show() - -# sphinx_gallery_thumbnail_number = 1 diff --git a/examples/plot_steps_nowcast.py b/examples/plot_steps_nowcast.py deleted file mode 100644 index 43b01a6b4..000000000 --- a/examples/plot_steps_nowcast.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/bin/env python -""" -STEPS nowcast -============= - -This tutorial shows how to compute and plot an ensemble nowcast using Swiss -radar data. 
- -""" - -import matplotlib.pyplot as plt -import numpy as np - -from datetime import datetime -from pprint import pprint -from pysteps import io, nowcasts, rcparams -from pysteps.motion.lucaskanade import dense_lucaskanade -from pysteps.postprocessing.ensemblestats import excprob -from pysteps.utils import conversion, dimension, transformation -from pysteps.visualization import plot_precip_field - -# Set nowcast parameters -n_ens_members = 20 -n_leadtimes = 6 -seed = 24 - -############################################################################### -# Read precipitation field -# ------------------------ -# -# First thing, the sequence of Swiss radar composites is imported, converted and -# transformed into units of dBR. - - -date = datetime.strptime("201701311200", "%Y%m%d%H%M") -data_source = "mch" - -# Load data source config -root_path = rcparams.data_sources[data_source]["root_path"] -path_fmt = rcparams.data_sources[data_source]["path_fmt"] -fn_pattern = rcparams.data_sources[data_source]["fn_pattern"] -fn_ext = rcparams.data_sources[data_source]["fn_ext"] -importer_name = rcparams.data_sources[data_source]["importer"] -importer_kwargs = rcparams.data_sources[data_source]["importer_kwargs"] -timestep = rcparams.data_sources[data_source]["timestep"] - -# Find the radar files in the archive -fns = io.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_prev_files=2 -) - -# Read the data from the archive -importer = io.get_method(importer_name, "importer") -R, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs) - -# Convert to rain rate -R, metadata = conversion.to_rainrate(R, metadata) - -# Upscale data to 2 km to limit memory usage -R, metadata = dimension.aggregate_fields_space(R, metadata, 2000) - -# Plot the rainfall field -plot_precip_field(R[-1, :, :], geodata=metadata) -plt.show() - -# Log-transform the data to unit of dBR, set the threshold to 0.1 mm/h, -# set the fill value to -15 dBR -R, metadata = 
transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0) - -# Set missing values with the fill value -R[~np.isfinite(R)] = -15.0 - -# Nicely print the metadata -pprint(metadata) - -############################################################################### -# Deterministic nowcast with S-PROG -# --------------------------------- -# -# First, the motiong field is estimated using a local tracking approach based -# on the Lucas-Kanade optical flow. -# The motion field can then be used to generate a deterministic nowcast with -# the S-PROG model, which implements a scale filtering appraoch in order to -# progressively remove the unpredictable spatial scales during the forecast. - -# Estimate the motion field -V = dense_lucaskanade(R) - -# The S-PROG nowcast -nowcast_method = nowcasts.get_method("sprog") -R_f = nowcast_method( - R[-3:, :, :], - V, - n_leadtimes, - n_cascade_levels=6, - R_thr=-10.0, -) - -# Back-transform to rain rate -R_f = transformation.dB_transform(R_f, threshold=-10.0, inverse=True)[0] - -# Plot the S-PROG forecast -plot_precip_field( - R_f[-1, :, :], - geodata=metadata, - title="S-PROG (+ %i min)" % (n_leadtimes * timestep), -) -plt.show() - -############################################################################### -# As we can see from the figure above, the forecast produced by S-PROG is a -# smooth field. In other words, the forecast variance is lower than the -# variance of the original observed field. -# However, certain applications demand that the forecast retain the same -# statistical properties of the observations. In such cases, the S-PROG -# forecasts are of limited use and a stochatic approach might be of more -# interest. 
- -############################################################################### -# Stochastic nowcast with STEPS -# ----------------------------- -# -# The S-PROG approach is extended to include a stochastic term which represents -# the variance associated to the unpredictable development of precipitation. This -# approach is known as STEPS (short-term ensemble prediction system). - -# The STEPS nowcast -nowcast_method = nowcasts.get_method("steps") -R_f = nowcast_method( - R[-3:, :, :], - V, - n_leadtimes, - n_ens_members, - n_cascade_levels=6, - R_thr=-10.0, - kmperpixel=2.0, - timestep=timestep, - noise_method="nonparametric", - vel_pert_method="bps", - mask_method="incremental", - seed=seed, -) - -# Back-transform to rain rates -R_f = transformation.dB_transform(R_f, threshold=-10.0, inverse=True)[0] - - -# Plot the ensemble mean -R_f_mean = np.mean(R_f[:, -1, :, :], axis=0) -plot_precip_field( - R_f_mean, - geodata=metadata, - title="Ensemble mean (+ %i min)" % (n_leadtimes * timestep), -) -plt.show() - -############################################################################### -# The mean of the ensemble displays similar properties as the S-PROG -# forecast seen above, although the degree of smoothing also depends on -# the ensemble size. In this sense, the S-PROG forecast can be seen as -# the mean of an ensemble of infinite size. - -# Plot some of the realizations -fig = plt.figure() -for i in range(4): - ax = fig.add_subplot(221 + i) - ax = plot_precip_field( - R_f[i, -1, :, :], geodata=metadata, colorbar=False, axis="off" - ) - ax.set_title("Member %02d" % i) -plt.tight_layout() -plt.show() - -############################################################################### -# As we can see from these two members of the ensemble, the stochastic forecast -# mantains the same variance as in the observed rainfall field. -# STEPS also includes a stochatic perturbation of the motion field in order -# to quantify the its uncertainty. 
- -############################################################################### -# Finally, it is possible to derive probabilities from our ensemble forecast. - -# Compute exceedence probabilities for a 0.5 mm/h threshold -P = excprob(R_f[:, -1, :, :], 0.5) - -# Plot the field of probabilities -plot_precip_field( - P, - geodata=metadata, - ptype="prob", - units="mm/h", - probthr=0.5, - title="Exceedence probability (+ %i min)" % (n_leadtimes * timestep), -) -plt.show() - -# sphinx_gallery_thumbnail_number = 5 diff --git a/examples/probability_forecast.py b/examples/probability_forecast.py deleted file mode 100644 index ee996c9ea..000000000 --- a/examples/probability_forecast.py +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/env python -""" -Probability forecasts -===================== - -This example script shows how to forecast the probability of exceeding an -intensity threshold. - -The method is based on the local Lagrangian approach described in Germann and -Zawadzki (2004). -""" - -import matplotlib.pyplot as plt -import numpy as np - -from pysteps.nowcasts.lagrangian_probability import forecast -from pysteps.visualization import plot_precip_field - -############################################################################### -# Numerical example -# ----------------- -# -# First, we use some dummy data to show the basic principle of this approach. -# The probability forecast is produced by sampling a spatial neighborhood that is -# increased as a function of lead time. As a result, the edges of -# the yellow square becomes more and more smooth as t increases. This represents -# the strong loss of predictability with lead time of any extrapolation nowcast. 
- -# parameters -precip = np.zeros((100, 100)) -precip[10:50, 10:50] = 1 -velocity = np.ones((2, *precip.shape)) -timesteps = [0, 2, 6, 12] -thr = 0.5 -slope = 1 # pixels / timestep - -# compute probability forecast -out = forecast(precip, velocity, timesteps, thr, slope=slope) -# plot -for n, frame in enumerate(out): - plt.subplot(2, 2, n + 1) - plt.imshow(frame, interpolation="nearest", vmin=0, vmax=1) - plt.title(f"t={timesteps[n]}") - plt.xticks([]) - plt.yticks([]) -plt.show() - -############################################################################### -# Real-data example -# ----------------- -# -# We now apply the same method to real data. We use a slope of 1 km / minute -# as suggested by Germann and Zawadzki (2004), meaning that after 30 minutes, -# the probabilities are computed by using all pixels within a neighborhood of 30 -# kilometers. - -from datetime import datetime - -from pysteps import io, rcparams, utils -from pysteps.motion.lucaskanade import dense_lucaskanade -from pysteps.verification import reldiag_init, reldiag_accum, plot_reldiag - -# data source -source = rcparams.data_sources["mch"] -root = rcparams.data_sources["mch"]["root_path"] -fmt = rcparams.data_sources["mch"]["path_fmt"] -pattern = rcparams.data_sources["mch"]["fn_pattern"] -ext = rcparams.data_sources["mch"]["fn_ext"] -timestep = rcparams.data_sources["mch"]["timestep"] -importer_name = rcparams.data_sources["mch"]["importer"] -importer_kwargs = rcparams.data_sources["mch"]["importer_kwargs"] - -# read precip field -date = datetime.strptime("201607112100", "%Y%m%d%H%M") -fns = io.find_by_date(date, root, fmt, pattern, ext, timestep, num_prev_files=2) -importer = io.get_method(importer_name, "importer") -precip, __, metadata = io.read_timeseries(fns, importer, **importer_kwargs) -precip, metadata = utils.to_rainrate(precip, metadata) -# precip[np.isnan(precip)] = 0 - -# motion -motion = dense_lucaskanade(precip) - -# parameters -nleadtimes = 6 -thr = 1 # mm / h -slope = 1 
* timestep # km / min - -# compute probability forecast -extrap_kwargs = dict(allow_nonfinite_values=True) -fct = forecast( - precip[-1], motion, nleadtimes, thr, slope=slope, extrap_kwargs=extrap_kwargs -) - -# plot -for n, frame in enumerate(fct): - plt.subplot(2, 3, n + 1) - plt.imshow(frame, interpolation="nearest", vmin=0, vmax=1) - plt.xticks([]) - plt.yticks([]) -plt.show() - -################################################################################ -# Let's plot one single leadtime in more detail using the pysteps visualization -# functionality. - -plt.close() -# Plot the field of probabilities -plot_precip_field( - fct[2], - geodata=metadata, - ptype="prob", - probthr=thr, - title="Exceedence probability (+ %i min)" % (nleadtimes * timestep), -) -plt.show() - -############################################################################### -# Verification -# ------------ - -# verifying observations -importer = io.get_method(importer_name, "importer") -fns = io.find_by_date( - date, root, fmt, pattern, ext, timestep, num_next_files=nleadtimes -) -obs, __, metadata = io.read_timeseries(fns, importer, **importer_kwargs) -obs, metadata = utils.to_rainrate(obs, metadata) -obs[np.isnan(obs)] = 0 - -# reliability diagram -reldiag = reldiag_init(thr) -reldiag_accum(reldiag, fct, obs[1:]) -fig, ax = plt.subplots() -plot_reldiag(reldiag, ax) -ax.set_title("Reliability diagram") -plt.show() - - -############################################################################### -# References -# ---------- -# Germann, U. and I. Zawadzki, 2004: -# Scale Dependence of the Predictability of Precipitation from Continental -# Radar Images. Part II: Probability Forecasts. -# Journal of Applied Meteorology, 43(1), 74-89. 
- -# sphinx_gallery_thumbnail_number = 3 diff --git a/examples/rainfarm_downscale.py b/examples/rainfarm_downscale.py deleted file mode 100644 index 1f6a635f1..000000000 --- a/examples/rainfarm_downscale.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/bin/env python -""" -Precipitation downscaling with RainFARM -======================================= - -This example script shows how to use the stochastic downscaling method RainFARM -available in pysteps. - -RainFARM is a downscaling algorithm for rainfall fields developed by Rebora et -al. (2006). The method can represent the realistic small-scale variability of the -downscaled precipitation field by means of Gaussian random fields. - -""" - -import matplotlib.pyplot as plt -import numpy as np -import os -from pprint import pprint - -from pysteps import io, rcparams -from pysteps.utils import aggregate_fields_space, square_domain, to_rainrate -from pysteps.downscaling import rainfarm -from pysteps.visualization import plot_precip_field - -############################################################################### -# Read the input data -# ------------------- -# -# As first step, we need to import the precipitation field that we are going -# to use in this example. 
- -# Import the example radar composite -root_path = rcparams.data_sources["mch"]["root_path"] -filename = os.path.join(root_path, "20160711", "AQC161932100V_00005.801.gif") -precip, _, metadata = io.import_mch_gif( - filename, product="AQC", unit="mm", accutime=5.0 -) - -# Convert to mm/h -precip, metadata = to_rainrate(precip, metadata) - -# Reduce to a square domain -precip, metadata = square_domain(precip, metadata, "crop") - -# Nicely print the metadata -pprint(metadata) - -# Plot the original rainfall field -plot_precip_field(precip, geodata=metadata) -plt.show() - -# Assign the fill value to all the Nans -precip[~np.isfinite(precip)] = metadata["zerovalue"] - -############################################################################### -# Upscale the field -# ----------------- -# -# To test our downscaling method, we first need to upscale the original field to -# a lower resolution. We are going to use an upscaling factor of 16 x. - -upscaling_factor = 16 -upscale_to = metadata["xpixelsize"] * upscaling_factor # upscaling factor : 16 x -precip_lr, metadata_lr = aggregate_fields_space(precip, metadata, upscale_to) - -# Plot the upscaled rainfall field -plt.figure() -plot_precip_field(precip_lr, geodata=metadata_lr) - -############################################################################### -# Downscale the field -# ------------------- -# -# We can now use RainFARM to generate stochastic realizations of the downscaled -# precipitation field. - -fig = plt.figure(figsize=(5, 8)) -# Set the number of stochastic realizations -num_realizations = 5 - -# Per realization, generate a stochastically downscaled precipitation field -# and plot it. -# The first time, the spectral slope alpha needs to be estimated. To illustrate -# the sensitity of this parameter, we are going to plot some realizations with -# half or double the estimated slope. 
-alpha = None -for n in range(num_realizations): - - # Spectral slope estimated from the upscaled field - precip_hr, alpha = rainfarm.downscale( - precip_lr, alpha=alpha, ds_factor=upscaling_factor, return_alpha=True - ) - plt.subplot(num_realizations, 3, n * 3 + 2) - plot_precip_field(precip_hr, geodata=metadata, axis="off", colorbar=False) - if n == 0: - plt.title(f"alpha={alpha:.1f}") - - # Half the estimated slope - precip_hr = rainfarm.downscale( - precip_lr, alpha=alpha * 0.5, ds_factor=upscaling_factor - ) - plt.subplot(num_realizations, 3, n * 3 + 1) - plot_precip_field(precip_hr, geodata=metadata, axis="off", colorbar=False) - if n == 0: - plt.title(f"alpha={alpha * 0.5:.1f}") - - # Double the estimated slope - precip_hr = rainfarm.downscale( - precip_lr, alpha=alpha * 2, ds_factor=upscaling_factor - ) - plt.subplot(num_realizations, 3, n * 3 + 3) - plot_precip_field(precip_hr, geodata=metadata, axis="off", colorbar=False) - if n == 0: - plt.title(f"alpha={alpha * 2:.1f}") - - plt.subplots_adjust(wspace=0, hspace=0) - -plt.tight_layout() -plt.show() - -############################################################################### -# Remarks -# ------- -# -# Currently, the pysteps implementation of RainFARM only covers spatial downscaling. -# That is, it can improve the spatial resolution of a rainfall field. However, unlike -# the original algorithm from Rebora et al. (2006), it cannot downscale the temporal -# dimension. - - -############################################################################### -# References -# ---------- -# -# Rebora, N., L. Ferraris, J. von Hardenberg, and A. Provenzale, 2006: RainFARM: -# Rainfall downscaling by a filtered autoregressive model. J. Hydrometeor., 7, -# 724–738. 
- -# sphinx_gallery_thumbnail_number = 2 diff --git a/examples/thunderstorm_detection_and_tracking.py b/examples/thunderstorm_detection_and_tracking.py deleted file mode 100644 index aa27ef75f..000000000 --- a/examples/thunderstorm_detection_and_tracking.py +++ /dev/null @@ -1,166 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Thunderstorm Detection and Tracking - T-DaTing -============================================ - -This example shows how to use the thunderstorm DaTing module. The example is based on -MeteoSwiss radar data and uses the Cartesian composite of maximum reflectivity on a -1 km grid. All default values are tuned to this grid, but can be modified. -The first section demonstrates thunderstorm cell detection and how to plot contours. -The second section demonstrates detection and tracking in combination, -as well as how to plot the resulting tracks. -This module was implemented following the procedures used in the TRT Thunderstorms -Radar Tracking algorithm (:cite:`TRT2004`) used operationally at MeteoSwiss. -Modifications include advecting the identified thunderstorms with the optical flow -obtained from pysteps, as well as additional options in the thresholding. A detailed -description is published in Appendix A of :cite:`Feldmann2021`. - -References -.......... 
-:cite:`TRT2004` -:cite:`Feldmann2021` - -@author: feldmann-m -""" -################################################################################ -# Import all required functions -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -from datetime import datetime -from pprint import pprint - -import matplotlib.pyplot as plt -import numpy as np - -from pysteps import io, rcparams -from pysteps.feature import tstorm as tstorm_detect -from pysteps.tracking import tdating as tstorm_dating -from pysteps.utils import to_reflectivity -from pysteps.visualization import plot_precip_field, plot_track, plot_cart_contour - -################################################################################ -# Read the radar input images -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# -# A series of 20 files containing Swiss Cartesian gridded rain rates are imported. Since -# the algorithm is tuned to Swiss max-reflectivity data, the rain rates are transformed -# to reflectivity fields using the 'to_reflectivity' utility in pysteps.utils. - -# Select the input data -date = datetime.strptime("201607112100", "%Y%m%d%H%M") -data_source = rcparams.data_sources["mch"] - -# Extract corresponding settings -root_path = data_source["root_path"] -path_fmt = data_source["path_fmt"] -fn_pattern = data_source["fn_pattern"] -fn_ext = data_source["fn_ext"] -importer_name = data_source["importer"] -importer_kwargs = data_source["importer_kwargs"] -timestep = data_source["timestep"] - -# Load the data from the archive -fns = io.archive.find_by_date( - date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_next_files=20 -) -importer = io.get_method(importer_name, "importer") -R, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs) - -# Convert to reflectivity (it is possible to give the a- and b- parameters of the -# Marshall-Palmer relationship here: zr_a = and zr_b =). 
-Z, metadata = to_reflectivity(R, metadata) - -# Extract the list of timestamps -timelist = metadata["timestamps"] - -pprint(metadata) - -############################################################################### -# Example of thunderstorm identification in a single timestep -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# The function tstorm_detect.detection requires a 2-D input image, all further inputs are -# optional. - -input_image = Z[2, :, :].copy() -time = timelist[2] -cells_id, labels = tstorm_detect.detection(input_image, time=time) - -############################################################################### -# Properties of one of the identified cells: -print(cells_id.iloc[0]) - -############################################################################### -# Example of thunderstorm tracking over a timeseries -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# The tstorm-dating function requires the entire pre-loaded time series. -# The first two timesteps are required to initialize the -# flow prediction and are not used to compute tracks. 
- -track_list, cell_list, label_list = tstorm_dating.dating( - input_video=Z, timelist=timelist -) - -############################################################################### -# Plotting the results -# ~~~~~~~~~~~~~~~~~~~~ - -# Plot precipitation field -plot_precip_field(Z[2, :, :], geodata=metadata, units=metadata["unit"]) -plt.xlabel("Swiss easting [m]") -plt.ylabel("Swiss northing [m]") - -# Add the identified cells -plot_cart_contour(cells_id.cont, geodata=metadata) - -# Filter the tracks to only contain cells existing in this timestep -IDs = cells_id.ID.values -track_filt = [] -for track in track_list: - if np.unique(track.ID) in IDs: - track_filt.append(track) - -# Add their tracks -plot_track(track_filt, geodata=metadata) -plt.show() - -################################################################################ -# Evaluating temporal behaviour of cell -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# Maximum reflectivity of cells in time - -# Make an empty list -tlen = [] -# Get a list of colors that we will use for the plot -color = iter(plt.cm.ocean(np.linspace(0, 0.8, len(track_filt)))) -# Now, loop through all the tracks and plot the maximum reflectivity of the cell -# in time. -for track in track_filt: - plt.plot(np.arange(len(track)), track.max_ref, c=next(color)) - tlen.append(len(track)) -plt.xticks(np.arange(max(tlen) + 1), labels=np.arange(max(tlen) + 1) * 5) -plt.ylabel("Maximum reflectivity (dBZ)") -plt.xlabel("Time since cell detection (min)") -plt.legend(IDs, loc="lower right", ncol=3, title="Track number") -plt.show() - -############################################################################### -# The size of the thunderstorm cells in time - -# Make an empty list -tlen = [] -# Get a list of colors that we will use for the plot -color = iter(plt.cm.ocean(np.linspace(0, 0.8, len(track_filt)))) -# Now, loop through all the tracks and plot the cell size of the thunderstorms -# in time. 
-for track in track_filt: - size = [] - for ID, t in track.iterrows(): - size.append(len(t.x)) - plt.plot(np.arange(len(track)), size, c=next(color)) - tlen.append(len(track)) -plt.xticks(np.arange(max(tlen) + 1), labels=np.arange(max(tlen) + 1) * 5) -plt.ylabel("Thunderstorm cell size (pixels)") -plt.xlabel("Time since cell detection (min)") -plt.legend(IDs, loc="upper left", ncol=3, title="Track number") -plt.show() From baedcdd08ab77b9a0615492f0e218938b37b26d2 Mon Sep 17 00:00:00 2001 From: Andres Perez Hortal <16256571+aperezhortal@users.noreply.github.com> Date: Mon, 3 Jan 2022 21:41:05 -0500 Subject: [PATCH 2/9] Update examples repository target branch --- doc/source/conf.py | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 8fdec6a2a..63f861392 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -57,7 +57,7 @@ def get_version(): "examples_dirs": "../../examples", # path to your example scripts "gallery_dirs": "user_guide/examples_gallery", # path where to save gallery generated examples "filename_pattern": r"/*\.py", # Include all the files in the examples dir - 'plot_gallery': False, # Do not execute the examples + "plot_gallery": False, # Do not execute the examples } @@ -78,26 +78,23 @@ def set_root(): json.dump(rcparams, f, indent=4) -def pull_example_gallery_from_external_repo(): - global EXAMPLES_GALLERY_REPOSITORY +def get_branch_to_pull(): + try: + rtd_version = subprocess.check_output( + ["git", "describe", "--abbrev=0", "--tags"], universal_newlines=True + ).strip() + except subprocess.CalledProcessError: + # If we are not in a tag, use latest. + rtd_version = "latest" - # Default to the "latest" branch, containing the latest rendered notebooks. 
- rtd_version = os.environ.get("READTHEDOCS_VERSION", "latest") + return rtd_version - if rtd_version == "stable": - try: - rtd_version = subprocess.check_output( - ["git", "describe", "--abbrev=0", "--tags"], universal_newlines=True - ).strip() - except subprocess.CalledProcessError: - rtd_version = "latest" - print(f"\nRTD Version: {rtd_version}\n") +def pull_example_gallery_from_external_repo(branch): + global EXAMPLES_GALLERY_REPOSITORY with tempfile.TemporaryDirectory() as work_dir: - cmd = ( - f"git clone {EXAMPLES_GALLERY_REPOSITORY} --depth 1 --branch {rtd_version} {work_dir}" - ) + cmd = f"git clone {EXAMPLES_GALLERY_REPOSITORY} --depth 1 --branch {branch} {work_dir}" subprocess.check_output(cmd.split(" ")) examples_gallery_dir = DOCS_DIR / "source/examples_gallery" @@ -114,7 +111,7 @@ def pull_example_gallery_from_external_repo(): shutil.copytree(examples_dir_in_remote, examples_dir) -pull_example_gallery_from_external_repo() +pull_example_gallery_from_external_repo(get_branch_to_pull()) # -- Options for HTML output ---------------------------------------------- html_theme = "sphinx_rtd_theme" @@ -154,7 +151,7 @@ def pull_example_gallery_from_external_repo(): latex_elements = { "papersize": "a4paper", "pointsize": "10pt", - "preamble": latex_preamble + "preamble": latex_preamble, } latex_domain_indices = False From 88f9b50561f0f5b50f1f6f21a55cfc5e97fd8c00 Mon Sep 17 00:00:00 2001 From: Andres Perez Hortal <16256571+aperezhortal@users.noreply.github.com> Date: Sun, 9 Jan 2022 09:25:30 -0500 Subject: [PATCH 3/9] Remove references to jupyter notebooks from comments --- ...er_examples_rendering_in_external_repo.yml} | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) rename .github/workflows/{execute_tutorial_notebooks_in_external_repo.yml => trigger_examples_rendering_in_external_repo.yml} (82%) diff --git a/.github/workflows/execute_tutorial_notebooks_in_external_repo.yml 
b/.github/workflows/trigger_examples_rendering_in_external_repo.yml similarity index 82% rename from .github/workflows/execute_tutorial_notebooks_in_external_repo.yml rename to .github/workflows/trigger_examples_rendering_in_external_repo.yml index 229e4f666..5628bfe22 100644 --- a/.github/workflows/execute_tutorial_notebooks_in_external_repo.yml +++ b/.github/workflows/trigger_examples_rendering_in_external_repo.yml @@ -1,20 +1,18 @@ ######################################################################################## -# Trigger the notebooks' execution in the external repository. +# Trigger the examples rendering in the external repository. # -# If the "main" branch triggered this event, then the nb execution is triggered in the -# "main" branch, otherwise in the dev branch. +# If the "main" branch triggered this event, then the tasks for rendering the +# example gallery is triggered in the "main" branch, otherwise in the dev branch. # # Naming convention used in this workflow: # # "main repository": Project repository with the python package and the documentation. -# "linked repository": Repository where the example gallery of jupyter notebooks is -# stored. +# "linked repository": Repository where the example gallery is stored. ######################################################################################## -name: Trigger notebook execution in the external repository +name: Trigger example rendering tasks in the external repository env: - # URL for the external repository linked with the notebooks in this project. - LINKED_REPO: pysteps/pysteps_tutorials + LINKED_REPO: pysteps/pysteps-tutorials # External repository with the example gallery. 
THIS_REPO: pysteps/pysteps on: @@ -24,8 +22,8 @@ on: branches: [ main, dev ] jobs: - trigger_nb_execution: - name: Trigger the execution in the external repository with the notebooks + trigger_example_gallery_rendering: + name: Trigger example rendering tasks in the external repository # The triggering is done by pushing an empty commit to the linked repository. # The commit message contains the Hash of the main repository's commit that # trigger the event. From 2a27b3e6fd3f33f83ef48fdc7f1fd0c68058e6d0 Mon Sep 17 00:00:00 2001 From: Andres Perez Hortal <16256571+aperezhortal@users.noreply.github.com> Date: Sat, 8 Jan 2022 14:27:13 -0500 Subject: [PATCH 4/9] Update example gallery's URL in documentation --- README.rst | 4 ++-- doc/.gitignore | 2 +- doc/rebuild_docs.sh | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index 7edd867bc..e6a563c50 100644 --- a/README.rst +++ b/README.rst @@ -68,7 +68,7 @@ pySTEPS - Python framework for short-term ensemble prediction systems .. |gallery| image:: https://img.shields.io/badge/example-gallery-blue.svg :alt: pysteps example gallery - :target: https://pysteps.readthedocs.io/en/latest/auto_examples/index.html + :target: https://pysteps.readthedocs.io/en/latest/examples_gallery/index.html .. |latest| image:: https://img.shields.io/badge/docs-latest-blue.svg :alt: pysteps documentation @@ -121,7 +121,7 @@ Use You can have a look at the `gallery of examples`__ to get a better idea of how the library can be used. -__ https://pysteps.readthedocs.io/en/latest/auto_examples/index.html +__ https://pysteps.readthedocs.io/en/latest/examples_gallery/index.html For a more detailed description of the implemented functions, check the `pysteps reference page`__. 
diff --git a/doc/.gitignore b/doc/.gitignore index b106f217b..0016985c4 100644 --- a/doc/.gitignore +++ b/doc/.gitignore @@ -1,3 +1,3 @@ _build/ generated -auto_examples \ No newline at end of file +examples_gallery \ No newline at end of file diff --git a/doc/rebuild_docs.sh b/doc/rebuild_docs.sh index be18ed18f..b706f83b4 100755 --- a/doc/rebuild_docs.sh +++ b/doc/rebuild_docs.sh @@ -1,7 +1,7 @@ # Build documentation from scratch. rm -r source/generated &> /dev/null -rm -r source/auto_examples &> /dev/null +rm -r source/examples_gallery &> /dev/null make clean From 3780450cdb506cc539488a229b29f00e7bb1d1a5 Mon Sep 17 00:00:00 2001 From: Andres Perez Hortal <16256571+aperezhortal@users.noreply.github.com> Date: Sun, 9 Jan 2022 09:33:58 -0500 Subject: [PATCH 5/9] Place the task to find the destination branch in a separate step --- .../trigger_examples_rendering_in_external_repo.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/trigger_examples_rendering_in_external_repo.yml b/.github/workflows/trigger_examples_rendering_in_external_repo.yml index 5628bfe22..60e57bc97 100644 --- a/.github/workflows/trigger_examples_rendering_in_external_repo.yml +++ b/.github/workflows/trigger_examples_rendering_in_external_repo.yml @@ -35,7 +35,7 @@ jobs: steps: - name: Find the branch that trigger the event - id: get_dest_branch + id: find_triggering_branch run: | if [[ "${GITHUB_EVENT_NAME}" == "push" ]]; then event_branch=$(echo ${GITHUB_REF##*/}) @@ -46,6 +46,9 @@ jobs: fi echo "::set-output name=event_branch::${event_branch}" + - name: Set destination branch based on the triggering branch + id: set_destination_branch + run: | # We only push to latest or to dev. 
if [[ "${event_branch}" == "main" ]]; then echo "::set-output name=dest_branch::main" @@ -55,8 +58,8 @@ jobs: - name: Print debug information env: - DEST_BRANCH: ${{steps.get_dest_branch.outputs.dest_branch}} - EVENT_BRANCH: ${{steps.get_dest_branch.outputs.event_branch}} + DEST_BRANCH: ${{steps.set_destination_branch.outputs.dest_branch}} + EVENT_BRANCH: ${{steps.find_triggering_branch.outputs.event_branch}} run: | echo "EVENT_BRANCH=${EVENT_BRANCH}" echo "GITHUB_SHA=${GITHUB_SHA}" @@ -68,7 +71,7 @@ jobs: persist-credentials: false # Avoid using the GITHUB_TOKEN instead of the personal access token fetch-depth: 0 # Avoid errors pushing refs to the destination repository. repository: ${{ env.LINKED_REPO }} - ref: ${{steps.get_dest_branch.outputs.dest_branch}} + ref: ${{steps.set_destination_branch.outputs.dest_branch}} - name: Create empty commit in linked repo run: | @@ -83,4 +86,4 @@ jobs: with: repository: ${{ env.LINKED_REPO }} github_token: ${{ secrets.LINKED_TOKEN }} - branch: ${{steps.get_dest_branch.outputs.dest_branch}} + branch: ${{steps.set_destination_branch.outputs.dest_branch}} From b6bd7c05b8dd7f8d5a6894e2df803501ae006382 Mon Sep 17 00:00:00 2001 From: Andres Perez Hortal <16256571+aperezhortal@users.noreply.github.com> Date: Sun, 9 Jan 2022 09:48:06 -0500 Subject: [PATCH 6/9] Add the event branch name to the triggering commit message --- .../trigger_examples_rendering_in_external_repo.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/trigger_examples_rendering_in_external_repo.yml b/.github/workflows/trigger_examples_rendering_in_external_repo.yml index 60e57bc97..1eb804120 100644 --- a/.github/workflows/trigger_examples_rendering_in_external_repo.yml +++ b/.github/workflows/trigger_examples_rendering_in_external_repo.yml @@ -74,12 +74,15 @@ jobs: ref: ${{steps.set_destination_branch.outputs.dest_branch}} - name: Create empty commit in linked repo + env: + EVENT_BRANCH: 
${{steps.find_triggering_branch.outputs.event_branch}} run: | git config user.name 'github-actions[bot]' git config user.email 'github-actions[bot]@users.noreply.github.com' git commit --allow-empty \ - -m "Triggering nb execution from ${GITHUB_SHA::8}" \ - -m "https://github.com/${THIS_REPO}/commit/{$GITHUB_SHA}" + -m "Triggering example rendering tasks from ${GITHUB_SHA::8}" \ + -m "Branch: ${EVENT_BRANCH}" \ + -m "https://github.com/${THIS_REPO}/commit/{$GITHUB_SHA}" - name: Push the empty commit to trigger the workflow uses: ad-m/github-push-action@master From cfa5a7b682a1c88393f185c8dc2f886c4b507adf Mon Sep 17 00:00:00 2001 From: Andres Perez Hortal <16256571+aperezhortal@users.noreply.github.com> Date: Sun, 9 Jan 2022 10:07:34 -0500 Subject: [PATCH 7/9] Allow the external_gallery to trigger the rendering action --- .../trigger_examples_rendering_in_external_repo.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/trigger_examples_rendering_in_external_repo.yml b/.github/workflows/trigger_examples_rendering_in_external_repo.yml index 1eb804120..bd389db07 100644 --- a/.github/workflows/trigger_examples_rendering_in_external_repo.yml +++ b/.github/workflows/trigger_examples_rendering_in_external_repo.yml @@ -17,7 +17,7 @@ env: on: push: - branches: [ main, dev ] + branches: [ main, dev, external_gallery] pull_request: branches: [ main, dev ] @@ -34,8 +34,8 @@ jobs: shell: bash -l {0} steps: - - name: Find the branch that trigger the event - id: find_triggering_branch + - name: Get the name of the branch that trigger this event + id: get_triggering_branch run: | if [[ "${GITHUB_EVENT_NAME}" == "push" ]]; then event_branch=$(echo ${GITHUB_REF##*/}) @@ -46,7 +46,7 @@ jobs: fi echo "::set-output name=event_branch::${event_branch}" - - name: Set destination branch based on the triggering branch + - name: Set destination branch based on the triggering branch's name id: set_destination_branch run: | # We only push to 
latest or to dev. @@ -59,7 +59,7 @@ jobs: - name: Print debug information env: DEST_BRANCH: ${{steps.set_destination_branch.outputs.dest_branch}} - EVENT_BRANCH: ${{steps.find_triggering_branch.outputs.event_branch}} + EVENT_BRANCH: ${{steps.get_triggering_branch.outputs.event_branch}} run: | echo "EVENT_BRANCH=${EVENT_BRANCH}" echo "GITHUB_SHA=${GITHUB_SHA}" @@ -75,7 +75,7 @@ jobs: - name: Create empty commit in linked repo env: - EVENT_BRANCH: ${{steps.find_triggering_branch.outputs.event_branch}} + EVENT_BRANCH: ${{steps.get_triggering_branch.outputs.event_branch}} run: | git config user.name 'github-actions[bot]' git config user.email 'github-actions[bot]@users.noreply.github.com' From 080d46a9da9dac80662d683c02012c0c6d060bce Mon Sep 17 00:00:00 2001 From: Andres Perez Hortal <16256571+aperezhortal@users.noreply.github.com> Date: Sun, 9 Jan 2022 10:55:40 -0500 Subject: [PATCH 8/9] Improve variables and task names --- ...er_examples_rendering_in_external_repo.yml | 24 +++++++++---------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/.github/workflows/trigger_examples_rendering_in_external_repo.yml b/.github/workflows/trigger_examples_rendering_in_external_repo.yml index bd389db07..1ab602591 100644 --- a/.github/workflows/trigger_examples_rendering_in_external_repo.yml +++ b/.github/workflows/trigger_examples_rendering_in_external_repo.yml @@ -4,15 +4,13 @@ # If the "main" branch triggered this event, then the tasks for rendering the # example gallery is triggered in the "main" branch, otherwise in the dev branch. # -# Naming convention used in this workflow: -# -# "main repository": Project repository with the python package and the documentation. -# "linked repository": Repository where the example gallery is stored. +# This workflow requires the EXAMPLES_GALLERY_TOKEN secret with the access token +# for the repository containing the example gallery. 
######################################################################################## name: Trigger example rendering tasks in the external repository env: - LINKED_REPO: pysteps/pysteps-tutorials # External repository with the example gallery. + EXAMPLES_GALLERY_REPO: pysteps/pysteps-tutorials # External repository with the example gallery. THIS_REPO: pysteps/pysteps on: @@ -24,9 +22,9 @@ on: jobs: trigger_example_gallery_rendering: name: Trigger example rendering tasks in the external repository - # The triggering is done by pushing an empty commit to the linked repository. - # The commit message contains the Hash of the main repository's commit that - # trigger the event. + # The triggering is done by pushing an empty commit to the external repository + # with the example gallery. The commit message contains the Hash of the + # main repository's commit and the branch name that trigger the event. runs-on: "ubuntu-latest" defaults: @@ -65,15 +63,15 @@ jobs: echo "GITHUB_SHA=${GITHUB_SHA}" echo "DEST_BRANCH=${DEST_BRANCH}" - - name: Clone linked repository + - name: Clone external repository with the example gallery. uses: actions/checkout@v2 with: persist-credentials: false # Avoid using the GITHUB_TOKEN instead of the personal access token fetch-depth: 0 # Avoid errors pushing refs to the destination repository. 
- repository: ${{ env.LINKED_REPO }} + repository: ${{ env.EXAMPLES_GALLERY_REPO }} ref: ${{steps.set_destination_branch.outputs.dest_branch}} - - name: Create empty commit in linked repo + - name: Create empty commit in example gallery's repository env: EVENT_BRANCH: ${{steps.get_triggering_branch.outputs.event_branch}} run: | @@ -87,6 +85,6 @@ jobs: - name: Push the empty commit to trigger the workflow uses: ad-m/github-push-action@master with: - repository: ${{ env.LINKED_REPO }} - github_token: ${{ secrets.LINKED_TOKEN }} + repository: ${{ env.EXAMPLES_GALLERY_REPO }} + github_token: ${{ secrets.EXAMPLES_GALLERY_TOKEN }} branch: ${{steps.set_destination_branch.outputs.dest_branch}} From c4e4aeff907af3238b70e1975d42e7c5d9cc90d8 Mon Sep 17 00:00:00 2001 From: Andres Perez Hortal <16256571+aperezhortal@users.noreply.github.com> Date: Sun, 9 Jan 2022 12:52:27 -0500 Subject: [PATCH 9/9] Update commit message --- ...er_examples_rendering_in_external_repo.yml | 24 ++++++++++++------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/.github/workflows/trigger_examples_rendering_in_external_repo.yml b/.github/workflows/trigger_examples_rendering_in_external_repo.yml index 1ab602591..649fe06a8 100644 --- a/.github/workflows/trigger_examples_rendering_in_external_repo.yml +++ b/.github/workflows/trigger_examples_rendering_in_external_repo.yml @@ -1,8 +1,14 @@ ######################################################################################## # Trigger the examples rendering in the external repository. # -# If the "main" branch triggered this event, then the tasks for rendering the -# example gallery is triggered in the "main" branch, otherwise in the dev branch. 
+# This task push an empty commit to trigger the rendering of the example gallery +# in an external repo as follows: +# +# ------------------------------------------ +# | Local branch -> Example gallery branch | +# | master -> main | +# | other_branch -> dev | +# ------------------------------------------ # # This workflow requires the EXAMPLES_GALLERY_TOKEN secret with the access token # for the repository containing the example gallery. @@ -15,9 +21,9 @@ env: on: push: - branches: [ main, dev, external_gallery] - pull_request: - branches: [ main, dev ] + branches: [ master, dev, external_gallery] +# pull_request: +# branches: [ master, dev ] jobs: trigger_example_gallery_rendering: @@ -32,7 +38,7 @@ jobs: shell: bash -l {0} steps: - - name: Get the name of the branch that trigger this event + - name: Get the name of the branch triggering this event id: get_triggering_branch run: | if [[ "${GITHUB_EVENT_NAME}" == "push" ]]; then @@ -78,9 +84,9 @@ jobs: git config user.name 'github-actions[bot]' git config user.email 'github-actions[bot]@users.noreply.github.com' git commit --allow-empty \ - -m "Triggering example rendering tasks from ${GITHUB_SHA::8}" \ - -m "Branch: ${EVENT_BRANCH}" \ - -m "https://github.com/${THIS_REPO}/commit/{$GITHUB_SHA}" + -m "[TRIGGER] Trigger rendering task from ${GITHUB_SHA::8}" \ + -m "Branch:${EVENT_BRANCH}" \ + -m "https://github.com/${THIS_REPO}/commit/${GITHUB_SHA}" - name: Push the empty commit to trigger the workflow uses: ad-m/github-push-action@master