diff --git a/pysteps/decorators.py b/pysteps/decorators.py index 7c25f7866..8f6e42d80 100644 --- a/pysteps/decorators.py +++ b/pysteps/decorators.py @@ -9,7 +9,6 @@ .. autosummary:: :toctree: ../generated/ - postprocess_import check_input_frames prepare_interpolator memoize @@ -22,8 +21,6 @@ import numpy as np -from pysteps.xarray_helpers import convert_input_to_xarray_dataset - def _add_extra_kwrds_to_docstrings(target_func, extra_kwargs_doc_text): """ @@ -44,76 +41,6 @@ def _add_extra_kwrds_to_docstrings(target_func, extra_kwargs_doc_text): return target_func -def postprocess_import(fillna=np.nan, dtype="double"): - """ - Postprocess the imported precipitation data. - Operations: - - Allow type casting (dtype keyword) - - Set invalid or missing data to predefined value (fillna keyword) - This decorator replaces the text "{extra_kwargs}" in the function's - docstring with the documentation of the keywords used in the postprocessing. - The additional docstrings are added as "Other Parameters" in the importer function. - - Parameters - ---------- - dtype: str - Default data type for precipitation. Double precision by default. - fillna: float or np.nan - Default value used to represent the missing data ("No Coverage"). - By default, np.nan is used. - If the importer returns a MaskedArray, all the masked values are set to the - fillna value. If a numpy array is returned, all the invalid values (nan and inf) - are set to the fillna value. - - """ - - def _postprocess_import(importer): - @wraps(importer) - def _import_with_postprocessing(*args, **kwargs): - precip, quality, metadata = importer(*args, **kwargs) - - _dtype = kwargs.get("dtype", dtype) - - accepted_precisions = ["float32", "float64", "single", "double"] - if _dtype not in accepted_precisions: - raise ValueError( - "The selected precision does not correspond to a valid value." - "The accepted values are: " + str(accepted_precisions) - ) - - if isinstance(precip, np.ma.MaskedArray): - invalid_mask = np.ma.getmaskarray(precip) - precip.data[invalid_mask] = fillna - else: - # If plain numpy arrays are used, the importers should indicate - # the invalid values with np.nan. - _fillna = kwargs.get("fillna", fillna) - if _fillna is not np.nan: - mask = ~np.isfinite(precip) - precip[mask] = _fillna - - return convert_input_to_xarray_dataset( - precip.astype(_dtype), quality, metadata - ) - - extra_kwargs_doc = """ - Other Parameters - ---------------- - dtype: str - Data-type to which the array is cast. - Valid values: "float32", "float64", "single", and "double". - fillna: float or np.nan - Value used to represent the missing data ("No Coverage"). - By default, np.nan is used. 
- """ - - _add_extra_kwrds_to_docstrings(_import_with_postprocessing, extra_kwargs_doc) - - return _import_with_postprocessing - - return _postprocess_import - - def check_input_frames( minimum_input_frames=2, maximum_input_frames=np.inf, just_ndim=False ): diff --git a/pysteps/extrapolation/eulerian_persistence.py b/pysteps/extrapolation/eulerian_persistence.py index 7ada0e7e1..16e45ad75 100644 --- a/pysteps/extrapolation/eulerian_persistence.py +++ b/pysteps/extrapolation/eulerian_persistence.py @@ -1,47 +1,40 @@ import numpy as np -import xarray as xr -from pysteps.xarray_helpers import convert_output_to_xarray_dataset -def extrapolate(precip_dataset: xr.Dataset, timesteps, outval=np.nan, **kwargs): +def extrapolate(precip, velocity, timesteps, outval=np.nan, **kwargs): """ A dummy extrapolation method to apply Eulerian persistence to a two-dimensional precipitation field. The method returns the a sequence of the same initial field with no extrapolation applied (i.e. Eulerian persistence). - Parameters ---------- - precip_dataset : xarray.Dataset - xarray dataset containing the input precipitation field. All + precip : array-like + Array of shape (m,n) containing the input precipitation field. All values are required to be finite. + velocity : array-like + Not used by the method. timesteps : int or list of floats Number of time steps or a list of time steps. outval : float, optional Not used by the method. - Other Parameters ---------------- return_displacement : bool If True, return the total advection velocity (displacement) between the initial input field and the advected one integrated along the trajectory. Default : False - Returns ------- out : array or tuple If return_displacement=False, return a sequence of the same initial field of shape (num_timesteps,m,n). Otherwise, return a tuple containing the replicated fields and a (2,m,n) array of zeros. 
- References ---------- :cite:`GZ2002` - """ - del outval # Unused by _eulerian_persistence - precip_var = precip_dataset.attrs["precip_var"] - precip = precip_dataset[precip_var].values[-1] + del velocity, outval # Unused by _eulerian_persistence if isinstance(timesteps, int): num_timesteps = timesteps @@ -51,11 +44,8 @@ def extrapolate(precip_dataset: xr.Dataset, timesteps, outval=np.nan, **kwargs): return_displacement = kwargs.get("return_displacement", False) extrapolated_precip = np.repeat(precip[np.newaxis, :, :], num_timesteps, axis=0) - extrapolated_precip_dataset = convert_output_to_xarray_dataset( - precip_dataset, timesteps, extrapolated_precip - ) if not return_displacement: - return extrapolated_precip_dataset + return extrapolated_precip else: - return extrapolated_precip_dataset, np.zeros((2,) + extrapolated_precip.shape) + return extrapolated_precip, np.zeros((2,) + extrapolated_precip.shape) diff --git a/pysteps/io/importers.py b/pysteps/io/importers.py index f7c7fb289..5462cb4ca 100644 --- a/pysteps/io/importers.py +++ b/pysteps/io/importers.py @@ -203,16 +203,17 @@ import_dwd_radolan """ -import gzip -import os import array import datetime +import gzip +import os from functools import partial +from typing import Any import numpy as np +import numpy.typing as npt from matplotlib.pyplot import imread -from pysteps.decorators import postprocess_import from pysteps.exceptions import DataModelError, MissingOptionalDependency from pysteps.utils import aggregate_fields from pysteps.xarray_helpers import convert_input_to_xarray_dataset @@ -267,6 +268,26 @@ PYGRIB_IMPORTED = False +def _postprocess_precip(precip, fillna=np.nan, dtype="double") -> npt.NDArray[Any]: + accepted_precisions = ["float32", "float64", "single", "double"] + if dtype not in accepted_precisions: + raise ValueError( + "The selected precision does not correspond to a valid value." + "The accepted values are: " + str(accepted_precisions) + ) + + if isinstance(precip, np.ma.MaskedArray): + invalid_mask = np.ma.getmaskarray(precip) + precip.data[invalid_mask] = fillna + else: + # If plain numpy arrays are used, the importers should indicate + # the invalid values with np.nan. + if fillna is not np.nan: + mask = ~np.isfinite(precip) + precip[mask] = fillna + return precip.astype(dtype) + + def _check_coords_range(selected_range, coordinate, full_range): """ Check that the coordinates range arguments follow the expected pattern in @@ -354,7 +375,9 @@ def _get_threshold_value(precip): return np.nan -def import_mrms_grib(filename, extent=None, window_size=4, **kwargs): +def import_mrms_grib( + filename, extent=None, window_size=4, fillna=np.nan, dtype="float32" +): """ Importer for NSSL's Multi-Radar/Multi-Sensor System ([MRMS](https://www.nssl.noaa.gov/projects/mrms/)) rainrate product @@ -407,13 +430,10 @@ def import_mrms_grib(filename, extent=None, window_size=4, **kwargs): If an integer value is given, the same block shape is used for all the image dimensions. Default: window_size=4. - - Other Parameters - ---------------- - dtype: str + dtype: str, optional Data-type to which the array is cast. Valid values: "float32", "float64", "single", and "double". - fillna: float or np.nan + fillna: float or np.nan, optional Value used to represent the missing data ("No Coverage"). By default, np.nan is used. @@ -428,33 +448,6 @@ def import_mrms_grib(filename, extent=None, window_size=4, **kwargs): metadata: dict Associated metadata (pixel sizes, map projections, etc.). 
""" - dataset = _import_mrms_grib(filename, extent, window_size, **kwargs) - # Create a function with default arguments for aggregate_fields - block_reduce = partial(aggregate_fields, method="mean", trim=True) - - if window_size != (1, 1): - # Downscale data - precip_var = dataset.attrs["precip_var"] - # block_reduce does not handle nan values - if "fillna" in kwargs: - no_data_mask = dataset[precip_var].values == kwargs["fillna"] - else: - no_data_mask = np.isnan(dataset[precip_var].values) - dataset[precip_var].data[no_data_mask] = 0 - dataset["no_data_mask"] = (("y", "x"), no_data_mask) - dataset = block_reduce(dataset, window_size, dim=("y", "x")) - - # Consider that if a single invalid observation is located in the block, - # then mark that value as invalid. - no_data_mask = dataset.no_data_mask.values == 1.0 - dataset = dataset.drop_vars("no_data_mask") - - return dataset - - -@postprocess_import(dtype="float32") -def _import_mrms_grib(filename, extent=None, window_size=4, **kwargs): - del kwargs if not PYGRIB_IMPORTED: raise MissingOptionalDependency( @@ -464,7 +457,7 @@ def _import_mrms_grib(filename, extent=None, window_size=4, **kwargs): try: grib_file = pygrib.open(filename) except OSError: - raise OSError(f"Error opening NCEP's MRMS file. " f"File Not Found: {filename}") + raise OSError(f"Error opening NCEP's MRMS file. File Not Found: {filename}") if isinstance(window_size, int): window_size = (window_size, window_size) @@ -550,12 +543,31 @@ def _import_mrms_grib(filename, extent=None, window_size=4, **kwargs): y2=y2 + ysize / 2, cartesian_unit="degrees", ) + precip = _postprocess_precip(precip, fillna, dtype) - return convert_input_to_xarray_dataset(precip, None, metadata) + precip_dataset = convert_input_to_xarray_dataset(precip, None, metadata) + if window_size != (1, 1): + # Create a function with default arguments for aggregate_fields + block_reduce = partial(aggregate_fields, method="mean", trim=True) + # Downscale data + precip_var = precip_dataset.attrs["precip_var"] + # block_reduce does not handle nan values + no_data_mask = np.isnan(precip_dataset[precip_var].values) + precip_dataset[precip_var].data[no_data_mask] = 0 + precip_dataset["no_data_mask"] = (("y", "x"), no_data_mask) + precip_dataset = block_reduce(precip_dataset, window_size, dim=("y", "x")) -@postprocess_import() -def import_bom_rf3(filename, **kwargs): + # Consider that if a single invalid observation is located in the block, + # then mark that value as invalid. + no_data_mask = precip_dataset.no_data_mask.values == 1.0 + precip_dataset = precip_dataset.drop_vars("no_data_mask") + precip_dataset[precip_var].data[no_data_mask] = fillna + + return precip_dataset + + +def import_bom_rf3(filename, gzipped=False, fillna=np.nan, dtype="double"): """ Import a NetCDF radar rainfall product from the BoM Rainfields3. @@ -563,8 +575,12 @@ def import_bom_rf3(filename, **kwargs): ---------- filename: str Name of the file to import. - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. 
Returns ------- @@ -585,7 +601,9 @@ def import_bom_rf3(filename, **kwargs): metadata["zerovalue"] = np.nanmin(precip) metadata["threshold"] = _get_threshold_value(precip) - return precip, None, metadata + precip = _postprocess_precip(precip, fillna, dtype) + + return convert_input_to_xarray_dataset(precip, None, metadata) def _import_bom_rf3_data(filename): @@ -682,8 +700,7 @@ def _import_bom_rf3_geodata(ds_rainfall): return geodata -@postprocess_import() -def import_fmi_geotiff(filename, **kwargs): +def import_fmi_geotiff(filename, fillna=np.nan, dtype="double"): """ Import a reflectivity field (dBZ) from an FMI GeoTIFF file. @@ -691,8 +708,12 @@ def import_fmi_geotiff(filename, **kwargs): ---------- filename: str Name of the file to import. - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. Returns ------- @@ -747,11 +768,12 @@ def import_fmi_geotiff(filename, **kwargs): metadata["zr_a"] = 223.0 metadata["zr_b"] = 1.53 - return precip, None, metadata + precip = _postprocess_precip(precip, fillna, dtype) + + return convert_input_to_xarray_dataset(precip, None, metadata) -@postprocess_import() -def import_fmi_pgm(filename, gzipped=False, **kwargs): +def import_fmi_pgm(filename, gzipped=False, fillna=np.nan, dtype="double"): """ Import a 8-bit PGM radar reflectivity composite from the FMI archive. @@ -761,8 +783,12 @@ def import_fmi_pgm(filename, gzipped=False, **kwargs): Name of the file to import. gzipped: bool If True, the input file is treated as a compressed gzip file. - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. Returns ------- @@ -806,7 +832,9 @@ def import_fmi_pgm(filename, gzipped=False, **kwargs): metadata["zr_a"] = 223.0 metadata["zr_b"] = 1.53 - return precip, None, metadata + precip = _postprocess_precip(precip, fillna, dtype) + + return convert_input_to_xarray_dataset(precip, None, metadata) def _import_fmi_pgm_geodata(metadata): @@ -877,13 +905,8 @@ def _import_fmi_pgm_metadata(filename, gzipped=False): return metadata -@postprocess_import() def import_knmi_hdf5( - filename, - qty="ACRR", - accutime=5.0, - pixelsize=1000.0, - **kwargs, + filename, qty="ACRR", accutime=5.0, pixelsize=1000.0, fillna=np.nan, dtype="double" ): """ Import a precipitation or reflectivity field (and optionally the quality @@ -905,8 +928,12 @@ def import_knmi_hdf5( The pixel size of a raster cell in meters. The default value for the KNMI datasets is a 1000 m grid cell size, but datasets with 2400 m pixel size are also available. - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. 
Returns ------- @@ -1030,11 +1057,12 @@ def import_knmi_hdf5( f.close() - return precip, None, metadata + precip = _postprocess_precip(precip, fillna, dtype) + return convert_input_to_xarray_dataset(precip, None, metadata) -@postprocess_import() -def import_mch_gif(filename, product, unit, accutime, **kwargs): + +def import_mch_gif(filename, product, unit, accutime, fillna=np.nan, dtype="double"): """ Import a 8-bit gif radar reflectivity composite from the MeteoSwiss archive. @@ -1063,8 +1091,12 @@ def import_mch_gif(filename, product, unit, accutime, **kwargs): the physical unit of the data accutime: float the accumulation time in minutes of the data - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. Returns ------- @@ -1160,11 +1192,12 @@ def import_mch_gif(filename, product, unit, accutime, **kwargs): metadata["zr_a"] = 316.0 metadata["zr_b"] = 1.5 - return precip, None, metadata + precip = _postprocess_precip(precip, fillna, dtype) + return convert_input_to_xarray_dataset(precip, None, metadata) -@postprocess_import() -def import_mch_hdf5(filename, qty="RATE", **kwargs): + +def import_mch_hdf5(filename, qty="RATE", fillna=np.nan, dtype="double"): """ Import a precipitation field (and optionally the quality field) from a MeteoSwiss HDF5 file conforming to the ODIM specification. @@ -1178,8 +1211,12 @@ def import_mch_hdf5(filename, qty="RATE", **kwargs): are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value is 'RATE'. - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. Returns ------- @@ -1295,7 +1332,9 @@ def import_mch_hdf5(filename, qty="RATE", **kwargs): f.close() - return precip, quality, metadata + precip = _postprocess_precip(precip, fillna, dtype) + + return convert_input_to_xarray_dataset(precip, None, metadata) def _read_mch_hdf5_what_group(whatgrp): @@ -1308,7 +1347,6 @@ def _read_mch_hdf5_what_group(whatgrp): return qty, gain, offset, nodata, undetect -@postprocess_import() def import_mch_metranet(filename, product, unit, accutime): """ Import a 8-bit bin radar reflectivity composite from the MeteoSwiss @@ -1338,8 +1376,12 @@ def import_mch_metranet(filename, product, unit, accutime): the physical unit of the data accutime: float the accumulation time in minutes of the data - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. 
Returns ------- @@ -1370,7 +1412,9 @@ def import_mch_metranet(filename, product, unit, accutime): metadata["zr_a"] = 316.0 metadata["zr_b"] = 1.5 - return precip, None, metadata + precip = _postprocess_precip(precip, fillna, dtype) + + return convert_input_to_xarray_dataset(precip, None, metadata) def _import_mch_geodata(): @@ -1408,8 +1452,7 @@ def _import_mch_geodata(): return geodata -@postprocess_import() -def import_odim_hdf5(filename, qty="RATE", **kwargs): +def import_odim_hdf5(filename, qty="RATE", fillna=np.nan, dtype="double", **kwargs): """ Import a precipitation field (and optionally the quality field) from a HDF5 file conforming to the ODIM specification. @@ -1426,8 +1469,12 @@ def import_odim_hdf5(filename, qty="RATE", **kwargs): are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value is 'RATE'. - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. Returns ------- @@ -1629,17 +1676,19 @@ def import_odim_hdf5(filename, qty="RATE", **kwargs): f.close() - return precip, quality, metadata + precip = _postprocess_precip(precip, fillna, dtype) + return convert_input_to_xarray_dataset(precip, None, metadata) -def import_opera_hdf5(filename, qty="RATE", **kwargs): + +def import_opera_hdf5(filename, qty="RATE", fillna=np.nan, dtype="double"): """ Wrapper to :py:func:`pysteps.io.importers.import_odim_hdf5` to maintain backward compatibility with previous pysteps versions. **Important:** Use :py:func:`~pysteps.io.importers.import_odim_hdf5` instead. """ - return import_odim_hdf5(filename, qty=qty, **kwargs) + return import_odim_hdf5(filename, qty, fillna, dtype) def _read_opera_hdf5_what_group(whatgrp): @@ -1652,8 +1701,9 @@ def _read_opera_hdf5_what_group(whatgrp): return qty, gain, offset, nodata, undetect -@postprocess_import() -def import_saf_crri(filename, extent=None, **kwargs): +def import_saf_crri( + filename, extent=None, gzipped=False, fillna=np.nan, dtype="double" +): """ Import a NetCDF radar rainfall product from the Convective Rainfall Rate Intensity (CRRI) product from the Satellite Application Facilities (SAF). @@ -1668,8 +1718,12 @@ def import_saf_crri(filename, extent=None, **kwargs): extent: scalars (left, right, bottom, top), optional The spatial extent specified in data coordinates. If None, the full extent is imported. - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. 
Returns ------- @@ -1725,7 +1779,9 @@ def import_saf_crri(filename, extent=None, **kwargs): metadata["zerovalue"] = np.nanmin(precip) metadata["threshold"] = _get_threshold_value(precip) - return precip, quality, metadata + precip = _postprocess_precip(precip, fillna, dtype) + + return convert_input_to_xarray_dataset(precip, None, metadata) def _import_saf_crri_data(filename, idx_x=None, idx_y=None): @@ -1786,8 +1842,7 @@ def _import_saf_crri_geodata(filename): return geodata -@postprocess_import() -def import_dwd_hdf5(filename, qty="RATE", **kwargs): +def import_dwd_hdf5(filename, qty="RATE", fillna=np.nan, dtype="double", **kwargs): """ Import a DWD precipitation product field (and optionally the quality field) from a HDF5 file conforming to the ODIM specification @@ -1801,8 +1856,12 @@ def import_dwd_hdf5(filename, qty="RATE", **kwargs): are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value is 'RATE'. - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. Returns ------- @@ -1984,7 +2043,9 @@ def import_dwd_hdf5(filename, qty="RATE", **kwargs): f.close() - return precip, quality, metadata + precip = _postprocess_precip(precip, fillna, dtype) + + return convert_input_to_xarray_dataset(precip, None, metadata) def _read_hdf5_cont(f, d): @@ -2062,8 +2123,7 @@ def _get_whatgrp(d, g): return -@postprocess_import() -def import_dwd_radolan(filename, product_name): +def import_dwd_radolan(filename, product_name, fillna=np.nan, dtype="double"): """ Import a RADOLAN precipitation product from a binary file. @@ -2076,8 +2136,12 @@ def import_dwd_radolan(filename, product_name): https://www.dwd.de/DE/leistungen/radolan/radolan_info/ radolan_radvor_op_komposit_format_pdf.pdf for a detailed description. - - {extra_kwargs_doc} + dtype: str, optional + Data-type to which the array is cast. + Valid values: "float32", "float64", "single", and "double". + fillna: float or np.nan, optional + Value used to represent the missing data ("No Coverage"). + By default, np.nan is used. 
Returns ------- @@ -2160,7 +2224,9 @@ def import_dwd_radolan(filename, product_name): geodata = _import_dwd_geodata(product_name, dims) metadata = geodata - return data, None, metadata + precip = _postprocess_precip(precip, fillna, dtype) + + return convert_input_to_xarray_dataset(precip, None, metadata) def _identify_info_bits(data): diff --git a/pysteps/io/interface.py b/pysteps/io/interface.py index 58b684ea3..828d791fa 100644 --- a/pysteps/io/interface.py +++ b/pysteps/io/interface.py @@ -14,7 +14,6 @@ """ from importlib.metadata import entry_points -from pysteps.decorators import postprocess_import from pysteps.io import importers, exporters, interface from pprint import pprint @@ -57,8 +56,6 @@ def discover_importers(): importer_function_name = _importer.__name__ importer_short_name = importer_function_name.replace("import_", "") - _postprocess_kws = getattr(_importer, "postprocess_kws", dict()) - _importer = postprocess_import(**_postprocess_kws)(_importer) if importer_short_name not in _importer_methods: _importer_methods[importer_short_name] = _importer else: diff --git a/pysteps/io/nowcast_importers.py b/pysteps/io/nowcast_importers.py index f353ec75b..cc14c7cd9 100755 --- a/pysteps/io/nowcast_importers.py +++ b/pysteps/io/nowcast_importers.py @@ -70,8 +70,7 @@ import numpy as np -from pysteps.decorators import postprocess_import -from pysteps.exceptions import MissingOptionalDependency, DataModelError +from pysteps.exceptions import MissingOptionalDependency import xarray as xr try: diff --git a/pysteps/nowcasts/extrapolation.py b/pysteps/nowcasts/extrapolation.py index a70b6985c..6e45092d2 100644 --- a/pysteps/nowcasts/extrapolation.py +++ b/pysteps/nowcasts/extrapolation.py @@ -71,7 +71,7 @@ def forecast( dataset = dataset.copy(deep=True) precip_var = dataset.attrs["precip_var"] - precip = dataset[precip_var].values[0] + precip = dataset[precip_var].values[-1] velocity = np.stack([dataset["velocity_x"], dataset["velocity_y"]]) _check_inputs(precip, velocity, timesteps) diff --git a/pysteps/nowcasts/interface.py b/pysteps/nowcasts/interface.py index 2af2048c3..3b89ddab6 100644 --- a/pysteps/nowcasts/interface.py +++ b/pysteps/nowcasts/interface.py @@ -30,7 +30,7 @@ get_method """ -from pysteps.extrapolation.interface import eulerian_persistence +from functools import partial from pysteps.nowcasts import ( anvil, extrapolation, @@ -41,6 +41,8 @@ ) from pysteps.nowcasts import lagrangian_probability +eulerian_persistence = partial(extrapolation.forecast, extrap_method="eulerian") + _nowcast_methods = dict() _nowcast_methods["anvil"] = anvil.forecast _nowcast_methods["eulerian"] = eulerian_persistence.extrapolate diff --git a/pysteps/tests/test_blending_linear_blending.py b/pysteps/tests/test_blending_linear_blending.py index 34d3f3875..5dae1425a 100644 --- a/pysteps/tests/test_blending_linear_blending.py +++ b/pysteps/tests/test_blending_linear_blending.py @@ -1,164 +1,47 @@ # -*- coding: utf-8 -*- from datetime import datetime + import numpy as np import pytest -from pysteps.blending.linear_blending import forecast, _get_ranked_salience, _get_ws from numpy.testing import assert_array_almost_equal + +from pysteps.blending.linear_blending import (_get_ranked_salience, _get_ws, + forecast) from pysteps.utils import transformation from pysteps.xarray_helpers import convert_input_to_xarray_dataset # Test function arguments linear_arg_values = [ - (5, 30, 60, 20, 45, "eulerian", None, 1, False, True, False), - (5, 30, 60, 20, 45, "eulerian", None, 2, False, False, False), - (5, 30, 
60, 20, 45, "eulerian", None, 0, False, False, False), - (4, 23, 33, 9, 28, "eulerian", None, 1, False, False, False), - (3, 18, 36, 13, 27, "eulerian", None, 1, False, False, False), - (7, 30, 68, 11, 49, "eulerian", None, 1, False, False, False), - (7, 30, 68, 11, 49, "eulerian", None, 1, False, False, True), - (10, 100, 160, 25, 130, "eulerian", None, 1, False, False, False), - (6, 60, 180, 22, 120, "eulerian", None, 1, False, False, False), - (5, 100, 200, 40, 150, "eulerian", None, 1, False, False, False), - ( - 5, - 30, - 60, - 20, - 45, - "extrapolation", - np.zeros((2, 200, 200)), - 1, - False, - False, - False, - ), - ( - 4, - 23, - 33, - 9, - 28, - "extrapolation", - np.zeros((2, 200, 200)), - 1, - False, - False, - False, - ), - ( - 3, - 18, - 36, - 13, - 27, - "extrapolation", - np.zeros((2, 200, 200)), - 1, - False, - False, - False, - ), - ( - 7, - 30, - 68, - 11, - 49, - "extrapolation", - np.zeros((2, 200, 200)), - 1, - False, - False, - False, - ), - ( - 10, - 100, - 160, - 25, - 130, - "extrapolation", - np.zeros((2, 200, 200)), - 1, - False, - False, - False, - ), - ( - 6, - 60, - 180, - 22, - 120, - "extrapolation", - np.zeros((2, 200, 200)), - 1, - False, - False, - False, - ), - ( - 5, - 100, - 200, - 40, - 150, - "extrapolation", - np.zeros((2, 200, 200)), - 1, - False, - False, - False, - ), - ( - 5, - 100, - 200, - 40, - 150, - "extrapolation", - np.zeros((2, 200, 200)), - 1, - False, - False, - True, - ), - (5, 30, 60, 20, 45, "eulerian", None, 1, True, True, False), - (5, 30, 60, 20, 45, "eulerian", None, 2, True, False, False), - (5, 30, 60, 20, 45, "eulerian", None, 0, True, False, False), - ( - 5, - 30, - 60, - 20, - 45, - "extrapolation", - np.zeros((2, 200, 200)), - 1, - True, - False, - False, - ), - (4, 23, 33, 9, 28, "extrapolation", np.zeros((2, 200, 200)), 1, True, False, False), - ( - 3, - 18, - 36, - 13, - 27, - "extrapolation", - np.zeros((2, 200, 200)), - 1, - True, - False, - False, - ), + (5, 30, 60, 20, 45, "eulerian", 1, False, True, False), + (5, 30, 60, 20, 45, "eulerian", 2, False, False, False), + (5, 30, 60, 20, 45, "eulerian", 0, False, False, False), + (4, 23, 33, 9, 28, "eulerian", 1, False, False, False), + (3, 18, 36, 13, 27, "eulerian", 1, False, False, False), + (7, 30, 68, 11, 49, "eulerian", 1, False, False, False), + (7, 30, 68, 11, 49, "eulerian", 1, False, False, True), + (10, 100, 160, 25, 130, "eulerian", 1, False, False, False), + (6, 60, 180, 22, 120, "eulerian", 1, False, False, False), + (5, 100, 200, 40, 150, "eulerian", 1, False, False, False), + (5, 30, 60, 20, 45, "extrapolation", 1, False, False, False), + (4, 23, 33, 9, 28, "extrapolation", 1, False, False, False), + (3, 18, 36, 13, 27, "extrapolation", 1, False, False, False), + (7, 30, 68, 11, 49, "extrapolation", 1, False, False, False), + (10, 100, 160, 25, 130, "extrapolation", 1, False, False, False), + (6, 60, 180, 22, 120, "extrapolation", 1, False, False, False), + (5, 100, 200, 40, 150, "extrapolation", 1, False, False, False), + (5, 100, 200, 40, 150, "extrapolation", 1, False, False, True), + (5, 30, 60, 20, 45, "eulerian", 1, True, True, False), + (5, 30, 60, 20, 45, "eulerian", 2, True, False, False), + (5, 30, 60, 20, 45, "eulerian", 0, True, False, False), + (5, 30, 60, 20, 45, "extrapolation", 1, True, False, False), + (4, 23, 33, 9, 28, "extrapolation", 1, True, False, False), + (3, 18, 36, 13, 27, "extrapolation", 1, True, False, False), ] @pytest.mark.parametrize( - "timestep, start_blending, end_blending, n_timesteps, controltime, nowcast_method, 
V, n_models, salient_blending, squeeze_nwp_array, fill_nwp", + "timestep, start_blending, end_blending, n_timesteps, controltime, nowcast_method, n_models, salient_blending, squeeze_nwp_array, fill_nwp", linear_arg_values, ) def test_linear_blending( @@ -168,7 +51,6 @@ def test_linear_blending( n_timesteps, controltime, nowcast_method, - V, n_models, salient_blending, squeeze_nwp_array, @@ -253,9 +135,9 @@ def test_linear_blending( radar_dataset = transformation.dB_transform( radar_dataset, threshold=0.1, zerovalue=-15.0 ) - if V is not None: - radar_dataset["velocity_x"] = (["y", "x"], V[0]) - radar_dataset["velocity_y"] = (["y", "x"], V[1]) + V = np.zeros((2, 200, 200)) + radar_dataset["velocity_x"] = (["y", "x"], V[0]) + radar_dataset["velocity_y"] = (["y", "x"], V[1]) if r_nwp is None: model_dataset = None @@ -347,3 +229,6 @@ def test_salient_weight( ), "The shape of the ranked salience array does not have the expected value. The shape is {}".format( ws.shape ) + ), "The shape of the ranked salience array does not have the expected value. The shape is {}".format( + ws.shape + ) diff --git a/pysteps/tests/test_downscaling_rainfarm.py b/pysteps/tests/test_downscaling_rainfarm.py index 3b60f48b3..aeb6deba5 100644 --- a/pysteps/tests/test_downscaling_rainfarm.py +++ b/pysteps/tests/test_downscaling_rainfarm.py @@ -10,7 +10,7 @@ @pytest.fixture(scope="module") def dataset(): precip_dataset = get_precipitation_fields( - num_prev_files=0, num_next_files=0, return_raw=False, metadata=True + num_prev_files=0, num_next_files=0, return_raw=False ) precip_dataset = square_domain(precip_dataset, "crop") return precip_dataset diff --git a/pysteps/tests/test_exporters.py b/pysteps/tests/test_exporters.py index 274390724..6208f3f7d 100644 --- a/pysteps/tests/test_exporters.py +++ b/pysteps/tests/test_exporters.py @@ -71,7 +71,7 @@ def test_io_export_netcdf_one_member_one_time_step( pytest.importorskip("pyproj") precip_dataset: xr.Dataset = get_precipitation_fields( - num_prev_files=2, return_raw=True, metadata=True, source="fmi" + num_prev_files=2, return_raw=True, source="fmi" ) precip_var = precip_dataset.attrs["precip_var"] diff --git a/pysteps/tests/test_feature.py b/pysteps/tests/test_feature.py index d1a257aff..084744b29 100644 --- a/pysteps/tests/test_feature.py +++ b/pysteps/tests/test_feature.py @@ -18,7 +18,6 @@ def test_feature(method, max_num_features): num_prev_files=0, num_next_files=0, return_raw=True, - metadata=False, upscale=None, source="mch", ) diff --git a/pysteps/tests/test_interfaces.py b/pysteps/tests/test_interfaces.py index bfe1d6683..1f1c2ef6d 100644 --- a/pysteps/tests/test_interfaces.py +++ b/pysteps/tests/test_interfaces.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- +from functools import partial import numpy import pytest @@ -61,14 +62,14 @@ def test_extrapolation_interface(): from pysteps import extrapolation from pysteps.extrapolation import semilagrangian - from pysteps.extrapolation.interface import eulerian_persistence as eulerian + from pysteps.extrapolation import eulerian_persistence from pysteps.extrapolation.interface import _do_nothing as do_nothing method_getter = extrapolation.interface.get_method valid_returned_objs = dict() valid_returned_objs["semilagrangian"] = semilagrangian.extrapolate - valid_returned_objs["eulerian"] = eulerian + valid_returned_objs["eulerian"] = eulerian_persistence.extrapolate valid_returned_objs[None] = do_nothing valid_returned_objs["None"] = do_nothing @@ -295,6 +296,7 @@ def test_nowcasts_interface(): valid_names_func_pair = [ 
("anvil", anvil.forecast), ("extrapolation", extrapolation.forecast), + ("eulerian", pysteps.nowcasts.interface.eulerian_persistence), ("lagrangian", extrapolation.forecast), ("linda", linda.forecast), ("probability", lagrangian_probability.forecast), @@ -307,15 +309,6 @@ def test_nowcasts_interface(): invalid_names = ["extrap", "step", "s-prog", "pysteps"] _generic_interface_test(method_getter, valid_names_func_pair, invalid_names) - # Test eulerian persistence method - precip = numpy.random.rand(100, 100) - velocity = numpy.random.rand(100, 100) - num_timesteps = 10 - for name in ["eulerian", "EULERIAN"]: - forecast = method_getter(name)(precip, velocity, num_timesteps) - for i in range(num_timesteps): - assert numpy.all(forecast[i] == precip) - def test_utils_interface(): """Test utils module interface.""" diff --git a/pysteps/tests/test_io_bom_rf3.py b/pysteps/tests/test_io_bom_rf3.py index 66f075e0e..22535c4dd 100644 --- a/pysteps/tests/test_io_bom_rf3.py +++ b/pysteps/tests/test_io_bom_rf3.py @@ -11,7 +11,6 @@ num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="bom", log_transform=False, ) @@ -46,7 +45,6 @@ def test_io_import_bom_shape(): (precip_dataset.x.attrs["units"], "m", None), (precip_dataset.y.attrs["units"], "m", None), (precip_dataarray.attrs["accutime"], 6, 1e-4), - (precip_dataarray.attrs["transform"], None, None), (precip_dataarray.attrs["zerovalue"], 0.0, 1e-4), (precip_dataarray.attrs["units"], "mm", None), ] diff --git a/pysteps/tests/test_io_dwd_hdf5.py b/pysteps/tests/test_io_dwd_hdf5.py index 950da8d86..e69b2a6bb 100644 --- a/pysteps/tests/test_io_dwd_hdf5.py +++ b/pysteps/tests/test_io_dwd_hdf5.py @@ -9,7 +9,6 @@ num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="dwd", log_transform=False, ) diff --git a/pysteps/tests/test_io_fmi_geotiff.py b/pysteps/tests/test_io_fmi_geotiff.py index 2a07f03b0..db2d0a3c2 100644 --- a/pysteps/tests/test_io_fmi_geotiff.py +++ b/pysteps/tests/test_io_fmi_geotiff.py @@ -7,8 +7,6 @@ precip_dataset = get_precipitation_fields( num_prev_files=0, num_next_files=0, - return_raw=True, - metadata=True, source="fmi_geotiff", log_transform=False, ) diff --git a/pysteps/tests/test_io_fmi_pgm.py b/pysteps/tests/test_io_fmi_pgm.py index a704e0e50..48075bd21 100644 --- a/pysteps/tests/test_io_fmi_pgm.py +++ b/pysteps/tests/test_io_fmi_pgm.py @@ -7,7 +7,6 @@ num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="fmi", log_transform=False, ) diff --git a/pysteps/tests/test_io_knmi_hdf5.py b/pysteps/tests/test_io_knmi_hdf5.py index e585055f9..ef65d8b02 100644 --- a/pysteps/tests/test_io_knmi_hdf5.py +++ b/pysteps/tests/test_io_knmi_hdf5.py @@ -8,10 +8,9 @@ num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="knmi", log_transform=False, - importer_kwargs=dict(qty="ACRR"), + qty="ACRR", ) precip_var = precip_dataset.attrs["precip_var"] diff --git a/pysteps/tests/test_io_mch_gif.py b/pysteps/tests/test_io_mch_gif.py index aa08bcb53..254eb526f 100644 --- a/pysteps/tests/test_io_mch_gif.py +++ b/pysteps/tests/test_io_mch_gif.py @@ -8,10 +8,9 @@ num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="mch", log_transform=False, - importer_kwargs=dict(qty="AQC"), + product="AQC", ) precip_var = precip_dataset.attrs["precip_var"] diff --git a/pysteps/tests/test_io_mrms_grib.py b/pysteps/tests/test_io_mrms_grib.py index 3b28fc5af..d70b8ffbd 100644 --- a/pysteps/tests/test_io_mrms_grib.py +++ b/pysteps/tests/test_io_mrms_grib.py @@ -8,13 
+8,11 @@ num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="mrms", log_transform=False, window_size=1, ) -print(precip_dataset) precip_var = precip_dataset.attrs["precip_var"] precip_dataarray = precip_dataset[precip_var] @@ -36,7 +34,6 @@ def test_io_import_mrms_grib(): None, ), (precip_dataarray.attrs["units"], "mm/h", None), - (precip_dataarray.attrs["transform"], None, None), (precip_dataarray.attrs["zerovalue"], 0.0, 1e-6), (precip_dataarray.attrs["threshold"], 0.1, 1e-10), (precip_dataset.x.isel(x=0).values, -129.995, 1e-10), @@ -63,7 +60,6 @@ def test_io_import_mrms_grib_dataset_extent(): num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="mrms", log_transform=False, extent=(230, 300, 20, 55), @@ -79,7 +75,6 @@ def test_io_import_mrms_grib_dataset_extent(): num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="mrms", log_transform=False, extent=(250, 260, 30, 35), @@ -91,6 +86,7 @@ def test_io_import_mrms_grib_dataset_extent(): smart_assert(precip_dataarray_even_smaller.shape, (1, 500, 1000), None) # XR: we had to change the selection of the original field since these is a flip happening in the way the data is read in. # XR: We had two ways to solve this: precip_dataarray[:,::-1, :][:, 2000:2500, 2000:3000][:,::-1, :] or switch the 2000:2500 to + # I think this is logical as both the extend selected data and the reference data are flipped. That is why we need the double flip assert_array_almost_equal( precip_dataarray.values[:, 1000:1500, 2000:3000], precip_dataarray_even_smaller.values, @@ -100,7 +96,6 @@ def test_io_import_mrms_grib_dataset_extent(): num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="mrms", log_transform=False, extent=(250, 260, 30, 35), @@ -116,7 +111,6 @@ def test_io_import_mrms_grib_dataset_extent(): num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="mrms", log_transform=False, extent=(250, 260, 30, 35), diff --git a/pysteps/tests/test_io_nowcast_importers.py b/pysteps/tests/test_io_nowcast_importers.py index a19388afe..e6f98f1ad 100644 --- a/pysteps/tests/test_io_nowcast_importers.py +++ b/pysteps/tests/test_io_nowcast_importers.py @@ -10,7 +10,6 @@ num_prev_files=1, num_next_files=0, return_raw=False, - metadata=True, upscale=2000, ) diff --git a/pysteps/tests/test_io_readers.py b/pysteps/tests/test_io_readers.py index 692e2d9eb..9417e931e 100644 --- a/pysteps/tests/test_io_readers.py +++ b/pysteps/tests/test_io_readers.py @@ -11,7 +11,6 @@ def test_read_timeseries_mch(): num_prev_files=1, num_next_files=1, return_raw=True, - metadata=True, source="mch", log_transform=False, ) diff --git a/pysteps/tests/test_io_saf_crri.py b/pysteps/tests/test_io_saf_crri.py index 9ecedf135..b75273e4a 100644 --- a/pysteps/tests/test_io_saf_crri.py +++ b/pysteps/tests/test_io_saf_crri.py @@ -8,7 +8,6 @@ num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="saf", log_transform=False, ) @@ -37,7 +36,6 @@ def test_io_import_saf_crri_extent(extent, expected_extent, expected_shape, tole num_prev_files=0, num_next_files=0, return_raw=True, - metadata=True, source="saf", log_transform=False, extent=extent, diff --git a/pysteps/tests/test_motion_lk.py b/pysteps/tests/test_motion_lk.py index 3ce9e2059..7cffb6768 100644 --- a/pysteps/tests/test_motion_lk.py +++ b/pysteps/tests/test_motion_lk.py @@ -64,7 +64,6 @@ def test_lk( num_prev_files=2, num_next_files=0, return_raw=False, - metadata=True, upscale=2000, ) precip_var = 
dataset.attrs["precip_var"] diff --git a/pysteps/tests/test_noise_fftgenerators.py b/pysteps/tests/test_noise_fftgenerators.py index cc27258e1..fafdedaa3 100644 --- a/pysteps/tests/test_noise_fftgenerators.py +++ b/pysteps/tests/test_noise_fftgenerators.py @@ -8,7 +8,6 @@ num_prev_files=0, num_next_files=0, return_raw=False, - metadata=False, upscale=2000, ) diff --git a/pysteps/tests/test_nowcasts_anvil.py b/pysteps/tests/test_nowcasts_anvil.py index f9259fc93..7a082ad65 100644 --- a/pysteps/tests/test_nowcasts_anvil.py +++ b/pysteps/tests/test_nowcasts_anvil.py @@ -92,7 +92,6 @@ def test_anvil_rainrate( num_prev_files=4, num_next_files=0, return_raw=False, - metadata=False, upscale=2000, ) diff --git a/pysteps/tests/test_nowcasts_lagrangian_probability.py b/pysteps/tests/test_nowcasts_lagrangian_probability.py index d75b29e87..a92204ac1 100644 --- a/pysteps/tests/test_nowcasts_lagrangian_probability.py +++ b/pysteps/tests/test_nowcasts_lagrangian_probability.py @@ -86,7 +86,6 @@ def test_real_case(): num_prev_files=2, num_next_files=0, return_raw=False, - metadata=True, upscale=2000, ) precip_var = dataset_input.attrs["precip_var"] diff --git a/pysteps/tests/test_nowcasts_linda.py b/pysteps/tests/test_nowcasts_linda.py index 51d688644..8bc6f8913 100644 --- a/pysteps/tests/test_nowcasts_linda.py +++ b/pysteps/tests/test_nowcasts_linda.py @@ -76,7 +76,6 @@ def test_linda( dataset_input = get_precipitation_fields( num_prev_files=2, num_next_files=0, - metadata=True, clip=(354000, 866000, -96000, 416000), upscale=4000, log_transform=False, @@ -203,7 +202,6 @@ def test_linda_callback(tmp_path): num_prev_files=2, num_next_files=0, return_raw=False, - metadata=True, upscale=2000, ) precip_input = precip_input.filled() diff --git a/pysteps/tests/test_nowcasts_sprog.py b/pysteps/tests/test_nowcasts_sprog.py index 383fc155e..63e9b795d 100644 --- a/pysteps/tests/test_nowcasts_sprog.py +++ b/pysteps/tests/test_nowcasts_sprog.py @@ -61,7 +61,6 @@ def test_sprog( num_prev_files=2, num_next_files=0, return_raw=False, - metadata=True, upscale=2000, ) diff --git a/pysteps/tests/test_nowcasts_sseps.py b/pysteps/tests/test_nowcasts_sseps.py index ee7a6b885..bfe05a002 100644 --- a/pysteps/tests/test_nowcasts_sseps.py +++ b/pysteps/tests/test_nowcasts_sseps.py @@ -108,7 +108,6 @@ def test_sseps( num_prev_files=2, num_next_files=0, return_raw=False, - metadata=True, upscale=2000, ) precip_var = dataset_input.attrs["precip_var"] diff --git a/pysteps/tests/test_nowcasts_steps.py b/pysteps/tests/test_nowcasts_steps.py index a10760192..4e489dcfb 100644 --- a/pysteps/tests/test_nowcasts_steps.py +++ b/pysteps/tests/test_nowcasts_steps.py @@ -77,7 +77,6 @@ def test_steps_skill( num_prev_files=2, num_next_files=0, return_raw=False, - metadata=True, upscale=2000, ) @@ -133,7 +132,6 @@ def test_steps_callback(tmp_path): num_prev_files=2, num_next_files=0, return_raw=False, - metadata=True, upscale=2000, ) precip_input = precip_input.filled() diff --git a/pysteps/tests/test_nowcasts_utils.py b/pysteps/tests/test_nowcasts_utils.py index 1dfeb27a9..a1a8d8133 100644 --- a/pysteps/tests/test_nowcasts_utils.py +++ b/pysteps/tests/test_nowcasts_utils.py @@ -29,8 +29,6 @@ def test_nowcast_main_loop( dataset = get_precipitation_fields( num_prev_files=2, num_next_files=0, - return_raw=False, - metadata=False, upscale=2000, ) diff --git a/pysteps/tests/test_plt_animate.py b/pysteps/tests/test_plt_animate.py index f6b0d25a1..422ac0057 100644 --- a/pysteps/tests/test_plt_animate.py +++ b/pysteps/tests/test_plt_animate.py @@ -14,7 
+14,6 @@ num_prev_files=2, num_next_files=0, return_raw=True, - metadata=True, upscale=2000, ) diff --git a/pysteps/tests/test_plugins_support.py b/pysteps/tests/test_plugins_support.py index f4170492c..46bfd0e99 100644 --- a/pysteps/tests/test_plugins_support.py +++ b/pysteps/tests/test_plugins_support.py @@ -20,13 +20,6 @@ from pysteps import io, postprocessing -# BUG: -# XR: Cookie cutter makes two calls to importers, one -# which is not wrapped with postprocess_import resulting in the importer -# returning 3 values on the first call. On the second call the importer goes -# through the postprocess_import resulting in one dataset being returned. -# Should fix this issue first before fixing tests. -# Test has been therefore commented out. def _check_installed_importer_plugin(import_func_name): # reload the pysteps module to detect the installed plugin io.discover_importers() @@ -90,7 +83,12 @@ def _uninstall_plugin(project_name): ) -# XR: Commented out test for reason explained above +# BUG: +# XR: Cookie cutter tries to add an importer using the postprocess importer +# decorator. I removed this decorator and made it so that any importer +# just directly returns an xarray. This example plugin needs to be updated +# to reflect that before this test will work again. + # def test_importers_plugins(): # with _create_and_install_plugin("pysteps-importer-institution-fun", "importer"): # _check_installed_importer_plugin("importer_institution_fun") diff --git a/pysteps/tests/test_utils_reprojection.py b/pysteps/tests/test_utils_reprojection.py index 58a1231cf..c5f1fa032 100644 --- a/pysteps/tests/test_utils_reprojection.py +++ b/pysteps/tests/test_utils_reprojection.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -import os import numpy as np import pytest import xarray as xr @@ -92,7 +91,6 @@ def build_precip_dataset( num_next_files=0, source="rmi", return_raw=True, - metadata=True, log_transform=False, ) diff --git a/pysteps/tests/test_verification_salscores.py b/pysteps/tests/test_verification_salscores.py index fbed23def..aa225c2bd 100644 --- a/pysteps/tests/test_verification_salscores.py +++ b/pysteps/tests/test_verification_salscores.py @@ -20,9 +20,7 @@ class TestSAL: def test_sal_zeros(self, converter, thr_factor): """Test the SAL verification method.""" - dataset_input = get_precipitation_fields( - num_prev_files=0, log_transform=False, metadata=True - ) + dataset_input = get_precipitation_fields(num_prev_files=0, log_transform=False) dataset_input = converter(dataset_input) precip_var = dataset_input.attrs["precip_var"] precip = dataset_input[precip_var].values[0] @@ -37,9 +35,7 @@ def test_sal_zeros(self, converter, thr_factor): def test_sal_same_image(self, converter, thr_factor): """Test the SAL verification method.""" - dataset_input = get_precipitation_fields( - num_prev_files=0, log_transform=False, metadata=True - ) + dataset_input = get_precipitation_fields(num_prev_files=0, log_transform=False) dataset_input = converter(dataset_input) precip_var = dataset_input.attrs["precip_var"] precip = dataset_input[precip_var].values[0] @@ -49,9 +45,7 @@ def test_sal_same_image(self, converter, thr_factor): assert np.allclose(result, [0, 0, 0]) def test_sal_translation(self, converter, thr_factor): - dataset_input = get_precipitation_fields( - num_prev_files=0, log_transform=False, metadata=True - ) + dataset_input = get_precipitation_fields(num_prev_files=0, log_transform=False) dataset_input = converter(dataset_input) precip_var = dataset_input.attrs["precip_var"] precip = 
dataset_input[precip_var].values[0] diff --git a/pysteps/utils/interface.py b/pysteps/utils/interface.py index ded0c3094..eb91dacf3 100644 --- a/pysteps/utils/interface.py +++ b/pysteps/utils/interface.py @@ -11,17 +11,21 @@ get_method """ -from . import arrays -from . import cleansing -from . import conversion -from . import dimension -from . import fft -from . import images -from . import interpolate -from . import reprojection -from . import spectral -from . import tapering -from . import transformation +import xarray as xr + +from . import ( + arrays, + cleansing, + conversion, + dimension, + fft, + images, + interpolate, + reprojection, + spectral, + tapering, + transformation, +) def get_method(name, **kwargs): @@ -163,8 +167,8 @@ def get_method(name, **kwargs): name = name.lower() - def donothing(R, metadata=None, *args, **kwargs): - return R.copy(), {} if metadata is None else metadata.copy() + def donothing(dataset: xr.Dataset, *args, **kwargs): + return dataset methods_objects = dict() methods_objects["none"] = donothing diff --git a/pysteps/xarray_helpers.py b/pysteps/xarray_helpers.py index 821fd9298..a45ed27ba 100644 --- a/pysteps/xarray_helpers.py +++ b/pysteps/xarray_helpers.py @@ -19,7 +19,6 @@ import pyproj import xarray as xr - # TODO(converters): Write methods for converting Proj.4 projection definitions # into CF grid mapping attributes. Currently this has been implemented for # the stereographic projection. @@ -215,7 +214,7 @@ def convert_input_to_xarray_dataset( "units": attrs["units"], "standard_name": attrs["standard_name"], "long_name": attrs["long_name"], - "grid_mapping": "projection", + "grid_mapping": grid_mapping_name, }, ) } @@ -239,7 +238,7 @@ def convert_input_to_xarray_dataset( { "units": "1", "standard_name": "quality_flag", - "grid_mapping": "projection", + "grid_mapping": grid_mapping_name, }, ) coords = { @@ -330,17 +329,22 @@ def convert_output_to_xarray_dataset( precip_var = dataset.attrs["precip_var"] metadata = dataset[precip_var].attrs - last_timestamp = ( + forecast_reference_time = ( dataset["time"][-1].values.astype("datetime64[us]").astype(datetime) ) time_metadata = dataset["time"].attrs time_encoding = dataset["time"].encoding timestep_seconds = dataset["time"].attrs["stepsize"] dataset = dataset.drop_vars([precip_var]).drop_dims(["time"]) + if "velocity_x" in dataset: + dataset = dataset.drop_vars(["velocity_x"]) + if "velocity_y" in dataset: + dataset = dataset.drop_vars(["velocity_y"]) if isinstance(timesteps, int): timesteps = list(range(1, timesteps + 1)) next_timestamps = [ - last_timestamp + timedelta(seconds=timestep_seconds * i) for i in timesteps + forecast_reference_time + timedelta(seconds=timestep_seconds * i) + for i in timesteps ] dataset = dataset.assign_coords( {"time": (["time"], next_timestamps, time_metadata, time_encoding)} @@ -364,4 +368,15 @@ def convert_output_to_xarray_dataset( else: dataset[precip_var] = (["time", "y", "x"], output, metadata) + dataset = dataset.assign_coords( + { + "forecast_reference_time": ( + [], + forecast_reference_time, + {"long_name": "forecast reference time"}, + time_encoding, + ) + } + ) + return dataset
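# --- Example sketches for the refactored APIs introduced by this patch ---
# Illustration of the new module-level pysteps.io.importers._postprocess_precip
# helper, which takes over the work of the removed postprocess_import decorator:
# replace missing/invalid pixels with `fillna` and cast to the requested dtype.
# This is a minimal sketch written against the patched code, not part of the patch.
import numpy as np

from pysteps.io.importers import _postprocess_precip

raw = np.array([[1.0, np.nan], [np.inf, 4.0]])
clean = _postprocess_precip(raw, fillna=-1.0, dtype="float32")
print(clean.dtype)  # float32
print(clean)        # [[ 1. -1.]
                    #  [-1.  4.]]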
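# Usage sketch for the refactored importer API: with the decorator gone, the
# fillna/dtype keywords are plain function arguments and every importer returns
# an xarray.Dataset directly. The file name below is a hypothetical placeholder
# for an FMI PGM composite (e.g. from the pysteps example data).
import numpy as np

from pysteps.io.importers import import_fmi_pgm

precip_dataset = import_fmi_pgm(
    "fmi_radar_composite.pgm.gz",  # placeholder path, assumed to exist
    gzipped=True,
    fillna=np.nan,    # value used for "No Coverage" pixels (the default)
    dtype="float32",  # cast to single precision instead of the "double" default
)
precip_var = precip_dataset.attrs["precip_var"]
print(precip_dataset[precip_var].shape)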
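# Minimal sketch of the reverted Eulerian persistence extrapolator: it again
# accepts plain numpy arrays (precip, velocity, timesteps) and returns numpy
# arrays instead of an xarray.Dataset; the velocity argument is ignored.
import numpy as np

from pysteps.extrapolation import eulerian_persistence

precip = np.random.rand(8, 8)   # last observed precipitation field
velocity = np.zeros((2, 8, 8))  # unused by the method
forecast, displacement = eulerian_persistence.extrapolate(
    precip, velocity, timesteps=3, return_displacement=True
)
assert forecast.shape == (3, 8, 8)
assert np.all(forecast[0] == precip)
assert np.all(displacement == 0.0)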