From f69b8d22046fb8f2939522be6855941ea5013332 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Fri, 7 Nov 2025 15:48:32 +0100 Subject: [PATCH 001/104] Added Lightgbm, LightGBM Linear Trees and Hybrid Stacking Forecasters --- packages/openstef-models/pyproject.toml | 2 + .../openstef_models/estimators/__init__.py | 9 + .../src/openstef_models/estimators/hybrid.py | 175 +++++++++ .../openstef_models/estimators/lightgbm.py | 201 ++++++++++ .../models/forecasting/hybrid_forecaster.py | 137 +++++++ .../forecasting/lgblinear_forecaster.py | 346 ++++++++++++++++++ .../models/forecasting/lightgbm_forecaster.py | 340 +++++++++++++++++ .../presets/forecasting_workflow.py | 171 +++++++-- .../tests/unit/estimators/test_hybrid.py | 39 ++ .../tests/unit/estimators/test_lightgbm.py | 39 ++ .../forecasting/test_lgblinear_forecaster.py | 159 ++++++++ .../forecasting/test_lightgbm_forecaster.py | 159 ++++++++ uv.lock | 55 +++ 13 files changed, 1803 insertions(+), 29 deletions(-) create mode 100644 packages/openstef-models/src/openstef_models/estimators/__init__.py create mode 100644 packages/openstef-models/src/openstef_models/estimators/hybrid.py create mode 100644 packages/openstef-models/src/openstef_models/estimators/lightgbm.py create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py create mode 100644 packages/openstef-models/tests/unit/estimators/test_hybrid.py create mode 100644 packages/openstef-models/tests/unit/estimators/test_lightgbm.py create mode 100644 packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py create mode 100644 packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py diff --git a/packages/openstef-models/pyproject.toml b/packages/openstef-models/pyproject.toml index 5f267c901..43914f38f 100644 --- a/packages/openstef-models/pyproject.toml +++ b/packages/openstef-models/pyproject.toml @@ -28,8 +28,10 @@ classifiers = [ ] dependencies = [ + "lightgbm>=4.6.0", "openstef-core", "pycountry>=24.6.1", + "skops>=0.13.0", ] optional-dependencies.all = [ diff --git a/packages/openstef-models/src/openstef_models/estimators/__init__.py b/packages/openstef-models/src/openstef_models/estimators/__init__.py new file mode 100644 index 000000000..487060783 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/estimators/__init__.py @@ -0,0 +1,9 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Custom estimators for multi quantiles""" + +from .lightgbm import LGBMQuantileRegressor + +__all__ = ["LGBMQuantileRegressor"] diff --git a/packages/openstef-models/src/openstef_models/estimators/hybrid.py b/packages/openstef-models/src/openstef_models/estimators/hybrid.py new file mode 100644 index 000000000..abffd154a --- /dev/null +++ b/packages/openstef-models/src/openstef_models/estimators/hybrid.py @@ -0,0 +1,175 @@ +"""Hybrid quantile regression estimators for multi-quantile forecasting. + +This module provides the HybridQuantileRegressor class, which combines LightGBM and linear models +using stacking for robust multi-quantile regression, including serialization utilities. +""" + +from typing import Self + +import numpy as np +import numpy.typing as npt +import pandas as pd +from lightgbm import LGBMRegressor +from sklearn.ensemble import StackingRegressor +from sklearn.linear_model import QuantileRegressor +from skops.io import dumps, loads +from xgboost import XGBRegressor + +from openstef_core.exceptions import ModelLoadingError + + +class HybridQuantileRegressor: + """Custom Hybrid regressor for multi-quantile estimation using sample weights.""" + + def __init__( + self, + quantiles: list[float], + lightgbm_n_estimators: int = 100, + lightgbm_learning_rate: float = 0.1, + lightgbm_max_depth: int = -1, + lightgbm_min_child_weight: float = 1.0, + ligntgbm_min_child_samples: int = 1, + lightgbm_min_data_in_leaf: int = 20, + lightgbm_min_data_in_bin: int = 10, + lightgbm_reg_alpha: float = 0.0, + lightgbm_reg_lambda: float = 0.0, + lightgbm_num_leaves: int = 31, + lightgbm_max_bin: int = 255, + lightgbm_subsample: float = 1.0, + lightgbm_colsample_by_tree: float = 1.0, + lightgbm_colsample_by_node: float = 1.0, + gblinear_n_estimators: int = 100, + gblinear_learning_rate: float = 0.15, + gblinear_reg_alpha: float = 0.0001, + gblinear_reg_lambda: float = 0, + gblinear_feature_selector: str = "shuffle", + gblinear_updater: str = "shotgun", + ): + self.quantiles = quantiles + + self._models: list[StackingRegressor] = [] + + for q in quantiles: + lightgbm_model = LGBMRegressor( + objective="quantile", + alpha=q, + min_child_samples=ligntgbm_min_child_samples, + n_estimators=lightgbm_n_estimators, + learning_rate=lightgbm_learning_rate, + max_depth=lightgbm_max_depth, + min_child_weight=lightgbm_min_child_weight, + min_data_in_leaf=lightgbm_min_data_in_leaf, + min_data_in_bin=lightgbm_min_data_in_bin, + reg_alpha=lightgbm_reg_alpha, + reg_lambda=lightgbm_reg_lambda, + num_leaves=lightgbm_num_leaves, + max_bin=lightgbm_max_bin, + subsample=lightgbm_subsample, + colsample_bytree=lightgbm_colsample_by_tree, + colsample_bynode=lightgbm_colsample_by_node, + verbosity=-1, + linear_tree=False, + ) + + linear = XGBRegressor( + booster="gblinear", + # Core parameters for forecasting + objective="reg:quantileerror", + n_estimators=gblinear_n_estimators, + learning_rate=gblinear_learning_rate, + # Regularization parameters + reg_alpha=gblinear_reg_alpha, + reg_lambda=gblinear_reg_lambda, + # Boosting structure control + feature_selector=gblinear_feature_selector, + updater=gblinear_updater, + quantile_alpha=q, + ) + + final_estimator = QuantileRegressor(quantile=q) + + self._models.append( + StackingRegressor( + estimators=[("lightgbm", lightgbm_model), ("gblinear", linear)], # type: ignore + final_estimator=final_estimator, + verbose=3, + passthrough=False, + n_jobs=None, + cv=2, + ) + ) + self.is_fitted: bool = False + + def fit( + self, + X: npt.NDArray[np.floating] | pd.DataFrame, # noqa: N803 + y: npt.NDArray[np.floating] | pd.Series, + sample_weight: npt.NDArray[np.floating] | None = None, + feature_name: list[str] | None = None, + ) -> None: + """Fit the multi-quantile regressor. + + Args: + X: Input features as a DataFrame. + y: Target values as a 2D array where each column corresponds to a quantile. + sample_weight: Sample weights for training data. + feature_name: List of feature names. + """ + X = X.ffill().fillna(0) # type: ignore + for model in self._models: + model.fit( + X=X, # type: ignore + y=y, + sample_weight=sample_weight, + ) + self.is_fitted = True + + def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: # noqa: N803 + """Predict quantiles for the input features. + + Args: + X: Input features as a DataFrame. + + Returns: + + A 2D array where each column corresponds to predicted quantiles. + """ # noqa: D412 + X = X.ffill().fillna(0) # type: ignore + return np.column_stack([model.predict(X=X) for model in self._models]) # type: ignore + + def save_bytes(self) -> bytes: + """Serialize the model. + + Returns: + A string representation of the model. + """ + return dumps(self) + + @classmethod + def load_bytes(cls, model_bytes: bytes) -> Self: + """Deserialize the model from bytes using joblib. + + Args: + model_bytes : Bytes representing the serialized model. + + Returns: + An instance of LightGBMQuantileRegressor. + + Raises: + ModelLoadingError: If the deserialized object is not a HybridQuantileRegressor. + """ + trusted_types = [ + "collections.OrderedDict", + "lightgbm.basic.Booster", + "lightgbm.sklearn.LGBMRegressor", + "sklearn.utils._bunch.Bunch", + "xgboost.core.Booster", + "xgboost.sklearn.XGBRegressor", + "openstef_models.estimators.hybrid.HybridQuantileRegressor", + ] + instance = loads(model_bytes, trusted=trusted_types) + + if not isinstance(instance, cls): + raise ModelLoadingError("Deserialized object is not a HybridQuantileRegressor") + + return instance diff --git a/packages/openstef-models/src/openstef_models/estimators/lightgbm.py b/packages/openstef-models/src/openstef_models/estimators/lightgbm.py new file mode 100644 index 000000000..66f222327 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/estimators/lightgbm.py @@ -0,0 +1,201 @@ +"""Custom LightGBM regressor for multi-quantile regression. + +This module provides the LGBMQuantileRegressor class, which extends LightGBM's LGBMRegressor +to support multi-quantile output by configuring the objective function accordingly. Each quantile is predicted +by a separate tree within the same boosting ensemble. The module also includes serialization utilities. +""" + +from typing import Self + +import numpy as np +import numpy.typing as npt +import pandas as pd +from lightgbm import LGBMRegressor +from sklearn.base import BaseEstimator, RegressorMixin +from skops.io import dumps, loads + +from openstef_core.exceptions import ModelLoadingError + + +class LGBMQuantileRegressor(BaseEstimator, RegressorMixin): + """Custom LightGBM regressor for multi-quantile regression. + + Extends LGBMRegressor to support multi-quantile output by configuring + the objective function accordingly. Each quantile is predicted by a + separate tree within the same boosting ensemble. + """ + + def __init__( + self, + quantiles: list[float], + linear_tree: bool, # noqa: FBT001 + n_estimators: int = 100, + learning_rate: float = 0.1, + max_depth: int = -1, + min_child_weight: float = 1.0, + min_data_in_leaf: int = 20, + min_data_in_bin: int = 10, + reg_alpha: float = 0.0, + reg_lambda: float = 0.0, + num_leaves: int = 31, + max_bin: int = 255, + subsample: float = 1.0, + colsample_bytree: float = 1.0, + colsample_bynode: float = 1.0, + random_state: int | None = None, + early_stopping_rounds: int | None = None, + verbosity: int = -1, + ) -> None: # type: ignore + """Initialize LgbLinearQuantileRegressor with quantiles. + + Args: + quantiles: List of quantiles to predict (e.g., [0.1, 0.5, 0.9]). + n_estimators: Number of boosting rounds/trees to fit. + learning_rate: Step size shrinkage used to prevent overfitting. + max_depth: Maximum depth of trees. + min_child_weight: Minimum sum of instance weight (hessian) needed in a child. + min_data_in_leaf: Minimum number of data points in a leaf. + min_data_in_bin: Minimum number of data points in a bin. + reg_alpha: L1 regularization on leaf weights. + reg_lambda: L2 regularization on leaf weights. + num_leaves: Maximum number of leaves. + max_bin: Maximum number of discrete bins for continuous features. + subsample: Fraction of training samples used for each tree. + colsample_bytree: Fraction of features used when constructing each tree. + colsample_bynode: Fraction of features used for each split/node. + random_state: Random seed for reproducibility. + early_stopping_rounds: Training will stop if performance doesn't improve for this many rounds. + verbosity: Verbosity level for LgbLinear training. + + """ + self.quantiles = quantiles + self.linear_tree = linear_tree + self.n_estimators = n_estimators + self.learning_rate = learning_rate + self.max_depth = max_depth + self.min_child_weight = min_child_weight + self.min_data_in_leaf = min_data_in_leaf + self.min_data_in_bin = min_data_in_bin + self.reg_alpha = reg_alpha + self.reg_lambda = reg_lambda + self.num_leaves = num_leaves + self.max_bin = max_bin + self.subsample = subsample + self.colsample_bytree = colsample_bytree + self.colsample_bynode = colsample_bynode + self.random_state = random_state + self.early_stopping_rounds = early_stopping_rounds + self.verbosity = verbosity + + self._models: list[LGBMRegressor] = [ + LGBMRegressor( + objective="quantile", + alpha=q, + n_estimators=n_estimators, + learning_rate=learning_rate, + max_depth=max_depth, + min_child_weight=min_child_weight, + min_data_in_leaf=min_data_in_leaf, + min_data_in_bin=min_data_in_bin, + reg_alpha=reg_alpha, + reg_lambda=reg_lambda, + num_leaves=num_leaves, + max_bin=max_bin, + subsample=subsample, + colsample_bytree=colsample_bytree, + colsample_bynode=colsample_bynode, + random_state=random_state, + early_stopping_rounds=early_stopping_rounds, + verbosity=verbosity, + linear_tree=linear_tree, + ) + for q in quantiles # type: ignore + ] + + def fit( + self, + X: npt.NDArray[np.floating] | pd.DataFrame, # noqa: N803 + y: npt.NDArray[np.floating] | pd.Series, + sample_weight: npt.NDArray[np.floating] | None = None, + feature_name: list[str] | None = None, + eval_set: npt.NDArray[np.floating] | None = None, + eval_sample_weight: npt.NDArray[np.floating] | None = None, + ) -> None: + """Fit the multi-quantile regressor. + + Args: + X: Input features as a DataFrame. + y: Target values as a 2D array where each column corresponds to a quantile. + sample_weight: Sample weights for training data. + feature_name: List of feature names. + eval_set: Evaluation set for early stopping. + eval_sample_weight: Sample weights for evaluation data. + """ + for model in self._models: + if eval_set is None: + model.set_params(early_stopping_rounds=None) + else: + model.set_params(early_stopping_rounds=self.early_stopping_rounds) + model.fit( # type: ignore + X=np.asarray(X), + y=y, + eval_metric="quantile", + sample_weight=sample_weight, + eval_set=eval_set, # type: ignore + eval_sample_weight=eval_sample_weight, # type: ignore + feature_name=feature_name, # type: ignore + ) + + def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: # noqa: N803 + """Predict quantiles for the input features. + + Args: + X: Input features as a DataFrame. + + Returns: + + A 2D array where each column corresponds to predicted quantiles. + """ # noqa: D412 + return np.column_stack([model.predict(X=np.asarray(X)) for model in self._models]) # type: ignore + + def __sklearn_is_fitted__(self) -> bool: # noqa: PLW3201 + """Check if all models are fitted. + + Returns: + True if all quantile models are fitted, False otherwise. + """ + return all(model.__sklearn_is_fitted__() for model in self._models) + + def save_bytes(self) -> bytes: + """Serialize the model. + + Returns: + A string representation of the model. + """ + return dumps(self) + + @classmethod + def load_bytes(cls, model_bytes: bytes) -> Self: + """Deserialize the model from bytes using joblib. + + Args: + model_bytes : Bytes representing the serialized model. + + Returns: + An instance of LgbLinearQuantileRegressor. + + Raises: + ModelLoadingError: If the deserialized object is not a LgbLinearQuantileRegressor. + """ + trusted_types = [ + "collections.OrderedDict", + "lightgbm.basic.Booster", + "lightgbm.sklearn.LGBMRegressor", + "openstef_models.estimators.lightgbm.LGBMQuantileRegressor", + ] + instance = loads(model_bytes, trusted=trusted_types) + + if not isinstance(instance, cls): + raise ModelLoadingError("Deserialized object is not a LgbLinearQuantileRegressor") + + return instance diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py new file mode 100644 index 000000000..b1909b068 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -0,0 +1,137 @@ +from __future__ import annotations + +import base64 +import logging +from typing import Any, Literal, Self, Union, cast, override + +import numpy as np +import numpy.typing as npt +import pandas as pd +from pydantic import Field + + +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + ModelLoadingError, + NotFittedError, +) +from openstef_core.mixins import HyperParams +from openstef_models.estimators.hybrid import HybridQuantileRegressor +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig +from openstef_models.models.forecasting.lightgbm_forecaster import LightGBMHyperParams + + +class HybridHyperParams(HyperParams): + """Hyperparameters for Support Vector Regression (Hybrid).""" + + lightgbm_params: LightGBMHyperParams = LightGBMHyperParams() + + l1_penalty: float = Field( + default=0.0, + description="L1 regularization term for the quantile regression.", + ) + + +class HybridForecasterConfig(ForecasterConfig): + """Configuration for Hybrid-based forecasting models.""" + + hyperparams: HybridHyperParams = HybridHyperParams() + + verbosity: bool = Field( + default=True, + description="Enable verbose output from the Hybrid model (True/False).", + ) + + +MODEL_CODE_VERSION = 2 + + +class HybridForecasterState(BaseConfig): + """Serializable state for Hybrid forecaster persistence.""" + + version: int = Field(default=MODEL_CODE_VERSION, description="Version of the model code.") + config: HybridForecasterConfig = Field(..., description="Forecaster configuration.") + model: str = Field(..., description="Base64-encoded serialized Hybrid model.") + + +class HybridForecaster(Forecaster): + """Wrapper for sklearn's Hybrid to make it compatible with HorizonForecaster.""" + + Config = HybridForecasterConfig + HyperParams = HybridHyperParams + + _config: HybridForecasterConfig + model: HybridQuantileRegressor + + def __init__(self, config: HybridForecasterConfig) -> None: + """Initialize the Hybrid forecaster. + + Args: + kernel: Kernel type for Hybrid. Must be one of "linear", "poly", "rbf", "sigmoid", or "precomputed". + C: Regularization parameter. + epsilon: Epsilon in the epsilon-Hybrid model. + """ + self._config = config + + self._model = HybridQuantileRegressor( + quantiles=config.quantiles, + lightgbm_n_estimators=config.hyperparams.lightgbm_params.n_estimators, + lightgbm_learning_rate=config.hyperparams.lightgbm_params.learning_rate, + lightgbm_max_depth=config.hyperparams.lightgbm_params.max_depth, + lightgbm_min_child_weight=config.hyperparams.lightgbm_params.min_child_weight, + lightgbm_min_data_in_leaf=config.hyperparams.lightgbm_params.min_data_in_leaf, + lightgbm_min_data_in_bin=config.hyperparams.lightgbm_params.min_data_in_bin, + lightgbm_reg_alpha=config.hyperparams.lightgbm_params.reg_alpha, + lightgbm_reg_lambda=config.hyperparams.lightgbm_params.reg_lambda, + lightgbm_num_leaves=config.hyperparams.lightgbm_params.num_leaves, + lightgbm_max_bin=config.hyperparams.lightgbm_params.max_bin, + lightgbm_subsample=config.hyperparams.lightgbm_params.subsample, + lightgbm_colsample_by_tree=config.hyperparams.lightgbm_params.colsample_bytree, + lightgbm_colsample_by_node=config.hyperparams.lightgbm_params.colsample_bynode, + ) + + @property + @override + def config(self) -> ForecasterConfig: + return self._config + + @property + def is_fitted(self) -> bool: + """Check if the model is fitted.""" + return self._model.is_fitted + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + """Fit the Hybrid model to the training data. + + Args: + data: Training data in the expected ForecastInputDataset format. + data_val: Validation data for tuning the model (optional, not used in this implementation). + + """ + + input_data: pd.DataFrame = data.input_data() + target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore + + self._model.fit(X=input_data, y=target) + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + if not self._model.is_fitted: + raise NotFittedError(self.__class__.__name__) + + input_data: pd.DataFrame = data.input_data(start=data.forecast_start) + prediction: npt.NDArray[np.floating] = self._model.predict(X=input_data) + + return ForecastDataset( + data=pd.DataFrame( + data=prediction, + index=input_data.index, + columns=[quantile.format() for quantile in self.config.quantiles], + ), + sample_interval=data.sample_interval, + ) + + +__all__ = ["HybridForecaster", "HybridForecasterConfig", "HybridHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py new file mode 100644 index 000000000..dc5babb7e --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py @@ -0,0 +1,346 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""LightGBM-based forecasting models for probabilistic energy forecasting. + +Provides gradient boosting tree models using LightGBM for multi-quantile energy +forecasting. Optimized for time series data with specialized loss functions and +comprehensive hyperparameter control for production forecasting workflows. +""" + +import base64 +from typing import TYPE_CHECKING, Any, Literal, Self, cast, override + +import numpy as np +import pandas as pd +from pydantic import Field + +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + ModelLoadingError, + NotFittedError, +) +from openstef_core.mixins import HyperParams +from openstef_models.estimators.lightgbm import LGBMQuantileRegressor +from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig + +if TYPE_CHECKING: + import numpy.typing as npt + + +class LgbLinearHyperParams(HyperParams): + """LgbLinear hyperparameters for gradient boosting tree models. + + Example: + Creating custom hyperparameters for deep trees with regularization: + + >>> hyperparams = LGBMHyperParams( + ... n_estimators=200, + ... max_depth=8, + ... learning_rate=0.1, + ... reg_alpha=0.1, + ... reg_lambda=1.0, + ... subsample=0.8 + ... ) + + Note: + These parameters are optimized for probabilistic forecasting with + quantile regression. The default objective function is specialized + for magnitude-weighted pinball loss. + """ + + # Core Tree Boosting Parameters + + n_estimators: int = Field( + default=150, + description="Number of boosting rounds/trees to fit. Higher values may improve performance but " + "increase training time and risk overfitting.", + ) + learning_rate: float = Field( + default=0.3, + alias="eta", + description="Step size shrinkage used to prevent overfitting. Range: [0,1]. Lower values require " + "more boosting rounds.", + ) + max_depth: int = Field( + default=4, + description="Maximum depth of trees. Higher values capture more complex patterns but risk " + "overfitting. Range: [1,∞]", + ) + min_child_weight: float = Field( + default=1, + description="Minimum sum of instance weight (hessian) needed in a child. Higher values prevent " + "overfitting. Range: [0,∞]", + ) + + min_data_in_leaf: int = Field( + default=5, + description="Minimum number of data points in a leaf. Higher values prevent overfitting. Range: [1,∞]", + ) + min_data_in_bin: int = Field( + default=5, + description="Minimum number of data points in a bin. Higher values prevent overfitting. Range: [1,∞]", + ) + + # Regularization + reg_alpha: float = Field( + default=0, + description="L1 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", + ) + reg_lambda: float = Field( + default=1, + description="L2 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", + ) + + # Tree Structure Control + num_leaves: int = Field( + default=31, + description="Maximum number of leaves. 0 means no limit. Only relevant when grow_policy='lossguide'.", + ) + + max_bin: int = Field( + default=256, + description="Maximum number of discrete bins for continuous features. Higher values may improve accuracy but " + "increase memory. Only for hist tree_method.", + ) + + # Subsampling Parameters + subsample: float = Field( + default=0.5, + description="Fraction of training samples used for each tree. Lower values prevent overfitting. Range: (0,1]", + ) + colsample_bytree: float = Field( + default=0.5, + description="Fraction of features used when constructing each tree. Range: (0,1]", + ) + colsample_bynode: float = Field( + default=0.5, + description="Fraction of features used for each split/node. Range: (0,1]", + ) + + # General Parameters + random_state: int | None = Field( + default=None, + alias="seed", + description="Random seed for reproducibility. Controls tree structure randomness.", + ) + + early_stopping_rounds: int | None = Field( + default=10, + description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", + ) + + +class LgbLinearForecasterConfig(ForecasterConfig): + """Configuration for LgbLinear-based forecaster. + Extends HorizonForecasterConfig with LgbLinear-specific hyperparameters + and execution settings. + + Example: + Creating a LgbLinear forecaster configuration with custom hyperparameters: + >>> from datetime import timedelta + >>> from openstef_core.types import LeadTime, Quantile + >>> config = LgbLinearForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime(timedelta(hours=1 + ))], + ... hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=6) + ... ). + """ # noqa: D205 + + hyperparams: LgbLinearHyperParams = LgbLinearHyperParams() + + # General Parameters + device: str = Field( + default="cpu", + description="Device for LgbLinear computation. Options: 'cpu', 'cuda', 'cuda:', 'gpu'", + ) + n_jobs: int = Field( + default=1, + description="Number of parallel threads for tree construction. -1 uses all available cores.", + ) + verbosity: Literal[0, 1, 2, 3] = Field( + default=0, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + ) + + +MODEL_CODE_VERSION = 1 + + +class LgbLinearForecasterState(BaseConfig): + """Serializable state for LgbLinear forecaster persistence. + + Contains all information needed to restore a trained LgbLinear model, + including configuration and the serialized model weights. Used for + model saving, loading, and version management in production systems. + """ + + version: int = Field(default=MODEL_CODE_VERSION, description="Version of the model code.") + config: LgbLinearForecasterConfig = Field(..., description="Forecaster configuration.") + model: str = Field(..., description="Base64-encoded serialized LgbLinear model.") + + +class LgbLinearForecaster(Forecaster, ExplainableForecaster): + """LgbLinear-based forecaster for probabilistic energy forecasting. + + Implements gradient boosting trees using LgbLinear for multi-quantile forecasting. + Optimized for time series prediction with specialized loss functions and + comprehensive hyperparameter control suitable for production energy forecasting. + + The forecaster uses a multi-output strategy where each quantile is predicted + by separate trees within the same boosting ensemble. This approach provides + well-calibrated uncertainty estimates while maintaining computational efficiency. + + Invariants: + - fit() must be called before predict() to train the model + - Configuration quantiles determine the number of prediction outputs + - Model state is preserved across predict() calls after fitting + - Input features must match training data structure during prediction + + Example: + Basic forecasting workflow: + + >>> from datetime import timedelta + >>> from openstef_core.types import LeadTime, Quantile + >>> config = LgbLinearForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime(timedelta(hours=1))], + ... hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=6) + ... ) + >>> forecaster = LgbLinearForecaster(config) + >>> # forecaster.fit(training_data) + >>> # predictions = forecaster.predict(test_data) + + Note: + LgbLinear dependency is optional and must be installed separately. + The model automatically handles multi-quantile output and uses + magnitude-weighted pinball loss by default for better forecasting performance. + + See Also: + LgbLinearHyperParams: Detailed hyperparameter configuration options. + HorizonForecaster: Base interface for all forecasting models. + GBLinearForecaster: Alternative linear model using LgbLinear. + """ + + Config = LgbLinearForecasterConfig + HyperParams = LgbLinearHyperParams + + _config: LgbLinearForecasterConfig + _lgblinear_model: LGBMQuantileRegressor + + def __init__(self, config: LgbLinearForecasterConfig) -> None: + """Initialize LgbLinear forecaster with configuration. + + Creates an untrained LgbLinear regressor with the specified configuration. + The underlying LgbLinear model is configured for multi-output quantile + regression using the provided hyperparameters and execution settings. + + Args: + config: Complete configuration including hyperparameters, quantiles, + and execution settings for the LgbLinear model. + """ + self._config = config + + self._lgblinear_model = LGBMQuantileRegressor( + quantiles=[float(q) for q in config.quantiles], + linear_tree=True, + n_estimators=config.hyperparams.n_estimators, + learning_rate=config.hyperparams.learning_rate, + max_depth=config.hyperparams.max_depth, + min_child_weight=config.hyperparams.min_child_weight, + min_data_in_leaf=config.hyperparams.min_data_in_leaf, + min_data_in_bin=config.hyperparams.min_data_in_bin, + reg_alpha=config.hyperparams.reg_alpha, + reg_lambda=config.hyperparams.reg_lambda, + num_leaves=config.hyperparams.num_leaves, + max_bin=config.hyperparams.max_bin, + subsample=config.hyperparams.subsample, + colsample_bytree=config.hyperparams.colsample_bytree, + colsample_bynode=config.hyperparams.colsample_bynode, + random_state=config.hyperparams.random_state, + early_stopping_rounds=config.hyperparams.early_stopping_rounds, + verbosity=config.verbosity, + ) + + @property + @override + def config(self) -> ForecasterConfig: + return self._config + + @property + @override + def hyperparams(self) -> LgbLinearHyperParams: + return self._config.hyperparams + + @property + @override + def is_fitted(self) -> bool: + return self._lgblinear_model.__sklearn_is_fitted__() + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + input_data: pd.DataFrame = data.input_data() + target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore + + sample_weight = data.sample_weight_series + + # Prepare validation data if provided + eval_set = None + eval_sample_weight = None + if data_val is not None: + val_input_data: pd.DataFrame = data_val.input_data() + val_target: npt.NDArray[np.floating] = data_val.target_series.to_numpy() # type: ignore + val_sample_weight = data_val.sample_weight_series.to_numpy() # type: ignore + eval_set = [(val_input_data, val_target)] + + eval_sample_weight = [val_sample_weight] + + self._lgblinear_model.fit( # type: ignore + X=input_data, + y=target, + feature_name=input_data.columns.tolist(), + sample_weight=sample_weight, + eval_set=eval_set, + eval_sample_weight=eval_sample_weight, + ) + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + input_data: pd.DataFrame = data.input_data(start=data.forecast_start) + prediction: npt.NDArray[np.floating] = self._lgblinear_model.predict(X=input_data) + + return ForecastDataset( + data=pd.DataFrame( + data=prediction, + index=input_data.index, + columns=[quantile.format() for quantile in self.config.quantiles], + ), + sample_interval=data.sample_interval, + ) + + @property + @override + def feature_importances(self) -> pd.DataFrame: + models = self._lgblinear_model._models # noqa: SLF001 + weights_df = pd.DataFrame( + [models[i].feature_importances_ for i in range(len(models))], + index=[quantile.format() for quantile in self.config.quantiles], + columns=models[0].feature_name_, + ).transpose() + + weights_df.index.name = "feature_name" + weights_df.columns.name = "quantiles" + + weights_abs = weights_df.abs() + total = weights_abs.sum(axis=0).replace(to_replace=0, value=1.0) # pyright: ignore[reportUnknownMemberType] + + return weights_abs / total + + +__all__ = ["LgbLinearForecaster", "LgbLinearForecasterConfig", "LgbLinearHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py new file mode 100644 index 000000000..cc5a26856 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py @@ -0,0 +1,340 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""LightGBM-based forecasting models for probabilistic energy forecasting. + +Provides gradient boosting tree models using LightGBM for multi-quantile energy +forecasting. Optimized for time series data with specialized loss functions and +comprehensive hyperparameter control for production forecasting workflows. +""" + +from typing import Literal, override + +import numpy as np +import numpy.typing as npt +import pandas as pd +from pydantic import Field + +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + NotFittedError, +) +from openstef_core.mixins import HyperParams +from openstef_models.estimators.lightgbm import LGBMQuantileRegressor +from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig + + +class LightGBMHyperParams(HyperParams): + """LightGBM hyperparameters for gradient boosting tree models. + + Example: + Creating custom hyperparameters for deep trees with regularization: + + >>> hyperparams = LGBMHyperParams( + ... n_estimators=200, + ... max_depth=8, + ... learning_rate=0.1, + ... reg_alpha=0.1, + ... reg_lambda=1.0, + ... subsample=0.8 + ... ) + + Note: + These parameters are optimized for probabilistic forecasting with + quantile regression. The default objective function is specialized + for magnitude-weighted pinball loss. + """ + + # Core Tree Boosting Parameters + n_estimators: int = Field( + default=100, + description="Number of boosting rounds/trees to fit. Higher values may improve performance but " + "increase training time and risk overfitting.", + ) + learning_rate: float = Field( + default=0.49, # 0.3 + alias="eta", + description="Step size shrinkage used to prevent overfitting. Range: [0,1]. Lower values require " + "more boosting rounds.", + ) + max_depth: int = Field( + default=2, # 8, + description="Maximum depth of trees. Higher values capture more complex patterns but risk " + "overfitting. Range: [1,∞]", + ) + min_child_weight: float = Field( + default=1, + description="Minimum sum of instance weight (hessian) needed in a child. Higher values prevent " + "overfitting. Range: [0,∞]", + ) + + min_data_in_leaf: int = Field( + default=10, + description="Minimum number of data points in a leaf. Higher values prevent overfitting. Range: [1,∞]", + ) + min_data_in_bin: int = Field( + default=10, + description="Minimum number of data points in a bin. Higher values prevent overfitting. Range: [1,∞]", + ) + + # Regularization + reg_alpha: float = Field( + default=0, + description="L1 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", + ) + reg_lambda: float = Field( + default=1, + description="L2 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", + ) + + # Tree Structure Control + num_leaves: int = Field( + default=100, # 31 + description="Maximum number of leaves. 0 means no limit. Only relevant when grow_policy='lossguide'.", + ) + + max_bin: int = Field( + default=256, + description="Maximum number of discrete bins for continuous features. Higher values may improve accuracy but " + "increase memory. Only for hist tree_method.", + ) + + # Subsampling Parameters + subsample: float = Field( + default=1.0, + description="Fraction of training samples used for each tree. Lower values prevent overfitting. Range: (0,1]", + ) + colsample_bytree: float = Field( + default=1.0, + description="Fraction of features used when constructing each tree. Range: (0,1]", + ) + colsample_bynode: float = Field( + default=1.0, + description="Fraction of features used for each split/node. Range: (0,1]", + ) + + # General Parameters + random_state: int | None = Field( + default=None, + alias="seed", + description="Random seed for reproducibility. Controls tree structure randomness.", + ) + + early_stopping_rounds: int | None = Field( + default=None, + description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", + ) + + +class LightGBMForecasterConfig(ForecasterConfig): + """Configuration for LightGBM-based forecaster. + Extends HorizonForecasterConfig with LightGBM-specific hyperparameters + and execution settings. + + Example: + Creating a LightGBM forecaster configuration with custom hyperparameters: + >>> from datetime import timedelta + >>> from openstef_core.types import LeadTime, Quantile + >>> config = LightGBMForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime(timedelta(hours=1 + ))], + ... hyperparams=LightGBMHyperParams(n_estimators=100, max_depth=6) + ... ). + """ # noqa: D205 + + hyperparams: LightGBMHyperParams = LightGBMHyperParams() + + # General Parameters + device: str = Field( + default="cpu", + description="Device for LightGBM computation. Options: 'cpu', 'cuda', 'cuda:', 'gpu'", + ) + n_jobs: int = Field( + default=1, + description="Number of parallel threads for tree construction. -1 uses all available cores.", + ) + verbosity: Literal[-1, 0, 1, 2, 3] = Field( + default=-1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + ) + + +MODEL_CODE_VERSION = 1 + + +class LightGBMForecasterState(BaseConfig): + """Serializable state for LightGBM forecaster persistence. + + Contains all information needed to restore a trained LightGBM model, + including configuration and the serialized model weights. Used for + model saving, loading, and version management in production systems. + """ + + version: int = Field(default=MODEL_CODE_VERSION, description="Version of the model code.") + config: LightGBMForecasterConfig = Field(..., description="Forecaster configuration.") + model: str = Field(..., description="Base64-encoded serialized LightGBM model.") + + +class LightGBMForecaster(Forecaster, ExplainableForecaster): + """LightGBM-based forecaster for probabilistic energy forecasting. + + Implements gradient boosting trees using LightGBM for multi-quantile forecasting. + Optimized for time series prediction with specialized loss functions and + comprehensive hyperparameter control suitable for production energy forecasting. + + The forecaster uses a multi-output strategy where each quantile is predicted + by separate trees within the same boosting ensemble. This approach provides + well-calibrated uncertainty estimates while maintaining computational efficiency. + + Invariants: + - fit() must be called before predict() to train the model + - Configuration quantiles determine the number of prediction outputs + - Model state is preserved across predict() calls after fitting + - Input features must match training data structure during prediction + + Example: + Basic forecasting workflow: + + >>> from datetime import timedelta + >>> from openstef_core.types import LeadTime, Quantile + >>> config = LightGBMForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime(timedelta(hours=1))], + ... hyperparams=LightGBMHyperParams(n_estimators=100, max_depth=6) + ... ) + >>> forecaster = LightGBMForecaster(config) + >>> # forecaster.fit(training_data) + >>> # predictions = forecaster.predict(test_data) + + Note: + LightGBM dependency is optional and must be installed separately. + The model automatically handles multi-quantile output and uses + magnitude-weighted pinball loss by default for better forecasting performance. + + See Also: + LightGBMHyperParams: Detailed hyperparameter configuration options. + HorizonForecaster: Base interface for all forecasting models. + GBLinearForecaster: Alternative linear model using LightGBM. + """ + + Config = LightGBMForecasterConfig + HyperParams = LightGBMHyperParams + + _config: LightGBMForecasterConfig + _lightgbm_model: LGBMQuantileRegressor + + def __init__(self, config: LightGBMForecasterConfig) -> None: + """Initialize LightGBM forecaster with configuration. + + Creates an untrained LightGBM regressor with the specified configuration. + The underlying LightGBM model is configured for multi-output quantile + regression using the provided hyperparameters and execution settings. + + Args: + config: Complete configuration including hyperparameters, quantiles, + and execution settings for the LightGBM model. + """ + self._config = config + + self._lightgbm_model = LGBMQuantileRegressor( + quantiles=[float(q) for q in config.quantiles], + linear_tree=False, + n_estimators=config.hyperparams.n_estimators, + learning_rate=config.hyperparams.learning_rate, + max_depth=config.hyperparams.max_depth, + min_child_weight=config.hyperparams.min_child_weight, + min_data_in_leaf=config.hyperparams.min_data_in_leaf, + min_data_in_bin=config.hyperparams.min_data_in_bin, + reg_alpha=config.hyperparams.reg_alpha, + reg_lambda=config.hyperparams.reg_lambda, + num_leaves=config.hyperparams.num_leaves, + max_bin=config.hyperparams.max_bin, + subsample=config.hyperparams.subsample, + colsample_bytree=config.hyperparams.colsample_bytree, + colsample_bynode=config.hyperparams.colsample_bynode, + random_state=config.hyperparams.random_state, + early_stopping_rounds=config.hyperparams.early_stopping_rounds, + verbosity=config.verbosity, + ) + + @property + @override + def config(self) -> ForecasterConfig: + return self._config + + @property + @override + def hyperparams(self) -> LightGBMHyperParams: + return self._config.hyperparams + + @property + @override + def is_fitted(self) -> bool: + return self._lightgbm_model.__sklearn_is_fitted__() + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + input_data: pd.DataFrame = data.input_data() + target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore + + sample_weight = data.sample_weight_series + + # Prepare validation data if provided + eval_set = None + eval_sample_weight = None + if data_val is not None: + val_input_data: pd.DataFrame = data_val.input_data() + val_target: npt.NDArray[np.floating] = data_val.target_series.to_numpy() # type: ignore + val_sample_weight = data_val.sample_weight_series.to_numpy() # type: ignore + eval_set = (val_input_data, val_target) + eval_sample_weight = [val_sample_weight] + + self._lightgbm_model.fit( + X=input_data, + y=target, + feature_name=input_data.columns.tolist(), + sample_weight=sample_weight, # type: ignore + eval_set=eval_set, # type: ignore + eval_sample_weight=eval_sample_weight, # type: ignore + ) + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + input_data: pd.DataFrame = data.input_data(start=data.forecast_start) + prediction: npt.NDArray[np.floating] = self._lightgbm_model.predict(X=input_data) + + return ForecastDataset( + data=pd.DataFrame( + data=prediction, + index=input_data.index, + columns=[quantile.format() for quantile in self.config.quantiles], + ), + sample_interval=data.sample_interval, + ) + + @property + @override + def feature_importances(self) -> pd.DataFrame: + models = self._lightgbm_model._models + weights_df = pd.DataFrame( + [models[i].feature_importances_ for i in range(len(models))], + index=[quantile.format() for quantile in self.config.quantiles], + columns=models[0].feature_name_, + ).transpose() + + weights_df.index.name = "feature_name" + weights_df.columns.name = "quantiles" + + weights_abs = weights_df.abs() + total = weights_abs.sum(axis=0).replace(to_replace=0, value=1.0) # pyright: ignore[reportUnknownMemberType] + + return weights_abs / total + + +__all__ = ["LightGBMForecaster", "LightGBMForecasterConfig", "LightGBMHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index f7152bf11..517f5be83 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -30,10 +30,22 @@ from openstef_models.models import ForecastingModel from openstef_models.models.forecasting.flatliner_forecaster import FlatlinerForecaster from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster +from openstef_models.models.forecasting.hybrid_forecaster import HybridForecaster +from openstef_models.models.forecasting.lgblinear_forecaster import LgbLinearForecaster +from openstef_models.models.forecasting.lightgbm_forecaster import LightGBMForecaster from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster from openstef_models.transforms.energy_domain import WindPowerFeatureAdder -from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, Imputer, SampleWeighter, Scaler -from openstef_models.transforms.postprocessing import ConfidenceIntervalApplicator, QuantileSorter +from openstef_models.transforms.general import ( + Clipper, + EmptyFeatureRemover, + Imputer, + SampleWeighter, + Scaler, +) +from openstef_models.transforms.postprocessing import ( + ConfidenceIntervalApplicator, + QuantileSorter, +) from openstef_models.transforms.time_domain import ( CyclicFeaturesAdder, DatetimeFeaturesAdder, @@ -41,19 +53,36 @@ RollingAggregatesAdder, ) from openstef_models.transforms.time_domain.lags_adder import LagsAdder -from openstef_models.transforms.time_domain.rolling_aggregates_adder import AggregationFunction -from openstef_models.transforms.validation import CompletenessChecker, FlatlineChecker, InputConsistencyChecker -from openstef_models.transforms.weather_domain import DaylightFeatureAdder, RadiationDerivedFeaturesAdder -from openstef_models.transforms.weather_domain.atmosphere_derived_features_adder import AtmosphereDerivedFeaturesAdder +from openstef_models.transforms.time_domain.rolling_aggregates_adder import ( + AggregationFunction, +) +from openstef_models.transforms.validation import ( + CompletenessChecker, + FlatlineChecker, + InputConsistencyChecker, +) +from openstef_models.transforms.weather_domain import ( + DaylightFeatureAdder, + RadiationDerivedFeaturesAdder, +) +from openstef_models.transforms.weather_domain.atmosphere_derived_features_adder import ( + AtmosphereDerivedFeaturesAdder, +) from openstef_models.utils.data_split import DataSplitter from openstef_models.utils.feature_selection import Exclude, FeatureSelection, Include -from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow, ForecastingCallback +from openstef_models.workflows.custom_forecasting_workflow import ( + CustomForecastingWorkflow, + ForecastingCallback, +) class LocationConfig(BaseConfig): """Configuration for location information in forecasting workflows.""" - name: str = Field(default="test_location", description="Name of the forecasting location or workflow.") + name: str = Field( + default="test_location", + description="Name of the forecasting location or workflow.", + ) description: str = Field(default="", description="Description of the forecasting workflow.") coordinate: Coordinate = Field( default=Coordinate( @@ -63,7 +92,8 @@ class LocationConfig(BaseConfig): description="Geographic coordinate of the location.", ) country_code: CountryAlpha2 = Field( - default=CountryAlpha2("NL"), description="Country code for holiday feature generation." + default=CountryAlpha2("NL"), + description="Country code for holiday feature generation.", ) @property @@ -87,42 +117,65 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob model_id: ModelIdentifier = Field(description="Unique identifier for the forecasting model.") # Model configuration - model: Literal["xgboost", "gblinear", "flatliner"] = Field( + model: Literal["xgboost", "gblinear", "flatliner", "hybrid", "lightgbm", "lgblinear"] = Field( description="Type of forecasting model to use." ) # TODO(#652): Implement median forecaster quantiles: list[Quantile] = Field( - default=[Q(0.5)], description="List of quantiles to predict for probabilistic forecasting." + default=[Q(0.5)], + description="List of quantiles to predict for probabilistic forecasting.", ) sample_interval: timedelta = Field( - default=timedelta(minutes=15), description="Time interval between consecutive data samples." + default=timedelta(minutes=15), + description="Time interval between consecutive data samples.", ) horizons: list[LeadTime] = Field( - default=[LeadTime.from_string("PT48H")], description="List of forecast horizons to predict." + default=[LeadTime.from_string("PT48H")], + description="List of forecast horizons to predict.", ) xgboost_hyperparams: XGBoostForecaster.HyperParams = Field( - default=XGBoostForecaster.HyperParams(), description="Hyperparameters for XGBoost forecaster." + default=XGBoostForecaster.HyperParams(), + description="Hyperparameters for XGBoost forecaster.", ) gblinear_hyperparams: GBLinearForecaster.HyperParams = Field( - default=GBLinearForecaster.HyperParams(), description="Hyperparameters for GBLinear forecaster." + default=GBLinearForecaster.HyperParams(), + description="Hyperparameters for GBLinear forecaster.", + ) + + lightgbm_hyperparams: LightGBMForecaster.HyperParams = Field( + default=LightGBMForecaster.HyperParams(), + description="Hyperparameters for LightGBM forecaster.", + ) + + lgblinear_hyperparams: LgbLinearForecaster.HyperParams = Field( + default=LgbLinearForecaster.HyperParams(), + description="Hyperparameters for LightGBM forecaster.", + ) + + hybrid_hyperparams: HybridForecaster.HyperParams = Field( + default=HybridForecaster.HyperParams(), + description="Hyperparameters for Hybrid forecaster.", ) location: LocationConfig = Field( - default=LocationConfig(), description="Location information for the forecasting workflow." + default=LocationConfig(), + description="Location information for the forecasting workflow.", ) # Data properties target_column: str = Field(default="load", description="Name of the target variable column in datasets.") energy_price_column: str = Field( - default="day_ahead_electricity_price", description="Name of the energy price column in datasets." + default="day_ahead_electricity_price", + description="Name of the energy price column in datasets.", ) radiation_column: str = Field(default="radiation", description="Name of the radiation column in datasets.") wind_speed_column: str = Field(default="windspeed", description="Name of the wind speed column in datasets.") pressure_column: str = Field(default="pressure", description="Name of the pressure column in datasets.") temperature_column: str = Field(default="temperature", description="Name of the temperature column in datasets.") relative_humidity_column: str = Field( - default="relative_humidity", description="Name of the relative humidity column in datasets." + default="relative_humidity", + description="Name of the relative humidity column in datasets.", ) predict_history: timedelta = Field( default=timedelta(days=14), @@ -131,7 +184,8 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob # Feature engineering and validation completeness_threshold: float = Field( - default=0.5, description="Minimum fraction of data that should be available for making a regular forecast." + default=0.5, + description="Minimum fraction of data that should be available for making a regular forecast.", ) flatliner_threshold: timedelta = Field( default=timedelta(hours=24), @@ -150,7 +204,9 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob description="Feature selection for which features to clip.", ) sample_weight_exponent: float = Field( - default_factory=lambda data: 1.0 if data.get("model") == "gblinear" else 0.0, + default_factory=lambda data: 1.0 + if data.get("model") in {"gblinear", "lgblinear", "lightgbm", "hybrid", "xgboost"} + else 0.0, description="Exponent applied to scale the sample weights. " "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " "Note: Defaults to 1.0 for gblinear congestion models.", @@ -174,16 +230,22 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob # Callbacks mlflow_storage: MLFlowStorage | None = Field( - default_factory=MLFlowStorage, description="Configuration for MLflow experiment tracking and model storage." + default_factory=MLFlowStorage, + description="Configuration for MLflow experiment tracking and model storage.", ) - model_reuse_enable: bool = Field(default=True, description="Whether to enable reuse of previously trained models.") + model_reuse_enable: bool = Field( + default=True, + description="Whether to enable reuse of previously trained models.", + ) model_reuse_max_age: timedelta = Field( - default=timedelta(days=7), description="Maximum age of a model to be considered for reuse." + default=timedelta(days=7), + description="Maximum age of a model to be considered for reuse.", ) model_selection_enable: bool = Field( - default=True, description="Whether to enable automatic model selection based on performance." + default=True, + description="Whether to enable automatic model selection based on performance.", ) model_selection_metric: tuple[QuantileOrGlobal, str, MetricDirection] = Field( default=(Q(0.5), "R2", "higher_is_better"), @@ -201,7 +263,9 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob ) -def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomForecastingWorkflow: +def create_forecasting_workflow( + config: ForecastingWorkflowConfig, +) -> CustomForecastingWorkflow: """Create a forecasting workflow from configuration. Builds a complete forecasting pipeline including preprocessing, forecaster, and postprocessing @@ -222,7 +286,7 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore load_column=config.target_column, flatliner_threshold=config.flatliner_threshold, detect_non_zero_flatliner=config.detect_non_zero_flatliner, - error_on_flatliner=True, + error_on_flatliner=False, ), CompletenessChecker(completeness_threshold=config.completeness_threshold), EmptyFeatureRemover(), @@ -256,7 +320,10 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore ), ] feature_standardizers = [ - Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), + Clipper( + selection=Include(config.energy_price_column).combine(config.clip_features), + mode="standard", + ), Scaler(selection=Exclude(config.target_column), method="standard"), SampleWeighter( target_column=config.target_column, @@ -281,7 +348,38 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore ) ) postprocessing = [QuantileSorter()] - + elif config.model == "lgblinear": + preprocessing = [ + *checks, + *feature_adders, + HolidayFeatureAdder(country_code=config.location.country_code), + DatetimeFeaturesAdder(onehot_encode=False), + *feature_standardizers, + ] + forecaster = LgbLinearForecaster( + config=LgbLinearForecaster.Config( + quantiles=config.quantiles, + horizons=config.horizons, + hyperparams=config.lgblinear_hyperparams, + ) + ) + postprocessing = [QuantileSorter()] + elif config.model == "lightgbm": + preprocessing = [ + *checks, + *feature_adders, + HolidayFeatureAdder(country_code=config.location.country_code), + DatetimeFeaturesAdder(onehot_encode=False), + *feature_standardizers, + ] + forecaster = LightGBMForecaster( + config=LightGBMForecaster.Config( + quantiles=config.quantiles, + horizons=config.horizons, + hyperparams=config.lightgbm_hyperparams, + ) + ) + postprocessing = [QuantileSorter()] elif config.model == "gblinear": preprocessing = [ *checks, @@ -296,7 +394,7 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore hyperparams=config.gblinear_hyperparams, ) ) - postprocessing = [] + postprocessing = [QuantileSorter()] elif config.model == "flatliner": preprocessing = [] forecaster = FlatlinerForecaster( @@ -308,6 +406,21 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore postprocessing = [ ConfidenceIntervalApplicator(quantiles=config.quantiles), ] + elif config.model == "hybrid": + preprocessing = [ + *checks, + Imputer(selection=Exclude(config.target_column), imputation_strategy="mean"), + *feature_adders, + *feature_standardizers, + ] + forecaster = HybridForecaster( + config=HybridForecaster.Config( + quantiles=config.quantiles, + horizons=config.horizons, + hyperparams=config.hybrid_hyperparams, + ) + ) + postprocessing = [QuantileSorter()] else: msg = f"Unsupported model type: {config.model}" raise ValueError(msg) diff --git a/packages/openstef-models/tests/unit/estimators/test_hybrid.py b/packages/openstef-models/tests/unit/estimators/test_hybrid.py new file mode 100644 index 000000000..857cb7705 --- /dev/null +++ b/packages/openstef-models/tests/unit/estimators/test_hybrid.py @@ -0,0 +1,39 @@ +import pandas as pd +import pytest +from numpy.random import default_rng + +from openstef_models.estimators.hybrid import HybridQuantileRegressor + + +@pytest.fixture +def dataset() -> tuple[pd.DataFrame, pd.Series]: + n_samples = 100 + n_features = 5 + rng = default_rng() + X = pd.DataFrame(rng.random((n_samples, n_features))) + y = pd.Series(rng.random(n_samples)) + return X, y + + +def test_init_sets_quantiles_and_models(): + quantiles = [0.1, 0.5, 0.9] + model = HybridQuantileRegressor(quantiles=quantiles) + assert model.quantiles == quantiles + assert len(model._models) == len(quantiles) + + +def test_fit_and_predict_shape(dataset: tuple[pd.DataFrame, pd.Series]): + quantiles = [0.1, 0.5, 0.9] + X, y = dataset[0], dataset[1] + model = HybridQuantileRegressor(quantiles=quantiles) + model.fit(X, y) + preds = model.predict(X) + assert preds.shape == (X.shape[0], len(quantiles)) + + +def test_is_fitted(dataset: tuple[pd.DataFrame, pd.Series]): + quantiles = [0.1, 0.5, 0.9] + X, y = dataset[0], dataset[1] + model = HybridQuantileRegressor(quantiles=quantiles) + model.fit(X, y) + assert model.is_fitted diff --git a/packages/openstef-models/tests/unit/estimators/test_lightgbm.py b/packages/openstef-models/tests/unit/estimators/test_lightgbm.py new file mode 100644 index 000000000..555add5cb --- /dev/null +++ b/packages/openstef-models/tests/unit/estimators/test_lightgbm.py @@ -0,0 +1,39 @@ +import pandas as pd +import pytest +from numpy.random import default_rng + +from openstef_models.estimators.lightgbm import LGBMQuantileRegressor + + +@pytest.fixture +def dataset() -> tuple[pd.DataFrame, pd.Series]: + n_samples = 100 + n_features = 5 + rng = default_rng() + X = pd.DataFrame(rng.random((n_samples, n_features))) + y = pd.Series(rng.random(n_samples)) + return X, y + + +def test_init_sets_quantiles_and_models(): + quantiles = [0.1, 0.5, 0.9] + model = LGBMQuantileRegressor(quantiles=quantiles, linear_tree=False) + assert model.quantiles == quantiles + assert len(model._models) == len(quantiles) + + +def test_fit_and_predict_shape(dataset: tuple[pd.DataFrame, pd.Series]): + quantiles = [0.1, 0.5, 0.9] + X, y = dataset[0], dataset[1] + model = LGBMQuantileRegressor(quantiles=quantiles, linear_tree=False, n_estimators=5) + model.fit(X, y) + preds = model.predict(X) + assert preds.shape == (X.shape[0], len(quantiles)) + + +def test_sklearn_is_fitted_true_after_fit(dataset: tuple[pd.DataFrame, pd.Series]): + quantiles = [0.1, 0.5, 0.9] + X, y = dataset[0], dataset[1] + model = LGBMQuantileRegressor(quantiles=quantiles, linear_tree=False, n_estimators=2) + model.fit(X, y) + assert model.__sklearn_is_fitted__() diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py new file mode 100644 index 000000000..0288a42d6 --- /dev/null +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py @@ -0,0 +1,159 @@ +from datetime import timedelta + +import pandas as pd +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_models.models.forecasting.lgblinear_forecaster import ( + LgbLinearForecaster, + LgbLinearForecasterConfig, + LgbLinearHyperParams, +) + + +@pytest.fixture +def base_config() -> LgbLinearForecasterConfig: + """Base configuration for LgbLinear forecaster tests.""" + + return LgbLinearForecasterConfig( + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime(timedelta(days=1))], + hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=3, min_data_in_leaf=1, min_data_in_bin=1), + device="cpu", + n_jobs=1, + verbosity=0, + ) + + +@pytest.fixture +def forecaster(base_config: LgbLinearForecasterConfig) -> LgbLinearForecaster: + return LgbLinearForecaster(base_config) + + +def test_initialization(forecaster: LgbLinearForecaster): + assert isinstance(forecaster, LgbLinearForecaster) + assert forecaster.config.hyperparams.n_estimators == 100 # type: ignore + + +def test_quantile_lgblinear_forecaster__fit_predict( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LgbLinearForecasterConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = LgbLinearForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + # Since forecast is deterministic with fixed random seed, check value spread (vectorized) + # All quantiles should have some variation (not all identical values) + stds = result.data.std() + assert (stds > 0).all(), f"All columns should have variation, got stds: {dict(stds)}" + + +def test_lgblinear_forecaster__not_fitted_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LgbLinearForecasterConfig, +): + """Test that NotFittedError is raised when predicting before fitting.""" + # Arrange + forecaster = LgbLinearForecaster(config=base_config) + + # Act & Assert + with pytest.raises(NotFittedError): + forecaster.predict(sample_forecast_input_dataset) + + +def test_lgblinear_forecaster__predict_not_fitted_raises_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LgbLinearForecasterConfig, +): + """Test that predict() raises NotFittedError when called before fit().""" + # Arrange + forecaster = LgbLinearForecaster(config=base_config) + + # Act & Assert + with pytest.raises( + NotFittedError, + match="The LgbLinearForecaster has not been fitted yet. Please call 'fit' before using it.", # noqa: RUF043 + ): + forecaster.predict(sample_forecast_input_dataset) + + +def test_lgblinear_forecaster__with_sample_weights( + sample_dataset_with_weights: ForecastInputDataset, + base_config: LgbLinearForecasterConfig, +): + """Test that forecaster works with sample weights and produces different results.""" + # Arrange + forecaster_with_weights = LgbLinearForecaster(config=base_config) + + # Create dataset without weights for comparison + data_without_weights = ForecastInputDataset( + data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), + sample_interval=sample_dataset_with_weights.sample_interval, + target_column=sample_dataset_with_weights.target_column, + forecast_start=sample_dataset_with_weights.forecast_start, + ) + forecaster_without_weights = LgbLinearForecaster(config=base_config) + + # Act + forecaster_with_weights.fit(sample_dataset_with_weights) + forecaster_without_weights.fit(data_without_weights) + + # Predict using data without sample_weight column (since that's used for training, not prediction) + result_with_weights = forecaster_with_weights.predict(data_without_weights) + result_without_weights = forecaster_without_weights.predict(data_without_weights) + + # Assert + # Both should produce valid forecasts + assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" + assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + + # Sample weights should affect the model, so results should be different + # (This is a statistical test - with different weights, predictions should differ) + differences = (result_with_weights.data - result_without_weights.data).abs() + assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +def test_lgblinear_forecaster__feature_importances( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LgbLinearForecasterConfig, +): + """Test that feature_importances returns correct normalized importance scores.""" + # Arrange + forecaster = LgbLinearForecaster(config=base_config) + forecaster.fit(sample_forecast_input_dataset) + + # Act + feature_importances = forecaster.feature_importances + + # Assert + assert len(feature_importances.index) > 0 + + # Columns should match expected quantile formats + expected_columns = pd.Index([q.format() for q in base_config.quantiles], name="quantiles") + pd.testing.assert_index_equal(feature_importances.columns, expected_columns) + + # Values should be normalized (sum to 1.0 per quantile column) and non-negative + col_sums = feature_importances.sum(axis=0) + pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=expected_columns), atol=1e-10) + assert (feature_importances >= 0).all().all() diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py new file mode 100644 index 000000000..f0c7ff7e7 --- /dev/null +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py @@ -0,0 +1,159 @@ +from datetime import timedelta + +import pandas as pd +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_models.models.forecasting.lightgbm_forecaster import ( + LightGBMForecaster, + LightGBMForecasterConfig, + LightGBMHyperParams, +) + + +@pytest.fixture +def base_config() -> LightGBMForecasterConfig: + """Base configuration for LightGBM forecaster tests.""" + + return LightGBMForecasterConfig( + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime(timedelta(days=1))], + hyperparams=LightGBMHyperParams(n_estimators=100, max_depth=3, min_data_in_leaf=1, min_data_in_bin=1), + device="cpu", + n_jobs=1, + verbosity=0, + ) + + +@pytest.fixture +def forecaster(base_config: LightGBMForecasterConfig) -> LightGBMForecaster: + return LightGBMForecaster(base_config) + + +def test_initialization(forecaster: LightGBMForecaster): + assert isinstance(forecaster, LightGBMForecaster) + assert forecaster.config.hyperparams.n_estimators == 100 # type: ignore + + +def test_quantile_lightgbm_forecaster__fit_predict( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LightGBMForecasterConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = LightGBMForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + # Since forecast is deterministic with fixed random seed, check value spread (vectorized) + # All quantiles should have some variation (not all identical values) + stds = result.data.std() + assert (stds > 0).all(), f"All columns should have variation, got stds: {dict(stds)}" + + +def test_lightgbm_forecaster__not_fitted_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LightGBMForecasterConfig, +): + """Test that NotFittedError is raised when predicting before fitting.""" + # Arrange + forecaster = LightGBMForecaster(config=base_config) + + # Act & Assert + with pytest.raises(NotFittedError): + forecaster.predict(sample_forecast_input_dataset) + + +def test_lightgbm_forecaster__predict_not_fitted_raises_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LightGBMForecasterConfig, +): + """Test that predict() raises NotFittedError when called before fit().""" + # Arrange + forecaster = LightGBMForecaster(config=base_config) + + # Act & Assert + with pytest.raises( + NotFittedError, + match="The LightGBMForecaster has not been fitted yet. Please call 'fit' before using it.", # noqa: RUF043 + ): + forecaster.predict(sample_forecast_input_dataset) + + +def test_lightgbm_forecaster__with_sample_weights( + sample_dataset_with_weights: ForecastInputDataset, + base_config: LightGBMForecasterConfig, +): + """Test that forecaster works with sample weights and produces different results.""" + # Arrange + forecaster_with_weights = LightGBMForecaster(config=base_config) + + # Create dataset without weights for comparison + data_without_weights = ForecastInputDataset( + data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), + sample_interval=sample_dataset_with_weights.sample_interval, + target_column=sample_dataset_with_weights.target_column, + forecast_start=sample_dataset_with_weights.forecast_start, + ) + forecaster_without_weights = LightGBMForecaster(config=base_config) + + # Act + forecaster_with_weights.fit(sample_dataset_with_weights) + forecaster_without_weights.fit(data_without_weights) + + # Predict using data without sample_weight column (since that's used for training, not prediction) + result_with_weights = forecaster_with_weights.predict(data_without_weights) + result_without_weights = forecaster_without_weights.predict(data_without_weights) + + # Assert + # Both should produce valid forecasts + assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" + assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + + # Sample weights should affect the model, so results should be different + # (This is a statistical test - with different weights, predictions should differ) + differences = (result_with_weights.data - result_without_weights.data).abs() + assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +def test_lightgbm_forecaster__feature_importances( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LightGBMForecasterConfig, +): + """Test that feature_importances returns correct normalized importance scores.""" + # Arrange + forecaster = LightGBMForecaster(config=base_config) + forecaster.fit(sample_forecast_input_dataset) + + # Act + feature_importances = forecaster.feature_importances + + # Assert + assert len(feature_importances.index) > 0 + + # Columns should match expected quantile formats + expected_columns = pd.Index([q.format() for q in base_config.quantiles], name="quantiles") + pd.testing.assert_index_equal(feature_importances.columns, expected_columns) + + # Values should be normalized (sum to 1.0 per quantile column) and non-negative + col_sums = feature_importances.sum(axis=0) + pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=expected_columns), atol=1e-10) + assert (feature_importances >= 0).all().all() diff --git a/uv.lock b/uv.lock index 769d7e81b..f8bf4a7b4 100644 --- a/uv.lock +++ b/uv.lock @@ -1115,6 +1115,8 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" }, { url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" }, { url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" }, + { url = "https://files.pythonhosted.org/packages/27/45/80935968b53cfd3f33cf99ea5f08227f2646e044568c9b1555b58ffd61c2/greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0", size = 1564846, upload-time = "2025-11-04T12:42:15.191Z" }, + { url = "https://files.pythonhosted.org/packages/69/02/b7c30e5e04752cb4db6202a3858b149c0710e5453b71a3b2aec5d78a1aab/greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d", size = 1633814, upload-time = "2025-11-04T12:42:17.175Z" }, { url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" }, { url = "https://files.pythonhosted.org/packages/49/e8/58c7f85958bda41dafea50497cbd59738c5c43dbbea5ee83d651234398f4/greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31", size = 272814, upload-time = "2025-08-07T13:15:50.011Z" }, { url = "https://files.pythonhosted.org/packages/62/dd/b9f59862e9e257a16e4e610480cfffd29e3fae018a68c2332090b53aac3d/greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945", size = 641073, upload-time = "2025-08-07T13:42:57.23Z" }, @@ -1124,6 +1126,8 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ee/43/3cecdc0349359e1a527cbf2e3e28e5f8f06d3343aaf82ca13437a9aa290f/greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671", size = 610497, upload-time = "2025-08-07T13:18:31.636Z" }, { url = "https://files.pythonhosted.org/packages/b8/19/06b6cf5d604e2c382a6f31cafafd6f33d5dea706f4db7bdab184bad2b21d/greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b", size = 1121662, upload-time = "2025-08-07T13:42:41.117Z" }, { url = "https://files.pythonhosted.org/packages/a2/15/0d5e4e1a66fab130d98168fe984c509249c833c1a3c16806b90f253ce7b9/greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae", size = 1149210, upload-time = "2025-08-07T13:18:24.072Z" }, + { url = "https://files.pythonhosted.org/packages/1c/53/f9c440463b3057485b8594d7a638bed53ba531165ef0ca0e6c364b5cc807/greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b", size = 1564759, upload-time = "2025-11-04T12:42:19.395Z" }, + { url = "https://files.pythonhosted.org/packages/47/e4/3bb4240abdd0a8d23f4f88adec746a3099f0d86bfedb623f063b2e3b4df0/greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929", size = 1634288, upload-time = "2025-11-04T12:42:21.174Z" }, { url = "https://files.pythonhosted.org/packages/0b/55/2321e43595e6801e105fcfdee02b34c0f996eb71e6ddffca6b10b7e1d771/greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b", size = 299685, upload-time = "2025-08-07T13:24:38.824Z" }, { url = "https://files.pythonhosted.org/packages/22/5c/85273fd7cc388285632b0498dbbab97596e04b154933dfe0f3e68156c68c/greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0", size = 273586, upload-time = "2025-08-07T13:16:08.004Z" }, { url = "https://files.pythonhosted.org/packages/d1/75/10aeeaa3da9332c2e761e4c50d4c3556c21113ee3f0afa2cf5769946f7a3/greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f", size = 686346, upload-time = "2025-08-07T13:42:59.944Z" }, @@ -1131,6 +1135,8 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/dc/8b/29aae55436521f1d6f8ff4e12fb676f3400de7fcf27fccd1d4d17fd8fecd/greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1", size = 694659, upload-time = "2025-08-07T13:53:17.759Z" }, { url = "https://files.pythonhosted.org/packages/92/2e/ea25914b1ebfde93b6fc4ff46d6864564fba59024e928bdc7de475affc25/greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735", size = 695355, upload-time = "2025-08-07T13:18:34.517Z" }, { url = "https://files.pythonhosted.org/packages/72/60/fc56c62046ec17f6b0d3060564562c64c862948c9d4bc8aa807cf5bd74f4/greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337", size = 657512, upload-time = "2025-08-07T13:18:33.969Z" }, + { url = "https://files.pythonhosted.org/packages/23/6e/74407aed965a4ab6ddd93a7ded3180b730d281c77b765788419484cdfeef/greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269", size = 1612508, upload-time = "2025-11-04T12:42:23.427Z" }, + { url = "https://files.pythonhosted.org/packages/0d/da/343cd760ab2f92bac1845ca07ee3faea9fe52bee65f7bcb19f16ad7de08b/greenlet-3.2.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:015d48959d4add5d6c9f6c5210ee3803a830dce46356e3bc326d6776bde54681", size = 1680760, upload-time = "2025-11-04T12:42:25.341Z" }, { url = "https://files.pythonhosted.org/packages/e3/a5/6ddab2b4c112be95601c13428db1d8b6608a8b6039816f2ba09c346c08fc/greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01", size = 303425, upload-time = "2025-08-07T13:32:27.59Z" }, ] @@ -1585,6 +1591,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/65/bd/606e2f7eb0da042bffd8711a7427f7a28ca501aa6b1e3367ae3c7d4dc489/licensecheck-2025.1.0-py3-none-any.whl", hash = "sha256:eb20131cd8f877e5396958fd7b00cdb2225436c37a59dba4cf36d36079133a17", size = 26681, upload-time = "2025-03-26T22:58:03.145Z" }, ] +[[package]] +name = "lightgbm" +version = "4.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/0b/a2e9f5c5da7ef047cc60cef37f86185088845e8433e54d2e7ed439cce8a3/lightgbm-4.6.0.tar.gz", hash = "sha256:cb1c59720eb569389c0ba74d14f52351b573af489f230032a1c9f314f8bab7fe", size = 1703705, upload-time = "2025-02-15T04:03:03.111Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/75/cffc9962cca296bc5536896b7e65b4a7cdeb8db208e71b9c0133c08f8f7e/lightgbm-4.6.0-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:b7a393de8a334d5c8e490df91270f0763f83f959574d504c7ccb9eee4aef70ed", size = 2010151, upload-time = "2025-02-15T04:02:50.961Z" }, + { url = "https://files.pythonhosted.org/packages/21/1b/550ee378512b78847930f5d74228ca1fdba2a7fbdeaac9aeccc085b0e257/lightgbm-4.6.0-py3-none-macosx_12_0_arm64.whl", hash = "sha256:2dafd98d4e02b844ceb0b61450a660681076b1ea6c7adb8c566dfd66832aafad", size = 1592172, upload-time = "2025-02-15T04:02:53.937Z" }, + { url = "https://files.pythonhosted.org/packages/64/41/4fbde2c3d29e25ee7c41d87df2f2e5eda65b431ee154d4d462c31041846c/lightgbm-4.6.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4d68712bbd2b57a0b14390cbf9376c1d5ed773fa2e71e099cac588703b590336", size = 3454567, upload-time = "2025-02-15T04:02:56.443Z" }, + { url = "https://files.pythonhosted.org/packages/42/86/dabda8fbcb1b00bcfb0003c3776e8ade1aa7b413dff0a2c08f457dace22f/lightgbm-4.6.0-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:cb19b5afea55b5b61cbb2131095f50538bd608a00655f23ad5d25ae3e3bf1c8d", size = 3569831, upload-time = "2025-02-15T04:02:58.925Z" }, + { url = "https://files.pythonhosted.org/packages/5e/23/f8b28ca248bb629b9e08f877dd2965d1994e1674a03d67cd10c5246da248/lightgbm-4.6.0-py3-none-win_amd64.whl", hash = "sha256:37089ee95664b6550a7189d887dbf098e3eadab03537e411f52c63c121e3ba4b", size = 1451509, upload-time = "2025-02-15T04:03:01.515Z" }, +] + [[package]] name = "loguru" version = "0.7.3" @@ -2363,8 +2386,10 @@ name = "openstef-models" version = "0.0.0" source = { editable = "packages/openstef-models" } dependencies = [ + { name = "lightgbm" }, { name = "openstef-core" }, { name = "pycountry" }, + { name = "skops" }, ] [package.optional-dependencies] @@ -2381,11 +2406,13 @@ all = [ requires-dist = [ { name = "holidays", marker = "extra == 'all'", specifier = ">=0.79" }, { name = "joblib", marker = "extra == 'all'", specifier = ">=1" }, + { name = "lightgbm", specifier = ">=4.6.0" }, { name = "mlflow", marker = "extra == 'all'", specifier = ">=3.4" }, { name = "openstef-core", editable = "packages/openstef-core" }, { name = "pvlib", marker = "extra == 'all'", specifier = ">=0.13" }, { name = "pycountry", specifier = ">=24.6.1" }, { name = "scikit-learn", marker = "extra == 'all'", specifier = ">=1.7.1" }, + { name = "skops", specifier = ">=0.13.0" }, { name = "xgboost", marker = "extra == 'all'", specifier = ">=3" }, ] provides-extras = ["all"] @@ -2690,6 +2717,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/92/1b/5337af1a6a478d25a3e3c56b9b4b42b0a160314e02f4a0498d5322c8dac4/poethepoet-0.37.0-py3-none-any.whl", hash = "sha256:861790276315abcc8df1b4bd60e28c3d48a06db273edd3092f3c94e1a46e5e22", size = 90062, upload-time = "2025-08-11T18:00:27.595Z" }, ] +[[package]] +name = "prettytable" +version = "3.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/b1/85e18ac92afd08c533603e3393977b6bc1443043115a47bb094f3b98f94f/prettytable-3.16.0.tar.gz", hash = "sha256:3c64b31719d961bf69c9a7e03d0c1e477320906a98da63952bc6698d6164ff57", size = 66276, upload-time = "2025-03-24T19:39:04.008Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/c7/5613524e606ea1688b3bdbf48aa64bafb6d0a4ac3750274c43b6158a390f/prettytable-3.16.0-py3-none-any.whl", hash = "sha256:b5eccfabb82222f5aa46b798ff02a8452cf530a352c31bddfa29be41242863aa", size = 33863, upload-time = "2025-03-24T19:39:02.359Z" }, +] + [[package]] name = "prompt-toolkit" version = "3.0.52" @@ -3718,6 +3757,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] +[[package]] +name = "skops" +version = "0.13.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "packaging" }, + { name = "prettytable" }, + { name = "scikit-learn" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b5/0c/5ec987633e077dd0076178ea6ade2d6e57780b34afea0b497fb507d7a1ed/skops-0.13.0.tar.gz", hash = "sha256:66949fd3c95cbb5c80270fbe40293c0fe1e46cb4a921860e42584dd9c20ebeb1", size = 581312, upload-time = "2025-08-06T09:48:14.916Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/e8/6a2b2030f0689f894432b9c2f0357f2f3286b2a00474827e04b8fe9eea13/skops-0.13.0-py3-none-any.whl", hash = "sha256:55e2cccb18c86f5916e4cfe5acf55ed7b0eecddf08a151906414c092fa5926dc", size = 131200, upload-time = "2025-08-06T09:48:13.356Z" }, +] + [[package]] name = "smmap" version = "5.0.2" From 6fcd632b77205185544e8cad1f9e7b60fd7d4ea4 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Fri, 7 Nov 2025 17:02:16 +0100 Subject: [PATCH 002/104] Fixed small issues --- .../src/openstef_models/estimators/hybrid.py | 11 +- .../openstef_models/estimators/lightgbm.py | 4 + .../models/forecasting/hybrid_forecaster.py | 40 ++--- .../forecasting/lgblinear_forecaster.py | 7 +- .../tests/unit/estimators/test_hybrid.py | 4 + .../tests/unit/estimators/test_lightgbm.py | 3 + .../forecasting/test_hybrid_forecaster.py | 149 ++++++++++++++++++ .../forecasting/test_lgblinear_forecaster.py | 3 + .../forecasting/test_lightgbm_forecaster.py | 3 + 9 files changed, 196 insertions(+), 28 deletions(-) create mode 100644 packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py diff --git a/packages/openstef-models/src/openstef_models/estimators/hybrid.py b/packages/openstef-models/src/openstef_models/estimators/hybrid.py index abffd154a..81ad400bf 100644 --- a/packages/openstef-models/src/openstef_models/estimators/hybrid.py +++ b/packages/openstef-models/src/openstef_models/estimators/hybrid.py @@ -21,7 +21,7 @@ class HybridQuantileRegressor: """Custom Hybrid regressor for multi-quantile estimation using sample weights.""" - def __init__( + def __init__( # noqa: D107, PLR0913, PLR0917 self, quantiles: list[float], lightgbm_n_estimators: int = 100, @@ -38,7 +38,7 @@ def __init__( lightgbm_subsample: float = 1.0, lightgbm_colsample_by_tree: float = 1.0, lightgbm_colsample_by_node: float = 1.0, - gblinear_n_estimators: int = 100, + gblinear_n_steps: int = 100, gblinear_learning_rate: float = 0.15, gblinear_reg_alpha: float = 0.0001, gblinear_reg_lambda: float = 0, @@ -75,7 +75,7 @@ def __init__( booster="gblinear", # Core parameters for forecasting objective="reg:quantileerror", - n_estimators=gblinear_n_estimators, + n_estimators=gblinear_n_steps, learning_rate=gblinear_learning_rate, # Regularization parameters reg_alpha=gblinear_reg_alpha, @@ -99,12 +99,13 @@ def __init__( ) ) self.is_fitted: bool = False + self.feature_names: list[str] = [] def fit( self, X: npt.NDArray[np.floating] | pd.DataFrame, # noqa: N803 y: npt.NDArray[np.floating] | pd.Series, - sample_weight: npt.NDArray[np.floating] | None = None, + sample_weight: npt.NDArray[np.floating] | pd.Series | None = None, feature_name: list[str] | None = None, ) -> None: """Fit the multi-quantile regressor. @@ -115,6 +116,8 @@ def fit( sample_weight: Sample weights for training data. feature_name: List of feature names. """ + self.feature_names = feature_name if feature_name is not None else [] + X = X.ffill().fillna(0) # type: ignore for model in self._models: model.fit( diff --git a/packages/openstef-models/src/openstef_models/estimators/lightgbm.py b/packages/openstef-models/src/openstef_models/estimators/lightgbm.py index 66f222327..130729407 100644 --- a/packages/openstef-models/src/openstef_models/estimators/lightgbm.py +++ b/packages/openstef-models/src/openstef_models/estimators/lightgbm.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Custom LightGBM regressor for multi-quantile regression. This module provides the LGBMQuantileRegressor class, which extends LightGBM's LGBMRegressor diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index b1909b068..f5e97289a 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -1,31 +1,34 @@ -from __future__ import annotations +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 -import base64 -import logging -from typing import Any, Literal, Self, Union, cast, override -import numpy as np -import numpy.typing as npt +from typing import TYPE_CHECKING, override + import pandas as pd from pydantic import Field - from openstef_core.base_model import BaseConfig from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( - ModelLoadingError, NotFittedError, ) from openstef_core.mixins import HyperParams from openstef_models.estimators.hybrid import HybridQuantileRegressor from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig +from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams from openstef_models.models.forecasting.lightgbm_forecaster import LightGBMHyperParams +if TYPE_CHECKING: + import numpy as np + import numpy.typing as npt + class HybridHyperParams(HyperParams): - """Hyperparameters for Support Vector Regression (Hybrid).""" + """Hyperparameters for Stacked LGBM GBLinear Regressor.""" lightgbm_params: LightGBMHyperParams = LightGBMHyperParams() + gb_linear_params: GBLinearHyperParams = GBLinearHyperParams() l1_penalty: float = Field( default=0.0, @@ -56,7 +59,7 @@ class HybridForecasterState(BaseConfig): class HybridForecaster(Forecaster): - """Wrapper for sklearn's Hybrid to make it compatible with HorizonForecaster.""" + """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" Config = HybridForecasterConfig HyperParams = HybridHyperParams @@ -65,17 +68,11 @@ class HybridForecaster(Forecaster): model: HybridQuantileRegressor def __init__(self, config: HybridForecasterConfig) -> None: - """Initialize the Hybrid forecaster. - - Args: - kernel: Kernel type for Hybrid. Must be one of "linear", "poly", "rbf", "sigmoid", or "precomputed". - C: Regularization parameter. - epsilon: Epsilon in the epsilon-Hybrid model. - """ + """Initialize the Hybrid forecaster.""" self._config = config self._model = HybridQuantileRegressor( - quantiles=config.quantiles, + quantiles=[float(q) for q in config.quantiles], lightgbm_n_estimators=config.hyperparams.lightgbm_params.n_estimators, lightgbm_learning_rate=config.hyperparams.lightgbm_params.learning_rate, lightgbm_max_depth=config.hyperparams.lightgbm_params.max_depth, @@ -89,6 +86,10 @@ def __init__(self, config: HybridForecasterConfig) -> None: lightgbm_subsample=config.hyperparams.lightgbm_params.subsample, lightgbm_colsample_by_tree=config.hyperparams.lightgbm_params.colsample_bytree, lightgbm_colsample_by_node=config.hyperparams.lightgbm_params.colsample_bynode, + gblinear_n_steps=config.hyperparams.gb_linear_params.n_steps, + gblinear_learning_rate=config.hyperparams.gb_linear_params.learning_rate, + gblinear_reg_alpha=config.hyperparams.gb_linear_params.reg_alpha, + gblinear_reg_lambda=config.hyperparams.gb_linear_params.reg_lambda, ) @property @@ -113,8 +114,9 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None input_data: pd.DataFrame = data.input_data() target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore + sample_weights: pd.Series = data.sample_weight_series - self._model.fit(X=input_data, y=target) + self._model.fit(X=input_data, y=target, sample_weight=sample_weights) @override def predict(self, data: ForecastInputDataset) -> ForecastDataset: diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py index dc5babb7e..04293c048 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py @@ -9,8 +9,7 @@ comprehensive hyperparameter control for production forecasting workflows. """ -import base64 -from typing import TYPE_CHECKING, Any, Literal, Self, cast, override +from typing import TYPE_CHECKING, Literal, override import numpy as np import pandas as pd @@ -19,7 +18,6 @@ from openstef_core.base_model import BaseConfig from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( - ModelLoadingError, NotFittedError, ) from openstef_core.mixins import HyperParams @@ -145,8 +143,7 @@ class LgbLinearForecasterConfig(ForecasterConfig): >>> from openstef_core.types import LeadTime, Quantile >>> config = LgbLinearForecasterConfig( ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], - ... horizons=[LeadTime(timedelta(hours=1 - ))], + ... horizons=[LeadTime(timedelta(hours=1))], ... hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=6) ... ). """ # noqa: D205 diff --git a/packages/openstef-models/tests/unit/estimators/test_hybrid.py b/packages/openstef-models/tests/unit/estimators/test_hybrid.py index 857cb7705..4c8a1ac97 100644 --- a/packages/openstef-models/tests/unit/estimators/test_hybrid.py +++ b/packages/openstef-models/tests/unit/estimators/test_hybrid.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + import pandas as pd import pytest from numpy.random import default_rng diff --git a/packages/openstef-models/tests/unit/estimators/test_lightgbm.py b/packages/openstef-models/tests/unit/estimators/test_lightgbm.py index 555add5cb..936b2e097 100644 --- a/packages/openstef-models/tests/unit/estimators/test_lightgbm.py +++ b/packages/openstef-models/tests/unit/estimators/test_lightgbm.py @@ -1,3 +1,6 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 import pandas as pd import pytest from numpy.random import default_rng diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py new file mode 100644 index 000000000..0a0334fbc --- /dev/null +++ b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py @@ -0,0 +1,149 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams +from openstef_models.models.forecasting.hybrid_forecaster import ( + HybridForecaster, + HybridForecasterConfig, + HybridHyperParams, +) +from openstef_models.models.forecasting.lightgbm_forecaster import LightGBMHyperParams + + +@pytest.fixture +def base_config() -> HybridForecasterConfig: + """Base configuration for Hybrid forecaster tests.""" + lightgbm_params = LightGBMHyperParams(n_estimators=10, max_depth=2) + gb_linear_params = GBLinearHyperParams(n_steps=5, learning_rate=0.1, reg_alpha=0.0, reg_lambda=0.0) + params = HybridHyperParams( + lightgbm_params=lightgbm_params, + gb_linear_params=gb_linear_params, + ) + return HybridForecasterConfig( + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime(timedelta(days=1))], + hyperparams=params, + verbosity=False, + ) + + +def test_hybrid_forecaster__fit_predict( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: HybridForecasterConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = HybridForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + # Since forecast is deterministic with fixed random seed, check value spread (vectorized) + # All quantiles should have some variation (not all identical values) + stds = result.data.std() + assert (stds > 0).all(), f"All columns should have variation, got stds: {dict(stds)}" + + +def test_hybrid_forecaster__predict_not_fitted_raises_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: HybridForecasterConfig, +): + """Test that predict() raises NotFittedError when called before fit().""" + # Arrange + forecaster = HybridForecaster(config=base_config) + + # Act & Assert + with pytest.raises(NotFittedError, match="HybridForecaster"): + forecaster.predict(sample_forecast_input_dataset) + + +def test_hybrid_forecaster__with_sample_weights( + sample_dataset_with_weights: ForecastInputDataset, + base_config: HybridForecasterConfig, +): + """Test that forecaster works with sample weights and produces different results.""" + # Arrange + forecaster_with_weights = HybridForecaster(config=base_config) + + # Create dataset without weights for comparison + data_without_weights = ForecastInputDataset( + data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), + sample_interval=sample_dataset_with_weights.sample_interval, + target_column=sample_dataset_with_weights.target_column, + forecast_start=sample_dataset_with_weights.forecast_start, + ) + forecaster_without_weights = HybridForecaster(config=base_config) + + # Act + forecaster_with_weights.fit(sample_dataset_with_weights) + forecaster_without_weights.fit(data_without_weights) + + # Predict using data without sample_weight column (since that's used for training, not prediction) + result_with_weights = forecaster_with_weights.predict(data_without_weights) + result_without_weights = forecaster_without_weights.predict(data_without_weights) + + # Assert + # Both should produce valid forecasts + assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" + assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + + # Sample weights should affect the model, so results should be different + # (This is a statistical test - with different weights, predictions should differ) + differences = (result_with_weights.data - result_without_weights.data).abs() + assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +@pytest.mark.parametrize("objective", ["pinball_loss", "arctan_loss"]) +def test_hybrid_forecaster__different_objectives( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: HybridForecasterConfig, + objective: str, +): + """Test that forecaster works with different objective functions.""" + # Arrange + config = base_config.model_copy( + update={ + "hyperparams": base_config.hyperparams.model_copy( + update={"objective": objective} # type: ignore[arg-type] + ) + } + ) + forecaster = HybridForecaster(config=config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality should work regardless of objective + assert forecaster.is_fitted, f"Model with {objective} should be fitted" + assert not result.data.isna().any().any(), f"Forecast with {objective} should not contain NaN values" + + # Check value spread for each objective + # Note: Some objectives (like arctan_loss) may produce zero variation for some quantiles with small datasets + stds = result.data.std() + # At least one quantile should have variation (the model should not be completely degenerate) + assert (stds > 0).any(), f"At least one column should have variation with {objective}, got stds: {dict(stds)}" diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py index 0288a42d6..dc743be07 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py @@ -1,3 +1,6 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 from datetime import timedelta import pandas as pd diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py index f0c7ff7e7..efc728ac3 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py @@ -1,3 +1,6 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 from datetime import timedelta import pandas as pd From 75239877c8fc1099a9c98e9320497799752488a6 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 10 Nov 2025 10:06:52 +0100 Subject: [PATCH 003/104] Ruff compliance --- .../openstef_models/estimators/__init__.py | 2 +- .../src/openstef_models/estimators/hybrid.py | 27 +++++++++++++------ .../openstef_models/estimators/lightgbm.py | 26 +++++++++--------- .../models/forecasting/hybrid_forecaster.py | 9 ++++--- .../forecasting/lgblinear_forecaster.py | 13 +-------- .../models/forecasting/lightgbm_forecaster.py | 21 +++++---------- .../tests/unit/estimators/__init__.py | 0 pyproject.toml | 2 +- 8 files changed, 48 insertions(+), 52 deletions(-) create mode 100644 packages/openstef-models/tests/unit/estimators/__init__.py diff --git a/packages/openstef-models/src/openstef_models/estimators/__init__.py b/packages/openstef-models/src/openstef_models/estimators/__init__.py index 487060783..2b139bdb0 100644 --- a/packages/openstef-models/src/openstef_models/estimators/__init__.py +++ b/packages/openstef-models/src/openstef_models/estimators/__init__.py @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: MPL-2.0 -"""Custom estimators for multi quantiles""" +"""Custom estimators for multi quantiles.""" from .lightgbm import LGBMQuantileRegressor diff --git a/packages/openstef-models/src/openstef_models/estimators/hybrid.py b/packages/openstef-models/src/openstef_models/estimators/hybrid.py index 81ad400bf..c6f827d64 100644 --- a/packages/openstef-models/src/openstef_models/estimators/hybrid.py +++ b/packages/openstef-models/src/openstef_models/estimators/hybrid.py @@ -1,3 +1,6 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 """Hybrid quantile regression estimators for multi-quantile forecasting. This module provides the HybridQuantileRegressor class, which combines LightGBM and linear models @@ -35,9 +38,7 @@ def __init__( # noqa: D107, PLR0913, PLR0917 lightgbm_reg_lambda: float = 0.0, lightgbm_num_leaves: int = 31, lightgbm_max_bin: int = 255, - lightgbm_subsample: float = 1.0, lightgbm_colsample_by_tree: float = 1.0, - lightgbm_colsample_by_node: float = 1.0, gblinear_n_steps: int = 100, gblinear_learning_rate: float = 0.15, gblinear_reg_alpha: float = 0.0001, @@ -64,9 +65,7 @@ def __init__( # noqa: D107, PLR0913, PLR0917 reg_lambda=lightgbm_reg_lambda, num_leaves=lightgbm_num_leaves, max_bin=lightgbm_max_bin, - subsample=lightgbm_subsample, colsample_bytree=lightgbm_colsample_by_tree, - colsample_bynode=lightgbm_colsample_by_node, verbosity=-1, linear_tree=False, ) @@ -101,9 +100,22 @@ def __init__( # noqa: D107, PLR0913, PLR0917 self.is_fitted: bool = False self.feature_names: list[str] = [] + @staticmethod + def _prepare_input(X: npt.NDArray[np.floating] | pd.DataFrame) -> pd.DataFrame: + """Prepare input data by handling missing values. + + Args: + X: Input features as a DataFrame or ndarray. + + Returns: + A DataFrame with missing values handled. + """ + filled_forward = pd.DataFrame(X).ffill() + return pd.DataFrame(filled_forward).fillna(0) + def fit( self, - X: npt.NDArray[np.floating] | pd.DataFrame, # noqa: N803 + X: npt.NDArray[np.floating] | pd.DataFrame, y: npt.NDArray[np.floating] | pd.Series, sample_weight: npt.NDArray[np.floating] | pd.Series | None = None, feature_name: list[str] | None = None, @@ -118,16 +130,15 @@ def fit( """ self.feature_names = feature_name if feature_name is not None else [] - X = X.ffill().fillna(0) # type: ignore for model in self._models: model.fit( - X=X, # type: ignore + X=self._prepare_input(X), # type: ignore y=y, sample_weight=sample_weight, ) self.is_fitted = True - def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: # noqa: N803 + def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: """Predict quantiles for the input features. Args: diff --git a/packages/openstef-models/src/openstef_models/estimators/lightgbm.py b/packages/openstef-models/src/openstef_models/estimators/lightgbm.py index 130729407..13bc36477 100644 --- a/packages/openstef-models/src/openstef_models/estimators/lightgbm.py +++ b/packages/openstef-models/src/openstef_models/estimators/lightgbm.py @@ -43,9 +43,7 @@ def __init__( reg_lambda: float = 0.0, num_leaves: int = 31, max_bin: int = 255, - subsample: float = 1.0, colsample_bytree: float = 1.0, - colsample_bynode: float = 1.0, random_state: int | None = None, early_stopping_rounds: int | None = None, verbosity: int = -1, @@ -54,6 +52,7 @@ def __init__( Args: quantiles: List of quantiles to predict (e.g., [0.1, 0.5, 0.9]). + linear_tree: Whether to use linear trees. n_estimators: Number of boosting rounds/trees to fit. learning_rate: Step size shrinkage used to prevent overfitting. max_depth: Maximum depth of trees. @@ -64,9 +63,7 @@ def __init__( reg_lambda: L2 regularization on leaf weights. num_leaves: Maximum number of leaves. max_bin: Maximum number of discrete bins for continuous features. - subsample: Fraction of training samples used for each tree. colsample_bytree: Fraction of features used when constructing each tree. - colsample_bynode: Fraction of features used for each split/node. random_state: Random seed for reproducibility. early_stopping_rounds: Training will stop if performance doesn't improve for this many rounds. verbosity: Verbosity level for LgbLinear training. @@ -84,9 +81,7 @@ def __init__( self.reg_lambda = reg_lambda self.num_leaves = num_leaves self.max_bin = max_bin - self.subsample = subsample self.colsample_bytree = colsample_bytree - self.colsample_bynode = colsample_bynode self.random_state = random_state self.early_stopping_rounds = early_stopping_rounds self.verbosity = verbosity @@ -105,9 +100,7 @@ def __init__( reg_lambda=reg_lambda, num_leaves=num_leaves, max_bin=max_bin, - subsample=subsample, colsample_bytree=colsample_bytree, - colsample_bynode=colsample_bynode, random_state=random_state, early_stopping_rounds=early_stopping_rounds, verbosity=verbosity, @@ -118,12 +111,12 @@ def __init__( def fit( self, - X: npt.NDArray[np.floating] | pd.DataFrame, # noqa: N803 + X: npt.NDArray[np.floating] | pd.DataFrame, y: npt.NDArray[np.floating] | pd.Series, - sample_weight: npt.NDArray[np.floating] | None = None, + sample_weight: npt.NDArray[np.floating] | pd.Series | None = None, feature_name: list[str] | None = None, eval_set: npt.NDArray[np.floating] | None = None, - eval_sample_weight: npt.NDArray[np.floating] | None = None, + eval_sample_weight: npt.NDArray[np.floating] | pd.Series | list[float] | None = None, ) -> None: """Fit the multi-quantile regressor. @@ -150,7 +143,7 @@ def fit( feature_name=feature_name, # type: ignore ) - def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: # noqa: N803 + def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: """Predict quantiles for the input features. Args: @@ -203,3 +196,12 @@ def load_bytes(cls, model_bytes: bytes) -> Self: raise ModelLoadingError("Deserialized object is not a LgbLinearQuantileRegressor") return instance + + @property + def models(self) -> list[LGBMRegressor]: + """Get the list of underlying quantile models. + + Returns: + List of LGBMRegressor instances for each quantile. + """ + return self._models diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index f5e97289a..70aeeed04 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -1,7 +1,13 @@ # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 +"""Hybrid Forecaster (Stacked LightGBM + Linear Model Gradient Boosting). +Provides method that attempts to combine the advantages of a linear model (Extraplolation) +and tree-based model (Non-linear patterns). This is acieved by training two base learners, +followed by a small linear model that regresses on the baselearners' predictions. +The implementation is based on sklearn's StackingRegressor. +""" from typing import TYPE_CHECKING, override @@ -83,9 +89,7 @@ def __init__(self, config: HybridForecasterConfig) -> None: lightgbm_reg_lambda=config.hyperparams.lightgbm_params.reg_lambda, lightgbm_num_leaves=config.hyperparams.lightgbm_params.num_leaves, lightgbm_max_bin=config.hyperparams.lightgbm_params.max_bin, - lightgbm_subsample=config.hyperparams.lightgbm_params.subsample, lightgbm_colsample_by_tree=config.hyperparams.lightgbm_params.colsample_bytree, - lightgbm_colsample_by_node=config.hyperparams.lightgbm_params.colsample_bynode, gblinear_n_steps=config.hyperparams.gb_linear_params.n_steps, gblinear_learning_rate=config.hyperparams.gb_linear_params.learning_rate, gblinear_reg_alpha=config.hyperparams.gb_linear_params.reg_alpha, @@ -111,7 +115,6 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None data_val: Validation data for tuning the model (optional, not used in this implementation). """ - input_data: pd.DataFrame = data.input_data() target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore sample_weights: pd.Series = data.sample_weight_series diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py index 04293c048..66be71821 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py @@ -11,7 +11,6 @@ from typing import TYPE_CHECKING, Literal, override -import numpy as np import pandas as pd from pydantic import Field @@ -26,6 +25,7 @@ from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig if TYPE_CHECKING: + import numpy as np import numpy.typing as npt @@ -41,7 +41,6 @@ class LgbLinearHyperParams(HyperParams): ... learning_rate=0.1, ... reg_alpha=0.1, ... reg_lambda=1.0, - ... subsample=0.8 ... ) Note: @@ -106,18 +105,10 @@ class LgbLinearHyperParams(HyperParams): ) # Subsampling Parameters - subsample: float = Field( - default=0.5, - description="Fraction of training samples used for each tree. Lower values prevent overfitting. Range: (0,1]", - ) colsample_bytree: float = Field( default=0.5, description="Fraction of features used when constructing each tree. Range: (0,1]", ) - colsample_bynode: float = Field( - default=0.5, - description="Fraction of features used for each split/node. Range: (0,1]", - ) # General Parameters random_state: int | None = Field( @@ -254,9 +245,7 @@ def __init__(self, config: LgbLinearForecasterConfig) -> None: reg_lambda=config.hyperparams.reg_lambda, num_leaves=config.hyperparams.num_leaves, max_bin=config.hyperparams.max_bin, - subsample=config.hyperparams.subsample, colsample_bytree=config.hyperparams.colsample_bytree, - colsample_bynode=config.hyperparams.colsample_bynode, random_state=config.hyperparams.random_state, early_stopping_rounds=config.hyperparams.early_stopping_rounds, verbosity=config.verbosity, diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py index cc5a26856..b4049b83c 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py @@ -9,10 +9,8 @@ comprehensive hyperparameter control for production forecasting workflows. """ -from typing import Literal, override +from typing import TYPE_CHECKING, Literal, override -import numpy as np -import numpy.typing as npt import pandas as pd from pydantic import Field @@ -26,6 +24,10 @@ from openstef_models.explainability.mixins import ExplainableForecaster from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig +if TYPE_CHECKING: + import numpy as np + import numpy.typing as npt + class LightGBMHyperParams(HyperParams): """LightGBM hyperparameters for gradient boosting tree models. @@ -39,7 +41,6 @@ class LightGBMHyperParams(HyperParams): ... learning_rate=0.1, ... reg_alpha=0.1, ... reg_lambda=1.0, - ... subsample=0.8 ... ) Note: @@ -103,18 +104,10 @@ class LightGBMHyperParams(HyperParams): ) # Subsampling Parameters - subsample: float = Field( - default=1.0, - description="Fraction of training samples used for each tree. Lower values prevent overfitting. Range: (0,1]", - ) colsample_bytree: float = Field( default=1.0, description="Fraction of features used when constructing each tree. Range: (0,1]", ) - colsample_bynode: float = Field( - default=1.0, - description="Fraction of features used for each split/node. Range: (0,1]", - ) # General Parameters random_state: int | None = Field( @@ -252,9 +245,7 @@ def __init__(self, config: LightGBMForecasterConfig) -> None: reg_lambda=config.hyperparams.reg_lambda, num_leaves=config.hyperparams.num_leaves, max_bin=config.hyperparams.max_bin, - subsample=config.hyperparams.subsample, colsample_bytree=config.hyperparams.colsample_bytree, - colsample_bynode=config.hyperparams.colsample_bynode, random_state=config.hyperparams.random_state, early_stopping_rounds=config.hyperparams.early_stopping_rounds, verbosity=config.verbosity, @@ -321,7 +312,7 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: @property @override def feature_importances(self) -> pd.DataFrame: - models = self._lightgbm_model._models + models = self._lightgbm_model.models weights_df = pd.DataFrame( [models[i].feature_importances_ for i in range(len(models))], index=[quantile.format() for quantile in self.config.quantiles], diff --git a/packages/openstef-models/tests/unit/estimators/__init__.py b/packages/openstef-models/tests/unit/estimators/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pyproject.toml b/pyproject.toml index f6a17e8e6..164f27276 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -158,7 +158,7 @@ lint.pylint.allow-dunder-method-names = [ ] # valid pydantic name lint.pylint.max-args = 7 # the default of 5 is a bit limiting. 7 should be enough for nearly all cases lint.preview = true - +lint.pep8-naming.ignore-names = [ "X" ] # Allow X for SKLearn-like feature matrices [tool.pyproject-fmt] column_width = 120 indent = 2 From c680aa160c2e5a336ae41f96e2f53c2fc9dbbc87 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 10 Nov 2025 11:25:33 +0100 Subject: [PATCH 004/104] fixed quality checks --- packages/openstef-models/pyproject.toml | 4 ++-- .../src/openstef_models/estimators/hybrid.py | 3 +-- .../src/openstef_models/estimators/lightgbm.py | 8 ++++---- .../models/forecasting/lgblinear_forecaster.py | 15 ++++++--------- .../models/forecasting/lightgbm_forecaster.py | 8 +++----- pyproject.toml | 3 ++- 6 files changed, 18 insertions(+), 23 deletions(-) diff --git a/packages/openstef-models/pyproject.toml b/packages/openstef-models/pyproject.toml index 43914f38f..c33d6e0e1 100644 --- a/packages/openstef-models/pyproject.toml +++ b/packages/openstef-models/pyproject.toml @@ -28,10 +28,10 @@ classifiers = [ ] dependencies = [ - "lightgbm>=4.6.0", + "lightgbm>=4.6", "openstef-core", "pycountry>=24.6.1", - "skops>=0.13.0", + "skops>=0.13", ] optional-dependencies.all = [ diff --git a/packages/openstef-models/src/openstef_models/estimators/hybrid.py b/packages/openstef-models/src/openstef_models/estimators/hybrid.py index c6f827d64..54e12ab91 100644 --- a/packages/openstef-models/src/openstef_models/estimators/hybrid.py +++ b/packages/openstef-models/src/openstef_models/estimators/hybrid.py @@ -110,8 +110,7 @@ def _prepare_input(X: npt.NDArray[np.floating] | pd.DataFrame) -> pd.DataFrame: Returns: A DataFrame with missing values handled. """ - filled_forward = pd.DataFrame(X).ffill() - return pd.DataFrame(filled_forward).fillna(0) + return pd.DataFrame(X).ffill().fillna(0) # type: ignore[reportUnknownMemberType] def fit( self, diff --git a/packages/openstef-models/src/openstef_models/estimators/lightgbm.py b/packages/openstef-models/src/openstef_models/estimators/lightgbm.py index 13bc36477..0214b0929 100644 --- a/packages/openstef-models/src/openstef_models/estimators/lightgbm.py +++ b/packages/openstef-models/src/openstef_models/estimators/lightgbm.py @@ -29,7 +29,7 @@ class LGBMQuantileRegressor(BaseEstimator, RegressorMixin): separate tree within the same boosting ensemble. """ - def __init__( + def __init__( # noqa: PLR0913, PLR0917 self, quantiles: list[float], linear_tree: bool, # noqa: FBT001 @@ -47,7 +47,7 @@ def __init__( random_state: int | None = None, early_stopping_rounds: int | None = None, verbosity: int = -1, - ) -> None: # type: ignore + ) -> None: """Initialize LgbLinearQuantileRegressor with quantiles. Args: @@ -115,8 +115,8 @@ def fit( y: npt.NDArray[np.floating] | pd.Series, sample_weight: npt.NDArray[np.floating] | pd.Series | None = None, feature_name: list[str] | None = None, - eval_set: npt.NDArray[np.floating] | None = None, - eval_sample_weight: npt.NDArray[np.floating] | pd.Series | list[float] | None = None, + eval_set: list[tuple[pd.DataFrame, npt.NDArray[np.floating]]] | None = None, + eval_sample_weight: list[npt.NDArray[np.floating]] | None = None, ) -> None: """Fit the multi-quantile regressor. diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py index 66be71821..ed86aa48e 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py @@ -9,8 +9,10 @@ comprehensive hyperparameter control for production forecasting workflows. """ -from typing import TYPE_CHECKING, Literal, override +from typing import Literal, cast, override +import numpy as np +import numpy.typing as npt import pandas as pd from pydantic import Field @@ -24,10 +26,6 @@ from openstef_models.explainability.mixins import ExplainableForecaster from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig -if TYPE_CHECKING: - import numpy as np - import numpy.typing as npt - class LgbLinearHyperParams(HyperParams): """LgbLinear hyperparameters for gradient boosting tree models. @@ -35,7 +33,7 @@ class LgbLinearHyperParams(HyperParams): Example: Creating custom hyperparameters for deep trees with regularization: - >>> hyperparams = LGBMHyperParams( + >>> hyperparams = LgbLinearHyperParams( ... n_estimators=200, ... max_depth=8, ... learning_rate=0.1, @@ -136,7 +134,7 @@ class LgbLinearForecasterConfig(ForecasterConfig): ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], ... horizons=[LeadTime(timedelta(hours=1))], ... hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=6) - ... ). + ... ) """ # noqa: D205 hyperparams: LgbLinearHyperParams = LgbLinearHyperParams() @@ -270,7 +268,6 @@ def is_fitted(self) -> bool: def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: input_data: pd.DataFrame = data.input_data() target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore - sample_weight = data.sample_weight_series # Prepare validation data if provided @@ -279,7 +276,7 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None if data_val is not None: val_input_data: pd.DataFrame = data_val.input_data() val_target: npt.NDArray[np.floating] = data_val.target_series.to_numpy() # type: ignore - val_sample_weight = data_val.sample_weight_series.to_numpy() # type: ignore + val_sample_weight = cast(npt.NDArray[np.floating], data_val.sample_weight_series.to_numpy()) # type: ignore eval_set = [(val_input_data, val_target)] eval_sample_weight = [val_sample_weight] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py index b4049b83c..8fb2a396c 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py @@ -35,7 +35,7 @@ class LightGBMHyperParams(HyperParams): Example: Creating custom hyperparameters for deep trees with regularization: - >>> hyperparams = LGBMHyperParams( + >>> hyperparams = LightGBMHyperParams( ... n_estimators=200, ... max_depth=8, ... learning_rate=0.1, @@ -133,10 +133,8 @@ class LightGBMForecasterConfig(ForecasterConfig): >>> from openstef_core.types import LeadTime, Quantile >>> config = LightGBMForecasterConfig( ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], - ... horizons=[LeadTime(timedelta(hours=1 - ))], - ... hyperparams=LightGBMHyperParams(n_estimators=100, max_depth=6) - ... ). + ... horizons=[LeadTime(timedelta(hours=1))], + ... hyperparams=LightGBMHyperParams(n_estimators=100, max_depth=6)) """ # noqa: D205 hyperparams: LightGBMHyperParams = LightGBMHyperParams() diff --git a/pyproject.toml b/pyproject.toml index 164f27276..787b231dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -151,6 +151,7 @@ lint.isort.known-first-party = [ "tests", "examples", ] # Useful if ruff does not run from the actual root of the project and to import form tests +lint.pep8-naming.ignore-names = [ "X" ] # Allow X for SKLearn-like feature matrices lint.pydocstyle.convention = "google" lint.pylint.allow-dunder-method-names = [ "__get_pydantic_core_schema__", @@ -158,7 +159,7 @@ lint.pylint.allow-dunder-method-names = [ ] # valid pydantic name lint.pylint.max-args = 7 # the default of 5 is a bit limiting. 7 should be enough for nearly all cases lint.preview = true -lint.pep8-naming.ignore-names = [ "X" ] # Allow X for SKLearn-like feature matrices + [tool.pyproject-fmt] column_width = 120 indent = 2 From 9c1e3d38d3348d76962ec7f9004b20f16b6bc869 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 10 Nov 2025 11:29:06 +0100 Subject: [PATCH 005/104] Fixed last issues, Signed-off-by: Lars van Someren --- .../tests/unit/models/forecasting/test_hybrid_forecaster.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py index 0a0334fbc..e89fe13f9 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py @@ -61,11 +61,6 @@ def test_hybrid_forecaster__fit_predict( # Forecast data quality assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" - # Since forecast is deterministic with fixed random seed, check value spread (vectorized) - # All quantiles should have some variation (not all identical values) - stds = result.data.std() - assert (stds > 0).all(), f"All columns should have variation, got stds: {dict(stds)}" - def test_hybrid_forecaster__predict_not_fitted_raises_error( sample_forecast_input_dataset: ForecastInputDataset, From 4394895bafe0c609b12683229ae73151b463f75d Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 11 Nov 2025 20:02:13 +0100 Subject: [PATCH 006/104] fixed comments --- .../models/forecasting/hybrid_forecaster.py | 9 --------- .../models/forecasting/lgblinear_forecaster.py | 14 -------------- .../models/forecasting/lightgbm_forecaster.py | 14 -------------- 3 files changed, 37 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index 70aeeed04..ec3cb0ed6 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -14,7 +14,6 @@ import pandas as pd from pydantic import Field -from openstef_core.base_model import BaseConfig from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( NotFittedError, @@ -56,14 +55,6 @@ class HybridForecasterConfig(ForecasterConfig): MODEL_CODE_VERSION = 2 -class HybridForecasterState(BaseConfig): - """Serializable state for Hybrid forecaster persistence.""" - - version: int = Field(default=MODEL_CODE_VERSION, description="Version of the model code.") - config: HybridForecasterConfig = Field(..., description="Forecaster configuration.") - model: str = Field(..., description="Base64-encoded serialized Hybrid model.") - - class HybridForecaster(Forecaster): """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py index ed86aa48e..262c739f7 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py @@ -16,7 +16,6 @@ import pandas as pd from pydantic import Field -from openstef_core.base_model import BaseConfig from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( NotFittedError, @@ -156,19 +155,6 @@ class LgbLinearForecasterConfig(ForecasterConfig): MODEL_CODE_VERSION = 1 -class LgbLinearForecasterState(BaseConfig): - """Serializable state for LgbLinear forecaster persistence. - - Contains all information needed to restore a trained LgbLinear model, - including configuration and the serialized model weights. Used for - model saving, loading, and version management in production systems. - """ - - version: int = Field(default=MODEL_CODE_VERSION, description="Version of the model code.") - config: LgbLinearForecasterConfig = Field(..., description="Forecaster configuration.") - model: str = Field(..., description="Base64-encoded serialized LgbLinear model.") - - class LgbLinearForecaster(Forecaster, ExplainableForecaster): """LgbLinear-based forecaster for probabilistic energy forecasting. diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py index 8fb2a396c..4e2dabe73 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py @@ -14,7 +14,6 @@ import pandas as pd from pydantic import Field -from openstef_core.base_model import BaseConfig from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( NotFittedError, @@ -156,19 +155,6 @@ class LightGBMForecasterConfig(ForecasterConfig): MODEL_CODE_VERSION = 1 -class LightGBMForecasterState(BaseConfig): - """Serializable state for LightGBM forecaster persistence. - - Contains all information needed to restore a trained LightGBM model, - including configuration and the serialized model weights. Used for - model saving, loading, and version management in production systems. - """ - - version: int = Field(default=MODEL_CODE_VERSION, description="Version of the model code.") - config: LightGBMForecasterConfig = Field(..., description="Forecaster configuration.") - model: str = Field(..., description="Base64-encoded serialized LightGBM model.") - - class LightGBMForecaster(Forecaster, ExplainableForecaster): """LightGBM-based forecaster for probabilistic energy forecasting. From 5745212b656211b851f608c931322aec94180db8 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 11 Nov 2025 20:06:21 +0100 Subject: [PATCH 007/104] Refactor LightGBM to LGBM --- .../openstef_models/estimators/__init__.py | 2 +- .../src/openstef_models/estimators/hybrid.py | 52 +++++----- .../estimators/{lightgbm.py => lgbm.py} | 8 +- .../models/forecasting/forecaster.py | 19 ++++ .../models/forecasting/hybrid_forecaster.py | 26 ++--- ...htgbm_forecaster.py => lgbm_forecaster.py} | 48 +++++----- ...forecaster.py => lgbmlinear_forecaster.py} | 95 +++++++++---------- .../presets/forecasting_workflow.py | 32 +++---- .../{test_lightgbm.py => test_lgbm.py} | 2 +- .../forecasting/test_hybrid_forecaster.py | 6 +- ..._forecaster.py => test_lgbm_forecaster.py} | 56 +++++------ ...aster.py => test_lgbmlinear_forecaster.py} | 56 +++++------ 12 files changed, 210 insertions(+), 192 deletions(-) rename packages/openstef-models/src/openstef_models/estimators/{lightgbm.py => lgbm.py} (96%) rename packages/openstef-models/src/openstef_models/models/forecasting/{lightgbm_forecaster.py => lgbm_forecaster.py} (88%) rename packages/openstef-models/src/openstef_models/models/forecasting/{lgblinear_forecaster.py => lgbmlinear_forecaster.py} (84%) rename packages/openstef-models/tests/unit/estimators/{test_lightgbm.py => test_lgbm.py} (95%) rename packages/openstef-models/tests/unit/models/forecasting/{test_lightgbm_forecaster.py => test_lgbm_forecaster.py} (75%) rename packages/openstef-models/tests/unit/models/forecasting/{test_lgblinear_forecaster.py => test_lgbmlinear_forecaster.py} (75%) diff --git a/packages/openstef-models/src/openstef_models/estimators/__init__.py b/packages/openstef-models/src/openstef_models/estimators/__init__.py index 2b139bdb0..2b2e5ebb4 100644 --- a/packages/openstef-models/src/openstef_models/estimators/__init__.py +++ b/packages/openstef-models/src/openstef_models/estimators/__init__.py @@ -4,6 +4,6 @@ """Custom estimators for multi quantiles.""" -from .lightgbm import LGBMQuantileRegressor +from .lgbm import LGBMQuantileRegressor __all__ = ["LGBMQuantileRegressor"] diff --git a/packages/openstef-models/src/openstef_models/estimators/hybrid.py b/packages/openstef-models/src/openstef_models/estimators/hybrid.py index 54e12ab91..6e107917f 100644 --- a/packages/openstef-models/src/openstef_models/estimators/hybrid.py +++ b/packages/openstef-models/src/openstef_models/estimators/hybrid.py @@ -27,18 +27,18 @@ class HybridQuantileRegressor: def __init__( # noqa: D107, PLR0913, PLR0917 self, quantiles: list[float], - lightgbm_n_estimators: int = 100, - lightgbm_learning_rate: float = 0.1, - lightgbm_max_depth: int = -1, - lightgbm_min_child_weight: float = 1.0, + lgbm_n_estimators: int = 100, + lgbm_learning_rate: float = 0.1, + lgbm_max_depth: int = -1, + lgbm_min_child_weight: float = 1.0, ligntgbm_min_child_samples: int = 1, - lightgbm_min_data_in_leaf: int = 20, - lightgbm_min_data_in_bin: int = 10, - lightgbm_reg_alpha: float = 0.0, - lightgbm_reg_lambda: float = 0.0, - lightgbm_num_leaves: int = 31, - lightgbm_max_bin: int = 255, - lightgbm_colsample_by_tree: float = 1.0, + lgbm_min_data_in_leaf: int = 20, + lgbm_min_data_in_bin: int = 10, + lgbm_reg_alpha: float = 0.0, + lgbm_reg_lambda: float = 0.0, + lgbm_num_leaves: int = 31, + lgbm_max_bin: int = 255, + lgbm_colsample_by_tree: float = 1.0, gblinear_n_steps: int = 100, gblinear_learning_rate: float = 0.15, gblinear_reg_alpha: float = 0.0001, @@ -51,21 +51,21 @@ def __init__( # noqa: D107, PLR0913, PLR0917 self._models: list[StackingRegressor] = [] for q in quantiles: - lightgbm_model = LGBMRegressor( + lgbm_model = LGBMRegressor( objective="quantile", alpha=q, min_child_samples=ligntgbm_min_child_samples, - n_estimators=lightgbm_n_estimators, - learning_rate=lightgbm_learning_rate, - max_depth=lightgbm_max_depth, - min_child_weight=lightgbm_min_child_weight, - min_data_in_leaf=lightgbm_min_data_in_leaf, - min_data_in_bin=lightgbm_min_data_in_bin, - reg_alpha=lightgbm_reg_alpha, - reg_lambda=lightgbm_reg_lambda, - num_leaves=lightgbm_num_leaves, - max_bin=lightgbm_max_bin, - colsample_bytree=lightgbm_colsample_by_tree, + n_estimators=lgbm_n_estimators, + learning_rate=lgbm_learning_rate, + max_depth=lgbm_max_depth, + min_child_weight=lgbm_min_child_weight, + min_data_in_leaf=lgbm_min_data_in_leaf, + min_data_in_bin=lgbm_min_data_in_bin, + reg_alpha=lgbm_reg_alpha, + reg_lambda=lgbm_reg_lambda, + num_leaves=lgbm_num_leaves, + max_bin=lgbm_max_bin, + colsample_bytree=lgbm_colsample_by_tree, verbosity=-1, linear_tree=False, ) @@ -89,7 +89,7 @@ def __init__( # noqa: D107, PLR0913, PLR0917 self._models.append( StackingRegressor( - estimators=[("lightgbm", lightgbm_model), ("gblinear", linear)], # type: ignore + estimators=[("lgbm", lgbm_model), ("gblinear", linear)], # type: ignore final_estimator=final_estimator, verbose=3, passthrough=False, @@ -173,8 +173,8 @@ def load_bytes(cls, model_bytes: bytes) -> Self: """ trusted_types = [ "collections.OrderedDict", - "lightgbm.basic.Booster", - "lightgbm.sklearn.LGBMRegressor", + "lgbm.basic.Booster", + "lgbm.sklearn.LGBMRegressor", "sklearn.utils._bunch.Bunch", "xgboost.core.Booster", "xgboost.sklearn.XGBRegressor", diff --git a/packages/openstef-models/src/openstef_models/estimators/lightgbm.py b/packages/openstef-models/src/openstef_models/estimators/lgbm.py similarity index 96% rename from packages/openstef-models/src/openstef_models/estimators/lightgbm.py rename to packages/openstef-models/src/openstef_models/estimators/lgbm.py index 0214b0929..ed4115b06 100644 --- a/packages/openstef-models/src/openstef_models/estimators/lightgbm.py +++ b/packages/openstef-models/src/openstef_models/estimators/lgbm.py @@ -153,7 +153,7 @@ def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np. A 2D array where each column corresponds to predicted quantiles. """ # noqa: D412 - return np.column_stack([model.predict(X=np.asarray(X)) for model in self._models]) # type: ignore + return np.column_stack([model.predict(X=X) for model in self._models]) # type: ignore def __sklearn_is_fitted__(self) -> bool: # noqa: PLW3201 """Check if all models are fitted. @@ -186,9 +186,9 @@ def load_bytes(cls, model_bytes: bytes) -> Self: """ trusted_types = [ "collections.OrderedDict", - "lightgbm.basic.Booster", - "lightgbm.sklearn.LGBMRegressor", - "openstef_models.estimators.lightgbm.LGBMQuantileRegressor", + "lgbm.basic.Booster", + "lgbm.sklearn.LGBMRegressor", + "openstef_models.estimators.lgbm.LGBMQuantileRegressor", ] instance = loads(model_bytes, trusted=trusted_types) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py index d796b0ef3..d77e50fda 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py @@ -197,6 +197,25 @@ class Forecaster(BatchPredictor[ForecastInputDataset, ForecastDataset], Configur ... ) """ + @abstractmethod + def __init__(self, config: ForecasterConfig) -> None: + """Initialize the forecaster with the given configuration. + + Args: + config: Configuration object specifying quantiles, horizons, and batching support. + """ + raise NotImplementedError("Subclasses must implement __init__") + + @property + @abstractmethod + def config(self) -> ForecasterConfig: + """Access the model's configuration parameters. + + Returns: + Configuration object containing fundamental model parameters. + """ + raise NotImplementedError("Subclasses must implement config") + __all__ = [ "Forecaster", diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index ec3cb0ed6..6e6c7e9f3 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -22,7 +22,7 @@ from openstef_models.estimators.hybrid import HybridQuantileRegressor from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams -from openstef_models.models.forecasting.lightgbm_forecaster import LightGBMHyperParams +from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams if TYPE_CHECKING: import numpy as np @@ -32,7 +32,7 @@ class HybridHyperParams(HyperParams): """Hyperparameters for Stacked LGBM GBLinear Regressor.""" - lightgbm_params: LightGBMHyperParams = LightGBMHyperParams() + lgbm_params: LGBMHyperParams = LGBMHyperParams() gb_linear_params: GBLinearHyperParams = GBLinearHyperParams() l1_penalty: float = Field( @@ -70,17 +70,17 @@ def __init__(self, config: HybridForecasterConfig) -> None: self._model = HybridQuantileRegressor( quantiles=[float(q) for q in config.quantiles], - lightgbm_n_estimators=config.hyperparams.lightgbm_params.n_estimators, - lightgbm_learning_rate=config.hyperparams.lightgbm_params.learning_rate, - lightgbm_max_depth=config.hyperparams.lightgbm_params.max_depth, - lightgbm_min_child_weight=config.hyperparams.lightgbm_params.min_child_weight, - lightgbm_min_data_in_leaf=config.hyperparams.lightgbm_params.min_data_in_leaf, - lightgbm_min_data_in_bin=config.hyperparams.lightgbm_params.min_data_in_bin, - lightgbm_reg_alpha=config.hyperparams.lightgbm_params.reg_alpha, - lightgbm_reg_lambda=config.hyperparams.lightgbm_params.reg_lambda, - lightgbm_num_leaves=config.hyperparams.lightgbm_params.num_leaves, - lightgbm_max_bin=config.hyperparams.lightgbm_params.max_bin, - lightgbm_colsample_by_tree=config.hyperparams.lightgbm_params.colsample_bytree, + lgbm_n_estimators=config.hyperparams.lgbm_params.n_estimators, + lgbm_learning_rate=config.hyperparams.lgbm_params.learning_rate, + lgbm_max_depth=config.hyperparams.lgbm_params.max_depth, + lgbm_min_child_weight=config.hyperparams.lgbm_params.min_child_weight, + lgbm_min_data_in_leaf=config.hyperparams.lgbm_params.min_data_in_leaf, + lgbm_min_data_in_bin=config.hyperparams.lgbm_params.min_data_in_bin, + lgbm_reg_alpha=config.hyperparams.lgbm_params.reg_alpha, + lgbm_reg_lambda=config.hyperparams.lgbm_params.reg_lambda, + lgbm_num_leaves=config.hyperparams.lgbm_params.num_leaves, + lgbm_max_bin=config.hyperparams.lgbm_params.max_bin, + lgbm_colsample_by_tree=config.hyperparams.lgbm_params.colsample_bytree, gblinear_n_steps=config.hyperparams.gb_linear_params.n_steps, gblinear_learning_rate=config.hyperparams.gb_linear_params.learning_rate, gblinear_reg_alpha=config.hyperparams.gb_linear_params.reg_alpha, diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py similarity index 88% rename from packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py rename to packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index 4e2dabe73..4c4508117 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lightgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -19,7 +19,7 @@ NotFittedError, ) from openstef_core.mixins import HyperParams -from openstef_models.estimators.lightgbm import LGBMQuantileRegressor +from openstef_models.estimators.lgbm import LGBMQuantileRegressor from openstef_models.explainability.mixins import ExplainableForecaster from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig @@ -28,13 +28,13 @@ import numpy.typing as npt -class LightGBMHyperParams(HyperParams): +class LGBMHyperParams(HyperParams): """LightGBM hyperparameters for gradient boosting tree models. Example: Creating custom hyperparameters for deep trees with regularization: - >>> hyperparams = LightGBMHyperParams( + >>> hyperparams = LGBMHyperParams( ... n_estimators=200, ... max_depth=8, ... learning_rate=0.1, @@ -121,7 +121,7 @@ class LightGBMHyperParams(HyperParams): ) -class LightGBMForecasterConfig(ForecasterConfig): +class LGBMForecasterConfig(ForecasterConfig): """Configuration for LightGBM-based forecaster. Extends HorizonForecasterConfig with LightGBM-specific hyperparameters and execution settings. @@ -130,13 +130,13 @@ class LightGBMForecasterConfig(ForecasterConfig): Creating a LightGBM forecaster configuration with custom hyperparameters: >>> from datetime import timedelta >>> from openstef_core.types import LeadTime, Quantile - >>> config = LightGBMForecasterConfig( + >>> config = LGBMForecasterConfig( ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], ... horizons=[LeadTime(timedelta(hours=1))], - ... hyperparams=LightGBMHyperParams(n_estimators=100, max_depth=6)) + ... hyperparams=LGBMHyperParams(n_estimators=100, max_depth=6)) """ # noqa: D205 - hyperparams: LightGBMHyperParams = LightGBMHyperParams() + hyperparams: LGBMHyperParams = LGBMHyperParams() # General Parameters device: str = Field( @@ -155,7 +155,7 @@ class LightGBMForecasterConfig(ForecasterConfig): MODEL_CODE_VERSION = 1 -class LightGBMForecaster(Forecaster, ExplainableForecaster): +class LGBMForecaster(Forecaster, ExplainableForecaster): """LightGBM-based forecaster for probabilistic energy forecasting. Implements gradient boosting trees using LightGBM for multi-quantile forecasting. @@ -177,12 +177,12 @@ class LightGBMForecaster(Forecaster, ExplainableForecaster): >>> from datetime import timedelta >>> from openstef_core.types import LeadTime, Quantile - >>> config = LightGBMForecasterConfig( + >>> config = LGBMForecasterConfig( ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], ... horizons=[LeadTime(timedelta(hours=1))], - ... hyperparams=LightGBMHyperParams(n_estimators=100, max_depth=6) + ... hyperparams=LGBMHyperParams(n_estimators=100, max_depth=6) ... ) - >>> forecaster = LightGBMForecaster(config) + >>> forecaster = LGBMForecaster(config) >>> # forecaster.fit(training_data) >>> # predictions = forecaster.predict(test_data) @@ -192,18 +192,18 @@ class LightGBMForecaster(Forecaster, ExplainableForecaster): magnitude-weighted pinball loss by default for better forecasting performance. See Also: - LightGBMHyperParams: Detailed hyperparameter configuration options. + LGBMHyperParams: Detailed hyperparameter configuration options. HorizonForecaster: Base interface for all forecasting models. GBLinearForecaster: Alternative linear model using LightGBM. """ - Config = LightGBMForecasterConfig - HyperParams = LightGBMHyperParams + Config = LGBMForecasterConfig + HyperParams = LGBMHyperParams - _config: LightGBMForecasterConfig - _lightgbm_model: LGBMQuantileRegressor + _config: LGBMForecasterConfig + _lgbm_model: LGBMQuantileRegressor - def __init__(self, config: LightGBMForecasterConfig) -> None: + def __init__(self, config: LGBMForecasterConfig) -> None: """Initialize LightGBM forecaster with configuration. Creates an untrained LightGBM regressor with the specified configuration. @@ -216,7 +216,7 @@ def __init__(self, config: LightGBMForecasterConfig) -> None: """ self._config = config - self._lightgbm_model = LGBMQuantileRegressor( + self._lgbm_model = LGBMQuantileRegressor( quantiles=[float(q) for q in config.quantiles], linear_tree=False, n_estimators=config.hyperparams.n_estimators, @@ -242,13 +242,13 @@ def config(self) -> ForecasterConfig: @property @override - def hyperparams(self) -> LightGBMHyperParams: + def hyperparams(self) -> LGBMHyperParams: return self._config.hyperparams @property @override def is_fitted(self) -> bool: - return self._lightgbm_model.__sklearn_is_fitted__() + return self._lgbm_model.__sklearn_is_fitted__() @override def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: @@ -267,7 +267,7 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None eval_set = (val_input_data, val_target) eval_sample_weight = [val_sample_weight] - self._lightgbm_model.fit( + self._lgbm_model.fit( X=input_data, y=target, feature_name=input_data.columns.tolist(), @@ -282,7 +282,7 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: raise NotFittedError(self.__class__.__name__) input_data: pd.DataFrame = data.input_data(start=data.forecast_start) - prediction: npt.NDArray[np.floating] = self._lightgbm_model.predict(X=input_data) + prediction: npt.NDArray[np.floating] = self._lgbm_model.predict(X=input_data) return ForecastDataset( data=pd.DataFrame( @@ -296,7 +296,7 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: @property @override def feature_importances(self) -> pd.DataFrame: - models = self._lightgbm_model.models + models = self._lgbm_model.models weights_df = pd.DataFrame( [models[i].feature_importances_ for i in range(len(models))], index=[quantile.format() for quantile in self.config.quantiles], @@ -312,4 +312,4 @@ def feature_importances(self) -> pd.DataFrame: return weights_abs / total -__all__ = ["LightGBMForecaster", "LightGBMForecasterConfig", "LightGBMHyperParams"] +__all__ = ["LGBMForecaster", "LGBMForecasterConfig", "LGBMHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py similarity index 84% rename from packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py rename to packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index 262c739f7..2636a7a13 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -21,18 +21,18 @@ NotFittedError, ) from openstef_core.mixins import HyperParams -from openstef_models.estimators.lightgbm import LGBMQuantileRegressor +from openstef_models.estimators.lgbm import LGBMQuantileRegressor from openstef_models.explainability.mixins import ExplainableForecaster from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig -class LgbLinearHyperParams(HyperParams): +class LGBMLinearHyperParams(HyperParams): """LgbLinear hyperparameters for gradient boosting tree models. Example: Creating custom hyperparameters for deep trees with regularization: - >>> hyperparams = LgbLinearHyperParams( + >>> hyperparams = LGBMLinearHyperParams( ... n_estimators=200, ... max_depth=8, ... learning_rate=0.1, @@ -49,23 +49,23 @@ class LgbLinearHyperParams(HyperParams): # Core Tree Boosting Parameters n_estimators: int = Field( - default=150, + default=77, description="Number of boosting rounds/trees to fit. Higher values may improve performance but " "increase training time and risk overfitting.", ) learning_rate: float = Field( - default=0.3, + default=0.07, alias="eta", description="Step size shrinkage used to prevent overfitting. Range: [0,1]. Lower values require " "more boosting rounds.", ) max_depth: int = Field( - default=4, + default=1, description="Maximum depth of trees. Higher values capture more complex patterns but risk " "overfitting. Range: [1,∞]", ) min_child_weight: float = Field( - default=1, + default=0.06, description="Minimum sum of instance weight (hessian) needed in a child. Higher values prevent " "overfitting. Range: [0,∞]", ) @@ -75,7 +75,7 @@ class LgbLinearHyperParams(HyperParams): description="Minimum number of data points in a leaf. Higher values prevent overfitting. Range: [1,∞]", ) min_data_in_bin: int = Field( - default=5, + default=13, description="Minimum number of data points in a bin. Higher values prevent overfitting. Range: [1,∞]", ) @@ -91,14 +91,14 @@ class LgbLinearHyperParams(HyperParams): # Tree Structure Control num_leaves: int = Field( - default=31, + default=78, description="Maximum number of leaves. 0 means no limit. Only relevant when grow_policy='lossguide'.", ) max_bin: int = Field( - default=256, + default=12, description="Maximum number of discrete bins for continuous features. Higher values may improve accuracy but " - "increase memory. Only for hist tree_method.", + "increase memory.", ) # Subsampling Parameters @@ -107,20 +107,8 @@ class LgbLinearHyperParams(HyperParams): description="Fraction of features used when constructing each tree. Range: (0,1]", ) - # General Parameters - random_state: int | None = Field( - default=None, - alias="seed", - description="Random seed for reproducibility. Controls tree structure randomness.", - ) - - early_stopping_rounds: int | None = Field( - default=10, - description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", - ) - -class LgbLinearForecasterConfig(ForecasterConfig): +class LGBMLinearForecasterConfig(ForecasterConfig): """Configuration for LgbLinear-based forecaster. Extends HorizonForecasterConfig with LgbLinear-specific hyperparameters and execution settings. @@ -129,14 +117,14 @@ class LgbLinearForecasterConfig(ForecasterConfig): Creating a LgbLinear forecaster configuration with custom hyperparameters: >>> from datetime import timedelta >>> from openstef_core.types import LeadTime, Quantile - >>> config = LgbLinearForecasterConfig( + >>> config = LGBMLinearForecasterConfig( ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], ... horizons=[LeadTime(timedelta(hours=1))], - ... hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=6) + ... hyperparams=LGBMLinearHyperParams(n_estimators=100, max_depth=6) ... ) """ # noqa: D205 - hyperparams: LgbLinearHyperParams = LgbLinearHyperParams() + hyperparams: LGBMLinearHyperParams = LGBMLinearHyperParams() # General Parameters device: str = Field( @@ -147,15 +135,26 @@ class LgbLinearForecasterConfig(ForecasterConfig): default=1, description="Number of parallel threads for tree construction. -1 uses all available cores.", ) - verbosity: Literal[0, 1, 2, 3] = Field( - default=0, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + verbosity: Literal[-1, 0, 1, 2, 3] = Field( + default=-1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + ) + + random_state: int | None = Field( + default=None, + alias="seed", + description="Random seed for reproducibility. Controls tree structure randomness.", + ) + + early_stopping_rounds: int | None = Field( + default=10, + description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", ) MODEL_CODE_VERSION = 1 -class LgbLinearForecaster(Forecaster, ExplainableForecaster): +class LGBMLinearForecaster(Forecaster, ExplainableForecaster): """LgbLinear-based forecaster for probabilistic energy forecasting. Implements gradient boosting trees using LgbLinear for multi-quantile forecasting. @@ -177,12 +176,12 @@ class LgbLinearForecaster(Forecaster, ExplainableForecaster): >>> from datetime import timedelta >>> from openstef_core.types import LeadTime, Quantile - >>> config = LgbLinearForecasterConfig( + >>> config = LGBMLinearForecasterConfig( ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], ... horizons=[LeadTime(timedelta(hours=1))], - ... hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=6) + ... hyperparams=LGBMLinearHyperParams(n_estimators=100, max_depth=6) ... ) - >>> forecaster = LgbLinearForecaster(config) + >>> forecaster = LGBMLinearForecaster(config) >>> # forecaster.fit(training_data) >>> # predictions = forecaster.predict(test_data) @@ -192,18 +191,18 @@ class LgbLinearForecaster(Forecaster, ExplainableForecaster): magnitude-weighted pinball loss by default for better forecasting performance. See Also: - LgbLinearHyperParams: Detailed hyperparameter configuration options. + LGBMLinearHyperParams: Detailed hyperparameter configuration options. HorizonForecaster: Base interface for all forecasting models. GBLinearForecaster: Alternative linear model using LgbLinear. """ - Config = LgbLinearForecasterConfig - HyperParams = LgbLinearHyperParams + Config = LGBMLinearForecasterConfig + HyperParams = LGBMLinearHyperParams - _config: LgbLinearForecasterConfig - _lgblinear_model: LGBMQuantileRegressor + _config: LGBMLinearForecasterConfig + _lgbmlinear_model: LGBMQuantileRegressor - def __init__(self, config: LgbLinearForecasterConfig) -> None: + def __init__(self, config: LGBMLinearForecasterConfig) -> None: """Initialize LgbLinear forecaster with configuration. Creates an untrained LgbLinear regressor with the specified configuration. @@ -216,7 +215,7 @@ def __init__(self, config: LgbLinearForecasterConfig) -> None: """ self._config = config - self._lgblinear_model = LGBMQuantileRegressor( + self._lgbmlinear_model = LGBMQuantileRegressor( quantiles=[float(q) for q in config.quantiles], linear_tree=True, n_estimators=config.hyperparams.n_estimators, @@ -230,8 +229,8 @@ def __init__(self, config: LgbLinearForecasterConfig) -> None: num_leaves=config.hyperparams.num_leaves, max_bin=config.hyperparams.max_bin, colsample_bytree=config.hyperparams.colsample_bytree, - random_state=config.hyperparams.random_state, - early_stopping_rounds=config.hyperparams.early_stopping_rounds, + random_state=config.random_state, + early_stopping_rounds=config.early_stopping_rounds, verbosity=config.verbosity, ) @@ -242,13 +241,13 @@ def config(self) -> ForecasterConfig: @property @override - def hyperparams(self) -> LgbLinearHyperParams: + def hyperparams(self) -> LGBMLinearHyperParams: return self._config.hyperparams @property @override def is_fitted(self) -> bool: - return self._lgblinear_model.__sklearn_is_fitted__() + return self._lgbmlinear_model.__sklearn_is_fitted__() @override def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: @@ -267,7 +266,7 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None eval_sample_weight = [val_sample_weight] - self._lgblinear_model.fit( # type: ignore + self._lgbmlinear_model.fit( # type: ignore X=input_data, y=target, feature_name=input_data.columns.tolist(), @@ -282,7 +281,7 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: raise NotFittedError(self.__class__.__name__) input_data: pd.DataFrame = data.input_data(start=data.forecast_start) - prediction: npt.NDArray[np.floating] = self._lgblinear_model.predict(X=input_data) + prediction: npt.NDArray[np.floating] = self._lgbmlinear_model.predict(X=input_data) return ForecastDataset( data=pd.DataFrame( @@ -296,7 +295,7 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: @property @override def feature_importances(self) -> pd.DataFrame: - models = self._lgblinear_model._models # noqa: SLF001 + models = self._lgbmlinear_model._models # noqa: SLF001 weights_df = pd.DataFrame( [models[i].feature_importances_ for i in range(len(models))], index=[quantile.format() for quantile in self.config.quantiles], @@ -312,4 +311,4 @@ def feature_importances(self) -> pd.DataFrame: return weights_abs / total -__all__ = ["LgbLinearForecaster", "LgbLinearForecasterConfig", "LgbLinearHyperParams"] +__all__ = ["LGBMLinearForecaster", "LGBMLinearForecasterConfig", "LGBMLinearHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 517f5be83..1f5fdaba0 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -31,8 +31,8 @@ from openstef_models.models.forecasting.flatliner_forecaster import FlatlinerForecaster from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster from openstef_models.models.forecasting.hybrid_forecaster import HybridForecaster -from openstef_models.models.forecasting.lgblinear_forecaster import LgbLinearForecaster -from openstef_models.models.forecasting.lightgbm_forecaster import LightGBMForecaster +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster from openstef_models.transforms.energy_domain import WindPowerFeatureAdder from openstef_models.transforms.general import ( @@ -117,7 +117,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob model_id: ModelIdentifier = Field(description="Unique identifier for the forecasting model.") # Model configuration - model: Literal["xgboost", "gblinear", "flatliner", "hybrid", "lightgbm", "lgblinear"] = Field( + model: Literal["xgboost", "gblinear", "flatliner", "hybrid", "lgbm", "lgbmlinear"] = Field( description="Type of forecasting model to use." ) # TODO(#652): Implement median forecaster quantiles: list[Quantile] = Field( @@ -143,13 +143,13 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob description="Hyperparameters for GBLinear forecaster.", ) - lightgbm_hyperparams: LightGBMForecaster.HyperParams = Field( - default=LightGBMForecaster.HyperParams(), + lgbm_hyperparams: LGBMForecaster.HyperParams = Field( + default=LGBMForecaster.HyperParams(), description="Hyperparameters for LightGBM forecaster.", ) - lgblinear_hyperparams: LgbLinearForecaster.HyperParams = Field( - default=LgbLinearForecaster.HyperParams(), + lgbmlinear_hyperparams: LGBMLinearForecaster.HyperParams = Field( + default=LGBMLinearForecaster.HyperParams(), description="Hyperparameters for LightGBM forecaster.", ) @@ -205,7 +205,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob ) sample_weight_exponent: float = Field( default_factory=lambda data: 1.0 - if data.get("model") in {"gblinear", "lgblinear", "lightgbm", "hybrid", "xgboost"} + if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "hybrid", "xgboost"} else 0.0, description="Exponent applied to scale the sample weights. " "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " @@ -348,7 +348,7 @@ def create_forecasting_workflow( ) ) postprocessing = [QuantileSorter()] - elif config.model == "lgblinear": + elif config.model == "lgbmlinear": preprocessing = [ *checks, *feature_adders, @@ -356,15 +356,15 @@ def create_forecasting_workflow( DatetimeFeaturesAdder(onehot_encode=False), *feature_standardizers, ] - forecaster = LgbLinearForecaster( - config=LgbLinearForecaster.Config( + forecaster = LGBMLinearForecaster( + config=LGBMLinearForecaster.Config( quantiles=config.quantiles, horizons=config.horizons, - hyperparams=config.lgblinear_hyperparams, + hyperparams=config.lgbmlinear_hyperparams, ) ) postprocessing = [QuantileSorter()] - elif config.model == "lightgbm": + elif config.model == "lgbm": preprocessing = [ *checks, *feature_adders, @@ -372,11 +372,11 @@ def create_forecasting_workflow( DatetimeFeaturesAdder(onehot_encode=False), *feature_standardizers, ] - forecaster = LightGBMForecaster( - config=LightGBMForecaster.Config( + forecaster = LGBMForecaster( + config=LGBMForecaster.Config( quantiles=config.quantiles, horizons=config.horizons, - hyperparams=config.lightgbm_hyperparams, + hyperparams=config.lgbm_hyperparams, ) ) postprocessing = [QuantileSorter()] diff --git a/packages/openstef-models/tests/unit/estimators/test_lightgbm.py b/packages/openstef-models/tests/unit/estimators/test_lgbm.py similarity index 95% rename from packages/openstef-models/tests/unit/estimators/test_lightgbm.py rename to packages/openstef-models/tests/unit/estimators/test_lgbm.py index 936b2e097..5dfa0bb5b 100644 --- a/packages/openstef-models/tests/unit/estimators/test_lightgbm.py +++ b/packages/openstef-models/tests/unit/estimators/test_lgbm.py @@ -5,7 +5,7 @@ import pytest from numpy.random import default_rng -from openstef_models.estimators.lightgbm import LGBMQuantileRegressor +from openstef_models.estimators.lgbm import LGBMQuantileRegressor @pytest.fixture diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py index e89fe13f9..f8251d484 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py @@ -15,16 +15,16 @@ HybridForecasterConfig, HybridHyperParams, ) -from openstef_models.models.forecasting.lightgbm_forecaster import LightGBMHyperParams +from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams @pytest.fixture def base_config() -> HybridForecasterConfig: """Base configuration for Hybrid forecaster tests.""" - lightgbm_params = LightGBMHyperParams(n_estimators=10, max_depth=2) + lgbm_params = LGBMHyperParams(n_estimators=10, max_depth=2) gb_linear_params = GBLinearHyperParams(n_steps=5, learning_rate=0.1, reg_alpha=0.0, reg_lambda=0.0) params = HybridHyperParams( - lightgbm_params=lightgbm_params, + lgbm_params=lgbm_params, gb_linear_params=gb_linear_params, ) return HybridForecasterConfig( diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py similarity index 75% rename from packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py rename to packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py index efc728ac3..5ef874537 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lightgbm_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py @@ -9,21 +9,21 @@ from openstef_core.datasets import ForecastInputDataset from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q -from openstef_models.models.forecasting.lightgbm_forecaster import ( - LightGBMForecaster, - LightGBMForecasterConfig, - LightGBMHyperParams, +from openstef_models.models.forecasting.lgbm_forecaster import ( + LGBMForecaster, + LGBMForecasterConfig, + LGBMHyperParams, ) @pytest.fixture -def base_config() -> LightGBMForecasterConfig: +def base_config() -> LGBMForecasterConfig: """Base configuration for LightGBM forecaster tests.""" - return LightGBMForecasterConfig( + return LGBMForecasterConfig( quantiles=[Q(0.1), Q(0.5), Q(0.9)], horizons=[LeadTime(timedelta(days=1))], - hyperparams=LightGBMHyperParams(n_estimators=100, max_depth=3, min_data_in_leaf=1, min_data_in_bin=1), + hyperparams=LGBMHyperParams(n_estimators=100, max_depth=3, min_data_in_leaf=1, min_data_in_bin=1), device="cpu", n_jobs=1, verbosity=0, @@ -31,23 +31,23 @@ def base_config() -> LightGBMForecasterConfig: @pytest.fixture -def forecaster(base_config: LightGBMForecasterConfig) -> LightGBMForecaster: - return LightGBMForecaster(base_config) +def forecaster(base_config: LGBMForecasterConfig) -> LGBMForecaster: + return LGBMForecaster(base_config) -def test_initialization(forecaster: LightGBMForecaster): - assert isinstance(forecaster, LightGBMForecaster) +def test_initialization(forecaster: LGBMForecaster): + assert isinstance(forecaster, LGBMForecaster) assert forecaster.config.hyperparams.n_estimators == 100 # type: ignore -def test_quantile_lightgbm_forecaster__fit_predict( +def test_quantile_lgbm_forecaster__fit_predict( sample_forecast_input_dataset: ForecastInputDataset, - base_config: LightGBMForecasterConfig, + base_config: LGBMForecasterConfig, ): """Test basic fit and predict workflow with comprehensive output validation.""" # Arrange expected_quantiles = base_config.quantiles - forecaster = LightGBMForecaster(config=base_config) + forecaster = LGBMForecaster(config=base_config) # Act forecaster.fit(sample_forecast_input_dataset) @@ -72,42 +72,42 @@ def test_quantile_lightgbm_forecaster__fit_predict( assert (stds > 0).all(), f"All columns should have variation, got stds: {dict(stds)}" -def test_lightgbm_forecaster__not_fitted_error( +def test_lgbm_forecaster__not_fitted_error( sample_forecast_input_dataset: ForecastInputDataset, - base_config: LightGBMForecasterConfig, + base_config: LGBMForecasterConfig, ): """Test that NotFittedError is raised when predicting before fitting.""" # Arrange - forecaster = LightGBMForecaster(config=base_config) + forecaster = LGBMForecaster(config=base_config) # Act & Assert with pytest.raises(NotFittedError): forecaster.predict(sample_forecast_input_dataset) -def test_lightgbm_forecaster__predict_not_fitted_raises_error( +def test_lgbm_forecaster__predict_not_fitted_raises_error( sample_forecast_input_dataset: ForecastInputDataset, - base_config: LightGBMForecasterConfig, + base_config: LGBMForecasterConfig, ): """Test that predict() raises NotFittedError when called before fit().""" # Arrange - forecaster = LightGBMForecaster(config=base_config) + forecaster = LGBMForecaster(config=base_config) # Act & Assert with pytest.raises( NotFittedError, - match="The LightGBMForecaster has not been fitted yet. Please call 'fit' before using it.", # noqa: RUF043 + match="The LGBMForecaster has not been fitted yet. Please call 'fit' before using it.", # noqa: RUF043 ): forecaster.predict(sample_forecast_input_dataset) -def test_lightgbm_forecaster__with_sample_weights( +def test_lgbm_forecaster__with_sample_weights( sample_dataset_with_weights: ForecastInputDataset, - base_config: LightGBMForecasterConfig, + base_config: LGBMForecasterConfig, ): """Test that forecaster works with sample weights and produces different results.""" # Arrange - forecaster_with_weights = LightGBMForecaster(config=base_config) + forecaster_with_weights = LGBMForecaster(config=base_config) # Create dataset without weights for comparison data_without_weights = ForecastInputDataset( @@ -116,7 +116,7 @@ def test_lightgbm_forecaster__with_sample_weights( target_column=sample_dataset_with_weights.target_column, forecast_start=sample_dataset_with_weights.forecast_start, ) - forecaster_without_weights = LightGBMForecaster(config=base_config) + forecaster_without_weights = LGBMForecaster(config=base_config) # Act forecaster_with_weights.fit(sample_dataset_with_weights) @@ -137,13 +137,13 @@ def test_lightgbm_forecaster__with_sample_weights( assert differences.sum().sum() > 0, "Sample weights should affect model predictions" -def test_lightgbm_forecaster__feature_importances( +def test_lgbm_forecaster__feature_importances( sample_forecast_input_dataset: ForecastInputDataset, - base_config: LightGBMForecasterConfig, + base_config: LGBMForecasterConfig, ): """Test that feature_importances returns correct normalized importance scores.""" # Arrange - forecaster = LightGBMForecaster(config=base_config) + forecaster = LGBMForecaster(config=base_config) forecaster.fit(sample_forecast_input_dataset) # Act diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py similarity index 75% rename from packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py rename to packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py index dc743be07..61882e51d 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lgblinear_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py @@ -9,21 +9,21 @@ from openstef_core.datasets import ForecastInputDataset from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q -from openstef_models.models.forecasting.lgblinear_forecaster import ( - LgbLinearForecaster, - LgbLinearForecasterConfig, - LgbLinearHyperParams, +from openstef_models.models.forecasting.lgbmlinear_forecaster import ( + LGBMLinearForecaster, + LGBMLinearForecasterConfig, + LGBMLinearHyperParams, ) @pytest.fixture -def base_config() -> LgbLinearForecasterConfig: +def base_config() -> LGBMLinearForecasterConfig: """Base configuration for LgbLinear forecaster tests.""" - return LgbLinearForecasterConfig( + return LGBMLinearForecasterConfig( quantiles=[Q(0.1), Q(0.5), Q(0.9)], horizons=[LeadTime(timedelta(days=1))], - hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=3, min_data_in_leaf=1, min_data_in_bin=1), + hyperparams=LGBMLinearHyperParams(n_estimators=100, max_depth=3, min_data_in_leaf=1, min_data_in_bin=1), device="cpu", n_jobs=1, verbosity=0, @@ -31,23 +31,23 @@ def base_config() -> LgbLinearForecasterConfig: @pytest.fixture -def forecaster(base_config: LgbLinearForecasterConfig) -> LgbLinearForecaster: - return LgbLinearForecaster(base_config) +def forecaster(base_config: LGBMLinearForecasterConfig) -> LGBMLinearForecaster: + return LGBMLinearForecaster(base_config) -def test_initialization(forecaster: LgbLinearForecaster): - assert isinstance(forecaster, LgbLinearForecaster) +def test_initialization(forecaster: LGBMLinearForecaster): + assert isinstance(forecaster, LGBMLinearForecaster) assert forecaster.config.hyperparams.n_estimators == 100 # type: ignore -def test_quantile_lgblinear_forecaster__fit_predict( +def test_quantile_lgbmlinear_forecaster__fit_predict( sample_forecast_input_dataset: ForecastInputDataset, - base_config: LgbLinearForecasterConfig, + base_config: LGBMLinearForecasterConfig, ): """Test basic fit and predict workflow with comprehensive output validation.""" # Arrange expected_quantiles = base_config.quantiles - forecaster = LgbLinearForecaster(config=base_config) + forecaster = LGBMLinearForecaster(config=base_config) # Act forecaster.fit(sample_forecast_input_dataset) @@ -72,42 +72,42 @@ def test_quantile_lgblinear_forecaster__fit_predict( assert (stds > 0).all(), f"All columns should have variation, got stds: {dict(stds)}" -def test_lgblinear_forecaster__not_fitted_error( +def test_lgbmlinear_forecaster__not_fitted_error( sample_forecast_input_dataset: ForecastInputDataset, - base_config: LgbLinearForecasterConfig, + base_config: LGBMLinearForecasterConfig, ): """Test that NotFittedError is raised when predicting before fitting.""" # Arrange - forecaster = LgbLinearForecaster(config=base_config) + forecaster = LGBMLinearForecaster(config=base_config) # Act & Assert with pytest.raises(NotFittedError): forecaster.predict(sample_forecast_input_dataset) -def test_lgblinear_forecaster__predict_not_fitted_raises_error( +def test_lgbmlinear_forecaster__predict_not_fitted_raises_error( sample_forecast_input_dataset: ForecastInputDataset, - base_config: LgbLinearForecasterConfig, + base_config: LGBMLinearForecasterConfig, ): """Test that predict() raises NotFittedError when called before fit().""" # Arrange - forecaster = LgbLinearForecaster(config=base_config) + forecaster = LGBMLinearForecaster(config=base_config) # Act & Assert with pytest.raises( NotFittedError, - match="The LgbLinearForecaster has not been fitted yet. Please call 'fit' before using it.", # noqa: RUF043 + match="The LGBMLinearForecaster has not been fitted yet. Please call 'fit' before using it.", # noqa: RUF043 ): forecaster.predict(sample_forecast_input_dataset) -def test_lgblinear_forecaster__with_sample_weights( +def test_lgbmlinear_forecaster__with_sample_weights( sample_dataset_with_weights: ForecastInputDataset, - base_config: LgbLinearForecasterConfig, + base_config: LGBMLinearForecasterConfig, ): """Test that forecaster works with sample weights and produces different results.""" # Arrange - forecaster_with_weights = LgbLinearForecaster(config=base_config) + forecaster_with_weights = LGBMLinearForecaster(config=base_config) # Create dataset without weights for comparison data_without_weights = ForecastInputDataset( @@ -116,7 +116,7 @@ def test_lgblinear_forecaster__with_sample_weights( target_column=sample_dataset_with_weights.target_column, forecast_start=sample_dataset_with_weights.forecast_start, ) - forecaster_without_weights = LgbLinearForecaster(config=base_config) + forecaster_without_weights = LGBMLinearForecaster(config=base_config) # Act forecaster_with_weights.fit(sample_dataset_with_weights) @@ -137,13 +137,13 @@ def test_lgblinear_forecaster__with_sample_weights( assert differences.sum().sum() > 0, "Sample weights should affect model predictions" -def test_lgblinear_forecaster__feature_importances( +def test_lgbmlinear_forecaster__feature_importances( sample_forecast_input_dataset: ForecastInputDataset, - base_config: LgbLinearForecasterConfig, + base_config: LGBMLinearForecasterConfig, ): """Test that feature_importances returns correct normalized importance scores.""" # Arrange - forecaster = LgbLinearForecaster(config=base_config) + forecaster = LGBMLinearForecaster(config=base_config) forecaster.fit(sample_forecast_input_dataset) # Act From a2538b6aa766f0f68235002a707dc12859ebcfe4 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 11 Nov 2025 21:14:10 +0100 Subject: [PATCH 008/104] Update LGBM and LGBMLinear defaults, fixed comments --- .../src/openstef_models/estimators/lgbm.py | 47 +-- .../mixins/model_serializer.py | 2 + .../models/forecasting/hybrid_forecaster.py | 35 +- .../forecasting/lgblinear_forecaster.py | 305 ++++++++++++++++++ .../models/forecasting/lgbm_forecaster.py | 72 ++--- .../forecasting/lgbmlinear_forecaster.py | 12 +- .../forecasting/test_lgbm_forecaster.py | 19 +- .../forecasting/test_lgbmlinear_forecaster.py | 19 +- 8 files changed, 381 insertions(+), 130 deletions(-) create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py diff --git a/packages/openstef-models/src/openstef_models/estimators/lgbm.py b/packages/openstef-models/src/openstef_models/estimators/lgbm.py index ed4115b06..cf149c54a 100644 --- a/packages/openstef-models/src/openstef_models/estimators/lgbm.py +++ b/packages/openstef-models/src/openstef_models/estimators/lgbm.py @@ -9,16 +9,13 @@ by a separate tree within the same boosting ensemble. The module also includes serialization utilities. """ -from typing import Self +from typing import Any import numpy as np import numpy.typing as npt import pandas as pd from lightgbm import LGBMRegressor from sklearn.base import BaseEstimator, RegressorMixin -from skops.io import dumps, loads - -from openstef_core.exceptions import ModelLoadingError class LGBMQuantileRegressor(BaseEstimator, RegressorMixin): @@ -116,7 +113,7 @@ def fit( sample_weight: npt.NDArray[np.floating] | pd.Series | None = None, feature_name: list[str] | None = None, eval_set: list[tuple[pd.DataFrame, npt.NDArray[np.floating]]] | None = None, - eval_sample_weight: list[npt.NDArray[np.floating]] | None = None, + eval_sample_weight: list[npt.NDArray[np.floating]] | list[pd.Series[Any]] | None = None, ) -> None: """Fit the multi-quantile regressor. @@ -138,9 +135,9 @@ def fit( y=y, eval_metric="quantile", sample_weight=sample_weight, - eval_set=eval_set, # type: ignore - eval_sample_weight=eval_sample_weight, # type: ignore - feature_name=feature_name, # type: ignore + eval_set=eval_set, + eval_sample_weight=eval_sample_weight, + feature_name=feature_name if feature_name is not None else "auto", ) def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: @@ -163,40 +160,6 @@ def __sklearn_is_fitted__(self) -> bool: # noqa: PLW3201 """ return all(model.__sklearn_is_fitted__() for model in self._models) - def save_bytes(self) -> bytes: - """Serialize the model. - - Returns: - A string representation of the model. - """ - return dumps(self) - - @classmethod - def load_bytes(cls, model_bytes: bytes) -> Self: - """Deserialize the model from bytes using joblib. - - Args: - model_bytes : Bytes representing the serialized model. - - Returns: - An instance of LgbLinearQuantileRegressor. - - Raises: - ModelLoadingError: If the deserialized object is not a LgbLinearQuantileRegressor. - """ - trusted_types = [ - "collections.OrderedDict", - "lgbm.basic.Booster", - "lgbm.sklearn.LGBMRegressor", - "openstef_models.estimators.lgbm.LGBMQuantileRegressor", - ] - instance = loads(model_bytes, trusted=trusted_types) - - if not isinstance(instance, cls): - raise ModelLoadingError("Deserialized object is not a LgbLinearQuantileRegressor") - - return instance - @property def models(self) -> list[LGBMRegressor]: """Get the list of underlying quantile models. diff --git a/packages/openstef-models/src/openstef_models/mixins/model_serializer.py b/packages/openstef-models/src/openstef_models/mixins/model_serializer.py index ab00993f7..1167a9930 100644 --- a/packages/openstef-models/src/openstef_models/mixins/model_serializer.py +++ b/packages/openstef-models/src/openstef_models/mixins/model_serializer.py @@ -69,4 +69,6 @@ def deserialize(self, file: BinaryIO) -> object: """ +# TODO @egordm, @MvLieshout : Add SkopsModelSerializer implementation + __all__ = ["ModelIdentifier", "ModelSerializer"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index 6e6c7e9f3..838f2abba 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -9,8 +9,10 @@ The implementation is based on sklearn's StackingRegressor. """ +import logging from typing import TYPE_CHECKING, override +import numpy as np import pandas as pd from pydantic import Field @@ -24,8 +26,9 @@ from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +logger = logging.getLogger(__name__) + if TYPE_CHECKING: - import numpy as np import numpy.typing as npt @@ -97,6 +100,18 @@ def is_fitted(self) -> bool: """Check if the model is fitted.""" return self._model.is_fitted + @staticmethod + def _prepare_fit_input(data: ForecastInputDataset) -> tuple[pd.DataFrame, np.ndarray, pd.Series]: + input_data: pd.DataFrame = data.input_data() + + # Scale the target variable + target: np.ndarray = np.asarray(data.target_series.values) + + # Prepare sample weights + sample_weight: pd.Series = data.sample_weight_series + + return input_data, target, sample_weight + @override def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: """Fit the Hybrid model to the training data. @@ -106,11 +121,19 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None data_val: Validation data for tuning the model (optional, not used in this implementation). """ - input_data: pd.DataFrame = data.input_data() - target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore - sample_weights: pd.Series = data.sample_weight_series - - self._model.fit(X=input_data, y=target, sample_weight=sample_weights) + # Prepare training data + input_data, target, sample_weight = self._prepare_fit_input(data) + + if data_val is not None: + logger.warning( + "Validation data provided, but HybridForecaster does not currently support validation during fitting." + ) + + self._model.fit( + X=input_data, + y=target, + sample_weight=sample_weight, + ) @override def predict(self, data: ForecastInputDataset) -> ForecastDataset: diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py new file mode 100644 index 000000000..9176fda14 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py @@ -0,0 +1,305 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""LightGBM-based forecasting models for probabilistic energy forecasting. + +Provides gradient boosting tree models using LightGBM for multi-quantile energy +forecasting. Optimized for time series data with specialized loss functions and +comprehensive hyperparameter control for production forecasting workflows. +""" + +from typing import Literal, cast, override + +import numpy as np +import numpy.typing as npt +import pandas as pd +from pydantic import Field + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + NotFittedError, +) +from openstef_core.mixins import HyperParams +from openstef_models.estimators.lgbm import LGBMQuantileRegressor +from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig + + +class LgbLinearHyperParams(HyperParams): + """LgbLinear hyperparameters for gradient boosting tree models. + + Example: + Creating custom hyperparameters for deep trees with regularization: + + >>> hyperparams = LgbLinearHyperParams( + ... n_estimators=200, + ... max_depth=8, + ... learning_rate=0.1, + ... reg_alpha=0.1, + ... reg_lambda=1.0, + ... ) + + Note: + These parameters are optimized for probabilistic forecasting with + quantile regression. The default objective function is specialized + for magnitude-weighted pinball loss. + """ + + # Core Tree Boosting Parameters + + n_estimators: int = Field( + default=100, + description="Number of boosting rounds/trees to fit. Higher values may improve performance but " + "increase training time and risk overfitting.", + ) + learning_rate: float = Field( + default=0.1, + alias="eta", + description="Step size shrinkage used to prevent overfitting. Range: [0,1]. Lower values require " + "more boosting rounds.", + ) + max_depth: int = Field( + default=4, # Different from Factory Default (-1, unlimited) + description="Maximum depth of trees. Higher values capture more complex patterns but risk " + "overfitting. Range: [1,∞]", + ) + min_child_weight: float = Field( + default=1e-3, + description="Minimum sum of instance weight (hessian) needed in a child. Higher values prevent " + "overfitting. Range: [0,∞]", + ) + + min_data_in_leaf: int = Field( + default=20, + description="Minimum number of data points in a leaf. Higher values prevent overfitting. Range: [1,∞]", + ) + min_data_in_bin: int = Field( + default=20, + description="Minimum number of data points in a bin. Higher values prevent overfitting. Range: [1,∞]", + ) + + # Regularization + reg_alpha: float = Field( + default=0, + description="L1 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", + ) + reg_lambda: float = Field( + default=0.0, + description="L2 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", + ) + + # Tree Structure Control + num_leaves: int = Field( + default=31, + description="Maximum number of leaves. 0 means no limit. Only relevant when grow_policy='lossguide'.", + ) + + max_bin: int = Field( + default=256, + description="Maximum number of discrete bins for continuous features. Higher values may improve accuracy but " + "increase memory. Only for hist tree_method.", + ) + + # Subsampling Parameters + colsample_bytree: float = Field( + default=1, + description="Fraction of features used when constructing each tree. Range: (0,1]", + ) + + +class LgbLinearForecasterConfig(ForecasterConfig): + """Configuration for LgbLinear-based forecaster. + Extends HorizonForecasterConfig with LgbLinear-specific hyperparameters + and execution settings. + + Example: + Creating a LgbLinear forecaster configuration with custom hyperparameters: + >>> from datetime import timedelta + >>> from openstef_core.types import LeadTime, Quantile + >>> config = LgbLinearForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime(timedelta(hours=1))], + ... hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=6) + ... ) + """ # noqa: D205 + + hyperparams: LgbLinearHyperParams = LgbLinearHyperParams() + + # General Parameters + device: str = Field( + default="cpu", + description="Device for LgbLinear computation. Options: 'cpu', 'cuda', 'cuda:', 'gpu'", + ) + n_jobs: int = Field( + default=1, + description="Number of parallel threads for tree construction. -1 uses all available cores.", + ) + verbosity: Literal[0, 1, 2, 3] = Field( + default=0, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + ) + + # General Parameters + random_state: int | None = Field( + default=None, + alias="seed", + description="Random seed for reproducibility. Controls tree structure randomness.", + ) + + early_stopping_rounds: int | None = Field( + default=0, + description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", + ) + + +MODEL_CODE_VERSION = 1 + + +class LgbLinearForecaster(Forecaster, ExplainableForecaster): + """LgbLinear-based forecaster for probabilistic energy forecasting. + + Implements gradient boosting trees using LgbLinear for multi-quantile forecasting. + Optimized for time series prediction with specialized loss functions and + comprehensive hyperparameter control suitable for production energy forecasting. + + The forecaster uses a multi-output strategy where each quantile is predicted + by separate trees within the same boosting ensemble. This approach provides + well-calibrated uncertainty estimates while maintaining computational efficiency. + + Invariants: + - fit() must be called before predict() to train the model + - Configuration quantiles determine the number of prediction outputs + - Model state is preserved across predict() calls after fitting + - Input features must match training data structure during prediction + + Example: + Basic forecasting workflow: + + >>> from datetime import timedelta + >>> from openstef_core.types import LeadTime, Quantile + >>> config = LgbLinearForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime(timedelta(hours=1))], + ... hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=6) + ... ) + >>> forecaster = LgbLinearForecaster(config) + >>> # forecaster.fit(training_data) + >>> # predictions = forecaster.predict(test_data) + + Note: + LgbLinear dependency is optional and must be installed separately. + The model automatically handles multi-quantile output and uses + magnitude-weighted pinball loss by default for better forecasting performance. + + See Also: + LgbLinearHyperParams: Detailed hyperparameter configuration options. + HorizonForecaster: Base interface for all forecasting models. + GBLinearForecaster: Alternative linear model using LgbLinear. + """ + + Config = LgbLinearForecasterConfig + HyperParams = LgbLinearHyperParams + + _config: LgbLinearForecasterConfig + _lgblinear_model: LGBMQuantileRegressor + + def __init__(self, config: LgbLinearForecasterConfig) -> None: + """Initialize LgbLinear forecaster with configuration. + + Creates an untrained LgbLinear regressor with the specified configuration. + The underlying LgbLinear model is configured for multi-output quantile + regression using the provided hyperparameters and execution settings. + + Args: + config: Complete configuration including hyperparameters, quantiles, + and execution settings for the LgbLinear model. + """ + self._config = config + + self._lgblinear_model = LGBMQuantileRegressor( + quantiles=[float(q) for q in config.quantiles], + linear_tree=True, + random_state=config.random_state, + early_stopping_rounds=config.early_stopping_rounds, + verbosity=config.verbosity, + **config.hyperparams.model_dump(), + ) + + @property + @override + def config(self) -> ForecasterConfig: + return self._config + + @property + @override + def hyperparams(self) -> LgbLinearHyperParams: + return self._config.hyperparams + + @property + @override + def is_fitted(self) -> bool: + return self._lgblinear_model.__sklearn_is_fitted__() + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + input_data: pd.DataFrame = data.input_data() + target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore + sample_weight = data.sample_weight_series + + # Prepare validation data if provided + eval_set = None + eval_sample_weight = None + if data_val is not None: + val_input_data: pd.DataFrame = data_val.input_data() + val_target: npt.NDArray[np.floating] = data_val.target_series.to_numpy() # type: ignore + val_sample_weight = cast(npt.NDArray[np.floating], data_val.sample_weight_series.to_numpy()) # type: ignore + eval_set = [(val_input_data, val_target)] + + eval_sample_weight = [val_sample_weight] + + self._lgblinear_model.fit( # type: ignore + X=input_data, + y=target, + feature_name=input_data.columns.tolist(), + sample_weight=sample_weight, + eval_set=eval_set, + eval_sample_weight=eval_sample_weight, + ) + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + input_data: pd.DataFrame = data.input_data(start=data.forecast_start) + prediction: npt.NDArray[np.floating] = self._lgblinear_model.predict(X=input_data) + + return ForecastDataset( + data=pd.DataFrame( + data=prediction, + index=input_data.index, + columns=[quantile.format() for quantile in self.config.quantiles], + ), + sample_interval=data.sample_interval, + ) + + @property + @override + def feature_importances(self) -> pd.DataFrame: + models = self._lgblinear_model._models # noqa: SLF001 + weights_df = pd.DataFrame( + [models[i].feature_importances_ for i in range(len(models))], + index=[quantile.format() for quantile in self.config.quantiles], + columns=models[0].feature_name_, + ).transpose() + + weights_df.index.name = "feature_name" + weights_df.columns.name = "quantiles" + + weights_abs = weights_df.abs() + total = weights_abs.sum(axis=0).replace(to_replace=0, value=1.0) # pyright: ignore[reportUnknownMemberType] + + return weights_abs / total + + +__all__ = ["LgbLinearForecaster", "LgbLinearForecasterConfig", "LgbLinearHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index 4c4508117..0c4442328 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -108,18 +108,6 @@ class LGBMHyperParams(HyperParams): description="Fraction of features used when constructing each tree. Range: (0,1]", ) - # General Parameters - random_state: int | None = Field( - default=None, - alias="seed", - description="Random seed for reproducibility. Controls tree structure randomness.", - ) - - early_stopping_rounds: int | None = Field( - default=None, - description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", - ) - class LGBMForecasterConfig(ForecasterConfig): """Configuration for LightGBM-based forecaster. @@ -151,6 +139,17 @@ class LGBMForecasterConfig(ForecasterConfig): default=-1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" ) + random_state: int | None = Field( + default=None, + alias="seed", + description="Random seed for reproducibility. Controls tree structure randomness.", + ) + + early_stopping_rounds: int | None = Field( + default=None, + description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", + ) + MODEL_CODE_VERSION = 1 @@ -219,20 +218,10 @@ def __init__(self, config: LGBMForecasterConfig) -> None: self._lgbm_model = LGBMQuantileRegressor( quantiles=[float(q) for q in config.quantiles], linear_tree=False, - n_estimators=config.hyperparams.n_estimators, - learning_rate=config.hyperparams.learning_rate, - max_depth=config.hyperparams.max_depth, - min_child_weight=config.hyperparams.min_child_weight, - min_data_in_leaf=config.hyperparams.min_data_in_leaf, - min_data_in_bin=config.hyperparams.min_data_in_bin, - reg_alpha=config.hyperparams.reg_alpha, - reg_lambda=config.hyperparams.reg_lambda, - num_leaves=config.hyperparams.num_leaves, - max_bin=config.hyperparams.max_bin, - colsample_bytree=config.hyperparams.colsample_bytree, - random_state=config.hyperparams.random_state, - early_stopping_rounds=config.hyperparams.early_stopping_rounds, + random_state=config.random_state, + early_stopping_rounds=config.early_stopping_rounds, verbosity=config.verbosity, + **config.hyperparams.model_dump(), ) @property @@ -250,30 +239,35 @@ def hyperparams(self) -> LGBMHyperParams: def is_fitted(self) -> bool: return self._lgbm_model.__sklearn_is_fitted__() + @staticmethod + def _prepare_fit_input(data: ForecastInputDataset) -> tuple[pd.DataFrame, np.ndarray, pd.Series]: + input_data: pd.DataFrame = data.input_data() + target: np.ndarray = np.asarray(data.target_series.values) + sample_weight: pd.Series = data.sample_weight_series + + return input_data, target, sample_weight + @override def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: - input_data: pd.DataFrame = data.input_data() - target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore + # Prepare training data + input_data, target, sample_weight = self._prepare_fit_input(data) - sample_weight = data.sample_weight_series + # Evaluation sets + eval_set = [(input_data, target)] + sample_weight_eval_set = [sample_weight] - # Prepare validation data if provided - eval_set = None - eval_sample_weight = None if data_val is not None: - val_input_data: pd.DataFrame = data_val.input_data() - val_target: npt.NDArray[np.floating] = data_val.target_series.to_numpy() # type: ignore - val_sample_weight = data_val.sample_weight_series.to_numpy() # type: ignore - eval_set = (val_input_data, val_target) - eval_sample_weight = [val_sample_weight] + input_data_val, target_val, sample_weight_val = self._prepare_fit_input(data_val) + eval_set.append((input_data_val, target_val)) + sample_weight_eval_set.append(sample_weight_val) self._lgbm_model.fit( X=input_data, y=target, feature_name=input_data.columns.tolist(), - sample_weight=sample_weight, # type: ignore - eval_set=eval_set, # type: ignore - eval_sample_weight=eval_sample_weight, # type: ignore + sample_weight=sample_weight, + eval_set=eval_set, + eval_sample_weight=sample_weight_eval_set, ) @override diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index 2636a7a13..202dd1e4e 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -218,20 +218,10 @@ def __init__(self, config: LGBMLinearForecasterConfig) -> None: self._lgbmlinear_model = LGBMQuantileRegressor( quantiles=[float(q) for q in config.quantiles], linear_tree=True, - n_estimators=config.hyperparams.n_estimators, - learning_rate=config.hyperparams.learning_rate, - max_depth=config.hyperparams.max_depth, - min_child_weight=config.hyperparams.min_child_weight, - min_data_in_leaf=config.hyperparams.min_data_in_leaf, - min_data_in_bin=config.hyperparams.min_data_in_bin, - reg_alpha=config.hyperparams.reg_alpha, - reg_lambda=config.hyperparams.reg_lambda, - num_leaves=config.hyperparams.num_leaves, - max_bin=config.hyperparams.max_bin, - colsample_bytree=config.hyperparams.colsample_bytree, random_state=config.random_state, early_stopping_rounds=config.early_stopping_rounds, verbosity=config.verbosity, + **config.hyperparams.model_dump(), ) @property diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py index 5ef874537..47bed1774 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py @@ -85,22 +85,6 @@ def test_lgbm_forecaster__not_fitted_error( forecaster.predict(sample_forecast_input_dataset) -def test_lgbm_forecaster__predict_not_fitted_raises_error( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: LGBMForecasterConfig, -): - """Test that predict() raises NotFittedError when called before fit().""" - # Arrange - forecaster = LGBMForecaster(config=base_config) - - # Act & Assert - with pytest.raises( - NotFittedError, - match="The LGBMForecaster has not been fitted yet. Please call 'fit' before using it.", # noqa: RUF043 - ): - forecaster.predict(sample_forecast_input_dataset) - - def test_lgbm_forecaster__with_sample_weights( sample_dataset_with_weights: ForecastInputDataset, base_config: LGBMForecasterConfig, @@ -160,3 +144,6 @@ def test_lgbm_forecaster__feature_importances( col_sums = feature_importances.sum(axis=0) pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=expected_columns), atol=1e-10) assert (feature_importances >= 0).all().all() + + +# TODO : Add tests on different loss functions @MvLieshout diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py index 61882e51d..1a2ce31cf 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py @@ -85,22 +85,6 @@ def test_lgbmlinear_forecaster__not_fitted_error( forecaster.predict(sample_forecast_input_dataset) -def test_lgbmlinear_forecaster__predict_not_fitted_raises_error( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: LGBMLinearForecasterConfig, -): - """Test that predict() raises NotFittedError when called before fit().""" - # Arrange - forecaster = LGBMLinearForecaster(config=base_config) - - # Act & Assert - with pytest.raises( - NotFittedError, - match="The LGBMLinearForecaster has not been fitted yet. Please call 'fit' before using it.", # noqa: RUF043 - ): - forecaster.predict(sample_forecast_input_dataset) - - def test_lgbmlinear_forecaster__with_sample_weights( sample_dataset_with_weights: ForecastInputDataset, base_config: LGBMLinearForecasterConfig, @@ -160,3 +144,6 @@ def test_lgbmlinear_forecaster__feature_importances( col_sums = feature_importances.sum(axis=0) pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=expected_columns), atol=1e-10) assert (feature_importances >= 0).all().all() + + +# TODO : Add tests on different loss functions @MvLieshout From 3d5460413a54a43acf1c8d393a2a434c899dfca5 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 12 Nov 2025 09:49:26 +0100 Subject: [PATCH 009/104] Fixed comments --- .../src/openstef_models/estimators/hybrid.py | 44 +-- .../src/openstef_models/estimators/lgbm.py | 4 +- .../models/forecasting/hybrid_forecaster.py | 2 + .../forecasting/lgblinear_forecaster.py | 305 ------------------ .../models/forecasting/lgbm_forecaster.py | 2 +- 5 files changed, 5 insertions(+), 352 deletions(-) delete mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py diff --git a/packages/openstef-models/src/openstef_models/estimators/hybrid.py b/packages/openstef-models/src/openstef_models/estimators/hybrid.py index 6e107917f..1660d8707 100644 --- a/packages/openstef-models/src/openstef_models/estimators/hybrid.py +++ b/packages/openstef-models/src/openstef_models/estimators/hybrid.py @@ -7,19 +7,14 @@ using stacking for robust multi-quantile regression, including serialization utilities. """ -from typing import Self - import numpy as np import numpy.typing as npt import pandas as pd from lightgbm import LGBMRegressor from sklearn.ensemble import StackingRegressor from sklearn.linear_model import QuantileRegressor -from skops.io import dumps, loads from xgboost import XGBRegressor -from openstef_core.exceptions import ModelLoadingError - class HybridQuantileRegressor: """Custom Hybrid regressor for multi-quantile estimation using sample weights.""" @@ -94,7 +89,7 @@ def __init__( # noqa: D107, PLR0913, PLR0917 verbose=3, passthrough=False, n_jobs=None, - cv=2, + cv=1, ) ) self.is_fitted: bool = False @@ -149,40 +144,3 @@ def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np. """ # noqa: D412 X = X.ffill().fillna(0) # type: ignore return np.column_stack([model.predict(X=X) for model in self._models]) # type: ignore - - def save_bytes(self) -> bytes: - """Serialize the model. - - Returns: - A string representation of the model. - """ - return dumps(self) - - @classmethod - def load_bytes(cls, model_bytes: bytes) -> Self: - """Deserialize the model from bytes using joblib. - - Args: - model_bytes : Bytes representing the serialized model. - - Returns: - An instance of LightGBMQuantileRegressor. - - Raises: - ModelLoadingError: If the deserialized object is not a HybridQuantileRegressor. - """ - trusted_types = [ - "collections.OrderedDict", - "lgbm.basic.Booster", - "lgbm.sklearn.LGBMRegressor", - "sklearn.utils._bunch.Bunch", - "xgboost.core.Booster", - "xgboost.sklearn.XGBRegressor", - "openstef_models.estimators.hybrid.HybridQuantileRegressor", - ] - instance = loads(model_bytes, trusted=trusted_types) - - if not isinstance(instance, cls): - raise ModelLoadingError("Deserialized object is not a HybridQuantileRegressor") - - return instance diff --git a/packages/openstef-models/src/openstef_models/estimators/lgbm.py b/packages/openstef-models/src/openstef_models/estimators/lgbm.py index cf149c54a..666f5d8cf 100644 --- a/packages/openstef-models/src/openstef_models/estimators/lgbm.py +++ b/packages/openstef-models/src/openstef_models/estimators/lgbm.py @@ -9,8 +9,6 @@ by a separate tree within the same boosting ensemble. The module also includes serialization utilities. """ -from typing import Any - import numpy as np import numpy.typing as npt import pandas as pd @@ -113,7 +111,7 @@ def fit( sample_weight: npt.NDArray[np.floating] | pd.Series | None = None, feature_name: list[str] | None = None, eval_set: list[tuple[pd.DataFrame, npt.NDArray[np.floating]]] | None = None, - eval_sample_weight: list[npt.NDArray[np.floating]] | list[pd.Series[Any]] | None = None, + eval_sample_weight: list[npt.NDArray[np.floating]] | list[pd.Series] | None = None, ) -> None: """Fit the multi-quantile regressor. diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index 838f2abba..2e9ff448b 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -152,5 +152,7 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: sample_interval=data.sample_interval, ) + # TODO(@MvLieshout, @Lars800): Make forecaster Explainable + __all__ = ["HybridForecaster", "HybridForecasterConfig", "HybridHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py deleted file mode 100644 index 9176fda14..000000000 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgblinear_forecaster.py +++ /dev/null @@ -1,305 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""LightGBM-based forecasting models for probabilistic energy forecasting. - -Provides gradient boosting tree models using LightGBM for multi-quantile energy -forecasting. Optimized for time series data with specialized loss functions and -comprehensive hyperparameter control for production forecasting workflows. -""" - -from typing import Literal, cast, override - -import numpy as np -import numpy.typing as npt -import pandas as pd -from pydantic import Field - -from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_core.exceptions import ( - NotFittedError, -) -from openstef_core.mixins import HyperParams -from openstef_models.estimators.lgbm import LGBMQuantileRegressor -from openstef_models.explainability.mixins import ExplainableForecaster -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig - - -class LgbLinearHyperParams(HyperParams): - """LgbLinear hyperparameters for gradient boosting tree models. - - Example: - Creating custom hyperparameters for deep trees with regularization: - - >>> hyperparams = LgbLinearHyperParams( - ... n_estimators=200, - ... max_depth=8, - ... learning_rate=0.1, - ... reg_alpha=0.1, - ... reg_lambda=1.0, - ... ) - - Note: - These parameters are optimized for probabilistic forecasting with - quantile regression. The default objective function is specialized - for magnitude-weighted pinball loss. - """ - - # Core Tree Boosting Parameters - - n_estimators: int = Field( - default=100, - description="Number of boosting rounds/trees to fit. Higher values may improve performance but " - "increase training time and risk overfitting.", - ) - learning_rate: float = Field( - default=0.1, - alias="eta", - description="Step size shrinkage used to prevent overfitting. Range: [0,1]. Lower values require " - "more boosting rounds.", - ) - max_depth: int = Field( - default=4, # Different from Factory Default (-1, unlimited) - description="Maximum depth of trees. Higher values capture more complex patterns but risk " - "overfitting. Range: [1,∞]", - ) - min_child_weight: float = Field( - default=1e-3, - description="Minimum sum of instance weight (hessian) needed in a child. Higher values prevent " - "overfitting. Range: [0,∞]", - ) - - min_data_in_leaf: int = Field( - default=20, - description="Minimum number of data points in a leaf. Higher values prevent overfitting. Range: [1,∞]", - ) - min_data_in_bin: int = Field( - default=20, - description="Minimum number of data points in a bin. Higher values prevent overfitting. Range: [1,∞]", - ) - - # Regularization - reg_alpha: float = Field( - default=0, - description="L1 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", - ) - reg_lambda: float = Field( - default=0.0, - description="L2 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", - ) - - # Tree Structure Control - num_leaves: int = Field( - default=31, - description="Maximum number of leaves. 0 means no limit. Only relevant when grow_policy='lossguide'.", - ) - - max_bin: int = Field( - default=256, - description="Maximum number of discrete bins for continuous features. Higher values may improve accuracy but " - "increase memory. Only for hist tree_method.", - ) - - # Subsampling Parameters - colsample_bytree: float = Field( - default=1, - description="Fraction of features used when constructing each tree. Range: (0,1]", - ) - - -class LgbLinearForecasterConfig(ForecasterConfig): - """Configuration for LgbLinear-based forecaster. - Extends HorizonForecasterConfig with LgbLinear-specific hyperparameters - and execution settings. - - Example: - Creating a LgbLinear forecaster configuration with custom hyperparameters: - >>> from datetime import timedelta - >>> from openstef_core.types import LeadTime, Quantile - >>> config = LgbLinearForecasterConfig( - ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], - ... horizons=[LeadTime(timedelta(hours=1))], - ... hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=6) - ... ) - """ # noqa: D205 - - hyperparams: LgbLinearHyperParams = LgbLinearHyperParams() - - # General Parameters - device: str = Field( - default="cpu", - description="Device for LgbLinear computation. Options: 'cpu', 'cuda', 'cuda:', 'gpu'", - ) - n_jobs: int = Field( - default=1, - description="Number of parallel threads for tree construction. -1 uses all available cores.", - ) - verbosity: Literal[0, 1, 2, 3] = Field( - default=0, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" - ) - - # General Parameters - random_state: int | None = Field( - default=None, - alias="seed", - description="Random seed for reproducibility. Controls tree structure randomness.", - ) - - early_stopping_rounds: int | None = Field( - default=0, - description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", - ) - - -MODEL_CODE_VERSION = 1 - - -class LgbLinearForecaster(Forecaster, ExplainableForecaster): - """LgbLinear-based forecaster for probabilistic energy forecasting. - - Implements gradient boosting trees using LgbLinear for multi-quantile forecasting. - Optimized for time series prediction with specialized loss functions and - comprehensive hyperparameter control suitable for production energy forecasting. - - The forecaster uses a multi-output strategy where each quantile is predicted - by separate trees within the same boosting ensemble. This approach provides - well-calibrated uncertainty estimates while maintaining computational efficiency. - - Invariants: - - fit() must be called before predict() to train the model - - Configuration quantiles determine the number of prediction outputs - - Model state is preserved across predict() calls after fitting - - Input features must match training data structure during prediction - - Example: - Basic forecasting workflow: - - >>> from datetime import timedelta - >>> from openstef_core.types import LeadTime, Quantile - >>> config = LgbLinearForecasterConfig( - ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], - ... horizons=[LeadTime(timedelta(hours=1))], - ... hyperparams=LgbLinearHyperParams(n_estimators=100, max_depth=6) - ... ) - >>> forecaster = LgbLinearForecaster(config) - >>> # forecaster.fit(training_data) - >>> # predictions = forecaster.predict(test_data) - - Note: - LgbLinear dependency is optional and must be installed separately. - The model automatically handles multi-quantile output and uses - magnitude-weighted pinball loss by default for better forecasting performance. - - See Also: - LgbLinearHyperParams: Detailed hyperparameter configuration options. - HorizonForecaster: Base interface for all forecasting models. - GBLinearForecaster: Alternative linear model using LgbLinear. - """ - - Config = LgbLinearForecasterConfig - HyperParams = LgbLinearHyperParams - - _config: LgbLinearForecasterConfig - _lgblinear_model: LGBMQuantileRegressor - - def __init__(self, config: LgbLinearForecasterConfig) -> None: - """Initialize LgbLinear forecaster with configuration. - - Creates an untrained LgbLinear regressor with the specified configuration. - The underlying LgbLinear model is configured for multi-output quantile - regression using the provided hyperparameters and execution settings. - - Args: - config: Complete configuration including hyperparameters, quantiles, - and execution settings for the LgbLinear model. - """ - self._config = config - - self._lgblinear_model = LGBMQuantileRegressor( - quantiles=[float(q) for q in config.quantiles], - linear_tree=True, - random_state=config.random_state, - early_stopping_rounds=config.early_stopping_rounds, - verbosity=config.verbosity, - **config.hyperparams.model_dump(), - ) - - @property - @override - def config(self) -> ForecasterConfig: - return self._config - - @property - @override - def hyperparams(self) -> LgbLinearHyperParams: - return self._config.hyperparams - - @property - @override - def is_fitted(self) -> bool: - return self._lgblinear_model.__sklearn_is_fitted__() - - @override - def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: - input_data: pd.DataFrame = data.input_data() - target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore - sample_weight = data.sample_weight_series - - # Prepare validation data if provided - eval_set = None - eval_sample_weight = None - if data_val is not None: - val_input_data: pd.DataFrame = data_val.input_data() - val_target: npt.NDArray[np.floating] = data_val.target_series.to_numpy() # type: ignore - val_sample_weight = cast(npt.NDArray[np.floating], data_val.sample_weight_series.to_numpy()) # type: ignore - eval_set = [(val_input_data, val_target)] - - eval_sample_weight = [val_sample_weight] - - self._lgblinear_model.fit( # type: ignore - X=input_data, - y=target, - feature_name=input_data.columns.tolist(), - sample_weight=sample_weight, - eval_set=eval_set, - eval_sample_weight=eval_sample_weight, - ) - - @override - def predict(self, data: ForecastInputDataset) -> ForecastDataset: - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - input_data: pd.DataFrame = data.input_data(start=data.forecast_start) - prediction: npt.NDArray[np.floating] = self._lgblinear_model.predict(X=input_data) - - return ForecastDataset( - data=pd.DataFrame( - data=prediction, - index=input_data.index, - columns=[quantile.format() for quantile in self.config.quantiles], - ), - sample_interval=data.sample_interval, - ) - - @property - @override - def feature_importances(self) -> pd.DataFrame: - models = self._lgblinear_model._models # noqa: SLF001 - weights_df = pd.DataFrame( - [models[i].feature_importances_ for i in range(len(models))], - index=[quantile.format() for quantile in self.config.quantiles], - columns=models[0].feature_name_, - ).transpose() - - weights_df.index.name = "feature_name" - weights_df.columns.name = "quantiles" - - weights_abs = weights_df.abs() - total = weights_abs.sum(axis=0).replace(to_replace=0, value=1.0) # pyright: ignore[reportUnknownMemberType] - - return weights_abs / total - - -__all__ = ["LgbLinearForecaster", "LgbLinearForecasterConfig", "LgbLinearHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index 0c4442328..50dd9f50b 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -11,6 +11,7 @@ from typing import TYPE_CHECKING, Literal, override +import numpy as np import pandas as pd from pydantic import Field @@ -24,7 +25,6 @@ from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig if TYPE_CHECKING: - import numpy as np import numpy.typing as npt From 34fc3e5af68b89eec8ec39c168bbbd773c6e3cfd Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 12 Nov 2025 11:41:01 +0100 Subject: [PATCH 010/104] Added SkopsModelSerializer --- .../integrations/skops/__init__.py | 15 +++ .../skops/skops_model_serializer.py | 105 ++++++++++++++++++ .../mixins/model_serializer.py | 3 +- .../presets/forecasting_workflow.py | 2 +- .../tests/unit/integrations/skops/__init__.py | 5 + .../skops/test_skops_model_serializer.py | 72 ++++++++++++ 6 files changed, 199 insertions(+), 3 deletions(-) create mode 100644 packages/openstef-models/src/openstef_models/integrations/skops/__init__.py create mode 100644 packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py create mode 100644 packages/openstef-models/tests/unit/integrations/skops/__init__.py create mode 100644 packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py diff --git a/packages/openstef-models/src/openstef_models/integrations/skops/__init__.py b/packages/openstef-models/src/openstef_models/integrations/skops/__init__.py new file mode 100644 index 000000000..16fcbd789 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/integrations/skops/__init__.py @@ -0,0 +1,15 @@ +"""Joblib-based model storage integration. + +Provides local file-based model persistence using Skops for serialization. +This integration provides a safe way for storing and loading ForecastingModel instances on +the local filesystem, making it suitable for development, testing, and +single-machine deployments. +""" + +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from .skops_model_serializer import SkopsModelSerializer + +__all__ = ["SkopsModelSerializer"] diff --git a/packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py b/packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py new file mode 100644 index 000000000..6296d3abb --- /dev/null +++ b/packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py @@ -0,0 +1,105 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Local model storage implementation using joblib serialization. + +Provides file-based persistence for ForecastingModel instances using joblib's +pickle-based serialization. This storage backend is suitable for development, +testing, and single-machine deployments where models need to be persisted +to the local filesystem. +""" + +from typing import BinaryIO, ClassVar, override + +from openstef_core.exceptions import MissingExtraError +from openstef_models.mixins.model_serializer import ModelSerializer + +try: + from skops.io import dump, get_untrusted_types, load +except ImportError as e: + raise MissingExtraError("joblib", package="openstef-models") from e + + +class SkopsModelSerializer(ModelSerializer): + """File-based model storage using joblib serialization. + + Provides persistent storage for ForecastingModel instances on the local + filesystem. Models are serialized using joblib and stored as pickle files + in the specified directory. + + This storage implementation is suitable for development, testing, and + single-machine deployments where simple file-based persistence is sufficient. + + Note: + joblib.dump() and joblib.load() are based on the Python pickle serialization model, + which means that arbitrary Python code can be executed when loading a serialized object + with joblib.load(). + + joblib.load() should therefore never be used to load objects from an untrusted source + or otherwise you will introduce a security vulnerability in your program. + + Invariants: + - Models are stored as .pkl files in the configured storage directory + - Model files use the pattern: {model_id}.pkl + - Storage directory is created automatically if it doesn't exist + - Load operations fail with ModelNotFoundError if model file doesn't exist + + Example: + Basic usage with model persistence: + + >>> from pathlib import Path + >>> from openstef_models.models.forecasting_model import ForecastingModel + >>> storage = LocalModelStorage(storage_dir=Path("./models")) # doctest: +SKIP + >>> storage.save_model("my_model", my_forecasting_model) # doctest: +SKIP + >>> loaded_model = storage.load_model("my_model") # doctest: +SKIP + """ + + extension: ClassVar[str] = ".skops" + + @override + def serialize(self, model: object, file: BinaryIO) -> None: + dump(model, file) # type: ignore[reportUnknownMemberType] + + @staticmethod + def _get_stateful_types() -> set[str]: + return { + "tests.unit.integrations.skops.test_skops_model_serializer.SimpleSerializableModel", + "openstef_core.mixins.predictor.BatchPredictor", + "openstef_models.models.forecasting.forecaster.Forecaster", + "openstef_models.models.forecasting.xgboost_forecaster.XGBoostForecaster", + "openstef_models.models.component_splitting_model.ComponentSplittingModel", + "openstef_core.mixins.transform.TransformPipeline", + "openstef_core.mixins.transform.TransformPipeline[EnergyComponentDataset]", + "openstef_core.mixins.transform.TransformPipeline[TimeSeriesDataset]", + "openstef_models.models.forecasting.lgbm_forecaster.LGBMForecaster", + "openstef_models.models.component_splitting.component_splitter.ComponentSplitter", + "openstef_models.models.forecasting_model.ForecastingModel", + "openstef_core.mixins.transform.Transform", + "openstef_core.mixins.transform.TransformPipeline[ForecastDataset]", + "openstef_core.mixins.predictor.Predictor", + "openstef_models.models.forecasting.lgbmlinear_forecaster.LGBMLinearForecaster", + } + + @override + def deserialize(self, file: BinaryIO) -> object: + """Load a model's state from a binary file and restore it. + + Returns: + The restored model instance. + + Raises: + ValueError: If no safe types are found in the serialized model. + """ + safe_types = self._get_stateful_types() + + # Weak security measure that checks a safe class is present. + # Can be improved to ensure no unsafe classes are present. + model_types: set[str] = set(get_untrusted_types(file=file)) # type: ignore + + if len(safe_types.intersection(model_types)) == 0: + raise ValueError("Deserialization aborted: No safe types found in the serialized model.") + + return load(file, trusted=list(model_types)) # type: ignore[reportUnknownMemberType] + + +__all__ = ["SkopsModelSerializer"] diff --git a/packages/openstef-models/src/openstef_models/mixins/model_serializer.py b/packages/openstef-models/src/openstef_models/mixins/model_serializer.py index 1167a9930..40e74a52a 100644 --- a/packages/openstef-models/src/openstef_models/mixins/model_serializer.py +++ b/packages/openstef-models/src/openstef_models/mixins/model_serializer.py @@ -34,6 +34,7 @@ class ModelSerializer(BaseConfig, ABC): See Also: JoblibModelSerializer: Concrete implementation using joblib. + SkopsModelSerializer: Concrete implementation using skops. """ extension: ClassVar[str] @@ -69,6 +70,4 @@ def deserialize(self, file: BinaryIO) -> object: """ -# TODO @egordm, @MvLieshout : Add SkopsModelSerializer implementation - __all__ = ["ModelIdentifier", "ModelSerializer"] diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 1f5fdaba0..999ed701f 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -31,8 +31,8 @@ from openstef_models.models.forecasting.flatliner_forecaster import FlatlinerForecaster from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster from openstef_models.models.forecasting.hybrid_forecaster import HybridForecaster -from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster from openstef_models.transforms.energy_domain import WindPowerFeatureAdder from openstef_models.transforms.general import ( diff --git a/packages/openstef-models/tests/unit/integrations/skops/__init__.py b/packages/openstef-models/tests/unit/integrations/skops/__init__.py new file mode 100644 index 000000000..63d543f53 --- /dev/null +++ b/packages/openstef-models/tests/unit/integrations/skops/__init__.py @@ -0,0 +1,5 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +__all__ = [] diff --git a/packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py b/packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py new file mode 100644 index 000000000..8d4bb9eb7 --- /dev/null +++ b/packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py @@ -0,0 +1,72 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from __future__ import annotations + +from io import BytesIO +from typing import TYPE_CHECKING + +import pytest + +from openstef_core.mixins import Stateful +from openstef_core.types import LeadTime, Q +from openstef_models.integrations.skops.skops_model_serializer import SkopsModelSerializer +from openstef_models.models.forecasting.forecaster import ForecasterConfig +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster +from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster + +if TYPE_CHECKING: + from openstef_models.models.forecasting.forecaster import Forecaster + + +class SimpleSerializableModel(Stateful): + """A simple model class that can be pickled for testing.""" + + def __init__(self) -> None: + self.target_column = "load" + self.is_fitted = True + + +def test_skops_model_serializer__roundtrip__preserves_model_integrity(): + """Test complete serialize/deserialize roundtrip preserves model state.""" + # Arrange + buffer = BytesIO() + serializer = SkopsModelSerializer() + model = SimpleSerializableModel() + + # Act - Serialize then deserialize + serializer.serialize(model, buffer) + buffer.seek(0) + restored_model = serializer.deserialize(buffer) + + # Assert - Model state should be identical + assert isinstance(restored_model, SimpleSerializableModel) + assert restored_model.target_column == model.target_column + assert restored_model.is_fitted == model.is_fitted + + +@pytest.mark.parametrize( + "forecaster_class", + [ + XGBoostForecaster, + LGBMForecaster, + LGBMLinearForecaster, + ], +) +def test_skops_works_with_different_forecasters(forecaster_class: type[Forecaster]): + buffer = BytesIO() + serializer = SkopsModelSerializer() + + config: ForecasterConfig = forecaster_class.Config(horizons=[LeadTime.from_string("PT12H")], quantiles=[Q(0.5)]) # type: ignore + assert isinstance(config, ForecasterConfig) + forecaster = forecaster_class(config=config) + + # Act - Serialize then deserialize + serializer.serialize(forecaster, buffer) + buffer.seek(0) + restored_model = serializer.deserialize(buffer) + + # Assert - Model state should be identical + assert isinstance(restored_model, forecaster.__class__) From bad4c449bc0a9180d2c6fca1d9e30bb9753f534f Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 12 Nov 2025 12:32:01 +0100 Subject: [PATCH 011/104] Fixed issues --- .../src/openstef_models/models/forecasting/hybrid_forecaster.py | 2 +- .../tests/unit/models/forecasting/test_lgbm_forecaster.py | 2 +- .../tests/unit/models/forecasting/test_lgbmlinear_forecaster.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index 2e9ff448b..a3f2849a3 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -152,7 +152,7 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: sample_interval=data.sample_interval, ) - # TODO(@MvLieshout, @Lars800): Make forecaster Explainable + # TODO(@Lars800): #745: Make forecaster Explainable __all__ = ["HybridForecaster", "HybridForecasterConfig", "HybridHyperParams"] diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py index 47bed1774..b4fe1c989 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py @@ -146,4 +146,4 @@ def test_lgbm_forecaster__feature_importances( assert (feature_importances >= 0).all().all() -# TODO : Add tests on different loss functions @MvLieshout +# TODO(@MvLieshout): Add tests on different loss functions # noqa: TD003 diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py index 1a2ce31cf..cc4b4701e 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py @@ -146,4 +146,4 @@ def test_lgbmlinear_forecaster__feature_importances( assert (feature_importances >= 0).all().all() -# TODO : Add tests on different loss functions @MvLieshout +# TODO(@MvLieshout): Add tests on different loss functions # noqa: TD003 From 99c9bc5e8e3dbed522ae7874c1bc78d14ea1c202 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 13 Nov 2025 12:00:44 +0100 Subject: [PATCH 012/104] Gitignore optimization and dev sandbox --- .gitignore | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index b181a07c5..81c981c66 100644 --- a/.gitignore +++ b/.gitignore @@ -123,5 +123,7 @@ certificates/ *.html *.pkl -# Benchmark outputs -benchmark_results/ \ No newline at end of file +# Experiment outputs +benchmark_results/ +optimization_results/ +dev_sandbox/ \ No newline at end of file From 4027de7fd0d0834da2b636ea1866cdaec3995857 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 13 Nov 2025 14:48:53 +0100 Subject: [PATCH 013/104] Added MultiQuantileAdapter Class --- .../utils/multi_quantile_regressor.py | 119 ++++++++++++++++++ .../tests/unit/estimators/test_lgbm.py | 42 ------- .../utils/test_multi_quantile_regressor.py | 107 ++++++++++++++++ 3 files changed, 226 insertions(+), 42 deletions(-) create mode 100644 packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py delete mode 100644 packages/openstef-models/tests/unit/estimators/test_lgbm.py create mode 100644 packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py diff --git a/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py new file mode 100644 index 000000000..d48e01b1e --- /dev/null +++ b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py @@ -0,0 +1,119 @@ +import logging + +import numpy as np +import numpy.typing as npt +import pandas as pd +from sklearn.base import BaseEstimator, RegressorMixin + +logger = logging.getLogger(__name__) + +ParamType = float | int | str | bool | None + + +class MultiQuantileRegressor(BaseEstimator, RegressorMixin): + """Adaptor for multi-quantile regression using a base quantile regressor. + + This class creates separate instances of a given quantile regressor for each quantile + and manages their training and prediction. + """ + + def __init__( + self, + base_learner: type[BaseEstimator], + quantile_param: str, + quantiles: list[float], + hyperparams: dict[str, ParamType], + ): + """Initialize MultiQuantileRegressor. + + This is an adaptor that allows any quantile-capable regressor to predict multiple quantiles + by instantiating separate models for each quantile. + + Args: + base_learner: A scikit-learn compatible regressor class that supports quantile regression. + quantile_param: The name of the parameter in base_learner that sets the quantile level. + quantiles: List of quantiles to predict (e.g., [0.1, 0.5, 0.9]). + hyperparams: Dictionary of hyperparameters to pass to each base learner instance. + """ + self.quantiles = quantiles + self.hyperparams = hyperparams + self.quantile_param = quantile_param + self.base_learner = base_learner + self.is_fitted = False + self._models = [self._init_model(q) for q in quantiles] + + def _init_model(self, q: float) -> BaseEstimator: + params = self.hyperparams.copy() + params[self.quantile_param] = q + base_learner = self.base_learner(**params) + + try: + q == base_learner.get_params()[self.quantile_param] # type: ignore + except AttributeError as e: + msg = f"The base learner does not support the quantile parameter '{self.quantile_param}'." + raise ValueError(msg) from e + + return base_learner + + def fit( + self, + X: npt.NDArray[np.floating] | pd.DataFrame, + y: npt.NDArray[np.floating] | pd.Series, + sample_weight: npt.NDArray[np.floating] | pd.Series | None = None, + feature_name: list[str] | None = None, + eval_set: list[tuple[pd.DataFrame, npt.NDArray[np.floating]]] | None = None, + eval_sample_weight: list[npt.NDArray[np.floating]] | list[pd.Series] | None = None, + ) -> None: + """Fit the multi-quantile regressor. + + Args: + X: Input features as a DataFrame. + y: Target values as a 2D array where each column corresponds to a quantile. + sample_weight: Sample weights for training data. + feature_name: List of feature names. + eval_set: Evaluation set for early stopping. + eval_sample_weight: Sample weights for evaluation data. + """ + for model in self._models: + if eval_set is None and "early_stopping_rounds" in self.hyperparams: + model.set_params(early_stopping_rounds=None) # type: ignore + elif "early_stopping_rounds" in self.hyperparams: + model.set_params(early_stopping_rounds=self.hyperparams.early_stopping_rounds) # type: ignore + + if eval_set or eval_sample_weight: + logger.warning( + "Evaluation sets or sample weights provided, but MultiQuantileRegressor does not currently support " + "these during fitting." + ) + + if feature_name: + logger.warning( + "Feature names provided, but MultiQuantileRegressor does not currently support feature names during fitting." + ) + model.fit( # type: ignore + X=np.asarray(X), + y=y, + sample_weight=sample_weight, + ) + self.is_fitted = True + + def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: + """Predict quantiles for the input features. + + Args: + X: Input features as a DataFrame. + + Returns: + + A 2D array where each column corresponds to predicted quantiles. + """ # noqa: D412 + return np.column_stack([model.predict(X=X) for model in self._models]) # type: ignore + + @property + def models(self) -> list[BaseEstimator]: + """Get the list of underlying quantile models. + + Returns: + List of BaseEstimator instances for each quantile. + """ + return self._models diff --git a/packages/openstef-models/tests/unit/estimators/test_lgbm.py b/packages/openstef-models/tests/unit/estimators/test_lgbm.py deleted file mode 100644 index 5dfa0bb5b..000000000 --- a/packages/openstef-models/tests/unit/estimators/test_lgbm.py +++ /dev/null @@ -1,42 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -import pandas as pd -import pytest -from numpy.random import default_rng - -from openstef_models.estimators.lgbm import LGBMQuantileRegressor - - -@pytest.fixture -def dataset() -> tuple[pd.DataFrame, pd.Series]: - n_samples = 100 - n_features = 5 - rng = default_rng() - X = pd.DataFrame(rng.random((n_samples, n_features))) - y = pd.Series(rng.random(n_samples)) - return X, y - - -def test_init_sets_quantiles_and_models(): - quantiles = [0.1, 0.5, 0.9] - model = LGBMQuantileRegressor(quantiles=quantiles, linear_tree=False) - assert model.quantiles == quantiles - assert len(model._models) == len(quantiles) - - -def test_fit_and_predict_shape(dataset: tuple[pd.DataFrame, pd.Series]): - quantiles = [0.1, 0.5, 0.9] - X, y = dataset[0], dataset[1] - model = LGBMQuantileRegressor(quantiles=quantiles, linear_tree=False, n_estimators=5) - model.fit(X, y) - preds = model.predict(X) - assert preds.shape == (X.shape[0], len(quantiles)) - - -def test_sklearn_is_fitted_true_after_fit(dataset: tuple[pd.DataFrame, pd.Series]): - quantiles = [0.1, 0.5, 0.9] - X, y = dataset[0], dataset[1] - model = LGBMQuantileRegressor(quantiles=quantiles, linear_tree=False, n_estimators=2) - model.fit(X, y) - assert model.__sklearn_is_fitted__() diff --git a/packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py b/packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py new file mode 100644 index 000000000..d2e8ad7be --- /dev/null +++ b/packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py @@ -0,0 +1,107 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +import pandas as pd +import pytest +from lightgbm import LGBMRegressor +from numpy.random import default_rng +from pydantic import BaseModel +from sklearn.base import BaseEstimator +from sklearn.linear_model import QuantileRegressor +from xgboost import XGBRegressor + +from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor, ParamType + +ParamDict = dict[str, ParamType] +BaseLearner = BaseEstimator + + +class BaseLearnerConfig(BaseModel): + base_learner: type[BaseLearner] + quantile_param: str + hyperparams: ParamDict + + +@pytest.fixture +def dataset() -> tuple[pd.DataFrame, pd.Series]: + n_samples = 100 + n_features = 5 + rng = default_rng() + X = pd.DataFrame(rng.random((n_samples, n_features))) + y = pd.Series(rng.random(n_samples)) + return X, y + + +@pytest.fixture(params=["sklearn_quantile", "lgbm", "xgboost"]) +def baselearner_config(request: pytest.FixtureRequest) -> BaseLearnerConfig: # type : ignore + model: str = request.param + if model == "sklearn_quantile": + return BaseLearnerConfig( + base_learner=QuantileRegressor, + quantile_param="quantile", + hyperparams={"alpha": 0.1, "solver": "highs", "fit_intercept": True}, + ) + if model == "lgbm": + return BaseLearnerConfig( + base_learner=LGBMRegressor, # type: ignore + quantile_param="alpha", + hyperparams={ + "objective": "quantile", + "n_estimators": 10, + "learning_rate": 0.1, + "max_depth": -1, + }, + ) + return BaseLearnerConfig( + base_learner=XGBRegressor, + quantile_param="quantile_alpha", + hyperparams={ + "objective": "reg:quantileerror", + "n_estimators": 10, + "learning_rate": 0.1, + "max_depth": 3, + }, + ) + + +def test_init_sets_quantiles_and_models(baselearner_config: BaseLearnerConfig): + quantiles = [0.1, 0.5, 0.9] + + model = MultiQuantileRegressor( + base_learner=baselearner_config.base_learner, + quantile_param=baselearner_config.quantile_param, + quantiles=quantiles, + hyperparams=baselearner_config.hyperparams, + ) + + assert model.quantiles == quantiles + assert len(model._models) == len(quantiles) + + +def test_fit_and_predict_shape(dataset: tuple[pd.DataFrame, pd.Series], baselearner_config: BaseLearnerConfig): + quantiles = [0.1, 0.5, 0.9] + + X, y = dataset[0], dataset[1] + model = MultiQuantileRegressor( + base_learner=baselearner_config.base_learner, + quantile_param=baselearner_config.quantile_param, + quantiles=quantiles, + hyperparams=baselearner_config.hyperparams, + ) + + model.fit(X, y) + preds = model.predict(X) + assert preds.shape == (X.shape[0], len(quantiles)) + + +def test_is_fitted_true_after_fit(dataset: tuple[pd.DataFrame, pd.Series], baselearner_config: BaseLearnerConfig): + quantiles = [0.1, 0.5, 0.9] + X, y = dataset[0], dataset[1] + model = MultiQuantileRegressor( + base_learner=baselearner_config.base_learner, + quantile_param=baselearner_config.quantile_param, + quantiles=quantiles, + hyperparams=baselearner_config.hyperparams, + ) + model.fit(X, y) + assert model.is_fitted From 064a92db8dd5910615149b5e2e750cf09c1104ad Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 13 Nov 2025 14:50:49 +0100 Subject: [PATCH 014/104] small fix --- .../src/openstef_models/utils/multi_quantile_regressor.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py index d48e01b1e..e96a92993 100644 --- a/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py +++ b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py @@ -47,11 +47,9 @@ def _init_model(self, q: float) -> BaseEstimator: params[self.quantile_param] = q base_learner = self.base_learner(**params) - try: - q == base_learner.get_params()[self.quantile_param] # type: ignore - except AttributeError as e: + if self.quantile_param not in base_learner.get_params(): # type: ignore msg = f"The base learner does not support the quantile parameter '{self.quantile_param}'." - raise ValueError(msg) from e + raise ValueError(msg) return base_learner From ed83b3a9d5bce96b8e04d6c00d5c910af5b9bef7 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 17 Nov 2025 15:43:36 +0100 Subject: [PATCH 015/104] Hybrid V2 --- .../openstef_models/estimators/__init__.py | 4 +- .../src/openstef_models/estimators/lgbm.py | 168 ----------- .../models/forecasting/forecaster.py | 9 + .../models/forecasting/gblinear_forecaster.py | 17 ++ .../models/forecasting/hybrid_forecaster.py | 273 +++++++++++++----- .../models/forecasting/lgbm_forecaster.py | 48 ++- .../forecasting/lgbmlinear_forecaster.py | 85 ++++-- .../models/forecasting/xgboost_forecaster.py | 17 ++ .../utils/multi_quantile_regressor.py | 52 +++- .../forecasting/test_hybrid_forecaster.py | 48 +-- 10 files changed, 393 insertions(+), 328 deletions(-) delete mode 100644 packages/openstef-models/src/openstef_models/estimators/lgbm.py diff --git a/packages/openstef-models/src/openstef_models/estimators/__init__.py b/packages/openstef-models/src/openstef_models/estimators/__init__.py index 2b2e5ebb4..07a4cbc99 100644 --- a/packages/openstef-models/src/openstef_models/estimators/__init__.py +++ b/packages/openstef-models/src/openstef_models/estimators/__init__.py @@ -4,6 +4,4 @@ """Custom estimators for multi quantiles.""" -from .lgbm import LGBMQuantileRegressor - -__all__ = ["LGBMQuantileRegressor"] +__all__ = [] diff --git a/packages/openstef-models/src/openstef_models/estimators/lgbm.py b/packages/openstef-models/src/openstef_models/estimators/lgbm.py deleted file mode 100644 index 666f5d8cf..000000000 --- a/packages/openstef-models/src/openstef_models/estimators/lgbm.py +++ /dev/null @@ -1,168 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""Custom LightGBM regressor for multi-quantile regression. - -This module provides the LGBMQuantileRegressor class, which extends LightGBM's LGBMRegressor -to support multi-quantile output by configuring the objective function accordingly. Each quantile is predicted -by a separate tree within the same boosting ensemble. The module also includes serialization utilities. -""" - -import numpy as np -import numpy.typing as npt -import pandas as pd -from lightgbm import LGBMRegressor -from sklearn.base import BaseEstimator, RegressorMixin - - -class LGBMQuantileRegressor(BaseEstimator, RegressorMixin): - """Custom LightGBM regressor for multi-quantile regression. - - Extends LGBMRegressor to support multi-quantile output by configuring - the objective function accordingly. Each quantile is predicted by a - separate tree within the same boosting ensemble. - """ - - def __init__( # noqa: PLR0913, PLR0917 - self, - quantiles: list[float], - linear_tree: bool, # noqa: FBT001 - n_estimators: int = 100, - learning_rate: float = 0.1, - max_depth: int = -1, - min_child_weight: float = 1.0, - min_data_in_leaf: int = 20, - min_data_in_bin: int = 10, - reg_alpha: float = 0.0, - reg_lambda: float = 0.0, - num_leaves: int = 31, - max_bin: int = 255, - colsample_bytree: float = 1.0, - random_state: int | None = None, - early_stopping_rounds: int | None = None, - verbosity: int = -1, - ) -> None: - """Initialize LgbLinearQuantileRegressor with quantiles. - - Args: - quantiles: List of quantiles to predict (e.g., [0.1, 0.5, 0.9]). - linear_tree: Whether to use linear trees. - n_estimators: Number of boosting rounds/trees to fit. - learning_rate: Step size shrinkage used to prevent overfitting. - max_depth: Maximum depth of trees. - min_child_weight: Minimum sum of instance weight (hessian) needed in a child. - min_data_in_leaf: Minimum number of data points in a leaf. - min_data_in_bin: Minimum number of data points in a bin. - reg_alpha: L1 regularization on leaf weights. - reg_lambda: L2 regularization on leaf weights. - num_leaves: Maximum number of leaves. - max_bin: Maximum number of discrete bins for continuous features. - colsample_bytree: Fraction of features used when constructing each tree. - random_state: Random seed for reproducibility. - early_stopping_rounds: Training will stop if performance doesn't improve for this many rounds. - verbosity: Verbosity level for LgbLinear training. - - """ - self.quantiles = quantiles - self.linear_tree = linear_tree - self.n_estimators = n_estimators - self.learning_rate = learning_rate - self.max_depth = max_depth - self.min_child_weight = min_child_weight - self.min_data_in_leaf = min_data_in_leaf - self.min_data_in_bin = min_data_in_bin - self.reg_alpha = reg_alpha - self.reg_lambda = reg_lambda - self.num_leaves = num_leaves - self.max_bin = max_bin - self.colsample_bytree = colsample_bytree - self.random_state = random_state - self.early_stopping_rounds = early_stopping_rounds - self.verbosity = verbosity - - self._models: list[LGBMRegressor] = [ - LGBMRegressor( - objective="quantile", - alpha=q, - n_estimators=n_estimators, - learning_rate=learning_rate, - max_depth=max_depth, - min_child_weight=min_child_weight, - min_data_in_leaf=min_data_in_leaf, - min_data_in_bin=min_data_in_bin, - reg_alpha=reg_alpha, - reg_lambda=reg_lambda, - num_leaves=num_leaves, - max_bin=max_bin, - colsample_bytree=colsample_bytree, - random_state=random_state, - early_stopping_rounds=early_stopping_rounds, - verbosity=verbosity, - linear_tree=linear_tree, - ) - for q in quantiles # type: ignore - ] - - def fit( - self, - X: npt.NDArray[np.floating] | pd.DataFrame, - y: npt.NDArray[np.floating] | pd.Series, - sample_weight: npt.NDArray[np.floating] | pd.Series | None = None, - feature_name: list[str] | None = None, - eval_set: list[tuple[pd.DataFrame, npt.NDArray[np.floating]]] | None = None, - eval_sample_weight: list[npt.NDArray[np.floating]] | list[pd.Series] | None = None, - ) -> None: - """Fit the multi-quantile regressor. - - Args: - X: Input features as a DataFrame. - y: Target values as a 2D array where each column corresponds to a quantile. - sample_weight: Sample weights for training data. - feature_name: List of feature names. - eval_set: Evaluation set for early stopping. - eval_sample_weight: Sample weights for evaluation data. - """ - for model in self._models: - if eval_set is None: - model.set_params(early_stopping_rounds=None) - else: - model.set_params(early_stopping_rounds=self.early_stopping_rounds) - model.fit( # type: ignore - X=np.asarray(X), - y=y, - eval_metric="quantile", - sample_weight=sample_weight, - eval_set=eval_set, - eval_sample_weight=eval_sample_weight, - feature_name=feature_name if feature_name is not None else "auto", - ) - - def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: - """Predict quantiles for the input features. - - Args: - X: Input features as a DataFrame. - - Returns: - - A 2D array where each column corresponds to predicted quantiles. - """ # noqa: D412 - return np.column_stack([model.predict(X=X) for model in self._models]) # type: ignore - - def __sklearn_is_fitted__(self) -> bool: # noqa: PLW3201 - """Check if all models are fitted. - - Returns: - True if all quantile models are fitted, False otherwise. - """ - return all(model.__sklearn_is_fitted__() for model in self._models) - - @property - def models(self) -> list[LGBMRegressor]: - """Get the list of underlying quantile models. - - Returns: - List of LGBMRegressor instances for each quantile. - """ - return self._models diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py index d77e50fda..9628c61e3 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py @@ -111,6 +111,15 @@ def with_horizon(self, horizon: LeadTime) -> Self: """ return self.model_copy(update={"horizons": [horizon]}) + @classmethod + def forecaster_class(cls) -> type["Forecaster"]: + """Get the associated Forecaster class for this configuration. + + Returns: + The Forecaster class that uses this configuration. + """ + raise NotImplementedError("Subclasses must implement forecaster_class") + class ConfigurableForecaster: @property diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index 974e9a077..0bc4a1c2f 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -83,6 +83,15 @@ class GBLinearHyperParams(HyperParams): description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", ) + @classmethod + def forecaster_class(cls) -> "type[GBLinearForecaster]": + """Forecaster class for these hyperparams. + + Returns: + Forecaster class associated with this configuration. + """ + return GBLinearForecaster + class GBLinearForecasterConfig(ForecasterConfig): """Configuration for GBLinear forecaster.""" @@ -107,6 +116,14 @@ class GBLinearForecasterConfig(ForecasterConfig): default=1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" ) + def forecaster_from_config(self) -> "GBLinearForecaster": + """Create a GBLinearForecaster instance from this configuration. + + Returns: + Forecaster instance associated with this configuration. + """ + return GBLinearForecaster(config=self) + MODEL_CODE_VERSION = 1 diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index a3f2849a3..eba72a66d 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -10,11 +10,11 @@ """ import logging -from typing import TYPE_CHECKING, override +from typing import override -import numpy as np import pandas as pd -from pydantic import Field +from pydantic import Field, field_validator +from sklearn.linear_model import QuantileRegressor from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( @@ -22,27 +22,59 @@ ) from openstef_core.mixins import HyperParams from openstef_models.estimators.hybrid import HybridQuantileRegressor -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig -from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams -from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +from openstef_models.models.forecasting.forecaster import ( + Forecaster, + ForecasterConfig, +) +from openstef_models.models.forecasting.gblinear_forecaster import ( + GBLinearForecaster, + GBLinearForecasterConfig, + GBLinearHyperParams, +) +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMForecasterConfig, LGBMHyperParams +from openstef_models.models.forecasting.lgbmlinear_forecaster import ( + LGBMLinearForecaster, + LGBMLinearForecasterConfig, + LGBMLinearHyperParams, +) +from openstef_models.models.forecasting.xgboost_forecaster import ( + XGBoostForecaster, + XGBoostForecasterConfig, + XGBoostHyperParams, +) logger = logging.getLogger(__name__) -if TYPE_CHECKING: - import numpy.typing as npt + +BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster +BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams +BaseLearnerConfig = ( + LGBMForecasterConfig | LGBMLinearForecasterConfig | XGBoostForecasterConfig | GBLinearForecasterConfig +) class HybridHyperParams(HyperParams): """Hyperparameters for Stacked LGBM GBLinear Regressor.""" - lgbm_params: LGBMHyperParams = LGBMHyperParams() - gb_linear_params: GBLinearHyperParams = GBLinearHyperParams() + base_hyperparams: list[BaseLearnerHyperParams] = Field( + default=[LGBMHyperParams(), GBLinearHyperParams()], + description="List of hyperparameter configurations for base learners. " + "Defaults to [LGBMHyperParams, GBLinearHyperParams].", + ) l1_penalty: float = Field( default=0.0, description="L1 regularization term for the quantile regression.", ) + @field_validator("base_hyperparams", mode="after") + @classmethod + def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: + hp_classes = [type(hp) for hp in v] + if not len(hp_classes) == len(set(hp_classes)): + raise ValueError("Duplicate base learner hyperparameter classes are not allowed.") + return v + class HybridForecasterConfig(ForecasterConfig): """Configuration for Hybrid-based forecasting models.""" @@ -55,9 +87,6 @@ class HybridForecasterConfig(ForecasterConfig): ) -MODEL_CODE_VERSION = 2 - - class HybridForecaster(Forecaster): """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" @@ -71,46 +100,86 @@ def __init__(self, config: HybridForecasterConfig) -> None: """Initialize the Hybrid forecaster.""" self._config = config - self._model = HybridQuantileRegressor( - quantiles=[float(q) for q in config.quantiles], - lgbm_n_estimators=config.hyperparams.lgbm_params.n_estimators, - lgbm_learning_rate=config.hyperparams.lgbm_params.learning_rate, - lgbm_max_depth=config.hyperparams.lgbm_params.max_depth, - lgbm_min_child_weight=config.hyperparams.lgbm_params.min_child_weight, - lgbm_min_data_in_leaf=config.hyperparams.lgbm_params.min_data_in_leaf, - lgbm_min_data_in_bin=config.hyperparams.lgbm_params.min_data_in_bin, - lgbm_reg_alpha=config.hyperparams.lgbm_params.reg_alpha, - lgbm_reg_lambda=config.hyperparams.lgbm_params.reg_lambda, - lgbm_num_leaves=config.hyperparams.lgbm_params.num_leaves, - lgbm_max_bin=config.hyperparams.lgbm_params.max_bin, - lgbm_colsample_by_tree=config.hyperparams.lgbm_params.colsample_bytree, - gblinear_n_steps=config.hyperparams.gb_linear_params.n_steps, - gblinear_learning_rate=config.hyperparams.gb_linear_params.learning_rate, - gblinear_reg_alpha=config.hyperparams.gb_linear_params.reg_alpha, - gblinear_reg_lambda=config.hyperparams.gb_linear_params.reg_lambda, + self._base_learners: list[BaseLearner] = self._init_base_learners( + base_hyperparams=config.hyperparams.base_hyperparams ) + self._final_learner = [ + QuantileRegressor(quantile=float(q), alpha=config.hyperparams.l1_penalty) for q in config.quantiles + ] - @property - @override - def config(self) -> ForecasterConfig: - return self._config + self._is_fitted: bool = False @property + @override def is_fitted(self) -> bool: - """Check if the model is fitted.""" - return self._model.is_fitted + return self._is_fitted + + @staticmethod + def _hyperparams_forecast_map(hyperparams: type[BaseLearnerHyperParams]) -> type[BaseLearner]: + """Map hyperparameters to forecast types. + + Args: + hyperparams: Hyperparameters of the base learner. + + Returns: + Corresponding Forecaster class. + + Raises: + TypeError: If a nested HybridForecaster is attempted. + """ + if isinstance(hyperparams, HybridHyperParams): + raise TypeError("Nested HybridForecaster is not supported.") + + mapping: dict[type[BaseLearnerHyperParams], type[BaseLearner]] = { + LGBMHyperParams: LGBMForecaster, + LGBMLinearHyperParams: LGBMLinearForecaster, + XGBoostHyperParams: XGBoostForecaster, + GBLinearHyperParams: GBLinearForecaster, + } + return mapping[hyperparams] @staticmethod - def _prepare_fit_input(data: ForecastInputDataset) -> tuple[pd.DataFrame, np.ndarray, pd.Series]: - input_data: pd.DataFrame = data.input_data() + def _base_learner_config(base_learner_class: type[BaseLearner]) -> type[BaseLearnerConfig]: + """Extract the configuration from a base learner. - # Scale the target variable - target: np.ndarray = np.asarray(data.target_series.values) + Args: + base_learner_class: The base learner forecaster. - # Prepare sample weights - sample_weight: pd.Series = data.sample_weight_series + Returns: + The configuration of the base learner. + """ + mapping: dict[type[BaseLearner], type[BaseLearnerConfig]] = { + LGBMForecaster: LGBMForecasterConfig, + LGBMLinearForecaster: LGBMLinearForecasterConfig, + XGBoostForecaster: XGBoostForecasterConfig, + GBLinearForecaster: GBLinearForecasterConfig, + } + return mapping[base_learner_class] + + def _init_base_learners(self, base_hyperparams: list[BaseLearnerHyperParams]) -> list[BaseLearner]: + """Initialize base learners based on provided hyperparameters. + + Returns: + list[Forecaster]: List of initialized base learner forecasters. + """ + base_learners: list[BaseLearner] = [] + horizons = self.config.horizons + quantiles = self.config.quantiles - return input_data, target, sample_weight + for hyperparams in base_hyperparams: + forecaster_cls = hyperparams.forecaster_class() + config = forecaster_cls.Config(horizons=horizons, quantiles=quantiles) + if "hyperparams" in forecaster_cls.Config.model_fields: + config = config.model_copy(update={"hyperparams": hyperparams}) + + base_learners.append(config.forecaster_from_config()) + + return base_learners + + @property + @override + def config(self) -> ForecasterConfig: + return self._config @override def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: @@ -121,37 +190,111 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None data_val: Validation data for tuning the model (optional, not used in this implementation). """ - # Prepare training data - input_data, target, sample_weight = self._prepare_fit_input(data) - - if data_val is not None: - logger.warning( - "Validation data provided, but HybridForecaster does not currently support validation during fitting." - ) - - self._model.fit( - X=input_data, - y=target, - sample_weight=sample_weight, + # Fit base learners + [x.fit(data=data, data_val=data_val) for x in self._base_learners] + + full_dataset = ForecastInputDataset( + data=data.data, + sample_interval=data.sample_interval, + target_column=data.target_column, + forecast_start=data.index[0], ) - @override - def predict(self, data: ForecastInputDataset) -> ForecastDataset: - if not self._model.is_fitted: + base_predictions = self._predict_base_learners(data=full_dataset) + + quantile_dataframes = self._prepare_input_final_learner(base_predictions=base_predictions) + + self._fit_final_learner(target=data.target_series, quantile_df=quantile_dataframes) + + self._is_fitted = True + + def _fit_final_learner( + self, + target: pd.Series, + quantile_df: dict[str, pd.DataFrame], + ) -> None: + """Fit the final learner using base learner predictions. + + Args: + target: Target values for training. + quantile_df: Dictionary mapping quantile strings to DataFrames of base learner predictions. + """ + for i, df in enumerate(quantile_df.values()): + self._final_learner[i].fit(X=df, y=target) + + def _predict_base_learners(self, data: ForecastInputDataset) -> dict[str, ForecastDataset]: + """Generate predictions from base learners. + + Args: + data: Input data for prediction. + + Returns: + DataFrame containing base learner predictions. + """ + base_predictions: dict[str, ForecastDataset] = {} + for learner in self._base_learners: + preds = learner.predict(data=data) + base_predictions[learner.__class__.__name__] = preds + + return base_predictions + + def _predict_final_learner( + self, quantile_df: dict[str, pd.DataFrame], data: ForecastInputDataset + ) -> ForecastDataset: + if not self.is_fitted: raise NotFittedError(self.__class__.__name__) - input_data: pd.DataFrame = data.input_data(start=data.forecast_start) - prediction: npt.NDArray[np.floating] = self._model.predict(X=input_data) + # Generate predictions + predictions_dict = [ + pd.Series(self._final_learner[i].predict(X=quantile_df[q_str]), index=quantile_df[q_str].index, name=q_str) + for i, q_str in enumerate(quantile_df.keys()) + ] + + # Construct DataFrame with appropriate quantile columns + predictions = pd.DataFrame( + data=predictions_dict, + ).T return ForecastDataset( - data=pd.DataFrame( - data=prediction, - index=input_data.index, - columns=[quantile.format() for quantile in self.config.quantiles], - ), + data=predictions, sample_interval=data.sample_interval, ) + @staticmethod + def _prepare_input_final_learner(base_predictions: dict[str, ForecastDataset]) -> dict[str, pd.DataFrame]: + """Prepare input data for the final learner based on base learner predictions. + + Args: + base_predictions: Dictionary of base learner predictions. + + Returns: + dictionary mapping quantile strings to DataFrames of base learner predictions. + """ + predictions_quantiles: dict[str, pd.DataFrame] = {} + first_key = next(iter(base_predictions)) + for quantile in base_predictions[first_key].quantiles: + quantile_str = quantile.format() + quantile_preds = pd.DataFrame({ + learner_name: preds.data[quantile_str] for learner_name, preds in base_predictions.items() + }) + predictions_quantiles[quantile_str] = quantile_preds + + return predictions_quantiles + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + base_predictions = self._predict_base_learners(data=data) + + final_learner_input = self._prepare_input_final_learner(base_predictions=base_predictions) + + return self._predict_final_learner( + quantile_df=final_learner_input, + data=data, + ) + # TODO(@Lars800): #745: Make forecaster Explainable diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index 50dd9f50b..f46009502 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -13,6 +13,7 @@ import numpy as np import pandas as pd +from lightgbm import LGBMRegressor from pydantic import Field from openstef_core.datasets import ForecastDataset, ForecastInputDataset @@ -20,9 +21,9 @@ NotFittedError, ) from openstef_core.mixins import HyperParams -from openstef_models.estimators.lgbm import LGBMQuantileRegressor from openstef_models.explainability.mixins import ExplainableForecaster -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig +from openstef_models.models.forecasting.forecaster import ForecasterConfig, Forecaster +from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor if TYPE_CHECKING: import numpy.typing as npt @@ -108,6 +109,15 @@ class LGBMHyperParams(HyperParams): description="Fraction of features used when constructing each tree. Range: (0,1]", ) + @classmethod + def forecaster_class(cls) -> "type[LGBMForecaster]": + """Create a LightGBM forecaster instance from this configuration. + + Returns: + Forecaster class associated with this configuration. + """ + return LGBMForecaster + class LGBMForecasterConfig(ForecasterConfig): """Configuration for LightGBM-based forecaster. @@ -150,6 +160,14 @@ class LGBMForecasterConfig(ForecasterConfig): description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", ) + def forecaster_from_config(self) -> "LGBMForecaster": + """Create a LGBMForecaster instance from this configuration. + + Returns: + Forecaster instance associated with this configuration. + """ + return LGBMForecaster(config=self) + MODEL_CODE_VERSION = 1 @@ -200,7 +218,6 @@ class LGBMForecaster(Forecaster, ExplainableForecaster): HyperParams = LGBMHyperParams _config: LGBMForecasterConfig - _lgbm_model: LGBMQuantileRegressor def __init__(self, config: LGBMForecasterConfig) -> None: """Initialize LightGBM forecaster with configuration. @@ -215,13 +232,20 @@ def __init__(self, config: LGBMForecasterConfig) -> None: """ self._config = config - self._lgbm_model = LGBMQuantileRegressor( - quantiles=[float(q) for q in config.quantiles], - linear_tree=False, - random_state=config.random_state, - early_stopping_rounds=config.early_stopping_rounds, - verbosity=config.verbosity, + lgbm_params = { + "linear_tree": False, + "objective": "quantile", + "random_state": config.random_state, + "early_stopping_rounds": config.early_stopping_rounds, + "verbosity": config.verbosity, **config.hyperparams.model_dump(), + } + + self._lgbm_model: MultiQuantileRegressor = MultiQuantileRegressor( + base_learner=LGBMRegressor, # type: ignore + quantile_param="alpha", + hyperparams=lgbm_params, + quantiles=[float(q) for q in config.quantiles], ) @property @@ -237,7 +261,7 @@ def hyperparams(self) -> LGBMHyperParams: @property @override def is_fitted(self) -> bool: - return self._lgbm_model.__sklearn_is_fitted__() + return self._lgbm_model.is_fitted @staticmethod def _prepare_fit_input(data: ForecastInputDataset) -> tuple[pd.DataFrame, np.ndarray, pd.Series]: @@ -290,11 +314,11 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: @property @override def feature_importances(self) -> pd.DataFrame: - models = self._lgbm_model.models + models: list[LGBMRegressor] = self._lgbm_model.models # type: ignore weights_df = pd.DataFrame( [models[i].feature_importances_ for i in range(len(models))], index=[quantile.format() for quantile in self.config.quantiles], - columns=models[0].feature_name_, + columns=self._lgbm_model.model_feature_names if self._lgbm_model.has_feature_names else None, ).transpose() weights_df.index.name = "feature_name" diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index 202dd1e4e..57a9d96f8 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -9,11 +9,11 @@ comprehensive hyperparameter control for production forecasting workflows. """ -from typing import Literal, cast, override +from typing import TYPE_CHECKING, Literal, override import numpy as np -import numpy.typing as npt import pandas as pd +from lightgbm import LGBMRegressor from pydantic import Field from openstef_core.datasets import ForecastDataset, ForecastInputDataset @@ -21,9 +21,12 @@ NotFittedError, ) from openstef_core.mixins import HyperParams -from openstef_models.estimators.lgbm import LGBMQuantileRegressor from openstef_models.explainability.mixins import ExplainableForecaster from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig +from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor + +if TYPE_CHECKING: + import numpy.typing as npt class LGBMLinearHyperParams(HyperParams): @@ -107,6 +110,15 @@ class LGBMLinearHyperParams(HyperParams): description="Fraction of features used when constructing each tree. Range: (0,1]", ) + @classmethod + def forecaster_class(cls) -> "type[LGBMLinearForecaster]": + """Get forecaster class for these hyperparams. + + Returns: + Forecaster class associated with this configuration. + """ + return LGBMLinearForecaster + class LGBMLinearForecasterConfig(ForecasterConfig): """Configuration for LgbLinear-based forecaster. @@ -150,6 +162,14 @@ class LGBMLinearForecasterConfig(ForecasterConfig): description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", ) + def forecaster_from_config(self) -> "LGBMLinearForecaster": + """Create a LGBMLinearForecaster instance from this configuration. + + Returns: + Forecaster instance associated with this configuration. + """ + return LGBMLinearForecaster(config=self) + MODEL_CODE_VERSION = 1 @@ -200,7 +220,6 @@ class LGBMLinearForecaster(Forecaster, ExplainableForecaster): HyperParams = LGBMLinearHyperParams _config: LGBMLinearForecasterConfig - _lgbmlinear_model: LGBMQuantileRegressor def __init__(self, config: LGBMLinearForecasterConfig) -> None: """Initialize LgbLinear forecaster with configuration. @@ -215,13 +234,20 @@ def __init__(self, config: LGBMLinearForecasterConfig) -> None: """ self._config = config - self._lgbmlinear_model = LGBMQuantileRegressor( - quantiles=[float(q) for q in config.quantiles], - linear_tree=True, - random_state=config.random_state, - early_stopping_rounds=config.early_stopping_rounds, - verbosity=config.verbosity, + lgbmlinear_params = { + "linear_tree": True, + "objective": "quantile", + "random_state": config.random_state, + "early_stopping_rounds": config.early_stopping_rounds, + "verbosity": config.verbosity, **config.hyperparams.model_dump(), + } + + self._lgbmlinear_model: MultiQuantileRegressor = MultiQuantileRegressor( + base_learner=LGBMRegressor, # type: ignore + quantile_param="alpha", + hyperparams=lgbmlinear_params, + quantiles=[float(q) for q in config.quantiles], ) @property @@ -237,32 +263,37 @@ def hyperparams(self) -> LGBMLinearHyperParams: @property @override def is_fitted(self) -> bool: - return self._lgbmlinear_model.__sklearn_is_fitted__() + return self._lgbmlinear_model.is_fitted + + @staticmethod + def _prepare_fit_input(data: ForecastInputDataset) -> tuple[pd.DataFrame, np.ndarray, pd.Series]: + input_data: pd.DataFrame = data.input_data() + target: np.ndarray = np.asarray(data.target_series.values) + sample_weight: pd.Series = data.sample_weight_series + + return input_data, target, sample_weight @override def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: - input_data: pd.DataFrame = data.input_data() - target: npt.NDArray[np.floating] = data.target_series.to_numpy() # type: ignore - sample_weight = data.sample_weight_series + # Prepare training data + input_data, target, sample_weight = self._prepare_fit_input(data) - # Prepare validation data if provided - eval_set = None - eval_sample_weight = None - if data_val is not None: - val_input_data: pd.DataFrame = data_val.input_data() - val_target: npt.NDArray[np.floating] = data_val.target_series.to_numpy() # type: ignore - val_sample_weight = cast(npt.NDArray[np.floating], data_val.sample_weight_series.to_numpy()) # type: ignore - eval_set = [(val_input_data, val_target)] + # Evaluation sets + eval_set = [(input_data, target)] + sample_weight_eval_set = [sample_weight] - eval_sample_weight = [val_sample_weight] + if data_val is not None: + input_data_val, target_val, sample_weight_val = self._prepare_fit_input(data_val) + eval_set.append((input_data_val, target_val)) + sample_weight_eval_set.append(sample_weight_val) - self._lgbmlinear_model.fit( # type: ignore + self._lgbmlinear_model.fit( X=input_data, y=target, feature_name=input_data.columns.tolist(), sample_weight=sample_weight, eval_set=eval_set, - eval_sample_weight=eval_sample_weight, + eval_sample_weight=sample_weight_eval_set, ) @override @@ -287,9 +318,9 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: def feature_importances(self) -> pd.DataFrame: models = self._lgbmlinear_model._models # noqa: SLF001 weights_df = pd.DataFrame( - [models[i].feature_importances_ for i in range(len(models))], + [models[i].feature_importances_ for i in range(len(models))], # type: ignore index=[quantile.format() for quantile in self.config.quantiles], - columns=models[0].feature_name_, + columns=self._lgbmlinear_model.model_feature_names if self._lgbmlinear_model.has_feature_names else None, ).transpose() weights_df.index.name = "feature_name" diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py index f48bef2bb..2a35fb987 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py @@ -160,6 +160,15 @@ class XGBoostHyperParams(HyperParams): description="Whether to apply standard scaling to the target variable before training. Improves convergence.", ) + @classmethod + def forecaster_class(cls) -> "type[XGBoostForecaster]": + """Get the forecaster class for these hyperparams. + + Returns: + Forecaster class associated with this configuration. + """ + return XGBoostForecaster + class XGBoostForecasterConfig(ForecasterConfig): """Configuration for XGBoost-based forecasting models. @@ -196,6 +205,14 @@ class XGBoostForecasterConfig(ForecasterConfig): default=1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" ) + def forecaster_from_config(self) -> "XGBoostForecaster": + """Create a XGBoost forecaster instance from this configuration. + + Returns: + Forecaster instance associated with this configuration. + """ + return XGBoostForecaster(config=self) + MODEL_CODE_VERSION = 1 diff --git a/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py index e96a92993..8a6276927 100644 --- a/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py +++ b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py @@ -72,29 +72,52 @@ def fit( eval_set: Evaluation set for early stopping. eval_sample_weight: Sample weights for evaluation data. """ + # Pass model-specific eval arguments + kwargs = {} for model in self._models: + # Check if early stopping is supported + # Check that eval_set is supported if eval_set is None and "early_stopping_rounds" in self.hyperparams: model.set_params(early_stopping_rounds=None) # type: ignore - elif "early_stopping_rounds" in self.hyperparams: - model.set_params(early_stopping_rounds=self.hyperparams.early_stopping_rounds) # type: ignore - if eval_set or eval_sample_weight: - logger.warning( - "Evaluation sets or sample weights provided, but MultiQuantileRegressor does not currently support " - "these during fitting." - ) + if eval_set is not None and self.learner_eval_sample_weight_param is not None: # type: ignore + kwargs[self.learner_eval_sample_weight_param] = eval_sample_weight + + if "early_stopping_rounds" in self.hyperparams and self.learner_eval_sample_weight_param is not None: + model.set_params(early_stopping_rounds=self.hyperparams["early_stopping_rounds"]) # type: ignore if feature_name: - logger.warning( - "Feature names provided, but MultiQuantileRegressor does not currently support feature names during fitting." - ) + self.model_feature_names = feature_name + else: + self.model_feature_names = [] + + if eval_sample_weight is not None and self.learner_eval_sample_weight_param: + kwargs[self.learner_eval_sample_weight_param] = eval_sample_weight + model.fit( # type: ignore X=np.asarray(X), y=y, sample_weight=sample_weight, + **kwargs, ) + self.is_fitted = True + @property + def learner_eval_sample_weight_param(self) -> str | None: + """Get the name of the sample weight parameter for evaluation sets. + + Returns: + The name of the sample weight parameter if supported, else None. + """ + learner_name: str = self.base_learner.__name__ + params: dict[str, str | None] = { + "QuantileRegressor": None, + "LGBMRegressor": "eval_sample_weight", + "XGBRegressor": "sample_weight_eval_set", + } + return params.get(learner_name) + def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: """Predict quantiles for the input features. @@ -115,3 +138,12 @@ def models(self) -> list[BaseEstimator]: List of BaseEstimator instances for each quantile. """ return self._models + + @property + def has_feature_names(self) -> bool: + """Check if the base learners have feature names. + + Returns: + True if the base learners have feature names, False otherwise. + """ + return len(self.model_feature_names) > 0 diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py index f8251d484..09c69f24f 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py @@ -15,18 +15,13 @@ HybridForecasterConfig, HybridHyperParams, ) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams @pytest.fixture def base_config() -> HybridForecasterConfig: """Base configuration for Hybrid forecaster tests.""" - lgbm_params = LGBMHyperParams(n_estimators=10, max_depth=2) - gb_linear_params = GBLinearHyperParams(n_steps=5, learning_rate=0.1, reg_alpha=0.0, reg_lambda=0.0) - params = HybridHyperParams( - lgbm_params=lgbm_params, - gb_linear_params=gb_linear_params, - ) + + params = HybridHyperParams() return HybridForecasterConfig( quantiles=[Q(0.1), Q(0.5), Q(0.9)], horizons=[LeadTime(timedelta(days=1))], @@ -35,7 +30,7 @@ def base_config() -> HybridForecasterConfig: ) -def test_hybrid_forecaster__fit_predict( +def test_hybrid_forecaster_fit_predict( sample_forecast_input_dataset: ForecastInputDataset, base_config: HybridForecasterConfig, ): @@ -62,7 +57,7 @@ def test_hybrid_forecaster__fit_predict( assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" -def test_hybrid_forecaster__predict_not_fitted_raises_error( +def test_hybrid_forecaster_predict_not_fitted_raises_error( sample_forecast_input_dataset: ForecastInputDataset, base_config: HybridForecasterConfig, ): @@ -75,7 +70,7 @@ def test_hybrid_forecaster__predict_not_fitted_raises_error( forecaster.predict(sample_forecast_input_dataset) -def test_hybrid_forecaster__with_sample_weights( +def test_hybrid_forecaster_with_sample_weights( sample_dataset_with_weights: ForecastInputDataset, base_config: HybridForecasterConfig, ): @@ -109,36 +104,3 @@ def test_hybrid_forecaster__with_sample_weights( # (This is a statistical test - with different weights, predictions should differ) differences = (result_with_weights.data - result_without_weights.data).abs() assert differences.sum().sum() > 0, "Sample weights should affect model predictions" - - -@pytest.mark.parametrize("objective", ["pinball_loss", "arctan_loss"]) -def test_hybrid_forecaster__different_objectives( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: HybridForecasterConfig, - objective: str, -): - """Test that forecaster works with different objective functions.""" - # Arrange - config = base_config.model_copy( - update={ - "hyperparams": base_config.hyperparams.model_copy( - update={"objective": objective} # type: ignore[arg-type] - ) - } - ) - forecaster = HybridForecaster(config=config) - - # Act - forecaster.fit(sample_forecast_input_dataset) - result = forecaster.predict(sample_forecast_input_dataset) - - # Assert - # Basic functionality should work regardless of objective - assert forecaster.is_fitted, f"Model with {objective} should be fitted" - assert not result.data.isna().any().any(), f"Forecast with {objective} should not contain NaN values" - - # Check value spread for each objective - # Note: Some objectives (like arctan_loss) may produce zero variation for some quantiles with small datasets - stds = result.data.std() - # At least one quantile should have variation (the model should not be completely degenerate) - assert (stds > 0).any(), f"At least one column should have variation with {objective}, got stds: {dict(stds)}" From bfa2e2f198faf165de719b58ed110dc0ce065baf Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 17 Nov 2025 16:09:41 +0100 Subject: [PATCH 016/104] Small fix --- .../models/forecasting/hybrid_forecaster.py | 13 ++++++------- .../models/forecasting/test_hybrid_forecaster.py | 1 - 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index eba72a66d..7a41035b6 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -98,6 +98,7 @@ class HybridForecaster(Forecaster): def __init__(self, config: HybridForecasterConfig) -> None: """Initialize the Hybrid forecaster.""" + self._config = config self._base_learners: list[BaseLearner] = self._init_base_learners( @@ -107,13 +108,6 @@ def __init__(self, config: HybridForecasterConfig) -> None: QuantileRegressor(quantile=float(q), alpha=config.hyperparams.l1_penalty) for q in config.quantiles ] - self._is_fitted: bool = False - - @property - @override - def is_fitted(self) -> bool: - return self._is_fitted - @staticmethod def _hyperparams_forecast_map(hyperparams: type[BaseLearnerHyperParams]) -> type[BaseLearner]: """Map hyperparameters to forecast types. @@ -176,6 +170,11 @@ def _init_base_learners(self, base_hyperparams: list[BaseLearnerHyperParams]) -> return base_learners + @property + @override + def is_fitted(self) -> bool: + return all(x.is_fitted for x in self._base_learners) + @property @override def config(self) -> ForecasterConfig: diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py index 09c69f24f..4e36e125d 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py @@ -9,7 +9,6 @@ from openstef_core.datasets import ForecastInputDataset from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q -from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams from openstef_models.models.forecasting.hybrid_forecaster import ( HybridForecaster, HybridForecasterConfig, From ce2172a65c9a4ff469122165a340189ed5e040a0 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 17 Nov 2025 21:17:07 +0100 Subject: [PATCH 017/104] Squashed commit of the following: commit 37089b84bdea12d22506174ef1393c4fc346ca36 Author: Egor Dmitriev Date: Mon Nov 17 15:29:59 2025 +0100 fix(#728): Fixed parallelism stability issues, and gblinear feature pipeline. (#752) * fix(STEF-2475): Added loky as default option for parallelism since fork causes instabilities for xgboost results. Signed-off-by: Egor Dmitriev * fix(STEF-2475): Added better support for flatliners and predicting when data is sparse. Signed-off-by: Egor Dmitriev * fix(STEF-2475): Feature handing improvements for gblinear. Like imputation, nan dropping, and checking if features are available. Signed-off-by: Egor Dmitriev * fix(#728): Added checks on metrics to gracefully handle empty data. Added flatline filtering during evalution. Signed-off-by: Egor Dmitriev * fix(#728): Updated xgboost to skip scaling on empty prediction. Signed-off-by: Egor Dmitriev * fix(STEF-2475): Added parallelism parameters. Signed-off-by: Egor Dmitriev --------- Signed-off-by: Egor Dmitriev commit a85a3f709c9a54b85658578b5c2aefc001bdf803 Author: Egor Dmitriev Date: Fri Nov 14 14:31:34 2025 +0100 fix(STEF-2475): Fixed rolling aggregate adder by adding forward filling and stating support for only one horizon. (#750) Signed-off-by: Egor Dmitriev commit 4f0c6648516bf184608d268020fdfa4107050c83 Author: Egor Dmitriev Date: Thu Nov 13 16:54:15 2025 +0100 feature: Disabled data cutoff by default to be consistent with openstef 3. And other minor improvements. (#748) commit 493126e9f16836d0da03d9c43e391537c5bea7ca Author: Egor Dmitriev Date: Thu Nov 13 16:12:35 2025 +0100 fix(STEF-2475) fix and refactor backtesting iction in context of backtestforecasting config for clarity. Added more colors. Fixed data split function to handle 0.0 splits. (#747) * fix: Fixed data collation during backtesting. Renamed horizon to prediction in context of backtestforecasting config for clarity. Added more colors. Fixed data split function to handle 0.0 splits. * fix: Formatting. Signed-off-by: Egor Dmitriev * fix: Formatting. Signed-off-by: Egor Dmitriev --------- Signed-off-by: Egor Dmitriev commit 6b1da449b7841f1b13a5fac1f16e48bbeb9b9692 Author: Egor Dmitriev Date: Thu Nov 13 16:05:32 2025 +0100 feature: forecaster hyperparams and eval metrics (#746) * feature(#729) Removed to_state and from_state methods in favor of builtin python state saving functions. Signed-off-by: Egor Dmitriev * feature(#729): Fixed issue where generic transform pipeline could not be serialized. Signed-off-by: Egor Dmitriev * feature(#729): Added more state saving tests Signed-off-by: Egor Dmitriev * feature(#729): Added more state saving tests Signed-off-by: Egor Dmitriev * feature(#729): Added more state saving tests Signed-off-by: Egor Dmitriev * feature: standardized objective function. Added custom evaluation functions for forecasters. * fix: Formatting. Signed-off-by: Egor Dmitriev --------- Signed-off-by: Egor Dmitriev --- .gitignore | 3 +- ...liander_2024_benchmark_xgboost_gblinear.py | 70 +- .../liander_2024_compare_results.py | 50 + .../plots/forecast_time_series_plotter.py | 2 + .../backtesting/backtest_event_generator.py | 4 +- .../backtest_forecaster/dummy_forecaster.py | 4 +- .../backtesting/backtest_forecaster/mixins.py | 4 +- .../openstef4_backtest_forecaster.py | 73 +- .../restricted_horizon_timeseries.py | 19 +- .../benchmarking/benchmark_pipeline.py | 1 + .../benchmarking/target_provider.py | 69 +- .../metrics/metrics_deterministic.py | 11 + .../metrics/metrics_probabilistic.py | 56 + .../test_backtest_event_generator.py | 16 +- .../backtesting/test_backtest_pipeline.py | 20 +- .../unit/backtesting/test_batch_prediction.py | 4 +- .../benchmarking/test_benchmark_pipeline.py | 4 +- .../unit/benchmarking/test_target_provider.py | 48 +- .../metrics/test_metrics_deterministic.py | 47 + .../metrics/test_metrics_probabilistic.py | 33 + .../src/openstef_core/exceptions.py | 4 + .../openstef_core/utils/multiprocessing.py | 33 +- .../tests/unit/utils/test_multiprocessing.py | 26 +- .../models/forecasting/gblinear_forecaster.py | 60 +- .../models/forecasting/xgboost_forecaster.py | 35 +- .../models/forecasting_model.py | 4 +- .../presets/forecasting_workflow.py | 75 +- .../transforms/general/__init__.py | 2 + .../transforms/general/imputer.py | 15 +- .../transforms/general/nan_dropper.py | 89 + .../time_domain/rolling_aggregates_adder.py | 19 +- .../transforms/weather_domain/__init__.py | 5 +- .../src/openstef_models/utils/data_split.py | 21 +- .../utils/evaluation_functions.py | 30 + .../utils/feature_selection.py | 2 + .../openstef_models/utils/loss_functions.py | 16 +- .../workflows/custom_forecasting_workflow.py | 4 +- .../transforms/general/test_nan_dropper.py | 48 + .../test_rolling_aggregates_adder.py | 13 +- uv.lock | 1482 +++++++++-------- 40 files changed, 1625 insertions(+), 896 deletions(-) create mode 100644 examples/benchmarks/liander_2024_compare_results.py create mode 100644 packages/openstef-models/src/openstef_models/transforms/general/nan_dropper.py create mode 100644 packages/openstef-models/src/openstef_models/utils/evaluation_functions.py create mode 100644 packages/openstef-models/tests/unit/transforms/general/test_nan_dropper.py diff --git a/.gitignore b/.gitignore index 81c981c66..db241c85e 100644 --- a/.gitignore +++ b/.gitignore @@ -123,7 +123,8 @@ certificates/ *.html *.pkl +# Benchmark outputs +benchmark_results*/ # Experiment outputs -benchmark_results/ optimization_results/ dev_sandbox/ \ No newline at end of file diff --git a/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py b/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py index 1f5f016d0..12d0bc9c0 100644 --- a/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py +++ b/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py @@ -10,6 +10,12 @@ # # SPDX-License-Identifier: MPL-2.0 +import os + +os.environ["OMP_NUM_THREADS"] = "1" # Set OMP_NUM_THREADS to 1 to avoid issues with parallel execution and xgboost +os.environ["OPENBLAS_NUM_THREADS"] = "1" +os.environ["MKL_NUM_THREADS"] = "1" + import logging from datetime import timedelta from pathlib import Path @@ -38,19 +44,32 @@ BENCHMARK_RESULTS_PATH_XGBOOST = OUTPUT_PATH / "XGBoost" BENCHMARK_RESULTS_PATH_GBLINEAR = OUTPUT_PATH / "GBLinear" -N_PROCESSES = 1 # Amount of parallel processes to use for the benchmark +N_PROCESSES = 12 # Amount of parallel processes to use for the benchmark # Model configuration FORECAST_HORIZONS = [LeadTime.from_string("P3D")] # Forecast horizon(s) -PREDICTION_QUANTILES = [Q(0.1), Q(0.3), Q(0.5), Q(0.7), Q(0.9)] # Quantiles for probabilistic forecasts +PREDICTION_QUANTILES = [ + Q(0.05), + Q(0.1), + Q(0.3), + Q(0.5), + Q(0.7), + Q(0.9), + Q(0.95), +] # Quantiles for probabilistic forecasts BENCHMARK_FILTER: list[Liander2024Category] | None = None -storage = MLFlowStorage( - tracking_uri=str(OUTPUT_PATH / "mlflow_artifacts"), - local_artifacts_path=OUTPUT_PATH / "mlflow_tracking_artifacts", -) +USE_MLFLOW_STORAGE = False + +if USE_MLFLOW_STORAGE: + storage = MLFlowStorage( + tracking_uri=str(OUTPUT_PATH / "mlflow_artifacts"), + local_artifacts_path=OUTPUT_PATH / "mlflow_tracking_artifacts", + ) +else: + storage = None common_config = ForecastingWorkflowConfig( model_id="common_model_", @@ -58,19 +77,32 @@ horizons=FORECAST_HORIZONS, quantiles=PREDICTION_QUANTILES, model_reuse_enable=False, - mlflow_storage=storage, + mlflow_storage=None, radiation_column="shortwave_radiation", rolling_aggregate_features=["mean", "median", "max", "min"], wind_speed_column="wind_speed_80m", pressure_column="surface_pressure", temperature_column="temperature_2m", relative_humidity_column="relative_humidity_2m", + energy_price_column="EPEX_NL", ) xgboost_config = common_config.model_copy(update={"model": "xgboost"}) gblinear_config = common_config.model_copy(update={"model": "gblinear"}) +# Create the backtest configuration +backtest_config = BacktestForecasterConfig( + requires_training=True, + predict_length=timedelta(days=7), + predict_min_length=timedelta(minutes=15), + predict_context_length=timedelta(days=14), # Context needed for lag features + predict_context_min_coverage=0.5, + training_context_length=timedelta(days=90), # Three months of training data + training_context_min_coverage=0.5, + predict_sample_interval=timedelta(minutes=15), +) + def _target_forecaster_factory( context: BenchmarkContext, @@ -99,18 +131,6 @@ def _create_workflow() -> CustomForecastingWorkflow: ) ) - # Create the backtest configuration - backtest_config = BacktestForecasterConfig( - requires_training=True, - horizon_length=timedelta(days=7), - horizon_min_length=timedelta(minutes=15), - predict_context_length=timedelta(days=14), # Context needed for lag features - predict_context_min_coverage=0.5, - training_context_length=timedelta(days=90), # Three months of training data - training_context_min_coverage=0.5, - predict_sample_interval=timedelta(minutes=15), - ) - return OpenSTEF4BacktestForecaster( config=backtest_config, workflow_factory=_create_workflow, @@ -120,24 +140,24 @@ def _create_workflow() -> CustomForecastingWorkflow: if __name__ == "__main__": - # Run for GBLinear model + # Run for XGBoost model create_liander2024_benchmark_runner( - storage=LocalBenchmarkStorage(base_path=BENCHMARK_RESULTS_PATH_GBLINEAR), + storage=LocalBenchmarkStorage(base_path=BENCHMARK_RESULTS_PATH_XGBOOST), callbacks=[StrictExecutionCallback()], ).run( forecaster_factory=_target_forecaster_factory, - run_name="gblinear", + run_name="xgboost", n_processes=N_PROCESSES, filter_args=BENCHMARK_FILTER, ) - # Run for XGBoost model + # Run for GBLinear model create_liander2024_benchmark_runner( - storage=LocalBenchmarkStorage(base_path=BENCHMARK_RESULTS_PATH_XGBOOST), + storage=LocalBenchmarkStorage(base_path=BENCHMARK_RESULTS_PATH_GBLINEAR), callbacks=[StrictExecutionCallback()], ).run( forecaster_factory=_target_forecaster_factory, - run_name="xgboost", + run_name="gblinear", n_processes=N_PROCESSES, filter_args=BENCHMARK_FILTER, ) diff --git a/examples/benchmarks/liander_2024_compare_results.py b/examples/benchmarks/liander_2024_compare_results.py new file mode 100644 index 000000000..3de16460c --- /dev/null +++ b/examples/benchmarks/liander_2024_compare_results.py @@ -0,0 +1,50 @@ +"""Example for comparing benchmark results from different runs on the Liander 2024 dataset.""" +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from pathlib import Path + +from openstef_beam.analysis.models import RunName +from openstef_beam.benchmarking import BenchmarkComparisonPipeline, LocalBenchmarkStorage +from openstef_beam.benchmarking.benchmarks import create_liander2024_benchmark_runner +from openstef_beam.benchmarking.benchmarks.liander2024 import LIANDER2024_ANALYSIS_CONFIG +from openstef_beam.benchmarking.storage import BenchmarkStorage + +BASE_DIR = Path() + +OUTPUT_PATH = BASE_DIR / "./benchmark_results_comparison" + +BENCHMARK_DIR_GBLINEAR = BASE_DIR / "benchmark_results" / "GBLinear" +BENCHMARK_DIR_XGBOOST = BASE_DIR / "benchmark_results" / "XGBoost" +BENCHMARK_DIR_GBLINEAR_OPENSTEF3 = BASE_DIR / "benchmark_results_openstef3" / "gblinear" +BENCHMARK_DIR_XGBOOST_OPENSTEF3 = BASE_DIR / "benchmark_results_openstef3" / "xgboost" + +check_dirs = [ + BENCHMARK_DIR_GBLINEAR, + BENCHMARK_DIR_XGBOOST, + BENCHMARK_DIR_GBLINEAR_OPENSTEF3, + BENCHMARK_DIR_XGBOOST_OPENSTEF3, +] +for dir_path in check_dirs: + if not dir_path.exists(): + msg = f"Benchmark directory not found: {dir_path}. Make sure to run the benchmarks first." + raise FileNotFoundError(msg) + +run_storages: dict[RunName, BenchmarkStorage] = { + "gblinear": LocalBenchmarkStorage(base_path=BENCHMARK_DIR_GBLINEAR), + "gblinear_openstef3": LocalBenchmarkStorage(base_path=BENCHMARK_DIR_GBLINEAR_OPENSTEF3), + "xgboost": LocalBenchmarkStorage(base_path=BENCHMARK_DIR_XGBOOST), + "xgboost_openstef3": LocalBenchmarkStorage(base_path=BENCHMARK_DIR_XGBOOST_OPENSTEF3), +} + +target_provider = create_liander2024_benchmark_runner( + storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH), +).target_provider + +comparison_pipeline = BenchmarkComparisonPipeline( + analysis_config=LIANDER2024_ANALYSIS_CONFIG, + storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH), + target_provider=target_provider, +) +comparison_pipeline.run(run_data=run_storages) diff --git a/packages/openstef-beam/src/openstef_beam/analysis/plots/forecast_time_series_plotter.py b/packages/openstef-beam/src/openstef_beam/analysis/plots/forecast_time_series_plotter.py index ac3a4e65f..9fdf6684c 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/plots/forecast_time_series_plotter.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/plots/forecast_time_series_plotter.py @@ -98,6 +98,8 @@ class ForecastTimeSeriesPlotter: "green": "Greens", "purple": "Purples", "orange": "Oranges", + "magenta": "Magenta", + "grey": "Greys", } colors: ClassVar[list[str]] = list(COLOR_SCHEME.keys()) colormaps: ClassVar[list[str]] = list(COLOR_SCHEME.values()) diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event_generator.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event_generator.py index 32f3bd62a..32638dc30 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event_generator.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event_generator.py @@ -99,7 +99,7 @@ def _predict_iterator(self) -> Iterator[BacktestEvent]: current_time = align_datetime_to_time(self.start, self.align_time, mode="ceil") while current_time <= end_time: - horizon_end = current_time + self.forecaster_config.horizon_min_length + horizon_end = current_time + self.forecaster_config.predict_min_length if horizon_end > end_time: break @@ -124,7 +124,7 @@ def _train_iterator(self) -> Iterator[BacktestEvent]: current_time = align_datetime_to_time(self.start, self.align_time, mode="ceil") while current_time <= end_time: - horizon_end = current_time + self.forecaster_config.horizon_min_length + horizon_end = current_time + self.forecaster_config.predict_min_length if horizon_end > end_time: break diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/dummy_forecaster.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/dummy_forecaster.py index 9e24e7e8e..9b931ad6d 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/dummy_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/dummy_forecaster.py @@ -43,8 +43,8 @@ def __init__( super().__init__() self.config = config or BacktestForecasterConfig( requires_training=False, - horizon_length=timedelta(days=7), - horizon_min_length=timedelta(days=0), + predict_length=timedelta(days=7), + predict_min_length=timedelta(days=0), predict_context_length=timedelta(days=0), predict_context_min_coverage=0.0, training_context_length=timedelta(days=0), diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/mixins.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/mixins.py index 6597c5b22..5c02a6fa5 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/mixins.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/mixins.py @@ -35,8 +35,8 @@ class BacktestForecasterConfig(BaseConfig): default=timedelta(minutes=15), description="Time interval between prediction samples." ) - horizon_length: timedelta = Field(description="Length of the prediction horizon.") - horizon_min_length: timedelta = Field(description="Minimum horizon length that can be predicted.") + predict_length: timedelta = Field(description="Length of the prediction.") + predict_min_length: timedelta = Field(description="Minimum length that can be predicted.") predict_context_length: timedelta = Field(description="Length of the prediction context.") predict_context_min_coverage: float = Field( diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py index 4b6574b9e..96a1e6e93 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py @@ -4,6 +4,7 @@ """OpenSTEF 4.0 forecaster for backtesting pipelines.""" +import logging from collections.abc import Callable from pathlib import Path from typing import Any, override @@ -14,9 +15,8 @@ from openstef_beam.backtesting.restricted_horizon_timeseries import RestrictedHorizonVersionedTimeSeries from openstef_core.base_model import BaseModel from openstef_core.datasets import TimeSeriesDataset -from openstef_core.exceptions import NotFittedError +from openstef_core.exceptions import FlatlinerDetectedError, NotFittedError from openstef_core.types import Q -from openstef_models.models.forecasting_model import restore_target from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow @@ -42,6 +42,9 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): ) _workflow: CustomForecastingWorkflow | None = PrivateAttr(default=None) + _is_flatliner_detected: bool = PrivateAttr(default=False) + + _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) @override def model_post_init(self, context: Any) -> None: @@ -59,30 +62,27 @@ def quantiles(self) -> list[Q]: @override def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: # Create a new workflow for this training cycle - self._workflow = self.workflow_factory() - - # Get training data window based on config - training_end = data.horizon - training_start = training_end - self.config.training_context_length + workflow = self.workflow_factory() - # Extract the versioned dataset for training - training_data_versioned = data.get_window_versioned( - start=training_start, end=training_end, available_before=data.horizon - ) - # Convert to horizons - training_data = training_data_versioned.to_horizons(horizons=self._workflow.model.config.horizons) - training_data = restore_target( - dataset=training_data, - original_dataset=training_data_versioned.select_version(), - target_column=self._workflow.model.target_column, + # Extract the dataset for training + training_data = data.get_window( + start=data.horizon - self.config.training_context_length, end=data.horizon, available_before=data.horizon ) if self.debug: id_str = data.horizon.strftime("%Y%m%d%H%M%S") training_data.to_parquet(path=self.cache_dir / f"debug_{id_str}_training.parquet") - # Use the workflow's fit method - self._workflow.fit(data=training_data) + try: + # Use the workflow's fit method + workflow.fit(data=training_data) + self._is_flatliner_detected = False + except FlatlinerDetectedError: + self._logger.warning("Flatliner detected during training") + self._is_flatliner_detected = True + return # Skip setting the workflow on flatliner detection + + self._workflow = workflow if self.debug: id_str = data.horizon.strftime("%Y%m%d%H%M%S") @@ -92,33 +92,28 @@ def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: @override def predict(self, data: RestrictedHorizonVersionedTimeSeries) -> TimeSeriesDataset | None: + if self._is_flatliner_detected: + self._logger.info("Skipping prediction due to prior flatliner detection") + return None + if self._workflow is None: raise NotFittedError("Must call fit() before predict()") - # Define the time windows: - # - Historical context: used for features (lags, etc.) - # - Forecast period: the period we want to predict - predict_context_start = data.horizon - self.config.predict_context_length - forecast_end = data.horizon + self.config.horizon_length - # Extract the dataset including both historical context and forecast period - predict_data_versioned = data.get_window_versioned( - start=predict_context_start, - end=forecast_end, # Include the forecast period + predict_data = data.get_window( + start=data.horizon - self.config.predict_context_length, + end=data.horizon + self.config.predict_length, # Include the forecast period available_before=data.horizon, # Only use data available at prediction time (prevents lookahead bias) ) - # Convert to horizons - predict_data = predict_data_versioned.to_horizons(horizons=self._workflow.model.config.horizons) - predict_data = restore_target( - dataset=predict_data, - original_dataset=predict_data_versioned.select_version(), - target_column=self._workflow.model.target_column, - ) - forecast = self._workflow.predict( - data=predict_data, - forecast_start=data.horizon, # Where historical data ends and forecasting begins - ) + try: + forecast = self._workflow.predict( + data=predict_data, + forecast_start=data.horizon, # Where historical data ends and forecasting begins + ) + except FlatlinerDetectedError: + self._logger.info("Flatliner detected during prediction") + return None if self.debug: id_str = data.horizon.strftime("%Y%m%d%H%M%S") diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/restricted_horizon_timeseries.py b/packages/openstef-beam/src/openstef_beam/backtesting/restricted_horizon_timeseries.py index 387c4d8b3..5040a038b 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/restricted_horizon_timeseries.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/restricted_horizon_timeseries.py @@ -41,9 +41,7 @@ def get_window(self, start: datetime, end: datetime, available_before: datetime Returns: DataFrame with data from the specified window. """ - dataset = self.dataset.filter_by_range(start=start, end=end) - if available_before is not None: - dataset = dataset.filter_by_available_before(available_before=available_before) + dataset = self.get_window_versioned(start=start, end=end, available_before=available_before) return dataset.select_version() @@ -54,12 +52,19 @@ def get_window_versioned( Returns: DataFrame with data from the specified window. + + Raises: + ValueError: If available_before is after the horizon. """ - dataset = self.dataset.filter_by_range(start=start, end=end) - if available_before is not None: - dataset = dataset.filter_by_available_before(available_before=available_before) + if available_before is None: + available_before = self.horizon - return dataset + if available_before > self.horizon: + raise ValueError("available_before cannot be after the horizon") + + dataset = self.dataset.filter_by_range(start=start, end=end) + # Make sure to only include data available before the cutoff + return dataset.filter_by_available_before(available_before=available_before) __all__ = [ diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py index 3ee54ffa5..bcd1ef070 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py @@ -187,6 +187,7 @@ def run( process_fn=partial(self._run_for_target, context, forecaster_factory), items=targets, n_processes=n_processes, + mode="loky", ) if not self.storage.has_analysis_output( diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/target_provider.py b/packages/openstef-beam/src/openstef_beam/benchmarking/target_provider.py index 49df1f4c6..bbd2c7bb2 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/target_provider.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/target_provider.py @@ -19,6 +19,7 @@ from pathlib import Path from typing import cast, override +import numpy as np import pandas as pd from pydantic import Field, TypeAdapter @@ -268,16 +269,6 @@ def get_targets(self, filter_args: F | None = None) -> list[T]: def get_metrics_for_target(self, target: T) -> list[MetricProvider]: return self.metrics if isinstance(self.metrics, list) else self.metrics(target) # type: ignore[return-value] - measurements_path_for_target: Callable[[T], Path] = Field( - default=lambda target: Path(target.group_name) / f"load_data_{target.name}.parquet", - description="Function to build file path for target measurements using configured template", - ) - - weather_path_for_target: Callable[[T], Path] = Field( - default=lambda target: Path(target.group_name) / f"weather_data_{target.name}.parquet", - description="Function to build file path for target weather data using configured template", - ) - def _get_measurements_path_for_target(self, target: T) -> Path: return self.data_dir / str(target.group_name) / self.measurements_path_template.format(name=target.name) @@ -353,4 +344,60 @@ def get_prices(self) -> VersionedTimeSeriesDataset: @override def get_evaluation_mask_for_target(self, target: T) -> pd.DatetimeIndex | None: - return None + measurement_series = self.get_measurements_for_target(target).select_version().data[self.target_column] + + filtered_series = filter_away_flatline_chunks( + measurement_series=measurement_series, + min_length=24 * 4, + threshold=0.05, + ) + return pd.DatetimeIndex(cast(pd.DatetimeIndex, filtered_series.dropna().index)) # type: ignore[reportUnknownMemberType] + + +def filter_away_flatline_chunks( + measurement_series: pd.Series, + min_length: int = 96, + threshold: float = 1.0, +) -> pd.Series: + """Mask long flatline segments in a target series. + + Detects contiguous segments where the standard deviation inside both centered and + right-aligned windows falls below `threshold` times the global standard deviation + for at least `min_length` samples. Values inside those segments are replaced with + `NaN` so downstream logic can drop them and derive a clean evaluation mask. + + Args: + measurement_series: Time-indexed series containing the target observations. + min_length: Minimum length (in samples) for a chunk to be treated as a flatline. + threshold: Multiplier on the global standard deviation to define the flatline cutoff. + + Returns: + A copy of *measurement_series* with flatline chunks set to `NaN`. + """ + series_std = measurement_series.std() + actual_threshold = threshold * series_std + + rolling_std_center = measurement_series.rolling(window=min_length, center=True).std() + rolling_std_right = measurement_series.rolling(window=min_length, center=False).std() + + flatline_mask = (rolling_std_center < actual_threshold) | (rolling_std_right < actual_threshold) + flatline_mask = flatline_mask.fillna(value=False) # pyright: ignore[reportUnknownMemberType] + + flatline_chunks: list[tuple[int, int]] = [] + start_idx: int | None = None + for idx, is_flat in enumerate(flatline_mask): + if is_flat and start_idx is None: + start_idx = idx + elif not is_flat and start_idx is not None: + if idx - start_idx >= min_length: + flatline_chunks.append((start_idx, idx)) + start_idx = None + + if start_idx is not None and len(flatline_mask) - start_idx >= min_length: + flatline_chunks.append((start_idx, len(flatline_mask))) + + filtered_series = measurement_series.copy() + for start, end in flatline_chunks: + filtered_series.iloc[start:end] = np.nan + + return filtered_series diff --git a/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py b/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py index 44640c929..2de97042d 100644 --- a/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py +++ b/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py @@ -69,6 +69,8 @@ def rmae( # Ensure inputs are numpy arrays y_true = np.array(y_true) y_pred = np.array(y_pred) + if y_true.size == 0 or y_pred.size == 0: + return float("NaN") # Calculate MAE mae = np.average(np.abs(y_true - y_pred), weights=sample_weights) @@ -124,6 +126,8 @@ def mape( # Ensure inputs are numpy arrays y_true = np.array(y_true) y_pred = np.array(y_pred) + if y_true.size == 0 or y_pred.size == 0: + return float("NaN") # Calculate MAPE mape_value = np.mean(np.abs((y_true - y_pred) / y_true)) @@ -388,6 +392,8 @@ def riqd( y_true = np.array(y_true) y_pred_lower_q = np.array(y_pred_lower_q) y_pred_upper_q = np.array(y_pred_upper_q) + if y_true.size == 0 or y_pred_lower_q.size == 0 or y_pred_upper_q.size == 0: + return float("NaN") y_range = np.quantile(y_true, q=measurement_range_upper_q) - np.quantile(y_true, q=measurement_range_lower_q) @@ -451,6 +457,9 @@ def r2( >>> isinstance(score, float) True """ + if len(y_true) == 0 or len(y_pred) == 0: + return float("NaN") + return float(r2_score(y_true, y_pred, sample_weight=sample_weights)) @@ -499,6 +508,8 @@ def relative_pinball_loss( # Ensure inputs are numpy arrays y_true = np.array(y_true) y_pred = np.array(y_pred) + if y_true.size == 0 or y_pred.size == 0: + return float("NaN") # Calculate pinball loss for each sample errors = y_true - y_pred diff --git a/packages/openstef-beam/src/openstef_beam/metrics/metrics_probabilistic.py b/packages/openstef-beam/src/openstef_beam/metrics/metrics_probabilistic.py index ff15cef6e..79bfa85f7 100644 --- a/packages/openstef-beam/src/openstef_beam/metrics/metrics_probabilistic.py +++ b/packages/openstef-beam/src/openstef_beam/metrics/metrics_probabilistic.py @@ -19,6 +19,7 @@ import numpy.typing as npt from openstef_core.exceptions import MissingExtraError +from openstef_core.types import Quantile def crps( @@ -214,3 +215,58 @@ def mean_absolute_calibration_error( """ observed_probs = np.array([observed_probability(y_true, y_pred[:, i]) for i in range(len(quantiles))]) return float(np.mean(np.abs(observed_probs - quantiles))) + + +def mean_pinball_loss( + y_true: npt.NDArray[np.floating], + y_pred: npt.NDArray[np.floating], + quantiles: list[Quantile], + sample_weight: npt.NDArray[np.floating] | None = None, +) -> float: + """Calculate the Mean Pinball Loss for quantile forecasts. + + The Pinball Loss is a proper scoring rule for evaluating quantile forecasts. + It penalizes under- and over-predictions differently based on the quantile level. + + Args: + y_true: Observed values with shape (num_samples,) or (num_samples, num_quantiles). + y_pred: Predicted quantiles with shape (num_samples, num_quantiles). + Each column corresponds to predictions for a specific quantile level. + quantiles: Quantile levels with shape (num_quantiles,). + Must be sorted in ascending order and contain values in [0, 1]. + sample_weight: Optional weights for each sample with shape (num_samples,). + + Returns: + The weighted average Pinball Loss across all samples and quantiles. Lower values indicate better + forecast quality. + """ + # Resize the predictions and targets. + y_pred = np.reshape(y_pred, [-1, len(quantiles)]) + n_rows = y_pred.shape[0] + y_true = np.reshape(y_true, [n_rows, -1]) + sample_weight = np.reshape(sample_weight, [n_rows, 1]) if sample_weight is not None else None + + # Extract quantile values into array for vectorized operations + quantile_values = np.array(quantiles) # shape: (n_quantiles,) + + # Compute errors for all quantiles at once + errors = y_true - y_pred # shape: (num_samples, num_quantiles) + + # Compute masks for all quantiles simultaneously + underpredict_mask = errors >= 0 # y_true >= y_pred, shape: (num_samples, num_quantiles) + overpredict_mask = errors < 0 # y_true < y_pred, shape: (num_samples, num_quantiles) + + # Vectorized pinball loss computation using broadcasting + # quantiles broadcasts from (num_quantiles,) to (num_samples, num_quantiles) + loss = quantiles * underpredict_mask * errors - (1 - quantile_values) * overpredict_mask * errors + + # Apply sample weights if provided + if sample_weight is not None: + sample_weight = np.asarray(sample_weight).reshape(-1, 1) # shape: (num_samples, 1) + loss *= sample_weight + total_weight = sample_weight.sum() * len(quantiles) + else: + total_weight = loss.size + + # Return mean loss across all samples and quantiles + return float(loss.sum() / total_weight) diff --git a/packages/openstef-beam/tests/unit/backtesting/test_backtest_event_generator.py b/packages/openstef-beam/tests/unit/backtesting/test_backtest_event_generator.py index 3878c1518..7d86f4004 100644 --- a/packages/openstef-beam/tests/unit/backtesting/test_backtest_event_generator.py +++ b/packages/openstef-beam/tests/unit/backtesting/test_backtest_event_generator.py @@ -17,8 +17,8 @@ def config() -> BacktestForecasterConfig: return BacktestForecasterConfig( requires_training=True, - horizon_length=timedelta(hours=24), - horizon_min_length=timedelta(hours=6), + predict_length=timedelta(hours=24), + predict_min_length=timedelta(hours=6), predict_context_length=timedelta(hours=12), predict_context_min_coverage=0.5, training_context_length=timedelta(hours=24), @@ -122,8 +122,8 @@ def test_iterate_without_training_only_predicts(hourly_index: pd.DatetimeIndex): # Arrange config = BacktestForecasterConfig( requires_training=False, - horizon_length=timedelta(hours=24), - horizon_min_length=timedelta(hours=6), + predict_length=timedelta(hours=24), + predict_min_length=timedelta(hours=6), predict_context_length=timedelta(hours=12), predict_context_min_coverage=0.8, training_context_length=timedelta(hours=24), @@ -165,8 +165,8 @@ def test_iterate_returns_empty_when_insufficient_time(): # Arrange config = BacktestForecasterConfig( requires_training=True, - horizon_length=timedelta(hours=24), - horizon_min_length=timedelta(hours=6), + predict_length=timedelta(hours=24), + predict_min_length=timedelta(hours=6), predict_context_length=timedelta(hours=1), predict_context_min_coverage=0.8, training_context_length=timedelta(days=10), # Impossibly long @@ -195,8 +195,8 @@ def test_insufficient_coverage_filters_out_events(): sparse_index = pd.DatetimeIndex(["2025-01-01T12:00:00", "2025-01-01T18:00:00"]) config = BacktestForecasterConfig( requires_training=False, - horizon_length=timedelta(hours=24), - horizon_min_length=timedelta(hours=1), + predict_length=timedelta(hours=24), + predict_min_length=timedelta(hours=1), predict_context_length=timedelta(hours=6), predict_context_min_coverage=0.9, # High requirement training_context_length=timedelta(hours=24), diff --git a/packages/openstef-beam/tests/unit/backtesting/test_backtest_pipeline.py b/packages/openstef-beam/tests/unit/backtesting/test_backtest_pipeline.py index b8eda17d3..8fbea70b0 100644 --- a/packages/openstef-beam/tests/unit/backtesting/test_backtest_pipeline.py +++ b/packages/openstef-beam/tests/unit/backtesting/test_backtest_pipeline.py @@ -117,8 +117,8 @@ def test_run_training_scenarios( config = BacktestConfig(predict_interval=timedelta(hours=6), train_interval=timedelta(hours=12)) forecaster_config = BacktestForecasterConfig( requires_training=requires_training, - horizon_length=timedelta(hours=24), - horizon_min_length=timedelta(hours=1), + predict_length=timedelta(hours=24), + predict_min_length=timedelta(hours=1), predict_context_length=timedelta(hours=6), predict_context_min_coverage=0.5, training_context_length=timedelta(hours=12), @@ -173,8 +173,8 @@ def test_run_date_boundary_handling( mock_forecaster = MockForecaster( BacktestForecasterConfig( requires_training=True, - horizon_length=timedelta(hours=24), - horizon_min_length=timedelta(hours=1), + predict_length=timedelta(hours=24), + predict_min_length=timedelta(hours=1), predict_context_length=timedelta(hours=6), predict_context_min_coverage=0.5, training_context_length=timedelta(hours=12), @@ -211,8 +211,8 @@ def test_run_output_validation_and_concatenation( mock_forecaster = MockForecaster( BacktestForecasterConfig( requires_training=True, - horizon_length=timedelta(hours=24), - horizon_min_length=timedelta(hours=1), + predict_length=timedelta(hours=24), + predict_min_length=timedelta(hours=1), predict_context_length=timedelta(hours=6), predict_context_min_coverage=0.5, training_context_length=timedelta(hours=12), @@ -272,8 +272,8 @@ def test_run_handles_none_predictions(datasets: tuple[VersionedTimeSeriesDataset mock_forecaster = MockForecaster( BacktestForecasterConfig( requires_training=True, - horizon_length=timedelta(hours=24), - horizon_min_length=timedelta(hours=1), + predict_length=timedelta(hours=24), + predict_min_length=timedelta(hours=1), predict_context_length=timedelta(hours=6), predict_context_min_coverage=0.5, training_context_length=timedelta(hours=12), @@ -335,8 +335,8 @@ def test_run_edge_cases( base_model_config = BacktestForecasterConfig( requires_training=True, - horizon_length=timedelta(hours=24), - horizon_min_length=timedelta(hours=1), + predict_length=timedelta(hours=24), + predict_min_length=timedelta(hours=1), predict_context_length=timedelta(hours=1), predict_context_min_coverage=0.8, training_context_length=timedelta(hours=12), diff --git a/packages/openstef-beam/tests/unit/backtesting/test_batch_prediction.py b/packages/openstef-beam/tests/unit/backtesting/test_batch_prediction.py index 692d417d1..e7ff1ddd4 100644 --- a/packages/openstef-beam/tests/unit/backtesting/test_batch_prediction.py +++ b/packages/openstef-beam/tests/unit/backtesting/test_batch_prediction.py @@ -25,8 +25,8 @@ class MockModelConfig(BacktestForecasterConfig): requires_training: bool = True batch_size: int | None = 4 - horizon_length: timedelta = timedelta(hours=6) - horizon_min_length: timedelta = timedelta(hours=1) + predict_length: timedelta = timedelta(hours=6) + predict_min_length: timedelta = timedelta(hours=1) predict_context_length: timedelta = timedelta(hours=1) predict_context_min_coverage: float = 0.8 diff --git a/packages/openstef-beam/tests/unit/benchmarking/test_benchmark_pipeline.py b/packages/openstef-beam/tests/unit/benchmarking/test_benchmark_pipeline.py index bb060a222..1f18c83b8 100644 --- a/packages/openstef-beam/tests/unit/benchmarking/test_benchmark_pipeline.py +++ b/packages/openstef-beam/tests/unit/benchmarking/test_benchmark_pipeline.py @@ -163,8 +163,8 @@ def forecaster_config() -> BacktestForecasterConfig: """Create a realistic forecaster config with all required fields.""" return BacktestForecasterConfig( requires_training=True, - horizon_length=timedelta(hours=24), - horizon_min_length=timedelta(hours=1), + predict_length=timedelta(hours=24), + predict_min_length=timedelta(hours=1), predict_context_length=timedelta(hours=48), predict_context_min_coverage=0.8, training_context_length=timedelta(days=14), diff --git a/packages/openstef-beam/tests/unit/benchmarking/test_target_provider.py b/packages/openstef-beam/tests/unit/benchmarking/test_target_provider.py index d40d4ba6f..7ee94db89 100644 --- a/packages/openstef-beam/tests/unit/benchmarking/test_target_provider.py +++ b/packages/openstef-beam/tests/unit/benchmarking/test_target_provider.py @@ -11,7 +11,10 @@ from pydantic import ValidationError from openstef_beam.benchmarking.models import BenchmarkTarget -from openstef_beam.benchmarking.target_provider import SimpleTargetProvider +from openstef_beam.benchmarking.target_provider import ( + SimpleTargetProvider, + filter_away_flatline_chunks, +) from openstef_core.datasets import VersionedTimeSeriesDataset @@ -110,3 +113,46 @@ def get_prices(self) -> VersionedTimeSeriesDataset: assert isinstance(result, VersionedTimeSeriesDataset) assert {"temp", "prof", "price"} <= set(result.feature_names) assert len(result.index) == 3 + + +@pytest.mark.parametrize( + ( + "values", + "min_length", + "threshold", + "expected", + ), + [ + pytest.param( + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 6.0, 7.0, 8.0], + 3, + 0.1, + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, float("nan"), float("nan"), float("nan"), 7.0, 8.0], + id="flatline-chunk-masked", + ), + pytest.param( + [1.0, 2.0, 3.0, 4.0], + 2, + 0.1, + [1.0, 2.0, 3.0, 4.0], + id="no-flatline", + ), + ], +) +def test_filter_away_flatline_chunks_expected_series( + values: list[float], + min_length: int, + threshold: float, + expected: list[float], +) -> None: + """Compare the filtered output with the expected flatline suppression result.""" + # Arrange + index = pd.date_range("2023-01-01", periods=len(values), freq="h") + series = pd.Series(values, index=index) + + # Act + filtered = filter_away_flatline_chunks(series, min_length=min_length, threshold=threshold) + + # Assert: the filtered series matches the expected output + expected_series = pd.Series(expected, index=index) + pd.testing.assert_series_equal(filtered, expected_series) diff --git a/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py b/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py index 1678bc189..66960c087 100644 --- a/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py +++ b/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py @@ -129,6 +129,18 @@ def test_rmae_sample_weights_behavior( assert abs(result - expected) < tol, f"Expected {expected} but got {result} for weights={sample_weights}" +def test_rmae_returns_nan_when_inputs_empty() -> None: + # Arrange + y_true_arr = np.array([]) + y_pred_arr = np.array([]) + + # Act + result = rmae(y_true_arr, y_pred_arr) + + # Assert + assert np.isnan(result) + + @pytest.mark.parametrize( ("y_true", "y_pred", "expected", "tol"), [ @@ -149,6 +161,18 @@ def test_mape_various(y_true: Sequence[float], y_pred: Sequence[float], expected assert abs(result - expected) < tol, f"Expected {expected} but got {result}" +def test_mape_returns_nan_when_inputs_empty() -> None: + # Arrange + y_true_arr = np.array([]) + y_pred_arr = np.array([]) + + # Act + result = mape(y_true_arr, y_pred_arr) + + # Assert + assert np.isnan(result) + + @pytest.mark.parametrize( ("y_true", "y_pred"), [ @@ -378,6 +402,17 @@ def test_riqd_various( assert abs(result - expected) < tol, f"Expected {expected} but got {result}" +def test_riqd_returns_nan_when_inputs_empty() -> None: + # Arrange + empty_arr = np.array([]) + + # Act + result = riqd(empty_arr, empty_arr, empty_arr) + + # Assert + assert np.isnan(result) + + @pytest.mark.parametrize( ( "y_true", @@ -499,3 +534,15 @@ def test_relative_pinball_loss_various( assert np.isnan(result), f"Expected NaN but got {result}" else: assert abs(result - expected) < tol, f"Expected {expected} but got {result}" + + +def test_relative_pinball_loss_returns_nan_when_inputs_empty() -> None: + # Arrange + y_true_arr = np.array([]) + y_pred_arr = np.array([]) + + # Act + result = relative_pinball_loss(y_true_arr, y_pred_arr, quantile=0.5) + + # Assert + assert np.isnan(result) diff --git a/packages/openstef-beam/tests/unit/metrics/test_metrics_probabilistic.py b/packages/openstef-beam/tests/unit/metrics/test_metrics_probabilistic.py index 3b5dcf8e9..a05bfbd7f 100644 --- a/packages/openstef-beam/tests/unit/metrics/test_metrics_probabilistic.py +++ b/packages/openstef-beam/tests/unit/metrics/test_metrics_probabilistic.py @@ -6,8 +6,11 @@ import numpy as np import pytest +from sklearn.metrics import mean_pinball_loss as sk_mean_pinball_loss from openstef_beam.metrics import crps, mean_absolute_calibration_error, rcrps +from openstef_beam.metrics.metrics_probabilistic import mean_pinball_loss +from openstef_core.types import Q # CRPS Test Cases @@ -151,3 +154,33 @@ def test_mean_absolute_calibration_error() -> None: assert isinstance(result, float) assert result == (0.4 + 0.4) / 3 # observed probabilities are 0.5, 0.5, 0.5 vs 0.1, 0.5, 0.9 quantiles + + +def test_mean_pinball_loss_matches_sklearn_average_when_multi_quantile(): + # Arrange + rng = np.random.default_rng(seed=42) + n = 40 + y_true = rng.normal(loc=1.0, scale=2.0, size=n) + quantiles = [Q(0.1), Q(0.5), Q(0.9)] + # Simulate predictions with different biases per quantile; shape (n, q) + y_pred = np.stack( + [ + y_true + rng.normal(0, 0.7, size=n) - 0.4, # q=0.1 + y_true + rng.normal(0, 0.5, size=n) + 0.0, # q=0.5 + y_true + rng.normal(0, 0.7, size=n) + 0.4, # q=0.9 + ], + axis=1, + ) + + # Act + actual = mean_pinball_loss(y_true=y_true, y_pred=y_pred, quantiles=quantiles) + expected = np.mean( + np.array( + [sk_mean_pinball_loss(y_true, y_pred[:, i], alpha=float(quantile)) for i, quantile in enumerate(quantiles)], + dtype=float, + ) + ) + + # Assert + # Multi-quantile mean should equal average of sklearn per-quantile losses + assert np.allclose(actual, expected, rtol=1e-12, atol=1e-12) diff --git a/packages/openstef-core/src/openstef_core/exceptions.py b/packages/openstef-core/src/openstef_core/exceptions.py index 3edf70974..061f0b824 100644 --- a/packages/openstef-core/src/openstef_core/exceptions.py +++ b/packages/openstef-core/src/openstef_core/exceptions.py @@ -104,6 +104,10 @@ class PredictError(Exception): """Exception raised for errors during forecasting operations.""" +class InputValidationError(ValueError): + """Exception raised for input validation errors.""" + + class ModelLoadingError(Exception): """Exception raised when a model fails to load properly.""" diff --git a/packages/openstef-core/src/openstef_core/utils/multiprocessing.py b/packages/openstef-core/src/openstef_core/utils/multiprocessing.py index 0411be0ad..517d51bfe 100644 --- a/packages/openstef-core/src/openstef_core/utils/multiprocessing.py +++ b/packages/openstef-core/src/openstef_core/utils/multiprocessing.py @@ -10,11 +10,16 @@ """ import multiprocessing -import sys from collections.abc import Callable, Iterable +from typing import Literal -def run_parallel[T, R](process_fn: Callable[[T], R], items: Iterable[T], n_processes: int | None = None) -> list[R]: +def run_parallel[T, R]( + process_fn: Callable[[T], R], + items: Iterable[T], + n_processes: int | None = None, + mode: Literal["loky", "spawn", "fork"] = "loky", +) -> list[R]: """Execute a function in parallel across multiple processes. On macOS, explicitly uses fork context to avoid issues with the default @@ -28,6 +33,10 @@ def run_parallel[T, R](process_fn: Callable[[T], R], items: Iterable[T], n_proce items: Iterable of items to process. n_processes: Number of processes to use. If None or <= 1, runs sequentially. Typically set to number of CPU cores or logical cores. + mode: Multiprocessing start method. 'loky' is recommeneded for robust + ml use-cases. 'fork' is more efficient on macOS, while 'spawn' is + default on Windows/Linux. Xgboost seems to have bugs + when used with 'fork'. Returns: List of results from applying process_fn to each item, in the same order @@ -48,23 +57,21 @@ def run_parallel[T, R](process_fn: Callable[[T], R], items: Iterable[T], n_proce >>> # Empty input handling >>> run_parallel(square, [], n_processes=1) [] - - Note: - macOS Implementation Details: - - Uses fork context instead of spawn to avoid serialization overhead - - Fork preserves parent memory space, including imported modules and variables - - More efficient for ML models and large data structures - - On other platforms, uses the default context (usually spawn on Windows/Linux) """ if n_processes is None or n_processes <= 1: # If only one process is requested, run the function sequentially return [process_fn(item) for item in items] + if mode == "loky": + from joblib import Parallel, delayed # pyright: ignore[reportUnknownVariableType] # noqa: PLC0415 + + # Use joblib with loky backend for robust process management + return Parallel(n_jobs=n_processes, backend="loky")( # pyright: ignore[reportUnknownVariableType] + delayed(process_fn)(item) for item in items + ) # type: ignore + # Auto-configure for macOS - if sys.platform == "darwin": - context = multiprocessing.get_context("fork") - else: - context = multiprocessing.get_context() + context = multiprocessing.get_context(method=mode) with context.Pool(processes=n_processes) as pool: return pool.map(process_fn, items) diff --git a/packages/openstef-core/tests/unit/utils/test_multiprocessing.py b/packages/openstef-core/tests/unit/utils/test_multiprocessing.py index 7eb20d38b..fead7f859 100644 --- a/packages/openstef-core/tests/unit/utils/test_multiprocessing.py +++ b/packages/openstef-core/tests/unit/utils/test_multiprocessing.py @@ -5,7 +5,13 @@ """Tests for multiprocessing utilities.""" # Fix for macOS multiprocessing hanging in tests -from openstef_core.utils import run_parallel +from datetime import UTC, datetime, timedelta +from functools import partial +from typing import Literal + +import pytest + +from openstef_core.utils import align_datetime, run_parallel def double_number(n: int) -> int: @@ -25,13 +31,21 @@ def test_run_parallel_single_process(): assert result == expected -def test_run_parallel_multiple_processes(): - # Arrange - items = [1, 2, 3, 4] - expected = [2, 4, 6, 8] +@pytest.mark.parametrize( + ("mode"), + [ + pytest.param("loky", id="loky"), + pytest.param("fork", id="fork"), + ], +) +def test_run_parallel_multiple_processes(mode: Literal["loky", "fork"]): + # Arrange - note: we can't use double number since it gives import issues with loki, since testing is not a real module + items = [datetime(year=2025, month=1, day=i, hour=i, tzinfo=UTC) for i in range(1, 5)] + expected = [datetime(year=2025, month=1, day=i + 1, hour=0, tzinfo=UTC) for i in range(1, 5)] # Act - result = run_parallel(double_number, items, n_processes=2) + function = partial(align_datetime, interval=timedelta(days=1)) + result = run_parallel(process_fn=function, items=items, n_processes=2, mode=mode) # Assert assert result == expected diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index 0bc4a1c2f..cbda4b2df 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -11,7 +11,6 @@ to predict values outside the range of the training data. """ -from functools import partial from typing import Literal, override import numpy as np @@ -22,11 +21,16 @@ from openstef_core.datasets.mixins import LeadTime from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset -from openstef_core.exceptions import MissingExtraError, NotFittedError +from openstef_core.exceptions import InputValidationError, MissingExtraError, NotFittedError from openstef_core.mixins.predictor import HyperParams from openstef_models.explainability.mixins import ExplainableForecaster from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig -from openstef_models.utils.loss_functions import OBJECTIVE_MAP, ObjectiveFunctionType, xgb_prepare_target_for_objective +from openstef_models.utils.evaluation_functions import EvaluationFunctionType, get_evaluation_function +from openstef_models.utils.loss_functions import ( + ObjectiveFunctionType, + get_objective_function, + xgb_prepare_target_for_objective, +) try: import xgboost as xgb @@ -52,8 +56,14 @@ class GBLinearHyperParams(HyperParams): "rounds.", ) objective: ObjectiveFunctionType | Literal["reg:quantileerror"] = Field( - default="pinball_loss", - description="Objective function for training. 'pinball_loss' is recommended for probabilistic forecasting.", + default="reg:quantileerror", + description="Objective function for training. 'reg:quantileerror' is recommended " + "for probabilistic forecasting.", + ) + evaluation_metric: EvaluationFunctionType = Field( + default="mean_pinball_loss", + description="Metric used for evaluation during training. Defaults to 'mean_pinball_loss' " + "for quantile regression.", ) # Regularization @@ -61,7 +71,7 @@ class GBLinearHyperParams(HyperParams): default=0.0001, description="L1 regularization on weights. Higher values increase regularization. Range: [0,∞]" ) reg_lambda: float = Field( - default=0.0, description="L2 regularization on weights. Higher values increase regularization. Range: [0,∞]" + default=0.1, description="L2 regularization on weights. Higher values increase regularization. Range: [0,∞]" ) # Feature selection @@ -193,15 +203,9 @@ def __init__(self, config: GBLinearForecasterConfig) -> None: """ self._config = config or GBLinearForecasterConfig() - if self.config.hyperparams.objective == "reg:quantileerror": - objective = "reg:quantileerror" - else: - objective = partial(OBJECTIVE_MAP[self._config.hyperparams.objective], quantiles=self._config.quantiles) - self._gblinear_model = xgb.XGBRegressor( booster="gblinear", # Core parameters for forecasting - objective=objective, n_estimators=self._config.hyperparams.n_steps, learning_rate=self._config.hyperparams.learning_rate, early_stopping_rounds=self._config.hyperparams.early_stopping_rounds, @@ -213,6 +217,16 @@ def __init__(self, config: GBLinearForecasterConfig) -> None: updater=self._config.hyperparams.updater, quantile_alpha=[float(q) for q in self._config.quantiles], top_k=self._config.hyperparams.top_k if self._config.hyperparams.feature_selector == "thrifty" else None, + # Objective + objective=get_objective_function( + function_type=self._config.hyperparams.objective, quantiles=self._config.quantiles + ) + if self._config.hyperparams.objective != "reg:quantileerror" + else "reg:quantileerror", + eval_metric=get_evaluation_function( + function_type=self._config.hyperparams.evaluation_metric, quantiles=self._config.quantiles + ), + disable_default_eval_metric=True, ) self._target_scaler = StandardScaler() @@ -233,7 +247,6 @@ def is_fitted(self) -> bool: def _prepare_fit_input(self, data: ForecastInputDataset) -> tuple[pd.DataFrame, np.ndarray, pd.Series]: input_data: pd.DataFrame = data.input_data() - # Scale the target variable target: np.ndarray = np.asarray(data.target_series.values) target = self._target_scaler.transform(target.reshape(-1, 1)).flatten() @@ -251,9 +264,15 @@ def _prepare_fit_input(self, data: ForecastInputDataset) -> tuple[pd.DataFrame, @override def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: - # Fit the target scaler - target: np.ndarray = np.asarray(data.target_series.values) - self._target_scaler.fit(target.reshape(-1, 1)) + # Data checks + if data.data.isna().any().any(): + raise InputValidationError("There are nan values in the input data. Use imputation transform to fix them.") + + if len(data.data) == 0: + raise InputValidationError("The input data is empty after dropping NaN values.") + + # Fit the scalers + self._target_scaler.fit(data.target_series.to_frame()) # Prepare training data input_data, target, sample_weight = self._prepare_fit_input(data) @@ -281,14 +300,19 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: if not self.is_fitted: raise NotFittedError(self.__class__.__name__) + # Data checks + if data.input_data().isna().any().any(): + raise InputValidationError("There are nan values in the input data. Use imputation transform to fix them.") + # Get input features for prediction input_data: pd.DataFrame = data.input_data(start=data.forecast_start) # Generate predictions - predictions_array: np.ndarray = self._gblinear_model.predict(input_data) + predictions_array: np.ndarray = self._gblinear_model.predict(input_data).reshape(-1, len(self.config.quantiles)) # Inverse transform the scaled predictions - predictions_array = self._target_scaler.inverse_transform(predictions_array) + if len(predictions_array) > 0: + predictions_array = self._target_scaler.inverse_transform(predictions_array) # Construct DataFrame with appropriate quantile columns predictions = pd.DataFrame( diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py index 2a35fb987..94571a7d0 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py @@ -9,7 +9,6 @@ comprehensive hyperparameter control for production forecasting workflows. """ -from functools import partial from typing import Literal, override import numpy as np @@ -22,7 +21,12 @@ from openstef_core.mixins import HyperParams from openstef_models.explainability.mixins import ExplainableForecaster from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig -from openstef_models.utils.loss_functions import OBJECTIVE_MAP, ObjectiveFunctionType, xgb_prepare_target_for_objective +from openstef_models.utils.evaluation_functions import EvaluationFunctionType, get_evaluation_function +from openstef_models.utils.loss_functions import ( + ObjectiveFunctionType, + get_objective_function, + xgb_prepare_target_for_objective, +) try: import xgboost as xgb @@ -61,7 +65,7 @@ class XGBoostHyperParams(HyperParams): # Core Tree Boosting Parameters n_estimators: int = Field( - default=500, + default=100, description="Number of boosting rounds/trees to fit. Higher values may improve performance but " "increase training time and risk overfitting.", ) @@ -91,6 +95,11 @@ class XGBoostHyperParams(HyperParams): default="pinball_loss", description="Objective function for training. 'pinball_loss' is recommended for probabilistic forecasting.", ) + evaluation_metric: EvaluationFunctionType = Field( + default="mean_pinball_loss", + description="Metric used for evaluation during training. Defaults to 'mean_pinball_loss' " + "for quantile regression.", + ) # Regularization reg_alpha: float = Field( @@ -149,10 +158,10 @@ class XGBoostHyperParams(HyperParams): # General Parameters random_state: int | None = Field( - default=None, alias="seed", description="Random seed for reproducibility. Controls tree structure randomness." + default=42, description="Random seed for reproducibility. Controls tree structure randomness." ) early_stopping_rounds: int | None = Field( - default=10, + default=None, description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", ) use_target_scaling: bool = Field( @@ -201,7 +210,7 @@ class XGBoostForecasterConfig(ForecasterConfig): n_jobs: int = Field( default=1, description="Number of parallel threads for tree construction. -1 uses all available cores." ) - verbosity: Literal[0, 1, 2, 3] = Field( + verbosity: Literal[0, 1, 2, 3, True] = Field( default=1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" ) @@ -279,8 +288,6 @@ def __init__(self, config: XGBoostForecasterConfig) -> None: """ self._config = config - objective = partial(OBJECTIVE_MAP[self._config.hyperparams.objective], quantiles=self._config.quantiles) - self._xgboost_model = xgb.XGBRegressor( # Multi-output configuration multi_strategy="one_output_per_tree", @@ -314,7 +321,13 @@ def __init__(self, config: XGBoostForecasterConfig) -> None: # Early stopping handled in fit method early_stopping_rounds=self._config.hyperparams.early_stopping_rounds, # Objective - objective=objective, + objective=get_objective_function( + function_type=self._config.hyperparams.objective, quantiles=self._config.quantiles + ), + eval_metric=get_evaluation_function( + function_type=self._config.hyperparams.evaluation_metric, quantiles=self._config.quantiles + ), + disable_default_eval_metric=True, ) self._target_scaler = StandardScaler() if self._config.hyperparams.use_target_scaling else None @@ -389,10 +402,10 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: input_data: pd.DataFrame = data.input_data(start=data.forecast_start) # Generate predictions - predictions_array: np.ndarray = self._xgboost_model.predict(input_data) + predictions_array: np.ndarray = self._xgboost_model.predict(input_data).reshape(-1, len(self.config.quantiles)) # Inverse transform the scaled predictions - if self._target_scaler is not None: + if self._target_scaler is not None and len(predictions_array) > 0: predictions_array = self._target_scaler.inverse_transform(predictions_array) # Construct DataFrame with appropriate quantile columns diff --git a/packages/openstef-models/src/openstef_models/models/forecasting_model.py b/packages/openstef-models/src/openstef_models/models/forecasting_model.py index 9197e5a10..9d9e47498 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting_model.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting_model.py @@ -129,8 +129,8 @@ class ForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]) ) cutoff_history: timedelta = Field( default=timedelta(days=0), - description="Amount of historical data to exclude from training due to incomplete features from lag-based " - "preprocessing. When using lag transforms (e.g., lag-14), the first N days contain NaN values. " + description="Amount of historical data to exclude from training and prediction due to incomplete features " + "from lag-based preprocessing. When using lag transforms (e.g., lag-14), the first N days contain NaN values. " "Set this to match your maximum lag duration (e.g., timedelta(days=14)). " "Default of 0 assumes no invalid rows are created by preprocessing.", ) diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 999ed701f..263965b06 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -35,17 +35,8 @@ from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster from openstef_models.transforms.energy_domain import WindPowerFeatureAdder -from openstef_models.transforms.general import ( - Clipper, - EmptyFeatureRemover, - Imputer, - SampleWeighter, - Scaler, -) -from openstef_models.transforms.postprocessing import ( - ConfidenceIntervalApplicator, - QuantileSorter, -) +from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, Imputer, NaNDropper, SampleWeighter, Scaler +from openstef_models.transforms.postprocessing import ConfidenceIntervalApplicator, QuantileSorter from openstef_models.transforms.time_domain import ( CyclicFeaturesAdder, DatetimeFeaturesAdder, @@ -53,21 +44,13 @@ RollingAggregatesAdder, ) from openstef_models.transforms.time_domain.lags_adder import LagsAdder -from openstef_models.transforms.time_domain.rolling_aggregates_adder import ( - AggregationFunction, -) -from openstef_models.transforms.validation import ( - CompletenessChecker, - FlatlineChecker, - InputConsistencyChecker, -) +from openstef_models.transforms.time_domain.rolling_aggregates_adder import AggregationFunction +from openstef_models.transforms.validation import CompletenessChecker, FlatlineChecker, InputConsistencyChecker from openstef_models.transforms.weather_domain import ( + AtmosphereDerivedFeaturesAdder, DaylightFeatureAdder, RadiationDerivedFeaturesAdder, ) -from openstef_models.transforms.weather_domain.atmosphere_derived_features_adder import ( - AtmosphereDerivedFeaturesAdder, -) from openstef_models.utils.data_split import DataSplitter from openstef_models.utils.feature_selection import Exclude, FeatureSelection, Include from openstef_models.workflows.custom_forecasting_workflow import ( @@ -181,6 +164,15 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob default=timedelta(days=14), description="Amount of historical data available at prediction time.", ) + cutoff_history: timedelta = Field( + default=timedelta(days=0), + description="Amount of historical data to exclude from training and prediction due to incomplete features " + "from lag-based preprocessing. When using lag transforms (e.g., lag-14), the first N days contain NaN values. " + "Set this to match your maximum lag duration (e.g., timedelta(days=14)). " + "Default of 0 assumes no invalid rows are created by preprocessing. " + "Note: should be same as predict_history if you are using lags. We default to disabled to keep the same " + "behaviour as openstef 3.0.", + ) # Feature engineering and validation completeness_threshold: float = Field( @@ -203,6 +195,11 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob default=FeatureSelection(include=None, exclude=None), description="Feature selection for which features to clip.", ) + sample_weight_scale_percentile: int = Field( + default=95, + description="Percentile of target values used as scaling reference. " + "Values are normalized relative to this percentile before weighting.", + ) sample_weight_exponent: float = Field( default_factory=lambda data: 1.0 if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "hybrid", "xgboost"} @@ -218,7 +215,13 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob # Data splitting strategy data_splitter: DataSplitter = Field( - default_factory=DataSplitter, + default=DataSplitter( + # Copied from OpenSTEF3 pipeline defaults + val_fraction=0.15, + test_fraction=0.0, + stratification_fraction=0.15, + min_days_for_stratification=4, + ), description="Configuration for splitting data into training, validation, and test sets.", ) @@ -256,6 +259,10 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob description="Penalty to apply to the old model's metric to bias selection towards newer models.", ) + verbosity: Literal[0, 1, 2, 3, True] = Field( + default=1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + ) + # Metadata tags: dict[str, str] = Field( default_factory=dict, @@ -289,14 +296,14 @@ def create_forecasting_workflow( error_on_flatliner=False, ), CompletenessChecker(completeness_threshold=config.completeness_threshold), - EmptyFeatureRemover(), ] feature_adders = [ LagsAdder( history_available=config.predict_history, horizons=config.horizons, - add_trivial_lags=True, + add_trivial_lags=config.model != "gblinear", # GBLinear uses only 7day lag. target_column=config.target_column, + custom_lags=[timedelta(days=7)] if config.model == "gblinear" else [], ), WindPowerFeatureAdder( windspeed_reference_column=config.wind_speed_column, @@ -317,6 +324,7 @@ def create_forecasting_workflow( RollingAggregatesAdder( feature=config.target_column, aggregation_functions=config.rolling_aggregate_features, + horizons=config.horizons, ), ] feature_standardizers = [ @@ -329,7 +337,9 @@ def create_forecasting_workflow( target_column=config.target_column, weight_exponent=config.sample_weight_exponent, weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, ), + EmptyFeatureRemover(), ] if config.model == "xgboost": @@ -345,6 +355,7 @@ def create_forecasting_workflow( quantiles=config.quantiles, horizons=config.horizons, hyperparams=config.xgboost_hyperparams, + verbosity=config.verbosity, ) ) postprocessing = [QuantileSorter()] @@ -383,16 +394,24 @@ def create_forecasting_workflow( elif config.model == "gblinear": preprocessing = [ *checks, - Imputer(selection=Exclude(config.target_column), imputation_strategy="mean"), *feature_adders, *feature_standardizers, + Imputer( + selection=Exclude(config.target_column), + imputation_strategy="mean", + fill_future_values=Include(config.energy_price_column), + ), + NaNDropper( + selection=Exclude(config.target_column), + ), ] forecaster = GBLinearForecaster( config=GBLinearForecaster.Config( quantiles=config.quantiles, horizons=config.horizons, hyperparams=config.gblinear_hyperparams, - ) + verbosity=config.verbosity, + ), ) postprocessing = [QuantileSorter()] elif config.model == "flatliner": @@ -451,7 +470,7 @@ def create_forecasting_workflow( postprocessing=TransformPipeline(transforms=postprocessing), target_column=config.target_column, data_splitter=config.data_splitter, - cutoff_history=config.predict_history, + cutoff_history=config.cutoff_history, # Evaluation evaluation_metrics=config.evaluation_metrics, # Other diff --git a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py index 32afe179b..79e59f58b 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py @@ -14,6 +14,7 @@ EmptyFeatureRemover, ) from openstef_models.transforms.general.imputer import Imputer +from openstef_models.transforms.general.nan_dropper import NaNDropper from openstef_models.transforms.general.sample_weighter import SampleWeighter from openstef_models.transforms.general.scaler import Scaler @@ -22,6 +23,7 @@ "DimensionalityReducer", "EmptyFeatureRemover", "Imputer", + "NaNDropper", "SampleWeighter", "Scaler", ] diff --git a/packages/openstef-models/src/openstef_models/transforms/general/imputer.py b/packages/openstef-models/src/openstef_models/transforms/general/imputer.py index af2157dba..2a08dcf05 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/imputer.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/imputer.py @@ -130,9 +130,9 @@ class Imputer(BaseConfig, TimeSeriesTransform): >>> result_iterative = transform_iterative.transform(dataset) >>> result_iterative.data["temperature"].isna().sum() == 0 # Temperature NaNs filled np.True_ - >>> np.isnan(result_iterative.data["radiation"][1]) # Radiation first NaN replaced + >>> np.isnan(result_iterative.data["radiation"].iloc[1]) # Radiation first NaN replaced np.False_ - >>> np.isnan(result_iterative.data["radiation"][3]) # Check if trailing NaN is preserved + >>> np.isnan(result_iterative.data["radiation"].iloc[3]) # Check if trailing NaN is preserved np.True_ >>> result_iterative.data["wind_speed"].isna().sum() == 2 # Wind speed NaNs preserved np.True_ @@ -172,6 +172,13 @@ class Imputer(BaseConfig, TimeSeriesTransform): "Features to impute. If strategy is 'iterative', these features are also used as predictors for imputation." ), ) + fill_future_values: FeatureSelection = Field( + default=FeatureSelection.NONE, + description=( + "Features for which to fill future missing values. " + "This transform does not fill future missing values by default to preserve time series integrity." + ), + ) _imputer: SimpleImputer | IterativeImputer = PrivateAttr() _is_fitted: bool = PrivateAttr(default=False) @@ -252,7 +259,9 @@ def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: data_transformed = cast(pd.DataFrame, self._imputer.transform(data_subset)) # Set imputed trailing NaNs back to NaN since they cannot be reasonably imputed - for col in data_transformed.columns: + fill_future_features = self.fill_future_values.resolve(features) + no_fill_future_features = set(features) - set(fill_future_features) + for col in no_fill_future_features: last_valid = data_subset[col].last_valid_index() data_transformed.loc[data_transformed.index > (last_valid or data_transformed.index[0]), col] = np.nan diff --git a/packages/openstef-models/src/openstef_models/transforms/general/nan_dropper.py b/packages/openstef-models/src/openstef_models/transforms/general/nan_dropper.py new file mode 100644 index 000000000..0d8cef10c --- /dev/null +++ b/packages/openstef-models/src/openstef_models/transforms/general/nan_dropper.py @@ -0,0 +1,89 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Transform for dropping rows containing NaN values. + +This module provides functionality to drop rows containing NaN values in selected +columns, useful for data cleaning and ensuring complete cases for model training. +""" + +import logging +from typing import override + +from pydantic import Field, PrivateAttr + +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import TimeSeriesDataset +from openstef_core.transforms import TimeSeriesTransform +from openstef_models.utils.feature_selection import FeatureSelection + + +class NaNDropper(BaseConfig, TimeSeriesTransform): + """Transform that drops rows containing NaN values in selected columns. + + This transform removes any row that has at least one NaN value in the + specified columns. It operates statelessly - no fitting is required. + + Example: + >>> import pandas as pd + >>> import numpy as np + >>> from datetime import timedelta + >>> from openstef_core.datasets import TimeSeriesDataset + >>> from openstef_models.transforms.general import NaNDropper + >>> + >>> # Create sample dataset with NaN values + >>> data = pd.DataFrame({ + ... 'load': [100.0, np.nan, 110.0, 130.0], + ... 'temperature': [20.0, 22.0, np.nan, 23.0], + ... 'humidity': [60.0, 65.0, 70.0, 75.0] + ... }, index=pd.date_range('2025-01-01', periods=4, freq='1h')) + >>> dataset = TimeSeriesDataset(data, timedelta(hours=1)) + >>> + >>> # Drop rows with NaN in load or temperature + >>> dropper = NaNDropper(selection=FeatureSelection(include=['load', 'temperature'])) + >>> transformed = dropper.transform(dataset) + >>> len(transformed.data) + 2 + >>> transformed.data['load'].tolist() + [100.0, 130.0] + + """ + + selection: FeatureSelection = Field( + default=FeatureSelection.ALL, + description="Features to check for NaN values. Rows with NaN in any selected column are dropped.", + ) + warn_threshold: float = Field( + default=0.1, + ge=0.0, + le=1.0, + description="Log a warning if the fraction of dropped rows exceeds this threshold (0.0 to 1.0).", + ) + + _logger: logging.Logger = PrivateAttr(default_factory=lambda: logging.getLogger(__name__)) + + @override + def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: + features = self.selection.resolve(data.feature_names) + original_row_count = len(data.data) + + # Drop rows containing NaN in selected columns + transformed_data = data.data.dropna(subset=features) # pyright: ignore[reportUnknownMemberType] + dropped_count = original_row_count - len(transformed_data) + + # Log warning if substantial percentage of rows was dropped + if original_row_count > 0 and dropped_count / original_row_count > self.warn_threshold: + self._logger.warning( + "NaNDropper dropped %d of %d rows (%.1f%%) due to NaN values in columns %s", + dropped_count, + original_row_count, + dropped_count / original_row_count * 100, + features, + ) + + return data.copy_with(data=transformed_data, is_sorted=True) + + @override + def features_added(self) -> list[str]: + return [] diff --git a/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py b/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py index c5ac4e8bc..a4e95a1d8 100644 --- a/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py @@ -20,6 +20,7 @@ from openstef_core.datasets import TimeSeriesDataset from openstef_core.datasets.validation import validate_required_columns from openstef_core.transforms import TimeSeriesTransform +from openstef_core.types import LeadTime from openstef_core.utils import timedelta_to_isoformat type AggregationFunction = Literal["mean", "median", "max", "min"] @@ -51,7 +52,8 @@ class RollingAggregatesAdder(BaseConfig, TimeSeriesTransform): >>> transform = RollingAggregatesAdder( ... feature='load', ... rolling_window_size=timedelta(hours=2), - ... aggregation_functions=["mean", "max"] + ... aggregation_functions=["mean", "max"], + ... horizons=[LeadTime.from_string("PT36H")], ... ) >>> transformed_dataset = transform.transform(dataset) >>> result = transformed_dataset.data[['rolling_mean_load_PT2H', 'rolling_max_load_PT2H']] @@ -66,6 +68,10 @@ class RollingAggregatesAdder(BaseConfig, TimeSeriesTransform): feature: str = Field( description="Feature to compute rolling aggregates for.", ) + horizons: list[LeadTime] = Field( + description="List of forecast horizons.", + min_length=1, + ) rolling_window_size: timedelta = Field( default=timedelta(hours=24), description="Rolling window size for the aggregation.", @@ -80,8 +86,11 @@ class RollingAggregatesAdder(BaseConfig, TimeSeriesTransform): def _transform_pandas(self, df: pd.DataFrame) -> pd.DataFrame: rolling_df = cast( pd.DataFrame, - df[self.feature].rolling(window=self.rolling_window_size).agg(self.aggregation_functions), # type: ignore + df[self.feature].dropna().rolling(window=self.rolling_window_size).agg(self.aggregation_functions), # pyright: ignore[reportUnknownMemberType, reportCallIssue, reportArgumentType] ) + # Fill missing values with the last known value + rolling_df = rolling_df.reindex(df.index).ffill() + suffix = timedelta_to_isoformat(td=self.rolling_window_size) rolling_df = rolling_df.rename( columns={func: f"rolling_{func}_{self.feature}_{suffix}" for func in self.aggregation_functions} @@ -97,6 +106,12 @@ def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: ) return data + if len(self.horizons) > 1: + self._logger.warning( + "Multiple horizons for RollingAggregatesAdder is not yet supported. Returning original data." + ) + return data + validate_required_columns(df=data.data, required_columns=[self.feature]) return data.pipe_pandas(self._transform_pandas) diff --git a/packages/openstef-models/src/openstef_models/transforms/weather_domain/__init__.py b/packages/openstef-models/src/openstef_models/transforms/weather_domain/__init__.py index f9bd80d9c..f4f1e8fc7 100644 --- a/packages/openstef-models/src/openstef_models/transforms/weather_domain/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/weather_domain/__init__.py @@ -9,9 +9,12 @@ engineering for improved forecasting accuracy. """ +from openstef_models.transforms.weather_domain.atmosphere_derived_features_adder import ( + AtmosphereDerivedFeaturesAdder, +) from openstef_models.transforms.weather_domain.daylight_feature_adder import DaylightFeatureAdder from openstef_models.transforms.weather_domain.radiation_derived_features_adder import ( RadiationDerivedFeaturesAdder, ) -__all__ = ["DaylightFeatureAdder", "RadiationDerivedFeaturesAdder"] +__all__ = ["AtmosphereDerivedFeaturesAdder", "DaylightFeatureAdder", "RadiationDerivedFeaturesAdder"] diff --git a/packages/openstef-models/src/openstef_models/utils/data_split.py b/packages/openstef-models/src/openstef_models/utils/data_split.py index 4beea7cd7..908203fda 100644 --- a/packages/openstef-models/src/openstef_models/utils/data_split.py +++ b/packages/openstef-models/src/openstef_models/utils/data_split.py @@ -155,9 +155,9 @@ def stratified_train_test_split[T: TimeSeriesDataset]( max_days, min_days, other_days = _get_extreme_days(target_series=target_series, fraction=stratification_fraction) # Split each group proportionally between train and test - test_max_days, _ = _sample_dates_for_split(dates=max_days, test_fraction=test_fraction, rng=rng) - test_min_days, _ = _sample_dates_for_split(dates=min_days, test_fraction=test_fraction, rng=rng) - test_other_days, _ = _sample_dates_for_split(dates=other_days, test_fraction=test_fraction, rng=rng) + _, test_max_days = _sample_dates_for_split(dates=max_days, test_fraction=test_fraction, rng=rng) + _, test_min_days = _sample_dates_for_split(dates=min_days, test_fraction=test_fraction, rng=rng) + _, test_other_days = _sample_dates_for_split(dates=other_days, test_fraction=test_fraction, rng=rng) # Combine all train and test dates test_dates = cast(pd.DatetimeIndex, test_max_days.union(test_min_days).union(test_other_days)) @@ -166,12 +166,15 @@ def stratified_train_test_split[T: TimeSeriesDataset]( def _sample_dates_for_split( - dates: pd.DatetimeIndex, test_fraction: float, rng: np.random.Generator + dates: pd.DatetimeIndex, + test_fraction: float, + rng: np.random.Generator, ) -> tuple[pd.DatetimeIndex, pd.DatetimeIndex]: if dates.empty: return pd.DatetimeIndex([]), pd.DatetimeIndex([]) - n_test = max(1, int(test_fraction * len(dates))) + min_test_days = 1 if test_fraction > 0.0 else 0 + n_test = max(min_test_days, int(test_fraction * len(dates))) n_test = min(n_test, len(dates) - 1) # Ensure at least one for train if possible if len(dates) == 1: @@ -181,7 +184,7 @@ def _sample_dates_for_split( test_dates = pd.DatetimeIndex(np.sort(rng.choice(dates, size=n_test, replace=False))) train_dates = dates.difference(test_dates, sort=True) # type: ignore - return test_dates, train_dates + return train_dates, test_dates def _get_extreme_days( @@ -279,6 +282,10 @@ class DataSplitter(BaseConfig): default=4, description="Minimum number of unique days required to perform stratified splitting.", ) + random_state: int = Field( + default=42, + description="Random seed for reproducible splits when stratification is used.", + ) def split_dataset[T: TimeSeriesDataset]( self, @@ -306,7 +313,7 @@ def split_dataset[T: TimeSeriesDataset]( test_fraction=fraction, stratification_fraction=self.stratification_fraction, target_column=target_column, - random_state=42, + random_state=self.random_state, min_days_for_stratification=self.min_days_for_stratification, ), val_fraction=self.val_fraction if data_val is None else 0.0, diff --git a/packages/openstef-models/src/openstef_models/utils/evaluation_functions.py b/packages/openstef-models/src/openstef_models/utils/evaluation_functions.py new file mode 100644 index 000000000..7d568af13 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/utils/evaluation_functions.py @@ -0,0 +1,30 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Utility functions for evaluation metrics in forecasting models.""" + +from collections.abc import Callable +from functools import partial +from typing import Any, Literal + +import numpy as np + +from openstef_beam.metrics.metrics_probabilistic import mean_pinball_loss +from openstef_core.types import Quantile + +type EvaluationFunctionType = Literal["mean_pinball_loss"] + +EVALUATION_MAP = { + "mean_pinball_loss": mean_pinball_loss, +} + + +def get_evaluation_function( + function_type: EvaluationFunctionType, quantiles: list[Quantile] | None = None, **kwargs: Any +) -> Callable[[np.ndarray, np.ndarray], float]: + eval_metric = partial(EVALUATION_MAP[function_type], quantiles=quantiles, **kwargs) + eval_metric.__name__ = function_type # pyright: ignore[reportAttributeAccessIssue] + return eval_metric + + +__all__ = ["EVALUATION_MAP", "EvaluationFunctionType"] diff --git a/packages/openstef-models/src/openstef_models/utils/feature_selection.py b/packages/openstef-models/src/openstef_models/utils/feature_selection.py index 33a882907..ae260fa87 100644 --- a/packages/openstef-models/src/openstef_models/utils/feature_selection.py +++ b/packages/openstef-models/src/openstef_models/utils/feature_selection.py @@ -36,6 +36,7 @@ class FeatureSelection(BaseConfig): ) ALL: ClassVar[Self] + NONE: ClassVar[Self] def resolve(self, features: list[str]) -> list[str]: """Resolve the final list of features based on include and exclude lists. @@ -72,6 +73,7 @@ def combine(self, other: Self | None) -> Self: FeatureSelection.ALL = FeatureSelection(include=None, exclude=None) +FeatureSelection.NONE = FeatureSelection(include=set(), exclude=None) def Include(*features: str) -> FeatureSelection: # noqa: N802 diff --git a/packages/openstef-models/src/openstef_models/utils/loss_functions.py b/packages/openstef-models/src/openstef_models/utils/loss_functions.py index dff00be21..60b8dddfe 100644 --- a/packages/openstef-models/src/openstef_models/utils/loss_functions.py +++ b/packages/openstef-models/src/openstef_models/utils/loss_functions.py @@ -9,7 +9,9 @@ pinball loss. All functions support sample weighting for flexible training. """ -from typing import Literal +from collections.abc import Callable +from functools import partial +from typing import Any, Literal import numpy as np import numpy.typing as npt @@ -232,6 +234,18 @@ def xgb_prepare_target_for_objective( return np.repeat(target[:, np.newaxis], repeats=len(quantiles), axis=1) +def get_objective_function( + function_type: ObjectiveFunctionType, + quantiles: list[Quantile], + **kwargs: Any, +) -> Callable[ + [npt.NDArray[np.floating], npt.NDArray[np.floating]], tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]] +]: + fn = partial(OBJECTIVE_MAP[function_type], quantiles=quantiles, **kwargs) + fn.__name__ = function_type # pyright: ignore[reportAttributeAccessIssue] + return fn + + __all__ = [ "OBJECTIVE_MAP", "ObjectiveFunctionType", diff --git a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py index 9c3411ade..a740ac7c0 100644 --- a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py @@ -129,6 +129,7 @@ def fit( self, data: TimeSeriesDataset, data_val: TimeSeriesDataset | None = None, + data_test: TimeSeriesDataset | None = None, ) -> ModelFitResult | None: """Train the forecasting model with callback execution. @@ -138,6 +139,7 @@ def fit( Args: data: Training dataset for the forecasting model. data_val: Optional validation dataset for model tuning. + data_test: Optional test dataset for final evaluation. Returns: ModelFitResult containing training metrics and information, @@ -149,7 +151,7 @@ def fit( for callback in self.callbacks: callback.on_fit_start(context=context, data=data) - result = self.model.fit(data=data, data_val=data_val) + result = self.model.fit(data=data, data_val=data_val, data_test=data_test) for callback in self.callbacks: callback.on_fit_end(context=context, result=result) diff --git a/packages/openstef-models/tests/unit/transforms/general/test_nan_dropper.py b/packages/openstef-models/tests/unit/transforms/general/test_nan_dropper.py new file mode 100644 index 000000000..7c6c1d5c3 --- /dev/null +++ b/packages/openstef-models/tests/unit/transforms/general/test_nan_dropper.py @@ -0,0 +1,48 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import numpy as np +import pandas as pd +import pytest + +from openstef_core.datasets import TimeSeriesDataset +from openstef_models.transforms.general import NaNDropper +from openstef_models.utils.feature_selection import FeatureSelection + + +def test_nan_dropper__removes_rows_with_nan_in_selected_columns(caplog: pytest.LogCaptureFixture): + """Test that NaNDropper removes rows containing NaN in selected columns and logs warning.""" + # Arrange + data = pd.DataFrame( + { + "load": [100.0, np.nan, 110.0, 130.0, 140.0], + "temperature": [20.0, 22.0, np.nan, 23.0, 24.0], + "humidity": [60.0, 65.0, 70.0, np.nan, 80.0], + }, + index=pd.date_range("2025-01-01", periods=5, freq="1h"), + ) + dataset = TimeSeriesDataset(data, timedelta(hours=1)) + dropper = NaNDropper(selection=FeatureSelection(include={"load", "temperature"})) + + # Act + transformed = dropper.transform(dataset) + + # Assert + # Row 1 (index 1) has NaN in load, row 2 (index 2) has NaN in temperature - both should be dropped + # Row 3 (index 3) has NaN in humidity but humidity is not selected, so it should remain + # Remaining rows: 0, 3, 4 + expected_df = pd.DataFrame( + { + "load": [100.0, 130.0, 140.0], + "temperature": [20.0, 23.0, 24.0], + "humidity": [60.0, np.nan, 80.0], + }, + index=pd.DatetimeIndex(["2025-01-01 00:00:00", "2025-01-01 03:00:00", "2025-01-01 04:00:00"], name="timestamp"), + ) + pd.testing.assert_frame_equal(transformed.data, expected_df) + assert transformed.sample_interval == dataset.sample_interval + # 40% of rows dropped (2 out of 5), should trigger warning (default threshold is 10%) + assert "NaNDropper dropped 2 of 5 rows (40.0%)" in caplog.text diff --git a/packages/openstef-models/tests/unit/transforms/time_domain/test_rolling_aggregates_adder.py b/packages/openstef-models/tests/unit/transforms/time_domain/test_rolling_aggregates_adder.py index 263302160..5bda95ea5 100644 --- a/packages/openstef-models/tests/unit/transforms/time_domain/test_rolling_aggregates_adder.py +++ b/packages/openstef-models/tests/unit/transforms/time_domain/test_rolling_aggregates_adder.py @@ -10,6 +10,7 @@ from openstef_core.datasets import TimeSeriesDataset from openstef_core.exceptions import MissingColumnsError +from openstef_core.types import LeadTime from openstef_models.transforms.time_domain import RollingAggregatesAdder @@ -26,6 +27,7 @@ def test_rolling_aggregate_features_basic(): feature="load", rolling_window_size=timedelta(hours=2), # 2-hour window aggregation_functions=["mean", "max", "min"], + horizons=[LeadTime.from_string("PT36H")], ) # Act @@ -67,6 +69,7 @@ def test_rolling_aggregate_features_with_nan(): feature="load", rolling_window_size=timedelta(hours=2), aggregation_functions=["mean"], + horizons=[LeadTime.from_string("PT36H")], ) # Act @@ -90,7 +93,10 @@ def test_rolling_aggregate_features_missing_column_raises_error(): index=pd.date_range("2023-01-01 00:00:00", periods=3, freq="1h"), ) dataset = TimeSeriesDataset(data, sample_interval=timedelta(minutes=15)) - transform = RollingAggregatesAdder(feature="load") + transform = RollingAggregatesAdder( + feature="load", + horizons=[LeadTime.from_string("PT36H")], + ) # Act & Assert with pytest.raises(MissingColumnsError, match="Missing required columns"): @@ -106,7 +112,10 @@ def test_rolling_aggregate_features_default_parameters(): ) dataset = TimeSeriesDataset(data, sample_interval=timedelta(hours=1)) - transform = RollingAggregatesAdder(feature="load") + transform = RollingAggregatesAdder( + feature="load", + horizons=[LeadTime.from_string("PT36H")], + ) # Act result = transform.transform(dataset) diff --git a/uv.lock b/uv.lock index f8bf4a7b4..a5b72e3fd 100644 --- a/uv.lock +++ b/uv.lock @@ -26,7 +26,7 @@ wheels = [ [[package]] name = "aiobotocore" -version = "2.25.0" +version = "2.25.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -37,9 +37,9 @@ dependencies = [ { name = "python-dateutil" }, { name = "wrapt" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/29/89/b1ae494cfd12520c5d3b19704a14ffa19153634be47d48052e45223eee86/aiobotocore-2.25.0.tar.gz", hash = "sha256:169d07de312fd51292292f2c8faf8f67d0f466f525cea03855fe065ddc85f79d", size = 120514, upload-time = "2025-10-10T17:39:12.291Z" } +sdist = { url = "https://files.pythonhosted.org/packages/52/48/cf3c88c5e3fecdeed824f97a8a98a9fc0d7ef33e603f8f22c2fd32b9ef09/aiobotocore-2.25.2.tar.gz", hash = "sha256:ae0a512b34127097910b7af60752956254099ae54402a84c2021830768f92cda", size = 120585, upload-time = "2025-11-11T18:51:28.056Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/4e/3592d88436bbd60984a08440793c0ba245f538f9f6287b59c1e2c0aead8c/aiobotocore-2.25.0-py3-none-any.whl", hash = "sha256:0524fd36f6d522ddc9d013df2c19fb56369ffdfbffd129895918fbfe95216dad", size = 86028, upload-time = "2025-10-10T17:39:10.423Z" }, + { url = "https://files.pythonhosted.org/packages/8e/ad/a2f3964aa37da5a4c94c1e5f3934d6ac1333f991f675fcf08a618397a413/aiobotocore-2.25.2-py3-none-any.whl", hash = "sha256:0cec45c6ba7627dd5e5460337291c86ac38c3b512ec4054ce76407d0f7f2a48f", size = 86048, upload-time = "2025-11-11T18:51:26.139Z" }, ] [[package]] @@ -53,7 +53,7 @@ wheels = [ [[package]] name = "aiohttp" -version = "3.13.1" +version = "3.13.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohappyeyeballs" }, @@ -64,85 +64,85 @@ dependencies = [ { name = "propcache" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ba/fa/3ae643cd525cf6844d3dc810481e5748107368eb49563c15a5fb9f680750/aiohttp-3.13.1.tar.gz", hash = "sha256:4b7ee9c355015813a6aa085170b96ec22315dabc3d866fd77d147927000e9464", size = 7835344, upload-time = "2025-10-17T14:03:29.337Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1a/72/d463a10bf29871f6e3f63bcf3c91362dc4d72ed5917a8271f96672c415ad/aiohttp-3.13.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0760bd9a28efe188d77b7c3fe666e6ef74320d0f5b105f2e931c7a7e884c8230", size = 736218, upload-time = "2025-10-17T14:00:03.51Z" }, - { url = "https://files.pythonhosted.org/packages/26/13/f7bccedbe52ea5a6eef1e4ebb686a8d7765319dfd0a5939f4238cb6e79e6/aiohttp-3.13.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7129a424b441c3fe018a414401bf1b9e1d49492445f5676a3aecf4f74f67fcdb", size = 491251, upload-time = "2025-10-17T14:00:05.756Z" }, - { url = "https://files.pythonhosted.org/packages/0c/7c/7ea51b5aed6cc69c873f62548da8345032aa3416336f2d26869d4d37b4a2/aiohttp-3.13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e1cb04ae64a594f6ddf5cbb024aba6b4773895ab6ecbc579d60414f8115e9e26", size = 490394, upload-time = "2025-10-17T14:00:07.504Z" }, - { url = "https://files.pythonhosted.org/packages/31/05/1172cc4af4557f6522efdee6eb2b9f900e1e320a97e25dffd3c5a6af651b/aiohttp-3.13.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:782d656a641e755decd6bd98d61d2a8ea062fd45fd3ff8d4173605dd0d2b56a1", size = 1737455, upload-time = "2025-10-17T14:00:09.403Z" }, - { url = "https://files.pythonhosted.org/packages/24/3d/ce6e4eca42f797d6b1cd3053cf3b0a22032eef3e4d1e71b9e93c92a3f201/aiohttp-3.13.1-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f92ad8169767429a6d2237331726c03ccc5f245222f9373aa045510976af2b35", size = 1699176, upload-time = "2025-10-17T14:00:11.314Z" }, - { url = "https://files.pythonhosted.org/packages/25/04/7127ba55653e04da51477372566b16ae786ef854e06222a1c96b4ba6c8ef/aiohttp-3.13.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0e778f634ca50ec005eefa2253856921c429581422d887be050f2c1c92e5ce12", size = 1767216, upload-time = "2025-10-17T14:00:13.668Z" }, - { url = "https://files.pythonhosted.org/packages/b8/3b/43bca1e75847e600f40df829a6b2f0f4e1d4c70fb6c4818fdc09a462afd5/aiohttp-3.13.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:9bc36b41cf4aab5d3b34d22934a696ab83516603d1bc1f3e4ff9930fe7d245e5", size = 1865870, upload-time = "2025-10-17T14:00:15.852Z" }, - { url = "https://files.pythonhosted.org/packages/9e/69/b204e5d43384197a614c88c1717c324319f5b4e7d0a1b5118da583028d40/aiohttp-3.13.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3fd4570ea696aee27204dd524f287127ed0966d14d309dc8cc440f474e3e7dbd", size = 1751021, upload-time = "2025-10-17T14:00:18.297Z" }, - { url = "https://files.pythonhosted.org/packages/1c/af/845dc6b6fdf378791d720364bf5150f80d22c990f7e3a42331d93b337cc7/aiohttp-3.13.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7bda795f08b8a620836ebfb0926f7973972a4bf8c74fdf9145e489f88c416811", size = 1561448, upload-time = "2025-10-17T14:00:20.152Z" }, - { url = "https://files.pythonhosted.org/packages/7a/91/d2ab08cd77ed76a49e4106b1cfb60bce2768242dd0c4f9ec0cb01e2cbf94/aiohttp-3.13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:055a51d90e351aae53dcf324d0eafb2abe5b576d3ea1ec03827d920cf81a1c15", size = 1698196, upload-time = "2025-10-17T14:00:22.131Z" }, - { url = "https://files.pythonhosted.org/packages/5e/d1/082f0620dc428ecb8f21c08a191a4694915cd50f14791c74a24d9161cc50/aiohttp-3.13.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d4131df864cbcc09bb16d3612a682af0db52f10736e71312574d90f16406a867", size = 1719252, upload-time = "2025-10-17T14:00:24.453Z" }, - { url = "https://files.pythonhosted.org/packages/fc/78/2af2f44491be7b08e43945b72d2b4fd76f0a14ba850ba9e41d28a7ce716a/aiohttp-3.13.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:163d3226e043f79bf47c87f8dfc89c496cc7bc9128cb7055ce026e435d551720", size = 1736529, upload-time = "2025-10-17T14:00:26.567Z" }, - { url = "https://files.pythonhosted.org/packages/b0/34/3e919ecdc93edaea8d140138049a0d9126141072e519535e2efa38eb7a02/aiohttp-3.13.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:a2370986a3b75c1a5f3d6f6d763fc6be4b430226577b0ed16a7c13a75bf43d8f", size = 1553723, upload-time = "2025-10-17T14:00:28.592Z" }, - { url = "https://files.pythonhosted.org/packages/21/4b/d8003aeda2f67f359b37e70a5a4b53fee336d8e89511ac307ff62aeefcdb/aiohttp-3.13.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d7c14de0c7c9f1e6e785ce6cbe0ed817282c2af0012e674f45b4e58c6d4ea030", size = 1763394, upload-time = "2025-10-17T14:00:31.051Z" }, - { url = "https://files.pythonhosted.org/packages/4c/7b/1dbe6a39e33af9baaafc3fc016a280663684af47ba9f0e5d44249c1f72ec/aiohttp-3.13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bb611489cf0db10b99beeb7280bd39e0ef72bc3eb6d8c0f0a16d8a56075d1eb7", size = 1718104, upload-time = "2025-10-17T14:00:33.407Z" }, - { url = "https://files.pythonhosted.org/packages/5c/88/bd1b38687257cce67681b9b0fa0b16437be03383fa1be4d1a45b168bef25/aiohttp-3.13.1-cp312-cp312-win32.whl", hash = "sha256:f90fe0ee75590f7428f7c8b5479389d985d83c949ea10f662ab928a5ed5cf5e6", size = 425303, upload-time = "2025-10-17T14:00:35.829Z" }, - { url = "https://files.pythonhosted.org/packages/0e/e3/4481f50dd6f27e9e58c19a60cff44029641640237e35d32b04aaee8cf95f/aiohttp-3.13.1-cp312-cp312-win_amd64.whl", hash = "sha256:3461919a9dca272c183055f2aab8e6af0adc810a1b386cce28da11eb00c859d9", size = 452071, upload-time = "2025-10-17T14:00:37.764Z" }, - { url = "https://files.pythonhosted.org/packages/16/6d/d267b132342e1080f4c1bb7e1b4e96b168b3cbce931ec45780bff693ff95/aiohttp-3.13.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:55785a7f8f13df0c9ca30b5243d9909bd59f48b274262a8fe78cee0828306e5d", size = 730727, upload-time = "2025-10-17T14:00:39.681Z" }, - { url = "https://files.pythonhosted.org/packages/92/c8/1cf495bac85cf71b80fad5f6d7693e84894f11b9fe876b64b0a1e7cbf32f/aiohttp-3.13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4bef5b83296cebb8167707b4f8d06c1805db0af632f7a72d7c5288a84667e7c3", size = 488678, upload-time = "2025-10-17T14:00:41.541Z" }, - { url = "https://files.pythonhosted.org/packages/a8/19/23c6b81cca587ec96943d977a58d11d05a82837022e65cd5502d665a7d11/aiohttp-3.13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:27af0619c33f9ca52f06069ec05de1a357033449ab101836f431768ecfa63ff5", size = 487637, upload-time = "2025-10-17T14:00:43.527Z" }, - { url = "https://files.pythonhosted.org/packages/48/58/8f9464afb88b3eed145ad7c665293739b3a6f91589694a2bb7e5778cbc72/aiohttp-3.13.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a47fe43229a8efd3764ef7728a5c1158f31cdf2a12151fe99fde81c9ac87019c", size = 1718975, upload-time = "2025-10-17T14:00:45.496Z" }, - { url = "https://files.pythonhosted.org/packages/e1/8b/c3da064ca392b2702f53949fd7c403afa38d9ee10bf52c6ad59a42537103/aiohttp-3.13.1-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6e68e126de5b46e8b2bee73cab086b5d791e7dc192056916077aa1e2e2b04437", size = 1686905, upload-time = "2025-10-17T14:00:47.707Z" }, - { url = "https://files.pythonhosted.org/packages/0a/a4/9c8a3843ecf526daee6010af1a66eb62579be1531d2d5af48ea6f405ad3c/aiohttp-3.13.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e65ef49dd22514329c55970d39079618a8abf856bae7147913bb774a3ab3c02f", size = 1754907, upload-time = "2025-10-17T14:00:49.702Z" }, - { url = "https://files.pythonhosted.org/packages/a4/80/1f470ed93e06436e3fc2659a9fc329c192fa893fb7ed4e884d399dbfb2a8/aiohttp-3.13.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0e425a7e0511648b3376839dcc9190098671a47f21a36e815b97762eb7d556b0", size = 1857129, upload-time = "2025-10-17T14:00:51.822Z" }, - { url = "https://files.pythonhosted.org/packages/cc/e6/33d305e6cce0a8daeb79c7d8d6547d6e5f27f4e35fa4883fc9c9eb638596/aiohttp-3.13.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:010dc9b7110f055006acd3648d5d5955bb6473b37c3663ec42a1b4cba7413e6b", size = 1738189, upload-time = "2025-10-17T14:00:53.976Z" }, - { url = "https://files.pythonhosted.org/packages/ac/42/8df03367e5a64327fe0c39291080697795430c438fc1139c7cc1831aa1df/aiohttp-3.13.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1b5c722d0ca5f57d61066b5dfa96cdb87111e2519156b35c1f8dd17c703bee7a", size = 1553608, upload-time = "2025-10-17T14:00:56.144Z" }, - { url = "https://files.pythonhosted.org/packages/96/17/6d5c73cd862f1cf29fddcbb54aac147037ff70a043a2829d03a379e95742/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:93029f0e9b77b714904a281b5aa578cdc8aa8ba018d78c04e51e1c3d8471b8ec", size = 1681809, upload-time = "2025-10-17T14:00:58.603Z" }, - { url = "https://files.pythonhosted.org/packages/be/31/8926c8ab18533f6076ce28d2c329a203b58c6861681906e2d73b9c397588/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:d1824c7d08d8ddfc8cb10c847f696942e5aadbd16fd974dfde8bd2c3c08a9fa1", size = 1711161, upload-time = "2025-10-17T14:01:01.744Z" }, - { url = "https://files.pythonhosted.org/packages/f2/36/2f83e1ca730b1e0a8cf1c8ab9559834c5eec9f5da86e77ac71f0d16b521d/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8f47d0ff5b3eb9c1278a2f56ea48fda667da8ebf28bd2cb378b7c453936ce003", size = 1731999, upload-time = "2025-10-17T14:01:04.626Z" }, - { url = "https://files.pythonhosted.org/packages/b9/ec/1f818cc368dfd4d5ab4e9efc8f2f6f283bfc31e1c06d3e848bcc862d4591/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:8a396b1da9b51ded79806ac3b57a598f84e0769eaa1ba300655d8b5e17b70c7b", size = 1548684, upload-time = "2025-10-17T14:01:06.828Z" }, - { url = "https://files.pythonhosted.org/packages/d3/ad/33d36efd16e4fefee91b09a22a3a0e1b830f65471c3567ac5a8041fac812/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d9c52a65f54796e066b5d674e33b53178014752d28bca555c479c2c25ffcec5b", size = 1756676, upload-time = "2025-10-17T14:01:09.517Z" }, - { url = "https://files.pythonhosted.org/packages/3c/c4/4a526d84e77d464437713ca909364988ed2e0cd0cdad2c06cb065ece9e08/aiohttp-3.13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a89da72d18d6c95a653470b78d8ee5aa3c4b37212004c103403d0776cbea6ff0", size = 1715577, upload-time = "2025-10-17T14:01:11.958Z" }, - { url = "https://files.pythonhosted.org/packages/a2/21/e39638b7d9c7f1362c4113a91870f89287e60a7ea2d037e258b81e8b37d5/aiohttp-3.13.1-cp313-cp313-win32.whl", hash = "sha256:02e0258b7585ddf5d01c79c716ddd674386bfbf3041fbbfe7bdf9c7c32eb4a9b", size = 424468, upload-time = "2025-10-17T14:01:14.344Z" }, - { url = "https://files.pythonhosted.org/packages/cc/00/f3a92c592a845ebb2f47d102a67f35f0925cb854c5e7386f1a3a1fdff2ab/aiohttp-3.13.1-cp313-cp313-win_amd64.whl", hash = "sha256:ef56ffe60e8d97baac123272bde1ab889ee07d3419606fae823c80c2b86c403e", size = 450806, upload-time = "2025-10-17T14:01:16.437Z" }, - { url = "https://files.pythonhosted.org/packages/97/be/0f6c41d2fd0aab0af133c509cabaf5b1d78eab882cb0ceb872e87ceeabf7/aiohttp-3.13.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:77f83b3dc5870a2ea79a0fcfdcc3fc398187ec1675ff61ec2ceccad27ecbd303", size = 733828, upload-time = "2025-10-17T14:01:18.58Z" }, - { url = "https://files.pythonhosted.org/packages/75/14/24e2ac5efa76ae30e05813e0f50737005fd52da8ddffee474d4a5e7f38a6/aiohttp-3.13.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:9cafd2609ebb755e47323306c7666283fbba6cf82b5f19982ea627db907df23a", size = 489320, upload-time = "2025-10-17T14:01:20.644Z" }, - { url = "https://files.pythonhosted.org/packages/da/5a/4cbe599358d05ea7db4869aff44707b57d13f01724d48123dc68b3288d5a/aiohttp-3.13.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9c489309a2ca548d5f11131cfb4092f61d67954f930bba7e413bcdbbb82d7fae", size = 489899, upload-time = "2025-10-17T14:01:22.638Z" }, - { url = "https://files.pythonhosted.org/packages/67/96/3aec9d9cfc723273d4386328a1e2562cf23629d2f57d137047c49adb2afb/aiohttp-3.13.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:79ac15fe5fdbf3c186aa74b656cd436d9a1e492ba036db8901c75717055a5b1c", size = 1716556, upload-time = "2025-10-17T14:01:25.406Z" }, - { url = "https://files.pythonhosted.org/packages/b9/99/39a3d250595b5c8172843831221fa5662884f63f8005b00b4034f2a7a836/aiohttp-3.13.1-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:095414be94fce3bc080684b4cd50fb70d439bc4662b2a1984f45f3bf9ede08aa", size = 1665814, upload-time = "2025-10-17T14:01:27.683Z" }, - { url = "https://files.pythonhosted.org/packages/3b/96/8319e7060a85db14a9c178bc7b3cf17fad458db32ba6d2910de3ca71452d/aiohttp-3.13.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c68172e1a2dca65fa1272c85ca72e802d78b67812b22827df01017a15c5089fa", size = 1755767, upload-time = "2025-10-17T14:01:29.914Z" }, - { url = "https://files.pythonhosted.org/packages/1c/c6/0a2b3d886b40aa740fa2294cd34ed46d2e8108696748492be722e23082a7/aiohttp-3.13.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3751f9212bcd119944d4ea9de6a3f0fee288c177b8ca55442a2cdff0c8201eb3", size = 1836591, upload-time = "2025-10-17T14:01:32.28Z" }, - { url = "https://files.pythonhosted.org/packages/fb/34/8ab5904b3331c91a58507234a1e2f662f837e193741609ee5832eb436251/aiohttp-3.13.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8619dca57d98a8353abdc7a1eeb415548952b39d6676def70d9ce76d41a046a9", size = 1714915, upload-time = "2025-10-17T14:01:35.138Z" }, - { url = "https://files.pythonhosted.org/packages/b5/d3/d36077ca5f447649112189074ac6c192a666bf68165b693e48c23b0d008c/aiohttp-3.13.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97795a0cb0a5f8a843759620e9cbd8889f8079551f5dcf1ccd99ed2f056d9632", size = 1546579, upload-time = "2025-10-17T14:01:38.237Z" }, - { url = "https://files.pythonhosted.org/packages/a8/14/dbc426a1bb1305c4fc78ce69323498c9e7c699983366ef676aa5d3f949fa/aiohttp-3.13.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1060e058da8f9f28a7026cdfca9fc886e45e551a658f6a5c631188f72a3736d2", size = 1680633, upload-time = "2025-10-17T14:01:40.902Z" }, - { url = "https://files.pythonhosted.org/packages/29/83/1e68e519aff9f3ef6d4acb6cdda7b5f592ef5c67c8f095dc0d8e06ce1c3e/aiohttp-3.13.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:f48a2c26333659101ef214907d29a76fe22ad7e912aa1e40aeffdff5e8180977", size = 1678675, upload-time = "2025-10-17T14:01:43.779Z" }, - { url = "https://files.pythonhosted.org/packages/38/b9/7f3e32a81c08b6d29ea15060c377e1f038ad96cd9923a85f30e817afff22/aiohttp-3.13.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:f1dfad638b9c91ff225162b2824db0e99ae2d1abe0dc7272b5919701f0a1e685", size = 1726829, upload-time = "2025-10-17T14:01:46.546Z" }, - { url = "https://files.pythonhosted.org/packages/23/ce/610b1f77525a0a46639aea91377b12348e9f9412cc5ddcb17502aa4681c7/aiohttp-3.13.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:8fa09ab6dd567cb105db4e8ac4d60f377a7a94f67cf669cac79982f626360f32", size = 1542985, upload-time = "2025-10-17T14:01:49.082Z" }, - { url = "https://files.pythonhosted.org/packages/53/39/3ac8dfdad5de38c401846fa071fcd24cb3b88ccfb024854df6cbd9b4a07e/aiohttp-3.13.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:4159fae827f9b5f655538a4f99b7cbc3a2187e5ca2eee82f876ef1da802ccfa9", size = 1741556, upload-time = "2025-10-17T14:01:51.846Z" }, - { url = "https://files.pythonhosted.org/packages/2a/48/b1948b74fea7930b0f29595d1956842324336de200593d49a51a40607fdc/aiohttp-3.13.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:ad671118c19e9cfafe81a7a05c294449fe0ebb0d0c6d5bb445cd2190023f5cef", size = 1696175, upload-time = "2025-10-17T14:01:54.232Z" }, - { url = "https://files.pythonhosted.org/packages/96/26/063bba38e4b27b640f56cc89fe83cc3546a7ae162c2e30ca345f0ccdc3d1/aiohttp-3.13.1-cp314-cp314-win32.whl", hash = "sha256:c5c970c148c48cf6acb65224ca3c87a47f74436362dde75c27bc44155ccf7dfc", size = 430254, upload-time = "2025-10-17T14:01:56.451Z" }, - { url = "https://files.pythonhosted.org/packages/88/aa/25fd764384dc4eab714023112d3548a8dd69a058840d61d816ea736097a2/aiohttp-3.13.1-cp314-cp314-win_amd64.whl", hash = "sha256:748a00167b7a88385756fa615417d24081cba7e58c8727d2e28817068b97c18c", size = 456256, upload-time = "2025-10-17T14:01:58.752Z" }, - { url = "https://files.pythonhosted.org/packages/d4/9f/9ba6059de4bad25c71cd88e3da53f93e9618ea369cf875c9f924b1c167e2/aiohttp-3.13.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:390b73e99d7a1f0f658b3f626ba345b76382f3edc65f49d6385e326e777ed00e", size = 765956, upload-time = "2025-10-17T14:02:01.515Z" }, - { url = "https://files.pythonhosted.org/packages/1f/30/b86da68b494447d3060f45c7ebb461347535dab4af9162a9267d9d86ca31/aiohttp-3.13.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:27e83abb330e687e019173d8fc1fd6a1cf471769624cf89b1bb49131198a810a", size = 503206, upload-time = "2025-10-17T14:02:03.818Z" }, - { url = "https://files.pythonhosted.org/packages/c1/21/d27a506552843ff9eeb9fcc2d45f943b09eefdfdf205aab044f4f1f39f6a/aiohttp-3.13.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2b20eed07131adbf3e873e009c2869b16a579b236e9d4b2f211bf174d8bef44a", size = 507719, upload-time = "2025-10-17T14:02:05.947Z" }, - { url = "https://files.pythonhosted.org/packages/58/23/4042230ec7e4edc7ba43d0342b5a3d2fe0222ca046933c4251a35aaf17f5/aiohttp-3.13.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:58fee9ef8477fd69e823b92cfd1f590ee388521b5ff8f97f3497e62ee0656212", size = 1862758, upload-time = "2025-10-17T14:02:08.469Z" }, - { url = "https://files.pythonhosted.org/packages/df/88/525c45bea7cbb9f65df42cadb4ff69f6a0dbf95931b0ff7d1fdc40a1cb5f/aiohttp-3.13.1-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:1f62608fcb7b3d034d5e9496bea52d94064b7b62b06edba82cd38191336bbeda", size = 1717790, upload-time = "2025-10-17T14:02:11.37Z" }, - { url = "https://files.pythonhosted.org/packages/1d/80/21e9b5eb77df352a5788713f37359b570a793f0473f3a72db2e46df379b9/aiohttp-3.13.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:fdc4d81c3dfc999437f23e36d197e8b557a3f779625cd13efe563a9cfc2ce712", size = 1842088, upload-time = "2025-10-17T14:02:13.872Z" }, - { url = "https://files.pythonhosted.org/packages/d2/bf/d1738f6d63fe8b2a0ad49533911b3347f4953cd001bf3223cb7b61f18dff/aiohttp-3.13.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:601d7ec812f746fd80ff8af38eeb3f196e1bab4a4d39816ccbc94c222d23f1d0", size = 1934292, upload-time = "2025-10-17T14:02:16.624Z" }, - { url = "https://files.pythonhosted.org/packages/04/e6/26cab509b42610ca49573f2fc2867810f72bd6a2070182256c31b14f2e98/aiohttp-3.13.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47c3f21c469b840d9609089435c0d9918ae89f41289bf7cc4afe5ff7af5458db", size = 1791328, upload-time = "2025-10-17T14:02:19.051Z" }, - { url = "https://files.pythonhosted.org/packages/8a/6d/baf7b462852475c9d045bee8418d9cdf280efb687752b553e82d0c58bcc2/aiohttp-3.13.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d6c6cdc0750db88520332d4aaa352221732b0cafe89fd0e42feec7cb1b5dc236", size = 1622663, upload-time = "2025-10-17T14:02:21.397Z" }, - { url = "https://files.pythonhosted.org/packages/c8/48/396a97318af9b5f4ca8b3dc14a67976f71c6400a9609c622f96da341453f/aiohttp-3.13.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:58a12299eeb1fca2414ee2bc345ac69b0f765c20b82c3ab2a75d91310d95a9f6", size = 1787791, upload-time = "2025-10-17T14:02:24.212Z" }, - { url = "https://files.pythonhosted.org/packages/a8/e2/6925f6784134ce3ff3ce1a8502ab366432a3b5605387618c1a939ce778d9/aiohttp-3.13.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:0989cbfc195a4de1bb48f08454ef1cb47424b937e53ed069d08404b9d3c7aea1", size = 1775459, upload-time = "2025-10-17T14:02:26.971Z" }, - { url = "https://files.pythonhosted.org/packages/c3/e3/b372047ba739fc39f199b99290c4cc5578ce5fd125f69168c967dac44021/aiohttp-3.13.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:feb5ee664300e2435e0d1bc3443a98925013dfaf2cae9699c1f3606b88544898", size = 1789250, upload-time = "2025-10-17T14:02:29.686Z" }, - { url = "https://files.pythonhosted.org/packages/02/8c/9f48b93d7d57fc9ef2ad4adace62e4663ea1ce1753806c4872fb36b54c39/aiohttp-3.13.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:58a6f8702da0c3606fb5cf2e669cce0ca681d072fe830968673bb4c69eb89e88", size = 1616139, upload-time = "2025-10-17T14:02:32.151Z" }, - { url = "https://files.pythonhosted.org/packages/5c/c6/c64e39d61aaa33d7de1be5206c0af3ead4b369bf975dac9fdf907a4291c1/aiohttp-3.13.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:a417ceb433b9d280e2368ffea22d4bc6e3e0d894c4bc7768915124d57d0964b6", size = 1815829, upload-time = "2025-10-17T14:02:34.635Z" }, - { url = "https://files.pythonhosted.org/packages/22/75/e19e93965ea675f1151753b409af97a14f1d888588a555e53af1e62b83eb/aiohttp-3.13.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8ac8854f7b0466c5d6a9ea49249b3f6176013859ac8f4bb2522ad8ed6b94ded2", size = 1760923, upload-time = "2025-10-17T14:02:37.364Z" }, - { url = "https://files.pythonhosted.org/packages/6c/a4/06ed38f1dabd98ea136fd116cba1d02c9b51af5a37d513b6850a9a567d86/aiohttp-3.13.1-cp314-cp314t-win32.whl", hash = "sha256:be697a5aeff42179ed13b332a411e674994bcd406c81642d014ace90bf4bb968", size = 463318, upload-time = "2025-10-17T14:02:39.924Z" }, - { url = "https://files.pythonhosted.org/packages/04/0f/27e4fdde899e1e90e35eeff56b54ed63826435ad6cdb06b09ed312d1b3fa/aiohttp-3.13.1-cp314-cp314t-win_amd64.whl", hash = "sha256:f1d6aa90546a4e8f20c3500cb68ab14679cd91f927fa52970035fd3207dfb3da", size = 496721, upload-time = "2025-10-17T14:02:42.199Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/29/9b/01f00e9856d0a73260e86dd8ed0c2234a466c5c1712ce1c281548df39777/aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b", size = 737623, upload-time = "2025-10-28T20:56:30.797Z" }, + { url = "https://files.pythonhosted.org/packages/5a/1b/4be39c445e2b2bd0aab4ba736deb649fabf14f6757f405f0c9685019b9e9/aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc", size = 492664, upload-time = "2025-10-28T20:56:32.708Z" }, + { url = "https://files.pythonhosted.org/packages/28/66/d35dcfea8050e131cdd731dff36434390479b4045a8d0b9d7111b0a968f1/aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7", size = 491808, upload-time = "2025-10-28T20:56:34.57Z" }, + { url = "https://files.pythonhosted.org/packages/00/29/8e4609b93e10a853b65f8291e64985de66d4f5848c5637cddc70e98f01f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb", size = 1738863, upload-time = "2025-10-28T20:56:36.377Z" }, + { url = "https://files.pythonhosted.org/packages/9d/fa/4ebdf4adcc0def75ced1a0d2d227577cd7b1b85beb7edad85fcc87693c75/aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3", size = 1700586, upload-time = "2025-10-28T20:56:38.034Z" }, + { url = "https://files.pythonhosted.org/packages/da/04/73f5f02ff348a3558763ff6abe99c223381b0bace05cd4530a0258e52597/aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f", size = 1768625, upload-time = "2025-10-28T20:56:39.75Z" }, + { url = "https://files.pythonhosted.org/packages/f8/49/a825b79ffec124317265ca7d2344a86bcffeb960743487cb11988ffb3494/aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6", size = 1867281, upload-time = "2025-10-28T20:56:41.471Z" }, + { url = "https://files.pythonhosted.org/packages/b9/48/adf56e05f81eac31edcfae45c90928f4ad50ef2e3ea72cb8376162a368f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e", size = 1752431, upload-time = "2025-10-28T20:56:43.162Z" }, + { url = "https://files.pythonhosted.org/packages/30/ab/593855356eead019a74e862f21523db09c27f12fd24af72dbc3555b9bfd9/aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7", size = 1562846, upload-time = "2025-10-28T20:56:44.85Z" }, + { url = "https://files.pythonhosted.org/packages/39/0f/9f3d32271aa8dc35036e9668e31870a9d3b9542dd6b3e2c8a30931cb27ae/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d", size = 1699606, upload-time = "2025-10-28T20:56:46.519Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3c/52d2658c5699b6ef7692a3f7128b2d2d4d9775f2a68093f74bca06cf01e1/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b", size = 1720663, upload-time = "2025-10-28T20:56:48.528Z" }, + { url = "https://files.pythonhosted.org/packages/9b/d4/8f8f3ff1fb7fb9e3f04fcad4e89d8a1cd8fc7d05de67e3de5b15b33008ff/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8", size = 1737939, upload-time = "2025-10-28T20:56:50.77Z" }, + { url = "https://files.pythonhosted.org/packages/03/d3/ddd348f8a27a634daae39a1b8e291ff19c77867af438af844bf8b7e3231b/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16", size = 1555132, upload-time = "2025-10-28T20:56:52.568Z" }, + { url = "https://files.pythonhosted.org/packages/39/b8/46790692dc46218406f94374903ba47552f2f9f90dad554eed61bfb7b64c/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169", size = 1764802, upload-time = "2025-10-28T20:56:54.292Z" }, + { url = "https://files.pythonhosted.org/packages/ba/e4/19ce547b58ab2a385e5f0b8aa3db38674785085abcf79b6e0edd1632b12f/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248", size = 1719512, upload-time = "2025-10-28T20:56:56.428Z" }, + { url = "https://files.pythonhosted.org/packages/70/30/6355a737fed29dcb6dfdd48682d5790cb5eab050f7b4e01f49b121d3acad/aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e", size = 426690, upload-time = "2025-10-28T20:56:58.736Z" }, + { url = "https://files.pythonhosted.org/packages/0a/0d/b10ac09069973d112de6ef980c1f6bb31cb7dcd0bc363acbdad58f927873/aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45", size = 453465, upload-time = "2025-10-28T20:57:00.795Z" }, + { url = "https://files.pythonhosted.org/packages/bf/78/7e90ca79e5aa39f9694dcfd74f4720782d3c6828113bb1f3197f7e7c4a56/aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be", size = 732139, upload-time = "2025-10-28T20:57:02.455Z" }, + { url = "https://files.pythonhosted.org/packages/db/ed/1f59215ab6853fbaa5c8495fa6cbc39edfc93553426152b75d82a5f32b76/aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742", size = 490082, upload-time = "2025-10-28T20:57:04.784Z" }, + { url = "https://files.pythonhosted.org/packages/68/7b/fe0fe0f5e05e13629d893c760465173a15ad0039c0a5b0d0040995c8075e/aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293", size = 489035, upload-time = "2025-10-28T20:57:06.894Z" }, + { url = "https://files.pythonhosted.org/packages/d2/04/db5279e38471b7ac801d7d36a57d1230feeee130bbe2a74f72731b23c2b1/aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811", size = 1720387, upload-time = "2025-10-28T20:57:08.685Z" }, + { url = "https://files.pythonhosted.org/packages/31/07/8ea4326bd7dae2bd59828f69d7fdc6e04523caa55e4a70f4a8725a7e4ed2/aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a", size = 1688314, upload-time = "2025-10-28T20:57:10.693Z" }, + { url = "https://files.pythonhosted.org/packages/48/ab/3d98007b5b87ffd519d065225438cc3b668b2f245572a8cb53da5dd2b1bc/aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4", size = 1756317, upload-time = "2025-10-28T20:57:12.563Z" }, + { url = "https://files.pythonhosted.org/packages/97/3d/801ca172b3d857fafb7b50c7c03f91b72b867a13abca982ed6b3081774ef/aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a", size = 1858539, upload-time = "2025-10-28T20:57:14.623Z" }, + { url = "https://files.pythonhosted.org/packages/f7/0d/4764669bdf47bd472899b3d3db91fffbe925c8e3038ec591a2fd2ad6a14d/aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e", size = 1739597, upload-time = "2025-10-28T20:57:16.399Z" }, + { url = "https://files.pythonhosted.org/packages/c4/52/7bd3c6693da58ba16e657eb904a5b6decfc48ecd06e9ac098591653b1566/aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb", size = 1555006, upload-time = "2025-10-28T20:57:18.288Z" }, + { url = "https://files.pythonhosted.org/packages/48/30/9586667acec5993b6f41d2ebcf96e97a1255a85f62f3c653110a5de4d346/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded", size = 1683220, upload-time = "2025-10-28T20:57:20.241Z" }, + { url = "https://files.pythonhosted.org/packages/71/01/3afe4c96854cfd7b30d78333852e8e851dceaec1c40fd00fec90c6402dd2/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b", size = 1712570, upload-time = "2025-10-28T20:57:22.253Z" }, + { url = "https://files.pythonhosted.org/packages/11/2c/22799d8e720f4697a9e66fd9c02479e40a49de3de2f0bbe7f9f78a987808/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8", size = 1733407, upload-time = "2025-10-28T20:57:24.37Z" }, + { url = "https://files.pythonhosted.org/packages/34/cb/90f15dd029f07cebbd91f8238a8b363978b530cd128488085b5703683594/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04", size = 1550093, upload-time = "2025-10-28T20:57:26.257Z" }, + { url = "https://files.pythonhosted.org/packages/69/46/12dce9be9d3303ecbf4d30ad45a7683dc63d90733c2d9fe512be6716cd40/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476", size = 1758084, upload-time = "2025-10-28T20:57:28.349Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c8/0932b558da0c302ffd639fc6362a313b98fdf235dc417bc2493da8394df7/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23", size = 1716987, upload-time = "2025-10-28T20:57:30.233Z" }, + { url = "https://files.pythonhosted.org/packages/5d/8b/f5bd1a75003daed099baec373aed678f2e9b34f2ad40d85baa1368556396/aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254", size = 425859, upload-time = "2025-10-28T20:57:32.105Z" }, + { url = "https://files.pythonhosted.org/packages/5d/28/a8a9fc6957b2cee8902414e41816b5ab5536ecf43c3b1843c10e82c559b2/aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a", size = 452192, upload-time = "2025-10-28T20:57:34.166Z" }, + { url = "https://files.pythonhosted.org/packages/9b/36/e2abae1bd815f01c957cbf7be817b3043304e1c87bad526292a0410fdcf9/aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b", size = 735234, upload-time = "2025-10-28T20:57:36.415Z" }, + { url = "https://files.pythonhosted.org/packages/ca/e3/1ee62dde9b335e4ed41db6bba02613295a0d5b41f74a783c142745a12763/aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61", size = 490733, upload-time = "2025-10-28T20:57:38.205Z" }, + { url = "https://files.pythonhosted.org/packages/1a/aa/7a451b1d6a04e8d15a362af3e9b897de71d86feac3babf8894545d08d537/aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4", size = 491303, upload-time = "2025-10-28T20:57:40.122Z" }, + { url = "https://files.pythonhosted.org/packages/57/1e/209958dbb9b01174870f6a7538cd1f3f28274fdbc88a750c238e2c456295/aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b", size = 1717965, upload-time = "2025-10-28T20:57:42.28Z" }, + { url = "https://files.pythonhosted.org/packages/08/aa/6a01848d6432f241416bc4866cae8dc03f05a5a884d2311280f6a09c73d6/aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694", size = 1667221, upload-time = "2025-10-28T20:57:44.869Z" }, + { url = "https://files.pythonhosted.org/packages/87/4f/36c1992432d31bbc789fa0b93c768d2e9047ec8c7177e5cd84ea85155f36/aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906", size = 1757178, upload-time = "2025-10-28T20:57:47.216Z" }, + { url = "https://files.pythonhosted.org/packages/ac/b4/8e940dfb03b7e0f68a82b88fd182b9be0a65cb3f35612fe38c038c3112cf/aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9", size = 1838001, upload-time = "2025-10-28T20:57:49.337Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ef/39f3448795499c440ab66084a9db7d20ca7662e94305f175a80f5b7e0072/aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011", size = 1716325, upload-time = "2025-10-28T20:57:51.327Z" }, + { url = "https://files.pythonhosted.org/packages/d7/51/b311500ffc860b181c05d91c59a1313bdd05c82960fdd4035a15740d431e/aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6", size = 1547978, upload-time = "2025-10-28T20:57:53.554Z" }, + { url = "https://files.pythonhosted.org/packages/31/64/b9d733296ef79815226dab8c586ff9e3df41c6aff2e16c06697b2d2e6775/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213", size = 1682042, upload-time = "2025-10-28T20:57:55.617Z" }, + { url = "https://files.pythonhosted.org/packages/3f/30/43d3e0f9d6473a6db7d472104c4eff4417b1e9df01774cb930338806d36b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49", size = 1680085, upload-time = "2025-10-28T20:57:57.59Z" }, + { url = "https://files.pythonhosted.org/packages/16/51/c709f352c911b1864cfd1087577760ced64b3e5bee2aa88b8c0c8e2e4972/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae", size = 1728238, upload-time = "2025-10-28T20:57:59.525Z" }, + { url = "https://files.pythonhosted.org/packages/19/e2/19bd4c547092b773caeb48ff5ae4b1ae86756a0ee76c16727fcfd281404b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa", size = 1544395, upload-time = "2025-10-28T20:58:01.914Z" }, + { url = "https://files.pythonhosted.org/packages/cf/87/860f2803b27dfc5ed7be532832a3498e4919da61299b4a1f8eb89b8ff44d/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4", size = 1742965, upload-time = "2025-10-28T20:58:03.972Z" }, + { url = "https://files.pythonhosted.org/packages/67/7f/db2fc7618925e8c7a601094d5cbe539f732df4fb570740be88ed9e40e99a/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a", size = 1697585, upload-time = "2025-10-28T20:58:06.189Z" }, + { url = "https://files.pythonhosted.org/packages/0c/07/9127916cb09bb38284db5036036042b7b2c514c8ebaeee79da550c43a6d6/aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940", size = 431621, upload-time = "2025-10-28T20:58:08.636Z" }, + { url = "https://files.pythonhosted.org/packages/fb/41/554a8a380df6d3a2bba8a7726429a23f4ac62aaf38de43bb6d6cde7b4d4d/aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4", size = 457627, upload-time = "2025-10-28T20:58:11Z" }, + { url = "https://files.pythonhosted.org/packages/c7/8e/3824ef98c039d3951cb65b9205a96dd2b20f22241ee17d89c5701557c826/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673", size = 767360, upload-time = "2025-10-28T20:58:13.358Z" }, + { url = "https://files.pythonhosted.org/packages/a4/0f/6a03e3fc7595421274fa34122c973bde2d89344f8a881b728fa8c774e4f1/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd", size = 504616, upload-time = "2025-10-28T20:58:15.339Z" }, + { url = "https://files.pythonhosted.org/packages/c6/aa/ed341b670f1bc8a6f2c6a718353d13b9546e2cef3544f573c6a1ff0da711/aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3", size = 509131, upload-time = "2025-10-28T20:58:17.693Z" }, + { url = "https://files.pythonhosted.org/packages/7f/f0/c68dac234189dae5c4bbccc0f96ce0cc16b76632cfc3a08fff180045cfa4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf", size = 1864168, upload-time = "2025-10-28T20:58:20.113Z" }, + { url = "https://files.pythonhosted.org/packages/8f/65/75a9a76db8364b5d0e52a0c20eabc5d52297385d9af9c35335b924fafdee/aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e", size = 1719200, upload-time = "2025-10-28T20:58:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/f5/55/8df2ed78d7f41d232f6bd3ff866b6f617026551aa1d07e2f03458f964575/aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5", size = 1843497, upload-time = "2025-10-28T20:58:24.672Z" }, + { url = "https://files.pythonhosted.org/packages/e9/e0/94d7215e405c5a02ccb6a35c7a3a6cfff242f457a00196496935f700cde5/aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad", size = 1935703, upload-time = "2025-10-28T20:58:26.758Z" }, + { url = "https://files.pythonhosted.org/packages/0b/78/1eeb63c3f9b2d1015a4c02788fb543141aad0a03ae3f7a7b669b2483f8d4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e", size = 1792738, upload-time = "2025-10-28T20:58:29.787Z" }, + { url = "https://files.pythonhosted.org/packages/41/75/aaf1eea4c188e51538c04cc568040e3082db263a57086ea74a7d38c39e42/aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61", size = 1624061, upload-time = "2025-10-28T20:58:32.529Z" }, + { url = "https://files.pythonhosted.org/packages/9b/c2/3b6034de81fbcc43de8aeb209073a2286dfb50b86e927b4efd81cf848197/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661", size = 1789201, upload-time = "2025-10-28T20:58:34.618Z" }, + { url = "https://files.pythonhosted.org/packages/c9/38/c15dcf6d4d890217dae79d7213988f4e5fe6183d43893a9cf2fe9e84ca8d/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98", size = 1776868, upload-time = "2025-10-28T20:58:38.835Z" }, + { url = "https://files.pythonhosted.org/packages/04/75/f74fd178ac81adf4f283a74847807ade5150e48feda6aef024403716c30c/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693", size = 1790660, upload-time = "2025-10-28T20:58:41.507Z" }, + { url = "https://files.pythonhosted.org/packages/e7/80/7368bd0d06b16b3aba358c16b919e9c46cf11587dc572091031b0e9e3ef0/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a", size = 1617548, upload-time = "2025-10-28T20:58:43.674Z" }, + { url = "https://files.pythonhosted.org/packages/7d/4b/a6212790c50483cb3212e507378fbe26b5086d73941e1ec4b56a30439688/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be", size = 1817240, upload-time = "2025-10-28T20:58:45.787Z" }, + { url = "https://files.pythonhosted.org/packages/ff/f7/ba5f0ba4ea8d8f3c32850912944532b933acbf0f3a75546b89269b9b7dde/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c", size = 1762334, upload-time = "2025-10-28T20:58:47.936Z" }, + { url = "https://files.pythonhosted.org/packages/7e/83/1a5a1856574588b1cad63609ea9ad75b32a8353ac995d830bf5da9357364/aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734", size = 464685, upload-time = "2025-10-28T20:58:50.642Z" }, + { url = "https://files.pythonhosted.org/packages/9f/4d/d22668674122c08f4d56972297c51a624e64b3ed1efaa40187607a7cb66e/aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f", size = 498093, upload-time = "2025-10-28T20:58:52.782Z" }, ] [[package]] name = "aioitertools" -version = "0.12.0" +version = "0.13.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/06/de/38491a84ab323b47c7f86e94d2830e748780525f7a10c8600b67ead7e9ea/aioitertools-0.12.0.tar.gz", hash = "sha256:c2a9055b4fbb7705f561b9d86053e8af5d10cc845d22c32008c43490b2d8dd6b", size = 19369, upload-time = "2024-09-02T03:33:40.349Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fd/3c/53c4a17a05fb9ea2313ee1777ff53f5e001aefd5cc85aa2f4c2d982e1e38/aioitertools-0.13.0.tar.gz", hash = "sha256:620bd241acc0bbb9ec819f1ab215866871b4bbd1f73836a55f799200ee86950c", size = 19322, upload-time = "2025-11-06T22:17:07.609Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/85/13/58b70a580de00893223d61de8fea167877a3aed97d4a5e1405c9159ef925/aioitertools-0.12.0-py3-none-any.whl", hash = "sha256:fc1f5fac3d737354de8831cbba3eb04f79dd649d8f3afb4c5b114925e662a796", size = 24345, upload-time = "2024-09-02T03:34:59.454Z" }, + { url = "https://files.pythonhosted.org/packages/10/a1/510b0a7fadc6f43a6ce50152e69dbd86415240835868bb0bd9b5b88b1e06/aioitertools-0.13.0-py3-none-any.whl", hash = "sha256:0be0292b856f08dfac90e31f4739432f4cb6d7520ab9eb73e143f4f2fa5259be", size = 24182, upload-time = "2025-11-06T22:17:06.502Z" }, ] [[package]] @@ -169,16 +169,25 @@ wheels = [ [[package]] name = "alembic" -version = "1.17.0" +version = "1.17.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mako" }, { name = "sqlalchemy" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6b/45/6f4555f2039f364c3ce31399529dcf48dd60726ff3715ad67f547d87dfd2/alembic-1.17.0.tar.gz", hash = "sha256:4652a0b3e19616b57d652b82bfa5e38bf5dbea0813eed971612671cb9e90c0fe", size = 1975526, upload-time = "2025-10-11T18:40:13.585Z" } +sdist = { url = "https://files.pythonhosted.org/packages/02/a6/74c8cadc2882977d80ad756a13857857dbcf9bd405bc80b662eb10651282/alembic-1.17.2.tar.gz", hash = "sha256:bbe9751705c5e0f14877f02d46c53d10885e377e3d90eda810a016f9baa19e8e", size = 1988064, upload-time = "2025-11-14T20:35:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/88/6237e97e3385b57b5f1528647addea5cc03d4d65d5979ab24327d41fb00d/alembic-1.17.2-py3-none-any.whl", hash = "sha256:f483dd1fe93f6c5d49217055e4d15b905b425b6af906746abb35b69c1996c4e6", size = 248554, upload-time = "2025-11-14T20:35:05.699Z" }, +] + +[[package]] +name = "annotated-doc" +version = "0.0.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/44/1f/38e29b06bfed7818ebba1f84904afdc8153ef7b6c7e0d8f3bc6643f5989c/alembic-1.17.0-py3-none-any.whl", hash = "sha256:80523bc437d41b35c5db7e525ad9d908f79de65c27d6a5a5eab6df348a352d99", size = 247449, upload-time = "2025-10-11T18:40:16.288Z" }, + { url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" }, ] [[package]] @@ -224,11 +233,11 @@ wheels = [ [[package]] name = "asttokens" -version = "3.0.0" +version = "3.0.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978, upload-time = "2024-11-30T04:30:14.439Z" } +sdist = { url = "https://files.pythonhosted.org/packages/be/a5/8e3f9b6771b0b408517c82d97aed8f2036509bc247d46114925e32fe33f0/asttokens-3.0.1.tar.gz", hash = "sha256:71a4ee5de0bde6a31d64f6b13f2293ac190344478f081c3d1bccfcf5eacb0cb7", size = 62308, upload-time = "2025-11-15T16:43:48.578Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918, upload-time = "2024-11-30T04:30:10.946Z" }, + { url = "https://files.pythonhosted.org/packages/d2/39/e7eaf1799466a4aef85b6a4fe7bd175ad2b1c6345066aa33f1f58d4b18d0/asttokens-3.0.1-py3-none-any.whl", hash = "sha256:15a3ebc0f43c2d0a50eeafea25e19046c68398e487b9f1f5b517f7c0f40f976a", size = 27047, upload-time = "2025-11-15T16:43:16.109Z" }, ] [[package]] @@ -257,15 +266,15 @@ wheels = [ [[package]] name = "aws-xray-sdk" -version = "2.14.0" +version = "2.15.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, { name = "wrapt" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e0/6c/8e7fb2a45f20afc5c19d52807b560793fb48b0feca1de7de116b62a7893e/aws_xray_sdk-2.14.0.tar.gz", hash = "sha256:aab843c331af9ab9ba5cefb3a303832a19db186140894a523edafc024cc0493c", size = 93976, upload-time = "2024-06-04T22:11:38.124Z" } +sdist = { url = "https://files.pythonhosted.org/packages/14/25/0cbd7a440080def5e6f063720c3b190a25f8aa2938c1e34415dc18241596/aws_xray_sdk-2.15.0.tar.gz", hash = "sha256:794381b96e835314345068ae1dd3b9120bd8b4e21295066c37e8814dbb341365", size = 76315, upload-time = "2025-10-29T20:59:45Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/41/69/b417833a8926fa5491e5346d7c233bf7d8a9b12ba1f4ef41ccea2494000c/aws_xray_sdk-2.14.0-py2.py3-none-any.whl", hash = "sha256:cfbe6feea3d26613a2a869d14c9246a844285c97087ad8f296f901633554ad94", size = 101922, upload-time = "2024-06-04T22:12:25.729Z" }, + { url = "https://files.pythonhosted.org/packages/ef/c3/f30a7a63e664acc7c2545ca0491b6ce8264536e0e5cad3965f1d1b91e960/aws_xray_sdk-2.15.0-py2.py3-none-any.whl", hash = "sha256:422d62ad7d52e373eebb90b642eb1bb24657afe03b22a8df4a8b2e5108e278a3", size = 103228, upload-time = "2025-10-29T21:00:24.12Z" }, ] [[package]] @@ -310,39 +319,39 @@ wheels = [ [[package]] name = "boto3" -version = "1.40.49" +version = "1.40.70" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, { name = "jmespath" }, { name = "s3transfer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/32/5b/165dbfc6de77774b0dac5582ac8a7aa92652d61215871ff4c88854864fb0/boto3-1.40.49.tar.gz", hash = "sha256:ea37d133548fbae543092ada61aeb08bced8f9aecd2e96e803dc8237459a80a0", size = 111572, upload-time = "2025-10-09T19:21:49.295Z" } +sdist = { url = "https://files.pythonhosted.org/packages/37/12/d5ac34e0536e1914dde28245f014a635056dde0427f6efa09f104d7999f4/boto3-1.40.70.tar.gz", hash = "sha256:191443707b391232ed15676bf6bba7e53caec1e71aafa12ccad2e825c5ee15cc", size = 111638, upload-time = "2025-11-10T20:29:15.199Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/71/07/9b622ec8691911e3420c9872a50a9d333d4880d217e9eb25b327193099dc/boto3-1.40.49-py3-none-any.whl", hash = "sha256:64eb7af5f66998b34ad629786ff4a7f81d74c2d4ef9e42f69d99499dbee46d07", size = 139345, upload-time = "2025-10-09T19:21:46.886Z" }, + { url = "https://files.pythonhosted.org/packages/f3/cf/e24d08b37cd318754a8e94906c8b34b88676899aad1907ff6942311f13c4/boto3-1.40.70-py3-none-any.whl", hash = "sha256:e8c2f4f4cb36297270f1023ebe5b100333e0e88ab6457a9687d80143d2e15bf9", size = 139358, upload-time = "2025-11-10T20:29:13.512Z" }, ] [[package]] name = "botocore" -version = "1.40.49" +version = "1.40.70" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath" }, { name = "python-dateutil" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/01/6a/eb7503536552bbd3388b2607bc7a64e59d4f988336406b51a69d29f17ed2/botocore-1.40.49.tar.gz", hash = "sha256:fe8d4cbcc22de84c20190ae728c46b931bafeb40fce247010fb071c31b6532b5", size = 14415240, upload-time = "2025-10-09T19:21:37.133Z" } +sdist = { url = "https://files.pythonhosted.org/packages/35/c1/8c4c199ae1663feee579a15861e34f10b29da11ae6ea0ad7b6a847ef3823/botocore-1.40.70.tar.gz", hash = "sha256:61b1f2cecd54d1b28a081116fa113b97bf4e17da57c62ae2c2751fe4c528af1f", size = 14444592, upload-time = "2025-11-10T20:29:04.046Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fc/7b/dce396a3f7078e0432d40a9778602cbf0785ca91e7bcb64e05f19dfb5662/botocore-1.40.49-py3-none-any.whl", hash = "sha256:bf1089d0e77e4fc2e195d81c519b194ab62a4d4dd3e7113ee4e2bf903b0b75ab", size = 14085172, upload-time = "2025-10-09T19:21:32.721Z" }, + { url = "https://files.pythonhosted.org/packages/55/d2/507fd0ee4dd574d2bdbdeac5df83f39d2cae1ffe97d4622cca6f6bab39f1/botocore-1.40.70-py3-none-any.whl", hash = "sha256:4a394ad25f5d9f1ef0bed610365744523eeb5c22de6862ab25d8c93f9f6d295c", size = 14106829, upload-time = "2025-11-10T20:29:01.101Z" }, ] [[package]] name = "cachetools" -version = "6.2.1" +version = "6.2.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cc/7e/b975b5814bd36faf009faebe22c1072a1fa1168db34d285ef0ba071ad78c/cachetools-6.2.1.tar.gz", hash = "sha256:3f391e4bd8f8bf0931169baf7456cc822705f4e2a31f840d218f445b9a854201", size = 31325, upload-time = "2025-10-12T14:55:30.139Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/44/ca1675be2a83aeee1886ab745b28cda92093066590233cc501890eb8417a/cachetools-6.2.2.tar.gz", hash = "sha256:8e6d266b25e539df852251cfd6f990b4bc3a141db73b939058d809ebd2590fc6", size = 31571, upload-time = "2025-11-13T17:42:51.465Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/96/c5/1e741d26306c42e2bf6ab740b2202872727e0f606033c9dd713f8b93f5a8/cachetools-6.2.1-py3-none-any.whl", hash = "sha256:09868944b6dde876dfd44e1d47e18484541eaf12f26f29b7af91b26cc892d701", size = 11280, upload-time = "2025-10-12T14:55:28.382Z" }, + { url = "https://files.pythonhosted.org/packages/e6/46/eb6eca305c77a4489affe1c5d8f4cae82f285d9addd8de4ec084a7184221/cachetools-6.2.2-py3-none-any.whl", hash = "sha256:6c09c98183bf58560c97b2abfcedcbaf6a896a490f534b031b661d3723b45ace", size = 11503, upload-time = "2025-11-13T17:42:50.232Z" }, ] [[package]] @@ -359,11 +368,11 @@ wheels = [ [[package]] name = "certifi" -version = "2025.10.5" +version = "2025.11.12" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4c/5b/b6ce21586237c77ce67d01dc5507039d444b630dd76611bbca2d8e5dcd91/certifi-2025.10.5.tar.gz", hash = "sha256:47c09d31ccf2acf0be3f701ea53595ee7e0b8fa08801c6624be771df09ae7b43", size = 164519, upload-time = "2025-10-05T04:12:15.808Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e4/37/af0d2ef3967ac0d6113837b44a4f0bfe1328c2b9763bd5b1744520e5cfed/certifi-2025.10.5-py3-none-any.whl", hash = "sha256:0f212c2744a9bb6de0c56639a6f68afe01ecd92d91f14ae897c4fe7bbeeef0de", size = 163286, upload-time = "2025-10-05T04:12:14.03Z" }, + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, ] [[package]] @@ -425,7 +434,7 @@ wheels = [ [[package]] name = "cfn-lint" -version = "1.40.2" +version = "1.40.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aws-sam-translator" }, @@ -436,9 +445,9 @@ dependencies = [ { name = "sympy" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/cd/3e/0e653b305bf8f77d377943e7294176bdc4b1db9d29c989c1cc6255f7ac40/cfn_lint-1.40.2.tar.gz", hash = "sha256:5822b2c90f7f2646823a47db9df7a60c23df46bbac34b2081d8a0b3b806c91eb", size = 3352309, upload-time = "2025-10-14T17:59:48.146Z" } +sdist = { url = "https://files.pythonhosted.org/packages/07/32/9355c1309345622aaee6e997e1417dcd2382e05c14e09c49584c4fbe83a7/cfn_lint-1.40.4.tar.gz", hash = "sha256:7c8bcf3cf5f2cf8d96fd30fdee1115bfc2480a4c619afc8bce36d551fbb228e1", size = 3401228, upload-time = "2025-11-03T20:38:49.744Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4c/8e/ec1a99f4441bd14569e59ba0e5bca150c807493bb9cb06feabc8ac2bbb5f/cfn_lint-1.40.2-py3-none-any.whl", hash = "sha256:fa44a3101bd8d7f644bc146b8a9e63d0fa2b64cd61c8a767e65c46920646277c", size = 5670475, upload-time = "2025-10-14T17:59:45.294Z" }, + { url = "https://files.pythonhosted.org/packages/12/22/6a0e9a88ec1e2d0751fff31f3d9c2eb1568879903fa7fcae3770e62609b5/cfn_lint-1.40.4-py3-none-any.whl", hash = "sha256:7b8bf9dac877842633d8403a8b2c31874b21c9922d74813da34e552b4cf03915", size = 5638505, upload-time = "2025-11-03T20:38:47.416Z" }, ] [[package]] @@ -500,23 +509,23 @@ wheels = [ [[package]] name = "click" -version = "8.3.0" +version = "8.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/46/61/de6cd827efad202d7057d93e0fed9294b96952e188f7384832791c7b2254/click-8.3.0.tar.gz", hash = "sha256:e7b8232224eba16f4ebe410c25ced9f7875cb5f3263ffc93cc3e8da705e229c4", size = 276943, upload-time = "2025-09-18T17:32:23.696Z" } +sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/db/d3/9dcc0f5797f070ec8edf30fbadfb200e71d9db6b84d211e3b2085a7589a0/click-8.3.0-py3-none-any.whl", hash = "sha256:9b9f285302c6e3064f4330c05f05b81945b2a39544279343e6e7c5f27a9baddc", size = 107295, upload-time = "2025-09-18T17:32:22.42Z" }, + { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, ] [[package]] name = "cloudpickle" -version = "3.1.1" +version = "3.1.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/52/39/069100b84d7418bc358d81669d5748efb14b9cceacd2f9c75f550424132f/cloudpickle-3.1.1.tar.gz", hash = "sha256:b216fa8ae4019d5482a8ac3c95d8f6346115d8835911fd4aefd1a445e4242c64", size = 22113, upload-time = "2025-01-14T17:02:05.085Z" } +sdist = { url = "https://files.pythonhosted.org/packages/27/fb/576f067976d320f5f0114a8d9fa1215425441bb35627b1993e5afd8111e5/cloudpickle-3.1.2.tar.gz", hash = "sha256:7fda9eb655c9c230dab534f1983763de5835249750e85fbcef43aaa30a9a2414", size = 22330, upload-time = "2025-11-03T09:25:26.604Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/e8/64c37fadfc2816a7701fa8a6ed8d87327c7d54eacfbfb6edab14a2f2be75/cloudpickle-3.1.1-py3-none-any.whl", hash = "sha256:c8c5a44295039331ee9dad40ba100a9c7297b6f988e50e87ccdf3765a668350e", size = 20992, upload-time = "2025-01-14T17:02:02.417Z" }, + { url = "https://files.pythonhosted.org/packages/88/39/799be3f2f0f38cc727ee3b4f1445fe6d5e4133064ec2e4115069418a5bb6/cloudpickle-3.1.2-py3-none-any.whl", hash = "sha256:9acb47f6afd73f60dc1df93bb801b472f05ff42fa6c84167d25cb206be1fbf4a", size = 22228, upload-time = "2025-11-03T09:25:25.534Z" }, ] [[package]] @@ -596,76 +605,76 @@ wheels = [ [[package]] name = "coverage" -version = "7.11.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1c/38/ee22495420457259d2f3390309505ea98f98a5eed40901cf62196abad006/coverage-7.11.0.tar.gz", hash = "sha256:167bd504ac1ca2af7ff3b81d245dfea0292c5032ebef9d66cc08a7d28c1b8050", size = 811905, upload-time = "2025-10-15T15:15:08.542Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c4/db/86f6906a7c7edc1a52b2c6682d6dd9be775d73c0dfe2b84f8923dfea5784/coverage-7.11.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9c49e77811cf9d024b95faf86c3f059b11c0c9be0b0d61bc598f453703bd6fd1", size = 216098, upload-time = "2025-10-15T15:13:02.916Z" }, - { url = "https://files.pythonhosted.org/packages/21/54/e7b26157048c7ba555596aad8569ff903d6cd67867d41b75287323678ede/coverage-7.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a61e37a403a778e2cda2a6a39abcc895f1d984071942a41074b5c7ee31642007", size = 216331, upload-time = "2025-10-15T15:13:04.403Z" }, - { url = "https://files.pythonhosted.org/packages/b9/19/1ce6bf444f858b83a733171306134a0544eaddf1ca8851ede6540a55b2ad/coverage-7.11.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c79cae102bb3b1801e2ef1511fb50e91ec83a1ce466b2c7c25010d884336de46", size = 247825, upload-time = "2025-10-15T15:13:05.92Z" }, - { url = "https://files.pythonhosted.org/packages/71/0b/d3bcbbc259fcced5fb67c5d78f6e7ee965f49760c14afd931e9e663a83b2/coverage-7.11.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:16ce17ceb5d211f320b62df002fa7016b7442ea0fd260c11cec8ce7730954893", size = 250573, upload-time = "2025-10-15T15:13:07.471Z" }, - { url = "https://files.pythonhosted.org/packages/58/8d/b0ff3641a320abb047258d36ed1c21d16be33beed4152628331a1baf3365/coverage-7.11.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:80027673e9d0bd6aef86134b0771845e2da85755cf686e7c7c59566cf5a89115", size = 251706, upload-time = "2025-10-15T15:13:09.4Z" }, - { url = "https://files.pythonhosted.org/packages/59/c8/5a586fe8c7b0458053d9c687f5cff515a74b66c85931f7fe17a1c958b4ac/coverage-7.11.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4d3ffa07a08657306cd2215b0da53761c4d73cb54d9143b9303a6481ec0cd415", size = 248221, upload-time = "2025-10-15T15:13:10.964Z" }, - { url = "https://files.pythonhosted.org/packages/d0/ff/3a25e3132804ba44cfa9a778cdf2b73dbbe63ef4b0945e39602fc896ba52/coverage-7.11.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a3b6a5f8b2524fd6c1066bc85bfd97e78709bb5e37b5b94911a6506b65f47186", size = 249624, upload-time = "2025-10-15T15:13:12.5Z" }, - { url = "https://files.pythonhosted.org/packages/c5/12/ff10c8ce3895e1b17a73485ea79ebc1896a9e466a9d0f4aef63e0d17b718/coverage-7.11.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fcc0a4aa589de34bc56e1a80a740ee0f8c47611bdfb28cd1849de60660f3799d", size = 247744, upload-time = "2025-10-15T15:13:14.554Z" }, - { url = "https://files.pythonhosted.org/packages/16/02/d500b91f5471b2975947e0629b8980e5e90786fe316b6d7299852c1d793d/coverage-7.11.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:dba82204769d78c3fd31b35c3d5f46e06511936c5019c39f98320e05b08f794d", size = 247325, upload-time = "2025-10-15T15:13:16.438Z" }, - { url = "https://files.pythonhosted.org/packages/77/11/dee0284fbbd9cd64cfce806b827452c6df3f100d9e66188e82dfe771d4af/coverage-7.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:81b335f03ba67309a95210caf3eb43bd6fe75a4e22ba653ef97b4696c56c7ec2", size = 249180, upload-time = "2025-10-15T15:13:17.959Z" }, - { url = "https://files.pythonhosted.org/packages/59/1b/cdf1def928f0a150a057cab03286774e73e29c2395f0d30ce3d9e9f8e697/coverage-7.11.0-cp312-cp312-win32.whl", hash = "sha256:037b2d064c2f8cc8716fe4d39cb705779af3fbf1ba318dc96a1af858888c7bb5", size = 218479, upload-time = "2025-10-15T15:13:19.608Z" }, - { url = "https://files.pythonhosted.org/packages/ff/55/e5884d55e031da9c15b94b90a23beccc9d6beee65e9835cd6da0a79e4f3a/coverage-7.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:d66c0104aec3b75e5fd897e7940188ea1892ca1d0235316bf89286d6a22568c0", size = 219290, upload-time = "2025-10-15T15:13:21.593Z" }, - { url = "https://files.pythonhosted.org/packages/23/a8/faa930cfc71c1d16bc78f9a19bb73700464f9c331d9e547bfbc1dbd3a108/coverage-7.11.0-cp312-cp312-win_arm64.whl", hash = "sha256:d91ebeac603812a09cf6a886ba6e464f3bbb367411904ae3790dfe28311b15ad", size = 217924, upload-time = "2025-10-15T15:13:23.39Z" }, - { url = "https://files.pythonhosted.org/packages/60/7f/85e4dfe65e400645464b25c036a26ac226cf3a69d4a50c3934c532491cdd/coverage-7.11.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:cc3f49e65ea6e0d5d9bd60368684fe52a704d46f9e7fc413918f18d046ec40e1", size = 216129, upload-time = "2025-10-15T15:13:25.371Z" }, - { url = "https://files.pythonhosted.org/packages/96/5d/dc5fa98fea3c175caf9d360649cb1aa3715e391ab00dc78c4c66fabd7356/coverage-7.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f39ae2f63f37472c17b4990f794035c9890418b1b8cca75c01193f3c8d3e01be", size = 216380, upload-time = "2025-10-15T15:13:26.976Z" }, - { url = "https://files.pythonhosted.org/packages/b2/f5/3da9cc9596708273385189289c0e4d8197d37a386bdf17619013554b3447/coverage-7.11.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7db53b5cdd2917b6eaadd0b1251cf4e7d96f4a8d24e174bdbdf2f65b5ea7994d", size = 247375, upload-time = "2025-10-15T15:13:28.923Z" }, - { url = "https://files.pythonhosted.org/packages/65/6c/f7f59c342359a235559d2bc76b0c73cfc4bac7d61bb0df210965cb1ecffd/coverage-7.11.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10ad04ac3a122048688387828b4537bc9cf60c0bf4869c1e9989c46e45690b82", size = 249978, upload-time = "2025-10-15T15:13:30.525Z" }, - { url = "https://files.pythonhosted.org/packages/e7/8c/042dede2e23525e863bf1ccd2b92689692a148d8b5fd37c37899ba882645/coverage-7.11.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4036cc9c7983a2b1f2556d574d2eb2154ac6ed55114761685657e38782b23f52", size = 251253, upload-time = "2025-10-15T15:13:32.174Z" }, - { url = "https://files.pythonhosted.org/packages/7b/a9/3c58df67bfa809a7bddd786356d9c5283e45d693edb5f3f55d0986dd905a/coverage-7.11.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7ab934dd13b1c5e94b692b1e01bd87e4488cb746e3a50f798cb9464fd128374b", size = 247591, upload-time = "2025-10-15T15:13:34.147Z" }, - { url = "https://files.pythonhosted.org/packages/26/5b/c7f32efd862ee0477a18c41e4761305de6ddd2d49cdeda0c1116227570fd/coverage-7.11.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59a6e5a265f7cfc05f76e3bb53eca2e0dfe90f05e07e849930fecd6abb8f40b4", size = 249411, upload-time = "2025-10-15T15:13:38.425Z" }, - { url = "https://files.pythonhosted.org/packages/76/b5/78cb4f1e86c1611431c990423ec0768122905b03837e1b4c6a6f388a858b/coverage-7.11.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:df01d6c4c81e15a7c88337b795bb7595a8596e92310266b5072c7e301168efbd", size = 247303, upload-time = "2025-10-15T15:13:40.464Z" }, - { url = "https://files.pythonhosted.org/packages/87/c9/23c753a8641a330f45f221286e707c427e46d0ffd1719b080cedc984ec40/coverage-7.11.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:8c934bd088eed6174210942761e38ee81d28c46de0132ebb1801dbe36a390dcc", size = 247157, upload-time = "2025-10-15T15:13:42.087Z" }, - { url = "https://files.pythonhosted.org/packages/c5/42/6e0cc71dc8a464486e944a4fa0d85bdec031cc2969e98ed41532a98336b9/coverage-7.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a03eaf7ec24078ad64a07f02e30060aaf22b91dedf31a6b24d0d98d2bba7f48", size = 248921, upload-time = "2025-10-15T15:13:43.715Z" }, - { url = "https://files.pythonhosted.org/packages/e8/1c/743c2ef665e6858cccb0f84377dfe3a4c25add51e8c7ef19249be92465b6/coverage-7.11.0-cp313-cp313-win32.whl", hash = "sha256:695340f698a5f56f795b2836abe6fb576e7c53d48cd155ad2f80fd24bc63a040", size = 218526, upload-time = "2025-10-15T15:13:45.336Z" }, - { url = "https://files.pythonhosted.org/packages/ff/d5/226daadfd1bf8ddbccefbd3aa3547d7b960fb48e1bdac124e2dd13a2b71a/coverage-7.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:2727d47fce3ee2bac648528e41455d1b0c46395a087a229deac75e9f88ba5a05", size = 219317, upload-time = "2025-10-15T15:13:47.401Z" }, - { url = "https://files.pythonhosted.org/packages/97/54/47db81dcbe571a48a298f206183ba8a7ba79200a37cd0d9f4788fcd2af4a/coverage-7.11.0-cp313-cp313-win_arm64.whl", hash = "sha256:0efa742f431529699712b92ecdf22de8ff198df41e43aeaaadf69973eb93f17a", size = 217948, upload-time = "2025-10-15T15:13:49.096Z" }, - { url = "https://files.pythonhosted.org/packages/e5/8b/cb68425420154e7e2a82fd779a8cc01549b6fa83c2ad3679cd6c088ebd07/coverage-7.11.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:587c38849b853b157706407e9ebdca8fd12f45869edb56defbef2daa5fb0812b", size = 216837, upload-time = "2025-10-15T15:13:51.09Z" }, - { url = "https://files.pythonhosted.org/packages/33/55/9d61b5765a025685e14659c8d07037247de6383c0385757544ffe4606475/coverage-7.11.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b971bdefdd75096163dd4261c74be813c4508477e39ff7b92191dea19f24cd37", size = 217061, upload-time = "2025-10-15T15:13:52.747Z" }, - { url = "https://files.pythonhosted.org/packages/52/85/292459c9186d70dcec6538f06ea251bc968046922497377bf4a1dc9a71de/coverage-7.11.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:269bfe913b7d5be12ab13a95f3a76da23cf147be7fa043933320ba5625f0a8de", size = 258398, upload-time = "2025-10-15T15:13:54.45Z" }, - { url = "https://files.pythonhosted.org/packages/1f/e2/46edd73fb8bf51446c41148d81944c54ed224854812b6ca549be25113ee0/coverage-7.11.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:dadbcce51a10c07b7c72b0ce4a25e4b6dcb0c0372846afb8e5b6307a121eb99f", size = 260574, upload-time = "2025-10-15T15:13:56.145Z" }, - { url = "https://files.pythonhosted.org/packages/07/5e/1df469a19007ff82e2ca8fe509822820a31e251f80ee7344c34f6cd2ec43/coverage-7.11.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9ed43fa22c6436f7957df036331f8fe4efa7af132054e1844918866cd228af6c", size = 262797, upload-time = "2025-10-15T15:13:58.635Z" }, - { url = "https://files.pythonhosted.org/packages/f9/50/de216b31a1434b94d9b34a964c09943c6be45069ec704bfc379d8d89a649/coverage-7.11.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9516add7256b6713ec08359b7b05aeff8850c98d357784c7205b2e60aa2513fa", size = 257361, upload-time = "2025-10-15T15:14:00.409Z" }, - { url = "https://files.pythonhosted.org/packages/82/1e/3f9f8344a48111e152e0fd495b6fff13cc743e771a6050abf1627a7ba918/coverage-7.11.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb92e47c92fcbcdc692f428da67db33337fa213756f7adb6a011f7b5a7a20740", size = 260349, upload-time = "2025-10-15T15:14:02.188Z" }, - { url = "https://files.pythonhosted.org/packages/65/9b/3f52741f9e7d82124272f3070bbe316006a7de1bad1093f88d59bfc6c548/coverage-7.11.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d06f4fc7acf3cabd6d74941d53329e06bab00a8fe10e4df2714f0b134bfc64ef", size = 258114, upload-time = "2025-10-15T15:14:03.907Z" }, - { url = "https://files.pythonhosted.org/packages/0b/8b/918f0e15f0365d50d3986bbd3338ca01178717ac5678301f3f547b6619e6/coverage-7.11.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:6fbcee1a8f056af07ecd344482f711f563a9eb1c2cad192e87df00338ec3cdb0", size = 256723, upload-time = "2025-10-15T15:14:06.324Z" }, - { url = "https://files.pythonhosted.org/packages/44/9e/7776829f82d3cf630878a7965a7d70cc6ca94f22c7d20ec4944f7148cb46/coverage-7.11.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dbbf012be5f32533a490709ad597ad8a8ff80c582a95adc8d62af664e532f9ca", size = 259238, upload-time = "2025-10-15T15:14:08.002Z" }, - { url = "https://files.pythonhosted.org/packages/9a/b8/49cf253e1e7a3bedb85199b201862dd7ca4859f75b6cf25ffa7298aa0760/coverage-7.11.0-cp313-cp313t-win32.whl", hash = "sha256:cee6291bb4fed184f1c2b663606a115c743df98a537c969c3c64b49989da96c2", size = 219180, upload-time = "2025-10-15T15:14:09.786Z" }, - { url = "https://files.pythonhosted.org/packages/ac/e1/1a541703826be7ae2125a0fb7f821af5729d56bb71e946e7b933cc7a89a4/coverage-7.11.0-cp313-cp313t-win_amd64.whl", hash = "sha256:a386c1061bf98e7ea4758e4313c0ab5ecf57af341ef0f43a0bf26c2477b5c268", size = 220241, upload-time = "2025-10-15T15:14:11.471Z" }, - { url = "https://files.pythonhosted.org/packages/d5/d1/5ee0e0a08621140fd418ec4020f595b4d52d7eb429ae6a0c6542b4ba6f14/coverage-7.11.0-cp313-cp313t-win_arm64.whl", hash = "sha256:f9ea02ef40bb83823b2b04964459d281688fe173e20643870bb5d2edf68bc836", size = 218510, upload-time = "2025-10-15T15:14:13.46Z" }, - { url = "https://files.pythonhosted.org/packages/f4/06/e923830c1985ce808e40a3fa3eb46c13350b3224b7da59757d37b6ce12b8/coverage-7.11.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c770885b28fb399aaf2a65bbd1c12bf6f307ffd112d6a76c5231a94276f0c497", size = 216110, upload-time = "2025-10-15T15:14:15.157Z" }, - { url = "https://files.pythonhosted.org/packages/42/82/cdeed03bfead45203fb651ed756dfb5266028f5f939e7f06efac4041dad5/coverage-7.11.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a3d0e2087dba64c86a6b254f43e12d264b636a39e88c5cc0a01a7c71bcfdab7e", size = 216395, upload-time = "2025-10-15T15:14:16.863Z" }, - { url = "https://files.pythonhosted.org/packages/fc/ba/e1c80caffc3199aa699813f73ff097bc2df7b31642bdbc7493600a8f1de5/coverage-7.11.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:73feb83bb41c32811973b8565f3705caf01d928d972b72042b44e97c71fd70d1", size = 247433, upload-time = "2025-10-15T15:14:18.589Z" }, - { url = "https://files.pythonhosted.org/packages/80/c0/5b259b029694ce0a5bbc1548834c7ba3db41d3efd3474489d7efce4ceb18/coverage-7.11.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c6f31f281012235ad08f9a560976cc2fc9c95c17604ff3ab20120fe480169bca", size = 249970, upload-time = "2025-10-15T15:14:20.307Z" }, - { url = "https://files.pythonhosted.org/packages/8c/86/171b2b5e1aac7e2fd9b43f7158b987dbeb95f06d1fbecad54ad8163ae3e8/coverage-7.11.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9570ad567f880ef675673992222746a124b9595506826b210fbe0ce3f0499cd", size = 251324, upload-time = "2025-10-15T15:14:22.419Z" }, - { url = "https://files.pythonhosted.org/packages/1a/7e/7e10414d343385b92024af3932a27a1caf75c6e27ee88ba211221ff1a145/coverage-7.11.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8badf70446042553a773547a61fecaa734b55dc738cacf20c56ab04b77425e43", size = 247445, upload-time = "2025-10-15T15:14:24.205Z" }, - { url = "https://files.pythonhosted.org/packages/c4/3b/e4f966b21f5be8c4bf86ad75ae94efa0de4c99c7bbb8114476323102e345/coverage-7.11.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a09c1211959903a479e389685b7feb8a17f59ec5a4ef9afde7650bd5eabc2777", size = 249324, upload-time = "2025-10-15T15:14:26.234Z" }, - { url = "https://files.pythonhosted.org/packages/00/a2/8479325576dfcd909244d0df215f077f47437ab852ab778cfa2f8bf4d954/coverage-7.11.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:5ef83b107f50db3f9ae40f69e34b3bd9337456c5a7fe3461c7abf8b75dd666a2", size = 247261, upload-time = "2025-10-15T15:14:28.42Z" }, - { url = "https://files.pythonhosted.org/packages/7b/d8/3a9e2db19d94d65771d0f2e21a9ea587d11b831332a73622f901157cc24b/coverage-7.11.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:f91f927a3215b8907e214af77200250bb6aae36eca3f760f89780d13e495388d", size = 247092, upload-time = "2025-10-15T15:14:30.784Z" }, - { url = "https://files.pythonhosted.org/packages/b3/b1/bbca3c472544f9e2ad2d5116b2379732957048be4b93a9c543fcd0207e5f/coverage-7.11.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:cdbcd376716d6b7fbfeedd687a6c4be019c5a5671b35f804ba76a4c0a778cba4", size = 248755, upload-time = "2025-10-15T15:14:32.585Z" }, - { url = "https://files.pythonhosted.org/packages/89/49/638d5a45a6a0f00af53d6b637c87007eb2297042186334e9923a61aa8854/coverage-7.11.0-cp314-cp314-win32.whl", hash = "sha256:bab7ec4bb501743edc63609320aaec8cd9188b396354f482f4de4d40a9d10721", size = 218793, upload-time = "2025-10-15T15:14:34.972Z" }, - { url = "https://files.pythonhosted.org/packages/30/cc/b675a51f2d068adb3cdf3799212c662239b0ca27f4691d1fff81b92ea850/coverage-7.11.0-cp314-cp314-win_amd64.whl", hash = "sha256:3d4ba9a449e9364a936a27322b20d32d8b166553bfe63059bd21527e681e2fad", size = 219587, upload-time = "2025-10-15T15:14:37.047Z" }, - { url = "https://files.pythonhosted.org/packages/93/98/5ac886876026de04f00820e5094fe22166b98dcb8b426bf6827aaf67048c/coverage-7.11.0-cp314-cp314-win_arm64.whl", hash = "sha256:ce37f215223af94ef0f75ac68ea096f9f8e8c8ec7d6e8c346ee45c0d363f0479", size = 218168, upload-time = "2025-10-15T15:14:38.861Z" }, - { url = "https://files.pythonhosted.org/packages/14/d1/b4145d35b3e3ecf4d917e97fc8895bcf027d854879ba401d9ff0f533f997/coverage-7.11.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:f413ce6e07e0d0dc9c433228727b619871532674b45165abafe201f200cc215f", size = 216850, upload-time = "2025-10-15T15:14:40.651Z" }, - { url = "https://files.pythonhosted.org/packages/ca/d1/7f645fc2eccd318369a8a9948acc447bb7c1ade2911e31d3c5620544c22b/coverage-7.11.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:05791e528a18f7072bf5998ba772fe29db4da1234c45c2087866b5ba4dea710e", size = 217071, upload-time = "2025-10-15T15:14:42.755Z" }, - { url = "https://files.pythonhosted.org/packages/54/7d/64d124649db2737ceced1dfcbdcb79898d5868d311730f622f8ecae84250/coverage-7.11.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cacb29f420cfeb9283b803263c3b9a068924474ff19ca126ba9103e1278dfa44", size = 258570, upload-time = "2025-10-15T15:14:44.542Z" }, - { url = "https://files.pythonhosted.org/packages/6c/3f/6f5922f80dc6f2d8b2c6f974835c43f53eb4257a7797727e6ca5b7b2ec1f/coverage-7.11.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:314c24e700d7027ae3ab0d95fbf8d53544fca1f20345fd30cd219b737c6e58d3", size = 260738, upload-time = "2025-10-15T15:14:46.436Z" }, - { url = "https://files.pythonhosted.org/packages/0e/5f/9e883523c4647c860b3812b417a2017e361eca5b635ee658387dc11b13c1/coverage-7.11.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:630d0bd7a293ad2fc8b4b94e5758c8b2536fdf36c05f1681270203e463cbfa9b", size = 262994, upload-time = "2025-10-15T15:14:48.3Z" }, - { url = "https://files.pythonhosted.org/packages/07/bb/43b5a8e94c09c8bf51743ffc65c4c841a4ca5d3ed191d0a6919c379a1b83/coverage-7.11.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e89641f5175d65e2dbb44db15fe4ea48fade5d5bbb9868fdc2b4fce22f4a469d", size = 257282, upload-time = "2025-10-15T15:14:50.236Z" }, - { url = "https://files.pythonhosted.org/packages/aa/e5/0ead8af411411330b928733e1d201384b39251a5f043c1612970310e8283/coverage-7.11.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c9f08ea03114a637dab06cedb2e914da9dc67fa52c6015c018ff43fdde25b9c2", size = 260430, upload-time = "2025-10-15T15:14:52.413Z" }, - { url = "https://files.pythonhosted.org/packages/ae/66/03dd8bb0ba5b971620dcaac145461950f6d8204953e535d2b20c6b65d729/coverage-7.11.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:ce9f3bde4e9b031eaf1eb61df95c1401427029ea1bfddb8621c1161dcb0fa02e", size = 258190, upload-time = "2025-10-15T15:14:54.268Z" }, - { url = "https://files.pythonhosted.org/packages/45/ae/28a9cce40bf3174426cb2f7e71ee172d98e7f6446dff936a7ccecee34b14/coverage-7.11.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:e4dc07e95495923d6fd4d6c27bf70769425b71c89053083843fd78f378558996", size = 256658, upload-time = "2025-10-15T15:14:56.436Z" }, - { url = "https://files.pythonhosted.org/packages/5c/7c/3a44234a8599513684bfc8684878fd7b126c2760f79712bb78c56f19efc4/coverage-7.11.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:424538266794db2861db4922b05d729ade0940ee69dcf0591ce8f69784db0e11", size = 259342, upload-time = "2025-10-15T15:14:58.538Z" }, - { url = "https://files.pythonhosted.org/packages/e1/e6/0108519cba871af0351725ebdb8660fd7a0fe2ba3850d56d32490c7d9b4b/coverage-7.11.0-cp314-cp314t-win32.whl", hash = "sha256:4c1eeb3fb8eb9e0190bebafd0462936f75717687117339f708f395fe455acc73", size = 219568, upload-time = "2025-10-15T15:15:00.382Z" }, - { url = "https://files.pythonhosted.org/packages/c9/76/44ba876e0942b4e62fdde23ccb029ddb16d19ba1bef081edd00857ba0b16/coverage-7.11.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b56efee146c98dbf2cf5cffc61b9829d1e94442df4d7398b26892a53992d3547", size = 220687, upload-time = "2025-10-15T15:15:02.322Z" }, - { url = "https://files.pythonhosted.org/packages/b9/0c/0df55ecb20d0d0ed5c322e10a441775e1a3a5d78c60f0c4e1abfe6fcf949/coverage-7.11.0-cp314-cp314t-win_arm64.whl", hash = "sha256:b5c2705afa83f49bd91962a4094b6b082f94aef7626365ab3f8f4bd159c5acf3", size = 218711, upload-time = "2025-10-15T15:15:04.575Z" }, - { url = "https://files.pythonhosted.org/packages/5f/04/642c1d8a448ae5ea1369eac8495740a79eb4e581a9fb0cbdce56bbf56da1/coverage-7.11.0-py3-none-any.whl", hash = "sha256:4b7589765348d78fb4e5fb6ea35d07564e387da2fc5efff62e0222971f155f68", size = 207761, upload-time = "2025-10-15T15:15:06.439Z" }, +version = "7.11.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d2/59/9698d57a3b11704c7b89b21d69e9d23ecf80d538cabb536c8b63f4a12322/coverage-7.11.3.tar.gz", hash = "sha256:0f59387f5e6edbbffec2281affb71cdc85e0776c1745150a3ab9b6c1d016106b", size = 815210, upload-time = "2025-11-10T00:13:17.18Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/39/af056ec7a27c487e25c7f6b6e51d2ee9821dba1863173ddf4dc2eebef4f7/coverage-7.11.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5b771b59ac0dfb7f139f70c85b42717ef400a6790abb6475ebac1ecee8de782f", size = 216676, upload-time = "2025-11-10T00:11:11.566Z" }, + { url = "https://files.pythonhosted.org/packages/3c/f8/21126d34b174d037b5d01bea39077725cbb9a0da94a95c5f96929c695433/coverage-7.11.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:603c4414125fc9ae9000f17912dcfd3d3eb677d4e360b85206539240c96ea76e", size = 217034, upload-time = "2025-11-10T00:11:13.12Z" }, + { url = "https://files.pythonhosted.org/packages/d5/3f/0fd35f35658cdd11f7686303214bd5908225838f374db47f9e457c8d6df8/coverage-7.11.3-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:77ffb3b7704eb7b9b3298a01fe4509cef70117a52d50bcba29cffc5f53dd326a", size = 248531, upload-time = "2025-11-10T00:11:15.023Z" }, + { url = "https://files.pythonhosted.org/packages/8f/59/0bfc5900fc15ce4fd186e092451de776bef244565c840c9c026fd50857e1/coverage-7.11.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:4d4ca49f5ba432b0755ebb0fc3a56be944a19a16bb33802264bbc7311622c0d1", size = 251290, upload-time = "2025-11-10T00:11:16.628Z" }, + { url = "https://files.pythonhosted.org/packages/71/88/d5c184001fa2ac82edf1b8f2cd91894d2230d7c309e937c54c796176e35b/coverage-7.11.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:05fd3fb6edff0c98874d752013588836f458261e5eba587afe4c547bba544afd", size = 252375, upload-time = "2025-11-10T00:11:18.249Z" }, + { url = "https://files.pythonhosted.org/packages/5c/29/f60af9f823bf62c7a00ce1ac88441b9a9a467e499493e5cc65028c8b8dd2/coverage-7.11.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0e920567f8c3a3ce68ae5a42cf7c2dc4bb6cc389f18bff2235dd8c03fa405de5", size = 248946, upload-time = "2025-11-10T00:11:20.202Z" }, + { url = "https://files.pythonhosted.org/packages/67/16/4662790f3b1e03fce5280cad93fd18711c35980beb3c6f28dca41b5230c6/coverage-7.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4bec8c7160688bd5a34e65c82984b25409563134d63285d8943d0599efbc448e", size = 250310, upload-time = "2025-11-10T00:11:21.689Z" }, + { url = "https://files.pythonhosted.org/packages/8f/75/dd6c2e28308a83e5fc1ee602f8204bd3aa5af685c104cb54499230cf56db/coverage-7.11.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:adb9b7b42c802bd8cb3927de8c1c26368ce50c8fdaa83a9d8551384d77537044", size = 248461, upload-time = "2025-11-10T00:11:23.384Z" }, + { url = "https://files.pythonhosted.org/packages/16/fe/b71af12be9f59dc9eb060688fa19a95bf3223f56c5af1e9861dfa2275d2c/coverage-7.11.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:c8f563b245b4ddb591e99f28e3cd140b85f114b38b7f95b2e42542f0603eb7d7", size = 248039, upload-time = "2025-11-10T00:11:25.07Z" }, + { url = "https://files.pythonhosted.org/packages/11/b8/023b2003a2cd96bdf607afe03d9b96c763cab6d76e024abe4473707c4eb8/coverage-7.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e2a96fdc7643c9517a317553aca13b5cae9bad9a5f32f4654ce247ae4d321405", size = 249903, upload-time = "2025-11-10T00:11:26.992Z" }, + { url = "https://files.pythonhosted.org/packages/d6/ee/5f1076311aa67b1fa4687a724cc044346380e90ce7d94fec09fd384aa5fd/coverage-7.11.3-cp312-cp312-win32.whl", hash = "sha256:e8feeb5e8705835f0622af0fe7ff8d5cb388948454647086494d6c41ec142c2e", size = 219201, upload-time = "2025-11-10T00:11:28.619Z" }, + { url = "https://files.pythonhosted.org/packages/4f/24/d21688f48fe9fcc778956680fd5aaf69f4e23b245b7c7a4755cbd421d25b/coverage-7.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:abb903ffe46bd319d99979cdba350ae7016759bb69f47882242f7b93f3356055", size = 220012, upload-time = "2025-11-10T00:11:30.234Z" }, + { url = "https://files.pythonhosted.org/packages/4f/9e/d5eb508065f291456378aa9b16698b8417d87cb084c2b597f3beb00a8084/coverage-7.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:1451464fd855d9bd000c19b71bb7dafea9ab815741fb0bd9e813d9b671462d6f", size = 218652, upload-time = "2025-11-10T00:11:32.165Z" }, + { url = "https://files.pythonhosted.org/packages/6d/f6/d8572c058211c7d976f24dab71999a565501fb5b3cdcb59cf782f19c4acb/coverage-7.11.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84b892e968164b7a0498ddc5746cdf4e985700b902128421bb5cec1080a6ee36", size = 216694, upload-time = "2025-11-10T00:11:34.296Z" }, + { url = "https://files.pythonhosted.org/packages/4a/f6/b6f9764d90c0ce1bce8d995649fa307fff21f4727b8d950fa2843b7b0de5/coverage-7.11.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f761dbcf45e9416ec4698e1a7649248005f0064ce3523a47402d1bff4af2779e", size = 217065, upload-time = "2025-11-10T00:11:36.281Z" }, + { url = "https://files.pythonhosted.org/packages/a5/8d/a12cb424063019fd077b5be474258a0ed8369b92b6d0058e673f0a945982/coverage-7.11.3-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1410bac9e98afd9623f53876fae7d8a5db9f5a0ac1c9e7c5188463cb4b3212e2", size = 248062, upload-time = "2025-11-10T00:11:37.903Z" }, + { url = "https://files.pythonhosted.org/packages/7f/9c/dab1a4e8e75ce053d14259d3d7485d68528a662e286e184685ea49e71156/coverage-7.11.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:004cdcea3457c0ea3233622cd3464c1e32ebba9b41578421097402bee6461b63", size = 250657, upload-time = "2025-11-10T00:11:39.509Z" }, + { url = "https://files.pythonhosted.org/packages/3f/89/a14f256438324f33bae36f9a1a7137729bf26b0a43f5eda60b147ec7c8c7/coverage-7.11.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8f067ada2c333609b52835ca4d4868645d3b63ac04fb2b9a658c55bba7f667d3", size = 251900, upload-time = "2025-11-10T00:11:41.372Z" }, + { url = "https://files.pythonhosted.org/packages/04/07/75b0d476eb349f1296486b1418b44f2d8780cc8db47493de3755e5340076/coverage-7.11.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:07bc7745c945a6d95676953e86ba7cebb9f11de7773951c387f4c07dc76d03f5", size = 248254, upload-time = "2025-11-10T00:11:43.27Z" }, + { url = "https://files.pythonhosted.org/packages/5a/4b/0c486581fa72873489ca092c52792d008a17954aa352809a7cbe6cf0bf07/coverage-7.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8bba7e4743e37484ae17d5c3b8eb1ce78b564cb91b7ace2e2182b25f0f764cb5", size = 250041, upload-time = "2025-11-10T00:11:45.274Z" }, + { url = "https://files.pythonhosted.org/packages/af/a3/0059dafb240ae3e3291f81b8de00e9c511d3dd41d687a227dd4b529be591/coverage-7.11.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fbffc22d80d86fbe456af9abb17f7a7766e7b2101f7edaacc3535501691563f7", size = 248004, upload-time = "2025-11-10T00:11:46.93Z" }, + { url = "https://files.pythonhosted.org/packages/83/93/967d9662b1eb8c7c46917dcc7e4c1875724ac3e73c3cb78e86d7a0ac719d/coverage-7.11.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:0dba4da36730e384669e05b765a2c49f39514dd3012fcc0398dd66fba8d746d5", size = 247828, upload-time = "2025-11-10T00:11:48.563Z" }, + { url = "https://files.pythonhosted.org/packages/4c/1c/5077493c03215701e212767e470b794548d817dfc6247a4718832cc71fac/coverage-7.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ae12fe90b00b71a71b69f513773310782ce01d5f58d2ceb2b7c595ab9d222094", size = 249588, upload-time = "2025-11-10T00:11:50.581Z" }, + { url = "https://files.pythonhosted.org/packages/7f/a5/77f64de461016e7da3e05d7d07975c89756fe672753e4cf74417fc9b9052/coverage-7.11.3-cp313-cp313-win32.whl", hash = "sha256:12d821de7408292530b0d241468b698bce18dd12ecaf45316149f53877885f8c", size = 219223, upload-time = "2025-11-10T00:11:52.184Z" }, + { url = "https://files.pythonhosted.org/packages/ed/1c/ec51a3c1a59d225b44bdd3a4d463135b3159a535c2686fac965b698524f4/coverage-7.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:6bb599052a974bb6cedfa114f9778fedfad66854107cf81397ec87cb9b8fbcf2", size = 220033, upload-time = "2025-11-10T00:11:53.871Z" }, + { url = "https://files.pythonhosted.org/packages/01/ec/e0ce39746ed558564c16f2cc25fa95ce6fc9fa8bfb3b9e62855d4386b886/coverage-7.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:bb9d7efdb063903b3fdf77caec7b77c3066885068bdc0d44bc1b0c171033f944", size = 218661, upload-time = "2025-11-10T00:11:55.597Z" }, + { url = "https://files.pythonhosted.org/packages/46/cb/483f130bc56cbbad2638248915d97b185374d58b19e3cc3107359715949f/coverage-7.11.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:fb58da65e3339b3dbe266b607bb936efb983d86b00b03eb04c4ad5b442c58428", size = 217389, upload-time = "2025-11-10T00:11:57.59Z" }, + { url = "https://files.pythonhosted.org/packages/cb/ae/81f89bae3afef75553cf10e62feb57551535d16fd5859b9ee5a2a97ddd27/coverage-7.11.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8d16bbe566e16a71d123cd66382c1315fcd520c7573652a8074a8fe281b38c6a", size = 217742, upload-time = "2025-11-10T00:11:59.519Z" }, + { url = "https://files.pythonhosted.org/packages/db/6e/a0fb897041949888191a49c36afd5c6f5d9f5fd757e0b0cd99ec198a324b/coverage-7.11.3-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:a8258f10059b5ac837232c589a350a2df4a96406d6d5f2a09ec587cbdd539655", size = 259049, upload-time = "2025-11-10T00:12:01.592Z" }, + { url = "https://files.pythonhosted.org/packages/d9/b6/d13acc67eb402d91eb94b9bd60593411799aed09ce176ee8d8c0e39c94ca/coverage-7.11.3-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:4c5627429f7fbff4f4131cfdd6abd530734ef7761116811a707b88b7e205afd7", size = 261113, upload-time = "2025-11-10T00:12:03.639Z" }, + { url = "https://files.pythonhosted.org/packages/ea/07/a6868893c48191d60406df4356aa7f0f74e6de34ef1f03af0d49183e0fa1/coverage-7.11.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:465695268414e149bab754c54b0c45c8ceda73dd4a5c3ba255500da13984b16d", size = 263546, upload-time = "2025-11-10T00:12:05.485Z" }, + { url = "https://files.pythonhosted.org/packages/24/e5/28598f70b2c1098332bac47925806353b3313511d984841111e6e760c016/coverage-7.11.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4ebcddfcdfb4c614233cff6e9a3967a09484114a8b2e4f2c7a62dc83676ba13f", size = 258260, upload-time = "2025-11-10T00:12:07.137Z" }, + { url = "https://files.pythonhosted.org/packages/0e/58/58e2d9e6455a4ed746a480c4b9cf96dc3cb2a6b8f3efbee5efd33ae24b06/coverage-7.11.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:13b2066303a1c1833c654d2af0455bb009b6e1727b3883c9964bc5c2f643c1d0", size = 261121, upload-time = "2025-11-10T00:12:09.138Z" }, + { url = "https://files.pythonhosted.org/packages/17/57/38803eefb9b0409934cbc5a14e3978f0c85cb251d2b6f6a369067a7105a0/coverage-7.11.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d8750dd20362a1b80e3cf84f58013d4672f89663aee457ea59336df50fab6739", size = 258736, upload-time = "2025-11-10T00:12:11.195Z" }, + { url = "https://files.pythonhosted.org/packages/a8/f3/f94683167156e93677b3442be1d4ca70cb33718df32a2eea44a5898f04f6/coverage-7.11.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:ab6212e62ea0e1006531a2234e209607f360d98d18d532c2fa8e403c1afbdd71", size = 257625, upload-time = "2025-11-10T00:12:12.843Z" }, + { url = "https://files.pythonhosted.org/packages/87/ed/42d0bf1bc6bfa7d65f52299a31daaa866b4c11000855d753857fe78260ac/coverage-7.11.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a6b17c2b5e0b9bb7702449200f93e2d04cb04b1414c41424c08aa1e5d352da76", size = 259827, upload-time = "2025-11-10T00:12:15.128Z" }, + { url = "https://files.pythonhosted.org/packages/d3/76/5682719f5d5fbedb0c624c9851ef847407cae23362deb941f185f489c54e/coverage-7.11.3-cp313-cp313t-win32.whl", hash = "sha256:426559f105f644b69290ea414e154a0d320c3ad8a2bb75e62884731f69cf8e2c", size = 219897, upload-time = "2025-11-10T00:12:17.274Z" }, + { url = "https://files.pythonhosted.org/packages/10/e0/1da511d0ac3d39e6676fa6cc5ec35320bbf1cebb9b24e9ee7548ee4e931a/coverage-7.11.3-cp313-cp313t-win_amd64.whl", hash = "sha256:90a96fcd824564eae6137ec2563bd061d49a32944858d4bdbae5c00fb10e76ac", size = 220959, upload-time = "2025-11-10T00:12:19.292Z" }, + { url = "https://files.pythonhosted.org/packages/e5/9d/e255da6a04e9ec5f7b633c54c0fdfa221a9e03550b67a9c83217de12e96c/coverage-7.11.3-cp313-cp313t-win_arm64.whl", hash = "sha256:1e33d0bebf895c7a0905fcfaff2b07ab900885fc78bba2a12291a2cfbab014cc", size = 219234, upload-time = "2025-11-10T00:12:21.251Z" }, + { url = "https://files.pythonhosted.org/packages/84/d6/634ec396e45aded1772dccf6c236e3e7c9604bc47b816e928f32ce7987d1/coverage-7.11.3-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:fdc5255eb4815babcdf236fa1a806ccb546724c8a9b129fd1ea4a5448a0bf07c", size = 216746, upload-time = "2025-11-10T00:12:23.089Z" }, + { url = "https://files.pythonhosted.org/packages/28/76/1079547f9d46f9c7c7d0dad35b6873c98bc5aa721eeabceafabd722cd5e7/coverage-7.11.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fe3425dc6021f906c6325d3c415e048e7cdb955505a94f1eb774dafc779ba203", size = 217077, upload-time = "2025-11-10T00:12:24.863Z" }, + { url = "https://files.pythonhosted.org/packages/2d/71/6ad80d6ae0d7cb743b9a98df8bb88b1ff3dc54491508a4a97549c2b83400/coverage-7.11.3-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4ca5f876bf41b24378ee67c41d688155f0e54cdc720de8ef9ad6544005899240", size = 248122, upload-time = "2025-11-10T00:12:26.553Z" }, + { url = "https://files.pythonhosted.org/packages/20/1d/784b87270784b0b88e4beec9d028e8d58f73ae248032579c63ad2ac6f69a/coverage-7.11.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9061a3e3c92b27fd8036dafa26f25d95695b6aa2e4514ab16a254f297e664f83", size = 250638, upload-time = "2025-11-10T00:12:28.555Z" }, + { url = "https://files.pythonhosted.org/packages/f5/26/b6dd31e23e004e9de84d1a8672cd3d73e50f5dae65dbd0f03fa2cdde6100/coverage-7.11.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:abcea3b5f0dc44e1d01c27090bc32ce6ffb7aa665f884f1890710454113ea902", size = 251972, upload-time = "2025-11-10T00:12:30.246Z" }, + { url = "https://files.pythonhosted.org/packages/c9/ef/f9c64d76faac56b82daa036b34d4fe9ab55eb37f22062e68e9470583e688/coverage-7.11.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:68c4eb92997dbaaf839ea13527be463178ac0ddd37a7ac636b8bc11a51af2428", size = 248147, upload-time = "2025-11-10T00:12:32.195Z" }, + { url = "https://files.pythonhosted.org/packages/b6/eb/5b666f90a8f8053bd264a1ce693d2edef2368e518afe70680070fca13ecd/coverage-7.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:149eccc85d48c8f06547534068c41d69a1a35322deaa4d69ba1561e2e9127e75", size = 249995, upload-time = "2025-11-10T00:12:33.969Z" }, + { url = "https://files.pythonhosted.org/packages/eb/7b/871e991ffb5d067f8e67ffb635dabba65b231d6e0eb724a4a558f4a702a5/coverage-7.11.3-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:08c0bcf932e47795c49f0406054824b9d45671362dfc4269e0bc6e4bff010704", size = 247948, upload-time = "2025-11-10T00:12:36.341Z" }, + { url = "https://files.pythonhosted.org/packages/0a/8b/ce454f0af9609431b06dbe5485fc9d1c35ddc387e32ae8e374f49005748b/coverage-7.11.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:39764c6167c82d68a2d8c97c33dba45ec0ad9172570860e12191416f4f8e6e1b", size = 247770, upload-time = "2025-11-10T00:12:38.167Z" }, + { url = "https://files.pythonhosted.org/packages/61/8f/79002cb58a61dfbd2085de7d0a46311ef2476823e7938db80284cedd2428/coverage-7.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3224c7baf34e923ffc78cb45e793925539d640d42c96646db62dbd61bbcfa131", size = 249431, upload-time = "2025-11-10T00:12:40.354Z" }, + { url = "https://files.pythonhosted.org/packages/58/cc/d06685dae97468ed22999440f2f2f5060940ab0e7952a7295f236d98cce7/coverage-7.11.3-cp314-cp314-win32.whl", hash = "sha256:c713c1c528284d636cd37723b0b4c35c11190da6f932794e145fc40f8210a14a", size = 219508, upload-time = "2025-11-10T00:12:42.231Z" }, + { url = "https://files.pythonhosted.org/packages/5f/ed/770cd07706a3598c545f62d75adf2e5bd3791bffccdcf708ec383ad42559/coverage-7.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:c381a252317f63ca0179d2c7918e83b99a4ff3101e1b24849b999a00f9cd4f86", size = 220325, upload-time = "2025-11-10T00:12:44.065Z" }, + { url = "https://files.pythonhosted.org/packages/ee/ac/6a1c507899b6fb1b9a56069954365f655956bcc648e150ce64c2b0ecbed8/coverage-7.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:3e33a968672be1394eded257ec10d4acbb9af2ae263ba05a99ff901bb863557e", size = 218899, upload-time = "2025-11-10T00:12:46.18Z" }, + { url = "https://files.pythonhosted.org/packages/9a/58/142cd838d960cd740654d094f7b0300d7b81534bb7304437d2439fb685fb/coverage-7.11.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:f9c96a29c6d65bd36a91f5634fef800212dff69dacdb44345c4c9783943ab0df", size = 217471, upload-time = "2025-11-10T00:12:48.392Z" }, + { url = "https://files.pythonhosted.org/packages/bc/2c/2f44d39eb33e41ab3aba80571daad32e0f67076afcf27cb443f9e5b5a3ee/coverage-7.11.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2ec27a7a991d229213c8070d31e3ecf44d005d96a9edc30c78eaeafaa421c001", size = 217742, upload-time = "2025-11-10T00:12:50.182Z" }, + { url = "https://files.pythonhosted.org/packages/32/76/8ebc66c3c699f4de3174a43424c34c086323cd93c4930ab0f835731c443a/coverage-7.11.3-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:72c8b494bd20ae1c58528b97c4a67d5cfeafcb3845c73542875ecd43924296de", size = 259120, upload-time = "2025-11-10T00:12:52.451Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/78a3302b9595f331b86e4f12dfbd9252c8e93d97b8631500888f9a3a2af7/coverage-7.11.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:60ca149a446da255d56c2a7a813b51a80d9497a62250532598d249b3cdb1a926", size = 261229, upload-time = "2025-11-10T00:12:54.667Z" }, + { url = "https://files.pythonhosted.org/packages/07/59/1a9c0844dadef2a6efac07316d9781e6c5a3f3ea7e5e701411e99d619bfd/coverage-7.11.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb5069074db19a534de3859c43eec78e962d6d119f637c41c8e028c5ab3f59dd", size = 263642, upload-time = "2025-11-10T00:12:56.841Z" }, + { url = "https://files.pythonhosted.org/packages/37/86/66c15d190a8e82eee777793cabde730640f555db3c020a179625a2ad5320/coverage-7.11.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac5d5329c9c942bbe6295f4251b135d860ed9f86acd912d418dce186de7c19ac", size = 258193, upload-time = "2025-11-10T00:12:58.687Z" }, + { url = "https://files.pythonhosted.org/packages/c7/c7/4a4aeb25cb6f83c3ec4763e5f7cc78da1c6d4ef9e22128562204b7f39390/coverage-7.11.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e22539b676fafba17f0a90ac725f029a309eb6e483f364c86dcadee060429d46", size = 261107, upload-time = "2025-11-10T00:13:00.502Z" }, + { url = "https://files.pythonhosted.org/packages/ed/91/b986b5035f23cf0272446298967ecdd2c3c0105ee31f66f7e6b6948fd7f8/coverage-7.11.3-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:2376e8a9c889016f25472c452389e98bc6e54a19570b107e27cde9d47f387b64", size = 258717, upload-time = "2025-11-10T00:13:02.747Z" }, + { url = "https://files.pythonhosted.org/packages/f0/c7/6c084997f5a04d050c513545d3344bfa17bd3b67f143f388b5757d762b0b/coverage-7.11.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:4234914b8c67238a3c4af2bba648dc716aa029ca44d01f3d51536d44ac16854f", size = 257541, upload-time = "2025-11-10T00:13:04.689Z" }, + { url = "https://files.pythonhosted.org/packages/3b/c5/38e642917e406930cb67941210a366ccffa767365c8f8d9ec0f465a8b218/coverage-7.11.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f0b4101e2b3c6c352ff1f70b3a6fcc7c17c1ab1a91ccb7a33013cb0782af9820", size = 259872, upload-time = "2025-11-10T00:13:06.559Z" }, + { url = "https://files.pythonhosted.org/packages/b7/67/5e812979d20c167f81dbf9374048e0193ebe64c59a3d93d7d947b07865fa/coverage-7.11.3-cp314-cp314t-win32.whl", hash = "sha256:305716afb19133762e8cf62745c46c4853ad6f9eeba54a593e373289e24ea237", size = 220289, upload-time = "2025-11-10T00:13:08.635Z" }, + { url = "https://files.pythonhosted.org/packages/24/3a/b72573802672b680703e0df071faadfab7dcd4d659aaaffc4626bc8bbde8/coverage-7.11.3-cp314-cp314t-win_amd64.whl", hash = "sha256:9245bd392572b9f799261c4c9e7216bafc9405537d0f4ce3ad93afe081a12dc9", size = 221398, upload-time = "2025-11-10T00:13:10.734Z" }, + { url = "https://files.pythonhosted.org/packages/f8/4e/649628f28d38bad81e4e8eb3f78759d20ac173e3c456ac629123815feb40/coverage-7.11.3-cp314-cp314t-win_arm64.whl", hash = "sha256:9a1d577c20b4334e5e814c3d5fe07fa4a8c3ae42a601945e8d7940bab811d0bd", size = 219435, upload-time = "2025-11-10T00:13:12.712Z" }, + { url = "https://files.pythonhosted.org/packages/19/8f/92bdd27b067204b99f396a1414d6342122f3e2663459baf787108a6b8b84/coverage-7.11.3-py3-none-any.whl", hash = "sha256:351511ae28e2509c8d8cae5311577ea7dd511ab8e746ffc8814a0896c3d33fbe", size = 208478, upload-time = "2025-11-10T00:13:14.908Z" }, ] [[package]] @@ -735,16 +744,16 @@ wheels = [ [[package]] name = "databricks-sdk" -version = "0.69.0" +version = "0.73.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "google-auth" }, { name = "protobuf" }, { name = "requests" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/19/ba/1dc248e4cc646a1a29504bcbb910bfb28d3affe58063df622e7e3c5c0634/databricks_sdk-0.69.0.tar.gz", hash = "sha256:5ad7514325d941afe47da4cf8748ba9f7da7250977666c519f534c9f6298d2f5", size = 794676, upload-time = "2025-10-20T11:38:15.004Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a8/7f/cfb2a00d10f6295332616e5b22f2ae3aaf2841a3afa6c49262acb6b94f5b/databricks_sdk-0.73.0.tar.gz", hash = "sha256:db09eaaacd98e07dded78d3e7ab47d2f6c886e0380cb577977bd442bace8bd8d", size = 801017, upload-time = "2025-11-05T06:52:58.509Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/73/6f82f2a926a2129f9a08ba550b3f5c837d23156082c8d1f4226801168456/databricks_sdk-0.69.0-py3-none-any.whl", hash = "sha256:f75c37c0da2126d9fec31cefd7b5c5491a7c8b5d62481cd661d3e9f1efec0b1f", size = 749754, upload-time = "2025-10-20T11:38:13.451Z" }, + { url = "https://files.pythonhosted.org/packages/a7/27/b822b474aaefb684d11df358d52e012699a2a8af231f9b47c54b73f280cb/databricks_sdk-0.73.0-py3-none-any.whl", hash = "sha256:a4d3cfd19357a2b459d2dc3101454d7f0d1b62865ce099c35d0c342b66ac64ff", size = 753896, upload-time = "2025-11-05T06:52:56.451Z" }, ] [[package]] @@ -806,11 +815,11 @@ wheels = [ [[package]] name = "execnet" -version = "2.1.1" +version = "2.1.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524, upload-time = "2024-04-08T09:04:19.245Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bf/89/780e11f9588d9e7128a3f87788354c7946a9cbb1401ad38a48c4db9a4f07/execnet-2.1.2.tar.gz", hash = "sha256:63d83bfdd9a23e35b9c6a3261412324f964c2ec8dcd8d3c6916ee9373e0befcd", size = 166622, upload-time = "2025-11-12T09:56:37.75Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612, upload-time = "2024-04-08T09:04:17.414Z" }, + { url = "https://files.pythonhosted.org/packages/ab/84/02fc1827e8cdded4aa65baef11296a9bbe595c474f0d6d758af082d849fd/execnet-2.1.2-py3-none-any.whl", hash = "sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec", size = 40708, upload-time = "2025-11-12T09:56:36.333Z" }, ] [[package]] @@ -824,16 +833,17 @@ wheels = [ [[package]] name = "fastapi" -version = "0.119.1" +version = "0.121.2" source = { registry = "https://pypi.org/simple" } dependencies = [ + { name = "annotated-doc" }, { name = "pydantic" }, { name = "starlette" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a6/f4/152127681182e6413e7a89684c434e19e7414ed7ac0c632999c3c6980640/fastapi-0.119.1.tar.gz", hash = "sha256:a5e3426edce3fe221af4e1992c6d79011b247e3b03cc57999d697fe76cbf8ae0", size = 338616, upload-time = "2025-10-20T11:30:27.734Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/48/f08f264da34cf160db82c62ffb335e838b1fc16cbcc905f474c7d4c815db/fastapi-0.121.2.tar.gz", hash = "sha256:ca8e932b2b823ec1721c641e3669472c855ad9564a2854c9899d904c2848b8b9", size = 342944, upload-time = "2025-11-13T17:05:54.692Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b1/26/e6d959b4ac959fdb3e9c4154656fc160794db6af8e64673d52759456bf07/fastapi-0.119.1-py3-none-any.whl", hash = "sha256:0b8c2a2cce853216e150e9bd4faaed88227f8eb37de21cb200771f491586a27f", size = 108123, upload-time = "2025-10-20T11:30:26.185Z" }, + { url = "https://files.pythonhosted.org/packages/eb/23/dfb161e91db7c92727db505dc72a384ee79681fe0603f706f9f9f52c2901/fastapi-0.121.2-py3-none-any.whl", hash = "sha256:f2d80b49a86a846b70cc3a03eb5ea6ad2939298bf6a7fe377aa9cd3dd079d358", size = 109201, upload-time = "2025-11-13T17:05:52.718Z" }, ] [[package]] @@ -1020,11 +1030,11 @@ wheels = [ [[package]] name = "fsspec" -version = "2025.9.0" +version = "2025.10.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/de/e0/bab50af11c2d75c9c4a2a26a5254573c0bd97cea152254401510950486fa/fsspec-2025.9.0.tar.gz", hash = "sha256:19fd429483d25d28b65ec68f9f4adc16c17ea2c7c7bf54ec61360d478fb19c19", size = 304847, upload-time = "2025-09-02T19:10:49.215Z" } +sdist = { url = "https://files.pythonhosted.org/packages/24/7f/2747c0d332b9acfa75dc84447a066fdf812b5a6b8d30472b74d309bfe8cb/fsspec-2025.10.0.tar.gz", hash = "sha256:b6789427626f068f9a83ca4e8a3cc050850b6c0f71f99ddb4f542b8266a26a59", size = 309285, upload-time = "2025-10-30T14:58:44.036Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/47/71/70db47e4f6ce3e5c37a607355f80da8860a33226be640226ac52cb05ef2e/fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7", size = 199289, upload-time = "2025-09-02T19:10:47.708Z" }, + { url = "https://files.pythonhosted.org/packages/eb/02/a6b21098b1d5d6249b7c5ab69dde30108a71e4e819d4a9778f1de1d5b70d/fsspec-2025.10.0-py3-none-any.whl", hash = "sha256:7c7712353ae7d875407f97715f0e1ffcc21e33d5b24556cb1e090ae9409ec61d", size = 200966, upload-time = "2025-10-30T14:58:42.53Z" }, ] [[package]] @@ -1053,16 +1063,16 @@ wheels = [ [[package]] name = "google-auth" -version = "2.41.1" +version = "2.43.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cachetools" }, { name = "pyasn1-modules" }, { name = "rsa" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a8/af/5129ce5b2f9688d2fa49b463e544972a7c82b0fdb50980dafee92e121d9f/google_auth-2.41.1.tar.gz", hash = "sha256:b76b7b1f9e61f0cb7e88870d14f6a94aeef248959ef6992670efee37709cbfd2", size = 292284, upload-time = "2025-09-30T22:51:26.363Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ff/ef/66d14cf0e01b08d2d51ffc3c20410c4e134a1548fc246a6081eae585a4fe/google_auth-2.43.0.tar.gz", hash = "sha256:88228eee5fc21b62a1b5fe773ca15e67778cb07dc8363adcb4a8827b52d81483", size = 296359, upload-time = "2025-11-06T00:13:36.587Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/be/a4/7319a2a8add4cc352be9e3efeff5e2aacee917c85ca2fa1647e29089983c/google_auth-2.41.1-py2.py3-none-any.whl", hash = "sha256:754843be95575b9a19c604a848a41be03f7f2afd8c019f716dc1f51ee41c639d", size = 221302, upload-time = "2025-09-30T22:51:24.212Z" }, + { url = "https://files.pythonhosted.org/packages/6f/d1/385110a9ae86d91cc14c5282c61fe9f4dc41c0b9f7d423c6ad77038c4448/google_auth-2.43.0-py2.py3-none-any.whl", hash = "sha256:af628ba6fa493f75c7e9dbe9373d148ca9f4399b5ea29976519e0a3848eddd16", size = 223114, upload-time = "2025-11-06T00:13:35.209Z" }, ] [[package]] @@ -1082,11 +1092,11 @@ wheels = [ [[package]] name = "graphql-core" -version = "3.2.6" +version = "3.2.7" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c4/16/7574029da84834349b60ed71614d66ca3afe46e9bf9c7b9562102acb7d4f/graphql_core-3.2.6.tar.gz", hash = "sha256:c08eec22f9e40f0bd61d805907e3b3b1b9a320bc606e23dc145eebca07c8fbab", size = 505353, upload-time = "2025-01-26T16:36:27.374Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ac/9b/037a640a2983b09aed4a823f9cf1729e6d780b0671f854efa4727a7affbe/graphql_core-3.2.7.tar.gz", hash = "sha256:27b6904bdd3b43f2a0556dad5d579bdfdeab1f38e8e8788e555bdcb586a6f62c", size = 513484, upload-time = "2025-11-01T22:30:40.436Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ae/4f/7297663840621022bc73c22d7d9d80dbc78b4db6297f764b545cd5dd462d/graphql_core-3.2.6-py3-none-any.whl", hash = "sha256:78b016718c161a6fb20a7d97bbf107f331cd1afe53e45566c59f776ed7f0b45f", size = 203416, upload-time = "2025-01-26T16:36:24.868Z" }, + { url = "https://files.pythonhosted.org/packages/0a/14/933037032608787fb92e365883ad6a741c235e0ff992865ec5d904a38f1e/graphql_core-3.2.7-py3-none-any.whl", hash = "sha256:17fc8f3ca4a42913d8e24d9ac9f08deddf0a0b2483076575757f6c412ead2ec0", size = 207262, upload-time = "2025-11-01T22:30:38.912Z" }, ] [[package]] @@ -1198,48 +1208,101 @@ wheels = [ [[package]] name = "hf-xet" -version = "1.1.10" +version = "1.2.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/74/31/feeddfce1748c4a233ec1aa5b7396161c07ae1aa9b7bdbc9a72c3c7dd768/hf_xet-1.1.10.tar.gz", hash = "sha256:408aef343800a2102374a883f283ff29068055c111f003ff840733d3b715bb97", size = 487910, upload-time = "2025-09-12T20:10:27.12Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f7/a2/343e6d05de96908366bdc0081f2d8607d61200be2ac802769c4284cc65bd/hf_xet-1.1.10-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:686083aca1a6669bc85c21c0563551cbcdaa5cf7876a91f3d074a030b577231d", size = 2761466, upload-time = "2025-09-12T20:10:22.836Z" }, - { url = "https://files.pythonhosted.org/packages/31/f9/6215f948ac8f17566ee27af6430ea72045e0418ce757260248b483f4183b/hf_xet-1.1.10-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:71081925383b66b24eedff3013f8e6bbd41215c3338be4b94ba75fd75b21513b", size = 2623807, upload-time = "2025-09-12T20:10:21.118Z" }, - { url = "https://files.pythonhosted.org/packages/15/07/86397573efefff941e100367bbda0b21496ffcdb34db7ab51912994c32a2/hf_xet-1.1.10-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b6bceb6361c80c1cc42b5a7b4e3efd90e64630bcf11224dcac50ef30a47e435", size = 3186960, upload-time = "2025-09-12T20:10:19.336Z" }, - { url = "https://files.pythonhosted.org/packages/01/a7/0b2e242b918cc30e1f91980f3c4b026ff2eedaf1e2ad96933bca164b2869/hf_xet-1.1.10-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:eae7c1fc8a664e54753ffc235e11427ca61f4b0477d757cc4eb9ae374b69f09c", size = 3087167, upload-time = "2025-09-12T20:10:17.255Z" }, - { url = "https://files.pythonhosted.org/packages/4a/25/3e32ab61cc7145b11eee9d745988e2f0f4fafda81b25980eebf97d8cff15/hf_xet-1.1.10-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0a0005fd08f002180f7a12d4e13b22be277725bc23ed0529f8add5c7a6309c06", size = 3248612, upload-time = "2025-09-12T20:10:24.093Z" }, - { url = "https://files.pythonhosted.org/packages/2c/3d/ab7109e607ed321afaa690f557a9ada6d6d164ec852fd6bf9979665dc3d6/hf_xet-1.1.10-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f900481cf6e362a6c549c61ff77468bd59d6dd082f3170a36acfef2eb6a6793f", size = 3353360, upload-time = "2025-09-12T20:10:25.563Z" }, - { url = "https://files.pythonhosted.org/packages/ee/0e/471f0a21db36e71a2f1752767ad77e92d8cde24e974e03d662931b1305ec/hf_xet-1.1.10-cp37-abi3-win_amd64.whl", hash = "sha256:5f54b19cc347c13235ae7ee98b330c26dd65ef1df47e5316ffb1e87713ca7045", size = 2804691, upload-time = "2025-09-12T20:10:28.433Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/5e/6e/0f11bacf08a67f7fb5ee09740f2ca54163863b07b70d579356e9222ce5d8/hf_xet-1.2.0.tar.gz", hash = "sha256:a8c27070ca547293b6890c4bf389f713f80e8c478631432962bb7f4bc0bd7d7f", size = 506020, upload-time = "2025-10-24T19:04:32.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/a5/85ef910a0aa034a2abcfadc360ab5ac6f6bc4e9112349bd40ca97551cff0/hf_xet-1.2.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:ceeefcd1b7aed4956ae8499e2199607765fbd1c60510752003b6cc0b8413b649", size = 2861870, upload-time = "2025-10-24T19:04:11.422Z" }, + { url = "https://files.pythonhosted.org/packages/ea/40/e2e0a7eb9a51fe8828ba2d47fe22a7e74914ea8a0db68a18c3aa7449c767/hf_xet-1.2.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b70218dd548e9840224df5638fdc94bd033552963cfa97f9170829381179c813", size = 2717584, upload-time = "2025-10-24T19:04:09.586Z" }, + { url = "https://files.pythonhosted.org/packages/a5/7d/daf7f8bc4594fdd59a8a596f9e3886133fdc68e675292218a5e4c1b7e834/hf_xet-1.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d40b18769bb9a8bc82a9ede575ce1a44c75eb80e7375a01d76259089529b5dc", size = 3315004, upload-time = "2025-10-24T19:04:00.314Z" }, + { url = "https://files.pythonhosted.org/packages/b1/ba/45ea2f605fbf6d81c8b21e4d970b168b18a53515923010c312c06cd83164/hf_xet-1.2.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:cd3a6027d59cfb60177c12d6424e31f4b5ff13d8e3a1247b3a584bf8977e6df5", size = 3222636, upload-time = "2025-10-24T19:03:58.111Z" }, + { url = "https://files.pythonhosted.org/packages/4a/1d/04513e3cab8f29ab8c109d309ddd21a2705afab9d52f2ba1151e0c14f086/hf_xet-1.2.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6de1fc44f58f6dd937956c8d304d8c2dea264c80680bcfa61ca4a15e7b76780f", size = 3408448, upload-time = "2025-10-24T19:04:20.951Z" }, + { url = "https://files.pythonhosted.org/packages/f0/7c/60a2756d7feec7387db3a1176c632357632fbe7849fce576c5559d4520c7/hf_xet-1.2.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f182f264ed2acd566c514e45da9f2119110e48a87a327ca271027904c70c5832", size = 3503401, upload-time = "2025-10-24T19:04:22.549Z" }, + { url = "https://files.pythonhosted.org/packages/4e/64/48fffbd67fb418ab07451e4ce641a70de1c40c10a13e25325e24858ebe5a/hf_xet-1.2.0-cp313-cp313t-win_amd64.whl", hash = "sha256:293a7a3787e5c95d7be1857358a9130694a9c6021de3f27fa233f37267174382", size = 2900866, upload-time = "2025-10-24T19:04:33.461Z" }, + { url = "https://files.pythonhosted.org/packages/e2/51/f7e2caae42f80af886db414d4e9885fac959330509089f97cccb339c6b87/hf_xet-1.2.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:10bfab528b968c70e062607f663e21e34e2bba349e8038db546646875495179e", size = 2861861, upload-time = "2025-10-24T19:04:19.01Z" }, + { url = "https://files.pythonhosted.org/packages/6e/1d/a641a88b69994f9371bd347f1dd35e5d1e2e2460a2e350c8d5165fc62005/hf_xet-1.2.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a212e842647b02eb6a911187dc878e79c4aa0aa397e88dd3b26761676e8c1f8", size = 2717699, upload-time = "2025-10-24T19:04:17.306Z" }, + { url = "https://files.pythonhosted.org/packages/df/e0/e5e9bba7d15f0318955f7ec3f4af13f92e773fbb368c0b8008a5acbcb12f/hf_xet-1.2.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30e06daccb3a7d4c065f34fc26c14c74f4653069bb2b194e7f18f17cbe9939c0", size = 3314885, upload-time = "2025-10-24T19:04:07.642Z" }, + { url = "https://files.pythonhosted.org/packages/21/90/b7fe5ff6f2b7b8cbdf1bd56145f863c90a5807d9758a549bf3d916aa4dec/hf_xet-1.2.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:29c8fc913a529ec0a91867ce3d119ac1aac966e098cf49501800c870328cc090", size = 3221550, upload-time = "2025-10-24T19:04:05.55Z" }, + { url = "https://files.pythonhosted.org/packages/6f/cb/73f276f0a7ce46cc6a6ec7d6c7d61cbfe5f2e107123d9bbd0193c355f106/hf_xet-1.2.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e159cbfcfbb29f920db2c09ed8b660eb894640d284f102ada929b6e3dc410a", size = 3408010, upload-time = "2025-10-24T19:04:28.598Z" }, + { url = "https://files.pythonhosted.org/packages/b8/1e/d642a12caa78171f4be64f7cd9c40e3ca5279d055d0873188a58c0f5fbb9/hf_xet-1.2.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9c91d5ae931510107f148874e9e2de8a16052b6f1b3ca3c1b12f15ccb491390f", size = 3503264, upload-time = "2025-10-24T19:04:30.397Z" }, + { url = "https://files.pythonhosted.org/packages/17/b5/33764714923fa1ff922770f7ed18c2daae034d21ae6e10dbf4347c854154/hf_xet-1.2.0-cp314-cp314t-win_amd64.whl", hash = "sha256:210d577732b519ac6ede149d2f2f34049d44e8622bf14eb3d63bbcd2d4b332dc", size = 2901071, upload-time = "2025-10-24T19:04:37.463Z" }, + { url = "https://files.pythonhosted.org/packages/96/2d/22338486473df5923a9ab7107d375dbef9173c338ebef5098ef593d2b560/hf_xet-1.2.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:46740d4ac024a7ca9b22bebf77460ff43332868b661186a8e46c227fdae01848", size = 2866099, upload-time = "2025-10-24T19:04:15.366Z" }, + { url = "https://files.pythonhosted.org/packages/7f/8c/c5becfa53234299bc2210ba314eaaae36c2875e0045809b82e40a9544f0c/hf_xet-1.2.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:27df617a076420d8845bea087f59303da8be17ed7ec0cd7ee3b9b9f579dff0e4", size = 2722178, upload-time = "2025-10-24T19:04:13.695Z" }, + { url = "https://files.pythonhosted.org/packages/9a/92/cf3ab0b652b082e66876d08da57fcc6fa2f0e6c70dfbbafbd470bb73eb47/hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3651fd5bfe0281951b988c0facbe726aa5e347b103a675f49a3fa8144c7968fd", size = 3320214, upload-time = "2025-10-24T19:04:03.596Z" }, + { url = "https://files.pythonhosted.org/packages/46/92/3f7ec4a1b6a65bf45b059b6d4a5d38988f63e193056de2f420137e3c3244/hf_xet-1.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d06fa97c8562fb3ee7a378dd9b51e343bc5bc8190254202c9771029152f5e08c", size = 3229054, upload-time = "2025-10-24T19:04:01.949Z" }, + { url = "https://files.pythonhosted.org/packages/0b/dd/7ac658d54b9fb7999a0ccb07ad863b413cbaf5cf172f48ebcd9497ec7263/hf_xet-1.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4c1428c9ae73ec0939410ec73023c4f842927f39db09b063b9482dac5a3bb737", size = 3413812, upload-time = "2025-10-24T19:04:24.585Z" }, + { url = "https://files.pythonhosted.org/packages/92/68/89ac4e5b12a9ff6286a12174c8538a5930e2ed662091dd2572bbe0a18c8a/hf_xet-1.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a55558084c16b09b5ed32ab9ed38421e2d87cf3f1f89815764d1177081b99865", size = 3508920, upload-time = "2025-10-24T19:04:26.927Z" }, + { url = "https://files.pythonhosted.org/packages/cb/44/870d44b30e1dcfb6a65932e3e1506c103a8a5aea9103c337e7a53180322c/hf_xet-1.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:e6584a52253f72c9f52f9e549d5895ca7a471608495c4ecaa6cc73dba2b24d69", size = 2905735, upload-time = "2025-10-24T19:04:35.928Z" }, ] [[package]] name = "holidays" -version = "0.83" +version = "0.84" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "python-dateutil" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ea/cf/c15e08bbeeb117186a49fd21067fcf3c0b140e9549b6eca246efd0083fd0/holidays-0.83.tar.gz", hash = "sha256:99b97b002079ab57dac93295933907d2aae2742ad9a4d64fe33864dfae6805fa", size = 795071, upload-time = "2025-10-20T20:04:00.496Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2b/91/7301d71a49cfbb499c704615565199180676131da1a56896365bfff6df52/holidays-0.84.tar.gz", hash = "sha256:d604490717c2315e0800269d03c86bf8275e132e4bd140f19d62eb6ccddb5ddc", size = 797583, upload-time = "2025-11-03T20:34:15.139Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/a3/26f03449945a1cae7fbd3de10b88d9c139e20753537e4d527304b1e51dea/holidays-0.84-py3-none-any.whl", hash = "sha256:bca376e3becb36ea8e370d08268520934d8a9bd897ae5007ba51b3d7b4e34988", size = 1310662, upload-time = "2025-11-03T20:34:13.284Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "huey" +version = "2.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/c6/dfe74b0ee9708216ab798449b694d6ba7c1b701bdc2e5d378ec0505ca9a9/huey-2.5.4.tar.gz", hash = "sha256:4b7fb217b640fbb46efc4f4681b446b40726593522f093e8ef27c4a8fcb6cfbb", size = 848666, upload-time = "2025-10-23T13:04:55.549Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/96/70e8c138643ad2895efd96b1b8ca4f00209beea1fed5f5d02b74ab057ee6/holidays-0.83-py3-none-any.whl", hash = "sha256:e36a368227b5b62129871463697bfde7e5212f6f77e43640320b727b79a875a8", size = 1307149, upload-time = "2025-10-20T20:03:58.887Z" }, + { url = "https://files.pythonhosted.org/packages/0a/86/fb8f2ec721106ee9d47adb3a757f937044a52239adb26bae6d9ad753927b/huey-2.5.4-py3-none-any.whl", hash = "sha256:0eac1fb2711f6366a1db003629354a0cea470a3db720d5bab0d140c28e993f9c", size = 76843, upload-time = "2025-10-23T20:58:10.572Z" }, ] [[package]] name = "huggingface-hub" -version = "0.35.3" +version = "1.1.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, { name = "fsspec" }, - { name = "hf-xet", marker = "platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, + { name = "hf-xet", marker = "platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" }, + { name = "httpx" }, { name = "packaging" }, { name = "pyyaml" }, - { name = "requests" }, + { name = "shellingham" }, { name = "tqdm" }, + { name = "typer-slim" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/10/7e/a0a97de7c73671863ca6b3f61fa12518caf35db37825e43d63a70956738c/huggingface_hub-0.35.3.tar.gz", hash = "sha256:350932eaa5cc6a4747efae85126ee220e4ef1b54e29d31c3b45c5612ddf0b32a", size = 461798, upload-time = "2025-09-29T14:29:58.625Z" } +sdist = { url = "https://files.pythonhosted.org/packages/44/8a/3cba668d9cd1b4e3eb6c1c3ff7bf0f74a7809bdbb5c327bcdbdbac802d23/huggingface_hub-1.1.4.tar.gz", hash = "sha256:a7424a766fffa1a11e4c1ac2040a1557e2101f86050fdf06627e7b74cc9d2ad6", size = 606842, upload-time = "2025-11-13T10:51:57.602Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/31/a0/651f93d154cb72323358bf2bbae3e642bdb5d2f1bfc874d096f7cb159fa0/huggingface_hub-0.35.3-py3-none-any.whl", hash = "sha256:0e3a01829c19d86d03793e4577816fe3bdfc1602ac62c7fb220d593d351224ba", size = 564262, upload-time = "2025-09-29T14:29:55.813Z" }, + { url = "https://files.pythonhosted.org/packages/33/3f/969137c9d9428ed8bf171d27604243dd950a47cac82414826e2aebbc0a4c/huggingface_hub-1.1.4-py3-none-any.whl", hash = "sha256:867799fbd2ef338b7f8b03d038d9c0e09415dfe45bb2893b48a510d1d746daa5", size = 515580, upload-time = "2025-11-13T10:51:55.742Z" }, ] [[package]] @@ -1283,7 +1346,7 @@ wheels = [ [[package]] name = "ipython" -version = "9.6.0" +version = "9.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -1297,9 +1360,9 @@ dependencies = [ { name = "stack-data" }, { name = "traitlets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2a/34/29b18c62e39ee2f7a6a3bba7efd952729d8aadd45ca17efc34453b717665/ipython-9.6.0.tar.gz", hash = "sha256:5603d6d5d356378be5043e69441a072b50a5b33b4503428c77b04cb8ce7bc731", size = 4396932, upload-time = "2025-09-29T10:55:53.948Z" } +sdist = { url = "https://files.pythonhosted.org/packages/29/e6/48c74d54039241a456add616464ea28c6ebf782e4110d419411b83dae06f/ipython-9.7.0.tar.gz", hash = "sha256:5f6de88c905a566c6a9d6c400a8fed54a638e1f7543d17aae2551133216b1e4e", size = 4422115, upload-time = "2025-11-05T12:18:54.646Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/48/c5/d5e07995077e48220269c28a221e168c91123ad5ceee44d548f54a057fc0/ipython-9.6.0-py3-none-any.whl", hash = "sha256:5f77efafc886d2f023442479b8149e7d86547ad0a979e9da9f045d252f648196", size = 616170, upload-time = "2025-09-29T10:55:47.676Z" }, + { url = "https://files.pythonhosted.org/packages/05/aa/62893d6a591d337aa59dcc4c6f6c842f1fe20cd72c8c5c1f980255243252/ipython-9.7.0-py3-none-any.whl", hash = "sha256:bce8ac85eb9521adc94e1845b4c03d88365fd6ac2f4908ec4ed1eb1b0a065f9f", size = 618911, upload-time = "2025-11-05T12:18:52.484Z" }, ] [[package]] @@ -1367,14 +1430,14 @@ wheels = [ [[package]] name = "joserfc" -version = "1.4.0" +version = "1.4.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cryptography" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/26/a0/4b8dfecc8ec3c15aa1f2ff7d5b947344378b5b595ce37c8a8fe6e25c1400/joserfc-1.4.0.tar.gz", hash = "sha256:e8c2f327bf10a937d284d57e9f8aec385381e5e5850469b50a7dade1aba59759", size = 196339, upload-time = "2025-10-09T07:47:00.835Z" } +sdist = { url = "https://files.pythonhosted.org/packages/79/88/69505be49b52ac808b290c25ac3796142bcf4349de79adb0175ece83427c/joserfc-1.4.2.tar.gz", hash = "sha256:1b4cebf769eeb8105d2e3433cae49e7217c83e4abf8493e91e7d0f0ad3579fdd", size = 199746, upload-time = "2025-11-17T09:03:15.644Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/05/342459b7629c6fcb5f99a646886ee2904491955b8cce6b26b0b9a498f67c/joserfc-1.4.0-py3-none-any.whl", hash = "sha256:46917e6b53f1ec0c7e20d34d6f3e6c27da0fa43d0d4ebfb89aada7c86582933a", size = 66390, upload-time = "2025-10-09T07:46:59.591Z" }, + { url = "https://files.pythonhosted.org/packages/79/ee/5134fa786f6c4090ac5daec7d18656ca825f7a7754139e38aaad95e544a2/joserfc-1.4.2-py3-none-any.whl", hash = "sha256:b15a5ea3a464c37e8006105665c159a288892fa73856fa40be60266dbc20b49d", size = 66435, upload-time = "2025-11-17T09:03:14.46Z" }, ] [[package]] @@ -1635,11 +1698,11 @@ wheels = [ [[package]] name = "markdown" -version = "3.9" +version = "3.10" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/8d/37/02347f6d6d8279247a5837082ebc26fc0d5aaeaf75aa013fcbb433c777ab/markdown-3.9.tar.gz", hash = "sha256:d2900fe1782bd33bdbbd56859defef70c2e78fc46668f8eb9df3128138f2cb6a", size = 364585, upload-time = "2025-09-04T20:25:22.885Z" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/ab/7dd27d9d863b3376fcf23a5a13cb5d024aed1db46f963f1b5735ae43b3be/markdown-3.10.tar.gz", hash = "sha256:37062d4f2aa4b2b6b32aefb80faa300f82cc790cb949a35b8caede34f2b68c0e", size = 364931, upload-time = "2025-11-03T19:51:15.007Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/70/ae/44c4a6a4cbb496d93c6257954260fe3a6e91b7bed2240e5dad2a717f5111/markdown-3.9-py3-none-any.whl", hash = "sha256:9f4d91ed810864ea88a6f32c07ba8bee1346c0cc1f6b1f9f6c822f2a9667d280", size = 107441, upload-time = "2025-09-04T20:25:21.784Z" }, + { url = "https://files.pythonhosted.org/packages/70/81/54e3ce63502cd085a0c556652a4e1b919c45a446bd1e5300e10c44c8c521/markdown-3.10-py3-none-any.whl", hash = "sha256:b5b99d6951e2e4948d939255596523444c0e677c669700b1d17aa4a8a464cb7c", size = 107678, upload-time = "2025-11-03T19:51:13.887Z" }, ] [[package]] @@ -1773,14 +1836,14 @@ wheels = [ [[package]] name = "matplotlib-inline" -version = "0.1.7" +version = "0.2.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "traitlets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159, upload-time = "2024-04-15T13:44:44.803Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/74/97e72a36efd4ae2bccb3463284300f8953f199b5ffbc04cbbb0ec78f74b1/matplotlib_inline-0.2.1.tar.gz", hash = "sha256:e1ee949c340d771fc39e241ea75683deb94762c8fa5f2927ec57c83c4dffa9fe", size = 8110, upload-time = "2025-10-23T09:00:22.126Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899, upload-time = "2024-04-15T13:44:43.265Z" }, + { url = "https://files.pythonhosted.org/packages/af/33/ee4519fa02ed11a94aef9559552f3b17bb863f2ecfe1a35dc7f548cde231/matplotlib_inline-0.2.1-py3-none-any.whl", hash = "sha256:d56ce5156ba6085e00a9d54fead6ed29a9c47e215cd1bba2e976ef39f5710a76", size = 9516, upload-time = "2025-10-23T09:00:20.675Z" }, ] [[package]] @@ -1811,7 +1874,7 @@ source = { git = "https://github.com/microsoft/python-type-stubs.git#692c37c3969 [[package]] name = "mlflow" -version = "3.5.1" +version = "3.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "alembic" }, @@ -1821,6 +1884,7 @@ dependencies = [ { name = "flask-cors" }, { name = "graphene" }, { name = "gunicorn", marker = "sys_platform != 'win32'" }, + { name = "huey" }, { name = "matplotlib" }, { name = "mlflow-skinny" }, { name = "mlflow-tracing" }, @@ -1832,14 +1896,14 @@ dependencies = [ { name = "sqlalchemy" }, { name = "waitress", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/64/7e/516ba65bfa6f5857904ce18bcb738234004663dae1197cee082d48f1ad29/mlflow-3.5.1.tar.gz", hash = "sha256:32630f2aaadeb6dc6ccbde56247a1500518b38d0a7cc12f714be1703b6ee3ea1", size = 8300179, upload-time = "2025-10-22T18:11:47.263Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9b/25/930b5312952b2645f066ffacca5bee8e36577c35e327545da225440cbb6a/mlflow-3.6.0.tar.gz", hash = "sha256:d945d259b5c6b551a9f26846db8979fd84c78114a027b77ada3298f821a9b0e1", size = 8371484, upload-time = "2025-11-07T19:00:30.312Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/98/e1/33cf2596dfbdfe49c2a4696e4321a90e835faeb46e590980461d1d4ef811/mlflow-3.5.1-py3-none-any.whl", hash = "sha256:ebbf5fef59787161a15f2878f210877a62d54d943ad6cea140621687b2393f85", size = 8773271, upload-time = "2025-10-22T18:11:44.6Z" }, + { url = "https://files.pythonhosted.org/packages/79/69/5b018518b2fbd02481b58f7ca14f4a489b51e3c2d95cdc1b973135e8d456/mlflow-3.6.0-py3-none-any.whl", hash = "sha256:04d1691facd412be8e61b963fad859286cfeb2dbcafaea294e6aa0b83a15fc04", size = 8860293, upload-time = "2025-11-07T19:00:27.555Z" }, ] [[package]] name = "mlflow-skinny" -version = "3.5.1" +version = "3.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cachetools" }, @@ -1862,14 +1926,14 @@ dependencies = [ { name = "typing-extensions" }, { name = "uvicorn" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fb/1a/ede3fb7a4085bf640e2842c0a4d3d95ef665b21e6d0e92cfb7867ba58ef7/mlflow_skinny-3.5.1.tar.gz", hash = "sha256:4358a5489221cdecf53cf045e10df28919dedb9489965434ce3445f7cbabf365", size = 1927869, upload-time = "2025-10-22T17:58:41.623Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/8e/2a2d0cd5b1b985c5278202805f48aae6f2adc3ddc0fce3385ec50e07e258/mlflow_skinny-3.6.0.tar.gz", hash = "sha256:cc04706b5b6faace9faf95302a6e04119485e1bfe98ddc9b85b81984e80944b6", size = 1963286, upload-time = "2025-11-07T18:33:52.596Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b4/88/75690e7cdc6fe56374e24178055bb2a7385e1e29c51a8cbb2fb747892af1/mlflow_skinny-3.5.1-py3-none-any.whl", hash = "sha256:e5f96977d21a093a3ffda789bee90070855dbfe1b9d0703c0c3e34d2f8d7fba8", size = 2314304, upload-time = "2025-10-22T17:58:39.526Z" }, + { url = "https://files.pythonhosted.org/packages/0e/78/e8fdc3e1708bdfd1eba64f41ce96b461cae1b505aa08b69352ac99b4caa4/mlflow_skinny-3.6.0-py3-none-any.whl", hash = "sha256:c83b34fce592acb2cc6bddcb507587a6d9ef3f590d9e7a8658c85e0980596d78", size = 2364629, upload-time = "2025-11-07T18:33:50.744Z" }, ] [[package]] name = "mlflow-tracing" -version = "3.5.1" +version = "3.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cachetools" }, @@ -1881,14 +1945,14 @@ dependencies = [ { name = "protobuf" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/18/38/ade11b09edfee133078015656aec8a3854f1a6ed1bd6e6d9af333fcdaaf9/mlflow_tracing-3.5.1.tar.gz", hash = "sha256:bca266b1871692ae2ec812ed177cdc108ccef1cb3fb82725a8b959ec98d5fba0", size = 1056089, upload-time = "2025-10-22T17:56:12.047Z" } +sdist = { url = "https://files.pythonhosted.org/packages/11/4e/a1b2f977a50ed3860e2848548a9173b9018806628d46d5bdafa8b45bc0c7/mlflow_tracing-3.6.0.tar.gz", hash = "sha256:ccff80b3aad6caa18233c98ba69922a91a6f914e0a13d12e1977af7523523d4c", size = 1061879, upload-time = "2025-11-07T18:36:24.818Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/29/7f/99006f6c261ef694363e8599ad858c223aa9918231e8bd7a1569041967ac/mlflow_tracing-3.5.1-py3-none-any.whl", hash = "sha256:4fd685347158e0d2c48f5bec3d15ecfc6fadc1dbb48073cb220ded438408fa65", size = 1273904, upload-time = "2025-10-22T17:56:10.748Z" }, + { url = "https://files.pythonhosted.org/packages/5e/ec/ba3f513152cf5404e36263604d484728d47e61678c39228c36eb769199af/mlflow_tracing-3.6.0-py3-none-any.whl", hash = "sha256:a68ff03ba5129c67dc98e6871e0d5ef512dd3ee66d01e1c1a0c946c08a6d4755", size = 1281617, upload-time = "2025-11-07T18:36:23.299Z" }, ] [[package]] name = "moto" -version = "5.1.15" +version = "5.1.16" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "boto3" }, @@ -1901,9 +1965,9 @@ dependencies = [ { name = "werkzeug" }, { name = "xmltodict" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/73/f9/5e4129558fa8f255c44b3b938a189ffc2c8a85e4ed3f9ddb3bf4d0f79df7/moto-5.1.15.tar.gz", hash = "sha256:2ad9cc9710a3460505511543dba6761c8bd2006a49954ad3988bbf20ce9e6413", size = 7288767, upload-time = "2025-10-17T20:45:30.912Z" } +sdist = { url = "https://files.pythonhosted.org/packages/33/0e/346bdadba09fd86854fa3363892ca12f4232652c9d210b93c673c48807ea/moto-5.1.16.tar.gz", hash = "sha256:792045b345d16a8aa09068ad4a7656894e707c796f0799b438fffb738e8fae7c", size = 8229581, upload-time = "2025-11-02T21:56:40.257Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d6/f9/1a91d8dece9c7d5a4c28437b70a333c2320ea6daca6c163fff23a44bb03b/moto-5.1.15-py3-none-any.whl", hash = "sha256:0ffcf943f421bc6e7248889c7c44182a9ec26f8df3457cd4b52418dab176a720", size = 5403349, upload-time = "2025-10-17T20:45:28.632Z" }, + { url = "https://files.pythonhosted.org/packages/a6/a5/403b7adbf9932861ff7f3b19f4f9b9b8ec0dceb1fcea0633046b7f5e9ced/moto-5.1.16-py3-none-any.whl", hash = "sha256:8e6186f20b3aa91755d186e47701fe7e47f74e625c36fdf3bd7747da68468b19", size = 6330584, upload-time = "2025-11-02T21:56:37.585Z" }, ] [package.optional-dependencies] @@ -2055,11 +2119,11 @@ wheels = [ [[package]] name = "narwhals" -version = "2.9.0" +version = "2.12.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b7/95/aa46616f5e567ff5d262f4c207d5ca79cb2766010c786c351b8e7f930ef4/narwhals-2.9.0.tar.gz", hash = "sha256:d8cde40a6a8a7049d8e66608b7115ab19464acc6f305d136a8dc8ba396c4acfe", size = 584098, upload-time = "2025-10-20T12:19:16.893Z" } +sdist = { url = "https://files.pythonhosted.org/packages/93/f8/e1c28f24b641871c14ccae7ba6381f3c7827789a06e947ce975ae8a9075a/narwhals-2.12.0.tar.gz", hash = "sha256:075b6d56f3a222613793e025744b129439ecdff9292ea6615dd983af7ba6ea44", size = 590404, upload-time = "2025-11-17T10:53:28.381Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/13/34/00c7ae8194074ed82b64e0bb7c24220eac5f77ac90c16e23cf0d2cfd2a03/narwhals-2.9.0-py3-none-any.whl", hash = "sha256:c59f7de4763004ae81691ce16df71b4e55aead0ead7ccde8c8f2ef8c9559c765", size = 422255, upload-time = "2025-10-20T12:19:15.228Z" }, + { url = "https://files.pythonhosted.org/packages/0b/9a/c6f79de7ba3a0a8473129936b7b90aa461d3d46fec6f1627672b1dccf4e9/narwhals-2.12.0-py3-none-any.whl", hash = "sha256:baeba5d448a30b04c299a696bd9ee5ff73e4742143e06c49ca316b46539a7cbb", size = 425014, upload-time = "2025-11-17T10:53:26.65Z" }, ] [[package]] @@ -2091,65 +2155,65 @@ wheels = [ [[package]] name = "numpy" -version = "2.3.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b5/f4/098d2270d52b41f1bd7db9fc288aaa0400cb48c2a3e2af6fa365d9720947/numpy-2.3.4.tar.gz", hash = "sha256:a7d018bfedb375a8d979ac758b120ba846a7fe764911a64465fd87b8729f4a6a", size = 20582187, upload-time = "2025-10-15T16:18:11.77Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/96/7a/02420400b736f84317e759291b8edaeee9dc921f72b045475a9cbdb26b17/numpy-2.3.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ef1b5a3e808bc40827b5fa2c8196151a4c5abe110e1726949d7abddfe5c7ae11", size = 20957727, upload-time = "2025-10-15T16:15:44.9Z" }, - { url = "https://files.pythonhosted.org/packages/18/90/a014805d627aa5750f6f0e878172afb6454552da929144b3c07fcae1bb13/numpy-2.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c2f91f496a87235c6aaf6d3f3d89b17dba64996abadccb289f48456cff931ca9", size = 14187262, upload-time = "2025-10-15T16:15:47.761Z" }, - { url = "https://files.pythonhosted.org/packages/c7/e4/0a94b09abe89e500dc748e7515f21a13e30c5c3fe3396e6d4ac108c25fca/numpy-2.3.4-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f77e5b3d3da652b474cc80a14084927a5e86a5eccf54ca8ca5cbd697bf7f2667", size = 5115992, upload-time = "2025-10-15T16:15:50.144Z" }, - { url = "https://files.pythonhosted.org/packages/88/dd/db77c75b055c6157cbd4f9c92c4458daef0dd9cbe6d8d2fe7f803cb64c37/numpy-2.3.4-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:8ab1c5f5ee40d6e01cbe96de5863e39b215a4d24e7d007cad56c7184fdf4aeef", size = 6648672, upload-time = "2025-10-15T16:15:52.442Z" }, - { url = "https://files.pythonhosted.org/packages/e1/e6/e31b0d713719610e406c0ea3ae0d90760465b086da8783e2fd835ad59027/numpy-2.3.4-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:77b84453f3adcb994ddbd0d1c5d11db2d6bda1a2b7fd5ac5bd4649d6f5dc682e", size = 14284156, upload-time = "2025-10-15T16:15:54.351Z" }, - { url = "https://files.pythonhosted.org/packages/f9/58/30a85127bfee6f108282107caf8e06a1f0cc997cb6b52cdee699276fcce4/numpy-2.3.4-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4121c5beb58a7f9e6dfdee612cb24f4df5cd4db6e8261d7f4d7450a997a65d6a", size = 16641271, upload-time = "2025-10-15T16:15:56.67Z" }, - { url = "https://files.pythonhosted.org/packages/06/f2/2e06a0f2adf23e3ae29283ad96959267938d0efd20a2e25353b70065bfec/numpy-2.3.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65611ecbb00ac9846efe04db15cbe6186f562f6bb7e5e05f077e53a599225d16", size = 16059531, upload-time = "2025-10-15T16:15:59.412Z" }, - { url = "https://files.pythonhosted.org/packages/b0/e7/b106253c7c0d5dc352b9c8fab91afd76a93950998167fa3e5afe4ef3a18f/numpy-2.3.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dabc42f9c6577bcc13001b8810d300fe814b4cfbe8a92c873f269484594f9786", size = 18578983, upload-time = "2025-10-15T16:16:01.804Z" }, - { url = "https://files.pythonhosted.org/packages/73/e3/04ecc41e71462276ee867ccbef26a4448638eadecf1bc56772c9ed6d0255/numpy-2.3.4-cp312-cp312-win32.whl", hash = "sha256:a49d797192a8d950ca59ee2d0337a4d804f713bb5c3c50e8db26d49666e351dc", size = 6291380, upload-time = "2025-10-15T16:16:03.938Z" }, - { url = "https://files.pythonhosted.org/packages/3d/a8/566578b10d8d0e9955b1b6cd5db4e9d4592dd0026a941ff7994cedda030a/numpy-2.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:985f1e46358f06c2a09921e8921e2c98168ed4ae12ccd6e5e87a4f1857923f32", size = 12787999, upload-time = "2025-10-15T16:16:05.801Z" }, - { url = "https://files.pythonhosted.org/packages/58/22/9c903a957d0a8071b607f5b1bff0761d6e608b9a965945411f867d515db1/numpy-2.3.4-cp312-cp312-win_arm64.whl", hash = "sha256:4635239814149e06e2cb9db3dd584b2fa64316c96f10656983b8026a82e6e4db", size = 10197412, upload-time = "2025-10-15T16:16:07.854Z" }, - { url = "https://files.pythonhosted.org/packages/57/7e/b72610cc91edf138bc588df5150957a4937221ca6058b825b4725c27be62/numpy-2.3.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c090d4860032b857d94144d1a9976b8e36709e40386db289aaf6672de2a81966", size = 20950335, upload-time = "2025-10-15T16:16:10.304Z" }, - { url = "https://files.pythonhosted.org/packages/3e/46/bdd3370dcea2f95ef14af79dbf81e6927102ddf1cc54adc0024d61252fd9/numpy-2.3.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a13fc473b6db0be619e45f11f9e81260f7302f8d180c49a22b6e6120022596b3", size = 14179878, upload-time = "2025-10-15T16:16:12.595Z" }, - { url = "https://files.pythonhosted.org/packages/ac/01/5a67cb785bda60f45415d09c2bc245433f1c68dd82eef9c9002c508b5a65/numpy-2.3.4-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:3634093d0b428e6c32c3a69b78e554f0cd20ee420dcad5a9f3b2a63762ce4197", size = 5108673, upload-time = "2025-10-15T16:16:14.877Z" }, - { url = "https://files.pythonhosted.org/packages/c2/cd/8428e23a9fcebd33988f4cb61208fda832800ca03781f471f3727a820704/numpy-2.3.4-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:043885b4f7e6e232d7df4f51ffdef8c36320ee9d5f227b380ea636722c7ed12e", size = 6641438, upload-time = "2025-10-15T16:16:16.805Z" }, - { url = "https://files.pythonhosted.org/packages/3e/d1/913fe563820f3c6b079f992458f7331278dcd7ba8427e8e745af37ddb44f/numpy-2.3.4-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4ee6a571d1e4f0ea6d5f22d6e5fbd6ed1dc2b18542848e1e7301bd190500c9d7", size = 14281290, upload-time = "2025-10-15T16:16:18.764Z" }, - { url = "https://files.pythonhosted.org/packages/9e/7e/7d306ff7cb143e6d975cfa7eb98a93e73495c4deabb7d1b5ecf09ea0fd69/numpy-2.3.4-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fc8a63918b04b8571789688b2780ab2b4a33ab44bfe8ccea36d3eba51228c953", size = 16636543, upload-time = "2025-10-15T16:16:21.072Z" }, - { url = "https://files.pythonhosted.org/packages/47/6a/8cfc486237e56ccfb0db234945552a557ca266f022d281a2f577b98e955c/numpy-2.3.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:40cc556d5abbc54aabe2b1ae287042d7bdb80c08edede19f0c0afb36ae586f37", size = 16056117, upload-time = "2025-10-15T16:16:23.369Z" }, - { url = "https://files.pythonhosted.org/packages/b1/0e/42cb5e69ea901e06ce24bfcc4b5664a56f950a70efdcf221f30d9615f3f3/numpy-2.3.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ecb63014bb7f4ce653f8be7f1df8cbc6093a5a2811211770f6606cc92b5a78fd", size = 18577788, upload-time = "2025-10-15T16:16:27.496Z" }, - { url = "https://files.pythonhosted.org/packages/86/92/41c3d5157d3177559ef0a35da50f0cda7fa071f4ba2306dd36818591a5bc/numpy-2.3.4-cp313-cp313-win32.whl", hash = "sha256:e8370eb6925bb8c1c4264fec52b0384b44f675f191df91cbe0140ec9f0955646", size = 6282620, upload-time = "2025-10-15T16:16:29.811Z" }, - { url = "https://files.pythonhosted.org/packages/09/97/fd421e8bc50766665ad35536c2bb4ef916533ba1fdd053a62d96cc7c8b95/numpy-2.3.4-cp313-cp313-win_amd64.whl", hash = "sha256:56209416e81a7893036eea03abcb91c130643eb14233b2515c90dcac963fe99d", size = 12784672, upload-time = "2025-10-15T16:16:31.589Z" }, - { url = "https://files.pythonhosted.org/packages/ad/df/5474fb2f74970ca8eb978093969b125a84cc3d30e47f82191f981f13a8a0/numpy-2.3.4-cp313-cp313-win_arm64.whl", hash = "sha256:a700a4031bc0fd6936e78a752eefb79092cecad2599ea9c8039c548bc097f9bc", size = 10196702, upload-time = "2025-10-15T16:16:33.902Z" }, - { url = "https://files.pythonhosted.org/packages/11/83/66ac031464ec1767ea3ed48ce40f615eb441072945e98693bec0bcd056cc/numpy-2.3.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:86966db35c4040fdca64f0816a1c1dd8dbd027d90fca5a57e00e1ca4cd41b879", size = 21049003, upload-time = "2025-10-15T16:16:36.101Z" }, - { url = "https://files.pythonhosted.org/packages/5f/99/5b14e0e686e61371659a1d5bebd04596b1d72227ce36eed121bb0aeab798/numpy-2.3.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:838f045478638b26c375ee96ea89464d38428c69170360b23a1a50fa4baa3562", size = 14302980, upload-time = "2025-10-15T16:16:39.124Z" }, - { url = "https://files.pythonhosted.org/packages/2c/44/e9486649cd087d9fc6920e3fc3ac2aba10838d10804b1e179fb7cbc4e634/numpy-2.3.4-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d7315ed1dab0286adca467377c8381cd748f3dc92235f22a7dfc42745644a96a", size = 5231472, upload-time = "2025-10-15T16:16:41.168Z" }, - { url = "https://files.pythonhosted.org/packages/3e/51/902b24fa8887e5fe2063fd61b1895a476d0bbf46811ab0c7fdf4bd127345/numpy-2.3.4-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:84f01a4d18b2cc4ade1814a08e5f3c907b079c847051d720fad15ce37aa930b6", size = 6739342, upload-time = "2025-10-15T16:16:43.777Z" }, - { url = "https://files.pythonhosted.org/packages/34/f1/4de9586d05b1962acdcdb1dc4af6646361a643f8c864cef7c852bf509740/numpy-2.3.4-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:817e719a868f0dacde4abdfc5c1910b301877970195db9ab6a5e2c4bd5b121f7", size = 14354338, upload-time = "2025-10-15T16:16:46.081Z" }, - { url = "https://files.pythonhosted.org/packages/1f/06/1c16103b425de7969d5a76bdf5ada0804b476fed05d5f9e17b777f1cbefd/numpy-2.3.4-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85e071da78d92a214212cacea81c6da557cab307f2c34b5f85b628e94803f9c0", size = 16702392, upload-time = "2025-10-15T16:16:48.455Z" }, - { url = "https://files.pythonhosted.org/packages/34/b2/65f4dc1b89b5322093572b6e55161bb42e3e0487067af73627f795cc9d47/numpy-2.3.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:2ec646892819370cf3558f518797f16597b4e4669894a2ba712caccc9da53f1f", size = 16134998, upload-time = "2025-10-15T16:16:51.114Z" }, - { url = "https://files.pythonhosted.org/packages/d4/11/94ec578896cdb973aaf56425d6c7f2aff4186a5c00fac15ff2ec46998b46/numpy-2.3.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:035796aaaddfe2f9664b9a9372f089cfc88bd795a67bd1bfe15e6e770934cf64", size = 18651574, upload-time = "2025-10-15T16:16:53.429Z" }, - { url = "https://files.pythonhosted.org/packages/62/b7/7efa763ab33dbccf56dade36938a77345ce8e8192d6b39e470ca25ff3cd0/numpy-2.3.4-cp313-cp313t-win32.whl", hash = "sha256:fea80f4f4cf83b54c3a051f2f727870ee51e22f0248d3114b8e755d160b38cfb", size = 6413135, upload-time = "2025-10-15T16:16:55.992Z" }, - { url = "https://files.pythonhosted.org/packages/43/70/aba4c38e8400abcc2f345e13d972fb36c26409b3e644366db7649015f291/numpy-2.3.4-cp313-cp313t-win_amd64.whl", hash = "sha256:15eea9f306b98e0be91eb344a94c0e630689ef302e10c2ce5f7e11905c704f9c", size = 12928582, upload-time = "2025-10-15T16:16:57.943Z" }, - { url = "https://files.pythonhosted.org/packages/67/63/871fad5f0073fc00fbbdd7232962ea1ac40eeaae2bba66c76214f7954236/numpy-2.3.4-cp313-cp313t-win_arm64.whl", hash = "sha256:b6c231c9c2fadbae4011ca5e7e83e12dc4a5072f1a1d85a0a7b3ed754d145a40", size = 10266691, upload-time = "2025-10-15T16:17:00.048Z" }, - { url = "https://files.pythonhosted.org/packages/72/71/ae6170143c115732470ae3a2d01512870dd16e0953f8a6dc89525696069b/numpy-2.3.4-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:81c3e6d8c97295a7360d367f9f8553973651b76907988bb6066376bc2252f24e", size = 20955580, upload-time = "2025-10-15T16:17:02.509Z" }, - { url = "https://files.pythonhosted.org/packages/af/39/4be9222ffd6ca8a30eda033d5f753276a9c3426c397bb137d8e19dedd200/numpy-2.3.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:7c26b0b2bf58009ed1f38a641f3db4be8d960a417ca96d14e5b06df1506d41ff", size = 14188056, upload-time = "2025-10-15T16:17:04.873Z" }, - { url = "https://files.pythonhosted.org/packages/6c/3d/d85f6700d0a4aa4f9491030e1021c2b2b7421b2b38d01acd16734a2bfdc7/numpy-2.3.4-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:62b2198c438058a20b6704351b35a1d7db881812d8512d67a69c9de1f18ca05f", size = 5116555, upload-time = "2025-10-15T16:17:07.499Z" }, - { url = "https://files.pythonhosted.org/packages/bf/04/82c1467d86f47eee8a19a464c92f90a9bb68ccf14a54c5224d7031241ffb/numpy-2.3.4-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:9d729d60f8d53a7361707f4b68a9663c968882dd4f09e0d58c044c8bf5faee7b", size = 6643581, upload-time = "2025-10-15T16:17:09.774Z" }, - { url = "https://files.pythonhosted.org/packages/0c/d3/c79841741b837e293f48bd7db89d0ac7a4f2503b382b78a790ef1dc778a5/numpy-2.3.4-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bd0c630cf256b0a7fd9d0a11c9413b42fef5101219ce6ed5a09624f5a65392c7", size = 14299186, upload-time = "2025-10-15T16:17:11.937Z" }, - { url = "https://files.pythonhosted.org/packages/e8/7e/4a14a769741fbf237eec5a12a2cbc7a4c4e061852b6533bcb9e9a796c908/numpy-2.3.4-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5e081bc082825f8b139f9e9fe42942cb4054524598aaeb177ff476cc76d09d2", size = 16638601, upload-time = "2025-10-15T16:17:14.391Z" }, - { url = "https://files.pythonhosted.org/packages/93/87/1c1de269f002ff0a41173fe01dcc925f4ecff59264cd8f96cf3b60d12c9b/numpy-2.3.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:15fb27364ed84114438fff8aaf998c9e19adbeba08c0b75409f8c452a8692c52", size = 16074219, upload-time = "2025-10-15T16:17:17.058Z" }, - { url = "https://files.pythonhosted.org/packages/cd/28/18f72ee77408e40a76d691001ae599e712ca2a47ddd2c4f695b16c65f077/numpy-2.3.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:85d9fb2d8cd998c84d13a79a09cc0c1091648e848e4e6249b0ccd7f6b487fa26", size = 18576702, upload-time = "2025-10-15T16:17:19.379Z" }, - { url = "https://files.pythonhosted.org/packages/c3/76/95650169b465ececa8cf4b2e8f6df255d4bf662775e797ade2025cc51ae6/numpy-2.3.4-cp314-cp314-win32.whl", hash = "sha256:e73d63fd04e3a9d6bc187f5455d81abfad05660b212c8804bf3b407e984cd2bc", size = 6337136, upload-time = "2025-10-15T16:17:22.886Z" }, - { url = "https://files.pythonhosted.org/packages/dc/89/a231a5c43ede5d6f77ba4a91e915a87dea4aeea76560ba4d2bf185c683f0/numpy-2.3.4-cp314-cp314-win_amd64.whl", hash = "sha256:3da3491cee49cf16157e70f607c03a217ea6647b1cea4819c4f48e53d49139b9", size = 12920542, upload-time = "2025-10-15T16:17:24.783Z" }, - { url = "https://files.pythonhosted.org/packages/0d/0c/ae9434a888f717c5ed2ff2393b3f344f0ff6f1c793519fa0c540461dc530/numpy-2.3.4-cp314-cp314-win_arm64.whl", hash = "sha256:6d9cd732068e8288dbe2717177320723ccec4fb064123f0caf9bbd90ab5be868", size = 10480213, upload-time = "2025-10-15T16:17:26.935Z" }, - { url = "https://files.pythonhosted.org/packages/83/4b/c4a5f0841f92536f6b9592694a5b5f68c9ab37b775ff342649eadf9055d3/numpy-2.3.4-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:22758999b256b595cf0b1d102b133bb61866ba5ceecf15f759623b64c020c9ec", size = 21052280, upload-time = "2025-10-15T16:17:29.638Z" }, - { url = "https://files.pythonhosted.org/packages/3e/80/90308845fc93b984d2cc96d83e2324ce8ad1fd6efea81b324cba4b673854/numpy-2.3.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:9cb177bc55b010b19798dc5497d540dea67fd13a8d9e882b2dae71de0cf09eb3", size = 14302930, upload-time = "2025-10-15T16:17:32.384Z" }, - { url = "https://files.pythonhosted.org/packages/3d/4e/07439f22f2a3b247cec4d63a713faae55e1141a36e77fb212881f7cda3fb/numpy-2.3.4-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:0f2bcc76f1e05e5ab58893407c63d90b2029908fa41f9f1cc51eecce936c3365", size = 5231504, upload-time = "2025-10-15T16:17:34.515Z" }, - { url = "https://files.pythonhosted.org/packages/ab/de/1e11f2547e2fe3d00482b19721855348b94ada8359aef5d40dd57bfae9df/numpy-2.3.4-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:8dc20bde86802df2ed8397a08d793da0ad7a5fd4ea3ac85d757bf5dd4ad7c252", size = 6739405, upload-time = "2025-10-15T16:17:36.128Z" }, - { url = "https://files.pythonhosted.org/packages/3b/40/8cd57393a26cebe2e923005db5134a946c62fa56a1087dc7c478f3e30837/numpy-2.3.4-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5e199c087e2aa71c8f9ce1cb7a8e10677dc12457e7cc1be4798632da37c3e86e", size = 14354866, upload-time = "2025-10-15T16:17:38.884Z" }, - { url = "https://files.pythonhosted.org/packages/93/39/5b3510f023f96874ee6fea2e40dfa99313a00bf3ab779f3c92978f34aace/numpy-2.3.4-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85597b2d25ddf655495e2363fe044b0ae999b75bc4d630dc0d886484b03a5eb0", size = 16703296, upload-time = "2025-10-15T16:17:41.564Z" }, - { url = "https://files.pythonhosted.org/packages/41/0d/19bb163617c8045209c1996c4e427bccbc4bbff1e2c711f39203c8ddbb4a/numpy-2.3.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:04a69abe45b49c5955923cf2c407843d1c85013b424ae8a560bba16c92fe44a0", size = 16136046, upload-time = "2025-10-15T16:17:43.901Z" }, - { url = "https://files.pythonhosted.org/packages/e2/c1/6dba12fdf68b02a21ac411c9df19afa66bed2540f467150ca64d246b463d/numpy-2.3.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e1708fac43ef8b419c975926ce1eaf793b0c13b7356cfab6ab0dc34c0a02ac0f", size = 18652691, upload-time = "2025-10-15T16:17:46.247Z" }, - { url = "https://files.pythonhosted.org/packages/f8/73/f85056701dbbbb910c51d846c58d29fd46b30eecd2b6ba760fc8b8a1641b/numpy-2.3.4-cp314-cp314t-win32.whl", hash = "sha256:863e3b5f4d9915aaf1b8ec79ae560ad21f0b8d5e3adc31e73126491bb86dee1d", size = 6485782, upload-time = "2025-10-15T16:17:48.872Z" }, - { url = "https://files.pythonhosted.org/packages/17/90/28fa6f9865181cb817c2471ee65678afa8a7e2a1fb16141473d5fa6bacc3/numpy-2.3.4-cp314-cp314t-win_amd64.whl", hash = "sha256:962064de37b9aef801d33bc579690f8bfe6c5e70e29b61783f60bcba838a14d6", size = 13113301, upload-time = "2025-10-15T16:17:50.938Z" }, - { url = "https://files.pythonhosted.org/packages/54/23/08c002201a8e7e1f9afba93b97deceb813252d9cfd0d3351caed123dcf97/numpy-2.3.4-cp314-cp314t-win_arm64.whl", hash = "sha256:8b5a9a39c45d852b62693d9b3f3e0fe052541f804296ff401a72a1b60edafb29", size = 10547532, upload-time = "2025-10-15T16:17:53.48Z" }, +version = "2.3.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/76/65/21b3bc86aac7b8f2862db1e808f1ea22b028e30a225a34a5ede9bf8678f2/numpy-2.3.5.tar.gz", hash = "sha256:784db1dcdab56bf0517743e746dfb0f885fc68d948aba86eeec2cba234bdf1c0", size = 20584950, upload-time = "2025-11-16T22:52:42.067Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/37/e669fe6cbb2b96c62f6bbedc6a81c0f3b7362f6a59230b23caa673a85721/numpy-2.3.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:74ae7b798248fe62021dbf3c914245ad45d1a6b0cb4a29ecb4b31d0bfbc4cc3e", size = 16733873, upload-time = "2025-11-16T22:49:49.84Z" }, + { url = "https://files.pythonhosted.org/packages/c5/65/df0db6c097892c9380851ab9e44b52d4f7ba576b833996e0080181c0c439/numpy-2.3.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee3888d9ff7c14604052b2ca5535a30216aa0a58e948cdd3eeb8d3415f638769", size = 12259838, upload-time = "2025-11-16T22:49:52.863Z" }, + { url = "https://files.pythonhosted.org/packages/5b/e1/1ee06e70eb2136797abe847d386e7c0e830b67ad1d43f364dd04fa50d338/numpy-2.3.5-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:612a95a17655e213502f60cfb9bf9408efdc9eb1d5f50535cc6eb365d11b42b5", size = 5088378, upload-time = "2025-11-16T22:49:55.055Z" }, + { url = "https://files.pythonhosted.org/packages/6d/9c/1ca85fb86708724275103b81ec4cf1ac1d08f465368acfc8da7ab545bdae/numpy-2.3.5-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3101e5177d114a593d79dd79658650fe28b5a0d8abeb8ce6f437c0e6df5be1a4", size = 6628559, upload-time = "2025-11-16T22:49:57.371Z" }, + { url = "https://files.pythonhosted.org/packages/74/78/fcd41e5a0ce4f3f7b003da85825acddae6d7ecb60cf25194741b036ca7d6/numpy-2.3.5-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b973c57ff8e184109db042c842423ff4f60446239bd585a5131cc47f06f789d", size = 14250702, upload-time = "2025-11-16T22:49:59.632Z" }, + { url = "https://files.pythonhosted.org/packages/b6/23/2a1b231b8ff672b4c450dac27164a8b2ca7d9b7144f9c02d2396518352eb/numpy-2.3.5-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0d8163f43acde9a73c2a33605353a4f1bc4798745a8b1d73183b28e5b435ae28", size = 16606086, upload-time = "2025-11-16T22:50:02.127Z" }, + { url = "https://files.pythonhosted.org/packages/a0/c5/5ad26fbfbe2012e190cc7d5003e4d874b88bb18861d0829edc140a713021/numpy-2.3.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:51c1e14eb1e154ebd80e860722f9e6ed6ec89714ad2db2d3aa33c31d7c12179b", size = 16025985, upload-time = "2025-11-16T22:50:04.536Z" }, + { url = "https://files.pythonhosted.org/packages/d2/fa/dd48e225c46c819288148d9d060b047fd2a6fb1eb37eae25112ee4cb4453/numpy-2.3.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b46b4ec24f7293f23adcd2d146960559aaf8020213de8ad1909dba6c013bf89c", size = 18542976, upload-time = "2025-11-16T22:50:07.557Z" }, + { url = "https://files.pythonhosted.org/packages/05/79/ccbd23a75862d95af03d28b5c6901a1b7da4803181513d52f3b86ed9446e/numpy-2.3.5-cp312-cp312-win32.whl", hash = "sha256:3997b5b3c9a771e157f9aae01dd579ee35ad7109be18db0e85dbdbe1de06e952", size = 6285274, upload-time = "2025-11-16T22:50:10.746Z" }, + { url = "https://files.pythonhosted.org/packages/2d/57/8aeaf160312f7f489dea47ab61e430b5cb051f59a98ae68b7133ce8fa06a/numpy-2.3.5-cp312-cp312-win_amd64.whl", hash = "sha256:86945f2ee6d10cdfd67bcb4069c1662dd711f7e2a4343db5cecec06b87cf31aa", size = 12782922, upload-time = "2025-11-16T22:50:12.811Z" }, + { url = "https://files.pythonhosted.org/packages/78/a6/aae5cc2ca78c45e64b9ef22f089141d661516856cf7c8a54ba434576900d/numpy-2.3.5-cp312-cp312-win_arm64.whl", hash = "sha256:f28620fe26bee16243be2b7b874da327312240a7cdc38b769a697578d2100013", size = 10194667, upload-time = "2025-11-16T22:50:16.16Z" }, + { url = "https://files.pythonhosted.org/packages/db/69/9cde09f36da4b5a505341180a3f2e6fadc352fd4d2b7096ce9778db83f1a/numpy-2.3.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d0f23b44f57077c1ede8c5f26b30f706498b4862d3ff0a7298b8411dd2f043ff", size = 16728251, upload-time = "2025-11-16T22:50:19.013Z" }, + { url = "https://files.pythonhosted.org/packages/79/fb/f505c95ceddd7027347b067689db71ca80bd5ecc926f913f1a23e65cf09b/numpy-2.3.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa5bc7c5d59d831d9773d1170acac7893ce3a5e130540605770ade83280e7188", size = 12254652, upload-time = "2025-11-16T22:50:21.487Z" }, + { url = "https://files.pythonhosted.org/packages/78/da/8c7738060ca9c31b30e9301ee0cf6c5ffdbf889d9593285a1cead337f9a5/numpy-2.3.5-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:ccc933afd4d20aad3c00bcef049cb40049f7f196e0397f1109dba6fed63267b0", size = 5083172, upload-time = "2025-11-16T22:50:24.562Z" }, + { url = "https://files.pythonhosted.org/packages/a4/b4/ee5bb2537fb9430fd2ef30a616c3672b991a4129bb1c7dcc42aa0abbe5d7/numpy-2.3.5-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:afaffc4393205524af9dfa400fa250143a6c3bc646c08c9f5e25a9f4b4d6a903", size = 6622990, upload-time = "2025-11-16T22:50:26.47Z" }, + { url = "https://files.pythonhosted.org/packages/95/03/dc0723a013c7d7c19de5ef29e932c3081df1c14ba582b8b86b5de9db7f0f/numpy-2.3.5-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c75442b2209b8470d6d5d8b1c25714270686f14c749028d2199c54e29f20b4d", size = 14248902, upload-time = "2025-11-16T22:50:28.861Z" }, + { url = "https://files.pythonhosted.org/packages/f5/10/ca162f45a102738958dcec8023062dad0cbc17d1ab99d68c4e4a6c45fb2b/numpy-2.3.5-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11e06aa0af8c0f05104d56450d6093ee639e15f24ecf62d417329d06e522e017", size = 16597430, upload-time = "2025-11-16T22:50:31.56Z" }, + { url = "https://files.pythonhosted.org/packages/2a/51/c1e29be863588db58175175f057286900b4b3327a1351e706d5e0f8dd679/numpy-2.3.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed89927b86296067b4f81f108a2271d8926467a8868e554eaf370fc27fa3ccaf", size = 16024551, upload-time = "2025-11-16T22:50:34.242Z" }, + { url = "https://files.pythonhosted.org/packages/83/68/8236589d4dbb87253d28259d04d9b814ec0ecce7cb1c7fed29729f4c3a78/numpy-2.3.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:51c55fe3451421f3a6ef9a9c1439e82101c57a2c9eab9feb196a62b1a10b58ce", size = 18533275, upload-time = "2025-11-16T22:50:37.651Z" }, + { url = "https://files.pythonhosted.org/packages/40/56/2932d75b6f13465239e3b7b7e511be27f1b8161ca2510854f0b6e521c395/numpy-2.3.5-cp313-cp313-win32.whl", hash = "sha256:1978155dd49972084bd6ef388d66ab70f0c323ddee6f693d539376498720fb7e", size = 6277637, upload-time = "2025-11-16T22:50:40.11Z" }, + { url = "https://files.pythonhosted.org/packages/0c/88/e2eaa6cffb115b85ed7c7c87775cb8bcf0816816bc98ca8dbfa2ee33fe6e/numpy-2.3.5-cp313-cp313-win_amd64.whl", hash = "sha256:00dc4e846108a382c5869e77c6ed514394bdeb3403461d25a829711041217d5b", size = 12779090, upload-time = "2025-11-16T22:50:42.503Z" }, + { url = "https://files.pythonhosted.org/packages/8f/88/3f41e13a44ebd4034ee17baa384acac29ba6a4fcc2aca95f6f08ca0447d1/numpy-2.3.5-cp313-cp313-win_arm64.whl", hash = "sha256:0472f11f6ec23a74a906a00b48a4dcf3849209696dff7c189714511268d103ae", size = 10194710, upload-time = "2025-11-16T22:50:44.971Z" }, + { url = "https://files.pythonhosted.org/packages/13/cb/71744144e13389d577f867f745b7df2d8489463654a918eea2eeb166dfc9/numpy-2.3.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:414802f3b97f3c1eef41e530aaba3b3c1620649871d8cb38c6eaff034c2e16bd", size = 16827292, upload-time = "2025-11-16T22:50:47.715Z" }, + { url = "https://files.pythonhosted.org/packages/71/80/ba9dc6f2a4398e7f42b708a7fdc841bb638d353be255655498edbf9a15a8/numpy-2.3.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5ee6609ac3604fa7780e30a03e5e241a7956f8e2fcfe547d51e3afa5247ac47f", size = 12378897, upload-time = "2025-11-16T22:50:51.327Z" }, + { url = "https://files.pythonhosted.org/packages/2e/6d/db2151b9f64264bcceccd51741aa39b50150de9b602d98ecfe7e0c4bff39/numpy-2.3.5-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:86d835afea1eaa143012a2d7a3f45a3adce2d7adc8b4961f0b362214d800846a", size = 5207391, upload-time = "2025-11-16T22:50:54.542Z" }, + { url = "https://files.pythonhosted.org/packages/80/ae/429bacace5ccad48a14c4ae5332f6aa8ab9f69524193511d60ccdfdc65fa/numpy-2.3.5-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:30bc11310e8153ca664b14c5f1b73e94bd0503681fcf136a163de856f3a50139", size = 6721275, upload-time = "2025-11-16T22:50:56.794Z" }, + { url = "https://files.pythonhosted.org/packages/74/5b/1919abf32d8722646a38cd527bc3771eb229a32724ee6ba340ead9b92249/numpy-2.3.5-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1062fde1dcf469571705945b0f221b73928f34a20c904ffb45db101907c3454e", size = 14306855, upload-time = "2025-11-16T22:50:59.208Z" }, + { url = "https://files.pythonhosted.org/packages/a5/87/6831980559434973bebc30cd9c1f21e541a0f2b0c280d43d3afd909b66d0/numpy-2.3.5-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce581db493ea1a96c0556360ede6607496e8bf9b3a8efa66e06477267bc831e9", size = 16657359, upload-time = "2025-11-16T22:51:01.991Z" }, + { url = "https://files.pythonhosted.org/packages/dd/91/c797f544491ee99fd00495f12ebb7802c440c1915811d72ac5b4479a3356/numpy-2.3.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:cc8920d2ec5fa99875b670bb86ddeb21e295cb07aa331810d9e486e0b969d946", size = 16093374, upload-time = "2025-11-16T22:51:05.291Z" }, + { url = "https://files.pythonhosted.org/packages/74/a6/54da03253afcbe7a72785ec4da9c69fb7a17710141ff9ac5fcb2e32dbe64/numpy-2.3.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9ee2197ef8c4f0dfe405d835f3b6a14f5fee7782b5de51ba06fb65fc9b36e9f1", size = 18594587, upload-time = "2025-11-16T22:51:08.585Z" }, + { url = "https://files.pythonhosted.org/packages/80/e9/aff53abbdd41b0ecca94285f325aff42357c6b5abc482a3fcb4994290b18/numpy-2.3.5-cp313-cp313t-win32.whl", hash = "sha256:70b37199913c1bd300ff6e2693316c6f869c7ee16378faf10e4f5e3275b299c3", size = 6405940, upload-time = "2025-11-16T22:51:11.541Z" }, + { url = "https://files.pythonhosted.org/packages/d5/81/50613fec9d4de5480de18d4f8ef59ad7e344d497edbef3cfd80f24f98461/numpy-2.3.5-cp313-cp313t-win_amd64.whl", hash = "sha256:b501b5fa195cc9e24fe102f21ec0a44dffc231d2af79950b451e0d99cea02234", size = 12920341, upload-time = "2025-11-16T22:51:14.312Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ab/08fd63b9a74303947f34f0bd7c5903b9c5532c2d287bead5bdf4c556c486/numpy-2.3.5-cp313-cp313t-win_arm64.whl", hash = "sha256:a80afd79f45f3c4a7d341f13acbe058d1ca8ac017c165d3fa0d3de6bc1a079d7", size = 10262507, upload-time = "2025-11-16T22:51:16.846Z" }, + { url = "https://files.pythonhosted.org/packages/ba/97/1a914559c19e32d6b2e233cf9a6a114e67c856d35b1d6babca571a3e880f/numpy-2.3.5-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:bf06bc2af43fa8d32d30fae16ad965663e966b1a3202ed407b84c989c3221e82", size = 16735706, upload-time = "2025-11-16T22:51:19.558Z" }, + { url = "https://files.pythonhosted.org/packages/57/d4/51233b1c1b13ecd796311216ae417796b88b0616cfd8a33ae4536330748a/numpy-2.3.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:052e8c42e0c49d2575621c158934920524f6c5da05a1d3b9bab5d8e259e045f0", size = 12264507, upload-time = "2025-11-16T22:51:22.492Z" }, + { url = "https://files.pythonhosted.org/packages/45/98/2fe46c5c2675b8306d0b4a3ec3494273e93e1226a490f766e84298576956/numpy-2.3.5-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:1ed1ec893cff7040a02c8aa1c8611b94d395590d553f6b53629a4461dc7f7b63", size = 5093049, upload-time = "2025-11-16T22:51:25.171Z" }, + { url = "https://files.pythonhosted.org/packages/ce/0e/0698378989bb0ac5f1660c81c78ab1fe5476c1a521ca9ee9d0710ce54099/numpy-2.3.5-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:2dcd0808a421a482a080f89859a18beb0b3d1e905b81e617a188bd80422d62e9", size = 6626603, upload-time = "2025-11-16T22:51:27Z" }, + { url = "https://files.pythonhosted.org/packages/5e/a6/9ca0eecc489640615642a6cbc0ca9e10df70df38c4d43f5a928ff18d8827/numpy-2.3.5-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:727fd05b57df37dc0bcf1a27767a3d9a78cbbc92822445f32cc3436ba797337b", size = 14262696, upload-time = "2025-11-16T22:51:29.402Z" }, + { url = "https://files.pythonhosted.org/packages/c8/f6/07ec185b90ec9d7217a00eeeed7383b73d7e709dae2a9a021b051542a708/numpy-2.3.5-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fffe29a1ef00883599d1dc2c51aa2e5d80afe49523c261a74933df395c15c520", size = 16597350, upload-time = "2025-11-16T22:51:32.167Z" }, + { url = "https://files.pythonhosted.org/packages/75/37/164071d1dde6a1a84c9b8e5b414fa127981bad47adf3a6b7e23917e52190/numpy-2.3.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8f7f0e05112916223d3f438f293abf0727e1181b5983f413dfa2fefc4098245c", size = 16040190, upload-time = "2025-11-16T22:51:35.403Z" }, + { url = "https://files.pythonhosted.org/packages/08/3c/f18b82a406b04859eb026d204e4e1773eb41c5be58410f41ffa511d114ae/numpy-2.3.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2e2eb32ddb9ccb817d620ac1d8dae7c3f641c1e5f55f531a33e8ab97960a75b8", size = 18536749, upload-time = "2025-11-16T22:51:39.698Z" }, + { url = "https://files.pythonhosted.org/packages/40/79/f82f572bf44cf0023a2fe8588768e23e1592585020d638999f15158609e1/numpy-2.3.5-cp314-cp314-win32.whl", hash = "sha256:66f85ce62c70b843bab1fb14a05d5737741e74e28c7b8b5a064de10142fad248", size = 6335432, upload-time = "2025-11-16T22:51:42.476Z" }, + { url = "https://files.pythonhosted.org/packages/a3/2e/235b4d96619931192c91660805e5e49242389742a7a82c27665021db690c/numpy-2.3.5-cp314-cp314-win_amd64.whl", hash = "sha256:e6a0bc88393d65807d751a614207b7129a310ca4fe76a74e5c7da5fa5671417e", size = 12919388, upload-time = "2025-11-16T22:51:45.275Z" }, + { url = "https://files.pythonhosted.org/packages/07/2b/29fd75ce45d22a39c61aad74f3d718e7ab67ccf839ca8b60866054eb15f8/numpy-2.3.5-cp314-cp314-win_arm64.whl", hash = "sha256:aeffcab3d4b43712bb7a60b65f6044d444e75e563ff6180af8f98dd4b905dfd2", size = 10476651, upload-time = "2025-11-16T22:51:47.749Z" }, + { url = "https://files.pythonhosted.org/packages/17/e1/f6a721234ebd4d87084cfa68d081bcba2f5cfe1974f7de4e0e8b9b2a2ba1/numpy-2.3.5-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:17531366a2e3a9e30762c000f2c43a9aaa05728712e25c11ce1dbe700c53ad41", size = 16834503, upload-time = "2025-11-16T22:51:50.443Z" }, + { url = "https://files.pythonhosted.org/packages/5c/1c/baf7ffdc3af9c356e1c135e57ab7cf8d247931b9554f55c467efe2c69eff/numpy-2.3.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:d21644de1b609825ede2f48be98dfde4656aefc713654eeee280e37cadc4e0ad", size = 12381612, upload-time = "2025-11-16T22:51:53.609Z" }, + { url = "https://files.pythonhosted.org/packages/74/91/f7f0295151407ddc9ba34e699013c32c3c91944f9b35fcf9281163dc1468/numpy-2.3.5-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:c804e3a5aba5460c73955c955bdbd5c08c354954e9270a2c1565f62e866bdc39", size = 5210042, upload-time = "2025-11-16T22:51:56.213Z" }, + { url = "https://files.pythonhosted.org/packages/2e/3b/78aebf345104ec50dd50a4d06ddeb46a9ff5261c33bcc58b1c4f12f85ec2/numpy-2.3.5-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:cc0a57f895b96ec78969c34f682c602bf8da1a0270b09bc65673df2e7638ec20", size = 6724502, upload-time = "2025-11-16T22:51:58.584Z" }, + { url = "https://files.pythonhosted.org/packages/02/c6/7c34b528740512e57ef1b7c8337ab0b4f0bddf34c723b8996c675bc2bc91/numpy-2.3.5-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:900218e456384ea676e24ea6a0417f030a3b07306d29d7ad843957b40a9d8d52", size = 14308962, upload-time = "2025-11-16T22:52:01.698Z" }, + { url = "https://files.pythonhosted.org/packages/80/35/09d433c5262bc32d725bafc619e095b6a6651caf94027a03da624146f655/numpy-2.3.5-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:09a1bea522b25109bf8e6f3027bd810f7c1085c64a0c7ce050c1676ad0ba010b", size = 16655054, upload-time = "2025-11-16T22:52:04.267Z" }, + { url = "https://files.pythonhosted.org/packages/7a/ab/6a7b259703c09a88804fa2430b43d6457b692378f6b74b356155283566ac/numpy-2.3.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:04822c00b5fd0323c8166d66c701dc31b7fbd252c100acd708c48f763968d6a3", size = 16091613, upload-time = "2025-11-16T22:52:08.651Z" }, + { url = "https://files.pythonhosted.org/packages/c2/88/330da2071e8771e60d1038166ff9d73f29da37b01ec3eb43cb1427464e10/numpy-2.3.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d6889ec4ec662a1a37eb4b4fb26b6100841804dac55bd9df579e326cdc146227", size = 18591147, upload-time = "2025-11-16T22:52:11.453Z" }, + { url = "https://files.pythonhosted.org/packages/51/41/851c4b4082402d9ea860c3626db5d5df47164a712cb23b54be028b184c1c/numpy-2.3.5-cp314-cp314t-win32.whl", hash = "sha256:93eebbcf1aafdf7e2ddd44c2923e2672e1010bddc014138b229e49725b4d6be5", size = 6479806, upload-time = "2025-11-16T22:52:14.641Z" }, + { url = "https://files.pythonhosted.org/packages/90/30/d48bde1dfd93332fa557cff1972fbc039e055a52021fbef4c2c4b1eefd17/numpy-2.3.5-cp314-cp314t-win_amd64.whl", hash = "sha256:c8a9958e88b65c3b27e22ca2a076311636850b612d6bbfb76e8d156aacde2aaf", size = 13105760, upload-time = "2025-11-16T22:52:17.975Z" }, + { url = "https://files.pythonhosted.org/packages/2d/fd/4b5eb0b3e888d86aee4d198c23acec7d214baaf17ea93c1adec94c9518b9/numpy-2.3.5-cp314-cp314t-win_arm64.whl", hash = "sha256:6203fdf9f3dc5bdaed7319ad8698e685c7a3be10819f41d32a0723e611733b42", size = 10545459, upload-time = "2025-11-16T22:52:20.55Z" }, ] [[package]] @@ -2675,15 +2739,15 @@ wheels = [ [[package]] name = "plotly" -version = "6.3.1" +version = "6.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "narwhals" }, { name = "packaging" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/0c/63/961d47c9ffd592a575495891cdcf7875dc0903ebb33ac238935714213789/plotly-6.3.1.tar.gz", hash = "sha256:dd896e3d940e653a7ce0470087e82c2bd903969a55e30d1b01bb389319461bb0", size = 6956460, upload-time = "2025-10-02T16:10:34.16Z" } +sdist = { url = "https://files.pythonhosted.org/packages/06/e6/b768650072837505804bed4790c5449ba348a3b720e27ca7605414e998cd/plotly-6.4.0.tar.gz", hash = "sha256:68c6db2ed2180289ef978f087841148b7efda687552276da15a6e9b92107052a", size = 7012379, upload-time = "2025-11-04T17:59:26.45Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3f/93/023955c26b0ce614342d11cc0652f1e45e32393b6ab9d11a664a60e9b7b7/plotly-6.3.1-py3-none-any.whl", hash = "sha256:8b4420d1dcf2b040f5983eed433f95732ed24930e496d36eb70d211923532e64", size = 9833698, upload-time = "2025-10-02T16:10:22.584Z" }, + { url = "https://files.pythonhosted.org/packages/78/ae/89b45ccccfeebc464c9233de5675990f75241b8ee4cd63227800fdf577d1/plotly-6.4.0-py3-none-any.whl", hash = "sha256:a1062eafbdc657976c2eedd276c90e184ccd6c21282a5e9ee8f20efca9c9a4c5", size = 9892458, upload-time = "2025-11-04T17:59:22.622Z" }, ] [[package]] @@ -2827,17 +2891,17 @@ wheels = [ [[package]] name = "protobuf" -version = "6.33.0" +version = "6.33.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/19/ff/64a6c8f420818bb873713988ca5492cba3a7946be57e027ac63495157d97/protobuf-6.33.0.tar.gz", hash = "sha256:140303d5c8d2037730c548f8c7b93b20bb1dc301be280c378b82b8894589c954", size = 443463, upload-time = "2025-10-15T20:39:52.159Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/03/a1440979a3f74f16cab3b75b0da1a1a7f922d56a8ddea96092391998edc0/protobuf-6.33.1.tar.gz", hash = "sha256:97f65757e8d09870de6fd973aeddb92f85435607235d20b2dfed93405d00c85b", size = 443432, upload-time = "2025-11-13T16:44:18.895Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/ee/52b3fa8feb6db4a833dfea4943e175ce645144532e8a90f72571ad85df4e/protobuf-6.33.0-cp310-abi3-win32.whl", hash = "sha256:d6101ded078042a8f17959eccd9236fb7a9ca20d3b0098bbcb91533a5680d035", size = 425593, upload-time = "2025-10-15T20:39:40.29Z" }, - { url = "https://files.pythonhosted.org/packages/7b/c6/7a465f1825872c55e0341ff4a80198743f73b69ce5d43ab18043699d1d81/protobuf-6.33.0-cp310-abi3-win_amd64.whl", hash = "sha256:9a031d10f703f03768f2743a1c403af050b6ae1f3480e9c140f39c45f81b13ee", size = 436882, upload-time = "2025-10-15T20:39:42.841Z" }, - { url = "https://files.pythonhosted.org/packages/e1/a9/b6eee662a6951b9c3640e8e452ab3e09f117d99fc10baa32d1581a0d4099/protobuf-6.33.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:905b07a65f1a4b72412314082c7dbfae91a9e8b68a0cc1577515f8df58ecf455", size = 427521, upload-time = "2025-10-15T20:39:43.803Z" }, - { url = "https://files.pythonhosted.org/packages/10/35/16d31e0f92c6d2f0e77c2a3ba93185130ea13053dd16200a57434c882f2b/protobuf-6.33.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:e0697ece353e6239b90ee43a9231318302ad8353c70e6e45499fa52396debf90", size = 324445, upload-time = "2025-10-15T20:39:44.932Z" }, - { url = "https://files.pythonhosted.org/packages/e6/eb/2a981a13e35cda8b75b5585aaffae2eb904f8f351bdd3870769692acbd8a/protobuf-6.33.0-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:e0a1715e4f27355afd9570f3ea369735afc853a6c3951a6afe1f80d8569ad298", size = 339159, upload-time = "2025-10-15T20:39:46.186Z" }, - { url = "https://files.pythonhosted.org/packages/21/51/0b1cbad62074439b867b4e04cc09b93f6699d78fd191bed2bbb44562e077/protobuf-6.33.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:35be49fd3f4fefa4e6e2aacc35e8b837d6703c37a2168a55ac21e9b1bc7559ef", size = 323172, upload-time = "2025-10-15T20:39:47.465Z" }, - { url = "https://files.pythonhosted.org/packages/07/d1/0a28c21707807c6aacd5dc9c3704b2aa1effbf37adebd8caeaf68b17a636/protobuf-6.33.0-py3-none-any.whl", hash = "sha256:25c9e1963c6734448ea2d308cfa610e692b801304ba0908d7bfa564ac5132995", size = 170477, upload-time = "2025-10-15T20:39:51.311Z" }, + { url = "https://files.pythonhosted.org/packages/06/f1/446a9bbd2c60772ca36556bac8bfde40eceb28d9cc7838755bc41e001d8f/protobuf-6.33.1-cp310-abi3-win32.whl", hash = "sha256:f8d3fdbc966aaab1d05046d0240dd94d40f2a8c62856d41eaa141ff64a79de6b", size = 425593, upload-time = "2025-11-13T16:44:06.275Z" }, + { url = "https://files.pythonhosted.org/packages/a6/79/8780a378c650e3df849b73de8b13cf5412f521ca2ff9b78a45c247029440/protobuf-6.33.1-cp310-abi3-win_amd64.whl", hash = "sha256:923aa6d27a92bf44394f6abf7ea0500f38769d4b07f4be41cb52bd8b1123b9ed", size = 436883, upload-time = "2025-11-13T16:44:09.222Z" }, + { url = "https://files.pythonhosted.org/packages/cd/93/26213ff72b103ae55bb0d73e7fb91ea570ef407c3ab4fd2f1f27cac16044/protobuf-6.33.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:fe34575f2bdde76ac429ec7b570235bf0c788883e70aee90068e9981806f2490", size = 427522, upload-time = "2025-11-13T16:44:10.475Z" }, + { url = "https://files.pythonhosted.org/packages/c2/32/df4a35247923393aa6b887c3b3244a8c941c32a25681775f96e2b418f90e/protobuf-6.33.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:f8adba2e44cde2d7618996b3fc02341f03f5bc3f2748be72dc7b063319276178", size = 324445, upload-time = "2025-11-13T16:44:11.869Z" }, + { url = "https://files.pythonhosted.org/packages/8e/d0/d796e419e2ec93d2f3fa44888861c3f88f722cde02b7c3488fcc6a166820/protobuf-6.33.1-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:0f4cf01222c0d959c2b399142deb526de420be8236f22c71356e2a544e153c53", size = 339161, upload-time = "2025-11-13T16:44:12.778Z" }, + { url = "https://files.pythonhosted.org/packages/1d/2a/3c5f05a4af06649547027d288747f68525755de692a26a7720dced3652c0/protobuf-6.33.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:8fd7d5e0eb08cd5b87fd3df49bc193f5cfd778701f47e11d127d0afc6c39f1d1", size = 323171, upload-time = "2025-11-13T16:44:14.035Z" }, + { url = "https://files.pythonhosted.org/packages/08/b4/46310463b4f6ceef310f8348786f3cff181cea671578e3d9743ba61a459e/protobuf-6.33.1-py3-none-any.whl", hash = "sha256:d595a9fd694fdeb061a62fbe10eb039cc1e444df81ec9bb70c7fc59ebcb1eafa", size = 170477, upload-time = "2025-11-13T16:44:17.633Z" }, ] [[package]] @@ -2877,40 +2941,54 @@ wheels = [ [[package]] name = "py-partiql-parser" -version = "0.6.1" +version = "0.6.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/58/a1/0a2867e48b232b4f82c4929ef7135f2a5d72c3886b957dccf63c70aa2fcb/py_partiql_parser-0.6.1.tar.gz", hash = "sha256:8583ff2a0e15560ef3bc3df109a7714d17f87d81d33e8c38b7fed4e58a63215d", size = 17120, upload-time = "2024-12-25T22:06:41.327Z" } +sdist = { url = "https://files.pythonhosted.org/packages/56/7a/a0f6bda783eb4df8e3dfd55973a1ac6d368a89178c300e1b5b91cd181e5e/py_partiql_parser-0.6.3.tar.gz", hash = "sha256:09cecf916ce6e3da2c050f0cb6106166de42c33d34a078ec2eb19377ea70389a", size = 17456, upload-time = "2025-10-18T13:56:13.441Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/97/84/0e410c20bbe9a504fc56e97908f13261c2b313d16cbb3b738556166f044a/py_partiql_parser-0.6.1-py2.py3-none-any.whl", hash = "sha256:ff6a48067bff23c37e9044021bf1d949c83e195490c17e020715e927fe5b2456", size = 23520, upload-time = "2024-12-25T22:06:39.106Z" }, + { url = "https://files.pythonhosted.org/packages/c9/33/a7cbfccc39056a5cf8126b7aab4c8bafbedd4f0ca68ae40ecb627a2d2cd3/py_partiql_parser-0.6.3-py2.py3-none-any.whl", hash = "sha256:deb0769c3346179d2f590dcbde556f708cdb929059fb654bad75f4cf6e07f582", size = 23752, upload-time = "2025-10-18T13:56:12.256Z" }, ] [[package]] name = "pyarrow" -version = "21.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ef/c2/ea068b8f00905c06329a3dfcd40d0fcc2b7d0f2e355bdb25b65e0a0e4cd4/pyarrow-21.0.0.tar.gz", hash = "sha256:5051f2dccf0e283ff56335760cbc8622cf52264d67e359d5569541ac11b6d5bc", size = 1133487, upload-time = "2025-07-18T00:57:31.761Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ca/d4/d4f817b21aacc30195cf6a46ba041dd1be827efa4a623cc8bf39a1c2a0c0/pyarrow-21.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:3a302f0e0963db37e0a24a70c56cf91a4faa0bca51c23812279ca2e23481fccd", size = 31160305, upload-time = "2025-07-18T00:55:35.373Z" }, - { url = "https://files.pythonhosted.org/packages/a2/9c/dcd38ce6e4b4d9a19e1d36914cb8e2b1da4e6003dd075474c4cfcdfe0601/pyarrow-21.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:b6b27cf01e243871390474a211a7922bfbe3bda21e39bc9160daf0da3fe48876", size = 32684264, upload-time = "2025-07-18T00:55:39.303Z" }, - { url = "https://files.pythonhosted.org/packages/4f/74/2a2d9f8d7a59b639523454bec12dba35ae3d0a07d8ab529dc0809f74b23c/pyarrow-21.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:e72a8ec6b868e258a2cd2672d91f2860ad532d590ce94cdf7d5e7ec674ccf03d", size = 41108099, upload-time = "2025-07-18T00:55:42.889Z" }, - { url = "https://files.pythonhosted.org/packages/ad/90/2660332eeb31303c13b653ea566a9918484b6e4d6b9d2d46879a33ab0622/pyarrow-21.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b7ae0bbdc8c6674259b25bef5d2a1d6af5d39d7200c819cf99e07f7dfef1c51e", size = 42829529, upload-time = "2025-07-18T00:55:47.069Z" }, - { url = "https://files.pythonhosted.org/packages/33/27/1a93a25c92717f6aa0fca06eb4700860577d016cd3ae51aad0e0488ac899/pyarrow-21.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:58c30a1729f82d201627c173d91bd431db88ea74dcaa3885855bc6203e433b82", size = 43367883, upload-time = "2025-07-18T00:55:53.069Z" }, - { url = "https://files.pythonhosted.org/packages/05/d9/4d09d919f35d599bc05c6950095e358c3e15148ead26292dfca1fb659b0c/pyarrow-21.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:072116f65604b822a7f22945a7a6e581cfa28e3454fdcc6939d4ff6090126623", size = 45133802, upload-time = "2025-07-18T00:55:57.714Z" }, - { url = "https://files.pythonhosted.org/packages/71/30/f3795b6e192c3ab881325ffe172e526499eb3780e306a15103a2764916a2/pyarrow-21.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:cf56ec8b0a5c8c9d7021d6fd754e688104f9ebebf1bf4449613c9531f5346a18", size = 26203175, upload-time = "2025-07-18T00:56:01.364Z" }, - { url = "https://files.pythonhosted.org/packages/16/ca/c7eaa8e62db8fb37ce942b1ea0c6d7abfe3786ca193957afa25e71b81b66/pyarrow-21.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:e99310a4ebd4479bcd1964dff9e14af33746300cb014aa4a3781738ac63baf4a", size = 31154306, upload-time = "2025-07-18T00:56:04.42Z" }, - { url = "https://files.pythonhosted.org/packages/ce/e8/e87d9e3b2489302b3a1aea709aaca4b781c5252fcb812a17ab6275a9a484/pyarrow-21.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:d2fe8e7f3ce329a71b7ddd7498b3cfac0eeb200c2789bd840234f0dc271a8efe", size = 32680622, upload-time = "2025-07-18T00:56:07.505Z" }, - { url = "https://files.pythonhosted.org/packages/84/52/79095d73a742aa0aba370c7942b1b655f598069489ab387fe47261a849e1/pyarrow-21.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:f522e5709379d72fb3da7785aa489ff0bb87448a9dc5a75f45763a795a089ebd", size = 41104094, upload-time = "2025-07-18T00:56:10.994Z" }, - { url = "https://files.pythonhosted.org/packages/89/4b/7782438b551dbb0468892a276b8c789b8bbdb25ea5c5eb27faadd753e037/pyarrow-21.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:69cbbdf0631396e9925e048cfa5bce4e8c3d3b41562bbd70c685a8eb53a91e61", size = 42825576, upload-time = "2025-07-18T00:56:15.569Z" }, - { url = "https://files.pythonhosted.org/packages/b3/62/0f29de6e0a1e33518dec92c65be0351d32d7ca351e51ec5f4f837a9aab91/pyarrow-21.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:731c7022587006b755d0bdb27626a1a3bb004bb56b11fb30d98b6c1b4718579d", size = 43368342, upload-time = "2025-07-18T00:56:19.531Z" }, - { url = "https://files.pythonhosted.org/packages/90/c7/0fa1f3f29cf75f339768cc698c8ad4ddd2481c1742e9741459911c9ac477/pyarrow-21.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:dc56bc708f2d8ac71bd1dcb927e458c93cec10b98eb4120206a4091db7b67b99", size = 45131218, upload-time = "2025-07-18T00:56:23.347Z" }, - { url = "https://files.pythonhosted.org/packages/01/63/581f2076465e67b23bc5a37d4a2abff8362d389d29d8105832e82c9c811c/pyarrow-21.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:186aa00bca62139f75b7de8420f745f2af12941595bbbfa7ed3870ff63e25636", size = 26087551, upload-time = "2025-07-18T00:56:26.758Z" }, - { url = "https://files.pythonhosted.org/packages/c9/ab/357d0d9648bb8241ee7348e564f2479d206ebe6e1c47ac5027c2e31ecd39/pyarrow-21.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:a7a102574faa3f421141a64c10216e078df467ab9576684d5cd696952546e2da", size = 31290064, upload-time = "2025-07-18T00:56:30.214Z" }, - { url = "https://files.pythonhosted.org/packages/3f/8a/5685d62a990e4cac2043fc76b4661bf38d06efed55cf45a334b455bd2759/pyarrow-21.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:1e005378c4a2c6db3ada3ad4c217b381f6c886f0a80d6a316fe586b90f77efd7", size = 32727837, upload-time = "2025-07-18T00:56:33.935Z" }, - { url = "https://files.pythonhosted.org/packages/fc/de/c0828ee09525c2bafefd3e736a248ebe764d07d0fd762d4f0929dbc516c9/pyarrow-21.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:65f8e85f79031449ec8706b74504a316805217b35b6099155dd7e227eef0d4b6", size = 41014158, upload-time = "2025-07-18T00:56:37.528Z" }, - { url = "https://files.pythonhosted.org/packages/6e/26/a2865c420c50b7a3748320b614f3484bfcde8347b2639b2b903b21ce6a72/pyarrow-21.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:3a81486adc665c7eb1a2bde0224cfca6ceaba344a82a971ef059678417880eb8", size = 42667885, upload-time = "2025-07-18T00:56:41.483Z" }, - { url = "https://files.pythonhosted.org/packages/0a/f9/4ee798dc902533159250fb4321267730bc0a107d8c6889e07c3add4fe3a5/pyarrow-21.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:fc0d2f88b81dcf3ccf9a6ae17f89183762c8a94a5bdcfa09e05cfe413acf0503", size = 43276625, upload-time = "2025-07-18T00:56:48.002Z" }, - { url = "https://files.pythonhosted.org/packages/5a/da/e02544d6997037a4b0d22d8e5f66bc9315c3671371a8b18c79ade1cefe14/pyarrow-21.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6299449adf89df38537837487a4f8d3bd91ec94354fdd2a7d30bc11c48ef6e79", size = 44951890, upload-time = "2025-07-18T00:56:52.568Z" }, - { url = "https://files.pythonhosted.org/packages/e5/4e/519c1bc1876625fe6b71e9a28287c43ec2f20f73c658b9ae1d485c0c206e/pyarrow-21.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:222c39e2c70113543982c6b34f3077962b44fca38c0bd9e68bb6781534425c10", size = 26371006, upload-time = "2025-07-18T00:56:56.379Z" }, +version = "22.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/53/04a7fdc63e6056116c9ddc8b43bc28c12cdd181b85cbeadb79278475f3ae/pyarrow-22.0.0.tar.gz", hash = "sha256:3d600dc583260d845c7d8a6db540339dd883081925da2bd1c5cb808f720b3cd9", size = 1151151, upload-time = "2025-10-24T12:30:00.762Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/63/ba23862d69652f85b615ca14ad14f3bcfc5bf1b99ef3f0cd04ff93fdad5a/pyarrow-22.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:bea79263d55c24a32b0d79c00a1c58bb2ee5f0757ed95656b01c0fb310c5af3d", size = 34211578, upload-time = "2025-10-24T10:05:21.583Z" }, + { url = "https://files.pythonhosted.org/packages/b1/d0/f9ad86fe809efd2bcc8be32032fa72e8b0d112b01ae56a053006376c5930/pyarrow-22.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:12fe549c9b10ac98c91cf791d2945e878875d95508e1a5d14091a7aaa66d9cf8", size = 35989906, upload-time = "2025-10-24T10:05:29.485Z" }, + { url = "https://files.pythonhosted.org/packages/b4/a8/f910afcb14630e64d673f15904ec27dd31f1e009b77033c365c84e8c1e1d/pyarrow-22.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:334f900ff08ce0423407af97e6c26ad5d4e3b0763645559ece6fbf3747d6a8f5", size = 45021677, upload-time = "2025-10-24T10:05:38.274Z" }, + { url = "https://files.pythonhosted.org/packages/13/95/aec81f781c75cd10554dc17a25849c720d54feafb6f7847690478dcf5ef8/pyarrow-22.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c6c791b09c57ed76a18b03f2631753a4960eefbbca80f846da8baefc6491fcfe", size = 47726315, upload-time = "2025-10-24T10:05:47.314Z" }, + { url = "https://files.pythonhosted.org/packages/bb/d4/74ac9f7a54cfde12ee42734ea25d5a3c9a45db78f9def949307a92720d37/pyarrow-22.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c3200cb41cdbc65156e5f8c908d739b0dfed57e890329413da2748d1a2cd1a4e", size = 47990906, upload-time = "2025-10-24T10:05:58.254Z" }, + { url = "https://files.pythonhosted.org/packages/2e/71/fedf2499bf7a95062eafc989ace56572f3343432570e1c54e6599d5b88da/pyarrow-22.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ac93252226cf288753d8b46280f4edf3433bf9508b6977f8dd8526b521a1bbb9", size = 50306783, upload-time = "2025-10-24T10:06:08.08Z" }, + { url = "https://files.pythonhosted.org/packages/68/ed/b202abd5a5b78f519722f3d29063dda03c114711093c1995a33b8e2e0f4b/pyarrow-22.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:44729980b6c50a5f2bfcc2668d36c569ce17f8b17bccaf470c4313dcbbf13c9d", size = 27972883, upload-time = "2025-10-24T10:06:14.204Z" }, + { url = "https://files.pythonhosted.org/packages/a6/d6/d0fac16a2963002fc22c8fa75180a838737203d558f0ed3b564c4a54eef5/pyarrow-22.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:e6e95176209257803a8b3d0394f21604e796dadb643d2f7ca21b66c9c0b30c9a", size = 34204629, upload-time = "2025-10-24T10:06:20.274Z" }, + { url = "https://files.pythonhosted.org/packages/c6/9c/1d6357347fbae062ad3f17082f9ebc29cc733321e892c0d2085f42a2212b/pyarrow-22.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:001ea83a58024818826a9e3f89bf9310a114f7e26dfe404a4c32686f97bd7901", size = 35985783, upload-time = "2025-10-24T10:06:27.301Z" }, + { url = "https://files.pythonhosted.org/packages/ff/c0/782344c2ce58afbea010150df07e3a2f5fdad299cd631697ae7bd3bac6e3/pyarrow-22.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:ce20fe000754f477c8a9125543f1936ea5b8867c5406757c224d745ed033e691", size = 45020999, upload-time = "2025-10-24T10:06:35.387Z" }, + { url = "https://files.pythonhosted.org/packages/1b/8b/5362443737a5307a7b67c1017c42cd104213189b4970bf607e05faf9c525/pyarrow-22.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e0a15757fccb38c410947df156f9749ae4a3c89b2393741a50521f39a8cf202a", size = 47724601, upload-time = "2025-10-24T10:06:43.551Z" }, + { url = "https://files.pythonhosted.org/packages/69/4d/76e567a4fc2e190ee6072967cb4672b7d9249ac59ae65af2d7e3047afa3b/pyarrow-22.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cedb9dd9358e4ea1d9bce3665ce0797f6adf97ff142c8e25b46ba9cdd508e9b6", size = 48001050, upload-time = "2025-10-24T10:06:52.284Z" }, + { url = "https://files.pythonhosted.org/packages/01/5e/5653f0535d2a1aef8223cee9d92944cb6bccfee5cf1cd3f462d7cb022790/pyarrow-22.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:252be4a05f9d9185bb8c18e83764ebcfea7185076c07a7a662253af3a8c07941", size = 50307877, upload-time = "2025-10-24T10:07:02.405Z" }, + { url = "https://files.pythonhosted.org/packages/2d/f8/1d0bd75bf9328a3b826e24a16e5517cd7f9fbf8d34a3184a4566ef5a7f29/pyarrow-22.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:a4893d31e5ef780b6edcaf63122df0f8d321088bb0dee4c8c06eccb1ca28d145", size = 27977099, upload-time = "2025-10-24T10:08:07.259Z" }, + { url = "https://files.pythonhosted.org/packages/90/81/db56870c997805bf2b0f6eeeb2d68458bf4654652dccdcf1bf7a42d80903/pyarrow-22.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:f7fe3dbe871294ba70d789be16b6e7e52b418311e166e0e3cba9522f0f437fb1", size = 34336685, upload-time = "2025-10-24T10:07:11.47Z" }, + { url = "https://files.pythonhosted.org/packages/1c/98/0727947f199aba8a120f47dfc229eeb05df15bcd7a6f1b669e9f882afc58/pyarrow-22.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:ba95112d15fd4f1105fb2402c4eab9068f0554435e9b7085924bcfaac2cc306f", size = 36032158, upload-time = "2025-10-24T10:07:18.626Z" }, + { url = "https://files.pythonhosted.org/packages/96/b4/9babdef9c01720a0785945c7cf550e4acd0ebcd7bdd2e6f0aa7981fa85e2/pyarrow-22.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:c064e28361c05d72eed8e744c9605cbd6d2bb7481a511c74071fd9b24bc65d7d", size = 44892060, upload-time = "2025-10-24T10:07:26.002Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ca/2f8804edd6279f78a37062d813de3f16f29183874447ef6d1aadbb4efa0f/pyarrow-22.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:6f9762274496c244d951c819348afbcf212714902742225f649cf02823a6a10f", size = 47504395, upload-time = "2025-10-24T10:07:34.09Z" }, + { url = "https://files.pythonhosted.org/packages/b9/f0/77aa5198fd3943682b2e4faaf179a674f0edea0d55d326d83cb2277d9363/pyarrow-22.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a9d9ffdc2ab696f6b15b4d1f7cec6658e1d788124418cb30030afbae31c64746", size = 48066216, upload-time = "2025-10-24T10:07:43.528Z" }, + { url = "https://files.pythonhosted.org/packages/79/87/a1937b6e78b2aff18b706d738c9e46ade5bfcf11b294e39c87706a0089ac/pyarrow-22.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ec1a15968a9d80da01e1d30349b2b0d7cc91e96588ee324ce1b5228175043e95", size = 50288552, upload-time = "2025-10-24T10:07:53.519Z" }, + { url = "https://files.pythonhosted.org/packages/60/ae/b5a5811e11f25788ccfdaa8f26b6791c9807119dffcf80514505527c384c/pyarrow-22.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:bba208d9c7decf9961998edf5c65e3ea4355d5818dd6cd0f6809bec1afb951cc", size = 28262504, upload-time = "2025-10-24T10:08:00.932Z" }, + { url = "https://files.pythonhosted.org/packages/bd/b0/0fa4d28a8edb42b0a7144edd20befd04173ac79819547216f8a9f36f9e50/pyarrow-22.0.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:9bddc2cade6561f6820d4cd73f99a0243532ad506bc510a75a5a65a522b2d74d", size = 34224062, upload-time = "2025-10-24T10:08:14.101Z" }, + { url = "https://files.pythonhosted.org/packages/0f/a8/7a719076b3c1be0acef56a07220c586f25cd24de0e3f3102b438d18ae5df/pyarrow-22.0.0-cp314-cp314-macosx_12_0_x86_64.whl", hash = "sha256:e70ff90c64419709d38c8932ea9fe1cc98415c4f87ea8da81719e43f02534bc9", size = 35990057, upload-time = "2025-10-24T10:08:21.842Z" }, + { url = "https://files.pythonhosted.org/packages/89/3c/359ed54c93b47fb6fe30ed16cdf50e3f0e8b9ccfb11b86218c3619ae50a8/pyarrow-22.0.0-cp314-cp314-manylinux_2_28_aarch64.whl", hash = "sha256:92843c305330aa94a36e706c16209cd4df274693e777ca47112617db7d0ef3d7", size = 45068002, upload-time = "2025-10-24T10:08:29.034Z" }, + { url = "https://files.pythonhosted.org/packages/55/fc/4945896cc8638536ee787a3bd6ce7cec8ec9acf452d78ec39ab328efa0a1/pyarrow-22.0.0-cp314-cp314-manylinux_2_28_x86_64.whl", hash = "sha256:6dda1ddac033d27421c20d7a7943eec60be44e0db4e079f33cc5af3b8280ccde", size = 47737765, upload-time = "2025-10-24T10:08:38.559Z" }, + { url = "https://files.pythonhosted.org/packages/cd/5e/7cb7edeb2abfaa1f79b5d5eb89432356155c8426f75d3753cbcb9592c0fd/pyarrow-22.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:84378110dd9a6c06323b41b56e129c504d157d1a983ce8f5443761eb5256bafc", size = 48048139, upload-time = "2025-10-24T10:08:46.784Z" }, + { url = "https://files.pythonhosted.org/packages/88/c6/546baa7c48185f5e9d6e59277c4b19f30f48c94d9dd938c2a80d4d6b067c/pyarrow-22.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:854794239111d2b88b40b6ef92aa478024d1e5074f364033e73e21e3f76b25e0", size = 50314244, upload-time = "2025-10-24T10:08:55.771Z" }, + { url = "https://files.pythonhosted.org/packages/3c/79/755ff2d145aafec8d347bf18f95e4e81c00127f06d080135dfc86aea417c/pyarrow-22.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:b883fe6fd85adad7932b3271c38ac289c65b7337c2c132e9569f9d3940620730", size = 28757501, upload-time = "2025-10-24T10:09:59.891Z" }, + { url = "https://files.pythonhosted.org/packages/0e/d2/237d75ac28ced3147912954e3c1a174df43a95f4f88e467809118a8165e0/pyarrow-22.0.0-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:7a820d8ae11facf32585507c11f04e3f38343c1e784c9b5a8b1da5c930547fe2", size = 34355506, upload-time = "2025-10-24T10:09:02.953Z" }, + { url = "https://files.pythonhosted.org/packages/1e/2c/733dfffe6d3069740f98e57ff81007809067d68626c5faef293434d11bd6/pyarrow-22.0.0-cp314-cp314t-macosx_12_0_x86_64.whl", hash = "sha256:c6ec3675d98915bf1ec8b3c7986422682f7232ea76cad276f4c8abd5b7319b70", size = 36047312, upload-time = "2025-10-24T10:09:10.334Z" }, + { url = "https://files.pythonhosted.org/packages/7c/2b/29d6e3782dc1f299727462c1543af357a0f2c1d3c160ce199950d9ca51eb/pyarrow-22.0.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:3e739edd001b04f654b166204fc7a9de896cf6007eaff33409ee9e50ceaff754", size = 45081609, upload-time = "2025-10-24T10:09:18.61Z" }, + { url = "https://files.pythonhosted.org/packages/8d/42/aa9355ecc05997915af1b7b947a7f66c02dcaa927f3203b87871c114ba10/pyarrow-22.0.0-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:7388ac685cab5b279a41dfe0a6ccd99e4dbf322edfb63e02fc0443bf24134e91", size = 47703663, upload-time = "2025-10-24T10:09:27.369Z" }, + { url = "https://files.pythonhosted.org/packages/ee/62/45abedde480168e83a1de005b7b7043fd553321c1e8c5a9a114425f64842/pyarrow-22.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f633074f36dbc33d5c05b5dc75371e5660f1dbf9c8b1d95669def05e5425989c", size = 48066543, upload-time = "2025-10-24T10:09:34.908Z" }, + { url = "https://files.pythonhosted.org/packages/84/e9/7878940a5b072e4f3bf998770acafeae13b267f9893af5f6d4ab3904b67e/pyarrow-22.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:4c19236ae2402a8663a2c8f21f1870a03cc57f0bef7e4b6eb3238cc82944de80", size = 50288838, upload-time = "2025-10-24T10:09:44.394Z" }, + { url = "https://files.pythonhosted.org/packages/7b/03/f335d6c52b4a4761bcc83499789a1e2e16d9d201a58c327a9b5cc9a41bd9/pyarrow-22.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0c34fe18094686194f204a3b1787a27456897d8a2d62caf84b61e8dfbc0252ae", size = 29185594, upload-time = "2025-10-24T10:09:53.111Z" }, ] [[package]] @@ -2954,7 +3032,7 @@ wheels = [ [[package]] name = "pydantic" -version = "2.12.3" +version = "2.12.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-types" }, @@ -2962,76 +3040,80 @@ dependencies = [ { name = "typing-extensions" }, { name = "typing-inspection" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f3/1e/4f0a3233767010308f2fd6bd0814597e3f63f1dc98304a9112b8759df4ff/pydantic-2.12.3.tar.gz", hash = "sha256:1da1c82b0fc140bb0103bc1441ffe062154c8d38491189751ee00fd8ca65ce74", size = 819383, upload-time = "2025-10-17T15:04:21.222Z" } +sdist = { url = "https://files.pythonhosted.org/packages/96/ad/a17bc283d7d81837c061c49e3eaa27a45991759a1b7eae1031921c6bd924/pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac", size = 821038, upload-time = "2025-11-05T10:50:08.59Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a1/6b/83661fa77dcefa195ad5f8cd9af3d1a7450fd57cc883ad04d65446ac2029/pydantic-2.12.3-py3-none-any.whl", hash = "sha256:6986454a854bc3bc6e5443e1369e06a3a456af9d339eda45510f517d9ea5c6bf", size = 462431, upload-time = "2025-10-17T15:04:19.346Z" }, + { url = "https://files.pythonhosted.org/packages/82/2f/e68750da9b04856e2a7ec56fc6f034a5a79775e9b9a81882252789873798/pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e", size = 463400, upload-time = "2025-11-05T10:50:06.732Z" }, ] [[package]] name = "pydantic-core" -version = "2.41.4" +version = "2.41.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/df/18/d0944e8eaaa3efd0a91b0f1fc537d3be55ad35091b6a87638211ba691964/pydantic_core-2.41.4.tar.gz", hash = "sha256:70e47929a9d4a1905a67e4b687d5946026390568a8e952b92824118063cee4d5", size = 457557, upload-time = "2025-10-14T10:23:47.909Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e9/81/d3b3e95929c4369d30b2a66a91db63c8ed0a98381ae55a45da2cd1cc1288/pydantic_core-2.41.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ab06d77e053d660a6faaf04894446df7b0a7e7aba70c2797465a0a1af00fc887", size = 2099043, upload-time = "2025-10-14T10:20:28.561Z" }, - { url = "https://files.pythonhosted.org/packages/58/da/46fdac49e6717e3a94fc9201403e08d9d61aa7a770fab6190b8740749047/pydantic_core-2.41.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c53ff33e603a9c1179a9364b0a24694f183717b2e0da2b5ad43c316c956901b2", size = 1910699, upload-time = "2025-10-14T10:20:30.217Z" }, - { url = "https://files.pythonhosted.org/packages/1e/63/4d948f1b9dd8e991a5a98b77dd66c74641f5f2e5225fee37994b2e07d391/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:304c54176af2c143bd181d82e77c15c41cbacea8872a2225dd37e6544dce9999", size = 1952121, upload-time = "2025-10-14T10:20:32.246Z" }, - { url = "https://files.pythonhosted.org/packages/b2/a7/e5fc60a6f781fc634ecaa9ecc3c20171d238794cef69ae0af79ac11b89d7/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:025ba34a4cf4fb32f917d5d188ab5e702223d3ba603be4d8aca2f82bede432a4", size = 2041590, upload-time = "2025-10-14T10:20:34.332Z" }, - { url = "https://files.pythonhosted.org/packages/70/69/dce747b1d21d59e85af433428978a1893c6f8a7068fa2bb4a927fba7a5ff/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9f5f30c402ed58f90c70e12eff65547d3ab74685ffe8283c719e6bead8ef53f", size = 2219869, upload-time = "2025-10-14T10:20:35.965Z" }, - { url = "https://files.pythonhosted.org/packages/83/6a/c070e30e295403bf29c4df1cb781317b6a9bac7cd07b8d3acc94d501a63c/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd96e5d15385d301733113bcaa324c8bcf111275b7675a9c6e88bfb19fc05e3b", size = 2345169, upload-time = "2025-10-14T10:20:37.627Z" }, - { url = "https://files.pythonhosted.org/packages/f0/83/06d001f8043c336baea7fd202a9ac7ad71f87e1c55d8112c50b745c40324/pydantic_core-2.41.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f348cbb44fae6e9653c1055db7e29de67ea6a9ca03a5fa2c2e11a47cff0e47", size = 2070165, upload-time = "2025-10-14T10:20:39.246Z" }, - { url = "https://files.pythonhosted.org/packages/14/0a/e567c2883588dd12bcbc110232d892cf385356f7c8a9910311ac997ab715/pydantic_core-2.41.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec22626a2d14620a83ca583c6f5a4080fa3155282718b6055c2ea48d3ef35970", size = 2189067, upload-time = "2025-10-14T10:20:41.015Z" }, - { url = "https://files.pythonhosted.org/packages/f4/1d/3d9fca34273ba03c9b1c5289f7618bc4bd09c3ad2289b5420481aa051a99/pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a95d4590b1f1a43bf33ca6d647b990a88f4a3824a8c4572c708f0b45a5290ed", size = 2132997, upload-time = "2025-10-14T10:20:43.106Z" }, - { url = "https://files.pythonhosted.org/packages/52/70/d702ef7a6cd41a8afc61f3554922b3ed8d19dd54c3bd4bdbfe332e610827/pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:f9672ab4d398e1b602feadcffcdd3af44d5f5e6ddc15bc7d15d376d47e8e19f8", size = 2307187, upload-time = "2025-10-14T10:20:44.849Z" }, - { url = "https://files.pythonhosted.org/packages/68/4c/c06be6e27545d08b802127914156f38d10ca287a9e8489342793de8aae3c/pydantic_core-2.41.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:84d8854db5f55fead3b579f04bda9a36461dab0730c5d570e1526483e7bb8431", size = 2305204, upload-time = "2025-10-14T10:20:46.781Z" }, - { url = "https://files.pythonhosted.org/packages/b0/e5/35ae4919bcd9f18603419e23c5eaf32750224a89d41a8df1a3704b69f77e/pydantic_core-2.41.4-cp312-cp312-win32.whl", hash = "sha256:9be1c01adb2ecc4e464392c36d17f97e9110fbbc906bcbe1c943b5b87a74aabd", size = 1972536, upload-time = "2025-10-14T10:20:48.39Z" }, - { url = "https://files.pythonhosted.org/packages/1e/c2/49c5bb6d2a49eb2ee3647a93e3dae7080c6409a8a7558b075027644e879c/pydantic_core-2.41.4-cp312-cp312-win_amd64.whl", hash = "sha256:d682cf1d22bab22a5be08539dca3d1593488a99998f9f412137bc323179067ff", size = 2031132, upload-time = "2025-10-14T10:20:50.421Z" }, - { url = "https://files.pythonhosted.org/packages/06/23/936343dbcba6eec93f73e95eb346810fc732f71ba27967b287b66f7b7097/pydantic_core-2.41.4-cp312-cp312-win_arm64.whl", hash = "sha256:833eebfd75a26d17470b58768c1834dfc90141b7afc6eb0429c21fc5a21dcfb8", size = 1969483, upload-time = "2025-10-14T10:20:52.35Z" }, - { url = "https://files.pythonhosted.org/packages/13/d0/c20adabd181a029a970738dfe23710b52a31f1258f591874fcdec7359845/pydantic_core-2.41.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:85e050ad9e5f6fe1004eec65c914332e52f429bc0ae12d6fa2092407a462c746", size = 2105688, upload-time = "2025-10-14T10:20:54.448Z" }, - { url = "https://files.pythonhosted.org/packages/00/b6/0ce5c03cec5ae94cca220dfecddc453c077d71363b98a4bbdb3c0b22c783/pydantic_core-2.41.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7393f1d64792763a48924ba31d1e44c2cfbc05e3b1c2c9abb4ceeadd912cced", size = 1910807, upload-time = "2025-10-14T10:20:56.115Z" }, - { url = "https://files.pythonhosted.org/packages/68/3e/800d3d02c8beb0b5c069c870cbb83799d085debf43499c897bb4b4aaff0d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94dab0940b0d1fb28bcab847adf887c66a27a40291eedf0b473be58761c9799a", size = 1956669, upload-time = "2025-10-14T10:20:57.874Z" }, - { url = "https://files.pythonhosted.org/packages/60/a4/24271cc71a17f64589be49ab8bd0751f6a0a03046c690df60989f2f95c2c/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de7c42f897e689ee6f9e93c4bec72b99ae3b32a2ade1c7e4798e690ff5246e02", size = 2051629, upload-time = "2025-10-14T10:21:00.006Z" }, - { url = "https://files.pythonhosted.org/packages/68/de/45af3ca2f175d91b96bfb62e1f2d2f1f9f3b14a734afe0bfeff079f78181/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:664b3199193262277b8b3cd1e754fb07f2c6023289c815a1e1e8fb415cb247b1", size = 2224049, upload-time = "2025-10-14T10:21:01.801Z" }, - { url = "https://files.pythonhosted.org/packages/af/8f/ae4e1ff84672bf869d0a77af24fd78387850e9497753c432875066b5d622/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95b253b88f7d308b1c0b417c4624f44553ba4762816f94e6986819b9c273fb2", size = 2342409, upload-time = "2025-10-14T10:21:03.556Z" }, - { url = "https://files.pythonhosted.org/packages/18/62/273dd70b0026a085c7b74b000394e1ef95719ea579c76ea2f0cc8893736d/pydantic_core-2.41.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a1351f5bbdbbabc689727cb91649a00cb9ee7203e0a6e54e9f5ba9e22e384b84", size = 2069635, upload-time = "2025-10-14T10:21:05.385Z" }, - { url = "https://files.pythonhosted.org/packages/30/03/cf485fff699b4cdaea469bc481719d3e49f023241b4abb656f8d422189fc/pydantic_core-2.41.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1affa4798520b148d7182da0615d648e752de4ab1a9566b7471bc803d88a062d", size = 2194284, upload-time = "2025-10-14T10:21:07.122Z" }, - { url = "https://files.pythonhosted.org/packages/f9/7e/c8e713db32405dfd97211f2fc0a15d6bf8adb7640f3d18544c1f39526619/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7b74e18052fea4aa8dea2fb7dbc23d15439695da6cbe6cfc1b694af1115df09d", size = 2137566, upload-time = "2025-10-14T10:21:08.981Z" }, - { url = "https://files.pythonhosted.org/packages/04/f7/db71fd4cdccc8b75990f79ccafbbd66757e19f6d5ee724a6252414483fb4/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:285b643d75c0e30abda9dc1077395624f314a37e3c09ca402d4015ef5979f1a2", size = 2316809, upload-time = "2025-10-14T10:21:10.805Z" }, - { url = "https://files.pythonhosted.org/packages/76/63/a54973ddb945f1bca56742b48b144d85c9fc22f819ddeb9f861c249d5464/pydantic_core-2.41.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f52679ff4218d713b3b33f88c89ccbf3a5c2c12ba665fb80ccc4192b4608dbab", size = 2311119, upload-time = "2025-10-14T10:21:12.583Z" }, - { url = "https://files.pythonhosted.org/packages/f8/03/5d12891e93c19218af74843a27e32b94922195ded2386f7b55382f904d2f/pydantic_core-2.41.4-cp313-cp313-win32.whl", hash = "sha256:ecde6dedd6fff127c273c76821bb754d793be1024bc33314a120f83a3c69460c", size = 1981398, upload-time = "2025-10-14T10:21:14.584Z" }, - { url = "https://files.pythonhosted.org/packages/be/d8/fd0de71f39db91135b7a26996160de71c073d8635edfce8b3c3681be0d6d/pydantic_core-2.41.4-cp313-cp313-win_amd64.whl", hash = "sha256:d081a1f3800f05409ed868ebb2d74ac39dd0c1ff6c035b5162356d76030736d4", size = 2030735, upload-time = "2025-10-14T10:21:16.432Z" }, - { url = "https://files.pythonhosted.org/packages/72/86/c99921c1cf6650023c08bfab6fe2d7057a5142628ef7ccfa9921f2dda1d5/pydantic_core-2.41.4-cp313-cp313-win_arm64.whl", hash = "sha256:f8e49c9c364a7edcbe2a310f12733aad95b022495ef2a8d653f645e5d20c1564", size = 1973209, upload-time = "2025-10-14T10:21:18.213Z" }, - { url = "https://files.pythonhosted.org/packages/36/0d/b5706cacb70a8414396efdda3d72ae0542e050b591119e458e2490baf035/pydantic_core-2.41.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ed97fd56a561f5eb5706cebe94f1ad7c13b84d98312a05546f2ad036bafe87f4", size = 1877324, upload-time = "2025-10-14T10:21:20.363Z" }, - { url = "https://files.pythonhosted.org/packages/de/2d/cba1fa02cfdea72dfb3a9babb067c83b9dff0bbcb198368e000a6b756ea7/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a870c307bf1ee91fc58a9a61338ff780d01bfae45922624816878dce784095d2", size = 1884515, upload-time = "2025-10-14T10:21:22.339Z" }, - { url = "https://files.pythonhosted.org/packages/07/ea/3df927c4384ed9b503c9cc2d076cf983b4f2adb0c754578dfb1245c51e46/pydantic_core-2.41.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25e97bc1f5f8f7985bdc2335ef9e73843bb561eb1fa6831fdfc295c1c2061cf", size = 2042819, upload-time = "2025-10-14T10:21:26.683Z" }, - { url = "https://files.pythonhosted.org/packages/6a/ee/df8e871f07074250270a3b1b82aad4cd0026b588acd5d7d3eb2fcb1471a3/pydantic_core-2.41.4-cp313-cp313t-win_amd64.whl", hash = "sha256:d405d14bea042f166512add3091c1af40437c2e7f86988f3915fabd27b1e9cd2", size = 1995866, upload-time = "2025-10-14T10:21:28.951Z" }, - { url = "https://files.pythonhosted.org/packages/fc/de/b20f4ab954d6d399499c33ec4fafc46d9551e11dc1858fb7f5dca0748ceb/pydantic_core-2.41.4-cp313-cp313t-win_arm64.whl", hash = "sha256:19f3684868309db5263a11bace3c45d93f6f24afa2ffe75a647583df22a2ff89", size = 1970034, upload-time = "2025-10-14T10:21:30.869Z" }, - { url = "https://files.pythonhosted.org/packages/54/28/d3325da57d413b9819365546eb9a6e8b7cbd9373d9380efd5f74326143e6/pydantic_core-2.41.4-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:e9205d97ed08a82ebb9a307e92914bb30e18cdf6f6b12ca4bedadb1588a0bfe1", size = 2102022, upload-time = "2025-10-14T10:21:32.809Z" }, - { url = "https://files.pythonhosted.org/packages/9e/24/b58a1bc0d834bf1acc4361e61233ee217169a42efbdc15a60296e13ce438/pydantic_core-2.41.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:82df1f432b37d832709fbcc0e24394bba04a01b6ecf1ee87578145c19cde12ac", size = 1905495, upload-time = "2025-10-14T10:21:34.812Z" }, - { url = "https://files.pythonhosted.org/packages/fb/a4/71f759cc41b7043e8ecdaab81b985a9b6cad7cec077e0b92cff8b71ecf6b/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3b4cc4539e055cfa39a3763c939f9d409eb40e85813257dcd761985a108554", size = 1956131, upload-time = "2025-10-14T10:21:36.924Z" }, - { url = "https://files.pythonhosted.org/packages/b0/64/1e79ac7aa51f1eec7c4cda8cbe456d5d09f05fdd68b32776d72168d54275/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b1eb1754fce47c63d2ff57fdb88c351a6c0150995890088b33767a10218eaa4e", size = 2052236, upload-time = "2025-10-14T10:21:38.927Z" }, - { url = "https://files.pythonhosted.org/packages/e9/e3/a3ffc363bd4287b80f1d43dc1c28ba64831f8dfc237d6fec8f2661138d48/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6ab5ab30ef325b443f379ddb575a34969c333004fca5a1daa0133a6ffaad616", size = 2223573, upload-time = "2025-10-14T10:21:41.574Z" }, - { url = "https://files.pythonhosted.org/packages/28/27/78814089b4d2e684a9088ede3790763c64693c3d1408ddc0a248bc789126/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:31a41030b1d9ca497634092b46481b937ff9397a86f9f51bd41c4767b6fc04af", size = 2342467, upload-time = "2025-10-14T10:21:44.018Z" }, - { url = "https://files.pythonhosted.org/packages/92/97/4de0e2a1159cb85ad737e03306717637842c88c7fd6d97973172fb183149/pydantic_core-2.41.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a44ac1738591472c3d020f61c6df1e4015180d6262ebd39bf2aeb52571b60f12", size = 2063754, upload-time = "2025-10-14T10:21:46.466Z" }, - { url = "https://files.pythonhosted.org/packages/0f/50/8cb90ce4b9efcf7ae78130afeb99fd1c86125ccdf9906ef64b9d42f37c25/pydantic_core-2.41.4-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d72f2b5e6e82ab8f94ea7d0d42f83c487dc159c5240d8f83beae684472864e2d", size = 2196754, upload-time = "2025-10-14T10:21:48.486Z" }, - { url = "https://files.pythonhosted.org/packages/34/3b/ccdc77af9cd5082723574a1cc1bcae7a6acacc829d7c0a06201f7886a109/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c4d1e854aaf044487d31143f541f7aafe7b482ae72a022c664b2de2e466ed0ad", size = 2137115, upload-time = "2025-10-14T10:21:50.63Z" }, - { url = "https://files.pythonhosted.org/packages/ca/ba/e7c7a02651a8f7c52dc2cff2b64a30c313e3b57c7d93703cecea76c09b71/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:b568af94267729d76e6ee5ececda4e283d07bbb28e8148bb17adad93d025d25a", size = 2317400, upload-time = "2025-10-14T10:21:52.959Z" }, - { url = "https://files.pythonhosted.org/packages/2c/ba/6c533a4ee8aec6b812c643c49bb3bd88d3f01e3cebe451bb85512d37f00f/pydantic_core-2.41.4-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:6d55fb8b1e8929b341cc313a81a26e0d48aa3b519c1dbaadec3a6a2b4fcad025", size = 2312070, upload-time = "2025-10-14T10:21:55.419Z" }, - { url = "https://files.pythonhosted.org/packages/22/ae/f10524fcc0ab8d7f96cf9a74c880243576fd3e72bd8ce4f81e43d22bcab7/pydantic_core-2.41.4-cp314-cp314-win32.whl", hash = "sha256:5b66584e549e2e32a1398df11da2e0a7eff45d5c2d9db9d5667c5e6ac764d77e", size = 1982277, upload-time = "2025-10-14T10:21:57.474Z" }, - { url = "https://files.pythonhosted.org/packages/b4/dc/e5aa27aea1ad4638f0c3fb41132f7eb583bd7420ee63204e2d4333a3bbf9/pydantic_core-2.41.4-cp314-cp314-win_amd64.whl", hash = "sha256:557a0aab88664cc552285316809cab897716a372afaf8efdbef756f8b890e894", size = 2024608, upload-time = "2025-10-14T10:21:59.557Z" }, - { url = "https://files.pythonhosted.org/packages/3e/61/51d89cc2612bd147198e120a13f150afbf0bcb4615cddb049ab10b81b79e/pydantic_core-2.41.4-cp314-cp314-win_arm64.whl", hash = "sha256:3f1ea6f48a045745d0d9f325989d8abd3f1eaf47dd00485912d1a3a63c623a8d", size = 1967614, upload-time = "2025-10-14T10:22:01.847Z" }, - { url = "https://files.pythonhosted.org/packages/0d/c2/472f2e31b95eff099961fa050c376ab7156a81da194f9edb9f710f68787b/pydantic_core-2.41.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6c1fe4c5404c448b13188dd8bd2ebc2bdd7e6727fa61ff481bcc2cca894018da", size = 1876904, upload-time = "2025-10-14T10:22:04.062Z" }, - { url = "https://files.pythonhosted.org/packages/4a/07/ea8eeb91173807ecdae4f4a5f4b150a520085b35454350fc219ba79e66a3/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:523e7da4d43b113bf8e7b49fa4ec0c35bf4fe66b2230bfc5c13cc498f12c6c3e", size = 1882538, upload-time = "2025-10-14T10:22:06.39Z" }, - { url = "https://files.pythonhosted.org/packages/1e/29/b53a9ca6cd366bfc928823679c6a76c7a4c69f8201c0ba7903ad18ebae2f/pydantic_core-2.41.4-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5729225de81fb65b70fdb1907fcf08c75d498f4a6f15af005aabb1fdadc19dfa", size = 2041183, upload-time = "2025-10-14T10:22:08.812Z" }, - { url = "https://files.pythonhosted.org/packages/c7/3d/f8c1a371ceebcaf94d6dd2d77c6cf4b1c078e13a5837aee83f760b4f7cfd/pydantic_core-2.41.4-cp314-cp314t-win_amd64.whl", hash = "sha256:de2cfbb09e88f0f795fd90cf955858fc2c691df65b1f21f0aa00b99f3fbc661d", size = 1993542, upload-time = "2025-10-14T10:22:11.332Z" }, - { url = "https://files.pythonhosted.org/packages/8a/ac/9fc61b4f9d079482a290afe8d206b8f490e9fd32d4fc03ed4fc698214e01/pydantic_core-2.41.4-cp314-cp314t-win_arm64.whl", hash = "sha256:d34f950ae05a83e0ede899c595f312ca976023ea1db100cd5aa188f7005e3ab0", size = 1973897, upload-time = "2025-10-14T10:22:13.444Z" }, - { url = "https://files.pythonhosted.org/packages/c4/48/ae937e5a831b7c0dc646b2ef788c27cd003894882415300ed21927c21efa/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:4f5d640aeebb438517150fdeec097739614421900e4a08db4a3ef38898798537", size = 2112087, upload-time = "2025-10-14T10:22:56.818Z" }, - { url = "https://files.pythonhosted.org/packages/5e/db/6db8073e3d32dae017da7e0d16a9ecb897d0a4d92e00634916e486097961/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:4a9ab037b71927babc6d9e7fc01aea9e66dc2a4a34dff06ef0724a4049629f94", size = 1920387, upload-time = "2025-10-14T10:22:59.342Z" }, - { url = "https://files.pythonhosted.org/packages/0d/c1/dd3542d072fcc336030d66834872f0328727e3b8de289c662faa04aa270e/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4dab9484ec605c3016df9ad4fd4f9a390bc5d816a3b10c6550f8424bb80b18c", size = 1951495, upload-time = "2025-10-14T10:23:02.089Z" }, - { url = "https://files.pythonhosted.org/packages/2b/c6/db8d13a1f8ab3f1eb08c88bd00fd62d44311e3456d1e85c0e59e0a0376e7/pydantic_core-2.41.4-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8a5028425820731d8c6c098ab642d7b8b999758e24acae03ed38a66eca8335", size = 2139008, upload-time = "2025-10-14T10:23:04.539Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = "https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, ] [[package]] @@ -3085,43 +3167,43 @@ wheels = [ [[package]] name = "pyproject-fmt" -version = "2.11.0" +version = "2.11.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "toml-fmt-common" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a8/ef/7ad92c11631fbb147f048f1549d0304c8c41f4514698fa69ff1fbad23797/pyproject_fmt-2.11.0.tar.gz", hash = "sha256:24d2370fccbc29a7d696479ab0792ff81b188accabf399e0b35fd3c433780d1f", size = 46894, upload-time = "2025-10-15T07:05:13.692Z" } +sdist = { url = "https://files.pythonhosted.org/packages/06/6c/f1028a52b5ba3b6ec89479116ed772a768bf9db719b80b614a4d730999c8/pyproject_fmt-2.11.1.tar.gz", hash = "sha256:86f4ebc71d658b848bd14da5f2d2f156a238687e5c9adc0e787ecbf925fd24b1", size = 47310, upload-time = "2025-11-05T12:53:53.406Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/66/69/5ce9f6707a609c2f24f7d5f37a1e3491fd716de782523c7d437534e5c7e8/pyproject_fmt-2.11.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:10fa7a48ad2bb7ba625aade29458da4b8881e3fd17493778858eadf4580b47b6", size = 1268375, upload-time = "2025-10-15T07:04:51.577Z" }, - { url = "https://files.pythonhosted.org/packages/60/05/460d052864473736c4c22f7ae4700fd399d8b09211fa12da9b556614ef3d/pyproject_fmt-2.11.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:d40cca8175e7a0a786fa5164660fcb81a485cf3c16661b7613da5b45cf873c05", size = 1203589, upload-time = "2025-10-15T07:04:53.402Z" }, - { url = "https://files.pythonhosted.org/packages/33/b1/bfbae99403fd848832ecddb105a911d5077e098a2f0befe2f56581c1aaa7/pyproject_fmt-2.11.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d58d0408ea61613c368eef53ae9529bcf9b515cec2dd8d4d7cd66245b210d2", size = 1262489, upload-time = "2025-10-15T07:04:54.86Z" }, - { url = "https://files.pythonhosted.org/packages/e3/33/a231655de9bb90e80ad122504d98e007c3fc1bb90cb9d16287e4b07a02f7/pyproject_fmt-2.11.0-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15170fb8bb5210776fb454a94f886cf689d878e1c0e638542866fbfbcc03ac6f", size = 1221299, upload-time = "2025-10-15T07:04:56.275Z" }, - { url = "https://files.pythonhosted.org/packages/60/01/5fa267f7aaf0cf34a1168c3d1e794fad79c8c72d12a6c6e332c9e5c1965b/pyproject_fmt-2.11.0-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33dca45403260cd638f1053892105420c36ce849bb57b1170d147c33becddc3b", size = 1534025, upload-time = "2025-10-15T07:04:57.497Z" }, - { url = "https://files.pythonhosted.org/packages/38/c1/33f5b15485257f14a24250be072c4f85673f92b5e0aa04316dda81abefd3/pyproject_fmt-2.11.0-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e69ecc2b093c205587774aefa20b3921daa20497501668bcdce147a833c24b15", size = 1421465, upload-time = "2025-10-15T07:04:59.121Z" }, - { url = "https://files.pythonhosted.org/packages/9d/fd/97b19890f8ef519918c131512cc30b62ae76a1c9e22e450c06c7d2e31112/pyproject_fmt-2.11.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90d48d44ea5cc1b039c3be32844c7f3bdb83429a0c15a295d4f2009ef6573c6", size = 1370545, upload-time = "2025-10-15T07:05:00.34Z" }, - { url = "https://files.pythonhosted.org/packages/43/bd/cf541f37ceef41149ea3c53d0a19e58ee9af86f6b9e5221e3ccc99ffb635/pyproject_fmt-2.11.0-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:deaa68f27c1cd02d6bbff6fe7c6e5106285a8693fb09b55101d3f8921a62c8c5", size = 1376150, upload-time = "2025-10-15T07:05:01.597Z" }, - { url = "https://files.pythonhosted.org/packages/e6/f0/b6e46f9907feebe16427d870576fca7b1b6796764cf24174b209a6be7c85/pyproject_fmt-2.11.0-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:3e524725f4dfbfeebeac330407b52f473d0a82c031d270e94de5ab3503c01ad9", size = 1507505, upload-time = "2025-10-15T07:05:02.886Z" }, - { url = "https://files.pythonhosted.org/packages/d0/9e/7c754d068ff4321e7a24c25375e45fe05364d4d97e623a883c5f0d6981f6/pyproject_fmt-2.11.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6349df395f8f31210fa31996462182256ead3c4cf396f3779850e87500320230", size = 1543867, upload-time = "2025-10-15T07:05:04.133Z" }, - { url = "https://files.pythonhosted.org/packages/36/7f/181005917615c27a315edbd75ef1e8c9d3e22564530de6ca33e701db27ef/pyproject_fmt-2.11.0-cp39-abi3-win32.whl", hash = "sha256:6b74f58146540d47c774dddd5014b38c605bdfad47f334ca5b89f9a3ac8f514c", size = 1116975, upload-time = "2025-10-15T07:05:05.642Z" }, - { url = "https://files.pythonhosted.org/packages/10/39/0c69c7329f2c021f18b897c19df707e915a2ab44abdaf60b620c784a3621/pyproject_fmt-2.11.0-cp39-abi3-win_amd64.whl", hash = "sha256:14875b58e0004f6c0382a2e5ac3d0854e57abdfae7571f9f2612498ae6a8e462", size = 1223127, upload-time = "2025-10-15T07:05:07.237Z" }, + { url = "https://files.pythonhosted.org/packages/e1/e8/0d70dec1e031d641b21244db586dd88c62fd188413f2b3f018eb490fe77d/pyproject_fmt-2.11.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:a2dc64e7d048f32b504504fa1ed3285c81dcf7d97e014b382ede8e437b42855a", size = 1273183, upload-time = "2025-11-05T12:53:31.309Z" }, + { url = "https://files.pythonhosted.org/packages/e1/1e/51a262dba55a701c302f753ad716b1bb0bc8874d32dd3a862dffb85537e2/pyproject_fmt-2.11.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:f9950376a9996f07b2b58b8b2ad64023f404f73c2cbc99c216b1add6f33f6cee", size = 1206593, upload-time = "2025-11-05T12:53:33.523Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ba/a2f72a5f900aa66c1ff878550992fb0513a10e17b06f665246be5c45899e/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3d38b570bdeabe7b3b27e7aa2798b1091b7259c4ca4080de83e5145ba65b11e", size = 1268936, upload-time = "2025-11-05T12:53:34.853Z" }, + { url = "https://files.pythonhosted.org/packages/90/1f/32d76300e036af6828df5cbc41bf86ce7974128778e54a2eded9a92c7b42/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2679527bcbd973f1fc1b0fb31ca84455c3fa10199e776184ff125cd6b5157392", size = 1225568, upload-time = "2025-11-05T12:53:36.559Z" }, + { url = "https://files.pythonhosted.org/packages/e9/0b/2bdac7f9f7cddcd8096af44338548f1b7d5b797e3bcee27831c3752c9168/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97b6ba9923975667fab130c23bfd8ead66c4cdea4b66ae238de860a06afbb108", size = 1539351, upload-time = "2025-11-05T12:53:37.836Z" }, + { url = "https://files.pythonhosted.org/packages/06/fc/48b4932570097a08ed6abc3a7455aacf9a15271ff0099c33d48e7f745eaa/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b16ce0874ef2aee219a2c0dacd7c0ce374562c19937bd9c767093ade91e5e452", size = 1429957, upload-time = "2025-11-05T12:53:39.382Z" }, + { url = "https://files.pythonhosted.org/packages/f7/8d/52f52e039e5e1cfb33cf0f79651edd4d8ff7f6a83d1fb5dddf19bca9993a/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2daf29e4958c310c27ce7750741ef60f79b2f4164df26b1f2bdd063f2beddf4c", size = 1375776, upload-time = "2025-11-05T12:53:40.659Z" }, + { url = "https://files.pythonhosted.org/packages/3b/24/bab927c42d88befbb063b229b44c9ce9b8a894f650ca14348969858878f5/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:44b1edad216b33817d2651a15fb2793807fd7c9cfff1ce66d565c4885b89640e", size = 1379396, upload-time = "2025-11-05T12:53:41.857Z" }, + { url = "https://files.pythonhosted.org/packages/09/fe/b98c2156775067e079ca8f2badbe93a5de431ccc061435534b76f11abc73/pyproject_fmt-2.11.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:08ccf565172179fc7f35a90f4541f68abcdbef7e7a4ea35fcead44f8cabe3e3a", size = 1506485, upload-time = "2025-11-05T12:53:43.108Z" }, + { url = "https://files.pythonhosted.org/packages/8e/2f/bf0df9df04a1376d6d1dad6fc49eb41ffafe0c3e63565b2cde8b67a49886/pyproject_fmt-2.11.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:27a9af1fc8d2173deb7a0bbb8c368a585e7817bcbba6acf00922b73c76c8ee23", size = 1546050, upload-time = "2025-11-05T12:53:44.491Z" }, + { url = "https://files.pythonhosted.org/packages/a8/e3/b4e79b486ed8de2e78c18025037512a3474df0f0064f641ef7ebdda54a1c/pyproject_fmt-2.11.1-cp39-abi3-win32.whl", hash = "sha256:0abae947f93cca80108675c025cb67b96a434f7a33148e3f7945e3009db0d073", size = 1123362, upload-time = "2025-11-05T12:53:46.637Z" }, + { url = "https://files.pythonhosted.org/packages/94/73/fed4e436f7afaa12d3f12d1943aa18524d703bd3df8c0a40f2bc58377819/pyproject_fmt-2.11.1-cp39-abi3-win_amd64.whl", hash = "sha256:5bf986b016eb157b30531d0f1036430023db0195cf2d6fd24e4b43cbc02c0da5", size = 1229915, upload-time = "2025-11-05T12:53:47.992Z" }, ] [[package]] name = "pyright" -version = "1.1.406" +version = "1.1.407" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nodeenv" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f7/16/6b4fbdd1fef59a0292cbb99f790b44983e390321eccbc5921b4d161da5d1/pyright-1.1.406.tar.gz", hash = "sha256:c4872bc58c9643dac09e8a2e74d472c62036910b3bd37a32813989ef7576ea2c", size = 4113151, upload-time = "2025-10-02T01:04:45.488Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/1b/0aa08ee42948b61745ac5b5b5ccaec4669e8884b53d31c8ec20b2fcd6b6f/pyright-1.1.407.tar.gz", hash = "sha256:099674dba5c10489832d4a4b2d302636152a9a42d317986c38474c76fe562262", size = 4122872, upload-time = "2025-10-24T23:17:15.145Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f6/a2/e309afbb459f50507103793aaef85ca4348b66814c86bc73908bdeb66d12/pyright-1.1.406-py3-none-any.whl", hash = "sha256:1d81fb43c2407bf566e97e57abb01c811973fdb21b2df8df59f870f688bdca71", size = 5980982, upload-time = "2025-10-02T01:04:43.137Z" }, + { url = "https://files.pythonhosted.org/packages/dc/93/b69052907d032b00c40cb656d21438ec00b3a471733de137a3f65a49a0a0/pyright-1.1.407-py3-none-any.whl", hash = "sha256:6dd419f54fcc13f03b52285796d65e639786373f433e243f8b94cf93a7444d21", size = 5997008, upload-time = "2025-10-24T23:17:13.159Z" }, ] [[package]] name = "pytest" -version = "8.4.2" +version = "9.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -3130,9 +3212,9 @@ dependencies = [ { name = "pluggy" }, { name = "pygments" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +sdist = { url = "https://files.pythonhosted.org/packages/07/56/f013048ac4bc4c1d9be45afd4ab209ea62822fb1598f40687e6bf45dcea4/pytest-9.0.1.tar.gz", hash = "sha256:3e9c069ea73583e255c3b21cf46b8d3c56f6e3a1a8f6da94ccb0fcf57b9d73c8", size = 1564125, upload-time = "2025-11-12T13:05:09.333Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, + { url = "https://files.pythonhosted.org/packages/0b/8b/6300fb80f858cda1c51ffa17075df5d846757081d11ab4aa35cef9e6258b/pytest-9.0.1-py3-none-any.whl", hash = "sha256:67be0030d194df2dfa7b556f2e56fb3c3315bd5c8822c6951162b92b32ce7dad", size = 373668, upload-time = "2025-11-12T13:05:07.379Z" }, ] [[package]] @@ -3188,11 +3270,11 @@ wheels = [ [[package]] name = "python-dotenv" -version = "1.1.1" +version = "1.2.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" }, + { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" }, ] [[package]] @@ -3291,80 +3373,80 @@ wheels = [ [[package]] name = "regex" -version = "2025.10.23" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f8/c8/1d2160d36b11fbe0a61acb7c3c81ab032d9ec8ad888ac9e0a61b85ab99dd/regex-2025.10.23.tar.gz", hash = "sha256:8cbaf8ceb88f96ae2356d01b9adf5e6306fa42fa6f7eab6b97794e37c959ac26", size = 401266, upload-time = "2025-10-21T15:58:20.23Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f6/57/eeb274d83ab189d02d778851b1ac478477522a92b52edfa6e2ae9ff84679/regex-2025.10.23-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:7a44d9c00f7a0a02d3b777429281376370f3d13d2c75ae74eb94e11ebcf4a7fc", size = 489187, upload-time = "2025-10-21T15:55:18.322Z" }, - { url = "https://files.pythonhosted.org/packages/55/5c/7dad43a9b6ea88bf77e0b8b7729a4c36978e1043165034212fd2702880c6/regex-2025.10.23-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b83601f84fde939ae3478bb32a3aef36f61b58c3208d825c7e8ce1a735f143f2", size = 291122, upload-time = "2025-10-21T15:55:20.2Z" }, - { url = "https://files.pythonhosted.org/packages/66/21/38b71e6f2818f0f4b281c8fba8d9d57cfca7b032a648fa59696e0a54376a/regex-2025.10.23-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ec13647907bb9d15fd192bbfe89ff06612e098a5709e7d6ecabbdd8f7908fc45", size = 288797, upload-time = "2025-10-21T15:55:21.932Z" }, - { url = "https://files.pythonhosted.org/packages/be/95/888f069c89e7729732a6d7cca37f76b44bfb53a1e35dda8a2c7b65c1b992/regex-2025.10.23-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:78d76dd2957d62501084e7012ddafc5fcd406dd982b7a9ca1ea76e8eaaf73e7e", size = 798442, upload-time = "2025-10-21T15:55:23.747Z" }, - { url = "https://files.pythonhosted.org/packages/76/70/4f903c608faf786627a8ee17c06e0067b5acade473678b69c8094b248705/regex-2025.10.23-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8668e5f067e31a47699ebb354f43aeb9c0ef136f915bd864243098524482ac43", size = 864039, upload-time = "2025-10-21T15:55:25.656Z" }, - { url = "https://files.pythonhosted.org/packages/62/19/2df67b526bf25756c7f447dde554fc10a220fd839cc642f50857d01e4a7b/regex-2025.10.23-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a32433fe3deb4b2d8eda88790d2808fed0dc097e84f5e683b4cd4f42edef6cca", size = 912057, upload-time = "2025-10-21T15:55:27.309Z" }, - { url = "https://files.pythonhosted.org/packages/99/14/9a39b7c9e007968411bc3c843cc14cf15437510c0a9991f080cab654fd16/regex-2025.10.23-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d97d73818c642c938db14c0668167f8d39520ca9d983604575ade3fda193afcc", size = 803374, upload-time = "2025-10-21T15:55:28.9Z" }, - { url = "https://files.pythonhosted.org/packages/d4/f7/3495151dd3ca79949599b6d069b72a61a2c5e24fc441dccc79dcaf708fe6/regex-2025.10.23-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bca7feecc72ee33579e9f6ddf8babbe473045717a0e7dbc347099530f96e8b9a", size = 787714, upload-time = "2025-10-21T15:55:30.628Z" }, - { url = "https://files.pythonhosted.org/packages/28/65/ee882455e051131869957ee8597faea45188c9a98c0dad724cfb302d4580/regex-2025.10.23-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7e24af51e907d7457cc4a72691ec458320b9ae67dc492f63209f01eecb09de32", size = 858392, upload-time = "2025-10-21T15:55:32.322Z" }, - { url = "https://files.pythonhosted.org/packages/53/25/9287fef5be97529ebd3ac79d256159cb709a07eb58d4be780d1ca3885da8/regex-2025.10.23-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d10bcde58bbdf18146f3a69ec46dd03233b94a4a5632af97aa5378da3a47d288", size = 850484, upload-time = "2025-10-21T15:55:34.037Z" }, - { url = "https://files.pythonhosted.org/packages/f3/b4/b49b88b4fea2f14dc73e5b5842755e782fc2e52f74423d6f4adc130d5880/regex-2025.10.23-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:44383bc0c933388516c2692c9a7503e1f4a67e982f20b9a29d2fb70c6494f147", size = 789634, upload-time = "2025-10-21T15:55:35.958Z" }, - { url = "https://files.pythonhosted.org/packages/b6/3c/2f8d199d0e84e78bcd6bdc2be9b62410624f6b796e2893d1837ae738b160/regex-2025.10.23-cp312-cp312-win32.whl", hash = "sha256:6040a86f95438a0114bba16e51dfe27f1bc004fd29fe725f54a586f6d522b079", size = 266060, upload-time = "2025-10-21T15:55:37.902Z" }, - { url = "https://files.pythonhosted.org/packages/d7/67/c35e80969f6ded306ad70b0698863310bdf36aca57ad792f45ddc0e2271f/regex-2025.10.23-cp312-cp312-win_amd64.whl", hash = "sha256:436b4c4352fe0762e3bfa34a5567079baa2ef22aa9c37cf4d128979ccfcad842", size = 276931, upload-time = "2025-10-21T15:55:39.502Z" }, - { url = "https://files.pythonhosted.org/packages/f5/a1/4ed147de7d2b60174f758412c87fa51ada15cd3296a0ff047f4280aaa7ca/regex-2025.10.23-cp312-cp312-win_arm64.whl", hash = "sha256:f4b1b1991617055b46aff6f6db24888c1f05f4db9801349d23f09ed0714a9335", size = 270103, upload-time = "2025-10-21T15:55:41.24Z" }, - { url = "https://files.pythonhosted.org/packages/28/c6/195a6217a43719d5a6a12cc192a22d12c40290cecfa577f00f4fb822f07d/regex-2025.10.23-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:b7690f95404a1293923a296981fd943cca12c31a41af9c21ba3edd06398fc193", size = 488956, upload-time = "2025-10-21T15:55:42.887Z" }, - { url = "https://files.pythonhosted.org/packages/4c/93/181070cd1aa2fa541ff2d3afcf763ceecd4937b34c615fa92765020a6c90/regex-2025.10.23-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1a32d77aeaea58a13230100dd8797ac1a84c457f3af2fdf0d81ea689d5a9105b", size = 290997, upload-time = "2025-10-21T15:55:44.53Z" }, - { url = "https://files.pythonhosted.org/packages/b6/c5/9d37fbe3a40ed8dda78c23e1263002497540c0d1522ed75482ef6c2000f0/regex-2025.10.23-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b24b29402f264f70a3c81f45974323b41764ff7159655360543b7cabb73e7d2f", size = 288686, upload-time = "2025-10-21T15:55:46.186Z" }, - { url = "https://files.pythonhosted.org/packages/5f/e7/db610ff9f10c2921f9b6ac0c8d8be4681b28ddd40fc0549429366967e61f/regex-2025.10.23-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:563824a08c7c03d96856d84b46fdb3bbb7cfbdf79da7ef68725cda2ce169c72a", size = 798466, upload-time = "2025-10-21T15:55:48.24Z" }, - { url = "https://files.pythonhosted.org/packages/90/10/aab883e1fa7fe2feb15ac663026e70ca0ae1411efa0c7a4a0342d9545015/regex-2025.10.23-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a0ec8bdd88d2e2659c3518087ee34b37e20bd169419ffead4240a7004e8ed03b", size = 863996, upload-time = "2025-10-21T15:55:50.478Z" }, - { url = "https://files.pythonhosted.org/packages/a2/b0/8f686dd97a51f3b37d0238cd00a6d0f9ccabe701f05b56de1918571d0d61/regex-2025.10.23-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b577601bfe1d33913fcd9276d7607bbac827c4798d9e14d04bf37d417a6c41cb", size = 912145, upload-time = "2025-10-21T15:55:52.215Z" }, - { url = "https://files.pythonhosted.org/packages/a3/ca/639f8cd5b08797bca38fc5e7e07f76641a428cf8c7fca05894caf045aa32/regex-2025.10.23-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7c9f2c68ac6cb3de94eea08a437a75eaa2bd33f9e97c84836ca0b610a5804368", size = 803370, upload-time = "2025-10-21T15:55:53.944Z" }, - { url = "https://files.pythonhosted.org/packages/0d/1e/a40725bb76959eddf8abc42a967bed6f4851b39f5ac4f20e9794d7832aa5/regex-2025.10.23-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:89f8b9ea3830c79468e26b0e21c3585f69f105157c2154a36f6b7839f8afb351", size = 787767, upload-time = "2025-10-21T15:55:56.004Z" }, - { url = "https://files.pythonhosted.org/packages/3d/d8/8ee9858062936b0f99656dce390aa667c6e7fb0c357b1b9bf76fb5e2e708/regex-2025.10.23-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:98fd84c4e4ea185b3bb5bf065261ab45867d8875032f358a435647285c722673", size = 858335, upload-time = "2025-10-21T15:55:58.185Z" }, - { url = "https://files.pythonhosted.org/packages/d8/0a/ed5faaa63fa8e3064ab670e08061fbf09e3a10235b19630cf0cbb9e48c0a/regex-2025.10.23-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1e11d3e5887b8b096f96b4154dfb902f29c723a9556639586cd140e77e28b313", size = 850402, upload-time = "2025-10-21T15:56:00.023Z" }, - { url = "https://files.pythonhosted.org/packages/79/14/d05f617342f4b2b4a23561da500ca2beab062bfcc408d60680e77ecaf04d/regex-2025.10.23-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f13450328a6634348d47a88367e06b64c9d84980ef6a748f717b13f8ce64e87", size = 789739, upload-time = "2025-10-21T15:56:01.967Z" }, - { url = "https://files.pythonhosted.org/packages/f9/7b/e8ce8eef42a15f2c3461f8b3e6e924bbc86e9605cb534a393aadc8d3aff8/regex-2025.10.23-cp313-cp313-win32.whl", hash = "sha256:37be9296598a30c6a20236248cb8b2c07ffd54d095b75d3a2a2ee5babdc51df1", size = 266054, upload-time = "2025-10-21T15:56:05.291Z" }, - { url = "https://files.pythonhosted.org/packages/71/2d/55184ed6be6473187868d2f2e6a0708195fc58270e62a22cbf26028f2570/regex-2025.10.23-cp313-cp313-win_amd64.whl", hash = "sha256:ea7a3c283ce0f06fe789365841e9174ba05f8db16e2fd6ae00a02df9572c04c0", size = 276917, upload-time = "2025-10-21T15:56:07.303Z" }, - { url = "https://files.pythonhosted.org/packages/9c/d4/927eced0e2bd45c45839e556f987f8c8f8683268dd3c00ad327deb3b0172/regex-2025.10.23-cp313-cp313-win_arm64.whl", hash = "sha256:d9a4953575f300a7bab71afa4cd4ac061c7697c89590a2902b536783eeb49a4f", size = 270105, upload-time = "2025-10-21T15:56:09.857Z" }, - { url = "https://files.pythonhosted.org/packages/3e/b3/95b310605285573341fc062d1d30b19a54f857530e86c805f942c4ff7941/regex-2025.10.23-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:7d6606524fa77b3912c9ef52a42ef63c6cfbfc1077e9dc6296cd5da0da286044", size = 491850, upload-time = "2025-10-21T15:56:11.685Z" }, - { url = "https://files.pythonhosted.org/packages/a4/8f/207c2cec01e34e56db1eff606eef46644a60cf1739ecd474627db90ad90b/regex-2025.10.23-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c037aadf4d64bdc38af7db3dbd34877a057ce6524eefcb2914d6d41c56f968cc", size = 292537, upload-time = "2025-10-21T15:56:13.963Z" }, - { url = "https://files.pythonhosted.org/packages/98/3b/025240af4ada1dc0b5f10d73f3e5122d04ce7f8908ab8881e5d82b9d61b6/regex-2025.10.23-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:99018c331fb2529084a0c9b4c713dfa49fafb47c7712422e49467c13a636c656", size = 290904, upload-time = "2025-10-21T15:56:16.016Z" }, - { url = "https://files.pythonhosted.org/packages/81/8e/104ac14e2d3450c43db18ec03e1b96b445a94ae510b60138f00ce2cb7ca1/regex-2025.10.23-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fd8aba965604d70306eb90a35528f776e59112a7114a5162824d43b76fa27f58", size = 807311, upload-time = "2025-10-21T15:56:17.818Z" }, - { url = "https://files.pythonhosted.org/packages/19/63/78aef90141b7ce0be8a18e1782f764f6997ad09de0e05251f0d2503a914a/regex-2025.10.23-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:238e67264b4013e74136c49f883734f68656adf8257bfa13b515626b31b20f8e", size = 873241, upload-time = "2025-10-21T15:56:19.941Z" }, - { url = "https://files.pythonhosted.org/packages/b3/a8/80eb1201bb49ae4dba68a1b284b4211ed9daa8e74dc600018a10a90399fb/regex-2025.10.23-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b2eb48bd9848d66fd04826382f5e8491ae633de3233a3d64d58ceb4ecfa2113a", size = 914794, upload-time = "2025-10-21T15:56:22.488Z" }, - { url = "https://files.pythonhosted.org/packages/f0/d5/1984b6ee93281f360a119a5ca1af6a8ca7d8417861671388bf750becc29b/regex-2025.10.23-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d36591ce06d047d0c0fe2fc5f14bfbd5b4525d08a7b6a279379085e13f0e3d0e", size = 812581, upload-time = "2025-10-21T15:56:24.319Z" }, - { url = "https://files.pythonhosted.org/packages/c4/39/11ebdc6d9927172a64ae237d16763145db6bd45ebb4055c17b88edab72a7/regex-2025.10.23-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b5d4ece8628d6e364302006366cea3ee887db397faebacc5dacf8ef19e064cf8", size = 795346, upload-time = "2025-10-21T15:56:26.232Z" }, - { url = "https://files.pythonhosted.org/packages/3b/b4/89a591bcc08b5e436af43315284bd233ba77daf0cf20e098d7af12f006c1/regex-2025.10.23-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:39a7e8083959cb1c4ff74e483eecb5a65d3b3e1d821b256e54baf61782c906c6", size = 868214, upload-time = "2025-10-21T15:56:28.597Z" }, - { url = "https://files.pythonhosted.org/packages/3d/ff/58ba98409c1dbc8316cdb20dafbc63ed267380a07780cafecaf5012dabc9/regex-2025.10.23-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:842d449a8fefe546f311656cf8c0d6729b08c09a185f1cad94c756210286d6a8", size = 854540, upload-time = "2025-10-21T15:56:30.875Z" }, - { url = "https://files.pythonhosted.org/packages/9a/f2/4a9e9338d67626e2071b643f828a482712ad15889d7268e11e9a63d6f7e9/regex-2025.10.23-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d614986dc68506be8f00474f4f6960e03e4ca9883f7df47744800e7d7c08a494", size = 799346, upload-time = "2025-10-21T15:56:32.725Z" }, - { url = "https://files.pythonhosted.org/packages/63/be/543d35c46bebf6f7bf2be538cca74d6585f25714700c36f37f01b92df551/regex-2025.10.23-cp313-cp313t-win32.whl", hash = "sha256:a5b7a26b51a9df473ec16a1934d117443a775ceb7b39b78670b2e21893c330c9", size = 268657, upload-time = "2025-10-21T15:56:34.577Z" }, - { url = "https://files.pythonhosted.org/packages/14/9f/4dd6b7b612037158bb2c9bcaa710e6fb3c40ad54af441b9c53b3a137a9f1/regex-2025.10.23-cp313-cp313t-win_amd64.whl", hash = "sha256:ce81c5544a5453f61cb6f548ed358cfb111e3b23f3cd42d250a4077a6be2a7b6", size = 280075, upload-time = "2025-10-21T15:56:36.767Z" }, - { url = "https://files.pythonhosted.org/packages/81/7a/5bd0672aa65d38c8da6747c17c8b441bdb53d816c569e3261013af8e83cf/regex-2025.10.23-cp313-cp313t-win_arm64.whl", hash = "sha256:e9bf7f6699f490e4e43c44757aa179dab24d1960999c84ab5c3d5377714ed473", size = 271219, upload-time = "2025-10-21T15:56:39.033Z" }, - { url = "https://files.pythonhosted.org/packages/73/f6/0caf29fec943f201fbc8822879c99d31e59c1d51a983d9843ee5cf398539/regex-2025.10.23-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:5b5cb5b6344c4c4c24b2dc87b0bfee78202b07ef7633385df70da7fcf6f7cec6", size = 488960, upload-time = "2025-10-21T15:56:40.849Z" }, - { url = "https://files.pythonhosted.org/packages/8e/7d/ebb7085b8fa31c24ce0355107cea2b92229d9050552a01c5d291c42aecea/regex-2025.10.23-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a6ce7973384c37bdf0f371a843f95a6e6f4e1489e10e0cf57330198df72959c5", size = 290932, upload-time = "2025-10-21T15:56:42.875Z" }, - { url = "https://files.pythonhosted.org/packages/27/41/43906867287cbb5ca4cee671c3cc8081e15deef86a8189c3aad9ac9f6b4d/regex-2025.10.23-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2ee3663f2c334959016b56e3bd0dd187cbc73f948e3a3af14c3caaa0c3035d10", size = 288766, upload-time = "2025-10-21T15:56:44.894Z" }, - { url = "https://files.pythonhosted.org/packages/ab/9e/ea66132776700fc77a39b1056e7a5f1308032fead94507e208dc6716b7cd/regex-2025.10.23-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2003cc82a579107e70d013482acce8ba773293f2db534fb532738395c557ff34", size = 798884, upload-time = "2025-10-21T15:56:47.178Z" }, - { url = "https://files.pythonhosted.org/packages/d5/99/aed1453687ab63819a443930770db972c5c8064421f0d9f5da9ad029f26b/regex-2025.10.23-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:182c452279365a93a9f45874f7f191ec1c51e1f1eb41bf2b16563f1a40c1da3a", size = 864768, upload-time = "2025-10-21T15:56:49.793Z" }, - { url = "https://files.pythonhosted.org/packages/99/5d/732fe747a1304805eb3853ce6337eea16b169f7105a0d0dd9c6a5ffa9948/regex-2025.10.23-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b1249e9ff581c5b658c8f0437f883b01f1edcf424a16388591e7c05e5e9e8b0c", size = 911394, upload-time = "2025-10-21T15:56:52.186Z" }, - { url = "https://files.pythonhosted.org/packages/5e/48/58a1f6623466522352a6efa153b9a3714fc559d9f930e9bc947b4a88a2c3/regex-2025.10.23-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b841698f93db3ccc36caa1900d2a3be281d9539b822dc012f08fc80b46a3224", size = 803145, upload-time = "2025-10-21T15:56:55.142Z" }, - { url = "https://files.pythonhosted.org/packages/ea/f6/7dea79be2681a5574ab3fc237aa53b2c1dfd6bd2b44d4640b6c76f33f4c1/regex-2025.10.23-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:956d89e0c92d471e8f7eee73f73fdff5ed345886378c45a43175a77538a1ffe4", size = 787831, upload-time = "2025-10-21T15:56:57.203Z" }, - { url = "https://files.pythonhosted.org/packages/3a/ad/07b76950fbbe65f88120ca2d8d845047c401450f607c99ed38862904671d/regex-2025.10.23-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5c259cb363299a0d90d63b5c0d7568ee98419861618a95ee9d91a41cb9954462", size = 859162, upload-time = "2025-10-21T15:56:59.195Z" }, - { url = "https://files.pythonhosted.org/packages/41/87/374f3b2021b22aa6a4fc0b750d63f9721e53d1631a238f7a1c343c1cd288/regex-2025.10.23-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:185d2b18c062820b3a40d8fefa223a83f10b20a674bf6e8c4a432e8dfd844627", size = 849899, upload-time = "2025-10-21T15:57:01.747Z" }, - { url = "https://files.pythonhosted.org/packages/12/4a/7f7bb17c5a5a9747249807210e348450dab9212a46ae6d23ebce86ba6a2b/regex-2025.10.23-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:281d87fa790049c2b7c1b4253121edd80b392b19b5a3d28dc2a77579cb2a58ec", size = 789372, upload-time = "2025-10-21T15:57:04.018Z" }, - { url = "https://files.pythonhosted.org/packages/c9/dd/9c7728ff544fea09bbc8635e4c9e7c423b11c24f1a7a14e6ac4831466709/regex-2025.10.23-cp314-cp314-win32.whl", hash = "sha256:63b81eef3656072e4ca87c58084c7a9c2b81d41a300b157be635a8a675aacfb8", size = 271451, upload-time = "2025-10-21T15:57:06.266Z" }, - { url = "https://files.pythonhosted.org/packages/48/f8/ef7837ff858eb74079c4804c10b0403c0b740762e6eedba41062225f7117/regex-2025.10.23-cp314-cp314-win_amd64.whl", hash = "sha256:0967c5b86f274800a34a4ed862dfab56928144d03cb18821c5153f8777947796", size = 280173, upload-time = "2025-10-21T15:57:08.206Z" }, - { url = "https://files.pythonhosted.org/packages/8e/d0/d576e1dbd9885bfcd83d0e90762beea48d9373a6f7ed39170f44ed22e336/regex-2025.10.23-cp314-cp314-win_arm64.whl", hash = "sha256:c70dfe58b0a00b36aa04cdb0f798bf3e0adc31747641f69e191109fd8572c9a9", size = 273206, upload-time = "2025-10-21T15:57:10.367Z" }, - { url = "https://files.pythonhosted.org/packages/a6/d0/2025268315e8b2b7b660039824cb7765a41623e97d4cd421510925400487/regex-2025.10.23-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:1f5799ea1787aa6de6c150377d11afad39a38afd033f0c5247aecb997978c422", size = 491854, upload-time = "2025-10-21T15:57:12.526Z" }, - { url = "https://files.pythonhosted.org/packages/44/35/5681c2fec5e8b33454390af209c4353dfc44606bf06d714b0b8bd0454ffe/regex-2025.10.23-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:a9639ab7540cfea45ef57d16dcbea2e22de351998d614c3ad2f9778fa3bdd788", size = 292542, upload-time = "2025-10-21T15:57:15.158Z" }, - { url = "https://files.pythonhosted.org/packages/5d/17/184eed05543b724132e4a18149e900f5189001fcfe2d64edaae4fbaf36b4/regex-2025.10.23-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:08f52122c352eb44c3421dab78b9b73a8a77a282cc8314ae576fcaa92b780d10", size = 290903, upload-time = "2025-10-21T15:57:17.108Z" }, - { url = "https://files.pythonhosted.org/packages/25/d0/5e3347aa0db0de382dddfa133a7b0ae72f24b4344f3989398980b44a3924/regex-2025.10.23-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ebf1baebef1c4088ad5a5623decec6b52950f0e4d7a0ae4d48f0a99f8c9cb7d7", size = 807546, upload-time = "2025-10-21T15:57:19.179Z" }, - { url = "https://files.pythonhosted.org/packages/d2/bb/40c589bbdce1be0c55e9f8159789d58d47a22014f2f820cf2b517a5cd193/regex-2025.10.23-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:16b0f1c2e2d566c562d5c384c2b492646be0a19798532fdc1fdedacc66e3223f", size = 873322, upload-time = "2025-10-21T15:57:21.36Z" }, - { url = "https://files.pythonhosted.org/packages/fe/56/a7e40c01575ac93360e606278d359f91829781a9f7fb6e5aa435039edbda/regex-2025.10.23-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f7ada5d9dceafaab92646aa00c10a9efd9b09942dd9b0d7c5a4b73db92cc7e61", size = 914855, upload-time = "2025-10-21T15:57:24.044Z" }, - { url = "https://files.pythonhosted.org/packages/5c/4b/d55587b192763db3163c3f508b3b67b31bb6f5e7a0e08b83013d0a59500a/regex-2025.10.23-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3a36b4005770044bf08edecc798f0e41a75795b9e7c9c12fe29da8d792ef870c", size = 812724, upload-time = "2025-10-21T15:57:26.123Z" }, - { url = "https://files.pythonhosted.org/packages/33/20/18bac334955fbe99d17229f4f8e98d05e4a501ac03a442be8facbb37c304/regex-2025.10.23-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:af7b2661dcc032da1fae82069b5ebf2ac1dfcd5359ef8b35e1367bfc92181432", size = 795439, upload-time = "2025-10-21T15:57:28.497Z" }, - { url = "https://files.pythonhosted.org/packages/67/46/c57266be9df8549c7d85deb4cb82280cb0019e46fff677534c5fa1badfa4/regex-2025.10.23-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:1cb976810ac1416a67562c2e5ba0accf6f928932320fef302e08100ed681b38e", size = 868336, upload-time = "2025-10-21T15:57:30.867Z" }, - { url = "https://files.pythonhosted.org/packages/b8/f3/bd5879e41ef8187fec5e678e94b526a93f99e7bbe0437b0f2b47f9101694/regex-2025.10.23-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:1a56a54be3897d62f54290190fbcd754bff6932934529fbf5b29933da28fcd43", size = 854567, upload-time = "2025-10-21T15:57:33.062Z" }, - { url = "https://files.pythonhosted.org/packages/e6/57/2b6bbdbd2f24dfed5b028033aa17ad8f7d86bb28f1a892cac8b3bc89d059/regex-2025.10.23-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8f3e6d202fb52c2153f532043bbcf618fd177df47b0b306741eb9b60ba96edc3", size = 799565, upload-time = "2025-10-21T15:57:35.153Z" }, - { url = "https://files.pythonhosted.org/packages/c7/ba/a6168f542ba73b151ed81237adf6b869c7b2f7f8d51618111296674e20ee/regex-2025.10.23-cp314-cp314t-win32.whl", hash = "sha256:1fa1186966b2621b1769fd467c7b22e317e6ba2d2cdcecc42ea3089ef04a8521", size = 274428, upload-time = "2025-10-21T15:57:37.996Z" }, - { url = "https://files.pythonhosted.org/packages/ef/a0/c84475e14a2829e9b0864ebf77c3f7da909df9d8acfe2bb540ff0072047c/regex-2025.10.23-cp314-cp314t-win_amd64.whl", hash = "sha256:08a15d40ce28362eac3e78e83d75475147869c1ff86bc93285f43b4f4431a741", size = 284140, upload-time = "2025-10-21T15:57:40.027Z" }, - { url = "https://files.pythonhosted.org/packages/51/33/6a08ade0eee5b8ba79386869fa6f77afeb835b60510f3525db987e2fffc4/regex-2025.10.23-cp314-cp314t-win_arm64.whl", hash = "sha256:a93e97338e1c8ea2649e130dcfbe8cd69bba5e1e163834752ab64dcb4de6d5ed", size = 274497, upload-time = "2025-10-21T15:57:42.389Z" }, +version = "2025.11.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/a9/546676f25e573a4cf00fe8e119b78a37b6a8fe2dc95cda877b30889c9c45/regex-2025.11.3.tar.gz", hash = "sha256:1fedc720f9bb2494ce31a58a1631f9c82df6a09b49c19517ea5cc280b4541e01", size = 414669, upload-time = "2025-11-03T21:34:22.089Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e8/74/18f04cb53e58e3fb107439699bd8375cf5a835eec81084e0bddbd122e4c2/regex-2025.11.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bc8ab71e2e31b16e40868a40a69007bc305e1109bd4658eb6cad007e0bf67c41", size = 489312, upload-time = "2025-11-03T21:31:34.343Z" }, + { url = "https://files.pythonhosted.org/packages/78/3f/37fcdd0d2b1e78909108a876580485ea37c91e1acf66d3bb8e736348f441/regex-2025.11.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:22b29dda7e1f7062a52359fca6e58e548e28c6686f205e780b02ad8ef710de36", size = 291256, upload-time = "2025-11-03T21:31:35.675Z" }, + { url = "https://files.pythonhosted.org/packages/bf/26/0a575f58eb23b7ebd67a45fccbc02ac030b737b896b7e7a909ffe43ffd6a/regex-2025.11.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3a91e4a29938bc1a082cc28fdea44be420bf2bebe2665343029723892eb073e1", size = 288921, upload-time = "2025-11-03T21:31:37.07Z" }, + { url = "https://files.pythonhosted.org/packages/ea/98/6a8dff667d1af907150432cf5abc05a17ccd32c72a3615410d5365ac167a/regex-2025.11.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:08b884f4226602ad40c5d55f52bf91a9df30f513864e0054bad40c0e9cf1afb7", size = 798568, upload-time = "2025-11-03T21:31:38.784Z" }, + { url = "https://files.pythonhosted.org/packages/64/15/92c1db4fa4e12733dd5a526c2dd2b6edcbfe13257e135fc0f6c57f34c173/regex-2025.11.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3e0b11b2b2433d1c39c7c7a30e3f3d0aeeea44c2a8d0bae28f6b95f639927a69", size = 864165, upload-time = "2025-11-03T21:31:40.559Z" }, + { url = "https://files.pythonhosted.org/packages/f9/e7/3ad7da8cdee1ce66c7cd37ab5ab05c463a86ffeb52b1a25fe7bd9293b36c/regex-2025.11.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:87eb52a81ef58c7ba4d45c3ca74e12aa4b4e77816f72ca25258a85b3ea96cb48", size = 912182, upload-time = "2025-11-03T21:31:42.002Z" }, + { url = "https://files.pythonhosted.org/packages/84/bd/9ce9f629fcb714ffc2c3faf62b6766ecb7a585e1e885eb699bcf130a5209/regex-2025.11.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a12ab1f5c29b4e93db518f5e3872116b7e9b1646c9f9f426f777b50d44a09e8c", size = 803501, upload-time = "2025-11-03T21:31:43.815Z" }, + { url = "https://files.pythonhosted.org/packages/7c/0f/8dc2e4349d8e877283e6edd6c12bdcebc20f03744e86f197ab6e4492bf08/regex-2025.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7521684c8c7c4f6e88e35ec89680ee1aa8358d3f09d27dfbdf62c446f5d4c695", size = 787842, upload-time = "2025-11-03T21:31:45.353Z" }, + { url = "https://files.pythonhosted.org/packages/f9/73/cff02702960bc185164d5619c0c62a2f598a6abff6695d391b096237d4ab/regex-2025.11.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7fe6e5440584e94cc4b3f5f4d98a25e29ca12dccf8873679a635638349831b98", size = 858519, upload-time = "2025-11-03T21:31:46.814Z" }, + { url = "https://files.pythonhosted.org/packages/61/83/0e8d1ae71e15bc1dc36231c90b46ee35f9d52fab2e226b0e039e7ea9c10a/regex-2025.11.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8e026094aa12b43f4fd74576714e987803a315c76edb6b098b9809db5de58f74", size = 850611, upload-time = "2025-11-03T21:31:48.289Z" }, + { url = "https://files.pythonhosted.org/packages/c8/f5/70a5cdd781dcfaa12556f2955bf170cd603cb1c96a1827479f8faea2df97/regex-2025.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:435bbad13e57eb5606a68443af62bed3556de2f46deb9f7d4237bc2f1c9fb3a0", size = 789759, upload-time = "2025-11-03T21:31:49.759Z" }, + { url = "https://files.pythonhosted.org/packages/59/9b/7c29be7903c318488983e7d97abcf8ebd3830e4c956c4c540005fcfb0462/regex-2025.11.3-cp312-cp312-win32.whl", hash = "sha256:3839967cf4dc4b985e1570fd8d91078f0c519f30491c60f9ac42a8db039be204", size = 266194, upload-time = "2025-11-03T21:31:51.53Z" }, + { url = "https://files.pythonhosted.org/packages/1a/67/3b92df89f179d7c367be654ab5626ae311cb28f7d5c237b6bb976cd5fbbb/regex-2025.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:e721d1b46e25c481dc5ded6f4b3f66c897c58d2e8cfdf77bbced84339108b0b9", size = 277069, upload-time = "2025-11-03T21:31:53.151Z" }, + { url = "https://files.pythonhosted.org/packages/d7/55/85ba4c066fe5094d35b249c3ce8df0ba623cfd35afb22d6764f23a52a1c5/regex-2025.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:64350685ff08b1d3a6fff33f45a9ca183dc1d58bbfe4981604e70ec9801bbc26", size = 270330, upload-time = "2025-11-03T21:31:54.514Z" }, + { url = "https://files.pythonhosted.org/packages/e1/a7/dda24ebd49da46a197436ad96378f17df30ceb40e52e859fc42cac45b850/regex-2025.11.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c1e448051717a334891f2b9a620fe36776ebf3dd8ec46a0b877c8ae69575feb4", size = 489081, upload-time = "2025-11-03T21:31:55.9Z" }, + { url = "https://files.pythonhosted.org/packages/19/22/af2dc751aacf88089836aa088a1a11c4f21a04707eb1b0478e8e8fb32847/regex-2025.11.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9b5aca4d5dfd7fbfbfbdaf44850fcc7709a01146a797536a8f84952e940cca76", size = 291123, upload-time = "2025-11-03T21:31:57.758Z" }, + { url = "https://files.pythonhosted.org/packages/a3/88/1a3ea5672f4b0a84802ee9891b86743438e7c04eb0b8f8c4e16a42375327/regex-2025.11.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:04d2765516395cf7dda331a244a3282c0f5ae96075f728629287dfa6f76ba70a", size = 288814, upload-time = "2025-11-03T21:32:01.12Z" }, + { url = "https://files.pythonhosted.org/packages/fb/8c/f5987895bf42b8ddeea1b315c9fedcfe07cadee28b9c98cf50d00adcb14d/regex-2025.11.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d9903ca42bfeec4cebedba8022a7c97ad2aab22e09573ce9976ba01b65e4361", size = 798592, upload-time = "2025-11-03T21:32:03.006Z" }, + { url = "https://files.pythonhosted.org/packages/99/2a/6591ebeede78203fa77ee46a1c36649e02df9eaa77a033d1ccdf2fcd5d4e/regex-2025.11.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:639431bdc89d6429f6721625e8129413980ccd62e9d3f496be618a41d205f160", size = 864122, upload-time = "2025-11-03T21:32:04.553Z" }, + { url = "https://files.pythonhosted.org/packages/94/d6/be32a87cf28cf8ed064ff281cfbd49aefd90242a83e4b08b5a86b38e8eb4/regex-2025.11.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f117efad42068f9715677c8523ed2be1518116d1c49b1dd17987716695181efe", size = 912272, upload-time = "2025-11-03T21:32:06.148Z" }, + { url = "https://files.pythonhosted.org/packages/62/11/9bcef2d1445665b180ac7f230406ad80671f0fc2a6ffb93493b5dd8cd64c/regex-2025.11.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4aecb6f461316adf9f1f0f6a4a1a3d79e045f9b71ec76055a791affa3b285850", size = 803497, upload-time = "2025-11-03T21:32:08.162Z" }, + { url = "https://files.pythonhosted.org/packages/e5/a7/da0dc273d57f560399aa16d8a68ae7f9b57679476fc7ace46501d455fe84/regex-2025.11.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3b3a5f320136873cc5561098dfab677eea139521cb9a9e8db98b7e64aef44cbc", size = 787892, upload-time = "2025-11-03T21:32:09.769Z" }, + { url = "https://files.pythonhosted.org/packages/da/4b/732a0c5a9736a0b8d6d720d4945a2f1e6f38f87f48f3173559f53e8d5d82/regex-2025.11.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:75fa6f0056e7efb1f42a1c34e58be24072cb9e61a601340cc1196ae92326a4f9", size = 858462, upload-time = "2025-11-03T21:32:11.769Z" }, + { url = "https://files.pythonhosted.org/packages/0c/f5/a2a03df27dc4c2d0c769220f5110ba8c4084b0bfa9ab0f9b4fcfa3d2b0fc/regex-2025.11.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:dbe6095001465294f13f1adcd3311e50dd84e5a71525f20a10bd16689c61ce0b", size = 850528, upload-time = "2025-11-03T21:32:13.906Z" }, + { url = "https://files.pythonhosted.org/packages/d6/09/e1cd5bee3841c7f6eb37d95ca91cdee7100b8f88b81e41c2ef426910891a/regex-2025.11.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:454d9b4ae7881afbc25015b8627c16d88a597479b9dea82b8c6e7e2e07240dc7", size = 789866, upload-time = "2025-11-03T21:32:15.748Z" }, + { url = "https://files.pythonhosted.org/packages/eb/51/702f5ea74e2a9c13d855a6a85b7f80c30f9e72a95493260193c07f3f8d74/regex-2025.11.3-cp313-cp313-win32.whl", hash = "sha256:28ba4d69171fc6e9896337d4fc63a43660002b7da53fc15ac992abcf3410917c", size = 266189, upload-time = "2025-11-03T21:32:17.493Z" }, + { url = "https://files.pythonhosted.org/packages/8b/00/6e29bb314e271a743170e53649db0fdb8e8ff0b64b4f425f5602f4eb9014/regex-2025.11.3-cp313-cp313-win_amd64.whl", hash = "sha256:bac4200befe50c670c405dc33af26dad5a3b6b255dd6c000d92fe4629f9ed6a5", size = 277054, upload-time = "2025-11-03T21:32:19.042Z" }, + { url = "https://files.pythonhosted.org/packages/25/f1/b156ff9f2ec9ac441710764dda95e4edaf5f36aca48246d1eea3f1fd96ec/regex-2025.11.3-cp313-cp313-win_arm64.whl", hash = "sha256:2292cd5a90dab247f9abe892ac584cb24f0f54680c73fcb4a7493c66c2bf2467", size = 270325, upload-time = "2025-11-03T21:32:21.338Z" }, + { url = "https://files.pythonhosted.org/packages/20/28/fd0c63357caefe5680b8ea052131acbd7f456893b69cc2a90cc3e0dc90d4/regex-2025.11.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:1eb1ebf6822b756c723e09f5186473d93236c06c579d2cc0671a722d2ab14281", size = 491984, upload-time = "2025-11-03T21:32:23.466Z" }, + { url = "https://files.pythonhosted.org/packages/df/ec/7014c15626ab46b902b3bcc4b28a7bae46d8f281fc7ea9c95e22fcaaa917/regex-2025.11.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1e00ec2970aab10dc5db34af535f21fcf32b4a31d99e34963419636e2f85ae39", size = 292673, upload-time = "2025-11-03T21:32:25.034Z" }, + { url = "https://files.pythonhosted.org/packages/23/ab/3b952ff7239f20d05f1f99e9e20188513905f218c81d52fb5e78d2bf7634/regex-2025.11.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a4cb042b615245d5ff9b3794f56be4138b5adc35a4166014d31d1814744148c7", size = 291029, upload-time = "2025-11-03T21:32:26.528Z" }, + { url = "https://files.pythonhosted.org/packages/21/7e/3dc2749fc684f455f162dcafb8a187b559e2614f3826877d3844a131f37b/regex-2025.11.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44f264d4bf02f3176467d90b294d59bf1db9fe53c141ff772f27a8b456b2a9ed", size = 807437, upload-time = "2025-11-03T21:32:28.363Z" }, + { url = "https://files.pythonhosted.org/packages/1b/0b/d529a85ab349c6a25d1ca783235b6e3eedf187247eab536797021f7126c6/regex-2025.11.3-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7be0277469bf3bd7a34a9c57c1b6a724532a0d235cd0dc4e7f4316f982c28b19", size = 873368, upload-time = "2025-11-03T21:32:30.4Z" }, + { url = "https://files.pythonhosted.org/packages/7d/18/2d868155f8c9e3e9d8f9e10c64e9a9f496bb8f7e037a88a8bed26b435af6/regex-2025.11.3-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0d31e08426ff4b5b650f68839f5af51a92a5b51abd8554a60c2fbc7c71f25d0b", size = 914921, upload-time = "2025-11-03T21:32:32.123Z" }, + { url = "https://files.pythonhosted.org/packages/2d/71/9d72ff0f354fa783fe2ba913c8734c3b433b86406117a8db4ea2bf1c7a2f/regex-2025.11.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e43586ce5bd28f9f285a6e729466841368c4a0353f6fd08d4ce4630843d3648a", size = 812708, upload-time = "2025-11-03T21:32:34.305Z" }, + { url = "https://files.pythonhosted.org/packages/e7/19/ce4bf7f5575c97f82b6e804ffb5c4e940c62609ab2a0d9538d47a7fdf7d4/regex-2025.11.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:0f9397d561a4c16829d4e6ff75202c1c08b68a3bdbfe29dbfcdb31c9830907c6", size = 795472, upload-time = "2025-11-03T21:32:36.364Z" }, + { url = "https://files.pythonhosted.org/packages/03/86/fd1063a176ffb7b2315f9a1b08d17b18118b28d9df163132615b835a26ee/regex-2025.11.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:dd16e78eb18ffdb25ee33a0682d17912e8cc8a770e885aeee95020046128f1ce", size = 868341, upload-time = "2025-11-03T21:32:38.042Z" }, + { url = "https://files.pythonhosted.org/packages/12/43/103fb2e9811205e7386366501bc866a164a0430c79dd59eac886a2822950/regex-2025.11.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:ffcca5b9efe948ba0661e9df0fa50d2bc4b097c70b9810212d6b62f05d83b2dd", size = 854666, upload-time = "2025-11-03T21:32:40.079Z" }, + { url = "https://files.pythonhosted.org/packages/7d/22/e392e53f3869b75804762c7c848bd2dd2abf2b70fb0e526f58724638bd35/regex-2025.11.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c56b4d162ca2b43318ac671c65bd4d563e841a694ac70e1a976ac38fcf4ca1d2", size = 799473, upload-time = "2025-11-03T21:32:42.148Z" }, + { url = "https://files.pythonhosted.org/packages/4f/f9/8bd6b656592f925b6845fcbb4d57603a3ac2fb2373344ffa1ed70aa6820a/regex-2025.11.3-cp313-cp313t-win32.whl", hash = "sha256:9ddc42e68114e161e51e272f667d640f97e84a2b9ef14b7477c53aac20c2d59a", size = 268792, upload-time = "2025-11-03T21:32:44.13Z" }, + { url = "https://files.pythonhosted.org/packages/e5/87/0e7d603467775ff65cd2aeabf1b5b50cc1c3708556a8b849a2fa4dd1542b/regex-2025.11.3-cp313-cp313t-win_amd64.whl", hash = "sha256:7a7c7fdf755032ffdd72c77e3d8096bdcb0eb92e89e17571a196f03d88b11b3c", size = 280214, upload-time = "2025-11-03T21:32:45.853Z" }, + { url = "https://files.pythonhosted.org/packages/8d/d0/2afc6f8e94e2b64bfb738a7c2b6387ac1699f09f032d363ed9447fd2bb57/regex-2025.11.3-cp313-cp313t-win_arm64.whl", hash = "sha256:df9eb838c44f570283712e7cff14c16329a9f0fb19ca492d21d4b7528ee6821e", size = 271469, upload-time = "2025-11-03T21:32:48.026Z" }, + { url = "https://files.pythonhosted.org/packages/31/e9/f6e13de7e0983837f7b6d238ad9458800a874bf37c264f7923e63409944c/regex-2025.11.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9697a52e57576c83139d7c6f213d64485d3df5bf84807c35fa409e6c970801c6", size = 489089, upload-time = "2025-11-03T21:32:50.027Z" }, + { url = "https://files.pythonhosted.org/packages/a3/5c/261f4a262f1fa65141c1b74b255988bd2fa020cc599e53b080667d591cfc/regex-2025.11.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e18bc3f73bd41243c9b38a6d9f2366cd0e0137a9aebe2d8ff76c5b67d4c0a3f4", size = 291059, upload-time = "2025-11-03T21:32:51.682Z" }, + { url = "https://files.pythonhosted.org/packages/8e/57/f14eeb7f072b0e9a5a090d1712741fd8f214ec193dba773cf5410108bb7d/regex-2025.11.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:61a08bcb0ec14ff4e0ed2044aad948d0659604f824cbd50b55e30b0ec6f09c73", size = 288900, upload-time = "2025-11-03T21:32:53.569Z" }, + { url = "https://files.pythonhosted.org/packages/3c/6b/1d650c45e99a9b327586739d926a1cd4e94666b1bd4af90428b36af66dc7/regex-2025.11.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9c30003b9347c24bcc210958c5d167b9e4f9be786cb380a7d32f14f9b84674f", size = 799010, upload-time = "2025-11-03T21:32:55.222Z" }, + { url = "https://files.pythonhosted.org/packages/99/ee/d66dcbc6b628ce4e3f7f0cbbb84603aa2fc0ffc878babc857726b8aab2e9/regex-2025.11.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4e1e592789704459900728d88d41a46fe3969b82ab62945560a31732ffc19a6d", size = 864893, upload-time = "2025-11-03T21:32:57.239Z" }, + { url = "https://files.pythonhosted.org/packages/bf/2d/f238229f1caba7ac87a6c4153d79947fb0261415827ae0f77c304260c7d3/regex-2025.11.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6538241f45eb5a25aa575dbba1069ad786f68a4f2773a29a2bd3dd1f9de787be", size = 911522, upload-time = "2025-11-03T21:32:59.274Z" }, + { url = "https://files.pythonhosted.org/packages/bd/3d/22a4eaba214a917c80e04f6025d26143690f0419511e0116508e24b11c9b/regex-2025.11.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce22519c989bb72a7e6b36a199384c53db7722fe669ba891da75907fe3587db", size = 803272, upload-time = "2025-11-03T21:33:01.393Z" }, + { url = "https://files.pythonhosted.org/packages/84/b1/03188f634a409353a84b5ef49754b97dbcc0c0f6fd6c8ede505a8960a0a4/regex-2025.11.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:66d559b21d3640203ab9075797a55165d79017520685fb407b9234d72ab63c62", size = 787958, upload-time = "2025-11-03T21:33:03.379Z" }, + { url = "https://files.pythonhosted.org/packages/99/6a/27d072f7fbf6fadd59c64d210305e1ff865cc3b78b526fd147db768c553b/regex-2025.11.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:669dcfb2e38f9e8c69507bace46f4889e3abbfd9b0c29719202883c0a603598f", size = 859289, upload-time = "2025-11-03T21:33:05.374Z" }, + { url = "https://files.pythonhosted.org/packages/9a/70/1b3878f648e0b6abe023172dacb02157e685564853cc363d9961bcccde4e/regex-2025.11.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:32f74f35ff0f25a5021373ac61442edcb150731fbaa28286bbc8bb1582c89d02", size = 850026, upload-time = "2025-11-03T21:33:07.131Z" }, + { url = "https://files.pythonhosted.org/packages/dd/d5/68e25559b526b8baab8e66839304ede68ff6727237a47727d240006bd0ff/regex-2025.11.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e6c7a21dffba883234baefe91bc3388e629779582038f75d2a5be918e250f0ed", size = 789499, upload-time = "2025-11-03T21:33:09.141Z" }, + { url = "https://files.pythonhosted.org/packages/fc/df/43971264857140a350910d4e33df725e8c94dd9dee8d2e4729fa0d63d49e/regex-2025.11.3-cp314-cp314-win32.whl", hash = "sha256:795ea137b1d809eb6836b43748b12634291c0ed55ad50a7d72d21edf1cd565c4", size = 271604, upload-time = "2025-11-03T21:33:10.9Z" }, + { url = "https://files.pythonhosted.org/packages/01/6f/9711b57dc6894a55faf80a4c1b5aa4f8649805cb9c7aef46f7d27e2b9206/regex-2025.11.3-cp314-cp314-win_amd64.whl", hash = "sha256:9f95fbaa0ee1610ec0fc6b26668e9917a582ba80c52cc6d9ada15e30aa9ab9ad", size = 280320, upload-time = "2025-11-03T21:33:12.572Z" }, + { url = "https://files.pythonhosted.org/packages/f1/7e/f6eaa207d4377481f5e1775cdeb5a443b5a59b392d0065f3417d31d80f87/regex-2025.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:dfec44d532be4c07088c3de2876130ff0fbeeacaa89a137decbbb5f665855a0f", size = 273372, upload-time = "2025-11-03T21:33:14.219Z" }, + { url = "https://files.pythonhosted.org/packages/c3/06/49b198550ee0f5e4184271cee87ba4dfd9692c91ec55289e6282f0f86ccf/regex-2025.11.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ba0d8a5d7f04f73ee7d01d974d47c5834f8a1b0224390e4fe7c12a3a92a78ecc", size = 491985, upload-time = "2025-11-03T21:33:16.555Z" }, + { url = "https://files.pythonhosted.org/packages/ce/bf/abdafade008f0b1c9da10d934034cb670432d6cf6cbe38bbb53a1cfd6cf8/regex-2025.11.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:442d86cf1cfe4faabf97db7d901ef58347efd004934da045c745e7b5bd57ac49", size = 292669, upload-time = "2025-11-03T21:33:18.32Z" }, + { url = "https://files.pythonhosted.org/packages/f9/ef/0c357bb8edbd2ad8e273fcb9e1761bc37b8acbc6e1be050bebd6475f19c1/regex-2025.11.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:fd0a5e563c756de210bb964789b5abe4f114dacae9104a47e1a649b910361536", size = 291030, upload-time = "2025-11-03T21:33:20.048Z" }, + { url = "https://files.pythonhosted.org/packages/79/06/edbb67257596649b8fb088d6aeacbcb248ac195714b18a65e018bf4c0b50/regex-2025.11.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf3490bcbb985a1ae97b2ce9ad1c0f06a852d5b19dde9b07bdf25bf224248c95", size = 807674, upload-time = "2025-11-03T21:33:21.797Z" }, + { url = "https://files.pythonhosted.org/packages/f4/d9/ad4deccfce0ea336296bd087f1a191543bb99ee1c53093dcd4c64d951d00/regex-2025.11.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3809988f0a8b8c9dcc0f92478d6501fac7200b9ec56aecf0ec21f4a2ec4b6009", size = 873451, upload-time = "2025-11-03T21:33:23.741Z" }, + { url = "https://files.pythonhosted.org/packages/13/75/a55a4724c56ef13e3e04acaab29df26582f6978c000ac9cd6810ad1f341f/regex-2025.11.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f4ff94e58e84aedb9c9fce66d4ef9f27a190285b451420f297c9a09f2b9abee9", size = 914980, upload-time = "2025-11-03T21:33:25.999Z" }, + { url = "https://files.pythonhosted.org/packages/67/1e/a1657ee15bd9116f70d4a530c736983eed997b361e20ecd8f5ca3759d5c5/regex-2025.11.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7eb542fd347ce61e1321b0a6b945d5701528dca0cd9759c2e3bb8bd57e47964d", size = 812852, upload-time = "2025-11-03T21:33:27.852Z" }, + { url = "https://files.pythonhosted.org/packages/b8/6f/f7516dde5506a588a561d296b2d0044839de06035bb486b326065b4c101e/regex-2025.11.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d6c2d5919075a1f2e413c00b056ea0c2f065b3f5fe83c3d07d325ab92dce51d6", size = 795566, upload-time = "2025-11-03T21:33:32.364Z" }, + { url = "https://files.pythonhosted.org/packages/d9/dd/3d10b9e170cc16fb34cb2cef91513cf3df65f440b3366030631b2984a264/regex-2025.11.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:3f8bf11a4827cc7ce5a53d4ef6cddd5ad25595d3c1435ef08f76825851343154", size = 868463, upload-time = "2025-11-03T21:33:34.459Z" }, + { url = "https://files.pythonhosted.org/packages/f5/8e/935e6beff1695aa9085ff83195daccd72acc82c81793df480f34569330de/regex-2025.11.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:22c12d837298651e5550ac1d964e4ff57c3f56965fc1812c90c9fb2028eaf267", size = 854694, upload-time = "2025-11-03T21:33:36.793Z" }, + { url = "https://files.pythonhosted.org/packages/92/12/10650181a040978b2f5720a6a74d44f841371a3d984c2083fc1752e4acf6/regex-2025.11.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:62ba394a3dda9ad41c7c780f60f6e4a70988741415ae96f6d1bf6c239cf01379", size = 799691, upload-time = "2025-11-03T21:33:39.079Z" }, + { url = "https://files.pythonhosted.org/packages/67/90/8f37138181c9a7690e7e4cb388debbd389342db3c7381d636d2875940752/regex-2025.11.3-cp314-cp314t-win32.whl", hash = "sha256:4bf146dca15cdd53224a1bf46d628bd7590e4a07fbb69e720d561aea43a32b38", size = 274583, upload-time = "2025-11-03T21:33:41.302Z" }, + { url = "https://files.pythonhosted.org/packages/8f/cd/867f5ec442d56beb56f5f854f40abcfc75e11d10b11fdb1869dd39c63aaf/regex-2025.11.3-cp314-cp314t-win_amd64.whl", hash = "sha256:adad1a1bcf1c9e76346e091d22d23ac54ef28e1365117d99521631078dfec9de", size = 284286, upload-time = "2025-11-03T21:33:43.324Z" }, + { url = "https://files.pythonhosted.org/packages/20/31/32c0c4610cbc070362bf1d2e4ea86d1ea29014d400a6d6c2486fcfd57766/regex-2025.11.3-cp314-cp314t-win_arm64.whl", hash = "sha256:c54f768482cef41e219720013cd05933b6f971d9562544d691c68699bf2b6801", size = 274741, upload-time = "2025-11-03T21:33:45.557Z" }, ] [[package]] @@ -3427,7 +3509,7 @@ wheels = [ [[package]] name = "reuse" -version = "6.1.2" +version = "6.2.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -3438,7 +3520,7 @@ dependencies = [ { name = "python-magic" }, { name = "tomlkit" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/76/b0/ffd3a8978504763982db4735b7d87fc16b57f4b511c49a38ca25a7cb9ad3/reuse-6.1.2.tar.gz", hash = "sha256:6019a75f4ca18ad5b2506e0f3ec1b926b27ba6cdc9da88492e7fc65e3ff12c39", size = 453827, upload-time = "2025-10-07T22:03:58.415Z" } +sdist = { url = "https://files.pythonhosted.org/packages/05/35/298d9410b3635107ce586725cdfbca4c219c08d77a3511551f5e479a78db/reuse-6.2.0.tar.gz", hash = "sha256:4feae057a2334c9a513e6933cdb9be819d8b822f3b5b435a36138bd218897d23", size = 1615611, upload-time = "2025-10-27T15:25:46.336Z" } [[package]] name = "rfc3339-validator" @@ -3476,83 +3558,83 @@ wheels = [ [[package]] name = "rpds-py" -version = "0.28.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/48/dc/95f074d43452b3ef5d06276696ece4b3b5d696e7c9ad7173c54b1390cd70/rpds_py-0.28.0.tar.gz", hash = "sha256:abd4df20485a0983e2ca334a216249b6186d6e3c1627e106651943dbdb791aea", size = 27419, upload-time = "2025-10-22T22:24:29.327Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b8/5c/6c3936495003875fe7b14f90ea812841a08fca50ab26bd840e924097d9c8/rpds_py-0.28.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6b4f28583a4f247ff60cd7bdda83db8c3f5b05a7a82ff20dd4b078571747708f", size = 366439, upload-time = "2025-10-22T22:22:04.525Z" }, - { url = "https://files.pythonhosted.org/packages/56/f9/a0f1ca194c50aa29895b442771f036a25b6c41a35e4f35b1a0ea713bedae/rpds_py-0.28.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d678e91b610c29c4b3d52a2c148b641df2b4676ffe47c59f6388d58b99cdc424", size = 348170, upload-time = "2025-10-22T22:22:06.397Z" }, - { url = "https://files.pythonhosted.org/packages/18/ea/42d243d3a586beb72c77fa5def0487daf827210069a95f36328e869599ea/rpds_py-0.28.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e819e0e37a44a78e1383bf1970076e2ccc4dc8c2bbaa2f9bd1dc987e9afff628", size = 378838, upload-time = "2025-10-22T22:22:07.932Z" }, - { url = "https://files.pythonhosted.org/packages/e7/78/3de32e18a94791af8f33601402d9d4f39613136398658412a4e0b3047327/rpds_py-0.28.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5ee514e0f0523db5d3fb171f397c54875dbbd69760a414dccf9d4d7ad628b5bd", size = 393299, upload-time = "2025-10-22T22:22:09.435Z" }, - { url = "https://files.pythonhosted.org/packages/13/7e/4bdb435afb18acea2eb8a25ad56b956f28de7c59f8a1d32827effa0d4514/rpds_py-0.28.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f3fa06d27fdcee47f07a39e02862da0100cb4982508f5ead53ec533cd5fe55e", size = 518000, upload-time = "2025-10-22T22:22:11.326Z" }, - { url = "https://files.pythonhosted.org/packages/31/d0/5f52a656875cdc60498ab035a7a0ac8f399890cc1ee73ebd567bac4e39ae/rpds_py-0.28.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:46959ef2e64f9e4a41fc89aa20dbca2b85531f9a72c21099a3360f35d10b0d5a", size = 408746, upload-time = "2025-10-22T22:22:13.143Z" }, - { url = "https://files.pythonhosted.org/packages/3e/cd/49ce51767b879cde77e7ad9fae164ea15dce3616fe591d9ea1df51152706/rpds_py-0.28.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8455933b4bcd6e83fde3fefc987a023389c4b13f9a58c8d23e4b3f6d13f78c84", size = 386379, upload-time = "2025-10-22T22:22:14.602Z" }, - { url = "https://files.pythonhosted.org/packages/6a/99/e4e1e1ee93a98f72fc450e36c0e4d99c35370220e815288e3ecd2ec36a2a/rpds_py-0.28.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:ad50614a02c8c2962feebe6012b52f9802deec4263946cddea37aaf28dd25a66", size = 401280, upload-time = "2025-10-22T22:22:16.063Z" }, - { url = "https://files.pythonhosted.org/packages/61/35/e0c6a57488392a8b319d2200d03dad2b29c0db9996f5662c3b02d0b86c02/rpds_py-0.28.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5deca01b271492553fdb6c7fd974659dce736a15bae5dad7ab8b93555bceb28", size = 412365, upload-time = "2025-10-22T22:22:17.504Z" }, - { url = "https://files.pythonhosted.org/packages/ff/6a/841337980ea253ec797eb084665436007a1aad0faac1ba097fb906c5f69c/rpds_py-0.28.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:735f8495a13159ce6a0d533f01e8674cec0c57038c920495f87dcb20b3ddb48a", size = 559573, upload-time = "2025-10-22T22:22:19.108Z" }, - { url = "https://files.pythonhosted.org/packages/e7/5e/64826ec58afd4c489731f8b00729c5f6afdb86f1df1df60bfede55d650bb/rpds_py-0.28.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:961ca621ff10d198bbe6ba4957decca61aa2a0c56695384c1d6b79bf61436df5", size = 583973, upload-time = "2025-10-22T22:22:20.768Z" }, - { url = "https://files.pythonhosted.org/packages/b6/ee/44d024b4843f8386a4eeaa4c171b3d31d55f7177c415545fd1a24c249b5d/rpds_py-0.28.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2374e16cc9131022e7d9a8f8d65d261d9ba55048c78f3b6e017971a4f5e6353c", size = 553800, upload-time = "2025-10-22T22:22:22.25Z" }, - { url = "https://files.pythonhosted.org/packages/7d/89/33e675dccff11a06d4d85dbb4d1865f878d5020cbb69b2c1e7b2d3f82562/rpds_py-0.28.0-cp312-cp312-win32.whl", hash = "sha256:d15431e334fba488b081d47f30f091e5d03c18527c325386091f31718952fe08", size = 216954, upload-time = "2025-10-22T22:22:24.105Z" }, - { url = "https://files.pythonhosted.org/packages/af/36/45f6ebb3210887e8ee6dbf1bc710ae8400bb417ce165aaf3024b8360d999/rpds_py-0.28.0-cp312-cp312-win_amd64.whl", hash = "sha256:a410542d61fc54710f750d3764380b53bf09e8c4edbf2f9141a82aa774a04f7c", size = 227844, upload-time = "2025-10-22T22:22:25.551Z" }, - { url = "https://files.pythonhosted.org/packages/57/91/f3fb250d7e73de71080f9a221d19bd6a1c1eb0d12a1ea26513f6c1052ad6/rpds_py-0.28.0-cp312-cp312-win_arm64.whl", hash = "sha256:1f0cfd1c69e2d14f8c892b893997fa9a60d890a0c8a603e88dca4955f26d1edd", size = 217624, upload-time = "2025-10-22T22:22:26.914Z" }, - { url = "https://files.pythonhosted.org/packages/d3/03/ce566d92611dfac0085c2f4b048cd53ed7c274a5c05974b882a908d540a2/rpds_py-0.28.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e9e184408a0297086f880556b6168fa927d677716f83d3472ea333b42171ee3b", size = 366235, upload-time = "2025-10-22T22:22:28.397Z" }, - { url = "https://files.pythonhosted.org/packages/00/34/1c61da1b25592b86fd285bd7bd8422f4c9d748a7373b46126f9ae792a004/rpds_py-0.28.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:edd267266a9b0448f33dc465a97cfc5d467594b600fe28e7fa2f36450e03053a", size = 348241, upload-time = "2025-10-22T22:22:30.171Z" }, - { url = "https://files.pythonhosted.org/packages/fc/00/ed1e28616848c61c493a067779633ebf4b569eccaacf9ccbdc0e7cba2b9d/rpds_py-0.28.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85beb8b3f45e4e32f6802fb6cd6b17f615ef6c6a52f265371fb916fae02814aa", size = 378079, upload-time = "2025-10-22T22:22:31.644Z" }, - { url = "https://files.pythonhosted.org/packages/11/b2/ccb30333a16a470091b6e50289adb4d3ec656fd9951ba8c5e3aaa0746a67/rpds_py-0.28.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d2412be8d00a1b895f8ad827cc2116455196e20ed994bb704bf138fe91a42724", size = 393151, upload-time = "2025-10-22T22:22:33.453Z" }, - { url = "https://files.pythonhosted.org/packages/8c/d0/73e2217c3ee486d555cb84920597480627d8c0240ff3062005c6cc47773e/rpds_py-0.28.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cf128350d384b777da0e68796afdcebc2e9f63f0e9f242217754e647f6d32491", size = 517520, upload-time = "2025-10-22T22:22:34.949Z" }, - { url = "https://files.pythonhosted.org/packages/c4/91/23efe81c700427d0841a4ae7ea23e305654381831e6029499fe80be8a071/rpds_py-0.28.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a2036d09b363aa36695d1cc1a97b36865597f4478470b0697b5ee9403f4fe399", size = 408699, upload-time = "2025-10-22T22:22:36.584Z" }, - { url = "https://files.pythonhosted.org/packages/ca/ee/a324d3198da151820a326c1f988caaa4f37fc27955148a76fff7a2d787a9/rpds_py-0.28.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8e1e9be4fa6305a16be628959188e4fd5cd6f1b0e724d63c6d8b2a8adf74ea6", size = 385720, upload-time = "2025-10-22T22:22:38.014Z" }, - { url = "https://files.pythonhosted.org/packages/19/ad/e68120dc05af8b7cab4a789fccd8cdcf0fe7e6581461038cc5c164cd97d2/rpds_py-0.28.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:0a403460c9dd91a7f23fc3188de6d8977f1d9603a351d5db6cf20aaea95b538d", size = 401096, upload-time = "2025-10-22T22:22:39.869Z" }, - { url = "https://files.pythonhosted.org/packages/99/90/c1e070620042459d60df6356b666bb1f62198a89d68881816a7ed121595a/rpds_py-0.28.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d7366b6553cdc805abcc512b849a519167db8f5e5c3472010cd1228b224265cb", size = 411465, upload-time = "2025-10-22T22:22:41.395Z" }, - { url = "https://files.pythonhosted.org/packages/68/61/7c195b30d57f1b8d5970f600efee72a4fad79ec829057972e13a0370fd24/rpds_py-0.28.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b43c6a3726efd50f18d8120ec0551241c38785b68952d240c45ea553912ac41", size = 558832, upload-time = "2025-10-22T22:22:42.871Z" }, - { url = "https://files.pythonhosted.org/packages/b0/3d/06f3a718864773f69941d4deccdf18e5e47dd298b4628062f004c10f3b34/rpds_py-0.28.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0cb7203c7bc69d7c1585ebb33a2e6074492d2fc21ad28a7b9d40457ac2a51ab7", size = 583230, upload-time = "2025-10-22T22:22:44.877Z" }, - { url = "https://files.pythonhosted.org/packages/66/df/62fc783781a121e77fee9a21ead0a926f1b652280a33f5956a5e7833ed30/rpds_py-0.28.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7a52a5169c664dfb495882adc75c304ae1d50df552fbd68e100fdc719dee4ff9", size = 553268, upload-time = "2025-10-22T22:22:46.441Z" }, - { url = "https://files.pythonhosted.org/packages/84/85/d34366e335140a4837902d3dea89b51f087bd6a63c993ebdff59e93ee61d/rpds_py-0.28.0-cp313-cp313-win32.whl", hash = "sha256:2e42456917b6687215b3e606ab46aa6bca040c77af7df9a08a6dcfe8a4d10ca5", size = 217100, upload-time = "2025-10-22T22:22:48.342Z" }, - { url = "https://files.pythonhosted.org/packages/3c/1c/f25a3f3752ad7601476e3eff395fe075e0f7813fbb9862bd67c82440e880/rpds_py-0.28.0-cp313-cp313-win_amd64.whl", hash = "sha256:e0a0311caedc8069d68fc2bf4c9019b58a2d5ce3cd7cb656c845f1615b577e1e", size = 227759, upload-time = "2025-10-22T22:22:50.219Z" }, - { url = "https://files.pythonhosted.org/packages/e0/d6/5f39b42b99615b5bc2f36ab90423ea404830bdfee1c706820943e9a645eb/rpds_py-0.28.0-cp313-cp313-win_arm64.whl", hash = "sha256:04c1b207ab8b581108801528d59ad80aa83bb170b35b0ddffb29c20e411acdc1", size = 217326, upload-time = "2025-10-22T22:22:51.647Z" }, - { url = "https://files.pythonhosted.org/packages/5c/8b/0c69b72d1cee20a63db534be0df271effe715ef6c744fdf1ff23bb2b0b1c/rpds_py-0.28.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f296ea3054e11fc58ad42e850e8b75c62d9a93a9f981ad04b2e5ae7d2186ff9c", size = 355736, upload-time = "2025-10-22T22:22:53.211Z" }, - { url = "https://files.pythonhosted.org/packages/f7/6d/0c2ee773cfb55c31a8514d2cece856dd299170a49babd50dcffb15ddc749/rpds_py-0.28.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5a7306c19b19005ad98468fcefeb7100b19c79fc23a5f24a12e06d91181193fa", size = 342677, upload-time = "2025-10-22T22:22:54.723Z" }, - { url = "https://files.pythonhosted.org/packages/e2/1c/22513ab25a27ea205144414724743e305e8153e6abe81833b5e678650f5a/rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5d9b86aa501fed9862a443c5c3116f6ead8bc9296185f369277c42542bd646b", size = 371847, upload-time = "2025-10-22T22:22:56.295Z" }, - { url = "https://files.pythonhosted.org/packages/60/07/68e6ccdb4b05115ffe61d31afc94adef1833d3a72f76c9632d4d90d67954/rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e5bbc701eff140ba0e872691d573b3d5d30059ea26e5785acba9132d10c8c31d", size = 381800, upload-time = "2025-10-22T22:22:57.808Z" }, - { url = "https://files.pythonhosted.org/packages/73/bf/6d6d15df80781d7f9f368e7c1a00caf764436518c4877fb28b029c4624af/rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a5690671cd672a45aa8616d7374fdf334a1b9c04a0cac3c854b1136e92374fe", size = 518827, upload-time = "2025-10-22T22:22:59.826Z" }, - { url = "https://files.pythonhosted.org/packages/7b/d3/2decbb2976cc452cbf12a2b0aaac5f1b9dc5dd9d1f7e2509a3ee00421249/rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9f1d92ecea4fa12f978a367c32a5375a1982834649cdb96539dcdc12e609ab1a", size = 399471, upload-time = "2025-10-22T22:23:01.968Z" }, - { url = "https://files.pythonhosted.org/packages/b1/2c/f30892f9e54bd02e5faca3f6a26d6933c51055e67d54818af90abed9748e/rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d252db6b1a78d0a3928b6190156042d54c93660ce4d98290d7b16b5296fb7cc", size = 377578, upload-time = "2025-10-22T22:23:03.52Z" }, - { url = "https://files.pythonhosted.org/packages/f0/5d/3bce97e5534157318f29ac06bf2d279dae2674ec12f7cb9c12739cee64d8/rpds_py-0.28.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d61b355c3275acb825f8777d6c4505f42b5007e357af500939d4a35b19177259", size = 390482, upload-time = "2025-10-22T22:23:05.391Z" }, - { url = "https://files.pythonhosted.org/packages/e3/f0/886bd515ed457b5bd93b166175edb80a0b21a210c10e993392127f1e3931/rpds_py-0.28.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:acbe5e8b1026c0c580d0321c8aae4b0a1e1676861d48d6e8c6586625055b606a", size = 402447, upload-time = "2025-10-22T22:23:06.93Z" }, - { url = "https://files.pythonhosted.org/packages/42/b5/71e8777ac55e6af1f4f1c05b47542a1eaa6c33c1cf0d300dca6a1c6e159a/rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8aa23b6f0fc59b85b4c7d89ba2965af274346f738e8d9fc2455763602e62fd5f", size = 552385, upload-time = "2025-10-22T22:23:08.557Z" }, - { url = "https://files.pythonhosted.org/packages/5d/cb/6ca2d70cbda5a8e36605e7788c4aa3bea7c17d71d213465a5a675079b98d/rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7b14b0c680286958817c22d76fcbca4800ddacef6f678f3a7c79a1fe7067fe37", size = 575642, upload-time = "2025-10-22T22:23:10.348Z" }, - { url = "https://files.pythonhosted.org/packages/4a/d4/407ad9960ca7856d7b25c96dcbe019270b5ffdd83a561787bc682c797086/rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bcf1d210dfee61a6c86551d67ee1031899c0fdbae88b2d44a569995d43797712", size = 544507, upload-time = "2025-10-22T22:23:12.434Z" }, - { url = "https://files.pythonhosted.org/packages/51/31/2f46fe0efcac23fbf5797c6b6b7e1c76f7d60773e525cb65fcbc582ee0f2/rpds_py-0.28.0-cp313-cp313t-win32.whl", hash = "sha256:3aa4dc0fdab4a7029ac63959a3ccf4ed605fee048ba67ce89ca3168da34a1342", size = 205376, upload-time = "2025-10-22T22:23:13.979Z" }, - { url = "https://files.pythonhosted.org/packages/92/e4/15947bda33cbedfc134490a41841ab8870a72a867a03d4969d886f6594a2/rpds_py-0.28.0-cp313-cp313t-win_amd64.whl", hash = "sha256:7b7d9d83c942855e4fdcfa75d4f96f6b9e272d42fffcb72cd4bb2577db2e2907", size = 215907, upload-time = "2025-10-22T22:23:15.5Z" }, - { url = "https://files.pythonhosted.org/packages/08/47/ffe8cd7a6a02833b10623bf765fbb57ce977e9a4318ca0e8cf97e9c3d2b3/rpds_py-0.28.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:dcdcb890b3ada98a03f9f2bb108489cdc7580176cb73b4f2d789e9a1dac1d472", size = 353830, upload-time = "2025-10-22T22:23:17.03Z" }, - { url = "https://files.pythonhosted.org/packages/f9/9f/890f36cbd83a58491d0d91ae0db1702639edb33fb48eeb356f80ecc6b000/rpds_py-0.28.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f274f56a926ba2dc02976ca5b11c32855cbd5925534e57cfe1fda64e04d1add2", size = 341819, upload-time = "2025-10-22T22:23:18.57Z" }, - { url = "https://files.pythonhosted.org/packages/09/e3/921eb109f682aa24fb76207698fbbcf9418738f35a40c21652c29053f23d/rpds_py-0.28.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fe0438ac4a29a520ea94c8c7f1754cdd8feb1bc490dfda1bfd990072363d527", size = 373127, upload-time = "2025-10-22T22:23:20.216Z" }, - { url = "https://files.pythonhosted.org/packages/23/13/bce4384d9f8f4989f1a9599c71b7a2d877462e5fd7175e1f69b398f729f4/rpds_py-0.28.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a358a32dd3ae50e933347889b6af9a1bdf207ba5d1a3f34e1a38cd3540e6733", size = 382767, upload-time = "2025-10-22T22:23:21.787Z" }, - { url = "https://files.pythonhosted.org/packages/23/e1/579512b2d89a77c64ccef5a0bc46a6ef7f72ae0cf03d4b26dcd52e57ee0a/rpds_py-0.28.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e80848a71c78aa328fefaba9c244d588a342c8e03bda518447b624ea64d1ff56", size = 517585, upload-time = "2025-10-22T22:23:23.699Z" }, - { url = "https://files.pythonhosted.org/packages/62/3c/ca704b8d324a2591b0b0adcfcaadf9c862375b11f2f667ac03c61b4fd0a6/rpds_py-0.28.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f586db2e209d54fe177e58e0bc4946bea5fb0102f150b1b2f13de03e1f0976f8", size = 399828, upload-time = "2025-10-22T22:23:25.713Z" }, - { url = "https://files.pythonhosted.org/packages/da/37/e84283b9e897e3adc46b4c88bb3f6ec92a43bd4d2f7ef5b13459963b2e9c/rpds_py-0.28.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ae8ee156d6b586e4292491e885d41483136ab994e719a13458055bec14cf370", size = 375509, upload-time = "2025-10-22T22:23:27.32Z" }, - { url = "https://files.pythonhosted.org/packages/1a/c2/a980beab869d86258bf76ec42dec778ba98151f253a952b02fe36d72b29c/rpds_py-0.28.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:a805e9b3973f7e27f7cab63a6b4f61d90f2e5557cff73b6e97cd5b8540276d3d", size = 392014, upload-time = "2025-10-22T22:23:29.332Z" }, - { url = "https://files.pythonhosted.org/packages/da/b5/b1d3c5f9d3fa5aeef74265f9c64de3c34a0d6d5cd3c81c8b17d5c8f10ed4/rpds_py-0.28.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5d3fd16b6dc89c73a4da0b4ac8b12a7ecc75b2864b95c9e5afed8003cb50a728", size = 402410, upload-time = "2025-10-22T22:23:31.14Z" }, - { url = "https://files.pythonhosted.org/packages/74/ae/cab05ff08dfcc052afc73dcb38cbc765ffc86f94e966f3924cd17492293c/rpds_py-0.28.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:6796079e5d24fdaba6d49bda28e2c47347e89834678f2bc2c1b4fc1489c0fb01", size = 553593, upload-time = "2025-10-22T22:23:32.834Z" }, - { url = "https://files.pythonhosted.org/packages/70/80/50d5706ea2a9bfc9e9c5f401d91879e7c790c619969369800cde202da214/rpds_py-0.28.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:76500820c2af232435cbe215e3324c75b950a027134e044423f59f5b9a1ba515", size = 576925, upload-time = "2025-10-22T22:23:34.47Z" }, - { url = "https://files.pythonhosted.org/packages/ab/12/85a57d7a5855a3b188d024b099fd09c90db55d32a03626d0ed16352413ff/rpds_py-0.28.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bbdc5640900a7dbf9dd707fe6388972f5bbd883633eb68b76591044cfe346f7e", size = 542444, upload-time = "2025-10-22T22:23:36.093Z" }, - { url = "https://files.pythonhosted.org/packages/6c/65/10643fb50179509150eb94d558e8837c57ca8b9adc04bd07b98e57b48f8c/rpds_py-0.28.0-cp314-cp314-win32.whl", hash = "sha256:adc8aa88486857d2b35d75f0640b949759f79dc105f50aa2c27816b2e0dd749f", size = 207968, upload-time = "2025-10-22T22:23:37.638Z" }, - { url = "https://files.pythonhosted.org/packages/b4/84/0c11fe4d9aaea784ff4652499e365963222481ac647bcd0251c88af646eb/rpds_py-0.28.0-cp314-cp314-win_amd64.whl", hash = "sha256:66e6fa8e075b58946e76a78e69e1a124a21d9a48a5b4766d15ba5b06869d1fa1", size = 218876, upload-time = "2025-10-22T22:23:39.179Z" }, - { url = "https://files.pythonhosted.org/packages/0f/e0/3ab3b86ded7bb18478392dc3e835f7b754cd446f62f3fc96f4fe2aca78f6/rpds_py-0.28.0-cp314-cp314-win_arm64.whl", hash = "sha256:a6fe887c2c5c59413353b7c0caff25d0e566623501ccfff88957fa438a69377d", size = 212506, upload-time = "2025-10-22T22:23:40.755Z" }, - { url = "https://files.pythonhosted.org/packages/51/ec/d5681bb425226c3501eab50fc30e9d275de20c131869322c8a1729c7b61c/rpds_py-0.28.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7a69df082db13c7070f7b8b1f155fa9e687f1d6aefb7b0e3f7231653b79a067b", size = 355433, upload-time = "2025-10-22T22:23:42.259Z" }, - { url = "https://files.pythonhosted.org/packages/be/ec/568c5e689e1cfb1ea8b875cffea3649260955f677fdd7ddc6176902d04cd/rpds_py-0.28.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b1cde22f2c30ebb049a9e74c5374994157b9b70a16147d332f89c99c5960737a", size = 342601, upload-time = "2025-10-22T22:23:44.372Z" }, - { url = "https://files.pythonhosted.org/packages/32/fe/51ada84d1d2a1d9d8f2c902cfddd0133b4a5eb543196ab5161d1c07ed2ad/rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5338742f6ba7a51012ea470bd4dc600a8c713c0c72adaa0977a1b1f4327d6592", size = 372039, upload-time = "2025-10-22T22:23:46.025Z" }, - { url = "https://files.pythonhosted.org/packages/07/c1/60144a2f2620abade1a78e0d91b298ac2d9b91bc08864493fa00451ef06e/rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1460ebde1bcf6d496d80b191d854adedcc619f84ff17dc1c6d550f58c9efbba", size = 382407, upload-time = "2025-10-22T22:23:48.098Z" }, - { url = "https://files.pythonhosted.org/packages/45/ed/091a7bbdcf4038a60a461df50bc4c82a7ed6d5d5e27649aab61771c17585/rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e3eb248f2feba84c692579257a043a7699e28a77d86c77b032c1d9fbb3f0219c", size = 518172, upload-time = "2025-10-22T22:23:50.16Z" }, - { url = "https://files.pythonhosted.org/packages/54/dd/02cc90c2fd9c2ef8016fd7813bfacd1c3a1325633ec8f244c47b449fc868/rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3bbba5def70b16cd1c1d7255666aad3b290fbf8d0fe7f9f91abafb73611a91", size = 399020, upload-time = "2025-10-22T22:23:51.81Z" }, - { url = "https://files.pythonhosted.org/packages/ab/81/5d98cc0329bbb911ccecd0b9e19fbf7f3a5de8094b4cda5e71013b2dd77e/rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3114f4db69ac5a1f32e7e4d1cbbe7c8f9cf8217f78e6e002cedf2d54c2a548ed", size = 377451, upload-time = "2025-10-22T22:23:53.711Z" }, - { url = "https://files.pythonhosted.org/packages/b4/07/4d5bcd49e3dfed2d38e2dcb49ab6615f2ceb9f89f5a372c46dbdebb4e028/rpds_py-0.28.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:4b0cb8a906b1a0196b863d460c0222fb8ad0f34041568da5620f9799b83ccf0b", size = 390355, upload-time = "2025-10-22T22:23:55.299Z" }, - { url = "https://files.pythonhosted.org/packages/3f/79/9f14ba9010fee74e4f40bf578735cfcbb91d2e642ffd1abe429bb0b96364/rpds_py-0.28.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf681ac76a60b667106141e11a92a3330890257e6f559ca995fbb5265160b56e", size = 403146, upload-time = "2025-10-22T22:23:56.929Z" }, - { url = "https://files.pythonhosted.org/packages/39/4c/f08283a82ac141331a83a40652830edd3a4a92c34e07e2bbe00baaea2f5f/rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1e8ee6413cfc677ce8898d9cde18cc3a60fc2ba756b0dec5b71eb6eb21c49fa1", size = 552656, upload-time = "2025-10-22T22:23:58.62Z" }, - { url = "https://files.pythonhosted.org/packages/61/47/d922fc0666f0dd8e40c33990d055f4cc6ecff6f502c2d01569dbed830f9b/rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b3072b16904d0b5572a15eb9d31c1954e0d3227a585fc1351aa9878729099d6c", size = 576782, upload-time = "2025-10-22T22:24:00.312Z" }, - { url = "https://files.pythonhosted.org/packages/d3/0c/5bafdd8ccf6aa9d3bfc630cfece457ff5b581af24f46a9f3590f790e3df2/rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b670c30fd87a6aec281c3c9896d3bae4b205fd75d79d06dc87c2503717e46092", size = 544671, upload-time = "2025-10-22T22:24:02.297Z" }, - { url = "https://files.pythonhosted.org/packages/2c/37/dcc5d8397caa924988693519069d0beea077a866128719351a4ad95e82fc/rpds_py-0.28.0-cp314-cp314t-win32.whl", hash = "sha256:8014045a15b4d2b3476f0a287fcc93d4f823472d7d1308d47884ecac9e612be3", size = 205749, upload-time = "2025-10-22T22:24:03.848Z" }, - { url = "https://files.pythonhosted.org/packages/d7/69/64d43b21a10d72b45939a28961216baeb721cc2a430f5f7c3bfa21659a53/rpds_py-0.28.0-cp314-cp314t-win_amd64.whl", hash = "sha256:7a4e59c90d9c27c561eb3160323634a9ff50b04e4f7820600a2beb0ac90db578", size = 216233, upload-time = "2025-10-22T22:24:05.471Z" }, +version = "0.29.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/33/23b3b3419b6a3e0f559c7c0d2ca8fc1b9448382b25245033788785921332/rpds_py-0.29.0.tar.gz", hash = "sha256:fe55fe686908f50154d1dc599232016e50c243b438c3b7432f24e2895b0e5359", size = 69359, upload-time = "2025-11-16T14:50:39.532Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/50/bc0e6e736d94e420df79be4deb5c9476b63165c87bb8f19ef75d100d21b3/rpds_py-0.29.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a0891cfd8db43e085c0ab93ab7e9b0c8fee84780d436d3b266b113e51e79f954", size = 376000, upload-time = "2025-11-16T14:48:19.141Z" }, + { url = "https://files.pythonhosted.org/packages/3e/3a/46676277160f014ae95f24de53bed0e3b7ea66c235e7de0b9df7bd5d68ba/rpds_py-0.29.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3897924d3f9a0361472d884051f9a2460358f9a45b1d85a39a158d2f8f1ad71c", size = 360575, upload-time = "2025-11-16T14:48:20.443Z" }, + { url = "https://files.pythonhosted.org/packages/75/ba/411d414ed99ea1afdd185bbabeeaac00624bd1e4b22840b5e9967ade6337/rpds_py-0.29.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a21deb8e0d1571508c6491ce5ea5e25669b1dd4adf1c9d64b6314842f708b5d", size = 392159, upload-time = "2025-11-16T14:48:22.12Z" }, + { url = "https://files.pythonhosted.org/packages/8f/b1/e18aa3a331f705467a48d0296778dc1fea9d7f6cf675bd261f9a846c7e90/rpds_py-0.29.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9efe71687d6427737a0a2de9ca1c0a216510e6cd08925c44162be23ed7bed2d5", size = 410602, upload-time = "2025-11-16T14:48:23.563Z" }, + { url = "https://files.pythonhosted.org/packages/2f/6c/04f27f0c9f2299274c76612ac9d2c36c5048bb2c6c2e52c38c60bf3868d9/rpds_py-0.29.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:40f65470919dc189c833e86b2c4bd21bd355f98436a2cef9e0a9a92aebc8e57e", size = 515808, upload-time = "2025-11-16T14:48:24.949Z" }, + { url = "https://files.pythonhosted.org/packages/83/56/a8412aa464fb151f8bc0d91fb0bb888adc9039bd41c1c6ba8d94990d8cf8/rpds_py-0.29.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:def48ff59f181130f1a2cb7c517d16328efac3ec03951cca40c1dc2049747e83", size = 416015, upload-time = "2025-11-16T14:48:26.782Z" }, + { url = "https://files.pythonhosted.org/packages/04/4c/f9b8a05faca3d9e0a6397c90d13acb9307c9792b2bff621430c58b1d6e76/rpds_py-0.29.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad7bd570be92695d89285a4b373006930715b78d96449f686af422debb4d3949", size = 395325, upload-time = "2025-11-16T14:48:28.055Z" }, + { url = "https://files.pythonhosted.org/packages/34/60/869f3bfbf8ed7b54f1ad9a5543e0fdffdd40b5a8f587fe300ee7b4f19340/rpds_py-0.29.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:5a572911cd053137bbff8e3a52d31c5d2dba51d3a67ad902629c70185f3f2181", size = 410160, upload-time = "2025-11-16T14:48:29.338Z" }, + { url = "https://files.pythonhosted.org/packages/91/aa/e5b496334e3aba4fe4c8a80187b89f3c1294c5c36f2a926da74338fa5a73/rpds_py-0.29.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d583d4403bcbf10cffc3ab5cee23d7643fcc960dff85973fd3c2d6c86e8dbb0c", size = 425309, upload-time = "2025-11-16T14:48:30.691Z" }, + { url = "https://files.pythonhosted.org/packages/85/68/4e24a34189751ceb6d66b28f18159922828dd84155876551f7ca5b25f14f/rpds_py-0.29.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:070befbb868f257d24c3bb350dbd6e2f645e83731f31264b19d7231dd5c396c7", size = 574644, upload-time = "2025-11-16T14:48:31.964Z" }, + { url = "https://files.pythonhosted.org/packages/8c/cf/474a005ea4ea9c3b4f17b6108b6b13cebfc98ebaff11d6e1b193204b3a93/rpds_py-0.29.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fc935f6b20b0c9f919a8ff024739174522abd331978f750a74bb68abd117bd19", size = 601605, upload-time = "2025-11-16T14:48:33.252Z" }, + { url = "https://files.pythonhosted.org/packages/f4/b1/c56f6a9ab8c5f6bb5c65c4b5f8229167a3a525245b0773f2c0896686b64e/rpds_py-0.29.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c5a8ecaa44ce2d8d9d20a68a2483a74c07f05d72e94a4dff88906c8807e77b0", size = 564593, upload-time = "2025-11-16T14:48:34.643Z" }, + { url = "https://files.pythonhosted.org/packages/b3/13/0494cecce4848f68501e0a229432620b4b57022388b071eeff95f3e1e75b/rpds_py-0.29.0-cp312-cp312-win32.whl", hash = "sha256:ba5e1aeaf8dd6d8f6caba1f5539cddda87d511331714b7b5fc908b6cfc3636b7", size = 223853, upload-time = "2025-11-16T14:48:36.419Z" }, + { url = "https://files.pythonhosted.org/packages/1f/6a/51e9aeb444a00cdc520b032a28b07e5f8dc7bc328b57760c53e7f96997b4/rpds_py-0.29.0-cp312-cp312-win_amd64.whl", hash = "sha256:b5f6134faf54b3cb83375db0f113506f8b7770785be1f95a631e7e2892101977", size = 239895, upload-time = "2025-11-16T14:48:37.956Z" }, + { url = "https://files.pythonhosted.org/packages/d1/d4/8bce56cdad1ab873e3f27cb31c6a51d8f384d66b022b820525b879f8bed1/rpds_py-0.29.0-cp312-cp312-win_arm64.whl", hash = "sha256:b016eddf00dca7944721bf0cd85b6af7f6c4efaf83ee0b37c4133bd39757a8c7", size = 230321, upload-time = "2025-11-16T14:48:39.71Z" }, + { url = "https://files.pythonhosted.org/packages/fd/d9/c5de60d9d371bbb186c3e9bf75f4fc5665e11117a25a06a6b2e0afb7380e/rpds_py-0.29.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1585648d0760b88292eecab5181f5651111a69d90eff35d6b78aa32998886a61", size = 375710, upload-time = "2025-11-16T14:48:41.063Z" }, + { url = "https://files.pythonhosted.org/packages/b3/b3/0860cdd012291dc21272895ce107f1e98e335509ba986dd83d72658b82b9/rpds_py-0.29.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:521807963971a23996ddaf764c682b3e46459b3c58ccd79fefbe16718db43154", size = 360582, upload-time = "2025-11-16T14:48:42.423Z" }, + { url = "https://files.pythonhosted.org/packages/92/8a/a18c2f4a61b3407e56175f6aab6deacdf9d360191a3d6f38566e1eaf7266/rpds_py-0.29.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a8896986efaa243ab713c69e6491a4138410f0fe36f2f4c71e18bd5501e8014", size = 391172, upload-time = "2025-11-16T14:48:43.75Z" }, + { url = "https://files.pythonhosted.org/packages/fd/49/e93354258508c50abc15cdcd5fcf7ac4117f67bb6233ad7859f75e7372a0/rpds_py-0.29.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d24564a700ef41480a984c5ebed62b74e6ce5860429b98b1fede76049e953e6", size = 409586, upload-time = "2025-11-16T14:48:45.498Z" }, + { url = "https://files.pythonhosted.org/packages/5a/8d/a27860dae1c19a6bdc901f90c81f0d581df1943355802961a57cdb5b6cd1/rpds_py-0.29.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6596b93c010d386ae46c9fba9bfc9fc5965fa8228edeac51576299182c2e31c", size = 516339, upload-time = "2025-11-16T14:48:47.308Z" }, + { url = "https://files.pythonhosted.org/packages/fc/ad/a75e603161e79b7110c647163d130872b271c6b28712c803c65d492100f7/rpds_py-0.29.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5cc58aac218826d054c7da7f95821eba94125d88be673ff44267bb89d12a5866", size = 416201, upload-time = "2025-11-16T14:48:48.615Z" }, + { url = "https://files.pythonhosted.org/packages/b9/42/555b4ee17508beafac135c8b450816ace5a96194ce97fefc49d58e5652ea/rpds_py-0.29.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de73e40ebc04dd5d9556f50180395322193a78ec247e637e741c1b954810f295", size = 395095, upload-time = "2025-11-16T14:48:50.027Z" }, + { url = "https://files.pythonhosted.org/packages/cd/f0/c90b671b9031e800ec45112be42ea9f027f94f9ac25faaac8770596a16a1/rpds_py-0.29.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:295ce5ac7f0cf69a651ea75c8f76d02a31f98e5698e82a50a5f4d4982fbbae3b", size = 410077, upload-time = "2025-11-16T14:48:51.515Z" }, + { url = "https://files.pythonhosted.org/packages/3d/80/9af8b640b81fe21e6f718e9dec36c0b5f670332747243130a5490f292245/rpds_py-0.29.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1ea59b23ea931d494459c8338056fe7d93458c0bf3ecc061cd03916505369d55", size = 424548, upload-time = "2025-11-16T14:48:53.237Z" }, + { url = "https://files.pythonhosted.org/packages/e4/0b/b5647446e991736e6a495ef510e6710df91e880575a586e763baeb0aa770/rpds_py-0.29.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f49d41559cebd608042fdcf54ba597a4a7555b49ad5c1c0c03e0af82692661cd", size = 573661, upload-time = "2025-11-16T14:48:54.769Z" }, + { url = "https://files.pythonhosted.org/packages/f7/b3/1b1c9576839ff583d1428efbf59f9ee70498d8ce6c0b328ac02f1e470879/rpds_py-0.29.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:05a2bd42768ea988294ca328206efbcc66e220d2d9b7836ee5712c07ad6340ea", size = 600937, upload-time = "2025-11-16T14:48:56.247Z" }, + { url = "https://files.pythonhosted.org/packages/6c/7b/b6cfca2f9fee4c4494ce54f7fb1b9f578867495a9aa9fc0d44f5f735c8e0/rpds_py-0.29.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33ca7bdfedd83339ca55da3a5e1527ee5870d4b8369456b5777b197756f3ca22", size = 564496, upload-time = "2025-11-16T14:48:57.691Z" }, + { url = "https://files.pythonhosted.org/packages/b9/fb/ba29ec7f0f06eb801bac5a23057a9ff7670623b5e8013bd59bec4aa09de8/rpds_py-0.29.0-cp313-cp313-win32.whl", hash = "sha256:20c51ae86a0bb9accc9ad4e6cdeec58d5ebb7f1b09dd4466331fc65e1766aae7", size = 223126, upload-time = "2025-11-16T14:48:59.058Z" }, + { url = "https://files.pythonhosted.org/packages/3c/6b/0229d3bed4ddaa409e6d90b0ae967ed4380e4bdd0dad6e59b92c17d42457/rpds_py-0.29.0-cp313-cp313-win_amd64.whl", hash = "sha256:6410e66f02803600edb0b1889541f4b5cc298a5ccda0ad789cc50ef23b54813e", size = 239771, upload-time = "2025-11-16T14:49:00.872Z" }, + { url = "https://files.pythonhosted.org/packages/e4/38/d2868f058b164f8efd89754d85d7b1c08b454f5c07ac2e6cc2e9bd4bd05b/rpds_py-0.29.0-cp313-cp313-win_arm64.whl", hash = "sha256:56838e1cd9174dc23c5691ee29f1d1be9eab357f27efef6bded1328b23e1ced2", size = 229994, upload-time = "2025-11-16T14:49:02.673Z" }, + { url = "https://files.pythonhosted.org/packages/52/91/5de91c5ec7d41759beec9b251630824dbb8e32d20c3756da1a9a9d309709/rpds_py-0.29.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:37d94eadf764d16b9a04307f2ab1d7af6dc28774bbe0535c9323101e14877b4c", size = 365886, upload-time = "2025-11-16T14:49:04.133Z" }, + { url = "https://files.pythonhosted.org/packages/85/7c/415d8c1b016d5f47ecec5145d9d6d21002d39dce8761b30f6c88810b455a/rpds_py-0.29.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d472cf73efe5726a067dce63eebe8215b14beabea7c12606fd9994267b3cfe2b", size = 355262, upload-time = "2025-11-16T14:49:05.543Z" }, + { url = "https://files.pythonhosted.org/packages/3d/14/bf83e2daa4f980e4dc848aed9299792a8b84af95e12541d9e7562f84a6ef/rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72fdfd5ff8992e4636621826371e3ac5f3e3b8323e9d0e48378e9c13c3dac9d0", size = 384826, upload-time = "2025-11-16T14:49:07.301Z" }, + { url = "https://files.pythonhosted.org/packages/33/b8/53330c50a810ae22b4fbba5e6cf961b68b9d72d9bd6780a7c0a79b070857/rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2549d833abdf8275c901313b9e8ff8fba57e50f6a495035a2a4e30621a2f7cc4", size = 394234, upload-time = "2025-11-16T14:49:08.782Z" }, + { url = "https://files.pythonhosted.org/packages/cc/32/01e2e9645cef0e584f518cfde4567563e57db2257244632b603f61b40e50/rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4448dad428f28a6a767c3e3b80cde3446a22a0efbddaa2360f4bb4dc836d0688", size = 520008, upload-time = "2025-11-16T14:49:10.253Z" }, + { url = "https://files.pythonhosted.org/packages/98/c3/0d1b95a81affae2b10f950782e33a1fd2edd6ce2a479966cac98c9a66f57/rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:115f48170fd4296a33938d8c11f697f5f26e0472e43d28f35624764173a60e4d", size = 409569, upload-time = "2025-11-16T14:49:12.478Z" }, + { url = "https://files.pythonhosted.org/packages/fa/60/aa3b8678f3f009f675b99174fa2754302a7fbfe749162e8043d111de2d88/rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e5bb73ffc029820f4348e9b66b3027493ae00bca6629129cd433fd7a76308ee", size = 385188, upload-time = "2025-11-16T14:49:13.88Z" }, + { url = "https://files.pythonhosted.org/packages/92/02/5546c1c8aa89c18d40c1fcffdcc957ba730dee53fb7c3ca3a46f114761d2/rpds_py-0.29.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:b1581fcde18fcdf42ea2403a16a6b646f8eb1e58d7f90a0ce693da441f76942e", size = 398587, upload-time = "2025-11-16T14:49:15.339Z" }, + { url = "https://files.pythonhosted.org/packages/6c/e0/ad6eeaf47e236eba052fa34c4073078b9e092bd44da6bbb35aaae9580669/rpds_py-0.29.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16e9da2bda9eb17ea318b4c335ec9ac1818e88922cbe03a5743ea0da9ecf74fb", size = 416641, upload-time = "2025-11-16T14:49:16.832Z" }, + { url = "https://files.pythonhosted.org/packages/1a/93/0acedfd50ad9cdd3879c615a6dc8c5f1ce78d2fdf8b87727468bb5bb4077/rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:28fd300326dd21198f311534bdb6d7e989dd09b3418b3a91d54a0f384c700967", size = 566683, upload-time = "2025-11-16T14:49:18.342Z" }, + { url = "https://files.pythonhosted.org/packages/62/53/8c64e0f340a9e801459fc6456821abc15b3582cb5dc3932d48705a9d9ac7/rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2aba991e041d031c7939e1358f583ae405a7bf04804ca806b97a5c0e0af1ea5e", size = 592730, upload-time = "2025-11-16T14:49:19.767Z" }, + { url = "https://files.pythonhosted.org/packages/85/ef/3109b6584f8c4b0d2490747c916df833c127ecfa82be04d9a40a376f2090/rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:7f437026dbbc3f08c99cc41a5b2570c6e1a1ddbe48ab19a9b814254128d4ea7a", size = 557361, upload-time = "2025-11-16T14:49:21.574Z" }, + { url = "https://files.pythonhosted.org/packages/ff/3b/61586475e82d57f01da2c16edb9115a618afe00ce86fe1b58936880b15af/rpds_py-0.29.0-cp313-cp313t-win32.whl", hash = "sha256:6e97846e9800a5d0fe7be4d008f0c93d0feeb2700da7b1f7528dabafb31dfadb", size = 211227, upload-time = "2025-11-16T14:49:23.03Z" }, + { url = "https://files.pythonhosted.org/packages/3b/3a/12dc43f13594a54ea0c9d7e9d43002116557330e3ad45bc56097ddf266e2/rpds_py-0.29.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f49196aec7c4b406495f60e6f947ad71f317a765f956d74bbd83996b9edc0352", size = 225248, upload-time = "2025-11-16T14:49:24.841Z" }, + { url = "https://files.pythonhosted.org/packages/89/b1/0b1474e7899371d9540d3bbb2a499a3427ae1fc39c998563fe9035a1073b/rpds_py-0.29.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:394d27e4453d3b4d82bb85665dc1fcf4b0badc30fc84282defed71643b50e1a1", size = 363731, upload-time = "2025-11-16T14:49:26.683Z" }, + { url = "https://files.pythonhosted.org/packages/28/12/3b7cf2068d0a334ed1d7b385a9c3c8509f4c2bcba3d4648ea71369de0881/rpds_py-0.29.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55d827b2ae95425d3be9bc9a5838b6c29d664924f98146557f7715e331d06df8", size = 354343, upload-time = "2025-11-16T14:49:28.24Z" }, + { url = "https://files.pythonhosted.org/packages/eb/73/5afcf8924bc02a749416eda64e17ac9c9b28f825f4737385295a0e99b0c1/rpds_py-0.29.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc31a07ed352e5462d3ee1b22e89285f4ce97d5266f6d1169da1142e78045626", size = 385406, upload-time = "2025-11-16T14:49:29.943Z" }, + { url = "https://files.pythonhosted.org/packages/c8/37/5db736730662508535221737a21563591b6f43c77f2e388951c42f143242/rpds_py-0.29.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4695dd224212f6105db7ea62197144230b808d6b2bba52238906a2762f1d1e7", size = 396162, upload-time = "2025-11-16T14:49:31.833Z" }, + { url = "https://files.pythonhosted.org/packages/70/0d/491c1017d14f62ce7bac07c32768d209a50ec567d76d9f383b4cfad19b80/rpds_py-0.29.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcae1770b401167f8b9e1e3f566562e6966ffa9ce63639916248a9e25fa8a244", size = 517719, upload-time = "2025-11-16T14:49:33.804Z" }, + { url = "https://files.pythonhosted.org/packages/d7/25/b11132afcb17cd5d82db173f0c8dab270ffdfaba43e5ce7a591837ae9649/rpds_py-0.29.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:90f30d15f45048448b8da21c41703b31c61119c06c216a1bf8c245812a0f0c17", size = 409498, upload-time = "2025-11-16T14:49:35.222Z" }, + { url = "https://files.pythonhosted.org/packages/0f/7d/e6543cedfb2e6403a1845710a5ab0e0ccf8fc288e0b5af9a70bfe2c12053/rpds_py-0.29.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44a91e0ab77bdc0004b43261a4b8cd6d6b451e8d443754cfda830002b5745b32", size = 382743, upload-time = "2025-11-16T14:49:36.704Z" }, + { url = "https://files.pythonhosted.org/packages/75/11/a4ebc9f654293ae9fefb83b2b6be7f3253e85ea42a5db2f77d50ad19aaeb/rpds_py-0.29.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:4aa195e5804d32c682e453b34474f411ca108e4291c6a0f824ebdc30a91c973c", size = 400317, upload-time = "2025-11-16T14:49:39.132Z" }, + { url = "https://files.pythonhosted.org/packages/52/18/97677a60a81c7f0e5f64e51fb3f8271c5c8fcabf3a2df18e97af53d7c2bf/rpds_py-0.29.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7971bdb7bf4ee0f7e6f67fa4c7fbc6019d9850cc977d126904392d363f6f8318", size = 416979, upload-time = "2025-11-16T14:49:40.575Z" }, + { url = "https://files.pythonhosted.org/packages/f0/69/28ab391a9968f6c746b2a2db181eaa4d16afaa859fedc9c2f682d19f7e18/rpds_py-0.29.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8ae33ad9ce580c7a47452c3b3f7d8a9095ef6208e0a0c7e4e2384f9fc5bf8212", size = 567288, upload-time = "2025-11-16T14:49:42.24Z" }, + { url = "https://files.pythonhosted.org/packages/3b/d3/0c7afdcdb830eee94f5611b64e71354ffe6ac8df82d00c2faf2bfffd1d4e/rpds_py-0.29.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c661132ab2fb4eeede2ef69670fd60da5235209874d001a98f1542f31f2a8a94", size = 593157, upload-time = "2025-11-16T14:49:43.782Z" }, + { url = "https://files.pythonhosted.org/packages/e2/ac/a0fcbc2feed4241cf26d32268c195eb88ddd4bd862adfc9d4b25edfba535/rpds_py-0.29.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bb78b3a0d31ac1bde132c67015a809948db751cb4e92cdb3f0b242e430b6ed0d", size = 554741, upload-time = "2025-11-16T14:49:45.557Z" }, + { url = "https://files.pythonhosted.org/packages/0f/f1/fcc24137c470df8588674a677f33719d5800ec053aaacd1de8a5d5d84d9e/rpds_py-0.29.0-cp314-cp314-win32.whl", hash = "sha256:f475f103488312e9bd4000bc890a95955a07b2d0b6e8884aef4be56132adbbf1", size = 215508, upload-time = "2025-11-16T14:49:47.562Z" }, + { url = "https://files.pythonhosted.org/packages/7b/c7/1d169b2045512eac019918fc1021ea07c30e84a4343f9f344e3e0aa8c788/rpds_py-0.29.0-cp314-cp314-win_amd64.whl", hash = "sha256:b9cf2359a4fca87cfb6801fae83a76aedf66ee1254a7a151f1341632acf67f1b", size = 228125, upload-time = "2025-11-16T14:49:49.064Z" }, + { url = "https://files.pythonhosted.org/packages/be/36/0cec88aaba70ec4a6e381c444b0d916738497d27f0c30406e3d9fcbd3bc2/rpds_py-0.29.0-cp314-cp314-win_arm64.whl", hash = "sha256:9ba8028597e824854f0f1733d8b964e914ae3003b22a10c2c664cb6927e0feb9", size = 221992, upload-time = "2025-11-16T14:49:50.777Z" }, + { url = "https://files.pythonhosted.org/packages/b1/fa/a2e524631717c9c0eb5d90d30f648cfba6b731047821c994acacb618406c/rpds_py-0.29.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:e71136fd0612556b35c575dc2726ae04a1669e6a6c378f2240312cf5d1a2ab10", size = 366425, upload-time = "2025-11-16T14:49:52.691Z" }, + { url = "https://files.pythonhosted.org/packages/a2/a4/6d43ebe0746ff694a30233f63f454aed1677bd50ab7a59ff6b2bb5ac61f2/rpds_py-0.29.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:76fe96632d53f3bf0ea31ede2f53bbe3540cc2736d4aec3b3801b0458499ef3a", size = 355282, upload-time = "2025-11-16T14:49:54.292Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a7/52fd8270e0320b09eaf295766ae81dd175f65394687906709b3e75c71d06/rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9459a33f077130dbb2c7c3cea72ee9932271fb3126404ba2a2661e4fe9eb7b79", size = 384968, upload-time = "2025-11-16T14:49:55.857Z" }, + { url = "https://files.pythonhosted.org/packages/f4/7d/e6bc526b7a14e1ef80579a52c1d4ad39260a058a51d66c6039035d14db9d/rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5c9546cfdd5d45e562cc0444b6dddc191e625c62e866bf567a2c69487c7ad28a", size = 394714, upload-time = "2025-11-16T14:49:57.343Z" }, + { url = "https://files.pythonhosted.org/packages/c0/3f/f0ade3954e7db95c791e7eaf978aa7e08a756d2046e8bdd04d08146ed188/rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12597d11d97b8f7e376c88929a6e17acb980e234547c92992f9f7c058f1a7310", size = 520136, upload-time = "2025-11-16T14:49:59.162Z" }, + { url = "https://files.pythonhosted.org/packages/87/b3/07122ead1b97009715ab9d4082be6d9bd9546099b2b03fae37c3116f72be/rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28de03cf48b8a9e6ec10318f2197b83946ed91e2891f651a109611be4106ac4b", size = 409250, upload-time = "2025-11-16T14:50:00.698Z" }, + { url = "https://files.pythonhosted.org/packages/c9/c6/dcbee61fd1dc892aedcb1b489ba661313101aa82ec84b1a015d4c63ebfda/rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd7951c964069039acc9d67a8ff1f0a7f34845ae180ca542b17dc1456b1f1808", size = 384940, upload-time = "2025-11-16T14:50:02.312Z" }, + { url = "https://files.pythonhosted.org/packages/47/11/914ecb6f3574cf9bf8b38aced4063e0f787d6e1eb30b181a7efbc6c1da9a/rpds_py-0.29.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:c07d107b7316088f1ac0177a7661ca0c6670d443f6fe72e836069025e6266761", size = 399392, upload-time = "2025-11-16T14:50:03.829Z" }, + { url = "https://files.pythonhosted.org/packages/f5/fd/2f4bd9433f58f816434bb934313584caa47dbc6f03ce5484df8ac8980561/rpds_py-0.29.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1de2345af363d25696969befc0c1688a6cb5e8b1d32b515ef84fc245c6cddba3", size = 416796, upload-time = "2025-11-16T14:50:05.558Z" }, + { url = "https://files.pythonhosted.org/packages/79/a5/449f0281af33efa29d5c71014399d74842342ae908d8cd38260320167692/rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:00e56b12d2199ca96068057e1ae7f9998ab6e99cda82431afafd32f3ec98cca9", size = 566843, upload-time = "2025-11-16T14:50:07.243Z" }, + { url = "https://files.pythonhosted.org/packages/ab/32/0a6a1ccee2e37fcb1b7ba9afde762b77182dbb57937352a729c6cd3cf2bb/rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3919a3bbecee589300ed25000b6944174e07cd20db70552159207b3f4bbb45b8", size = 593956, upload-time = "2025-11-16T14:50:09.029Z" }, + { url = "https://files.pythonhosted.org/packages/4a/3d/eb820f95dce4306f07a495ede02fb61bef36ea201d9137d4fcd5ab94ec1e/rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e7fa2ccc312bbd91e43aa5e0869e46bc03278a3dddb8d58833150a18b0f0283a", size = 557288, upload-time = "2025-11-16T14:50:10.73Z" }, + { url = "https://files.pythonhosted.org/packages/e9/f8/b8ff786f40470462a252918e0836e0db903c28e88e3eec66bc4a7856ee5d/rpds_py-0.29.0-cp314-cp314t-win32.whl", hash = "sha256:97c817863ffc397f1e6a6e9d2d89fe5408c0a9922dac0329672fb0f35c867ea5", size = 211382, upload-time = "2025-11-16T14:50:12.827Z" }, + { url = "https://files.pythonhosted.org/packages/c9/7f/1a65ae870bc9d0576aebb0c501ea5dccf1ae2178fe2821042150ebd2e707/rpds_py-0.29.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2023473f444752f0f82a58dfcbee040d0a1b3d1b3c2ec40e884bd25db6d117d2", size = 225919, upload-time = "2025-11-16T14:50:14.734Z" }, ] [[package]] @@ -3569,42 +3651,42 @@ wheels = [ [[package]] name = "ruff" -version = "0.14.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/9e/58/6ca66896635352812de66f71cdf9ff86b3a4f79071ca5730088c0cd0fc8d/ruff-0.14.1.tar.gz", hash = "sha256:1dd86253060c4772867c61791588627320abcb6ed1577a90ef432ee319729b69", size = 5513429, upload-time = "2025-10-16T18:05:41.766Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/8d/39/9cc5ab181478d7a18adc1c1e051a84ee02bec94eb9bdfd35643d7c74ca31/ruff-0.14.1-py3-none-linux_armv6l.whl", hash = "sha256:083bfc1f30f4a391ae09c6f4f99d83074416b471775b59288956f5bc18e82f8b", size = 12445415, upload-time = "2025-10-16T18:04:48.227Z" }, - { url = "https://files.pythonhosted.org/packages/ef/2e/1226961855ccd697255988f5a2474890ac7c5863b080b15bd038df820818/ruff-0.14.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:f6fa757cd717f791009f7669fefb09121cc5f7d9bd0ef211371fad68c2b8b224", size = 12784267, upload-time = "2025-10-16T18:04:52.515Z" }, - { url = "https://files.pythonhosted.org/packages/c1/ea/fd9e95863124ed159cd0667ec98449ae461de94acda7101f1acb6066da00/ruff-0.14.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d6191903d39ac156921398e9c86b7354d15e3c93772e7dbf26c9fcae59ceccd5", size = 11781872, upload-time = "2025-10-16T18:04:55.396Z" }, - { url = "https://files.pythonhosted.org/packages/1e/5a/e890f7338ff537dba4589a5e02c51baa63020acfb7c8cbbaea4831562c96/ruff-0.14.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed04f0e04f7a4587244e5c9d7df50e6b5bf2705d75059f409a6421c593a35896", size = 12226558, upload-time = "2025-10-16T18:04:58.166Z" }, - { url = "https://files.pythonhosted.org/packages/a6/7a/8ab5c3377f5bf31e167b73651841217542bcc7aa1c19e83030835cc25204/ruff-0.14.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5c9e6cf6cd4acae0febbce29497accd3632fe2025c0c583c8b87e8dbdeae5f61", size = 12187898, upload-time = "2025-10-16T18:05:01.455Z" }, - { url = "https://files.pythonhosted.org/packages/48/8d/ba7c33aa55406955fc124e62c8259791c3d42e3075a71710fdff9375134f/ruff-0.14.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6fa2458527794ecdfbe45f654e42c61f2503a230545a91af839653a0a93dbc6", size = 12939168, upload-time = "2025-10-16T18:05:04.397Z" }, - { url = "https://files.pythonhosted.org/packages/b4/c2/70783f612b50f66d083380e68cbd1696739d88e9b4f6164230375532c637/ruff-0.14.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:39f1c392244e338b21d42ab29b8a6392a722c5090032eb49bb4d6defcdb34345", size = 14386942, upload-time = "2025-10-16T18:05:07.102Z" }, - { url = "https://files.pythonhosted.org/packages/48/44/cd7abb9c776b66d332119d67f96acf15830d120f5b884598a36d9d3f4d83/ruff-0.14.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7382fa12a26cce1f95070ce450946bec357727aaa428983036362579eadcc5cf", size = 13990622, upload-time = "2025-10-16T18:05:09.882Z" }, - { url = "https://files.pythonhosted.org/packages/eb/56/4259b696db12ac152fe472764b4f78bbdd9b477afd9bc3a6d53c01300b37/ruff-0.14.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd0bf2be3ae8521e1093a487c4aa3b455882f139787770698530d28ed3fbb37c", size = 13431143, upload-time = "2025-10-16T18:05:13.46Z" }, - { url = "https://files.pythonhosted.org/packages/e0/35/266a80d0eb97bd224b3265b9437bd89dde0dcf4faf299db1212e81824e7e/ruff-0.14.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cabcaa9ccf8089fb4fdb78d17cc0e28241520f50f4c2e88cb6261ed083d85151", size = 13132844, upload-time = "2025-10-16T18:05:16.1Z" }, - { url = "https://files.pythonhosted.org/packages/65/6e/d31ce218acc11a8d91ef208e002a31acf315061a85132f94f3df7a252b18/ruff-0.14.1-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:747d583400f6125ec11a4c14d1c8474bf75d8b419ad22a111a537ec1a952d192", size = 13401241, upload-time = "2025-10-16T18:05:19.395Z" }, - { url = "https://files.pythonhosted.org/packages/9f/b5/dbc4221bf0b03774b3b2f0d47f39e848d30664157c15b965a14d890637d2/ruff-0.14.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5a6e74c0efd78515a1d13acbfe6c90f0f5bd822aa56b4a6d43a9ffb2ae6e56cd", size = 12132476, upload-time = "2025-10-16T18:05:22.163Z" }, - { url = "https://files.pythonhosted.org/packages/98/4b/ac99194e790ccd092d6a8b5f341f34b6e597d698e3077c032c502d75ea84/ruff-0.14.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0ea6a864d2fb41a4b6d5b456ed164302a0d96f4daac630aeba829abfb059d020", size = 12139749, upload-time = "2025-10-16T18:05:25.162Z" }, - { url = "https://files.pythonhosted.org/packages/47/26/7df917462c3bb5004e6fdfcc505a49e90bcd8a34c54a051953118c00b53a/ruff-0.14.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:0826b8764f94229604fa255918d1cc45e583e38c21c203248b0bfc9a0e930be5", size = 12544758, upload-time = "2025-10-16T18:05:28.018Z" }, - { url = "https://files.pythonhosted.org/packages/64/d0/81e7f0648e9764ad9b51dd4be5e5dac3fcfff9602428ccbae288a39c2c22/ruff-0.14.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:cbc52160465913a1a3f424c81c62ac8096b6a491468e7d872cb9444a860bc33d", size = 13221811, upload-time = "2025-10-16T18:05:30.707Z" }, - { url = "https://files.pythonhosted.org/packages/c3/07/3c45562c67933cc35f6d5df4ca77dabbcd88fddaca0d6b8371693d29fd56/ruff-0.14.1-py3-none-win32.whl", hash = "sha256:e037ea374aaaff4103240ae79168c0945ae3d5ae8db190603de3b4012bd1def6", size = 12319467, upload-time = "2025-10-16T18:05:33.261Z" }, - { url = "https://files.pythonhosted.org/packages/02/88/0ee4ca507d4aa05f67e292d2e5eb0b3e358fbcfe527554a2eda9ac422d6b/ruff-0.14.1-py3-none-win_amd64.whl", hash = "sha256:59d599cdff9c7f925a017f6f2c256c908b094e55967f93f2821b1439928746a1", size = 13401123, upload-time = "2025-10-16T18:05:35.984Z" }, - { url = "https://files.pythonhosted.org/packages/b8/81/4b6387be7014858d924b843530e1b2a8e531846807516e9bea2ee0936bf7/ruff-0.14.1-py3-none-win_arm64.whl", hash = "sha256:e3b443c4c9f16ae850906b8d0a707b2a4c16f8d2f0a7fe65c475c5886665ce44", size = 12436636, upload-time = "2025-10-16T18:05:38.995Z" }, +version = "0.14.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/82/fa/fbb67a5780ae0f704876cb8ac92d6d76da41da4dc72b7ed3565ab18f2f52/ruff-0.14.5.tar.gz", hash = "sha256:8d3b48d7d8aad423d3137af7ab6c8b1e38e4de104800f0d596990f6ada1a9fc1", size = 5615944, upload-time = "2025-11-13T19:58:51.155Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/31/c07e9c535248d10836a94e4f4e8c5a31a1beed6f169b31405b227872d4f4/ruff-0.14.5-py3-none-linux_armv6l.whl", hash = "sha256:f3b8248123b586de44a8018bcc9fefe31d23dda57a34e6f0e1e53bd51fd63594", size = 13171630, upload-time = "2025-11-13T19:57:54.894Z" }, + { url = "https://files.pythonhosted.org/packages/8e/5c/283c62516dca697cd604c2796d1487396b7a436b2f0ecc3fd412aca470e0/ruff-0.14.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:f7a75236570318c7a30edd7f5491945f0169de738d945ca8784500b517163a72", size = 13413925, upload-time = "2025-11-13T19:57:59.181Z" }, + { url = "https://files.pythonhosted.org/packages/b6/f3/aa319f4afc22cb6fcba2b9cdfc0f03bbf747e59ab7a8c5e90173857a1361/ruff-0.14.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:6d146132d1ee115f8802356a2dc9a634dbf58184c51bff21f313e8cd1c74899a", size = 12574040, upload-time = "2025-11-13T19:58:02.056Z" }, + { url = "https://files.pythonhosted.org/packages/f9/7f/cb5845fcc7c7e88ed57f58670189fc2ff517fe2134c3821e77e29fd3b0c8/ruff-0.14.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2380596653dcd20b057794d55681571a257a42327da8894b93bbd6111aa801f", size = 13009755, upload-time = "2025-11-13T19:58:05.172Z" }, + { url = "https://files.pythonhosted.org/packages/21/d2/bcbedbb6bcb9253085981730687ddc0cc7b2e18e8dc13cf4453de905d7a0/ruff-0.14.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d1fa985a42b1f075a098fa1ab9d472b712bdb17ad87a8ec86e45e7fa6273e68", size = 12937641, upload-time = "2025-11-13T19:58:08.345Z" }, + { url = "https://files.pythonhosted.org/packages/a4/58/e25de28a572bdd60ffc6bb71fc7fd25a94ec6a076942e372437649cbb02a/ruff-0.14.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88f0770d42b7fa02bbefddde15d235ca3aa24e2f0137388cc15b2dcbb1f7c7a7", size = 13610854, upload-time = "2025-11-13T19:58:11.419Z" }, + { url = "https://files.pythonhosted.org/packages/7d/24/43bb3fd23ecee9861970978ea1a7a63e12a204d319248a7e8af539984280/ruff-0.14.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:3676cb02b9061fee7294661071c4709fa21419ea9176087cb77e64410926eb78", size = 15061088, upload-time = "2025-11-13T19:58:14.551Z" }, + { url = "https://files.pythonhosted.org/packages/23/44/a022f288d61c2f8c8645b24c364b719aee293ffc7d633a2ca4d116b9c716/ruff-0.14.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b595bedf6bc9cab647c4a173a61acf4f1ac5f2b545203ba82f30fcb10b0318fb", size = 14734717, upload-time = "2025-11-13T19:58:17.518Z" }, + { url = "https://files.pythonhosted.org/packages/58/81/5c6ba44de7e44c91f68073e0658109d8373b0590940efe5bd7753a2585a3/ruff-0.14.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f55382725ad0bdb2e8ee2babcbbfb16f124f5a59496a2f6a46f1d9d99d93e6e2", size = 14028812, upload-time = "2025-11-13T19:58:20.533Z" }, + { url = "https://files.pythonhosted.org/packages/ad/ef/41a8b60f8462cb320f68615b00299ebb12660097c952c600c762078420f8/ruff-0.14.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7497d19dce23976bdaca24345ae131a1d38dcfe1b0850ad8e9e6e4fa321a6e19", size = 13825656, upload-time = "2025-11-13T19:58:23.345Z" }, + { url = "https://files.pythonhosted.org/packages/7c/00/207e5de737fdb59b39eb1fac806904fe05681981b46d6a6db9468501062e/ruff-0.14.5-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:410e781f1122d6be4f446981dd479470af86537fb0b8857f27a6e872f65a38e4", size = 13959922, upload-time = "2025-11-13T19:58:26.537Z" }, + { url = "https://files.pythonhosted.org/packages/bc/7e/fa1f5c2776db4be405040293618846a2dece5c70b050874c2d1f10f24776/ruff-0.14.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c01be527ef4c91a6d55e53b337bfe2c0f82af024cc1a33c44792d6844e2331e1", size = 12932501, upload-time = "2025-11-13T19:58:29.822Z" }, + { url = "https://files.pythonhosted.org/packages/67/d8/d86bf784d693a764b59479a6bbdc9515ae42c340a5dc5ab1dabef847bfaa/ruff-0.14.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f66e9bb762e68d66e48550b59c74314168ebb46199886c5c5aa0b0fbcc81b151", size = 12927319, upload-time = "2025-11-13T19:58:32.923Z" }, + { url = "https://files.pythonhosted.org/packages/ac/de/ee0b304d450ae007ce0cb3e455fe24fbcaaedae4ebaad6c23831c6663651/ruff-0.14.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d93be8f1fa01022337f1f8f3bcaa7ffee2d0b03f00922c45c2207954f351f465", size = 13206209, upload-time = "2025-11-13T19:58:35.952Z" }, + { url = "https://files.pythonhosted.org/packages/33/aa/193ca7e3a92d74f17d9d5771a765965d2cf42c86e6f0fd95b13969115723/ruff-0.14.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:c135d4b681f7401fe0e7312017e41aba9b3160861105726b76cfa14bc25aa367", size = 13953709, upload-time = "2025-11-13T19:58:39.002Z" }, + { url = "https://files.pythonhosted.org/packages/cc/f1/7119e42aa1d3bf036ffc9478885c2e248812b7de9abea4eae89163d2929d/ruff-0.14.5-py3-none-win32.whl", hash = "sha256:c83642e6fccfb6dea8b785eb9f456800dcd6a63f362238af5fc0c83d027dd08b", size = 12925808, upload-time = "2025-11-13T19:58:42.779Z" }, + { url = "https://files.pythonhosted.org/packages/3b/9d/7c0a255d21e0912114784e4a96bf62af0618e2190cae468cd82b13625ad2/ruff-0.14.5-py3-none-win_amd64.whl", hash = "sha256:9d55d7af7166f143c94eae1db3312f9ea8f95a4defef1979ed516dbb38c27621", size = 14331546, upload-time = "2025-11-13T19:58:45.691Z" }, + { url = "https://files.pythonhosted.org/packages/e5/80/69756670caedcf3b9be597a6e12276a6cf6197076eb62aad0c608f8efce0/ruff-0.14.5-py3-none-win_arm64.whl", hash = "sha256:4b700459d4649e2594b31f20a9de33bc7c19976d4746d8d0798ad959621d64a4", size = 13433331, upload-time = "2025-11-13T19:58:48.434Z" }, ] [[package]] name = "s3fs" -version = "2025.9.0" +version = "2025.10.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiobotocore" }, { name = "aiohttp" }, { name = "fsspec" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ee/f3/8e6371436666aedfd16e63ff68a51b8a8fcf5f33a0eee33c35e0b2476b27/s3fs-2025.9.0.tar.gz", hash = "sha256:6d44257ef19ea64968d0720744c4af7a063a05f5c1be0e17ce943bef7302bc30", size = 77823, upload-time = "2025-09-02T19:18:21.781Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/ee/7cf7de3b17ef6db10b027cc9f8a1108ceb6333e267943e666a35882b1474/s3fs-2025.10.0.tar.gz", hash = "sha256:e8be6cddc77aceea1681ece0f472c3a7f8ef71a0d2acddb1cc92bb6afa3e9e4f", size = 80383, upload-time = "2025-10-30T15:06:04.647Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/37/b3/ca7d58ca25b1bb6df57e6cbd0ca8d6437a4b9ce1cd35adc8a6b2949c113b/s3fs-2025.9.0-py3-none-any.whl", hash = "sha256:c33c93d48f66ed440dbaf6600be149cdf8beae4b6f8f0201a209c5801aeb7e30", size = 30319, upload-time = "2025-09-02T19:18:20.563Z" }, + { url = "https://files.pythonhosted.org/packages/2d/fc/56cba14af8ad8fd020c85b6e44328520ac55939bb1f9d01444ad470504cb/s3fs-2025.10.0-py3-none-any.whl", hash = "sha256:da7ef25efc1541f5fca8e1116361e49ea1081f83f4e8001fbd77347c625da28a", size = 30357, upload-time = "2025-10-30T15:06:03.48Z" }, ] [[package]] @@ -3655,75 +3737,75 @@ wheels = [ [[package]] name = "scipy" -version = "1.16.2" +version = "1.16.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "numpy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/4c/3b/546a6f0bfe791bbb7f8d591613454d15097e53f906308ec6f7c1ce588e8e/scipy-1.16.2.tar.gz", hash = "sha256:af029b153d243a80afb6eabe40b0a07f8e35c9adc269c019f364ad747f826a6b", size = 30580599, upload-time = "2025-09-11T17:48:08.271Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b7/8d/6396e00db1282279a4ddd507c5f5e11f606812b608ee58517ce8abbf883f/scipy-1.16.2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:89d6c100fa5c48472047632e06f0876b3c4931aac1f4291afc81a3644316bb0d", size = 36646259, upload-time = "2025-09-11T17:40:39.329Z" }, - { url = "https://files.pythonhosted.org/packages/3b/93/ea9edd7e193fceb8eef149804491890bde73fb169c896b61aa3e2d1e4e77/scipy-1.16.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ca748936cd579d3f01928b30a17dc474550b01272d8046e3e1ee593f23620371", size = 28888976, upload-time = "2025-09-11T17:40:46.82Z" }, - { url = "https://files.pythonhosted.org/packages/91/4d/281fddc3d80fd738ba86fd3aed9202331180b01e2c78eaae0642f22f7e83/scipy-1.16.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:fac4f8ce2ddb40e2e3d0f7ec36d2a1e7f92559a2471e59aec37bd8d9de01fec0", size = 20879905, upload-time = "2025-09-11T17:40:52.545Z" }, - { url = "https://files.pythonhosted.org/packages/69/40/b33b74c84606fd301b2915f0062e45733c6ff5708d121dd0deaa8871e2d0/scipy-1.16.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:033570f1dcefd79547a88e18bccacff025c8c647a330381064f561d43b821232", size = 23553066, upload-time = "2025-09-11T17:40:59.014Z" }, - { url = "https://files.pythonhosted.org/packages/55/a7/22c739e2f21a42cc8f16bc76b47cff4ed54fbe0962832c589591c2abec34/scipy-1.16.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ea3421209bf00c8a5ef2227de496601087d8f638a2363ee09af059bd70976dc1", size = 33336407, upload-time = "2025-09-11T17:41:06.796Z" }, - { url = "https://files.pythonhosted.org/packages/53/11/a0160990b82999b45874dc60c0c183d3a3a969a563fffc476d5a9995c407/scipy-1.16.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f66bd07ba6f84cd4a380b41d1bf3c59ea488b590a2ff96744845163309ee8e2f", size = 35673281, upload-time = "2025-09-11T17:41:15.055Z" }, - { url = "https://files.pythonhosted.org/packages/96/53/7ef48a4cfcf243c3d0f1643f5887c81f29fdf76911c4e49331828e19fc0a/scipy-1.16.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5e9feab931bd2aea4a23388c962df6468af3d808ddf2d40f94a81c5dc38f32ef", size = 36004222, upload-time = "2025-09-11T17:41:23.868Z" }, - { url = "https://files.pythonhosted.org/packages/49/7f/71a69e0afd460049d41c65c630c919c537815277dfea214031005f474d78/scipy-1.16.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03dfc75e52f72cf23ec2ced468645321407faad8f0fe7b1f5b49264adbc29cb1", size = 38664586, upload-time = "2025-09-11T17:41:31.021Z" }, - { url = "https://files.pythonhosted.org/packages/34/95/20e02ca66fb495a95fba0642fd48e0c390d0ece9b9b14c6e931a60a12dea/scipy-1.16.2-cp312-cp312-win_amd64.whl", hash = "sha256:0ce54e07bbb394b417457409a64fd015be623f36e330ac49306433ffe04bc97e", size = 38550641, upload-time = "2025-09-11T17:41:36.61Z" }, - { url = "https://files.pythonhosted.org/packages/92/ad/13646b9beb0a95528ca46d52b7babafbe115017814a611f2065ee4e61d20/scipy-1.16.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a8ffaa4ac0df81a0b94577b18ee079f13fecdb924df3328fc44a7dc5ac46851", size = 25456070, upload-time = "2025-09-11T17:41:41.3Z" }, - { url = "https://files.pythonhosted.org/packages/c1/27/c5b52f1ee81727a9fc457f5ac1e9bf3d6eab311805ea615c83c27ba06400/scipy-1.16.2-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:84f7bf944b43e20b8a894f5fe593976926744f6c185bacfcbdfbb62736b5cc70", size = 36604856, upload-time = "2025-09-11T17:41:47.695Z" }, - { url = "https://files.pythonhosted.org/packages/32/a9/15c20d08e950b540184caa8ced675ba1128accb0e09c653780ba023a4110/scipy-1.16.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:5c39026d12edc826a1ef2ad35ad1e6d7f087f934bb868fc43fa3049c8b8508f9", size = 28864626, upload-time = "2025-09-11T17:41:52.642Z" }, - { url = "https://files.pythonhosted.org/packages/4c/fc/ea36098df653cca26062a627c1a94b0de659e97127c8491e18713ca0e3b9/scipy-1.16.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e52729ffd45b68777c5319560014d6fd251294200625d9d70fd8626516fc49f5", size = 20855689, upload-time = "2025-09-11T17:41:57.886Z" }, - { url = "https://files.pythonhosted.org/packages/dc/6f/d0b53be55727f3e6d7c72687ec18ea6d0047cf95f1f77488b99a2bafaee1/scipy-1.16.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:024dd4a118cccec09ca3209b7e8e614931a6ffb804b2a601839499cb88bdf925", size = 23512151, upload-time = "2025-09-11T17:42:02.303Z" }, - { url = "https://files.pythonhosted.org/packages/11/85/bf7dab56e5c4b1d3d8eef92ca8ede788418ad38a7dc3ff50262f00808760/scipy-1.16.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7a5dc7ee9c33019973a470556081b0fd3c9f4c44019191039f9769183141a4d9", size = 33329824, upload-time = "2025-09-11T17:42:07.549Z" }, - { url = "https://files.pythonhosted.org/packages/da/6a/1a927b14ddc7714111ea51f4e568203b2bb6ed59bdd036d62127c1a360c8/scipy-1.16.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c2275ff105e508942f99d4e3bc56b6ef5e4b3c0af970386ca56b777608ce95b7", size = 35681881, upload-time = "2025-09-11T17:42:13.255Z" }, - { url = "https://files.pythonhosted.org/packages/c1/5f/331148ea5780b4fcc7007a4a6a6ee0a0c1507a796365cc642d4d226e1c3a/scipy-1.16.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:af80196eaa84f033e48444d2e0786ec47d328ba00c71e4299b602235ffef9acb", size = 36006219, upload-time = "2025-09-11T17:42:18.765Z" }, - { url = "https://files.pythonhosted.org/packages/46/3a/e991aa9d2aec723b4a8dcfbfc8365edec5d5e5f9f133888067f1cbb7dfc1/scipy-1.16.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9fb1eb735fe3d6ed1f89918224e3385fbf6f9e23757cacc35f9c78d3b712dd6e", size = 38682147, upload-time = "2025-09-11T17:42:25.177Z" }, - { url = "https://files.pythonhosted.org/packages/a1/57/0f38e396ad19e41b4c5db66130167eef8ee620a49bc7d0512e3bb67e0cab/scipy-1.16.2-cp313-cp313-win_amd64.whl", hash = "sha256:fda714cf45ba43c9d3bae8f2585c777f64e3f89a2e073b668b32ede412d8f52c", size = 38520766, upload-time = "2025-09-11T17:43:25.342Z" }, - { url = "https://files.pythonhosted.org/packages/1b/a5/85d3e867b6822d331e26c862a91375bb7746a0b458db5effa093d34cdb89/scipy-1.16.2-cp313-cp313-win_arm64.whl", hash = "sha256:2f5350da923ccfd0b00e07c3e5cfb316c1c0d6c1d864c07a72d092e9f20db104", size = 25451169, upload-time = "2025-09-11T17:43:30.198Z" }, - { url = "https://files.pythonhosted.org/packages/09/d9/60679189bcebda55992d1a45498de6d080dcaf21ce0c8f24f888117e0c2d/scipy-1.16.2-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:53d8d2ee29b925344c13bda64ab51785f016b1b9617849dac10897f0701b20c1", size = 37012682, upload-time = "2025-09-11T17:42:30.677Z" }, - { url = "https://files.pythonhosted.org/packages/83/be/a99d13ee4d3b7887a96f8c71361b9659ba4ef34da0338f14891e102a127f/scipy-1.16.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:9e05e33657efb4c6a9d23bd8300101536abd99c85cca82da0bffff8d8764d08a", size = 29389926, upload-time = "2025-09-11T17:42:35.845Z" }, - { url = "https://files.pythonhosted.org/packages/bf/0a/130164a4881cec6ca8c00faf3b57926f28ed429cd6001a673f83c7c2a579/scipy-1.16.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:7fe65b36036357003b3ef9d37547abeefaa353b237e989c21027b8ed62b12d4f", size = 21381152, upload-time = "2025-09-11T17:42:40.07Z" }, - { url = "https://files.pythonhosted.org/packages/47/a6/503ffb0310ae77fba874e10cddfc4a1280bdcca1d13c3751b8c3c2996cf8/scipy-1.16.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:6406d2ac6d40b861cccf57f49592f9779071655e9f75cd4f977fa0bdd09cb2e4", size = 23914410, upload-time = "2025-09-11T17:42:44.313Z" }, - { url = "https://files.pythonhosted.org/packages/fa/c7/1147774bcea50d00c02600aadaa919facbd8537997a62496270133536ed6/scipy-1.16.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ff4dc42bd321991fbf611c23fc35912d690f731c9914bf3af8f417e64aca0f21", size = 33481880, upload-time = "2025-09-11T17:42:49.325Z" }, - { url = "https://files.pythonhosted.org/packages/6a/74/99d5415e4c3e46b2586f30cdbecb95e101c7192628a484a40dd0d163811a/scipy-1.16.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:654324826654d4d9133e10675325708fb954bc84dae6e9ad0a52e75c6b1a01d7", size = 35791425, upload-time = "2025-09-11T17:42:54.711Z" }, - { url = "https://files.pythonhosted.org/packages/1b/ee/a6559de7c1cc710e938c0355d9d4fbcd732dac4d0d131959d1f3b63eb29c/scipy-1.16.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:63870a84cd15c44e65220eaed2dac0e8f8b26bbb991456a033c1d9abfe8a94f8", size = 36178622, upload-time = "2025-09-11T17:43:00.375Z" }, - { url = "https://files.pythonhosted.org/packages/4e/7b/f127a5795d5ba8ece4e0dce7d4a9fb7cb9e4f4757137757d7a69ab7d4f1a/scipy-1.16.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:fa01f0f6a3050fa6a9771a95d5faccc8e2f5a92b4a2e5440a0fa7264a2398472", size = 38783985, upload-time = "2025-09-11T17:43:06.661Z" }, - { url = "https://files.pythonhosted.org/packages/3e/9f/bc81c1d1e033951eb5912cd3750cc005943afa3e65a725d2443a3b3c4347/scipy-1.16.2-cp313-cp313t-win_amd64.whl", hash = "sha256:116296e89fba96f76353a8579820c2512f6e55835d3fad7780fece04367de351", size = 38631367, upload-time = "2025-09-11T17:43:14.44Z" }, - { url = "https://files.pythonhosted.org/packages/d6/5e/2cc7555fd81d01814271412a1d59a289d25f8b63208a0a16c21069d55d3e/scipy-1.16.2-cp313-cp313t-win_arm64.whl", hash = "sha256:98e22834650be81d42982360382b43b17f7ba95e0e6993e2a4f5b9ad9283a94d", size = 25787992, upload-time = "2025-09-11T17:43:19.745Z" }, - { url = "https://files.pythonhosted.org/packages/8b/ac/ad8951250516db71619f0bd3b2eb2448db04b720a003dd98619b78b692c0/scipy-1.16.2-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:567e77755019bb7461513c87f02bb73fb65b11f049aaaa8ca17cfaa5a5c45d77", size = 36595109, upload-time = "2025-09-11T17:43:35.713Z" }, - { url = "https://files.pythonhosted.org/packages/ff/f6/5779049ed119c5b503b0f3dc6d6f3f68eefc3a9190d4ad4c276f854f051b/scipy-1.16.2-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:17d9bb346194e8967296621208fcdfd39b55498ef7d2f376884d5ac47cec1a70", size = 28859110, upload-time = "2025-09-11T17:43:40.814Z" }, - { url = "https://files.pythonhosted.org/packages/82/09/9986e410ae38bf0a0c737ff8189ac81a93b8e42349aac009891c054403d7/scipy-1.16.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:0a17541827a9b78b777d33b623a6dcfe2ef4a25806204d08ead0768f4e529a88", size = 20850110, upload-time = "2025-09-11T17:43:44.981Z" }, - { url = "https://files.pythonhosted.org/packages/0d/ad/485cdef2d9215e2a7df6d61b81d2ac073dfacf6ae24b9ae87274c4e936ae/scipy-1.16.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:d7d4c6ba016ffc0f9568d012f5f1eb77ddd99412aea121e6fa8b4c3b7cbad91f", size = 23497014, upload-time = "2025-09-11T17:43:49.074Z" }, - { url = "https://files.pythonhosted.org/packages/a7/74/f6a852e5d581122b8f0f831f1d1e32fb8987776ed3658e95c377d308ed86/scipy-1.16.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9702c4c023227785c779cba2e1d6f7635dbb5b2e0936cdd3a4ecb98d78fd41eb", size = 33401155, upload-time = "2025-09-11T17:43:54.661Z" }, - { url = "https://files.pythonhosted.org/packages/d9/f5/61d243bbc7c6e5e4e13dde9887e84a5cbe9e0f75fd09843044af1590844e/scipy-1.16.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d1cdf0ac28948d225decdefcc45ad7dd91716c29ab56ef32f8e0d50657dffcc7", size = 35691174, upload-time = "2025-09-11T17:44:00.101Z" }, - { url = "https://files.pythonhosted.org/packages/03/99/59933956331f8cc57e406cdb7a483906c74706b156998f322913e789c7e1/scipy-1.16.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:70327d6aa572a17c2941cdfb20673f82e536e91850a2e4cb0c5b858b690e1548", size = 36070752, upload-time = "2025-09-11T17:44:05.619Z" }, - { url = "https://files.pythonhosted.org/packages/c6/7d/00f825cfb47ee19ef74ecf01244b43e95eae74e7e0ff796026ea7cd98456/scipy-1.16.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5221c0b2a4b58aa7c4ed0387d360fd90ee9086d383bb34d9f2789fafddc8a936", size = 38701010, upload-time = "2025-09-11T17:44:11.322Z" }, - { url = "https://files.pythonhosted.org/packages/e4/9f/b62587029980378304ba5a8563d376c96f40b1e133daacee76efdcae32de/scipy-1.16.2-cp314-cp314-win_amd64.whl", hash = "sha256:f5a85d7b2b708025af08f060a496dd261055b617d776fc05a1a1cc69e09fe9ff", size = 39360061, upload-time = "2025-09-11T17:45:09.814Z" }, - { url = "https://files.pythonhosted.org/packages/82/04/7a2f1609921352c7fbee0815811b5050582f67f19983096c4769867ca45f/scipy-1.16.2-cp314-cp314-win_arm64.whl", hash = "sha256:2cc73a33305b4b24556957d5857d6253ce1e2dcd67fa0ff46d87d1670b3e1e1d", size = 26126914, upload-time = "2025-09-11T17:45:14.73Z" }, - { url = "https://files.pythonhosted.org/packages/51/b9/60929ce350c16b221928725d2d1d7f86cf96b8bc07415547057d1196dc92/scipy-1.16.2-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:9ea2a3fed83065d77367775d689401a703d0f697420719ee10c0780bcab594d8", size = 37013193, upload-time = "2025-09-11T17:44:16.757Z" }, - { url = "https://files.pythonhosted.org/packages/2a/41/ed80e67782d4bc5fc85a966bc356c601afddd175856ba7c7bb6d9490607e/scipy-1.16.2-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:7280d926f11ca945c3ef92ba960fa924e1465f8d07ce3a9923080363390624c4", size = 29390172, upload-time = "2025-09-11T17:44:21.783Z" }, - { url = "https://files.pythonhosted.org/packages/c4/a3/2f673ace4090452696ccded5f5f8efffb353b8f3628f823a110e0170b605/scipy-1.16.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:8afae1756f6a1fe04636407ef7dbece33d826a5d462b74f3d0eb82deabefd831", size = 21381326, upload-time = "2025-09-11T17:44:25.982Z" }, - { url = "https://files.pythonhosted.org/packages/42/bf/59df61c5d51395066c35836b78136accf506197617c8662e60ea209881e1/scipy-1.16.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:5c66511f29aa8d233388e7416a3f20d5cae7a2744d5cee2ecd38c081f4e861b3", size = 23915036, upload-time = "2025-09-11T17:44:30.527Z" }, - { url = "https://files.pythonhosted.org/packages/91/c3/edc7b300dc16847ad3672f1a6f3f7c5d13522b21b84b81c265f4f2760d4a/scipy-1.16.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efe6305aeaa0e96b0ccca5ff647a43737d9a092064a3894e46c414db84bc54ac", size = 33484341, upload-time = "2025-09-11T17:44:35.981Z" }, - { url = "https://files.pythonhosted.org/packages/26/c7/24d1524e72f06ff141e8d04b833c20db3021020563272ccb1b83860082a9/scipy-1.16.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7f3a337d9ae06a1e8d655ee9d8ecb835ea5ddcdcbd8d23012afa055ab014f374", size = 35790840, upload-time = "2025-09-11T17:44:41.76Z" }, - { url = "https://files.pythonhosted.org/packages/aa/b7/5aaad984eeedd56858dc33d75efa59e8ce798d918e1033ef62d2708f2c3d/scipy-1.16.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bab3605795d269067d8ce78a910220262711b753de8913d3deeaedb5dded3bb6", size = 36174716, upload-time = "2025-09-11T17:44:47.316Z" }, - { url = "https://files.pythonhosted.org/packages/fd/c2/e276a237acb09824822b0ada11b028ed4067fdc367a946730979feacb870/scipy-1.16.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b0348d8ddb55be2a844c518cd8cc8deeeb8aeba707cf834db5758fc89b476a2c", size = 38790088, upload-time = "2025-09-11T17:44:53.011Z" }, - { url = "https://files.pythonhosted.org/packages/c6/b4/5c18a766e8353015439f3780f5fc473f36f9762edc1a2e45da3ff5a31b21/scipy-1.16.2-cp314-cp314t-win_amd64.whl", hash = "sha256:26284797e38b8a75e14ea6631d29bda11e76ceaa6ddb6fdebbfe4c4d90faf2f9", size = 39457455, upload-time = "2025-09-11T17:44:58.899Z" }, - { url = "https://files.pythonhosted.org/packages/97/30/2f9a5243008f76dfc5dee9a53dfb939d9b31e16ce4bd4f2e628bfc5d89d2/scipy-1.16.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d2a4472c231328d4de38d5f1f68fdd6d28a615138f842580a8a321b5845cf779", size = 26448374, upload-time = "2025-09-11T17:45:03.45Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/0a/ca/d8ace4f98322d01abcd52d381134344bf7b431eba7ed8b42bdea5a3c2ac9/scipy-1.16.3.tar.gz", hash = "sha256:01e87659402762f43bd2fee13370553a17ada367d42e7487800bf2916535aecb", size = 30597883, upload-time = "2025-10-28T17:38:54.068Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/41/5bf55c3f386b1643812f3a5674edf74b26184378ef0f3e7c7a09a7e2ca7f/scipy-1.16.3-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:81fc5827606858cf71446a5e98715ba0e11f0dbc83d71c7409d05486592a45d6", size = 36659043, upload-time = "2025-10-28T17:32:40.285Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0f/65582071948cfc45d43e9870bf7ca5f0e0684e165d7c9ef4e50d783073eb/scipy-1.16.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:c97176013d404c7346bf57874eaac5187d969293bf40497140b0a2b2b7482e07", size = 28898986, upload-time = "2025-10-28T17:32:45.325Z" }, + { url = "https://files.pythonhosted.org/packages/96/5e/36bf3f0ac298187d1ceadde9051177d6a4fe4d507e8f59067dc9dd39e650/scipy-1.16.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2b71d93c8a9936046866acebc915e2af2e292b883ed6e2cbe5c34beb094b82d9", size = 20889814, upload-time = "2025-10-28T17:32:49.277Z" }, + { url = "https://files.pythonhosted.org/packages/80/35/178d9d0c35394d5d5211bbff7ac4f2986c5488b59506fef9e1de13ea28d3/scipy-1.16.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3d4a07a8e785d80289dfe66b7c27d8634a773020742ec7187b85ccc4b0e7b686", size = 23565795, upload-time = "2025-10-28T17:32:53.337Z" }, + { url = "https://files.pythonhosted.org/packages/fa/46/d1146ff536d034d02f83c8afc3c4bab2eddb634624d6529a8512f3afc9da/scipy-1.16.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0553371015692a898e1aa858fed67a3576c34edefa6b7ebdb4e9dde49ce5c203", size = 33349476, upload-time = "2025-10-28T17:32:58.353Z" }, + { url = "https://files.pythonhosted.org/packages/79/2e/415119c9ab3e62249e18c2b082c07aff907a273741b3f8160414b0e9193c/scipy-1.16.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:72d1717fd3b5e6ec747327ce9bda32d5463f472c9dce9f54499e81fbd50245a1", size = 35676692, upload-time = "2025-10-28T17:33:03.88Z" }, + { url = "https://files.pythonhosted.org/packages/27/82/df26e44da78bf8d2aeaf7566082260cfa15955a5a6e96e6a29935b64132f/scipy-1.16.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1fb2472e72e24d1530debe6ae078db70fb1605350c88a3d14bc401d6306dbffe", size = 36019345, upload-time = "2025-10-28T17:33:09.773Z" }, + { url = "https://files.pythonhosted.org/packages/82/31/006cbb4b648ba379a95c87262c2855cd0d09453e500937f78b30f02fa1cd/scipy-1.16.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c5192722cffe15f9329a3948c4b1db789fbb1f05c97899187dcf009b283aea70", size = 38678975, upload-time = "2025-10-28T17:33:15.809Z" }, + { url = "https://files.pythonhosted.org/packages/c2/7f/acbd28c97e990b421af7d6d6cd416358c9c293fc958b8529e0bd5d2a2a19/scipy-1.16.3-cp312-cp312-win_amd64.whl", hash = "sha256:56edc65510d1331dae01ef9b658d428e33ed48b4f77b1d51caf479a0253f96dc", size = 38555926, upload-time = "2025-10-28T17:33:21.388Z" }, + { url = "https://files.pythonhosted.org/packages/ce/69/c5c7807fd007dad4f48e0a5f2153038dc96e8725d3345b9ee31b2b7bed46/scipy-1.16.3-cp312-cp312-win_arm64.whl", hash = "sha256:a8a26c78ef223d3e30920ef759e25625a0ecdd0d60e5a8818b7513c3e5384cf2", size = 25463014, upload-time = "2025-10-28T17:33:25.975Z" }, + { url = "https://files.pythonhosted.org/packages/72/f1/57e8327ab1508272029e27eeef34f2302ffc156b69e7e233e906c2a5c379/scipy-1.16.3-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:d2ec56337675e61b312179a1ad124f5f570c00f920cc75e1000025451b88241c", size = 36617856, upload-time = "2025-10-28T17:33:31.375Z" }, + { url = "https://files.pythonhosted.org/packages/44/13/7e63cfba8a7452eb756306aa2fd9b37a29a323b672b964b4fdeded9a3f21/scipy-1.16.3-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:16b8bc35a4cc24db80a0ec836a9286d0e31b2503cb2fd7ff7fb0e0374a97081d", size = 28874306, upload-time = "2025-10-28T17:33:36.516Z" }, + { url = "https://files.pythonhosted.org/packages/15/65/3a9400efd0228a176e6ec3454b1fa998fbbb5a8defa1672c3f65706987db/scipy-1.16.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:5803c5fadd29de0cf27fa08ccbfe7a9e5d741bf63e4ab1085437266f12460ff9", size = 20865371, upload-time = "2025-10-28T17:33:42.094Z" }, + { url = "https://files.pythonhosted.org/packages/33/d7/eda09adf009a9fb81827194d4dd02d2e4bc752cef16737cc4ef065234031/scipy-1.16.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:b81c27fc41954319a943d43b20e07c40bdcd3ff7cf013f4fb86286faefe546c4", size = 23524877, upload-time = "2025-10-28T17:33:48.483Z" }, + { url = "https://files.pythonhosted.org/packages/7d/6b/3f911e1ebc364cb81320223a3422aab7d26c9c7973109a9cd0f27c64c6c0/scipy-1.16.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0c3b4dd3d9b08dbce0f3440032c52e9e2ab9f96ade2d3943313dfe51a7056959", size = 33342103, upload-time = "2025-10-28T17:33:56.495Z" }, + { url = "https://files.pythonhosted.org/packages/21/f6/4bfb5695d8941e5c570a04d9fcd0d36bce7511b7d78e6e75c8f9791f82d0/scipy-1.16.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7dc1360c06535ea6116a2220f760ae572db9f661aba2d88074fe30ec2aa1ff88", size = 35697297, upload-time = "2025-10-28T17:34:04.722Z" }, + { url = "https://files.pythonhosted.org/packages/04/e1/6496dadbc80d8d896ff72511ecfe2316b50313bfc3ebf07a3f580f08bd8c/scipy-1.16.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:663b8d66a8748051c3ee9c96465fb417509315b99c71550fda2591d7dd634234", size = 36021756, upload-time = "2025-10-28T17:34:13.482Z" }, + { url = "https://files.pythonhosted.org/packages/fe/bd/a8c7799e0136b987bda3e1b23d155bcb31aec68a4a472554df5f0937eef7/scipy-1.16.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eab43fae33a0c39006a88096cd7b4f4ef545ea0447d250d5ac18202d40b6611d", size = 38696566, upload-time = "2025-10-28T17:34:22.384Z" }, + { url = "https://files.pythonhosted.org/packages/cd/01/1204382461fcbfeb05b6161b594f4007e78b6eba9b375382f79153172b4d/scipy-1.16.3-cp313-cp313-win_amd64.whl", hash = "sha256:062246acacbe9f8210de8e751b16fc37458213f124bef161a5a02c7a39284304", size = 38529877, upload-time = "2025-10-28T17:35:51.076Z" }, + { url = "https://files.pythonhosted.org/packages/7f/14/9d9fbcaa1260a94f4bb5b64ba9213ceb5d03cd88841fe9fd1ffd47a45b73/scipy-1.16.3-cp313-cp313-win_arm64.whl", hash = "sha256:50a3dbf286dbc7d84f176f9a1574c705f277cb6565069f88f60db9eafdbe3ee2", size = 25455366, upload-time = "2025-10-28T17:35:59.014Z" }, + { url = "https://files.pythonhosted.org/packages/e2/a3/9ec205bd49f42d45d77f1730dbad9ccf146244c1647605cf834b3a8c4f36/scipy-1.16.3-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:fb4b29f4cf8cc5a8d628bc8d8e26d12d7278cd1f219f22698a378c3d67db5e4b", size = 37027931, upload-time = "2025-10-28T17:34:31.451Z" }, + { url = "https://files.pythonhosted.org/packages/25/06/ca9fd1f3a4589cbd825b1447e5db3a8ebb969c1eaf22c8579bd286f51b6d/scipy-1.16.3-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:8d09d72dc92742988b0e7750bddb8060b0c7079606c0d24a8cc8e9c9c11f9079", size = 29400081, upload-time = "2025-10-28T17:34:39.087Z" }, + { url = "https://files.pythonhosted.org/packages/6a/56/933e68210d92657d93fb0e381683bc0e53a965048d7358ff5fbf9e6a1b17/scipy-1.16.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:03192a35e661470197556de24e7cb1330d84b35b94ead65c46ad6f16f6b28f2a", size = 21391244, upload-time = "2025-10-28T17:34:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/a8/7e/779845db03dc1418e215726329674b40576879b91814568757ff0014ad65/scipy-1.16.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:57d01cb6f85e34f0946b33caa66e892aae072b64b034183f3d87c4025802a119", size = 23929753, upload-time = "2025-10-28T17:34:51.793Z" }, + { url = "https://files.pythonhosted.org/packages/4c/4b/f756cf8161d5365dcdef9e5f460ab226c068211030a175d2fc7f3f41ca64/scipy-1.16.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:96491a6a54e995f00a28a3c3badfff58fd093bf26cd5fb34a2188c8c756a3a2c", size = 33496912, upload-time = "2025-10-28T17:34:59.8Z" }, + { url = "https://files.pythonhosted.org/packages/09/b5/222b1e49a58668f23839ca1542a6322bb095ab8d6590d4f71723869a6c2c/scipy-1.16.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cd13e354df9938598af2be05822c323e97132d5e6306b83a3b4ee6724c6e522e", size = 35802371, upload-time = "2025-10-28T17:35:08.173Z" }, + { url = "https://files.pythonhosted.org/packages/c1/8d/5964ef68bb31829bde27611f8c9deeac13764589fe74a75390242b64ca44/scipy-1.16.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:63d3cdacb8a824a295191a723ee5e4ea7768ca5ca5f2838532d9f2e2b3ce2135", size = 36190477, upload-time = "2025-10-28T17:35:16.7Z" }, + { url = "https://files.pythonhosted.org/packages/ab/f2/b31d75cb9b5fa4dd39a0a931ee9b33e7f6f36f23be5ef560bf72e0f92f32/scipy-1.16.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e7efa2681ea410b10dde31a52b18b0154d66f2485328830e45fdf183af5aefc6", size = 38796678, upload-time = "2025-10-28T17:35:26.354Z" }, + { url = "https://files.pythonhosted.org/packages/b4/1e/b3723d8ff64ab548c38d87055483714fefe6ee20e0189b62352b5e015bb1/scipy-1.16.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2d1ae2cf0c350e7705168ff2429962a89ad90c2d49d1dd300686d8b2a5af22fc", size = 38640178, upload-time = "2025-10-28T17:35:35.304Z" }, + { url = "https://files.pythonhosted.org/packages/8e/f3/d854ff38789aca9b0cc23008d607ced9de4f7ab14fa1ca4329f86b3758ca/scipy-1.16.3-cp313-cp313t-win_arm64.whl", hash = "sha256:0c623a54f7b79dd88ef56da19bc2873afec9673a48f3b85b18e4d402bdd29a5a", size = 25803246, upload-time = "2025-10-28T17:35:42.155Z" }, + { url = "https://files.pythonhosted.org/packages/99/f6/99b10fd70f2d864c1e29a28bbcaa0c6340f9d8518396542d9ea3b4aaae15/scipy-1.16.3-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:875555ce62743e1d54f06cdf22c1e0bc47b91130ac40fe5d783b6dfa114beeb6", size = 36606469, upload-time = "2025-10-28T17:36:08.741Z" }, + { url = "https://files.pythonhosted.org/packages/4d/74/043b54f2319f48ea940dd025779fa28ee360e6b95acb7cd188fad4391c6b/scipy-1.16.3-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:bb61878c18a470021fb515a843dc7a76961a8daceaaaa8bad1332f1bf4b54657", size = 28872043, upload-time = "2025-10-28T17:36:16.599Z" }, + { url = "https://files.pythonhosted.org/packages/4d/e1/24b7e50cc1c4ee6ffbcb1f27fe9f4c8b40e7911675f6d2d20955f41c6348/scipy-1.16.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:f2622206f5559784fa5c4b53a950c3c7c1cf3e84ca1b9c4b6c03f062f289ca26", size = 20862952, upload-time = "2025-10-28T17:36:22.966Z" }, + { url = "https://files.pythonhosted.org/packages/dd/3a/3e8c01a4d742b730df368e063787c6808597ccb38636ed821d10b39ca51b/scipy-1.16.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:7f68154688c515cdb541a31ef8eb66d8cd1050605be9dcd74199cbd22ac739bc", size = 23508512, upload-time = "2025-10-28T17:36:29.731Z" }, + { url = "https://files.pythonhosted.org/packages/1f/60/c45a12b98ad591536bfe5330cb3cfe1850d7570259303563b1721564d458/scipy-1.16.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8b3c820ddb80029fe9f43d61b81d8b488d3ef8ca010d15122b152db77dc94c22", size = 33413639, upload-time = "2025-10-28T17:36:37.982Z" }, + { url = "https://files.pythonhosted.org/packages/71/bc/35957d88645476307e4839712642896689df442f3e53b0fa016ecf8a3357/scipy-1.16.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d3837938ae715fc0fe3c39c0202de3a8853aff22ca66781ddc2ade7554b7e2cc", size = 35704729, upload-time = "2025-10-28T17:36:46.547Z" }, + { url = "https://files.pythonhosted.org/packages/3b/15/89105e659041b1ca11c386e9995aefacd513a78493656e57789f9d9eab61/scipy-1.16.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:aadd23f98f9cb069b3bd64ddc900c4d277778242e961751f77a8cb5c4b946fb0", size = 36086251, upload-time = "2025-10-28T17:36:55.161Z" }, + { url = "https://files.pythonhosted.org/packages/1a/87/c0ea673ac9c6cc50b3da2196d860273bc7389aa69b64efa8493bdd25b093/scipy-1.16.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b7c5f1bda1354d6a19bc6af73a649f8285ca63ac6b52e64e658a5a11d4d69800", size = 38716681, upload-time = "2025-10-28T17:37:04.1Z" }, + { url = "https://files.pythonhosted.org/packages/91/06/837893227b043fb9b0d13e4bd7586982d8136cb249ffb3492930dab905b8/scipy-1.16.3-cp314-cp314-win_amd64.whl", hash = "sha256:e5d42a9472e7579e473879a1990327830493a7047506d58d73fc429b84c1d49d", size = 39358423, upload-time = "2025-10-28T17:38:20.005Z" }, + { url = "https://files.pythonhosted.org/packages/95/03/28bce0355e4d34a7c034727505a02d19548549e190bedd13a721e35380b7/scipy-1.16.3-cp314-cp314-win_arm64.whl", hash = "sha256:6020470b9d00245926f2d5bb93b119ca0340f0d564eb6fbaad843eaebf9d690f", size = 26135027, upload-time = "2025-10-28T17:38:24.966Z" }, + { url = "https://files.pythonhosted.org/packages/b2/6f/69f1e2b682efe9de8fe9f91040f0cd32f13cfccba690512ba4c582b0bc29/scipy-1.16.3-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:e1d27cbcb4602680a49d787d90664fa4974063ac9d4134813332a8c53dbe667c", size = 37028379, upload-time = "2025-10-28T17:37:14.061Z" }, + { url = "https://files.pythonhosted.org/packages/7c/2d/e826f31624a5ebbab1cd93d30fd74349914753076ed0593e1d56a98c4fb4/scipy-1.16.3-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:9b9c9c07b6d56a35777a1b4cc8966118fb16cfd8daf6743867d17d36cfad2d40", size = 29400052, upload-time = "2025-10-28T17:37:21.709Z" }, + { url = "https://files.pythonhosted.org/packages/69/27/d24feb80155f41fd1f156bf144e7e049b4e2b9dd06261a242905e3bc7a03/scipy-1.16.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:3a4c460301fb2cffb7f88528f30b3127742cff583603aa7dc964a52c463b385d", size = 21391183, upload-time = "2025-10-28T17:37:29.559Z" }, + { url = "https://files.pythonhosted.org/packages/f8/d3/1b229e433074c5738a24277eca520a2319aac7465eea7310ea6ae0e98ae2/scipy-1.16.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:f667a4542cc8917af1db06366d3f78a5c8e83badd56409f94d1eac8d8d9133fa", size = 23930174, upload-time = "2025-10-28T17:37:36.306Z" }, + { url = "https://files.pythonhosted.org/packages/16/9d/d9e148b0ec680c0f042581a2be79a28a7ab66c0c4946697f9e7553ead337/scipy-1.16.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f379b54b77a597aa7ee5e697df0d66903e41b9c85a6dd7946159e356319158e8", size = 33497852, upload-time = "2025-10-28T17:37:42.228Z" }, + { url = "https://files.pythonhosted.org/packages/2f/22/4e5f7561e4f98b7bea63cf3fd7934bff1e3182e9f1626b089a679914d5c8/scipy-1.16.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4aff59800a3b7f786b70bfd6ab551001cb553244988d7d6b8299cb1ea653b353", size = 35798595, upload-time = "2025-10-28T17:37:48.102Z" }, + { url = "https://files.pythonhosted.org/packages/83/42/6644d714c179429fc7196857866f219fef25238319b650bb32dde7bf7a48/scipy-1.16.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:da7763f55885045036fabcebd80144b757d3db06ab0861415d1c3b7c69042146", size = 36186269, upload-time = "2025-10-28T17:37:53.72Z" }, + { url = "https://files.pythonhosted.org/packages/ac/70/64b4d7ca92f9cf2e6fc6aaa2eecf80bb9b6b985043a9583f32f8177ea122/scipy-1.16.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ffa6eea95283b2b8079b821dc11f50a17d0571c92b43e2b5b12764dc5f9b285d", size = 38802779, upload-time = "2025-10-28T17:37:59.393Z" }, + { url = "https://files.pythonhosted.org/packages/61/82/8d0e39f62764cce5ffd5284131e109f07cf8955aef9ab8ed4e3aa5e30539/scipy-1.16.3-cp314-cp314t-win_amd64.whl", hash = "sha256:d9f48cafc7ce94cf9b15c6bffdc443a81a27bf7075cf2dcd5c8b40f85d10c4e7", size = 39471128, upload-time = "2025-10-28T17:38:05.259Z" }, + { url = "https://files.pythonhosted.org/packages/64/47/a494741db7280eae6dc033510c319e34d42dd41b7ac0c7ead39354d1a2b5/scipy-1.16.3-cp314-cp314t-win_arm64.whl", hash = "sha256:21d9d6b197227a12dcbf9633320a4e34c6b0e51c57268df255a0942983bac562", size = 26464127, upload-time = "2025-10-28T17:38:11.34Z" }, ] [[package]] name = "scipy-stubs" -version = "1.16.2.4" +version = "1.16.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "optype", extra = ["numpy"] }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1f/b1/c806d700fb442f8b04037b1272be303e9b55dea17237002958bd4dd48c47/scipy_stubs-1.16.2.4.tar.gz", hash = "sha256:dc303e0ba2272aa3832660f0e55f7b461ab32e98f452090f3e28a338f3920e67", size = 356403, upload-time = "2025-10-17T03:53:11.714Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/68/c53c3bce6bd069a164015be1be2671c968b526be4af1e85db64c88f04546/scipy_stubs-1.16.3.0.tar.gz", hash = "sha256:d6943c085e47a1ed431309f9ca582b6a206a9db808a036132a0bf01ebc34b506", size = 356462, upload-time = "2025-10-28T22:05:31.198Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f5/d2/596b5f7439c96e6e636db81a2e39a24738ccc6a1363b97e254643070c9c2/scipy_stubs-1.16.2.4-py3-none-any.whl", hash = "sha256:8e47684fe5f8b823e06ec6513e4dbb5ae43a5a064d10d8228b7e3c3d243ec673", size = 557679, upload-time = "2025-10-17T03:53:10.007Z" }, + { url = "https://files.pythonhosted.org/packages/86/1c/0ba7305fa01cfe7a6f1b8c86ccdd1b7a0d43fa9bd769c059995311e291a2/scipy_stubs-1.16.3.0-py3-none-any.whl", hash = "sha256:90e5d82ced2183ef3c5c0a28a77df8cc227458624364fa0ff975ad24fa89d6ad", size = 557713, upload-time = "2025-10-28T22:05:29.454Z" }, ] [[package]] @@ -3748,6 +3830,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, ] +[[package]] +name = "shellingham" +version = "1.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, +] + [[package]] name = "six" version = "1.17.0" @@ -4043,15 +4134,15 @@ wheels = [ [[package]] name = "starlette" -version = "0.48.0" +version = "0.49.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a7/a5/d6f429d43394057b67a6b5bbe6eae2f77a6bf7459d961fdb224bf206eee6/starlette-0.48.0.tar.gz", hash = "sha256:7e8cee469a8ab2352911528110ce9088fdc6a37d9876926e73da7ce4aa4c7a46", size = 2652949, upload-time = "2025-09-13T08:41:05.699Z" } +sdist = { url = "https://files.pythonhosted.org/packages/de/1a/608df0b10b53b0beb96a37854ee05864d182ddd4b1156a22f1ad3860425a/starlette-0.49.3.tar.gz", hash = "sha256:1c14546f299b5901a1ea0e34410575bc33bbd741377a10484a54445588d00284", size = 2655031, upload-time = "2025-11-01T15:12:26.13Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/be/72/2db2f49247d0a18b4f1bb9a5a39a0162869acf235f3a96418363947b3d46/starlette-0.48.0-py3-none-any.whl", hash = "sha256:0764ca97b097582558ecb498132ed0c7d942f233f365b86ba37770e026510659", size = 73736, upload-time = "2025-09-13T08:41:03.869Z" }, + { url = "https://files.pythonhosted.org/packages/a3/e0/021c772d6a662f43b63044ab481dc6ac7592447605b5b35a957785363122/starlette-0.49.3-py3-none-any.whl", hash = "sha256:b579b99715fdc2980cf88c8ec96d3bf1ce16f5a8051a7c2b84ef9b1cdecaea2f", size = 74340, upload-time = "2025-11-01T15:12:24.387Z" }, ] [[package]] @@ -4077,11 +4168,11 @@ wheels = [ [[package]] name = "toml-fmt-common" -version = "1.0.1" +version = "1.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/3a/7a/fca432020e0b2134f7bb8fa4bf4714f6f0d1c72a08100c96b582c22098bc/toml_fmt_common-1.0.1.tar.gz", hash = "sha256:7a29e99e527ffac456043296a0f1d8c03aaa1b06167bd39ad5e3cc5041f31c17", size = 9626, upload-time = "2024-10-20T05:01:31.278Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/ec/94b10890bf99ef0cc547cf4113bff46337c289012e7916ae7adb8f3c470b/toml_fmt_common-1.1.0.tar.gz", hash = "sha256:e4ba8f13e5fe25cfe0bfc60342ad7deb91c741fd31f2e5522e6a51bfbf1427d3", size = 9643, upload-time = "2025-10-08T17:41:14.328Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e8/7f/094a5d096adaf2a51de24c8650530625a262bef0c654fc981165ad45821f/toml_fmt_common-1.0.1-py3-none-any.whl", hash = "sha256:7a6542e36a7167fa94b8b997d3f8debadbb4ab757c7d78a77304579bd7a0cc7d", size = 5666, upload-time = "2024-10-20T05:01:29.468Z" }, + { url = "https://files.pythonhosted.org/packages/4f/3b/40e889a19cf41bd898eedb6dded7c4ba711442555f68dc0cff6275aaa682/toml_fmt_common-1.1.0-py3-none-any.whl", hash = "sha256:92a956c4abf9c14e72d51e4c23149b2596a84ac0c347484e7c36008807e2e0a3", size = 5686, upload-time = "2025-10-08T17:41:13.035Z" }, ] [[package]] @@ -4155,13 +4246,26 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" }, ] +[[package]] +name = "typer-slim" +version = "0.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8e/45/81b94a52caed434b94da65729c03ad0fb7665fab0f7db9ee54c94e541403/typer_slim-0.20.0.tar.gz", hash = "sha256:9fc6607b3c6c20f5c33ea9590cbeb17848667c51feee27d9e314a579ab07d1a3", size = 106561, upload-time = "2025-10-20T17:03:46.642Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/dd/5cbf31f402f1cc0ab087c94d4669cfa55bd1e818688b910631e131d74e75/typer_slim-0.20.0-py3-none-any.whl", hash = "sha256:f42a9b7571a12b97dddf364745d29f12221865acef7a2680065f9bb29c7dc89d", size = 47087, upload-time = "2025-10-20T17:03:44.546Z" }, +] + [[package]] name = "types-pytz" -version = "2025.2.0.20250809" +version = "2025.2.0.20251108" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/07/e2/c774f754de26848f53f05defff5bb21dd9375a059d1ba5b5ea943cf8206e/types_pytz-2025.2.0.20250809.tar.gz", hash = "sha256:222e32e6a29bb28871f8834e8785e3801f2dc4441c715cd2082b271eecbe21e5", size = 10876, upload-time = "2025-08-09T03:14:17.453Z" } +sdist = { url = "https://files.pythonhosted.org/packages/40/ff/c047ddc68c803b46470a357454ef76f4acd8c1088f5cc4891cdd909bfcf6/types_pytz-2025.2.0.20251108.tar.gz", hash = "sha256:fca87917836ae843f07129567b74c1929f1870610681b4c92cb86a3df5817bdb", size = 10961, upload-time = "2025-11-08T02:55:57.001Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/db/d0/91c24fe54e565f2344d7a6821e6c6bb099841ef09007ea6321a0bac0f808/types_pytz-2025.2.0.20250809-py3-none-any.whl", hash = "sha256:4f55ed1b43e925cf851a756fe1707e0f5deeb1976e15bf844bcaa025e8fbd0db", size = 10095, upload-time = "2025-08-09T03:14:16.674Z" }, + { url = "https://files.pythonhosted.org/packages/e7/c1/56ef16bf5dcd255155cc736d276efa6ae0a5c26fd685e28f0412a4013c01/types_pytz-2025.2.0.20251108-py3-none-any.whl", hash = "sha256:0f1c9792cab4eb0e46c52f8845c8f77cf1e313cb3d68bf826aa867fe4717d91c", size = 10116, upload-time = "2025-11-08T02:55:56.194Z" }, ] [[package]] @@ -4217,28 +4321,28 @@ wheels = [ [[package]] name = "uv" -version = "0.9.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6e/6a/fab7dd47e7344705158cc3fcbe70b4814175902159574c3abb081ebaba88/uv-0.9.5.tar.gz", hash = "sha256:d8835d2c034421ac2235fb658bb4f669a301a0f1eb00a8430148dd8461b65641", size = 3700444, upload-time = "2025-10-21T16:48:26.847Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/51/4a/4db051b9e41e6c67d0b7a56c68e2457e9bbe947463a656873e7d02a974f3/uv-0.9.5-py3-none-linux_armv6l.whl", hash = "sha256:f8eb34ebebac4b45334ce7082cca99293b71fb32b164651f1727c8a640e5b387", size = 20667903, upload-time = "2025-10-21T16:47:41.841Z" }, - { url = "https://files.pythonhosted.org/packages/4e/6c/3508d67f80aac0ddb5806680a6735ff6cb5a14e9b697e5ae145b01050880/uv-0.9.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:922cd784cce36bbdc7754b590d28c276698c85791c18cd4c6a7e917db4480440", size = 19680481, upload-time = "2025-10-21T16:47:45.825Z" }, - { url = "https://files.pythonhosted.org/packages/b2/26/bd6438cf6d84a6b0b608bcbe9f353d8e424f8fe3b1b73a768984a76bf80b/uv-0.9.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:8603bb902e578463c50c3ddd4ee376ba4172ccdf4979787f8948747d1bb0e18b", size = 18309280, upload-time = "2025-10-21T16:47:47.919Z" }, - { url = "https://files.pythonhosted.org/packages/48/8a/a990d9a39094d4d47bd11edff17573247f3791c33a19626e92c995498e68/uv-0.9.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:48a3542835d37882ff57d1ff91b757085525d98756712fa61cf9941d3dda8ebf", size = 20030908, upload-time = "2025-10-21T16:47:50.532Z" }, - { url = "https://files.pythonhosted.org/packages/24/7a/63a5dd8e1b7ff69d9920a36c018c54c6247e48477d252770d979e30c97bd/uv-0.9.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:21452ece590ddb90e869a478ca4c2ba70be180ec0d6716985ee727b9394c8aa5", size = 20236853, upload-time = "2025-10-21T16:47:53.108Z" }, - { url = "https://files.pythonhosted.org/packages/bc/cd/511e0d96b10a88fb382515f33fcacb8613fea6e50ae767827ad8056f6c38/uv-0.9.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb31c9896dc2c88f6a9f1d693be2409fe2fc2e3d90827956e4341c2b2171289", size = 21161956, upload-time = "2025-10-21T16:47:55.337Z" }, - { url = "https://files.pythonhosted.org/packages/0b/bd/3255b9649f491ff7ae3450919450325ad125c8af6530d24aa22932f83aa0/uv-0.9.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:02db727beb94a2137508cee5a785c3465d150954ca9abdff2d8157c76dea163e", size = 22646501, upload-time = "2025-10-21T16:47:57.917Z" }, - { url = "https://files.pythonhosted.org/packages/b6/6e/f2d172ea3aa078aa2ba1c391f674b2d322e5d1a8b695e2bdd941ea22f6c3/uv-0.9.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c465f2e342cab908849b8ce83e14fd4cf75f5bed55802d0acf1399f9d02f92d9", size = 22285962, upload-time = "2025-10-21T16:48:00.516Z" }, - { url = "https://files.pythonhosted.org/packages/71/ad/f22e2b094c82178cee674337340f2e1a3dfcdaabc75e393e1f499f997c15/uv-0.9.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:133e2614e1ff3b34c2606595d8ae55710473ebb7516bfa5708afc00315730cd1", size = 21374721, upload-time = "2025-10-21T16:48:02.957Z" }, - { url = "https://files.pythonhosted.org/packages/9b/83/a0bdf4abf86ede79b427778fe27e2b4a022c98a7a8ea1745dcd6c6561f17/uv-0.9.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6507bbbcd788553ec4ad5a96fa19364dc0f58b023e31d79868773559a83ec181", size = 21332544, upload-time = "2025-10-21T16:48:05.75Z" }, - { url = "https://files.pythonhosted.org/packages/da/93/f61862a5cb34d3fd021352f4a46993950ba2b301f0fd0694a56c7a56b20b/uv-0.9.5-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:6a046c2e833169bf26f461286aab58a2ba8d48ed2220bfcf119dcfaf87163116", size = 20157103, upload-time = "2025-10-21T16:48:08.018Z" }, - { url = "https://files.pythonhosted.org/packages/04/9c/2788b82454dd485a5b3691cc6f465583e9ce8d4c45bac11461ff38165fd5/uv-0.9.5-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:9fc13b4b943d19adac52d7dcd2159e96ab2e837ac49a79e20714ed25f1f1b7f9", size = 21263882, upload-time = "2025-10-21T16:48:10.222Z" }, - { url = "https://files.pythonhosted.org/packages/c6/eb/73dd04b7e9c1df76fc6b263140917ba5d7d6d0d28c6913090f3e94e53220/uv-0.9.5-py3-none-musllinux_1_1_armv7l.whl", hash = "sha256:5bb4996329ba47e7e775baba4a47e85092aa491d708a66e63b564e9b306bfb7e", size = 20210317, upload-time = "2025-10-21T16:48:12.606Z" }, - { url = "https://files.pythonhosted.org/packages/bb/45/3f5e0954a727f037e75036ddef2361a16f23f2a4a2bc98c272bb64c273f1/uv-0.9.5-py3-none-musllinux_1_1_i686.whl", hash = "sha256:6452eb6257e37e1ebd97430b5f5e10419da2c3ca35b4086540ec4163b4b2f25c", size = 20614233, upload-time = "2025-10-21T16:48:14.937Z" }, - { url = "https://files.pythonhosted.org/packages/eb/fd/d1317e982a8b004339ca372fbf4d1807be5d765420970bde17bbd621cbf9/uv-0.9.5-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:3a4ecbfdcbd3dae4190428874762c791e05d2c97ff2872bf6c0a30ed5c4ea9ca", size = 21526600, upload-time = "2025-10-21T16:48:17.396Z" }, - { url = "https://files.pythonhosted.org/packages/9c/39/6b288c4e348c4113d4925c714606f7d1e0a7bfcb7f1ad001a28dbcf62f30/uv-0.9.5-py3-none-win32.whl", hash = "sha256:0316493044035098666d6e99c14bd61b352555d9717d57269f4ce531855330fa", size = 19469211, upload-time = "2025-10-21T16:48:19.668Z" }, - { url = "https://files.pythonhosted.org/packages/af/14/0f07d0b2e561548b4e3006208480a5fce8cdaae5247d85efbfb56e8e596b/uv-0.9.5-py3-none-win_amd64.whl", hash = "sha256:48a12390421f91af8a8993cf15c38297c0bb121936046286e287975b2fbf1789", size = 21404719, upload-time = "2025-10-21T16:48:22.145Z" }, - { url = "https://files.pythonhosted.org/packages/c7/33/14244c0641c2340653ae934e5c82750543fcddbcd260bdc2353a33b6148f/uv-0.9.5-py3-none-win_arm64.whl", hash = "sha256:c966e3a4fe4de3b0a6279d0a835c79f9cddbb3693f52d140910cbbed177c5742", size = 19911407, upload-time = "2025-10-21T16:48:24.974Z" }, +version = "0.9.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/4a/dd4d1a772afd0ad4167a932864e145ba3010d2a148e34171070bfcb85528/uv-0.9.9.tar.gz", hash = "sha256:dc5885fda74cec4cf8eea4115a6e0e431462c6c6bf1bd925abd72699d6b54f51", size = 3724446, upload-time = "2025-11-12T18:45:24.863Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/d2/dcf1ee2b977ebbe12c8666b56f49d1138b565fc475c0e80c00c50da6d321/uv-0.9.9-py3-none-linux_armv6l.whl", hash = "sha256:ea700f6e43389a3bd6aa90c02f3010b61ef987c3b025842281a8bd513e26cf3a", size = 20481964, upload-time = "2025-11-12T18:44:21.532Z" }, + { url = "https://files.pythonhosted.org/packages/80/d5/d9e18da60593d8d127a435fe5451033dba2ec6d11baea06d6cbad5e2e6b0/uv-0.9.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:7ea663b3e5e5b20a17efbc6c7f8db602abf72447d7cced0882a0dff71c2de1ef", size = 19589253, upload-time = "2025-11-12T18:44:26.35Z" }, + { url = "https://files.pythonhosted.org/packages/cc/47/436863f6d99cfc3e41408e1d28d07fb3d20227d5ff66f52666564a5649f5/uv-0.9.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e8303e17b7d2a2dc65ebc4cc65cc0b2be493566b4f7421279b008ecb10adfc5f", size = 18149442, upload-time = "2025-11-12T18:44:29.45Z" }, + { url = "https://files.pythonhosted.org/packages/15/04/b22cd0716369f63265c76ab254e98573cb65e2ee7908f5ffa90e1c2e18fc/uv-0.9.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:036e8d38f87ffbebcd478e6b61a2c4f8733f77fbdf34140b78e0f5ab238810cf", size = 19960485, upload-time = "2025-11-12T18:44:32.934Z" }, + { url = "https://files.pythonhosted.org/packages/e9/cd/de0f6d6292a812159a134c7ed0b1692ad1ea7baf6de3c66e48c2500bd919/uv-0.9.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aa5fb4ee5f85fe4364a2895bf849c98a4537f6a96a8da22922fb3eb149ef7aaf", size = 20085388, upload-time = "2025-11-12T18:44:36.036Z" }, + { url = "https://files.pythonhosted.org/packages/2f/bf/86ddcc9042d003c2edba8c534787bf5b8c15da026903084faaeb6cee4a7c/uv-0.9.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1bcb3e003d6b12cfb03a6223914b648de595a0b79ae2c0259411224966f3fd60", size = 20978689, upload-time = "2025-11-12T18:44:39.231Z" }, + { url = "https://files.pythonhosted.org/packages/fd/bb/d8f8ddfbc2c429a75df28b37594c9b8dfdf0f00f091175d5dabc6791ed09/uv-0.9.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:f423deb65d2c3aed8f896cd012f0fdaca47aff200fe35a81d9e0dfd543104c56", size = 22602188, upload-time = "2025-11-12T18:44:42.62Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4d/bf42ae81d0ccee4d5bbc401465da1a20b82944435a36eebb953e836ea6a8/uv-0.9.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb2eca9575bb993fdd4309c75f6253772b826b5a1778b83479e32e9097a35340", size = 22187774, upload-time = "2025-11-12T18:44:46.038Z" }, + { url = "https://files.pythonhosted.org/packages/4d/f9/e559d46b77a33c1ef5d10e5d6223ac6a60bbc681a11c9352782b3d391001/uv-0.9.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3bb84164437e71a55674898a1db34a1874489f362e90f0ce1d2be3c5ef214453", size = 21309545, upload-time = "2025-11-12T18:44:49.844Z" }, + { url = "https://files.pythonhosted.org/packages/4c/4a/d5357825bb47ff73762d247b1a553a966fef6802e3ab829fe60934cbf339/uv-0.9.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afdd00ddc25e12ed756e069090011ca55f127753e1192e51f45fa288a024f3df", size = 21287121, upload-time = "2025-11-12T18:44:53.745Z" }, + { url = "https://files.pythonhosted.org/packages/3a/97/9925ec558b9b7435d8646e74f8831aa10165e8768b6d9b0c702655b164fb/uv-0.9.9-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:4f2e2a818ce64b917aada5a752a92bc5665ed90f3ac1348689c4d92abe4af3f5", size = 20085994, upload-time = "2025-11-12T18:44:57.663Z" }, + { url = "https://files.pythonhosted.org/packages/11/ec/8fe7499790805913a2a8199e814aa78c1ab63db97ac654c741a2a5d493ca/uv-0.9.9-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:25892e6a4e5e1b9eb3cac8571a66c2f6f7be201ce114e443ef64e007dceeb640", size = 21118665, upload-time = "2025-11-12T18:45:00.949Z" }, + { url = "https://files.pythonhosted.org/packages/dc/49/8b93a53411789a35010bfc9f359391081c7bc2861d4d5c1d8d98b3d07cbb/uv-0.9.9-py3-none-musllinux_1_1_armv7l.whl", hash = "sha256:fa149da37045afde21d3167a5057ca8c5abbe65194f08ea59dfbd5f4faa25b13", size = 20064311, upload-time = "2025-11-12T18:45:04.425Z" }, + { url = "https://files.pythonhosted.org/packages/69/66/3c15283ffec67bd8302c34eaf871e50d71fceacfffc8ee26ff02b0adea69/uv-0.9.9-py3-none-musllinux_1_1_i686.whl", hash = "sha256:0b93153f1262873d6fc725f3a76264eb06e26a2651af17a1e797ae52e19eacb1", size = 20474039, upload-time = "2025-11-12T18:45:07.55Z" }, + { url = "https://files.pythonhosted.org/packages/08/ec/73bc3fb4613ad2b21b92a2c23d5bc6dc31c1acb1ca6a70bdc55e7c426ef6/uv-0.9.9-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:1263f03571f3fda44061862c14b92c992102d03f5e1015f3886d9252f9436d60", size = 21506473, upload-time = "2025-11-12T18:45:11.183Z" }, + { url = "https://files.pythonhosted.org/packages/85/5c/5b20529430140cc39255c0884da734610ccaaf2fd15f2cfabd29f6193d01/uv-0.9.9-py3-none-win32.whl", hash = "sha256:1d25f1aca2f8a3b24f3fdf9b029a9a923c429a828be7c9eee9fa073addedbc36", size = 19272132, upload-time = "2025-11-12T18:45:14.352Z" }, + { url = "https://files.pythonhosted.org/packages/f2/38/562295348cf2eb567fd5ea44512a645ea5bec2661a7e07b7f14fda54cb07/uv-0.9.9-py3-none-win_amd64.whl", hash = "sha256:1201765ae39643ef66bc6decfc44c5f8540fcaeae8b0914553b32e670f1941da", size = 21316052, upload-time = "2025-11-12T18:45:18.897Z" }, + { url = "https://files.pythonhosted.org/packages/9d/62/47e8d16da92ffb095388e45cc3f6e6c2ba1404d80590fb9528305517c7f3/uv-0.9.9-py3-none-win_arm64.whl", hash = "sha256:2695624ee43a8932c3fb414a98e4aed3b4f60306a24acd68e2b288dd5a58c370", size = 19821476, upload-time = "2025-11-12T18:45:22.462Z" }, ] [[package]] From 8be453a760640551f4f9c24ef25e4e89c8af812c Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 17 Nov 2025 21:39:38 +0100 Subject: [PATCH 018/104] set silence --- .../src/openstef_beam/benchmarking/benchmark_pipeline.py | 2 +- .../openstef_models/models/forecasting/gblinear_forecaster.py | 2 +- .../src/openstef_models/presets/forecasting_workflow.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py index bcd1ef070..28d823db1 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py @@ -187,7 +187,7 @@ def run( process_fn=partial(self._run_for_target, context, forecaster_factory), items=targets, n_processes=n_processes, - mode="loky", + mode="fork", # TODO: Change back to 'loky' after before commit ) if not self.storage.has_analysis_output( diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index cbda4b2df..92c3981a3 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -123,7 +123,7 @@ class GBLinearForecasterConfig(ForecasterConfig): default="cpu", description="Device for XGBoost computation. Options: 'cpu', 'cuda', 'cuda:', 'gpu'" ) verbosity: Literal[0, 1, 2, 3, True] = Field( - default=1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + default=0, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" ) def forecaster_from_config(self) -> "GBLinearForecaster": diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 263965b06..dc1ec7fb1 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -260,7 +260,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob ) verbosity: Literal[0, 1, 2, 3, True] = Field( - default=1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + default=0, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" ) # Metadata From ea902390e027c7a0c5b575ea386b1dfe54537b2a Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 18 Nov 2025 09:20:37 +0100 Subject: [PATCH 019/104] small fix --- .../src/openstef_models/presets/forecasting_workflow.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 4dbf20c52..dc1ec7fb1 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -36,7 +36,7 @@ from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster from openstef_models.transforms.energy_domain import WindPowerFeatureAdder from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, Imputer, NaNDropper, SampleWeighter, Scaler -from openstef_models.transforms.postprocessing import QuantileSorter +from openstef_models.transforms.postprocessing import ConfidenceIntervalApplicator, QuantileSorter from openstef_models.transforms.time_domain import ( CyclicFeaturesAdder, DatetimeFeaturesAdder, From 93baa03e3a2a91ccbbdca78365a2a2910c9fa9ae Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 18 Nov 2025 12:19:52 +0100 Subject: [PATCH 020/104] Fix final learner --- .../models/forecasting/hybrid_forecaster.py | 213 +++++++++--------- .../presets/forecasting_workflow.py | 13 +- 2 files changed, 120 insertions(+), 106 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index 7a41035b6..63e0c4a95 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -11,16 +11,17 @@ import logging from typing import override +from abc import abstractmethod import pandas as pd from pydantic import Field, field_validator -from sklearn.linear_model import QuantileRegressor from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( NotFittedError, ) from openstef_core.mixins import HyperParams +from openstef_core.types import LeadTime, Quantile from openstef_models.estimators.hybrid import HybridQuantileRegressor from openstef_models.models.forecasting.forecaster import ( Forecaster, @@ -53,6 +54,69 @@ ) +class FinalLearner: + """Combines base learner predictions for each quantile into final predictions.""" + + @abstractmethod + def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + raise NotImplementedError("Subclasses must implement the fit method.") + + def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + raise NotImplementedError("Subclasses must implement the predict method.") + + @property + @abstractmethod + def is_fitted(self) -> bool: + raise NotImplementedError("Subclasses must implement the is_fitted property.") + + +class FinalForecaster(FinalLearner): + """Combines base learner predictions for each quantile into final predictions.""" + + def __init__(self, forecaster: Forecaster, feature_adders: None = None) -> None: + # Feature adders placeholder for future use + + # Split forecaster per quantile + self.quantiles = forecaster.config.quantiles + models: list[Forecaster] = [] + for q in self.quantiles: + config = forecaster.config.model_copy( + update={ + "quantiles": [q], + } + ) + model = forecaster.__class__(config=config) + models.append(model) + self.models = models + + @override + def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + for i, q in enumerate(self.quantiles): + self.models[i].fit(data=base_learner_predictions[q], data_val=None) + + @override + def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + # Generate predictions + predictions = [ + self.models[i].predict(data=base_learner_predictions[q]).data for i, q in enumerate(self.quantiles) + ] + + # Concatenate predictions along columns to form a DataFrame with quantile columns + df = pd.concat(predictions, axis=1) + + return ForecastDataset( + data=df, + sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, + ) + + @property + def is_fitted(self) -> bool: + return all(x.is_fitted for x in self.models) + + class HybridHyperParams(HyperParams): """Hyperparameters for Stacked LGBM GBLinear Regressor.""" @@ -62,9 +126,15 @@ class HybridHyperParams(HyperParams): "Defaults to [LGBMHyperParams, GBLinearHyperParams].", ) - l1_penalty: float = Field( - default=0.0, - description="L1 regularization term for the quantile regression.", + final_hyperparams: BaseLearnerHyperParams = Field( + default=GBLinearHyperParams(), + description="Hyperparameters for the final learner. Defaults to GBLinearHyperParams.", + ) + + add_rolling_accuracy_features: bool = Field( + default=False, + description="Whether to add rolling accuracy features from base learners as additional features " + "to the final learner. Defaults to False.", ) @field_validator("base_hyperparams", mode="after") @@ -104,51 +174,8 @@ def __init__(self, config: HybridForecasterConfig) -> None: self._base_learners: list[BaseLearner] = self._init_base_learners( base_hyperparams=config.hyperparams.base_hyperparams ) - self._final_learner = [ - QuantileRegressor(quantile=float(q), alpha=config.hyperparams.l1_penalty) for q in config.quantiles - ] - - @staticmethod - def _hyperparams_forecast_map(hyperparams: type[BaseLearnerHyperParams]) -> type[BaseLearner]: - """Map hyperparameters to forecast types. - - Args: - hyperparams: Hyperparameters of the base learner. - - Returns: - Corresponding Forecaster class. - - Raises: - TypeError: If a nested HybridForecaster is attempted. - """ - if isinstance(hyperparams, HybridHyperParams): - raise TypeError("Nested HybridForecaster is not supported.") - - mapping: dict[type[BaseLearnerHyperParams], type[BaseLearner]] = { - LGBMHyperParams: LGBMForecaster, - LGBMLinearHyperParams: LGBMLinearForecaster, - XGBoostHyperParams: XGBoostForecaster, - GBLinearHyperParams: GBLinearForecaster, - } - return mapping[hyperparams] - - @staticmethod - def _base_learner_config(base_learner_class: type[BaseLearner]) -> type[BaseLearnerConfig]: - """Extract the configuration from a base learner. - - Args: - base_learner_class: The base learner forecaster. - - Returns: - The configuration of the base learner. - """ - mapping: dict[type[BaseLearner], type[BaseLearnerConfig]] = { - LGBMForecaster: LGBMForecasterConfig, - LGBMLinearForecaster: LGBMLinearForecasterConfig, - XGBoostForecaster: XGBoostForecasterConfig, - GBLinearForecaster: GBLinearForecasterConfig, - } - return mapping[base_learner_class] + final_forecaster = self._init_base_learners(base_hyperparams=[config.hyperparams.final_hyperparams])[0] + self._final_learner = FinalForecaster(forecaster=final_forecaster) def _init_base_learners(self, base_hyperparams: list[BaseLearnerHyperParams]) -> list[BaseLearner]: """Initialize base learners based on provided hyperparameters. @@ -192,6 +219,7 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None # Fit base learners [x.fit(data=data, data_val=data_val) for x in self._base_learners] + # Reset forecast start date to ensure we predict on the full dataset full_dataset = ForecastInputDataset( data=data.data, sample_interval=data.sample_interval, @@ -201,27 +229,17 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None base_predictions = self._predict_base_learners(data=full_dataset) - quantile_dataframes = self._prepare_input_final_learner(base_predictions=base_predictions) + quantile_datasets = self._prepare_input_final_learner( + base_predictions=base_predictions, quantiles=self._config.quantiles, target_series=data.target_series + ) - self._fit_final_learner(target=data.target_series, quantile_df=quantile_dataframes) + self._final_learner.fit( + base_learner_predictions=quantile_datasets, + ) self._is_fitted = True - def _fit_final_learner( - self, - target: pd.Series, - quantile_df: dict[str, pd.DataFrame], - ) -> None: - """Fit the final learner using base learner predictions. - - Args: - target: Target values for training. - quantile_df: Dictionary mapping quantile strings to DataFrames of base learner predictions. - """ - for i, df in enumerate(quantile_df.values()): - self._final_learner[i].fit(X=df, y=target) - - def _predict_base_learners(self, data: ForecastInputDataset) -> dict[str, ForecastDataset]: + def _predict_base_learners(self, data: ForecastInputDataset) -> dict[type[BaseLearner], ForecastDataset]: """Generate predictions from base learners. Args: @@ -230,37 +248,19 @@ def _predict_base_learners(self, data: ForecastInputDataset) -> dict[str, Foreca Returns: DataFrame containing base learner predictions. """ - base_predictions: dict[str, ForecastDataset] = {} + base_predictions: dict[type[BaseLearner], ForecastDataset] = {} for learner in self._base_learners: preds = learner.predict(data=data) - base_predictions[learner.__class__.__name__] = preds + base_predictions[learner.__class__] = preds return base_predictions - def _predict_final_learner( - self, quantile_df: dict[str, pd.DataFrame], data: ForecastInputDataset - ) -> ForecastDataset: - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - # Generate predictions - predictions_dict = [ - pd.Series(self._final_learner[i].predict(X=quantile_df[q_str]), index=quantile_df[q_str].index, name=q_str) - for i, q_str in enumerate(quantile_df.keys()) - ] - - # Construct DataFrame with appropriate quantile columns - predictions = pd.DataFrame( - data=predictions_dict, - ).T - - return ForecastDataset( - data=predictions, - sample_interval=data.sample_interval, - ) - @staticmethod - def _prepare_input_final_learner(base_predictions: dict[str, ForecastDataset]) -> dict[str, pd.DataFrame]: + def _prepare_input_final_learner( + quantiles: list[Quantile], + base_predictions: dict[type[BaseLearner], ForecastDataset], + target_series: pd.Series, + ) -> dict[Quantile, ForecastInputDataset]: """Prepare input data for the final learner based on base learner predictions. Args: @@ -269,14 +269,22 @@ def _prepare_input_final_learner(base_predictions: dict[str, ForecastDataset]) - Returns: dictionary mapping quantile strings to DataFrames of base learner predictions. """ - predictions_quantiles: dict[str, pd.DataFrame] = {} - first_key = next(iter(base_predictions)) - for quantile in base_predictions[first_key].quantiles: - quantile_str = quantile.format() - quantile_preds = pd.DataFrame({ - learner_name: preds.data[quantile_str] for learner_name, preds in base_predictions.items() + predictions_quantiles: dict[Quantile, ForecastInputDataset] = {} + sample_interval = base_predictions[next(iter(base_predictions))].sample_interval + target_name = str(target_series.name) + + for q in quantiles: + df = pd.DataFrame({ + learner.__name__: preds.data[Quantile(q).format()] for learner, preds in base_predictions.items() }) - predictions_quantiles[quantile_str] = quantile_preds + df[target_name] = target_series + + predictions_quantiles[q] = ForecastInputDataset( + data=df, + sample_interval=sample_interval, + target_column=target_name, + forecast_start=df.index[0], + ) return predictions_quantiles @@ -287,13 +295,12 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: base_predictions = self._predict_base_learners(data=data) - final_learner_input = self._prepare_input_final_learner(base_predictions=base_predictions) - - return self._predict_final_learner( - quantile_df=final_learner_input, - data=data, + final_learner_input = self._prepare_input_final_learner( + quantiles=self._config.quantiles, base_predictions=base_predictions, target_series=data.target_series ) + return self._final_learner.predict(base_learner_predictions=final_learner_input) + # TODO(@Lars800): #745: Make forecaster Explainable diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index dc1ec7fb1..1a33b4622 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -301,9 +301,9 @@ def create_forecasting_workflow( LagsAdder( history_available=config.predict_history, horizons=config.horizons, - add_trivial_lags=config.model != "gblinear", # GBLinear uses only 7day lag. + add_trivial_lags=config.model not in {"gblinear", "hybrid"}, # GBLinear uses only 7day lag. target_column=config.target_column, - custom_lags=[timedelta(days=7)] if config.model == "gblinear" else [], + custom_lags=[timedelta(days=7)] if config.model in {"gblinear", "hybrid"} else [], ), WindPowerFeatureAdder( windspeed_reference_column=config.wind_speed_column, @@ -428,9 +428,16 @@ def create_forecasting_workflow( elif config.model == "hybrid": preprocessing = [ *checks, - Imputer(selection=Exclude(config.target_column), imputation_strategy="mean"), *feature_adders, *feature_standardizers, + Imputer( + selection=Exclude(config.target_column), + imputation_strategy="mean", + fill_future_values=Include(config.energy_price_column), + ), + NaNDropper( + selection=Exclude(config.target_column), + ), ] forecaster = HybridForecaster( config=HybridForecaster.Config( From 4f8ea8f04089bff78222b96545a095c804825cd3 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 18 Nov 2025 20:43:32 +0100 Subject: [PATCH 021/104] fixed lgbm efficiency --- .../src/openstef_models/models/forecasting/lgbm_forecaster.py | 1 + .../openstef_models/models/forecasting/lgbmlinear_forecaster.py | 1 + 2 files changed, 2 insertions(+) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index f46009502..4fc07ac75 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -238,6 +238,7 @@ def __init__(self, config: LGBMForecasterConfig) -> None: "random_state": config.random_state, "early_stopping_rounds": config.early_stopping_rounds, "verbosity": config.verbosity, + "n_jobs": config.n_jobs, **config.hyperparams.model_dump(), } diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index 57a9d96f8..2dc7a8f87 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -240,6 +240,7 @@ def __init__(self, config: LGBMLinearForecasterConfig) -> None: "random_state": config.random_state, "early_stopping_rounds": config.early_stopping_rounds, "verbosity": config.verbosity, + "n_jobs": config.n_jobs, **config.hyperparams.model_dump(), } From b4bdbdca44880a6945f4a417a30a4074bba694b7 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 20 Nov 2025 10:01:07 +0100 Subject: [PATCH 022/104] updated lgbm linear params --- .../models/forecasting/lgbmlinear_forecaster.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index 2dc7a8f87..eace689fb 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -52,7 +52,7 @@ class LGBMLinearHyperParams(HyperParams): # Core Tree Boosting Parameters n_estimators: int = Field( - default=77, + default=100, description="Number of boosting rounds/trees to fit. Higher values may improve performance but " "increase training time and risk overfitting.", ) @@ -63,7 +63,7 @@ class LGBMLinearHyperParams(HyperParams): "more boosting rounds.", ) max_depth: int = Field( - default=1, + default=6, description="Maximum depth of trees. Higher values capture more complex patterns but risk " "overfitting. Range: [1,∞]", ) @@ -74,11 +74,11 @@ class LGBMLinearHyperParams(HyperParams): ) min_data_in_leaf: int = Field( - default=5, + default=500, description="Minimum number of data points in a leaf. Higher values prevent overfitting. Range: [1,∞]", ) min_data_in_bin: int = Field( - default=13, + default=500, description="Minimum number of data points in a bin. Higher values prevent overfitting. Range: [1,∞]", ) @@ -94,19 +94,19 @@ class LGBMLinearHyperParams(HyperParams): # Tree Structure Control num_leaves: int = Field( - default=78, + default=30, description="Maximum number of leaves. 0 means no limit. Only relevant when grow_policy='lossguide'.", ) max_bin: int = Field( - default=12, + default=256, description="Maximum number of discrete bins for continuous features. Higher values may improve accuracy but " "increase memory.", ) # Subsampling Parameters colsample_bytree: float = Field( - default=0.5, + default=1, description="Fraction of features used when constructing each tree. Range: (0,1]", ) @@ -158,7 +158,7 @@ class LGBMLinearForecasterConfig(ForecasterConfig): ) early_stopping_rounds: int | None = Field( - default=10, + default=None, description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", ) From ea1f5f7cf479a898f19a8b4afc4885dc37b8e4fc Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 20 Nov 2025 10:49:04 +0100 Subject: [PATCH 023/104] Fixed type and quality issues --- .../openstef_beam/benchmarking/benchmark_pipeline.py | 2 +- .../models/forecasting/hybrid_forecaster.py | 11 +++++++---- .../models/forecasting/lgbm_forecaster.py | 2 +- .../openstef_models/utils/multi_quantile_regressor.py | 8 ++++++++ 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py index 28d823db1..bcd1ef070 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py @@ -187,7 +187,7 @@ def run( process_fn=partial(self._run_for_target, context, forecaster_factory), items=targets, n_processes=n_processes, - mode="fork", # TODO: Change back to 'loky' after before commit + mode="loky", ) if not self.storage.has_analysis_output( diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index 63e0c4a95..3cda34562 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -10,8 +10,8 @@ """ import logging -from typing import override from abc import abstractmethod +from typing import override import pandas as pd from pydantic import Field, field_validator @@ -21,7 +21,7 @@ NotFittedError, ) from openstef_core.mixins import HyperParams -from openstef_core.types import LeadTime, Quantile +from openstef_core.types import Quantile from openstef_models.estimators.hybrid import HybridQuantileRegressor from openstef_models.models.forecasting.forecaster import ( Forecaster, @@ -75,6 +75,8 @@ class FinalForecaster(FinalLearner): def __init__(self, forecaster: Forecaster, feature_adders: None = None) -> None: # Feature adders placeholder for future use + if feature_adders is not None: + raise NotImplementedError("Feature adders are not yet implemented.") # Split forecaster per quantile self.quantiles = forecaster.config.quantiles @@ -168,7 +170,6 @@ class HybridForecaster(Forecaster): def __init__(self, config: HybridForecasterConfig) -> None: """Initialize the Hybrid forecaster.""" - self._config = config self._base_learners: list[BaseLearner] = self._init_base_learners( @@ -264,7 +265,9 @@ def _prepare_input_final_learner( """Prepare input data for the final learner based on base learner predictions. Args: - base_predictions: Dictionary of base learner predictions. + quantiles: List of quantiles to prepare data for. + base_predictions: Predictions from base learners. + target_series: Actual target series for reference. Returns: dictionary mapping quantile strings to DataFrames of base learner predictions. diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index 4fc07ac75..03c667b00 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -22,7 +22,7 @@ ) from openstef_core.mixins import HyperParams from openstef_models.explainability.mixins import ExplainableForecaster -from openstef_models.models.forecasting.forecaster import ForecasterConfig, Forecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor if TYPE_CHECKING: diff --git a/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py index 8a6276927..763932268 100644 --- a/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py +++ b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py @@ -1,3 +1,11 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Adaptor for multi-quantile regression using a base quantile regressor. + +Designed to work with scikit-learn compatible regressors that support quantile regression. +""" + import logging import numpy as np From 22688e026b41870a463eb00f08772924bdf73046 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 20 Nov 2025 15:11:33 +0100 Subject: [PATCH 024/104] First Version Sample Weighting Approach Signed-off-by: Lars van Someren --- ...liander_2024_benchmark_xgboost_gblinear.py | 3 +- .../models/forecasting/hybrid_forecaster.py | 87 ++++++++++++++++++- 2 files changed, 87 insertions(+), 3 deletions(-) diff --git a/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py b/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py index dc41b1744..4ff925cce 100644 --- a/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py +++ b/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py @@ -45,7 +45,8 @@ BENCHMARK_RESULTS_PATH_XGBOOST = OUTPUT_PATH / "XGBoost" BENCHMARK_RESULTS_PATH_GBLINEAR = OUTPUT_PATH / "GBLinear" -N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = 1 # Amount of parallel processes to use for the benchmark + # Model configuration FORECAST_HORIZONS = [LeadTime.from_string("P3D")] # Forecast horizon(s) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py index 3cda34562..274be98a7 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -10,11 +10,13 @@ """ import logging +import time from abc import abstractmethod from typing import override import pandas as pd from pydantic import Field, field_validator +from sklearn.ensemble import RandomForestClassifier from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( @@ -44,6 +46,8 @@ XGBoostHyperParams, ) +from lightgbm import LGBMClassifier + logger = logging.getLogger(__name__) @@ -54,6 +58,14 @@ ) +def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, alpha: float) -> pd.Series: + """Calculate pinball loss for given true and predicted values.""" + + diff = y_true - y_pred + sign = (diff >= 0).astype(float) + return alpha * sign * diff - (1 - alpha) * (1 - sign) * diff + + class FinalLearner: """Combines base learner predictions for each quantile into final predictions.""" @@ -119,6 +131,68 @@ def is_fitted(self) -> bool: return all(x.is_fitted for x in self.models) +class FinalWeighter(FinalLearner): + """Combines base learner predictions with a classification approach to determine which base learner to use.""" + + def __init__(self, quantiles: list[Quantile]) -> None: + self.quantiles = quantiles + self.models = [LGBMClassifier(class_weight="balanced", n_estimators=20) for _ in quantiles] + self._is_fitted = False + + @override + def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + for i, q in enumerate(self.quantiles): + pred = base_learner_predictions[q].data.drop(columns=[base_learner_predictions[q].target_column]) + labels = self._prepare_classification_data( + quantile=q, + target=base_learner_predictions[q].target_series, + predictions=pred, + ) + + self.models[i].fit(X=pred, y=labels) + self._is_fitted = True + + @staticmethod + def _prepare_classification_data(quantile: Quantile, target: pd.Series, predictions: pd.DataFrame) -> pd.Series: + """Selects base learner with lowest error for each sample as target for classification.""" + # Calculate pinball loss for each base learner + pinball_losses = predictions.apply(lambda x: calculate_pinball_errors(y_true=target, y_pred=x, alpha=quantile)) + + # For each sample, select the base learner with the lowest pinball loss + return pinball_losses.idxmin(axis=1) + + def _calculate_sample_weights_quantile(self, base_predictions: pd.DataFrame, quantile: Quantile) -> pd.DataFrame: + model = self.models[self.quantiles.index(quantile)] + + return model.predict_proba(X=base_predictions) + + def _generate_predictions_quantile(self, base_predictions: ForecastInputDataset, quantile: Quantile) -> pd.Series: + df = base_predictions.data.drop(columns=[base_predictions.target_column]) + weights = self._calculate_sample_weights_quantile(base_predictions=df, quantile=quantile) + + return df.mul(weights).sum(axis=1) + + @override + def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + # Generate predictions + predictions = pd.DataFrame({ + Quantile(q).format(): self._generate_predictions_quantile(base_predictions=data, quantile=q) + for q, data in base_learner_predictions.items() + }) + + return ForecastDataset( + data=predictions, + sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, + ) + + @property + def is_fitted(self) -> bool: + return self._is_fitted + + class HybridHyperParams(HyperParams): """Hyperparameters for Stacked LGBM GBLinear Regressor.""" @@ -133,6 +207,11 @@ class HybridHyperParams(HyperParams): description="Hyperparameters for the final learner. Defaults to GBLinearHyperParams.", ) + use_classifier: bool = Field( + default=True, + description="Whether to use sample weights when fitting base and final learners. Defaults to False.", + ) + add_rolling_accuracy_features: bool = Field( default=False, description="Whether to add rolling accuracy features from base learners as additional features " @@ -175,8 +254,12 @@ def __init__(self, config: HybridForecasterConfig) -> None: self._base_learners: list[BaseLearner] = self._init_base_learners( base_hyperparams=config.hyperparams.base_hyperparams ) - final_forecaster = self._init_base_learners(base_hyperparams=[config.hyperparams.final_hyperparams])[0] - self._final_learner = FinalForecaster(forecaster=final_forecaster) + if config.hyperparams.use_classifier: + self._final_learner = FinalWeighter(quantiles=config.quantiles) + + else: + final_forecaster = self._init_base_learners(base_hyperparams=[config.hyperparams.final_hyperparams])[0] + self._final_learner = FinalForecaster(forecaster=final_forecaster) def _init_base_learners(self, base_hyperparams: list[BaseLearnerHyperParams]) -> list[BaseLearner]: """Initialize base learners based on provided hyperparameters. From 9b971d3e5a91398e6b43f8f687cb625181d6a5e5 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Fri, 21 Nov 2025 16:33:31 +0100 Subject: [PATCH 025/104] MetaForecasterClass Signed-off-by: Lars van Someren --- .../models/forecasting/hybrid_forecaster.py | 393 ------------------ .../models/forecasting/meta/__init__.py | 7 + .../meta/learned_weights_forecaster.py | 183 ++++++++ .../forecasting/meta/meta_forecaster.py | 240 +++++++++++ .../forecasting/meta/stacking_forecaster.py | 161 +++++++ .../presets/forecasting_workflow.py | 53 ++- .../tests/unit/estimators/__init__.py | 0 .../tests/unit/estimators/test_hybrid.py | 43 -- .../meta/test_learned_weights_forecaster.py | 105 +++++ .../test_stacking_forecaster.py} | 38 +- 10 files changed, 756 insertions(+), 467 deletions(-) delete mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/meta/__init__.py create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/meta/learned_weights_forecaster.py create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/meta/meta_forecaster.py create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/meta/stacking_forecaster.py delete mode 100644 packages/openstef-models/tests/unit/estimators/__init__.py delete mode 100644 packages/openstef-models/tests/unit/estimators/test_hybrid.py create mode 100644 packages/openstef-models/tests/unit/models/forecasting/meta/test_learned_weights_forecaster.py rename packages/openstef-models/tests/unit/models/forecasting/{test_hybrid_forecaster.py => meta/test_stacking_forecaster.py} (77%) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py deleted file mode 100644 index 274be98a7..000000000 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ /dev/null @@ -1,393 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Hybrid Forecaster (Stacked LightGBM + Linear Model Gradient Boosting). - -Provides method that attempts to combine the advantages of a linear model (Extraplolation) -and tree-based model (Non-linear patterns). This is acieved by training two base learners, -followed by a small linear model that regresses on the baselearners' predictions. -The implementation is based on sklearn's StackingRegressor. -""" - -import logging -import time -from abc import abstractmethod -from typing import override - -import pandas as pd -from pydantic import Field, field_validator -from sklearn.ensemble import RandomForestClassifier - -from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_core.exceptions import ( - NotFittedError, -) -from openstef_core.mixins import HyperParams -from openstef_core.types import Quantile -from openstef_models.estimators.hybrid import HybridQuantileRegressor -from openstef_models.models.forecasting.forecaster import ( - Forecaster, - ForecasterConfig, -) -from openstef_models.models.forecasting.gblinear_forecaster import ( - GBLinearForecaster, - GBLinearForecasterConfig, - GBLinearHyperParams, -) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMForecasterConfig, LGBMHyperParams -from openstef_models.models.forecasting.lgbmlinear_forecaster import ( - LGBMLinearForecaster, - LGBMLinearForecasterConfig, - LGBMLinearHyperParams, -) -from openstef_models.models.forecasting.xgboost_forecaster import ( - XGBoostForecaster, - XGBoostForecasterConfig, - XGBoostHyperParams, -) - -from lightgbm import LGBMClassifier - -logger = logging.getLogger(__name__) - - -BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster -BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams -BaseLearnerConfig = ( - LGBMForecasterConfig | LGBMLinearForecasterConfig | XGBoostForecasterConfig | GBLinearForecasterConfig -) - - -def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, alpha: float) -> pd.Series: - """Calculate pinball loss for given true and predicted values.""" - - diff = y_true - y_pred - sign = (diff >= 0).astype(float) - return alpha * sign * diff - (1 - alpha) * (1 - sign) * diff - - -class FinalLearner: - """Combines base learner predictions for each quantile into final predictions.""" - - @abstractmethod - def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: - raise NotImplementedError("Subclasses must implement the fit method.") - - def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: - raise NotImplementedError("Subclasses must implement the predict method.") - - @property - @abstractmethod - def is_fitted(self) -> bool: - raise NotImplementedError("Subclasses must implement the is_fitted property.") - - -class FinalForecaster(FinalLearner): - """Combines base learner predictions for each quantile into final predictions.""" - - def __init__(self, forecaster: Forecaster, feature_adders: None = None) -> None: - # Feature adders placeholder for future use - if feature_adders is not None: - raise NotImplementedError("Feature adders are not yet implemented.") - - # Split forecaster per quantile - self.quantiles = forecaster.config.quantiles - models: list[Forecaster] = [] - for q in self.quantiles: - config = forecaster.config.model_copy( - update={ - "quantiles": [q], - } - ) - model = forecaster.__class__(config=config) - models.append(model) - self.models = models - - @override - def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: - for i, q in enumerate(self.quantiles): - self.models[i].fit(data=base_learner_predictions[q], data_val=None) - - @override - def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - # Generate predictions - predictions = [ - self.models[i].predict(data=base_learner_predictions[q]).data for i, q in enumerate(self.quantiles) - ] - - # Concatenate predictions along columns to form a DataFrame with quantile columns - df = pd.concat(predictions, axis=1) - - return ForecastDataset( - data=df, - sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, - ) - - @property - def is_fitted(self) -> bool: - return all(x.is_fitted for x in self.models) - - -class FinalWeighter(FinalLearner): - """Combines base learner predictions with a classification approach to determine which base learner to use.""" - - def __init__(self, quantiles: list[Quantile]) -> None: - self.quantiles = quantiles - self.models = [LGBMClassifier(class_weight="balanced", n_estimators=20) for _ in quantiles] - self._is_fitted = False - - @override - def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: - for i, q in enumerate(self.quantiles): - pred = base_learner_predictions[q].data.drop(columns=[base_learner_predictions[q].target_column]) - labels = self._prepare_classification_data( - quantile=q, - target=base_learner_predictions[q].target_series, - predictions=pred, - ) - - self.models[i].fit(X=pred, y=labels) - self._is_fitted = True - - @staticmethod - def _prepare_classification_data(quantile: Quantile, target: pd.Series, predictions: pd.DataFrame) -> pd.Series: - """Selects base learner with lowest error for each sample as target for classification.""" - # Calculate pinball loss for each base learner - pinball_losses = predictions.apply(lambda x: calculate_pinball_errors(y_true=target, y_pred=x, alpha=quantile)) - - # For each sample, select the base learner with the lowest pinball loss - return pinball_losses.idxmin(axis=1) - - def _calculate_sample_weights_quantile(self, base_predictions: pd.DataFrame, quantile: Quantile) -> pd.DataFrame: - model = self.models[self.quantiles.index(quantile)] - - return model.predict_proba(X=base_predictions) - - def _generate_predictions_quantile(self, base_predictions: ForecastInputDataset, quantile: Quantile) -> pd.Series: - df = base_predictions.data.drop(columns=[base_predictions.target_column]) - weights = self._calculate_sample_weights_quantile(base_predictions=df, quantile=quantile) - - return df.mul(weights).sum(axis=1) - - @override - def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - # Generate predictions - predictions = pd.DataFrame({ - Quantile(q).format(): self._generate_predictions_quantile(base_predictions=data, quantile=q) - for q, data in base_learner_predictions.items() - }) - - return ForecastDataset( - data=predictions, - sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, - ) - - @property - def is_fitted(self) -> bool: - return self._is_fitted - - -class HybridHyperParams(HyperParams): - """Hyperparameters for Stacked LGBM GBLinear Regressor.""" - - base_hyperparams: list[BaseLearnerHyperParams] = Field( - default=[LGBMHyperParams(), GBLinearHyperParams()], - description="List of hyperparameter configurations for base learners. " - "Defaults to [LGBMHyperParams, GBLinearHyperParams].", - ) - - final_hyperparams: BaseLearnerHyperParams = Field( - default=GBLinearHyperParams(), - description="Hyperparameters for the final learner. Defaults to GBLinearHyperParams.", - ) - - use_classifier: bool = Field( - default=True, - description="Whether to use sample weights when fitting base and final learners. Defaults to False.", - ) - - add_rolling_accuracy_features: bool = Field( - default=False, - description="Whether to add rolling accuracy features from base learners as additional features " - "to the final learner. Defaults to False.", - ) - - @field_validator("base_hyperparams", mode="after") - @classmethod - def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: - hp_classes = [type(hp) for hp in v] - if not len(hp_classes) == len(set(hp_classes)): - raise ValueError("Duplicate base learner hyperparameter classes are not allowed.") - return v - - -class HybridForecasterConfig(ForecasterConfig): - """Configuration for Hybrid-based forecasting models.""" - - hyperparams: HybridHyperParams = HybridHyperParams() - - verbosity: bool = Field( - default=True, - description="Enable verbose output from the Hybrid model (True/False).", - ) - - -class HybridForecaster(Forecaster): - """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" - - Config = HybridForecasterConfig - HyperParams = HybridHyperParams - - _config: HybridForecasterConfig - model: HybridQuantileRegressor - - def __init__(self, config: HybridForecasterConfig) -> None: - """Initialize the Hybrid forecaster.""" - self._config = config - - self._base_learners: list[BaseLearner] = self._init_base_learners( - base_hyperparams=config.hyperparams.base_hyperparams - ) - if config.hyperparams.use_classifier: - self._final_learner = FinalWeighter(quantiles=config.quantiles) - - else: - final_forecaster = self._init_base_learners(base_hyperparams=[config.hyperparams.final_hyperparams])[0] - self._final_learner = FinalForecaster(forecaster=final_forecaster) - - def _init_base_learners(self, base_hyperparams: list[BaseLearnerHyperParams]) -> list[BaseLearner]: - """Initialize base learners based on provided hyperparameters. - - Returns: - list[Forecaster]: List of initialized base learner forecasters. - """ - base_learners: list[BaseLearner] = [] - horizons = self.config.horizons - quantiles = self.config.quantiles - - for hyperparams in base_hyperparams: - forecaster_cls = hyperparams.forecaster_class() - config = forecaster_cls.Config(horizons=horizons, quantiles=quantiles) - if "hyperparams" in forecaster_cls.Config.model_fields: - config = config.model_copy(update={"hyperparams": hyperparams}) - - base_learners.append(config.forecaster_from_config()) - - return base_learners - - @property - @override - def is_fitted(self) -> bool: - return all(x.is_fitted for x in self._base_learners) - - @property - @override - def config(self) -> ForecasterConfig: - return self._config - - @override - def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: - """Fit the Hybrid model to the training data. - - Args: - data: Training data in the expected ForecastInputDataset format. - data_val: Validation data for tuning the model (optional, not used in this implementation). - - """ - # Fit base learners - [x.fit(data=data, data_val=data_val) for x in self._base_learners] - - # Reset forecast start date to ensure we predict on the full dataset - full_dataset = ForecastInputDataset( - data=data.data, - sample_interval=data.sample_interval, - target_column=data.target_column, - forecast_start=data.index[0], - ) - - base_predictions = self._predict_base_learners(data=full_dataset) - - quantile_datasets = self._prepare_input_final_learner( - base_predictions=base_predictions, quantiles=self._config.quantiles, target_series=data.target_series - ) - - self._final_learner.fit( - base_learner_predictions=quantile_datasets, - ) - - self._is_fitted = True - - def _predict_base_learners(self, data: ForecastInputDataset) -> dict[type[BaseLearner], ForecastDataset]: - """Generate predictions from base learners. - - Args: - data: Input data for prediction. - - Returns: - DataFrame containing base learner predictions. - """ - base_predictions: dict[type[BaseLearner], ForecastDataset] = {} - for learner in self._base_learners: - preds = learner.predict(data=data) - base_predictions[learner.__class__] = preds - - return base_predictions - - @staticmethod - def _prepare_input_final_learner( - quantiles: list[Quantile], - base_predictions: dict[type[BaseLearner], ForecastDataset], - target_series: pd.Series, - ) -> dict[Quantile, ForecastInputDataset]: - """Prepare input data for the final learner based on base learner predictions. - - Args: - quantiles: List of quantiles to prepare data for. - base_predictions: Predictions from base learners. - target_series: Actual target series for reference. - - Returns: - dictionary mapping quantile strings to DataFrames of base learner predictions. - """ - predictions_quantiles: dict[Quantile, ForecastInputDataset] = {} - sample_interval = base_predictions[next(iter(base_predictions))].sample_interval - target_name = str(target_series.name) - - for q in quantiles: - df = pd.DataFrame({ - learner.__name__: preds.data[Quantile(q).format()] for learner, preds in base_predictions.items() - }) - df[target_name] = target_series - - predictions_quantiles[q] = ForecastInputDataset( - data=df, - sample_interval=sample_interval, - target_column=target_name, - forecast_start=df.index[0], - ) - - return predictions_quantiles - - @override - def predict(self, data: ForecastInputDataset) -> ForecastDataset: - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - base_predictions = self._predict_base_learners(data=data) - - final_learner_input = self._prepare_input_final_learner( - quantiles=self._config.quantiles, base_predictions=base_predictions, target_series=data.target_series - ) - - return self._final_learner.predict(base_learner_predictions=final_learner_input) - - # TODO(@Lars800): #745: Make forecaster Explainable - - -__all__ = ["HybridForecaster", "HybridForecasterConfig", "HybridHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/meta/__init__.py b/packages/openstef-models/src/openstef_models/models/forecasting/meta/__init__.py new file mode 100644 index 000000000..9ef8b6fdf --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/meta/__init__.py @@ -0,0 +1,7 @@ +from .meta_forecaster import FinalLearner, MetaForecaster, MetaHyperParams + +__all__ = [ + "FinalLearner", + "MetaForecaster", + "MetaHyperParams", +] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/meta/learned_weights_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/meta/learned_weights_forecaster.py new file mode 100644 index 000000000..62d00a488 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/meta/learned_weights_forecaster.py @@ -0,0 +1,183 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Hybrid Forecaster (Stacked LightGBM + Linear Model Gradient Boosting). + +Provides method that attempts to combine the advantages of a linear model (Extraplolation) +and tree-based model (Non-linear patterns). This is acieved by training two base learners, +followed by a small linear model that regresses on the baselearners' predictions. +The implementation is based on sklearn's StackingRegressor. +""" + +import logging +from typing import override + +import pandas as pd +from lightgbm import LGBMClassifier +from pydantic import Field + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + NotFittedError, +) +from openstef_core.types import Quantile +from openstef_models.models.forecasting.forecaster import ( + ForecasterConfig, +) +from openstef_models.models.forecasting.gblinear_forecaster import ( + GBLinearHyperParams, +) +from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +from openstef_models.models.forecasting.meta.meta_forecaster import ( + BaseLearner, + BaseLearnerHyperParams, + FinalLearner, + MetaForecaster, + MetaHyperParams, +) + +logger = logging.getLogger(__name__) + + +def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, alpha: float) -> pd.Series: + """Calculate pinball loss for given true and predicted values. + + Args: + y_true: True values as a pandas Series. + y_pred: Predicted values as a pandas Series. + alpha: Quantile value. + + Returns: + A pandas Series containing the pinball loss for each sample. + """ + diff = y_true - y_pred + sign = (diff >= 0).astype(float) + return alpha * sign * diff - (1 - alpha) * (1 - sign) * diff + + +class LearnedWeightsFinalLearner(FinalLearner): + """Combines base learner predictions with a classification approach to determine which base learner to use.""" + + def __init__(self, quantiles: list[Quantile]) -> None: + self.quantiles = quantiles + self.models = [LGBMClassifier(class_weight="balanced", n_estimators=20) for _ in quantiles] + self._is_fitted = False + + @override + def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + for i, q in enumerate(self.quantiles): + pred = base_learner_predictions[q].data.drop(columns=[base_learner_predictions[q].target_column]) + labels = self._prepare_classification_data( + quantile=q, + target=base_learner_predictions[q].target_series, + predictions=pred, + ) + + self.models[i].fit(X=pred, y=labels) # type: ignore + self._is_fitted = True + + @staticmethod + def _prepare_classification_data(quantile: Quantile, target: pd.Series, predictions: pd.DataFrame) -> pd.Series: + """Selects base learner with lowest error for each sample as target for classification. + + Returns: + pd.Series: Series indicating the base learner with the lowest pinball loss for each sample. + """ + + # Calculate pinball loss for each base learner + def column_pinball_losses(preds: pd.Series) -> pd.Series: + return calculate_pinball_errors(y_true=target, y_pred=preds, alpha=quantile) + + pinball_losses = predictions.apply(column_pinball_losses) + + # For each sample, select the base learner with the lowest pinball loss + return pinball_losses.idxmin(axis=1) + + def _calculate_sample_weights_quantile(self, base_predictions: pd.DataFrame, quantile: Quantile) -> pd.DataFrame: + model = self.models[self.quantiles.index(quantile)] + + return model.predict_proba(X=base_predictions) # type: ignore + + def _generate_predictions_quantile(self, base_predictions: ForecastInputDataset, quantile: Quantile) -> pd.Series: + df = base_predictions.data.drop(columns=[base_predictions.target_column]) + weights = self._calculate_sample_weights_quantile(base_predictions=df, quantile=quantile) + + return df.mul(weights).sum(axis=1) + + @override + def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + # Generate predictions + predictions = pd.DataFrame({ + Quantile(q).format(): self._generate_predictions_quantile(base_predictions=data, quantile=q) + for q, data in base_learner_predictions.items() + }) + + return ForecastDataset( + data=predictions, + sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, + ) + + @property + def is_fitted(self) -> bool: + return self._is_fitted + + +class LearnedWeightsHyperParams(MetaHyperParams): + """Hyperparameters for Stacked LGBM GBLinear Regressor.""" + + base_hyperparams: list[BaseLearnerHyperParams] = Field( + default=[LGBMHyperParams(), GBLinearHyperParams()], + description="List of hyperparameter configurations for base learners. " + "Defaults to [LGBMHyperParams, GBLinearHyperParams].", + ) + + final_hyperparams: BaseLearnerHyperParams = Field( + default=GBLinearHyperParams(), + description="Hyperparameters for the final learner. Defaults to GBLinearHyperParams.", + ) + + use_classifier: bool = Field( + default=True, + description="Whether to use sample weights when fitting base and final learners. Defaults to False.", + ) + + add_rolling_accuracy_features: bool = Field( + default=False, + description="Whether to add rolling accuracy features from base learners as additional features " + "to the final learner. Defaults to False.", + ) + + +class LearnedWeightsForecasterConfig(ForecasterConfig): + """Configuration for Hybrid-based forecasting models.""" + + hyperparams: LearnedWeightsHyperParams = LearnedWeightsHyperParams() + + verbosity: bool = Field( + default=True, + description="Enable verbose output from the Hybrid model (True/False).", + ) + + +class LearnedWeightsForecaster(MetaForecaster): + """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" + + Config = LearnedWeightsForecasterConfig + HyperParams = LearnedWeightsHyperParams + + def __init__(self, config: LearnedWeightsForecasterConfig) -> None: + """Initialize the LearnedWeightsForecaster.""" + self._config = config + + self._base_learners: list[BaseLearner] = self._init_base_learners( + base_hyperparams=config.hyperparams.base_hyperparams + ) + self._final_learner = LearnedWeightsFinalLearner(quantiles=config.quantiles) + + # TODO(@Lars800): #745: Make forecaster Explainable + + +__all__ = ["LearnedWeightsForecaster", "LearnedWeightsForecasterConfig", "LearnedWeightsHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/meta/meta_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/meta/meta_forecaster.py new file mode 100644 index 000000000..07b58501f --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/meta/meta_forecaster.py @@ -0,0 +1,240 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Core meta model interfaces and configurations. + +Provides the fundamental building blocks for implementing meta models in OpenSTEF. +These mixins establish contracts that ensure consistent behavior across different meta model types +while ensuring full compatability with regular Forecasters. +""" + +import logging +from abc import abstractmethod +from typing import override + +import pandas as pd +from pydantic import field_validator + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + NotFittedError, +) +from openstef_core.mixins import HyperParams +from openstef_core.types import Quantile +from openstef_models.models.forecasting.forecaster import ( + Forecaster, + ForecasterConfig, +) +from openstef_models.models.forecasting.gblinear_forecaster import ( + GBLinearForecaster, + GBLinearForecasterConfig, + GBLinearHyperParams, +) +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMForecasterConfig, LGBMHyperParams +from openstef_models.models.forecasting.lgbmlinear_forecaster import ( + LGBMLinearForecaster, + LGBMLinearForecasterConfig, + LGBMLinearHyperParams, +) +from openstef_models.models.forecasting.xgboost_forecaster import ( + XGBoostForecaster, + XGBoostForecasterConfig, + XGBoostHyperParams, +) + +logger = logging.getLogger(__name__) + + +BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster +BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams +BaseLearnerConfig = ( + LGBMForecasterConfig | LGBMLinearForecasterConfig | XGBoostForecasterConfig | GBLinearForecasterConfig +) + + +class FinalLearner: + """Combines base learner predictions for each quantile into final predictions.""" + + @abstractmethod + def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + """Fit the final learner using base learner predictions. + + Args: + base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner + """ + raise NotImplementedError("Subclasses must implement the fit method.") + + def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + """Generate final predictions based on base learner predictions. + + Args: + base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner + predictions. + + Returns: + ForecastDataset containing the final predictions. + """ + raise NotImplementedError("Subclasses must implement the predict method.") + + @property + @abstractmethod + def is_fitted(self) -> bool: + """Indicates whether the final learner has been fitted.""" + raise NotImplementedError("Subclasses must implement the is_fitted property.") + + +class MetaHyperParams(HyperParams): + """Hyperparameters for Stacked LGBM GBLinear Regressor.""" + + base_hyperparams: list[BaseLearnerHyperParams] + + @field_validator("base_hyperparams", mode="after") + @classmethod + def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: + hp_classes = [type(hp) for hp in v] + if not len(hp_classes) == len(set(hp_classes)): + raise ValueError("Duplicate base learner hyperparameter classes are not allowed.") + return v + + +class MetaForecaster(Forecaster): + """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" + + _config: ForecasterConfig + _base_learners: list[BaseLearner] + _final_learner: FinalLearner + + def _init_base_learners(self, base_hyperparams: list[BaseLearnerHyperParams]) -> list[BaseLearner]: + """Initialize base learners based on provided hyperparameters. + + Returns: + list[Forecaster]: List of initialized base learner forecasters. + """ + base_learners: list[BaseLearner] = [] + horizons = self.config.horizons + quantiles = self.config.quantiles + + for hyperparams in base_hyperparams: + forecaster_cls = hyperparams.forecaster_class() + config = forecaster_cls.Config(horizons=horizons, quantiles=quantiles) + if "hyperparams" in forecaster_cls.Config.model_fields: + config = config.model_copy(update={"hyperparams": hyperparams}) + + base_learners.append(config.forecaster_from_config()) + + return base_learners + + @property + @override + def is_fitted(self) -> bool: + return all(x.is_fitted for x in self._base_learners) and self._final_learner.is_fitted + + @property + @override + def config(self) -> ForecasterConfig: + return self._config + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + """Fit the Hybrid model to the training data. + + Args: + data: Training data in the expected ForecastInputDataset format. + data_val: Validation data for tuning the model (optional, not used in this implementation). + + """ + # Fit base learners + [x.fit(data=data, data_val=data_val) for x in self._base_learners] + + # Reset forecast start date to ensure we predict on the full dataset + full_dataset = ForecastInputDataset( + data=data.data, + sample_interval=data.sample_interval, + target_column=data.target_column, + forecast_start=data.index[0], + ) + + base_predictions = self._predict_base_learners(data=full_dataset) + + quantile_datasets = self._prepare_input_final_learner( + base_predictions=base_predictions, quantiles=self._config.quantiles, target_series=data.target_series + ) + + self._final_learner.fit( + base_learner_predictions=quantile_datasets, + ) + + self._is_fitted = True + + def _predict_base_learners(self, data: ForecastInputDataset) -> dict[type[BaseLearner], ForecastDataset]: + """Generate predictions from base learners. + + Args: + data: Input data for prediction. + + Returns: + DataFrame containing base learner predictions. + """ + base_predictions: dict[type[BaseLearner], ForecastDataset] = {} + for learner in self._base_learners: + preds = learner.predict(data=data) + base_predictions[learner.__class__] = preds + + return base_predictions + + @staticmethod + def _prepare_input_final_learner( + quantiles: list[Quantile], + base_predictions: dict[type[BaseLearner], ForecastDataset], + target_series: pd.Series, + ) -> dict[Quantile, ForecastInputDataset]: + """Prepare input data for the final learner based on base learner predictions. + + Args: + quantiles: List of quantiles to prepare data for. + base_predictions: Predictions from base learners. + target_series: Actual target series for reference. + + Returns: + dictionary mapping Quantiles to ForecastInputDatasets. + """ + predictions_quantiles: dict[Quantile, ForecastInputDataset] = {} + sample_interval = base_predictions[next(iter(base_predictions))].sample_interval + target_name = str(target_series.name) + + for q in quantiles: + df = pd.DataFrame({ + learner.__name__: preds.data[Quantile(q).format()] for learner, preds in base_predictions.items() + }) + df[target_name] = target_series + + predictions_quantiles[q] = ForecastInputDataset( + data=df, + sample_interval=sample_interval, + target_column=target_name, + forecast_start=df.index[0], + ) + + return predictions_quantiles + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + base_predictions = self._predict_base_learners(data=data) + + final_learner_input = self._prepare_input_final_learner( + quantiles=self._config.quantiles, base_predictions=base_predictions, target_series=data.target_series + ) + + return self._final_learner.predict(base_learner_predictions=final_learner_input) + + +__all__ = [ + "BaseLearner", + "BaseLearnerConfig", + "BaseLearnerHyperParams", + "FinalLearner", + "MetaForecaster", +] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/meta/stacking_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/meta/stacking_forecaster.py new file mode 100644 index 000000000..73debe3c7 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/meta/stacking_forecaster.py @@ -0,0 +1,161 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Hybrid Forecaster (Stacked LightGBM + Linear Model Gradient Boosting). + +Provides method that attempts to combine the advantages of a linear model (Extraplolation) +and tree-based model (Non-linear patterns). This is acieved by training two base learners, +followed by a small linear model that regresses on the baselearners' predictions. +The implementation is based on sklearn's StackingRegressor. +""" + +import logging +from typing import override + +import pandas as pd +from pydantic import Field, field_validator + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + NotFittedError, +) +from openstef_core.mixins import HyperParams +from openstef_core.types import Quantile +from openstef_models.models.forecasting.forecaster import ( + Forecaster, + ForecasterConfig, +) +from openstef_models.models.forecasting.gblinear_forecaster import ( + GBLinearHyperParams, +) +from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +from openstef_models.models.forecasting.meta.meta_forecaster import ( + BaseLearner, + BaseLearnerHyperParams, + FinalLearner, + MetaForecaster, +) + +logger = logging.getLogger(__name__) + + +class StackingFinalLearner(FinalLearner): + """Combines base learner predictions per quantile into final predictions using a regression approach.""" + + def __init__(self, forecaster: Forecaster, feature_adders: None = None) -> None: + """Initialize the Stacking final learner. + + Args: + forecaster: The forecaster model to be used as the final learner. + feature_adders: Placeholder for future feature adders (not yet implemented). + """ + # Feature adders placeholder for future use + if feature_adders is not None: + raise NotImplementedError("Feature adders are not yet implemented.") + + # Split forecaster per quantile + self.quantiles = forecaster.config.quantiles + models: list[Forecaster] = [] + for q in self.quantiles: + config = forecaster.config.model_copy( + update={ + "quantiles": [q], + } + ) + model = forecaster.__class__(config=config) + models.append(model) + self.models = models + + @override + def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + for i, q in enumerate(self.quantiles): + self.models[i].fit(data=base_learner_predictions[q], data_val=None) + + @override + def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + # Generate predictions + predictions = [ + self.models[i].predict(data=base_learner_predictions[q]).data for i, q in enumerate(self.quantiles) + ] + + # Concatenate predictions along columns to form a DataFrame with quantile columns + df = pd.concat(predictions, axis=1) + + return ForecastDataset( + data=df, + sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, + ) + + @property + def is_fitted(self) -> bool: + """Check the StackingFinalLearner is fitted.""" + return all(x.is_fitted for x in self.models) + + +class StackingHyperParams(HyperParams): + """Hyperparameters for Stacked LGBM GBLinear Regressor.""" + + base_hyperparams: list[BaseLearnerHyperParams] = Field( + default=[LGBMHyperParams(), GBLinearHyperParams()], + description="List of hyperparameter configurations for base learners. " + "Defaults to [LGBMHyperParams, GBLinearHyperParams].", + ) + + final_hyperparams: BaseLearnerHyperParams = Field( + default=GBLinearHyperParams(), + description="Hyperparameters for the final learner. Defaults to GBLinearHyperParams.", + ) + + use_classifier: bool = Field( + default=True, + description="Whether to use sample weights when fitting base and final learners. Defaults to False.", + ) + + add_rolling_accuracy_features: bool = Field( + default=False, + description="Whether to add rolling accuracy features from base learners as additional features " + "to the final learner. Defaults to False.", + ) + + @field_validator("base_hyperparams", mode="after") + @classmethod + def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: + hp_classes = [type(hp) for hp in v] + if not len(hp_classes) == len(set(hp_classes)): + raise ValueError("Duplicate base learner hyperparameter classes are not allowed.") + return v + + +class StackingForecasterConfig(ForecasterConfig): + """Configuration for Hybrid-based forecasting models.""" + + hyperparams: StackingHyperParams = StackingHyperParams() + + verbosity: bool = Field( + default=True, + description="Enable verbose output from the Hybrid model (True/False).", + ) + + +class StackingForecaster(MetaForecaster): + """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" + + Config = StackingForecasterConfig + HyperParams = StackingHyperParams + + def __init__(self, config: StackingForecasterConfig) -> None: + """Initialize the Hybrid forecaster.""" + self._config = config + + self._base_learners: list[BaseLearner] = self._init_base_learners( + base_hyperparams=config.hyperparams.base_hyperparams + ) + + final_forecaster = self._init_base_learners(base_hyperparams=[config.hyperparams.final_hyperparams])[0] + self._final_learner = StackingFinalLearner(forecaster=final_forecaster) + + +__all__ = ["StackingFinalLearner", "StackingForecaster", "StackingForecasterConfig", "StackingHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 1a33b4622..dd124b414 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -30,9 +30,10 @@ from openstef_models.models import ForecastingModel from openstef_models.models.forecasting.flatliner_forecaster import FlatlinerForecaster from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster -from openstef_models.models.forecasting.hybrid_forecaster import HybridForecaster from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster +from openstef_models.models.forecasting.meta.learned_weights_forecaster import LearnedWeightsForecaster +from openstef_models.models.forecasting.meta.stacking_forecaster import StackingForecaster from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster from openstef_models.transforms.energy_domain import WindPowerFeatureAdder from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, Imputer, NaNDropper, SampleWeighter, Scaler @@ -100,7 +101,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob model_id: ModelIdentifier = Field(description="Unique identifier for the forecasting model.") # Model configuration - model: Literal["xgboost", "gblinear", "flatliner", "hybrid", "lgbm", "lgbmlinear"] = Field( + model: Literal["xgboost", "gblinear", "flatliner", "stacking", "learned_weights", "lgbm", "lgbmlinear"] = Field( description="Type of forecasting model to use." ) # TODO(#652): Implement median forecaster quantiles: list[Quantile] = Field( @@ -136,9 +137,14 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob description="Hyperparameters for LightGBM forecaster.", ) - hybrid_hyperparams: HybridForecaster.HyperParams = Field( - default=HybridForecaster.HyperParams(), - description="Hyperparameters for Hybrid forecaster.", + stacking_hyperparams: StackingForecaster.HyperParams = Field( + default=StackingForecaster.HyperParams(), + description="Hyperparameters for Stacking forecaster.", + ) + + learned_weights_hyperparams: LearnedWeightsForecaster.HyperParams = Field( + default=LearnedWeightsForecaster.HyperParams(), + description="Hyperparameters for Learned Weights forecaster.", ) location: LocationConfig = Field( @@ -202,7 +208,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob ) sample_weight_exponent: float = Field( default_factory=lambda data: 1.0 - if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "hybrid", "xgboost"} + if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "learned_weights", "stacking", "xgboost"} else 0.0, description="Exponent applied to scale the sample weights. " "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " @@ -301,9 +307,10 @@ def create_forecasting_workflow( LagsAdder( history_available=config.predict_history, horizons=config.horizons, - add_trivial_lags=config.model not in {"gblinear", "hybrid"}, # GBLinear uses only 7day lag. + add_trivial_lags=config.model + not in {"gblinear", "stacking", "learned_weights"}, # GBLinear uses only 7day lag. target_column=config.target_column, - custom_lags=[timedelta(days=7)] if config.model in {"gblinear", "hybrid"} else [], + custom_lags=[timedelta(days=7)] if config.model in {"gblinear", "learned_weights"} else [], ), WindPowerFeatureAdder( windspeed_reference_column=config.wind_speed_column, @@ -425,7 +432,29 @@ def create_forecasting_workflow( postprocessing = [ ConfidenceIntervalApplicator(quantiles=config.quantiles), ] - elif config.model == "hybrid": + elif config.model == "learned_weights": + preprocessing = [ + *checks, + *feature_adders, + *feature_standardizers, + Imputer( + selection=Exclude(config.target_column), + imputation_strategy="mean", + fill_future_values=Include(config.energy_price_column), + ), + NaNDropper( + selection=Exclude(config.target_column), + ), + ] + forecaster = LearnedWeightsForecaster( + config=LearnedWeightsForecaster.Config( + quantiles=config.quantiles, + horizons=config.horizons, + hyperparams=config.learned_weights_hyperparams, + ) + ) + postprocessing = [QuantileSorter()] + elif config.model == "stacking": preprocessing = [ *checks, *feature_adders, @@ -439,11 +468,11 @@ def create_forecasting_workflow( selection=Exclude(config.target_column), ), ] - forecaster = HybridForecaster( - config=HybridForecaster.Config( + forecaster = StackingForecaster( + config=StackingForecaster.Config( quantiles=config.quantiles, horizons=config.horizons, - hyperparams=config.hybrid_hyperparams, + hyperparams=config.stacking_hyperparams, ) ) postprocessing = [QuantileSorter()] diff --git a/packages/openstef-models/tests/unit/estimators/__init__.py b/packages/openstef-models/tests/unit/estimators/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/packages/openstef-models/tests/unit/estimators/test_hybrid.py b/packages/openstef-models/tests/unit/estimators/test_hybrid.py deleted file mode 100644 index 4c8a1ac97..000000000 --- a/packages/openstef-models/tests/unit/estimators/test_hybrid.py +++ /dev/null @@ -1,43 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -import pandas as pd -import pytest -from numpy.random import default_rng - -from openstef_models.estimators.hybrid import HybridQuantileRegressor - - -@pytest.fixture -def dataset() -> tuple[pd.DataFrame, pd.Series]: - n_samples = 100 - n_features = 5 - rng = default_rng() - X = pd.DataFrame(rng.random((n_samples, n_features))) - y = pd.Series(rng.random(n_samples)) - return X, y - - -def test_init_sets_quantiles_and_models(): - quantiles = [0.1, 0.5, 0.9] - model = HybridQuantileRegressor(quantiles=quantiles) - assert model.quantiles == quantiles - assert len(model._models) == len(quantiles) - - -def test_fit_and_predict_shape(dataset: tuple[pd.DataFrame, pd.Series]): - quantiles = [0.1, 0.5, 0.9] - X, y = dataset[0], dataset[1] - model = HybridQuantileRegressor(quantiles=quantiles) - model.fit(X, y) - preds = model.predict(X) - assert preds.shape == (X.shape[0], len(quantiles)) - - -def test_is_fitted(dataset: tuple[pd.DataFrame, pd.Series]): - quantiles = [0.1, 0.5, 0.9] - X, y = dataset[0], dataset[1] - model = HybridQuantileRegressor(quantiles=quantiles) - model.fit(X, y) - assert model.is_fitted diff --git a/packages/openstef-models/tests/unit/models/forecasting/meta/test_learned_weights_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/meta/test_learned_weights_forecaster.py new file mode 100644 index 000000000..f227d1977 --- /dev/null +++ b/packages/openstef-models/tests/unit/models/forecasting/meta/test_learned_weights_forecaster.py @@ -0,0 +1,105 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_models.models.forecasting.meta.learned_weights_forecaster import ( + LearnedWeightsForecaster, + LearnedWeightsForecasterConfig, + LearnedWeightsHyperParams, +) + + +@pytest.fixture +def base_config() -> LearnedWeightsForecasterConfig: + """Base configuration for LearnedWeights forecaster tests.""" + + params = LearnedWeightsHyperParams() + return LearnedWeightsForecasterConfig( + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime(timedelta(days=1))], + hyperparams=params, + verbosity=False, + ) + + +def test_learned_weights_forecaster_fit_predict( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LearnedWeightsForecasterConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = LearnedWeightsForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + +def test_learned_weights_forecaster_predict_not_fitted_raises_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LearnedWeightsForecasterConfig, +): + """Test that predict() raises NotFittedError when called before fit().""" + # Arrange + forecaster = LearnedWeightsForecaster(config=base_config) + + # Act & Assert + with pytest.raises(NotFittedError, match="LearnedWeightsForecaster"): + forecaster.predict(sample_forecast_input_dataset) + + +def test_learned_weights_forecaster_with_sample_weights( + sample_dataset_with_weights: ForecastInputDataset, + base_config: LearnedWeightsForecasterConfig, +): + """Test that forecaster works with sample weights and produces different results.""" + # Arrange + forecaster_with_weights = LearnedWeightsForecaster(config=base_config) + + # Create dataset without weights for comparison + data_without_weights = ForecastInputDataset( + data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), + sample_interval=sample_dataset_with_weights.sample_interval, + target_column=sample_dataset_with_weights.target_column, + forecast_start=sample_dataset_with_weights.forecast_start, + ) + forecaster_without_weights = LearnedWeightsForecaster(config=base_config) + + # Act + forecaster_with_weights.fit(sample_dataset_with_weights) + forecaster_without_weights.fit(data_without_weights) + + # Predict using data without sample_weight column (since that's used for training, not prediction) + result_with_weights = forecaster_with_weights.predict(data_without_weights) + result_without_weights = forecaster_without_weights.predict(data_without_weights) + + # Assert + # Both should produce valid forecasts + assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" + assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + + # Sample weights should affect the model, so results should be different + # (This is a statistical test - with different weights, predictions should differ) + differences = (result_with_weights.data - result_without_weights.data).abs() + assert differences.sum().sum() > 0, "Sample weights should affect model predictions" diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/meta/test_stacking_forecaster.py similarity index 77% rename from packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py rename to packages/openstef-models/tests/unit/models/forecasting/meta/test_stacking_forecaster.py index 4e36e125d..416f36ab9 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/meta/test_stacking_forecaster.py @@ -9,19 +9,19 @@ from openstef_core.datasets import ForecastInputDataset from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q -from openstef_models.models.forecasting.hybrid_forecaster import ( - HybridForecaster, - HybridForecasterConfig, - HybridHyperParams, +from openstef_models.models.forecasting.meta.stacking_forecaster import ( + StackingForecaster, + StackingForecasterConfig, + StackingHyperParams, ) @pytest.fixture -def base_config() -> HybridForecasterConfig: - """Base configuration for Hybrid forecaster tests.""" +def base_config() -> StackingForecasterConfig: + """Base configuration for Stacking forecaster tests.""" - params = HybridHyperParams() - return HybridForecasterConfig( + params = StackingHyperParams() + return StackingForecasterConfig( quantiles=[Q(0.1), Q(0.5), Q(0.9)], horizons=[LeadTime(timedelta(days=1))], hyperparams=params, @@ -29,14 +29,14 @@ def base_config() -> HybridForecasterConfig: ) -def test_hybrid_forecaster_fit_predict( +def test_stacking_forecaster_fit_predict( sample_forecast_input_dataset: ForecastInputDataset, - base_config: HybridForecasterConfig, + base_config: StackingForecasterConfig, ): """Test basic fit and predict workflow with comprehensive output validation.""" # Arrange expected_quantiles = base_config.quantiles - forecaster = HybridForecaster(config=base_config) + forecaster = StackingForecaster(config=base_config) # Act forecaster.fit(sample_forecast_input_dataset) @@ -56,26 +56,26 @@ def test_hybrid_forecaster_fit_predict( assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" -def test_hybrid_forecaster_predict_not_fitted_raises_error( +def test_stacking_forecaster_predict_not_fitted_raises_error( sample_forecast_input_dataset: ForecastInputDataset, - base_config: HybridForecasterConfig, + base_config: StackingForecasterConfig, ): """Test that predict() raises NotFittedError when called before fit().""" # Arrange - forecaster = HybridForecaster(config=base_config) + forecaster = StackingForecaster(config=base_config) # Act & Assert - with pytest.raises(NotFittedError, match="HybridForecaster"): + with pytest.raises(NotFittedError, match="StackingForecaster"): forecaster.predict(sample_forecast_input_dataset) -def test_hybrid_forecaster_with_sample_weights( +def test_stacking_forecaster_with_sample_weights( sample_dataset_with_weights: ForecastInputDataset, - base_config: HybridForecasterConfig, + base_config: StackingForecasterConfig, ): """Test that forecaster works with sample weights and produces different results.""" # Arrange - forecaster_with_weights = HybridForecaster(config=base_config) + forecaster_with_weights = StackingForecaster(config=base_config) # Create dataset without weights for comparison data_without_weights = ForecastInputDataset( @@ -84,7 +84,7 @@ def test_hybrid_forecaster_with_sample_weights( target_column=sample_dataset_with_weights.target_column, forecast_start=sample_dataset_with_weights.forecast_start, ) - forecaster_without_weights = HybridForecaster(config=base_config) + forecaster_without_weights = StackingForecaster(config=base_config) # Act forecaster_with_weights.fit(sample_dataset_with_weights) From f476523688f9f27fb0d3245830b2405fed17928f Mon Sep 17 00:00:00 2001 From: Lars van Someren <93349011+Lars800@users.noreply.github.com> Date: Fri, 21 Nov 2025 16:38:28 +0100 Subject: [PATCH 026/104] Research/v4.1.0 additional forecasters (#765) * Added Lightgbm, LightGBM Linear Trees and Hybrid Stacking Forecasters * Fixed small issues * Ruff compliance * fixed quality checks * Fixed last issues, Signed-off-by: Lars van Someren * fixed comments * Refactor LightGBM to LGBM * Update LGBM and LGBMLinear defaults, fixed comments * Fixed comments * Added SkopsModelSerializer * Fixed issues * Gitignore optimization and dev sandbox * Added MultiQuantileAdapter Class * small fix * Hybrid V2 * Small fix * Squashed commit of the following: commit 37089b84bdea12d22506174ef1393c4fc346ca36 Author: Egor Dmitriev Date: Mon Nov 17 15:29:59 2025 +0100 fix(#728): Fixed parallelism stability issues, and gblinear feature pipeline. (#752) * fix(STEF-2475): Added loky as default option for parallelism since fork causes instabilities for xgboost results. Signed-off-by: Egor Dmitriev * fix(STEF-2475): Added better support for flatliners and predicting when data is sparse. Signed-off-by: Egor Dmitriev * fix(STEF-2475): Feature handing improvements for gblinear. Like imputation, nan dropping, and checking if features are available. Signed-off-by: Egor Dmitriev * fix(#728): Added checks on metrics to gracefully handle empty data. Added flatline filtering during evalution. Signed-off-by: Egor Dmitriev * fix(#728): Updated xgboost to skip scaling on empty prediction. Signed-off-by: Egor Dmitriev * fix(STEF-2475): Added parallelism parameters. Signed-off-by: Egor Dmitriev --------- Signed-off-by: Egor Dmitriev commit a85a3f709c9a54b85658578b5c2aefc001bdf803 Author: Egor Dmitriev Date: Fri Nov 14 14:31:34 2025 +0100 fix(STEF-2475): Fixed rolling aggregate adder by adding forward filling and stating support for only one horizon. (#750) Signed-off-by: Egor Dmitriev commit 4f0c6648516bf184608d268020fdfa4107050c83 Author: Egor Dmitriev Date: Thu Nov 13 16:54:15 2025 +0100 feature: Disabled data cutoff by default to be consistent with openstef 3. And other minor improvements. (#748) commit 493126e9f16836d0da03d9c43e391537c5bea7ca Author: Egor Dmitriev Date: Thu Nov 13 16:12:35 2025 +0100 fix(STEF-2475) fix and refactor backtesting iction in context of backtestforecasting config for clarity. Added more colors. Fixed data split function to handle 0.0 splits. (#747) * fix: Fixed data collation during backtesting. Renamed horizon to prediction in context of backtestforecasting config for clarity. Added more colors. Fixed data split function to handle 0.0 splits. * fix: Formatting. Signed-off-by: Egor Dmitriev * fix: Formatting. Signed-off-by: Egor Dmitriev --------- Signed-off-by: Egor Dmitriev commit 6b1da449b7841f1b13a5fac1f16e48bbeb9b9692 Author: Egor Dmitriev Date: Thu Nov 13 16:05:32 2025 +0100 feature: forecaster hyperparams and eval metrics (#746) * feature(#729) Removed to_state and from_state methods in favor of builtin python state saving functions. Signed-off-by: Egor Dmitriev * feature(#729): Fixed issue where generic transform pipeline could not be serialized. Signed-off-by: Egor Dmitriev * feature(#729): Added more state saving tests Signed-off-by: Egor Dmitriev * feature(#729): Added more state saving tests Signed-off-by: Egor Dmitriev * feature(#729): Added more state saving tests Signed-off-by: Egor Dmitriev * feature: standardized objective function. Added custom evaluation functions for forecasters. * fix: Formatting. Signed-off-by: Egor Dmitriev --------- Signed-off-by: Egor Dmitriev * set silence * small fix * Fix final learner * fixed lgbm efficiency * updated lgbm linear params * Fixed type and quality issues * remove depricated files Signed-off-by: Lars van Someren * change: Fixed dependencies to align more with the current release. Signed-off-by: Egor Dmitriev * change: Style fixes. Signed-off-by: Egor Dmitriev --------- Signed-off-by: Lars van Someren Signed-off-by: Egor Dmitriev Co-authored-by: Egor Dmitriev --- .gitignore | 5 +- packages/openstef-models/pyproject.toml | 2 + .../integrations/skops/__init__.py | 15 + .../skops/skops_model_serializer.py | 105 ++++++ .../mixins/model_serializer.py | 1 + .../models/forecasting/forecaster.py | 28 ++ .../models/forecasting/gblinear_forecaster.py | 19 +- .../models/forecasting/hybrid_forecaster.py | 308 ++++++++++++++++ .../models/forecasting/lgbm_forecaster.py | 334 +++++++++++++++++ .../forecasting/lgbmlinear_forecaster.py | 336 ++++++++++++++++++ .../models/forecasting/xgboost_forecaster.py | 17 + .../presets/forecasting_workflow.py | 158 ++++++-- .../utils/multi_quantile_regressor.py | 157 ++++++++ .../tests/unit/integrations/skops/__init__.py | 5 + .../skops/test_skops_model_serializer.py | 72 ++++ .../forecasting/test_hybrid_forecaster.py | 105 ++++++ .../forecasting/test_lgbm_forecaster.py | 149 ++++++++ .../forecasting/test_lgbmlinear_forecaster.py | 149 ++++++++ .../utils/test_multi_quantile_regressor.py | 107 ++++++ pyproject.toml | 1 + uv.lock | 49 +++ 21 files changed, 2092 insertions(+), 30 deletions(-) create mode 100644 packages/openstef-models/src/openstef_models/integrations/skops/__init__.py create mode 100644 packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py create mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py create mode 100644 packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py create mode 100644 packages/openstef-models/tests/unit/integrations/skops/__init__.py create mode 100644 packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py create mode 100644 packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py create mode 100644 packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py create mode 100644 packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py create mode 100644 packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py diff --git a/.gitignore b/.gitignore index 05af14c40..5d863242d 100644 --- a/.gitignore +++ b/.gitignore @@ -124,4 +124,7 @@ certificates/ *.pkl # Benchmark outputs -benchmark_results*/ \ No newline at end of file +benchmark_results*/ +# Experiment outputs +optimization_results/ +dev_sandbox/ diff --git a/packages/openstef-models/pyproject.toml b/packages/openstef-models/pyproject.toml index e12214954..2b6f727bf 100644 --- a/packages/openstef-models/pyproject.toml +++ b/packages/openstef-models/pyproject.toml @@ -29,6 +29,7 @@ classifiers = [ dependencies = [ "holidays>=0.79", + "lightgbm>=4.6", "mlflow-skinny>=3,<4", "openstef-beam>=4.0.0.dev0,<5", "openstef-core>=4.0.0.dev0,<5", @@ -36,6 +37,7 @@ dependencies = [ "pycountry>=24.6.1", "scikit-learn>=1.7.1,<2", "scipy>=1.16.3,<2", + "skops>=0.13", ] optional-dependencies.xgb-cpu = [ diff --git a/packages/openstef-models/src/openstef_models/integrations/skops/__init__.py b/packages/openstef-models/src/openstef_models/integrations/skops/__init__.py new file mode 100644 index 000000000..16fcbd789 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/integrations/skops/__init__.py @@ -0,0 +1,15 @@ +"""Joblib-based model storage integration. + +Provides local file-based model persistence using Skops for serialization. +This integration provides a safe way for storing and loading ForecastingModel instances on +the local filesystem, making it suitable for development, testing, and +single-machine deployments. +""" + +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from .skops_model_serializer import SkopsModelSerializer + +__all__ = ["SkopsModelSerializer"] diff --git a/packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py b/packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py new file mode 100644 index 000000000..6296d3abb --- /dev/null +++ b/packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py @@ -0,0 +1,105 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Local model storage implementation using joblib serialization. + +Provides file-based persistence for ForecastingModel instances using joblib's +pickle-based serialization. This storage backend is suitable for development, +testing, and single-machine deployments where models need to be persisted +to the local filesystem. +""" + +from typing import BinaryIO, ClassVar, override + +from openstef_core.exceptions import MissingExtraError +from openstef_models.mixins.model_serializer import ModelSerializer + +try: + from skops.io import dump, get_untrusted_types, load +except ImportError as e: + raise MissingExtraError("joblib", package="openstef-models") from e + + +class SkopsModelSerializer(ModelSerializer): + """File-based model storage using joblib serialization. + + Provides persistent storage for ForecastingModel instances on the local + filesystem. Models are serialized using joblib and stored as pickle files + in the specified directory. + + This storage implementation is suitable for development, testing, and + single-machine deployments where simple file-based persistence is sufficient. + + Note: + joblib.dump() and joblib.load() are based on the Python pickle serialization model, + which means that arbitrary Python code can be executed when loading a serialized object + with joblib.load(). + + joblib.load() should therefore never be used to load objects from an untrusted source + or otherwise you will introduce a security vulnerability in your program. + + Invariants: + - Models are stored as .pkl files in the configured storage directory + - Model files use the pattern: {model_id}.pkl + - Storage directory is created automatically if it doesn't exist + - Load operations fail with ModelNotFoundError if model file doesn't exist + + Example: + Basic usage with model persistence: + + >>> from pathlib import Path + >>> from openstef_models.models.forecasting_model import ForecastingModel + >>> storage = LocalModelStorage(storage_dir=Path("./models")) # doctest: +SKIP + >>> storage.save_model("my_model", my_forecasting_model) # doctest: +SKIP + >>> loaded_model = storage.load_model("my_model") # doctest: +SKIP + """ + + extension: ClassVar[str] = ".skops" + + @override + def serialize(self, model: object, file: BinaryIO) -> None: + dump(model, file) # type: ignore[reportUnknownMemberType] + + @staticmethod + def _get_stateful_types() -> set[str]: + return { + "tests.unit.integrations.skops.test_skops_model_serializer.SimpleSerializableModel", + "openstef_core.mixins.predictor.BatchPredictor", + "openstef_models.models.forecasting.forecaster.Forecaster", + "openstef_models.models.forecasting.xgboost_forecaster.XGBoostForecaster", + "openstef_models.models.component_splitting_model.ComponentSplittingModel", + "openstef_core.mixins.transform.TransformPipeline", + "openstef_core.mixins.transform.TransformPipeline[EnergyComponentDataset]", + "openstef_core.mixins.transform.TransformPipeline[TimeSeriesDataset]", + "openstef_models.models.forecasting.lgbm_forecaster.LGBMForecaster", + "openstef_models.models.component_splitting.component_splitter.ComponentSplitter", + "openstef_models.models.forecasting_model.ForecastingModel", + "openstef_core.mixins.transform.Transform", + "openstef_core.mixins.transform.TransformPipeline[ForecastDataset]", + "openstef_core.mixins.predictor.Predictor", + "openstef_models.models.forecasting.lgbmlinear_forecaster.LGBMLinearForecaster", + } + + @override + def deserialize(self, file: BinaryIO) -> object: + """Load a model's state from a binary file and restore it. + + Returns: + The restored model instance. + + Raises: + ValueError: If no safe types are found in the serialized model. + """ + safe_types = self._get_stateful_types() + + # Weak security measure that checks a safe class is present. + # Can be improved to ensure no unsafe classes are present. + model_types: set[str] = set(get_untrusted_types(file=file)) # type: ignore + + if len(safe_types.intersection(model_types)) == 0: + raise ValueError("Deserialization aborted: No safe types found in the serialized model.") + + return load(file, trusted=list(model_types)) # type: ignore[reportUnknownMemberType] + + +__all__ = ["SkopsModelSerializer"] diff --git a/packages/openstef-models/src/openstef_models/mixins/model_serializer.py b/packages/openstef-models/src/openstef_models/mixins/model_serializer.py index ab00993f7..40e74a52a 100644 --- a/packages/openstef-models/src/openstef_models/mixins/model_serializer.py +++ b/packages/openstef-models/src/openstef_models/mixins/model_serializer.py @@ -34,6 +34,7 @@ class ModelSerializer(BaseConfig, ABC): See Also: JoblibModelSerializer: Concrete implementation using joblib. + SkopsModelSerializer: Concrete implementation using skops. """ extension: ClassVar[str] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py index d796b0ef3..9628c61e3 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py @@ -111,6 +111,15 @@ def with_horizon(self, horizon: LeadTime) -> Self: """ return self.model_copy(update={"horizons": [horizon]}) + @classmethod + def forecaster_class(cls) -> type["Forecaster"]: + """Get the associated Forecaster class for this configuration. + + Returns: + The Forecaster class that uses this configuration. + """ + raise NotImplementedError("Subclasses must implement forecaster_class") + class ConfigurableForecaster: @property @@ -197,6 +206,25 @@ class Forecaster(BatchPredictor[ForecastInputDataset, ForecastDataset], Configur ... ) """ + @abstractmethod + def __init__(self, config: ForecasterConfig) -> None: + """Initialize the forecaster with the given configuration. + + Args: + config: Configuration object specifying quantiles, horizons, and batching support. + """ + raise NotImplementedError("Subclasses must implement __init__") + + @property + @abstractmethod + def config(self) -> ForecasterConfig: + """Access the model's configuration parameters. + + Returns: + Configuration object containing fundamental model parameters. + """ + raise NotImplementedError("Subclasses must implement config") + __all__ = [ "Forecaster", diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index 002cada6f..92c3981a3 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -93,6 +93,15 @@ class GBLinearHyperParams(HyperParams): description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", ) + @classmethod + def forecaster_class(cls) -> "type[GBLinearForecaster]": + """Forecaster class for these hyperparams. + + Returns: + Forecaster class associated with this configuration. + """ + return GBLinearForecaster + class GBLinearForecasterConfig(ForecasterConfig): """Configuration for GBLinear forecaster.""" @@ -114,9 +123,17 @@ class GBLinearForecasterConfig(ForecasterConfig): default="cpu", description="Device for XGBoost computation. Options: 'cpu', 'cuda', 'cuda:', 'gpu'" ) verbosity: Literal[0, 1, 2, 3, True] = Field( - default=1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + default=0, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" ) + def forecaster_from_config(self) -> "GBLinearForecaster": + """Create a GBLinearForecaster instance from this configuration. + + Returns: + Forecaster instance associated with this configuration. + """ + return GBLinearForecaster(config=self) + MODEL_CODE_VERSION = 1 diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py new file mode 100644 index 000000000..2b4b72573 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py @@ -0,0 +1,308 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Hybrid Forecaster (Stacked LightGBM + Linear Model Gradient Boosting). + +Provides method that attempts to combine the advantages of a linear model (Extraplolation) +and tree-based model (Non-linear patterns). This is acieved by training two base learners, +followed by a small linear model that regresses on the baselearners' predictions. +The implementation is based on sklearn's StackingRegressor. +""" + +import logging +from abc import abstractmethod +from typing import override + +import pandas as pd +from pydantic import Field, field_validator + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + NotFittedError, +) +from openstef_core.mixins import HyperParams +from openstef_core.types import Quantile +from openstef_models.models.forecasting.forecaster import ( + Forecaster, + ForecasterConfig, +) +from openstef_models.models.forecasting.gblinear_forecaster import ( + GBLinearForecaster, + GBLinearForecasterConfig, + GBLinearHyperParams, +) +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMForecasterConfig, LGBMHyperParams +from openstef_models.models.forecasting.lgbmlinear_forecaster import ( + LGBMLinearForecaster, + LGBMLinearForecasterConfig, + LGBMLinearHyperParams, +) +from openstef_models.models.forecasting.xgboost_forecaster import ( + XGBoostForecaster, + XGBoostForecasterConfig, + XGBoostHyperParams, +) + +logger = logging.getLogger(__name__) + + +BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster +BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams +BaseLearnerConfig = ( + LGBMForecasterConfig | LGBMLinearForecasterConfig | XGBoostForecasterConfig | GBLinearForecasterConfig +) + + +class FinalLearner: + """Combines base learner predictions for each quantile into final predictions.""" + + @abstractmethod + def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + raise NotImplementedError("Subclasses must implement the fit method.") + + def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + raise NotImplementedError("Subclasses must implement the predict method.") + + @property + @abstractmethod + def is_fitted(self) -> bool: + raise NotImplementedError("Subclasses must implement the is_fitted property.") + + +class FinalForecaster(FinalLearner): + """Combines base learner predictions for each quantile into final predictions.""" + + def __init__(self, forecaster: Forecaster, feature_adders: None = None) -> None: + # Feature adders placeholder for future use + if feature_adders is not None: + raise NotImplementedError("Feature adders are not yet implemented.") + + # Split forecaster per quantile + self.quantiles = forecaster.config.quantiles + models: list[Forecaster] = [] + for q in self.quantiles: + config = forecaster.config.model_copy( + update={ + "quantiles": [q], + } + ) + model = forecaster.__class__(config=config) + models.append(model) + self.models = models + + @override + def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + for i, q in enumerate(self.quantiles): + self.models[i].fit(data=base_learner_predictions[q], data_val=None) + + @override + def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + # Generate predictions + predictions = [ + self.models[i].predict(data=base_learner_predictions[q]).data for i, q in enumerate(self.quantiles) + ] + + # Concatenate predictions along columns to form a DataFrame with quantile columns + df = pd.concat(predictions, axis=1) + + return ForecastDataset( + data=df, + sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, + ) + + @property + def is_fitted(self) -> bool: + return all(x.is_fitted for x in self.models) + + +class HybridHyperParams(HyperParams): + """Hyperparameters for Stacked LGBM GBLinear Regressor.""" + + base_hyperparams: list[BaseLearnerHyperParams] = Field( + default=[LGBMHyperParams(), GBLinearHyperParams()], + description="List of hyperparameter configurations for base learners. " + "Defaults to [LGBMHyperParams, GBLinearHyperParams].", + ) + + final_hyperparams: BaseLearnerHyperParams = Field( + default=GBLinearHyperParams(), + description="Hyperparameters for the final learner. Defaults to GBLinearHyperParams.", + ) + + add_rolling_accuracy_features: bool = Field( + default=False, + description="Whether to add rolling accuracy features from base learners as additional features " + "to the final learner. Defaults to False.", + ) + + @field_validator("base_hyperparams", mode="after") + @classmethod + def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: + hp_classes = [type(hp) for hp in v] + if not len(hp_classes) == len(set(hp_classes)): + raise ValueError("Duplicate base learner hyperparameter classes are not allowed.") + return v + + +class HybridForecasterConfig(ForecasterConfig): + """Configuration for Hybrid-based forecasting models.""" + + hyperparams: HybridHyperParams = HybridHyperParams() + + verbosity: bool = Field( + default=True, + description="Enable verbose output from the Hybrid model (True/False).", + ) + + +class HybridForecaster(Forecaster): + """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" + + Config = HybridForecasterConfig + HyperParams = HybridHyperParams + + _config: HybridForecasterConfig + + def __init__(self, config: HybridForecasterConfig) -> None: + """Initialize the Hybrid forecaster.""" + self._config = config + + self._base_learners: list[BaseLearner] = self._init_base_learners( + base_hyperparams=config.hyperparams.base_hyperparams + ) + final_forecaster = self._init_base_learners(base_hyperparams=[config.hyperparams.final_hyperparams])[0] + self._final_learner = FinalForecaster(forecaster=final_forecaster) + + def _init_base_learners(self, base_hyperparams: list[BaseLearnerHyperParams]) -> list[BaseLearner]: + """Initialize base learners based on provided hyperparameters. + + Returns: + list[Forecaster]: List of initialized base learner forecasters. + """ + base_learners: list[BaseLearner] = [] + horizons = self.config.horizons + quantiles = self.config.quantiles + + for hyperparams in base_hyperparams: + forecaster_cls = hyperparams.forecaster_class() + config = forecaster_cls.Config(horizons=horizons, quantiles=quantiles) + if "hyperparams" in forecaster_cls.Config.model_fields: + config = config.model_copy(update={"hyperparams": hyperparams}) + + base_learners.append(config.forecaster_from_config()) + + return base_learners + + @property + @override + def is_fitted(self) -> bool: + return all(x.is_fitted for x in self._base_learners) + + @property + @override + def config(self) -> ForecasterConfig: + return self._config + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + """Fit the Hybrid model to the training data. + + Args: + data: Training data in the expected ForecastInputDataset format. + data_val: Validation data for tuning the model (optional, not used in this implementation). + + """ + # Fit base learners + [x.fit(data=data, data_val=data_val) for x in self._base_learners] + + # Reset forecast start date to ensure we predict on the full dataset + full_dataset = ForecastInputDataset( + data=data.data, + sample_interval=data.sample_interval, + target_column=data.target_column, + forecast_start=data.index[0], + ) + + base_predictions = self._predict_base_learners(data=full_dataset) + + quantile_datasets = self._prepare_input_final_learner( + base_predictions=base_predictions, quantiles=self._config.quantiles, target_series=data.target_series + ) + + self._final_learner.fit( + base_learner_predictions=quantile_datasets, + ) + + self._is_fitted = True + + def _predict_base_learners(self, data: ForecastInputDataset) -> dict[type[BaseLearner], ForecastDataset]: + """Generate predictions from base learners. + + Args: + data: Input data for prediction. + + Returns: + DataFrame containing base learner predictions. + """ + base_predictions: dict[type[BaseLearner], ForecastDataset] = {} + for learner in self._base_learners: + preds = learner.predict(data=data) + base_predictions[learner.__class__] = preds + + return base_predictions + + @staticmethod + def _prepare_input_final_learner( + quantiles: list[Quantile], + base_predictions: dict[type[BaseLearner], ForecastDataset], + target_series: pd.Series, + ) -> dict[Quantile, ForecastInputDataset]: + """Prepare input data for the final learner based on base learner predictions. + + Args: + quantiles: List of quantiles to prepare data for. + base_predictions: Predictions from base learners. + target_series: Actual target series for reference. + + Returns: + dictionary mapping quantile strings to DataFrames of base learner predictions. + """ + predictions_quantiles: dict[Quantile, ForecastInputDataset] = {} + sample_interval = base_predictions[next(iter(base_predictions))].sample_interval + target_name = str(target_series.name) + + for q in quantiles: + df = pd.DataFrame({ + learner.__name__: preds.data[Quantile(q).format()] for learner, preds in base_predictions.items() + }) + df[target_name] = target_series + + predictions_quantiles[q] = ForecastInputDataset( + data=df, + sample_interval=sample_interval, + target_column=target_name, + forecast_start=df.index[0], + ) + + return predictions_quantiles + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + base_predictions = self._predict_base_learners(data=data) + + final_learner_input = self._prepare_input_final_learner( + quantiles=self._config.quantiles, base_predictions=base_predictions, target_series=data.target_series + ) + + return self._final_learner.predict(base_learner_predictions=final_learner_input) + + # TODO(@Lars800): #745: Make forecaster Explainable + + +__all__ = ["HybridForecaster", "HybridForecasterConfig", "HybridHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py new file mode 100644 index 000000000..03c667b00 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -0,0 +1,334 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""LightGBM-based forecasting models for probabilistic energy forecasting. + +Provides gradient boosting tree models using LightGBM for multi-quantile energy +forecasting. Optimized for time series data with specialized loss functions and +comprehensive hyperparameter control for production forecasting workflows. +""" + +from typing import TYPE_CHECKING, Literal, override + +import numpy as np +import pandas as pd +from lightgbm import LGBMRegressor +from pydantic import Field + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + NotFittedError, +) +from openstef_core.mixins import HyperParams +from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig +from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor + +if TYPE_CHECKING: + import numpy.typing as npt + + +class LGBMHyperParams(HyperParams): + """LightGBM hyperparameters for gradient boosting tree models. + + Example: + Creating custom hyperparameters for deep trees with regularization: + + >>> hyperparams = LGBMHyperParams( + ... n_estimators=200, + ... max_depth=8, + ... learning_rate=0.1, + ... reg_alpha=0.1, + ... reg_lambda=1.0, + ... ) + + Note: + These parameters are optimized for probabilistic forecasting with + quantile regression. The default objective function is specialized + for magnitude-weighted pinball loss. + """ + + # Core Tree Boosting Parameters + n_estimators: int = Field( + default=100, + description="Number of boosting rounds/trees to fit. Higher values may improve performance but " + "increase training time and risk overfitting.", + ) + learning_rate: float = Field( + default=0.49, # 0.3 + alias="eta", + description="Step size shrinkage used to prevent overfitting. Range: [0,1]. Lower values require " + "more boosting rounds.", + ) + max_depth: int = Field( + default=2, # 8, + description="Maximum depth of trees. Higher values capture more complex patterns but risk " + "overfitting. Range: [1,∞]", + ) + min_child_weight: float = Field( + default=1, + description="Minimum sum of instance weight (hessian) needed in a child. Higher values prevent " + "overfitting. Range: [0,∞]", + ) + + min_data_in_leaf: int = Field( + default=10, + description="Minimum number of data points in a leaf. Higher values prevent overfitting. Range: [1,∞]", + ) + min_data_in_bin: int = Field( + default=10, + description="Minimum number of data points in a bin. Higher values prevent overfitting. Range: [1,∞]", + ) + + # Regularization + reg_alpha: float = Field( + default=0, + description="L1 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", + ) + reg_lambda: float = Field( + default=1, + description="L2 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", + ) + + # Tree Structure Control + num_leaves: int = Field( + default=100, # 31 + description="Maximum number of leaves. 0 means no limit. Only relevant when grow_policy='lossguide'.", + ) + + max_bin: int = Field( + default=256, + description="Maximum number of discrete bins for continuous features. Higher values may improve accuracy but " + "increase memory. Only for hist tree_method.", + ) + + # Subsampling Parameters + colsample_bytree: float = Field( + default=1.0, + description="Fraction of features used when constructing each tree. Range: (0,1]", + ) + + @classmethod + def forecaster_class(cls) -> "type[LGBMForecaster]": + """Create a LightGBM forecaster instance from this configuration. + + Returns: + Forecaster class associated with this configuration. + """ + return LGBMForecaster + + +class LGBMForecasterConfig(ForecasterConfig): + """Configuration for LightGBM-based forecaster. + Extends HorizonForecasterConfig with LightGBM-specific hyperparameters + and execution settings. + + Example: + Creating a LightGBM forecaster configuration with custom hyperparameters: + >>> from datetime import timedelta + >>> from openstef_core.types import LeadTime, Quantile + >>> config = LGBMForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime(timedelta(hours=1))], + ... hyperparams=LGBMHyperParams(n_estimators=100, max_depth=6)) + """ # noqa: D205 + + hyperparams: LGBMHyperParams = LGBMHyperParams() + + # General Parameters + device: str = Field( + default="cpu", + description="Device for LightGBM computation. Options: 'cpu', 'cuda', 'cuda:', 'gpu'", + ) + n_jobs: int = Field( + default=1, + description="Number of parallel threads for tree construction. -1 uses all available cores.", + ) + verbosity: Literal[-1, 0, 1, 2, 3] = Field( + default=-1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + ) + + random_state: int | None = Field( + default=None, + alias="seed", + description="Random seed for reproducibility. Controls tree structure randomness.", + ) + + early_stopping_rounds: int | None = Field( + default=None, + description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", + ) + + def forecaster_from_config(self) -> "LGBMForecaster": + """Create a LGBMForecaster instance from this configuration. + + Returns: + Forecaster instance associated with this configuration. + """ + return LGBMForecaster(config=self) + + +MODEL_CODE_VERSION = 1 + + +class LGBMForecaster(Forecaster, ExplainableForecaster): + """LightGBM-based forecaster for probabilistic energy forecasting. + + Implements gradient boosting trees using LightGBM for multi-quantile forecasting. + Optimized for time series prediction with specialized loss functions and + comprehensive hyperparameter control suitable for production energy forecasting. + + The forecaster uses a multi-output strategy where each quantile is predicted + by separate trees within the same boosting ensemble. This approach provides + well-calibrated uncertainty estimates while maintaining computational efficiency. + + Invariants: + - fit() must be called before predict() to train the model + - Configuration quantiles determine the number of prediction outputs + - Model state is preserved across predict() calls after fitting + - Input features must match training data structure during prediction + + Example: + Basic forecasting workflow: + + >>> from datetime import timedelta + >>> from openstef_core.types import LeadTime, Quantile + >>> config = LGBMForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime(timedelta(hours=1))], + ... hyperparams=LGBMHyperParams(n_estimators=100, max_depth=6) + ... ) + >>> forecaster = LGBMForecaster(config) + >>> # forecaster.fit(training_data) + >>> # predictions = forecaster.predict(test_data) + + Note: + LightGBM dependency is optional and must be installed separately. + The model automatically handles multi-quantile output and uses + magnitude-weighted pinball loss by default for better forecasting performance. + + See Also: + LGBMHyperParams: Detailed hyperparameter configuration options. + HorizonForecaster: Base interface for all forecasting models. + GBLinearForecaster: Alternative linear model using LightGBM. + """ + + Config = LGBMForecasterConfig + HyperParams = LGBMHyperParams + + _config: LGBMForecasterConfig + + def __init__(self, config: LGBMForecasterConfig) -> None: + """Initialize LightGBM forecaster with configuration. + + Creates an untrained LightGBM regressor with the specified configuration. + The underlying LightGBM model is configured for multi-output quantile + regression using the provided hyperparameters and execution settings. + + Args: + config: Complete configuration including hyperparameters, quantiles, + and execution settings for the LightGBM model. + """ + self._config = config + + lgbm_params = { + "linear_tree": False, + "objective": "quantile", + "random_state": config.random_state, + "early_stopping_rounds": config.early_stopping_rounds, + "verbosity": config.verbosity, + "n_jobs": config.n_jobs, + **config.hyperparams.model_dump(), + } + + self._lgbm_model: MultiQuantileRegressor = MultiQuantileRegressor( + base_learner=LGBMRegressor, # type: ignore + quantile_param="alpha", + hyperparams=lgbm_params, + quantiles=[float(q) for q in config.quantiles], + ) + + @property + @override + def config(self) -> ForecasterConfig: + return self._config + + @property + @override + def hyperparams(self) -> LGBMHyperParams: + return self._config.hyperparams + + @property + @override + def is_fitted(self) -> bool: + return self._lgbm_model.is_fitted + + @staticmethod + def _prepare_fit_input(data: ForecastInputDataset) -> tuple[pd.DataFrame, np.ndarray, pd.Series]: + input_data: pd.DataFrame = data.input_data() + target: np.ndarray = np.asarray(data.target_series.values) + sample_weight: pd.Series = data.sample_weight_series + + return input_data, target, sample_weight + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + # Prepare training data + input_data, target, sample_weight = self._prepare_fit_input(data) + + # Evaluation sets + eval_set = [(input_data, target)] + sample_weight_eval_set = [sample_weight] + + if data_val is not None: + input_data_val, target_val, sample_weight_val = self._prepare_fit_input(data_val) + eval_set.append((input_data_val, target_val)) + sample_weight_eval_set.append(sample_weight_val) + + self._lgbm_model.fit( + X=input_data, + y=target, + feature_name=input_data.columns.tolist(), + sample_weight=sample_weight, + eval_set=eval_set, + eval_sample_weight=sample_weight_eval_set, + ) + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + input_data: pd.DataFrame = data.input_data(start=data.forecast_start) + prediction: npt.NDArray[np.floating] = self._lgbm_model.predict(X=input_data) + + return ForecastDataset( + data=pd.DataFrame( + data=prediction, + index=input_data.index, + columns=[quantile.format() for quantile in self.config.quantiles], + ), + sample_interval=data.sample_interval, + ) + + @property + @override + def feature_importances(self) -> pd.DataFrame: + models: list[LGBMRegressor] = self._lgbm_model.models # type: ignore + weights_df = pd.DataFrame( + [models[i].feature_importances_ for i in range(len(models))], + index=[quantile.format() for quantile in self.config.quantiles], + columns=self._lgbm_model.model_feature_names if self._lgbm_model.has_feature_names else None, + ).transpose() + + weights_df.index.name = "feature_name" + weights_df.columns.name = "quantiles" + + weights_abs = weights_df.abs() + total = weights_abs.sum(axis=0).replace(to_replace=0, value=1.0) # pyright: ignore[reportUnknownMemberType] + + return weights_abs / total + + +__all__ = ["LGBMForecaster", "LGBMForecasterConfig", "LGBMHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py new file mode 100644 index 000000000..eace689fb --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -0,0 +1,336 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""LightGBM-based forecasting models for probabilistic energy forecasting. + +Provides gradient boosting tree models using LightGBM for multi-quantile energy +forecasting. Optimized for time series data with specialized loss functions and +comprehensive hyperparameter control for production forecasting workflows. +""" + +from typing import TYPE_CHECKING, Literal, override + +import numpy as np +import pandas as pd +from lightgbm import LGBMRegressor +from pydantic import Field + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + NotFittedError, +) +from openstef_core.mixins import HyperParams +from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig +from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor + +if TYPE_CHECKING: + import numpy.typing as npt + + +class LGBMLinearHyperParams(HyperParams): + """LgbLinear hyperparameters for gradient boosting tree models. + + Example: + Creating custom hyperparameters for deep trees with regularization: + + >>> hyperparams = LGBMLinearHyperParams( + ... n_estimators=200, + ... max_depth=8, + ... learning_rate=0.1, + ... reg_alpha=0.1, + ... reg_lambda=1.0, + ... ) + + Note: + These parameters are optimized for probabilistic forecasting with + quantile regression. The default objective function is specialized + for magnitude-weighted pinball loss. + """ + + # Core Tree Boosting Parameters + + n_estimators: int = Field( + default=100, + description="Number of boosting rounds/trees to fit. Higher values may improve performance but " + "increase training time and risk overfitting.", + ) + learning_rate: float = Field( + default=0.07, + alias="eta", + description="Step size shrinkage used to prevent overfitting. Range: [0,1]. Lower values require " + "more boosting rounds.", + ) + max_depth: int = Field( + default=6, + description="Maximum depth of trees. Higher values capture more complex patterns but risk " + "overfitting. Range: [1,∞]", + ) + min_child_weight: float = Field( + default=0.06, + description="Minimum sum of instance weight (hessian) needed in a child. Higher values prevent " + "overfitting. Range: [0,∞]", + ) + + min_data_in_leaf: int = Field( + default=500, + description="Minimum number of data points in a leaf. Higher values prevent overfitting. Range: [1,∞]", + ) + min_data_in_bin: int = Field( + default=500, + description="Minimum number of data points in a bin. Higher values prevent overfitting. Range: [1,∞]", + ) + + # Regularization + reg_alpha: float = Field( + default=0, + description="L1 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", + ) + reg_lambda: float = Field( + default=1, + description="L2 regularization on leaf weights. Higher values increase regularization. Range: [0,∞]", + ) + + # Tree Structure Control + num_leaves: int = Field( + default=30, + description="Maximum number of leaves. 0 means no limit. Only relevant when grow_policy='lossguide'.", + ) + + max_bin: int = Field( + default=256, + description="Maximum number of discrete bins for continuous features. Higher values may improve accuracy but " + "increase memory.", + ) + + # Subsampling Parameters + colsample_bytree: float = Field( + default=1, + description="Fraction of features used when constructing each tree. Range: (0,1]", + ) + + @classmethod + def forecaster_class(cls) -> "type[LGBMLinearForecaster]": + """Get forecaster class for these hyperparams. + + Returns: + Forecaster class associated with this configuration. + """ + return LGBMLinearForecaster + + +class LGBMLinearForecasterConfig(ForecasterConfig): + """Configuration for LgbLinear-based forecaster. + Extends HorizonForecasterConfig with LgbLinear-specific hyperparameters + and execution settings. + + Example: + Creating a LgbLinear forecaster configuration with custom hyperparameters: + >>> from datetime import timedelta + >>> from openstef_core.types import LeadTime, Quantile + >>> config = LGBMLinearForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime(timedelta(hours=1))], + ... hyperparams=LGBMLinearHyperParams(n_estimators=100, max_depth=6) + ... ) + """ # noqa: D205 + + hyperparams: LGBMLinearHyperParams = LGBMLinearHyperParams() + + # General Parameters + device: str = Field( + default="cpu", + description="Device for LgbLinear computation. Options: 'cpu', 'cuda', 'cuda:', 'gpu'", + ) + n_jobs: int = Field( + default=1, + description="Number of parallel threads for tree construction. -1 uses all available cores.", + ) + verbosity: Literal[-1, 0, 1, 2, 3] = Field( + default=-1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + ) + + random_state: int | None = Field( + default=None, + alias="seed", + description="Random seed for reproducibility. Controls tree structure randomness.", + ) + + early_stopping_rounds: int | None = Field( + default=None, + description="Training will stop if performance doesn't improve for this many rounds. Requires validation data.", + ) + + def forecaster_from_config(self) -> "LGBMLinearForecaster": + """Create a LGBMLinearForecaster instance from this configuration. + + Returns: + Forecaster instance associated with this configuration. + """ + return LGBMLinearForecaster(config=self) + + +MODEL_CODE_VERSION = 1 + + +class LGBMLinearForecaster(Forecaster, ExplainableForecaster): + """LgbLinear-based forecaster for probabilistic energy forecasting. + + Implements gradient boosting trees using LgbLinear for multi-quantile forecasting. + Optimized for time series prediction with specialized loss functions and + comprehensive hyperparameter control suitable for production energy forecasting. + + The forecaster uses a multi-output strategy where each quantile is predicted + by separate trees within the same boosting ensemble. This approach provides + well-calibrated uncertainty estimates while maintaining computational efficiency. + + Invariants: + - fit() must be called before predict() to train the model + - Configuration quantiles determine the number of prediction outputs + - Model state is preserved across predict() calls after fitting + - Input features must match training data structure during prediction + + Example: + Basic forecasting workflow: + + >>> from datetime import timedelta + >>> from openstef_core.types import LeadTime, Quantile + >>> config = LGBMLinearForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime(timedelta(hours=1))], + ... hyperparams=LGBMLinearHyperParams(n_estimators=100, max_depth=6) + ... ) + >>> forecaster = LGBMLinearForecaster(config) + >>> # forecaster.fit(training_data) + >>> # predictions = forecaster.predict(test_data) + + Note: + LgbLinear dependency is optional and must be installed separately. + The model automatically handles multi-quantile output and uses + magnitude-weighted pinball loss by default for better forecasting performance. + + See Also: + LGBMLinearHyperParams: Detailed hyperparameter configuration options. + HorizonForecaster: Base interface for all forecasting models. + GBLinearForecaster: Alternative linear model using LgbLinear. + """ + + Config = LGBMLinearForecasterConfig + HyperParams = LGBMLinearHyperParams + + _config: LGBMLinearForecasterConfig + + def __init__(self, config: LGBMLinearForecasterConfig) -> None: + """Initialize LgbLinear forecaster with configuration. + + Creates an untrained LgbLinear regressor with the specified configuration. + The underlying LgbLinear model is configured for multi-output quantile + regression using the provided hyperparameters and execution settings. + + Args: + config: Complete configuration including hyperparameters, quantiles, + and execution settings for the LgbLinear model. + """ + self._config = config + + lgbmlinear_params = { + "linear_tree": True, + "objective": "quantile", + "random_state": config.random_state, + "early_stopping_rounds": config.early_stopping_rounds, + "verbosity": config.verbosity, + "n_jobs": config.n_jobs, + **config.hyperparams.model_dump(), + } + + self._lgbmlinear_model: MultiQuantileRegressor = MultiQuantileRegressor( + base_learner=LGBMRegressor, # type: ignore + quantile_param="alpha", + hyperparams=lgbmlinear_params, + quantiles=[float(q) for q in config.quantiles], + ) + + @property + @override + def config(self) -> ForecasterConfig: + return self._config + + @property + @override + def hyperparams(self) -> LGBMLinearHyperParams: + return self._config.hyperparams + + @property + @override + def is_fitted(self) -> bool: + return self._lgbmlinear_model.is_fitted + + @staticmethod + def _prepare_fit_input(data: ForecastInputDataset) -> tuple[pd.DataFrame, np.ndarray, pd.Series]: + input_data: pd.DataFrame = data.input_data() + target: np.ndarray = np.asarray(data.target_series.values) + sample_weight: pd.Series = data.sample_weight_series + + return input_data, target, sample_weight + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + # Prepare training data + input_data, target, sample_weight = self._prepare_fit_input(data) + + # Evaluation sets + eval_set = [(input_data, target)] + sample_weight_eval_set = [sample_weight] + + if data_val is not None: + input_data_val, target_val, sample_weight_val = self._prepare_fit_input(data_val) + eval_set.append((input_data_val, target_val)) + sample_weight_eval_set.append(sample_weight_val) + + self._lgbmlinear_model.fit( + X=input_data, + y=target, + feature_name=input_data.columns.tolist(), + sample_weight=sample_weight, + eval_set=eval_set, + eval_sample_weight=sample_weight_eval_set, + ) + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + input_data: pd.DataFrame = data.input_data(start=data.forecast_start) + prediction: npt.NDArray[np.floating] = self._lgbmlinear_model.predict(X=input_data) + + return ForecastDataset( + data=pd.DataFrame( + data=prediction, + index=input_data.index, + columns=[quantile.format() for quantile in self.config.quantiles], + ), + sample_interval=data.sample_interval, + ) + + @property + @override + def feature_importances(self) -> pd.DataFrame: + models = self._lgbmlinear_model._models # noqa: SLF001 + weights_df = pd.DataFrame( + [models[i].feature_importances_ for i in range(len(models))], # type: ignore + index=[quantile.format() for quantile in self.config.quantiles], + columns=self._lgbmlinear_model.model_feature_names if self._lgbmlinear_model.has_feature_names else None, + ).transpose() + + weights_df.index.name = "feature_name" + weights_df.columns.name = "quantiles" + + weights_abs = weights_df.abs() + total = weights_abs.sum(axis=0).replace(to_replace=0, value=1.0) # pyright: ignore[reportUnknownMemberType] + + return weights_abs / total + + +__all__ = ["LGBMLinearForecaster", "LGBMLinearForecasterConfig", "LGBMLinearHyperParams"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py index 7c0576f84..2c673c68b 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py @@ -169,6 +169,15 @@ class XGBoostHyperParams(HyperParams): description="Whether to apply standard scaling to the target variable before training. Improves convergence.", ) + @classmethod + def forecaster_class(cls) -> "type[XGBoostForecaster]": + """Get the forecaster class for these hyperparams. + + Returns: + Forecaster class associated with this configuration. + """ + return XGBoostForecaster + class XGBoostForecasterConfig(ForecasterConfig): """Configuration for XGBoost-based forecasting models. @@ -205,6 +214,14 @@ class XGBoostForecasterConfig(ForecasterConfig): default=1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" ) + def forecaster_from_config(self) -> "XGBoostForecaster": + """Create a XGBoost forecaster instance from this configuration. + + Returns: + Forecaster instance associated with this configuration. + """ + return XGBoostForecaster(config=self) + MODEL_CODE_VERSION = 1 diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index ed2a819d4..1a33b4622 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -30,10 +30,13 @@ from openstef_models.models import ForecastingModel from openstef_models.models.forecasting.flatliner_forecaster import FlatlinerForecaster from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster +from openstef_models.models.forecasting.hybrid_forecaster import HybridForecaster +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster from openstef_models.transforms.energy_domain import WindPowerFeatureAdder from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, Imputer, NaNDropper, SampleWeighter, Scaler -from openstef_models.transforms.postprocessing import QuantileSorter +from openstef_models.transforms.postprocessing import ConfidenceIntervalApplicator, QuantileSorter from openstef_models.transforms.time_domain import ( CyclicFeaturesAdder, DatetimeFeaturesAdder, @@ -50,13 +53,19 @@ ) from openstef_models.utils.data_split import DataSplitter from openstef_models.utils.feature_selection import Exclude, FeatureSelection, Include -from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow, ForecastingCallback +from openstef_models.workflows.custom_forecasting_workflow import ( + CustomForecastingWorkflow, + ForecastingCallback, +) class LocationConfig(BaseConfig): """Configuration for location information in forecasting workflows.""" - name: str = Field(default="test_location", description="Name of the forecasting location or workflow.") + name: str = Field( + default="test_location", + description="Name of the forecasting location or workflow.", + ) description: str = Field(default="", description="Description of the forecasting workflow.") coordinate: Coordinate = Field( default=Coordinate( @@ -66,7 +75,8 @@ class LocationConfig(BaseConfig): description="Geographic coordinate of the location.", ) country_code: CountryAlpha2 = Field( - default=CountryAlpha2("NL"), description="Country code for holiday feature generation." + default=CountryAlpha2("NL"), + description="Country code for holiday feature generation.", ) @property @@ -90,42 +100,65 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob model_id: ModelIdentifier = Field(description="Unique identifier for the forecasting model.") # Model configuration - model: Literal["xgboost", "gblinear", "flatliner"] = Field( + model: Literal["xgboost", "gblinear", "flatliner", "hybrid", "lgbm", "lgbmlinear"] = Field( description="Type of forecasting model to use." ) # TODO(#652): Implement median forecaster quantiles: list[Quantile] = Field( - default=[Q(0.5)], description="List of quantiles to predict for probabilistic forecasting." + default=[Q(0.5)], + description="List of quantiles to predict for probabilistic forecasting.", ) sample_interval: timedelta = Field( - default=timedelta(minutes=15), description="Time interval between consecutive data samples." + default=timedelta(minutes=15), + description="Time interval between consecutive data samples.", ) horizons: list[LeadTime] = Field( - default=[LeadTime.from_string("PT48H")], description="List of forecast horizons to predict." + default=[LeadTime.from_string("PT48H")], + description="List of forecast horizons to predict.", ) xgboost_hyperparams: XGBoostForecaster.HyperParams = Field( - default=XGBoostForecaster.HyperParams(), description="Hyperparameters for XGBoost forecaster." + default=XGBoostForecaster.HyperParams(), + description="Hyperparameters for XGBoost forecaster.", ) gblinear_hyperparams: GBLinearForecaster.HyperParams = Field( - default=GBLinearForecaster.HyperParams(), description="Hyperparameters for GBLinear forecaster." + default=GBLinearForecaster.HyperParams(), + description="Hyperparameters for GBLinear forecaster.", + ) + + lgbm_hyperparams: LGBMForecaster.HyperParams = Field( + default=LGBMForecaster.HyperParams(), + description="Hyperparameters for LightGBM forecaster.", + ) + + lgbmlinear_hyperparams: LGBMLinearForecaster.HyperParams = Field( + default=LGBMLinearForecaster.HyperParams(), + description="Hyperparameters for LightGBM forecaster.", + ) + + hybrid_hyperparams: HybridForecaster.HyperParams = Field( + default=HybridForecaster.HyperParams(), + description="Hyperparameters for Hybrid forecaster.", ) location: LocationConfig = Field( - default=LocationConfig(), description="Location information for the forecasting workflow." + default=LocationConfig(), + description="Location information for the forecasting workflow.", ) # Data properties target_column: str = Field(default="load", description="Name of the target variable column in datasets.") energy_price_column: str = Field( - default="day_ahead_electricity_price", description="Name of the energy price column in datasets." + default="day_ahead_electricity_price", + description="Name of the energy price column in datasets.", ) radiation_column: str = Field(default="radiation", description="Name of the radiation column in datasets.") wind_speed_column: str = Field(default="windspeed", description="Name of the wind speed column in datasets.") pressure_column: str = Field(default="pressure", description="Name of the pressure column in datasets.") temperature_column: str = Field(default="temperature", description="Name of the temperature column in datasets.") relative_humidity_column: str = Field( - default="relative_humidity", description="Name of the relative humidity column in datasets." + default="relative_humidity", + description="Name of the relative humidity column in datasets.", ) predict_history: timedelta = Field( default=timedelta(days=14), @@ -143,7 +176,8 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob # Feature engineering and validation completeness_threshold: float = Field( - default=0.5, description="Minimum fraction of data that should be available for making a regular forecast." + default=0.5, + description="Minimum fraction of data that should be available for making a regular forecast.", ) flatliner_threshold: timedelta = Field( default=timedelta(hours=24), @@ -167,7 +201,9 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob "Values are normalized relative to this percentile before weighting.", ) sample_weight_exponent: float = Field( - default_factory=lambda data: 1.0 if data.get("model") == "gblinear" else 0.0, + default_factory=lambda data: 1.0 + if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "hybrid", "xgboost"} + else 0.0, description="Exponent applied to scale the sample weights. " "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " "Note: Defaults to 1.0 for gblinear congestion models.", @@ -197,16 +233,22 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob # Callbacks mlflow_storage: MLFlowStorage | None = Field( - default_factory=MLFlowStorage, description="Configuration for MLflow experiment tracking and model storage." + default_factory=MLFlowStorage, + description="Configuration for MLflow experiment tracking and model storage.", ) - model_reuse_enable: bool = Field(default=True, description="Whether to enable reuse of previously trained models.") + model_reuse_enable: bool = Field( + default=True, + description="Whether to enable reuse of previously trained models.", + ) model_reuse_max_age: timedelta = Field( - default=timedelta(days=7), description="Maximum age of a model to be considered for reuse." + default=timedelta(days=7), + description="Maximum age of a model to be considered for reuse.", ) model_selection_enable: bool = Field( - default=True, description="Whether to enable automatic model selection based on performance." + default=True, + description="Whether to enable automatic model selection based on performance.", ) model_selection_metric: tuple[QuantileOrGlobal, str, MetricDirection] = Field( default=(Q(0.5), "R2", "higher_is_better"), @@ -218,7 +260,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob ) verbosity: Literal[0, 1, 2, 3, True] = Field( - default=1, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + default=0, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" ) # Metadata @@ -228,7 +270,9 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob ) -def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomForecastingWorkflow: +def create_forecasting_workflow( + config: ForecastingWorkflowConfig, +) -> CustomForecastingWorkflow: """Create a forecasting workflow from configuration. Builds a complete forecasting pipeline including preprocessing, forecaster, and postprocessing @@ -249,7 +293,7 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore load_column=config.target_column, flatliner_threshold=config.flatliner_threshold, detect_non_zero_flatliner=config.detect_non_zero_flatliner, - error_on_flatliner=True, + error_on_flatliner=False, ), CompletenessChecker(completeness_threshold=config.completeness_threshold), ] @@ -257,9 +301,9 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore LagsAdder( history_available=config.predict_history, horizons=config.horizons, - add_trivial_lags=config.model != "gblinear", # GBLinear uses only 7day lag. + add_trivial_lags=config.model not in {"gblinear", "hybrid"}, # GBLinear uses only 7day lag. target_column=config.target_column, - custom_lags=[timedelta(days=7)] if config.model == "gblinear" else [], + custom_lags=[timedelta(days=7)] if config.model in {"gblinear", "hybrid"} else [], ), WindPowerFeatureAdder( windspeed_reference_column=config.wind_speed_column, @@ -284,7 +328,10 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore ), ] feature_standardizers = [ - Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), + Clipper( + selection=Include(config.energy_price_column).combine(config.clip_features), + mode="standard", + ), Scaler(selection=Exclude(config.target_column), method="standard"), SampleWeighter( target_column=config.target_column, @@ -312,7 +359,38 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore ) ) postprocessing = [QuantileSorter()] - + elif config.model == "lgbmlinear": + preprocessing = [ + *checks, + *feature_adders, + HolidayFeatureAdder(country_code=config.location.country_code), + DatetimeFeaturesAdder(onehot_encode=False), + *feature_standardizers, + ] + forecaster = LGBMLinearForecaster( + config=LGBMLinearForecaster.Config( + quantiles=config.quantiles, + horizons=config.horizons, + hyperparams=config.lgbmlinear_hyperparams, + ) + ) + postprocessing = [QuantileSorter()] + elif config.model == "lgbm": + preprocessing = [ + *checks, + *feature_adders, + HolidayFeatureAdder(country_code=config.location.country_code), + DatetimeFeaturesAdder(onehot_encode=False), + *feature_standardizers, + ] + forecaster = LGBMForecaster( + config=LGBMForecaster.Config( + quantiles=config.quantiles, + horizons=config.horizons, + hyperparams=config.lgbm_hyperparams, + ) + ) + postprocessing = [QuantileSorter()] elif config.model == "gblinear": preprocessing = [ *checks, @@ -335,7 +413,7 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore verbosity=config.verbosity, ), ) - postprocessing = [] + postprocessing = [QuantileSorter()] elif config.model == "flatliner": preprocessing = [] forecaster = FlatlinerForecaster( @@ -344,7 +422,31 @@ def create_forecasting_workflow(config: ForecastingWorkflowConfig) -> CustomFore horizons=config.horizons, ) ) - postprocessing = [] + postprocessing = [ + ConfidenceIntervalApplicator(quantiles=config.quantiles), + ] + elif config.model == "hybrid": + preprocessing = [ + *checks, + *feature_adders, + *feature_standardizers, + Imputer( + selection=Exclude(config.target_column), + imputation_strategy="mean", + fill_future_values=Include(config.energy_price_column), + ), + NaNDropper( + selection=Exclude(config.target_column), + ), + ] + forecaster = HybridForecaster( + config=HybridForecaster.Config( + quantiles=config.quantiles, + horizons=config.horizons, + hyperparams=config.hybrid_hyperparams, + ) + ) + postprocessing = [QuantileSorter()] else: msg = f"Unsupported model type: {config.model}" raise ValueError(msg) diff --git a/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py new file mode 100644 index 000000000..763932268 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py @@ -0,0 +1,157 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Adaptor for multi-quantile regression using a base quantile regressor. + +Designed to work with scikit-learn compatible regressors that support quantile regression. +""" + +import logging + +import numpy as np +import numpy.typing as npt +import pandas as pd +from sklearn.base import BaseEstimator, RegressorMixin + +logger = logging.getLogger(__name__) + +ParamType = float | int | str | bool | None + + +class MultiQuantileRegressor(BaseEstimator, RegressorMixin): + """Adaptor for multi-quantile regression using a base quantile regressor. + + This class creates separate instances of a given quantile regressor for each quantile + and manages their training and prediction. + """ + + def __init__( + self, + base_learner: type[BaseEstimator], + quantile_param: str, + quantiles: list[float], + hyperparams: dict[str, ParamType], + ): + """Initialize MultiQuantileRegressor. + + This is an adaptor that allows any quantile-capable regressor to predict multiple quantiles + by instantiating separate models for each quantile. + + Args: + base_learner: A scikit-learn compatible regressor class that supports quantile regression. + quantile_param: The name of the parameter in base_learner that sets the quantile level. + quantiles: List of quantiles to predict (e.g., [0.1, 0.5, 0.9]). + hyperparams: Dictionary of hyperparameters to pass to each base learner instance. + """ + self.quantiles = quantiles + self.hyperparams = hyperparams + self.quantile_param = quantile_param + self.base_learner = base_learner + self.is_fitted = False + self._models = [self._init_model(q) for q in quantiles] + + def _init_model(self, q: float) -> BaseEstimator: + params = self.hyperparams.copy() + params[self.quantile_param] = q + base_learner = self.base_learner(**params) + + if self.quantile_param not in base_learner.get_params(): # type: ignore + msg = f"The base learner does not support the quantile parameter '{self.quantile_param}'." + raise ValueError(msg) + + return base_learner + + def fit( + self, + X: npt.NDArray[np.floating] | pd.DataFrame, + y: npt.NDArray[np.floating] | pd.Series, + sample_weight: npt.NDArray[np.floating] | pd.Series | None = None, + feature_name: list[str] | None = None, + eval_set: list[tuple[pd.DataFrame, npt.NDArray[np.floating]]] | None = None, + eval_sample_weight: list[npt.NDArray[np.floating]] | list[pd.Series] | None = None, + ) -> None: + """Fit the multi-quantile regressor. + + Args: + X: Input features as a DataFrame. + y: Target values as a 2D array where each column corresponds to a quantile. + sample_weight: Sample weights for training data. + feature_name: List of feature names. + eval_set: Evaluation set for early stopping. + eval_sample_weight: Sample weights for evaluation data. + """ + # Pass model-specific eval arguments + kwargs = {} + for model in self._models: + # Check if early stopping is supported + # Check that eval_set is supported + if eval_set is None and "early_stopping_rounds" in self.hyperparams: + model.set_params(early_stopping_rounds=None) # type: ignore + + if eval_set is not None and self.learner_eval_sample_weight_param is not None: # type: ignore + kwargs[self.learner_eval_sample_weight_param] = eval_sample_weight + + if "early_stopping_rounds" in self.hyperparams and self.learner_eval_sample_weight_param is not None: + model.set_params(early_stopping_rounds=self.hyperparams["early_stopping_rounds"]) # type: ignore + + if feature_name: + self.model_feature_names = feature_name + else: + self.model_feature_names = [] + + if eval_sample_weight is not None and self.learner_eval_sample_weight_param: + kwargs[self.learner_eval_sample_weight_param] = eval_sample_weight + + model.fit( # type: ignore + X=np.asarray(X), + y=y, + sample_weight=sample_weight, + **kwargs, + ) + + self.is_fitted = True + + @property + def learner_eval_sample_weight_param(self) -> str | None: + """Get the name of the sample weight parameter for evaluation sets. + + Returns: + The name of the sample weight parameter if supported, else None. + """ + learner_name: str = self.base_learner.__name__ + params: dict[str, str | None] = { + "QuantileRegressor": None, + "LGBMRegressor": "eval_sample_weight", + "XGBRegressor": "sample_weight_eval_set", + } + return params.get(learner_name) + + def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: + """Predict quantiles for the input features. + + Args: + X: Input features as a DataFrame. + + Returns: + + A 2D array where each column corresponds to predicted quantiles. + """ # noqa: D412 + return np.column_stack([model.predict(X=X) for model in self._models]) # type: ignore + + @property + def models(self) -> list[BaseEstimator]: + """Get the list of underlying quantile models. + + Returns: + List of BaseEstimator instances for each quantile. + """ + return self._models + + @property + def has_feature_names(self) -> bool: + """Check if the base learners have feature names. + + Returns: + True if the base learners have feature names, False otherwise. + """ + return len(self.model_feature_names) > 0 diff --git a/packages/openstef-models/tests/unit/integrations/skops/__init__.py b/packages/openstef-models/tests/unit/integrations/skops/__init__.py new file mode 100644 index 000000000..63d543f53 --- /dev/null +++ b/packages/openstef-models/tests/unit/integrations/skops/__init__.py @@ -0,0 +1,5 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +__all__ = [] diff --git a/packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py b/packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py new file mode 100644 index 000000000..8d4bb9eb7 --- /dev/null +++ b/packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py @@ -0,0 +1,72 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from __future__ import annotations + +from io import BytesIO +from typing import TYPE_CHECKING + +import pytest + +from openstef_core.mixins import Stateful +from openstef_core.types import LeadTime, Q +from openstef_models.integrations.skops.skops_model_serializer import SkopsModelSerializer +from openstef_models.models.forecasting.forecaster import ForecasterConfig +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster +from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster + +if TYPE_CHECKING: + from openstef_models.models.forecasting.forecaster import Forecaster + + +class SimpleSerializableModel(Stateful): + """A simple model class that can be pickled for testing.""" + + def __init__(self) -> None: + self.target_column = "load" + self.is_fitted = True + + +def test_skops_model_serializer__roundtrip__preserves_model_integrity(): + """Test complete serialize/deserialize roundtrip preserves model state.""" + # Arrange + buffer = BytesIO() + serializer = SkopsModelSerializer() + model = SimpleSerializableModel() + + # Act - Serialize then deserialize + serializer.serialize(model, buffer) + buffer.seek(0) + restored_model = serializer.deserialize(buffer) + + # Assert - Model state should be identical + assert isinstance(restored_model, SimpleSerializableModel) + assert restored_model.target_column == model.target_column + assert restored_model.is_fitted == model.is_fitted + + +@pytest.mark.parametrize( + "forecaster_class", + [ + XGBoostForecaster, + LGBMForecaster, + LGBMLinearForecaster, + ], +) +def test_skops_works_with_different_forecasters(forecaster_class: type[Forecaster]): + buffer = BytesIO() + serializer = SkopsModelSerializer() + + config: ForecasterConfig = forecaster_class.Config(horizons=[LeadTime.from_string("PT12H")], quantiles=[Q(0.5)]) # type: ignore + assert isinstance(config, ForecasterConfig) + forecaster = forecaster_class(config=config) + + # Act - Serialize then deserialize + serializer.serialize(forecaster, buffer) + buffer.seek(0) + restored_model = serializer.deserialize(buffer) + + # Assert - Model state should be identical + assert isinstance(restored_model, forecaster.__class__) diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py new file mode 100644 index 000000000..4e36e125d --- /dev/null +++ b/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py @@ -0,0 +1,105 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_models.models.forecasting.hybrid_forecaster import ( + HybridForecaster, + HybridForecasterConfig, + HybridHyperParams, +) + + +@pytest.fixture +def base_config() -> HybridForecasterConfig: + """Base configuration for Hybrid forecaster tests.""" + + params = HybridHyperParams() + return HybridForecasterConfig( + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime(timedelta(days=1))], + hyperparams=params, + verbosity=False, + ) + + +def test_hybrid_forecaster_fit_predict( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: HybridForecasterConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = HybridForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + +def test_hybrid_forecaster_predict_not_fitted_raises_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: HybridForecasterConfig, +): + """Test that predict() raises NotFittedError when called before fit().""" + # Arrange + forecaster = HybridForecaster(config=base_config) + + # Act & Assert + with pytest.raises(NotFittedError, match="HybridForecaster"): + forecaster.predict(sample_forecast_input_dataset) + + +def test_hybrid_forecaster_with_sample_weights( + sample_dataset_with_weights: ForecastInputDataset, + base_config: HybridForecasterConfig, +): + """Test that forecaster works with sample weights and produces different results.""" + # Arrange + forecaster_with_weights = HybridForecaster(config=base_config) + + # Create dataset without weights for comparison + data_without_weights = ForecastInputDataset( + data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), + sample_interval=sample_dataset_with_weights.sample_interval, + target_column=sample_dataset_with_weights.target_column, + forecast_start=sample_dataset_with_weights.forecast_start, + ) + forecaster_without_weights = HybridForecaster(config=base_config) + + # Act + forecaster_with_weights.fit(sample_dataset_with_weights) + forecaster_without_weights.fit(data_without_weights) + + # Predict using data without sample_weight column (since that's used for training, not prediction) + result_with_weights = forecaster_with_weights.predict(data_without_weights) + result_without_weights = forecaster_without_weights.predict(data_without_weights) + + # Assert + # Both should produce valid forecasts + assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" + assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + + # Sample weights should affect the model, so results should be different + # (This is a statistical test - with different weights, predictions should differ) + differences = (result_with_weights.data - result_without_weights.data).abs() + assert differences.sum().sum() > 0, "Sample weights should affect model predictions" diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py new file mode 100644 index 000000000..b4fe1c989 --- /dev/null +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py @@ -0,0 +1,149 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +from datetime import timedelta + +import pandas as pd +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_models.models.forecasting.lgbm_forecaster import ( + LGBMForecaster, + LGBMForecasterConfig, + LGBMHyperParams, +) + + +@pytest.fixture +def base_config() -> LGBMForecasterConfig: + """Base configuration for LightGBM forecaster tests.""" + + return LGBMForecasterConfig( + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime(timedelta(days=1))], + hyperparams=LGBMHyperParams(n_estimators=100, max_depth=3, min_data_in_leaf=1, min_data_in_bin=1), + device="cpu", + n_jobs=1, + verbosity=0, + ) + + +@pytest.fixture +def forecaster(base_config: LGBMForecasterConfig) -> LGBMForecaster: + return LGBMForecaster(base_config) + + +def test_initialization(forecaster: LGBMForecaster): + assert isinstance(forecaster, LGBMForecaster) + assert forecaster.config.hyperparams.n_estimators == 100 # type: ignore + + +def test_quantile_lgbm_forecaster__fit_predict( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LGBMForecasterConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = LGBMForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + # Since forecast is deterministic with fixed random seed, check value spread (vectorized) + # All quantiles should have some variation (not all identical values) + stds = result.data.std() + assert (stds > 0).all(), f"All columns should have variation, got stds: {dict(stds)}" + + +def test_lgbm_forecaster__not_fitted_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LGBMForecasterConfig, +): + """Test that NotFittedError is raised when predicting before fitting.""" + # Arrange + forecaster = LGBMForecaster(config=base_config) + + # Act & Assert + with pytest.raises(NotFittedError): + forecaster.predict(sample_forecast_input_dataset) + + +def test_lgbm_forecaster__with_sample_weights( + sample_dataset_with_weights: ForecastInputDataset, + base_config: LGBMForecasterConfig, +): + """Test that forecaster works with sample weights and produces different results.""" + # Arrange + forecaster_with_weights = LGBMForecaster(config=base_config) + + # Create dataset without weights for comparison + data_without_weights = ForecastInputDataset( + data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), + sample_interval=sample_dataset_with_weights.sample_interval, + target_column=sample_dataset_with_weights.target_column, + forecast_start=sample_dataset_with_weights.forecast_start, + ) + forecaster_without_weights = LGBMForecaster(config=base_config) + + # Act + forecaster_with_weights.fit(sample_dataset_with_weights) + forecaster_without_weights.fit(data_without_weights) + + # Predict using data without sample_weight column (since that's used for training, not prediction) + result_with_weights = forecaster_with_weights.predict(data_without_weights) + result_without_weights = forecaster_without_weights.predict(data_without_weights) + + # Assert + # Both should produce valid forecasts + assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" + assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + + # Sample weights should affect the model, so results should be different + # (This is a statistical test - with different weights, predictions should differ) + differences = (result_with_weights.data - result_without_weights.data).abs() + assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +def test_lgbm_forecaster__feature_importances( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LGBMForecasterConfig, +): + """Test that feature_importances returns correct normalized importance scores.""" + # Arrange + forecaster = LGBMForecaster(config=base_config) + forecaster.fit(sample_forecast_input_dataset) + + # Act + feature_importances = forecaster.feature_importances + + # Assert + assert len(feature_importances.index) > 0 + + # Columns should match expected quantile formats + expected_columns = pd.Index([q.format() for q in base_config.quantiles], name="quantiles") + pd.testing.assert_index_equal(feature_importances.columns, expected_columns) + + # Values should be normalized (sum to 1.0 per quantile column) and non-negative + col_sums = feature_importances.sum(axis=0) + pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=expected_columns), atol=1e-10) + assert (feature_importances >= 0).all().all() + + +# TODO(@MvLieshout): Add tests on different loss functions # noqa: TD003 diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py new file mode 100644 index 000000000..cc4b4701e --- /dev/null +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py @@ -0,0 +1,149 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +from datetime import timedelta + +import pandas as pd +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_models.models.forecasting.lgbmlinear_forecaster import ( + LGBMLinearForecaster, + LGBMLinearForecasterConfig, + LGBMLinearHyperParams, +) + + +@pytest.fixture +def base_config() -> LGBMLinearForecasterConfig: + """Base configuration for LgbLinear forecaster tests.""" + + return LGBMLinearForecasterConfig( + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime(timedelta(days=1))], + hyperparams=LGBMLinearHyperParams(n_estimators=100, max_depth=3, min_data_in_leaf=1, min_data_in_bin=1), + device="cpu", + n_jobs=1, + verbosity=0, + ) + + +@pytest.fixture +def forecaster(base_config: LGBMLinearForecasterConfig) -> LGBMLinearForecaster: + return LGBMLinearForecaster(base_config) + + +def test_initialization(forecaster: LGBMLinearForecaster): + assert isinstance(forecaster, LGBMLinearForecaster) + assert forecaster.config.hyperparams.n_estimators == 100 # type: ignore + + +def test_quantile_lgbmlinear_forecaster__fit_predict( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LGBMLinearForecasterConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = LGBMLinearForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + # Since forecast is deterministic with fixed random seed, check value spread (vectorized) + # All quantiles should have some variation (not all identical values) + stds = result.data.std() + assert (stds > 0).all(), f"All columns should have variation, got stds: {dict(stds)}" + + +def test_lgbmlinear_forecaster__not_fitted_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LGBMLinearForecasterConfig, +): + """Test that NotFittedError is raised when predicting before fitting.""" + # Arrange + forecaster = LGBMLinearForecaster(config=base_config) + + # Act & Assert + with pytest.raises(NotFittedError): + forecaster.predict(sample_forecast_input_dataset) + + +def test_lgbmlinear_forecaster__with_sample_weights( + sample_dataset_with_weights: ForecastInputDataset, + base_config: LGBMLinearForecasterConfig, +): + """Test that forecaster works with sample weights and produces different results.""" + # Arrange + forecaster_with_weights = LGBMLinearForecaster(config=base_config) + + # Create dataset without weights for comparison + data_without_weights = ForecastInputDataset( + data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), + sample_interval=sample_dataset_with_weights.sample_interval, + target_column=sample_dataset_with_weights.target_column, + forecast_start=sample_dataset_with_weights.forecast_start, + ) + forecaster_without_weights = LGBMLinearForecaster(config=base_config) + + # Act + forecaster_with_weights.fit(sample_dataset_with_weights) + forecaster_without_weights.fit(data_without_weights) + + # Predict using data without sample_weight column (since that's used for training, not prediction) + result_with_weights = forecaster_with_weights.predict(data_without_weights) + result_without_weights = forecaster_without_weights.predict(data_without_weights) + + # Assert + # Both should produce valid forecasts + assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" + assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + + # Sample weights should affect the model, so results should be different + # (This is a statistical test - with different weights, predictions should differ) + differences = (result_with_weights.data - result_without_weights.data).abs() + assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +def test_lgbmlinear_forecaster__feature_importances( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LGBMLinearForecasterConfig, +): + """Test that feature_importances returns correct normalized importance scores.""" + # Arrange + forecaster = LGBMLinearForecaster(config=base_config) + forecaster.fit(sample_forecast_input_dataset) + + # Act + feature_importances = forecaster.feature_importances + + # Assert + assert len(feature_importances.index) > 0 + + # Columns should match expected quantile formats + expected_columns = pd.Index([q.format() for q in base_config.quantiles], name="quantiles") + pd.testing.assert_index_equal(feature_importances.columns, expected_columns) + + # Values should be normalized (sum to 1.0 per quantile column) and non-negative + col_sums = feature_importances.sum(axis=0) + pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=expected_columns), atol=1e-10) + assert (feature_importances >= 0).all().all() + + +# TODO(@MvLieshout): Add tests on different loss functions # noqa: TD003 diff --git a/packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py b/packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py new file mode 100644 index 000000000..d2e8ad7be --- /dev/null +++ b/packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py @@ -0,0 +1,107 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +import pandas as pd +import pytest +from lightgbm import LGBMRegressor +from numpy.random import default_rng +from pydantic import BaseModel +from sklearn.base import BaseEstimator +from sklearn.linear_model import QuantileRegressor +from xgboost import XGBRegressor + +from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor, ParamType + +ParamDict = dict[str, ParamType] +BaseLearner = BaseEstimator + + +class BaseLearnerConfig(BaseModel): + base_learner: type[BaseLearner] + quantile_param: str + hyperparams: ParamDict + + +@pytest.fixture +def dataset() -> tuple[pd.DataFrame, pd.Series]: + n_samples = 100 + n_features = 5 + rng = default_rng() + X = pd.DataFrame(rng.random((n_samples, n_features))) + y = pd.Series(rng.random(n_samples)) + return X, y + + +@pytest.fixture(params=["sklearn_quantile", "lgbm", "xgboost"]) +def baselearner_config(request: pytest.FixtureRequest) -> BaseLearnerConfig: # type : ignore + model: str = request.param + if model == "sklearn_quantile": + return BaseLearnerConfig( + base_learner=QuantileRegressor, + quantile_param="quantile", + hyperparams={"alpha": 0.1, "solver": "highs", "fit_intercept": True}, + ) + if model == "lgbm": + return BaseLearnerConfig( + base_learner=LGBMRegressor, # type: ignore + quantile_param="alpha", + hyperparams={ + "objective": "quantile", + "n_estimators": 10, + "learning_rate": 0.1, + "max_depth": -1, + }, + ) + return BaseLearnerConfig( + base_learner=XGBRegressor, + quantile_param="quantile_alpha", + hyperparams={ + "objective": "reg:quantileerror", + "n_estimators": 10, + "learning_rate": 0.1, + "max_depth": 3, + }, + ) + + +def test_init_sets_quantiles_and_models(baselearner_config: BaseLearnerConfig): + quantiles = [0.1, 0.5, 0.9] + + model = MultiQuantileRegressor( + base_learner=baselearner_config.base_learner, + quantile_param=baselearner_config.quantile_param, + quantiles=quantiles, + hyperparams=baselearner_config.hyperparams, + ) + + assert model.quantiles == quantiles + assert len(model._models) == len(quantiles) + + +def test_fit_and_predict_shape(dataset: tuple[pd.DataFrame, pd.Series], baselearner_config: BaseLearnerConfig): + quantiles = [0.1, 0.5, 0.9] + + X, y = dataset[0], dataset[1] + model = MultiQuantileRegressor( + base_learner=baselearner_config.base_learner, + quantile_param=baselearner_config.quantile_param, + quantiles=quantiles, + hyperparams=baselearner_config.hyperparams, + ) + + model.fit(X, y) + preds = model.predict(X) + assert preds.shape == (X.shape[0], len(quantiles)) + + +def test_is_fitted_true_after_fit(dataset: tuple[pd.DataFrame, pd.Series], baselearner_config: BaseLearnerConfig): + quantiles = [0.1, 0.5, 0.9] + X, y = dataset[0], dataset[1] + model = MultiQuantileRegressor( + base_learner=baselearner_config.base_learner, + quantile_param=baselearner_config.quantile_param, + quantiles=quantiles, + hyperparams=baselearner_config.hyperparams, + ) + model.fit(X, y) + assert model.is_fitted diff --git a/pyproject.toml b/pyproject.toml index 6c9333afa..87ef62841 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -145,6 +145,7 @@ lint.isort.known-first-party = [ "tests", "examples", ] # Useful if ruff does not run from the actual root of the project and to import form tests +lint.pep8-naming.ignore-names = [ "X" ] # Allow X for SKLearn-like feature matrices lint.pydocstyle.convention = "google" lint.pylint.allow-dunder-method-names = [ "__get_pydantic_core_schema__", diff --git a/uv.lock b/uv.lock index aa29c527d..013babc38 100644 --- a/uv.lock +++ b/uv.lock @@ -1552,6 +1552,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/65/bd/606e2f7eb0da042bffd8711a7427f7a28ca501aa6b1e3367ae3c7d4dc489/licensecheck-2025.1.0-py3-none-any.whl", hash = "sha256:eb20131cd8f877e5396958fd7b00cdb2225436c37a59dba4cf36d36079133a17", size = 26681, upload-time = "2025-03-26T22:58:03.145Z" }, ] +[[package]] +name = "lightgbm" +version = "4.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/0b/a2e9f5c5da7ef047cc60cef37f86185088845e8433e54d2e7ed439cce8a3/lightgbm-4.6.0.tar.gz", hash = "sha256:cb1c59720eb569389c0ba74d14f52351b573af489f230032a1c9f314f8bab7fe", size = 1703705, upload-time = "2025-02-15T04:03:03.111Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/75/cffc9962cca296bc5536896b7e65b4a7cdeb8db208e71b9c0133c08f8f7e/lightgbm-4.6.0-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:b7a393de8a334d5c8e490df91270f0763f83f959574d504c7ccb9eee4aef70ed", size = 2010151, upload-time = "2025-02-15T04:02:50.961Z" }, + { url = "https://files.pythonhosted.org/packages/21/1b/550ee378512b78847930f5d74228ca1fdba2a7fbdeaac9aeccc085b0e257/lightgbm-4.6.0-py3-none-macosx_12_0_arm64.whl", hash = "sha256:2dafd98d4e02b844ceb0b61450a660681076b1ea6c7adb8c566dfd66832aafad", size = 1592172, upload-time = "2025-02-15T04:02:53.937Z" }, + { url = "https://files.pythonhosted.org/packages/64/41/4fbde2c3d29e25ee7c41d87df2f2e5eda65b431ee154d4d462c31041846c/lightgbm-4.6.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4d68712bbd2b57a0b14390cbf9376c1d5ed773fa2e71e099cac588703b590336", size = 3454567, upload-time = "2025-02-15T04:02:56.443Z" }, + { url = "https://files.pythonhosted.org/packages/42/86/dabda8fbcb1b00bcfb0003c3776e8ade1aa7b413dff0a2c08f457dace22f/lightgbm-4.6.0-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:cb19b5afea55b5b61cbb2131095f50538bd608a00655f23ad5d25ae3e3bf1c8d", size = 3569831, upload-time = "2025-02-15T04:02:58.925Z" }, + { url = "https://files.pythonhosted.org/packages/5e/23/f8b28ca248bb629b9e08f877dd2965d1994e1674a03d67cd10c5246da248/lightgbm-4.6.0-py3-none-win_amd64.whl", hash = "sha256:37089ee95664b6550a7189d887dbf098e3eadab03537e411f52c63c121e3ba4b", size = 1451509, upload-time = "2025-02-15T04:03:01.515Z" }, +] + [[package]] name = "loguru" version = "0.7.3" @@ -2265,6 +2282,7 @@ version = "0.0.0" source = { editable = "packages/openstef-models" } dependencies = [ { name = "holidays" }, + { name = "lightgbm" }, { name = "mlflow-skinny" }, { name = "openstef-beam" }, { name = "openstef-core" }, @@ -2272,6 +2290,7 @@ dependencies = [ { name = "pycountry" }, { name = "scikit-learn" }, { name = "scipy" }, + { name = "skops" }, ] [package.optional-dependencies] @@ -2286,6 +2305,7 @@ xgb-gpu = [ [package.metadata] requires-dist = [ { name = "holidays", specifier = ">=0.79" }, + { name = "lightgbm", specifier = ">=4.6" }, { name = "mlflow-skinny", specifier = ">=3,<4" }, { name = "openstef-beam", editable = "packages/openstef-beam" }, { name = "openstef-core", editable = "packages/openstef-core" }, @@ -2293,6 +2313,7 @@ requires-dist = [ { name = "pycountry", specifier = ">=24.6.1" }, { name = "scikit-learn", specifier = ">=1.7.1,<2" }, { name = "scipy", specifier = ">=1.16.3,<2" }, + { name = "skops", specifier = ">=0.13" }, { name = "xgboost", marker = "sys_platform == 'darwin' and extra == 'xgb-cpu'", specifier = ">=3,<4" }, { name = "xgboost", marker = "extra == 'xgb-gpu'", specifier = ">=3,<4" }, { name = "xgboost-cpu", marker = "(sys_platform == 'linux' and extra == 'xgb-cpu') or (sys_platform == 'win32' and extra == 'xgb-cpu')", specifier = ">=3,<4" }, @@ -2599,6 +2620,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/92/1b/5337af1a6a478d25a3e3c56b9b4b42b0a160314e02f4a0498d5322c8dac4/poethepoet-0.37.0-py3-none-any.whl", hash = "sha256:861790276315abcc8df1b4bd60e28c3d48a06db273edd3092f3c94e1a46e5e22", size = 90062, upload-time = "2025-08-11T18:00:27.595Z" }, ] +[[package]] +name = "prettytable" +version = "3.17.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/79/45/b0847d88d6cfeb4413566738c8bbf1e1995fad3d42515327ff32cc1eb578/prettytable-3.17.0.tar.gz", hash = "sha256:59f2590776527f3c9e8cf9fe7b66dd215837cca96a9c39567414cbc632e8ddb0", size = 67892, upload-time = "2025-11-14T17:33:20.212Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/8c/83087ebc47ab0396ce092363001fa37c17153119ee282700c0713a195853/prettytable-3.17.0-py3-none-any.whl", hash = "sha256:aad69b294ddbe3e1f95ef8886a060ed1666a0b83018bbf56295f6f226c43d287", size = 34433, upload-time = "2025-11-14T17:33:19.093Z" }, +] + [[package]] name = "prompt-toolkit" version = "3.0.52" @@ -3654,6 +3687,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] +[[package]] +name = "skops" +version = "0.13.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "packaging" }, + { name = "prettytable" }, + { name = "scikit-learn" }, + { name = "scipy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b5/0c/5ec987633e077dd0076178ea6ade2d6e57780b34afea0b497fb507d7a1ed/skops-0.13.0.tar.gz", hash = "sha256:66949fd3c95cbb5c80270fbe40293c0fe1e46cb4a921860e42584dd9c20ebeb1", size = 581312, upload-time = "2025-08-06T09:48:14.916Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/e8/6a2b2030f0689f894432b9c2f0357f2f3286b2a00474827e04b8fe9eea13/skops-0.13.0-py3-none-any.whl", hash = "sha256:55e2cccb18c86f5916e4cfe5acf55ed7b0eecddf08a151906414c092fa5926dc", size = 131200, upload-time = "2025-08-06T09:48:13.356Z" }, +] + [[package]] name = "smmap" version = "5.0.2" From 72b1ca7ffa02d9c194913220d342ffba9dc44add Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Fri, 21 Nov 2025 20:04:43 +0100 Subject: [PATCH 027/104] fix merge issue Signed-off-by: Lars van Someren --- .../src/openstef_models/presets/forecasting_workflow.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 6616904b2..99cbea0ac 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -241,8 +241,6 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob mlflow_storage: MLFlowStorage | None = Field( default_factory=MLFlowStorage, description="Configuration for MLflow experiment tracking and model storage.", - default_factory=MLFlowStorage, - description="Configuration for MLflow experiment tracking and model storage.", ) model_reuse_enable: bool = Field( @@ -252,15 +250,11 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob model_reuse_max_age: timedelta = Field( default=timedelta(days=7), description="Maximum age of a model to be considered for reuse.", - default=timedelta(days=7), - description="Maximum age of a model to be considered for reuse.", ) model_selection_enable: bool = Field( default=True, description="Whether to enable automatic model selection based on performance.", - default=True, - description="Whether to enable automatic model selection based on performance.", ) model_selection_metric: tuple[QuantileOrGlobal, str, MetricDirection] = Field( default=(Q(0.5), "R2", "higher_is_better"), @@ -316,7 +310,7 @@ def create_forecasting_workflow( add_trivial_lags=config.model not in {"gblinear", "stacking", "learned_weights"}, # GBLinear uses only 7day lag. target_column=config.target_column, - custom_lags=[timedelta(days=7)] if config.model in {"gblinear","stacking" "learned_weights"} else [], + custom_lags=[timedelta(days=7)] if config.model in {"gblinear", "stackinglearned_weights"} else [], ), WindPowerFeatureAdder( windspeed_reference_column=config.wind_speed_column, From 553e2fdf1ac148293a9dd37f80d17283a4117831 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Fri, 21 Nov 2025 20:08:39 +0100 Subject: [PATCH 028/104] Fixed type Issues Signed-off-by: Lars van Someren --- .../benchmarks/liander_2024_benchmark_xgboost_gblinear.py | 2 +- .../src/openstef_models/models/forecasting/meta/__init__.py | 5 +++++ .../tests/unit/models/forecasting/meta/__init__.py | 0 3 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 packages/openstef-models/tests/unit/models/forecasting/meta/__init__.py diff --git a/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py b/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py index 4ff925cce..63ad9baff 100644 --- a/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py +++ b/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py @@ -45,7 +45,7 @@ BENCHMARK_RESULTS_PATH_XGBOOST = OUTPUT_PATH / "XGBoost" BENCHMARK_RESULTS_PATH_GBLINEAR = OUTPUT_PATH / "GBLinear" -N_PROCESSES = 1 # Amount of parallel processes to use for the benchmark +N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark # Model configuration diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/meta/__init__.py b/packages/openstef-models/src/openstef_models/models/forecasting/meta/__init__.py index 9ef8b6fdf..996e37d1a 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/meta/__init__.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/meta/__init__.py @@ -1,3 +1,8 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""This module provides meta-forecasting models and related hyperparameters for the OpenSTEF project.""" + from .meta_forecaster import FinalLearner, MetaForecaster, MetaHyperParams __all__ = [ diff --git a/packages/openstef-models/tests/unit/models/forecasting/meta/__init__.py b/packages/openstef-models/tests/unit/models/forecasting/meta/__init__.py new file mode 100644 index 000000000..e69de29bb From f873f892105327b383474791ab21a1026d4c6965 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 24 Nov 2025 11:59:07 +0100 Subject: [PATCH 029/104] Introduced openstef_metalearning Signed-off-by: Lars van Someren --- packages/openstef-metalearning/README.md | 0 packages/openstef-metalearning/pyproject.toml | 15 +++++ .../src/openstef_metalearning/__init__.py | 13 ++++ .../openstef_metalearning/models}/__init__.py | 0 .../models}/learned_weights_forecaster.py | 14 ++-- .../models}/meta_forecaster.py | 0 .../models}/stacking_forecaster.py | 12 ++-- .../general/distribution_transform.py | 65 +++++++++++++++++++ pyproject.toml | 4 ++ uv.lock | 18 +++++ 10 files changed, 128 insertions(+), 13 deletions(-) create mode 100644 packages/openstef-metalearning/README.md create mode 100644 packages/openstef-metalearning/pyproject.toml create mode 100644 packages/openstef-metalearning/src/openstef_metalearning/__init__.py rename packages/{openstef-models/src/openstef_models/models/forecasting/meta => openstef-metalearning/src/openstef_metalearning/models}/__init__.py (100%) rename packages/{openstef-models/src/openstef_models/models/forecasting/meta => openstef-metalearning/src/openstef_metalearning/models}/learned_weights_forecaster.py (98%) rename packages/{openstef-models/src/openstef_models/models/forecasting/meta => openstef-metalearning/src/openstef_metalearning/models}/meta_forecaster.py (100%) rename packages/{openstef-models/src/openstef_models/models/forecasting/meta => openstef-metalearning/src/openstef_metalearning/models}/stacking_forecaster.py (98%) create mode 100644 packages/openstef-models/src/openstef_models/transforms/general/distribution_transform.py diff --git a/packages/openstef-metalearning/README.md b/packages/openstef-metalearning/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-metalearning/pyproject.toml b/packages/openstef-metalearning/pyproject.toml new file mode 100644 index 000000000..31352ea30 --- /dev/null +++ b/packages/openstef-metalearning/pyproject.toml @@ -0,0 +1,15 @@ +[project] +name = "openstef-metalearning" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.12" +dependencies = ["openstef-core", "openstef-models"] + +[tool.uv.sources] +openstef-models = { workspace = true } +openstef-core = { workspace = true } + + +[tool.hatch.build.targets.wheel] +packages = ["src/openstef_metalearning"] diff --git a/packages/openstef-metalearning/src/openstef_metalearning/__init__.py b/packages/openstef-metalearning/src/openstef_metalearning/__init__.py new file mode 100644 index 000000000..e659c6c12 --- /dev/null +++ b/packages/openstef-metalearning/src/openstef_metalearning/__init__.py @@ -0,0 +1,13 @@ +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Core models for OpenSTEF.""" + +import logging + +# Set up logging configuration +root_logger = logging.getLogger(name=__name__) +if not root_logger.handlers: + root_logger.addHandler(logging.NullHandler()) + +__all__ = [] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/meta/__init__.py b/packages/openstef-metalearning/src/openstef_metalearning/models/__init__.py similarity index 100% rename from packages/openstef-models/src/openstef_models/models/forecasting/meta/__init__.py rename to packages/openstef-metalearning/src/openstef_metalearning/models/__init__.py diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/meta/learned_weights_forecaster.py b/packages/openstef-metalearning/src/openstef_metalearning/models/learned_weights_forecaster.py similarity index 98% rename from packages/openstef-models/src/openstef_models/models/forecasting/meta/learned_weights_forecaster.py rename to packages/openstef-metalearning/src/openstef_metalearning/models/learned_weights_forecaster.py index 62d00a488..f8d22f12b 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/meta/learned_weights_forecaster.py +++ b/packages/openstef-metalearning/src/openstef_metalearning/models/learned_weights_forecaster.py @@ -21,6 +21,13 @@ NotFittedError, ) from openstef_core.types import Quantile +from openstef_metalearning.models.meta_forecaster import ( + BaseLearner, + BaseLearnerHyperParams, + FinalLearner, + MetaForecaster, + MetaHyperParams, +) from openstef_models.models.forecasting.forecaster import ( ForecasterConfig, ) @@ -28,13 +35,6 @@ GBLinearHyperParams, ) from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams -from openstef_models.models.forecasting.meta.meta_forecaster import ( - BaseLearner, - BaseLearnerHyperParams, - FinalLearner, - MetaForecaster, - MetaHyperParams, -) logger = logging.getLogger(__name__) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/meta/meta_forecaster.py b/packages/openstef-metalearning/src/openstef_metalearning/models/meta_forecaster.py similarity index 100% rename from packages/openstef-models/src/openstef_models/models/forecasting/meta/meta_forecaster.py rename to packages/openstef-metalearning/src/openstef_metalearning/models/meta_forecaster.py diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/meta/stacking_forecaster.py b/packages/openstef-metalearning/src/openstef_metalearning/models/stacking_forecaster.py similarity index 98% rename from packages/openstef-models/src/openstef_models/models/forecasting/meta/stacking_forecaster.py rename to packages/openstef-metalearning/src/openstef_metalearning/models/stacking_forecaster.py index 73debe3c7..70130ff07 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/meta/stacking_forecaster.py +++ b/packages/openstef-metalearning/src/openstef_metalearning/models/stacking_forecaster.py @@ -21,6 +21,12 @@ ) from openstef_core.mixins import HyperParams from openstef_core.types import Quantile +from openstef_metalearning.models.meta_forecaster import ( + BaseLearner, + BaseLearnerHyperParams, + FinalLearner, + MetaForecaster, +) from openstef_models.models.forecasting.forecaster import ( Forecaster, ForecasterConfig, @@ -29,12 +35,6 @@ GBLinearHyperParams, ) from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams -from openstef_models.models.forecasting.meta.meta_forecaster import ( - BaseLearner, - BaseLearnerHyperParams, - FinalLearner, - MetaForecaster, -) logger = logging.getLogger(__name__) diff --git a/packages/openstef-models/src/openstef_models/transforms/general/distribution_transform.py b/packages/openstef-models/src/openstef_models/transforms/general/distribution_transform.py new file mode 100644 index 000000000..8e93da672 --- /dev/null +++ b/packages/openstef-models/src/openstef_models/transforms/general/distribution_transform.py @@ -0,0 +1,65 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Transform for clipping feature values to observed ranges. + +This module provides functionality to clip feature values to their observed +minimum and maximum ranges during training, preventing out-of-range values +during inference and improving model robustness. +""" + +from typing import Literal, override + +import pandas as pd +from pydantic import Field, PrivateAttr + +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import TimeSeriesDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.transforms import TimeSeriesTransform +from openstef_models.utils.feature_selection import FeatureSelection + +type ClipMode = Literal["minmax", "standard"] + + +class DistributionTransform(BaseConfig, TimeSeriesTransform): + """Transform dataframe to (robust) percentage of min-max of training data. + + Useful to determine whether datadrift has occured. + Can be used as a feature for learning sample weights in meta models. + """ + + robust_threshold: float = Field( + default=2.0, + description="Percentage of observations to ignore when determing percentage. (Single sided)", + ) + + _feature_mins: pd.Series = PrivateAttr(default_factory=pd.Series) + _feature_maxs: pd.Series = PrivateAttr(default_factory=pd.Series) + _is_fitted: bool = PrivateAttr(default=False) + + @property + @override + def is_fitted(self) -> bool: + return self._is_fitted + + @override + def fit(self, data: TimeSeriesDataset) -> None: + self._feature_mins = data.data.min(axis=0) + self._feature_maxs = data.data.max(axis=0) + self._is_fitted = True + + @override + def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: + if not self._is_fitted: + raise NotFittedError(self.__class__.__name__) + + # Apply min-max scaling to each feature based on fitted min and max + transformed_data = (data.data - self._feature_mins) / (self._feature_maxs - self._feature_mins) + + return TimeSeriesDataset(data=transformed_data, sample_interval=data.sample_interval) + + @override + def features_added(self) -> list[str]: + return [] diff --git a/pyproject.toml b/pyproject.toml index 87ef62841..1ef0cea1c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,6 +42,7 @@ optional-dependencies.beam = [ ] optional-dependencies.models = [ "openstef-models[xgb-cpu]", + "openstef-metalearning", ] urls.Documentation = "https://openstef.github.io/openstef/index.html" urls.Homepage = "https://lfenergy.org/projects/openstef/" @@ -77,6 +78,7 @@ openstef-beam = { workspace = true } openstef-models = { workspace = true } openstef-docs = { workspace = true } openstef-core = { workspace = true } +openstef-metalearning = { workspace = true } microsoft-python-type-stubs = { git = "git+https://github.com/microsoft/python-type-stubs.git" } [tool.uv.workspace] @@ -85,6 +87,7 @@ members = [ "packages/openstef-beam", "docs", "packages/openstef-core", + "packages/openstef-metalearning", ] [tool.ruff] @@ -190,6 +193,7 @@ source = [ "packages/openstef-beam/src", "packages/openstef-models/src", "packages/openstef-core/src", + "packages/openstef-metalearning/src", ] omit = [ "tests/*", diff --git a/uv.lock b/uv.lock index 013babc38..520b8f9d9 100644 --- a/uv.lock +++ b/uv.lock @@ -8,6 +8,7 @@ members = [ "openstef-beam", "openstef-core", "openstef-docs", + "openstef-metalearning", "openstef-models", ] @@ -2124,6 +2125,7 @@ beam = [ { name = "openstef-beam" }, ] models = [ + { name = "openstef-metalearning" }, { name = "openstef-models", extra = ["xgb-cpu"] }, ] @@ -2155,6 +2157,7 @@ requires-dist = [ { name = "openstef-beam", extras = ["all"], marker = "extra == 'all'", editable = "packages/openstef-beam" }, { name = "openstef-core", editable = "packages/openstef-core" }, { name = "openstef-core", marker = "extra == 'all'", editable = "packages/openstef-core" }, + { name = "openstef-metalearning", marker = "extra == 'models'", editable = "packages/openstef-metalearning" }, { name = "openstef-models", extras = ["xgb-cpu"], editable = "packages/openstef-models" }, { name = "openstef-models", extras = ["xgb-cpu"], marker = "extra == 'all'", editable = "packages/openstef-models" }, { name = "openstef-models", extras = ["xgb-cpu"], marker = "extra == 'models'", editable = "packages/openstef-models" }, @@ -2276,6 +2279,21 @@ requires-dist = [ { name = "sphinx-pyproject", specifier = ">=0.3.0" }, ] +[[package]] +name = "openstef-metalearning" +version = "0.1.0" +source = { editable = "packages/openstef-metalearning" } +dependencies = [ + { name = "openstef-core" }, + { name = "openstef-models" }, +] + +[package.metadata] +requires-dist = [ + { name = "openstef-core", editable = "packages/openstef-core" }, + { name = "openstef-models", editable = "packages/openstef-models" }, +] + [[package]] name = "openstef-models" version = "0.0.0" From 3338be1b40d7e4b8720ab06240c269ef601db7cf Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 24 Nov 2025 21:05:22 +0100 Subject: [PATCH 030/104] ResidualForecaster + refactoring Signed-off-by: Lars van Someren --- .../README.md | 0 .../pyproject.toml | 4 +- .../src/openstef_meta}/__init__.py | 0 .../src/openstef_meta/framework}/__init__.py | 8 +- .../openstef_meta/framework/base_learner.py | 26 ++ .../openstef_meta/framework/final_learner.py | 54 +++ .../framework}/meta_forecaster.py | 103 ++---- .../src/openstef_meta/models/__init__.py | 24 ++ .../models/learned_weights_forecaster.py | 109 ++++--- .../models/residual_forecaster.py | 235 +++++++++++++ .../models/stacking_forecaster.py | 16 +- .../src/openstef_meta/utils}/__init__.py | 0 .../src/openstef_meta/utils/pinball_errors.py | 22 ++ .../openstef-meta/tests/models/__init__.py | 0 .../openstef-meta/tests/models/conftest.py | 59 ++++ .../test_learned_weights_forecaster.py | 2 +- .../tests/models/test_residual_forecaster.py} | 77 +++-- .../tests/models}/test_stacking_forecaster.py | 2 +- .../models/forecasting/hybrid_forecaster.py | 308 ------------------ pyproject.toml | 8 +- uv.lock | 12 +- 21 files changed, 600 insertions(+), 469 deletions(-) rename packages/{openstef-metalearning => openstef-meta}/README.md (100%) rename packages/{openstef-metalearning => openstef-meta}/pyproject.toml (80%) rename packages/{openstef-metalearning/src/openstef_metalearning => openstef-meta/src/openstef_meta}/__init__.py (100%) rename packages/{openstef-metalearning/src/openstef_metalearning/models => openstef-meta/src/openstef_meta/framework}/__init__.py (55%) create mode 100644 packages/openstef-meta/src/openstef_meta/framework/base_learner.py create mode 100644 packages/openstef-meta/src/openstef_meta/framework/final_learner.py rename packages/{openstef-metalearning/src/openstef_metalearning/models => openstef-meta/src/openstef_meta/framework}/meta_forecaster.py (64%) create mode 100644 packages/openstef-meta/src/openstef_meta/models/__init__.py rename packages/{openstef-metalearning/src/openstef_metalearning => openstef-meta/src/openstef_meta}/models/learned_weights_forecaster.py (66%) create mode 100644 packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py rename packages/{openstef-metalearning/src/openstef_metalearning => openstef-meta/src/openstef_meta}/models/stacking_forecaster.py (92%) rename packages/{openstef-models/tests/unit/models/forecasting/meta => openstef-meta/src/openstef_meta/utils}/__init__.py (100%) create mode 100644 packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py create mode 100644 packages/openstef-meta/tests/models/__init__.py create mode 100644 packages/openstef-meta/tests/models/conftest.py rename packages/{openstef-models/tests/unit/models/forecasting/meta => openstef-meta/tests/models}/test_learned_weights_forecaster.py (98%) rename packages/{openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py => openstef-meta/tests/models/test_residual_forecaster.py} (56%) rename packages/{openstef-models/tests/unit/models/forecasting/meta => openstef-meta/tests/models}/test_stacking_forecaster.py (98%) delete mode 100644 packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py diff --git a/packages/openstef-metalearning/README.md b/packages/openstef-meta/README.md similarity index 100% rename from packages/openstef-metalearning/README.md rename to packages/openstef-meta/README.md diff --git a/packages/openstef-metalearning/pyproject.toml b/packages/openstef-meta/pyproject.toml similarity index 80% rename from packages/openstef-metalearning/pyproject.toml rename to packages/openstef-meta/pyproject.toml index 31352ea30..a91b25359 100644 --- a/packages/openstef-metalearning/pyproject.toml +++ b/packages/openstef-meta/pyproject.toml @@ -1,5 +1,5 @@ [project] -name = "openstef-metalearning" +name = "openstef-meta" version = "0.1.0" description = "Add your description here" readme = "README.md" @@ -12,4 +12,4 @@ openstef-core = { workspace = true } [tool.hatch.build.targets.wheel] -packages = ["src/openstef_metalearning"] +packages = ["src/openstef_meta"] diff --git a/packages/openstef-metalearning/src/openstef_metalearning/__init__.py b/packages/openstef-meta/src/openstef_meta/__init__.py similarity index 100% rename from packages/openstef-metalearning/src/openstef_metalearning/__init__.py rename to packages/openstef-meta/src/openstef_meta/__init__.py diff --git a/packages/openstef-metalearning/src/openstef_metalearning/models/__init__.py b/packages/openstef-meta/src/openstef_meta/framework/__init__.py similarity index 55% rename from packages/openstef-metalearning/src/openstef_metalearning/models/__init__.py rename to packages/openstef-meta/src/openstef_meta/framework/__init__.py index 996e37d1a..e64377d16 100644 --- a/packages/openstef-metalearning/src/openstef_metalearning/models/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/framework/__init__.py @@ -3,10 +3,14 @@ # SPDX-License-Identifier: MPL-2.0 """This module provides meta-forecasting models and related hyperparameters for the OpenSTEF project.""" -from .meta_forecaster import FinalLearner, MetaForecaster, MetaHyperParams +from .base_learner import BaseLearner, BaseLearnerHyperParams +from .final_learner import FinalLearner, FinalLearnerHyperParams +from .meta_forecaster import MetaForecaster __all__ = [ + "BaseLearner", + "BaseLearnerHyperParams", "FinalLearner", + "FinalLearnerHyperParams", "MetaForecaster", - "MetaHyperParams", ] diff --git a/packages/openstef-meta/src/openstef_meta/framework/base_learner.py b/packages/openstef-meta/src/openstef_meta/framework/base_learner.py new file mode 100644 index 000000000..36688b419 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/framework/base_learner.py @@ -0,0 +1,26 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Core meta model interfaces and configurations. + +Provides the fundamental building blocks for implementing meta models in OpenSTEF. +These mixins establish contracts that ensure consistent behavior across different meta model types +while ensuring full compatability with regular Forecasters. +""" + +from openstef_models.models.forecasting.gblinear_forecaster import ( + GBLinearForecaster, + GBLinearHyperParams, +) +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMHyperParams +from openstef_models.models.forecasting.lgbmlinear_forecaster import ( + LGBMLinearForecaster, + LGBMLinearHyperParams, +) +from openstef_models.models.forecasting.xgboost_forecaster import ( + XGBoostForecaster, + XGBoostHyperParams, +) + +BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster +BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py new file mode 100644 index 000000000..26567e156 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py @@ -0,0 +1,54 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Core meta model interfaces and configurations. + +Provides the fundamental building blocks for implementing meta models in OpenSTEF. +These mixins establish contracts that ensure consistent behavior across different meta model types +while ensuring full compatability with regular Forecasters. +""" + +from abc import ABC, abstractmethod + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.mixins import HyperParams +from openstef_core.types import Quantile + + +class FinalLearnerHyperParams(HyperParams): + """Hyperparameters for the Final Learner.""" + + +class FinalLearnerConfig: + """Configuration for the Final Learner.""" + + +class FinalLearner(ABC): + """Combines base learner predictions for each quantile into final predictions.""" + + @abstractmethod + def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + """Fit the final learner using base learner predictions. + + Args: + base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner + """ + raise NotImplementedError("Subclasses must implement the fit method.") + + def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + """Generate final predictions based on base learner predictions. + + Args: + base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner + predictions. + + Returns: + ForecastDataset containing the final predictions. + """ + raise NotImplementedError("Subclasses must implement the predict method.") + + @property + @abstractmethod + def is_fitted(self) -> bool: + """Indicates whether the final learner has been fitted.""" + raise NotImplementedError("Subclasses must implement the is_fitted property.") diff --git a/packages/openstef-metalearning/src/openstef_metalearning/models/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py similarity index 64% rename from packages/openstef-metalearning/src/openstef_metalearning/models/meta_forecaster.py rename to packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py index 07b58501f..5b69e3153 100644 --- a/packages/openstef-metalearning/src/openstef_metalearning/models/meta_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py @@ -9,110 +9,45 @@ """ import logging -from abc import abstractmethod from typing import override import pandas as pd -from pydantic import field_validator from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( NotFittedError, ) -from openstef_core.mixins import HyperParams from openstef_core.types import Quantile +from openstef_meta.framework.base_learner import ( + BaseLearner, + BaseLearnerHyperParams, +) +from openstef_meta.framework.final_learner import FinalLearner from openstef_models.models.forecasting.forecaster import ( Forecaster, ForecasterConfig, ) -from openstef_models.models.forecasting.gblinear_forecaster import ( - GBLinearForecaster, - GBLinearForecasterConfig, - GBLinearHyperParams, -) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMForecasterConfig, LGBMHyperParams -from openstef_models.models.forecasting.lgbmlinear_forecaster import ( - LGBMLinearForecaster, - LGBMLinearForecasterConfig, - LGBMLinearHyperParams, -) -from openstef_models.models.forecasting.xgboost_forecaster import ( - XGBoostForecaster, - XGBoostForecasterConfig, - XGBoostHyperParams, -) logger = logging.getLogger(__name__) -BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster -BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams -BaseLearnerConfig = ( - LGBMForecasterConfig | LGBMLinearForecasterConfig | XGBoostForecasterConfig | GBLinearForecasterConfig -) - - -class FinalLearner: - """Combines base learner predictions for each quantile into final predictions.""" - - @abstractmethod - def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: - """Fit the final learner using base learner predictions. - - Args: - base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner - """ - raise NotImplementedError("Subclasses must implement the fit method.") - - def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: - """Generate final predictions based on base learner predictions. - - Args: - base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner - predictions. - - Returns: - ForecastDataset containing the final predictions. - """ - raise NotImplementedError("Subclasses must implement the predict method.") - - @property - @abstractmethod - def is_fitted(self) -> bool: - """Indicates whether the final learner has been fitted.""" - raise NotImplementedError("Subclasses must implement the is_fitted property.") - - -class MetaHyperParams(HyperParams): - """Hyperparameters for Stacked LGBM GBLinear Regressor.""" - - base_hyperparams: list[BaseLearnerHyperParams] - - @field_validator("base_hyperparams", mode="after") - @classmethod - def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: - hp_classes = [type(hp) for hp in v] - if not len(hp_classes) == len(set(hp_classes)): - raise ValueError("Duplicate base learner hyperparameter classes are not allowed.") - return v - - class MetaForecaster(Forecaster): - """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" + """Abstract class for Meta forecasters combining multiple models.""" _config: ForecasterConfig - _base_learners: list[BaseLearner] - _final_learner: FinalLearner - def _init_base_learners(self, base_hyperparams: list[BaseLearnerHyperParams]) -> list[BaseLearner]: + @staticmethod + def _init_base_learners( + config: ForecasterConfig, base_hyperparams: list[BaseLearnerHyperParams] + ) -> list[BaseLearner]: """Initialize base learners based on provided hyperparameters. Returns: list[Forecaster]: List of initialized base learner forecasters. """ base_learners: list[BaseLearner] = [] - horizons = self.config.horizons - quantiles = self.config.quantiles + horizons = config.horizons + quantiles = config.quantiles for hyperparams in base_hyperparams: forecaster_cls = hyperparams.forecaster_class() @@ -124,6 +59,19 @@ def _init_base_learners(self, base_hyperparams: list[BaseLearnerHyperParams]) -> return base_learners + @property + @override + def config(self) -> ForecasterConfig: + return self._config + + +class EnsembleForecaster(MetaForecaster): + """Abstract class for Meta forecasters combining multiple base learners and a final learner.""" + + _config: ForecasterConfig + _base_learners: list[BaseLearner] + _final_learner: FinalLearner + @property @override def is_fitted(self) -> bool: @@ -233,7 +181,6 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: __all__ = [ "BaseLearner", - "BaseLearnerConfig", "BaseLearnerHyperParams", "FinalLearner", "MetaForecaster", diff --git a/packages/openstef-meta/src/openstef_meta/models/__init__.py b/packages/openstef-meta/src/openstef_meta/models/__init__.py new file mode 100644 index 000000000..614543150 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/models/__init__.py @@ -0,0 +1,24 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""This module provides meta-forecasting models and related hyperparameters for the OpenSTEF project.""" + +from .learned_weights_forecaster import ( + LearnedWeightsForecaster, + LearnedWeightsForecasterConfig, + LearnedWeightsHyperParams, +) +from .residual_forecaster import ResidualForecaster, ResidualForecasterConfig, ResidualHyperParams +from .stacking_forecaster import StackingForecaster, StackingForecasterConfig, StackingHyperParams + +__all__ = [ + "LearnedWeightsForecaster", + "LearnedWeightsForecasterConfig", + "LearnedWeightsHyperParams", + "ResidualForecaster", + "ResidualForecasterConfig", + "ResidualHyperParams", + "StackingForecaster", + "StackingForecasterConfig", + "StackingHyperParams", +] diff --git a/packages/openstef-metalearning/src/openstef_metalearning/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py similarity index 66% rename from packages/openstef-metalearning/src/openstef_metalearning/models/learned_weights_forecaster.py rename to packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py index f8d22f12b..d606a79f5 100644 --- a/packages/openstef-metalearning/src/openstef_metalearning/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py @@ -10,24 +10,30 @@ """ import logging +from abc import abstractmethod from typing import override import pandas as pd from lightgbm import LGBMClassifier from pydantic import Field +from sklearn.linear_model import LinearRegression +from xgboost import XGBClassifier from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( NotFittedError, ) +from openstef_core.mixins import HyperParams from openstef_core.types import Quantile -from openstef_metalearning.models.meta_forecaster import ( +from openstef_meta.framework.base_learner import ( BaseLearner, BaseLearnerHyperParams, - FinalLearner, - MetaForecaster, - MetaHyperParams, ) +from openstef_meta.framework.final_learner import FinalLearner +from openstef_meta.framework.meta_forecaster import ( + EnsembleForecaster, +) +from openstef_meta.utils.pinball_errors import calculate_pinball_errors from openstef_models.models.forecasting.forecaster import ( ForecasterConfig, ) @@ -39,28 +45,16 @@ logger = logging.getLogger(__name__) -def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, alpha: float) -> pd.Series: - """Calculate pinball loss for given true and predicted values. - - Args: - y_true: True values as a pandas Series. - y_pred: Predicted values as a pandas Series. - alpha: Quantile value. - - Returns: - A pandas Series containing the pinball loss for each sample. - """ - diff = y_true - y_pred - sign = (diff >= 0).astype(float) - return alpha * sign * diff - (1 - alpha) * (1 - sign) * diff +Classifier = LGBMClassifier | XGBClassifier | LinearRegression class LearnedWeightsFinalLearner(FinalLearner): """Combines base learner predictions with a classification approach to determine which base learner to use.""" + @abstractmethod def __init__(self, quantiles: list[Quantile]) -> None: self.quantiles = quantiles - self.models = [LGBMClassifier(class_weight="balanced", n_estimators=20) for _ in quantiles] + self.models: list[Classifier] = [] self._is_fitted = False @override @@ -125,7 +119,43 @@ def is_fitted(self) -> bool: return self._is_fitted -class LearnedWeightsHyperParams(MetaHyperParams): +class LGBMFinalLearner(LearnedWeightsFinalLearner): + """Final learner using only LGBM as base learners.""" + + def __init__(self, quantiles: list[Quantile], n_estimators: int = 20) -> None: + self.quantiles = quantiles + self.models = [LGBMClassifier(class_weight="balanced", n_estimators=n_estimators) for _ in quantiles] + self._is_fitted = False + + +class RandomForestFinalLearner(LearnedWeightsFinalLearner): + def __init__(self, quantiles: list[Quantile], n_estimators: int = 20) -> None: + self.quantiles = quantiles + self.models = [ + LGBMClassifier(boosting_type="rf", class_weight="balanced", n_estimators=n_estimators) for _ in quantiles + ] + self._is_fitted = False + + +class XGBFinalLearner(LearnedWeightsFinalLearner): + """Final learner using only XGBoost as base learners.""" + + def __init__(self, quantiles: list[Quantile], n_estimators: int = 20) -> None: + self.quantiles = quantiles + self.models = [XGBClassifier(class_weight="balanced", n_estimators=n_estimators) for _ in quantiles] + self._is_fitted = False + + +class LogisticRegressionFinalLearner(LearnedWeightsFinalLearner): + """Final learner using only Logistic Regression as base learners.""" + + def __init__(self, quantiles: list[Quantile]) -> None: + self.quantiles = quantiles + self.models = [LinearRegression() for _ in quantiles] + self._is_fitted = False + + +class LearnedWeightsHyperParams(HyperParams): """Hyperparameters for Stacked LGBM GBLinear Regressor.""" base_hyperparams: list[BaseLearnerHyperParams] = Field( @@ -134,20 +164,9 @@ class LearnedWeightsHyperParams(MetaHyperParams): "Defaults to [LGBMHyperParams, GBLinearHyperParams].", ) - final_hyperparams: BaseLearnerHyperParams = Field( - default=GBLinearHyperParams(), - description="Hyperparameters for the final learner. Defaults to GBLinearHyperParams.", - ) - - use_classifier: bool = Field( - default=True, - description="Whether to use sample weights when fitting base and final learners. Defaults to False.", - ) - - add_rolling_accuracy_features: bool = Field( - default=False, - description="Whether to add rolling accuracy features from base learners as additional features " - "to the final learner. Defaults to False.", + final_learner: type[LearnedWeightsFinalLearner] = Field( + default=LGBMFinalLearner, + description="Type of final learner to use. Defaults to LearnedWeightsFinalLearner.", ) @@ -162,7 +181,7 @@ class LearnedWeightsForecasterConfig(ForecasterConfig): ) -class LearnedWeightsForecaster(MetaForecaster): +class LearnedWeightsForecaster(EnsembleForecaster): """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" Config = LearnedWeightsForecasterConfig @@ -173,11 +192,17 @@ def __init__(self, config: LearnedWeightsForecasterConfig) -> None: self._config = config self._base_learners: list[BaseLearner] = self._init_base_learners( - base_hyperparams=config.hyperparams.base_hyperparams + config=config, base_hyperparams=config.hyperparams.base_hyperparams ) - self._final_learner = LearnedWeightsFinalLearner(quantiles=config.quantiles) - - # TODO(@Lars800): #745: Make forecaster Explainable - - -__all__ = ["LearnedWeightsForecaster", "LearnedWeightsForecasterConfig", "LearnedWeightsHyperParams"] + self._final_learner = config.hyperparams.final_learner(quantiles=config.quantiles) + + +__all__ = [ + "LGBMFinalLearner", + "LearnedWeightsForecaster", + "LearnedWeightsForecasterConfig", + "LearnedWeightsHyperParams", + "LogisticRegressionFinalLearner", + "RandomForestFinalLearner", + "XGBFinalLearner", +] diff --git a/packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py new file mode 100644 index 000000000..4c0de156b --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py @@ -0,0 +1,235 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Hybrid Forecaster (Stacked LightGBM + Linear Model Gradient Boosting). + +Provides method that attempts to combine the advantages of a linear model (Extraplolation) +and tree-based model (Non-linear patterns). This is acieved by training two base learners, +followed by a small linear model that regresses on the baselearners' predictions. +The implementation is based on sklearn's ResidualRegressor. +""" + +import logging +from typing import override + +import pandas as pd +from pydantic import Field + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import ( + NotFittedError, +) +from openstef_core.mixins import HyperParams +from openstef_core.types import Quantile +from openstef_meta.framework.base_learner import ( + BaseLearner, + BaseLearnerHyperParams, +) +from openstef_meta.framework.meta_forecaster import ( + MetaForecaster, +) +from openstef_models.models.forecasting.forecaster import ( + ForecasterConfig, +) +from openstef_models.models.forecasting.gblinear_forecaster import ( + GBLinearHyperParams, +) +from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams + +logger = logging.getLogger(__name__) + + +class ResidualHyperParams(HyperParams): + """Hyperparameters for Stacked LGBM GBLinear Regressor.""" + + primary_hyperparams: BaseLearnerHyperParams = Field( + default=GBLinearHyperParams(), + description="Primary model hyperparams. Defaults to GBLinearHyperParams.", + ) + + secondary_hyperparams: BaseLearnerHyperParams = Field( + default=LGBMHyperParams(), + description="Hyperparameters for the final learner. Defaults to LGBMHyperparams.", + ) + + +class ResidualForecasterConfig(ForecasterConfig): + """Configuration for Hybrid-based forecasting models.""" + + hyperparams: ResidualHyperParams = ResidualHyperParams() + + verbosity: bool = Field( + default=True, + description="Enable verbose output from the Hybrid model (True/False).", + ) + + +class ResidualForecaster(MetaForecaster): + """MetaForecaster that implements residual modeling. + + It takes in a primary forecaster and a residual forecaster. The primary forecaster makes initial predictions, + and the residual forecaster models the residuals (errors) of the primary forecaster to improve overall accuracy. + """ + + Config = ResidualForecasterConfig + HyperParams = ResidualHyperParams + + def __init__(self, config: ResidualForecasterConfig) -> None: + """Initialize the Hybrid forecaster.""" + self._config = config + + self._primary_model: BaseLearner = self._init_base_learners( + config=config, base_hyperparams=[config.hyperparams.primary_hyperparams] + )[0] + + self._secondary_model: list[BaseLearner] = self._init_secondary_model( + hyperparams=config.hyperparams.secondary_hyperparams + ) + self._is_fitted = False + + def _init_secondary_model(self, hyperparams: BaseLearnerHyperParams) -> list[BaseLearner]: + """Initialize secondary model for residual forecasting. + + Returns: + list[Forecaster]: List containing the initialized secondary model forecaster. + """ + models: list[BaseLearner] = [] + + for q in self.config.quantiles: + config = self._config.model_copy(update={"quantiles": [q]}) + secondary_model = self._init_base_learners(config=config, base_hyperparams=[hyperparams])[0] + models.append(secondary_model) + + return models + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + """Fit the Hybrid model to the training data. + + Args: + data: Training data in the expected ForecastInputDataset format. + data_val: Validation data for tuning the model (optional, not used in this implementation). + + """ + # Fit primary model + self._primary_model.fit(data=data, data_val=data_val) + + # Reset forecast start date to ensure we predict on the full dataset + full_dataset = ForecastInputDataset( + data=data.data, + sample_interval=data.sample_interval, + target_column=data.target_column, + forecast_start=data.index[0], + ) + + secondary_input = self._prepare_secondary_input( + quantiles=self.config.quantiles, + base_predictions=self._primary_model.predict(data=full_dataset), + data=data, + ) + # Predict primary model on validation data if provided + if data_val is not None: + full_val_dataset = ForecastInputDataset( + data=data_val.data, + sample_interval=data_val.sample_interval, + target_column=data_val.target_column, + forecast_start=data_val.index[0], + ) + + secondary_val_input = self._prepare_secondary_input( + quantiles=self.config.quantiles, + base_predictions=self._primary_model.predict(data=full_val_dataset), + data=data_val, + ) + # Fit secondary model on residuals + [ + self._secondary_model[i].fit(data=secondary_input[q], data_val=secondary_val_input[q]) + for i, q in enumerate(secondary_input) + ] + + else: + # Fit secondary model on residuals + [ + self._secondary_model[i].fit(data=secondary_input[q], data_val=None) + for i, q in enumerate(secondary_input) + ] + + self._is_fitted = True + + @property + @override + def is_fitted(self) -> bool: + """Check the ResidualForecaster is fitted.""" + return self._is_fitted + + @staticmethod + def _prepare_secondary_input( + quantiles: list[Quantile], + base_predictions: ForecastDataset, + data: ForecastInputDataset, + ) -> dict[Quantile, ForecastInputDataset]: + """Adjust target series to be residuals for secondary model training. + + Args: + quantiles: List of quantiles to prepare data for. + base_predictions: Predictions from the primary model. + data: Original input data. + + Returns: + dict[Quantile, ForecastInputDataset]: Prepared datasets for each quantile. + """ + predictions_quantiles: dict[Quantile, ForecastInputDataset] = {} + sample_interval = data.sample_interval + for q in quantiles: + predictions = base_predictions.data[q.format()] + df = data.data.copy() + df[data.target_column] = data.target_series - predictions + predictions_quantiles[q] = ForecastInputDataset( + data=df, + sample_interval=sample_interval, + target_column=data.target_column, + forecast_start=df.index[0], + ) + + return predictions_quantiles + + def _predict_secodary_model(self, data: ForecastInputDataset) -> ForecastDataset: + predictions: dict[str, pd.Series] = {} + for model in self._secondary_model: + pred = model.predict(data=data) + q = model.config.quantiles[0].format() + predictions[q] = pred.data[q] + + return ForecastDataset( + data=pd.DataFrame(predictions), + sample_interval=data.sample_interval, + ) + + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + """Generate predictions using the ResidualForecaster model. + + Args: + data: Input data for prediction. + + Returns: + ForecastDataset containing the predictions. + + Raises: + NotFittedError: If the ResidualForecaster instance is not fitted yet. + """ + if not self.is_fitted: + raise NotFittedError("The ResidualForecaster instance is not fitted yet. Call 'fit' first.") + + primary_predictions = self._primary_model.predict(data=data).data + + secondary_predictions = self._predict_secodary_model(data=data).data + + final_predictions = primary_predictions + secondary_predictions + + return ForecastDataset( + data=final_predictions, + sample_interval=data.sample_interval, + ) + + +__all__ = ["ResidualForecaster", "ResidualForecasterConfig", "ResidualHyperParams"] diff --git a/packages/openstef-metalearning/src/openstef_metalearning/models/stacking_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py similarity index 92% rename from packages/openstef-metalearning/src/openstef_metalearning/models/stacking_forecaster.py rename to packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py index 70130ff07..f48ae7988 100644 --- a/packages/openstef-metalearning/src/openstef_metalearning/models/stacking_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py @@ -21,11 +21,13 @@ ) from openstef_core.mixins import HyperParams from openstef_core.types import Quantile -from openstef_metalearning.models.meta_forecaster import ( +from openstef_meta.framework.base_learner import ( BaseLearner, BaseLearnerHyperParams, - FinalLearner, - MetaForecaster, +) +from openstef_meta.framework.final_learner import FinalLearner +from openstef_meta.framework.meta_forecaster import ( + EnsembleForecaster, ) from openstef_models.models.forecasting.forecaster import ( Forecaster, @@ -140,7 +142,7 @@ class StackingForecasterConfig(ForecasterConfig): ) -class StackingForecaster(MetaForecaster): +class StackingForecaster(EnsembleForecaster): """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" Config = StackingForecasterConfig @@ -151,10 +153,12 @@ def __init__(self, config: StackingForecasterConfig) -> None: self._config = config self._base_learners: list[BaseLearner] = self._init_base_learners( - base_hyperparams=config.hyperparams.base_hyperparams + config=config, base_hyperparams=config.hyperparams.base_hyperparams ) - final_forecaster = self._init_base_learners(base_hyperparams=[config.hyperparams.final_hyperparams])[0] + final_forecaster = self._init_base_learners( + config=config, base_hyperparams=[config.hyperparams.final_hyperparams] + )[0] self._final_learner = StackingFinalLearner(forecaster=final_forecaster) diff --git a/packages/openstef-models/tests/unit/models/forecasting/meta/__init__.py b/packages/openstef-meta/src/openstef_meta/utils/__init__.py similarity index 100% rename from packages/openstef-models/tests/unit/models/forecasting/meta/__init__.py rename to packages/openstef-meta/src/openstef_meta/utils/__init__.py diff --git a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py new file mode 100644 index 000000000..b11d4c7b8 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py @@ -0,0 +1,22 @@ +"""Utility functions for calculating pinball loss errors. + +This module provides a function to compute the pinball loss for quantile regression. +""" + +import pandas as pd + + +def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, alpha: float) -> pd.Series: + """Calculate pinball loss for given true and predicted values. + + Args: + y_true: True values as a pandas Series. + y_pred: Predicted values as a pandas Series. + alpha: Quantile value. + + Returns: + A pandas Series containing the pinball loss for each sample. + """ + diff = y_true - y_pred + sign = (diff >= 0).astype(float) + return alpha * sign * diff - (1 - alpha) * (1 - sign) * diff diff --git a/packages/openstef-meta/tests/models/__init__.py b/packages/openstef-meta/tests/models/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-meta/tests/models/conftest.py b/packages/openstef-meta/tests/models/conftest.py new file mode 100644 index 000000000..968e68d8c --- /dev/null +++ b/packages/openstef-meta/tests/models/conftest.py @@ -0,0 +1,59 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import datetime, timedelta + +import numpy as np +import pandas as pd +import pytest + +from openstef_core.datasets import ForecastInputDataset + + +@pytest.fixture +def sample_forecast_input_dataset() -> ForecastInputDataset: + """Create sample input dataset for forecaster training and prediction.""" + rng = np.random.default_rng(42) + num_samples = 14 + start_date = datetime.fromisoformat("2025-01-01T00:00:00") + + feature_1 = rng.normal(loc=0, scale=1, size=num_samples) + feature_2 = rng.normal(loc=0, scale=1, size=num_samples) + feature_3 = rng.uniform(low=-1, high=1, size=num_samples) + + return ForecastInputDataset( + data=pd.DataFrame( + { + "load": (feature_1 + feature_2 + feature_3) / 3, + "feature1": feature_1, + "feature2": feature_2, + "feature3": feature_3, + }, + index=pd.date_range(start=start_date, periods=num_samples, freq="1d"), + ), + sample_interval=timedelta(days=1), + target_column="load", + forecast_start=start_date + timedelta(days=num_samples // 2), + ) + + +@pytest.fixture +def sample_dataset_with_weights(sample_forecast_input_dataset: ForecastInputDataset) -> ForecastInputDataset: + """Create sample dataset with sample weights by adding weights to the base dataset.""" + rng = np.random.default_rng(42) + num_samples = len(sample_forecast_input_dataset.data) + + # Create varied sample weights (some high, some low) + sample_weights = rng.uniform(low=0.1, high=2.0, size=num_samples) + + # Add sample weights to existing data + data_with_weights = sample_forecast_input_dataset.data.copy() + data_with_weights["sample_weight"] = sample_weights + + return ForecastInputDataset( + data=data_with_weights, + sample_interval=sample_forecast_input_dataset.sample_interval, + target_column=sample_forecast_input_dataset.target_column, + forecast_start=sample_forecast_input_dataset.forecast_start, + ) diff --git a/packages/openstef-models/tests/unit/models/forecasting/meta/test_learned_weights_forecaster.py b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py similarity index 98% rename from packages/openstef-models/tests/unit/models/forecasting/meta/test_learned_weights_forecaster.py rename to packages/openstef-meta/tests/models/test_learned_weights_forecaster.py index f227d1977..4abdcee3c 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/meta/test_learned_weights_forecaster.py +++ b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py @@ -9,7 +9,7 @@ from openstef_core.datasets import ForecastInputDataset from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q -from openstef_models.models.forecasting.meta.learned_weights_forecaster import ( +from openstef_meta.models.learned_weights_forecaster import ( LearnedWeightsForecaster, LearnedWeightsForecasterConfig, LearnedWeightsHyperParams, diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py b/packages/openstef-meta/tests/models/test_residual_forecaster.py similarity index 56% rename from packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py rename to packages/openstef-meta/tests/models/test_residual_forecaster.py index 4e36e125d..eba0d8d2a 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_hybrid_forecaster.py +++ b/packages/openstef-meta/tests/models/test_residual_forecaster.py @@ -9,19 +9,56 @@ from openstef_core.datasets import ForecastInputDataset from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q -from openstef_models.models.forecasting.hybrid_forecaster import ( - HybridForecaster, - HybridForecasterConfig, - HybridHyperParams, +from openstef_meta.framework.base_learner import BaseLearnerHyperParams +from openstef_meta.models.residual_forecaster import ( + ResidualForecaster, + ResidualForecasterConfig, + ResidualHyperParams, ) +from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams +from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearHyperParams +from openstef_models.models.forecasting.xgboost_forecaster import XGBoostHyperParams + + +@pytest.fixture(params=["gblinear", "lgbmlinear"]) +def primary_model(request: pytest.FixtureRequest) -> BaseLearnerHyperParams: + """Fixture to provide different primary models types.""" + learner_type = request.param + if learner_type == "gblinear": + return GBLinearHyperParams() + if learner_type == "lgbm": + return LGBMHyperParams() + if learner_type == "lgbmlinear": + return LGBMLinearHyperParams() + return XGBoostHyperParams() + + +@pytest.fixture(params=["gblinear", "lgbm", "lgbmlinear", "xgboost"]) +def secondary_model(request: pytest.FixtureRequest) -> BaseLearnerHyperParams: + """Fixture to provide different secondary models types.""" + learner_type = request.param + if learner_type == "gblinear": + return GBLinearHyperParams() + if learner_type == "lgbm": + return LGBMHyperParams() + if learner_type == "lgbmlinear": + return LGBMLinearHyperParams() + return XGBoostHyperParams() @pytest.fixture -def base_config() -> HybridForecasterConfig: - """Base configuration for Hybrid forecaster tests.""" - - params = HybridHyperParams() - return HybridForecasterConfig( +def base_config( + primary_model: BaseLearnerHyperParams, + secondary_model: BaseLearnerHyperParams, +) -> ResidualForecasterConfig: + """Base configuration for Residual forecaster tests.""" + + params = ResidualHyperParams( + primary_hyperparams=primary_model, + secondary_hyperparams=secondary_model, + ) + return ResidualForecasterConfig( quantiles=[Q(0.1), Q(0.5), Q(0.9)], horizons=[LeadTime(timedelta(days=1))], hyperparams=params, @@ -29,14 +66,14 @@ def base_config() -> HybridForecasterConfig: ) -def test_hybrid_forecaster_fit_predict( +def test_residual_forecaster_fit_predict( sample_forecast_input_dataset: ForecastInputDataset, - base_config: HybridForecasterConfig, + base_config: ResidualForecasterConfig, ): """Test basic fit and predict workflow with comprehensive output validation.""" # Arrange expected_quantiles = base_config.quantiles - forecaster = HybridForecaster(config=base_config) + forecaster = ResidualForecaster(config=base_config) # Act forecaster.fit(sample_forecast_input_dataset) @@ -56,26 +93,26 @@ def test_hybrid_forecaster_fit_predict( assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" -def test_hybrid_forecaster_predict_not_fitted_raises_error( +def test_residual_forecaster_predict_not_fitted_raises_error( sample_forecast_input_dataset: ForecastInputDataset, - base_config: HybridForecasterConfig, + base_config: ResidualForecasterConfig, ): """Test that predict() raises NotFittedError when called before fit().""" # Arrange - forecaster = HybridForecaster(config=base_config) + forecaster = ResidualForecaster(config=base_config) # Act & Assert - with pytest.raises(NotFittedError, match="HybridForecaster"): + with pytest.raises(NotFittedError, match="ResidualForecaster"): forecaster.predict(sample_forecast_input_dataset) -def test_hybrid_forecaster_with_sample_weights( +def test_residual_forecaster_with_sample_weights( sample_dataset_with_weights: ForecastInputDataset, - base_config: HybridForecasterConfig, + base_config: ResidualForecasterConfig, ): """Test that forecaster works with sample weights and produces different results.""" # Arrange - forecaster_with_weights = HybridForecaster(config=base_config) + forecaster_with_weights = ResidualForecaster(config=base_config) # Create dataset without weights for comparison data_without_weights = ForecastInputDataset( @@ -84,7 +121,7 @@ def test_hybrid_forecaster_with_sample_weights( target_column=sample_dataset_with_weights.target_column, forecast_start=sample_dataset_with_weights.forecast_start, ) - forecaster_without_weights = HybridForecaster(config=base_config) + forecaster_without_weights = ResidualForecaster(config=base_config) # Act forecaster_with_weights.fit(sample_dataset_with_weights) diff --git a/packages/openstef-models/tests/unit/models/forecasting/meta/test_stacking_forecaster.py b/packages/openstef-meta/tests/models/test_stacking_forecaster.py similarity index 98% rename from packages/openstef-models/tests/unit/models/forecasting/meta/test_stacking_forecaster.py rename to packages/openstef-meta/tests/models/test_stacking_forecaster.py index 416f36ab9..e8543f055 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/meta/test_stacking_forecaster.py +++ b/packages/openstef-meta/tests/models/test_stacking_forecaster.py @@ -9,7 +9,7 @@ from openstef_core.datasets import ForecastInputDataset from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q -from openstef_models.models.forecasting.meta.stacking_forecaster import ( +from openstef_meta.models.stacking_forecaster import ( StackingForecaster, StackingForecasterConfig, StackingHyperParams, diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py deleted file mode 100644 index 2b4b72573..000000000 --- a/packages/openstef-models/src/openstef_models/models/forecasting/hybrid_forecaster.py +++ /dev/null @@ -1,308 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Hybrid Forecaster (Stacked LightGBM + Linear Model Gradient Boosting). - -Provides method that attempts to combine the advantages of a linear model (Extraplolation) -and tree-based model (Non-linear patterns). This is acieved by training two base learners, -followed by a small linear model that regresses on the baselearners' predictions. -The implementation is based on sklearn's StackingRegressor. -""" - -import logging -from abc import abstractmethod -from typing import override - -import pandas as pd -from pydantic import Field, field_validator - -from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_core.exceptions import ( - NotFittedError, -) -from openstef_core.mixins import HyperParams -from openstef_core.types import Quantile -from openstef_models.models.forecasting.forecaster import ( - Forecaster, - ForecasterConfig, -) -from openstef_models.models.forecasting.gblinear_forecaster import ( - GBLinearForecaster, - GBLinearForecasterConfig, - GBLinearHyperParams, -) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMForecasterConfig, LGBMHyperParams -from openstef_models.models.forecasting.lgbmlinear_forecaster import ( - LGBMLinearForecaster, - LGBMLinearForecasterConfig, - LGBMLinearHyperParams, -) -from openstef_models.models.forecasting.xgboost_forecaster import ( - XGBoostForecaster, - XGBoostForecasterConfig, - XGBoostHyperParams, -) - -logger = logging.getLogger(__name__) - - -BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster -BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams -BaseLearnerConfig = ( - LGBMForecasterConfig | LGBMLinearForecasterConfig | XGBoostForecasterConfig | GBLinearForecasterConfig -) - - -class FinalLearner: - """Combines base learner predictions for each quantile into final predictions.""" - - @abstractmethod - def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: - raise NotImplementedError("Subclasses must implement the fit method.") - - def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: - raise NotImplementedError("Subclasses must implement the predict method.") - - @property - @abstractmethod - def is_fitted(self) -> bool: - raise NotImplementedError("Subclasses must implement the is_fitted property.") - - -class FinalForecaster(FinalLearner): - """Combines base learner predictions for each quantile into final predictions.""" - - def __init__(self, forecaster: Forecaster, feature_adders: None = None) -> None: - # Feature adders placeholder for future use - if feature_adders is not None: - raise NotImplementedError("Feature adders are not yet implemented.") - - # Split forecaster per quantile - self.quantiles = forecaster.config.quantiles - models: list[Forecaster] = [] - for q in self.quantiles: - config = forecaster.config.model_copy( - update={ - "quantiles": [q], - } - ) - model = forecaster.__class__(config=config) - models.append(model) - self.models = models - - @override - def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: - for i, q in enumerate(self.quantiles): - self.models[i].fit(data=base_learner_predictions[q], data_val=None) - - @override - def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - # Generate predictions - predictions = [ - self.models[i].predict(data=base_learner_predictions[q]).data for i, q in enumerate(self.quantiles) - ] - - # Concatenate predictions along columns to form a DataFrame with quantile columns - df = pd.concat(predictions, axis=1) - - return ForecastDataset( - data=df, - sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, - ) - - @property - def is_fitted(self) -> bool: - return all(x.is_fitted for x in self.models) - - -class HybridHyperParams(HyperParams): - """Hyperparameters for Stacked LGBM GBLinear Regressor.""" - - base_hyperparams: list[BaseLearnerHyperParams] = Field( - default=[LGBMHyperParams(), GBLinearHyperParams()], - description="List of hyperparameter configurations for base learners. " - "Defaults to [LGBMHyperParams, GBLinearHyperParams].", - ) - - final_hyperparams: BaseLearnerHyperParams = Field( - default=GBLinearHyperParams(), - description="Hyperparameters for the final learner. Defaults to GBLinearHyperParams.", - ) - - add_rolling_accuracy_features: bool = Field( - default=False, - description="Whether to add rolling accuracy features from base learners as additional features " - "to the final learner. Defaults to False.", - ) - - @field_validator("base_hyperparams", mode="after") - @classmethod - def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: - hp_classes = [type(hp) for hp in v] - if not len(hp_classes) == len(set(hp_classes)): - raise ValueError("Duplicate base learner hyperparameter classes are not allowed.") - return v - - -class HybridForecasterConfig(ForecasterConfig): - """Configuration for Hybrid-based forecasting models.""" - - hyperparams: HybridHyperParams = HybridHyperParams() - - verbosity: bool = Field( - default=True, - description="Enable verbose output from the Hybrid model (True/False).", - ) - - -class HybridForecaster(Forecaster): - """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" - - Config = HybridForecasterConfig - HyperParams = HybridHyperParams - - _config: HybridForecasterConfig - - def __init__(self, config: HybridForecasterConfig) -> None: - """Initialize the Hybrid forecaster.""" - self._config = config - - self._base_learners: list[BaseLearner] = self._init_base_learners( - base_hyperparams=config.hyperparams.base_hyperparams - ) - final_forecaster = self._init_base_learners(base_hyperparams=[config.hyperparams.final_hyperparams])[0] - self._final_learner = FinalForecaster(forecaster=final_forecaster) - - def _init_base_learners(self, base_hyperparams: list[BaseLearnerHyperParams]) -> list[BaseLearner]: - """Initialize base learners based on provided hyperparameters. - - Returns: - list[Forecaster]: List of initialized base learner forecasters. - """ - base_learners: list[BaseLearner] = [] - horizons = self.config.horizons - quantiles = self.config.quantiles - - for hyperparams in base_hyperparams: - forecaster_cls = hyperparams.forecaster_class() - config = forecaster_cls.Config(horizons=horizons, quantiles=quantiles) - if "hyperparams" in forecaster_cls.Config.model_fields: - config = config.model_copy(update={"hyperparams": hyperparams}) - - base_learners.append(config.forecaster_from_config()) - - return base_learners - - @property - @override - def is_fitted(self) -> bool: - return all(x.is_fitted for x in self._base_learners) - - @property - @override - def config(self) -> ForecasterConfig: - return self._config - - @override - def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: - """Fit the Hybrid model to the training data. - - Args: - data: Training data in the expected ForecastInputDataset format. - data_val: Validation data for tuning the model (optional, not used in this implementation). - - """ - # Fit base learners - [x.fit(data=data, data_val=data_val) for x in self._base_learners] - - # Reset forecast start date to ensure we predict on the full dataset - full_dataset = ForecastInputDataset( - data=data.data, - sample_interval=data.sample_interval, - target_column=data.target_column, - forecast_start=data.index[0], - ) - - base_predictions = self._predict_base_learners(data=full_dataset) - - quantile_datasets = self._prepare_input_final_learner( - base_predictions=base_predictions, quantiles=self._config.quantiles, target_series=data.target_series - ) - - self._final_learner.fit( - base_learner_predictions=quantile_datasets, - ) - - self._is_fitted = True - - def _predict_base_learners(self, data: ForecastInputDataset) -> dict[type[BaseLearner], ForecastDataset]: - """Generate predictions from base learners. - - Args: - data: Input data for prediction. - - Returns: - DataFrame containing base learner predictions. - """ - base_predictions: dict[type[BaseLearner], ForecastDataset] = {} - for learner in self._base_learners: - preds = learner.predict(data=data) - base_predictions[learner.__class__] = preds - - return base_predictions - - @staticmethod - def _prepare_input_final_learner( - quantiles: list[Quantile], - base_predictions: dict[type[BaseLearner], ForecastDataset], - target_series: pd.Series, - ) -> dict[Quantile, ForecastInputDataset]: - """Prepare input data for the final learner based on base learner predictions. - - Args: - quantiles: List of quantiles to prepare data for. - base_predictions: Predictions from base learners. - target_series: Actual target series for reference. - - Returns: - dictionary mapping quantile strings to DataFrames of base learner predictions. - """ - predictions_quantiles: dict[Quantile, ForecastInputDataset] = {} - sample_interval = base_predictions[next(iter(base_predictions))].sample_interval - target_name = str(target_series.name) - - for q in quantiles: - df = pd.DataFrame({ - learner.__name__: preds.data[Quantile(q).format()] for learner, preds in base_predictions.items() - }) - df[target_name] = target_series - - predictions_quantiles[q] = ForecastInputDataset( - data=df, - sample_interval=sample_interval, - target_column=target_name, - forecast_start=df.index[0], - ) - - return predictions_quantiles - - @override - def predict(self, data: ForecastInputDataset) -> ForecastDataset: - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - base_predictions = self._predict_base_learners(data=data) - - final_learner_input = self._prepare_input_final_learner( - quantiles=self._config.quantiles, base_predictions=base_predictions, target_series=data.target_series - ) - - return self._final_learner.predict(base_learner_predictions=final_learner_input) - - # TODO(@Lars800): #745: Make forecaster Explainable - - -__all__ = ["HybridForecaster", "HybridForecasterConfig", "HybridHyperParams"] diff --git a/pyproject.toml b/pyproject.toml index 1ef0cea1c..17df056fa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,7 +42,7 @@ optional-dependencies.beam = [ ] optional-dependencies.models = [ "openstef-models[xgb-cpu]", - "openstef-metalearning", + "openstef-meta", ] urls.Documentation = "https://openstef.github.io/openstef/index.html" urls.Homepage = "https://lfenergy.org/projects/openstef/" @@ -78,7 +78,7 @@ openstef-beam = { workspace = true } openstef-models = { workspace = true } openstef-docs = { workspace = true } openstef-core = { workspace = true } -openstef-metalearning = { workspace = true } +openstef-meta = { workspace = true } microsoft-python-type-stubs = { git = "git+https://github.com/microsoft/python-type-stubs.git" } [tool.uv.workspace] @@ -87,7 +87,7 @@ members = [ "packages/openstef-beam", "docs", "packages/openstef-core", - "packages/openstef-metalearning", + "packages/openstef-meta", ] [tool.ruff] @@ -193,7 +193,7 @@ source = [ "packages/openstef-beam/src", "packages/openstef-models/src", "packages/openstef-core/src", - "packages/openstef-metalearning/src", + "packages/openstef-meta/src", ] omit = [ "tests/*", diff --git a/uv.lock b/uv.lock index 520b8f9d9..4e2691b89 100644 --- a/uv.lock +++ b/uv.lock @@ -8,7 +8,7 @@ members = [ "openstef-beam", "openstef-core", "openstef-docs", - "openstef-metalearning", + "openstef-meta", "openstef-models", ] @@ -2119,13 +2119,15 @@ all = [ { name = "openstef-beam", extra = ["all"] }, { name = "openstef-core" }, { name = "openstef-models", extra = ["xgb-cpu"] }, + { name = "openstef-meta" }, + ] beam = [ { name = "huggingface-hub" }, { name = "openstef-beam" }, ] models = [ - { name = "openstef-metalearning" }, + { name = "openstef-meta" }, { name = "openstef-models", extra = ["xgb-cpu"] }, ] @@ -2157,7 +2159,7 @@ requires-dist = [ { name = "openstef-beam", extras = ["all"], marker = "extra == 'all'", editable = "packages/openstef-beam" }, { name = "openstef-core", editable = "packages/openstef-core" }, { name = "openstef-core", marker = "extra == 'all'", editable = "packages/openstef-core" }, - { name = "openstef-metalearning", marker = "extra == 'models'", editable = "packages/openstef-metalearning" }, + { name = "openstef-meta", marker = "extra == 'models'", editable = "packages/openstef-meta" }, { name = "openstef-models", extras = ["xgb-cpu"], editable = "packages/openstef-models" }, { name = "openstef-models", extras = ["xgb-cpu"], marker = "extra == 'all'", editable = "packages/openstef-models" }, { name = "openstef-models", extras = ["xgb-cpu"], marker = "extra == 'models'", editable = "packages/openstef-models" }, @@ -2280,9 +2282,9 @@ requires-dist = [ ] [[package]] -name = "openstef-metalearning" +name = "openstef-meta" version = "0.1.0" -source = { editable = "packages/openstef-metalearning" } +source = { editable = "packages/openstef-meta" } dependencies = [ { name = "openstef-core" }, { name = "openstef-models" }, From 114189838023b66b3c61e390bd998719d381c511 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 24 Nov 2025 22:13:55 +0100 Subject: [PATCH 031/104] Testing and fixes on Learned Weights Forecaster Signed-off-by: Lars van Someren --- .../models/learned_weights_forecaster.py | 190 +++++++++++++++--- .../models/test_learned_weights_forecaster.py | 47 ++++- 2 files changed, 208 insertions(+), 29 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py index d606a79f5..576b1a586 100644 --- a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py @@ -11,12 +11,12 @@ import logging from abc import abstractmethod -from typing import override +from typing import override, Literal, Self import pandas as pd from lightgbm import LGBMClassifier from pydantic import Field -from sklearn.linear_model import LinearRegression +from sklearn.linear_model import LogisticRegression from xgboost import XGBClassifier from openstef_core.datasets import ForecastDataset, ForecastInputDataset @@ -45,14 +45,32 @@ logger = logging.getLogger(__name__) -Classifier = LGBMClassifier | XGBClassifier | LinearRegression +# Base classes for Learned Weights Final Learner +Classifier = LGBMClassifier | XGBClassifier | LogisticRegression -class LearnedWeightsFinalLearner(FinalLearner): + +class LWFLHyperParams(HyperParams): + """Hyperparameters for Learned Weights Final Learner.""" + + @property + @abstractmethod + def learner(self) -> type["WeightsLearner"]: + """Returns the classifier to be used as final learner.""" + raise NotImplementedError("Subclasses must implement the 'estimator' property.") + + @classmethod + def learner_from_params(cls, quantiles: list[Quantile], hyperparams: Self) -> "WeightsLearner": + """Initialize the final learner from hyperparameters.""" + instance = cls() + return instance.learner(quantiles=quantiles, hyperparams=hyperparams) + + +class WeightsLearner(FinalLearner): """Combines base learner predictions with a classification approach to determine which base learner to use.""" @abstractmethod - def __init__(self, quantiles: list[Quantile]) -> None: + def __init__(self, quantiles: list[Quantile], hyperparams: LWFLHyperParams) -> None: self.quantiles = quantiles self.models: list[Classifier] = [] self._is_fitted = False @@ -119,42 +137,152 @@ def is_fitted(self) -> bool: return self._is_fitted -class LGBMFinalLearner(LearnedWeightsFinalLearner): - """Final learner using only LGBM as base learners.""" +# Final learner implementations using different classifiers +# 1 LGBM Classifier + + +class LGBMLearnerHyperParams(LWFLHyperParams): + """Hyperparameters for Learned Weights Final Learner with LGBM Classifier.""" + + n_estimators: int = Field( + default=20, + description="Number of estimators for the LGBM Classifier. Defaults to 20.", + ) + + n_leaves: int = Field( + default=31, + description="Number of leaves for the LGBM Classifier. Defaults to 31.", + ) + + @property + @override + def learner(self) -> type["LGBMLearner"]: + """Returns the LGBMLearner""" + return LGBMLearner + - def __init__(self, quantiles: list[Quantile], n_estimators: int = 20) -> None: +class LGBMLearner(WeightsLearner): + """Final learner with LGBM Classifier.""" + + HyperParams = LGBMLearnerHyperParams + + def __init__( + self, + quantiles: list[Quantile], + hyperparams: LGBMLearnerHyperParams, + ) -> None: self.quantiles = quantiles - self.models = [LGBMClassifier(class_weight="balanced", n_estimators=n_estimators) for _ in quantiles] + self.models = [ + LGBMClassifier( + class_weight="balanced", + n_estimators=hyperparams.n_estimators, + num_leaves=hyperparams.n_leaves, + ) + for _ in quantiles + ] self._is_fitted = False -class RandomForestFinalLearner(LearnedWeightsFinalLearner): - def __init__(self, quantiles: list[Quantile], n_estimators: int = 20) -> None: +# 1 RandomForest Classifier +class RFLearnerHyperParams(LWFLHyperParams): + """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" + + n_estimators: int = Field( + default=20, + description="Number of estimators for the LGBM Classifier. Defaults to 20.", + ) + + n_leaves: int = Field( + default=31, + description="Number of leaves for the LGBM Classifier. Defaults to 31.", + ) + + @property + def learner(self) -> type["RandomForestLearner"]: + """Returns the LGBMClassifier to be used as final learner.""" + return RandomForestLearner + + +class RandomForestLearner(WeightsLearner): + """Final learner using only Random Forest as base learners.""" + + def __init__(self, quantiles: list[Quantile], hyperparams: RFLearnerHyperParams) -> None: + """Initialize RandomForestLearner.""" self.quantiles = quantiles self.models = [ - LGBMClassifier(boosting_type="rf", class_weight="balanced", n_estimators=n_estimators) for _ in quantiles + LGBMClassifier(boosting_type="rf", class_weight="balanced", n_estimators=hyperparams.n_estimators) + for _ in quantiles ] self._is_fitted = False -class XGBFinalLearner(LearnedWeightsFinalLearner): +# 3 XGB Classifier +class XGBLearnerHyperParams(LWFLHyperParams): + """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" + + n_estimators: int = Field( + default=20, + description="Number of estimators for the LGBM Classifier. Defaults to 20.", + ) + + @property + def learner(self) -> type["XGBLearner"]: + """Returns the LGBMClassifier to be used as final learner.""" + return XGBLearner + + +class XGBLearner(WeightsLearner): """Final learner using only XGBoost as base learners.""" - def __init__(self, quantiles: list[Quantile], n_estimators: int = 20) -> None: + def __init__(self, quantiles: list[Quantile], hyperparams: XGBLearnerHyperParams) -> None: self.quantiles = quantiles - self.models = [XGBClassifier(class_weight="balanced", n_estimators=n_estimators) for _ in quantiles] + self.models = [XGBClassifier(class_weight="balanced", n_estimators=hyperparams.n_estimators) for _ in quantiles] self._is_fitted = False -class LogisticRegressionFinalLearner(LearnedWeightsFinalLearner): +# 4 Logistic Regression Classifier +class LogisticLearnerHyperParams(LWFLHyperParams): + """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" + + fit_intercept: bool = Field( + default=True, + description="Whether to calculate the intercept for this model. Defaults to True.", + ) + + penalty: Literal["l1", "l2", "elasticnet"] = Field( + default="l2", + description="Specify the norm used in the penalization. Defaults to 'l2'.", + ) + + c: float = Field( + default=1.0, + description="Inverse of regularization strength; must be a positive float. Defaults to 1.0.", + ) + + @property + def learner(self) -> type["LogisticLearner"]: + """Returns the LGBMClassifier to be used as final learner.""" + return LogisticLearner + + +class LogisticLearner(WeightsLearner): """Final learner using only Logistic Regression as base learners.""" - def __init__(self, quantiles: list[Quantile]) -> None: + def __init__(self, quantiles: list[Quantile], hyperparams: LogisticLearnerHyperParams) -> None: self.quantiles = quantiles - self.models = [LinearRegression() for _ in quantiles] + self.models = [ + LogisticRegression( + class_weight="balanced", + fit_intercept=hyperparams.fit_intercept, + penalty=hyperparams.penalty, + C=hyperparams.c, + ) + for _ in quantiles + ] self._is_fitted = False +# Assembly classes class LearnedWeightsHyperParams(HyperParams): """Hyperparameters for Stacked LGBM GBLinear Regressor.""" @@ -164,16 +292,16 @@ class LearnedWeightsHyperParams(HyperParams): "Defaults to [LGBMHyperParams, GBLinearHyperParams].", ) - final_learner: type[LearnedWeightsFinalLearner] = Field( - default=LGBMFinalLearner, - description="Type of final learner to use. Defaults to LearnedWeightsFinalLearner.", + final_hyperparams: LWFLHyperParams = Field( + default=LGBMLearnerHyperParams(), + description="Hyperparameters for the final learner. Defaults to LGBMLearnerHyperParams.", ) class LearnedWeightsForecasterConfig(ForecasterConfig): """Configuration for Hybrid-based forecasting models.""" - hyperparams: LearnedWeightsHyperParams = LearnedWeightsHyperParams() + hyperparams: LearnedWeightsHyperParams verbosity: bool = Field( default=True, @@ -194,15 +322,23 @@ def __init__(self, config: LearnedWeightsForecasterConfig) -> None: self._base_learners: list[BaseLearner] = self._init_base_learners( config=config, base_hyperparams=config.hyperparams.base_hyperparams ) - self._final_learner = config.hyperparams.final_learner(quantiles=config.quantiles) + self._final_learner = config.hyperparams.final_hyperparams.learner_from_params( + quantiles=config.quantiles, + hyperparams=config.hyperparams.final_hyperparams, + ) __all__ = [ - "LGBMFinalLearner", + "LGBMLearner", + "LGBMLearnerHyperParams", "LearnedWeightsForecaster", "LearnedWeightsForecasterConfig", "LearnedWeightsHyperParams", - "LogisticRegressionFinalLearner", - "RandomForestFinalLearner", - "XGBFinalLearner", + "LogisticLearner", + "LogisticLearnerHyperParams", + "RFLearnerHyperParams", + "RandomForestLearner", + "WeightsLearner", + "XGBLearner", + "XGBLearnerHyperParams", ] diff --git a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py index 4abdcee3c..ba172bc6d 100644 --- a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py +++ b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py @@ -13,14 +13,39 @@ LearnedWeightsForecaster, LearnedWeightsForecasterConfig, LearnedWeightsHyperParams, + LGBMLearner, + LGBMLearnerHyperParams, + LogisticLearner, + LogisticLearnerHyperParams, + LWFLHyperParams, + RandomForestLearner, + RFLearnerHyperParams, + WeightsLearner, + XGBLearner, + XGBLearnerHyperParams, ) +@pytest.fixture(params=["rf", "lgbm", "xgboost", "logistic"]) +def final_hyperparams(request: pytest.FixtureRequest) -> LWFLHyperParams: + """Fixture to provide different primary models types.""" + learner_type = request.param + if learner_type == "rf": + return RFLearnerHyperParams() + if learner_type == "lgbm": + return LGBMLearnerHyperParams() + if learner_type == "xgboost": + return XGBLearnerHyperParams() + return LogisticLearnerHyperParams() + + @pytest.fixture -def base_config() -> LearnedWeightsForecasterConfig: +def base_config(final_hyperparams: LWFLHyperParams) -> LearnedWeightsForecasterConfig: """Base configuration for LearnedWeights forecaster tests.""" - params = LearnedWeightsHyperParams() + params = LearnedWeightsHyperParams( + final_hyperparams=final_hyperparams, + ) return LearnedWeightsForecasterConfig( quantiles=[Q(0.1), Q(0.5), Q(0.9)], horizons=[LeadTime(timedelta(days=1))], @@ -29,6 +54,24 @@ def base_config() -> LearnedWeightsForecasterConfig: ) +def test_final_learner_corresponds_to_hyperparams(base_config: LearnedWeightsForecasterConfig): + """Test that the final learner corresponds to the specified hyperparameters.""" + forecaster = LearnedWeightsForecaster(config=base_config) + final_learner = forecaster._final_learner + + mapping: dict[type[LWFLHyperParams], type[WeightsLearner]] = { + RFLearnerHyperParams: RandomForestLearner, + LGBMLearnerHyperParams: LGBMLearner, + XGBLearnerHyperParams: XGBLearner, + LogisticLearnerHyperParams: LogisticLearner, + } + expected_learner_type = mapping[type(base_config.hyperparams.final_hyperparams)] + + assert isinstance(final_learner, expected_learner_type), ( + f"Final learner type {type(final_learner)} does not match expected type {expected_learner_type}" + ) + + def test_learned_weights_forecaster_fit_predict( sample_forecast_input_dataset: ForecastInputDataset, base_config: LearnedWeightsForecasterConfig, From 976a2fc0cb0cf5dd516c2c6905ed7f11b4efc453 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 25 Nov 2025 13:27:49 +0100 Subject: [PATCH 032/104] FinalLearner PreProcessor Signed-off-by: Lars van Someren --- .../openstef_meta/framework/final_learner.py | 65 ++++++++- .../framework/meta_forecaster.py | 29 +++- .../models/learned_weights_forecaster.py | 136 ++++++++++++++---- .../models/test_learned_weights_forecaster.py | 21 +++ 4 files changed, 212 insertions(+), 39 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py index 26567e156..8f2c58424 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py @@ -10,14 +10,24 @@ from abc import ABC, abstractmethod -from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_core.mixins import HyperParams +from pydantic import ConfigDict, Field + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset +from openstef_core.mixins import HyperParams, TransformPipeline +from openstef_core.transforms import TimeSeriesTransform from openstef_core.types import Quantile class FinalLearnerHyperParams(HyperParams): """Hyperparameters for the Final Learner.""" + model_config = ConfigDict(arbitrary_types_allowed=True) + + feature_adders: list[TimeSeriesTransform] = Field( + default=[], + description="Additional features to add to the base learner predictions before fitting the final learner.", + ) + class FinalLearnerConfig: """Configuration for the Final Learner.""" @@ -26,29 +36,76 @@ class FinalLearnerConfig: class FinalLearner(ABC): """Combines base learner predictions for each quantile into final predictions.""" + def __init__(self, quantiles: list[Quantile], hyperparams: FinalLearnerHyperParams) -> None: + """Initialize the Final Learner.""" + self.quantiles = quantiles + self.hyperparams = hyperparams + self.final_learner_processing: TransformPipeline[TimeSeriesDataset] = TransformPipeline( + transforms=hyperparams.feature_adders + ) + self._is_fitted: bool = False + @abstractmethod - def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + def fit( + self, + base_learner_predictions: dict[Quantile, ForecastInputDataset], + additional_features: ForecastInputDataset | None, + ) -> None: """Fit the final learner using base learner predictions. Args: base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner + predictions. + additional_features: Optional ForecastInputDataset containing additional features for the final learner. """ raise NotImplementedError("Subclasses must implement the fit method.") - def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + def predict( + self, + base_learner_predictions: dict[Quantile, ForecastInputDataset], + additional_features: ForecastInputDataset | None, + ) -> ForecastDataset: """Generate final predictions based on base learner predictions. Args: base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner predictions. + additional_features: Optional ForecastInputDataset containing additional features for the final learner. Returns: ForecastDataset containing the final predictions. """ raise NotImplementedError("Subclasses must implement the predict method.") + def calculate_features(self, data: ForecastInputDataset) -> ForecastInputDataset: + """Calculate additional features for the final learner. + + Args: + data: Input TimeSeriesDataset to calculate features on. + + Returns: + TimeSeriesDataset with additional features. + """ + data_ts = TimeSeriesDataset( + data=data.data, + sample_interval=data.sample_interval, + ) + data_transformed = self.final_learner_processing.transform(data_ts) + + return ForecastInputDataset( + data=data_transformed.data, + sample_interval=data.sample_interval, + target_column=data.target_column, + forecast_start=data.forecast_start, + ) + @property @abstractmethod def is_fitted(self) -> bool: """Indicates whether the final learner has been fitted.""" raise NotImplementedError("Subclasses must implement the is_fitted property.") + + @property + def has_features(self) -> bool: + """Indicates whether the final learner uses additional features.""" + return len(self.final_learner_processing.transforms) > 0 diff --git a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py index 5b69e3153..3af6329d7 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py @@ -104,12 +104,20 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None base_predictions = self._predict_base_learners(data=full_dataset) + if self._final_learner.has_features: + features = self._final_learner.calculate_features(data=full_dataset) + else: + features = None + quantile_datasets = self._prepare_input_final_learner( - base_predictions=base_predictions, quantiles=self._config.quantiles, target_series=data.target_series + base_predictions=base_predictions, + quantiles=self._config.quantiles, + target_series=data.target_series, ) self._final_learner.fit( base_learner_predictions=quantile_datasets, + additional_features=features, ) self._is_fitted = True @@ -170,18 +178,31 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: if not self.is_fitted: raise NotFittedError(self.__class__.__name__) - base_predictions = self._predict_base_learners(data=data) + full_dataset = ForecastInputDataset( + data=data.data, + sample_interval=data.sample_interval, + target_column=data.target_column, + forecast_start=data.index[0], + ) + + base_predictions = self._predict_base_learners(data=full_dataset) final_learner_input = self._prepare_input_final_learner( quantiles=self._config.quantiles, base_predictions=base_predictions, target_series=data.target_series ) - return self._final_learner.predict(base_learner_predictions=final_learner_input) + if self._final_learner.has_features: + additional_features = self._final_learner.calculate_features(data=data) + else: + additional_features = None + + return self._final_learner.predict( + base_learner_predictions=final_learner_input, additional_features=additional_features + ) __all__ = [ "BaseLearner", "BaseLearnerHyperParams", - "FinalLearner", "MetaForecaster", ] diff --git a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py index 576b1a586..255e25dd0 100644 --- a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py @@ -11,12 +11,15 @@ import logging from abc import abstractmethod -from typing import override, Literal, Self +from typing import Literal, Self, override import pandas as pd from lightgbm import LGBMClassifier from pydantic import Field +from sklearn.dummy import DummyClassifier from sklearn.linear_model import LogisticRegression +from sklearn.preprocessing import LabelEncoder +from sklearn.utils.class_weight import compute_sample_weight # type: ignore from xgboost import XGBClassifier from openstef_core.datasets import ForecastDataset, ForecastInputDataset @@ -29,7 +32,7 @@ BaseLearner, BaseLearnerHyperParams, ) -from openstef_meta.framework.final_learner import FinalLearner +from openstef_meta.framework.final_learner import FinalLearner, FinalLearnerHyperParams from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) @@ -47,10 +50,10 @@ # Base classes for Learned Weights Final Learner -Classifier = LGBMClassifier | XGBClassifier | LogisticRegression +Classifier = LGBMClassifier | XGBClassifier | LogisticRegression | DummyClassifier -class LWFLHyperParams(HyperParams): +class LWFLHyperParams(FinalLearnerHyperParams): """Hyperparameters for Learned Weights Final Learner.""" @property @@ -61,7 +64,11 @@ def learner(self) -> type["WeightsLearner"]: @classmethod def learner_from_params(cls, quantiles: list[Quantile], hyperparams: Self) -> "WeightsLearner": - """Initialize the final learner from hyperparameters.""" + """Initialize the final learner from hyperparameters. + + Returns: + WeightsLearner: An instance of the WeightsLearner initialized with the provided hyperparameters. + """ instance = cls() return instance.learner(quantiles=quantiles, hyperparams=hyperparams) @@ -69,23 +76,55 @@ def learner_from_params(cls, quantiles: list[Quantile], hyperparams: Self) -> "W class WeightsLearner(FinalLearner): """Combines base learner predictions with a classification approach to determine which base learner to use.""" - @abstractmethod def __init__(self, quantiles: list[Quantile], hyperparams: LWFLHyperParams) -> None: - self.quantiles = quantiles + """Initialize WeightsLearner.""" + super().__init__(quantiles=quantiles, hyperparams=hyperparams) self.models: list[Classifier] = [] + self._label_encoder = LabelEncoder() + self._is_fitted = False @override - def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + def fit( + self, + base_learner_predictions: dict[Quantile, ForecastInputDataset], + additional_features: ForecastInputDataset | None, + ) -> None: + for i, q in enumerate(self.quantiles): - pred = base_learner_predictions[q].data.drop(columns=[base_learner_predictions[q].target_column]) + base_predictions = base_learner_predictions[q].data.drop( + columns=[base_learner_predictions[q].target_column] + ) + labels = self._prepare_classification_data( quantile=q, target=base_learner_predictions[q].target_series, - predictions=pred, + predictions=base_predictions, ) - self.models[i].fit(X=pred, y=labels) # type: ignore + if additional_features is not None: + df = pd.concat( + [base_predictions, additional_features.data], + axis=1, + ) + else: + df = base_predictions + + if len(labels.unique()) == 1: + msg = f"""Final learner for quantile {q.format()} has less than 2 classes in the target. + Switching to dummy classifier """ + logger.warning(msg=msg) + self.models[i] = DummyClassifier(strategy="most_frequent") + + if i == 0: + # Fit label encoder only once + self._label_encoder.fit(labels) + labels = self._label_encoder.transform(labels) + + # Balance classes + weights = compute_sample_weight("balanced", labels) + + self.models[i].fit(X=df, y=labels, sample_weight=weights) # type: ignore self._is_fitted = True @staticmethod @@ -105,25 +144,38 @@ def column_pinball_losses(preds: pd.Series) -> pd.Series: # For each sample, select the base learner with the lowest pinball loss return pinball_losses.idxmin(axis=1) - def _calculate_sample_weights_quantile(self, base_predictions: pd.DataFrame, quantile: Quantile) -> pd.DataFrame: + def _calculate_model_weights_quantile(self, base_predictions: pd.DataFrame, quantile: Quantile) -> pd.DataFrame: model = self.models[self.quantiles.index(quantile)] return model.predict_proba(X=base_predictions) # type: ignore - def _generate_predictions_quantile(self, base_predictions: ForecastInputDataset, quantile: Quantile) -> pd.Series: - df = base_predictions.data.drop(columns=[base_predictions.target_column]) - weights = self._calculate_sample_weights_quantile(base_predictions=df, quantile=quantile) + def _generate_predictions_quantile( + self, + base_predictions: ForecastInputDataset, + additional_features: ForecastInputDataset | None, + quantile: Quantile, + ) -> pd.Series: + base_df = base_predictions.data.drop(columns=[base_predictions.target_column]) + df = pd.concat([base_df, additional_features.data], axis=1) if additional_features is not None else base_df + + weights = self._calculate_model_weights_quantile(base_predictions=df, quantile=quantile) - return df.mul(weights).sum(axis=1) + return base_df.mul(weights).sum(axis=1) @override - def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + def predict( + self, + base_learner_predictions: dict[Quantile, ForecastInputDataset], + additional_features: ForecastInputDataset | None, + ) -> ForecastDataset: if not self.is_fitted: raise NotFittedError(self.__class__.__name__) # Generate predictions predictions = pd.DataFrame({ - Quantile(q).format(): self._generate_predictions_quantile(base_predictions=data, quantile=q) + Quantile(q).format(): self._generate_predictions_quantile( + base_predictions=data, quantile=q, additional_features=additional_features + ) for q, data in base_learner_predictions.items() }) @@ -133,14 +185,13 @@ def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset] ) @property + @override def is_fitted(self) -> bool: return self._is_fitted # Final learner implementations using different classifiers # 1 LGBM Classifier - - class LGBMLearnerHyperParams(LWFLHyperParams): """Hyperparameters for Learned Weights Final Learner with LGBM Classifier.""" @@ -157,7 +208,7 @@ class LGBMLearnerHyperParams(LWFLHyperParams): @property @override def learner(self) -> type["LGBMLearner"]: - """Returns the LGBMLearner""" + """Returns the LGBMLearner.""" return LGBMLearner @@ -171,7 +222,8 @@ def __init__( quantiles: list[Quantile], hyperparams: LGBMLearnerHyperParams, ) -> None: - self.quantiles = quantiles + """Initialize LGBMLearner.""" + super().__init__(quantiles=quantiles, hyperparams=hyperparams) self.models = [ LGBMClassifier( class_weight="balanced", @@ -180,7 +232,6 @@ def __init__( ) for _ in quantiles ] - self._is_fitted = False # 1 RandomForest Classifier @@ -197,6 +248,21 @@ class RFLearnerHyperParams(LWFLHyperParams): description="Number of leaves for the LGBM Classifier. Defaults to 31.", ) + bagging_freq: int = Field( + default=1, + description="Frequency for bagging in the Random Forest. Defaults to 1.", + ) + + bagging_fraction: float = Field( + default=0.8, + description="Fraction of data to be used for each iteration of the Random Forest. Defaults to 0.8.", + ) + + feature_fraction: float = Field( + default=1, + description="Fraction of features to be used for each iteration of the Random Forest. Defaults to 1.", + ) + @property def learner(self) -> type["RandomForestLearner"]: """Returns the LGBMClassifier to be used as final learner.""" @@ -208,12 +274,20 @@ class RandomForestLearner(WeightsLearner): def __init__(self, quantiles: list[Quantile], hyperparams: RFLearnerHyperParams) -> None: """Initialize RandomForestLearner.""" - self.quantiles = quantiles + super().__init__(quantiles=quantiles, hyperparams=hyperparams) + self.models = [ - LGBMClassifier(boosting_type="rf", class_weight="balanced", n_estimators=hyperparams.n_estimators) + LGBMClassifier( + boosting_type="rf", + class_weight="balanced", + n_estimators=hyperparams.n_estimators, + bagging_freq=hyperparams.bagging_freq, + bagging_fraction=hyperparams.bagging_fraction, + feature_fraction=hyperparams.feature_fraction, + num_leaves=hyperparams.n_leaves, + ) for _ in quantiles ] - self._is_fitted = False # 3 XGB Classifier @@ -235,9 +309,9 @@ class XGBLearner(WeightsLearner): """Final learner using only XGBoost as base learners.""" def __init__(self, quantiles: list[Quantile], hyperparams: XGBLearnerHyperParams) -> None: - self.quantiles = quantiles - self.models = [XGBClassifier(class_weight="balanced", n_estimators=hyperparams.n_estimators) for _ in quantiles] - self._is_fitted = False + """Initialize XGBLearner.""" + super().__init__(quantiles=quantiles, hyperparams=hyperparams) + self.models = [XGBClassifier(n_estimators=hyperparams.n_estimators) for _ in quantiles] # 4 Logistic Regression Classifier @@ -269,7 +343,8 @@ class LogisticLearner(WeightsLearner): """Final learner using only Logistic Regression as base learners.""" def __init__(self, quantiles: list[Quantile], hyperparams: LogisticLearnerHyperParams) -> None: - self.quantiles = quantiles + """Initialize LogisticLearner.""" + super().__init__(quantiles=quantiles, hyperparams=hyperparams) self.models = [ LogisticRegression( class_weight="balanced", @@ -279,7 +354,6 @@ def __init__(self, quantiles: list[Quantile], hyperparams: LogisticLearnerHyperP ) for _ in quantiles ] - self._is_fitted = False # Assembly classes diff --git a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py index ba172bc6d..af2504a55 100644 --- a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py +++ b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py @@ -24,6 +24,7 @@ XGBLearner, XGBLearnerHyperParams, ) +from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder @pytest.fixture(params=["rf", "lgbm", "xgboost", "logistic"]) @@ -146,3 +147,23 @@ def test_learned_weights_forecaster_with_sample_weights( # (This is a statistical test - with different weights, predictions should differ) differences = (result_with_weights.data - result_without_weights.data).abs() assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +def test_learned_weights_forecaster_with_additional_features( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LearnedWeightsForecasterConfig, +): + """Test that forecaster works with additional features for the final learner.""" + # Arrange + # Add a simple feature adder that adds a constant feature + + base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) + forecaster = LearnedWeightsForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" From 82795d9c00c2e69337c513fc5a74740a7f7a670f Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 25 Nov 2025 13:38:52 +0100 Subject: [PATCH 033/104] Fixed benchmark references Signed-off-by: Lars van Someren --- .../presets/forecasting_workflow.py | 46 +++++++++++++++---- 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 99cbea0ac..df0f2c59c 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -25,6 +25,9 @@ from openstef_core.base_model import BaseConfig from openstef_core.mixins import TransformPipeline from openstef_core.types import LeadTime, Q, Quantile, QuantileOrGlobal +from openstef_meta.models.learned_weights_forecaster import LearnedWeightsForecaster +from openstef_meta.models.stacking_forecaster import StackingForecaster +from openstef_meta.models.residual_forecaster import ResidualForecaster from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback from openstef_models.mixins import ModelIdentifier from openstef_models.models import ForecastingModel @@ -32,8 +35,6 @@ from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster -from openstef_models.models.forecasting.meta.learned_weights_forecaster import LearnedWeightsForecaster -from openstef_models.models.forecasting.meta.stacking_forecaster import StackingForecaster from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster from openstef_models.transforms.energy_domain import WindPowerFeatureAdder from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, Imputer, NaNDropper, SampleWeighter, Scaler @@ -101,9 +102,9 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob model_id: ModelIdentifier = Field(description="Unique identifier for the forecasting model.") # Model configuration - model: Literal["xgboost", "gblinear", "flatliner", "stacking", "learned_weights", "lgbm", "lgbmlinear"] = Field( - description="Type of forecasting model to use." - ) # TODO(#652): Implement median forecaster + model: Literal[ + "xgboost", "gblinear", "flatliner", "stacking", "residual", "learned_weights", "lgbm", "lgbmlinear" + ] = Field(description="Type of forecasting model to use.") # TODO(#652): Implement median forecaster quantiles: list[Quantile] = Field( default=[Q(0.5)], description="List of quantiles to predict for probabilistic forecasting.", @@ -137,6 +138,11 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob description="Hyperparameters for LightGBM forecaster.", ) + residual_hyperparams: ResidualForecaster.HyperParams = Field( + default=ResidualForecaster.HyperParams(), + description="Hyperparameters for Residual forecaster.", + ) + stacking_hyperparams: StackingForecaster.HyperParams = Field( default=StackingForecaster.HyperParams(), description="Hyperparameters for Stacking forecaster.", @@ -208,7 +214,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob ) sample_weight_exponent: float = Field( default_factory=lambda data: 1.0 - if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "learned_weights", "stacking", "xgboost"} + if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "learned_weights", "stacking", "residual", "xgboost"} else 0.0, description="Exponent applied to scale the sample weights. " "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " @@ -308,9 +314,11 @@ def create_forecasting_workflow( history_available=config.predict_history, horizons=config.horizons, add_trivial_lags=config.model - not in {"gblinear", "stacking", "learned_weights"}, # GBLinear uses only 7day lag. + not in {"gblinear", "residual", "stacking", "learned_weights"}, # GBLinear uses only 7day lag. target_column=config.target_column, - custom_lags=[timedelta(days=7)] if config.model in {"gblinear", "stackinglearned_weights"} else [], + custom_lags=[timedelta(days=7)] + if config.model in {"gblinear", "residual", "stacking", "learned_weights"} + else [], ), WindPowerFeatureAdder( windspeed_reference_column=config.wind_speed_column, @@ -430,6 +438,28 @@ def create_forecasting_workflow( ConfidenceIntervalApplicator(quantiles=config.quantiles), ] elif config.model == "learned_weights": + preprocessing = [ + *checks, + *feature_adders, + *feature_standardizers, + Imputer( + selection=Exclude(config.target_column), + imputation_strategy="mean", + fill_future_values=Include(config.energy_price_column), + ), + NaNDropper( + selection=Exclude(config.target_column), + ), + ] + forecaster = ResidualForecaster( + config=ResidualForecaster.Config( + quantiles=config.quantiles, + horizons=config.horizons, + hyperparams=config.residual_hyperparams, + ) + ) + postprocessing = [QuantileSorter()] + elif config.model == "residual": preprocessing = [ *checks, *feature_adders, From 140fe26e295c06065b936f49627ac4749af3be1b Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 25 Nov 2025 17:08:44 +0100 Subject: [PATCH 034/104] Added additional Feature logic to StackingForecaster Signed-off-by: Lars van Someren --- .../openstef_meta/framework/final_learner.py | 4 - .../models/stacking_forecaster.py | 123 ++++++++++++++---- .../tests/models/test_stacking_forecaster.py | 31 +++++ 3 files changed, 126 insertions(+), 32 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py index 8f2c58424..dc92cc1c1 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py @@ -29,10 +29,6 @@ class FinalLearnerHyperParams(HyperParams): ) -class FinalLearnerConfig: - """Configuration for the Final Learner.""" - - class FinalLearner(ABC): """Combines base learner predictions for each quantile into final predictions.""" diff --git a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py index f48ae7988..2af0ff395 100644 --- a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py @@ -20,12 +20,13 @@ NotFittedError, ) from openstef_core.mixins import HyperParams -from openstef_core.types import Quantile +from openstef_core.transforms import TimeSeriesTransform +from openstef_core.types import LeadTime, Quantile from openstef_meta.framework.base_learner import ( BaseLearner, BaseLearnerHyperParams, ) -from openstef_meta.framework.final_learner import FinalLearner +from openstef_meta.framework.final_learner import FinalLearner, FinalLearnerHyperParams from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) @@ -41,47 +42,114 @@ logger = logging.getLogger(__name__) +class StackingFinalLearnerHyperParams(FinalLearnerHyperParams): + """HyperParams for Stacking Final Learner.""" + + feature_adders: list[TimeSeriesTransform] = Field( + default=[], + description="Additional features to add to the base learner predictions before fitting the final learner.", + ) + + forecaster_hyperparams: BaseLearnerHyperParams = Field( + default=GBLinearHyperParams(), + description="", + ) + + class StackingFinalLearner(FinalLearner): """Combines base learner predictions per quantile into final predictions using a regression approach.""" - def __init__(self, forecaster: Forecaster, feature_adders: None = None) -> None: + def __init__( + self, quantiles: list[Quantile], hyperparams: StackingFinalLearnerHyperParams, horizon: LeadTime + ) -> None: """Initialize the Stacking final learner. Args: - forecaster: The forecaster model to be used as the final learner. - feature_adders: Placeholder for future feature adders (not yet implemented). + quantiles: List of quantiles to predict. + hyperparams: Hyperparameters for the final learner. + horizon: Forecast horizon for which to create the final learner. """ - # Feature adders placeholder for future use - if feature_adders is not None: - raise NotImplementedError("Feature adders are not yet implemented.") + super().__init__(quantiles=quantiles, hyperparams=hyperparams) + + forecaster_hyperparams: BaseLearnerHyperParams = hyperparams.forecaster_hyperparams # Split forecaster per quantile - self.quantiles = forecaster.config.quantiles models: list[Forecaster] = [] for q in self.quantiles: - config = forecaster.config.model_copy( - update={ - "quantiles": [q], - } - ) - model = forecaster.__class__(config=config) + forecaster_cls = forecaster_hyperparams.forecaster_class() + config = forecaster_cls.Config(horizons=[horizon], quantiles=[q]) + if "hyperparams" in forecaster_cls.Config.model_fields: + config = config.model_copy(update={"hyperparams": forecaster_hyperparams}) + + model = config.forecaster_from_config() models.append(model) self.models = models + @staticmethod + def _combine_datasets( + data: ForecastInputDataset, additional_features: ForecastInputDataset + ) -> ForecastInputDataset: + """Combine base learner predictions with additional features for final learner input. + + Args: + data: ForecastInputDataset containing base learner predictions. + additional_features: ForecastInputDataset containing additional features. + + Returns: + ForecastInputDataset with combined features. + """ + additional_df = additional_features.data.loc[ + :, [col for col in additional_features.data.columns if col not in data.data.columns] + ] + # Merge on index to combine datasets + combined_df = data.data.join(additional_df) + + return ForecastInputDataset( + data=combined_df, + sample_interval=data.sample_interval, + forecast_start=data.forecast_start, + ) + @override - def fit(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> None: + def fit( + self, + base_learner_predictions: dict[Quantile, ForecastInputDataset], + additional_features: ForecastInputDataset | None, + ) -> None: + for i, q in enumerate(self.quantiles): - self.models[i].fit(data=base_learner_predictions[q], data_val=None) + if additional_features is not None: + data = self._combine_datasets( + data=base_learner_predictions[q], + additional_features=additional_features, + ) + else: + data = base_learner_predictions[q] + + self.models[i].fit(data=data, data_val=None) @override - def predict(self, base_learner_predictions: dict[Quantile, ForecastInputDataset]) -> ForecastDataset: + def predict( + self, + base_learner_predictions: dict[Quantile, ForecastInputDataset], + additional_features: ForecastInputDataset | None, + ) -> ForecastDataset: if not self.is_fitted: raise NotFittedError(self.__class__.__name__) # Generate predictions - predictions = [ - self.models[i].predict(data=base_learner_predictions[q]).data for i, q in enumerate(self.quantiles) - ] + predictions: list[pd.DataFrame] = [] + for i, q in enumerate(self.quantiles): + if additional_features is not None: + data = self._combine_datasets( + data=base_learner_predictions[q], + additional_features=additional_features, + ) + else: + data = base_learner_predictions[q] + + p = self.models[i].predict(data=data).data + predictions.append(p) # Concatenate predictions along columns to form a DataFrame with quantile columns df = pd.concat(predictions, axis=1) @@ -106,9 +174,9 @@ class StackingHyperParams(HyperParams): "Defaults to [LGBMHyperParams, GBLinearHyperParams].", ) - final_hyperparams: BaseLearnerHyperParams = Field( - default=GBLinearHyperParams(), - description="Hyperparameters for the final learner. Defaults to GBLinearHyperParams.", + final_hyperparams: StackingFinalLearnerHyperParams = Field( + default=StackingFinalLearnerHyperParams(), + description="Hyperparameters for the final learner.", ) use_classifier: bool = Field( @@ -156,10 +224,9 @@ def __init__(self, config: StackingForecasterConfig) -> None: config=config, base_hyperparams=config.hyperparams.base_hyperparams ) - final_forecaster = self._init_base_learners( - config=config, base_hyperparams=[config.hyperparams.final_hyperparams] - )[0] - self._final_learner = StackingFinalLearner(forecaster=final_forecaster) + self._final_learner = StackingFinalLearner( + quantiles=config.quantiles, hyperparams=config.hyperparams.final_hyperparams, horizon=config.max_horizon + ) __all__ = ["StackingFinalLearner", "StackingForecaster", "StackingForecasterConfig", "StackingHyperParams"] diff --git a/packages/openstef-meta/tests/models/test_stacking_forecaster.py b/packages/openstef-meta/tests/models/test_stacking_forecaster.py index e8543f055..9eccde9b9 100644 --- a/packages/openstef-meta/tests/models/test_stacking_forecaster.py +++ b/packages/openstef-meta/tests/models/test_stacking_forecaster.py @@ -14,6 +14,7 @@ StackingForecasterConfig, StackingHyperParams, ) +from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder @pytest.fixture @@ -103,3 +104,33 @@ def test_stacking_forecaster_with_sample_weights( # (This is a statistical test - with different weights, predictions should differ) differences = (result_with_weights.data - result_without_weights.data).abs() assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +def test_stacking_forecaster_with_additional_features( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: StackingForecasterConfig, +): + """Test that forecaster works with additional features for the final learner.""" + + base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) + + # Arrange + expected_quantiles = base_config.quantiles + forecaster = StackingForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" From c053ea52f84accbbf35382ed4a409e47beebf2db Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 25 Nov 2025 17:13:26 +0100 Subject: [PATCH 035/104] added example to openstef Meta Signed-off-by: Lars van Someren --- .../examples/liander_2024_residual.py | 154 ++++++++++++++++++ .../presets/forecasting_workflow.py | 2 +- 2 files changed, 155 insertions(+), 1 deletion(-) create mode 100644 packages/openstef-meta/src/openstef_meta/examples/liander_2024_residual.py diff --git a/packages/openstef-meta/src/openstef_meta/examples/liander_2024_residual.py b/packages/openstef-meta/src/openstef_meta/examples/liander_2024_residual.py new file mode 100644 index 000000000..3448697ac --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/examples/liander_2024_residual.py @@ -0,0 +1,154 @@ +"""Liander 2024 Benchmark Example. + +==================================== + +This example demonstrates how to set up and run the Liander 2024 STEF benchmark using OpenSTEF BEAM. +The benchmark will evaluate XGBoost and GBLinear models on the dataset from HuggingFace. +""" + +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +import os +import time + +os.environ["OMP_NUM_THREADS"] = "1" # Set OMP_NUM_THREADS to 1 to avoid issues with parallel execution and xgboost +os.environ["OPENBLAS_NUM_THREADS"] = "1" +os.environ["MKL_NUM_THREADS"] = "1" + +import logging +import multiprocessing +from datetime import timedelta +from pathlib import Path + +from pydantic_extra_types.coordinate import Coordinate +from pydantic_extra_types.country import CountryAlpha2 + +from openstef_beam.backtesting.backtest_forecaster import BacktestForecasterConfig, OpenSTEF4BacktestForecaster +from openstef_beam.benchmarking.benchmark_pipeline import BenchmarkContext +from openstef_beam.benchmarking.benchmarks.liander2024 import Liander2024Category, create_liander2024_benchmark_runner +from openstef_beam.benchmarking.callbacks.strict_execution_callback import StrictExecutionCallback +from openstef_beam.benchmarking.models.benchmark_target import BenchmarkTarget +from openstef_beam.benchmarking.storage.local_storage import LocalBenchmarkStorage +from openstef_core.types import LeadTime, Q +from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage +from openstef_models.presets import ( + ForecastingWorkflowConfig, + create_forecasting_workflow, +) +from openstef_models.presets.forecasting_workflow import LocationConfig +from openstef_models.workflows import CustomForecastingWorkflow + +logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") + +OUTPUT_PATH = Path("./benchmark_results") + +N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark + +model = "residual" # Can be "stacking", "learned_weights" or "residual" + +# Model configuration +FORECAST_HORIZONS = [LeadTime.from_string("PT36H")] # Forecast horizon(s) +PREDICTION_QUANTILES = [ + Q(0.05), + Q(0.1), + Q(0.3), + Q(0.5), + Q(0.7), + Q(0.9), + Q(0.95), +] # Quantiles for probabilistic forecasts + +BENCHMARK_FILTER: list[Liander2024Category] | None = None + +USE_MLFLOW_STORAGE = False + +if USE_MLFLOW_STORAGE: + storage = MLFlowStorage( + tracking_uri=str(OUTPUT_PATH / "mlflow_artifacts"), + local_artifacts_path=OUTPUT_PATH / "mlflow_tracking_artifacts", + ) +else: + storage = None + +common_config = ForecastingWorkflowConfig( + model_id="common_model_", + model=model, + horizons=FORECAST_HORIZONS, + quantiles=PREDICTION_QUANTILES, + model_reuse_enable=False, + mlflow_storage=None, + radiation_column="shortwave_radiation", + rolling_aggregate_features=["mean", "median", "max", "min"], + wind_speed_column="wind_speed_80m", + pressure_column="surface_pressure", + temperature_column="temperature_2m", + relative_humidity_column="relative_humidity_2m", + energy_price_column="EPEX_NL", +) + + +# Create the backtest configuration +backtest_config = BacktestForecasterConfig( + requires_training=True, + predict_length=timedelta(days=7), + predict_min_length=timedelta(minutes=15), + predict_context_length=timedelta(days=14), # Context needed for lag features + predict_context_min_coverage=0.5, + training_context_length=timedelta(days=90), # Three months of training data + training_context_min_coverage=0.5, + predict_sample_interval=timedelta(minutes=15), +) + + +def _target_forecaster_factory( + context: BenchmarkContext, + target: BenchmarkTarget, +) -> OpenSTEF4BacktestForecaster: + # Factory function that creates a forecaster for a given target. + prefix = context.run_name + base_config = common_config + + def _create_workflow() -> CustomForecastingWorkflow: + # Create a new workflow instance with fresh model. + return create_forecasting_workflow( + config=base_config.model_copy( + update={ + "model_id": f"{prefix}_{target.name}", + "location": LocationConfig( + name=target.name, + description=target.description, + coordinate=Coordinate( + latitude=target.latitude, + longitude=target.longitude, + ), + country_code=CountryAlpha2("NL"), + ), + } + ) + ) + + return OpenSTEF4BacktestForecaster( + config=backtest_config, + workflow_factory=_create_workflow, + debug=False, + cache_dir=OUTPUT_PATH / "cache" / f"{context.run_name}_{target.name}", + ) + + +if __name__ == "__main__": + start_time = time.time() + create_liander2024_benchmark_runner( + storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH / model), + data_dir=Path("../data/liander2024-energy-forecasting-benchmark"), # adjust path as needed + callbacks=[StrictExecutionCallback()], + ).run( + forecaster_factory=_target_forecaster_factory, + run_name=model, + n_processes=N_PROCESSES, + filter_args=BENCHMARK_FILTER, + ) + + end_time = time.time() + print(f"Benchmark completed in {end_time - start_time:.2f} seconds.") diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index df0f2c59c..37dc5bbdb 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -26,8 +26,8 @@ from openstef_core.mixins import TransformPipeline from openstef_core.types import LeadTime, Q, Quantile, QuantileOrGlobal from openstef_meta.models.learned_weights_forecaster import LearnedWeightsForecaster -from openstef_meta.models.stacking_forecaster import StackingForecaster from openstef_meta.models.residual_forecaster import ResidualForecaster +from openstef_meta.models.stacking_forecaster import StackingForecaster from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback from openstef_models.mixins import ModelIdentifier from openstef_models.models import ForecastingModel From 1d5d97d9c9b28fb77295a3e60024ff045fb222ee Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 25 Nov 2025 21:34:53 +0100 Subject: [PATCH 036/104] RulesForecaster with dummy features Signed-off-by: Lars van Someren --- .../src/openstef_meta/__init__.py | 2 +- .../openstef_meta/models/rules_forecaster.py | 198 ++++++++++++++++++ .../src/openstef_meta/utils/__init__.py | 11 + .../src/openstef_meta/utils/decision_tree.py | 138 ++++++++++++ .../tests/models/test_rules_forecaster.py | 136 ++++++++++++ .../openstef-meta/tests/utils/__init__.py | 0 .../tests/utils/test_decision_tree.py | 41 ++++ 7 files changed, 525 insertions(+), 1 deletion(-) create mode 100644 packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py create mode 100644 packages/openstef-meta/src/openstef_meta/utils/decision_tree.py create mode 100644 packages/openstef-meta/tests/models/test_rules_forecaster.py create mode 100644 packages/openstef-meta/tests/utils/__init__.py create mode 100644 packages/openstef-meta/tests/utils/test_decision_tree.py diff --git a/packages/openstef-meta/src/openstef_meta/__init__.py b/packages/openstef-meta/src/openstef_meta/__init__.py index e659c6c12..ff5902981 100644 --- a/packages/openstef-meta/src/openstef_meta/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/__init__.py @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 -"""Core models for OpenSTEF.""" +"""Meta models for OpenSTEF.""" import logging diff --git a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py new file mode 100644 index 000000000..4211b9e18 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py @@ -0,0 +1,198 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Rules-based Meta Forecaster Module.""" + +import logging +from typing import override + +import pandas as pd +from pydantic import Field, field_validator +from pydantic_extra_types.country import CountryAlpha2 + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.mixins import HyperParams +from openstef_core.transforms import TimeSeriesTransform +from openstef_core.types import Quantile +from openstef_meta.framework.base_learner import ( + BaseLearner, + BaseLearnerHyperParams, +) +from openstef_meta.framework.final_learner import FinalLearner, FinalLearnerHyperParams +from openstef_meta.framework.meta_forecaster import ( + EnsembleForecaster, +) +from openstef_meta.utils.decision_tree import Decision, DecisionTree, Rule +from openstef_models.models.forecasting.forecaster import ( + ForecasterConfig, +) +from openstef_models.models.forecasting.gblinear_forecaster import ( + GBLinearHyperParams, +) +from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +from openstef_models.transforms.time_domain import HolidayFeatureAdder + +logger = logging.getLogger(__name__) + + +class RulesLearnerHyperParams(FinalLearnerHyperParams): + """HyperParams for Stacking Final Learner.""" + + feature_adders: list[TimeSeriesTransform] = Field( + default=[], + description="Additional features to add to the final learner.", + ) + + decision_tree: DecisionTree = Field( + description="Decision tree defining the rules for the final learner.", + ) + + @field_validator("feature_adders", mode="after") + @classmethod + def _check_not_empty(cls, v: list[TimeSeriesTransform]) -> list[TimeSeriesTransform]: + if v == []: + raise ValueError("RulesForecaster requires at least one feature adder.") + return v + + +class RulesLearner(FinalLearner): + """Combines base learner predictions per quantile into final predictions using a regression approach.""" + + def __init__(self, quantiles: list[Quantile], hyperparams: RulesLearnerHyperParams) -> None: + """Initialize the Rules Learner. + + Args: + quantiles: List of quantiles to predict. + hyperparams: Hyperparameters for the final learner. + horizon: Forecast horizon for which to create the final learner. + """ + super().__init__(quantiles=quantiles, hyperparams=hyperparams) + + self.tree = hyperparams.decision_tree + self.feature_adders = hyperparams.feature_adders + + @override + def fit( + self, + base_learner_predictions: dict[Quantile, ForecastInputDataset], + additional_features: ForecastInputDataset | None, + ) -> None: + # No fitting needed for rule-based final learner + # Check that additional features are provided + if additional_features is None: + raise ValueError("Additional features must be provided for RulesFinalLearner prediction.") + + def _predict_tree(self, data: pd.DataFrame, columns: pd.Index) -> pd.DataFrame: + """Predict using the decision tree rules. + + Args: + data: DataFrame containing the additional features. + columns: Expected columns for the output DataFrame. + + Returns: + DataFrame with predictions for each quantile. + """ + predictions = data.apply(self.tree.get_decision, axis=1) + + return pd.get_dummies(predictions).reindex(columns=columns) + + @override + def predict( + self, + base_learner_predictions: dict[Quantile, ForecastInputDataset], + additional_features: ForecastInputDataset | None, + ) -> ForecastDataset: + if additional_features is None: + raise ValueError("Additional features must be provided for RulesFinalLearner prediction.") + + decisions = self._predict_tree( + additional_features.data, columns=base_learner_predictions[self.quantiles[0]].data.columns + ) + + # Generate predictions + predictions: list[pd.DataFrame] = [] + for q, data in base_learner_predictions.items(): + preds = data.data * decisions + predictions.append(preds.sum(axis=1).to_frame(name=Quantile(q).format())) + + # Concatenate predictions along columns to form a DataFrame with quantile columns + df = pd.concat(predictions, axis=1) + + return ForecastDataset( + data=df, + sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, + ) + + @property + def is_fitted(self) -> bool: + """Check the Rules Final Learner is fitted.""" + return True + + +class RulesForecasterHyperParams(HyperParams): + """Hyperparameters for Rules Forecaster.""" + + base_hyperparams: list[BaseLearnerHyperParams] = Field( + default=[LGBMHyperParams(), GBLinearHyperParams()], + description="List of hyperparameter configurations for base learners. " + "Defaults to [LGBMHyperParams, GBLinearHyperParams].", + ) + + final_hyperparams: RulesLearnerHyperParams = Field( + description="Hyperparameters for the final learner.", + default=RulesLearnerHyperParams( + decision_tree=DecisionTree(nodes=[Decision(idx=0, decision="LGBMForecaster")], outcomes={"LGBMForecaster"}), + feature_adders=[HolidayFeatureAdder(country_code=CountryAlpha2("NL"))], + ), + ) + + @field_validator("base_hyperparams", mode="after") + @classmethod + def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: + hp_classes = [type(hp) for hp in v] + if not len(hp_classes) == len(set(hp_classes)): + raise ValueError("Duplicate base learner hyperparameter classes are not allowed.") + return v + + +class RulesForecasterConfig(ForecasterConfig): + """Configuration for Hybrid-based forecasting models.""" + + hyperparams: RulesForecasterHyperParams = Field( + default=RulesForecasterHyperParams(), + description="Hyperparameters for the Hybrid forecaster.", + ) + + verbosity: bool = Field( + default=True, + description="Enable verbose output from the Hybrid model (True/False).", + ) + + +class RulesForecaster(EnsembleForecaster): + """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" + + Config = RulesForecasterConfig + HyperParams = RulesForecasterHyperParams + + def __init__(self, config: RulesForecasterConfig) -> None: + """Initialize the Hybrid forecaster.""" + self._config = config + + self._base_learners: list[BaseLearner] = self._init_base_learners( + config=config, base_hyperparams=config.hyperparams.base_hyperparams + ) + + self._final_learner = RulesLearner( + quantiles=config.quantiles, + hyperparams=config.hyperparams.final_hyperparams, + ) + + +__all__ = [ + "RulesForecaster", + "RulesForecasterConfig", + "RulesForecasterHyperParams", + "RulesLearner", + "RulesLearnerHyperParams", +] diff --git a/packages/openstef-meta/src/openstef_meta/utils/__init__.py b/packages/openstef-meta/src/openstef_meta/utils/__init__.py index e69de29bb..8b8144daa 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/utils/__init__.py @@ -0,0 +1,11 @@ +"""Utility functions and classes for OpenSTEF Meta.""" + +from .decision_tree import Decision, DecisionTree, Rule +from .pinball_errors import calculate_pinball_errors + +__all__ = [ + "Decision", + "DecisionTree", + "Rule", + "calculate_pinball_errors", +] diff --git a/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py b/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py new file mode 100644 index 000000000..c5d49852a --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py @@ -0,0 +1,138 @@ +from typing import Literal + +import pandas as pd +from pydantic import BaseModel, Field, model_validator + + +class Node(BaseModel): + """A node in the decision tree, either a rule or a decision.""" + + idx: int = Field( + description="Index of the rule in the decision tree.", + ) + + +class Rule(Node): + """A single rule in the decision tree.""" + + idx: int = Field( + description="Index of the decision in the decision tree.", + ) + + rule_type: Literal["greater_than", "less_than"] = Field( + ..., + description="Type of the rule to apply.", + ) + feature_name: str = Field( + ..., + description="Name of the feature to which the rule applies.", + ) + + threshold: float | int = Field( + ..., + description="Threshold value for the rule.", + ) + + next_true: int = Field( + ..., + description="Index of the next rule if the condition is true.", + ) + + next_false: int = Field( + ..., + description="Index of the next rule if the condition is false.", + ) + + +class Decision(Node): + """A leaf decision in the decision tree.""" + + idx: int = Field( + description="Index of the decision in the decision tree.", + ) + + decision: str = Field( + ..., + description="The prediction value at this leaf.", + ) + + +class DecisionTree(BaseModel): + """A simple decision tree defined by a list of rules.""" + + nodes: list[Node] = Field( + ..., + description="List of rules that define the decision tree.", + ) + + outcomes: set[str] = Field( + ..., + description="Set of possible outcomes from the decision tree.", + ) + + @model_validator(mode="after") + def validate_tree_structure(self) -> "DecisionTree": + """Validate that the tree structure is correct. + + Raises: + ValueError: If tree is not built correctly. + + Returns: + The validated DecisionTree instance. + """ + node_idx = {node.idx for node in self.nodes} + if node_idx != set(range(len(self.nodes))): + raise ValueError("Rule indices must be consecutive starting from 0.") + + for node in self.nodes: + if isinstance(node, Rule): + if node.next_true not in node_idx: + msg = f"next_true index {node.next_true} not found in nodes." + raise ValueError(msg) + if node.next_false not in node_idx: + msg = f"next_false index {node.next_false} not found in nodes." + raise ValueError(msg) + if isinstance(node, Decision) and node.decision not in self.outcomes: + msg = f"Decision '{node.decision}' not in defined outcomes {self.outcomes}." + raise ValueError(msg) + + return self + + def get_decision(self, row: pd.Series) -> str: + """Get decision from the decision tree based on input features. + + Args: + row: Series containing feature values. + + Returns: + The decision outcome as a string. + + Raises: + ValueError: If the tree structure is invalid. + TypeError: If a node type is invalid. + """ + current_idx = 0 + while True: + current_node = self.nodes[current_idx] + if isinstance(current_node, Decision): + return current_node.decision + if isinstance(current_node, Rule): + feature_value = row[current_node.feature_name] + if current_node.rule_type == "greater_than": + if feature_value > current_node.threshold: + current_idx = current_node.next_true + else: + current_idx = current_node.next_false + elif current_node.rule_type == "less_than": + if feature_value < current_node.threshold: + current_idx = current_node.next_true + else: + current_idx = current_node.next_false + else: + msg = f"Invalid rule type '{current_node.rule_type}' at index {current_idx}." + raise ValueError(msg) + else: + msg = f"Invalid node type at index {current_idx}." + raise TypeError(msg) + + __all__ = ["Node", "Rule", "Decision", "DecisionTree"] diff --git a/packages/openstef-meta/tests/models/test_rules_forecaster.py b/packages/openstef-meta/tests/models/test_rules_forecaster.py new file mode 100644 index 000000000..434f0c6c2 --- /dev/null +++ b/packages/openstef-meta/tests/models/test_rules_forecaster.py @@ -0,0 +1,136 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_meta.models.rules_forecaster import ( + RulesForecaster, + RulesForecasterConfig, + RulesForecasterHyperParams, +) +from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder + + +@pytest.fixture +def base_config() -> RulesForecasterConfig: + """Base configuration for Rules forecaster tests.""" + + params = RulesForecasterHyperParams() + return RulesForecasterConfig( + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime(timedelta(days=1))], + hyperparams=params, + verbosity=False, + ) + + +def test_rules_forecaster_fit_predict( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: RulesForecasterConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = RulesForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + +def test_rules_forecaster_predict_not_fitted_raises_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: RulesForecasterConfig, +): + """Test that predict() raises NotFittedError when called before fit().""" + # Arrange + forecaster = RulesForecaster(config=base_config) + + # Act & Assert + with pytest.raises(NotFittedError, match="RulesForecaster"): + forecaster.predict(sample_forecast_input_dataset) + + +def test_rules_forecaster_with_sample_weights( + sample_dataset_with_weights: ForecastInputDataset, + base_config: RulesForecasterConfig, +): + """Test that forecaster works with sample weights and produces different results.""" + # Arrange + forecaster_with_weights = RulesForecaster(config=base_config) + + # Create dataset without weights for comparison + data_without_weights = ForecastInputDataset( + data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), + sample_interval=sample_dataset_with_weights.sample_interval, + target_column=sample_dataset_with_weights.target_column, + forecast_start=sample_dataset_with_weights.forecast_start, + ) + forecaster_without_weights = RulesForecaster(config=base_config) + + # Act + forecaster_with_weights.fit(sample_dataset_with_weights) + forecaster_without_weights.fit(data_without_weights) + + # Predict using data without sample_weight column (since that's used for training, not prediction) + result_with_weights = forecaster_with_weights.predict(data_without_weights) + result_without_weights = forecaster_without_weights.predict(data_without_weights) + + # Assert + # Both should produce valid forecasts + assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" + assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + + # Sample weights should affect the model, so results should be different + # (This is a statistical test - with different weights, predictions should differ) + differences = (result_with_weights.data - result_without_weights.data).abs() + assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +def test_rules_forecaster_with_additional_features( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: RulesForecasterConfig, +): + """Test that forecaster works with additional features for the final learner.""" + + base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) + + # Arrange + expected_quantiles = base_config.quantiles + forecaster = RulesForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" diff --git a/packages/openstef-meta/tests/utils/__init__.py b/packages/openstef-meta/tests/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-meta/tests/utils/test_decision_tree.py b/packages/openstef-meta/tests/utils/test_decision_tree.py new file mode 100644 index 000000000..11298e9d5 --- /dev/null +++ b/packages/openstef-meta/tests/utils/test_decision_tree.py @@ -0,0 +1,41 @@ +import pandas as pd +import pytest + +from openstef_meta.utils.decision_tree import Decision, DecisionTree, Node, Rule + + +@pytest.fixture +def sample_dataset() -> pd.DataFrame: + data = { + "feature_1": [1, 2, 3, 4, 5], + "feature_2": [10, 20, 30, 40, 50], + } + return pd.DataFrame(data) + + +@pytest.fixture +def simple_decision_tree() -> DecisionTree: + nodes: list[Node] = [ + Rule( + idx=0, + rule_type="less_than", + feature_name="feature_1", + threshold=3, + next_true=1, + next_false=2, + ), + Decision(idx=1, decision="Class_A"), + Decision(idx=2, decision="Class_B"), + ] + return DecisionTree(nodes=nodes, outcomes={"Class_A", "Class_B"}) + + +def test_decision_tree_prediction(sample_dataset: pd.DataFrame, simple_decision_tree: DecisionTree): + + decisions = sample_dataset.apply(simple_decision_tree.get_decision, axis=1) + + expected_decisions = pd.Series( + ["Class_A", "Class_A", "Class_B", "Class_B", "Class_B"], + ) + + pd.testing.assert_series_equal(decisions, expected_decisions) From 100494cbbd079636d755da731527227855991e5b Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 26 Nov 2025 10:55:24 +0100 Subject: [PATCH 037/104] Updated feature specification Signed-off-by: Lars van Someren --- .../openstef_meta/framework/final_learner.py | 34 ++++++++-- .../framework/meta_forecaster.py | 1 + .../src/openstef_meta/transforms/selector.py | 50 ++++++++++++++ .../presets/forecasting_workflow.py | 12 ++-- .../general/distribution_transform.py | 65 ------------------- 5 files changed, 84 insertions(+), 78 deletions(-) create mode 100644 packages/openstef-meta/src/openstef_meta/transforms/selector.py delete mode 100644 packages/openstef-models/src/openstef_models/transforms/general/distribution_transform.py diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py index dc92cc1c1..f26105aa1 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py @@ -9,13 +9,17 @@ """ from abc import ABC, abstractmethod +from collections.abc import Sequence from pydantic import ConfigDict, Field from openstef_core.datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset from openstef_core.mixins import HyperParams, TransformPipeline from openstef_core.transforms import TimeSeriesTransform +from openstef_models.transforms.general.scaler import Scaler from openstef_core.types import Quantile +from openstef_models.utils.feature_selection import FeatureSelection +from openstef_meta.transforms.selector import Selector class FinalLearnerHyperParams(HyperParams): @@ -23,8 +27,28 @@ class FinalLearnerHyperParams(HyperParams): model_config = ConfigDict(arbitrary_types_allowed=True) - feature_adders: list[TimeSeriesTransform] = Field( - default=[], + feature_adders: Sequence[TimeSeriesTransform] = Field( + default=[ + Selector( + selection=FeatureSelection( + include={ + "temperature_2m", + "relative_humidity_2m", + "surface_pressure", + "cloud_cover", + "wind_speed_10m", + "wind_speed_80m", + "wind_direction_10m", + "shortwave_radiation", + "direct_radiation", + "diffuse_radiation", + "direct_normal_irradiance", + "load", + } + ), + ), + Scaler(method="standard"), + ], description="Additional features to add to the base learner predictions before fitting the final learner.", ) @@ -82,11 +106,7 @@ def calculate_features(self, data: ForecastInputDataset) -> ForecastInputDataset Returns: TimeSeriesDataset with additional features. """ - data_ts = TimeSeriesDataset( - data=data.data, - sample_interval=data.sample_interval, - ) - data_transformed = self.final_learner_processing.transform(data_ts) + data_transformed = self.final_learner_processing.transform(data) return ForecastInputDataset( data=data_transformed.data, diff --git a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py index 3af6329d7..670eeb1bc 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py @@ -105,6 +105,7 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None base_predictions = self._predict_base_learners(data=full_dataset) if self._final_learner.has_features: + self._final_learner.final_learner_processing.fit(data=full_dataset) features = self._final_learner.calculate_features(data=full_dataset) else: features = None diff --git a/packages/openstef-meta/src/openstef_meta/transforms/selector.py b/packages/openstef-meta/src/openstef_meta/transforms/selector.py new file mode 100644 index 000000000..75eb4e321 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/transforms/selector.py @@ -0,0 +1,50 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Transform for dropping rows containing NaN values. + +This module provides functionality to drop rows containing NaN values in selected +columns, useful for data cleaning and ensuring complete cases for model training. +""" + +from typing import override + +from pydantic import Field + +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import TimeSeriesDataset +from openstef_core.datasets.validated_datasets import ForecastInputDataset +from openstef_core.transforms import TimeSeriesTransform +from openstef_models.utils.feature_selection import FeatureSelection + + +class Selector(BaseConfig, TimeSeriesTransform): + """Selects features based on FeatureSelection.""" + + selection: FeatureSelection = Field( + default=FeatureSelection.ALL, + description="Features to check for NaN values. Rows with NaN in any selected column are dropped.", + ) + + @override + def fit(self, data: TimeSeriesDataset) -> None: + if ( + isinstance(data, ForecastInputDataset) + and self.selection.include is not None + and (data.target_column not in self.selection.include) + ): + self.selection.include.add(data.target_column) + + @override + def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: + + features = self.selection.resolve(data.feature_names) + + transformed_data = data.data.drop(columns=[col for col in data.feature_names if col not in features]) + + return data.copy_with(data=transformed_data, is_sorted=True) + + @override + def features_added(self) -> list[str]: + return [] diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 37dc5bbdb..ba0e279f5 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -451,11 +451,11 @@ def create_forecasting_workflow( selection=Exclude(config.target_column), ), ] - forecaster = ResidualForecaster( - config=ResidualForecaster.Config( + forecaster = LearnedWeightsForecaster( + config=LearnedWeightsForecaster.Config( quantiles=config.quantiles, horizons=config.horizons, - hyperparams=config.residual_hyperparams, + hyperparams=config.learned_weights_hyperparams, ) ) postprocessing = [QuantileSorter()] @@ -473,11 +473,11 @@ def create_forecasting_workflow( selection=Exclude(config.target_column), ), ] - forecaster = LearnedWeightsForecaster( - config=LearnedWeightsForecaster.Config( + forecaster = ResidualForecaster( + config=ResidualForecaster.Config( quantiles=config.quantiles, horizons=config.horizons, - hyperparams=config.learned_weights_hyperparams, + hyperparams=config.residual_hyperparams, ) ) postprocessing = [QuantileSorter()] diff --git a/packages/openstef-models/src/openstef_models/transforms/general/distribution_transform.py b/packages/openstef-models/src/openstef_models/transforms/general/distribution_transform.py deleted file mode 100644 index 8e93da672..000000000 --- a/packages/openstef-models/src/openstef_models/transforms/general/distribution_transform.py +++ /dev/null @@ -1,65 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""Transform for clipping feature values to observed ranges. - -This module provides functionality to clip feature values to their observed -minimum and maximum ranges during training, preventing out-of-range values -during inference and improving model robustness. -""" - -from typing import Literal, override - -import pandas as pd -from pydantic import Field, PrivateAttr - -from openstef_core.base_model import BaseConfig -from openstef_core.datasets import TimeSeriesDataset -from openstef_core.exceptions import NotFittedError -from openstef_core.transforms import TimeSeriesTransform -from openstef_models.utils.feature_selection import FeatureSelection - -type ClipMode = Literal["minmax", "standard"] - - -class DistributionTransform(BaseConfig, TimeSeriesTransform): - """Transform dataframe to (robust) percentage of min-max of training data. - - Useful to determine whether datadrift has occured. - Can be used as a feature for learning sample weights in meta models. - """ - - robust_threshold: float = Field( - default=2.0, - description="Percentage of observations to ignore when determing percentage. (Single sided)", - ) - - _feature_mins: pd.Series = PrivateAttr(default_factory=pd.Series) - _feature_maxs: pd.Series = PrivateAttr(default_factory=pd.Series) - _is_fitted: bool = PrivateAttr(default=False) - - @property - @override - def is_fitted(self) -> bool: - return self._is_fitted - - @override - def fit(self, data: TimeSeriesDataset) -> None: - self._feature_mins = data.data.min(axis=0) - self._feature_maxs = data.data.max(axis=0) - self._is_fitted = True - - @override - def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: - if not self._is_fitted: - raise NotFittedError(self.__class__.__name__) - - # Apply min-max scaling to each feature based on fitted min and max - transformed_data = (data.data - self._feature_mins) / (self._feature_maxs - self._feature_mins) - - return TimeSeriesDataset(data=transformed_data, sample_interval=data.sample_interval) - - @override - def features_added(self) -> list[str]: - return [] From d8d10a1f42a3592cbb5c52b50809f4926c9198dc Mon Sep 17 00:00:00 2001 From: floriangoethals Date: Wed, 26 Nov 2025 14:30:21 +0100 Subject: [PATCH 038/104] entered flagger feature in new architecture --- .../openstef_meta/framework/final_learner.py | 3 +- .../framework/meta_forecaster.py | 1 + .../presets/forecasting_workflow.py | 4 +- .../transforms/general/__init__.py | 2 + .../transforms/general/flag_features_bound.py | 105 ++++++++++++++++++ .../general/test_flag_features_bound.py | 64 +++++++++++ 6 files changed, 176 insertions(+), 3 deletions(-) create mode 100644 packages/openstef-models/src/openstef_models/transforms/general/flag_features_bound.py create mode 100644 packages/openstef-models/tests/unit/transforms/general/test_flag_features_bound.py diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py index dc92cc1c1..22fcf4d7a 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py @@ -16,6 +16,7 @@ from openstef_core.mixins import HyperParams, TransformPipeline from openstef_core.transforms import TimeSeriesTransform from openstef_core.types import Quantile +from openstef_models.transforms.general import Flagger class FinalLearnerHyperParams(HyperParams): @@ -24,7 +25,7 @@ class FinalLearnerHyperParams(HyperParams): model_config = ConfigDict(arbitrary_types_allowed=True) feature_adders: list[TimeSeriesTransform] = Field( - default=[], + default=[Flagger()], description="Additional features to add to the base learner predictions before fitting the final learner.", ) diff --git a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py index 3af6329d7..c5ba7a33b 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py @@ -105,6 +105,7 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None base_predictions = self._predict_base_learners(data=full_dataset) if self._final_learner.has_features: + self._final_learner.final_learner_processing.fit(full_dataset) features = self._final_learner.calculate_features(data=full_dataset) else: features = None diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 37dc5bbdb..8c4c7e611 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -437,7 +437,7 @@ def create_forecasting_workflow( postprocessing = [ ConfidenceIntervalApplicator(quantiles=config.quantiles), ] - elif config.model == "learned_weights": + elif config.model == "residual": preprocessing = [ *checks, *feature_adders, @@ -459,7 +459,7 @@ def create_forecasting_workflow( ) ) postprocessing = [QuantileSorter()] - elif config.model == "residual": + elif config.model == "learned_weights": preprocessing = [ *checks, *feature_adders, diff --git a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py index 79e59f58b..57a5b6187 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py @@ -17,6 +17,7 @@ from openstef_models.transforms.general.nan_dropper import NaNDropper from openstef_models.transforms.general.sample_weighter import SampleWeighter from openstef_models.transforms.general.scaler import Scaler +from openstef_models.transforms.general.flag_features_bound import Flagger __all__ = [ "Clipper", @@ -26,4 +27,5 @@ "NaNDropper", "SampleWeighter", "Scaler", + "Flagger" ] diff --git a/packages/openstef-models/src/openstef_models/transforms/general/flag_features_bound.py b/packages/openstef-models/src/openstef_models/transforms/general/flag_features_bound.py new file mode 100644 index 000000000..6f4a4df3a --- /dev/null +++ b/packages/openstef-models/src/openstef_models/transforms/general/flag_features_bound.py @@ -0,0 +1,105 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Transform for clipping feature values to observed ranges. + +This module provides functionality to clip feature values to their observed +minimum and maximum ranges during training, preventing out-of-range values +during inference and improving model robustness. +""" + +from typing import Literal, override + +import pandas as pd +from pydantic import Field, PrivateAttr + +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import TimeSeriesDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.transforms import TimeSeriesTransform +from openstef_models.utils.feature_selection import FeatureSelection + + + + + +class Flagger(BaseConfig, TimeSeriesTransform): + """Transform that flags specified features to their observed min and max values. + + This transform flags the peaks for the metalearner to know when to expect outliers and + extrapolate from its training set. + + + Example: + >>> import pandas as pd + >>> from datetime import timedelta + >>> from openstef_core.datasets import TimeSeriesDataset + >>> from openstef_models.transforms.general import Clipper + >>> + >>> # Create sample training dataset + >>> training_data = pd.DataFrame({ + ... 'load': [100, 120, 110, 130, 125], + ... 'temperature': [20, 22, 21, 23, 24] + ... }, index=pd.date_range('2025-01-01', periods=5, freq='1h')) + >>> training_dataset = TimeSeriesDataset(training_data, timedelta(hours=1)) + >>> test_data = pd.DataFrame({ + ... 'load': [90, 140, 115], + ... 'temperature': [19, 25, 22] + ... }, index=pd.date_range('2025-01-06', periods=3, + ... freq='1h')) + >>> test_dataset = TimeSeriesDataset(test_data, timedelta(hours=1)) + >>> # Initialize and apply transform + >>> clipper = Clipper(selection=FeatureSelection(include=['load', 'temperature']), mode='minmax') + >>> clipper.fit(training_dataset) + >>> transformed_dataset = clipper.transform(test_dataset) + >>> clipper._feature_mins.to_dict() + {'load': 100, 'temperature': 20} + >>> clipper._feature_maxs.to_dict() + {'load': 130, 'temperature': 24} + >>> transformed_dataset.data['load'].tolist() + [100, 130, 115] + >>> transformed_dataset.data['temperature'].tolist() + [20, 24, 22] + + """ + + selection: FeatureSelection = Field(default=FeatureSelection.ALL, description="Features to flag.") + + _feature_mins: pd.Series = PrivateAttr(default_factory=pd.Series) + _feature_maxs: pd.Series = PrivateAttr(default_factory=pd.Series) + _is_fitted: bool = PrivateAttr(default=False) + @property + @override + def is_fitted(self) -> bool: + return self._is_fitted + + @override + def fit(self, data: TimeSeriesDataset) -> None: + features = self.selection.resolve(data.feature_names) + self._feature_mins = data.data.reindex(features, axis=1).min() + self._feature_maxs = data.data.reindex(features, axis=1).max() + self._is_fitted = True + + @override + def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: + if not self._is_fitted: + raise NotFittedError(self.__class__.__name__) + + features = self.selection.resolve(data.feature_names) + transformed_data = data.data.copy(deep=False).loc[:, features] + + # compute min & max of the features + min_aligned = self._feature_mins.reindex(features) + max_aligned = self._feature_maxs.reindex(features) + + outside = (transformed_data[features] <= min_aligned) | (transformed_data[features] >= max_aligned) + transformed_data = (~outside).astype(int) + + + + return TimeSeriesDataset(data=transformed_data, sample_interval=data.sample_interval) + + @override + def features_added(self) -> list[str]: + return [] diff --git a/packages/openstef-models/tests/unit/transforms/general/test_flag_features_bound.py b/packages/openstef-models/tests/unit/transforms/general/test_flag_features_bound.py new file mode 100644 index 000000000..62f31ef97 --- /dev/null +++ b/packages/openstef-models/tests/unit/transforms/general/test_flag_features_bound.py @@ -0,0 +1,64 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pandas as pd +import pytest + +from openstef_core.datasets import TimeSeriesDataset +from openstef_core.exceptions import NotFittedError +from openstef_models.transforms.general import Flagger +from openstef_models.utils.feature_selection import FeatureSelection + + +@pytest.fixture +def train_dataset() -> TimeSeriesDataset: + """Training dataset with three features A, B, C.""" + return TimeSeriesDataset( + data=pd.DataFrame( + {"A": [1.0, 2.0, 3.0], "B": [1.0, 2.0, 3.0], "C": [1.0, 2.0, 3.0]}, + index=pd.date_range("2025-01-01", periods=3, freq="1h"), + ), + sample_interval=timedelta(hours=1), + ) + + +@pytest.fixture +def test_dataset() -> TimeSeriesDataset: + """Test dataset with values outside training ranges.""" + return TimeSeriesDataset( + data=pd.DataFrame( + {"A": [2, 2], "B": [0.0, 2.0], "C": [1, 4]}, + index=pd.date_range("2025-01-06", periods=2, freq="1h"), + ), + sample_interval=timedelta(hours=1), + ) + + + +def test_flagger__fit_transform( + train_dataset: TimeSeriesDataset, + test_dataset: TimeSeriesDataset, + ): + """Test fit and transform flags correctly leaves other columns unchanged.""" + # Arrange + flagger = Flagger(selection=FeatureSelection(include={"A", "B", "C"})) + + # Act + flagger.fit(train_dataset) + transformed_dataset = flagger.transform(test_dataset) + + # Assert + # Column C should remain unchanged + expected_df = pd.DataFrame( + { + "A": [1,1], + "B": [0,1], + "C": [0,0], # Unchanged + }, + index=test_dataset.index, + ) + pd.testing.assert_frame_equal(transformed_dataset.data, expected_df) + assert transformed_dataset.sample_interval == test_dataset.sample_interval From 797eee7328ddaf617287efb36326a81946acfc8c Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 26 Nov 2025 14:48:02 +0100 Subject: [PATCH 039/104] Fix sample weights Signed-off-by: Lars van Someren --- .../openstef_meta/framework/final_learner.py | 49 ++++++++++--------- .../framework/meta_forecaster.py | 1 + .../models/learned_weights_forecaster.py | 5 +- .../openstef_meta/models/rules_forecaster.py | 4 ++ 4 files changed, 35 insertions(+), 24 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py index f26105aa1..4d0d7b719 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py @@ -11,15 +11,36 @@ from abc import ABC, abstractmethod from collections.abc import Sequence +import pandas as pd from pydantic import ConfigDict, Field from openstef_core.datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset from openstef_core.mixins import HyperParams, TransformPipeline from openstef_core.transforms import TimeSeriesTransform -from openstef_models.transforms.general.scaler import Scaler from openstef_core.types import Quantile -from openstef_models.utils.feature_selection import FeatureSelection from openstef_meta.transforms.selector import Selector +from openstef_models.utils.feature_selection import FeatureSelection + +WEATHER_FEATURES = { + "temperature_2m", + "relative_humidity_2m", + "surface_pressure", + "cloud_cover", + "wind_speed_10m", + "wind_speed_80m", + "wind_direction_10m", + "shortwave_radiation", + "direct_radiation", + "diffuse_radiation", + "direct_normal_irradiance", + "load", +} + +SELECTOR = ( + Selector( + selection=FeatureSelection.NONE, + ), +) class FinalLearnerHyperParams(HyperParams): @@ -28,27 +49,7 @@ class FinalLearnerHyperParams(HyperParams): model_config = ConfigDict(arbitrary_types_allowed=True) feature_adders: Sequence[TimeSeriesTransform] = Field( - default=[ - Selector( - selection=FeatureSelection( - include={ - "temperature_2m", - "relative_humidity_2m", - "surface_pressure", - "cloud_cover", - "wind_speed_10m", - "wind_speed_80m", - "wind_direction_10m", - "shortwave_radiation", - "direct_radiation", - "diffuse_radiation", - "direct_normal_irradiance", - "load", - } - ), - ), - Scaler(method="standard"), - ], + default=[], description="Additional features to add to the base learner predictions before fitting the final learner.", ) @@ -70,6 +71,7 @@ def fit( self, base_learner_predictions: dict[Quantile, ForecastInputDataset], additional_features: ForecastInputDataset | None, + sample_weights: pd.Series | None = None, ) -> None: """Fit the final learner using base learner predictions. @@ -77,6 +79,7 @@ def fit( base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner predictions. additional_features: Optional ForecastInputDataset containing additional features for the final learner. + sample_weights: Optional series of sample weights for fitting. """ raise NotImplementedError("Subclasses must implement the fit method.") diff --git a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py index 670eeb1bc..85aaeb9b1 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py @@ -119,6 +119,7 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None self._final_learner.fit( base_learner_predictions=quantile_datasets, additional_features=features, + sample_weights=data.data.loc[:, data.sample_weight_column], ) self._is_fitted = True diff --git a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py index 255e25dd0..43e1ba567 100644 --- a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py @@ -89,6 +89,7 @@ def fit( self, base_learner_predictions: dict[Quantile, ForecastInputDataset], additional_features: ForecastInputDataset | None, + sample_weights: pd.Series | None = None, ) -> None: for i, q in enumerate(self.quantiles): @@ -121,8 +122,10 @@ def fit( self._label_encoder.fit(labels) labels = self._label_encoder.transform(labels) - # Balance classes + # Balance classes, adjust with sample weights weights = compute_sample_weight("balanced", labels) + if sample_weights is not None: + weights *= sample_weights self.models[i].fit(X=df, y=labels, sample_weight=weights) # type: ignore self._is_fitted = True diff --git a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py index 4211b9e18..52033cbc5 100644 --- a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py @@ -76,12 +76,16 @@ def fit( self, base_learner_predictions: dict[Quantile, ForecastInputDataset], additional_features: ForecastInputDataset | None, + sample_weights: pd.Series | None = None, ) -> None: # No fitting needed for rule-based final learner # Check that additional features are provided if additional_features is None: raise ValueError("Additional features must be provided for RulesFinalLearner prediction.") + if sample_weights is not None: + logger.warning("Sample weights are ignored in RulesLearner.fit method.") + def _predict_tree(self, data: pd.DataFrame, columns: pd.Index) -> pd.DataFrame: """Predict using the decision tree rules. From 88c68654ddb702713915673a4a31d34745357e02 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 27 Nov 2025 09:15:15 +0100 Subject: [PATCH 040/104] Fixes Signed-off-by: Lars van Someren --- .../src/openstef_meta/framework/final_learner.py | 12 +++++------- .../models/learned_weights_forecaster.py | 7 +++++-- .../openstef_models/presets/forecasting_workflow.py | 4 ++-- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py index eab95f1ca..c509dc05a 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py @@ -19,6 +19,7 @@ from openstef_core.transforms import TimeSeriesTransform from openstef_core.types import Quantile from openstef_meta.transforms.selector import Selector +from openstef_models.transforms.general import Flagger from openstef_models.utils.feature_selection import FeatureSelection WEATHER_FEATURES = { @@ -36,12 +37,9 @@ "load", } -SELECTOR = ( - Selector( - selection=FeatureSelection.NONE, - ), +SELECTOR = Selector( + selection=FeatureSelection(include=WEATHER_FEATURES), ) -from openstef_models.transforms.general import Flagger class FinalLearnerHyperParams(HyperParams): @@ -49,8 +47,8 @@ class FinalLearnerHyperParams(HyperParams): model_config = ConfigDict(arbitrary_types_allowed=True) - feature_adders: list[TimeSeriesTransform] = Field( - default=[Flagger()], + feature_adders: Sequence[TimeSeriesTransform] = Field( + default=[SELECTOR], description="Additional features to add to the base learner predictions before fitting the final learner.", ) diff --git a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py index 43e1ba567..bf0150644 100644 --- a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py @@ -44,6 +44,8 @@ GBLinearHyperParams, ) from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +from openstef_models.models.forecasting.xgboost_forecaster import XGBoostHyperParams +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearHyperParams logger = logging.getLogger(__name__) @@ -199,7 +201,7 @@ class LGBMLearnerHyperParams(LWFLHyperParams): """Hyperparameters for Learned Weights Final Learner with LGBM Classifier.""" n_estimators: int = Field( - default=20, + default=200, description="Number of estimators for the LGBM Classifier. Defaults to 20.", ) @@ -232,6 +234,7 @@ def __init__( class_weight="balanced", n_estimators=hyperparams.n_estimators, num_leaves=hyperparams.n_leaves, + n_jobs=1, ) for _ in quantiles ] @@ -242,7 +245,7 @@ class RFLearnerHyperParams(LWFLHyperParams): """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" n_estimators: int = Field( - default=20, + default=200, description="Number of estimators for the LGBM Classifier. Defaults to 20.", ) diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 189f18df2..ba0e279f5 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -437,7 +437,7 @@ def create_forecasting_workflow( postprocessing = [ ConfidenceIntervalApplicator(quantiles=config.quantiles), ] - elif config.model == "residual": + elif config.model == "learned_weights": preprocessing = [ *checks, *feature_adders, @@ -459,7 +459,7 @@ def create_forecasting_workflow( ) ) postprocessing = [QuantileSorter()] - elif config.model == "learned_weights": + elif config.model == "residual": preprocessing = [ *checks, *feature_adders, From c6749b3ad9182b0ede8d372dd1606979720d5759 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 27 Nov 2025 15:25:43 +0100 Subject: [PATCH 041/104] PR compliant Signed-off-by: Lars van Someren --- packages/openstef-meta/pyproject.toml | 4 ++ .../src/openstef_meta/examples/__init__.py | 5 +++ .../examples/liander_2024_residual.py | 6 ++- .../openstef_meta/framework/final_learner.py | 1 - .../framework/meta_forecaster.py | 18 ++++++++- .../models/learned_weights_forecaster.py | 11 ++--- .../models/residual_forecaster.py | 4 +- .../openstef_meta/models/rules_forecaster.py | 6 +-- .../models/stacking_forecaster.py | 17 ++------ .../src/openstef_meta/transforms/__init__.py | 11 +++++ .../transforms}/flag_features_bound.py | 40 ++++++++----------- .../src/openstef_meta/utils/__init__.py | 4 ++ .../src/openstef_meta/utils/datasets.py | 0 .../src/openstef_meta/utils/decision_tree.py | 5 +++ .../src/openstef_meta/utils/pinball_errors.py | 4 ++ .../models/test_learned_weights_forecaster.py | 4 +- .../tests/transforms/__init__.py | 0 .../transforms}/test_flag_features_bound.py | 12 +++--- .../tests/utils/test_decision_tree.py | 4 ++ .../transforms/general/__init__.py | 2 - 20 files changed, 97 insertions(+), 61 deletions(-) create mode 100644 packages/openstef-meta/src/openstef_meta/examples/__init__.py create mode 100644 packages/openstef-meta/src/openstef_meta/transforms/__init__.py rename packages/{openstef-models/src/openstef_models/transforms/general => openstef-meta/src/openstef_meta/transforms}/flag_features_bound.py (79%) create mode 100644 packages/openstef-meta/src/openstef_meta/utils/datasets.py create mode 100644 packages/openstef-meta/tests/transforms/__init__.py rename packages/{openstef-models/tests/unit/transforms/general => openstef-meta/tests/transforms}/test_flag_features_bound.py (89%) diff --git a/packages/openstef-meta/pyproject.toml b/packages/openstef-meta/pyproject.toml index a91b25359..838ef8ee9 100644 --- a/packages/openstef-meta/pyproject.toml +++ b/packages/openstef-meta/pyproject.toml @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + [project] name = "openstef-meta" version = "0.1.0" diff --git a/packages/openstef-meta/src/openstef_meta/examples/__init__.py b/packages/openstef-meta/src/openstef_meta/examples/__init__.py new file mode 100644 index 000000000..765b7c107 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/examples/__init__.py @@ -0,0 +1,5 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Examples for OpenSTEF Meta.""" diff --git a/packages/openstef-meta/src/openstef_meta/examples/liander_2024_residual.py b/packages/openstef-meta/src/openstef_meta/examples/liander_2024_residual.py index 3448697ac..a8a42b113 100644 --- a/packages/openstef-meta/src/openstef_meta/examples/liander_2024_residual.py +++ b/packages/openstef-meta/src/openstef_meta/examples/liander_2024_residual.py @@ -42,6 +42,8 @@ logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") +logger = logging.getLogger(__name__) + OUTPUT_PATH = Path("./benchmark_results") N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark @@ -139,6 +141,7 @@ def _create_workflow() -> CustomForecastingWorkflow: if __name__ == "__main__": start_time = time.time() + create_liander2024_benchmark_runner( storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH / model), data_dir=Path("../data/liander2024-energy-forecasting-benchmark"), # adjust path as needed @@ -151,4 +154,5 @@ def _create_workflow() -> CustomForecastingWorkflow: ) end_time = time.time() - print(f"Benchmark completed in {end_time - start_time:.2f} seconds.") + msg = f"Benchmark completed in {end_time - start_time:.2f} seconds." + logger.info(msg) diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py index c509dc05a..cdf208309 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py @@ -19,7 +19,6 @@ from openstef_core.transforms import TimeSeriesTransform from openstef_core.types import Quantile from openstef_meta.transforms.selector import Selector -from openstef_models.transforms.general import Flagger from openstef_models.utils.feature_selection import FeatureSelection WEATHER_FEATURES = { diff --git a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py index 928ac5651..56eb34681 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py @@ -64,6 +64,18 @@ def _init_base_learners( def config(self) -> ForecasterConfig: return self._config + @property + def feature_importances(self) -> pd.DataFrame: + """Placeholder for feature importances across base learners and final learner.""" + raise NotImplementedError("Feature importances are not implemented for EnsembleForecaster.") + # TODO(#745): Make MetaForecaster explainable + + @property + def model_contributions(self) -> pd.DataFrame: + """Placeholder for model contributions across base learners and final learner.""" + raise NotImplementedError("Model contributions are not implemented for EnsembleForecaster.") + # TODO(#745): Make MetaForecaster explainable + class EnsembleForecaster(MetaForecaster): """Abstract class for Meta forecasters combining multiple base learners and a final learner.""" @@ -116,10 +128,14 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None target_series=data.target_series, ) + sample_weights = None + if data.sample_weight_column in data.data.columns: + sample_weights = data.data.loc[:, data.sample_weight_column] + self._final_learner.fit( base_learner_predictions=quantile_datasets, additional_features=features, - sample_weights=data.data.loc[:, data.sample_weight_column], + sample_weights=sample_weights, ) self._is_fitted = True diff --git a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py index bf0150644..df26edb12 100644 --- a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py @@ -44,8 +44,6 @@ GBLinearHyperParams, ) from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams -from openstef_models.models.forecasting.xgboost_forecaster import XGBoostHyperParams -from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearHyperParams logger = logging.getLogger(__name__) @@ -93,6 +91,12 @@ def fit( additional_features: ForecastInputDataset | None, sample_weights: pd.Series | None = None, ) -> None: + q0 = self.quantiles[0] + base_learners_map = set(base_learner_predictions[q0].data.columns).difference({ + base_learner_predictions[q0].target_column, + base_learner_predictions[q0].sample_weight_column, + }) + self._label_encoder.fit(list(base_learners_map)) for i, q in enumerate(self.quantiles): base_predictions = base_learner_predictions[q].data.drop( @@ -119,9 +123,6 @@ def fit( logger.warning(msg=msg) self.models[i] = DummyClassifier(strategy="most_frequent") - if i == 0: - # Fit label encoder only once - self._label_encoder.fit(labels) labels = self._label_encoder.transform(labels) # Balance classes, adjust with sample weights diff --git a/packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py index 4c0de156b..8905d8f57 100644 --- a/packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py @@ -94,7 +94,7 @@ def _init_secondary_model(self, hyperparams: BaseLearnerHyperParams) -> list[Bas list[Forecaster]: List containing the initialized secondary model forecaster. """ models: list[BaseLearner] = [] - + # Different datasets per quantile, so we need a model per quantile for q in self.config.quantiles: config = self._config.model_copy(update={"quantiles": [q]}) secondary_model = self._init_base_learners(config=config, base_hyperparams=[hyperparams])[0] @@ -114,7 +114,7 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None # Fit primary model self._primary_model.fit(data=data, data_val=data_val) - # Reset forecast start date to ensure we predict on the full dataset + # Reset forecast start date to ensure we fit on the full training set full_dataset = ForecastInputDataset( data=data.data, sample_interval=data.sample_interval, diff --git a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py index 52033cbc5..1389c28da 100644 --- a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py @@ -4,6 +4,7 @@ """Rules-based Meta Forecaster Module.""" import logging +from collections.abc import Sequence from typing import override import pandas as pd @@ -22,7 +23,7 @@ from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) -from openstef_meta.utils.decision_tree import Decision, DecisionTree, Rule +from openstef_meta.utils.decision_tree import Decision, DecisionTree from openstef_models.models.forecasting.forecaster import ( ForecasterConfig, ) @@ -38,7 +39,7 @@ class RulesLearnerHyperParams(FinalLearnerHyperParams): """HyperParams for Stacking Final Learner.""" - feature_adders: list[TimeSeriesTransform] = Field( + feature_adders: Sequence[TimeSeriesTransform] = Field( default=[], description="Additional features to add to the final learner.", ) @@ -64,7 +65,6 @@ def __init__(self, quantiles: list[Quantile], hyperparams: RulesLearnerHyperPara Args: quantiles: List of quantiles to predict. hyperparams: Hyperparameters for the final learner. - horizon: Forecast horizon for which to create the final learner. """ super().__init__(quantiles=quantiles, hyperparams=hyperparams) diff --git a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py index 2af0ff395..045fe2988 100644 --- a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py @@ -10,6 +10,7 @@ """ import logging +from collections.abc import Sequence from typing import override import pandas as pd @@ -45,14 +46,14 @@ class StackingFinalLearnerHyperParams(FinalLearnerHyperParams): """HyperParams for Stacking Final Learner.""" - feature_adders: list[TimeSeriesTransform] = Field( + feature_adders: Sequence[TimeSeriesTransform] = Field( default=[], description="Additional features to add to the base learner predictions before fitting the final learner.", ) forecaster_hyperparams: BaseLearnerHyperParams = Field( default=GBLinearHyperParams(), - description="", + description="Forecaster hyperparameters for the final learner. Defaults to GBLinearHyperParams.", ) @@ -115,6 +116,7 @@ def fit( self, base_learner_predictions: dict[Quantile, ForecastInputDataset], additional_features: ForecastInputDataset | None, + sample_weights: pd.Series | None = None, ) -> None: for i, q in enumerate(self.quantiles): @@ -179,17 +181,6 @@ class StackingHyperParams(HyperParams): description="Hyperparameters for the final learner.", ) - use_classifier: bool = Field( - default=True, - description="Whether to use sample weights when fitting base and final learners. Defaults to False.", - ) - - add_rolling_accuracy_features: bool = Field( - default=False, - description="Whether to add rolling accuracy features from base learners as additional features " - "to the final learner. Defaults to False.", - ) - @field_validator("base_hyperparams", mode="after") @classmethod def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: diff --git a/packages/openstef-meta/src/openstef_meta/transforms/__init__.py b/packages/openstef-meta/src/openstef_meta/transforms/__init__.py new file mode 100644 index 000000000..e551ace37 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/transforms/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Module for OpenSTEF Meta Transforms.""" + +from openstef_meta.transforms.flag_features_bound import Flagger + +__all__ = [ + "Flagger", +] diff --git a/packages/openstef-models/src/openstef_models/transforms/general/flag_features_bound.py b/packages/openstef-meta/src/openstef_meta/transforms/flag_features_bound.py similarity index 79% rename from packages/openstef-models/src/openstef_models/transforms/general/flag_features_bound.py rename to packages/openstef-meta/src/openstef_meta/transforms/flag_features_bound.py index 6f4a4df3a..0d5fcd379 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/flag_features_bound.py +++ b/packages/openstef-meta/src/openstef_meta/transforms/flag_features_bound.py @@ -9,7 +9,7 @@ during inference and improving model robustness. """ -from typing import Literal, override +from typing import override import pandas as pd from pydantic import Field, PrivateAttr @@ -21,46 +21,39 @@ from openstef_models.utils.feature_selection import FeatureSelection - - - class Flagger(BaseConfig, TimeSeriesTransform): """Transform that flags specified features to their observed min and max values. - This transform flags the peaks for the metalearner to know when to expect outliers and + This transform flags the peaks for the metalearner to know when to expect outliers and extrapolate from its training set. - + Example: >>> import pandas as pd >>> from datetime import timedelta >>> from openstef_core.datasets import TimeSeriesDataset - >>> from openstef_models.transforms.general import Clipper - >>> + >>> from openstef_meta.transforms import Flagger + >>> from openstef_models.utils.feature_selection import FeatureSelection >>> # Create sample training dataset >>> training_data = pd.DataFrame({ - ... 'load': [100, 120, 110, 130, 125], - ... 'temperature': [20, 22, 21, 23, 24] - ... }, index=pd.date_range('2025-01-01', periods=5, freq='1h')) + ... 'load': [100, 90, 110], + ... 'temperature': [19, 20, 21] + ... }, index=pd.date_range('2025-01-01', periods=3, freq='1h')) >>> training_dataset = TimeSeriesDataset(training_data, timedelta(hours=1)) >>> test_data = pd.DataFrame({ - ... 'load': [90, 140, 115], - ... 'temperature': [19, 25, 22] + ... 'load': [90, 140, 100], + ... 'temperature': [18, 20, 22] ... }, index=pd.date_range('2025-01-06', periods=3, ... freq='1h')) >>> test_dataset = TimeSeriesDataset(test_data, timedelta(hours=1)) >>> # Initialize and apply transform - >>> clipper = Clipper(selection=FeatureSelection(include=['load', 'temperature']), mode='minmax') - >>> clipper.fit(training_dataset) - >>> transformed_dataset = clipper.transform(test_dataset) - >>> clipper._feature_mins.to_dict() - {'load': 100, 'temperature': 20} - >>> clipper._feature_maxs.to_dict() - {'load': 130, 'temperature': 24} + >>> flagger = Flagger(selection=FeatureSelection(include=['load', 'temperature'])) + >>> flagger.fit(training_dataset) + >>> transformed_dataset = flagger.transform(test_dataset) >>> transformed_dataset.data['load'].tolist() - [100, 130, 115] + [0, 0, 1] >>> transformed_dataset.data['temperature'].tolist() - [20, 24, 22] + [0, 1, 0] """ @@ -69,6 +62,7 @@ class Flagger(BaseConfig, TimeSeriesTransform): _feature_mins: pd.Series = PrivateAttr(default_factory=pd.Series) _feature_maxs: pd.Series = PrivateAttr(default_factory=pd.Series) _is_fitted: bool = PrivateAttr(default=False) + @property @override def is_fitted(self) -> bool: @@ -95,8 +89,6 @@ def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: outside = (transformed_data[features] <= min_aligned) | (transformed_data[features] >= max_aligned) transformed_data = (~outside).astype(int) - - return TimeSeriesDataset(data=transformed_data, sample_interval=data.sample_interval) diff --git a/packages/openstef-meta/src/openstef_meta/utils/__init__.py b/packages/openstef-meta/src/openstef_meta/utils/__init__.py index 8b8144daa..a6b9e93a4 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/utils/__init__.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Utility functions and classes for OpenSTEF Meta.""" from .decision_tree import Decision, DecisionTree, Rule diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py b/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py index c5d49852a..8e3940dfa 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py +++ b/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py @@ -1,3 +1,8 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""A simple decision tree implementation for making decisions based on feature rules.""" + from typing import Literal import pandas as pd diff --git a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py index b11d4c7b8..5fe1166d0 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py +++ b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Utility functions for calculating pinball loss errors. This module provides a function to compute the pinball loss for quantile regression. diff --git a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py index af2504a55..53d08e89f 100644 --- a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py +++ b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py @@ -135,7 +135,7 @@ def test_learned_weights_forecaster_with_sample_weights( forecaster_without_weights.fit(data_without_weights) # Predict using data without sample_weight column (since that's used for training, not prediction) - result_with_weights = forecaster_with_weights.predict(data_without_weights) + result_with_weights = forecaster_with_weights.predict(sample_dataset_with_weights) result_without_weights = forecaster_without_weights.predict(data_without_weights) # Assert @@ -157,7 +157,7 @@ def test_learned_weights_forecaster_with_additional_features( # Arrange # Add a simple feature adder that adds a constant feature - base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) + base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) # type: ignore forecaster = LearnedWeightsForecaster(config=base_config) # Act diff --git a/packages/openstef-meta/tests/transforms/__init__.py b/packages/openstef-meta/tests/transforms/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-models/tests/unit/transforms/general/test_flag_features_bound.py b/packages/openstef-meta/tests/transforms/test_flag_features_bound.py similarity index 89% rename from packages/openstef-models/tests/unit/transforms/general/test_flag_features_bound.py rename to packages/openstef-meta/tests/transforms/test_flag_features_bound.py index 62f31ef97..dc0d3ea80 100644 --- a/packages/openstef-models/tests/unit/transforms/general/test_flag_features_bound.py +++ b/packages/openstef-meta/tests/transforms/test_flag_features_bound.py @@ -8,8 +8,7 @@ import pytest from openstef_core.datasets import TimeSeriesDataset -from openstef_core.exceptions import NotFittedError -from openstef_models.transforms.general import Flagger +from openstef_meta.transforms import Flagger from openstef_models.utils.feature_selection import FeatureSelection @@ -37,11 +36,10 @@ def test_dataset() -> TimeSeriesDataset: ) - def test_flagger__fit_transform( train_dataset: TimeSeriesDataset, test_dataset: TimeSeriesDataset, - ): +): """Test fit and transform flags correctly leaves other columns unchanged.""" # Arrange flagger = Flagger(selection=FeatureSelection(include={"A", "B", "C"})) @@ -54,9 +52,9 @@ def test_flagger__fit_transform( # Column C should remain unchanged expected_df = pd.DataFrame( { - "A": [1,1], - "B": [0,1], - "C": [0,0], # Unchanged + "A": [1, 1], + "B": [0, 1], + "C": [0, 0], # Unchanged }, index=test_dataset.index, ) diff --git a/packages/openstef-meta/tests/utils/test_decision_tree.py b/packages/openstef-meta/tests/utils/test_decision_tree.py index 11298e9d5..f40bdb220 100644 --- a/packages/openstef-meta/tests/utils/test_decision_tree.py +++ b/packages/openstef-meta/tests/utils/test_decision_tree.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + import pandas as pd import pytest diff --git a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py index 57a5b6187..79e59f58b 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py @@ -17,7 +17,6 @@ from openstef_models.transforms.general.nan_dropper import NaNDropper from openstef_models.transforms.general.sample_weighter import SampleWeighter from openstef_models.transforms.general.scaler import Scaler -from openstef_models.transforms.general.flag_features_bound import Flagger __all__ = [ "Clipper", @@ -27,5 +26,4 @@ "NaNDropper", "SampleWeighter", "Scaler", - "Flagger" ] From 4b3aff81e8110bf95f873536417f7ad02701c3c1 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Fri, 28 Nov 2025 15:09:13 +0100 Subject: [PATCH 042/104] Ensemble Forecast Dataset Signed-off-by: Lars van Someren --- .../openstef_meta/framework/base_learner.py | 9 + .../openstef_meta/framework/final_learner.py | 35 ++- .../framework/meta_forecaster.py | 65 +----- .../models/learned_weights_forecaster.py | 118 +++++----- .../openstef_meta/models/rules_forecaster.py | 17 +- .../models/stacking_forecaster.py | 17 +- .../src/openstef_meta/utils/datasets.py | 213 ++++++++++++++++++ .../src/openstef_meta/utils/pinball_errors.py | 14 +- .../models/test_learned_weights_forecaster.py | 6 +- .../tests/models/test_rules_forecaster.py | 2 +- .../tests/utils/test_datasets.py | 115 ++++++++++ .../models/forecasting/gblinear_forecaster.py | 2 +- 12 files changed, 468 insertions(+), 145 deletions(-) create mode 100644 packages/openstef-meta/tests/utils/test_datasets.py diff --git a/packages/openstef-meta/src/openstef_meta/framework/base_learner.py b/packages/openstef-meta/src/openstef_meta/framework/base_learner.py index 36688b419..67c559468 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/base_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/base_learner.py @@ -8,6 +8,9 @@ while ensuring full compatability with regular Forecasters. """ +from typing import Any, Literal, Self, override + +from openstef_core.base_model import PydanticStringPrimitive from openstef_models.models.forecasting.gblinear_forecaster import ( GBLinearForecaster, GBLinearHyperParams, @@ -24,3 +27,9 @@ BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams +BaseLearnerNames = Literal[ + "LGBMForecaster", + "LGBMLinearForecaster", + "XGBoostForecaster", + "GBLinearForecaster", +] diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py index cdf208309..9feee7add 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py @@ -19,6 +19,7 @@ from openstef_core.transforms import TimeSeriesTransform from openstef_core.types import Quantile from openstef_meta.transforms.selector import Selector +from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_models.utils.feature_selection import FeatureSelection WEATHER_FEATURES = { @@ -47,7 +48,7 @@ class FinalLearnerHyperParams(HyperParams): model_config = ConfigDict(arbitrary_types_allowed=True) feature_adders: Sequence[TimeSeriesTransform] = Field( - default=[SELECTOR], + default=[], description="Additional features to add to the base learner predictions before fitting the final learner.", ) @@ -67,15 +68,14 @@ def __init__(self, quantiles: list[Quantile], hyperparams: FinalLearnerHyperPara @abstractmethod def fit( self, - base_learner_predictions: dict[Quantile, ForecastInputDataset], + base_predictions: EnsembleForecastDataset, additional_features: ForecastInputDataset | None, sample_weights: pd.Series | None = None, ) -> None: """Fit the final learner using base learner predictions. Args: - base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner - predictions. + base_predictions: EnsembleForecastDataset additional_features: Optional ForecastInputDataset containing additional features for the final learner. sample_weights: Optional series of sample weights for fitting. """ @@ -83,14 +83,13 @@ def fit( def predict( self, - base_learner_predictions: dict[Quantile, ForecastInputDataset], + base_predictions: EnsembleForecastDataset, additional_features: ForecastInputDataset | None, ) -> ForecastDataset: """Generate final predictions based on base learner predictions. Args: - base_learner_predictions: Dictionary mapping Quantiles to ForecastInputDatasets containing base learner - predictions. + base_predictions: EnsembleForecastDataset containing base learner predictions. additional_features: Optional ForecastInputDataset containing additional features for the final learner. Returns: @@ -116,6 +115,28 @@ def calculate_features(self, data: ForecastInputDataset) -> ForecastInputDataset forecast_start=data.forecast_start, ) + @staticmethod + def _prepare_input_data( + dataset: ForecastInputDataset, additional_features: ForecastInputDataset | None + ) -> pd.DataFrame: + """Prepare input data by combining base predictions with additional features if provided. + + Args: + dataset: ForecastInputDataset containing base predictions. + additional_features: Optional ForecastInputDataset containing additional features. + + Returns: + pd.DataFrame: Combined DataFrame of base predictions and additional features if provided. + """ + df = dataset.input_data(start=dataset.index[0]) + if additional_features is not None: + df_a = additional_features.input_data(start=dataset.index[0]) + df = pd.concat( + [df, df_a], + axis=1, + ) + return df + @property @abstractmethod def is_fitted(self) -> bool: diff --git a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py index 56eb34681..9d998ec9e 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py @@ -9,7 +9,7 @@ """ import logging -from typing import override +from typing import cast, override import pandas as pd @@ -17,12 +17,13 @@ from openstef_core.exceptions import ( NotFittedError, ) -from openstef_core.types import Quantile from openstef_meta.framework.base_learner import ( BaseLearner, BaseLearnerHyperParams, + BaseLearnerNames, ) from openstef_meta.framework.final_learner import FinalLearner +from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_models.models.forecasting.forecaster import ( Forecaster, ForecasterConfig, @@ -122,25 +123,19 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None else: features = None - quantile_datasets = self._prepare_input_final_learner( - base_predictions=base_predictions, - quantiles=self._config.quantiles, - target_series=data.target_series, - ) - sample_weights = None if data.sample_weight_column in data.data.columns: sample_weights = data.data.loc[:, data.sample_weight_column] self._final_learner.fit( - base_learner_predictions=quantile_datasets, + base_predictions=base_predictions, additional_features=features, sample_weights=sample_weights, ) self._is_fitted = True - def _predict_base_learners(self, data: ForecastInputDataset) -> dict[type[BaseLearner], ForecastDataset]: + def _predict_base_learners(self, data: ForecastInputDataset) -> EnsembleForecastDataset: """Generate predictions from base learners. Args: @@ -149,47 +144,13 @@ def _predict_base_learners(self, data: ForecastInputDataset) -> dict[type[BaseLe Returns: DataFrame containing base learner predictions. """ - base_predictions: dict[type[BaseLearner], ForecastDataset] = {} + base_predictions: dict[BaseLearnerNames, ForecastDataset] = {} for learner in self._base_learners: preds = learner.predict(data=data) - base_predictions[learner.__class__] = preds + name = cast(BaseLearnerNames, learner.__class__.__name__) + base_predictions[name] = preds - return base_predictions - - @staticmethod - def _prepare_input_final_learner( - quantiles: list[Quantile], - base_predictions: dict[type[BaseLearner], ForecastDataset], - target_series: pd.Series, - ) -> dict[Quantile, ForecastInputDataset]: - """Prepare input data for the final learner based on base learner predictions. - - Args: - quantiles: List of quantiles to prepare data for. - base_predictions: Predictions from base learners. - target_series: Actual target series for reference. - - Returns: - dictionary mapping Quantiles to ForecastInputDatasets. - """ - predictions_quantiles: dict[Quantile, ForecastInputDataset] = {} - sample_interval = base_predictions[next(iter(base_predictions))].sample_interval - target_name = str(target_series.name) - - for q in quantiles: - df = pd.DataFrame({ - learner.__name__: preds.data[Quantile(q).format()] for learner, preds in base_predictions.items() - }) - df[target_name] = target_series - - predictions_quantiles[q] = ForecastInputDataset( - data=df, - sample_interval=sample_interval, - target_column=target_name, - forecast_start=df.index[0], - ) - - return predictions_quantiles + return EnsembleForecastDataset.from_forecast_datasets(base_predictions, target_series=data.target_series) @override def predict(self, data: ForecastInputDataset) -> ForecastDataset: @@ -205,18 +166,12 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: base_predictions = self._predict_base_learners(data=full_dataset) - final_learner_input = self._prepare_input_final_learner( - quantiles=self._config.quantiles, base_predictions=base_predictions, target_series=data.target_series - ) - if self._final_learner.has_features: additional_features = self._final_learner.calculate_features(data=data) else: additional_features = None - return self._final_learner.predict( - base_learner_predictions=final_learner_input, additional_features=additional_features - ) + return self._final_learner.predict(base_predictions=base_predictions, additional_features=additional_features) __all__ = [ diff --git a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py index df26edb12..15e667deb 100644 --- a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py @@ -32,11 +32,10 @@ BaseLearner, BaseLearnerHyperParams, ) -from openstef_meta.framework.final_learner import FinalLearner, FinalLearnerHyperParams +from openstef_meta.framework.final_learner import EnsembleForecastDataset, FinalLearner, FinalLearnerHyperParams from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) -from openstef_meta.utils.pinball_errors import calculate_pinball_errors from openstef_models.models.forecasting.forecaster import ( ForecasterConfig, ) @@ -87,42 +86,22 @@ def __init__(self, quantiles: list[Quantile], hyperparams: LWFLHyperParams) -> N @override def fit( self, - base_learner_predictions: dict[Quantile, ForecastInputDataset], + base_predictions: EnsembleForecastDataset, additional_features: ForecastInputDataset | None, sample_weights: pd.Series | None = None, ) -> None: - q0 = self.quantiles[0] - base_learners_map = set(base_learner_predictions[q0].data.columns).difference({ - base_learner_predictions[q0].target_column, - base_learner_predictions[q0].sample_weight_column, - }) - self._label_encoder.fit(list(base_learners_map)) - for i, q in enumerate(self.quantiles): - base_predictions = base_learner_predictions[q].data.drop( - columns=[base_learner_predictions[q].target_column] - ) + self._label_encoder.fit(base_predictions.model_names) - labels = self._prepare_classification_data( - quantile=q, - target=base_learner_predictions[q].target_series, - predictions=base_predictions, + for i, q in enumerate(self.quantiles): + # Data preparation + dataset = base_predictions.select_quantile_classification(quantile=q) + input_data = self._prepare_input_data( + dataset=dataset, + additional_features=additional_features, ) - - if additional_features is not None: - df = pd.concat( - [base_predictions, additional_features.data], - axis=1, - ) - else: - df = base_predictions - - if len(labels.unique()) == 1: - msg = f"""Final learner for quantile {q.format()} has less than 2 classes in the target. - Switching to dummy classifier """ - logger.warning(msg=msg) - self.models[i] = DummyClassifier(strategy="most_frequent") - + labels = dataset.target_series + self._validate_labels(labels=labels, model_index=i) labels = self._label_encoder.transform(labels) # Balance classes, adjust with sample weights @@ -130,48 +109,62 @@ def fit( if sample_weights is not None: weights *= sample_weights - self.models[i].fit(X=df, y=labels, sample_weight=weights) # type: ignore + self.models[i].fit(X=input_data, y=labels, sample_weight=weights) # type: ignore self._is_fitted = True @staticmethod - def _prepare_classification_data(quantile: Quantile, target: pd.Series, predictions: pd.DataFrame) -> pd.Series: - """Selects base learner with lowest error for each sample as target for classification. + def _prepare_input_data( + dataset: ForecastInputDataset, additional_features: ForecastInputDataset | None + ) -> pd.DataFrame: + """Prepare input data by combining base predictions with additional features if provided. + + Args: + dataset: ForecastInputDataset containing base predictions. + additional_features: Optional ForecastInputDataset containing additional features. Returns: - pd.Series: Series indicating the base learner with the lowest pinball loss for each sample. + pd.DataFrame: Combined DataFrame of base predictions and additional features if provided. """ + df = dataset.input_data(start=dataset.index[0]) + if additional_features is not None: + df_a = additional_features.input_data(start=dataset.index[0]) + df = pd.concat( + [df, df_a], + axis=1, + ) + return df - # Calculate pinball loss for each base learner - def column_pinball_losses(preds: pd.Series) -> pd.Series: - return calculate_pinball_errors(y_true=target, y_pred=preds, alpha=quantile) - - pinball_losses = predictions.apply(column_pinball_losses) - - # For each sample, select the base learner with the lowest pinball loss - return pinball_losses.idxmin(axis=1) - - def _calculate_model_weights_quantile(self, base_predictions: pd.DataFrame, quantile: Quantile) -> pd.DataFrame: - model = self.models[self.quantiles.index(quantile)] + def _validate_labels(self, labels: pd.Series, model_index: int) -> None: + if len(labels.unique()) == 1: + msg = f"""Final learner for quantile {self.quantiles[model_index].format()} has less than 2 classes in the target. + Switching to dummy classifier """ + logger.warning(msg=msg) + self.models[model_index] = DummyClassifier(strategy="most_frequent") + def _predict_model_weights_quantile(self, base_predictions: pd.DataFrame, model_index: int) -> pd.DataFrame: + model = self.models[model_index] return model.predict_proba(X=base_predictions) # type: ignore def _generate_predictions_quantile( self, - base_predictions: ForecastInputDataset, + dataset: ForecastInputDataset, additional_features: ForecastInputDataset | None, - quantile: Quantile, + model_index: int, ) -> pd.Series: - base_df = base_predictions.data.drop(columns=[base_predictions.target_column]) - df = pd.concat([base_df, additional_features.data], axis=1) if additional_features is not None else base_df - weights = self._calculate_model_weights_quantile(base_predictions=df, quantile=quantile) + input_data = self._prepare_input_data( + dataset=dataset, + additional_features=additional_features, + ) + + weights = self._predict_model_weights_quantile(base_predictions=input_data, model_index=model_index) - return base_df.mul(weights).sum(axis=1) + return dataset.input_data().mul(weights).sum(axis=1) @override def predict( self, - base_learner_predictions: dict[Quantile, ForecastInputDataset], + base_predictions: EnsembleForecastDataset, additional_features: ForecastInputDataset | None, ) -> ForecastDataset: if not self.is_fitted: @@ -180,14 +173,21 @@ def predict( # Generate predictions predictions = pd.DataFrame({ Quantile(q).format(): self._generate_predictions_quantile( - base_predictions=data, quantile=q, additional_features=additional_features + dataset=base_predictions.select_quantile(quantile=Quantile(q)), + additional_features=additional_features, + model_index=i, ) - for q, data in base_learner_predictions.items() + for i, q in enumerate(self.quantiles) }) + target_series = base_predictions.target_series + if target_series is not None: + predictions[base_predictions.target_column] = target_series return ForecastDataset( data=predictions, - sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, + sample_interval=base_predictions.sample_interval, + target_column=base_predictions.target_column, + forecast_start=base_predictions.forecast_start, ) @property @@ -202,7 +202,7 @@ class LGBMLearnerHyperParams(LWFLHyperParams): """Hyperparameters for Learned Weights Final Learner with LGBM Classifier.""" n_estimators: int = Field( - default=200, + default=20, description="Number of estimators for the LGBM Classifier. Defaults to 20.", ) @@ -246,7 +246,7 @@ class RFLearnerHyperParams(LWFLHyperParams): """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" n_estimators: int = Field( - default=200, + default=20, description="Number of estimators for the LGBM Classifier. Defaults to 20.", ) diff --git a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py index 1389c28da..b4059da58 100644 --- a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py @@ -7,6 +7,7 @@ from collections.abc import Sequence from typing import override +from openstef_meta.utils.datasets import EnsembleForecastDataset import pandas as pd from pydantic import Field, field_validator from pydantic_extra_types.country import CountryAlpha2 @@ -74,7 +75,7 @@ def __init__(self, quantiles: list[Quantile], hyperparams: RulesLearnerHyperPara @override def fit( self, - base_learner_predictions: dict[Quantile, ForecastInputDataset], + base_predictions: EnsembleForecastDataset, additional_features: ForecastInputDataset | None, sample_weights: pd.Series | None = None, ) -> None: @@ -103,28 +104,30 @@ def _predict_tree(self, data: pd.DataFrame, columns: pd.Index) -> pd.DataFrame: @override def predict( self, - base_learner_predictions: dict[Quantile, ForecastInputDataset], + base_predictions: EnsembleForecastDataset, additional_features: ForecastInputDataset | None, ) -> ForecastDataset: if additional_features is None: raise ValueError("Additional features must be provided for RulesFinalLearner prediction.") decisions = self._predict_tree( - additional_features.data, columns=base_learner_predictions[self.quantiles[0]].data.columns + additional_features.data, columns=base_predictions.select_quantile(quantile=self.quantiles[0]).data.columns ) # Generate predictions predictions: list[pd.DataFrame] = [] - for q, data in base_learner_predictions.items(): - preds = data.data * decisions - predictions.append(preds.sum(axis=1).to_frame(name=Quantile(q).format())) + for q in self.quantiles: + dataset = base_predictions.select_quantile(quantile=q) + preds = dataset.input_data().multiply(decisions).sum(axis=1) + + predictions.append(preds.to_frame(name=Quantile(q).format())) # Concatenate predictions along columns to form a DataFrame with quantile columns df = pd.concat(predictions, axis=1) return ForecastDataset( data=df, - sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, + sample_interval=base_predictions.sample_interval, ) @property diff --git a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py index 045fe2988..4b2dfc88f 100644 --- a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py @@ -17,6 +17,7 @@ from pydantic import Field, field_validator from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_core.exceptions import ( NotFittedError, ) @@ -114,26 +115,27 @@ def _combine_datasets( @override def fit( self, - base_learner_predictions: dict[Quantile, ForecastInputDataset], + base_predictions: EnsembleForecastDataset, additional_features: ForecastInputDataset | None, sample_weights: pd.Series | None = None, ) -> None: for i, q in enumerate(self.quantiles): if additional_features is not None: + dataset = base_predictions.select_quantile(quantile=q) data = self._combine_datasets( - data=base_learner_predictions[q], + data=dataset, additional_features=additional_features, ) else: - data = base_learner_predictions[q] + data = base_predictions.select_quantile(quantile=q) self.models[i].fit(data=data, data_val=None) @override def predict( self, - base_learner_predictions: dict[Quantile, ForecastInputDataset], + base_predictions: EnsembleForecastDataset, additional_features: ForecastInputDataset | None, ) -> ForecastDataset: if not self.is_fitted: @@ -144,12 +146,11 @@ def predict( for i, q in enumerate(self.quantiles): if additional_features is not None: data = self._combine_datasets( - data=base_learner_predictions[q], + data=base_predictions.select_quantile(quantile=q), additional_features=additional_features, ) else: - data = base_learner_predictions[q] - + data = base_predictions.select_quantile(quantile=q) p = self.models[i].predict(data=data).data predictions.append(p) @@ -158,7 +159,7 @@ def predict( return ForecastDataset( data=df, - sample_interval=base_learner_predictions[self.quantiles[0]].sample_interval, + sample_interval=base_predictions.sample_interval, ) @property diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index e69de29bb..f42c41716 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -0,0 +1,213 @@ +from datetime import datetime, timedelta +from typing import Self, cast, override + +import pandas as pd + +from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset +from openstef_core.types import Quantile +from openstef_meta.framework.base_learner import BaseLearnerNames +from openstef_meta.utils.pinball_errors import calculate_pinball_errors + +DEFAULT_TARGET_COLUMN = {Quantile(0.5): "load"} + + +class EnsembleForecastDataset(TimeSeriesDataset): + """First stage output format for ensemble forecasters.""" + + forecast_start: datetime + quantiles: list[Quantile] + model_names: list[BaseLearnerNames] + target_column: str + + @override + def __init__( + self, + data: pd.DataFrame, + sample_interval: timedelta = timedelta(minutes=15), + forecast_start: datetime | None = None, + target_column: str = "load", + *, + horizon_column: str = "horizon", + available_at_column: str = "available_at", + ) -> None: + if "forecast_start" in data.attrs: + self.forecast_start = datetime.fromisoformat(data.attrs["forecast_start"]) + else: + self.forecast_start = forecast_start if forecast_start is not None else data.index.min().to_pydatetime() + self.target_column = data.attrs.get("target_column", target_column) + + super().__init__( + data=data, + sample_interval=sample_interval, + horizon_column=horizon_column, + available_at_column=available_at_column, + ) + quantile_feature_names = [col for col in self.feature_names if col != target_column] + + self.model_names, self.quantiles = self.get_learner_and_quantile(pd.Index(quantile_feature_names)) + n_cols = len(self.model_names) * len(self.quantiles) + if len(data.columns) not in {n_cols + 1, n_cols}: + raise ValueError("Data columns do not match the expected number based on base learners and quantiles.") + + @property + def target_series(self) -> pd.Series | None: + """Return the target series if available.""" + if self.target_column in self.data.columns: + return self.data[self.target_column] + return None + + @staticmethod + def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[BaseLearnerNames], list[Quantile]]: + """Extract base learner names and quantiles from feature names. + + Args: + feature_names: Index of feature names in the dataset. + + Returns: + Tuple containing a list of base learner names and a list of quantiles. + + Raises: + ValueError: If an invalid base learner name is found in a feature name. + """ + all_base_learners = BaseLearnerNames.__args__ + + base_learners: set[BaseLearnerNames] = set() + quantiles: set[Quantile] = set() + + for feature_name in feature_names: + learner_part, quantile_part = feature_name.split("_", maxsplit=1) + if learner_part not in all_base_learners or not Quantile.is_valid_quantile_string(quantile_part): + msg = f"Invalid base learner name in feature: {feature_name}" + raise ValueError(msg) + + base_learners.add(cast(BaseLearnerNames, learner_part)) + quantiles.add(Quantile.parse(quantile_part)) + + return list(base_learners), list(quantiles) + + @staticmethod + def get_quantile_feature_name(feature_name: str) -> tuple[BaseLearnerNames, Quantile]: + """Generate the feature name for a given base learner and quantile. + + Args: + feature_name: Feature name string in the format "BaseLearner_Quantile". + + Returns: + Tuple containing the base learner name and Quantile object. + """ + learner_part, quantile_part = feature_name.split("_", maxsplit=1) + return cast(BaseLearnerNames, learner_part), Quantile.parse(quantile_part) + + @classmethod + def from_forecast_datasets( + cls, + datasets: dict[BaseLearnerNames, ForecastDataset], + target_series: pd.Series | None = None, + sample_weights: pd.Series | None = None, + ) -> Self: + """Create an EnsembleForecastDataset from multiple ForecastDatasets. + + Args: + datasets: Dict of ForecastDatasets to combine. + target_series: Optional target series to include in the dataset. + sample_weights: Optional sample weights series to include in the dataset. + + Returns: + EnsembleForecastDataset combining all input datasets. + """ + ds1 = next(iter(datasets.values())) + additional_columns: dict[str, pd.Series] = {} + if isinstance(ds1.target_series, pd.Series): + additional_columns[ds1.target_column] = ds1.target_series + elif target_series is not None: + additional_columns[ds1.target_column] = target_series + + sample_weight_column = "sample_weight" + if sample_weights is not None: + additional_columns[sample_weight_column] = sample_weights + + combined_data = pd.DataFrame({ + f"{learner}_{q.format()}": ds.data[q.format()] for learner, ds in datasets.items() for q in ds.quantiles + }).assign(**additional_columns) + + return cls( + data=combined_data, + sample_interval=ds1.sample_interval, + forecast_start=ds1.forecast_start, + target_column=ds1.target_column, + ) + + @staticmethod + def _prepare_classification(data: pd.DataFrame, target: pd.Series, quantile: Quantile) -> pd.Series: + """Prepare data for classification tasks by converting quantile columns to binary indicators. + + Args: + data: DataFrame containing quantile predictions. + target: Series containing true target values. + quantile: Quantile for which to prepare classification data. + + Returns: + Series with categorical indicators of best-performing base learners. + """ + + # Calculate pinball loss for each base learner + def column_pinball_losses(preds: pd.Series) -> pd.Series: + return calculate_pinball_errors(y_true=target, y_pred=preds, quantile=quantile) + + pinball_losses = data.apply(column_pinball_losses) + + return pinball_losses.idxmin(axis=1) + + def select_quantile_classification(self, quantile: Quantile) -> ForecastInputDataset: + """Select classification target for a specific quantile. + + Args: + quantile: Quantile to select. + + Returns: + Series containing binary indicators of best-performing base learners for the specified quantile. + + Raises: + ValueError: If the target column is not found in the dataset. + """ + if self.target_column not in self.data.columns: + msg = f"Target column '{self.target_column}' not found in dataset." + raise ValueError(msg) + + selected_columns = [f"{learner}_{quantile.format()}" for learner in self.model_names] + prediction_data = self.data[selected_columns].copy() + prediction_data.columns = self.model_names + + target = self._prepare_classification( + data=prediction_data, + target=self.data[self.target_column], + quantile=quantile, + ) + prediction_data[self.target_column] = target + return ForecastInputDataset( + data=prediction_data, + sample_interval=self.sample_interval, + target_column=self.target_column, + forecast_start=self.forecast_start, + ) + + def select_quantile(self, quantile: Quantile) -> ForecastInputDataset: + """Select data for a specific quantile. + + Args: + quantile: Quantile to select. + + Returns: + ForecastInputDataset containing base predictions for the specified quantile. + """ + selected_columns = [f"{learner}_{quantile.format()}" for learner in self.model_names] + selected_columns.append(self.target_column) + prediction_data = self.data[selected_columns].copy() + prediction_data.columns = [*self.model_names, self.target_column] + + return ForecastInputDataset( + data=prediction_data, + sample_interval=self.sample_interval, + target_column=self.target_column, + forecast_start=self.forecast_start, + ) diff --git a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py index 5fe1166d0..85f29fd9c 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py +++ b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py @@ -7,10 +7,11 @@ This module provides a function to compute the pinball loss for quantile regression. """ +import numpy as np import pandas as pd -def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, alpha: float) -> pd.Series: +def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, quantile: float) -> pd.Series: """Calculate pinball loss for given true and predicted values. Args: @@ -21,6 +22,11 @@ def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, alpha: float) Returns: A pandas Series containing the pinball loss for each sample. """ - diff = y_true - y_pred - sign = (diff >= 0).astype(float) - return alpha * sign * diff - (1 - alpha) * (1 - sign) * diff + errors = y_true - y_pred + pinball_loss = np.where( + errors >= 0, + quantile * errors, # Under-prediction + (quantile - 1) * errors, # Over-prediction + ) + + return pd.Series(pinball_loss, index=y_true.index) diff --git a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py index 53d08e89f..5ca7b3003 100644 --- a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py +++ b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py @@ -91,9 +91,9 @@ def test_learned_weights_forecaster_fit_predict( assert forecaster.is_fitted, "Model should be fitted after calling fit()" # Check that necessary quantiles are present - expected_columns = [q.format() for q in expected_quantiles] - assert list(result.data.columns) == expected_columns, ( - f"Expected columns {expected_columns}, got {list(result.data.columns)}" + required_columns = [q.format() for q in expected_quantiles] + assert all(col in result.data.columns for col in required_columns), ( + f"Expected columns {required_columns}, got {list(result.data.columns)}" ) # Forecast data quality diff --git a/packages/openstef-meta/tests/models/test_rules_forecaster.py b/packages/openstef-meta/tests/models/test_rules_forecaster.py index 434f0c6c2..0dfaba4e5 100644 --- a/packages/openstef-meta/tests/models/test_rules_forecaster.py +++ b/packages/openstef-meta/tests/models/test_rules_forecaster.py @@ -112,7 +112,7 @@ def test_rules_forecaster_with_additional_features( ): """Test that forecaster works with additional features for the final learner.""" - base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) + base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) # type: ignore # Arrange expected_quantiles = base_config.quantiles diff --git a/packages/openstef-meta/tests/utils/test_datasets.py b/packages/openstef-meta/tests/utils/test_datasets.py new file mode 100644 index 000000000..43b6cf319 --- /dev/null +++ b/packages/openstef-meta/tests/utils/test_datasets.py @@ -0,0 +1,115 @@ +from collections.abc import Callable +from datetime import timedelta + +import numpy as np +import pandas as pd +import pytest + +from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset +from openstef_core.types import Quantile +from openstef_meta.framework.base_learner import BaseLearnerNames +from openstef_meta.utils.datasets import EnsembleForecastDataset + + +@pytest.fixture +def simple_dataset() -> TimeSeriesDataset: + return TimeSeriesDataset( + data=pd.DataFrame( + data={ + "available_at": pd.to_datetime([ + "2023-01-01T09:50:00", # lead time = 10:00 - 09:50 = +10min + "2023-01-01T10:55:00", # lead time = 11:00 - 10:55 = +5min + "2023-01-01T12:10:00", # lead time = 12:00 - 12:10 = -10min + "2023-01-01T13:20:00", # lead time = 13:00 - 13:20 = -20min + "2023-01-01T14:15:00", # lead time = 14:00 - 14:15 = -15min + "2023-01-01T14:30:00", # lead time = 14:00 - 14:30 = -30min + ]), + "value1": [10, 20, 30, 40, 50, 55], # 55 should override 50 for 14:00 + }, + index=pd.to_datetime([ + "2023-01-01T10:00:00", + "2023-01-01T11:00:00", + "2023-01-01T12:00:00", + "2023-01-01T13:00:00", + # Duplicate timestamp with different availability + "2023-01-01T14:00:00", + "2023-01-01T14:00:00", + ]), + ), + sample_interval=timedelta(hours=1), + ) + + +@pytest.fixture +def forecast_dataset_factory() -> Callable[[], ForecastDataset]: + def _make() -> ForecastDataset: + rng = np.random.default_rng() + df = pd.DataFrame( + data={ + "quantile_P10": [90, 180, 270], + "quantile_P50": [100, 200, 300], + "quantile_P90": [110, 220, 330], + "load": [100, 200, 300], + }, + index=pd.to_datetime([ + "2023-01-01T10:00:00", + "2023-01-01T11:00:00", + "2023-01-01T12:00:00", + ]), + ) + df += rng.normal(0, 1, df.shape) # Add slight noise to avoid perfect predictions + + df["available_at"] = pd.to_datetime([ + "2023-01-01T09:50:00", + "2023-01-01T10:55:00", + "2023-01-01T12:10:00", + ]) + + return ForecastDataset( + data=df, + sample_interval=timedelta(hours=1), + target_column="load", + ) + + return _make + + +@pytest.fixture +def base_learner_output( + forecast_dataset_factory: Callable[[], ForecastDataset], +) -> dict[BaseLearnerNames, ForecastDataset]: + + return { + "GBLinearForecaster": forecast_dataset_factory(), + "LGBMForecaster": forecast_dataset_factory(), + } + + +@pytest.fixture +def ensemble_dataset(base_learner_output: dict[BaseLearnerNames, ForecastDataset]) -> EnsembleForecastDataset: + return EnsembleForecastDataset.from_forecast_datasets(base_learner_output) + + +def test_from_ensemble_output(ensemble_dataset: EnsembleForecastDataset): + + assert isinstance(ensemble_dataset, EnsembleForecastDataset) + assert ensemble_dataset.data.shape == (3, 7) # 3 timestamps, 2 learners * 3 quantiles + target + assert set(ensemble_dataset.model_names) == {"GBLinearForecaster", "LGBMForecaster"} + assert set(ensemble_dataset.quantiles) == {Quantile(0.1), Quantile(0.5), Quantile(0.9)} + + +def test_select_quantile(ensemble_dataset: EnsembleForecastDataset): + + dataset = ensemble_dataset.select_quantile(Quantile(0.5)) + + assert isinstance(dataset, ForecastInputDataset) + assert dataset.data.shape == (3, 3) # 3 timestamps, 2 learners * 1 quantiles + target + + +def test_select_quantile_classification(ensemble_dataset: EnsembleForecastDataset): + + dataset = ensemble_dataset.select_quantile_classification(Quantile(0.5)) + + assert isinstance(dataset, ForecastInputDataset) + assert dataset.data.shape == (3, 3) # 3 timestamps, 2 learners * 1 quantiles + target + assert all(dataset.target_series.apply(lambda x: x in BaseLearnerNames.__args__)) # type: ignore diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index 92c3981a3..4fccf2825 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -272,7 +272,7 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None raise InputValidationError("The input data is empty after dropping NaN values.") # Fit the scalers - self._target_scaler.fit(data.target_series.to_frame()) + self._target_scaler.fit(data.target_series.to_frame().to_numpy()) # Prepare training data input_data, target, sample_weight = self._prepare_fit_input(data) From 719ea5cb547e95c54b3010b1cebdcdda4b0c623f Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Fri, 28 Nov 2025 15:14:28 +0100 Subject: [PATCH 043/104] Make PR compliant Signed-off-by: Lars van Someren --- .../src/openstef_meta/framework/base_learner.py | 3 +-- .../openstef_meta/models/learned_weights_forecaster.py | 3 ++- .../src/openstef_meta/models/rules_forecaster.py | 2 +- .../src/openstef_meta/models/stacking_forecaster.py | 2 +- .../openstef-meta/src/openstef_meta/utils/datasets.py | 10 ++++++++++ .../src/openstef_meta/utils/pinball_errors.py | 2 +- packages/openstef-meta/tests/utils/test_datasets.py | 4 ++++ 7 files changed, 20 insertions(+), 6 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/framework/base_learner.py b/packages/openstef-meta/src/openstef_meta/framework/base_learner.py index 67c559468..96cda27f5 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/base_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/base_learner.py @@ -8,9 +8,8 @@ while ensuring full compatability with regular Forecasters. """ -from typing import Any, Literal, Self, override +from typing import Literal -from openstef_core.base_model import PydanticStringPrimitive from openstef_models.models.forecasting.gblinear_forecaster import ( GBLinearForecaster, GBLinearHyperParams, diff --git a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py index 15e667deb..4097296d7 100644 --- a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py @@ -136,7 +136,8 @@ def _prepare_input_data( def _validate_labels(self, labels: pd.Series, model_index: int) -> None: if len(labels.unique()) == 1: - msg = f"""Final learner for quantile {self.quantiles[model_index].format()} has less than 2 classes in the target. + msg = f"""Final learner for quantile {self.quantiles[model_index].format()} has + less than 2 classes in the target. Switching to dummy classifier """ logger.warning(msg=msg) self.models[model_index] = DummyClassifier(strategy="most_frequent") diff --git a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py index b4059da58..658b08957 100644 --- a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py @@ -7,7 +7,6 @@ from collections.abc import Sequence from typing import override -from openstef_meta.utils.datasets import EnsembleForecastDataset import pandas as pd from pydantic import Field, field_validator from pydantic_extra_types.country import CountryAlpha2 @@ -24,6 +23,7 @@ from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) +from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_meta.utils.decision_tree import Decision, DecisionTree from openstef_models.models.forecasting.forecaster import ( ForecasterConfig, diff --git a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py index 4b2dfc88f..2afa2dfc0 100644 --- a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py @@ -17,7 +17,6 @@ from pydantic import Field, field_validator from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_core.exceptions import ( NotFittedError, ) @@ -32,6 +31,7 @@ from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) +from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_models.models.forecasting.forecaster import ( Forecaster, ForecasterConfig, diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index f42c41716..41186152d 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -1,3 +1,13 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Ensemble Forecast Dataset. + +Validated dataset for ensemble forecasters first stage output. +Implements methods to select quantile-specific ForecastInputDatasets for final learners. +Also supports constructing classifation targets based on pinball loss. +""" + from datetime import datetime, timedelta from typing import Self, cast, override diff --git a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py index 85f29fd9c..08e1c7704 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py +++ b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py @@ -17,7 +17,7 @@ def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, quantile: flo Args: y_true: True values as a pandas Series. y_pred: Predicted values as a pandas Series. - alpha: Quantile value. + quantile: Quantile value. Returns: A pandas Series containing the pinball loss for each sample. diff --git a/packages/openstef-meta/tests/utils/test_datasets.py b/packages/openstef-meta/tests/utils/test_datasets.py index 43b6cf319..045aecd13 100644 --- a/packages/openstef-meta/tests/utils/test_datasets.py +++ b/packages/openstef-meta/tests/utils/test_datasets.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + from collections.abc import Callable from datetime import timedelta From e3a587cf10ff9aaf063c22fa8c89735ee2d2a138 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Fri, 28 Nov 2025 15:43:45 +0100 Subject: [PATCH 044/104] fixed toml Signed-off-by: Lars van Someren --- packages/openstef-meta/pyproject.toml | 32 +++++++++++++++++++++------ uv.lock | 6 ++--- 2 files changed, 28 insertions(+), 10 deletions(-) diff --git a/packages/openstef-meta/pyproject.toml b/packages/openstef-meta/pyproject.toml index 838ef8ee9..e013b1b94 100644 --- a/packages/openstef-meta/pyproject.toml +++ b/packages/openstef-meta/pyproject.toml @@ -4,16 +4,34 @@ [project] name = "openstef-meta" -version = "0.1.0" -description = "Add your description here" +version = "0.0.0" +description = "Meta models for OpenSTEF" readme = "README.md" -requires-python = ">=3.12" -dependencies = ["openstef-core", "openstef-models"] +keywords = ["energy", "forecasting", "machinelearning"] +license = "MPL-2.0" +authors = [ + { name = "Alliander N.V", email = "short.term.energy.forecasts@alliander.com" }, +] +requires-python = ">=3.12,<4.0" +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", +] -[tool.uv.sources] -openstef-models = { workspace = true } -openstef-core = { workspace = true } +dependencies = [ + "openstef-beam>=4.0.0.dev0,<5", + "openstef-core>=4.0.0.dev0,<5", + "openstef-models>=4.0.0.dev0,<5", +] +urls.Documentation = "https://openstef.github.io/openstef/index.html" +urls.Homepage = "https://lfenergy.org/projects/openstef/" +urls.Issues = "https://github.com/OpenSTEF/openstef/issues" +urls.Repository = "https://github.com/OpenSTEF/openstef" [tool.hatch.build.targets.wheel] packages = ["src/openstef_meta"] diff --git a/uv.lock b/uv.lock index 4e2691b89..5fc862810 100644 --- a/uv.lock +++ b/uv.lock @@ -2119,8 +2119,6 @@ all = [ { name = "openstef-beam", extra = ["all"] }, { name = "openstef-core" }, { name = "openstef-models", extra = ["xgb-cpu"] }, - { name = "openstef-meta" }, - ] beam = [ { name = "huggingface-hub" }, @@ -2283,15 +2281,17 @@ requires-dist = [ [[package]] name = "openstef-meta" -version = "0.1.0" +version = "0.0.0" source = { editable = "packages/openstef-meta" } dependencies = [ + { name = "openstef-beam" }, { name = "openstef-core" }, { name = "openstef-models" }, ] [package.metadata] requires-dist = [ + { name = "openstef-beam", editable = "packages/openstef-beam" }, { name = "openstef-core", editable = "packages/openstef-core" }, { name = "openstef-models", editable = "packages/openstef-models" }, ] From 308d7c8db6f825a81aab8d1aa6c5e462c59766a0 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Fri, 28 Nov 2025 15:47:43 +0100 Subject: [PATCH 045/104] Really fixed the TOML Signed-off-by: Lars van Someren --- packages/openstef-meta/pyproject.toml | 24 ++++++++++++------------ packages/openstef-models/pyproject.toml | 8 ++++---- pyproject.toml | 2 +- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/packages/openstef-meta/pyproject.toml b/packages/openstef-meta/pyproject.toml index e013b1b94..0f620e63b 100644 --- a/packages/openstef-meta/pyproject.toml +++ b/packages/openstef-meta/pyproject.toml @@ -7,25 +7,25 @@ name = "openstef-meta" version = "0.0.0" description = "Meta models for OpenSTEF" readme = "README.md" -keywords = ["energy", "forecasting", "machinelearning"] +keywords = [ "energy", "forecasting", "machinelearning" ] license = "MPL-2.0" authors = [ - { name = "Alliander N.V", email = "short.term.energy.forecasts@alliander.com" }, + { name = "Alliander N.V", email = "short.term.energy.forecasts@alliander.com" }, ] requires-python = ">=3.12,<4.0" classifiers = [ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.12", - "Programming Language :: Python :: 3.13", - "Programming Language :: Python :: 3.14", + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", ] dependencies = [ - "openstef-beam>=4.0.0.dev0,<5", - "openstef-core>=4.0.0.dev0,<5", - "openstef-models>=4.0.0.dev0,<5", + "openstef-beam>=4.0.0.dev0,<5", + "openstef-core>=4.0.0.dev0,<5", + "openstef-models>=4.0.0.dev0,<5", ] urls.Documentation = "https://openstef.github.io/openstef/index.html" @@ -34,4 +34,4 @@ urls.Issues = "https://github.com/OpenSTEF/openstef/issues" urls.Repository = "https://github.com/OpenSTEF/openstef" [tool.hatch.build.targets.wheel] -packages = ["src/openstef_meta"] +packages = [ "src/openstef_meta" ] diff --git a/packages/openstef-models/pyproject.toml b/packages/openstef-models/pyproject.toml index c12b3dacc..2b6f727bf 100644 --- a/packages/openstef-models/pyproject.toml +++ b/packages/openstef-models/pyproject.toml @@ -5,14 +5,14 @@ [build-system] build-backend = "hatchling.build" -requires = ["hatchling"] +requires = [ "hatchling" ] [project] name = "openstef-models" version = "0.0.0" description = "Core models for OpenSTEF" readme = "README.md" -keywords = ["energy", "forecasting", "machinelearning"] +keywords = [ "energy", "forecasting", "machinelearning" ] license = "MPL-2.0" authors = [ { name = "Alliander N.V", email = "short.term.energy.forecasts@alliander.com" }, @@ -45,7 +45,7 @@ optional-dependencies.xgb-cpu = [ "xgboost-cpu>=3,<4; sys_platform=='linux' or sys_platform=='win32'", ] -optional-dependencies.xgb-gpu = ["xgboost>=3,<4"] +optional-dependencies.xgb-gpu = [ "xgboost>=3,<4" ] urls.Documentation = "https://openstef.github.io/openstef/index.html" urls.Homepage = "https://lfenergy.org/projects/openstef/" @@ -53,4 +53,4 @@ urls.Issues = "https://github.com/OpenSTEF/openstef/issues" urls.Repository = "https://github.com/OpenSTEF/openstef" [tool.hatch.build.targets.wheel] -packages = ["src/openstef_models"] +packages = [ "src/openstef_models" ] diff --git a/pyproject.toml b/pyproject.toml index 17df056fa..66d397e87 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,8 +41,8 @@ optional-dependencies.beam = [ "openstef-beam", ] optional-dependencies.models = [ - "openstef-models[xgb-cpu]", "openstef-meta", + "openstef-models[xgb-cpu]", ] urls.Documentation = "https://openstef.github.io/openstef/index.html" urls.Homepage = "https://lfenergy.org/projects/openstef/" From 460548b64d8da67c2aa8ee90931e8787b7343fed Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 1 Dec 2025 16:27:59 +0100 Subject: [PATCH 046/104] Renamed FinalLearner to Forecast Combiner. Eliminated redundant classes Signed-off-by: Lars van Someren --- .../src/openstef_meta/framework/__init__.py | 6 +- .../openstef_meta/framework/final_learner.py | 46 ++-- .../framework/meta_forecaster.py | 26 ++- .../models/learned_weights_forecaster.py | 215 +++++++----------- .../openstef_meta/models/rules_forecaster.py | 10 +- .../models/stacking_forecaster.py | 18 +- .../models/test_learned_weights_forecaster.py | 62 ++--- 7 files changed, 158 insertions(+), 225 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/framework/__init__.py b/packages/openstef-meta/src/openstef_meta/framework/__init__.py index e64377d16..64fa31259 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/framework/__init__.py @@ -4,13 +4,13 @@ """This module provides meta-forecasting models and related hyperparameters for the OpenSTEF project.""" from .base_learner import BaseLearner, BaseLearnerHyperParams -from .final_learner import FinalLearner, FinalLearnerHyperParams +from .final_learner import ForecastCombiner, ForecastCombinerHyperParams from .meta_forecaster import MetaForecaster __all__ = [ "BaseLearner", "BaseLearnerHyperParams", - "FinalLearner", - "FinalLearnerHyperParams", + "ForecastCombiner", + "ForecastCombinerHyperParams", "MetaForecaster", ] diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py index 9feee7add..1ed90a1ad 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/final_learner.py @@ -8,41 +8,26 @@ while ensuring full compatability with regular Forecasters. """ -from abc import ABC, abstractmethod +from abc import abstractmethod from collections.abc import Sequence import pandas as pd from pydantic import ConfigDict, Field from openstef_core.datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset -from openstef_core.mixins import HyperParams, TransformPipeline +from openstef_core.mixins import HyperParams, Predictor, TransformPipeline from openstef_core.transforms import TimeSeriesTransform from openstef_core.types import Quantile from openstef_meta.transforms.selector import Selector from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_models.utils.feature_selection import FeatureSelection -WEATHER_FEATURES = { - "temperature_2m", - "relative_humidity_2m", - "surface_pressure", - "cloud_cover", - "wind_speed_10m", - "wind_speed_80m", - "wind_direction_10m", - "shortwave_radiation", - "direct_radiation", - "diffuse_radiation", - "direct_normal_irradiance", - "load", -} - SELECTOR = Selector( - selection=FeatureSelection(include=WEATHER_FEATURES), + selection=FeatureSelection(include=None), ) -class FinalLearnerHyperParams(HyperParams): +class ForecastCombinerHyperParams(HyperParams): """Hyperparameters for the Final Learner.""" model_config = ConfigDict(arbitrary_types_allowed=True) @@ -53,10 +38,10 @@ class FinalLearnerHyperParams(HyperParams): ) -class FinalLearner(ABC): +class ForecastCombiner(Predictor[EnsembleForecastDataset, ForecastDataset]): """Combines base learner predictions for each quantile into final predictions.""" - def __init__(self, quantiles: list[Quantile], hyperparams: FinalLearnerHyperParams) -> None: + def __init__(self, quantiles: list[Quantile], hyperparams: ForecastCombinerHyperParams) -> None: """Initialize the Final Learner.""" self.quantiles = quantiles self.hyperparams = hyperparams @@ -68,14 +53,16 @@ def __init__(self, quantiles: list[Quantile], hyperparams: FinalLearnerHyperPara @abstractmethod def fit( self, - base_predictions: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None, + data: EnsembleForecastDataset, + data_val: EnsembleForecastDataset | None = None, + additional_features: ForecastInputDataset | None = None, sample_weights: pd.Series | None = None, ) -> None: """Fit the final learner using base learner predictions. Args: - base_predictions: EnsembleForecastDataset + data: EnsembleForecastDataset + data_val: Optional EnsembleForecastDataset for validation during fitting. Will be ignored additional_features: Optional ForecastInputDataset containing additional features for the final learner. sample_weights: Optional series of sample weights for fitting. """ @@ -83,13 +70,14 @@ def fit( def predict( self, - base_predictions: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, ) -> ForecastDataset: """Generate final predictions based on base learner predictions. Args: - base_predictions: EnsembleForecastDataset containing base learner predictions. + data: EnsembleForecastDataset containing base learner predictions. + data_val: Optional EnsembleForecastDataset for validation during prediction. Will be ignored additional_features: Optional ForecastInputDataset containing additional features for the final learner. Returns: @@ -101,10 +89,10 @@ def calculate_features(self, data: ForecastInputDataset) -> ForecastInputDataset """Calculate additional features for the final learner. Args: - data: Input TimeSeriesDataset to calculate features on. + data: Input ForecastInputDataset to calculate features on. Returns: - TimeSeriesDataset with additional features. + ForecastInputDataset with additional features. """ data_transformed = self.final_learner_processing.transform(data) diff --git a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py index 9d998ec9e..04d0042e9 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py @@ -22,7 +22,7 @@ BaseLearnerHyperParams, BaseLearnerNames, ) -from openstef_meta.framework.final_learner import FinalLearner +from openstef_meta.framework.final_learner import ForecastCombiner from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_models.models.forecasting.forecaster import ( Forecaster, @@ -83,12 +83,12 @@ class EnsembleForecaster(MetaForecaster): _config: ForecasterConfig _base_learners: list[BaseLearner] - _final_learner: FinalLearner + _forecast_combiner: ForecastCombiner @property @override def is_fitted(self) -> bool: - return all(x.is_fitted for x in self._base_learners) and self._final_learner.is_fitted + return all(x.is_fitted for x in self._base_learners) and self._forecast_combiner.is_fitted @property @override @@ -117,9 +117,9 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None base_predictions = self._predict_base_learners(data=full_dataset) - if self._final_learner.has_features: - self._final_learner.final_learner_processing.fit(full_dataset) - features = self._final_learner.calculate_features(data=full_dataset) + if self._forecast_combiner.has_features: + self._forecast_combiner.final_learner_processing.fit(full_dataset) + features = self._forecast_combiner.calculate_features(data=full_dataset) else: features = None @@ -127,8 +127,9 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None if data.sample_weight_column in data.data.columns: sample_weights = data.data.loc[:, data.sample_weight_column] - self._final_learner.fit( - base_predictions=base_predictions, + self._forecast_combiner.fit( + data=base_predictions, + data_val=None, # TODO ADD validation dataset support additional_features=features, sample_weights=sample_weights, ) @@ -166,12 +167,15 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: base_predictions = self._predict_base_learners(data=full_dataset) - if self._final_learner.has_features: - additional_features = self._final_learner.calculate_features(data=data) + if self._forecast_combiner.has_features: + additional_features = self._forecast_combiner.calculate_features(data=data) else: additional_features = None - return self._final_learner.predict(base_predictions=base_predictions, additional_features=additional_features) + return self._forecast_combiner.predict( + data=base_predictions, + additional_features=additional_features, + ) __all__ = [ diff --git a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py index 4097296d7..96132c302 100644 --- a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py @@ -11,7 +11,7 @@ import logging from abc import abstractmethod -from typing import Literal, Self, override +from typing import Literal, override import pandas as pd from lightgbm import LGBMClassifier @@ -32,7 +32,7 @@ BaseLearner, BaseLearnerHyperParams, ) -from openstef_meta.framework.final_learner import EnsembleForecastDataset, FinalLearner, FinalLearnerHyperParams +from openstef_meta.framework.final_learner import EnsembleForecastDataset, ForecastCombiner, ForecastCombinerHyperParams from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) @@ -50,52 +50,51 @@ # Base classes for Learned Weights Final Learner Classifier = LGBMClassifier | XGBClassifier | LogisticRegression | DummyClassifier +ClassifierNames = Literal["lgbm", "xgb", "logistic_regression", "dummy"] -class LWFLHyperParams(FinalLearnerHyperParams): +class WeightsCombinerHyperParams(ForecastCombinerHyperParams): """Hyperparameters for Learned Weights Final Learner.""" - @property @abstractmethod - def learner(self) -> type["WeightsLearner"]: - """Returns the classifier to be used as final learner.""" - raise NotImplementedError("Subclasses must implement the 'estimator' property.") - - @classmethod - def learner_from_params(cls, quantiles: list[Quantile], hyperparams: Self) -> "WeightsLearner": - """Initialize the final learner from hyperparameters. + def get_classifier(self) -> Classifier: + """Initialize the classifier from hyperparameters. Returns: - WeightsLearner: An instance of the WeightsLearner initialized with the provided hyperparameters. + Classifier: An instance of the classifier initialized with the provided hyperparameters. """ - instance = cls() - return instance.learner(quantiles=quantiles, hyperparams=hyperparams) + raise NotImplementedError("Subclasses must implement the 'get_classifier' method.") -class WeightsLearner(FinalLearner): +class WeightsCombiner(ForecastCombiner): """Combines base learner predictions with a classification approach to determine which base learner to use.""" - def __init__(self, quantiles: list[Quantile], hyperparams: LWFLHyperParams) -> None: - """Initialize WeightsLearner.""" + model_type: ClassifierNames = Field( + default="lgbm", description="Type of classifier to use for combining base learner predictions." + ) + + def __init__(self, quantiles: list[Quantile], hyperparams: WeightsCombinerHyperParams) -> None: + """Initialize WeightsCombiner.""" super().__init__(quantiles=quantiles, hyperparams=hyperparams) - self.models: list[Classifier] = [] - self._label_encoder = LabelEncoder() + self.models: list[Classifier] = [hyperparams.get_classifier() for _ in self.quantiles] + self._label_encoder = LabelEncoder() self._is_fitted = False @override def fit( self, - base_predictions: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None, + data: EnsembleForecastDataset, + data_val: EnsembleForecastDataset | None = None, + additional_features: ForecastInputDataset | None = None, sample_weights: pd.Series | None = None, ) -> None: - self._label_encoder.fit(base_predictions.model_names) + self._label_encoder.fit(data.model_names) for i, q in enumerate(self.quantiles): # Data preparation - dataset = base_predictions.select_quantile_classification(quantile=q) + dataset = data.select_quantile_classification(quantile=q) input_data = self._prepare_input_data( dataset=dataset, additional_features=additional_features, @@ -165,8 +164,8 @@ def _generate_predictions_quantile( @override def predict( self, - base_predictions: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, ) -> ForecastDataset: if not self.is_fitted: raise NotFittedError(self.__class__.__name__) @@ -174,21 +173,21 @@ def predict( # Generate predictions predictions = pd.DataFrame({ Quantile(q).format(): self._generate_predictions_quantile( - dataset=base_predictions.select_quantile(quantile=Quantile(q)), + dataset=data.select_quantile(quantile=Quantile(q)), additional_features=additional_features, model_index=i, ) for i, q in enumerate(self.quantiles) }) - target_series = base_predictions.target_series + target_series = data.target_series if target_series is not None: - predictions[base_predictions.target_column] = target_series + predictions[data.target_column] = target_series return ForecastDataset( data=predictions, - sample_interval=base_predictions.sample_interval, - target_column=base_predictions.target_column, - forecast_start=base_predictions.forecast_start, + sample_interval=data.sample_interval, + target_column=data.target_column, + forecast_start=data.forecast_start, ) @property @@ -199,7 +198,7 @@ def is_fitted(self) -> bool: # Final learner implementations using different classifiers # 1 LGBM Classifier -class LGBMLearnerHyperParams(LWFLHyperParams): +class LGBMCombinerHyperParams(WeightsCombinerHyperParams): """Hyperparameters for Learned Weights Final Learner with LGBM Classifier.""" n_estimators: int = Field( @@ -212,38 +211,18 @@ class LGBMLearnerHyperParams(LWFLHyperParams): description="Number of leaves for the LGBM Classifier. Defaults to 31.", ) - @property @override - def learner(self) -> type["LGBMLearner"]: - """Returns the LGBMLearner.""" - return LGBMLearner - - -class LGBMLearner(WeightsLearner): - """Final learner with LGBM Classifier.""" - - HyperParams = LGBMLearnerHyperParams - - def __init__( - self, - quantiles: list[Quantile], - hyperparams: LGBMLearnerHyperParams, - ) -> None: - """Initialize LGBMLearner.""" - super().__init__(quantiles=quantiles, hyperparams=hyperparams) - self.models = [ - LGBMClassifier( - class_weight="balanced", - n_estimators=hyperparams.n_estimators, - num_leaves=hyperparams.n_leaves, - n_jobs=1, - ) - for _ in quantiles - ] + def get_classifier(self) -> LGBMClassifier: + """Returns the LGBM Classifier.""" + return LGBMClassifier( + class_weight="balanced", + n_estimators=self.n_estimators, + num_leaves=self.n_leaves, + n_jobs=1, + ) -# 1 RandomForest Classifier -class RFLearnerHyperParams(LWFLHyperParams): +class RFCombinerHyperParams(WeightsCombinerHyperParams): """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" n_estimators: int = Field( @@ -271,35 +250,22 @@ class RFLearnerHyperParams(LWFLHyperParams): description="Fraction of features to be used for each iteration of the Random Forest. Defaults to 1.", ) - @property - def learner(self) -> type["RandomForestLearner"]: - """Returns the LGBMClassifier to be used as final learner.""" - return RandomForestLearner - - -class RandomForestLearner(WeightsLearner): - """Final learner using only Random Forest as base learners.""" - - def __init__(self, quantiles: list[Quantile], hyperparams: RFLearnerHyperParams) -> None: - """Initialize RandomForestLearner.""" - super().__init__(quantiles=quantiles, hyperparams=hyperparams) - - self.models = [ - LGBMClassifier( - boosting_type="rf", - class_weight="balanced", - n_estimators=hyperparams.n_estimators, - bagging_freq=hyperparams.bagging_freq, - bagging_fraction=hyperparams.bagging_fraction, - feature_fraction=hyperparams.feature_fraction, - num_leaves=hyperparams.n_leaves, - ) - for _ in quantiles - ] + @override + def get_classifier(self) -> LGBMClassifier: + """Returns the Random Forest LGBMClassifier.""" + return LGBMClassifier( + boosting_type="rf", + class_weight="balanced", + n_estimators=self.n_estimators, + bagging_freq=self.bagging_freq, + bagging_fraction=self.bagging_fraction, + feature_fraction=self.feature_fraction, + num_leaves=self.n_leaves, + ) # 3 XGB Classifier -class XGBLearnerHyperParams(LWFLHyperParams): +class XGBCombinerHyperParams(WeightsCombinerHyperParams): """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" n_estimators: int = Field( @@ -307,23 +273,13 @@ class XGBLearnerHyperParams(LWFLHyperParams): description="Number of estimators for the LGBM Classifier. Defaults to 20.", ) - @property - def learner(self) -> type["XGBLearner"]: - """Returns the LGBMClassifier to be used as final learner.""" - return XGBLearner - - -class XGBLearner(WeightsLearner): - """Final learner using only XGBoost as base learners.""" - - def __init__(self, quantiles: list[Quantile], hyperparams: XGBLearnerHyperParams) -> None: - """Initialize XGBLearner.""" - super().__init__(quantiles=quantiles, hyperparams=hyperparams) - self.models = [XGBClassifier(n_estimators=hyperparams.n_estimators) for _ in quantiles] + @override + def get_classifier(self) -> XGBClassifier: + """Returns the XGBClassifier.""" + return XGBClassifier(n_estimators=self.n_estimators) -# 4 Logistic Regression Classifier -class LogisticLearnerHyperParams(LWFLHyperParams): +class LogisticCombinerHyperParams(WeightsCombinerHyperParams): """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" fit_intercept: bool = Field( @@ -341,30 +297,17 @@ class LogisticLearnerHyperParams(LWFLHyperParams): description="Inverse of regularization strength; must be a positive float. Defaults to 1.0.", ) - @property - def learner(self) -> type["LogisticLearner"]: - """Returns the LGBMClassifier to be used as final learner.""" - return LogisticLearner - - -class LogisticLearner(WeightsLearner): - """Final learner using only Logistic Regression as base learners.""" - - def __init__(self, quantiles: list[Quantile], hyperparams: LogisticLearnerHyperParams) -> None: - """Initialize LogisticLearner.""" - super().__init__(quantiles=quantiles, hyperparams=hyperparams) - self.models = [ - LogisticRegression( - class_weight="balanced", - fit_intercept=hyperparams.fit_intercept, - penalty=hyperparams.penalty, - C=hyperparams.c, - ) - for _ in quantiles - ] + @override + def get_classifier(self) -> LogisticRegression: + """Returns the LogisticRegression.""" + return LogisticRegression( + class_weight="balanced", + fit_intercept=self.fit_intercept, + penalty=self.penalty, + C=self.c, + ) -# Assembly classes class LearnedWeightsHyperParams(HyperParams): """Hyperparameters for Stacked LGBM GBLinear Regressor.""" @@ -374,8 +317,8 @@ class LearnedWeightsHyperParams(HyperParams): "Defaults to [LGBMHyperParams, GBLinearHyperParams].", ) - final_hyperparams: LWFLHyperParams = Field( - default=LGBMLearnerHyperParams(), + combiner_hyperparams: WeightsCombinerHyperParams = Field( + default=LGBMCombinerHyperParams(), description="Hyperparameters for the final learner. Defaults to LGBMLearnerHyperParams.", ) @@ -404,23 +347,19 @@ def __init__(self, config: LearnedWeightsForecasterConfig) -> None: self._base_learners: list[BaseLearner] = self._init_base_learners( config=config, base_hyperparams=config.hyperparams.base_hyperparams ) - self._final_learner = config.hyperparams.final_hyperparams.learner_from_params( - quantiles=config.quantiles, - hyperparams=config.hyperparams.final_hyperparams, + self._forecast_combiner = WeightsCombiner( + quantiles=config.quantiles, hyperparams=config.hyperparams.combiner_hyperparams ) __all__ = [ - "LGBMLearner", - "LGBMLearnerHyperParams", + "LGBMCombinerHyperParams", "LearnedWeightsForecaster", "LearnedWeightsForecasterConfig", "LearnedWeightsHyperParams", - "LogisticLearner", - "LogisticLearnerHyperParams", - "RFLearnerHyperParams", - "RandomForestLearner", - "WeightsLearner", - "XGBLearner", - "XGBLearnerHyperParams", + "LogisticCombinerHyperParams", + "RFCombinerHyperParams", + "WeightsCombiner", + "WeightsCombinerHyperParams", + "XGBCombinerHyperParams", ] diff --git a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py index 658b08957..97174a69f 100644 --- a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py @@ -19,7 +19,7 @@ BaseLearner, BaseLearnerHyperParams, ) -from openstef_meta.framework.final_learner import FinalLearner, FinalLearnerHyperParams +from openstef_meta.framework.final_learner import ForecastCombiner, ForecastCombinerHyperParams from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) @@ -37,7 +37,7 @@ logger = logging.getLogger(__name__) -class RulesLearnerHyperParams(FinalLearnerHyperParams): +class RulesLearnerHyperParams(ForecastCombinerHyperParams): """HyperParams for Stacking Final Learner.""" feature_adders: Sequence[TimeSeriesTransform] = Field( @@ -57,7 +57,7 @@ def _check_not_empty(cls, v: list[TimeSeriesTransform]) -> list[TimeSeriesTransf return v -class RulesLearner(FinalLearner): +class RulesLearner(ForecastCombiner): """Combines base learner predictions per quantile into final predictions using a regression approach.""" def __init__(self, quantiles: list[Quantile], hyperparams: RulesLearnerHyperParams) -> None: @@ -82,7 +82,7 @@ def fit( # No fitting needed for rule-based final learner # Check that additional features are provided if additional_features is None: - raise ValueError("Additional features must be provided for RulesFinalLearner prediction.") + raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") if sample_weights is not None: logger.warning("Sample weights are ignored in RulesLearner.fit method.") @@ -108,7 +108,7 @@ def predict( additional_features: ForecastInputDataset | None, ) -> ForecastDataset: if additional_features is None: - raise ValueError("Additional features must be provided for RulesFinalLearner prediction.") + raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") decisions = self._predict_tree( additional_features.data, columns=base_predictions.select_quantile(quantile=self.quantiles[0]).data.columns diff --git a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py index 2afa2dfc0..17bb47ded 100644 --- a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py @@ -27,7 +27,7 @@ BaseLearner, BaseLearnerHyperParams, ) -from openstef_meta.framework.final_learner import FinalLearner, FinalLearnerHyperParams +from openstef_meta.framework.final_learner import ForecastCombiner, ForecastCombinerHyperParams from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) @@ -44,7 +44,7 @@ logger = logging.getLogger(__name__) -class StackingFinalLearnerHyperParams(FinalLearnerHyperParams): +class StackingForecastCombinerHyperParams(ForecastCombinerHyperParams): """HyperParams for Stacking Final Learner.""" feature_adders: Sequence[TimeSeriesTransform] = Field( @@ -58,11 +58,11 @@ class StackingFinalLearnerHyperParams(FinalLearnerHyperParams): ) -class StackingFinalLearner(FinalLearner): +class StackingForecastCombiner(ForecastCombiner): """Combines base learner predictions per quantile into final predictions using a regression approach.""" def __init__( - self, quantiles: list[Quantile], hyperparams: StackingFinalLearnerHyperParams, horizon: LeadTime + self, quantiles: list[Quantile], hyperparams: StackingForecastCombinerHyperParams, horizon: LeadTime ) -> None: """Initialize the Stacking final learner. @@ -164,7 +164,7 @@ def predict( @property def is_fitted(self) -> bool: - """Check the StackingFinalLearner is fitted.""" + """Check the StackingForecastCombiner is fitted.""" return all(x.is_fitted for x in self.models) @@ -177,8 +177,8 @@ class StackingHyperParams(HyperParams): "Defaults to [LGBMHyperParams, GBLinearHyperParams].", ) - final_hyperparams: StackingFinalLearnerHyperParams = Field( - default=StackingFinalLearnerHyperParams(), + final_hyperparams: StackingForecastCombinerHyperParams = Field( + default=StackingForecastCombinerHyperParams(), description="Hyperparameters for the final learner.", ) @@ -216,9 +216,9 @@ def __init__(self, config: StackingForecasterConfig) -> None: config=config, base_hyperparams=config.hyperparams.base_hyperparams ) - self._final_learner = StackingFinalLearner( + self._final_learner = StackingForecastCombiner( quantiles=config.quantiles, hyperparams=config.hyperparams.final_hyperparams, horizon=config.max_horizon ) -__all__ = ["StackingFinalLearner", "StackingForecaster", "StackingForecasterConfig", "StackingHyperParams"] +__all__ = ["StackingForecastCombiner", "StackingForecaster", "StackingForecasterConfig", "StackingHyperParams"] diff --git a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py index 5ca7b3003..ad00a393f 100644 --- a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py +++ b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py @@ -5,47 +5,47 @@ from datetime import timedelta import pytest +from lightgbm import LGBMClassifier +from sklearn.linear_model import LogisticRegression +from xgboost import XGBClassifier from openstef_core.datasets import ForecastInputDataset from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q from openstef_meta.models.learned_weights_forecaster import ( + Classifier, LearnedWeightsForecaster, LearnedWeightsForecasterConfig, LearnedWeightsHyperParams, - LGBMLearner, - LGBMLearnerHyperParams, - LogisticLearner, - LogisticLearnerHyperParams, - LWFLHyperParams, - RandomForestLearner, - RFLearnerHyperParams, - WeightsLearner, - XGBLearner, - XGBLearnerHyperParams, + LGBMCombinerHyperParams, + LogisticCombinerHyperParams, + RFCombinerHyperParams, + WeightsCombiner, + WeightsCombinerHyperParams, + XGBCombinerHyperParams, ) from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder @pytest.fixture(params=["rf", "lgbm", "xgboost", "logistic"]) -def final_hyperparams(request: pytest.FixtureRequest) -> LWFLHyperParams: +def combiner_hyperparams(request: pytest.FixtureRequest) -> WeightsCombinerHyperParams: """Fixture to provide different primary models types.""" learner_type = request.param if learner_type == "rf": - return RFLearnerHyperParams() + return RFCombinerHyperParams() if learner_type == "lgbm": - return LGBMLearnerHyperParams() + return LGBMCombinerHyperParams() if learner_type == "xgboost": - return XGBLearnerHyperParams() - return LogisticLearnerHyperParams() + return XGBCombinerHyperParams() + return LogisticCombinerHyperParams() @pytest.fixture -def base_config(final_hyperparams: LWFLHyperParams) -> LearnedWeightsForecasterConfig: +def base_config(combiner_hyperparams: WeightsCombinerHyperParams) -> LearnedWeightsForecasterConfig: """Base configuration for LearnedWeights forecaster tests.""" params = LearnedWeightsHyperParams( - final_hyperparams=final_hyperparams, + combiner_hyperparams=combiner_hyperparams, ) return LearnedWeightsForecasterConfig( quantiles=[Q(0.1), Q(0.5), Q(0.9)], @@ -55,21 +55,23 @@ def base_config(final_hyperparams: LWFLHyperParams) -> LearnedWeightsForecasterC ) -def test_final_learner_corresponds_to_hyperparams(base_config: LearnedWeightsForecasterConfig): - """Test that the final learner corresponds to the specified hyperparameters.""" +def test_forecast_combiner_corresponds_to_hyperparams(base_config: LearnedWeightsForecasterConfig): + """Test that the forecast combiner learner corresponds to the specified hyperparameters.""" forecaster = LearnedWeightsForecaster(config=base_config) - final_learner = forecaster._final_learner - - mapping: dict[type[LWFLHyperParams], type[WeightsLearner]] = { - RFLearnerHyperParams: RandomForestLearner, - LGBMLearnerHyperParams: LGBMLearner, - XGBLearnerHyperParams: XGBLearner, - LogisticLearnerHyperParams: LogisticLearner, + forecast_combiner = forecaster._forecast_combiner + assert isinstance(forecast_combiner, WeightsCombiner) + classifier = forecast_combiner.models[0] + + mapping: dict[type[WeightsCombinerHyperParams], type[Classifier]] = { + RFCombinerHyperParams: LGBMClassifier, + LGBMCombinerHyperParams: LGBMClassifier, + XGBCombinerHyperParams: XGBClassifier, + LogisticCombinerHyperParams: LogisticRegression, } - expected_learner_type = mapping[type(base_config.hyperparams.final_hyperparams)] + expected_type = mapping[type(base_config.hyperparams.combiner_hyperparams)] - assert isinstance(final_learner, expected_learner_type), ( - f"Final learner type {type(final_learner)} does not match expected type {expected_learner_type}" + assert isinstance(classifier, expected_type), ( + f"Final learner type {type(forecast_combiner)} does not match expected type {expected_type}" ) @@ -157,7 +159,7 @@ def test_learned_weights_forecaster_with_additional_features( # Arrange # Add a simple feature adder that adds a constant feature - base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) # type: ignore + base_config.hyperparams.combiner_hyperparams.feature_adders.append(CyclicFeaturesAdder()) # type: ignore forecaster = LearnedWeightsForecaster(config=base_config) # Act From b2fca54752c3dcd660c1bd63b4c22be492860b28 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 1 Dec 2025 17:08:09 +0100 Subject: [PATCH 047/104] fixed small issues Signed-off-by: Lars van Someren --- .../src/openstef_meta/framework/__init__.py | 2 +- ...{final_learner.py => forecast_combiner.py} | 6 ++-- .../framework/meta_forecaster.py | 4 +-- .../models/learned_weights_forecaster.py | 6 +++- .../openstef_meta/models/rules_forecaster.py | 19 +++++----- .../models/stacking_forecaster.py | 35 ++++++++++--------- .../tests/models/test_stacking_forecaster.py | 2 +- 7 files changed, 40 insertions(+), 34 deletions(-) rename packages/openstef-meta/src/openstef_meta/framework/{final_learner.py => forecast_combiner.py} (95%) diff --git a/packages/openstef-meta/src/openstef_meta/framework/__init__.py b/packages/openstef-meta/src/openstef_meta/framework/__init__.py index 64fa31259..bd120bc0e 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/framework/__init__.py @@ -4,7 +4,7 @@ """This module provides meta-forecasting models and related hyperparameters for the OpenSTEF project.""" from .base_learner import BaseLearner, BaseLearnerHyperParams -from .final_learner import ForecastCombiner, ForecastCombinerHyperParams +from .forecast_combiner import ForecastCombiner, ForecastCombinerHyperParams from .meta_forecaster import MetaForecaster __all__ = [ diff --git a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py b/packages/openstef-meta/src/openstef_meta/framework/forecast_combiner.py similarity index 95% rename from packages/openstef-meta/src/openstef_meta/framework/final_learner.py rename to packages/openstef-meta/src/openstef_meta/framework/forecast_combiner.py index 1ed90a1ad..2a1146d63 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/final_learner.py +++ b/packages/openstef-meta/src/openstef_meta/framework/forecast_combiner.py @@ -45,7 +45,7 @@ def __init__(self, quantiles: list[Quantile], hyperparams: ForecastCombinerHyper """Initialize the Final Learner.""" self.quantiles = quantiles self.hyperparams = hyperparams - self.final_learner_processing: TransformPipeline[TimeSeriesDataset] = TransformPipeline( + self.pre_processing: TransformPipeline[TimeSeriesDataset] = TransformPipeline( transforms=hyperparams.feature_adders ) self._is_fitted: bool = False @@ -94,7 +94,7 @@ def calculate_features(self, data: ForecastInputDataset) -> ForecastInputDataset Returns: ForecastInputDataset with additional features. """ - data_transformed = self.final_learner_processing.transform(data) + data_transformed = self.pre_processing.transform(data) return ForecastInputDataset( data=data_transformed.data, @@ -134,4 +134,4 @@ def is_fitted(self) -> bool: @property def has_features(self) -> bool: """Indicates whether the final learner uses additional features.""" - return len(self.final_learner_processing.transforms) > 0 + return len(self.pre_processing.transforms) > 0 diff --git a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py index 04d0042e9..49acd1c1f 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py @@ -22,7 +22,7 @@ BaseLearnerHyperParams, BaseLearnerNames, ) -from openstef_meta.framework.final_learner import ForecastCombiner +from openstef_meta.framework.forecast_combiner import ForecastCombiner from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_models.models.forecasting.forecaster import ( Forecaster, @@ -118,7 +118,7 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None base_predictions = self._predict_base_learners(data=full_dataset) if self._forecast_combiner.has_features: - self._forecast_combiner.final_learner_processing.fit(full_dataset) + self._forecast_combiner.pre_processing.fit(full_dataset) features = self._forecast_combiner.calculate_features(data=full_dataset) else: features = None diff --git a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py index 96132c302..1f4837038 100644 --- a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py @@ -32,7 +32,11 @@ BaseLearner, BaseLearnerHyperParams, ) -from openstef_meta.framework.final_learner import EnsembleForecastDataset, ForecastCombiner, ForecastCombinerHyperParams +from openstef_meta.framework.forecast_combiner import ( + EnsembleForecastDataset, + ForecastCombiner, + ForecastCombinerHyperParams, +) from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) diff --git a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py index 97174a69f..d83d586f6 100644 --- a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py @@ -19,7 +19,7 @@ BaseLearner, BaseLearnerHyperParams, ) -from openstef_meta.framework.final_learner import ForecastCombiner, ForecastCombinerHyperParams +from openstef_meta.framework.forecast_combiner import ForecastCombiner, ForecastCombinerHyperParams from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) @@ -75,8 +75,9 @@ def __init__(self, quantiles: list[Quantile], hyperparams: RulesLearnerHyperPara @override def fit( self, - base_predictions: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None, + data: EnsembleForecastDataset, + data_val: EnsembleForecastDataset | None = None, + additional_features: ForecastInputDataset | None = None, sample_weights: pd.Series | None = None, ) -> None: # No fitting needed for rule-based final learner @@ -104,20 +105,20 @@ def _predict_tree(self, data: pd.DataFrame, columns: pd.Index) -> pd.DataFrame: @override def predict( self, - base_predictions: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, ) -> ForecastDataset: if additional_features is None: raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") decisions = self._predict_tree( - additional_features.data, columns=base_predictions.select_quantile(quantile=self.quantiles[0]).data.columns + additional_features.data, columns=data.select_quantile(quantile=self.quantiles[0]).data.columns ) # Generate predictions predictions: list[pd.DataFrame] = [] for q in self.quantiles: - dataset = base_predictions.select_quantile(quantile=q) + dataset = data.select_quantile(quantile=q) preds = dataset.input_data().multiply(decisions).sum(axis=1) predictions.append(preds.to_frame(name=Quantile(q).format())) @@ -127,7 +128,7 @@ def predict( return ForecastDataset( data=df, - sample_interval=base_predictions.sample_interval, + sample_interval=data.sample_interval, ) @property @@ -190,7 +191,7 @@ def __init__(self, config: RulesForecasterConfig) -> None: config=config, base_hyperparams=config.hyperparams.base_hyperparams ) - self._final_learner = RulesLearner( + self._forecast_combiner = RulesLearner( quantiles=config.quantiles, hyperparams=config.hyperparams.final_hyperparams, ) diff --git a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py index 17bb47ded..7dd3b2220 100644 --- a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py @@ -27,7 +27,7 @@ BaseLearner, BaseLearnerHyperParams, ) -from openstef_meta.framework.final_learner import ForecastCombiner, ForecastCombinerHyperParams +from openstef_meta.framework.forecast_combiner import ForecastCombiner, ForecastCombinerHyperParams from openstef_meta.framework.meta_forecaster import ( EnsembleForecaster, ) @@ -115,28 +115,29 @@ def _combine_datasets( @override def fit( self, - base_predictions: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None, + data: EnsembleForecastDataset, + data_val: EnsembleForecastDataset | None = None, + additional_features: ForecastInputDataset | None = None, sample_weights: pd.Series | None = None, ) -> None: for i, q in enumerate(self.quantiles): if additional_features is not None: - dataset = base_predictions.select_quantile(quantile=q) - data = self._combine_datasets( + dataset = data.select_quantile(quantile=q) + input_data = self._combine_datasets( data=dataset, additional_features=additional_features, ) else: - data = base_predictions.select_quantile(quantile=q) + input_data = data.select_quantile(quantile=q) - self.models[i].fit(data=data, data_val=None) + self.models[i].fit(data=input_data, data_val=None) @override def predict( self, - base_predictions: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, ) -> ForecastDataset: if not self.is_fitted: raise NotFittedError(self.__class__.__name__) @@ -145,13 +146,13 @@ def predict( predictions: list[pd.DataFrame] = [] for i, q in enumerate(self.quantiles): if additional_features is not None: - data = self._combine_datasets( - data=base_predictions.select_quantile(quantile=q), + input_data = self._combine_datasets( + data=data.select_quantile(quantile=q), additional_features=additional_features, ) else: - data = base_predictions.select_quantile(quantile=q) - p = self.models[i].predict(data=data).data + input_data = data.select_quantile(quantile=q) + p = self.models[i].predict(data=input_data).data predictions.append(p) # Concatenate predictions along columns to form a DataFrame with quantile columns @@ -159,7 +160,7 @@ def predict( return ForecastDataset( data=df, - sample_interval=base_predictions.sample_interval, + sample_interval=data.sample_interval, ) @property @@ -177,7 +178,7 @@ class StackingHyperParams(HyperParams): "Defaults to [LGBMHyperParams, GBLinearHyperParams].", ) - final_hyperparams: StackingForecastCombinerHyperParams = Field( + combiner_hyperparams: StackingForecastCombinerHyperParams = Field( default=StackingForecastCombinerHyperParams(), description="Hyperparameters for the final learner.", ) @@ -216,8 +217,8 @@ def __init__(self, config: StackingForecasterConfig) -> None: config=config, base_hyperparams=config.hyperparams.base_hyperparams ) - self._final_learner = StackingForecastCombiner( - quantiles=config.quantiles, hyperparams=config.hyperparams.final_hyperparams, horizon=config.max_horizon + self._forecast_combiner = StackingForecastCombiner( + quantiles=config.quantiles, hyperparams=config.hyperparams.combiner_hyperparams, horizon=config.max_horizon ) diff --git a/packages/openstef-meta/tests/models/test_stacking_forecaster.py b/packages/openstef-meta/tests/models/test_stacking_forecaster.py index 9eccde9b9..33a956d8d 100644 --- a/packages/openstef-meta/tests/models/test_stacking_forecaster.py +++ b/packages/openstef-meta/tests/models/test_stacking_forecaster.py @@ -112,7 +112,7 @@ def test_stacking_forecaster_with_additional_features( ): """Test that forecaster works with additional features for the final learner.""" - base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) + base_config.hyperparams.combiner_hyperparams.feature_adders = [CyclicFeaturesAdder()] # Arrange expected_quantiles = base_config.quantiles From ddef9f3949641f0e1ac96a43d44bc310d6dfe2ac Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 2 Dec 2025 15:34:34 +0100 Subject: [PATCH 048/104] Major Refactor, Working Version Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 162 ++++++ .../src/openstef_meta/framework/__init__.py | 2 +- .../openstef_meta/framework/base_learner.py | 34 -- .../framework/meta_forecaster.py | 185 ------- .../src/openstef_meta/models/__init__.py | 24 - .../models/ensemble_forecasting_model.py | 479 ++++++++++++++++++ .../models/forecast_combiners/__init__.py | 28 + .../forecast_combiners}/forecast_combiner.py | 103 ++-- .../learned_weights_combiner.py} | 370 +++++++------- .../forecast_combiners/rules_combiner.py | 154 ++++++ .../stacking_combiner.py} | 161 +++--- .../models/forecasting/__init__.py | 12 + .../{ => forecasting}/residual_forecaster.py | 43 +- .../openstef_meta/models/rules_forecaster.py | 206 -------- .../src/openstef_meta/presets/__init__.py | 5 + .../presets/forecasting_workflow.py | 453 +++++++++++++++++ .../src/openstef_meta/utils/datasets.py | 39 +- .../openstef-meta/test_forecasting_model.py | 274 ++++++++++ .../models/test_learned_weights_forecaster.py | 342 ++++++------- .../tests/models/test_residual_forecaster.py | 284 +++++------ .../tests/models/test_rules_forecaster.py | 272 +++++----- .../tests/models/test_stacking_forecaster.py | 272 +++++----- .../presets/forecasting_workflow.py | 59 +-- .../workflows/custom_forecasting_workflow.py | 3 +- 24 files changed, 2519 insertions(+), 1447 deletions(-) create mode 100644 examples/benchmarks/liander_2024_ensemble.py delete mode 100644 packages/openstef-meta/src/openstef_meta/framework/base_learner.py delete mode 100644 packages/openstef-meta/src/openstef_meta/models/__init__.py create mode 100644 packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py create mode 100644 packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py rename packages/openstef-meta/src/openstef_meta/{framework => models/forecast_combiners}/forecast_combiner.py (63%) rename packages/openstef-meta/src/openstef_meta/models/{learned_weights_forecaster.py => forecast_combiners/learned_weights_combiner.py} (72%) create mode 100644 packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py rename packages/openstef-meta/src/openstef_meta/models/{stacking_forecaster.py => forecast_combiners/stacking_combiner.py} (54%) create mode 100644 packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py rename packages/openstef-meta/src/openstef_meta/models/{ => forecasting}/residual_forecaster.py (85%) delete mode 100644 packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py create mode 100644 packages/openstef-meta/src/openstef_meta/presets/__init__.py create mode 100644 packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py create mode 100644 packages/openstef-meta/test_forecasting_model.py diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py new file mode 100644 index 000000000..d3c990ad2 --- /dev/null +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -0,0 +1,162 @@ +"""Liander 2024 Benchmark Example. + +==================================== + +This example demonstrates how to set up and run the Liander 2024 STEF benchmark using OpenSTEF BEAM. +The benchmark will evaluate XGBoost and GBLinear models on the dataset from HuggingFace. +""" + +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +import os +import time + +os.environ["OMP_NUM_THREADS"] = "1" # Set OMP_NUM_THREADS to 1 to avoid issues with parallel execution and xgboost +os.environ["OPENBLAS_NUM_THREADS"] = "1" +os.environ["MKL_NUM_THREADS"] = "1" + +import logging +import multiprocessing +from datetime import timedelta +from pathlib import Path + +from pydantic_extra_types.coordinate import Coordinate +from pydantic_extra_types.country import CountryAlpha2 + +from openstef_beam.backtesting.backtest_forecaster import BacktestForecasterConfig, OpenSTEF4BacktestForecaster +from openstef_beam.benchmarking.benchmark_pipeline import BenchmarkContext +from openstef_beam.benchmarking.benchmarks.liander2024 import Liander2024Category, create_liander2024_benchmark_runner +from openstef_beam.benchmarking.callbacks.strict_execution_callback import StrictExecutionCallback +from openstef_beam.benchmarking.models.benchmark_target import BenchmarkTarget +from openstef_beam.benchmarking.storage.local_storage import LocalBenchmarkStorage +from openstef_core.types import LeadTime, Q +from openstef_meta.presets import ( + EnsembleWorkflowConfig, + create_ensemble_workflow, +) +from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage +from openstef_models.presets.forecasting_workflow import LocationConfig +from openstef_models.workflows import CustomForecastingWorkflow + +logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") + +OUTPUT_PATH = Path("./benchmark_results") + +N_PROCESSES = 1 # multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark + +ensemble_type = "learned_weights" # "stacking", "learned_weights" or "rules" +base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" +combiner_model = ( + "lgbm" # "lgbm", "xgboost", "rf" or "logistic" for learned weights combiner, gblinear for stacking combiner +) + +model = "Ensemble_" + "_".join(base_models) + "_" + ensemble_type + "_" + combiner_model + +# Model configuration +FORECAST_HORIZONS = [LeadTime.from_string("PT36H")] # Forecast horizon(s) +PREDICTION_QUANTILES = [ + Q(0.05), + Q(0.1), + Q(0.3), + Q(0.5), + Q(0.7), + Q(0.9), + Q(0.95), +] # Quantiles for probabilistic forecasts + +BENCHMARK_FILTER: list[Liander2024Category] | None = None + +USE_MLFLOW_STORAGE = False + +if USE_MLFLOW_STORAGE: + storage = MLFlowStorage( + tracking_uri=str(OUTPUT_PATH / "mlflow_artifacts"), + local_artifacts_path=OUTPUT_PATH / "mlflow_tracking_artifacts", + ) +else: + storage = None + +common_config = EnsembleWorkflowConfig( + model_id="common_model_", + ensemble_type=ensemble_type, + base_models=base_models, # type: ignore + combiner_model=combiner_model, + horizons=FORECAST_HORIZONS, + quantiles=PREDICTION_QUANTILES, + model_reuse_enable=False, + mlflow_storage=None, + radiation_column="shortwave_radiation", + rolling_aggregate_features=["mean", "median", "max", "min"], + wind_speed_column="wind_speed_80m", + pressure_column="surface_pressure", + temperature_column="temperature_2m", + relative_humidity_column="relative_humidity_2m", + energy_price_column="EPEX_NL", +) + + +# Create the backtest configuration +backtest_config = BacktestForecasterConfig( + requires_training=True, + predict_length=timedelta(days=7), + predict_min_length=timedelta(minutes=15), + predict_context_length=timedelta(days=14), # Context needed for lag features + predict_context_min_coverage=0.5, + training_context_length=timedelta(days=90), # Three months of training data + training_context_min_coverage=0.5, + predict_sample_interval=timedelta(minutes=15), +) + + +def _target_forecaster_factory( + context: BenchmarkContext, + target: BenchmarkTarget, +) -> OpenSTEF4BacktestForecaster: + # Factory function that creates a forecaster for a given target. + prefix = context.run_name + base_config = common_config + + def _create_workflow() -> CustomForecastingWorkflow: + # Create a new workflow instance with fresh model. + return create_ensemble_workflow( + config=base_config.model_copy( + update={ + "model_id": f"{prefix}_{target.name}", + "location": LocationConfig( + name=target.name, + description=target.description, + coordinate=Coordinate( + latitude=target.latitude, + longitude=target.longitude, + ), + country_code=CountryAlpha2("NL"), + ), + } + ) + ) + + return OpenSTEF4BacktestForecaster( + config=backtest_config, + workflow_factory=_create_workflow, + debug=False, + cache_dir=OUTPUT_PATH / "cache" / f"{context.run_name}_{target.name}", + ) + + +if __name__ == "__main__": + start_time = time.time() + create_liander2024_benchmark_runner( + storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH / model), + data_dir=Path("../data/liander2024-energy-forecasting-benchmark"), + callbacks=[StrictExecutionCallback()], + ).run( + forecaster_factory=_target_forecaster_factory, + run_name=model, + n_processes=N_PROCESSES, + filter_args=BENCHMARK_FILTER, + ) + + end_time = time.time() + print(f"Benchmark completed in {end_time - start_time:.2f} seconds.") diff --git a/packages/openstef-meta/src/openstef_meta/framework/__init__.py b/packages/openstef-meta/src/openstef_meta/framework/__init__.py index bd120bc0e..0775b88f1 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/framework/__init__.py @@ -4,7 +4,7 @@ """This module provides meta-forecasting models and related hyperparameters for the OpenSTEF project.""" from .base_learner import BaseLearner, BaseLearnerHyperParams -from .forecast_combiner import ForecastCombiner, ForecastCombinerHyperParams +from ..models.combiners.forecast_combiner import ForecastCombiner, ForecastCombinerHyperParams from .meta_forecaster import MetaForecaster __all__ = [ diff --git a/packages/openstef-meta/src/openstef_meta/framework/base_learner.py b/packages/openstef-meta/src/openstef_meta/framework/base_learner.py deleted file mode 100644 index 96cda27f5..000000000 --- a/packages/openstef-meta/src/openstef_meta/framework/base_learner.py +++ /dev/null @@ -1,34 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Core meta model interfaces and configurations. - -Provides the fundamental building blocks for implementing meta models in OpenSTEF. -These mixins establish contracts that ensure consistent behavior across different meta model types -while ensuring full compatability with regular Forecasters. -""" - -from typing import Literal - -from openstef_models.models.forecasting.gblinear_forecaster import ( - GBLinearForecaster, - GBLinearHyperParams, -) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMHyperParams -from openstef_models.models.forecasting.lgbmlinear_forecaster import ( - LGBMLinearForecaster, - LGBMLinearHyperParams, -) -from openstef_models.models.forecasting.xgboost_forecaster import ( - XGBoostForecaster, - XGBoostHyperParams, -) - -BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster -BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams -BaseLearnerNames = Literal[ - "LGBMForecaster", - "LGBMLinearForecaster", - "XGBoostForecaster", - "GBLinearForecaster", -] diff --git a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py index 49acd1c1f..e69de29bb 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py @@ -1,185 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Core meta model interfaces and configurations. - -Provides the fundamental building blocks for implementing meta models in OpenSTEF. -These mixins establish contracts that ensure consistent behavior across different meta model types -while ensuring full compatability with regular Forecasters. -""" - -import logging -from typing import cast, override - -import pandas as pd - -from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_core.exceptions import ( - NotFittedError, -) -from openstef_meta.framework.base_learner import ( - BaseLearner, - BaseLearnerHyperParams, - BaseLearnerNames, -) -from openstef_meta.framework.forecast_combiner import ForecastCombiner -from openstef_meta.utils.datasets import EnsembleForecastDataset -from openstef_models.models.forecasting.forecaster import ( - Forecaster, - ForecasterConfig, -) - -logger = logging.getLogger(__name__) - - -class MetaForecaster(Forecaster): - """Abstract class for Meta forecasters combining multiple models.""" - - _config: ForecasterConfig - - @staticmethod - def _init_base_learners( - config: ForecasterConfig, base_hyperparams: list[BaseLearnerHyperParams] - ) -> list[BaseLearner]: - """Initialize base learners based on provided hyperparameters. - - Returns: - list[Forecaster]: List of initialized base learner forecasters. - """ - base_learners: list[BaseLearner] = [] - horizons = config.horizons - quantiles = config.quantiles - - for hyperparams in base_hyperparams: - forecaster_cls = hyperparams.forecaster_class() - config = forecaster_cls.Config(horizons=horizons, quantiles=quantiles) - if "hyperparams" in forecaster_cls.Config.model_fields: - config = config.model_copy(update={"hyperparams": hyperparams}) - - base_learners.append(config.forecaster_from_config()) - - return base_learners - - @property - @override - def config(self) -> ForecasterConfig: - return self._config - - @property - def feature_importances(self) -> pd.DataFrame: - """Placeholder for feature importances across base learners and final learner.""" - raise NotImplementedError("Feature importances are not implemented for EnsembleForecaster.") - # TODO(#745): Make MetaForecaster explainable - - @property - def model_contributions(self) -> pd.DataFrame: - """Placeholder for model contributions across base learners and final learner.""" - raise NotImplementedError("Model contributions are not implemented for EnsembleForecaster.") - # TODO(#745): Make MetaForecaster explainable - - -class EnsembleForecaster(MetaForecaster): - """Abstract class for Meta forecasters combining multiple base learners and a final learner.""" - - _config: ForecasterConfig - _base_learners: list[BaseLearner] - _forecast_combiner: ForecastCombiner - - @property - @override - def is_fitted(self) -> bool: - return all(x.is_fitted for x in self._base_learners) and self._forecast_combiner.is_fitted - - @property - @override - def config(self) -> ForecasterConfig: - return self._config - - @override - def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: - """Fit the Hybrid model to the training data. - - Args: - data: Training data in the expected ForecastInputDataset format. - data_val: Validation data for tuning the model (optional, not used in this implementation). - - """ - # Fit base learners - [x.fit(data=data, data_val=data_val) for x in self._base_learners] - - # Reset forecast start date to ensure we predict on the full dataset - full_dataset = ForecastInputDataset( - data=data.data, - sample_interval=data.sample_interval, - target_column=data.target_column, - forecast_start=data.index[0], - ) - - base_predictions = self._predict_base_learners(data=full_dataset) - - if self._forecast_combiner.has_features: - self._forecast_combiner.pre_processing.fit(full_dataset) - features = self._forecast_combiner.calculate_features(data=full_dataset) - else: - features = None - - sample_weights = None - if data.sample_weight_column in data.data.columns: - sample_weights = data.data.loc[:, data.sample_weight_column] - - self._forecast_combiner.fit( - data=base_predictions, - data_val=None, # TODO ADD validation dataset support - additional_features=features, - sample_weights=sample_weights, - ) - - self._is_fitted = True - - def _predict_base_learners(self, data: ForecastInputDataset) -> EnsembleForecastDataset: - """Generate predictions from base learners. - - Args: - data: Input data for prediction. - - Returns: - DataFrame containing base learner predictions. - """ - base_predictions: dict[BaseLearnerNames, ForecastDataset] = {} - for learner in self._base_learners: - preds = learner.predict(data=data) - name = cast(BaseLearnerNames, learner.__class__.__name__) - base_predictions[name] = preds - - return EnsembleForecastDataset.from_forecast_datasets(base_predictions, target_series=data.target_series) - - @override - def predict(self, data: ForecastInputDataset) -> ForecastDataset: - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - full_dataset = ForecastInputDataset( - data=data.data, - sample_interval=data.sample_interval, - target_column=data.target_column, - forecast_start=data.index[0], - ) - - base_predictions = self._predict_base_learners(data=full_dataset) - - if self._forecast_combiner.has_features: - additional_features = self._forecast_combiner.calculate_features(data=data) - else: - additional_features = None - - return self._forecast_combiner.predict( - data=base_predictions, - additional_features=additional_features, - ) - - -__all__ = [ - "BaseLearner", - "BaseLearnerHyperParams", - "MetaForecaster", -] diff --git a/packages/openstef-meta/src/openstef_meta/models/__init__.py b/packages/openstef-meta/src/openstef_meta/models/__init__.py deleted file mode 100644 index 614543150..000000000 --- a/packages/openstef-meta/src/openstef_meta/models/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""This module provides meta-forecasting models and related hyperparameters for the OpenSTEF project.""" - -from .learned_weights_forecaster import ( - LearnedWeightsForecaster, - LearnedWeightsForecasterConfig, - LearnedWeightsHyperParams, -) -from .residual_forecaster import ResidualForecaster, ResidualForecasterConfig, ResidualHyperParams -from .stacking_forecaster import StackingForecaster, StackingForecasterConfig, StackingHyperParams - -__all__ = [ - "LearnedWeightsForecaster", - "LearnedWeightsForecasterConfig", - "LearnedWeightsHyperParams", - "ResidualForecaster", - "ResidualForecasterConfig", - "ResidualHyperParams", - "StackingForecaster", - "StackingForecasterConfig", - "StackingHyperParams", -] diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py new file mode 100644 index 000000000..c7eaf3c1f --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -0,0 +1,479 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""High-level forecasting model that orchestrates the complete prediction pipeline. + +Combines feature engineering, forecasting, and postprocessing into a unified interface. +Handles both single-horizon and multi-horizon forecasters while providing consistent +data transformation and validation. +""" + +import logging +from datetime import datetime, timedelta +from functools import partial +from typing import cast, override + +import pandas as pd +from pydantic import Field, PrivateAttr + +from openstef_beam.evaluation import EvaluationConfig, EvaluationPipeline, SubsetMetric +from openstef_beam.evaluation.metric_providers import MetricProvider, ObservedProbabilityProvider, R2Provider +from openstef_core.base_model import BaseModel +from openstef_core.datasets import ( + ForecastDataset, + ForecastInputDataset, + TimeSeriesDataset, +) +from openstef_core.datasets.timeseries_dataset import validate_horizons_present +from openstef_core.exceptions import NotFittedError +from openstef_core.mixins import Predictor, TransformPipeline +from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner +from openstef_meta.utils.datasets import EnsembleForecastDataset +from openstef_models.models.forecasting import Forecaster +from openstef_models.models.forecasting.forecaster import ForecasterConfig +from openstef_models.models.forecasting_model import ModelFitResult +from openstef_models.utils.data_split import DataSplitter + + +class EnsembleForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): + """Complete forecasting pipeline combining preprocessing, prediction, and postprocessing. + + Orchestrates the full forecasting workflow by managing feature engineering, + model training/prediction, and result postprocessing. Automatically handles + the differences between single-horizon and multi-horizon forecasters while + ensuring data consistency and validation throughout the pipeline. + + Invariants: + - fit() must be called before predict() + - Forecaster and preprocessing horizons must match during initialization + + Important: + The `cutoff_history` parameter is crucial when using lag-based features in + preprocessing. For example, a lag-14 transformation creates NaN values for + the first 14 days of data. Set `cutoff_history` to exclude these incomplete + rows from training. You must configure this manually based on your preprocessing + pipeline since lags cannot be automatically inferred from the transforms. + + Example: + Basic forecasting workflow: + + >>> from openstef_models.models.forecasting.constant_median_forecaster import ( + ... ConstantMedianForecaster, ConstantMedianForecasterConfig + ... ) + >>> from openstef_core.types import LeadTime + >>> + >>> # Note: This is a conceptual example showing the API structure + >>> # Real usage requires implemented forecaster classes + >>> forecaster = ConstantMedianForecaster( + ... config=ConstantMedianForecasterConfig(horizons=[LeadTime.from_string("PT36H")]) + ... ) + >>> # Create and train model + >>> model = ForecastingModel( + ... forecaster=forecaster, + ... cutoff_history=timedelta(days=14), # Match your maximum lag in preprocessing + ... ) + >>> model.fit(training_data) # doctest: +SKIP + >>> + >>> # Generate forecasts + >>> forecasts = model.predict(new_data) # doctest: +SKIP + """ + + # Forecasting components + common_preprocessing: TransformPipeline[TimeSeriesDataset] = Field( + default_factory=TransformPipeline[TimeSeriesDataset], + description="Feature engineering pipeline for transforming raw input data into model-ready features.", + exclude=True, + ) + + model_specific_preprocessing: dict[str, TransformPipeline[TimeSeriesDataset]] = Field( + default_factory=dict, + description="Feature engineering pipeline for transforming raw input data into model-ready features.", + exclude=True, + ) + + forecasters: dict[str, Forecaster] = Field( + default=..., + description="Underlying forecasting algorithm, either single-horizon or multi-horizon.", + exclude=True, + ) + + combiner: ForecastCombiner = Field( + default=..., + description="Combiner to aggregate forecasts from multiple forecasters if applicable.", + exclude=True, + ) + + combiner_preprocessing: TransformPipeline[TimeSeriesDataset] = Field( + default_factory=TransformPipeline[TimeSeriesDataset], + description="Feature engineering for the forecast combiner.", + exclude=True, + ) + + postprocessing: TransformPipeline[ForecastDataset] = Field( + default_factory=TransformPipeline[ForecastDataset], + description="Postprocessing pipeline for transforming model outputs into final forecasts.", + exclude=True, + ) + target_column: str = Field( + default="load", + description="Name of the target variable column in datasets.", + ) + data_splitter: DataSplitter = Field( + default_factory=DataSplitter, + description="Data splitting strategy for train/validation/test sets.", + ) + cutoff_history: timedelta = Field( + default=timedelta(days=0), + description="Amount of historical data to exclude from training and prediction due to incomplete features " + "from lag-based preprocessing. When using lag transforms (e.g., lag-14), the first N days contain NaN values. " + "Set this to match your maximum lag duration (e.g., timedelta(days=14)). " + "Default of 0 assumes no invalid rows are created by preprocessing.", + ) + # Evaluation + evaluation_metrics: list[MetricProvider] = Field( + default_factory=lambda: [R2Provider(), ObservedProbabilityProvider()], + description="List of metric providers for evaluating model score.", + ) + # Metadata + tags: dict[str, str] = Field( + default_factory=dict, + description="Optional metadata tags for the model.", + ) + + _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) + + @property + def config(self) -> list[ForecasterConfig]: + """Returns the configuration of the underlying forecaster.""" + return [x.config for x in self.forecasters.values()] + + @property + @override + def is_fitted(self) -> bool: + return all(f.is_fitted for f in self.forecasters.values()) and self.combiner.is_fitted + + @property + def forecaster_names(self) -> list[str]: + """Returns the names of the underlying forecasters.""" + return list(self.forecasters.keys()) + + @override + def fit( + self, + data: TimeSeriesDataset, + data_val: TimeSeriesDataset | None = None, + data_test: TimeSeriesDataset | None = None, + ) -> ModelFitResult: + """Train the forecasting model on the provided dataset. + + Fits the preprocessing pipeline and underlying forecaster. Handles both + single-horizon and multi-horizon forecasters appropriately. + + The data splitting follows this sequence: + 1. Split test set from full data (using test_splitter) + 2. Split validation from remaining train+val data (using val_splitter) + 3. Train on the final training set + + Args: + data: Historical time series data with features and target values. + data_val: Optional validation data. If provided, splitters are ignored for validation. + data_test: Optional test data. If provided, splitters are ignored for test. + + Returns: + FitResult containing training details and metrics. + """ + # Fit the feature engineering transforms + self.common_preprocessing.fit(data=data) + + # Fit predict forecasters + ensemble_predictions = self._preprocess_fit_forecasters( + data=data, + data_val=data_val, + data_test=data_test, + ) + + if data_val is not None: + ensemble_predictions_val = self._predict_forecasters( + data=self.prepare_input(data=data_val), + ) + else: + ensemble_predictions_val = None + + if len(self.combiner_preprocessing.transforms) > 0: + combiner_data = self.prepare_input(data=data) + self.combiner_preprocessing.fit(combiner_data) + combiner_data = self.combiner_preprocessing.transform(combiner_data) + features = ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) + else: + features = None + + self.combiner.fit( + data=ensemble_predictions, + data_val=ensemble_predictions_val, + additional_features=features, + ) + + # Prepare input datasets for metrics calculation + input_data_train = self.prepare_input(data=data) + input_data_val = self.prepare_input(data=data_val) if data_val else None + input_data_test = self.prepare_input(data=data_test) if data_test else None + + metrics_train = self._predict_and_score(input_data=input_data_train) + metrics_val = self._predict_and_score(input_data=input_data_val) if input_data_val else None + metrics_test = self._predict_and_score(input_data=input_data_test) if input_data_test else None + metrics_full = self.score(data=data) + + return ModelFitResult( + input_dataset=data, + input_data_train=input_data_train, + input_data_val=input_data_val, + input_data_test=input_data_test, + metrics_train=metrics_train, + metrics_val=metrics_val, + metrics_test=metrics_test, + metrics_full=metrics_full, + ) + + def _preprocess_fit_forecasters( + self, + data: TimeSeriesDataset, + data_val: TimeSeriesDataset | None = None, + data_test: TimeSeriesDataset | None = None, + ) -> EnsembleForecastDataset: + + predictions_raw: dict[str, ForecastDataset] = {} + + for name, forecaster in self.forecasters.items(): + validate_horizons_present(data, forecaster.config.horizons) + + # Transform and split input data + input_data_train = self.prepare_input(data=data, forecaster_name=name) + input_data_val = self.prepare_input(data=data_val, forecaster_name=name) if data_val else None + input_data_test = self.prepare_input(data=data_test, forecaster_name=name) if data_test else None + + # Drop target column nan's from training data. One can not train on missing targets. + target_dropna = partial(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownMemberType] + input_data_train = input_data_train.pipe_pandas(target_dropna) + input_data_val = input_data_val.pipe_pandas(target_dropna) if input_data_val else None + input_data_test = input_data_test.pipe_pandas(target_dropna) if input_data_test else None + + # Transform the input data to a valid forecast input and split into train/val/test + input_data_train, input_data_val, input_data_test = self.data_splitter.split_dataset( + data=input_data_train, + data_val=input_data_val, + data_test=input_data_test, + target_column=self.target_column, + ) + + # Fit the model + forecaster.fit(data=input_data_train, data_val=input_data_val) + predictions_raw[name] = self.forecasters[name].predict(data=input_data_train) + + return EnsembleForecastDataset.from_forecast_datasets( + predictions_raw, target_series=data.data[self.target_column] + ) + + def _predict_forecasters(self, data: TimeSeriesDataset) -> EnsembleForecastDataset: + """Generate predictions from base learners. + + Args: + data: Input data for prediction. + + Returns: + DataFrame containing base learner predictions. + """ + base_predictions: dict[str, ForecastDataset] = {} + for name, forecaster in self.forecasters.items(): + forecaster_data = self.prepare_input(data, forecaster_name=name) + preds = forecaster.predict(data=forecaster_data) + base_predictions[name] = preds + + return EnsembleForecastDataset.from_forecast_datasets( + base_predictions, target_series=data.data[self.target_column] + ) + + @override + def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: + """Generate forecasts using the trained model. + + Transforms input data through the preprocessing pipeline, generates predictions + using the underlying forecaster, and applies postprocessing transformations. + + Args: + data: Input time series data for generating forecasts. + forecast_start: Starting time for forecasts. If None, uses data end time. + + Returns: + Processed forecast dataset with predictions and uncertainty estimates. + + Raises: + NotFittedError: If the model hasn't been trained yet. + """ + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + # Transform the input data to a valid forecast input + input_data = self.prepare_input(data=data, forecast_start=forecast_start) + + # Generate predictions + raw_predictions = self._predict(input_data=input_data) + + return self.postprocessing.transform(data=raw_predictions) + + def prepare_input( + self, + data: TimeSeriesDataset, + forecaster_name: str | None = None, + forecast_start: datetime | None = None, + ) -> ForecastInputDataset: + """Prepare input data for forecasting by applying preprocessing and filtering. + + Transforms raw time series data through the preprocessing pipeline, restores + the target column, and filters out incomplete historical data to ensure + training quality. + + Args: + data: Raw time series dataset to prepare for forecasting. + forecast_start: Optional start time for forecasts. If provided and earlier + than the cutoff time, overrides the cutoff for data filtering. + + Returns: + Processed forecast input dataset ready for model prediction. + """ + # Transform and restore target column + data = self.common_preprocessing.transform(data=data) + + # Apply model-specific preprocessing if available + if forecaster_name in self.model_specific_preprocessing: + self.model_specific_preprocessing[forecaster_name].fit(data=data) + data = self.model_specific_preprocessing[forecaster_name].transform(data=data) + + input_data = restore_target(dataset=data, original_dataset=data, target_column=self.target_column) + + # Cut away input history to avoid training on incomplete data + input_data_start = cast("pd.Series[pd.Timestamp]", input_data.index).min().to_pydatetime() + input_data_cutoff = input_data_start + self.cutoff_history + if forecast_start is not None and forecast_start < input_data_cutoff: + input_data_cutoff = forecast_start + self._logger.warning( + "Forecast start %s is after input data start + cutoff history %s. Using forecast start as cutoff.", + forecast_start, + input_data_cutoff, + ) + input_data = input_data.filter_by_range(start=input_data_cutoff) + + return ForecastInputDataset.from_timeseries( + dataset=input_data, + target_column=self.target_column, + forecast_start=forecast_start, + ) + + def _predict_and_score(self, input_data: ForecastInputDataset) -> SubsetMetric: + prediction_raw = self._predict(input_data=input_data) + prediction = self.postprocessing.transform(data=prediction_raw) + return self._calculate_score(prediction=prediction) + + def _predict(self, input_data: ForecastInputDataset) -> ForecastDataset: + + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + ensemble_predictions = self._predict_forecasters(data=input_data) + + additional_features = ( + ForecastInputDataset.from_timeseries( + self.combiner_preprocessing.transform(data=input_data), target_column=self.target_column + ) + if len(self.combiner_preprocessing.transforms) > 0 + else None + ) + + # Predict and restore target column + prediction = self.combiner.predict( + data=ensemble_predictions, + additional_features=additional_features, + ) + return restore_target(dataset=prediction, original_dataset=input_data, target_column=self.target_column) + + def score( + self, + data: TimeSeriesDataset, + ) -> SubsetMetric: + """Evaluate model performance on the provided dataset. + + Generates predictions for the dataset and calculates evaluation metrics + by comparing against ground truth values. Uses the configured evaluation + metrics to assess forecast quality at the maximum forecast horizon. + + Args: + data: Time series dataset containing both features and target values + for evaluation. + + Returns: + Evaluation metrics including configured providers (e.g., R2, observed + probability) computed at the maximum forecast horizon. + """ + prediction = self.predict(data=data) + + return self._calculate_score(prediction=prediction) + + def _calculate_score(self, prediction: ForecastDataset) -> SubsetMetric: + if prediction.target_series is None: + raise ValueError("Prediction dataset must contain target series for scoring.") + + # We need to make sure there are no NaNs in the target label for metric calculation + prediction = prediction.pipe_pandas(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownArgumentType, reportUnknownMemberType] + + pipeline = EvaluationPipeline( + # Needs only one horizon since we are using only a single prediction step + # If a more comprehensive test is needed, a backtest should be run. + config=EvaluationConfig(available_ats=[], lead_times=[self.config[0].max_horizon]), + quantiles=self.combiner.config.quantiles, + # Similarly windowed metrics are not relevant for single predictions. + window_metric_providers=[], + global_metric_providers=self.evaluation_metrics, + ) + + evaluation_result = pipeline.run_for_subset( + filtering=self.combiner.config.max_horizon, + predictions=prediction, + ) + global_metric = evaluation_result.get_global_metric() + if not global_metric: + return SubsetMetric( + window="global", + timestamp=prediction.forecast_start, + metrics={}, + ) + + return global_metric + + +def restore_target[T: TimeSeriesDataset]( + dataset: T, + original_dataset: TimeSeriesDataset, + target_column: str, +) -> T: + """Restore the target column from the original dataset to the given dataset. + + Maps target values from the original dataset to the dataset using index alignment. + Ensures the target column is present in the dataset for downstream processing. + + Args: + dataset: Dataset to modify by adding the target column. + original_dataset: Source dataset containing the target values. + target_column: Name of the target column to restore. + + Returns: + Dataset with the target column restored from the original dataset. + """ + target_series = original_dataset.select_features([target_column]).select_version().data[target_column] + + def _transform_restore_target(df: pd.DataFrame) -> pd.DataFrame: + return df.assign(**{str(target_series.name): df.index.map(target_series)}) # pyright: ignore[reportUnknownMemberType] + + return dataset.pipe_pandas(_transform_restore_target) + + +__all__ = ["EnsembleForecastingModel", "ModelFitResult", "restore_target"] diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py new file mode 100644 index 000000000..db4917778 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py @@ -0,0 +1,28 @@ +"""Forecast Combiners.""" + +from .forecast_combiner import ForecastCombiner, ForecastCombinerConfig +from .learned_weights_combiner import ( + LGBMCombinerHyperParams, + LogisticCombinerHyperParams, + RFCombinerHyperParams, + WeightsCombiner, + WeightsCombinerConfig, + XGBCombinerHyperParams, +) +from .rules_combiner import RulesCombiner, RulesCombinerConfig +from .stacking_combiner import StackingCombiner, StackingCombinerConfig + +__all__ = [ + "ForecastCombiner", + "ForecastCombinerConfig", + "LGBMCombinerHyperParams", + "LogisticCombinerHyperParams", + "RFCombinerHyperParams", + "RulesCombiner", + "RulesCombinerConfig", + "StackingCombiner", + "StackingCombinerConfig", + "WeightsCombiner", + "WeightsCombinerConfig", + "XGBCombinerHyperParams", +] diff --git a/packages/openstef-meta/src/openstef_meta/framework/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py similarity index 63% rename from packages/openstef-meta/src/openstef_meta/framework/forecast_combiner.py rename to packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index 2a1146d63..8a12027d9 100644 --- a/packages/openstef-meta/src/openstef_meta/framework/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -9,15 +9,15 @@ """ from abc import abstractmethod -from collections.abc import Sequence +from typing import Self import pandas as pd from pydantic import ConfigDict, Field -from openstef_core.datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset -from openstef_core.mixins import HyperParams, Predictor, TransformPipeline -from openstef_core.transforms import TimeSeriesTransform -from openstef_core.types import Quantile +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.mixins import HyperParams, Predictor +from openstef_core.types import LeadTime, Quantile from openstef_meta.transforms.selector import Selector from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_models.utils.feature_selection import FeatureSelection @@ -27,28 +27,74 @@ ) -class ForecastCombinerHyperParams(HyperParams): +class ForecastCombinerConfig(BaseConfig): """Hyperparameters for the Final Learner.""" model_config = ConfigDict(arbitrary_types_allowed=True) - feature_adders: Sequence[TimeSeriesTransform] = Field( - default=[], - description="Additional features to add to the base learner predictions before fitting the final learner.", + hyperparams: HyperParams = Field( + description="Hyperparameters for the final learner.", ) + quantiles: list[Quantile] = Field( + default=[Quantile(0.5)], + description=( + "Probability levels for uncertainty estimation. Each quantile represents a confidence level " + "(e.g., 0.1 = 10th percentile, 0.5 = median, 0.9 = 90th percentile). " + "Models must generate predictions for all specified quantiles." + ), + min_length=1, + ) + + horizons: list[LeadTime] = Field( + default=..., + description=( + "Lead times for predictions, accounting for data availability and versioning cutoffs. " + "Each horizon defines how far ahead the model should predict." + ), + min_length=1, + ) + + @property + def max_horizon(self) -> LeadTime: + """Returns the maximum lead time (horizon) from the configured horizons. + + Useful for determining the furthest prediction distance required by the model. + This is commonly used for data preparation and validation logic. + + Returns: + The maximum lead time. + """ + return max(self.horizons) + + def with_horizon(self, horizon: LeadTime) -> Self: + """Create a new configuration with a different horizon. + + Useful for creating multiple forecaster instances for different prediction + horizons from a single base configuration. + + Args: + horizon: The new lead time to use for predictions. + + Returns: + New configuration instance with the specified horizon. + """ + return self.model_copy(update={"horizons": [horizon]}) + + @classmethod + def combiner_class(cls) -> type["ForecastCombiner"]: + """Get the associated Forecaster class for this configuration. + + Returns: + The Forecaster class that uses this configuration. + """ + raise NotImplementedError("Subclasses must implement combiner_class") + class ForecastCombiner(Predictor[EnsembleForecastDataset, ForecastDataset]): """Combines base learner predictions for each quantile into final predictions.""" - def __init__(self, quantiles: list[Quantile], hyperparams: ForecastCombinerHyperParams) -> None: - """Initialize the Final Learner.""" - self.quantiles = quantiles - self.hyperparams = hyperparams - self.pre_processing: TransformPipeline[TimeSeriesDataset] = TransformPipeline( - transforms=hyperparams.feature_adders - ) - self._is_fitted: bool = False + config: ForecastCombinerConfig @abstractmethod def fit( @@ -85,24 +131,6 @@ def predict( """ raise NotImplementedError("Subclasses must implement the predict method.") - def calculate_features(self, data: ForecastInputDataset) -> ForecastInputDataset: - """Calculate additional features for the final learner. - - Args: - data: Input ForecastInputDataset to calculate features on. - - Returns: - ForecastInputDataset with additional features. - """ - data_transformed = self.pre_processing.transform(data) - - return ForecastInputDataset( - data=data_transformed.data, - sample_interval=data.sample_interval, - target_column=data.target_column, - forecast_start=data.forecast_start, - ) - @staticmethod def _prepare_input_data( dataset: ForecastInputDataset, additional_features: ForecastInputDataset | None @@ -130,8 +158,3 @@ def _prepare_input_data( def is_fitted(self) -> bool: """Indicates whether the final learner has been fitted.""" raise NotImplementedError("Subclasses must implement the is_fitted property.") - - @property - def has_features(self) -> bool: - """Indicates whether the final learner uses additional features.""" - return len(self.pre_processing.transforms) > 0 diff --git a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py similarity index 72% rename from packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py rename to packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 1f4837038..31f39e095 100644 --- a/packages/openstef-meta/src/openstef_meta/models/learned_weights_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -26,27 +26,13 @@ from openstef_core.exceptions import ( NotFittedError, ) -from openstef_core.mixins import HyperParams -from openstef_core.types import Quantile -from openstef_meta.framework.base_learner import ( - BaseLearner, - BaseLearnerHyperParams, -) -from openstef_meta.framework.forecast_combiner import ( - EnsembleForecastDataset, +from openstef_core.mixins.predictor import HyperParams +from openstef_core.types import LeadTime, Quantile +from openstef_meta.models.forecast_combiners.forecast_combiner import ( ForecastCombiner, - ForecastCombinerHyperParams, -) -from openstef_meta.framework.meta_forecaster import ( - EnsembleForecaster, -) -from openstef_models.models.forecasting.forecaster import ( - ForecasterConfig, -) -from openstef_models.models.forecasting.gblinear_forecaster import ( - GBLinearHyperParams, + ForecastCombinerConfig, ) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +from openstef_meta.utils.datasets import EnsembleForecastDataset logger = logging.getLogger(__name__) @@ -57,152 +43,17 @@ ClassifierNames = Literal["lgbm", "xgb", "logistic_regression", "dummy"] -class WeightsCombinerHyperParams(ForecastCombinerHyperParams): - """Hyperparameters for Learned Weights Final Learner.""" +class ClassifierParamsMixin: + """Hyperparameters for the Final Learner.""" @abstractmethod def get_classifier(self) -> Classifier: - """Initialize the classifier from hyperparameters. - - Returns: - Classifier: An instance of the classifier initialized with the provided hyperparameters. - """ - raise NotImplementedError("Subclasses must implement the 'get_classifier' method.") - - -class WeightsCombiner(ForecastCombiner): - """Combines base learner predictions with a classification approach to determine which base learner to use.""" - - model_type: ClassifierNames = Field( - default="lgbm", description="Type of classifier to use for combining base learner predictions." - ) - - def __init__(self, quantiles: list[Quantile], hyperparams: WeightsCombinerHyperParams) -> None: - """Initialize WeightsCombiner.""" - super().__init__(quantiles=quantiles, hyperparams=hyperparams) - self.models: list[Classifier] = [hyperparams.get_classifier() for _ in self.quantiles] - - self._label_encoder = LabelEncoder() - self._is_fitted = False - - @override - def fit( - self, - data: EnsembleForecastDataset, - data_val: EnsembleForecastDataset | None = None, - additional_features: ForecastInputDataset | None = None, - sample_weights: pd.Series | None = None, - ) -> None: - - self._label_encoder.fit(data.model_names) - - for i, q in enumerate(self.quantiles): - # Data preparation - dataset = data.select_quantile_classification(quantile=q) - input_data = self._prepare_input_data( - dataset=dataset, - additional_features=additional_features, - ) - labels = dataset.target_series - self._validate_labels(labels=labels, model_index=i) - labels = self._label_encoder.transform(labels) - - # Balance classes, adjust with sample weights - weights = compute_sample_weight("balanced", labels) - if sample_weights is not None: - weights *= sample_weights - - self.models[i].fit(X=input_data, y=labels, sample_weight=weights) # type: ignore - self._is_fitted = True - - @staticmethod - def _prepare_input_data( - dataset: ForecastInputDataset, additional_features: ForecastInputDataset | None - ) -> pd.DataFrame: - """Prepare input data by combining base predictions with additional features if provided. + """Returns the classifier instance.""" + msg = "Subclasses must implement get_classifier method." + raise NotImplementedError(msg) - Args: - dataset: ForecastInputDataset containing base predictions. - additional_features: Optional ForecastInputDataset containing additional features. - Returns: - pd.DataFrame: Combined DataFrame of base predictions and additional features if provided. - """ - df = dataset.input_data(start=dataset.index[0]) - if additional_features is not None: - df_a = additional_features.input_data(start=dataset.index[0]) - df = pd.concat( - [df, df_a], - axis=1, - ) - return df - - def _validate_labels(self, labels: pd.Series, model_index: int) -> None: - if len(labels.unique()) == 1: - msg = f"""Final learner for quantile {self.quantiles[model_index].format()} has - less than 2 classes in the target. - Switching to dummy classifier """ - logger.warning(msg=msg) - self.models[model_index] = DummyClassifier(strategy="most_frequent") - - def _predict_model_weights_quantile(self, base_predictions: pd.DataFrame, model_index: int) -> pd.DataFrame: - model = self.models[model_index] - return model.predict_proba(X=base_predictions) # type: ignore - - def _generate_predictions_quantile( - self, - dataset: ForecastInputDataset, - additional_features: ForecastInputDataset | None, - model_index: int, - ) -> pd.Series: - - input_data = self._prepare_input_data( - dataset=dataset, - additional_features=additional_features, - ) - - weights = self._predict_model_weights_quantile(base_predictions=input_data, model_index=model_index) - - return dataset.input_data().mul(weights).sum(axis=1) - - @override - def predict( - self, - data: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None = None, - ) -> ForecastDataset: - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - # Generate predictions - predictions = pd.DataFrame({ - Quantile(q).format(): self._generate_predictions_quantile( - dataset=data.select_quantile(quantile=Quantile(q)), - additional_features=additional_features, - model_index=i, - ) - for i, q in enumerate(self.quantiles) - }) - target_series = data.target_series - if target_series is not None: - predictions[data.target_column] = target_series - - return ForecastDataset( - data=predictions, - sample_interval=data.sample_interval, - target_column=data.target_column, - forecast_start=data.forecast_start, - ) - - @property - @override - def is_fitted(self) -> bool: - return self._is_fitted - - -# Final learner implementations using different classifiers -# 1 LGBM Classifier -class LGBMCombinerHyperParams(WeightsCombinerHyperParams): +class LGBMCombinerHyperParams(HyperParams, ClassifierParamsMixin): """Hyperparameters for Learned Weights Final Learner with LGBM Classifier.""" n_estimators: int = Field( @@ -226,7 +77,7 @@ def get_classifier(self) -> LGBMClassifier: ) -class RFCombinerHyperParams(WeightsCombinerHyperParams): +class RFCombinerHyperParams(HyperParams, ClassifierParamsMixin): """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" n_estimators: int = Field( @@ -269,7 +120,7 @@ def get_classifier(self) -> LGBMClassifier: # 3 XGB Classifier -class XGBCombinerHyperParams(WeightsCombinerHyperParams): +class XGBCombinerHyperParams(HyperParams, ClassifierParamsMixin): """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" n_estimators: int = Field( @@ -283,7 +134,7 @@ def get_classifier(self) -> XGBClassifier: return XGBClassifier(n_estimators=self.n_estimators) -class LogisticCombinerHyperParams(WeightsCombinerHyperParams): +class LogisticCombinerHyperParams(HyperParams, ClassifierParamsMixin): """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" fit_intercept: bool = Field( @@ -312,58 +163,189 @@ def get_classifier(self) -> LogisticRegression: ) -class LearnedWeightsHyperParams(HyperParams): - """Hyperparameters for Stacked LGBM GBLinear Regressor.""" +class WeightsCombinerConfig(ForecastCombinerConfig): + """Configuration for WeightsCombiner.""" + + hyperparams: HyperParams = Field( + default=LGBMCombinerHyperParams(), + description="Hyperparameters for the Weights Combiner.", + ) - base_hyperparams: list[BaseLearnerHyperParams] = Field( - default=[LGBMHyperParams(), GBLinearHyperParams()], - description="List of hyperparameter configurations for base learners. " - "Defaults to [LGBMHyperParams, GBLinearHyperParams].", + quantiles: list[Quantile] = Field( + default=[Quantile(0.5)], + description=( + "Probability levels for uncertainty estimation. Each quantile represents a confidence level " + "(e.g., 0.1 = 10th percentile, 0.5 = median, 0.9 = 90th percentile). " + "Models must generate predictions for all specified quantiles." + ), + min_length=1, ) - combiner_hyperparams: WeightsCombinerHyperParams = Field( - default=LGBMCombinerHyperParams(), - description="Hyperparameters for the final learner. Defaults to LGBMLearnerHyperParams.", + horizons: list[LeadTime] = Field( + default=..., + description=( + "Lead times for predictions, accounting for data availability and versioning cutoffs. " + "Each horizon defines how far ahead the model should predict." + ), + min_length=1, ) + @property + def get_classifier(self) -> Classifier: + """Returns the classifier instance from hyperparameters. + + Returns: + Classifier instance. -class LearnedWeightsForecasterConfig(ForecasterConfig): - """Configuration for Hybrid-based forecasting models.""" + Raises: + TypeError: If hyperparams do not implement ClassifierParamsMixin. + """ + if not isinstance(self.hyperparams, ClassifierParamsMixin): + msg = "hyperparams must implement ClassifierParamsMixin to get classifier." + raise TypeError(msg) + return self.hyperparams.get_classifier() - hyperparams: LearnedWeightsHyperParams - verbosity: bool = Field( - default=True, - description="Enable verbose output from the Hybrid model (True/False).", - ) +class WeightsCombiner(ForecastCombiner): + """Combines base learner predictions with a classification approach to determine which base learner to use.""" + Config = WeightsCombinerConfig + LGBMHyperParams = LGBMCombinerHyperParams + RFHyperParams = RFCombinerHyperParams + XGBHyperParams = XGBCombinerHyperParams + LogisticHyperParams = LogisticCombinerHyperParams + + def __init__(self, config: WeightsCombinerConfig) -> None: + """Initialize the Weigths Combiner.""" + self.quantiles = config.quantiles + self.config = config + self.hyperparams = config.hyperparams + self._is_fitted: bool = False + self._is_fitted = False + self._label_encoder = LabelEncoder() + + # Initialize a classifier per quantile + self.models: list[Classifier] = [config.get_classifier for _ in self.quantiles] + + @override + def fit( + self, + data: EnsembleForecastDataset, + data_val: EnsembleForecastDataset | None = None, + additional_features: ForecastInputDataset | None = None, + sample_weights: pd.Series | None = None, + ) -> None: + + self._label_encoder.fit(data.forecaster_names) + + for i, q in enumerate(self.quantiles): + # Data preparation + dataset = data.select_quantile_classification(quantile=q) + input_data = self._prepare_input_data( + dataset=dataset, + additional_features=additional_features, + ) + labels = dataset.target_series + self._validate_labels(labels=labels, model_index=i) + labels = self._label_encoder.transform(labels) + + # Balance classes, adjust with sample weights + weights = compute_sample_weight("balanced", labels) + if sample_weights is not None: + weights *= sample_weights + + self.models[i].fit(X=input_data, y=labels, sample_weight=weights) # type: ignore + self._is_fitted = True + + @staticmethod + def _prepare_input_data( + dataset: ForecastInputDataset, additional_features: ForecastInputDataset | None + ) -> pd.DataFrame: + """Prepare input data by combining base predictions with additional features if provided. + + Args: + dataset: ForecastInputDataset containing base predictions. + additional_features: Optional ForecastInputDataset containing additional features. + + Returns: + pd.DataFrame: Combined DataFrame of base predictions and additional features if provided. + """ + df = dataset.input_data(start=dataset.index[0]) + if additional_features is not None: + df_a = additional_features.input_data(start=dataset.index[0]) + df = pd.concat( + [df, df_a], + axis=1, + ) + return df -class LearnedWeightsForecaster(EnsembleForecaster): - """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" + def _validate_labels(self, labels: pd.Series, model_index: int) -> None: + if len(labels.unique()) == 1: + msg = f"""Final learner for quantile {self.quantiles[model_index].format()} has + less than 2 classes in the target. + Switching to dummy classifier """ + logger.warning(msg=msg) + self.models[model_index] = DummyClassifier(strategy="most_frequent") - Config = LearnedWeightsForecasterConfig - HyperParams = LearnedWeightsHyperParams + def _predict_model_weights_quantile(self, base_predictions: pd.DataFrame, model_index: int) -> pd.DataFrame: + model = self.models[model_index] + return model.predict_proba(X=base_predictions) # type: ignore - def __init__(self, config: LearnedWeightsForecasterConfig) -> None: - """Initialize the LearnedWeightsForecaster.""" - self._config = config + def _generate_predictions_quantile( + self, + dataset: ForecastInputDataset, + additional_features: ForecastInputDataset | None, + model_index: int, + ) -> pd.Series: - self._base_learners: list[BaseLearner] = self._init_base_learners( - config=config, base_hyperparams=config.hyperparams.base_hyperparams + input_data = self._prepare_input_data( + dataset=dataset, + additional_features=additional_features, ) - self._forecast_combiner = WeightsCombiner( - quantiles=config.quantiles, hyperparams=config.hyperparams.combiner_hyperparams + + weights = self._predict_model_weights_quantile(base_predictions=input_data, model_index=model_index) + + return dataset.input_data().mul(weights).sum(axis=1) + + @override + def predict( + self, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, + ) -> ForecastDataset: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + # Generate predictions + predictions = pd.DataFrame({ + Quantile(q).format(): self._generate_predictions_quantile( + dataset=data.select_quantile(quantile=Quantile(q)), + additional_features=additional_features, + model_index=i, + ) + for i, q in enumerate(self.quantiles) + }) + target_series = data.target_series + if target_series is not None: + predictions[data.target_column] = target_series + + return ForecastDataset( + data=predictions, + sample_interval=data.sample_interval, + target_column=data.target_column, + forecast_start=data.forecast_start, ) + @property + @override + def is_fitted(self) -> bool: + return self._is_fitted + __all__ = [ "LGBMCombinerHyperParams", - "LearnedWeightsForecaster", - "LearnedWeightsForecasterConfig", - "LearnedWeightsHyperParams", "LogisticCombinerHyperParams", "RFCombinerHyperParams", "WeightsCombiner", - "WeightsCombinerHyperParams", "XGBCombinerHyperParams", ] diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py new file mode 100644 index 000000000..a030b3df5 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py @@ -0,0 +1,154 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Rules-based Meta Forecaster Module.""" + +import logging +from typing import cast, override + +import pandas as pd +from pydantic import Field, field_validator + +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.mixins import HyperParams +from openstef_core.types import LeadTime, Quantile +from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig +from openstef_meta.utils.datasets import EnsembleForecastDataset +from openstef_meta.utils.decision_tree import Decision, DecisionTree + +logger = logging.getLogger(__name__) + + +class RulesLearnerHyperParams(HyperParams): + """HyperParams for Stacking Final Learner.""" + + decision_tree: DecisionTree = Field( + description="Decision tree defining the rules for the final learner.", + default=DecisionTree( + nodes=[Decision(idx=0, decision="LGBMForecaster")], + outcomes={"LGBMForecaster"}, + ), + ) + + +class RulesCombinerConfig(ForecastCombinerConfig): + """Configuration for Rules-based Forecast Combiner.""" + + hyperparams: HyperParams = Field( + description="Hyperparameters for the Rules-based final learner.", + default=RulesLearnerHyperParams(), + ) + + quantiles: list[Quantile] = Field( + default=[Quantile(0.5)], + description=( + "Probability levels for uncertainty estimation. Each quantile represents a confidence level " + "(e.g., 0.1 = 10th percentile, 0.5 = median, 0.9 = 90th percentile). " + "Models must generate predictions for all specified quantiles." + ), + min_length=1, + ) + + horizons: list[LeadTime] = Field( + default=..., + description=( + "Lead times for predictions, accounting for data availability and versioning cutoffs. " + "Each horizon defines how far ahead the model should predict." + ), + min_length=1, + ) + + @field_validator("hyperparams", mode="after") + @staticmethod + def _validate_hyperparams(v: HyperParams) -> HyperParams: + if not isinstance(v, RulesLearnerHyperParams): + raise TypeError("hyperparams must be an instance of RulesLearnerHyperParams.") + return v + + +class RulesCombiner(ForecastCombiner): + """Combines base learner predictions per quantile into final predictions using a regression approach.""" + + Config = RulesCombinerConfig + + def __init__(self, config: RulesCombinerConfig) -> None: + """Initialize the Rules Learner. + + Args: + config: Configuration for the Rules Combiner. + """ + hyperparams = cast(RulesLearnerHyperParams, config.hyperparams) + self.tree = hyperparams.decision_tree + self.quantiles = config.quantiles + self.config = config + + @override + def fit( + self, + data: EnsembleForecastDataset, + data_val: EnsembleForecastDataset | None = None, + additional_features: ForecastInputDataset | None = None, + sample_weights: pd.Series | None = None, + ) -> None: + # No fitting needed for rule-based final learner + # Check that additional features are provided + if additional_features is None: + raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") + + if sample_weights is not None: + logger.warning("Sample weights are ignored in RulesLearner.fit method.") + + def _predict_tree(self, data: pd.DataFrame, columns: pd.Index) -> pd.DataFrame: + """Predict using the decision tree rules. + + Args: + data: DataFrame containing the additional features. + columns: Expected columns for the output DataFrame. + + Returns: + DataFrame with predictions for each quantile. + """ + predictions = data.apply(self.tree.get_decision, axis=1) + + return pd.get_dummies(predictions).reindex(columns=columns) + + @override + def predict( + self, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, + ) -> ForecastDataset: + if additional_features is None: + raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") + + decisions = self._predict_tree( + additional_features.data, columns=data.select_quantile(quantile=self.quantiles[0]).data.columns + ) + + # Generate predictions + predictions: list[pd.DataFrame] = [] + for q in self.quantiles: + dataset = data.select_quantile(quantile=q) + preds = dataset.input_data().multiply(decisions).sum(axis=1) + + predictions.append(preds.to_frame(name=Quantile(q).format())) + + # Concatenate predictions along columns to form a DataFrame with quantile columns + df = pd.concat(predictions, axis=1) + + return ForecastDataset( + data=df, + sample_interval=data.sample_interval, + ) + + @property + def is_fitted(self) -> bool: + """Check the Rules Final Learner is fitted.""" + return True + + +__all__ = [ + "RulesCombiner", + "RulesCombinerConfig", + "RulesLearnerHyperParams", +] diff --git a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py similarity index 54% rename from packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py rename to packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index 7dd3b2220..b8f4ebad5 100644 --- a/packages/openstef-meta/src/openstef_meta/models/stacking_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -10,8 +10,7 @@ """ import logging -from collections.abc import Sequence -from typing import override +from typing import TYPE_CHECKING, cast, override import pandas as pd from pydantic import Field, field_validator @@ -21,69 +20,105 @@ NotFittedError, ) from openstef_core.mixins import HyperParams -from openstef_core.transforms import TimeSeriesTransform from openstef_core.types import LeadTime, Quantile -from openstef_meta.framework.base_learner import ( - BaseLearner, - BaseLearnerHyperParams, -) -from openstef_meta.framework.forecast_combiner import ForecastCombiner, ForecastCombinerHyperParams -from openstef_meta.framework.meta_forecaster import ( - EnsembleForecaster, -) +from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig from openstef_meta.utils.datasets import EnsembleForecastDataset -from openstef_models.models.forecasting.forecaster import ( - Forecaster, - ForecasterConfig, -) from openstef_models.models.forecasting.gblinear_forecaster import ( + GBLinearForecaster, GBLinearHyperParams, ) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMHyperParams + +if TYPE_CHECKING: + from openstef_models.models.forecasting.forecaster import Forecaster logger = logging.getLogger(__name__) +ForecasterHyperParams = GBLinearHyperParams | LGBMHyperParams +ForecasterType = GBLinearForecaster | LGBMForecaster + -class StackingForecastCombinerHyperParams(ForecastCombinerHyperParams): - """HyperParams for Stacking Final Learner.""" +class StackingCombinerConfig(ForecastCombinerConfig): + """Configuration for the Stacking final learner.""" - feature_adders: Sequence[TimeSeriesTransform] = Field( - default=[], - description="Additional features to add to the base learner predictions before fitting the final learner.", + hyperparams: HyperParams = Field( + description="Hyperparameters for the Stacking Combiner.", ) - forecaster_hyperparams: BaseLearnerHyperParams = Field( - default=GBLinearHyperParams(), - description="Forecaster hyperparameters for the final learner. Defaults to GBLinearHyperParams.", + quantiles: list[Quantile] = Field( + default=[Quantile(0.5)], + description=( + "Probability levels for uncertainty estimation. Each quantile represents a confidence level " + "(e.g., 0.1 = 10th percentile, 0.5 = median, 0.9 = 90th percentile). " + "Models must generate predictions for all specified quantiles." + ), + min_length=1, ) + horizons: list[LeadTime] = Field( + default=..., + description=( + "Lead times for predictions, accounting for data availability and versioning cutoffs. " + "Each horizon defines how far ahead the model should predict." + ), + min_length=1, + ) -class StackingForecastCombiner(ForecastCombiner): + @field_validator("hyperparams", mode="after") + @staticmethod + def validate_forecaster( + v: HyperParams, + ) -> HyperParams: + """Validate that the forecaster class is set in the hyperparameters. + + Args: + v: Hyperparameters to validate. + + Returns: + Validated hyperparameters. + + Raises: + ValueError: If the forecaster class is not set. + """ + if not hasattr(v, "forecaster_class"): + raise ValueError("forecaster_class must be set in hyperparameters for StackingCombinerConfig.") + return v + + +class StackingCombiner(ForecastCombiner): """Combines base learner predictions per quantile into final predictions using a regression approach.""" + Config = StackingCombinerConfig + LGBMHyperParams = LGBMHyperParams + GBLinearHyperParams = GBLinearHyperParams + def __init__( - self, quantiles: list[Quantile], hyperparams: StackingForecastCombinerHyperParams, horizon: LeadTime + self, + config: StackingCombinerConfig, ) -> None: """Initialize the Stacking final learner. Args: - quantiles: List of quantiles to predict. - hyperparams: Hyperparameters for the final learner. - horizon: Forecast horizon for which to create the final learner. + config: Configuration for the Stacking combiner. """ - super().__init__(quantiles=quantiles, hyperparams=hyperparams) - - forecaster_hyperparams: BaseLearnerHyperParams = hyperparams.forecaster_hyperparams + forecaster_hyperparams = cast(ForecasterHyperParams, config.hyperparams) + self.quantiles = config.quantiles + self.config = config + self.hyperparams = forecaster_hyperparams + self._is_fitted: bool = False # Split forecaster per quantile models: list[Forecaster] = [] for q in self.quantiles: forecaster_cls = forecaster_hyperparams.forecaster_class() - config = forecaster_cls.Config(horizons=[horizon], quantiles=[q]) + forecaster_config = forecaster_cls.Config( + horizons=[config.max_horizon], + quantiles=[q], + ) if "hyperparams" in forecaster_cls.Config.model_fields: - config = config.model_copy(update={"hyperparams": forecaster_hyperparams}) + forecaster_config = forecaster_config.model_copy(update={"hyperparams": forecaster_hyperparams}) - model = config.forecaster_from_config() + model = forecaster_config.forecaster_from_config() models.append(model) self.models = models @@ -167,59 +202,3 @@ def predict( def is_fitted(self) -> bool: """Check the StackingForecastCombiner is fitted.""" return all(x.is_fitted for x in self.models) - - -class StackingHyperParams(HyperParams): - """Hyperparameters for Stacked LGBM GBLinear Regressor.""" - - base_hyperparams: list[BaseLearnerHyperParams] = Field( - default=[LGBMHyperParams(), GBLinearHyperParams()], - description="List of hyperparameter configurations for base learners. " - "Defaults to [LGBMHyperParams, GBLinearHyperParams].", - ) - - combiner_hyperparams: StackingForecastCombinerHyperParams = Field( - default=StackingForecastCombinerHyperParams(), - description="Hyperparameters for the final learner.", - ) - - @field_validator("base_hyperparams", mode="after") - @classmethod - def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: - hp_classes = [type(hp) for hp in v] - if not len(hp_classes) == len(set(hp_classes)): - raise ValueError("Duplicate base learner hyperparameter classes are not allowed.") - return v - - -class StackingForecasterConfig(ForecasterConfig): - """Configuration for Hybrid-based forecasting models.""" - - hyperparams: StackingHyperParams = StackingHyperParams() - - verbosity: bool = Field( - default=True, - description="Enable verbose output from the Hybrid model (True/False).", - ) - - -class StackingForecaster(EnsembleForecaster): - """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" - - Config = StackingForecasterConfig - HyperParams = StackingHyperParams - - def __init__(self, config: StackingForecasterConfig) -> None: - """Initialize the Hybrid forecaster.""" - self._config = config - - self._base_learners: list[BaseLearner] = self._init_base_learners( - config=config, base_hyperparams=config.hyperparams.base_hyperparams - ) - - self._forecast_combiner = StackingForecastCombiner( - quantiles=config.quantiles, hyperparams=config.hyperparams.combiner_hyperparams, horizon=config.max_horizon - ) - - -__all__ = ["StackingForecastCombiner", "StackingForecaster", "StackingForecasterConfig", "StackingHyperParams"] diff --git a/packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py new file mode 100644 index 000000000..fce9bcb92 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py @@ -0,0 +1,12 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""This module provides meta-forecasting models.""" + +from .residual_forecaster import ResidualForecaster, ResidualForecasterConfig, ResidualHyperParams + +__all__ = [ + "ResidualForecaster", + "ResidualForecasterConfig", + "ResidualHyperParams", +] diff --git a/packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py similarity index 85% rename from packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py rename to packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py index 8905d8f57..efd8f50bc 100644 --- a/packages/openstef-meta/src/openstef_meta/models/residual_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py @@ -12,6 +12,8 @@ import logging from typing import override +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster, LGBMLinearHyperParams +from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster, XGBoostHyperParams import pandas as pd from pydantic import Field @@ -21,23 +23,23 @@ ) from openstef_core.mixins import HyperParams from openstef_core.types import Quantile -from openstef_meta.framework.base_learner import ( - BaseLearner, - BaseLearnerHyperParams, -) -from openstef_meta.framework.meta_forecaster import ( - MetaForecaster, -) + + from openstef_models.models.forecasting.forecaster import ( + Forecaster, ForecasterConfig, ) from openstef_models.models.forecasting.gblinear_forecaster import ( + GBLinearForecaster, GBLinearHyperParams, ) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMHyperParams logger = logging.getLogger(__name__) +BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster +BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams + class ResidualHyperParams(HyperParams): """Hyperparameters for Stacked LGBM GBLinear Regressor.""" @@ -64,7 +66,7 @@ class ResidualForecasterConfig(ForecasterConfig): ) -class ResidualForecaster(MetaForecaster): +class ResidualForecaster(Forecaster): """MetaForecaster that implements residual modeling. It takes in a primary forecaster and a residual forecaster. The primary forecaster makes initial predictions, @@ -102,6 +104,29 @@ def _init_secondary_model(self, hyperparams: BaseLearnerHyperParams) -> list[Bas return models + @staticmethod + def _init_base_learners( + config: ForecasterConfig, base_hyperparams: list[BaseLearnerHyperParams] + ) -> list[BaseLearner]: + """Initialize base learners based on provided hyperparameters. + + Returns: + list[Forecaster]: List of initialized base learner forecasters. + """ + base_learners: list[BaseLearner] = [] + horizons = config.horizons + quantiles = config.quantiles + + for hyperparams in base_hyperparams: + forecaster_cls = hyperparams.forecaster_class() + config = forecaster_cls.Config(horizons=horizons, quantiles=quantiles) + if "hyperparams" in forecaster_cls.Config.model_fields: + config = config.model_copy(update={"hyperparams": hyperparams}) + + base_learners.append(config.forecaster_from_config()) + + return base_learners + @override def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: """Fit the Hybrid model to the training data. diff --git a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py deleted file mode 100644 index d83d586f6..000000000 --- a/packages/openstef-meta/src/openstef_meta/models/rules_forecaster.py +++ /dev/null @@ -1,206 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Rules-based Meta Forecaster Module.""" - -import logging -from collections.abc import Sequence -from typing import override - -import pandas as pd -from pydantic import Field, field_validator -from pydantic_extra_types.country import CountryAlpha2 - -from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_core.mixins import HyperParams -from openstef_core.transforms import TimeSeriesTransform -from openstef_core.types import Quantile -from openstef_meta.framework.base_learner import ( - BaseLearner, - BaseLearnerHyperParams, -) -from openstef_meta.framework.forecast_combiner import ForecastCombiner, ForecastCombinerHyperParams -from openstef_meta.framework.meta_forecaster import ( - EnsembleForecaster, -) -from openstef_meta.utils.datasets import EnsembleForecastDataset -from openstef_meta.utils.decision_tree import Decision, DecisionTree -from openstef_models.models.forecasting.forecaster import ( - ForecasterConfig, -) -from openstef_models.models.forecasting.gblinear_forecaster import ( - GBLinearHyperParams, -) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams -from openstef_models.transforms.time_domain import HolidayFeatureAdder - -logger = logging.getLogger(__name__) - - -class RulesLearnerHyperParams(ForecastCombinerHyperParams): - """HyperParams for Stacking Final Learner.""" - - feature_adders: Sequence[TimeSeriesTransform] = Field( - default=[], - description="Additional features to add to the final learner.", - ) - - decision_tree: DecisionTree = Field( - description="Decision tree defining the rules for the final learner.", - ) - - @field_validator("feature_adders", mode="after") - @classmethod - def _check_not_empty(cls, v: list[TimeSeriesTransform]) -> list[TimeSeriesTransform]: - if v == []: - raise ValueError("RulesForecaster requires at least one feature adder.") - return v - - -class RulesLearner(ForecastCombiner): - """Combines base learner predictions per quantile into final predictions using a regression approach.""" - - def __init__(self, quantiles: list[Quantile], hyperparams: RulesLearnerHyperParams) -> None: - """Initialize the Rules Learner. - - Args: - quantiles: List of quantiles to predict. - hyperparams: Hyperparameters for the final learner. - """ - super().__init__(quantiles=quantiles, hyperparams=hyperparams) - - self.tree = hyperparams.decision_tree - self.feature_adders = hyperparams.feature_adders - - @override - def fit( - self, - data: EnsembleForecastDataset, - data_val: EnsembleForecastDataset | None = None, - additional_features: ForecastInputDataset | None = None, - sample_weights: pd.Series | None = None, - ) -> None: - # No fitting needed for rule-based final learner - # Check that additional features are provided - if additional_features is None: - raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") - - if sample_weights is not None: - logger.warning("Sample weights are ignored in RulesLearner.fit method.") - - def _predict_tree(self, data: pd.DataFrame, columns: pd.Index) -> pd.DataFrame: - """Predict using the decision tree rules. - - Args: - data: DataFrame containing the additional features. - columns: Expected columns for the output DataFrame. - - Returns: - DataFrame with predictions for each quantile. - """ - predictions = data.apply(self.tree.get_decision, axis=1) - - return pd.get_dummies(predictions).reindex(columns=columns) - - @override - def predict( - self, - data: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None = None, - ) -> ForecastDataset: - if additional_features is None: - raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") - - decisions = self._predict_tree( - additional_features.data, columns=data.select_quantile(quantile=self.quantiles[0]).data.columns - ) - - # Generate predictions - predictions: list[pd.DataFrame] = [] - for q in self.quantiles: - dataset = data.select_quantile(quantile=q) - preds = dataset.input_data().multiply(decisions).sum(axis=1) - - predictions.append(preds.to_frame(name=Quantile(q).format())) - - # Concatenate predictions along columns to form a DataFrame with quantile columns - df = pd.concat(predictions, axis=1) - - return ForecastDataset( - data=df, - sample_interval=data.sample_interval, - ) - - @property - def is_fitted(self) -> bool: - """Check the Rules Final Learner is fitted.""" - return True - - -class RulesForecasterHyperParams(HyperParams): - """Hyperparameters for Rules Forecaster.""" - - base_hyperparams: list[BaseLearnerHyperParams] = Field( - default=[LGBMHyperParams(), GBLinearHyperParams()], - description="List of hyperparameter configurations for base learners. " - "Defaults to [LGBMHyperParams, GBLinearHyperParams].", - ) - - final_hyperparams: RulesLearnerHyperParams = Field( - description="Hyperparameters for the final learner.", - default=RulesLearnerHyperParams( - decision_tree=DecisionTree(nodes=[Decision(idx=0, decision="LGBMForecaster")], outcomes={"LGBMForecaster"}), - feature_adders=[HolidayFeatureAdder(country_code=CountryAlpha2("NL"))], - ), - ) - - @field_validator("base_hyperparams", mode="after") - @classmethod - def _check_classes(cls, v: list[BaseLearnerHyperParams]) -> list[BaseLearnerHyperParams]: - hp_classes = [type(hp) for hp in v] - if not len(hp_classes) == len(set(hp_classes)): - raise ValueError("Duplicate base learner hyperparameter classes are not allowed.") - return v - - -class RulesForecasterConfig(ForecasterConfig): - """Configuration for Hybrid-based forecasting models.""" - - hyperparams: RulesForecasterHyperParams = Field( - default=RulesForecasterHyperParams(), - description="Hyperparameters for the Hybrid forecaster.", - ) - - verbosity: bool = Field( - default=True, - description="Enable verbose output from the Hybrid model (True/False).", - ) - - -class RulesForecaster(EnsembleForecaster): - """Wrapper for sklearn's StackingRegressor to make it compatible with HorizonForecaster.""" - - Config = RulesForecasterConfig - HyperParams = RulesForecasterHyperParams - - def __init__(self, config: RulesForecasterConfig) -> None: - """Initialize the Hybrid forecaster.""" - self._config = config - - self._base_learners: list[BaseLearner] = self._init_base_learners( - config=config, base_hyperparams=config.hyperparams.base_hyperparams - ) - - self._forecast_combiner = RulesLearner( - quantiles=config.quantiles, - hyperparams=config.hyperparams.final_hyperparams, - ) - - -__all__ = [ - "RulesForecaster", - "RulesForecasterConfig", - "RulesForecasterHyperParams", - "RulesLearner", - "RulesLearnerHyperParams", -] diff --git a/packages/openstef-meta/src/openstef_meta/presets/__init__.py b/packages/openstef-meta/src/openstef_meta/presets/__init__.py new file mode 100644 index 000000000..53b9630aa --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/presets/__init__.py @@ -0,0 +1,5 @@ +"""Package for preset forecasting workflows.""" + +from .forecasting_workflow import EnsembleForecastingModel, EnsembleWorkflowConfig, create_ensemble_workflow + +__all__ = ["EnsembleForecastingModel", "EnsembleWorkflowConfig", "create_ensemble_workflow"] diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py new file mode 100644 index 000000000..88d063584 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -0,0 +1,453 @@ +"""Ensemble forecasting workflow preset. + +Mimics OpenSTEF-models forecasting workflow with ensemble capabilities. +""" + +from collections.abc import Sequence +from datetime import timedelta +from typing import Literal + +from pydantic import Field + +from openstef_beam.evaluation.metric_providers import ( + MetricDirection, + MetricProvider, + ObservedProbabilityProvider, + R2Provider, +) +from openstef_core.base_model import BaseConfig +from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset +from openstef_core.mixins.transform import Transform, TransformPipeline +from openstef_core.types import LeadTime, Q, Quantile, QuantileOrGlobal +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel +from openstef_meta.models.forecast_combiners.learned_weights_combiner import WeightsCombiner +from openstef_meta.models.forecast_combiners.rules_combiner import RulesCombiner +from openstef_meta.models.forecast_combiners.stacking_combiner import StackingCombiner +from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster +from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback +from openstef_models.mixins.model_serializer import ModelIdentifier +from openstef_models.models.forecasting.forecaster import Forecaster +from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster +from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster +from openstef_models.presets.forecasting_workflow import LocationConfig +from openstef_models.transforms.energy_domain import WindPowerFeatureAdder +from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, SampleWeighter, Scaler +from openstef_models.transforms.general.imputer import Imputer +from openstef_models.transforms.general.nan_dropper import NaNDropper +from openstef_models.transforms.postprocessing import QuantileSorter +from openstef_models.transforms.time_domain import ( + CyclicFeaturesAdder, + DatetimeFeaturesAdder, + HolidayFeatureAdder, + RollingAggregatesAdder, +) +from openstef_models.transforms.time_domain.lags_adder import LagsAdder +from openstef_models.transforms.time_domain.rolling_aggregates_adder import AggregationFunction +from openstef_models.transforms.validation import CompletenessChecker, FlatlineChecker, InputConsistencyChecker +from openstef_models.transforms.weather_domain import ( + AtmosphereDerivedFeaturesAdder, + DaylightFeatureAdder, + RadiationDerivedFeaturesAdder, +) +from openstef_models.utils.data_split import DataSplitter +from openstef_models.utils.feature_selection import Exclude, FeatureSelection, Include +from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow, ForecastingCallback + + +class EnsembleWorkflowConfig(BaseConfig): + """Configuration for ensemble forecasting workflows.""" + + model_id: ModelIdentifier + + # Ensemble configuration + ensemble_type: Literal["learned_weights", "stacking", "rules"] = Field(default="learned_weights") + base_models: Sequence[Literal["lgbm", "gblinear", "xgboost", "lgbm_linear"]] = Field(default=["lgbm", "gblinear"]) + combiner_model: Literal["lgbm", "rf", "xgboost", "logistic", "gblinear"] = Field(default="lgbm") + + # Forecast configuration + quantiles: list[Quantile] = Field( + default=[Q(0.5)], + description="List of quantiles to predict for probabilistic forecasting.", + ) + + sample_interval: timedelta = Field( + default=timedelta(minutes=15), + description="Time interval between consecutive data samples.", + ) + horizons: list[LeadTime] = Field( + default=[LeadTime.from_string("PT48H")], + description="List of forecast horizons to predict.", + ) + + location: LocationConfig = Field( + default=LocationConfig(), + description="Location information for the forecasting workflow.", + ) + + # Forecaster hyperparameters + xgboost_hyperparams: XGBoostForecaster.HyperParams = Field( + default=XGBoostForecaster.HyperParams(), + description="Hyperparameters for XGBoost forecaster.", + ) + gblinear_hyperparams: GBLinearForecaster.HyperParams = Field( + default=GBLinearForecaster.HyperParams(), + description="Hyperparameters for GBLinear forecaster.", + ) + + lgbm_hyperparams: LGBMForecaster.HyperParams = Field( + default=LGBMForecaster.HyperParams(), + description="Hyperparameters for LightGBM forecaster.", + ) + + lgbmlinear_hyperparams: LGBMLinearForecaster.HyperParams = Field( + default=LGBMLinearForecaster.HyperParams(), + description="Hyperparameters for LightGBM forecaster.", + ) + + residual_hyperparams: ResidualForecaster.HyperParams = Field( + default=ResidualForecaster.HyperParams(), + description="Hyperparameters for Residual forecaster.", + ) + + # Data properties + target_column: str = Field(default="load", description="Name of the target variable column in datasets.") + energy_price_column: str = Field( + default="day_ahead_electricity_price", + description="Name of the energy price column in datasets.", + ) + radiation_column: str = Field(default="radiation", description="Name of the radiation column in datasets.") + wind_speed_column: str = Field(default="windspeed", description="Name of the wind speed column in datasets.") + pressure_column: str = Field(default="pressure", description="Name of the pressure column in datasets.") + temperature_column: str = Field(default="temperature", description="Name of the temperature column in datasets.") + relative_humidity_column: str = Field( + default="relative_humidity", + description="Name of the relative humidity column in datasets.", + ) + predict_history: timedelta = Field( + default=timedelta(days=14), + description="Amount of historical data available at prediction time.", + ) + cutoff_history: timedelta = Field( + default=timedelta(days=0), + description="Amount of historical data to exclude from training and prediction due to incomplete features " + "from lag-based preprocessing. When using lag transforms (e.g., lag-14), the first N days contain NaN values. " + "Set this to match your maximum lag duration (e.g., timedelta(days=14)). " + "Default of 0 assumes no invalid rows are created by preprocessing. " + "Note: should be same as predict_history if you are using lags. We default to disabled to keep the same " + "behaviour as openstef 3.0.", + ) + + # Feature engineering and validation + completeness_threshold: float = Field( + default=0.5, + description="Minimum fraction of data that should be available for making a regular forecast.", + ) + flatliner_threshold: timedelta = Field( + default=timedelta(hours=24), + description="Number of minutes that the load has to be constant to detect a flatliner.", + ) + detect_non_zero_flatliner: bool = Field( + default=False, + description="If True, flatliners are also detected on non-zero values (median of the load).", + ) + rolling_aggregate_features: list[AggregationFunction] = Field( + default=[], + description="If not None, rolling aggregate(s) of load will be used as features in the model.", + ) + clip_features: FeatureSelection = Field( + default=FeatureSelection(include=None, exclude=None), + description="Feature selection for which features to clip.", + ) + sample_weight_scale_percentile: int = Field( + default=95, + description="Percentile of target values used as scaling reference. " + "Values are normalized relative to this percentile before weighting.", + ) + sample_weight_exponent: float = Field( + default_factory=lambda data: 1.0 + if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "learned_weights", "stacking", "residual", "xgboost"} + else 0.0, + description="Exponent applied to scale the sample weights. " + "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " + "Note: Defaults to 1.0 for gblinear congestion models.", + ) + sample_weight_floor: float = Field( + default=0.1, + description="Minimum weight value to ensure all samples contribute to training.", + ) + + # Data splitting strategy + data_splitter: DataSplitter = Field( + default=DataSplitter( + # Copied from OpenSTEF3 pipeline defaults + val_fraction=0.15, + test_fraction=0.0, + stratification_fraction=0.15, + min_days_for_stratification=4, + ), + description="Configuration for splitting data into training, validation, and test sets.", + ) + + # Evaluation + evaluation_metrics: list[MetricProvider] = Field( + default_factory=lambda: [R2Provider(), ObservedProbabilityProvider()], + description="List of metric providers for evaluating model score.", + ) + + # Callbacks + mlflow_storage: MLFlowStorage | None = Field( + default_factory=MLFlowStorage, + description="Configuration for MLflow experiment tracking and model storage.", + ) + + model_reuse_enable: bool = Field( + default=True, + description="Whether to enable reuse of previously trained models.", + ) + model_reuse_max_age: timedelta = Field( + default=timedelta(days=7), + description="Maximum age of a model to be considered for reuse.", + ) + + model_selection_enable: bool = Field( + default=True, + description="Whether to enable automatic model selection based on performance.", + ) + model_selection_metric: tuple[QuantileOrGlobal, str, MetricDirection] = Field( + default=(Q(0.5), "R2", "higher_is_better"), + description="Metric to monitor for model performance when retraining.", + ) + model_selection_old_model_penalty: float = Field( + default=1.2, + description="Penalty to apply to the old model's metric to bias selection towards newer models.", + ) + + verbosity: Literal[0, 1, 2, 3, True] = Field( + default=0, description="Verbosity level. 0=silent, 1=warning, 2=info, 3=debug" + ) + + # Metadata + tags: dict[str, str] = Field( + default_factory=dict, + description="Optional metadata tags for the model.", + ) + + +def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: + """Create an ensemble forecasting workflow from configuration.""" + + # Build preprocessing components + def checks() -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: + return [ + InputConsistencyChecker(), + FlatlineChecker( + load_column=config.target_column, + flatliner_threshold=config.flatliner_threshold, + detect_non_zero_flatliner=config.detect_non_zero_flatliner, + error_on_flatliner=False, + ), + CompletenessChecker(completeness_threshold=config.completeness_threshold), + ] + + def feature_adders() -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: + return [ + WindPowerFeatureAdder( + windspeed_reference_column=config.wind_speed_column, + ), + AtmosphereDerivedFeaturesAdder( + pressure_column=config.pressure_column, + relative_humidity_column=config.relative_humidity_column, + temperature_column=config.temperature_column, + ), + RadiationDerivedFeaturesAdder( + coordinate=config.location.coordinate, + radiation_column=config.radiation_column, + ), + CyclicFeaturesAdder(), + DaylightFeatureAdder( + coordinate=config.location.coordinate, + ), + RollingAggregatesAdder( + feature=config.target_column, + aggregation_functions=config.rolling_aggregate_features, + horizons=config.horizons, + ), + ] + + def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: + return [ + Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), + Scaler(selection=Exclude(config.target_column), method="standard"), + SampleWeighter( + target_column=config.target_column, + weight_exponent=config.sample_weight_exponent, + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, + ), + EmptyFeatureRemover(), + ] + + # Model Specific LagsAdder + + # Build forecasters and their processing pipelines + forecaster_preprocessing: dict[str, list[Transform[TimeSeriesDataset, TimeSeriesDataset]]] = {} + forecasters: dict[str, Forecaster] = {} + for model_type in config.base_models: + if model_type == "lgbm": + forecasters[model_type] = LGBMForecaster( + config=LGBMForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) + ) + forecaster_preprocessing[model_type] = [ + *checks(), + *feature_adders(), + LagsAdder( + history_available=config.predict_history, + horizons=config.horizons, + add_trivial_lags=True, + target_column=config.target_column, + ), + HolidayFeatureAdder(country_code=config.location.country_code), + DatetimeFeaturesAdder(onehot_encode=False), + *feature_standardizers(), + ] + + elif model_type == "gblinear": + forecasters[model_type] = GBLinearForecaster( + config=GBLinearForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) + ) + forecaster_preprocessing[model_type] = [ + *checks(), + *feature_adders(), + LagsAdder( + history_available=config.predict_history, + horizons=config.horizons, + add_trivial_lags=False, + target_column=config.target_column, + custom_lags=[timedelta(days=7)], + ), + HolidayFeatureAdder(country_code=config.location.country_code), + DatetimeFeaturesAdder(onehot_encode=False), + *feature_standardizers(), + Imputer( + selection=Exclude(config.target_column), + imputation_strategy="mean", + fill_future_values=Include(config.energy_price_column), + ), + NaNDropper( + selection=Exclude(config.target_column), + ), + ] + elif model_type == "xgboost": + forecasters[model_type] = XGBoostForecaster( + config=XGBoostForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) + ) + forecaster_preprocessing[model_type] = [ + *checks(), + *feature_adders(), + LagsAdder( + history_available=config.predict_history, + horizons=config.horizons, + add_trivial_lags=True, + target_column=config.target_column, + ), + HolidayFeatureAdder(country_code=config.location.country_code), + DatetimeFeaturesAdder(onehot_encode=False), + *feature_standardizers(), + ] + elif model_type == "lgbm_linear": + forecasters[model_type] = LGBMLinearForecaster( + config=LGBMLinearForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) + ) + forecaster_preprocessing[model_type] = [ + *checks(), + *feature_adders(), + LagsAdder( + history_available=config.predict_history, + horizons=config.horizons, + add_trivial_lags=True, + target_column=config.target_column, + ), + HolidayFeatureAdder(country_code=config.location.country_code), + DatetimeFeaturesAdder(onehot_encode=False), + *feature_standardizers(), + ] + else: + msg = f"Unsupported base model type: {model_type}" + raise ValueError(msg) + + # Build combiner + if config.ensemble_type == "learned_weights": + if config.combiner_model == "lgbm": + combiner_hp = WeightsCombiner.LGBMHyperParams() + elif config.combiner_model == "rf": + combiner_hp = WeightsCombiner.RFHyperParams() + elif config.combiner_model == "xgboost": + combiner_hp = WeightsCombiner.XGBHyperParams() + elif config.combiner_model == "logistic": + combiner_hp = WeightsCombiner.LogisticHyperParams() + else: + msg = f"Unsupported combiner model type: {config.combiner_model}" + raise ValueError(msg) + combiner_config = WeightsCombiner.Config( + hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles + ) + combiner = WeightsCombiner( + config=combiner_config, + ) + elif config.ensemble_type == "stacking": + if config.combiner_model == "lgbm": + combiner_hp = StackingCombiner.LGBMHyperParams() + elif config.combiner_model == "gblinear": + combiner_hp = StackingCombiner.GBLinearHyperParams() + else: + msg = f"Unsupported combiner model type for stacking: {config.combiner_model}" + raise ValueError(msg) + combiner_config = StackingCombiner.Config( + hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles + ) + combiner = StackingCombiner( + config=combiner_config, + ) + elif config.ensemble_type == "rules": + combiner_config = RulesCombiner.Config(horizons=config.horizons, quantiles=config.quantiles) + combiner = RulesCombiner( + config=combiner_config, + ) + else: + msg = f"Unsupported ensemble type: {config.ensemble_type}" + raise ValueError(msg) + + postprocessing = [QuantileSorter()] + + model_specific_preprocessing: dict[str, TransformPipeline[TimeSeriesDataset]] = { + name: TransformPipeline(transforms=transforms) for name, transforms in forecaster_preprocessing.items() + } + + ensemble_model = EnsembleForecastingModel( + common_preprocessing=TransformPipeline(transforms=[]), + model_specific_preprocessing=model_specific_preprocessing, + postprocessing=TransformPipeline(transforms=postprocessing), + forecasters=forecasters, + combiner=combiner, + target_column=config.target_column, + ) + + callbacks: list[ForecastingCallback] = [] + if config.mlflow_storage is not None: + callbacks.append( + MLFlowStorageCallback( + storage=config.mlflow_storage, + model_reuse_enable=config.model_reuse_enable, + model_reuse_max_age=config.model_reuse_max_age, + model_selection_enable=config.model_selection_enable, + model_selection_metric=config.model_selection_metric, + model_selection_old_model_penalty=config.model_selection_old_model_penalty, + ) + ) + + return CustomForecastingWorkflow(model=ensemble_model, model_id=config.model_id, callbacks=callbacks) + + +__all__ = ["EnsembleWorkflowConfig", "create_ensemble_workflow"] diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index 41186152d..9d38c5b4f 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -9,13 +9,12 @@ """ from datetime import datetime, timedelta -from typing import Self, cast, override +from typing import Self, override import pandas as pd from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset from openstef_core.types import Quantile -from openstef_meta.framework.base_learner import BaseLearnerNames from openstef_meta.utils.pinball_errors import calculate_pinball_errors DEFAULT_TARGET_COLUMN = {Quantile(0.5): "load"} @@ -26,7 +25,7 @@ class EnsembleForecastDataset(TimeSeriesDataset): forecast_start: datetime quantiles: list[Quantile] - model_names: list[BaseLearnerNames] + forecaster_names: list[str] target_column: str @override @@ -54,8 +53,8 @@ def __init__( ) quantile_feature_names = [col for col in self.feature_names if col != target_column] - self.model_names, self.quantiles = self.get_learner_and_quantile(pd.Index(quantile_feature_names)) - n_cols = len(self.model_names) * len(self.quantiles) + self.forecaster_names, self.quantiles = self.get_learner_and_quantile(pd.Index(quantile_feature_names)) + n_cols = len(self.forecaster_names) * len(self.quantiles) if len(data.columns) not in {n_cols + 1, n_cols}: raise ValueError("Data columns do not match the expected number based on base learners and quantiles.") @@ -67,7 +66,7 @@ def target_series(self) -> pd.Series | None: return None @staticmethod - def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[BaseLearnerNames], list[Quantile]]: + def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[str], list[Quantile]]: """Extract base learner names and quantiles from feature names. Args: @@ -79,24 +78,24 @@ def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[BaseLearnerN Raises: ValueError: If an invalid base learner name is found in a feature name. """ - all_base_learners = BaseLearnerNames.__args__ - base_learners: set[BaseLearnerNames] = set() + forecasters: set[str] = set() quantiles: set[Quantile] = set() for feature_name in feature_names: - learner_part, quantile_part = feature_name.split("_", maxsplit=1) - if learner_part not in all_base_learners or not Quantile.is_valid_quantile_string(quantile_part): - msg = f"Invalid base learner name in feature: {feature_name}" + quantile_part = "_".join(feature_name.split("_")[-2:]) + learner_part = feature_name[: -(len(quantile_part) + 1)] + if not Quantile.is_valid_quantile_string(quantile_part): + msg = f"Column has no valid quantile string: {feature_name}" raise ValueError(msg) - base_learners.add(cast(BaseLearnerNames, learner_part)) + forecasters.add(learner_part) quantiles.add(Quantile.parse(quantile_part)) - return list(base_learners), list(quantiles) + return list(forecasters), list(quantiles) @staticmethod - def get_quantile_feature_name(feature_name: str) -> tuple[BaseLearnerNames, Quantile]: + def get_quantile_feature_name(feature_name: str) -> tuple[str, Quantile]: """Generate the feature name for a given base learner and quantile. Args: @@ -106,12 +105,12 @@ def get_quantile_feature_name(feature_name: str) -> tuple[BaseLearnerNames, Quan Tuple containing the base learner name and Quantile object. """ learner_part, quantile_part = feature_name.split("_", maxsplit=1) - return cast(BaseLearnerNames, learner_part), Quantile.parse(quantile_part) + return learner_part, Quantile.parse(quantile_part) @classmethod def from_forecast_datasets( cls, - datasets: dict[BaseLearnerNames, ForecastDataset], + datasets: dict[str, ForecastDataset], target_series: pd.Series | None = None, sample_weights: pd.Series | None = None, ) -> Self: @@ -184,9 +183,9 @@ def select_quantile_classification(self, quantile: Quantile) -> ForecastInputDat msg = f"Target column '{self.target_column}' not found in dataset." raise ValueError(msg) - selected_columns = [f"{learner}_{quantile.format()}" for learner in self.model_names] + selected_columns = [f"{learner}_{quantile.format()}" for learner in self.forecaster_names] prediction_data = self.data[selected_columns].copy() - prediction_data.columns = self.model_names + prediction_data.columns = self.forecaster_names target = self._prepare_classification( data=prediction_data, @@ -210,10 +209,10 @@ def select_quantile(self, quantile: Quantile) -> ForecastInputDataset: Returns: ForecastInputDataset containing base predictions for the specified quantile. """ - selected_columns = [f"{learner}_{quantile.format()}" for learner in self.model_names] + selected_columns = [f"{learner}_{quantile.format()}" for learner in self.forecaster_names] selected_columns.append(self.target_column) prediction_data = self.data[selected_columns].copy() - prediction_data.columns = [*self.model_names, self.target_column] + prediction_data.columns = [*self.forecaster_names, self.target_column] return ForecastInputDataset( data=prediction_data, diff --git a/packages/openstef-meta/test_forecasting_model.py b/packages/openstef-meta/test_forecasting_model.py new file mode 100644 index 000000000..008199689 --- /dev/null +++ b/packages/openstef-meta/test_forecasting_model.py @@ -0,0 +1,274 @@ +import pickle +from datetime import datetime, timedelta +from typing import override + +import numpy as np +from openstef_core.mixins.predictor import HyperParams +import pandas as pd +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset +from openstef_core.datasets.validated_datasets import ForecastDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.mixins.transform import TransformPipeline +from openstef_core.testing import assert_timeseries_equal, create_synthetic_forecasting_dataset +from openstef_core.types import LeadTime, Q +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel +from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig +from openstef_meta.utils.datasets import EnsembleForecastDataset +from openstef_models.models.forecasting import Forecaster, ForecasterConfig +from openstef_models.transforms.postprocessing.quantile_sorter import QuantileSorter +from openstef_models.transforms.time_domain.lags_adder import LagsAdder + + +class SimpleForecaster(Forecaster): + """Simple test forecaster that returns predictable values for testing.""" + + def __init__(self, config: ForecasterConfig): + self._config = config + self._is_fitted = False + + @property + def config(self) -> ForecasterConfig: + return self._config + + @property + @override + def is_fitted(self) -> bool: + return self._is_fitted + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + self._is_fitted = True + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + # Return predictable forecast values + forecast_values = {quantile: 100.0 + quantile * 10 for quantile in self.config.quantiles} + return ForecastDataset( + pd.DataFrame( + { + quantile.format(): [forecast_values[quantile]] * len(data.index) + for quantile in self.config.quantiles + }, + index=data.index, + ), + data.sample_interval, + data.forecast_start, + ) + + +class SimpleCombiner(ForecastCombiner): + """Simple combiner that averages base learner predictions.""" + + def fit( + self, + data: EnsembleForecastDataset, + data_val: EnsembleForecastDataset | None = None, + additional_features: ForecastInputDataset | None = None, + sample_weights: pd.Series | None = None, + ) -> None: + self._is_fitted = True + + def predict( + self, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, + ) -> ForecastDataset: + if not self._is_fitted: + raise NotFittedError("Combiner must be fitted before prediction.") + + combined_data = pd.DataFrame(index=data.data.index) + for quantile in self.quantiles: + quantile_cols = [col for col in data.data.columns if col.endswith(quantile.format())] + combined_data[quantile.format()] = data.data[quantile_cols].mean(axis=1) + + return ForecastDataset( + data=combined_data, + sample_interval=data.sample_interval, + forecast_start=data.forecast_start, + ) + + @property + def is_fitted(self) -> bool: + return self._is_fitted + + +@pytest.fixture +def sample_timeseries_dataset() -> TimeSeriesDataset: + """Create sample time series data with typical energy forecasting features.""" + n_samples = 25 + rng = np.random.default_rng(seed=42) + + data = pd.DataFrame( + { + "load": 100.0 + rng.normal(10.0, 5.0, n_samples), + "temperature": 20.0 + rng.normal(1.0, 0.5, n_samples), + "radiation": rng.uniform(0.0, 500.0, n_samples), + }, + index=pd.date_range("2025-01-01 10:00", periods=n_samples, freq="h"), + ) + + return TimeSeriesDataset(data, timedelta(hours=1)) + + +@pytest.fixture +def model() -> EnsembleForecastingModel: + """Create a simple EnsembleForecastingModel for testing.""" + # Arrange + horizons = [LeadTime(timedelta(hours=1))] + quantiles = [Q(0.3), Q(0.5), Q(0.7)] + config = ForecasterConfig(quantiles=quantiles, horizons=horizons) + forecasters: dict[str, Forecaster] = { + "forecaster_1": SimpleForecaster(config=config), + "forecaster_2": SimpleForecaster(config=config), + } + combiner_config = ForecastCombinerConfig(quantiles=quantiles, horizons=horizons, hyperparams=HyperParams()) + + combiner = SimpleCombiner( + config=combiner_config, + quantiles=quantiles, + ) + + # Act + model = EnsembleForecastingModel( + forecasters=forecasters, combiner=combiner, common_preprocessing=TransformPipeline() + ) + return model + + +def test_forecasting_model__init__uses_defaults(model: EnsembleForecastingModel): + """Test initialization uses default preprocessing and postprocessing when not provided.""" + + # Assert - Check that components are assigned correctly + assert model.common_preprocessing is not None + assert model.postprocessing is not None + assert model.target_column == "load" # Default value + assert model.forecaster_names == ["forecaster_1", "forecaster_2"] + + +def test_forecasting_model__fit(sample_timeseries_dataset: TimeSeriesDataset, model: EnsembleForecastingModel): + """Test that fit correctly orchestrates preprocessing and forecaster calls, and returns metrics.""" + + # Act + result = model.fit(data=sample_timeseries_dataset) + + # Assert - Model is fitted and returns metrics + assert model.is_fitted + assert result is not None + + +def test_forecasting_model__predict(sample_timeseries_dataset: TimeSeriesDataset, model: EnsembleForecastingModel): + """Test that predict correctly orchestrates preprocessing and forecaster calls.""" + + # Fit the model first + model.fit(data=sample_timeseries_dataset) + forecast_start = datetime.fromisoformat("2025-01-01T12:00:00") + + # Act + result = model.predict(data=sample_timeseries_dataset, forecast_start=forecast_start) + + # Assert - Prediction returns a forecast dataset with expected properties + assert isinstance(result, ForecastDataset) + assert result.sample_interval == sample_timeseries_dataset.sample_interval + assert result.quantiles == [Q(0.3), Q(0.5), Q(0.7)] + assert result.forecast_start >= forecast_start + assert not result.data.empty + assert not result.data.isna().any().any() + + +def test_forecasting_model__predict__raises_error_when_not_fitted( + sample_timeseries_dataset: TimeSeriesDataset, model: EnsembleForecastingModel +): + """Test predict raises NotFittedError when model is not fitted.""" + + # Act & Assert + with pytest.raises(NotFittedError): + model.predict(data=sample_timeseries_dataset) + + +def test_forecasting_model__score__returns_metrics( + sample_timeseries_dataset: TimeSeriesDataset, model: EnsembleForecastingModel +): + """Test that score evaluates model and returns metrics.""" + + model.fit(data=sample_timeseries_dataset) + + # Act + metrics = model.score(data=sample_timeseries_dataset) + + # Assert - Metrics are calculated for the median quantile + assert metrics.metrics is not None + assert all(x in metrics.metrics for x in [Q(0.3), Q(0.5), Q(0.7)]) + # R2 metric should be present (default evaluation metric) + assert "R2" in metrics.metrics[Q(0.5)] + + +def test_forecasting_model__pickle_roundtrip(): + """Test that ForecastingModel with preprocessing and postprocessing can be pickled and unpickled. + + This verifies that the entire forecasting pipeline, including transforms and forecaster, + can be serialized and deserialized while maintaining functionality. + """ + # Arrange - create synthetic dataset + dataset = create_synthetic_forecasting_dataset( + length=timedelta(days=30), + sample_interval=timedelta(hours=1), + random_seed=42, + ) + + # Create forecasting model with preprocessing and postprocessing + # Arrange + horizons = [LeadTime(timedelta(hours=1))] + quantiles = [Q(0.3), Q(0.5), Q(0.7)] + config = ForecasterConfig(quantiles=quantiles, horizons=horizons) + forecasters: dict[str, Forecaster] = { + "forecaster_1": SimpleForecaster(config=config), + "forecaster_2": SimpleForecaster(config=config), + } + combiner_config = ForecastCombinerConfig(quantiles=quantiles, horizons=horizons, hyperparams=HyperParams()) + + combiner = SimpleCombiner( + config=combiner_config, + quantiles=quantiles, + ) + + original_model = EnsembleForecastingModel( + forecasters=forecasters, + combiner=combiner, + common_preprocessing=TransformPipeline( + transforms=[ + LagsAdder( + history_available=timedelta(days=14), + horizons=horizons, + max_day_lags=7, + add_trivial_lags=True, + add_autocorr_lags=False, + ), + ] + ), + postprocessing=TransformPipeline(transforms=[QuantileSorter()]), + cutoff_history=timedelta(days=7), + target_column="load", + ) + + # Fit the original model + original_model.fit(data=dataset) + + # Get predictions from original model + expected_predictions = original_model.predict(data=dataset) + + # Act - pickle and unpickle the model + pickled = pickle.dumps(original_model) + restored_model = pickle.loads(pickled) # noqa: S301 - Controlled test + + # Assert - verify the restored model is the correct type + assert isinstance(restored_model, EnsembleForecastingModel) + assert restored_model.is_fitted + assert restored_model.target_column == original_model.target_column + assert restored_model.cutoff_history == original_model.cutoff_history + + # Verify predictions match using pandas testing utilities + actual_predictions = restored_model.predict(data=dataset) + assert_timeseries_equal(actual_predictions, expected_predictions) diff --git a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py index ad00a393f..667477191 100644 --- a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py +++ b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py @@ -1,171 +1,171 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -from datetime import timedelta - -import pytest -from lightgbm import LGBMClassifier -from sklearn.linear_model import LogisticRegression -from xgboost import XGBClassifier - -from openstef_core.datasets import ForecastInputDataset -from openstef_core.exceptions import NotFittedError -from openstef_core.types import LeadTime, Q -from openstef_meta.models.learned_weights_forecaster import ( - Classifier, - LearnedWeightsForecaster, - LearnedWeightsForecasterConfig, - LearnedWeightsHyperParams, - LGBMCombinerHyperParams, - LogisticCombinerHyperParams, - RFCombinerHyperParams, - WeightsCombiner, - WeightsCombinerHyperParams, - XGBCombinerHyperParams, -) -from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder - - -@pytest.fixture(params=["rf", "lgbm", "xgboost", "logistic"]) -def combiner_hyperparams(request: pytest.FixtureRequest) -> WeightsCombinerHyperParams: - """Fixture to provide different primary models types.""" - learner_type = request.param - if learner_type == "rf": - return RFCombinerHyperParams() - if learner_type == "lgbm": - return LGBMCombinerHyperParams() - if learner_type == "xgboost": - return XGBCombinerHyperParams() - return LogisticCombinerHyperParams() - - -@pytest.fixture -def base_config(combiner_hyperparams: WeightsCombinerHyperParams) -> LearnedWeightsForecasterConfig: - """Base configuration for LearnedWeights forecaster tests.""" - - params = LearnedWeightsHyperParams( - combiner_hyperparams=combiner_hyperparams, - ) - return LearnedWeightsForecasterConfig( - quantiles=[Q(0.1), Q(0.5), Q(0.9)], - horizons=[LeadTime(timedelta(days=1))], - hyperparams=params, - verbosity=False, - ) - - -def test_forecast_combiner_corresponds_to_hyperparams(base_config: LearnedWeightsForecasterConfig): - """Test that the forecast combiner learner corresponds to the specified hyperparameters.""" - forecaster = LearnedWeightsForecaster(config=base_config) - forecast_combiner = forecaster._forecast_combiner - assert isinstance(forecast_combiner, WeightsCombiner) - classifier = forecast_combiner.models[0] - - mapping: dict[type[WeightsCombinerHyperParams], type[Classifier]] = { - RFCombinerHyperParams: LGBMClassifier, - LGBMCombinerHyperParams: LGBMClassifier, - XGBCombinerHyperParams: XGBClassifier, - LogisticCombinerHyperParams: LogisticRegression, - } - expected_type = mapping[type(base_config.hyperparams.combiner_hyperparams)] - - assert isinstance(classifier, expected_type), ( - f"Final learner type {type(forecast_combiner)} does not match expected type {expected_type}" - ) - - -def test_learned_weights_forecaster_fit_predict( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: LearnedWeightsForecasterConfig, -): - """Test basic fit and predict workflow with comprehensive output validation.""" - # Arrange - expected_quantiles = base_config.quantiles - forecaster = LearnedWeightsForecaster(config=base_config) - - # Act - forecaster.fit(sample_forecast_input_dataset) - result = forecaster.predict(sample_forecast_input_dataset) - - # Assert - # Basic functionality - assert forecaster.is_fitted, "Model should be fitted after calling fit()" - - # Check that necessary quantiles are present - required_columns = [q.format() for q in expected_quantiles] - assert all(col in result.data.columns for col in required_columns), ( - f"Expected columns {required_columns}, got {list(result.data.columns)}" - ) - - # Forecast data quality - assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" - - -def test_learned_weights_forecaster_predict_not_fitted_raises_error( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: LearnedWeightsForecasterConfig, -): - """Test that predict() raises NotFittedError when called before fit().""" - # Arrange - forecaster = LearnedWeightsForecaster(config=base_config) - - # Act & Assert - with pytest.raises(NotFittedError, match="LearnedWeightsForecaster"): - forecaster.predict(sample_forecast_input_dataset) - - -def test_learned_weights_forecaster_with_sample_weights( - sample_dataset_with_weights: ForecastInputDataset, - base_config: LearnedWeightsForecasterConfig, -): - """Test that forecaster works with sample weights and produces different results.""" - # Arrange - forecaster_with_weights = LearnedWeightsForecaster(config=base_config) - - # Create dataset without weights for comparison - data_without_weights = ForecastInputDataset( - data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), - sample_interval=sample_dataset_with_weights.sample_interval, - target_column=sample_dataset_with_weights.target_column, - forecast_start=sample_dataset_with_weights.forecast_start, - ) - forecaster_without_weights = LearnedWeightsForecaster(config=base_config) - - # Act - forecaster_with_weights.fit(sample_dataset_with_weights) - forecaster_without_weights.fit(data_without_weights) - - # Predict using data without sample_weight column (since that's used for training, not prediction) - result_with_weights = forecaster_with_weights.predict(sample_dataset_with_weights) - result_without_weights = forecaster_without_weights.predict(data_without_weights) - - # Assert - # Both should produce valid forecasts - assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" - assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" - - # Sample weights should affect the model, so results should be different - # (This is a statistical test - with different weights, predictions should differ) - differences = (result_with_weights.data - result_without_weights.data).abs() - assert differences.sum().sum() > 0, "Sample weights should affect model predictions" - - -def test_learned_weights_forecaster_with_additional_features( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: LearnedWeightsForecasterConfig, -): - """Test that forecaster works with additional features for the final learner.""" - # Arrange - # Add a simple feature adder that adds a constant feature - - base_config.hyperparams.combiner_hyperparams.feature_adders.append(CyclicFeaturesAdder()) # type: ignore - forecaster = LearnedWeightsForecaster(config=base_config) - - # Act - forecaster.fit(sample_forecast_input_dataset) - result = forecaster.predict(sample_forecast_input_dataset) - - # Assert - assert forecaster.is_fitted, "Model should be fitted after calling fit()" - assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" +# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# # +# # SPDX-License-Identifier: MPL-2.0 + +# from datetime import timedelta + +# import pytest +# from lightgbm import LGBMClassifier +# from sklearn.linear_model import LogisticRegression +# from xgboost import XGBClassifier + +# from openstef_core.datasets import ForecastInputDataset +# from openstef_core.exceptions import NotFittedError +# from openstef_core.types import LeadTime, Q +# from openstef_meta.models.learned_weights_forecaster import ( +# Classifier, +# LearnedWeightsForecaster, +# LearnedWeightsForecasterConfig, +# LearnedWeightsHyperParams, +# LGBMCombinerHyperParams, +# LogisticCombinerHyperParams, +# RFCombinerHyperParams, +# WeightsCombiner, +# WeightsCombinerHyperParams, +# XGBCombinerHyperParams, +# ) +# from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder + + +# @pytest.fixture(params=["rf", "lgbm", "xgboost", "logistic"]) +# def combiner_hyperparams(request: pytest.FixtureRequest) -> WeightsCombinerHyperParams: +# """Fixture to provide different primary models types.""" +# learner_type = request.param +# if learner_type == "rf": +# return RFCombinerHyperParams() +# if learner_type == "lgbm": +# return LGBMCombinerHyperParams() +# if learner_type == "xgboost": +# return XGBCombinerHyperParams() +# return LogisticCombinerHyperParams() + + +# @pytest.fixture +# def base_config(combiner_hyperparams: WeightsCombinerHyperParams) -> LearnedWeightsForecasterConfig: +# """Base configuration for LearnedWeights forecaster tests.""" + +# params = LearnedWeightsHyperParams( +# combiner_hyperparams=combiner_hyperparams, +# ) +# return LearnedWeightsForecasterConfig( +# quantiles=[Q(0.1), Q(0.5), Q(0.9)], +# horizons=[LeadTime(timedelta(days=1))], +# hyperparams=params, +# verbosity=False, +# ) + + +# def test_forecast_combiner_corresponds_to_hyperparams(base_config: LearnedWeightsForecasterConfig): +# """Test that the forecast combiner learner corresponds to the specified hyperparameters.""" +# forecaster = LearnedWeightsForecaster(config=base_config) +# forecast_combiner = forecaster._forecast_combiner +# assert isinstance(forecast_combiner, WeightsCombiner) +# classifier = forecast_combiner.models[0] + +# mapping: dict[type[WeightsCombinerHyperParams], type[Classifier]] = { +# RFCombinerHyperParams: LGBMClassifier, +# LGBMCombinerHyperParams: LGBMClassifier, +# XGBCombinerHyperParams: XGBClassifier, +# LogisticCombinerHyperParams: LogisticRegression, +# } +# expected_type = mapping[type(base_config.hyperparams.combiner_hyperparams)] + +# assert isinstance(classifier, expected_type), ( +# f"Final learner type {type(forecast_combiner)} does not match expected type {expected_type}" +# ) + + +# def test_learned_weights_forecaster_fit_predict( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: LearnedWeightsForecasterConfig, +# ): +# """Test basic fit and predict workflow with comprehensive output validation.""" +# # Arrange +# expected_quantiles = base_config.quantiles +# forecaster = LearnedWeightsForecaster(config=base_config) + +# # Act +# forecaster.fit(sample_forecast_input_dataset) +# result = forecaster.predict(sample_forecast_input_dataset) + +# # Assert +# # Basic functionality +# assert forecaster.is_fitted, "Model should be fitted after calling fit()" + +# # Check that necessary quantiles are present +# required_columns = [q.format() for q in expected_quantiles] +# assert all(col in result.data.columns for col in required_columns), ( +# f"Expected columns {required_columns}, got {list(result.data.columns)}" +# ) + +# # Forecast data quality +# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + +# def test_learned_weights_forecaster_predict_not_fitted_raises_error( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: LearnedWeightsForecasterConfig, +# ): +# """Test that predict() raises NotFittedError when called before fit().""" +# # Arrange +# forecaster = LearnedWeightsForecaster(config=base_config) + +# # Act & Assert +# with pytest.raises(NotFittedError, match="LearnedWeightsForecaster"): +# forecaster.predict(sample_forecast_input_dataset) + + +# def test_learned_weights_forecaster_with_sample_weights( +# sample_dataset_with_weights: ForecastInputDataset, +# base_config: LearnedWeightsForecasterConfig, +# ): +# """Test that forecaster works with sample weights and produces different results.""" +# # Arrange +# forecaster_with_weights = LearnedWeightsForecaster(config=base_config) + +# # Create dataset without weights for comparison +# data_without_weights = ForecastInputDataset( +# data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), +# sample_interval=sample_dataset_with_weights.sample_interval, +# target_column=sample_dataset_with_weights.target_column, +# forecast_start=sample_dataset_with_weights.forecast_start, +# ) +# forecaster_without_weights = LearnedWeightsForecaster(config=base_config) + +# # Act +# forecaster_with_weights.fit(sample_dataset_with_weights) +# forecaster_without_weights.fit(data_without_weights) + +# # Predict using data without sample_weight column (since that's used for training, not prediction) +# result_with_weights = forecaster_with_weights.predict(sample_dataset_with_weights) +# result_without_weights = forecaster_without_weights.predict(data_without_weights) + +# # Assert +# # Both should produce valid forecasts +# assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" +# assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + +# # Sample weights should affect the model, so results should be different +# # (This is a statistical test - with different weights, predictions should differ) +# differences = (result_with_weights.data - result_without_weights.data).abs() +# assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +# def test_learned_weights_forecaster_with_additional_features( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: LearnedWeightsForecasterConfig, +# ): +# """Test that forecaster works with additional features for the final learner.""" +# # Arrange +# # Add a simple feature adder that adds a constant feature + +# base_config.hyperparams.combiner_hyperparams.feature_adders.append(CyclicFeaturesAdder()) # type: ignore +# forecaster = LearnedWeightsForecaster(config=base_config) + +# # Act +# forecaster.fit(sample_forecast_input_dataset) +# result = forecaster.predict(sample_forecast_input_dataset) + +# # Assert +# assert forecaster.is_fitted, "Model should be fitted after calling fit()" +# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" diff --git a/packages/openstef-meta/tests/models/test_residual_forecaster.py b/packages/openstef-meta/tests/models/test_residual_forecaster.py index eba0d8d2a..c21111d92 100644 --- a/packages/openstef-meta/tests/models/test_residual_forecaster.py +++ b/packages/openstef-meta/tests/models/test_residual_forecaster.py @@ -1,142 +1,142 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -from datetime import timedelta - -import pytest - -from openstef_core.datasets import ForecastInputDataset -from openstef_core.exceptions import NotFittedError -from openstef_core.types import LeadTime, Q -from openstef_meta.framework.base_learner import BaseLearnerHyperParams -from openstef_meta.models.residual_forecaster import ( - ResidualForecaster, - ResidualForecasterConfig, - ResidualHyperParams, -) -from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams -from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams -from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearHyperParams -from openstef_models.models.forecasting.xgboost_forecaster import XGBoostHyperParams - - -@pytest.fixture(params=["gblinear", "lgbmlinear"]) -def primary_model(request: pytest.FixtureRequest) -> BaseLearnerHyperParams: - """Fixture to provide different primary models types.""" - learner_type = request.param - if learner_type == "gblinear": - return GBLinearHyperParams() - if learner_type == "lgbm": - return LGBMHyperParams() - if learner_type == "lgbmlinear": - return LGBMLinearHyperParams() - return XGBoostHyperParams() - - -@pytest.fixture(params=["gblinear", "lgbm", "lgbmlinear", "xgboost"]) -def secondary_model(request: pytest.FixtureRequest) -> BaseLearnerHyperParams: - """Fixture to provide different secondary models types.""" - learner_type = request.param - if learner_type == "gblinear": - return GBLinearHyperParams() - if learner_type == "lgbm": - return LGBMHyperParams() - if learner_type == "lgbmlinear": - return LGBMLinearHyperParams() - return XGBoostHyperParams() - - -@pytest.fixture -def base_config( - primary_model: BaseLearnerHyperParams, - secondary_model: BaseLearnerHyperParams, -) -> ResidualForecasterConfig: - """Base configuration for Residual forecaster tests.""" - - params = ResidualHyperParams( - primary_hyperparams=primary_model, - secondary_hyperparams=secondary_model, - ) - return ResidualForecasterConfig( - quantiles=[Q(0.1), Q(0.5), Q(0.9)], - horizons=[LeadTime(timedelta(days=1))], - hyperparams=params, - verbosity=False, - ) - - -def test_residual_forecaster_fit_predict( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: ResidualForecasterConfig, -): - """Test basic fit and predict workflow with comprehensive output validation.""" - # Arrange - expected_quantiles = base_config.quantiles - forecaster = ResidualForecaster(config=base_config) - - # Act - forecaster.fit(sample_forecast_input_dataset) - result = forecaster.predict(sample_forecast_input_dataset) - - # Assert - # Basic functionality - assert forecaster.is_fitted, "Model should be fitted after calling fit()" - - # Check that necessary quantiles are present - expected_columns = [q.format() for q in expected_quantiles] - assert list(result.data.columns) == expected_columns, ( - f"Expected columns {expected_columns}, got {list(result.data.columns)}" - ) - - # Forecast data quality - assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" - - -def test_residual_forecaster_predict_not_fitted_raises_error( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: ResidualForecasterConfig, -): - """Test that predict() raises NotFittedError when called before fit().""" - # Arrange - forecaster = ResidualForecaster(config=base_config) - - # Act & Assert - with pytest.raises(NotFittedError, match="ResidualForecaster"): - forecaster.predict(sample_forecast_input_dataset) - - -def test_residual_forecaster_with_sample_weights( - sample_dataset_with_weights: ForecastInputDataset, - base_config: ResidualForecasterConfig, -): - """Test that forecaster works with sample weights and produces different results.""" - # Arrange - forecaster_with_weights = ResidualForecaster(config=base_config) - - # Create dataset without weights for comparison - data_without_weights = ForecastInputDataset( - data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), - sample_interval=sample_dataset_with_weights.sample_interval, - target_column=sample_dataset_with_weights.target_column, - forecast_start=sample_dataset_with_weights.forecast_start, - ) - forecaster_without_weights = ResidualForecaster(config=base_config) - - # Act - forecaster_with_weights.fit(sample_dataset_with_weights) - forecaster_without_weights.fit(data_without_weights) - - # Predict using data without sample_weight column (since that's used for training, not prediction) - result_with_weights = forecaster_with_weights.predict(data_without_weights) - result_without_weights = forecaster_without_weights.predict(data_without_weights) - - # Assert - # Both should produce valid forecasts - assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" - assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" - - # Sample weights should affect the model, so results should be different - # (This is a statistical test - with different weights, predictions should differ) - differences = (result_with_weights.data - result_without_weights.data).abs() - assert differences.sum().sum() > 0, "Sample weights should affect model predictions" +# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# # +# # SPDX-License-Identifier: MPL-2.0 + +# from datetime import timedelta + +# import pytest + +# from openstef_core.datasets import ForecastInputDataset +# from openstef_core.exceptions import NotFittedError +# from openstef_core.types import LeadTime, Q +# from openstef_meta.framework.base_learner import BaseLearnerHyperParams +# from openstef_meta.models.residual_forecaster import ( +# ResidualForecaster, +# ResidualForecasterConfig, +# ResidualHyperParams, +# ) +# from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams +# from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +# from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearHyperParams +# from openstef_models.models.forecasting.xgboost_forecaster import XGBoostHyperParams + + +# @pytest.fixture(params=["gblinear", "lgbmlinear"]) +# def primary_model(request: pytest.FixtureRequest) -> BaseLearnerHyperParams: +# """Fixture to provide different primary models types.""" +# learner_type = request.param +# if learner_type == "gblinear": +# return GBLinearHyperParams() +# if learner_type == "lgbm": +# return LGBMHyperParams() +# if learner_type == "lgbmlinear": +# return LGBMLinearHyperParams() +# return XGBoostHyperParams() + + +# @pytest.fixture(params=["gblinear", "lgbm", "lgbmlinear", "xgboost"]) +# def secondary_model(request: pytest.FixtureRequest) -> BaseLearnerHyperParams: +# """Fixture to provide different secondary models types.""" +# learner_type = request.param +# if learner_type == "gblinear": +# return GBLinearHyperParams() +# if learner_type == "lgbm": +# return LGBMHyperParams() +# if learner_type == "lgbmlinear": +# return LGBMLinearHyperParams() +# return XGBoostHyperParams() + + +# @pytest.fixture +# def base_config( +# primary_model: BaseLearnerHyperParams, +# secondary_model: BaseLearnerHyperParams, +# ) -> ResidualForecasterConfig: +# """Base configuration for Residual forecaster tests.""" + +# params = ResidualHyperParams( +# primary_hyperparams=primary_model, +# secondary_hyperparams=secondary_model, +# ) +# return ResidualForecasterConfig( +# quantiles=[Q(0.1), Q(0.5), Q(0.9)], +# horizons=[LeadTime(timedelta(days=1))], +# hyperparams=params, +# verbosity=False, +# ) + + +# def test_residual_forecaster_fit_predict( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: ResidualForecasterConfig, +# ): +# """Test basic fit and predict workflow with comprehensive output validation.""" +# # Arrange +# expected_quantiles = base_config.quantiles +# forecaster = ResidualForecaster(config=base_config) + +# # Act +# forecaster.fit(sample_forecast_input_dataset) +# result = forecaster.predict(sample_forecast_input_dataset) + +# # Assert +# # Basic functionality +# assert forecaster.is_fitted, "Model should be fitted after calling fit()" + +# # Check that necessary quantiles are present +# expected_columns = [q.format() for q in expected_quantiles] +# assert list(result.data.columns) == expected_columns, ( +# f"Expected columns {expected_columns}, got {list(result.data.columns)}" +# ) + +# # Forecast data quality +# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + +# def test_residual_forecaster_predict_not_fitted_raises_error( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: ResidualForecasterConfig, +# ): +# """Test that predict() raises NotFittedError when called before fit().""" +# # Arrange +# forecaster = ResidualForecaster(config=base_config) + +# # Act & Assert +# with pytest.raises(NotFittedError, match="ResidualForecaster"): +# forecaster.predict(sample_forecast_input_dataset) + + +# def test_residual_forecaster_with_sample_weights( +# sample_dataset_with_weights: ForecastInputDataset, +# base_config: ResidualForecasterConfig, +# ): +# """Test that forecaster works with sample weights and produces different results.""" +# # Arrange +# forecaster_with_weights = ResidualForecaster(config=base_config) + +# # Create dataset without weights for comparison +# data_without_weights = ForecastInputDataset( +# data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), +# sample_interval=sample_dataset_with_weights.sample_interval, +# target_column=sample_dataset_with_weights.target_column, +# forecast_start=sample_dataset_with_weights.forecast_start, +# ) +# forecaster_without_weights = ResidualForecaster(config=base_config) + +# # Act +# forecaster_with_weights.fit(sample_dataset_with_weights) +# forecaster_without_weights.fit(data_without_weights) + +# # Predict using data without sample_weight column (since that's used for training, not prediction) +# result_with_weights = forecaster_with_weights.predict(data_without_weights) +# result_without_weights = forecaster_without_weights.predict(data_without_weights) + +# # Assert +# # Both should produce valid forecasts +# assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" +# assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + +# # Sample weights should affect the model, so results should be different +# # (This is a statistical test - with different weights, predictions should differ) +# differences = (result_with_weights.data - result_without_weights.data).abs() +# assert differences.sum().sum() > 0, "Sample weights should affect model predictions" diff --git a/packages/openstef-meta/tests/models/test_rules_forecaster.py b/packages/openstef-meta/tests/models/test_rules_forecaster.py index 0dfaba4e5..06ae2a41d 100644 --- a/packages/openstef-meta/tests/models/test_rules_forecaster.py +++ b/packages/openstef-meta/tests/models/test_rules_forecaster.py @@ -1,136 +1,136 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -from datetime import timedelta - -import pytest - -from openstef_core.datasets import ForecastInputDataset -from openstef_core.exceptions import NotFittedError -from openstef_core.types import LeadTime, Q -from openstef_meta.models.rules_forecaster import ( - RulesForecaster, - RulesForecasterConfig, - RulesForecasterHyperParams, -) -from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder - - -@pytest.fixture -def base_config() -> RulesForecasterConfig: - """Base configuration for Rules forecaster tests.""" - - params = RulesForecasterHyperParams() - return RulesForecasterConfig( - quantiles=[Q(0.1), Q(0.5), Q(0.9)], - horizons=[LeadTime(timedelta(days=1))], - hyperparams=params, - verbosity=False, - ) - - -def test_rules_forecaster_fit_predict( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: RulesForecasterConfig, -): - """Test basic fit and predict workflow with comprehensive output validation.""" - # Arrange - expected_quantiles = base_config.quantiles - forecaster = RulesForecaster(config=base_config) - - # Act - forecaster.fit(sample_forecast_input_dataset) - result = forecaster.predict(sample_forecast_input_dataset) - - # Assert - # Basic functionality - assert forecaster.is_fitted, "Model should be fitted after calling fit()" - - # Check that necessary quantiles are present - expected_columns = [q.format() for q in expected_quantiles] - assert list(result.data.columns) == expected_columns, ( - f"Expected columns {expected_columns}, got {list(result.data.columns)}" - ) - - # Forecast data quality - assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" - - -def test_rules_forecaster_predict_not_fitted_raises_error( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: RulesForecasterConfig, -): - """Test that predict() raises NotFittedError when called before fit().""" - # Arrange - forecaster = RulesForecaster(config=base_config) - - # Act & Assert - with pytest.raises(NotFittedError, match="RulesForecaster"): - forecaster.predict(sample_forecast_input_dataset) - - -def test_rules_forecaster_with_sample_weights( - sample_dataset_with_weights: ForecastInputDataset, - base_config: RulesForecasterConfig, -): - """Test that forecaster works with sample weights and produces different results.""" - # Arrange - forecaster_with_weights = RulesForecaster(config=base_config) - - # Create dataset without weights for comparison - data_without_weights = ForecastInputDataset( - data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), - sample_interval=sample_dataset_with_weights.sample_interval, - target_column=sample_dataset_with_weights.target_column, - forecast_start=sample_dataset_with_weights.forecast_start, - ) - forecaster_without_weights = RulesForecaster(config=base_config) - - # Act - forecaster_with_weights.fit(sample_dataset_with_weights) - forecaster_without_weights.fit(data_without_weights) - - # Predict using data without sample_weight column (since that's used for training, not prediction) - result_with_weights = forecaster_with_weights.predict(data_without_weights) - result_without_weights = forecaster_without_weights.predict(data_without_weights) - - # Assert - # Both should produce valid forecasts - assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" - assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" - - # Sample weights should affect the model, so results should be different - # (This is a statistical test - with different weights, predictions should differ) - differences = (result_with_weights.data - result_without_weights.data).abs() - assert differences.sum().sum() > 0, "Sample weights should affect model predictions" - - -def test_rules_forecaster_with_additional_features( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: RulesForecasterConfig, -): - """Test that forecaster works with additional features for the final learner.""" - - base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) # type: ignore - - # Arrange - expected_quantiles = base_config.quantiles - forecaster = RulesForecaster(config=base_config) - - # Act - forecaster.fit(sample_forecast_input_dataset) - result = forecaster.predict(sample_forecast_input_dataset) - - # Assert - # Basic functionality - assert forecaster.is_fitted, "Model should be fitted after calling fit()" - - # Check that necessary quantiles are present - expected_columns = [q.format() for q in expected_quantiles] - assert list(result.data.columns) == expected_columns, ( - f"Expected columns {expected_columns}, got {list(result.data.columns)}" - ) - - # Forecast data quality - assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" +# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# # +# # SPDX-License-Identifier: MPL-2.0 + +# from datetime import timedelta + +# import pytest + +# from openstef_core.datasets import ForecastInputDataset +# from openstef_core.exceptions import NotFittedError +# from openstef_core.types import LeadTime, Q +# from openstef_meta.models.rules_forecaster import ( +# RulesForecaster, +# RulesForecasterConfig, +# RulesForecasterHyperParams, +# ) +# from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder + + +# @pytest.fixture +# def base_config() -> RulesForecasterConfig: +# """Base configuration for Rules forecaster tests.""" + +# params = RulesForecasterHyperParams() +# return RulesForecasterConfig( +# quantiles=[Q(0.1), Q(0.5), Q(0.9)], +# horizons=[LeadTime(timedelta(days=1))], +# hyperparams=params, +# verbosity=False, +# ) + + +# def test_rules_forecaster_fit_predict( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: RulesForecasterConfig, +# ): +# """Test basic fit and predict workflow with comprehensive output validation.""" +# # Arrange +# expected_quantiles = base_config.quantiles +# forecaster = RulesForecaster(config=base_config) + +# # Act +# forecaster.fit(sample_forecast_input_dataset) +# result = forecaster.predict(sample_forecast_input_dataset) + +# # Assert +# # Basic functionality +# assert forecaster.is_fitted, "Model should be fitted after calling fit()" + +# # Check that necessary quantiles are present +# expected_columns = [q.format() for q in expected_quantiles] +# assert list(result.data.columns) == expected_columns, ( +# f"Expected columns {expected_columns}, got {list(result.data.columns)}" +# ) + +# # Forecast data quality +# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + +# def test_rules_forecaster_predict_not_fitted_raises_error( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: RulesForecasterConfig, +# ): +# """Test that predict() raises NotFittedError when called before fit().""" +# # Arrange +# forecaster = RulesForecaster(config=base_config) + +# # Act & Assert +# with pytest.raises(NotFittedError, match="RulesForecaster"): +# forecaster.predict(sample_forecast_input_dataset) + + +# def test_rules_forecaster_with_sample_weights( +# sample_dataset_with_weights: ForecastInputDataset, +# base_config: RulesForecasterConfig, +# ): +# """Test that forecaster works with sample weights and produces different results.""" +# # Arrange +# forecaster_with_weights = RulesForecaster(config=base_config) + +# # Create dataset without weights for comparison +# data_without_weights = ForecastInputDataset( +# data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), +# sample_interval=sample_dataset_with_weights.sample_interval, +# target_column=sample_dataset_with_weights.target_column, +# forecast_start=sample_dataset_with_weights.forecast_start, +# ) +# forecaster_without_weights = RulesForecaster(config=base_config) + +# # Act +# forecaster_with_weights.fit(sample_dataset_with_weights) +# forecaster_without_weights.fit(data_without_weights) + +# # Predict using data without sample_weight column (since that's used for training, not prediction) +# result_with_weights = forecaster_with_weights.predict(data_without_weights) +# result_without_weights = forecaster_without_weights.predict(data_without_weights) + +# # Assert +# # Both should produce valid forecasts +# assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" +# assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + +# # Sample weights should affect the model, so results should be different +# # (This is a statistical test - with different weights, predictions should differ) +# differences = (result_with_weights.data - result_without_weights.data).abs() +# assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +# def test_rules_forecaster_with_additional_features( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: RulesForecasterConfig, +# ): +# """Test that forecaster works with additional features for the final learner.""" + +# base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) # type: ignore + +# # Arrange +# expected_quantiles = base_config.quantiles +# forecaster = RulesForecaster(config=base_config) + +# # Act +# forecaster.fit(sample_forecast_input_dataset) +# result = forecaster.predict(sample_forecast_input_dataset) + +# # Assert +# # Basic functionality +# assert forecaster.is_fitted, "Model should be fitted after calling fit()" + +# # Check that necessary quantiles are present +# expected_columns = [q.format() for q in expected_quantiles] +# assert list(result.data.columns) == expected_columns, ( +# f"Expected columns {expected_columns}, got {list(result.data.columns)}" +# ) + +# # Forecast data quality +# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" diff --git a/packages/openstef-meta/tests/models/test_stacking_forecaster.py b/packages/openstef-meta/tests/models/test_stacking_forecaster.py index 33a956d8d..fac92e7cc 100644 --- a/packages/openstef-meta/tests/models/test_stacking_forecaster.py +++ b/packages/openstef-meta/tests/models/test_stacking_forecaster.py @@ -1,136 +1,136 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -from datetime import timedelta - -import pytest - -from openstef_core.datasets import ForecastInputDataset -from openstef_core.exceptions import NotFittedError -from openstef_core.types import LeadTime, Q -from openstef_meta.models.stacking_forecaster import ( - StackingForecaster, - StackingForecasterConfig, - StackingHyperParams, -) -from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder - - -@pytest.fixture -def base_config() -> StackingForecasterConfig: - """Base configuration for Stacking forecaster tests.""" - - params = StackingHyperParams() - return StackingForecasterConfig( - quantiles=[Q(0.1), Q(0.5), Q(0.9)], - horizons=[LeadTime(timedelta(days=1))], - hyperparams=params, - verbosity=False, - ) - - -def test_stacking_forecaster_fit_predict( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: StackingForecasterConfig, -): - """Test basic fit and predict workflow with comprehensive output validation.""" - # Arrange - expected_quantiles = base_config.quantiles - forecaster = StackingForecaster(config=base_config) - - # Act - forecaster.fit(sample_forecast_input_dataset) - result = forecaster.predict(sample_forecast_input_dataset) - - # Assert - # Basic functionality - assert forecaster.is_fitted, "Model should be fitted after calling fit()" - - # Check that necessary quantiles are present - expected_columns = [q.format() for q in expected_quantiles] - assert list(result.data.columns) == expected_columns, ( - f"Expected columns {expected_columns}, got {list(result.data.columns)}" - ) - - # Forecast data quality - assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" - - -def test_stacking_forecaster_predict_not_fitted_raises_error( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: StackingForecasterConfig, -): - """Test that predict() raises NotFittedError when called before fit().""" - # Arrange - forecaster = StackingForecaster(config=base_config) - - # Act & Assert - with pytest.raises(NotFittedError, match="StackingForecaster"): - forecaster.predict(sample_forecast_input_dataset) - - -def test_stacking_forecaster_with_sample_weights( - sample_dataset_with_weights: ForecastInputDataset, - base_config: StackingForecasterConfig, -): - """Test that forecaster works with sample weights and produces different results.""" - # Arrange - forecaster_with_weights = StackingForecaster(config=base_config) - - # Create dataset without weights for comparison - data_without_weights = ForecastInputDataset( - data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), - sample_interval=sample_dataset_with_weights.sample_interval, - target_column=sample_dataset_with_weights.target_column, - forecast_start=sample_dataset_with_weights.forecast_start, - ) - forecaster_without_weights = StackingForecaster(config=base_config) - - # Act - forecaster_with_weights.fit(sample_dataset_with_weights) - forecaster_without_weights.fit(data_without_weights) - - # Predict using data without sample_weight column (since that's used for training, not prediction) - result_with_weights = forecaster_with_weights.predict(data_without_weights) - result_without_weights = forecaster_without_weights.predict(data_without_weights) - - # Assert - # Both should produce valid forecasts - assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" - assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" - - # Sample weights should affect the model, so results should be different - # (This is a statistical test - with different weights, predictions should differ) - differences = (result_with_weights.data - result_without_weights.data).abs() - assert differences.sum().sum() > 0, "Sample weights should affect model predictions" - - -def test_stacking_forecaster_with_additional_features( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: StackingForecasterConfig, -): - """Test that forecaster works with additional features for the final learner.""" - - base_config.hyperparams.combiner_hyperparams.feature_adders = [CyclicFeaturesAdder()] - - # Arrange - expected_quantiles = base_config.quantiles - forecaster = StackingForecaster(config=base_config) - - # Act - forecaster.fit(sample_forecast_input_dataset) - result = forecaster.predict(sample_forecast_input_dataset) - - # Assert - # Basic functionality - assert forecaster.is_fitted, "Model should be fitted after calling fit()" - - # Check that necessary quantiles are present - expected_columns = [q.format() for q in expected_quantiles] - assert list(result.data.columns) == expected_columns, ( - f"Expected columns {expected_columns}, got {list(result.data.columns)}" - ) - - # Forecast data quality - assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" +# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# # +# # SPDX-License-Identifier: MPL-2.0 + +# from datetime import timedelta + +# import pytest + +# from openstef_core.datasets import ForecastInputDataset +# from openstef_core.exceptions import NotFittedError +# from openstef_core.types import LeadTime, Q +# from openstef_meta.models.stacking_forecaster import ( +# StackingForecaster, +# StackingForecasterConfig, +# StackingHyperParams, +# ) +# from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder + + +# @pytest.fixture +# def base_config() -> StackingForecasterConfig: +# """Base configuration for Stacking forecaster tests.""" + +# params = StackingHyperParams() +# return StackingForecasterConfig( +# quantiles=[Q(0.1), Q(0.5), Q(0.9)], +# horizons=[LeadTime(timedelta(days=1))], +# hyperparams=params, +# verbosity=False, +# ) + + +# def test_stacking_forecaster_fit_predict( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: StackingForecasterConfig, +# ): +# """Test basic fit and predict workflow with comprehensive output validation.""" +# # Arrange +# expected_quantiles = base_config.quantiles +# forecaster = StackingForecaster(config=base_config) + +# # Act +# forecaster.fit(sample_forecast_input_dataset) +# result = forecaster.predict(sample_forecast_input_dataset) + +# # Assert +# # Basic functionality +# assert forecaster.is_fitted, "Model should be fitted after calling fit()" + +# # Check that necessary quantiles are present +# expected_columns = [q.format() for q in expected_quantiles] +# assert list(result.data.columns) == expected_columns, ( +# f"Expected columns {expected_columns}, got {list(result.data.columns)}" +# ) + +# # Forecast data quality +# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + +# def test_stacking_forecaster_predict_not_fitted_raises_error( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: StackingForecasterConfig, +# ): +# """Test that predict() raises NotFittedError when called before fit().""" +# # Arrange +# forecaster = StackingForecaster(config=base_config) + +# # Act & Assert +# with pytest.raises(NotFittedError, match="StackingForecaster"): +# forecaster.predict(sample_forecast_input_dataset) + + +# def test_stacking_forecaster_with_sample_weights( +# sample_dataset_with_weights: ForecastInputDataset, +# base_config: StackingForecasterConfig, +# ): +# """Test that forecaster works with sample weights and produces different results.""" +# # Arrange +# forecaster_with_weights = StackingForecaster(config=base_config) + +# # Create dataset without weights for comparison +# data_without_weights = ForecastInputDataset( +# data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), +# sample_interval=sample_dataset_with_weights.sample_interval, +# target_column=sample_dataset_with_weights.target_column, +# forecast_start=sample_dataset_with_weights.forecast_start, +# ) +# forecaster_without_weights = StackingForecaster(config=base_config) + +# # Act +# forecaster_with_weights.fit(sample_dataset_with_weights) +# forecaster_without_weights.fit(data_without_weights) + +# # Predict using data without sample_weight column (since that's used for training, not prediction) +# result_with_weights = forecaster_with_weights.predict(data_without_weights) +# result_without_weights = forecaster_without_weights.predict(data_without_weights) + +# # Assert +# # Both should produce valid forecasts +# assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" +# assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + +# # Sample weights should affect the model, so results should be different +# # (This is a statistical test - with different weights, predictions should differ) +# differences = (result_with_weights.data - result_without_weights.data).abs() +# assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +# def test_stacking_forecaster_with_additional_features( +# sample_forecast_input_dataset: ForecastInputDataset, +# base_config: StackingForecasterConfig, +# ): +# """Test that forecaster works with additional features for the final learner.""" + +# base_config.hyperparams.combiner_hyperparams.feature_adders = [CyclicFeaturesAdder()] + +# # Arrange +# expected_quantiles = base_config.quantiles +# forecaster = StackingForecaster(config=base_config) + +# # Act +# forecaster.fit(sample_forecast_input_dataset) +# result = forecaster.predict(sample_forecast_input_dataset) + +# # Assert +# # Basic functionality +# assert forecaster.is_fitted, "Model should be fitted after calling fit()" + +# # Check that necessary quantiles are present +# expected_columns = [q.format() for q in expected_quantiles] +# assert list(result.data.columns) == expected_columns, ( +# f"Expected columns {expected_columns}, got {list(result.data.columns)}" +# ) + +# # Forecast data quality +# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index ba0e279f5..7d5769f0e 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -25,9 +25,7 @@ from openstef_core.base_model import BaseConfig from openstef_core.mixins import TransformPipeline from openstef_core.types import LeadTime, Q, Quantile, QuantileOrGlobal -from openstef_meta.models.learned_weights_forecaster import LearnedWeightsForecaster -from openstef_meta.models.residual_forecaster import ResidualForecaster -from openstef_meta.models.stacking_forecaster import StackingForecaster +from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback from openstef_models.mixins import ModelIdentifier from openstef_models.models import ForecastingModel @@ -143,16 +141,6 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob description="Hyperparameters for Residual forecaster.", ) - stacking_hyperparams: StackingForecaster.HyperParams = Field( - default=StackingForecaster.HyperParams(), - description="Hyperparameters for Stacking forecaster.", - ) - - learned_weights_hyperparams: LearnedWeightsForecaster.HyperParams = Field( - default=LearnedWeightsForecaster.HyperParams(), - description="Hyperparameters for Learned Weights forecaster.", - ) - location: LocationConfig = Field( default=LocationConfig(), description="Location information for the forecasting workflow.", @@ -437,28 +425,7 @@ def create_forecasting_workflow( postprocessing = [ ConfidenceIntervalApplicator(quantiles=config.quantiles), ] - elif config.model == "learned_weights": - preprocessing = [ - *checks, - *feature_adders, - *feature_standardizers, - Imputer( - selection=Exclude(config.target_column), - imputation_strategy="mean", - fill_future_values=Include(config.energy_price_column), - ), - NaNDropper( - selection=Exclude(config.target_column), - ), - ] - forecaster = LearnedWeightsForecaster( - config=LearnedWeightsForecaster.Config( - quantiles=config.quantiles, - horizons=config.horizons, - hyperparams=config.learned_weights_hyperparams, - ) - ) - postprocessing = [QuantileSorter()] + elif config.model == "residual": preprocessing = [ *checks, @@ -481,28 +448,6 @@ def create_forecasting_workflow( ) ) postprocessing = [QuantileSorter()] - elif config.model == "stacking": - preprocessing = [ - *checks, - *feature_adders, - *feature_standardizers, - Imputer( - selection=Exclude(config.target_column), - imputation_strategy="mean", - fill_future_values=Include(config.energy_price_column), - ), - NaNDropper( - selection=Exclude(config.target_column), - ), - ] - forecaster = StackingForecaster( - config=StackingForecaster.Config( - quantiles=config.quantiles, - horizons=config.horizons, - hyperparams=config.stacking_hyperparams, - ) - ) - postprocessing = [QuantileSorter()] else: msg = f"Unsupported model type: {config.model}" raise ValueError(msg) diff --git a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py index a740ac7c0..542d00448 100644 --- a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py @@ -18,6 +18,7 @@ from openstef_core.datasets import TimeSeriesDataset, VersionedTimeSeriesDataset from openstef_core.datasets.validated_datasets import ForecastDataset from openstef_core.exceptions import NotFittedError, SkipFitting +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_models.mixins import ModelIdentifier, PredictorCallback from openstef_models.mixins.callbacks import WorkflowContext from openstef_models.models.forecasting_model import ForecastingModel, ModelFitResult @@ -117,7 +118,7 @@ class CustomForecastingWorkflow(BaseModel): ... ) # doctest: +SKIP """ - model: ForecastingModel = Field(description="The forecasting model to use.") + model: ForecastingModel | EnsembleForecastingModel = Field(description="The forecasting model to use.") callbacks: list[ForecastingCallback] = Field( default_factory=list[ForecastingCallback], description="List of callbacks to execute during workflow events." ) From 9c6de7daca20af7dc402f8a27bdc2ab08135df26 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 3 Dec 2025 13:13:18 +0100 Subject: [PATCH 049/104] Fixed tests Signed-off-by: Lars van Someren --- .../src/openstef_meta/framework/__init__.py | 16 -- .../framework/meta_forecaster.py | 0 .../src/openstef_meta/mixins/__init__.py | 1 + .../src/openstef_meta/mixins/contributions.py | 16 ++ .../models/ensemble_forecasting_model.py | 84 ++++----- .../forecast_combiners/forecast_combiner.py | 27 ++- .../models/forecasting/residual_forecaster.py | 37 ++-- .../models/forecast_combiners/conftest.py | 57 ++++++ .../test_learned_weights_combiner.py | 95 ++++++++++ .../forecast_combiners/test_rules_combiner.py | 64 +++++++ .../test_stacking_combiner.py | 85 +++++++++ .../forecasting/test_residual_forecaster.py | 142 +++++++++++++++ .../test_ensemble_forecasting_model.py} | 9 +- .../models/test_learned_weights_forecaster.py | 171 ------------------ .../tests/models/test_residual_forecaster.py | 142 --------------- .../tests/models/test_rules_forecaster.py | 136 -------------- .../tests/models/test_stacking_forecaster.py | 136 -------------- .../tests/utils/test_datasets.py | 18 +- .../openstef_models/estimators/__init__.py | 7 - .../src/openstef_models/estimators/hybrid.py | 146 --------------- 20 files changed, 549 insertions(+), 840 deletions(-) delete mode 100644 packages/openstef-meta/src/openstef_meta/framework/__init__.py delete mode 100644 packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py create mode 100644 packages/openstef-meta/src/openstef_meta/mixins/__init__.py create mode 100644 packages/openstef-meta/src/openstef_meta/mixins/contributions.py create mode 100644 packages/openstef-meta/tests/models/forecast_combiners/conftest.py create mode 100644 packages/openstef-meta/tests/models/forecast_combiners/test_learned_weights_combiner.py create mode 100644 packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py create mode 100644 packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py create mode 100644 packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py rename packages/openstef-meta/{test_forecasting_model.py => tests/models/test_ensemble_forecasting_model.py} (98%) delete mode 100644 packages/openstef-meta/tests/models/test_learned_weights_forecaster.py delete mode 100644 packages/openstef-meta/tests/models/test_residual_forecaster.py delete mode 100644 packages/openstef-meta/tests/models/test_rules_forecaster.py delete mode 100644 packages/openstef-meta/tests/models/test_stacking_forecaster.py delete mode 100644 packages/openstef-models/src/openstef_models/estimators/__init__.py delete mode 100644 packages/openstef-models/src/openstef_models/estimators/hybrid.py diff --git a/packages/openstef-meta/src/openstef_meta/framework/__init__.py b/packages/openstef-meta/src/openstef_meta/framework/__init__.py deleted file mode 100644 index 0775b88f1..000000000 --- a/packages/openstef-meta/src/openstef_meta/framework/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""This module provides meta-forecasting models and related hyperparameters for the OpenSTEF project.""" - -from .base_learner import BaseLearner, BaseLearnerHyperParams -from ..models.combiners.forecast_combiner import ForecastCombiner, ForecastCombinerHyperParams -from .meta_forecaster import MetaForecaster - -__all__ = [ - "BaseLearner", - "BaseLearnerHyperParams", - "ForecastCombiner", - "ForecastCombinerHyperParams", - "MetaForecaster", -] diff --git a/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py b/packages/openstef-meta/src/openstef_meta/framework/meta_forecaster.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/packages/openstef-meta/src/openstef_meta/mixins/__init__.py b/packages/openstef-meta/src/openstef_meta/mixins/__init__.py new file mode 100644 index 000000000..71d67869d --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/mixins/__init__.py @@ -0,0 +1 @@ +"""Mixins for OpenSTEF-Meta package.""" diff --git a/packages/openstef-meta/src/openstef_meta/mixins/contributions.py b/packages/openstef-meta/src/openstef_meta/mixins/contributions.py new file mode 100644 index 000000000..9fb68377c --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/mixins/contributions.py @@ -0,0 +1,16 @@ +"""ExplainableMetaForecaster Mixin.""" + +from abc import ABC, abstractmethod + +import pandas as pd + +from openstef_core.datasets import ForecastInputDataset + + +class ContributionsMixin(ABC): + """Mixin class for models that support contribution analysis.""" + + @abstractmethod + def predict_contributions(self, X: ForecastInputDataset) -> pd.DataFrame: + """Get feature contributions for the given input data X.""" + raise NotImplementedError("This method should be implemented by subclasses.") diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index c7eaf3c1f..5ff81cd8e 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -215,20 +215,17 @@ def fit( ) # Prepare input datasets for metrics calculation - input_data_train = self.prepare_input(data=data) - input_data_val = self.prepare_input(data=data_val) if data_val else None - input_data_test = self.prepare_input(data=data_test) if data_test else None - metrics_train = self._predict_and_score(input_data=input_data_train) - metrics_val = self._predict_and_score(input_data=input_data_val) if input_data_val else None - metrics_test = self._predict_and_score(input_data=input_data_test) if input_data_test else None + metrics_train = self._predict_and_score(data=data) + metrics_val = self._predict_and_score(data=data_val) if data_val else None + metrics_test = self._predict_and_score(data=data_test) if data_test else None metrics_full = self.score(data=data) return ModelFitResult( input_dataset=data, - input_data_train=input_data_train, - input_data_val=input_data_val, - input_data_test=input_data_test, + input_data_train=ForecastInputDataset.from_timeseries(data), + input_data_val=ForecastInputDataset.from_timeseries(data_val) if data_val else None, + input_data_test=ForecastInputDataset.from_timeseries(data_test) if data_test else None, metrics_train=metrics_train, metrics_val=metrics_val, metrics_test=metrics_test, @@ -274,53 +271,29 @@ def _preprocess_fit_forecasters( predictions_raw, target_series=data.data[self.target_column] ) - def _predict_forecasters(self, data: TimeSeriesDataset) -> EnsembleForecastDataset: + def _predict_forecasters( + self, data: TimeSeriesDataset, forecast_start: datetime | None = None + ) -> EnsembleForecastDataset: """Generate predictions from base learners. Args: data: Input data for prediction. + forecast_start: Optional start time for forecasts. Returns: DataFrame containing base learner predictions. """ base_predictions: dict[str, ForecastDataset] = {} for name, forecaster in self.forecasters.items(): - forecaster_data = self.prepare_input(data, forecaster_name=name) - preds = forecaster.predict(data=forecaster_data) + forecaster_data = self.prepare_input(data, forecaster_name=name, forecast_start=forecast_start) + preds_raw = forecaster.predict(data=forecaster_data) + preds = self.postprocessing.transform(data=preds_raw) base_predictions[name] = preds return EnsembleForecastDataset.from_forecast_datasets( base_predictions, target_series=data.data[self.target_column] ) - @override - def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: - """Generate forecasts using the trained model. - - Transforms input data through the preprocessing pipeline, generates predictions - using the underlying forecaster, and applies postprocessing transformations. - - Args: - data: Input time series data for generating forecasts. - forecast_start: Starting time for forecasts. If None, uses data end time. - - Returns: - Processed forecast dataset with predictions and uncertainty estimates. - - Raises: - NotFittedError: If the model hasn't been trained yet. - """ - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - # Transform the input data to a valid forecast input - input_data = self.prepare_input(data=data, forecast_start=forecast_start) - - # Generate predictions - raw_predictions = self._predict(input_data=input_data) - - return self.postprocessing.transform(data=raw_predictions) - def prepare_input( self, data: TimeSeriesDataset, @@ -369,21 +342,33 @@ def prepare_input( forecast_start=forecast_start, ) - def _predict_and_score(self, input_data: ForecastInputDataset) -> SubsetMetric: - prediction_raw = self._predict(input_data=input_data) - prediction = self.postprocessing.transform(data=prediction_raw) + def _predict_and_score(self, data: TimeSeriesDataset) -> SubsetMetric: + prediction = self.predict(data) return self._calculate_score(prediction=prediction) - def _predict(self, input_data: ForecastInputDataset) -> ForecastDataset: + def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: + """Generate forecasts for the provided dataset. + + Args: + data: Input time series dataset for prediction. + forecast_start: Optional start time for forecasts. + Returns: + ForecastDataset containing the generated forecasts. + + Raises: + NotFittedError: If the model has not been fitted yet. + """ if not self.is_fitted: raise NotFittedError(self.__class__.__name__) - ensemble_predictions = self._predict_forecasters(data=input_data) + ensemble_predictions = self._predict_forecasters(data=data, forecast_start=forecast_start) additional_features = ( ForecastInputDataset.from_timeseries( - self.combiner_preprocessing.transform(data=input_data), target_column=self.target_column + self.combiner_preprocessing.transform(data=data), + target_column=self.target_column, + forecast_start=forecast_start, ) if len(self.combiner_preprocessing.transforms) > 0 else None @@ -394,7 +379,8 @@ def _predict(self, input_data: ForecastInputDataset) -> ForecastDataset: data=ensemble_predictions, additional_features=additional_features, ) - return restore_target(dataset=prediction, original_dataset=input_data, target_column=self.target_column) + + return restore_target(dataset=prediction, original_dataset=data, target_column=self.target_column) def score( self, @@ -429,14 +415,14 @@ def _calculate_score(self, prediction: ForecastDataset) -> SubsetMetric: # Needs only one horizon since we are using only a single prediction step # If a more comprehensive test is needed, a backtest should be run. config=EvaluationConfig(available_ats=[], lead_times=[self.config[0].max_horizon]), - quantiles=self.combiner.config.quantiles, + quantiles=self.config[0].quantiles, # Similarly windowed metrics are not relevant for single predictions. window_metric_providers=[], global_metric_providers=self.evaluation_metrics, ) evaluation_result = pipeline.run_for_subset( - filtering=self.combiner.config.max_horizon, + filtering=self.config[0].max_horizon, predictions=prediction, ) global_metric = evaluation_result.get_global_metric() diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index 8a12027d9..09b4e9017 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -81,15 +81,6 @@ def with_horizon(self, horizon: LeadTime) -> Self: """ return self.model_copy(update={"horizons": [horizon]}) - @classmethod - def combiner_class(cls) -> type["ForecastCombiner"]: - """Get the associated Forecaster class for this configuration. - - Returns: - The Forecaster class that uses this configuration. - """ - raise NotImplementedError("Subclasses must implement combiner_class") - class ForecastCombiner(Predictor[EnsembleForecastDataset, ForecastDataset]): """Combines base learner predictions for each quantile into final predictions.""" @@ -158,3 +149,21 @@ def _prepare_input_data( def is_fitted(self) -> bool: """Indicates whether the final learner has been fitted.""" raise NotImplementedError("Subclasses must implement the is_fitted property.") + + @abstractmethod + def predict_contributions( + self, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, + ) -> pd.DataFrame: + """Generate final predictions based on base learner predictions. + + Args: + data: EnsembleForecastDataset containing base learner predictions. + data_val: Optional EnsembleForecastDataset for validation during prediction. Will be ignored + additional_features: Optional ForecastInputDataset containing additional features for the final learner. + + Returns: + ForecastDataset containing the final contributions. + """ + raise NotImplementedError("Subclasses must implement the predict method.") diff --git a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py index efd8f50bc..96cb8911f 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py @@ -12,8 +12,6 @@ import logging from typing import override -from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster, LGBMLinearHyperParams -from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster, XGBoostHyperParams import pandas as pd from pydantic import Field @@ -23,8 +21,6 @@ ) from openstef_core.mixins import HyperParams from openstef_core.types import Quantile - - from openstef_models.models.forecasting.forecaster import ( Forecaster, ForecasterConfig, @@ -34,22 +30,24 @@ GBLinearHyperParams, ) from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMHyperParams +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster, LGBMLinearHyperParams +from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster, XGBoostHyperParams logger = logging.getLogger(__name__) -BaseLearner = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster -BaseLearnerHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams +ResidualBaseForecaster = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster +ResidualBaseForecasterHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams class ResidualHyperParams(HyperParams): """Hyperparameters for Stacked LGBM GBLinear Regressor.""" - primary_hyperparams: BaseLearnerHyperParams = Field( + primary_hyperparams: ResidualBaseForecasterHyperParams = Field( default=GBLinearHyperParams(), description="Primary model hyperparams. Defaults to GBLinearHyperParams.", ) - secondary_hyperparams: BaseLearnerHyperParams = Field( + secondary_hyperparams: ResidualBaseForecasterHyperParams = Field( default=LGBMHyperParams(), description="Hyperparameters for the final learner. Defaults to LGBMHyperparams.", ) @@ -80,22 +78,22 @@ def __init__(self, config: ResidualForecasterConfig) -> None: """Initialize the Hybrid forecaster.""" self._config = config - self._primary_model: BaseLearner = self._init_base_learners( + self._primary_model: ResidualBaseForecaster = self._init_base_learners( config=config, base_hyperparams=[config.hyperparams.primary_hyperparams] )[0] - self._secondary_model: list[BaseLearner] = self._init_secondary_model( + self._secondary_model: list[ResidualBaseForecaster] = self._init_secondary_model( hyperparams=config.hyperparams.secondary_hyperparams ) self._is_fitted = False - def _init_secondary_model(self, hyperparams: BaseLearnerHyperParams) -> list[BaseLearner]: + def _init_secondary_model(self, hyperparams: ResidualBaseForecasterHyperParams) -> list[ResidualBaseForecaster]: """Initialize secondary model for residual forecasting. Returns: list[Forecaster]: List containing the initialized secondary model forecaster. """ - models: list[BaseLearner] = [] + models: list[ResidualBaseForecaster] = [] # Different datasets per quantile, so we need a model per quantile for q in self.config.quantiles: config = self._config.model_copy(update={"quantiles": [q]}) @@ -106,14 +104,14 @@ def _init_secondary_model(self, hyperparams: BaseLearnerHyperParams) -> list[Bas @staticmethod def _init_base_learners( - config: ForecasterConfig, base_hyperparams: list[BaseLearnerHyperParams] - ) -> list[BaseLearner]: + config: ForecasterConfig, base_hyperparams: list[ResidualBaseForecasterHyperParams] + ) -> list[ResidualBaseForecaster]: """Initialize base learners based on provided hyperparameters. Returns: list[Forecaster]: List of initialized base learner forecasters. """ - base_learners: list[BaseLearner] = [] + base_learners: list[ResidualBaseForecaster] = [] horizons = config.horizons quantiles = config.quantiles @@ -256,5 +254,14 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: sample_interval=data.sample_interval, ) + @property + def config(self) -> ResidualForecasterConfig: + """Get the configuration of the ResidualForecaster. + + Returns: + ResidualForecasterConfig: The configuration of the forecaster. + """ + return self._config + __all__ = ["ResidualForecaster", "ResidualForecasterConfig", "ResidualHyperParams"] diff --git a/packages/openstef-meta/tests/models/forecast_combiners/conftest.py b/packages/openstef-meta/tests/models/forecast_combiners/conftest.py new file mode 100644 index 000000000..c80385a07 --- /dev/null +++ b/packages/openstef-meta/tests/models/forecast_combiners/conftest.py @@ -0,0 +1,57 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from collections.abc import Callable +from datetime import timedelta + +import numpy as np +import pandas as pd +import pytest + +from openstef_core.datasets.validated_datasets import ForecastDataset +from openstef_meta.utils.datasets import EnsembleForecastDataset + + +@pytest.fixture +def forecast_dataset_factory() -> Callable[[], ForecastDataset]: + def _make() -> ForecastDataset: + rng = np.random.default_rng() + df = pd.DataFrame( + data={ + "quantile_P10": [90, 180, 270], + "quantile_P50": [100, 200, 300], + "quantile_P90": [110, 220, 330], + "load": [100, 200, 300], + }, + index=pd.to_datetime([ + "2023-01-01T10:00:00", + "2023-01-01T11:00:00", + "2023-01-01T12:00:00", + ]), + ) + df += rng.normal(0, 1, df.shape) # Add slight noise to avoid perfect predictions + + df["available_at"] = pd.to_datetime([ + "2023-01-01T09:50:00", + "2023-01-01T10:55:00", + "2023-01-01T12:10:00", + ]) + + return ForecastDataset( + data=df, + sample_interval=timedelta(hours=1), + target_column="load", + ) + + return _make + + +@pytest.fixture +def ensemble_dataset(forecast_dataset_factory: Callable[[], ForecastDataset]) -> EnsembleForecastDataset: + base_learner_output = { + "GBLinearForecaster": forecast_dataset_factory(), + "LGBMForecaster": forecast_dataset_factory(), + } + + return EnsembleForecastDataset.from_forecast_datasets(base_learner_output) diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_learned_weights_combiner.py b/packages/openstef-meta/tests/models/forecast_combiners/test_learned_weights_combiner.py new file mode 100644 index 000000000..ac7a4c380 --- /dev/null +++ b/packages/openstef-meta/tests/models/forecast_combiners/test_learned_weights_combiner.py @@ -0,0 +1,95 @@ +# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# # +# # SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pytest + +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_meta.models.forecast_combiners.learned_weights_combiner import ( + WeightsCombiner, + WeightsCombinerConfig, +) +from openstef_meta.utils.datasets import EnsembleForecastDataset + + +@pytest.fixture(params=["lgbm", "xgboost", "rf", "logistic"]) +def classifier(request: pytest.FixtureRequest) -> str: + """Fixture to provide different classifier types for LearnedWeightsCombiner tests.""" + return request.param + + +@pytest.fixture +def config(classifier: str) -> WeightsCombinerConfig: + """Fixture to create WeightsCombinerConfig based on the classifier type.""" + if classifier == "lgbm": + hp = WeightsCombiner.LGBMHyperParams(n_leaves=5, n_estimators=10) + elif classifier == "xgboost": + hp = WeightsCombiner.XGBHyperParams(n_estimators=10) + elif classifier == "rf": + hp = WeightsCombiner.RFHyperParams(n_estimators=10, n_leaves=5) + elif classifier == "logistic": + hp = WeightsCombiner.LogisticHyperParams() + else: + msg = f"Unsupported classifier type: {classifier}" + raise ValueError(msg) + + return WeightsCombiner.Config( + hyperparams=hp, quantiles=[Q(0.1), Q(0.5), Q(0.9)], horizons=[LeadTime(timedelta(days=1))] + ) + + +@pytest.fixture +def forecaster(config: WeightsCombinerConfig) -> WeightsCombiner: + return WeightsCombiner(config) + + +def test_initialization(forecaster: WeightsCombiner): + assert isinstance(forecaster, WeightsCombiner) + + +def test_quantile_weights_combiner__fit_predict( + ensemble_dataset: EnsembleForecastDataset, + config: WeightsCombinerConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = config.quantiles + forecaster = WeightsCombiner(config=config) + + # Act + forecaster.fit(ensemble_dataset) + result = forecaster.predict(ensemble_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + expected_columns.append("load") + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + # Since forecast is deterministic with fixed random seed, check value spread (vectorized) + # All quantiles should have some variation (not all identical values) + stds = result.data.std() + assert (stds > 0).all(), f"All columns should have variation, got stds: {dict(stds)}" + + +def test_weights_combiner_not_fitted_error( + ensemble_dataset: EnsembleForecastDataset, + config: WeightsCombinerConfig, +): + """Test that NotFittedError is raised when predicting before fitting.""" + # Arrange + forecaster = WeightsCombiner(config=config) + # Act & Assert + with pytest.raises(NotFittedError): + forecaster.predict(ensemble_dataset) diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py b/packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py new file mode 100644 index 000000000..6ee938ef4 --- /dev/null +++ b/packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py @@ -0,0 +1,64 @@ +# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# # +# # SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_meta.models.forecast_combiners.rules_combiner import ( + RulesCombiner, + RulesCombinerConfig, +) +from openstef_meta.utils.datasets import EnsembleForecastDataset + + +@pytest.fixture +def config() -> RulesCombinerConfig: + """Fixture to create RulesCombinerConfig.""" + return RulesCombiner.Config( + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime(timedelta(days=1))], + ) + + +@pytest.fixture +def forecaster(config: RulesCombinerConfig) -> RulesCombiner: + return RulesCombiner(config=config) + + +def test_initialization(forecaster: RulesCombiner): + assert isinstance(forecaster, RulesCombiner) + + +def test_quantile_weights_combiner__fit_predict( + ensemble_dataset: EnsembleForecastDataset, + config: RulesCombinerConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = config.quantiles + forecaster = RulesCombiner(config=config) + additional_features = ensemble_dataset.select_quantile(Q(0.5)) + additional_features.data = additional_features.data.drop(columns=additional_features.target_column) + additional_features.data.columns = ["feature1", "feature2"] + + # Act + forecaster.fit(ensemble_dataset, additional_features=additional_features) + result = forecaster.predict(ensemble_dataset, additional_features=additional_features) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py b/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py new file mode 100644 index 000000000..abcd9f66c --- /dev/null +++ b/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py @@ -0,0 +1,85 @@ +# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# # +# # SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pytest + +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_meta.models.forecast_combiners.stacking_combiner import ( + StackingCombiner, + StackingCombinerConfig, +) +from openstef_meta.utils.datasets import EnsembleForecastDataset + + +@pytest.fixture(params=["lgbm", "gblinear"]) +def regressor(request: pytest.FixtureRequest) -> str: + """Fixture to provide different classifier types for LearnedWeightsCombiner tests.""" + return request.param + + +@pytest.fixture +def config(regressor: str) -> StackingCombinerConfig: + """Fixture to create StackingCombinerConfig based on the classifier type.""" + if regressor == "lgbm": + hp = StackingCombiner.LGBMHyperParams(num_leaves=5, n_estimators=10) + elif regressor == "gblinear": + hp = StackingCombiner.GBLinearHyperParams(n_steps=10) + else: + msg = f"Unsupported regressor type: {regressor}" + raise ValueError(msg) + + return StackingCombiner.Config( + hyperparams=hp, quantiles=[Q(0.1), Q(0.5), Q(0.9)], horizons=[LeadTime(timedelta(days=1))] + ) + + +@pytest.fixture +def forecaster(config: StackingCombinerConfig) -> StackingCombiner: + return StackingCombiner(config) + + +def test_initialization(forecaster: StackingCombiner): + assert isinstance(forecaster, StackingCombiner) + + +def test_quantile_weights_combiner__fit_predict( + ensemble_dataset: EnsembleForecastDataset, + config: StackingCombinerConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = config.quantiles + forecaster = StackingCombiner(config=config) + + # Act + forecaster.fit(ensemble_dataset) + result = forecaster.predict(ensemble_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + +def test_stacking_combiner_not_fitted_error( + ensemble_dataset: EnsembleForecastDataset, + config: StackingCombinerConfig, +): + """Test that NotFittedError is raised when predicting before fitting.""" + # Arrange + forecaster = StackingCombiner(config=config) + # Act & Assert + with pytest.raises(NotFittedError): + forecaster.predict(ensemble_dataset) diff --git a/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py b/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py new file mode 100644 index 000000000..37f30ab2d --- /dev/null +++ b/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py @@ -0,0 +1,142 @@ +# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# # +# # SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pytest + +from openstef_core.datasets import ForecastInputDataset +from openstef_core.exceptions import NotFittedError +from openstef_core.types import LeadTime, Q +from openstef_meta.models.forecasting.residual_forecaster import ( + ResidualBaseForecasterHyperParams, + ResidualForecaster, + ResidualForecasterConfig, + ResidualHyperParams, +) +from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams +from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams +from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearHyperParams +from openstef_models.models.forecasting.xgboost_forecaster import XGBoostHyperParams + + +@pytest.fixture(params=["gblinear", "lgbmlinear"]) +def primary_model(request: pytest.FixtureRequest) -> ResidualBaseForecasterHyperParams: + """Fixture to provide different primary models types.""" + learner_type = request.param + if learner_type == "gblinear": + return GBLinearHyperParams() + if learner_type == "lgbm": + return LGBMHyperParams() + if learner_type == "lgbmlinear": + return LGBMLinearHyperParams() + return XGBoostHyperParams() + + +@pytest.fixture(params=["gblinear", "lgbm", "lgbmlinear", "xgboost"]) +def secondary_model(request: pytest.FixtureRequest) -> ResidualBaseForecasterHyperParams: + """Fixture to provide different secondary models types.""" + learner_type = request.param + if learner_type == "gblinear": + return GBLinearHyperParams() + if learner_type == "lgbm": + return LGBMHyperParams() + if learner_type == "lgbmlinear": + return LGBMLinearHyperParams() + return XGBoostHyperParams() + + +@pytest.fixture +def base_config( + primary_model: ResidualBaseForecasterHyperParams, + secondary_model: ResidualBaseForecasterHyperParams, +) -> ResidualForecasterConfig: + """Base configuration for Residual forecaster tests.""" + + params = ResidualHyperParams( + primary_hyperparams=primary_model, + secondary_hyperparams=secondary_model, + ) + return ResidualForecasterConfig( + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime(timedelta(days=1))], + hyperparams=params, + verbosity=False, + ) + + +def test_residual_forecaster_fit_predict( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: ResidualForecasterConfig, +): + """Test basic fit and predict workflow with comprehensive output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = ResidualForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + expected_columns = [q.format() for q in expected_quantiles] + assert list(result.data.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.data.columns)}" + ) + + # Forecast data quality + assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" + + +def test_residual_forecaster_predict_not_fitted_raises_error( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: ResidualForecasterConfig, +): + """Test that predict() raises NotFittedError when called before fit().""" + # Arrange + forecaster = ResidualForecaster(config=base_config) + + # Act & Assert + with pytest.raises(NotFittedError, match="ResidualForecaster"): + forecaster.predict(sample_forecast_input_dataset) + + +def test_residual_forecaster_with_sample_weights( + sample_dataset_with_weights: ForecastInputDataset, + base_config: ResidualForecasterConfig, +): + """Test that forecaster works with sample weights and produces different results.""" + # Arrange + forecaster_with_weights = ResidualForecaster(config=base_config) + + # Create dataset without weights for comparison + data_without_weights = ForecastInputDataset( + data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), + sample_interval=sample_dataset_with_weights.sample_interval, + target_column=sample_dataset_with_weights.target_column, + forecast_start=sample_dataset_with_weights.forecast_start, + ) + forecaster_without_weights = ResidualForecaster(config=base_config) + + # Act + forecaster_with_weights.fit(sample_dataset_with_weights) + forecaster_without_weights.fit(data_without_weights) + + # Predict using data without sample_weight column (since that's used for training, not prediction) + result_with_weights = forecaster_with_weights.predict(data_without_weights) + result_without_weights = forecaster_without_weights.predict(data_without_weights) + + # Assert + # Both should produce valid forecasts + assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" + assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" + + # Sample weights should affect the model, so results should be different + # (This is a statistical test - with different weights, predictions should differ) + differences = (result_with_weights.data - result_without_weights.data).abs() + assert differences.sum().sum() > 0, "Sample weights should affect model predictions" diff --git a/packages/openstef-meta/test_forecasting_model.py b/packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py similarity index 98% rename from packages/openstef-meta/test_forecasting_model.py rename to packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py index 008199689..126163bd9 100644 --- a/packages/openstef-meta/test_forecasting_model.py +++ b/packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py @@ -3,7 +3,6 @@ from typing import override import numpy as np -from openstef_core.mixins.predictor import HyperParams import pandas as pd import pytest @@ -11,6 +10,7 @@ from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset from openstef_core.datasets.validated_datasets import ForecastDataset from openstef_core.exceptions import NotFittedError +from openstef_core.mixins.predictor import HyperParams from openstef_core.mixins.transform import TransformPipeline from openstef_core.testing import assert_timeseries_equal, create_synthetic_forecasting_dataset from openstef_core.types import LeadTime, Q @@ -62,6 +62,11 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: class SimpleCombiner(ForecastCombiner): """Simple combiner that averages base learner predictions.""" + def __init__(self, config: ForecastCombinerConfig): + self._config = config + self._is_fitted = False + self.quantiles = config.quantiles + def fit( self, data: EnsembleForecastDataset, @@ -128,7 +133,6 @@ def model() -> EnsembleForecastingModel: combiner = SimpleCombiner( config=combiner_config, - quantiles=quantiles, ) # Act @@ -231,7 +235,6 @@ def test_forecasting_model__pickle_roundtrip(): combiner = SimpleCombiner( config=combiner_config, - quantiles=quantiles, ) original_model = EnsembleForecastingModel( diff --git a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py b/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py deleted file mode 100644 index 667477191..000000000 --- a/packages/openstef-meta/tests/models/test_learned_weights_forecaster.py +++ /dev/null @@ -1,171 +0,0 @@ -# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# # -# # SPDX-License-Identifier: MPL-2.0 - -# from datetime import timedelta - -# import pytest -# from lightgbm import LGBMClassifier -# from sklearn.linear_model import LogisticRegression -# from xgboost import XGBClassifier - -# from openstef_core.datasets import ForecastInputDataset -# from openstef_core.exceptions import NotFittedError -# from openstef_core.types import LeadTime, Q -# from openstef_meta.models.learned_weights_forecaster import ( -# Classifier, -# LearnedWeightsForecaster, -# LearnedWeightsForecasterConfig, -# LearnedWeightsHyperParams, -# LGBMCombinerHyperParams, -# LogisticCombinerHyperParams, -# RFCombinerHyperParams, -# WeightsCombiner, -# WeightsCombinerHyperParams, -# XGBCombinerHyperParams, -# ) -# from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder - - -# @pytest.fixture(params=["rf", "lgbm", "xgboost", "logistic"]) -# def combiner_hyperparams(request: pytest.FixtureRequest) -> WeightsCombinerHyperParams: -# """Fixture to provide different primary models types.""" -# learner_type = request.param -# if learner_type == "rf": -# return RFCombinerHyperParams() -# if learner_type == "lgbm": -# return LGBMCombinerHyperParams() -# if learner_type == "xgboost": -# return XGBCombinerHyperParams() -# return LogisticCombinerHyperParams() - - -# @pytest.fixture -# def base_config(combiner_hyperparams: WeightsCombinerHyperParams) -> LearnedWeightsForecasterConfig: -# """Base configuration for LearnedWeights forecaster tests.""" - -# params = LearnedWeightsHyperParams( -# combiner_hyperparams=combiner_hyperparams, -# ) -# return LearnedWeightsForecasterConfig( -# quantiles=[Q(0.1), Q(0.5), Q(0.9)], -# horizons=[LeadTime(timedelta(days=1))], -# hyperparams=params, -# verbosity=False, -# ) - - -# def test_forecast_combiner_corresponds_to_hyperparams(base_config: LearnedWeightsForecasterConfig): -# """Test that the forecast combiner learner corresponds to the specified hyperparameters.""" -# forecaster = LearnedWeightsForecaster(config=base_config) -# forecast_combiner = forecaster._forecast_combiner -# assert isinstance(forecast_combiner, WeightsCombiner) -# classifier = forecast_combiner.models[0] - -# mapping: dict[type[WeightsCombinerHyperParams], type[Classifier]] = { -# RFCombinerHyperParams: LGBMClassifier, -# LGBMCombinerHyperParams: LGBMClassifier, -# XGBCombinerHyperParams: XGBClassifier, -# LogisticCombinerHyperParams: LogisticRegression, -# } -# expected_type = mapping[type(base_config.hyperparams.combiner_hyperparams)] - -# assert isinstance(classifier, expected_type), ( -# f"Final learner type {type(forecast_combiner)} does not match expected type {expected_type}" -# ) - - -# def test_learned_weights_forecaster_fit_predict( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: LearnedWeightsForecasterConfig, -# ): -# """Test basic fit and predict workflow with comprehensive output validation.""" -# # Arrange -# expected_quantiles = base_config.quantiles -# forecaster = LearnedWeightsForecaster(config=base_config) - -# # Act -# forecaster.fit(sample_forecast_input_dataset) -# result = forecaster.predict(sample_forecast_input_dataset) - -# # Assert -# # Basic functionality -# assert forecaster.is_fitted, "Model should be fitted after calling fit()" - -# # Check that necessary quantiles are present -# required_columns = [q.format() for q in expected_quantiles] -# assert all(col in result.data.columns for col in required_columns), ( -# f"Expected columns {required_columns}, got {list(result.data.columns)}" -# ) - -# # Forecast data quality -# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" - - -# def test_learned_weights_forecaster_predict_not_fitted_raises_error( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: LearnedWeightsForecasterConfig, -# ): -# """Test that predict() raises NotFittedError when called before fit().""" -# # Arrange -# forecaster = LearnedWeightsForecaster(config=base_config) - -# # Act & Assert -# with pytest.raises(NotFittedError, match="LearnedWeightsForecaster"): -# forecaster.predict(sample_forecast_input_dataset) - - -# def test_learned_weights_forecaster_with_sample_weights( -# sample_dataset_with_weights: ForecastInputDataset, -# base_config: LearnedWeightsForecasterConfig, -# ): -# """Test that forecaster works with sample weights and produces different results.""" -# # Arrange -# forecaster_with_weights = LearnedWeightsForecaster(config=base_config) - -# # Create dataset without weights for comparison -# data_without_weights = ForecastInputDataset( -# data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), -# sample_interval=sample_dataset_with_weights.sample_interval, -# target_column=sample_dataset_with_weights.target_column, -# forecast_start=sample_dataset_with_weights.forecast_start, -# ) -# forecaster_without_weights = LearnedWeightsForecaster(config=base_config) - -# # Act -# forecaster_with_weights.fit(sample_dataset_with_weights) -# forecaster_without_weights.fit(data_without_weights) - -# # Predict using data without sample_weight column (since that's used for training, not prediction) -# result_with_weights = forecaster_with_weights.predict(sample_dataset_with_weights) -# result_without_weights = forecaster_without_weights.predict(data_without_weights) - -# # Assert -# # Both should produce valid forecasts -# assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" -# assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" - -# # Sample weights should affect the model, so results should be different -# # (This is a statistical test - with different weights, predictions should differ) -# differences = (result_with_weights.data - result_without_weights.data).abs() -# assert differences.sum().sum() > 0, "Sample weights should affect model predictions" - - -# def test_learned_weights_forecaster_with_additional_features( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: LearnedWeightsForecasterConfig, -# ): -# """Test that forecaster works with additional features for the final learner.""" -# # Arrange -# # Add a simple feature adder that adds a constant feature - -# base_config.hyperparams.combiner_hyperparams.feature_adders.append(CyclicFeaturesAdder()) # type: ignore -# forecaster = LearnedWeightsForecaster(config=base_config) - -# # Act -# forecaster.fit(sample_forecast_input_dataset) -# result = forecaster.predict(sample_forecast_input_dataset) - -# # Assert -# assert forecaster.is_fitted, "Model should be fitted after calling fit()" -# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" diff --git a/packages/openstef-meta/tests/models/test_residual_forecaster.py b/packages/openstef-meta/tests/models/test_residual_forecaster.py deleted file mode 100644 index c21111d92..000000000 --- a/packages/openstef-meta/tests/models/test_residual_forecaster.py +++ /dev/null @@ -1,142 +0,0 @@ -# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# # -# # SPDX-License-Identifier: MPL-2.0 - -# from datetime import timedelta - -# import pytest - -# from openstef_core.datasets import ForecastInputDataset -# from openstef_core.exceptions import NotFittedError -# from openstef_core.types import LeadTime, Q -# from openstef_meta.framework.base_learner import BaseLearnerHyperParams -# from openstef_meta.models.residual_forecaster import ( -# ResidualForecaster, -# ResidualForecasterConfig, -# ResidualHyperParams, -# ) -# from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams -# from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams -# from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearHyperParams -# from openstef_models.models.forecasting.xgboost_forecaster import XGBoostHyperParams - - -# @pytest.fixture(params=["gblinear", "lgbmlinear"]) -# def primary_model(request: pytest.FixtureRequest) -> BaseLearnerHyperParams: -# """Fixture to provide different primary models types.""" -# learner_type = request.param -# if learner_type == "gblinear": -# return GBLinearHyperParams() -# if learner_type == "lgbm": -# return LGBMHyperParams() -# if learner_type == "lgbmlinear": -# return LGBMLinearHyperParams() -# return XGBoostHyperParams() - - -# @pytest.fixture(params=["gblinear", "lgbm", "lgbmlinear", "xgboost"]) -# def secondary_model(request: pytest.FixtureRequest) -> BaseLearnerHyperParams: -# """Fixture to provide different secondary models types.""" -# learner_type = request.param -# if learner_type == "gblinear": -# return GBLinearHyperParams() -# if learner_type == "lgbm": -# return LGBMHyperParams() -# if learner_type == "lgbmlinear": -# return LGBMLinearHyperParams() -# return XGBoostHyperParams() - - -# @pytest.fixture -# def base_config( -# primary_model: BaseLearnerHyperParams, -# secondary_model: BaseLearnerHyperParams, -# ) -> ResidualForecasterConfig: -# """Base configuration for Residual forecaster tests.""" - -# params = ResidualHyperParams( -# primary_hyperparams=primary_model, -# secondary_hyperparams=secondary_model, -# ) -# return ResidualForecasterConfig( -# quantiles=[Q(0.1), Q(0.5), Q(0.9)], -# horizons=[LeadTime(timedelta(days=1))], -# hyperparams=params, -# verbosity=False, -# ) - - -# def test_residual_forecaster_fit_predict( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: ResidualForecasterConfig, -# ): -# """Test basic fit and predict workflow with comprehensive output validation.""" -# # Arrange -# expected_quantiles = base_config.quantiles -# forecaster = ResidualForecaster(config=base_config) - -# # Act -# forecaster.fit(sample_forecast_input_dataset) -# result = forecaster.predict(sample_forecast_input_dataset) - -# # Assert -# # Basic functionality -# assert forecaster.is_fitted, "Model should be fitted after calling fit()" - -# # Check that necessary quantiles are present -# expected_columns = [q.format() for q in expected_quantiles] -# assert list(result.data.columns) == expected_columns, ( -# f"Expected columns {expected_columns}, got {list(result.data.columns)}" -# ) - -# # Forecast data quality -# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" - - -# def test_residual_forecaster_predict_not_fitted_raises_error( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: ResidualForecasterConfig, -# ): -# """Test that predict() raises NotFittedError when called before fit().""" -# # Arrange -# forecaster = ResidualForecaster(config=base_config) - -# # Act & Assert -# with pytest.raises(NotFittedError, match="ResidualForecaster"): -# forecaster.predict(sample_forecast_input_dataset) - - -# def test_residual_forecaster_with_sample_weights( -# sample_dataset_with_weights: ForecastInputDataset, -# base_config: ResidualForecasterConfig, -# ): -# """Test that forecaster works with sample weights and produces different results.""" -# # Arrange -# forecaster_with_weights = ResidualForecaster(config=base_config) - -# # Create dataset without weights for comparison -# data_without_weights = ForecastInputDataset( -# data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), -# sample_interval=sample_dataset_with_weights.sample_interval, -# target_column=sample_dataset_with_weights.target_column, -# forecast_start=sample_dataset_with_weights.forecast_start, -# ) -# forecaster_without_weights = ResidualForecaster(config=base_config) - -# # Act -# forecaster_with_weights.fit(sample_dataset_with_weights) -# forecaster_without_weights.fit(data_without_weights) - -# # Predict using data without sample_weight column (since that's used for training, not prediction) -# result_with_weights = forecaster_with_weights.predict(data_without_weights) -# result_without_weights = forecaster_without_weights.predict(data_without_weights) - -# # Assert -# # Both should produce valid forecasts -# assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" -# assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" - -# # Sample weights should affect the model, so results should be different -# # (This is a statistical test - with different weights, predictions should differ) -# differences = (result_with_weights.data - result_without_weights.data).abs() -# assert differences.sum().sum() > 0, "Sample weights should affect model predictions" diff --git a/packages/openstef-meta/tests/models/test_rules_forecaster.py b/packages/openstef-meta/tests/models/test_rules_forecaster.py deleted file mode 100644 index 06ae2a41d..000000000 --- a/packages/openstef-meta/tests/models/test_rules_forecaster.py +++ /dev/null @@ -1,136 +0,0 @@ -# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# # -# # SPDX-License-Identifier: MPL-2.0 - -# from datetime import timedelta - -# import pytest - -# from openstef_core.datasets import ForecastInputDataset -# from openstef_core.exceptions import NotFittedError -# from openstef_core.types import LeadTime, Q -# from openstef_meta.models.rules_forecaster import ( -# RulesForecaster, -# RulesForecasterConfig, -# RulesForecasterHyperParams, -# ) -# from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder - - -# @pytest.fixture -# def base_config() -> RulesForecasterConfig: -# """Base configuration for Rules forecaster tests.""" - -# params = RulesForecasterHyperParams() -# return RulesForecasterConfig( -# quantiles=[Q(0.1), Q(0.5), Q(0.9)], -# horizons=[LeadTime(timedelta(days=1))], -# hyperparams=params, -# verbosity=False, -# ) - - -# def test_rules_forecaster_fit_predict( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: RulesForecasterConfig, -# ): -# """Test basic fit and predict workflow with comprehensive output validation.""" -# # Arrange -# expected_quantiles = base_config.quantiles -# forecaster = RulesForecaster(config=base_config) - -# # Act -# forecaster.fit(sample_forecast_input_dataset) -# result = forecaster.predict(sample_forecast_input_dataset) - -# # Assert -# # Basic functionality -# assert forecaster.is_fitted, "Model should be fitted after calling fit()" - -# # Check that necessary quantiles are present -# expected_columns = [q.format() for q in expected_quantiles] -# assert list(result.data.columns) == expected_columns, ( -# f"Expected columns {expected_columns}, got {list(result.data.columns)}" -# ) - -# # Forecast data quality -# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" - - -# def test_rules_forecaster_predict_not_fitted_raises_error( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: RulesForecasterConfig, -# ): -# """Test that predict() raises NotFittedError when called before fit().""" -# # Arrange -# forecaster = RulesForecaster(config=base_config) - -# # Act & Assert -# with pytest.raises(NotFittedError, match="RulesForecaster"): -# forecaster.predict(sample_forecast_input_dataset) - - -# def test_rules_forecaster_with_sample_weights( -# sample_dataset_with_weights: ForecastInputDataset, -# base_config: RulesForecasterConfig, -# ): -# """Test that forecaster works with sample weights and produces different results.""" -# # Arrange -# forecaster_with_weights = RulesForecaster(config=base_config) - -# # Create dataset without weights for comparison -# data_without_weights = ForecastInputDataset( -# data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), -# sample_interval=sample_dataset_with_weights.sample_interval, -# target_column=sample_dataset_with_weights.target_column, -# forecast_start=sample_dataset_with_weights.forecast_start, -# ) -# forecaster_without_weights = RulesForecaster(config=base_config) - -# # Act -# forecaster_with_weights.fit(sample_dataset_with_weights) -# forecaster_without_weights.fit(data_without_weights) - -# # Predict using data without sample_weight column (since that's used for training, not prediction) -# result_with_weights = forecaster_with_weights.predict(data_without_weights) -# result_without_weights = forecaster_without_weights.predict(data_without_weights) - -# # Assert -# # Both should produce valid forecasts -# assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" -# assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" - -# # Sample weights should affect the model, so results should be different -# # (This is a statistical test - with different weights, predictions should differ) -# differences = (result_with_weights.data - result_without_weights.data).abs() -# assert differences.sum().sum() > 0, "Sample weights should affect model predictions" - - -# def test_rules_forecaster_with_additional_features( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: RulesForecasterConfig, -# ): -# """Test that forecaster works with additional features for the final learner.""" - -# base_config.hyperparams.final_hyperparams.feature_adders.append(CyclicFeaturesAdder()) # type: ignore - -# # Arrange -# expected_quantiles = base_config.quantiles -# forecaster = RulesForecaster(config=base_config) - -# # Act -# forecaster.fit(sample_forecast_input_dataset) -# result = forecaster.predict(sample_forecast_input_dataset) - -# # Assert -# # Basic functionality -# assert forecaster.is_fitted, "Model should be fitted after calling fit()" - -# # Check that necessary quantiles are present -# expected_columns = [q.format() for q in expected_quantiles] -# assert list(result.data.columns) == expected_columns, ( -# f"Expected columns {expected_columns}, got {list(result.data.columns)}" -# ) - -# # Forecast data quality -# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" diff --git a/packages/openstef-meta/tests/models/test_stacking_forecaster.py b/packages/openstef-meta/tests/models/test_stacking_forecaster.py deleted file mode 100644 index fac92e7cc..000000000 --- a/packages/openstef-meta/tests/models/test_stacking_forecaster.py +++ /dev/null @@ -1,136 +0,0 @@ -# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# # -# # SPDX-License-Identifier: MPL-2.0 - -# from datetime import timedelta - -# import pytest - -# from openstef_core.datasets import ForecastInputDataset -# from openstef_core.exceptions import NotFittedError -# from openstef_core.types import LeadTime, Q -# from openstef_meta.models.stacking_forecaster import ( -# StackingForecaster, -# StackingForecasterConfig, -# StackingHyperParams, -# ) -# from openstef_models.transforms.time_domain.cyclic_features_adder import CyclicFeaturesAdder - - -# @pytest.fixture -# def base_config() -> StackingForecasterConfig: -# """Base configuration for Stacking forecaster tests.""" - -# params = StackingHyperParams() -# return StackingForecasterConfig( -# quantiles=[Q(0.1), Q(0.5), Q(0.9)], -# horizons=[LeadTime(timedelta(days=1))], -# hyperparams=params, -# verbosity=False, -# ) - - -# def test_stacking_forecaster_fit_predict( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: StackingForecasterConfig, -# ): -# """Test basic fit and predict workflow with comprehensive output validation.""" -# # Arrange -# expected_quantiles = base_config.quantiles -# forecaster = StackingForecaster(config=base_config) - -# # Act -# forecaster.fit(sample_forecast_input_dataset) -# result = forecaster.predict(sample_forecast_input_dataset) - -# # Assert -# # Basic functionality -# assert forecaster.is_fitted, "Model should be fitted after calling fit()" - -# # Check that necessary quantiles are present -# expected_columns = [q.format() for q in expected_quantiles] -# assert list(result.data.columns) == expected_columns, ( -# f"Expected columns {expected_columns}, got {list(result.data.columns)}" -# ) - -# # Forecast data quality -# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" - - -# def test_stacking_forecaster_predict_not_fitted_raises_error( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: StackingForecasterConfig, -# ): -# """Test that predict() raises NotFittedError when called before fit().""" -# # Arrange -# forecaster = StackingForecaster(config=base_config) - -# # Act & Assert -# with pytest.raises(NotFittedError, match="StackingForecaster"): -# forecaster.predict(sample_forecast_input_dataset) - - -# def test_stacking_forecaster_with_sample_weights( -# sample_dataset_with_weights: ForecastInputDataset, -# base_config: StackingForecasterConfig, -# ): -# """Test that forecaster works with sample weights and produces different results.""" -# # Arrange -# forecaster_with_weights = StackingForecaster(config=base_config) - -# # Create dataset without weights for comparison -# data_without_weights = ForecastInputDataset( -# data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), -# sample_interval=sample_dataset_with_weights.sample_interval, -# target_column=sample_dataset_with_weights.target_column, -# forecast_start=sample_dataset_with_weights.forecast_start, -# ) -# forecaster_without_weights = StackingForecaster(config=base_config) - -# # Act -# forecaster_with_weights.fit(sample_dataset_with_weights) -# forecaster_without_weights.fit(data_without_weights) - -# # Predict using data without sample_weight column (since that's used for training, not prediction) -# result_with_weights = forecaster_with_weights.predict(data_without_weights) -# result_without_weights = forecaster_without_weights.predict(data_without_weights) - -# # Assert -# # Both should produce valid forecasts -# assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" -# assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" - -# # Sample weights should affect the model, so results should be different -# # (This is a statistical test - with different weights, predictions should differ) -# differences = (result_with_weights.data - result_without_weights.data).abs() -# assert differences.sum().sum() > 0, "Sample weights should affect model predictions" - - -# def test_stacking_forecaster_with_additional_features( -# sample_forecast_input_dataset: ForecastInputDataset, -# base_config: StackingForecasterConfig, -# ): -# """Test that forecaster works with additional features for the final learner.""" - -# base_config.hyperparams.combiner_hyperparams.feature_adders = [CyclicFeaturesAdder()] - -# # Arrange -# expected_quantiles = base_config.quantiles -# forecaster = StackingForecaster(config=base_config) - -# # Act -# forecaster.fit(sample_forecast_input_dataset) -# result = forecaster.predict(sample_forecast_input_dataset) - -# # Assert -# # Basic functionality -# assert forecaster.is_fitted, "Model should be fitted after calling fit()" - -# # Check that necessary quantiles are present -# expected_columns = [q.format() for q in expected_quantiles] -# assert list(result.data.columns) == expected_columns, ( -# f"Expected columns {expected_columns}, got {list(result.data.columns)}" -# ) - -# # Forecast data quality -# assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" diff --git a/packages/openstef-meta/tests/utils/test_datasets.py b/packages/openstef-meta/tests/utils/test_datasets.py index 045aecd13..efb64f3ea 100644 --- a/packages/openstef-meta/tests/utils/test_datasets.py +++ b/packages/openstef-meta/tests/utils/test_datasets.py @@ -11,7 +11,6 @@ from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset from openstef_core.types import Quantile -from openstef_meta.framework.base_learner import BaseLearnerNames from openstef_meta.utils.datasets import EnsembleForecastDataset @@ -79,26 +78,25 @@ def _make() -> ForecastDataset: @pytest.fixture -def base_learner_output( +def base_predictions( forecast_dataset_factory: Callable[[], ForecastDataset], -) -> dict[BaseLearnerNames, ForecastDataset]: - +) -> dict[str, ForecastDataset]: return { - "GBLinearForecaster": forecast_dataset_factory(), - "LGBMForecaster": forecast_dataset_factory(), + "model_1": forecast_dataset_factory(), + "model_2": forecast_dataset_factory(), } @pytest.fixture -def ensemble_dataset(base_learner_output: dict[BaseLearnerNames, ForecastDataset]) -> EnsembleForecastDataset: - return EnsembleForecastDataset.from_forecast_datasets(base_learner_output) +def ensemble_dataset(base_predictions: dict[str, ForecastDataset]) -> EnsembleForecastDataset: + return EnsembleForecastDataset.from_forecast_datasets(base_predictions) def test_from_ensemble_output(ensemble_dataset: EnsembleForecastDataset): assert isinstance(ensemble_dataset, EnsembleForecastDataset) assert ensemble_dataset.data.shape == (3, 7) # 3 timestamps, 2 learners * 3 quantiles + target - assert set(ensemble_dataset.model_names) == {"GBLinearForecaster", "LGBMForecaster"} + assert set(ensemble_dataset.forecaster_names) == {"model_1", "model_2"} assert set(ensemble_dataset.quantiles) == {Quantile(0.1), Quantile(0.5), Quantile(0.9)} @@ -116,4 +114,4 @@ def test_select_quantile_classification(ensemble_dataset: EnsembleForecastDatase assert isinstance(dataset, ForecastInputDataset) assert dataset.data.shape == (3, 3) # 3 timestamps, 2 learners * 1 quantiles + target - assert all(dataset.target_series.apply(lambda x: x in BaseLearnerNames.__args__)) # type: ignore + assert all(dataset.target_series.apply(lambda x: x in {"model_1", "model_2"})) # type: ignore diff --git a/packages/openstef-models/src/openstef_models/estimators/__init__.py b/packages/openstef-models/src/openstef_models/estimators/__init__.py deleted file mode 100644 index 07a4cbc99..000000000 --- a/packages/openstef-models/src/openstef_models/estimators/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""Custom estimators for multi quantiles.""" - -__all__ = [] diff --git a/packages/openstef-models/src/openstef_models/estimators/hybrid.py b/packages/openstef-models/src/openstef_models/estimators/hybrid.py deleted file mode 100644 index 1660d8707..000000000 --- a/packages/openstef-models/src/openstef_models/estimators/hybrid.py +++ /dev/null @@ -1,146 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Hybrid quantile regression estimators for multi-quantile forecasting. - -This module provides the HybridQuantileRegressor class, which combines LightGBM and linear models -using stacking for robust multi-quantile regression, including serialization utilities. -""" - -import numpy as np -import numpy.typing as npt -import pandas as pd -from lightgbm import LGBMRegressor -from sklearn.ensemble import StackingRegressor -from sklearn.linear_model import QuantileRegressor -from xgboost import XGBRegressor - - -class HybridQuantileRegressor: - """Custom Hybrid regressor for multi-quantile estimation using sample weights.""" - - def __init__( # noqa: D107, PLR0913, PLR0917 - self, - quantiles: list[float], - lgbm_n_estimators: int = 100, - lgbm_learning_rate: float = 0.1, - lgbm_max_depth: int = -1, - lgbm_min_child_weight: float = 1.0, - ligntgbm_min_child_samples: int = 1, - lgbm_min_data_in_leaf: int = 20, - lgbm_min_data_in_bin: int = 10, - lgbm_reg_alpha: float = 0.0, - lgbm_reg_lambda: float = 0.0, - lgbm_num_leaves: int = 31, - lgbm_max_bin: int = 255, - lgbm_colsample_by_tree: float = 1.0, - gblinear_n_steps: int = 100, - gblinear_learning_rate: float = 0.15, - gblinear_reg_alpha: float = 0.0001, - gblinear_reg_lambda: float = 0, - gblinear_feature_selector: str = "shuffle", - gblinear_updater: str = "shotgun", - ): - self.quantiles = quantiles - - self._models: list[StackingRegressor] = [] - - for q in quantiles: - lgbm_model = LGBMRegressor( - objective="quantile", - alpha=q, - min_child_samples=ligntgbm_min_child_samples, - n_estimators=lgbm_n_estimators, - learning_rate=lgbm_learning_rate, - max_depth=lgbm_max_depth, - min_child_weight=lgbm_min_child_weight, - min_data_in_leaf=lgbm_min_data_in_leaf, - min_data_in_bin=lgbm_min_data_in_bin, - reg_alpha=lgbm_reg_alpha, - reg_lambda=lgbm_reg_lambda, - num_leaves=lgbm_num_leaves, - max_bin=lgbm_max_bin, - colsample_bytree=lgbm_colsample_by_tree, - verbosity=-1, - linear_tree=False, - ) - - linear = XGBRegressor( - booster="gblinear", - # Core parameters for forecasting - objective="reg:quantileerror", - n_estimators=gblinear_n_steps, - learning_rate=gblinear_learning_rate, - # Regularization parameters - reg_alpha=gblinear_reg_alpha, - reg_lambda=gblinear_reg_lambda, - # Boosting structure control - feature_selector=gblinear_feature_selector, - updater=gblinear_updater, - quantile_alpha=q, - ) - - final_estimator = QuantileRegressor(quantile=q) - - self._models.append( - StackingRegressor( - estimators=[("lgbm", lgbm_model), ("gblinear", linear)], # type: ignore - final_estimator=final_estimator, - verbose=3, - passthrough=False, - n_jobs=None, - cv=1, - ) - ) - self.is_fitted: bool = False - self.feature_names: list[str] = [] - - @staticmethod - def _prepare_input(X: npt.NDArray[np.floating] | pd.DataFrame) -> pd.DataFrame: - """Prepare input data by handling missing values. - - Args: - X: Input features as a DataFrame or ndarray. - - Returns: - A DataFrame with missing values handled. - """ - return pd.DataFrame(X).ffill().fillna(0) # type: ignore[reportUnknownMemberType] - - def fit( - self, - X: npt.NDArray[np.floating] | pd.DataFrame, - y: npt.NDArray[np.floating] | pd.Series, - sample_weight: npt.NDArray[np.floating] | pd.Series | None = None, - feature_name: list[str] | None = None, - ) -> None: - """Fit the multi-quantile regressor. - - Args: - X: Input features as a DataFrame. - y: Target values as a 2D array where each column corresponds to a quantile. - sample_weight: Sample weights for training data. - feature_name: List of feature names. - """ - self.feature_names = feature_name if feature_name is not None else [] - - for model in self._models: - model.fit( - X=self._prepare_input(X), # type: ignore - y=y, - sample_weight=sample_weight, - ) - self.is_fitted = True - - def predict(self, X: npt.NDArray[np.floating] | pd.DataFrame) -> npt.NDArray[np.floating]: - """Predict quantiles for the input features. - - Args: - X: Input features as a DataFrame. - - Returns: - - A 2D array where each column corresponds to predicted quantiles. - """ # noqa: D412 - X = X.ffill().fillna(0) # type: ignore - return np.column_stack([model.predict(X=X) for model in self._models]) # type: ignore From e18ce5ab23cdeea1a973bee72139a811d736cc72 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 3 Dec 2025 13:29:20 +0100 Subject: [PATCH 050/104] Prepared TODOs for Florian Signed-off-by: Lars van Someren --- .../models/ensemble_forecasting_model.py | 34 +++++++++++++ .../learned_weights_combiner.py | 40 +++++++++++++++ .../forecast_combiners/rules_combiner.py | 28 +++++++++++ .../forecast_combiners/stacking_combiner.py | 49 +++++++++++++++++++ .../forecast_combiners/test_rules_combiner.py | 2 - 5 files changed, 151 insertions(+), 2 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 5ff81cd8e..7eab3d74f 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -308,6 +308,7 @@ def prepare_input( Args: data: Raw time series dataset to prepare for forecasting. + forecaster_name: Optional name of the forecaster for model-specific preprocessing. forecast_start: Optional start time for forecasts. If provided and earlier than the cutoff time, overrides the cutoff for data filtering. @@ -382,6 +383,39 @@ def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = Non return restore_target(dataset=prediction, original_dataset=data, target_column=self.target_column) + def predict_contributions(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> pd.DataFrame: + """Generate forecasts for the provided dataset. + + Args: + data: Input time series dataset for prediction. + forecast_start: Optional start time for forecasts. + + Returns: + ForecastDataset containing the generated forecasts. + + Raises: + NotFittedError: If the model has not been fitted yet. + """ + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + ensemble_predictions = self._predict_forecasters(data=data, forecast_start=forecast_start) + + additional_features = ( + ForecastInputDataset.from_timeseries( + self.combiner_preprocessing.transform(data=data), + target_column=self.target_column, + forecast_start=forecast_start, + ) + if len(self.combiner_preprocessing.transforms) > 0 + else None + ) + + return self.combiner.predict_contributions( + data=ensemble_predictions, + additional_features=additional_features, + ) + def score( self, data: TimeSeriesDataset, diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 31f39e095..98c1767a3 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -336,6 +336,46 @@ def predict( forecast_start=data.forecast_start, ) + @override + def predict_contributions( + self, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, + ) -> pd.DataFrame: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + # Generate predictions + contributions = pd.DataFrame({ + Quantile(q).format(): self._generate_contributions_quantile( + dataset=data.select_quantile(quantile=Quantile(q)), + additional_features=additional_features, + model_index=i, + ) + for i, q in enumerate(self.quantiles) + }) + target_series = data.target_series + if target_series is not None: + contributions[data.target_column] = target_series + + return contributions + + def _generate_contributions_quantile( + self, + dataset: ForecastInputDataset, + additional_features: ForecastInputDataset | None, + model_index: int, + ) -> pd.DataFrame: + # TODO: FLORIAN Update content + # input_data = self._prepare_input_data( + # dataset=dataset, + # additional_features=additional_features, + # ) + + # weights = self._predict_model_weights_quantile(base_predictions=input_data, model_index=model_index) + + return pd.DataFrame() + @property @override def is_fitted(self) -> bool: diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py index a030b3df5..965997cde 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py @@ -141,6 +141,34 @@ def predict( sample_interval=data.sample_interval, ) + @override + def predict_contributions( + self, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, + ) -> pd.DataFrame: + if additional_features is None: + raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") + + decisions = self._predict_tree( + additional_features.data, columns=data.select_quantile(quantile=self.quantiles[0]).data.columns + ) + + # Generate predictions + predictions: list[pd.DataFrame] = [] + for q in self.quantiles: + dataset = data.select_quantile(quantile=q) + preds = dataset.input_data().multiply(decisions).sum(axis=1) + + predictions.append(preds.to_frame(name=Quantile(q).format())) + + # Concatenate predictions along columns to form a DataFrame with quantile columns + df = pd.concat(predictions, axis=1) + + # TODO FLORIAN return only Decision datadrame + + return df + @property def is_fitted(self) -> bool: """Check the Rules Final Learner is fitted.""" diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index b8f4ebad5..3155d44bc 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -198,6 +198,55 @@ def predict( sample_interval=data.sample_interval, ) + @override + def predict_contributions( + self, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, + ) -> pd.DataFrame: + if not self.is_fitted: + raise NotFittedError(self.__class__.__name__) + + # Generate predictions + predictions: list[pd.DataFrame] = [] + for i, q in enumerate(self.quantiles): + if additional_features is not None: + input_data = self._combine_datasets( + data=data.select_quantile(quantile=q), + additional_features=additional_features, + ) + else: + input_data = data.select_quantile(quantile=q) + p = self.models[i].predict(data=input_data).data + predictions.append(p) + # Concatenate predictions along columns to form a DataFrame with quantile columns + df = pd.concat(predictions, axis=1) + + contributions = pd.DataFrame() + for q in self.quantiles: + # Extract base predictions for this quantile + # TODO Florian implement contributions extraction per quantile + + return pd.DataFrame() # Placeholder for actual implementation + + @staticmethod + def contributions_from_predictions( + base_predictions: pd.DataFrame, + final_predictions: pd.Series, + ) -> pd.DataFrame: + """Extract contributions from predictions DataFrame. + + Args: + predictions: DataFrame containing predictions. + + Returns: + DataFrame with contributions per base learner. + """ + # TODO Florian implement contributions extraction + # abs(final_predictions) / sum(abs(base_predictions), axis=1) + + return pd.DataFrame() # Placeholder for actual implementation + @property def is_fitted(self) -> bool: """Check the StackingForecastCombiner is fitted.""" diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py b/packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py index 6ee938ef4..aa08bf59a 100644 --- a/packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py +++ b/packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py @@ -6,8 +6,6 @@ import pytest -from openstef_core.datasets import ForecastInputDataset -from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q from openstef_meta.models.forecast_combiners.rules_combiner import ( RulesCombiner, From ece5d1863f85b1cc36111d63714b4b76d5d20482 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 3 Dec 2025 13:31:43 +0100 Subject: [PATCH 051/104] Small fix Signed-off-by: Lars van Someren --- .../models/forecast_combiners/stacking_combiner.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index 3155d44bc..a38123898 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -226,8 +226,9 @@ def predict_contributions( for q in self.quantiles: # Extract base predictions for this quantile # TODO Florian implement contributions extraction per quantile + pass - return pd.DataFrame() # Placeholder for actual implementation + return contributions # Placeholder for actual implementation @staticmethod def contributions_from_predictions( From c33ce9354abf3d08a90b59d419009822b5a29ae0 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 3 Dec 2025 14:15:06 +0100 Subject: [PATCH 052/104] Made PR Compliant Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 1 - .../openstef4_backtest_forecaster.py | 6 + .../src/openstef_meta/mixins/__init__.py | 4 + .../src/openstef_meta/mixins/contributions.py | 4 + .../src/openstef_meta/models/__init__.py | 5 + .../models/ensemble_forecasting_model.py | 15 +- .../models/forecast_combiners/__init__.py | 4 + .../src/openstef_meta/presets/__init__.py | 4 + .../presets/forecasting_workflow.py | 168 +++++++++--------- .../src/openstef_meta/utils/datasets.py | 1 - .../tests/{ => unit}/models/__init__.py | 0 .../tests/{ => unit}/models/conftest.py | 0 .../models/forecast_combiners}/__init__.py | 0 .../models/forecast_combiners/conftest.py | 0 .../test_learned_weights_combiner.py | 0 .../forecast_combiners/test_rules_combiner.py | 2 - .../test_stacking_combiner.py | 0 .../models/forecasting}/__init__.py | 0 .../forecasting/test_residual_forecaster.py | 0 .../models/test_ensemble_forecasting_model.py | 9 +- .../tests/unit/transforms/__init__.py | 0 .../transforms/test_flag_features_bound.py | 0 .../tests/unit/utils/__init__.py | 0 .../tests/{ => unit}/utils/test_datasets.py | 0 .../{ => unit}/utils/test_decision_tree.py | 0 .../mlflow/mlflow_storage_callback.py | 8 +- 26 files changed, 138 insertions(+), 93 deletions(-) create mode 100644 packages/openstef-meta/src/openstef_meta/models/__init__.py rename packages/openstef-meta/tests/{ => unit}/models/__init__.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/conftest.py (100%) rename packages/openstef-meta/tests/{transforms => unit/models/forecast_combiners}/__init__.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/forecast_combiners/conftest.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/forecast_combiners/test_learned_weights_combiner.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/forecast_combiners/test_rules_combiner.py (95%) rename packages/openstef-meta/tests/{ => unit}/models/forecast_combiners/test_stacking_combiner.py (100%) rename packages/openstef-meta/tests/{utils => unit/models/forecasting}/__init__.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/forecasting/test_residual_forecaster.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/test_ensemble_forecasting_model.py (97%) create mode 100644 packages/openstef-meta/tests/unit/transforms/__init__.py rename packages/openstef-meta/tests/{ => unit}/transforms/test_flag_features_bound.py (100%) create mode 100644 packages/openstef-meta/tests/unit/utils/__init__.py rename packages/openstef-meta/tests/{ => unit}/utils/test_datasets.py (100%) rename packages/openstef-meta/tests/{ => unit}/utils/test_decision_tree.py (100%) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index d3c990ad2..6e58f7998 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -18,7 +18,6 @@ os.environ["MKL_NUM_THREADS"] = "1" import logging -import multiprocessing from datetime import timedelta from pathlib import Path diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py index 56dad935f..d8a303007 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py @@ -17,6 +17,7 @@ from openstef_core.datasets import TimeSeriesDataset from openstef_core.exceptions import FlatlinerDetectedError, NotFittedError from openstef_core.types import Q +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow @@ -58,6 +59,11 @@ def quantiles(self) -> list[Q]: if self._workflow is None: self._workflow = self.workflow_factory() # Extract quantiles from the workflow's model + + if isinstance(self._workflow.model, EnsembleForecastingModel): + # Assuming all ensemble members have the same quantiles + name = self._workflow.model.forecaster_names[0] + return self._workflow.model.forecasters[name].config.quantiles return self._workflow.model.forecaster.config.quantiles @override diff --git a/packages/openstef-meta/src/openstef_meta/mixins/__init__.py b/packages/openstef-meta/src/openstef_meta/mixins/__init__.py index 71d67869d..90a57a257 100644 --- a/packages/openstef-meta/src/openstef_meta/mixins/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/mixins/__init__.py @@ -1 +1,5 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Mixins for OpenSTEF-Meta package.""" diff --git a/packages/openstef-meta/src/openstef_meta/mixins/contributions.py b/packages/openstef-meta/src/openstef_meta/mixins/contributions.py index 9fb68377c..f00c185b3 100644 --- a/packages/openstef-meta/src/openstef_meta/mixins/contributions.py +++ b/packages/openstef-meta/src/openstef_meta/mixins/contributions.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """ExplainableMetaForecaster Mixin.""" from abc import ABC, abstractmethod diff --git a/packages/openstef-meta/src/openstef_meta/models/__init__.py b/packages/openstef-meta/src/openstef_meta/models/__init__.py new file mode 100644 index 000000000..13175057c --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/models/__init__.py @@ -0,0 +1,5 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Meta Forecasting models.""" diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 5ff81cd8e..a56c8b3e7 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -61,16 +61,24 @@ class EnsembleForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastD >>> from openstef_models.models.forecasting.constant_median_forecaster import ( ... ConstantMedianForecaster, ConstantMedianForecasterConfig ... ) + >>> from openstef_meta.models.forecast_combiners.learned_weights_combiner import WeightsCombiner >>> from openstef_core.types import LeadTime >>> >>> # Note: This is a conceptual example showing the API structure >>> # Real usage requires implemented forecaster classes - >>> forecaster = ConstantMedianForecaster( + >>> forecaster_1 = ConstantMedianForecaster( ... config=ConstantMedianForecasterConfig(horizons=[LeadTime.from_string("PT36H")]) ... ) + >>> forecaster_2 = ConstantMedianForecaster( + ... config=ConstantMedianForecasterConfig(horizons=[LeadTime.from_string("PT36H")]) + ... ) + >>> combiner_config = WeightsCombiner.Config( + ... horizons=[LeadTime.from_string("PT36H")], + ... ) >>> # Create and train model - >>> model = ForecastingModel( - ... forecaster=forecaster, + >>> model = EnsembleForecastingModel( + ... forecasters={"constant_median": forecaster_1, "constant_median_2": forecaster_2}, + ... combiner=WeightsCombiner(config=combiner_config), ... cutoff_history=timedelta(days=14), # Match your maximum lag in preprocessing ... ) >>> model.fit(training_data) # doctest: +SKIP @@ -308,6 +316,7 @@ def prepare_input( Args: data: Raw time series dataset to prepare for forecasting. + forecaster_name: Name of the forecaster for model-specific preprocessing. forecast_start: Optional start time for forecasts. If provided and earlier than the cutoff time, overrides the cutoff for data filtering. diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py index db4917778..56a4cadff 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Forecast Combiners.""" from .forecast_combiner import ForecastCombiner, ForecastCombinerConfig diff --git a/packages/openstef-meta/src/openstef_meta/presets/__init__.py b/packages/openstef-meta/src/openstef_meta/presets/__init__.py index 53b9630aa..ad62320c2 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/presets/__init__.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Package for preset forecasting workflows.""" from .forecasting_workflow import EnsembleForecastingModel, EnsembleWorkflowConfig, create_ensemble_workflow diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 88d063584..1087f2c5d 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Ensemble forecasting workflow preset. Mimics OpenSTEF-models forecasting workflow with ensemble capabilities. @@ -5,7 +9,7 @@ from collections.abc import Sequence from datetime import timedelta -from typing import Literal +from typing import TYPE_CHECKING, Literal from pydantic import Field @@ -24,9 +28,8 @@ from openstef_meta.models.forecast_combiners.rules_combiner import RulesCombiner from openstef_meta.models.forecast_combiners.stacking_combiner import StackingCombiner from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster -from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback +from openstef_models.integrations.mlflow import MLFlowStorage from openstef_models.mixins.model_serializer import ModelIdentifier -from openstef_models.models.forecasting.forecaster import Forecaster from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster @@ -55,6 +58,9 @@ from openstef_models.utils.feature_selection import Exclude, FeatureSelection, Include from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow, ForecastingCallback +if TYPE_CHECKING: + from openstef_models.models.forecasting.forecaster import Forecaster + class EnsembleWorkflowConfig(BaseConfig): """Configuration for ensemble forecasting workflows.""" @@ -235,62 +241,72 @@ class EnsembleWorkflowConfig(BaseConfig): ) -def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: - """Create an ensemble forecasting workflow from configuration.""" - - # Build preprocessing components - def checks() -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: - return [ - InputConsistencyChecker(), - FlatlineChecker( - load_column=config.target_column, - flatliner_threshold=config.flatliner_threshold, - detect_non_zero_flatliner=config.detect_non_zero_flatliner, - error_on_flatliner=False, - ), - CompletenessChecker(completeness_threshold=config.completeness_threshold), - ] - - def feature_adders() -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: - return [ - WindPowerFeatureAdder( - windspeed_reference_column=config.wind_speed_column, - ), - AtmosphereDerivedFeaturesAdder( - pressure_column=config.pressure_column, - relative_humidity_column=config.relative_humidity_column, - temperature_column=config.temperature_column, - ), - RadiationDerivedFeaturesAdder( - coordinate=config.location.coordinate, - radiation_column=config.radiation_column, - ), - CyclicFeaturesAdder(), - DaylightFeatureAdder( - coordinate=config.location.coordinate, - ), - RollingAggregatesAdder( - feature=config.target_column, - aggregation_functions=config.rolling_aggregate_features, - horizons=config.horizons, - ), - ] - - def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: - return [ - Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), - Scaler(selection=Exclude(config.target_column), method="standard"), - SampleWeighter( - target_column=config.target_column, - weight_exponent=config.sample_weight_exponent, - weight_floor=config.sample_weight_floor, - weight_scale_percentile=config.sample_weight_scale_percentile, - ), - EmptyFeatureRemover(), - ] - - # Model Specific LagsAdder +# Build preprocessing components +def checks(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: + return [ + InputConsistencyChecker(), + FlatlineChecker( + load_column=config.target_column, + flatliner_threshold=config.flatliner_threshold, + detect_non_zero_flatliner=config.detect_non_zero_flatliner, + error_on_flatliner=False, + ), + CompletenessChecker(completeness_threshold=config.completeness_threshold), + ] + +def feature_adders(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: + return [ + WindPowerFeatureAdder( + windspeed_reference_column=config.wind_speed_column, + ), + AtmosphereDerivedFeaturesAdder( + pressure_column=config.pressure_column, + relative_humidity_column=config.relative_humidity_column, + temperature_column=config.temperature_column, + ), + RadiationDerivedFeaturesAdder( + coordinate=config.location.coordinate, + radiation_column=config.radiation_column, + ), + CyclicFeaturesAdder(), + DaylightFeatureAdder( + coordinate=config.location.coordinate, + ), + RollingAggregatesAdder( + feature=config.target_column, + aggregation_functions=config.rolling_aggregate_features, + horizons=config.horizons, + ), + ] + + +def feature_standardizers(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: + return [ + Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), + Scaler(selection=Exclude(config.target_column), method="standard"), + SampleWeighter( + target_column=config.target_column, + weight_exponent=config.sample_weight_exponent, + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, + ), + EmptyFeatureRemover(), + ] + + +def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: # noqa: C901, PLR0912, PLR0915 + """Create an ensemble forecasting workflow from configuration. + + Args: + config (EnsembleWorkflowConfig): Configuration for the ensemble workflow. + + Returns: + CustomForecastingWorkflow: Configured ensemble forecasting workflow. + + Raises: + ValueError: If an unsupported base model or combiner type is specified. + """ # Build forecasters and their processing pipelines forecaster_preprocessing: dict[str, list[Transform[TimeSeriesDataset, TimeSeriesDataset]]] = {} forecasters: dict[str, Forecaster] = {} @@ -300,8 +316,8 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas config=LGBMForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(), - *feature_adders(), + *checks(config), + *feature_adders(config), LagsAdder( history_available=config.predict_history, horizons=config.horizons, @@ -310,7 +326,7 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas ), HolidayFeatureAdder(country_code=config.location.country_code), DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(), + *feature_standardizers(config), ] elif model_type == "gblinear": @@ -318,8 +334,8 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas config=GBLinearForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(), - *feature_adders(), + *checks(config), + *feature_adders(config), LagsAdder( history_available=config.predict_history, horizons=config.horizons, @@ -329,7 +345,7 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas ), HolidayFeatureAdder(country_code=config.location.country_code), DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(), + *feature_standardizers(config), Imputer( selection=Exclude(config.target_column), imputation_strategy="mean", @@ -344,8 +360,8 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas config=XGBoostForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(), - *feature_adders(), + *checks(config), + *feature_adders(config), LagsAdder( history_available=config.predict_history, horizons=config.horizons, @@ -354,15 +370,15 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas ), HolidayFeatureAdder(country_code=config.location.country_code), DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(), + *feature_standardizers(config), ] elif model_type == "lgbm_linear": forecasters[model_type] = LGBMLinearForecaster( config=LGBMLinearForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(), - *feature_adders(), + *checks(config), + *feature_adders(config), LagsAdder( history_available=config.predict_history, horizons=config.horizons, @@ -371,7 +387,7 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas ), HolidayFeatureAdder(country_code=config.location.country_code), DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(), + *feature_standardizers(config), ] else: msg = f"Unsupported base model type: {model_type}" @@ -435,17 +451,7 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas ) callbacks: list[ForecastingCallback] = [] - if config.mlflow_storage is not None: - callbacks.append( - MLFlowStorageCallback( - storage=config.mlflow_storage, - model_reuse_enable=config.model_reuse_enable, - model_reuse_max_age=config.model_reuse_max_age, - model_selection_enable=config.model_selection_enable, - model_selection_metric=config.model_selection_metric, - model_selection_old_model_penalty=config.model_selection_old_model_penalty, - ) - ) + # TODO(Egor): Implement MLFlow for OpenSTEF-meta # noqa: TD003 return CustomForecastingWorkflow(model=ensemble_model, model_id=config.model_id, callbacks=callbacks) diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index 9d38c5b4f..e0bba9265 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -78,7 +78,6 @@ def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[str], list[Q Raises: ValueError: If an invalid base learner name is found in a feature name. """ - forecasters: set[str] = set() quantiles: set[Quantile] = set() diff --git a/packages/openstef-meta/tests/models/__init__.py b/packages/openstef-meta/tests/unit/models/__init__.py similarity index 100% rename from packages/openstef-meta/tests/models/__init__.py rename to packages/openstef-meta/tests/unit/models/__init__.py diff --git a/packages/openstef-meta/tests/models/conftest.py b/packages/openstef-meta/tests/unit/models/conftest.py similarity index 100% rename from packages/openstef-meta/tests/models/conftest.py rename to packages/openstef-meta/tests/unit/models/conftest.py diff --git a/packages/openstef-meta/tests/transforms/__init__.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/__init__.py similarity index 100% rename from packages/openstef-meta/tests/transforms/__init__.py rename to packages/openstef-meta/tests/unit/models/forecast_combiners/__init__.py diff --git a/packages/openstef-meta/tests/models/forecast_combiners/conftest.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py similarity index 100% rename from packages/openstef-meta/tests/models/forecast_combiners/conftest.py rename to packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_learned_weights_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py similarity index 100% rename from packages/openstef-meta/tests/models/forecast_combiners/test_learned_weights_combiner.py rename to packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py similarity index 95% rename from packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py rename to packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py index 6ee938ef4..aa08bf59a 100644 --- a/packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py @@ -6,8 +6,6 @@ import pytest -from openstef_core.datasets import ForecastInputDataset -from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q from openstef_meta.models.forecast_combiners.rules_combiner import ( RulesCombiner, diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py similarity index 100% rename from packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py rename to packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py diff --git a/packages/openstef-meta/tests/utils/__init__.py b/packages/openstef-meta/tests/unit/models/forecasting/__init__.py similarity index 100% rename from packages/openstef-meta/tests/utils/__init__.py rename to packages/openstef-meta/tests/unit/models/forecasting/__init__.py diff --git a/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py b/packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py similarity index 100% rename from packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py rename to packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py diff --git a/packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py similarity index 97% rename from packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py rename to packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py index 126163bd9..33b78cfc9 100644 --- a/packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py @@ -1,4 +1,8 @@ -import pickle +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +import pickle # noqa: S403 - Controlled test from datetime import datetime, timedelta from typing import override @@ -136,10 +140,9 @@ def model() -> EnsembleForecastingModel: ) # Act - model = EnsembleForecastingModel( + return EnsembleForecastingModel( forecasters=forecasters, combiner=combiner, common_preprocessing=TransformPipeline() ) - return model def test_forecasting_model__init__uses_defaults(model: EnsembleForecastingModel): diff --git a/packages/openstef-meta/tests/unit/transforms/__init__.py b/packages/openstef-meta/tests/unit/transforms/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-meta/tests/transforms/test_flag_features_bound.py b/packages/openstef-meta/tests/unit/transforms/test_flag_features_bound.py similarity index 100% rename from packages/openstef-meta/tests/transforms/test_flag_features_bound.py rename to packages/openstef-meta/tests/unit/transforms/test_flag_features_bound.py diff --git a/packages/openstef-meta/tests/unit/utils/__init__.py b/packages/openstef-meta/tests/unit/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-meta/tests/utils/test_datasets.py b/packages/openstef-meta/tests/unit/utils/test_datasets.py similarity index 100% rename from packages/openstef-meta/tests/utils/test_datasets.py rename to packages/openstef-meta/tests/unit/utils/test_datasets.py diff --git a/packages/openstef-meta/tests/utils/test_decision_tree.py b/packages/openstef-meta/tests/unit/utils/test_decision_tree.py similarity index 100% rename from packages/openstef-meta/tests/utils/test_decision_tree.py rename to packages/openstef-meta/tests/unit/utils/test_decision_tree.py diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index fd59cd600..6d8ee425f 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -101,7 +101,7 @@ def on_fit_end( run = self.storage.create_run( model_id=context.workflow.model_id, tags=context.workflow.model.tags, - hyperparams=context.workflow.model.forecaster.hyperparams, + hyperparams=context.workflow.model.forecaster.hyperparams, # type: ignore TODO Make MLFlow compatible with OpenSTEF Meta ) run_id: str = run.info.run_id self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) @@ -114,7 +114,11 @@ def on_fit_end( self._logger.info("Stored training data at %s for run %s", data_path, run_id) # Store feature importance plot if enabled - if self.store_feature_importance_plot and isinstance(context.workflow.model.forecaster, ExplainableForecaster): + if ( + self.store_feature_importance_plot + and isinstance(context.workflow.model, ForecastingModel) + and isinstance(context.workflow.model.forecaster, ExplainableForecaster) + ): fig = context.workflow.model.forecaster.plot_feature_importances() fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] From eb775e41c2d661f025cc97268f5bbdbf4abe148f Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 4 Dec 2025 11:40:44 +0100 Subject: [PATCH 053/104] BugFix Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 3 ++ .../models/ensemble_forecasting_model.py | 32 ++++++++----- .../forecast_combiners/forecast_combiner.py | 22 --------- .../learned_weights_combiner.py | 14 +++--- .../presets/forecasting_workflow.py | 47 ++++++++++++++----- .../src/openstef_meta/utils/datasets.py | 37 +++++++++++++++ 6 files changed, 103 insertions(+), 52 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 6e58f7998..0f7248183 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -18,6 +18,7 @@ os.environ["MKL_NUM_THREADS"] = "1" import logging +import multiprocessing from datetime import timedelta from pathlib import Path @@ -93,6 +94,8 @@ temperature_column="temperature_2m", relative_humidity_column="relative_humidity_2m", energy_price_column="EPEX_NL", + forecast_combiner_sample_weight_exponent=1, + forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 1, "xgboost": 0, "lgbm_linear": 0}, ) diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index a56c8b3e7..1c6f82bb0 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -35,6 +35,8 @@ from openstef_models.models.forecasting_model import ModelFitResult from openstef_models.utils.data_split import DataSplitter +logger = logging.getLogger(__name__) + class EnsembleForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): """Complete forecasting pipeline combining preprocessing, prediction, and postprocessing. @@ -191,6 +193,15 @@ def fit( Returns: FitResult containing training details and metrics. """ + + # Transform the input data to a valid forecast input and split into train/val/test + data, data_val, data_test = self.data_splitter.split_dataset( + data=data, + data_val=data_val, + data_test=data_test, + target_column=self.target_column, + ) + # Fit the feature engineering transforms self.common_preprocessing.fit(data=data) @@ -249,27 +260,24 @@ def _preprocess_fit_forecasters( predictions_raw: dict[str, ForecastDataset] = {} + if data_test is not None: + logger.info("Data test provided during fit, but will be ignored for MetaForecating") + for name, forecaster in self.forecasters.items(): validate_horizons_present(data, forecaster.config.horizons) # Transform and split input data - input_data_train = self.prepare_input(data=data, forecaster_name=name) - input_data_val = self.prepare_input(data=data_val, forecaster_name=name) if data_val else None - input_data_test = self.prepare_input(data=data_test, forecaster_name=name) if data_test else None + input_data_train = self.prepare_input(data=data, forecaster_name=name, forecast_start=data.index[0]) + input_data_val = ( + self.prepare_input(data=data_val, forecaster_name=name, forecast_start=data_val.index[0]) + if data_val + else None + ) # Drop target column nan's from training data. One can not train on missing targets. target_dropna = partial(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownMemberType] input_data_train = input_data_train.pipe_pandas(target_dropna) input_data_val = input_data_val.pipe_pandas(target_dropna) if input_data_val else None - input_data_test = input_data_test.pipe_pandas(target_dropna) if input_data_test else None - - # Transform the input data to a valid forecast input and split into train/val/test - input_data_train, input_data_val, input_data_test = self.data_splitter.split_dataset( - data=input_data_train, - data_val=input_data_val, - data_test=input_data_test, - target_column=self.target_column, - ) # Fit the model forecaster.fit(data=input_data_train, data_val=input_data_val) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index 09b4e9017..b1df023f6 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -122,28 +122,6 @@ def predict( """ raise NotImplementedError("Subclasses must implement the predict method.") - @staticmethod - def _prepare_input_data( - dataset: ForecastInputDataset, additional_features: ForecastInputDataset | None - ) -> pd.DataFrame: - """Prepare input data by combining base predictions with additional features if provided. - - Args: - dataset: ForecastInputDataset containing base predictions. - additional_features: Optional ForecastInputDataset containing additional features. - - Returns: - pd.DataFrame: Combined DataFrame of base predictions and additional features if provided. - """ - df = dataset.input_data(start=dataset.index[0]) - if additional_features is not None: - df_a = additional_features.input_data(start=dataset.index[0]) - df = pd.concat( - [df, df_a], - axis=1, - ) - return df - @property @abstractmethod def is_fitted(self) -> bool: diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 31f39e095..cdda00c1c 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -32,7 +32,7 @@ ForecastCombiner, ForecastCombinerConfig, ) -from openstef_meta.utils.datasets import EnsembleForecastDataset +from openstef_meta.utils.datasets import EnsembleForecastDataset, combine_forecast_input_datasets logger = logging.getLogger(__name__) @@ -241,18 +241,17 @@ def fit( for i, q in enumerate(self.quantiles): # Data preparation dataset = data.select_quantile_classification(quantile=q) - input_data = self._prepare_input_data( + combined_data = combine_forecast_input_datasets( dataset=dataset, - additional_features=additional_features, + other=additional_features, ) - labels = dataset.target_series + input_data = combined_data.input_data() + labels = combined_data.target_series self._validate_labels(labels=labels, model_index=i) labels = self._label_encoder.transform(labels) # Balance classes, adjust with sample weights - weights = compute_sample_weight("balanced", labels) - if sample_weights is not None: - weights *= sample_weights + weights = compute_sample_weight("balanced", labels) * combined_data.sample_weight_series self.models[i].fit(X=input_data, y=labels, sample_weight=weights) # type: ignore self._is_fitted = True @@ -276,6 +275,7 @@ def _prepare_input_data( df = pd.concat( [df, df_a], axis=1, + join="inner", ) return df diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 1087f2c5d..011cce2b0 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -11,6 +11,7 @@ from datetime import timedelta from typing import TYPE_CHECKING, Literal +from openstef_meta.transforms.selector import Selector from pydantic import Field from openstef_beam.evaluation.metric_providers import ( @@ -83,7 +84,7 @@ class EnsembleWorkflowConfig(BaseConfig): description="Time interval between consecutive data samples.", ) horizons: list[LeadTime] = Field( - default=[LeadTime.from_string("PT48H")], + default=[LeadTime.from_string("PT36H")], description="List of forecast horizons to predict.", ) @@ -171,14 +172,19 @@ class EnsembleWorkflowConfig(BaseConfig): description="Percentile of target values used as scaling reference. " "Values are normalized relative to this percentile before weighting.", ) - sample_weight_exponent: float = Field( - default_factory=lambda data: 1.0 - if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "learned_weights", "stacking", "residual", "xgboost"} - else 0.0, + forecaster_sample_weight_exponent: dict[str, float] = Field( + default={"gblinear": 1.0, "lgbm": 0, "xgboost": 0, "lgbm_linear": 0}, description="Exponent applied to scale the sample weights. " "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " "Note: Defaults to 1.0 for gblinear congestion models.", ) + + forecast_combiner_sample_weight_exponent: float = Field( + default=0, + description="Exponent applied to scale the sample weights for the forecast combiner model. " + "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values.", + ) + sample_weight_floor: float = Field( default=0.1, description="Minimum weight value to ensure all samples contribute to training.", @@ -281,13 +287,15 @@ def feature_adders(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesD ] -def feature_standardizers(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: +def feature_standardizers( + config: EnsembleWorkflowConfig, model_type: str +) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: return [ Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), Scaler(selection=Exclude(config.target_column), method="standard"), SampleWeighter( target_column=config.target_column, - weight_exponent=config.sample_weight_exponent, + weight_exponent=config.forecaster_sample_weight_exponent[model_type], weight_floor=config.sample_weight_floor, weight_scale_percentile=config.sample_weight_scale_percentile, ), @@ -326,7 +334,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin ), HolidayFeatureAdder(country_code=config.location.country_code), DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(config), + *feature_standardizers(config, model_type), ] elif model_type == "gblinear": @@ -345,7 +353,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin ), HolidayFeatureAdder(country_code=config.location.country_code), DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(config), + *feature_standardizers(config, model_type), Imputer( selection=Exclude(config.target_column), imputation_strategy="mean", @@ -370,7 +378,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin ), HolidayFeatureAdder(country_code=config.location.country_code), DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(config), + *feature_standardizers(config, model_type), ] elif model_type == "lgbm_linear": forecasters[model_type] = LGBMLinearForecaster( @@ -387,7 +395,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin ), HolidayFeatureAdder(country_code=config.location.country_code), DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(config), + *feature_standardizers(config, model_type), ] else: msg = f"Unsupported base model type: {model_type}" @@ -441,13 +449,30 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin name: TransformPipeline(transforms=transforms) for name, transforms in forecaster_preprocessing.items() } + if config.forecast_combiner_sample_weight_exponent != 0: + combiner_transforms = [ + SampleWeighter( + target_column=config.target_column, + weight_exponent=config.forecast_combiner_sample_weight_exponent, + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, + ), + Selector(selection=Include("sample_weight", config.target_column)), + ] + else: + combiner_transforms = [] + + combiner_preprocessing: TransformPipeline[TimeSeriesDataset] = TransformPipeline(transforms=combiner_transforms) + ensemble_model = EnsembleForecastingModel( common_preprocessing=TransformPipeline(transforms=[]), model_specific_preprocessing=model_specific_preprocessing, + combiner_preprocessing=combiner_preprocessing, postprocessing=TransformPipeline(transforms=postprocessing), forecasters=forecasters, combiner=combiner, target_column=config.target_column, + data_splitter=config.data_splitter, ) callbacks: list[ForecastingCallback] = [] diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index e0bba9265..cb6dbdad2 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -20,6 +20,43 @@ DEFAULT_TARGET_COLUMN = {Quantile(0.5): "load"} +def combine_forecast_input_datasets( + dataset: ForecastInputDataset, other: ForecastInputDataset | None, join: str = "inner" +) -> ForecastInputDataset: + """Combine multiple TimeSeriesDatasets into a single dataset. + + Args: + dataset: First ForecastInputDataset. + other: Second ForecastInputDataset or None. + join: Type of join to perform on the datasets. Defaults to "inner". + + Returns: + Combined ForecastDataset. + """ + if not isinstance(other, ForecastInputDataset): + return dataset + if join != "inner": + raise NotImplementedError("Only 'inner' join is currently supported.") + df_other = other.data + if dataset.target_column in df_other.columns: + df_other = df_other.drop(columns=[dataset.target_column]) + + df_one = dataset.data + df = pd.concat( + [df_one, df_other], + axis=1, + join="inner", + ) + + return ForecastInputDataset( + data=df, + sample_interval=dataset.sample_interval, + target_column=dataset.target_column, + sample_weight_column=dataset.sample_weight_column, + forecast_start=dataset.forecast_start, + ) + + class EnsembleForecastDataset(TimeSeriesDataset): """First stage output format for ensemble forecasters.""" From e212448dbcdad7dd5e024d49d69cf9a7532d7af2 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 4 Dec 2025 12:38:24 +0100 Subject: [PATCH 054/104] fixes Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 4 +- .../models/ensemble_forecasting_model.py | 93 +++++++++++----- .../presets/forecasting_workflow.py | 101 ++++++++++-------- .../src/openstef_meta/transforms/selector.py | 2 +- 4 files changed, 121 insertions(+), 79 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 0f7248183..6d8931099 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -44,7 +44,7 @@ OUTPUT_PATH = Path("./benchmark_results") -N_PROCESSES = 1 # multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = 11 # multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark ensemble_type = "learned_weights" # "stacking", "learned_weights" or "rules" base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" @@ -95,7 +95,7 @@ relative_humidity_column="relative_humidity_2m", energy_price_column="EPEX_NL", forecast_combiner_sample_weight_exponent=1, - forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 1, "xgboost": 0, "lgbm_linear": 0}, + forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 0, "xgboost": 0, "lgbm_linear": 0}, ) diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 1c6f82bb0..a6298088b 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -194,6 +194,16 @@ def fit( FitResult containing training details and metrics. """ + score_data = data.copy_with(data=data.data) + # Fit the feature engineering transforms + self.common_preprocessing.fit(data=data) + data = self.common_preprocessing.transform(data=data) + + if data_val is not None: + data_val = self.common_preprocessing.transform(data=data_val) + if data_test is not None: + data_test = self.common_preprocessing.transform(data=data_test) + # Transform the input data to a valid forecast input and split into train/val/test data, data_val, data_test = self.data_splitter.split_dataset( data=data, @@ -202,9 +212,6 @@ def fit( target_column=self.target_column, ) - # Fit the feature engineering transforms - self.common_preprocessing.fit(data=data) - # Fit predict forecasters ensemble_predictions = self._preprocess_fit_forecasters( data=data, @@ -219,13 +226,7 @@ def fit( else: ensemble_predictions_val = None - if len(self.combiner_preprocessing.transforms) > 0: - combiner_data = self.prepare_input(data=data) - self.combiner_preprocessing.fit(combiner_data) - combiner_data = self.combiner_preprocessing.transform(combiner_data) - features = ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) - else: - features = None + features = self._fit_transform_combiner_data(data=data) self.combiner.fit( data=ensemble_predictions, @@ -234,11 +235,36 @@ def fit( ) # Prepare input datasets for metrics calculation + metrics_train = self._predict_combiner_and_score( + ensemble_dataset=ensemble_predictions, additional_features=features + ) + if data_val is not None: + features_val = self._transform_combiner_data(data=data_val) + metrics_val = ( + self._predict_combiner_and_score( + ensemble_dataset=ensemble_predictions_val, additional_features=features_val + ) + if ensemble_predictions_val + else None + ) + else: + metrics_val = None - metrics_train = self._predict_and_score(data=data) - metrics_val = self._predict_and_score(data=data_val) if data_val else None - metrics_test = self._predict_and_score(data=data_test) if data_test else None - metrics_full = self.score(data=data) + if data_test is not None: + features_test = self._transform_combiner_data(data=data_test) + ensemble_predictions_test = self._predict_forecasters( + data=self.prepare_input(data=data_test), + ) + metrics_test = ( + self._predict_combiner_and_score( + ensemble_dataset=ensemble_predictions_test, additional_features=features_test + ) + if ensemble_predictions_test + else None + ) + else: + metrics_test = None + metrics_full = self.score(data=score_data) return ModelFitResult( input_dataset=data, @@ -251,6 +277,21 @@ def fit( metrics_full=metrics_full, ) + def _transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputDataset | None: + if len(self.combiner_preprocessing.transforms) == 0: + return None + combiner_data = self.prepare_input(data=data) + combiner_data = self.combiner_preprocessing.transform(combiner_data) + return ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) + + def _fit_transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputDataset | None: + if len(self.combiner_preprocessing.transforms) == 0: + return None + combiner_data = self.prepare_input(data=data) + self.combiner_preprocessing.fit(combiner_data) + combiner_data = self.combiner_preprocessing.transform(combiner_data) + return ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) + def _preprocess_fit_forecasters( self, data: TimeSeriesDataset, @@ -331,9 +372,6 @@ def prepare_input( Returns: Processed forecast input dataset ready for model prediction. """ - # Transform and restore target column - data = self.common_preprocessing.transform(data=data) - # Apply model-specific preprocessing if available if forecaster_name in self.model_specific_preprocessing: self.model_specific_preprocessing[forecaster_name].fit(data=data) @@ -359,8 +397,10 @@ def prepare_input( forecast_start=forecast_start, ) - def _predict_and_score(self, data: TimeSeriesDataset) -> SubsetMetric: - prediction = self.predict(data) + def _predict_combiner_and_score( + self, ensemble_dataset: EnsembleForecastDataset, additional_features: ForecastInputDataset | None = None + ) -> SubsetMetric: + prediction = self.combiner.predict(ensemble_dataset, additional_features=additional_features) return self._calculate_score(prediction=prediction) def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: @@ -379,22 +419,17 @@ def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = Non if not self.is_fitted: raise NotFittedError(self.__class__.__name__) + # Common preprocessing + data = self.common_preprocessing.transform(data=data) + ensemble_predictions = self._predict_forecasters(data=data, forecast_start=forecast_start) - additional_features = ( - ForecastInputDataset.from_timeseries( - self.combiner_preprocessing.transform(data=data), - target_column=self.target_column, - forecast_start=forecast_start, - ) - if len(self.combiner_preprocessing.transforms) > 0 - else None - ) + features = self._transform_combiner_data(data=data) # Predict and restore target column prediction = self.combiner.predict( data=ensemble_predictions, - additional_features=additional_features, + additional_features=features, ) return restore_target(dataset=prediction, original_dataset=data, target_column=self.target_column) diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 011cce2b0..ee60e67bb 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -263,6 +263,12 @@ def checks(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, def feature_adders(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: return [ + LagsAdder( + history_available=config.predict_history, + horizons=config.horizons, + add_trivial_lags=True, + target_column=config.target_column, + ), WindPowerFeatureAdder( windspeed_reference_column=config.wind_speed_column, ), @@ -287,18 +293,10 @@ def feature_adders(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesD ] -def feature_standardizers( - config: EnsembleWorkflowConfig, model_type: str -) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: +def feature_standardizers(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: return [ Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), Scaler(selection=Exclude(config.target_column), method="standard"), - SampleWeighter( - target_column=config.target_column, - weight_exponent=config.forecaster_sample_weight_exponent[model_type], - weight_floor=config.sample_weight_floor, - weight_scale_percentile=config.sample_weight_scale_percentile, - ), EmptyFeatureRemover(), ] @@ -315,6 +313,17 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin Raises: ValueError: If an unsupported base model or combiner type is specified. """ + # Common preprocessing + common_preprocessing = TransformPipeline( + transforms=[ + *checks(config), + *feature_adders(config), + HolidayFeatureAdder(country_code=config.location.country_code), + DatetimeFeaturesAdder(onehot_encode=False), + *feature_standardizers(config), + ] + ) + # Build forecasters and their processing pipelines forecaster_preprocessing: dict[str, list[Transform[TimeSeriesDataset, TimeSeriesDataset]]] = {} forecasters: dict[str, Forecaster] = {} @@ -324,17 +333,12 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin config=LGBMForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(config), - *feature_adders(config), - LagsAdder( - history_available=config.predict_history, - horizons=config.horizons, - add_trivial_lags=True, + SampleWeighter( target_column=config.target_column, + weight_exponent=config.forecaster_sample_weight_exponent[model_type], + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, ), - HolidayFeatureAdder(country_code=config.location.country_code), - DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(config, model_type), ] elif model_type == "gblinear": @@ -342,18 +346,31 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin config=GBLinearForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(config), - *feature_adders(config), - LagsAdder( - history_available=config.predict_history, - horizons=config.horizons, - add_trivial_lags=False, + SampleWeighter( target_column=config.target_column, - custom_lags=[timedelta(days=7)], + weight_exponent=config.forecaster_sample_weight_exponent[model_type], + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, + ), + Selector( + selection=FeatureSelection( + exclude={ + "load_lag_P14D", + "load_lag_P13D", + "load_lag_P12D", + "load_lag_P11D", + "load_lag_P10D", + "load_lag_P9D", + "load_lag_P8D", + "load_lag_P7D", + "load_lag_P6D", + "load_lag_P5D", + "load_lag_P4D", + "load_lag_P3D", + "load_lag_P2D", + } + ) ), - HolidayFeatureAdder(country_code=config.location.country_code), - DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(config, model_type), Imputer( selection=Exclude(config.target_column), imputation_strategy="mean", @@ -368,34 +385,24 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin config=XGBoostForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(config), - *feature_adders(config), - LagsAdder( - history_available=config.predict_history, - horizons=config.horizons, - add_trivial_lags=True, + SampleWeighter( target_column=config.target_column, + weight_exponent=config.forecaster_sample_weight_exponent[model_type], + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, ), - HolidayFeatureAdder(country_code=config.location.country_code), - DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(config, model_type), ] elif model_type == "lgbm_linear": forecasters[model_type] = LGBMLinearForecaster( config=LGBMLinearForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(config), - *feature_adders(config), - LagsAdder( - history_available=config.predict_history, - horizons=config.horizons, - add_trivial_lags=True, + SampleWeighter( target_column=config.target_column, + weight_exponent=config.forecaster_sample_weight_exponent[model_type], + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, ), - HolidayFeatureAdder(country_code=config.location.country_code), - DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(config, model_type), ] else: msg = f"Unsupported base model type: {model_type}" @@ -465,7 +472,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin combiner_preprocessing: TransformPipeline[TimeSeriesDataset] = TransformPipeline(transforms=combiner_transforms) ensemble_model = EnsembleForecastingModel( - common_preprocessing=TransformPipeline(transforms=[]), + common_preprocessing=common_preprocessing, model_specific_preprocessing=model_specific_preprocessing, combiner_preprocessing=combiner_preprocessing, postprocessing=TransformPipeline(transforms=postprocessing), diff --git a/packages/openstef-meta/src/openstef_meta/transforms/selector.py b/packages/openstef-meta/src/openstef_meta/transforms/selector.py index 75eb4e321..e4c5d343b 100644 --- a/packages/openstef-meta/src/openstef_meta/transforms/selector.py +++ b/packages/openstef-meta/src/openstef_meta/transforms/selector.py @@ -24,7 +24,7 @@ class Selector(BaseConfig, TimeSeriesTransform): selection: FeatureSelection = Field( default=FeatureSelection.ALL, - description="Features to check for NaN values. Rows with NaN in any selected column are dropped.", + description="Feature selection for efficient model specific preprocessing.x", ) @override From b44fd928ff9ddf5eab552fa33ebb2f46742843c0 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 4 Dec 2025 14:39:31 +0100 Subject: [PATCH 055/104] bug fixes Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 6 ++--- .../presets/forecasting_workflow.py | 27 +++++++++++++++++-- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 6d8931099..be9053a75 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -44,9 +44,9 @@ OUTPUT_PATH = Path("./benchmark_results") -N_PROCESSES = 11 # multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = 1 # multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark -ensemble_type = "learned_weights" # "stacking", "learned_weights" or "rules" +ensemble_type = "stacking" # "stacking", "learned_weights" or "rules" base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" combiner_model = ( "lgbm" # "lgbm", "xgboost", "rf" or "logistic" for learned weights combiner, gblinear for stacking combiner @@ -95,7 +95,7 @@ relative_humidity_column="relative_humidity_2m", energy_price_column="EPEX_NL", forecast_combiner_sample_weight_exponent=1, - forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 0, "xgboost": 0, "lgbm_linear": 0}, + forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 1, "xgboost": 0, "lgbm_linear": 0}, ) diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index ee60e67bb..6ad2b78d7 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -354,7 +354,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin ), Selector( selection=FeatureSelection( - exclude={ + exclude={ # Fix hardcoded lag features should be replaced by a LagsAdder classmethod "load_lag_P14D", "load_lag_P13D", "load_lag_P12D", @@ -362,7 +362,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin "load_lag_P10D", "load_lag_P9D", "load_lag_P8D", - "load_lag_P7D", + # "load_lag_P7D", # Keep 7D lag for weekly seasonality "load_lag_P6D", "load_lag_P5D", "load_lag_P4D", @@ -371,6 +371,29 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin } ) ), + Selector( # Fix hardcoded holiday features should be replaced by a HolidayFeatureAdder classmethod + selection=FeatureSelection( + exclude={ + "is_ascension_day", + "is_christmas_day", + "is_easter_monday", + "is_easter_sunday", + "is_good_friday", + "is_holiday", + "is_king_s_day", + "is_liberation_day", + "is_new_year_s_day", + "is_second_day_of_christmas", + "is_sunday", + "is_week_day", + "is_weekend_day", + "is_whit_monday", + "is_whit_sunday", + "month_of_year", + "quarter_of_year", + } + ) + ), Imputer( selection=Exclude(config.target_column), imputation_strategy="mean", From 51579d0a7dc95cbd0a730aa075403469f67a1469 Mon Sep 17 00:00:00 2001 From: floriangoethals Date: Thu, 4 Dec 2025 15:38:19 +0100 Subject: [PATCH 056/104] added learned weights contributions --- examples/benchmarks/liander_2024_ensemble.py | 3 +- .../openstef4_backtest_forecaster.py | 15 +++++- .../learned_weights_combiner.py | 47 ++++++++++++++----- .../forecast_combiners/stacking_combiner.py | 43 +++++++---------- 4 files changed, 67 insertions(+), 41 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index d3c990ad2..b490a800c 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -141,6 +141,7 @@ def _create_workflow() -> CustomForecastingWorkflow: config=backtest_config, workflow_factory=_create_workflow, debug=False, + contributions=True, cache_dir=OUTPUT_PATH / "cache" / f"{context.run_name}_{target.name}", ) @@ -149,7 +150,7 @@ def _create_workflow() -> CustomForecastingWorkflow: start_time = time.time() create_liander2024_benchmark_runner( storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH / model), - data_dir=Path("../data/liander2024-energy-forecasting-benchmark"), + data_dir=Path("local_data/liander2024-energy-forecasting-benchmark"), callbacks=[StrictExecutionCallback()], ).run( forecaster_factory=_target_forecaster_factory, diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py index 56dad935f..8e7554b24 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py @@ -18,6 +18,7 @@ from openstef_core.exceptions import FlatlinerDetectedError, NotFittedError from openstef_core.types import Q from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): @@ -40,6 +41,10 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): default=False, description="When True, saves intermediate input data for debugging", ) + contributions: bool = Field( + default=False, + description="When True, saves intermediate input data for explainability", + ) _workflow: CustomForecastingWorkflow | None = PrivateAttr(default=None) _is_flatliner_detected: bool = PrivateAttr(default=False) @@ -50,6 +55,8 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): def model_post_init(self, context: Any) -> None: if self.debug: self.cache_dir.mkdir(parents=True, exist_ok=True) + if self.contributions: + self.cache_dir.mkdir(parents=True, exist_ok=True) @property @override @@ -58,7 +65,7 @@ def quantiles(self) -> list[Q]: if self._workflow is None: self._workflow = self.workflow_factory() # Extract quantiles from the workflow's model - return self._workflow.model.forecaster.config.quantiles + return self._workflow.model.forecaster.config.quantiles # type: ignore @override def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: @@ -69,6 +76,7 @@ def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: training_data = data.get_window( start=data.horizon - self.config.training_context_length, end=data.horizon, available_before=data.horizon ) + if self.debug: id_str = data.horizon.strftime("%Y%m%d%H%M%S") @@ -91,6 +99,7 @@ def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: path=self.cache_dir / f"debug_{id_str}_prepared_training.parquet" ) + @override def predict(self, data: RestrictedHorizonVersionedTimeSeries) -> TimeSeriesDataset | None: if self._is_flatliner_detected: @@ -121,6 +130,10 @@ def predict(self, data: RestrictedHorizonVersionedTimeSeries) -> TimeSeriesDatas predict_data.to_parquet(path=self.cache_dir / f"debug_{id_str}_predict.parquet") forecast.to_parquet(path=self.cache_dir / f"debug_{id_str}_forecast.parquet") + if self.contributions and isinstance(self._workflow.model, EnsembleForecastingModel): + contr_str = data.horizon.strftime("%Y%m%d%H%M%S") + contributions = self._workflow.model.predict_contributions(predict_data) + contributions.to_parquet(path=self.cache_dir / f"contrib_{contr_str}_predict.parquet") return forecast diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 98c1767a3..8d4a237a4 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -289,7 +289,10 @@ def _validate_labels(self, labels: pd.Series, model_index: int) -> None: def _predict_model_weights_quantile(self, base_predictions: pd.DataFrame, model_index: int) -> pd.DataFrame: model = self.models[model_index] - return model.predict_proba(X=base_predictions) # type: ignore + weights_array = model.predict_proba(base_predictions.to_numpy()) # type: ignore + + return pd.DataFrame(weights_array, index=base_predictions.index, columns=self._label_encoder.classes_) # type: ignore + def _generate_predictions_quantile( self, @@ -346,14 +349,17 @@ def predict_contributions( raise NotFittedError(self.__class__.__name__) # Generate predictions - contributions = pd.DataFrame({ - Quantile(q).format(): self._generate_contributions_quantile( + contribution_list = [ + self._generate_contributions_quantile( dataset=data.select_quantile(quantile=Quantile(q)), additional_features=additional_features, model_index=i, ) for i, q in enumerate(self.quantiles) - }) + ] + + contributions = pd.concat(contribution_list, axis=1) + target_series = data.target_series if target_series is not None: contributions[data.target_column] = target_series @@ -366,15 +372,30 @@ def _generate_contributions_quantile( additional_features: ForecastInputDataset | None, model_index: int, ) -> pd.DataFrame: - # TODO: FLORIAN Update content - # input_data = self._prepare_input_data( - # dataset=dataset, - # additional_features=additional_features, - # ) - - # weights = self._predict_model_weights_quantile(base_predictions=input_data, model_index=model_index) - - return pd.DataFrame() + input_data = self._prepare_input_data( + dataset=dataset, + additional_features=additional_features, + ) + weights = self._predict_model_weights_quantile(base_predictions=input_data, model_index=model_index) + weights.columns = [f"{col}_{Quantile(self.quantiles[model_index]).format()}" for col in weights.columns] + return weights + + + # def _generate_contributions_quantile( + # self, + # dataset: ForecastInputDataset, + # additional_features: ForecastInputDataset | None, + # model_index: int, + # ) -> pd.DataFrame: + # # TODO: FLORIAN Update content + # # input_data = self._prepare_input_data( + # # dataset=dataset, + # # additional_features=additional_features, + # # ) + + # # weights = self._predict_model_weights_quantile(base_predictions=input_data, model_index=model_index) + + # return pd.DataFrame() @property @override diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index a38123898..beaa556a8 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -204,37 +204,30 @@ def predict_contributions( data: EnsembleForecastDataset, additional_features: ForecastInputDataset | None = None, ) -> pd.DataFrame: - if not self.is_fitted: - raise NotFittedError(self.__class__.__name__) - - # Generate predictions - predictions: list[pd.DataFrame] = [] - for i, q in enumerate(self.quantiles): - if additional_features is not None: - input_data = self._combine_datasets( - data=data.select_quantile(quantile=q), - additional_features=additional_features, - ) - else: - input_data = data.select_quantile(quantile=q) - p = self.models[i].predict(data=input_data).data - predictions.append(p) # Concatenate predictions along columns to form a DataFrame with quantile columns - df = pd.concat(predictions, axis=1) - - contributions = pd.DataFrame() - for q in self.quantiles: + predictions = self.predict(data=data, additional_features = additional_features).data + contributions = {} + + for i, q in enumerate(self.quantiles): # Extract base predictions for this quantile # TODO Florian implement contributions extraction per quantile - pass - - return contributions # Placeholder for actual implementation + quantile_label = Quantile(q).format() + final_prediction = predictions.loc[:,quantile_label] + base_predictions = data.select_quantile(q).data + contributions[quantile_label] = self.contributions_from_predictions( + base_predictions=base_predictions, + #Final_prediction taken as the biggest value + final_predictions=final_prediction + ) + contributions = pd.DataFrame(contributions) + + return pd.DataFrame(contributions) # Placeholder for actual implementation @staticmethod def contributions_from_predictions( base_predictions: pd.DataFrame, final_predictions: pd.Series, - ) -> pd.DataFrame: + ) -> pd.Series: """Extract contributions from predictions DataFrame. Args: @@ -244,9 +237,7 @@ def contributions_from_predictions( DataFrame with contributions per base learner. """ # TODO Florian implement contributions extraction - # abs(final_predictions) / sum(abs(base_predictions), axis=1) - - return pd.DataFrame() # Placeholder for actual implementation + return final_predictions.abs()/base_predictions.abs().sum(axis=1) # Placeholder for actual implementation @property def is_fitted(self) -> bool: From 2899baf0e3513e4d582e81eb8464f9674414fff4 Mon Sep 17 00:00:00 2001 From: lars800 Date: Fri, 5 Dec 2025 17:11:37 +0100 Subject: [PATCH 057/104] Added Feature Contributions Residual Forecaster and Stacking Forecaster can now predict model contributions. Regular forecasters (EXCEPT LGBM Linear) can predict feature contributions --- .../openstef4_backtest_forecaster.py | 4 +- .../learned_weights_combiner.py | 18 ------ .../forecast_combiners/stacking_combiner.py | 56 ++++++----------- .../models/forecasting/residual_forecaster.py | 61 ++++++++++++++++++- .../models/forecast_combiners/conftest.py | 8 ++- .../test_stacking_combiner.py | 23 ++++++- .../forecasting/test_residual_forecaster.py | 35 +++++++++++ .../openstef_models/explainability/mixins.py | 13 ++++ .../models/forecasting/gblinear_forecaster.py | 44 +++++++++++++ .../models/forecasting/lgbm_forecaster.py | 35 +++++++++++ .../forecasting/lgbmlinear_forecaster.py | 37 +++++++++++ .../models/forecasting/xgboost_forecaster.py | 45 ++++++++++++++ .../forecasting/test_gblinear_forecaster.py | 32 ++++++++++ .../forecasting/test_lgbm_forecaster.py | 31 ++++++++++ .../forecasting/test_xgboost_forecaster.py | 33 +++++++++- 15 files changed, 410 insertions(+), 65 deletions(-) diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py index 8e7554b24..5ae961632 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py @@ -65,7 +65,7 @@ def quantiles(self) -> list[Q]: if self._workflow is None: self._workflow = self.workflow_factory() # Extract quantiles from the workflow's model - return self._workflow.model.forecaster.config.quantiles # type: ignore + return self._workflow.model.forecaster.config.quantiles # type: ignore @override def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: @@ -76,7 +76,6 @@ def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: training_data = data.get_window( start=data.horizon - self.config.training_context_length, end=data.horizon, available_before=data.horizon ) - if self.debug: id_str = data.horizon.strftime("%Y%m%d%H%M%S") @@ -99,7 +98,6 @@ def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: path=self.cache_dir / f"debug_{id_str}_prepared_training.parquet" ) - @override def predict(self, data: RestrictedHorizonVersionedTimeSeries) -> TimeSeriesDataset | None: if self._is_flatliner_detected: diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 8d4a237a4..acf366661 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -292,7 +292,6 @@ def _predict_model_weights_quantile(self, base_predictions: pd.DataFrame, model_ weights_array = model.predict_proba(base_predictions.to_numpy()) # type: ignore return pd.DataFrame(weights_array, index=base_predictions.index, columns=self._label_encoder.classes_) # type: ignore - def _generate_predictions_quantile( self, @@ -379,23 +378,6 @@ def _generate_contributions_quantile( weights = self._predict_model_weights_quantile(base_predictions=input_data, model_index=model_index) weights.columns = [f"{col}_{Quantile(self.quantiles[model_index]).format()}" for col in weights.columns] return weights - - - # def _generate_contributions_quantile( - # self, - # dataset: ForecastInputDataset, - # additional_features: ForecastInputDataset | None, - # model_index: int, - # ) -> pd.DataFrame: - # # TODO: FLORIAN Update content - # # input_data = self._prepare_input_data( - # # dataset=dataset, - # # additional_features=additional_features, - # # ) - - # # weights = self._predict_model_weights_quantile(base_predictions=input_data, model_index=model_index) - - # return pd.DataFrame() @property @override diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index beaa556a8..1814ef49b 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -10,7 +10,7 @@ """ import logging -from typing import TYPE_CHECKING, cast, override +from typing import cast, override import pandas as pd from pydantic import Field, field_validator @@ -23,15 +23,13 @@ from openstef_core.types import LeadTime, Quantile from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig from openstef_meta.utils.datasets import EnsembleForecastDataset +from openstef_models.models.forecasting.forecaster import Forecaster from openstef_models.models.forecasting.gblinear_forecaster import ( GBLinearForecaster, GBLinearHyperParams, ) from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMHyperParams -if TYPE_CHECKING: - from openstef_models.models.forecasting.forecaster import Forecaster - logger = logging.getLogger(__name__) ForecasterHyperParams = GBLinearHyperParams | LGBMHyperParams @@ -204,40 +202,26 @@ def predict_contributions( data: EnsembleForecastDataset, additional_features: ForecastInputDataset | None = None, ) -> pd.DataFrame: - # Concatenate predictions along columns to form a DataFrame with quantile columns - predictions = self.predict(data=data, additional_features = additional_features).data - contributions = {} - - for i, q in enumerate(self.quantiles): - # Extract base predictions for this quantile - # TODO Florian implement contributions extraction per quantile - quantile_label = Quantile(q).format() - final_prediction = predictions.loc[:,quantile_label] - base_predictions = data.select_quantile(q).data - contributions[quantile_label] = self.contributions_from_predictions( - base_predictions=base_predictions, - #Final_prediction taken as the biggest value - final_predictions=final_prediction - ) - contributions = pd.DataFrame(contributions) - - return pd.DataFrame(contributions) # Placeholder for actual implementation - @staticmethod - def contributions_from_predictions( - base_predictions: pd.DataFrame, - final_predictions: pd.Series, - ) -> pd.Series: - """Extract contributions from predictions DataFrame. - - Args: - predictions: DataFrame containing predictions. + # Generate predictions + predictions: list[pd.DataFrame] = [] + for i, q in enumerate(self.quantiles): + if additional_features is not None: + input_data = self._combine_datasets( + data=data.select_quantile(quantile=q), + additional_features=additional_features, + ) + else: + input_data = data.select_quantile(quantile=q) + p = self.predict_contributions_quantile( + model=self.models[i], + data=input_data, + ) + p.columns = [f"{col}_{Quantile(self.quantiles[i]).format()}" for col in p.columns] + predictions.append(p) - Returns: - DataFrame with contributions per base learner. - """ - # TODO Florian implement contributions extraction - return final_predictions.abs()/base_predictions.abs().sum(axis=1) # Placeholder for actual implementation + # Concatenate predictions along columns to form a DataFrame with quantile columns + return pd.concat(predictions, axis=1) @property def is_fitted(self) -> bool: diff --git a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py index 96cb8911f..be9100a1a 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py @@ -13,7 +13,7 @@ from typing import override import pandas as pd -from pydantic import Field +from pydantic import Field, model_validator from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ( @@ -52,6 +52,30 @@ class ResidualHyperParams(HyperParams): description="Hyperparameters for the final learner. Defaults to LGBMHyperparams.", ) + primary_name: str = Field( + default="primary_model", + description="Name identifier for the primary model.", + ) + + secondary_name: str = Field( + default="secondary_model", + description="Name identifier for the secondary model.", + ) + + @model_validator(mode="after") + def validate_names(self) -> "ResidualHyperParams": + """Validate that primary and secondary names are not the same. + + Raises: + ValueError: If primary and secondary names are the same. + + Returns: + ResidualHyperParams: The validated hyperparameters. + """ + if self.primary_name == self.secondary_name: + raise ValueError("Primary and secondary model names must be different.") + return self + class ResidualForecasterConfig(ForecasterConfig): """Configuration for Hybrid-based forecasting models.""" @@ -85,6 +109,8 @@ def __init__(self, config: ResidualForecasterConfig) -> None: self._secondary_model: list[ResidualBaseForecaster] = self._init_secondary_model( hyperparams=config.hyperparams.secondary_hyperparams ) + self.primary_name = config.hyperparams.primary_name + self.secondary_name = config.hyperparams.secondary_name self._is_fitted = False def _init_secondary_model(self, hyperparams: ResidualBaseForecasterHyperParams) -> list[ResidualBaseForecaster]: @@ -254,6 +280,39 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: sample_interval=data.sample_interval, ) + def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = True) -> pd.DataFrame: + """Generate prediction contributions using the ResidualForecaster model. + + Args: + data: Input data for prediction contributions. + + Returns: + pd.DataFrame containing the prediction contributions. + """ + primary_predictions = self._primary_model.predict(data=data).data + + secondary_predictions = self._predict_secodary_model(data=data).data + + if not scale: + primary_contributions = primary_predictions + primary_name = self._primary_model.__class__.__name__ + primary_contributions.columns = [f"{primary_name}_{q}" for q in primary_contributions.columns] + + secondary_contributions = secondary_predictions + secondary_name = self._secondary_model[0].__class__.__name__ + secondary_contributions.columns = [f"{secondary_name}_{q}" for q in secondary_contributions.columns] + + return pd.concat([primary_contributions, secondary_contributions], axis=1) + + primary_contributions = primary_predictions.abs() / (primary_predictions.abs() + secondary_predictions.abs()) + primary_contributions.columns = [f"{self.primary_name}_{q}" for q in primary_contributions.columns] + + secondary_contributions = secondary_predictions.abs() / ( + primary_predictions.abs() + secondary_predictions.abs()) + secondary_contributions.columns = [f"{self.secondary_name}_{q}" for q in secondary_contributions.columns] + + return pd.concat([primary_contributions, secondary_contributions], axis=1) + @property def config(self) -> ResidualForecasterConfig: """Get the configuration of the ResidualForecaster. diff --git a/packages/openstef-meta/tests/models/forecast_combiners/conftest.py b/packages/openstef-meta/tests/models/forecast_combiners/conftest.py index c80385a07..cf4edb982 100644 --- a/packages/openstef-meta/tests/models/forecast_combiners/conftest.py +++ b/packages/openstef-meta/tests/models/forecast_combiners/conftest.py @@ -17,11 +17,13 @@ def forecast_dataset_factory() -> Callable[[], ForecastDataset]: def _make() -> ForecastDataset: rng = np.random.default_rng() + coef = rng.normal(0, 1, 3) + df = pd.DataFrame( data={ - "quantile_P10": [90, 180, 270], - "quantile_P50": [100, 200, 300], - "quantile_P90": [110, 220, 330], + "quantile_P10": np.array([1, 2, 3]) * coef[0], + "quantile_P50": np.array([1, 2, 3]) * coef[1], + "quantile_P90": np.array([1, 2, 3]) * coef[2], "load": [100, 200, 300], }, index=pd.to_datetime([ diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py b/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py index abcd9f66c..4235df532 100644 --- a/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py +++ b/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py @@ -3,7 +3,7 @@ # # SPDX-License-Identifier: MPL-2.0 from datetime import timedelta - +import pandas as pd import pytest from openstef_core.exceptions import NotFittedError @@ -17,13 +17,13 @@ @pytest.fixture(params=["lgbm", "gblinear"]) def regressor(request: pytest.FixtureRequest) -> str: - """Fixture to provide different classifier types for LearnedWeightsCombiner tests.""" + """Fixture to provide different regressor types for Stacking tests.""" return request.param @pytest.fixture def config(regressor: str) -> StackingCombinerConfig: - """Fixture to create StackingCombinerConfig based on the classifier type.""" + """Fixture to create StackingCombinerConfig based on the regressor type.""" if regressor == "lgbm": hp = StackingCombiner.LGBMHyperParams(num_leaves=5, n_estimators=10) elif regressor == "gblinear": @@ -83,3 +83,20 @@ def test_stacking_combiner_not_fitted_error( # Act & Assert with pytest.raises(NotFittedError): forecaster.predict(ensemble_dataset) + + +def test_stacking_combiner_predict_contributions( + ensemble_dataset: EnsembleForecastDataset, + config: StackingCombinerConfig, +): + """Test that predict_contributions method returns contributions with correct shape.""" + # Arrange + forecaster = StackingCombiner(config=config) + forecaster.fit(ensemble_dataset) + + # Act + contributions = forecaster.predict_contributions(ensemble_dataset) + + # Assert + assert isinstance(contributions, pd.DataFrame), "Contributions should be returned as a DataFrame." + assert len(contributions.columns) == len(ensemble_dataset.quantiles) * len(ensemble_dataset.forecaster_names) diff --git a/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py b/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py index 37f30ab2d..9e80abc9d 100644 --- a/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py +++ b/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py @@ -4,6 +4,7 @@ from datetime import timedelta +import pandas as pd import pytest from openstef_core.datasets import ForecastInputDataset @@ -140,3 +141,37 @@ def test_residual_forecaster_with_sample_weights( # (This is a statistical test - with different weights, predictions should differ) differences = (result_with_weights.data - result_without_weights.data).abs() assert differences.sum().sum() > 0, "Sample weights should affect model predictions" + + +def test_residual_forecaster_predict_contributions( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: ResidualForecasterConfig, +): + """Test basic fit and predict workflow with output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = ResidualForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict_contributions(sample_forecast_input_dataset, scale=True) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + base_models = [ + forecaster.primary_name, + forecaster.secondary_name + ] + expected_columns = [f"{col}_{q.format()}" for col in base_models for q in expected_quantiles] + assert sorted(result.columns) == sorted(expected_columns), ( + f"Expected columns {expected_columns}, got {list(result.columns)}" + ) + + # Contributions should sum to 1.0 per quantile + for q in expected_quantiles: + quantile_cols = [col for col in result.columns if col.endswith(f"_{q.format()}")] + col_sums = result[quantile_cols].sum(axis=1) + assert all(abs(col_sums - 1.0) < 1e-6), f"Contributions for quantile {q.format()} should sum to 1.0" diff --git a/packages/openstef-models/src/openstef_models/explainability/mixins.py b/packages/openstef-models/src/openstef_models/explainability/mixins.py index 9969b4993..e9c6b45f3 100644 --- a/packages/openstef-models/src/openstef_models/explainability/mixins.py +++ b/packages/openstef-models/src/openstef_models/explainability/mixins.py @@ -13,6 +13,7 @@ import pandas as pd import plotly.graph_objects as go +from openstef_core.datasets.validated_datasets import ForecastInputDataset from openstef_core.types import Q, Quantile from openstef_models.explainability.plotters.feature_importance_plotter import FeatureImportancePlotter @@ -44,6 +45,18 @@ def feature_importances(self) -> pd.DataFrame: """ raise NotImplementedError + @abstractmethod + def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> pd.DataFrame: + """Get feature contributions for each prediction. + + Args: + data: Input dataset for which to compute feature contributions. + + Returns: + DataFrame with contributions per base learner. + """ + raise NotImplementedError + def plot_feature_importances(self, quantile: Quantile = Q(0.5)) -> go.Figure: """Create interactive treemap visualization of feature importances. diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index 4fccf2825..6fc0c30d6 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -326,6 +326,50 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: sample_interval=data.sample_interval, ) + def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = True) -> pd.DataFrame: + """Get feature contributions for each prediction. + + Args: + data: Input dataset for which to compute feature contributions. + scale: If True, scale contributions to sum to 1.0 per quantile. + + Returns: + DataFrame with contributions per base learner. + """ + # Get input features for prediction + input_data: pd.DataFrame = data.input_data(start=data.forecast_start) + xgb_input: xgb.DMatrix = xgb.DMatrix(data=input_data) + + # Generate predictions + booster = self._gblinear_model.get_booster() + predictions_array: np.ndarray = booster.predict(xgb_input, pred_contribs=True, strict_shape=True)[:, :, :-1] + + # Remove last column + contribs = predictions_array / np.sum(predictions_array, axis=-1, keepdims=True) + + # Flatten to 2D array, name columns accordingly + contribs = contribs.reshape(contribs.shape[0], -1) + df = pd.DataFrame( + data=contribs, + index=input_data.index, + columns=[ + f"{feature}_{quantile.format()}" + for feature in input_data.columns + for quantile in self.config.quantiles + + ], + ) + + if scale: + # Scale contributions so that they sum to 1.0 per quantile and are positive + for q in self.config.quantiles: + quantile_cols = [col for col in df.columns if col.endswith(f"_{q.format()}")] + row_sums = df[quantile_cols].abs().sum(axis=1) + df[quantile_cols] = df[quantile_cols].abs().div(row_sums, axis=0) + + # Construct DataFrame with appropriate quantile columns + return df + @property @override def feature_importances(self) -> pd.DataFrame: diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index 03c667b00..84852aaaa 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -312,6 +312,41 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: sample_interval=data.sample_interval, ) + def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> pd.DataFrame: + """Get feature contributions for each prediction. + + Args: + data: Input dataset for which to compute feature contributions. + scale: If True, scale contributions to sum to 1.0 per quantile. + + Returns: + DataFrame with contributions per base learner. + """ + # Get input features for prediction + input_data: pd.DataFrame = data.input_data(start=data.forecast_start) + + contributions: list[pd.DataFrame] = [] + + for i, quantile in enumerate(self.config.quantiles): + # Get model for specific quantile + model: LGBMRegressor = self._lgbm_model.models[i] # type: ignore + + # Generate contributions using LightGBM's built-in method, and remove bias term + contribs_quantile: np.ndarray[float] = model.predict(input_data, pred_contrib=True)[:, :-1] # type: ignore + + if scale: + # Scale contributions so that they sum to 1.0 per quantile + contribs_quantile = np.abs(contribs_quantile) / np.sum(np.abs(contribs_quantile), axis=1, keepdims=True) + + contributions.append(pd.DataFrame( + data=contribs_quantile, + index=input_data.index, + columns=[f"{feature}_{quantile.format()}" for feature in input_data.columns], + )) + + # Construct DataFrame + return pd.concat(contributions, axis=1) + @property @override def feature_importances(self) -> pd.DataFrame: diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index eace689fb..e39e60bb5 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -314,6 +314,43 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: sample_interval=data.sample_interval, ) + @override + def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> pd.DataFrame: + """Get feature contributions for each prediction. + + Args: + data: Input dataset for which to compute feature contributions. + scale: If True, scale contributions to sum to 1.0 per quantile. + + Returns: + DataFrame with contributions per base learner. + """ + raise NotImplementedError("predict_contributions is not yet implemented for LGBMLinearForecaster") + # Get input features for prediction + input_data: pd.DataFrame = data.input_data(start=data.forecast_start) + + contributions: list[pd.DataFrame] = [] + + for i, quantile in enumerate(self.config.quantiles): + # Get model for specific quantile + model: LGBMRegressor = self._lgbmlinear_model.models[i] # type: ignore + + # Generate contributions NOT AVAILABLE FOR LGBM with linear_trees=true + contribs_quantile: np.ndarray[float] = model.predict(input_data, pred_contrib=True)[:, :-1] # type: ignore + + if scale: + # Scale contributions so that they sum to 1.0 per quantile + contribs_quantile = np.abs(contribs_quantile) / np.sum(np.abs(contribs_quantile), axis=1, keepdims=True) + + contributions.append(pd.DataFrame( + data=contribs_quantile, + index=input_data.index, + columns=[f"{feature}_{quantile.format()}" for feature in input_data.columns], + )) + + # Construct DataFrame + return pd.concat(contributions, axis=1) + @property @override def feature_importances(self) -> pd.DataFrame: diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py index 2c673c68b..6843df2fa 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py @@ -420,6 +420,51 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: sample_interval=data.sample_interval, ) + def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> pd.DataFrame: + """Get feature contributions for each prediction. + + Args: + data: Input dataset for which to compute feature contributions. + scale: If True, scale contributions to sum to 1.0 per quantile. + + Returns: + DataFrame with contributions per base learner. + """ + # Get input features for prediction + input_data: pd.DataFrame = data.input_data(start=data.forecast_start) + xgb_input: xgb.DMatrix = xgb.DMatrix(data=input_data) + + # Generate predictions + booster = self._xgboost_model.get_booster() + predictions_array: np.ndarray = booster.predict(xgb_input, pred_contribs=True, strict_shape=True)[:, :, :-1] + + # Remove last column + contribs = predictions_array / np.sum(predictions_array, axis=-1, keepdims=True) + + # Flatten to 2D array, name columns accordingly + contribs = contribs.reshape(contribs.shape[0], -1) + + df = pd.DataFrame( + data=contribs, + index=input_data.index, + columns=[ + f"{feature}_{quantile.format()}" + for feature in input_data.columns + for quantile in self.config.quantiles + + ], + ) + + if scale: + # Scale contributions so that they sum to 1.0 per quantile and are positive + for q in self.config.quantiles: + quantile_cols = [col for col in df.columns if col.endswith(f"_{q.format()}")] + row_sums = df[quantile_cols].abs().sum(axis=1) + df[quantile_cols] = df[quantile_cols].abs().div(row_sums, axis=0) + + # Construct DataFrame with appropriate quantile columns + return df + @property @override def feature_importances(self) -> pd.DataFrame: diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py index 1eba577f5..b260d1b4e 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py @@ -4,6 +4,7 @@ from datetime import timedelta +import numpy as np import pandas as pd import pytest @@ -132,3 +133,34 @@ def test_gblinear_forecaster__feature_importances( col_sums = feature_importances.sum(axis=0) pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=expected_columns), atol=1e-10) assert (feature_importances >= 0).all().all() + + +def test_gblinear_forecaster_predict_contributions( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: GBLinearForecasterConfig, +): + """Test basic fit and predict workflow with output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = GBLinearForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict_contributions(sample_forecast_input_dataset, scale=True) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + input_features = sample_forecast_input_dataset.input_data().columns + expected_columns = [f"{col}_{q.format()}" for col in input_features for q in expected_quantiles] + assert list(result.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.columns)}" + ) + + # Contributions should sum to 1.0 per quantile + for q in expected_quantiles: + quantile_cols = [col for col in result.columns if col.endswith(f"_{q.format()}")] + col_sums = result[quantile_cols].sum(axis=1) + pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=result.index, dtype=np.float32), atol=1e-10) diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py index b4fe1c989..886da0ce6 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py @@ -146,4 +146,35 @@ def test_lgbm_forecaster__feature_importances( assert (feature_importances >= 0).all().all() +def test_lgbm_forecaster_predict_contributions( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: LGBMForecasterConfig, +): + """Test basic fit and predict workflow with output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = LGBMForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict_contributions(sample_forecast_input_dataset, scale=True) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + input_features = sample_forecast_input_dataset.input_data().columns + expected_columns = [f"{col}_{q.format()}" for col in input_features for q in expected_quantiles] + assert sorted(result.columns) == sorted(expected_columns), ( + f"Expected columns {expected_columns}, got {list(result.columns)}" + ) + + # Contributions should sum to 1.0 per quantile + for q in expected_quantiles: + quantile_cols = [col for col in result.columns if col.endswith(f"_{q.format()}")] + col_sums = result[quantile_cols].sum(axis=1) + pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=result.index), atol=1e-10) + + # TODO(@MvLieshout): Add tests on different loss functions # noqa: TD003 diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py index dd0e80058..bde85e36a 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py @@ -22,7 +22,7 @@ def base_config() -> XGBoostForecasterConfig: """Base configuration for XGBoost forecaster tests.""" return XGBoostForecasterConfig( horizons=[LeadTime(timedelta(days=1))], - quantiles=[Q(0.1), Q(0.5), Q(0.9)], + quantiles=[Q(0.1), Q(0.3), Q(0.5), Q(0.7), Q(0.9)], hyperparams=XGBoostHyperParams( n_estimators=10, # Small for fast tests ), @@ -167,3 +167,34 @@ def test_xgboost_forecaster__feature_importances( col_sums = feature_importances.sum(axis=0) pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=expected_columns), atol=1e-10) assert (feature_importances >= 0).all().all() + + +def test_xgboost_forecaster_predict_contributions( + sample_forecast_input_dataset: ForecastInputDataset, + base_config: XGBoostForecasterConfig, +): + """Test basic fit and predict workflow with output validation.""" + # Arrange + expected_quantiles = base_config.quantiles + forecaster = XGBoostForecaster(config=base_config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict_contributions(sample_forecast_input_dataset, scale=True) + + # Assert + # Basic functionality + assert forecaster.is_fitted, "Model should be fitted after calling fit()" + + # Check that necessary quantiles are present + input_features = sample_forecast_input_dataset.input_data().columns + expected_columns = [f"{col}_{q.format()}" for col in input_features for q in expected_quantiles] + assert list(result.columns) == expected_columns, ( + f"Expected columns {expected_columns}, got {list(result.columns)}" + ) + + # Contributions should sum to 1.0 per quantile + for q in expected_quantiles: + quantile_cols = [col for col in result.columns if col.endswith(f"_{q.format()}")] + col_sums = result[quantile_cols].sum(axis=1) + pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=result.index), atol=1e-10) From 6f88d726d3e9fc8f768c6b31cd66bdd0379ab3dc Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 8 Dec 2025 09:46:57 +0100 Subject: [PATCH 058/104] Bugfixes Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 4 +- .../models/ensemble_forecasting_model.py | 72 ++++++-------- .../presets/forecasting_workflow.py | 4 +- .../tests/regression/__init__.py | 0 .../test_ensemble_forecasting_model.py | 98 +++++++++++++++++++ 5 files changed, 132 insertions(+), 46 deletions(-) create mode 100644 packages/openstef-meta/tests/regression/__init__.py create mode 100644 packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index be9053a75..9e6e0237a 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -44,9 +44,9 @@ OUTPUT_PATH = Path("./benchmark_results") -N_PROCESSES = 1 # multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark -ensemble_type = "stacking" # "stacking", "learned_weights" or "rules" +ensemble_type = "learned_weights" # "stacking", "learned_weights" or "rules" base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" combiner_model = ( "lgbm" # "lgbm", "xgboost", "rf" or "logistic" for learned weights combiner, gblinear for stacking combiner diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index a6298088b..8299d43a7 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -193,7 +193,6 @@ def fit( Returns: FitResult containing training details and metrics. """ - score_data = data.copy_with(data=data.data) # Fit the feature engineering transforms self.common_preprocessing.fit(data=data) @@ -204,16 +203,8 @@ def fit( if data_test is not None: data_test = self.common_preprocessing.transform(data=data_test) - # Transform the input data to a valid forecast input and split into train/val/test - data, data_val, data_test = self.data_splitter.split_dataset( - data=data, - data_val=data_val, - data_test=data_test, - target_column=self.target_column, - ) - - # Fit predict forecasters - ensemble_predictions = self._preprocess_fit_forecasters( + # Fit forecasters + ensemble_predictions = self._fit_forecasters( data=data, data_val=data_val, data_test=data_test, @@ -280,8 +271,7 @@ def fit( def _transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputDataset | None: if len(self.combiner_preprocessing.transforms) == 0: return None - combiner_data = self.prepare_input(data=data) - combiner_data = self.combiner_preprocessing.transform(combiner_data) + combiner_data = self.combiner_preprocessing.transform(data) return ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) def _fit_transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputDataset | None: @@ -292,28 +282,32 @@ def _fit_transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInput combiner_data = self.combiner_preprocessing.transform(combiner_data) return ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) - def _preprocess_fit_forecasters( + def _fit_forecasters( self, data: TimeSeriesDataset, data_val: TimeSeriesDataset | None = None, data_test: TimeSeriesDataset | None = None, ) -> EnsembleForecastDataset: - predictions_raw: dict[str, ForecastDataset] = {} + predictions: dict[str, ForecastDataset] = {} if data_test is not None: logger.info("Data test provided during fit, but will be ignored for MetaForecating") for name, forecaster in self.forecasters.items(): validate_horizons_present(data, forecaster.config.horizons) + # Apply model-specific preprocessing if available - # Transform and split input data - input_data_train = self.prepare_input(data=data, forecaster_name=name, forecast_start=data.index[0]) - input_data_val = ( - self.prepare_input(data=data_val, forecaster_name=name, forecast_start=data_val.index[0]) - if data_val - else None - ) + if name in self.model_specific_preprocessing: + self.model_specific_preprocessing[name].fit(data=data) + data = self.model_specific_preprocessing[name].transform(data=data) + data_val = self.model_specific_preprocessing[name].transform(data=data_val) if data_val else None + + input_data_train = self.prepare_input(data=data, forecast_start=data.index[0]) + if data_val is not None: + input_data_val = self.prepare_input(data=data_val, forecast_start=data_val.index[0]) + else: + input_data_val = None # Drop target column nan's from training data. One can not train on missing targets. target_dropna = partial(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownMemberType] @@ -322,11 +316,11 @@ def _preprocess_fit_forecasters( # Fit the model forecaster.fit(data=input_data_train, data_val=input_data_val) - predictions_raw[name] = self.forecasters[name].predict(data=input_data_train) + predictions_raw = self.forecasters[name].predict(data=input_data_train) - return EnsembleForecastDataset.from_forecast_datasets( - predictions_raw, target_series=data.data[self.target_column] - ) + predictions[name] = self.postprocessing.transform(data=predictions_raw) + + return EnsembleForecastDataset.from_forecast_datasets(predictions, target_series=data.data[self.target_column]) def _predict_forecasters( self, data: TimeSeriesDataset, forecast_start: datetime | None = None @@ -340,9 +334,16 @@ def _predict_forecasters( Returns: DataFrame containing base learner predictions. """ + data_common = self.common_preprocessing.transform(data=data) + base_predictions: dict[str, ForecastDataset] = {} for name, forecaster in self.forecasters.items(): - forecaster_data = self.prepare_input(data, forecaster_name=name, forecast_start=forecast_start) + forecaster_data = ( + self.model_specific_preprocessing[name].transform(data=data_common) + if name in self.model_specific_preprocessing + else data_common + ) + forecaster_data = self.prepare_input(forecaster_data, forecast_start=forecast_start) preds_raw = forecaster.predict(data=forecaster_data) preds = self.postprocessing.transform(data=preds_raw) base_predictions[name] = preds @@ -354,29 +355,18 @@ def _predict_forecasters( def prepare_input( self, data: TimeSeriesDataset, - forecaster_name: str | None = None, forecast_start: datetime | None = None, ) -> ForecastInputDataset: - """Prepare input data for forecasting by applying preprocessing and filtering. - - Transforms raw time series data through the preprocessing pipeline, restores - the target column, and filters out incomplete historical data to ensure - training quality. + """Prepare input data for forecastingfiltering. Args: data: Raw time series dataset to prepare for forecasting. - forecaster_name: Name of the forecaster for model-specific preprocessing. forecast_start: Optional start time for forecasts. If provided and earlier than the cutoff time, overrides the cutoff for data filtering. Returns: Processed forecast input dataset ready for model prediction. """ - # Apply model-specific preprocessing if available - if forecaster_name in self.model_specific_preprocessing: - self.model_specific_preprocessing[forecaster_name].fit(data=data) - data = self.model_specific_preprocessing[forecaster_name].transform(data=data) - input_data = restore_target(dataset=data, original_dataset=data, target_column=self.target_column) # Cut away input history to avoid training on incomplete data @@ -401,6 +391,7 @@ def _predict_combiner_and_score( self, ensemble_dataset: EnsembleForecastDataset, additional_features: ForecastInputDataset | None = None ) -> SubsetMetric: prediction = self.combiner.predict(ensemble_dataset, additional_features=additional_features) + prediction.data[ensemble_dataset.target_column] = ensemble_dataset.target_series return self._calculate_score(prediction=prediction) def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: @@ -419,9 +410,6 @@ def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = Non if not self.is_fitted: raise NotFittedError(self.__class__.__name__) - # Common preprocessing - data = self.common_preprocessing.transform(data=data) - ensemble_predictions = self._predict_forecasters(data=data, forecast_start=forecast_start) features = self._transform_combiner_data(data=data) diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 6ad2b78d7..0a565793a 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -11,7 +11,6 @@ from datetime import timedelta from typing import TYPE_CHECKING, Literal -from openstef_meta.transforms.selector import Selector from pydantic import Field from openstef_beam.evaluation.metric_providers import ( @@ -29,6 +28,7 @@ from openstef_meta.models.forecast_combiners.rules_combiner import RulesCombiner from openstef_meta.models.forecast_combiners.stacking_combiner import StackingCombiner from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster +from openstef_meta.transforms.selector import Selector from openstef_models.integrations.mlflow import MLFlowStorage from openstef_models.mixins.model_serializer import ModelIdentifier from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster @@ -84,7 +84,7 @@ class EnsembleWorkflowConfig(BaseConfig): description="Time interval between consecutive data samples.", ) horizons: list[LeadTime] = Field( - default=[LeadTime.from_string("PT36H")], + default=[LeadTime.from_string("PT48H")], description="List of forecast horizons to predict.", ) diff --git a/packages/openstef-meta/tests/regression/__init__.py b/packages/openstef-meta/tests/regression/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py new file mode 100644 index 000000000..f3d156a13 --- /dev/null +++ b/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py @@ -0,0 +1,98 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta +from typing import cast + +import numpy as np +import pandas as pd +import pytest + +from openstef_core.datasets.validated_datasets import TimeSeriesDataset +from openstef_core.types import LeadTime, Q +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel +from openstef_meta.presets import EnsembleWorkflowConfig, create_ensemble_workflow +from openstef_models.models.forecasting_model import ForecastingModel +from openstef_models.presets import ForecastingWorkflowConfig, create_forecasting_workflow + + +@pytest.fixture +def sample_timeseries_dataset() -> TimeSeriesDataset: + """Create sample time series data with typical energy forecasting features.""" + n_samples = 25 + rng = np.random.default_rng(seed=42) + + data = pd.DataFrame( + { + "load": 100.0 + rng.normal(10.0, 5.0, n_samples), + "temperature": 20.0 + rng.normal(1.0, 0.5, n_samples), + "radiation": rng.uniform(0.0, 500.0, n_samples), + }, + index=pd.date_range("2025-01-01 10:00", periods=n_samples, freq="h", tz="UTC"), + ) + + return TimeSeriesDataset(data, timedelta(hours=1)) + + +@pytest.fixture +def config() -> EnsembleWorkflowConfig: + return EnsembleWorkflowConfig( + model_id="ensemble_model_", + ensemble_type="learned_weights", + base_models=["gblinear", "lgbm"], + combiner_model="lgbm", + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime.from_string("PT36H")], + forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 0}, + ) + + +@pytest.fixture +def create_models( + config: EnsembleWorkflowConfig, +) -> tuple[EnsembleForecastingModel, dict[str, ForecastingModel]]: + + ensemble_model = cast(EnsembleForecastingModel, create_ensemble_workflow(config=config).model) + + base_models: dict[str, ForecastingModel] = {} + for forecaster_name in config.base_models: + model_config = ForecastingWorkflowConfig( + model_id=f"{forecaster_name}_model_", + model=forecaster_name, # type: ignore + quantiles=config.quantiles, + horizons=config.horizons, + sample_weight_exponent=config.forecaster_sample_weight_exponent[forecaster_name], + ) + base_model = create_forecasting_workflow(config=model_config).model + base_models[forecaster_name] = cast(ForecastingModel, base_model) + + return ensemble_model, base_models + + +def test_preprocessing( + sample_timeseries_dataset: TimeSeriesDataset, + create_models: tuple[EnsembleForecastingModel, dict[str, ForecastingModel]], +) -> None: + + ensemble_model, base_models = create_models + + ensemble_model.common_preprocessing.fit(data=sample_timeseries_dataset) + + # Check all base models + for name, model in base_models.items(): + # Ensemble model + common_ensemble = ensemble_model.common_preprocessing.transform(data=sample_timeseries_dataset) + ensemble_model.model_specific_preprocessing[name].fit(data=common_ensemble) + transformed_ensemble = ensemble_model.model_specific_preprocessing[name].transform(data=common_ensemble) + # Base model + model.preprocessing.fit(data=sample_timeseries_dataset) + transformed_base = model.preprocessing.transform(data=sample_timeseries_dataset) + # Compare + pd.testing.assert_frame_equal( + transformed_ensemble.data, + transformed_base.data, + check_dtype=False, + check_index_type=False, + check_column_type=False, + ) From 20edf2d808b4b5af5f8c76f1e8b06f07b30cac5d Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 8 Dec 2025 10:16:27 +0100 Subject: [PATCH 059/104] fixes Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 2 +- .../src/openstef_meta/mixins/__init__.py | 4 ++++ .../src/openstef_meta/mixins/contributions.py | 4 ++++ .../src/openstef_meta/models/__init__.py | 0 .../models/forecast_combiners/__init__.py | 4 ++++ .../forecast_combiners/stacking_combiner.py | 14 +++++++------- .../src/openstef_meta/presets/__init__.py | 4 ++++ .../presets/forecasting_workflow.py | 18 ++++++++++++++++-- .../src/openstef_meta/utils/datasets.py | 1 - .../models/forecast_combiners/__init__.py | 0 .../test_stacking_combiner.py | 1 + .../tests/models/forecasting/__init__.py | 0 .../forecasting/test_residual_forecaster.py | 6 +----- .../models/test_ensemble_forecasting_model.py | 9 ++++++--- .../openstef_models/explainability/mixins.py | 1 + .../mlflow/mlflow_storage_callback.py | 6 ++++++ .../models/forecasting/flatliner_forecaster.py | 12 ++++++++++++ 17 files changed, 67 insertions(+), 19 deletions(-) create mode 100644 packages/openstef-meta/src/openstef_meta/models/__init__.py create mode 100644 packages/openstef-meta/tests/models/forecast_combiners/__init__.py create mode 100644 packages/openstef-meta/tests/models/forecasting/__init__.py diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index b490a800c..28bc782cc 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -44,7 +44,7 @@ OUTPUT_PATH = Path("./benchmark_results") -N_PROCESSES = 1 # multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark ensemble_type = "learned_weights" # "stacking", "learned_weights" or "rules" base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" diff --git a/packages/openstef-meta/src/openstef_meta/mixins/__init__.py b/packages/openstef-meta/src/openstef_meta/mixins/__init__.py index 71d67869d..90a57a257 100644 --- a/packages/openstef-meta/src/openstef_meta/mixins/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/mixins/__init__.py @@ -1 +1,5 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Mixins for OpenSTEF-Meta package.""" diff --git a/packages/openstef-meta/src/openstef_meta/mixins/contributions.py b/packages/openstef-meta/src/openstef_meta/mixins/contributions.py index 9fb68377c..f00c185b3 100644 --- a/packages/openstef-meta/src/openstef_meta/mixins/contributions.py +++ b/packages/openstef-meta/src/openstef_meta/mixins/contributions.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """ExplainableMetaForecaster Mixin.""" from abc import ABC, abstractmethod diff --git a/packages/openstef-meta/src/openstef_meta/models/__init__.py b/packages/openstef-meta/src/openstef_meta/models/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py index db4917778..56a4cadff 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Forecast Combiners.""" from .forecast_combiner import ForecastCombiner, ForecastCombinerConfig diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index 1814ef49b..eb7b29424 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -12,6 +12,7 @@ import logging from typing import cast, override +from openstef_models.explainability.mixins import ExplainableForecaster import pandas as pd from pydantic import Field, field_validator @@ -203,7 +204,6 @@ def predict_contributions( additional_features: ForecastInputDataset | None = None, ) -> pd.DataFrame: - # Generate predictions predictions: list[pd.DataFrame] = [] for i, q in enumerate(self.quantiles): if additional_features is not None: @@ -213,14 +213,14 @@ def predict_contributions( ) else: input_data = data.select_quantile(quantile=q) - p = self.predict_contributions_quantile( - model=self.models[i], - data=input_data, - ) - p.columns = [f"{col}_{Quantile(self.quantiles[i]).format()}" for col in p.columns] + model = self.models[i] + if not isinstance(model, ExplainableForecaster): + raise NotImplementedError( + "Predicting contributions is only supported for ExplainableForecaster models." + ) + p = model.predict_contributions(data=input_data, scale=True) predictions.append(p) - # Concatenate predictions along columns to form a DataFrame with quantile columns return pd.concat(predictions, axis=1) @property diff --git a/packages/openstef-meta/src/openstef_meta/presets/__init__.py b/packages/openstef-meta/src/openstef_meta/presets/__init__.py index 53b9630aa..ad62320c2 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/presets/__init__.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Package for preset forecasting workflows.""" from .forecasting_workflow import EnsembleForecastingModel, EnsembleWorkflowConfig, create_ensemble_workflow diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 88d063584..c4e947d0a 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -1,3 +1,7 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + """Ensemble forecasting workflow preset. Mimics OpenSTEF-models forecasting workflow with ensemble capabilities. @@ -235,8 +239,18 @@ class EnsembleWorkflowConfig(BaseConfig): ) -def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: - """Create an ensemble forecasting workflow from configuration.""" +def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: # noqa: C901, PLR0912, PLR0915 + """Create an ensemble forecasting workflow from configuration. + + Args: + config: Configuration for the ensemble forecasting workflow. + + Returns: + An instance of CustomForecastingWorkflow configured as an ensemble forecaster. + + Raises: + ValueError: If an unsupported base model or combiner type is specified. + """ # Build preprocessing components def checks() -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index 9d38c5b4f..e0bba9265 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -78,7 +78,6 @@ def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[str], list[Q Raises: ValueError: If an invalid base learner name is found in a feature name. """ - forecasters: set[str] = set() quantiles: set[Quantile] = set() diff --git a/packages/openstef-meta/tests/models/forecast_combiners/__init__.py b/packages/openstef-meta/tests/models/forecast_combiners/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py b/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py index 4235df532..530018ab7 100644 --- a/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py +++ b/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py @@ -3,6 +3,7 @@ # # SPDX-License-Identifier: MPL-2.0 from datetime import timedelta + import pandas as pd import pytest diff --git a/packages/openstef-meta/tests/models/forecasting/__init__.py b/packages/openstef-meta/tests/models/forecasting/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py b/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py index 9e80abc9d..0f319552e 100644 --- a/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py +++ b/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py @@ -4,7 +4,6 @@ from datetime import timedelta -import pandas as pd import pytest from openstef_core.datasets import ForecastInputDataset @@ -161,10 +160,7 @@ def test_residual_forecaster_predict_contributions( assert forecaster.is_fitted, "Model should be fitted after calling fit()" # Check that necessary quantiles are present - base_models = [ - forecaster.primary_name, - forecaster.secondary_name - ] + base_models = [forecaster.primary_name, forecaster.secondary_name] expected_columns = [f"{col}_{q.format()}" for col in base_models for q in expected_quantiles] assert sorted(result.columns) == sorted(expected_columns), ( f"Expected columns {expected_columns}, got {list(result.columns)}" diff --git a/packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py index 126163bd9..33b78cfc9 100644 --- a/packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py @@ -1,4 +1,8 @@ -import pickle +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +import pickle # noqa: S403 - Controlled test from datetime import datetime, timedelta from typing import override @@ -136,10 +140,9 @@ def model() -> EnsembleForecastingModel: ) # Act - model = EnsembleForecastingModel( + return EnsembleForecastingModel( forecasters=forecasters, combiner=combiner, common_preprocessing=TransformPipeline() ) - return model def test_forecasting_model__init__uses_defaults(model: EnsembleForecastingModel): diff --git a/packages/openstef-models/src/openstef_models/explainability/mixins.py b/packages/openstef-models/src/openstef_models/explainability/mixins.py index e9c6b45f3..b0fb6fab1 100644 --- a/packages/openstef-models/src/openstef_models/explainability/mixins.py +++ b/packages/openstef-models/src/openstef_models/explainability/mixins.py @@ -51,6 +51,7 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> p Args: data: Input dataset for which to compute feature contributions. + scale: Whether to scale contributions to sum to the prediction value. Returns: DataFrame with contributions per base learner. diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index fd59cd600..7ba5222c3 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -22,6 +22,7 @@ from openstef_core.datasets.versioned_timeseries_dataset import VersionedTimeSeriesDataset from openstef_core.exceptions import ModelNotFoundError, SkipFitting from openstef_core.types import Q, QuantileOrGlobal +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_models.explainability import ExplainableForecaster from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage from openstef_models.mixins.callbacks import WorkflowContext @@ -97,6 +98,11 @@ def on_fit_end( if self.model_selection_enable: self._run_model_selection(workflow=context.workflow, result=result) + if isinstance(context.workflow.model, EnsembleForecastingModel): + raise NotImplementedError( + "MLFlowStorageCallback does not yet support EnsembleForecastingWorkflow model storage." + ) + # Create a new run run = self.storage.create_run( model_id=context.workflow.model_id, diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py index 51a5b5ed0..73f9d56b3 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py @@ -102,3 +102,15 @@ def feature_importances(self) -> pd.DataFrame: index=["load"], columns=[quantile.format() for quantile in self.config.quantiles], ) + + @override + def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = True) -> pd.DataFrame: + + if scale: + pass + forecast_index = data.create_forecast_range(horizon=self.config.max_horizon) + + return pd.DataFrame( + data={quantile.format(): 0.0 for quantile in self.config.quantiles}, + index=forecast_index, + ) From 354b6f27d707b7b779568eba21dee9cad3a26f4d Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 8 Dec 2025 10:22:43 +0100 Subject: [PATCH 060/104] Squashed commit of the following: commit 6f88d726d3e9fc8f768c6b31cd66bdd0379ab3dc Author: Lars van Someren Date: Mon Dec 8 09:46:57 2025 +0100 Bugfixes Signed-off-by: Lars van Someren commit b44fd928ff9ddf5eab552fa33ebb2f46742843c0 Author: Lars van Someren Date: Thu Dec 4 14:39:31 2025 +0100 bug fixes Signed-off-by: Lars van Someren commit e212448dbcdad7dd5e024d49d69cf9a7532d7af2 Author: Lars van Someren Date: Thu Dec 4 12:38:24 2025 +0100 fixes Signed-off-by: Lars van Someren commit eb775e41c2d661f025cc97268f5bbdbf4abe148f Author: Lars van Someren Date: Thu Dec 4 11:40:44 2025 +0100 BugFix Signed-off-by: Lars van Someren commit c33ce9354abf3d08a90b59d419009822b5a29ae0 Author: Lars van Someren Date: Wed Dec 3 14:15:06 2025 +0100 Made PR Compliant Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 2 + .../openstef4_backtest_forecaster.py | 8 +- .../src/openstef_meta/models/__init__.py | 5 + .../models/ensemble_forecasting_model.py | 164 +++++++---- .../forecast_combiners/forecast_combiner.py | 22 -- .../learned_weights_combiner.py | 14 +- .../presets/forecasting_workflow.py | 267 ++++++++++-------- .../src/openstef_meta/transforms/selector.py | 2 +- .../src/openstef_meta/utils/datasets.py | 37 +++ .../tests/{models => regression}/__init__.py | 0 .../test_ensemble_forecasting_model.py | 98 +++++++ .../models}/__init__.py | 0 .../tests/{ => unit}/models/conftest.py | 0 .../models/forecast_combiners}/__init__.py | 0 .../models/forecast_combiners/conftest.py | 0 .../test_learned_weights_combiner.py | 0 .../forecast_combiners/test_rules_combiner.py | 0 .../test_stacking_combiner.py | 0 .../models/forecasting}/__init__.py | 0 .../forecasting/test_residual_forecaster.py | 0 .../models/test_ensemble_forecasting_model.py | 0 .../{utils => unit/transforms}/__init__.py | 0 .../transforms/test_flag_features_bound.py | 0 .../tests/unit/utils/__init__.py | 0 .../tests/{ => unit}/utils/test_datasets.py | 0 .../{ => unit}/utils/test_decision_tree.py | 0 .../mlflow/mlflow_storage_callback.py | 8 +- 27 files changed, 422 insertions(+), 205 deletions(-) rename packages/openstef-meta/tests/{models => regression}/__init__.py (100%) create mode 100644 packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py rename packages/openstef-meta/tests/{models/forecast_combiners => unit/models}/__init__.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/conftest.py (100%) rename packages/openstef-meta/tests/{models/forecasting => unit/models/forecast_combiners}/__init__.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/forecast_combiners/conftest.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/forecast_combiners/test_learned_weights_combiner.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/forecast_combiners/test_rules_combiner.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/forecast_combiners/test_stacking_combiner.py (100%) rename packages/openstef-meta/tests/{transforms => unit/models/forecasting}/__init__.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/forecasting/test_residual_forecaster.py (100%) rename packages/openstef-meta/tests/{ => unit}/models/test_ensemble_forecasting_model.py (100%) rename packages/openstef-meta/tests/{utils => unit/transforms}/__init__.py (100%) rename packages/openstef-meta/tests/{ => unit}/transforms/test_flag_features_bound.py (100%) create mode 100644 packages/openstef-meta/tests/unit/utils/__init__.py rename packages/openstef-meta/tests/{ => unit}/utils/test_datasets.py (100%) rename packages/openstef-meta/tests/{ => unit}/utils/test_decision_tree.py (100%) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 28bc782cc..48630af70 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -94,6 +94,8 @@ temperature_column="temperature_2m", relative_humidity_column="relative_humidity_2m", energy_price_column="EPEX_NL", + forecast_combiner_sample_weight_exponent=1, + forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 1, "xgboost": 0, "lgbm_linear": 0}, ) diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py index 5ae961632..966b06f10 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py @@ -17,6 +17,7 @@ from openstef_core.datasets import TimeSeriesDataset from openstef_core.exceptions import FlatlinerDetectedError, NotFittedError from openstef_core.types import Q +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel @@ -65,7 +66,12 @@ def quantiles(self) -> list[Q]: if self._workflow is None: self._workflow = self.workflow_factory() # Extract quantiles from the workflow's model - return self._workflow.model.forecaster.config.quantiles # type: ignore + + if isinstance(self._workflow.model, EnsembleForecastingModel): + # Assuming all ensemble members have the same quantiles + name = self._workflow.model.forecaster_names[0] + return self._workflow.model.forecasters[name].config.quantiles + return self._workflow.model.forecaster.config.quantiles @override def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: diff --git a/packages/openstef-meta/src/openstef_meta/models/__init__.py b/packages/openstef-meta/src/openstef_meta/models/__init__.py index e69de29bb..13175057c 100644 --- a/packages/openstef-meta/src/openstef_meta/models/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/models/__init__.py @@ -0,0 +1,5 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Meta Forecasting models.""" diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 7eab3d74f..6ce21dcd7 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -35,6 +35,8 @@ from openstef_models.models.forecasting_model import ModelFitResult from openstef_models.utils.data_split import DataSplitter +logger = logging.getLogger(__name__) + class EnsembleForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): """Complete forecasting pipeline combining preprocessing, prediction, and postprocessing. @@ -61,16 +63,24 @@ class EnsembleForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastD >>> from openstef_models.models.forecasting.constant_median_forecaster import ( ... ConstantMedianForecaster, ConstantMedianForecasterConfig ... ) + >>> from openstef_meta.models.forecast_combiners.learned_weights_combiner import WeightsCombiner >>> from openstef_core.types import LeadTime >>> >>> # Note: This is a conceptual example showing the API structure >>> # Real usage requires implemented forecaster classes - >>> forecaster = ConstantMedianForecaster( + >>> forecaster_1 = ConstantMedianForecaster( + ... config=ConstantMedianForecasterConfig(horizons=[LeadTime.from_string("PT36H")]) + ... ) + >>> forecaster_2 = ConstantMedianForecaster( ... config=ConstantMedianForecasterConfig(horizons=[LeadTime.from_string("PT36H")]) ... ) + >>> combiner_config = WeightsCombiner.Config( + ... horizons=[LeadTime.from_string("PT36H")], + ... ) >>> # Create and train model - >>> model = ForecastingModel( - ... forecaster=forecaster, + >>> model = EnsembleForecastingModel( + ... forecasters={"constant_median": forecaster_1, "constant_median_2": forecaster_2}, + ... combiner=WeightsCombiner(config=combiner_config), ... cutoff_history=timedelta(days=14), # Match your maximum lag in preprocessing ... ) >>> model.fit(training_data) # doctest: +SKIP @@ -183,11 +193,18 @@ def fit( Returns: FitResult containing training details and metrics. """ + score_data = data.copy_with(data=data.data) # Fit the feature engineering transforms self.common_preprocessing.fit(data=data) + data = self.common_preprocessing.transform(data=data) - # Fit predict forecasters - ensemble_predictions = self._preprocess_fit_forecasters( + if data_val is not None: + data_val = self.common_preprocessing.transform(data=data_val) + if data_test is not None: + data_test = self.common_preprocessing.transform(data=data_test) + + # Fit forecasters + ensemble_predictions = self._fit_forecasters( data=data, data_val=data_val, data_test=data_test, @@ -200,13 +217,7 @@ def fit( else: ensemble_predictions_val = None - if len(self.combiner_preprocessing.transforms) > 0: - combiner_data = self.prepare_input(data=data) - self.combiner_preprocessing.fit(combiner_data) - combiner_data = self.combiner_preprocessing.transform(combiner_data) - features = ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) - else: - features = None + features = self._fit_transform_combiner_data(data=data) self.combiner.fit( data=ensemble_predictions, @@ -215,11 +226,36 @@ def fit( ) # Prepare input datasets for metrics calculation + metrics_train = self._predict_combiner_and_score( + ensemble_dataset=ensemble_predictions, additional_features=features + ) + if data_val is not None: + features_val = self._transform_combiner_data(data=data_val) + metrics_val = ( + self._predict_combiner_and_score( + ensemble_dataset=ensemble_predictions_val, additional_features=features_val + ) + if ensemble_predictions_val + else None + ) + else: + metrics_val = None - metrics_train = self._predict_and_score(data=data) - metrics_val = self._predict_and_score(data=data_val) if data_val else None - metrics_test = self._predict_and_score(data=data_test) if data_test else None - metrics_full = self.score(data=data) + if data_test is not None: + features_test = self._transform_combiner_data(data=data_test) + ensemble_predictions_test = self._predict_forecasters( + data=self.prepare_input(data=data_test), + ) + metrics_test = ( + self._predict_combiner_and_score( + ensemble_dataset=ensemble_predictions_test, additional_features=features_test + ) + if ensemble_predictions_test + else None + ) + else: + metrics_test = None + metrics_full = self.score(data=score_data) return ModelFitResult( input_dataset=data, @@ -232,44 +268,59 @@ def fit( metrics_full=metrics_full, ) - def _preprocess_fit_forecasters( + def _transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputDataset | None: + if len(self.combiner_preprocessing.transforms) == 0: + return None + combiner_data = self.combiner_preprocessing.transform(data) + return ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) + + def _fit_transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputDataset | None: + if len(self.combiner_preprocessing.transforms) == 0: + return None + combiner_data = self.prepare_input(data=data) + self.combiner_preprocessing.fit(combiner_data) + combiner_data = self.combiner_preprocessing.transform(combiner_data) + return ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) + + def _fit_forecasters( self, data: TimeSeriesDataset, data_val: TimeSeriesDataset | None = None, data_test: TimeSeriesDataset | None = None, ) -> EnsembleForecastDataset: - predictions_raw: dict[str, ForecastDataset] = {} + predictions: dict[str, ForecastDataset] = {} + + if data_test is not None: + logger.info("Data test provided during fit, but will be ignored for MetaForecating") for name, forecaster in self.forecasters.items(): validate_horizons_present(data, forecaster.config.horizons) + # Apply model-specific preprocessing if available - # Transform and split input data - input_data_train = self.prepare_input(data=data, forecaster_name=name) - input_data_val = self.prepare_input(data=data_val, forecaster_name=name) if data_val else None - input_data_test = self.prepare_input(data=data_test, forecaster_name=name) if data_test else None + if name in self.model_specific_preprocessing: + self.model_specific_preprocessing[name].fit(data=data) + data = self.model_specific_preprocessing[name].transform(data=data) + data_val = self.model_specific_preprocessing[name].transform(data=data_val) if data_val else None + + input_data_train = self.prepare_input(data=data, forecast_start=data.index[0]) + if data_val is not None: + input_data_val = self.prepare_input(data=data_val, forecast_start=data_val.index[0]) + else: + input_data_val = None # Drop target column nan's from training data. One can not train on missing targets. target_dropna = partial(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownMemberType] input_data_train = input_data_train.pipe_pandas(target_dropna) input_data_val = input_data_val.pipe_pandas(target_dropna) if input_data_val else None - input_data_test = input_data_test.pipe_pandas(target_dropna) if input_data_test else None - - # Transform the input data to a valid forecast input and split into train/val/test - input_data_train, input_data_val, input_data_test = self.data_splitter.split_dataset( - data=input_data_train, - data_val=input_data_val, - data_test=input_data_test, - target_column=self.target_column, - ) # Fit the model forecaster.fit(data=input_data_train, data_val=input_data_val) - predictions_raw[name] = self.forecasters[name].predict(data=input_data_train) + predictions_raw = self.forecasters[name].predict(data=input_data_train) - return EnsembleForecastDataset.from_forecast_datasets( - predictions_raw, target_series=data.data[self.target_column] - ) + predictions[name] = self.postprocessing.transform(data=predictions_raw) + + return EnsembleForecastDataset.from_forecast_datasets(predictions, target_series=data.data[self.target_column]) def _predict_forecasters( self, data: TimeSeriesDataset, forecast_start: datetime | None = None @@ -283,9 +334,16 @@ def _predict_forecasters( Returns: DataFrame containing base learner predictions. """ + data_common = self.common_preprocessing.transform(data=data) + base_predictions: dict[str, ForecastDataset] = {} for name, forecaster in self.forecasters.items(): - forecaster_data = self.prepare_input(data, forecaster_name=name, forecast_start=forecast_start) + forecaster_data = ( + self.model_specific_preprocessing[name].transform(data=data_common) + if name in self.model_specific_preprocessing + else data_common + ) + forecaster_data = self.prepare_input(forecaster_data, forecast_start=forecast_start) preds_raw = forecaster.predict(data=forecaster_data) preds = self.postprocessing.transform(data=preds_raw) base_predictions[name] = preds @@ -297,14 +355,9 @@ def _predict_forecasters( def prepare_input( self, data: TimeSeriesDataset, - forecaster_name: str | None = None, forecast_start: datetime | None = None, ) -> ForecastInputDataset: - """Prepare input data for forecasting by applying preprocessing and filtering. - - Transforms raw time series data through the preprocessing pipeline, restores - the target column, and filters out incomplete historical data to ensure - training quality. + """Prepare input data for forecastingfiltering. Args: data: Raw time series dataset to prepare for forecasting. @@ -315,14 +368,6 @@ def prepare_input( Returns: Processed forecast input dataset ready for model prediction. """ - # Transform and restore target column - data = self.common_preprocessing.transform(data=data) - - # Apply model-specific preprocessing if available - if forecaster_name in self.model_specific_preprocessing: - self.model_specific_preprocessing[forecaster_name].fit(data=data) - data = self.model_specific_preprocessing[forecaster_name].transform(data=data) - input_data = restore_target(dataset=data, original_dataset=data, target_column=self.target_column) # Cut away input history to avoid training on incomplete data @@ -343,8 +388,11 @@ def prepare_input( forecast_start=forecast_start, ) - def _predict_and_score(self, data: TimeSeriesDataset) -> SubsetMetric: - prediction = self.predict(data) + def _predict_combiner_and_score( + self, ensemble_dataset: EnsembleForecastDataset, additional_features: ForecastInputDataset | None = None + ) -> SubsetMetric: + prediction = self.combiner.predict(ensemble_dataset, additional_features=additional_features) + prediction.data[ensemble_dataset.target_column] = ensemble_dataset.target_series return self._calculate_score(prediction=prediction) def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: @@ -365,20 +413,12 @@ def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = Non ensemble_predictions = self._predict_forecasters(data=data, forecast_start=forecast_start) - additional_features = ( - ForecastInputDataset.from_timeseries( - self.combiner_preprocessing.transform(data=data), - target_column=self.target_column, - forecast_start=forecast_start, - ) - if len(self.combiner_preprocessing.transforms) > 0 - else None - ) + features = self._transform_combiner_data(data=data) # Predict and restore target column prediction = self.combiner.predict( data=ensemble_predictions, - additional_features=additional_features, + additional_features=features, ) return restore_target(dataset=prediction, original_dataset=data, target_column=self.target_column) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index 09b4e9017..b1df023f6 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -122,28 +122,6 @@ def predict( """ raise NotImplementedError("Subclasses must implement the predict method.") - @staticmethod - def _prepare_input_data( - dataset: ForecastInputDataset, additional_features: ForecastInputDataset | None - ) -> pd.DataFrame: - """Prepare input data by combining base predictions with additional features if provided. - - Args: - dataset: ForecastInputDataset containing base predictions. - additional_features: Optional ForecastInputDataset containing additional features. - - Returns: - pd.DataFrame: Combined DataFrame of base predictions and additional features if provided. - """ - df = dataset.input_data(start=dataset.index[0]) - if additional_features is not None: - df_a = additional_features.input_data(start=dataset.index[0]) - df = pd.concat( - [df, df_a], - axis=1, - ) - return df - @property @abstractmethod def is_fitted(self) -> bool: diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index acf366661..8ea079595 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -32,7 +32,7 @@ ForecastCombiner, ForecastCombinerConfig, ) -from openstef_meta.utils.datasets import EnsembleForecastDataset +from openstef_meta.utils.datasets import EnsembleForecastDataset, combine_forecast_input_datasets logger = logging.getLogger(__name__) @@ -241,18 +241,17 @@ def fit( for i, q in enumerate(self.quantiles): # Data preparation dataset = data.select_quantile_classification(quantile=q) - input_data = self._prepare_input_data( + combined_data = combine_forecast_input_datasets( dataset=dataset, - additional_features=additional_features, + other=additional_features, ) - labels = dataset.target_series + input_data = combined_data.input_data() + labels = combined_data.target_series self._validate_labels(labels=labels, model_index=i) labels = self._label_encoder.transform(labels) # Balance classes, adjust with sample weights - weights = compute_sample_weight("balanced", labels) - if sample_weights is not None: - weights *= sample_weights + weights = compute_sample_weight("balanced", labels) * combined_data.sample_weight_series self.models[i].fit(X=input_data, y=labels, sample_weight=weights) # type: ignore self._is_fitted = True @@ -276,6 +275,7 @@ def _prepare_input_data( df = pd.concat( [df, df_a], axis=1, + join="inner", ) return df diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index c4e947d0a..0a565793a 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -9,7 +9,7 @@ from collections.abc import Sequence from datetime import timedelta -from typing import Literal +from typing import TYPE_CHECKING, Literal from pydantic import Field @@ -28,9 +28,9 @@ from openstef_meta.models.forecast_combiners.rules_combiner import RulesCombiner from openstef_meta.models.forecast_combiners.stacking_combiner import StackingCombiner from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster -from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback +from openstef_meta.transforms.selector import Selector +from openstef_models.integrations.mlflow import MLFlowStorage from openstef_models.mixins.model_serializer import ModelIdentifier -from openstef_models.models.forecasting.forecaster import Forecaster from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster @@ -59,6 +59,9 @@ from openstef_models.utils.feature_selection import Exclude, FeatureSelection, Include from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow, ForecastingCallback +if TYPE_CHECKING: + from openstef_models.models.forecasting.forecaster import Forecaster + class EnsembleWorkflowConfig(BaseConfig): """Configuration for ensemble forecasting workflows.""" @@ -169,14 +172,19 @@ class EnsembleWorkflowConfig(BaseConfig): description="Percentile of target values used as scaling reference. " "Values are normalized relative to this percentile before weighting.", ) - sample_weight_exponent: float = Field( - default_factory=lambda data: 1.0 - if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "learned_weights", "stacking", "residual", "xgboost"} - else 0.0, + forecaster_sample_weight_exponent: dict[str, float] = Field( + default={"gblinear": 1.0, "lgbm": 0, "xgboost": 0, "lgbm_linear": 0}, description="Exponent applied to scale the sample weights. " "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " "Note: Defaults to 1.0 for gblinear congestion models.", ) + + forecast_combiner_sample_weight_exponent: float = Field( + default=0, + description="Exponent applied to scale the sample weights for the forecast combiner model. " + "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values.", + ) + sample_weight_floor: float = Field( default=0.1, description="Minimum weight value to ensure all samples contribute to training.", @@ -239,71 +247,82 @@ class EnsembleWorkflowConfig(BaseConfig): ) +# Build preprocessing components +def checks(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: + return [ + InputConsistencyChecker(), + FlatlineChecker( + load_column=config.target_column, + flatliner_threshold=config.flatliner_threshold, + detect_non_zero_flatliner=config.detect_non_zero_flatliner, + error_on_flatliner=False, + ), + CompletenessChecker(completeness_threshold=config.completeness_threshold), + ] + + +def feature_adders(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: + return [ + LagsAdder( + history_available=config.predict_history, + horizons=config.horizons, + add_trivial_lags=True, + target_column=config.target_column, + ), + WindPowerFeatureAdder( + windspeed_reference_column=config.wind_speed_column, + ), + AtmosphereDerivedFeaturesAdder( + pressure_column=config.pressure_column, + relative_humidity_column=config.relative_humidity_column, + temperature_column=config.temperature_column, + ), + RadiationDerivedFeaturesAdder( + coordinate=config.location.coordinate, + radiation_column=config.radiation_column, + ), + CyclicFeaturesAdder(), + DaylightFeatureAdder( + coordinate=config.location.coordinate, + ), + RollingAggregatesAdder( + feature=config.target_column, + aggregation_functions=config.rolling_aggregate_features, + horizons=config.horizons, + ), + ] + + +def feature_standardizers(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: + return [ + Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), + Scaler(selection=Exclude(config.target_column), method="standard"), + EmptyFeatureRemover(), + ] + + def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: # noqa: C901, PLR0912, PLR0915 """Create an ensemble forecasting workflow from configuration. Args: - config: Configuration for the ensemble forecasting workflow. + config (EnsembleWorkflowConfig): Configuration for the ensemble workflow. Returns: - An instance of CustomForecastingWorkflow configured as an ensemble forecaster. + CustomForecastingWorkflow: Configured ensemble forecasting workflow. Raises: ValueError: If an unsupported base model or combiner type is specified. """ - - # Build preprocessing components - def checks() -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: - return [ - InputConsistencyChecker(), - FlatlineChecker( - load_column=config.target_column, - flatliner_threshold=config.flatliner_threshold, - detect_non_zero_flatliner=config.detect_non_zero_flatliner, - error_on_flatliner=False, - ), - CompletenessChecker(completeness_threshold=config.completeness_threshold), - ] - - def feature_adders() -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: - return [ - WindPowerFeatureAdder( - windspeed_reference_column=config.wind_speed_column, - ), - AtmosphereDerivedFeaturesAdder( - pressure_column=config.pressure_column, - relative_humidity_column=config.relative_humidity_column, - temperature_column=config.temperature_column, - ), - RadiationDerivedFeaturesAdder( - coordinate=config.location.coordinate, - radiation_column=config.radiation_column, - ), - CyclicFeaturesAdder(), - DaylightFeatureAdder( - coordinate=config.location.coordinate, - ), - RollingAggregatesAdder( - feature=config.target_column, - aggregation_functions=config.rolling_aggregate_features, - horizons=config.horizons, - ), - ] - - def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: - return [ - Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), - Scaler(selection=Exclude(config.target_column), method="standard"), - SampleWeighter( - target_column=config.target_column, - weight_exponent=config.sample_weight_exponent, - weight_floor=config.sample_weight_floor, - weight_scale_percentile=config.sample_weight_scale_percentile, - ), - EmptyFeatureRemover(), + # Common preprocessing + common_preprocessing = TransformPipeline( + transforms=[ + *checks(config), + *feature_adders(config), + HolidayFeatureAdder(country_code=config.location.country_code), + DatetimeFeaturesAdder(onehot_encode=False), + *feature_standardizers(config), ] - - # Model Specific LagsAdder + ) # Build forecasters and their processing pipelines forecaster_preprocessing: dict[str, list[Transform[TimeSeriesDataset, TimeSeriesDataset]]] = {} @@ -314,17 +333,12 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas config=LGBMForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(), - *feature_adders(), - LagsAdder( - history_available=config.predict_history, - horizons=config.horizons, - add_trivial_lags=True, + SampleWeighter( target_column=config.target_column, + weight_exponent=config.forecaster_sample_weight_exponent[model_type], + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, ), - HolidayFeatureAdder(country_code=config.location.country_code), - DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(), ] elif model_type == "gblinear": @@ -332,18 +346,54 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas config=GBLinearForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(), - *feature_adders(), - LagsAdder( - history_available=config.predict_history, - horizons=config.horizons, - add_trivial_lags=False, + SampleWeighter( target_column=config.target_column, - custom_lags=[timedelta(days=7)], + weight_exponent=config.forecaster_sample_weight_exponent[model_type], + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, + ), + Selector( + selection=FeatureSelection( + exclude={ # Fix hardcoded lag features should be replaced by a LagsAdder classmethod + "load_lag_P14D", + "load_lag_P13D", + "load_lag_P12D", + "load_lag_P11D", + "load_lag_P10D", + "load_lag_P9D", + "load_lag_P8D", + # "load_lag_P7D", # Keep 7D lag for weekly seasonality + "load_lag_P6D", + "load_lag_P5D", + "load_lag_P4D", + "load_lag_P3D", + "load_lag_P2D", + } + ) + ), + Selector( # Fix hardcoded holiday features should be replaced by a HolidayFeatureAdder classmethod + selection=FeatureSelection( + exclude={ + "is_ascension_day", + "is_christmas_day", + "is_easter_monday", + "is_easter_sunday", + "is_good_friday", + "is_holiday", + "is_king_s_day", + "is_liberation_day", + "is_new_year_s_day", + "is_second_day_of_christmas", + "is_sunday", + "is_week_day", + "is_weekend_day", + "is_whit_monday", + "is_whit_sunday", + "month_of_year", + "quarter_of_year", + } + ) ), - HolidayFeatureAdder(country_code=config.location.country_code), - DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(), Imputer( selection=Exclude(config.target_column), imputation_strategy="mean", @@ -358,34 +408,24 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas config=XGBoostForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(), - *feature_adders(), - LagsAdder( - history_available=config.predict_history, - horizons=config.horizons, - add_trivial_lags=True, + SampleWeighter( target_column=config.target_column, + weight_exponent=config.forecaster_sample_weight_exponent[model_type], + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, ), - HolidayFeatureAdder(country_code=config.location.country_code), - DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(), ] elif model_type == "lgbm_linear": forecasters[model_type] = LGBMLinearForecaster( config=LGBMLinearForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) ) forecaster_preprocessing[model_type] = [ - *checks(), - *feature_adders(), - LagsAdder( - history_available=config.predict_history, - horizons=config.horizons, - add_trivial_lags=True, + SampleWeighter( target_column=config.target_column, + weight_exponent=config.forecaster_sample_weight_exponent[model_type], + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, ), - HolidayFeatureAdder(country_code=config.location.country_code), - DatetimeFeaturesAdder(onehot_encode=False), - *feature_standardizers(), ] else: msg = f"Unsupported base model type: {model_type}" @@ -439,27 +479,34 @@ def feature_standardizers() -> list[Transform[TimeSeriesDataset, TimeSeriesDatas name: TransformPipeline(transforms=transforms) for name, transforms in forecaster_preprocessing.items() } + if config.forecast_combiner_sample_weight_exponent != 0: + combiner_transforms = [ + SampleWeighter( + target_column=config.target_column, + weight_exponent=config.forecast_combiner_sample_weight_exponent, + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, + ), + Selector(selection=Include("sample_weight", config.target_column)), + ] + else: + combiner_transforms = [] + + combiner_preprocessing: TransformPipeline[TimeSeriesDataset] = TransformPipeline(transforms=combiner_transforms) + ensemble_model = EnsembleForecastingModel( - common_preprocessing=TransformPipeline(transforms=[]), + common_preprocessing=common_preprocessing, model_specific_preprocessing=model_specific_preprocessing, + combiner_preprocessing=combiner_preprocessing, postprocessing=TransformPipeline(transforms=postprocessing), forecasters=forecasters, combiner=combiner, target_column=config.target_column, + data_splitter=config.data_splitter, ) callbacks: list[ForecastingCallback] = [] - if config.mlflow_storage is not None: - callbacks.append( - MLFlowStorageCallback( - storage=config.mlflow_storage, - model_reuse_enable=config.model_reuse_enable, - model_reuse_max_age=config.model_reuse_max_age, - model_selection_enable=config.model_selection_enable, - model_selection_metric=config.model_selection_metric, - model_selection_old_model_penalty=config.model_selection_old_model_penalty, - ) - ) + # TODO(Egor): Implement MLFlow for OpenSTEF-meta # noqa: TD003 return CustomForecastingWorkflow(model=ensemble_model, model_id=config.model_id, callbacks=callbacks) diff --git a/packages/openstef-meta/src/openstef_meta/transforms/selector.py b/packages/openstef-meta/src/openstef_meta/transforms/selector.py index 75eb4e321..e4c5d343b 100644 --- a/packages/openstef-meta/src/openstef_meta/transforms/selector.py +++ b/packages/openstef-meta/src/openstef_meta/transforms/selector.py @@ -24,7 +24,7 @@ class Selector(BaseConfig, TimeSeriesTransform): selection: FeatureSelection = Field( default=FeatureSelection.ALL, - description="Features to check for NaN values. Rows with NaN in any selected column are dropped.", + description="Feature selection for efficient model specific preprocessing.x", ) @override diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index e0bba9265..cb6dbdad2 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -20,6 +20,43 @@ DEFAULT_TARGET_COLUMN = {Quantile(0.5): "load"} +def combine_forecast_input_datasets( + dataset: ForecastInputDataset, other: ForecastInputDataset | None, join: str = "inner" +) -> ForecastInputDataset: + """Combine multiple TimeSeriesDatasets into a single dataset. + + Args: + dataset: First ForecastInputDataset. + other: Second ForecastInputDataset or None. + join: Type of join to perform on the datasets. Defaults to "inner". + + Returns: + Combined ForecastDataset. + """ + if not isinstance(other, ForecastInputDataset): + return dataset + if join != "inner": + raise NotImplementedError("Only 'inner' join is currently supported.") + df_other = other.data + if dataset.target_column in df_other.columns: + df_other = df_other.drop(columns=[dataset.target_column]) + + df_one = dataset.data + df = pd.concat( + [df_one, df_other], + axis=1, + join="inner", + ) + + return ForecastInputDataset( + data=df, + sample_interval=dataset.sample_interval, + target_column=dataset.target_column, + sample_weight_column=dataset.sample_weight_column, + forecast_start=dataset.forecast_start, + ) + + class EnsembleForecastDataset(TimeSeriesDataset): """First stage output format for ensemble forecasters.""" diff --git a/packages/openstef-meta/tests/models/__init__.py b/packages/openstef-meta/tests/regression/__init__.py similarity index 100% rename from packages/openstef-meta/tests/models/__init__.py rename to packages/openstef-meta/tests/regression/__init__.py diff --git a/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py new file mode 100644 index 000000000..f3d156a13 --- /dev/null +++ b/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py @@ -0,0 +1,98 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta +from typing import cast + +import numpy as np +import pandas as pd +import pytest + +from openstef_core.datasets.validated_datasets import TimeSeriesDataset +from openstef_core.types import LeadTime, Q +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel +from openstef_meta.presets import EnsembleWorkflowConfig, create_ensemble_workflow +from openstef_models.models.forecasting_model import ForecastingModel +from openstef_models.presets import ForecastingWorkflowConfig, create_forecasting_workflow + + +@pytest.fixture +def sample_timeseries_dataset() -> TimeSeriesDataset: + """Create sample time series data with typical energy forecasting features.""" + n_samples = 25 + rng = np.random.default_rng(seed=42) + + data = pd.DataFrame( + { + "load": 100.0 + rng.normal(10.0, 5.0, n_samples), + "temperature": 20.0 + rng.normal(1.0, 0.5, n_samples), + "radiation": rng.uniform(0.0, 500.0, n_samples), + }, + index=pd.date_range("2025-01-01 10:00", periods=n_samples, freq="h", tz="UTC"), + ) + + return TimeSeriesDataset(data, timedelta(hours=1)) + + +@pytest.fixture +def config() -> EnsembleWorkflowConfig: + return EnsembleWorkflowConfig( + model_id="ensemble_model_", + ensemble_type="learned_weights", + base_models=["gblinear", "lgbm"], + combiner_model="lgbm", + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=[LeadTime.from_string("PT36H")], + forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 0}, + ) + + +@pytest.fixture +def create_models( + config: EnsembleWorkflowConfig, +) -> tuple[EnsembleForecastingModel, dict[str, ForecastingModel]]: + + ensemble_model = cast(EnsembleForecastingModel, create_ensemble_workflow(config=config).model) + + base_models: dict[str, ForecastingModel] = {} + for forecaster_name in config.base_models: + model_config = ForecastingWorkflowConfig( + model_id=f"{forecaster_name}_model_", + model=forecaster_name, # type: ignore + quantiles=config.quantiles, + horizons=config.horizons, + sample_weight_exponent=config.forecaster_sample_weight_exponent[forecaster_name], + ) + base_model = create_forecasting_workflow(config=model_config).model + base_models[forecaster_name] = cast(ForecastingModel, base_model) + + return ensemble_model, base_models + + +def test_preprocessing( + sample_timeseries_dataset: TimeSeriesDataset, + create_models: tuple[EnsembleForecastingModel, dict[str, ForecastingModel]], +) -> None: + + ensemble_model, base_models = create_models + + ensemble_model.common_preprocessing.fit(data=sample_timeseries_dataset) + + # Check all base models + for name, model in base_models.items(): + # Ensemble model + common_ensemble = ensemble_model.common_preprocessing.transform(data=sample_timeseries_dataset) + ensemble_model.model_specific_preprocessing[name].fit(data=common_ensemble) + transformed_ensemble = ensemble_model.model_specific_preprocessing[name].transform(data=common_ensemble) + # Base model + model.preprocessing.fit(data=sample_timeseries_dataset) + transformed_base = model.preprocessing.transform(data=sample_timeseries_dataset) + # Compare + pd.testing.assert_frame_equal( + transformed_ensemble.data, + transformed_base.data, + check_dtype=False, + check_index_type=False, + check_column_type=False, + ) diff --git a/packages/openstef-meta/tests/models/forecast_combiners/__init__.py b/packages/openstef-meta/tests/unit/models/__init__.py similarity index 100% rename from packages/openstef-meta/tests/models/forecast_combiners/__init__.py rename to packages/openstef-meta/tests/unit/models/__init__.py diff --git a/packages/openstef-meta/tests/models/conftest.py b/packages/openstef-meta/tests/unit/models/conftest.py similarity index 100% rename from packages/openstef-meta/tests/models/conftest.py rename to packages/openstef-meta/tests/unit/models/conftest.py diff --git a/packages/openstef-meta/tests/models/forecasting/__init__.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/__init__.py similarity index 100% rename from packages/openstef-meta/tests/models/forecasting/__init__.py rename to packages/openstef-meta/tests/unit/models/forecast_combiners/__init__.py diff --git a/packages/openstef-meta/tests/models/forecast_combiners/conftest.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py similarity index 100% rename from packages/openstef-meta/tests/models/forecast_combiners/conftest.py rename to packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_learned_weights_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py similarity index 100% rename from packages/openstef-meta/tests/models/forecast_combiners/test_learned_weights_combiner.py rename to packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py similarity index 100% rename from packages/openstef-meta/tests/models/forecast_combiners/test_rules_combiner.py rename to packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py diff --git a/packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py similarity index 100% rename from packages/openstef-meta/tests/models/forecast_combiners/test_stacking_combiner.py rename to packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py diff --git a/packages/openstef-meta/tests/transforms/__init__.py b/packages/openstef-meta/tests/unit/models/forecasting/__init__.py similarity index 100% rename from packages/openstef-meta/tests/transforms/__init__.py rename to packages/openstef-meta/tests/unit/models/forecasting/__init__.py diff --git a/packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py b/packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py similarity index 100% rename from packages/openstef-meta/tests/models/forecasting/test_residual_forecaster.py rename to packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py diff --git a/packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py similarity index 100% rename from packages/openstef-meta/tests/models/test_ensemble_forecasting_model.py rename to packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py diff --git a/packages/openstef-meta/tests/utils/__init__.py b/packages/openstef-meta/tests/unit/transforms/__init__.py similarity index 100% rename from packages/openstef-meta/tests/utils/__init__.py rename to packages/openstef-meta/tests/unit/transforms/__init__.py diff --git a/packages/openstef-meta/tests/transforms/test_flag_features_bound.py b/packages/openstef-meta/tests/unit/transforms/test_flag_features_bound.py similarity index 100% rename from packages/openstef-meta/tests/transforms/test_flag_features_bound.py rename to packages/openstef-meta/tests/unit/transforms/test_flag_features_bound.py diff --git a/packages/openstef-meta/tests/unit/utils/__init__.py b/packages/openstef-meta/tests/unit/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/openstef-meta/tests/utils/test_datasets.py b/packages/openstef-meta/tests/unit/utils/test_datasets.py similarity index 100% rename from packages/openstef-meta/tests/utils/test_datasets.py rename to packages/openstef-meta/tests/unit/utils/test_datasets.py diff --git a/packages/openstef-meta/tests/utils/test_decision_tree.py b/packages/openstef-meta/tests/unit/utils/test_decision_tree.py similarity index 100% rename from packages/openstef-meta/tests/utils/test_decision_tree.py rename to packages/openstef-meta/tests/unit/utils/test_decision_tree.py diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index 7ba5222c3..fcaa4ed46 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -107,7 +107,7 @@ def on_fit_end( run = self.storage.create_run( model_id=context.workflow.model_id, tags=context.workflow.model.tags, - hyperparams=context.workflow.model.forecaster.hyperparams, + hyperparams=context.workflow.model.forecaster.hyperparams, # type: ignore TODO Make MLFlow compatible with OpenSTEF Meta ) run_id: str = run.info.run_id self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) @@ -120,7 +120,11 @@ def on_fit_end( self._logger.info("Stored training data at %s for run %s", data_path, run_id) # Store feature importance plot if enabled - if self.store_feature_importance_plot and isinstance(context.workflow.model.forecaster, ExplainableForecaster): + if ( + self.store_feature_importance_plot + and isinstance(context.workflow.model, ForecastingModel) + and isinstance(context.workflow.model.forecaster, ExplainableForecaster) + ): fig = context.workflow.model.forecaster.plot_feature_importances() fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] From e6bc447c0f09fa1e17ed0d659c2aae2e11ebf4a8 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 8 Dec 2025 14:00:55 +0100 Subject: [PATCH 061/104] Fixes Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 6 +++--- .../openstef4_backtest_forecaster.py | 6 ++++-- .../models/ensemble_forecasting_model.py | 1 - .../forecast_combiners/stacking_combiner.py | 16 ++++++++++++---- .../mlflow/mlflow_storage_callback.py | 2 +- 5 files changed, 20 insertions(+), 11 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 48630af70..3678f6fc7 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -44,9 +44,9 @@ OUTPUT_PATH = Path("./benchmark_results") -N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = 1 # multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark -ensemble_type = "learned_weights" # "stacking", "learned_weights" or "rules" +ensemble_type = "stacking" # "stacking", "learned_weights" or "rules" base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" combiner_model = ( "lgbm" # "lgbm", "xgboost", "rf" or "logistic" for learned weights combiner, gblinear for stacking combiner @@ -152,7 +152,7 @@ def _create_workflow() -> CustomForecastingWorkflow: start_time = time.time() create_liander2024_benchmark_runner( storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH / model), - data_dir=Path("local_data/liander2024-energy-forecasting-benchmark"), + data_dir=Path("../data/liander2024-energy-forecasting-benchmark"), callbacks=[StrictExecutionCallback()], ).run( forecaster_factory=_target_forecaster_factory, diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py index 966b06f10..e19c6937f 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py @@ -9,6 +9,7 @@ from pathlib import Path from typing import Any, override +import pandas as pd from pydantic import Field, PrivateAttr from openstef_beam.backtesting.backtest_forecaster.mixins import BacktestForecasterConfig, BacktestForecasterMixin @@ -19,7 +20,6 @@ from openstef_core.types import Q from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow -from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): @@ -137,7 +137,9 @@ def predict(self, data: RestrictedHorizonVersionedTimeSeries) -> TimeSeriesDatas if self.contributions and isinstance(self._workflow.model, EnsembleForecastingModel): contr_str = data.horizon.strftime("%Y%m%d%H%M%S") contributions = self._workflow.model.predict_contributions(predict_data) - contributions.to_parquet(path=self.cache_dir / f"contrib_{contr_str}_predict.parquet") + df = pd.concat([contributions, forecast.data.drop(columns=["load"])]) + + df.to_parquet(path=self.cache_dir / f"contrib_{contr_str}_predict.parquet") return forecast diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 6ce21dcd7..eba82d91f 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -361,7 +361,6 @@ def prepare_input( Args: data: Raw time series dataset to prepare for forecasting. - forecaster_name: Optional name of the forecaster for model-specific preprocessing. forecast_start: Optional start time for forecasts. If provided and earlier than the cutoff time, overrides the cutoff for data filtering. diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index eb7b29424..c4165197f 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -10,9 +10,8 @@ """ import logging -from typing import cast, override +from typing import TYPE_CHECKING, cast, override -from openstef_models.explainability.mixins import ExplainableForecaster import pandas as pd from pydantic import Field, field_validator @@ -24,13 +23,16 @@ from openstef_core.types import LeadTime, Quantile from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig from openstef_meta.utils.datasets import EnsembleForecastDataset -from openstef_models.models.forecasting.forecaster import Forecaster +from openstef_models.explainability.mixins import ExplainableForecaster from openstef_models.models.forecasting.gblinear_forecaster import ( GBLinearForecaster, GBLinearHyperParams, ) from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMHyperParams +if TYPE_CHECKING: + from openstef_models.models.forecasting.forecaster import Forecaster + logger = logging.getLogger(__name__) ForecasterHyperParams = GBLinearHyperParams | LGBMHyperParams @@ -221,7 +223,13 @@ def predict_contributions( p = model.predict_contributions(data=input_data, scale=True) predictions.append(p) - return pd.concat(predictions, axis=1) + contributions = pd.concat(predictions, axis=1) + + target_series = data.target_series + if target_series is not None: + contributions[data.target_column] = target_series + + return contributions @property def is_fitted(self) -> bool: diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index fcaa4ed46..beacf50f1 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -122,7 +122,7 @@ def on_fit_end( # Store feature importance plot if enabled if ( self.store_feature_importance_plot - and isinstance(context.workflow.model, ForecastingModel) + and isinstance(context.workflow.model, ForecastingModel) # type: ignore and isinstance(context.workflow.model.forecaster, ExplainableForecaster) ): fig = context.workflow.model.forecaster.plot_feature_importances() From bedf6af01bcff1b7af732ac2bf4231db7b26dfcc Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 8 Dec 2025 14:32:30 +0100 Subject: [PATCH 062/104] fixed tests Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 2 +- .../learned_weights_combiner.py | 6 +++++- .../models/forecast_combiners/rules_combiner.py | 6 +----- .../models/forecasting/residual_forecaster.py | 4 +++- .../test_stacking_combiner.py | 2 +- .../models/forecasting/base_case_forecaster.py | 17 +++++++++++++++++ .../forecasting/constant_median_forecaster.py | 17 +++++++++++++++++ .../models/forecasting/gblinear_forecaster.py | 5 +---- .../models/forecasting/lgbm_forecaster.py | 12 +++++++----- .../models/forecasting/lgbmlinear_forecaster.py | 12 +++++++----- .../models/forecasting/xgboost_forecaster.py | 5 +---- .../forecasting/test_gblinear_forecaster.py | 4 +--- .../forecasting/test_xgboost_forecaster.py | 6 ++---- 13 files changed, 64 insertions(+), 34 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 3678f6fc7..ed795eca7 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -44,7 +44,7 @@ OUTPUT_PATH = Path("./benchmark_results") -N_PROCESSES = 1 # multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark ensemble_type = "stacking" # "stacking", "learned_weights" or "rules" base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 8ea079595..08f094b27 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -289,7 +289,11 @@ def _validate_labels(self, labels: pd.Series, model_index: int) -> None: def _predict_model_weights_quantile(self, base_predictions: pd.DataFrame, model_index: int) -> pd.DataFrame: model = self.models[model_index] - weights_array = model.predict_proba(base_predictions.to_numpy()) # type: ignore + if isinstance(model, DummyClassifier): + weights_array = pd.DataFrame(0, index=base_predictions.index, columns=self._label_encoder.classes_) + weights_array[self._label_encoder.classes_[0]] = 1.0 + else: + weights_array = model.predict_proba(base_predictions.to_numpy()) # type: ignore return pd.DataFrame(weights_array, index=base_predictions.index, columns=self._label_encoder.classes_) # type: ignore diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py index 965997cde..eb3dabcd2 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py @@ -163,11 +163,7 @@ def predict_contributions( predictions.append(preds.to_frame(name=Quantile(q).format())) # Concatenate predictions along columns to form a DataFrame with quantile columns - df = pd.concat(predictions, axis=1) - - # TODO FLORIAN return only Decision datadrame - - return df + return pd.concat(predictions, axis=1) @property def is_fitted(self) -> bool: diff --git a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py index be9100a1a..79cc44cd5 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py @@ -285,6 +285,7 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = Tru Args: data: Input data for prediction contributions. + scale: Whether to scale contributions to sum to 1. Defaults to True. Returns: pd.DataFrame containing the prediction contributions. @@ -308,7 +309,8 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = Tru primary_contributions.columns = [f"{self.primary_name}_{q}" for q in primary_contributions.columns] secondary_contributions = secondary_predictions.abs() / ( - primary_predictions.abs() + secondary_predictions.abs()) + primary_predictions.abs() + secondary_predictions.abs() + ) secondary_contributions.columns = [f"{self.secondary_name}_{q}" for q in secondary_contributions.columns] return pd.concat([primary_contributions, secondary_contributions], axis=1) diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py index 530018ab7..cb182e242 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py @@ -100,4 +100,4 @@ def test_stacking_combiner_predict_contributions( # Assert assert isinstance(contributions, pd.DataFrame), "Contributions should be returned as a DataFrame." - assert len(contributions.columns) == len(ensemble_dataset.quantiles) * len(ensemble_dataset.forecaster_names) + assert len(contributions.columns) == (len(ensemble_dataset.quantiles) * len(ensemble_dataset.forecaster_names)) + 1 diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py index d7e01c965..324812a0a 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py @@ -189,6 +189,23 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: sample_interval=data.sample_interval, ) + @override + def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = True) -> pd.DataFrame: + """Generate feature contributions. + + Args: + data: The forecast input dataset containing target variable history. + scale: Whether to scale contributions to sum to 1. Defaults to True. + + Returns: + pd.DataFrame containing the prediction contributions. + """ + return pd.DataFrame( + data=1.0, + index=data.index, + columns=["load_" + quantile.format() for quantile in self.config.quantiles], + ) + @property @override def feature_importances(self) -> pd.DataFrame: diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py index 930881a55..e142f4711 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py @@ -141,3 +141,20 @@ def feature_importances(self) -> pd.DataFrame: index=["load"], columns=[quantile.format() for quantile in self.config.quantiles], ) + + @override + def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = True) -> pd.DataFrame: + """Generate feature contributions. + + Args: + data: The forecast input dataset containing target variable history. + scale: Whether to scale contributions to sum to 1. Defaults to True. + + Returns: + pd.DataFrame containing the prediction contributions. + """ + return pd.DataFrame( + data=1.0, + index=data.index, + columns=["load_" + quantile.format() for quantile in self.config.quantiles], + ) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index 6fc0c30d6..9b230e060 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -353,10 +353,7 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = Tru data=contribs, index=input_data.index, columns=[ - f"{feature}_{quantile.format()}" - for feature in input_data.columns - for quantile in self.config.quantiles - + f"{feature}_{quantile.format()}" for feature in input_data.columns for quantile in self.config.quantiles ], ) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index 84852aaaa..ed3de0058 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -338,11 +338,13 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> p # Scale contributions so that they sum to 1.0 per quantile contribs_quantile = np.abs(contribs_quantile) / np.sum(np.abs(contribs_quantile), axis=1, keepdims=True) - contributions.append(pd.DataFrame( - data=contribs_quantile, - index=input_data.index, - columns=[f"{feature}_{quantile.format()}" for feature in input_data.columns], - )) + contributions.append( + pd.DataFrame( + data=contribs_quantile, + index=input_data.index, + columns=[f"{feature}_{quantile.format()}" for feature in input_data.columns], + ) + ) # Construct DataFrame return pd.concat(contributions, axis=1) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index e39e60bb5..b484c8a37 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -342,11 +342,13 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> p # Scale contributions so that they sum to 1.0 per quantile contribs_quantile = np.abs(contribs_quantile) / np.sum(np.abs(contribs_quantile), axis=1, keepdims=True) - contributions.append(pd.DataFrame( - data=contribs_quantile, - index=input_data.index, - columns=[f"{feature}_{quantile.format()}" for feature in input_data.columns], - )) + contributions.append( + pd.DataFrame( + data=contribs_quantile, + index=input_data.index, + columns=[f"{feature}_{quantile.format()}" for feature in input_data.columns], + ) + ) # Construct DataFrame return pd.concat(contributions, axis=1) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py index 6843df2fa..1469309f6 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py @@ -448,10 +448,7 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> p data=contribs, index=input_data.index, columns=[ - f"{feature}_{quantile.format()}" - for feature in input_data.columns - for quantile in self.config.quantiles - + f"{feature}_{quantile.format()}" for feature in input_data.columns for quantile in self.config.quantiles ], ) diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py index b260d1b4e..f7766581f 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py @@ -155,9 +155,7 @@ def test_gblinear_forecaster_predict_contributions( # Check that necessary quantiles are present input_features = sample_forecast_input_dataset.input_data().columns expected_columns = [f"{col}_{q.format()}" for col in input_features for q in expected_quantiles] - assert list(result.columns) == expected_columns, ( - f"Expected columns {expected_columns}, got {list(result.columns)}" - ) + assert list(result.columns) == expected_columns, f"Expected columns {expected_columns}, got {list(result.columns)}" # Contributions should sum to 1.0 per quantile for q in expected_quantiles: diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py index bde85e36a..e8bcfd1ec 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py @@ -189,12 +189,10 @@ def test_xgboost_forecaster_predict_contributions( # Check that necessary quantiles are present input_features = sample_forecast_input_dataset.input_data().columns expected_columns = [f"{col}_{q.format()}" for col in input_features for q in expected_quantiles] - assert list(result.columns) == expected_columns, ( - f"Expected columns {expected_columns}, got {list(result.columns)}" - ) + assert list(result.columns) == expected_columns, f"Expected columns {expected_columns}, got {list(result.columns)}" # Contributions should sum to 1.0 per quantile for q in expected_quantiles: quantile_cols = [col for col in result.columns if col.endswith(f"_{q.format()}")] col_sums = result[quantile_cols].sum(axis=1) - pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=result.index), atol=1e-10) + pd.testing.assert_series_equal(col_sums, pd.Series(1.0, index=result.index), atol=1e-10, check_dtype=False) From c9f135f54d9103cf31fd2945f4a0b38bd5bec33c Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 9 Dec 2025 10:41:06 +0100 Subject: [PATCH 063/104] small fix Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 4 ++-- .../backtest_forecaster/openstef4_backtest_forecaster.py | 4 ++-- .../models/forecast_combiners/learned_weights_combiner.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index ed795eca7..133b23c8a 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -44,9 +44,9 @@ OUTPUT_PATH = Path("./benchmark_results") -N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = 1 # multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark -ensemble_type = "stacking" # "stacking", "learned_weights" or "rules" +ensemble_type = "learned_weights" # "stacking", "learned_weights" or "rules" base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" combiner_model = ( "lgbm" # "lgbm", "xgboost", "rf" or "logistic" for learned weights combiner, gblinear for stacking combiner diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py index e19c6937f..874276089 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py @@ -136,8 +136,8 @@ def predict(self, data: RestrictedHorizonVersionedTimeSeries) -> TimeSeriesDatas if self.contributions and isinstance(self._workflow.model, EnsembleForecastingModel): contr_str = data.horizon.strftime("%Y%m%d%H%M%S") - contributions = self._workflow.model.predict_contributions(predict_data) - df = pd.concat([contributions, forecast.data.drop(columns=["load"])]) + contributions = self._workflow.model.predict_contributions(predict_data, forecast_start=data.horizon) + df = pd.concat([contributions, forecast.data.drop(columns=["load"])], axis=1) df.to_parquet(path=self.cache_dir / f"contrib_{contr_str}_predict.parquet") return forecast diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 08f094b27..c421e4c73 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -293,7 +293,7 @@ def _predict_model_weights_quantile(self, base_predictions: pd.DataFrame, model_ weights_array = pd.DataFrame(0, index=base_predictions.index, columns=self._label_encoder.classes_) weights_array[self._label_encoder.classes_[0]] = 1.0 else: - weights_array = model.predict_proba(base_predictions.to_numpy()) # type: ignore + weights_array = model.predict_proba(base_predictions) # type: ignore return pd.DataFrame(weights_array, index=base_predictions.index, columns=self._label_encoder.classes_) # type: ignore From 845e384ca2fb64b33e01daaca3267f8d180b702f Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 15 Dec 2025 13:00:30 +0100 Subject: [PATCH 064/104] Stacking Bugfix Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 12 +- .../models/ensemble_forecasting_model.py | 255 +++++++++--------- .../forecast_combiners/forecast_combiner.py | 2 - .../learned_weights_combiner.py | 1 - .../forecast_combiners/rules_combiner.py | 4 - .../forecast_combiners/stacking_combiner.py | 12 +- .../presets/forecasting_workflow.py | 17 +- 7 files changed, 154 insertions(+), 149 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 02fb9e3ee..23e2b4fcb 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -44,12 +44,12 @@ OUTPUT_PATH = Path("./benchmark_results") -N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = 1 if True else multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark -ensemble_type = "learned_weights" # "stacking", "learned_weights" or "rules" +ensemble_type = "stacking" # "stacking", "learned_weights" or "rules" base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" combiner_model = ( - "lgbm" # "lgbm", "xgboost", "rf" or "logistic" for learned weights combiner, gblinear for stacking combiner + "gblinear" # "lgbm", "xgboost", "rf" or "logistic" for learned weights combiner, gblinear for stacking combiner ) model = "Ensemble_" + "_".join(base_models) + "_" + ensemble_type + "_" + combiner_model @@ -94,8 +94,8 @@ temperature_column="temperature_2m", relative_humidity_column="relative_humidity_2m", energy_price_column="EPEX_NL", - forecast_combiner_sample_weight_exponent=1, - forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 1, "xgboost": 0, "lgbm_linear": 0}, + forecast_combiner_sample_weight_exponent=0, + forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 0, "xgboost": 0, "lgbm_linear": 0}, ) @@ -143,7 +143,7 @@ def _create_workflow() -> CustomForecastingWorkflow: config=backtest_config, workflow_factory=_create_workflow, debug=False, - contributions=True, + contributions=False, cache_dir=OUTPUT_PATH / "cache" / f"{context.run_name}_{target.name}", ) diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 5f8824336..ce3c7df8f 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -37,8 +37,6 @@ logger = logging.getLogger(__name__) -logger = logging.getLogger(__name__) - class EnsembleForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): """Complete forecasting pipeline combining preprocessing, prediction, and postprocessing. @@ -195,16 +193,6 @@ def fit( Returns: FitResult containing training details and metrics. """ - score_data = data.copy_with(data=data.data) - # Fit the feature engineering transforms - self.common_preprocessing.fit(data=data) - data = self.common_preprocessing.transform(data=data) - - if data_val is not None: - data_val = self.common_preprocessing.transform(data=data_val) - if data_test is not None: - data_test = self.common_preprocessing.transform(data=data_test) - # Fit forecasters ensemble_predictions = self._fit_forecasters( data=data, @@ -212,62 +200,21 @@ def fit( data_test=data_test, ) - if data_val is not None: - ensemble_predictions_val = self._predict_forecasters( - data=self.prepare_input(data=data_val), - ) - else: - ensemble_predictions_val = None - - features = self._fit_transform_combiner_data(data=data) - - self.combiner.fit( - data=ensemble_predictions, - data_val=ensemble_predictions_val, - additional_features=features, + self._fit_combiner( + ensemble_dataset=ensemble_predictions, + original_data=data, ) - # Prepare input datasets for metrics calculation - metrics_train = self._predict_combiner_and_score( - ensemble_dataset=ensemble_predictions, additional_features=features - ) - if data_val is not None: - features_val = self._transform_combiner_data(data=data_val) - metrics_val = ( - self._predict_combiner_and_score( - ensemble_dataset=ensemble_predictions_val, additional_features=features_val - ) - if ensemble_predictions_val - else None - ) - else: - metrics_val = None - - if data_test is not None: - features_test = self._transform_combiner_data(data=data_test) - ensemble_predictions_test = self._predict_forecasters( - data=self.prepare_input(data=data_test), - ) - metrics_test = ( - self._predict_combiner_and_score( - ensemble_dataset=ensemble_predictions_test, additional_features=features_test - ) - if ensemble_predictions_test - else None - ) - else: - metrics_test = None - metrics_full = self.score(data=score_data) - + metrics = self.score(data=data) return ModelFitResult( input_dataset=data, input_data_train=ForecastInputDataset.from_timeseries(data), input_data_val=ForecastInputDataset.from_timeseries(data_val) if data_val else None, input_data_test=ForecastInputDataset.from_timeseries(data_test) if data_test else None, - metrics_train=metrics_train, - metrics_val=metrics_val, - metrics_test=metrics_test, - metrics_full=metrics_full, + metrics_train=metrics, + metrics_val=None, + metrics_test=None, + metrics_full=metrics, ) def _transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputDataset | None: @@ -279,9 +226,8 @@ def _transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputData def _fit_transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputDataset | None: if len(self.combiner_preprocessing.transforms) == 0: return None - combiner_data = self.prepare_input(data=data) - self.combiner_preprocessing.fit(combiner_data) - combiner_data = self.combiner_preprocessing.transform(combiner_data) + self.combiner_preprocessing.fit(data=data) + combiner_data = self.combiner_preprocessing.transform(data) return ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) def _fit_forecasters( @@ -292,71 +238,104 @@ def _fit_forecasters( ) -> EnsembleForecastDataset: predictions: dict[str, ForecastDataset] = {} - if data_test is not None: logger.info("Data test provided during fit, but will be ignored for MetaForecating") - for name, forecaster in self.forecasters.items(): - validate_horizons_present(data, forecaster.config.horizons) - # Apply model-specific preprocessing if available - - if name in self.model_specific_preprocessing: - self.model_specific_preprocessing[name].fit(data=data) - data = self.model_specific_preprocessing[name].transform(data=data) - data_val = self.model_specific_preprocessing[name].transform(data=data_val) if data_val else None - - input_data_train = self.prepare_input(data=data, forecast_start=data.index[0]) - if data_val is not None: - input_data_val = self.prepare_input(data=data_val, forecast_start=data_val.index[0]) - else: - input_data_val = None - - # Drop target column nan's from training data. One can not train on missing targets. - target_dropna = partial(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownMemberType] - input_data_train = input_data_train.pipe_pandas(target_dropna) - input_data_val = input_data_val.pipe_pandas(target_dropna) if input_data_val else None - - # Fit the model - forecaster.fit(data=input_data_train, data_val=input_data_val) - predictions_raw = self.forecasters[name].predict(data=input_data_train) - - predictions[name] = self.postprocessing.transform(data=predictions_raw) + # Fit the feature engineering transforms + self.common_preprocessing.fit(data=data) + data_transformed = self.common_preprocessing.transform(data=data) + [ + self.model_specific_preprocessing[name].fit(data=data_transformed) + for name in self.model_specific_preprocessing + ] + logger.debug("Completed fitting preprocessing pipelines.") + + # Fit the forecasters + for name in self.forecasters: + logger.debug("Started fitting Forecaster '%s'.", name) + predictions[name] = self._fit_forecaster( + data=data, + data_val=data_val, + data_test=None, + forecaster_name=name, + ) return EnsembleForecastDataset.from_forecast_datasets(predictions, target_series=data.data[self.target_column]) - def _predict_forecasters( - self, data: TimeSeriesDataset, forecast_start: datetime | None = None - ) -> EnsembleForecastDataset: - """Generate predictions from base learners. + def _fit_forecaster( + self, + data: TimeSeriesDataset, + data_val: TimeSeriesDataset | None = None, + data_test: TimeSeriesDataset | None = None, + forecaster_name: str = "", + ) -> ForecastDataset: + """Train the forecaster on the provided dataset. Args: - data: Input data for prediction. - forecast_start: Optional start time for forecasts. + data: Historical time series data with features and target values. + data_val: Optional validation data. + data_test: Optional test data. + forecaster_name: Name of the forecaster to train. Returns: - DataFrame containing base learner predictions. + ForecastDataset containing the trained forecaster's predictions. """ - data_common = self.common_preprocessing.transform(data=data) - - base_predictions: dict[str, ForecastDataset] = {} - for name, forecaster in self.forecasters.items(): - forecaster_data = ( - self.model_specific_preprocessing[name].transform(data=data_common) - if name in self.model_specific_preprocessing - else data_common + forecaster = self.forecasters[forecaster_name] + validate_horizons_present(data, forecaster.config.horizons) + + # Transform and split input data + input_data_train = self.prepare_input(data=data, forecaster_name=forecaster_name) + input_data_val = self.prepare_input(data=data_val, forecaster_name=forecaster_name) if data_val else None + input_data_test = self.prepare_input(data=data_test, forecaster_name=forecaster_name) if data_test else None + + # Drop target column nan's from training data. One can not train on missing targets. + target_dropna = partial(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownMemberType] + input_data_train = input_data_train.pipe_pandas(target_dropna) + input_data_val = input_data_val.pipe_pandas(target_dropna) if input_data_val else None + input_data_test = input_data_test.pipe_pandas(target_dropna) if input_data_test else None + + # Transform the input data to a valid forecast input and split into train/val/test + input_data_train, input_data_val, input_data_test = self.data_splitter.split_dataset( + data=input_data_train, data_val=input_data_val, data_test=input_data_test, target_column=self.target_column + ) + logger.debug("Started fitting forecaster '%s'.", forecaster_name) + # Fit the model + forecaster.fit(data=input_data_train, data_val=input_data_val) + prediction = self._predict_forecaster(input_data=input_data_train, forecaster_name=forecaster_name) + logger.debug("Completed fitting forecaster '%s'.", forecaster_name) + + return ForecastDataset( + data=prediction.data, + sample_interval=prediction.sample_interval, + forecast_start=prediction.forecast_start, + ) + + def _predict_forecaster(self, input_data: ForecastInputDataset, forecaster_name: str) -> ForecastDataset: + # Predict and restore target column + prediction_raw = self.forecasters[forecaster_name].predict(data=input_data) + prediction = self.postprocessing.transform(prediction_raw) + return restore_target(dataset=prediction, original_dataset=input_data, target_column=self.target_column) + + def _predict_forecasters( + self, + data: TimeSeriesDataset, + forecast_start: datetime | None = None, + ) -> EnsembleForecastDataset: + predictions: dict[str, ForecastDataset] = {} + for name in self.forecasters: + logger.debug("Generating predictions for forecaster '%s'.", name) + input_data = self.prepare_input(data=data, forecast_start=forecast_start, forecaster_name=name) + predictions[name] = self._predict_forecaster( + input_data=input_data, + forecaster_name=name, ) - forecaster_data = self.prepare_input(forecaster_data, forecast_start=forecast_start) - preds_raw = forecaster.predict(data=forecaster_data) - preds = self.postprocessing.transform(data=preds_raw) - base_predictions[name] = preds - return EnsembleForecastDataset.from_forecast_datasets( - base_predictions, target_series=data.data[self.target_column] - ) + return EnsembleForecastDataset.from_forecast_datasets(predictions, target_series=data.data[self.target_column]) def prepare_input( self, data: TimeSeriesDataset, + forecaster_name: str = "", forecast_start: datetime | None = None, ) -> ForecastInputDataset: """Prepare input data for forecastingfiltering. @@ -365,12 +344,20 @@ def prepare_input( data: Raw time series dataset to prepare for forecasting. forecast_start: Optional start time for forecasts. If provided and earlier than the cutoff time, overrides the cutoff for data filtering. + forecaster_name: Name of the forecaster for which to prepare input data. Returns: Processed forecast input dataset ready for model prediction. """ + logger.debug("Preparing input data for forecaster '%s'.", forecaster_name) input_data = restore_target(dataset=data, original_dataset=data, target_column=self.target_column) + # Transform the data + input_data = self.common_preprocessing.transform(data=input_data) + if forecaster_name in self.model_specific_preprocessing: + logger.debug("Applying model-specific preprocessing for forecaster '%s'.", forecaster_name) + input_data = self.model_specific_preprocessing[forecaster_name].transform(data=input_data) + # Cut away input history to avoid training on incomplete data input_data_start = cast("pd.Series[pd.Timestamp]", input_data.index).min().to_pydatetime() input_data_cutoff = input_data_start + self.cutoff_history @@ -389,12 +376,28 @@ def prepare_input( forecast_start=forecast_start, ) - def _predict_combiner_and_score( - self, ensemble_dataset: EnsembleForecastDataset, additional_features: ForecastInputDataset | None = None - ) -> SubsetMetric: - prediction = self.combiner.predict(ensemble_dataset, additional_features=additional_features) + def _predict_combiner( + self, ensemble_dataset: EnsembleForecastDataset, original_data: TimeSeriesDataset + ) -> ForecastDataset: + logger.debug("Predicting combiner.") + features = self._transform_combiner_data(data=original_data) + prediction_raw = self.combiner.predict(ensemble_dataset, additional_features=features) + prediction = self.postprocessing.transform(prediction_raw) + prediction.data[ensemble_dataset.target_column] = ensemble_dataset.target_series - return self._calculate_score(prediction=prediction) + return prediction + + def _fit_combiner(self, ensemble_dataset: EnsembleForecastDataset, original_data: TimeSeriesDataset) -> None: + logger.debug("Fitting combiner.") + features = self._fit_transform_combiner_data(data=original_data) + self.combiner.fit(ensemble_dataset, additional_features=features) + + def _predict_contributions_combiner( + self, ensemble_dataset: EnsembleForecastDataset, original_data: TimeSeriesDataset + ) -> pd.DataFrame: + + features = self._transform_combiner_data(data=original_data) + return self.combiner.predict_contributions(ensemble_dataset, additional_features=features) def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: """Generate forecasts for the provided dataset. @@ -411,17 +414,15 @@ def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = Non """ if not self.is_fitted: raise NotFittedError(self.__class__.__name__) + logger.debug("Generating predictions.") ensemble_predictions = self._predict_forecasters(data=data, forecast_start=forecast_start) - features = self._transform_combiner_data(data=data) - # Predict and restore target column - prediction = self.combiner.predict( - data=ensemble_predictions, - additional_features=features, + prediction = self._predict_combiner( + ensemble_dataset=ensemble_predictions, + original_data=data, ) - return restore_target(dataset=prediction, original_dataset=data, target_column=self.target_column) def predict_contributions(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> pd.DataFrame: @@ -442,11 +443,9 @@ def predict_contributions(self, data: TimeSeriesDataset, forecast_start: datetim ensemble_predictions = self._predict_forecasters(data=data, forecast_start=forecast_start) - features = self._transform_combiner_data(data=data) - - return self.combiner.predict_contributions( - data=ensemble_predictions, - additional_features=features, + return self._predict_contributions_combiner( + ensemble_dataset=ensemble_predictions, + original_data=data, ) def score( diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index b1df023f6..f0078d949 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -93,7 +93,6 @@ def fit( data: EnsembleForecastDataset, data_val: EnsembleForecastDataset | None = None, additional_features: ForecastInputDataset | None = None, - sample_weights: pd.Series | None = None, ) -> None: """Fit the final learner using base learner predictions. @@ -101,7 +100,6 @@ def fit( data: EnsembleForecastDataset data_val: Optional EnsembleForecastDataset for validation during fitting. Will be ignored additional_features: Optional ForecastInputDataset containing additional features for the final learner. - sample_weights: Optional series of sample weights for fitting. """ raise NotImplementedError("Subclasses must implement the fit method.") diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index c421e4c73..1ee3c6c7a 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -233,7 +233,6 @@ def fit( data: EnsembleForecastDataset, data_val: EnsembleForecastDataset | None = None, additional_features: ForecastInputDataset | None = None, - sample_weights: pd.Series | None = None, ) -> None: self._label_encoder.fit(data.forecaster_names) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py index eb3dabcd2..57c44be02 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py @@ -88,16 +88,12 @@ def fit( data: EnsembleForecastDataset, data_val: EnsembleForecastDataset | None = None, additional_features: ForecastInputDataset | None = None, - sample_weights: pd.Series | None = None, ) -> None: # No fitting needed for rule-based final learner # Check that additional features are provided if additional_features is None: raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") - if sample_weights is not None: - logger.warning("Sample weights are ignored in RulesLearner.fit method.") - def _predict_tree(self, data: pd.DataFrame, columns: pd.Index) -> pd.DataFrame: """Predict using the decision tree rules. diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index c4165197f..8324ca607 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -10,6 +10,7 @@ """ import logging +from functools import partial from typing import TYPE_CHECKING, cast, override import pandas as pd @@ -154,7 +155,6 @@ def fit( data: EnsembleForecastDataset, data_val: EnsembleForecastDataset | None = None, additional_features: ForecastInputDataset | None = None, - sample_weights: pd.Series | None = None, ) -> None: for i, q in enumerate(self.quantiles): @@ -167,6 +167,10 @@ def fit( else: input_data = data.select_quantile(quantile=q) + # Prepare input data by dropping rows with NaN target values + target_dropna = partial(pd.DataFrame.dropna, subset=[input_data.target_column]) # pyright: ignore[reportUnknownMemberType] + input_data = input_data.pipe_pandas(target_dropna) + self.models[i].fit(data=input_data, data_val=None) @override @@ -188,6 +192,12 @@ def predict( ) else: input_data = data.select_quantile(quantile=q) + + if isinstance(self.models[i], GBLinearForecaster): + feature_cols = [x for x in input_data.data.columns if x != data.target_column] + feature_dropna = partial(pd.DataFrame.dropna, subset=feature_cols) # pyright: ignore[reportUnknownMemberType] + input_data = input_data.pipe_pandas(feature_dropna) + p = self.models[i].predict(data=input_data).data predictions.append(p) diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 0a565793a..9505e9e7a 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -9,7 +9,7 @@ from collections.abc import Sequence from datetime import timedelta -from typing import TYPE_CHECKING, Literal +from typing import TYPE_CHECKING, Literal, cast from pydantic import Field @@ -294,11 +294,14 @@ def feature_adders(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesD def feature_standardizers(config: EnsembleWorkflowConfig) -> list[Transform[TimeSeriesDataset, TimeSeriesDataset]]: - return [ - Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), - Scaler(selection=Exclude(config.target_column), method="standard"), - EmptyFeatureRemover(), - ] + return cast( + list[Transform[TimeSeriesDataset, TimeSeriesDataset]], + [ + Clipper(selection=Include(config.energy_price_column).combine(config.clip_features), mode="standard"), + Scaler(selection=Exclude(config.target_column), method="standard"), + EmptyFeatureRemover(), + ], + ) def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: # noqa: C901, PLR0912, PLR0915 @@ -454,7 +457,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin if config.combiner_model == "lgbm": combiner_hp = StackingCombiner.LGBMHyperParams() elif config.combiner_model == "gblinear": - combiner_hp = StackingCombiner.GBLinearHyperParams() + combiner_hp = StackingCombiner.GBLinearHyperParams(reg_alpha=0.0, reg_lambda=0.0) else: msg = f"Unsupported combiner model type for stacking: {config.combiner_model}" raise ValueError(msg) From 780e012e1b97ccf63d2d138ac946d10991fd91c6 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 15 Dec 2025 13:40:46 +0100 Subject: [PATCH 065/104] Added hard Forecast Selection Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 6 ++--- .../learned_weights_combiner.py | 27 +++++++++++++++++++ .../mlflow/mlflow_storage_callback.py | 6 +---- 3 files changed, 31 insertions(+), 8 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 23e2b4fcb..f5ca1c5a6 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -44,12 +44,12 @@ OUTPUT_PATH = Path("./benchmark_results") -N_PROCESSES = 1 if True else multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = 11 if True else multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark -ensemble_type = "stacking" # "stacking", "learned_weights" or "rules" +ensemble_type = "learned_weights" # "stacking", "learned_weights" or "rules" base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" combiner_model = ( - "gblinear" # "lgbm", "xgboost", "rf" or "logistic" for learned weights combiner, gblinear for stacking combiner + "lgbm" # "lgbm", "xgboost", "rf" or "logistic" for learned weights combiner, gblinear for stacking combiner ) model = "Ensemble_" + "_".join(base_models) + "_" + ensemble_type + "_" + combiner_model diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 1ee3c6c7a..6d6025e30 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -66,6 +66,16 @@ class LGBMCombinerHyperParams(HyperParams, ClassifierParamsMixin): description="Number of leaves for the LGBM Classifier. Defaults to 31.", ) + reg_alpha: float = Field( + default=0.0, + description="L1 regularization term on weights. Defaults to 0.0.", + ) + + reg_lambda: float = Field( + default=0.0, + description="L2 regularization term on weights. Defaults to 0.0.", + ) + @override def get_classifier(self) -> LGBMClassifier: """Returns the LGBM Classifier.""" @@ -73,6 +83,8 @@ def get_classifier(self) -> LGBMClassifier: class_weight="balanced", n_estimators=self.n_estimators, num_leaves=self.n_leaves, + reg_alpha=self.reg_alpha, + reg_lambda=self.reg_lambda, n_jobs=1, ) @@ -190,6 +202,15 @@ class WeightsCombinerConfig(ForecastCombinerConfig): min_length=1, ) + hard_selection: bool = Field( + default=False, + description=( + "If True, the combiner will select the base model with the highest predicted probability " + "for each instance (hard selection). If False, it will use the predicted probabilities as " + "weights to combine base model predictions (soft selection)." + ), + ) + @property def get_classifier(self) -> Classifier: """Returns the classifier instance from hyperparameters. @@ -223,6 +244,7 @@ def __init__(self, config: WeightsCombinerConfig) -> None: self._is_fitted: bool = False self._is_fitted = False self._label_encoder = LabelEncoder() + self.hard_selection = config.hard_selection # Initialize a classifier per quantile self.models: list[Classifier] = [config.get_classifier for _ in self.quantiles] @@ -310,6 +332,11 @@ def _generate_predictions_quantile( weights = self._predict_model_weights_quantile(base_predictions=input_data, model_index=model_index) + if self.hard_selection: + # If selection mode is hard, set the max weight to 1 and others to 0 + # Edge case if max weights are equal, distribute equally + weights = (weights == weights.max(axis=1).to_frame().to_numpy()) / weights.sum(axis=1).to_frame().to_numpy() + return dataset.input_data().mul(weights).sum(axis=1) @override diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index fcaa4ed46..57673d270 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -120,11 +120,7 @@ def on_fit_end( self._logger.info("Stored training data at %s for run %s", data_path, run_id) # Store feature importance plot if enabled - if ( - self.store_feature_importance_plot - and isinstance(context.workflow.model, ForecastingModel) - and isinstance(context.workflow.model.forecaster, ExplainableForecaster) - ): + if self.store_feature_importance_plot and isinstance(context.workflow.model.forecaster, ExplainableForecaster): fig = context.workflow.model.forecaster.plot_feature_importances() fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] From 682ae2fef555789c2875aab42be78c308af06eb8 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Mon, 15 Dec 2025 17:27:23 +0100 Subject: [PATCH 066/104] Improved data handling in EnsembleForecasting model, correct data splitting and Model Fit Result. Validation and test data can now be fully used Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 2 +- .../models/ensemble_forecasting_model.py | 279 +++++++++++++++--- .../src/openstef_meta/utils/datasets.py | 24 ++ .../workflows/custom_forecasting_workflow.py | 8 +- 4 files changed, 273 insertions(+), 40 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index f5ca1c5a6..6cfe6f901 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -44,7 +44,7 @@ OUTPUT_PATH = Path("./benchmark_results") -N_PROCESSES = 11 if True else multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark +N_PROCESSES = 1 if True else multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark ensemble_type = "learned_weights" # "stacking", "learned_weights" or "rules" base_models = ["lgbm", "gblinear"] # combination of "lgbm", "gblinear", "xgboost" and "lgbm_linear" diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index ce3c7df8f..9140c4443 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -38,6 +38,56 @@ logger = logging.getLogger(__name__) +class EnsembleModelFitResult(BaseModel): + forecaster_fit_results: dict[str, ModelFitResult] = Field(description="ModelFitResult for each base Forecaster") + + combiner_fit_result: ModelFitResult = Field(description="ModelFitResult for the ForecastCombiner") + + # Make compatible with ModelFitResult interface + @property + def input_dataset(self) -> EnsembleForecastDataset: + """Returns the input dataset used for fitting the combiner.""" + return cast( + "EnsembleForecastDataset", + self.combiner_fit_result.input_dataset, + ) + + @property + def input_data_train(self) -> ForecastInputDataset: + """Returns the training input data used for fitting the combiner.""" + return self.combiner_fit_result.input_data_train + + @property + def input_data_val(self) -> ForecastInputDataset | None: + """Returns the validation input data used for fitting the combiner.""" + return self.combiner_fit_result.input_data_val + + @property + def input_data_test(self) -> ForecastInputDataset | None: + """Returns the test input data used for fitting the combiner.""" + return self.combiner_fit_result.input_data_test + + @property + def metrics_train(self) -> SubsetMetric: + """Returns the full metrics calculated during combiner fitting.""" + return self.combiner_fit_result.metrics_train + + @property + def metrics_val(self) -> SubsetMetric | None: + """Returns the full metrics calculated during combiner fitting.""" + return self.combiner_fit_result.metrics_val + + @property + def metrics_test(self) -> SubsetMetric | None: + """Returns the full metrics calculated during combiner fitting.""" + return self.combiner_fit_result.metrics_test + + @property + def metrics_full(self) -> SubsetMetric: + """Returns the full metrics calculated during combiner fitting.""" + return self.combiner_fit_result.metrics_full + + class EnsembleForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): """Complete forecasting pipeline combining preprocessing, prediction, and postprocessing. @@ -174,7 +224,7 @@ def fit( data: TimeSeriesDataset, data_val: TimeSeriesDataset | None = None, data_test: TimeSeriesDataset | None = None, - ) -> ModelFitResult: + ) -> EnsembleModelFitResult: """Train the forecasting model on the provided dataset. Fits the preprocessing pipeline and underlying forecaster. Handles both @@ -194,27 +244,49 @@ def fit( FitResult containing training details and metrics. """ # Fit forecasters - ensemble_predictions = self._fit_forecasters( + train_ensemble, val_ensemble, test_ensemble, forecaster_fit_results = self._fit_forecasters( data=data, data_val=data_val, data_test=data_test, ) - self._fit_combiner( - ensemble_dataset=ensemble_predictions, - original_data=data, + combiner_fit_result = self._fit_combiner( + train_ensemble_dataset=train_ensemble, + val_ensemble_dataset=val_ensemble, + test_ensemble_dataset=test_ensemble, + data=data, + data_val=data_val, + data_test=data_test, ) - metrics = self.score(data=data) - return ModelFitResult( - input_dataset=data, - input_data_train=ForecastInputDataset.from_timeseries(data), - input_data_val=ForecastInputDataset.from_timeseries(data_val) if data_val else None, - input_data_test=ForecastInputDataset.from_timeseries(data_test) if data_test else None, - metrics_train=metrics, - metrics_val=None, - metrics_test=None, - metrics_full=metrics, + return EnsembleModelFitResult( + forecaster_fit_results=forecaster_fit_results, + combiner_fit_result=combiner_fit_result, + ) + + @staticmethod + def _combine_datasets( + data: ForecastInputDataset, additional_features: ForecastInputDataset + ) -> ForecastInputDataset: + """Combine base learner predictions with additional features for final learner input. + + Args: + data: ForecastInputDataset containing base learner predictions. + additional_features: ForecastInputDataset containing additional features. + + Returns: + ForecastInputDataset with combined features. + """ + additional_df = additional_features.data.loc[ + :, [col for col in additional_features.data.columns if col not in data.data.columns] + ] + # Merge on index to combine datasets + combined_df = data.data.join(additional_df) + + return ForecastInputDataset( + data=combined_df, + sample_interval=data.sample_interval, + forecast_start=data.forecast_start, ) def _transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputDataset | None: @@ -223,21 +295,57 @@ def _transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputData combiner_data = self.combiner_preprocessing.transform(data) return ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) - def _fit_transform_combiner_data(self, data: TimeSeriesDataset) -> ForecastInputDataset | None: + def _fit_prepare_combiner_data( + self, + data: TimeSeriesDataset, + data_val: TimeSeriesDataset | None = None, + data_test: TimeSeriesDataset | None = None, + ) -> tuple[ForecastInputDataset | None, ForecastInputDataset | None, ForecastInputDataset | None]: + if len(self.combiner_preprocessing.transforms) == 0: - return None + return None, None, None self.combiner_preprocessing.fit(data=data) - combiner_data = self.combiner_preprocessing.transform(data) - return ForecastInputDataset.from_timeseries(combiner_data, target_column=self.target_column) + + input_data_train = self.combiner_preprocessing.transform(data) + input_data_val = self.combiner_preprocessing.transform(data_val) if data_val else None + input_data_test = self.combiner_preprocessing.transform(data_test) if data_test else None + + input_data_train, input_data_val, input_data_test = self.data_splitter.split_dataset( + data=input_data_train, data_val=input_data_val, data_test=input_data_test, target_column=self.target_column + ) + combiner_data = ForecastInputDataset.from_timeseries(input_data_train, target_column=self.target_column) + + combiner_data_val = ( + ForecastInputDataset.from_timeseries(input_data_val, target_column=self.target_column) + if input_data_val + else None + ) + + combiner_data_test = ( + ForecastInputDataset.from_timeseries(input_data_test, target_column=self.target_column) + if input_data_test + else None + ) + + return combiner_data, combiner_data_val, combiner_data_test def _fit_forecasters( self, data: TimeSeriesDataset, data_val: TimeSeriesDataset | None = None, data_test: TimeSeriesDataset | None = None, - ) -> EnsembleForecastDataset: + ) -> tuple[ + EnsembleForecastDataset, + EnsembleForecastDataset | None, + EnsembleForecastDataset | None, + dict[str, ModelFitResult], + ]: + + predictions_train: dict[str, ForecastDataset] = {} + predictions_val: dict[str, ForecastDataset | None] = {} + predictions_test: dict[str, ForecastDataset | None] = {} + results: dict[str, ModelFitResult] = {} - predictions: dict[str, ForecastDataset] = {} if data_test is not None: logger.info("Data test provided during fit, but will be ignored for MetaForecating") @@ -253,14 +361,36 @@ def _fit_forecasters( # Fit the forecasters for name in self.forecasters: logger.debug("Started fitting Forecaster '%s'.", name) - predictions[name] = self._fit_forecaster( - data=data, - data_val=data_val, - data_test=None, - forecaster_name=name, + predictions_train[name], predictions_val[name], predictions_test[name], results[name] = ( + self._fit_forecaster( + data=data, + data_val=data_val, + data_test=None, + forecaster_name=name, + ) ) - return EnsembleForecastDataset.from_forecast_datasets(predictions, target_series=data.data[self.target_column]) + train_ensemble = EnsembleForecastDataset.from_forecast_datasets( + predictions_train, target_series=data.data[self.target_column] + ) + + if all(isinstance(v, ForecastDataset) for v in predictions_val.values()): + val_ensemble = EnsembleForecastDataset.from_forecast_datasets( + {k: v for k, v in predictions_val.items() if v is not None}, + target_series=data.data[self.target_column], + ) + else: + val_ensemble = None + + if all(isinstance(v, ForecastDataset) for v in predictions_test.values()): + test_ensemble = EnsembleForecastDataset.from_forecast_datasets( + {k: v for k, v in predictions_test.items() if v is not None}, + target_series=data.data[self.target_column], + ) + else: + test_ensemble = None + + return train_ensemble, val_ensemble, test_ensemble, results def _fit_forecaster( self, @@ -268,7 +398,12 @@ def _fit_forecaster( data_val: TimeSeriesDataset | None = None, data_test: TimeSeriesDataset | None = None, forecaster_name: str = "", - ) -> ForecastDataset: + ) -> tuple[ + ForecastDataset, + ForecastDataset | None, + ForecastDataset | None, + ModelFitResult, + ]: """Train the forecaster on the provided dataset. Args: @@ -298,18 +433,42 @@ def _fit_forecaster( input_data_train, input_data_val, input_data_test = self.data_splitter.split_dataset( data=input_data_train, data_val=input_data_val, data_test=input_data_test, target_column=self.target_column ) - logger.debug("Started fitting forecaster '%s'.", forecaster_name) + # Fit the model + logger.debug("Started fitting forecaster '%s'.", forecaster_name) forecaster.fit(data=input_data_train, data_val=input_data_val) - prediction = self._predict_forecaster(input_data=input_data_train, forecaster_name=forecaster_name) logger.debug("Completed fitting forecaster '%s'.", forecaster_name) - return ForecastDataset( - data=prediction.data, - sample_interval=prediction.sample_interval, - forecast_start=prediction.forecast_start, + prediction_train = self._predict_forecaster(input_data=input_data_train, forecaster_name=forecaster_name) + metrics_train = self._calculate_score(prediction=prediction_train) + + if input_data_val is not None: + prediction_val = self._predict_forecaster(input_data=input_data_val, forecaster_name=forecaster_name) + metrics_val = self._calculate_score(prediction=prediction_val) + else: + prediction_val = None + metrics_val = None + + if input_data_test is not None: + prediction_test = self._predict_forecaster(input_data=input_data_test, forecaster_name=forecaster_name) + metrics_test = self._calculate_score(prediction=prediction_test) + else: + prediction_test = None + metrics_test = None + + result = ModelFitResult( + input_dataset=input_data_train, + input_data_train=input_data_train, + input_data_val=input_data_val, + input_data_test=input_data_test, + metrics_train=metrics_train, + metrics_val=metrics_val, + metrics_test=metrics_test, + metrics_full=metrics_train, ) + return prediction_train, prediction_val, prediction_test, result + def _predict_forecaster(self, input_data: ForecastInputDataset, forecaster_name: str) -> ForecastDataset: # Predict and restore target column prediction_raw = self.forecasters[forecaster_name].predict(data=input_data) @@ -387,10 +546,56 @@ def _predict_combiner( prediction.data[ensemble_dataset.target_column] = ensemble_dataset.target_series return prediction - def _fit_combiner(self, ensemble_dataset: EnsembleForecastDataset, original_data: TimeSeriesDataset) -> None: + def _fit_combiner( + self, + data: TimeSeriesDataset, + train_ensemble_dataset: EnsembleForecastDataset, + data_val: TimeSeriesDataset | None = None, + data_test: TimeSeriesDataset | None = None, + val_ensemble_dataset: EnsembleForecastDataset | None = None, + test_ensemble_dataset: EnsembleForecastDataset | None = None, + ) -> ModelFitResult: + + features_train, features_val, features_test = self._fit_prepare_combiner_data( + data=data, data_val=data_val, data_test=data_test + ) + logger.debug("Fitting combiner.") - features = self._fit_transform_combiner_data(data=original_data) - self.combiner.fit(ensemble_dataset, additional_features=features) + self.combiner.fit( + data=train_ensemble_dataset, data_val=val_ensemble_dataset, additional_features=features_train + ) + + prediction_train = self.combiner.predict(train_ensemble_dataset, additional_features=features_train) + metrics_train = self._calculate_score(prediction=prediction_train) + + if val_ensemble_dataset is not None: + prediction_val = self.combiner.predict(val_ensemble_dataset, additional_features=features_val) + metrics_val = self._calculate_score(prediction=prediction_val) + else: + prediction_val = None + metrics_val = None + + if test_ensemble_dataset is not None: + prediction_test = self.combiner.predict(test_ensemble_dataset, additional_features=features_test) + metrics_test = self._calculate_score(prediction=prediction_test) + else: + prediction_test = None + metrics_test = None + + return ModelFitResult( + input_dataset=train_ensemble_dataset, + input_data_train=train_ensemble_dataset.select_quantile(quantile=self.config[0].quantiles[0]), + input_data_val=val_ensemble_dataset.select_quantile(quantile=self.config[0].quantiles[0]) + if val_ensemble_dataset + else None, + input_data_test=test_ensemble_dataset.select_quantile(quantile=self.config[0].quantiles[0]) + if test_ensemble_dataset + else None, + metrics_train=metrics_train, + metrics_val=metrics_val, + metrics_test=metrics_test, + metrics_full=metrics_train, + ) def _predict_contributions_combiner( self, ensemble_dataset: EnsembleForecastDataset, original_data: TimeSeriesDataset diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index cb6dbdad2..d6dfda8a7 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -256,3 +256,27 @@ def select_quantile(self, quantile: Quantile) -> ForecastInputDataset: target_column=self.target_column, forecast_start=self.forecast_start, ) + + def select_forecaster(self, forecaster_name: str) -> ForecastDataset: + """Select data for a specific base learner across all quantiles. + + Args: + forecaster_name: Name of the base learner to select. + + Returns: + ForecastDataset containing predictions from the specified base learner. + """ + selected_columns = [ + f"{forecaster_name}_{q.format()}" for q in self.quantiles if f"{forecaster_name}_{q.format()}" in self.data + ] + prediction_data = self.data[selected_columns].copy() + prediction_data.columns = [q.format() for q in self.quantiles] + + prediction_data[self.target_column] = self.data[self.target_column] + + return ForecastDataset( + data=prediction_data, + sample_interval=self.sample_interval, + forecast_start=self.forecast_start, + target_column=self.target_column, + ) diff --git a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py index 542d00448..8b0f6c1b6 100644 --- a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py @@ -18,7 +18,7 @@ from openstef_core.datasets import TimeSeriesDataset, VersionedTimeSeriesDataset from openstef_core.datasets.validated_datasets import ForecastDataset from openstef_core.exceptions import NotFittedError, SkipFitting -from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel, EnsembleModelFitResult from openstef_models.mixins import ModelIdentifier, PredictorCallback from openstef_models.mixins.callbacks import WorkflowContext from openstef_models.models.forecasting_model import ForecastingModel, ModelFitResult @@ -131,7 +131,7 @@ def fit( data: TimeSeriesDataset, data_val: TimeSeriesDataset | None = None, data_test: TimeSeriesDataset | None = None, - ) -> ModelFitResult | None: + ) -> ModelFitResult | EnsembleModelFitResult | None: """Train the forecasting model with callback execution. Executes the complete training workflow including pre-fit callbacks, @@ -154,6 +154,10 @@ def fit( result = self.model.fit(data=data, data_val=data_val, data_test=data_test) + if isinstance(result, EnsembleModelFitResult): + self._logger.info("Discarding EnsembleModelFitResult for compatibility.") + result = result.combiner_fit_result + for callback in self.callbacks: callback.on_fit_end(context=context, result=result) except SkipFitting as e: From 619c271871df7c2bb88a171ce308b36b2d91bda2 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 16 Dec 2025 11:22:32 +0100 Subject: [PATCH 067/104] Migrated Flagger and Selector to OpenSTEF Models transforms Signed-off-by: Lars van Someren --- .../models/forecast_combiners/forecast_combiner.py | 2 +- .../src/openstef_meta/presets/forecasting_workflow.py | 2 +- .../src/openstef_meta/transforms/__init__.py | 11 ----------- .../unit/models/test_ensemble_forecasting_model.py | 1 - .../openstef-meta/tests/unit/transforms/__init__.py | 0 .../openstef_models/transforms/general/__init__.py | 4 ++++ .../openstef_models/transforms/general/flagger.py} | 8 ++++---- .../openstef_models/transforms/general}/selector.py | 0 .../tests/unit/transforms/general/test_flagger.py} | 2 +- 9 files changed, 11 insertions(+), 19 deletions(-) delete mode 100644 packages/openstef-meta/src/openstef_meta/transforms/__init__.py delete mode 100644 packages/openstef-meta/tests/unit/transforms/__init__.py rename packages/{openstef-meta/src/openstef_meta/transforms/flag_features_bound.py => openstef-models/src/openstef_models/transforms/general/flagger.py} (92%) rename packages/{openstef-meta/src/openstef_meta/transforms => openstef-models/src/openstef_models/transforms/general}/selector.py (100%) rename packages/{openstef-meta/tests/unit/transforms/test_flag_features_bound.py => openstef-models/tests/unit/transforms/general/test_flagger.py} (97%) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index f0078d949..1e5eb29dd 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -18,7 +18,7 @@ from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.mixins import HyperParams, Predictor from openstef_core.types import LeadTime, Quantile -from openstef_meta.transforms.selector import Selector +from openstef_models.transforms.general.selector import Selector from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_models.utils.feature_selection import FeatureSelection diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 9505e9e7a..9b018a279 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -28,7 +28,7 @@ from openstef_meta.models.forecast_combiners.rules_combiner import RulesCombiner from openstef_meta.models.forecast_combiners.stacking_combiner import StackingCombiner from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster -from openstef_meta.transforms.selector import Selector +from openstef_models.transforms.general.selector import Selector from openstef_models.integrations.mlflow import MLFlowStorage from openstef_models.mixins.model_serializer import ModelIdentifier from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster diff --git a/packages/openstef-meta/src/openstef_meta/transforms/__init__.py b/packages/openstef-meta/src/openstef_meta/transforms/__init__.py deleted file mode 100644 index e551ace37..000000000 --- a/packages/openstef-meta/src/openstef_meta/transforms/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""Module for OpenSTEF Meta Transforms.""" - -from openstef_meta.transforms.flag_features_bound import Flagger - -__all__ = [ - "Flagger", -] diff --git a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py index 33b78cfc9..b106eca1f 100644 --- a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py @@ -76,7 +76,6 @@ def fit( data: EnsembleForecastDataset, data_val: EnsembleForecastDataset | None = None, additional_features: ForecastInputDataset | None = None, - sample_weights: pd.Series | None = None, ) -> None: self._is_fitted = True diff --git a/packages/openstef-meta/tests/unit/transforms/__init__.py b/packages/openstef-meta/tests/unit/transforms/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py index 79e59f58b..3f20c927e 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py @@ -13,17 +13,21 @@ from openstef_models.transforms.general.empty_feature_remover import ( EmptyFeatureRemover, ) +from openstef_models.transforms.general.flagger import Flagger from openstef_models.transforms.general.imputer import Imputer from openstef_models.transforms.general.nan_dropper import NaNDropper from openstef_models.transforms.general.sample_weighter import SampleWeighter from openstef_models.transforms.general.scaler import Scaler +from openstef_models.transforms.general.selector import Selector __all__ = [ "Clipper", "DimensionalityReducer", "EmptyFeatureRemover", + "Flagger", "Imputer", "NaNDropper", "SampleWeighter", "Scaler", + "Selector", ] diff --git a/packages/openstef-meta/src/openstef_meta/transforms/flag_features_bound.py b/packages/openstef-models/src/openstef_models/transforms/general/flagger.py similarity index 92% rename from packages/openstef-meta/src/openstef_meta/transforms/flag_features_bound.py rename to packages/openstef-models/src/openstef_models/transforms/general/flagger.py index 0d5fcd379..5c3675148 100644 --- a/packages/openstef-meta/src/openstef_meta/transforms/flag_features_bound.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/flagger.py @@ -2,11 +2,11 @@ # # SPDX-License-Identifier: MPL-2.0 -"""Transform for clipping feature values to observed ranges. +"""Transform for flagging feature values inside or outside observed training ranges. This module provides functionality to clip feature values to their observed -minimum and maximum ranges during training, preventing out-of-range values -during inference and improving model robustness. +minimum and maximum ranges during training. It is useful to flag data drift and +can be used to inform forecast combiners which models might perform better. """ from typing import override @@ -32,7 +32,7 @@ class Flagger(BaseConfig, TimeSeriesTransform): >>> import pandas as pd >>> from datetime import timedelta >>> from openstef_core.datasets import TimeSeriesDataset - >>> from openstef_meta.transforms import Flagger + >>> from openstef_models.transforms.general import Flagger >>> from openstef_models.utils.feature_selection import FeatureSelection >>> # Create sample training dataset >>> training_data = pd.DataFrame({ diff --git a/packages/openstef-meta/src/openstef_meta/transforms/selector.py b/packages/openstef-models/src/openstef_models/transforms/general/selector.py similarity index 100% rename from packages/openstef-meta/src/openstef_meta/transforms/selector.py rename to packages/openstef-models/src/openstef_models/transforms/general/selector.py diff --git a/packages/openstef-meta/tests/unit/transforms/test_flag_features_bound.py b/packages/openstef-models/tests/unit/transforms/general/test_flagger.py similarity index 97% rename from packages/openstef-meta/tests/unit/transforms/test_flag_features_bound.py rename to packages/openstef-models/tests/unit/transforms/general/test_flagger.py index dc0d3ea80..b250099f4 100644 --- a/packages/openstef-meta/tests/unit/transforms/test_flag_features_bound.py +++ b/packages/openstef-models/tests/unit/transforms/general/test_flagger.py @@ -8,7 +8,7 @@ import pytest from openstef_core.datasets import TimeSeriesDataset -from openstef_meta.transforms import Flagger +from openstef_models.transforms.general import Flagger from openstef_models.utils.feature_selection import FeatureSelection From 3b6587a0a1d2c93d64e85574a5b52c60bcd60c1f Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 16 Dec 2025 12:06:50 +0100 Subject: [PATCH 068/104] Fixed restore target Forecast Combiner Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 2 +- .../models/ensemble_forecasting_model.py | 31 +++++++++++-------- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 6cfe6f901..f82f6ff24 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -40,7 +40,7 @@ from openstef_models.presets.forecasting_workflow import LocationConfig from openstef_models.workflows import CustomForecastingWorkflow -logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") +logging.basicConfig(level=logging.DEBUG, format="[%(asctime)s][%(levelname)s] %(message)s") OUTPUT_PATH = Path("./benchmark_results") diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 9140c4443..d216d159c 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -346,9 +346,6 @@ def _fit_forecasters( predictions_test: dict[str, ForecastDataset | None] = {} results: dict[str, ModelFitResult] = {} - if data_test is not None: - logger.info("Data test provided during fit, but will be ignored for MetaForecating") - # Fit the feature engineering transforms self.common_preprocessing.fit(data=data) data_transformed = self.common_preprocessing.transform(data=data) @@ -360,12 +357,12 @@ def _fit_forecasters( # Fit the forecasters for name in self.forecasters: - logger.debug("Started fitting Forecaster '%s'.", name) + logger.debug("Fitting Forecaster '%s'.", name) predictions_train[name], predictions_val[name], predictions_test[name], results[name] = ( self._fit_forecaster( data=data, data_val=data_val, - data_test=None, + data_test=data_test, forecaster_name=name, ) ) @@ -436,6 +433,7 @@ def _fit_forecaster( # Fit the model logger.debug("Started fitting forecaster '%s'.", forecaster_name) + logger.debug(input_data_train.data.head().iloc[:, :5]) forecaster.fit(data=input_data_train, data_val=input_data_val) logger.debug("Completed fitting forecaster '%s'.", forecaster_name) @@ -471,6 +469,8 @@ def _fit_forecaster( def _predict_forecaster(self, input_data: ForecastInputDataset, forecaster_name: str) -> ForecastDataset: # Predict and restore target column + logger.debug("Predicting forecaster '%s'.", forecaster_name) + logger.debug(input_data.data.head().iloc[:, :5]) prediction_raw = self.forecasters[forecaster_name].predict(data=input_data) prediction = self.postprocessing.transform(prediction_raw) return restore_target(dataset=prediction, original_dataset=input_data, target_column=self.target_column) @@ -535,16 +535,22 @@ def prepare_input( forecast_start=forecast_start, ) - def _predict_combiner( + def _predict_transform_combiner( self, ensemble_dataset: EnsembleForecastDataset, original_data: TimeSeriesDataset ) -> ForecastDataset: logger.debug("Predicting combiner.") features = self._transform_combiner_data(data=original_data) + + return self._predict_combiner(ensemble_dataset, features) + + def _predict_combiner( + self, ensemble_dataset: EnsembleForecastDataset, features: ForecastInputDataset | None + ) -> ForecastDataset: + logger.debug("Predicting combiner.") prediction_raw = self.combiner.predict(ensemble_dataset, additional_features=features) prediction = self.postprocessing.transform(prediction_raw) - prediction.data[ensemble_dataset.target_column] = ensemble_dataset.target_series - return prediction + return restore_target(dataset=prediction, original_dataset=ensemble_dataset, target_column=self.target_column) def _fit_combiner( self, @@ -565,18 +571,18 @@ def _fit_combiner( data=train_ensemble_dataset, data_val=val_ensemble_dataset, additional_features=features_train ) - prediction_train = self.combiner.predict(train_ensemble_dataset, additional_features=features_train) + prediction_train = self._predict_combiner(train_ensemble_dataset, features=features_train) metrics_train = self._calculate_score(prediction=prediction_train) if val_ensemble_dataset is not None: - prediction_val = self.combiner.predict(val_ensemble_dataset, additional_features=features_val) + prediction_val = self._predict_combiner(val_ensemble_dataset, features=features_val) metrics_val = self._calculate_score(prediction=prediction_val) else: prediction_val = None metrics_val = None if test_ensemble_dataset is not None: - prediction_test = self.combiner.predict(test_ensemble_dataset, additional_features=features_test) + prediction_test = self._predict_combiner(test_ensemble_dataset, features=features_test) metrics_test = self._calculate_score(prediction=prediction_test) else: prediction_test = None @@ -624,11 +630,10 @@ def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = Non ensemble_predictions = self._predict_forecasters(data=data, forecast_start=forecast_start) # Predict and restore target column - prediction = self._predict_combiner( + return self._predict_transform_combiner( ensemble_dataset=ensemble_predictions, original_data=data, ) - return restore_target(dataset=prediction, original_dataset=data, target_column=self.target_column) def predict_contributions(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> pd.DataFrame: """Generate forecasts for the provided dataset. From ede090879f182a72f271b67c4947a405440fead6 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 16 Dec 2025 12:10:27 +0100 Subject: [PATCH 069/104] Streamline logging statements, Fix quality Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 2 +- .../src/openstef_meta/models/ensemble_forecasting_model.py | 2 -- .../models/forecast_combiners/forecast_combiner.py | 2 +- .../src/openstef_meta/presets/forecasting_workflow.py | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index f82f6ff24..6cfe6f901 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -40,7 +40,7 @@ from openstef_models.presets.forecasting_workflow import LocationConfig from openstef_models.workflows import CustomForecastingWorkflow -logging.basicConfig(level=logging.DEBUG, format="[%(asctime)s][%(levelname)s] %(message)s") +logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") OUTPUT_PATH = Path("./benchmark_results") diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index d216d159c..b60c07a46 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -433,7 +433,6 @@ def _fit_forecaster( # Fit the model logger.debug("Started fitting forecaster '%s'.", forecaster_name) - logger.debug(input_data_train.data.head().iloc[:, :5]) forecaster.fit(data=input_data_train, data_val=input_data_val) logger.debug("Completed fitting forecaster '%s'.", forecaster_name) @@ -470,7 +469,6 @@ def _fit_forecaster( def _predict_forecaster(self, input_data: ForecastInputDataset, forecaster_name: str) -> ForecastDataset: # Predict and restore target column logger.debug("Predicting forecaster '%s'.", forecaster_name) - logger.debug(input_data.data.head().iloc[:, :5]) prediction_raw = self.forecasters[forecaster_name].predict(data=input_data) prediction = self.postprocessing.transform(prediction_raw) return restore_target(dataset=prediction, original_dataset=input_data, target_column=self.target_column) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index 1e5eb29dd..d64614067 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -18,8 +18,8 @@ from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.mixins import HyperParams, Predictor from openstef_core.types import LeadTime, Quantile -from openstef_models.transforms.general.selector import Selector from openstef_meta.utils.datasets import EnsembleForecastDataset +from openstef_models.transforms.general.selector import Selector from openstef_models.utils.feature_selection import FeatureSelection SELECTOR = Selector( diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 9b018a279..dbe34509b 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -28,7 +28,6 @@ from openstef_meta.models.forecast_combiners.rules_combiner import RulesCombiner from openstef_meta.models.forecast_combiners.stacking_combiner import StackingCombiner from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster -from openstef_models.transforms.general.selector import Selector from openstef_models.integrations.mlflow import MLFlowStorage from openstef_models.mixins.model_serializer import ModelIdentifier from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster @@ -40,6 +39,7 @@ from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, SampleWeighter, Scaler from openstef_models.transforms.general.imputer import Imputer from openstef_models.transforms.general.nan_dropper import NaNDropper +from openstef_models.transforms.general.selector import Selector from openstef_models.transforms.postprocessing import QuantileSorter from openstef_models.transforms.time_domain import ( CyclicFeaturesAdder, From ab13581222680162e36dda5ffd514f2351d788a6 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 16 Dec 2025 14:09:48 +0100 Subject: [PATCH 070/104] Resolved comments, fixed bug Signed-off-by: Lars van Someren --- .../openstef4_backtest_forecaster.py | 7 +- .../src/openstef_meta/mixins/__init__.py | 5 - .../src/openstef_meta/mixins/contributions.py | 20 --- .../models/ensemble_forecasting_model.py | 9 +- .../forecast_combiners/forecast_combiner.py | 12 +- .../learned_weights_combiner.py | 19 ++- .../forecast_combiners/rules_combiner.py | 2 +- .../forecast_combiners/stacking_combiner.py | 14 +- .../models/forecasting/residual_forecaster.py | 11 +- .../presets/forecasting_workflow.py | 134 +++++++++--------- .../src/openstef_meta/utils/datasets.py | 26 ++-- .../test_ensemble_forecasting_model.py | 4 +- .../models/test_ensemble_forecasting_model.py | 2 +- .../openstef_models/explainability/mixins.py | 2 +- .../forecasting/flatliner_forecaster.py | 2 - .../models/forecasting/gblinear_forecaster.py | 2 +- .../models/forecasting/lgbm_forecaster.py | 2 +- .../forecasting/lgbmlinear_forecaster.py | 28 +--- .../models/forecasting/xgboost_forecaster.py | 2 +- .../presets/forecasting_workflow.py | 6 +- .../utils/multi_quantile_regressor.py | 8 +- .../workflows/custom_forecasting_workflow.py | 2 +- 22 files changed, 133 insertions(+), 186 deletions(-) delete mode 100644 packages/openstef-meta/src/openstef_meta/mixins/__init__.py delete mode 100644 packages/openstef-meta/src/openstef_meta/mixins/contributions.py diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py index 874276089..3c39c8846 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py @@ -44,7 +44,7 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): ) contributions: bool = Field( default=False, - description="When True, saves intermediate input data for explainability", + description="When True, saves base Forecaster prediction contributions for ensemble models in cache_dir", ) _workflow: CustomForecastingWorkflow | None = PrivateAttr(default=None) @@ -54,9 +54,7 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): @override def model_post_init(self, context: Any) -> None: - if self.debug: - self.cache_dir.mkdir(parents=True, exist_ok=True) - if self.contributions: + if self.debug or self.contributions: self.cache_dir.mkdir(parents=True, exist_ok=True) @property @@ -68,7 +66,6 @@ def quantiles(self) -> list[Q]: # Extract quantiles from the workflow's model if isinstance(self._workflow.model, EnsembleForecastingModel): - # Assuming all ensemble members have the same quantiles name = self._workflow.model.forecaster_names[0] return self._workflow.model.forecasters[name].config.quantiles return self._workflow.model.forecaster.config.quantiles diff --git a/packages/openstef-meta/src/openstef_meta/mixins/__init__.py b/packages/openstef-meta/src/openstef_meta/mixins/__init__.py deleted file mode 100644 index 90a57a257..000000000 --- a/packages/openstef-meta/src/openstef_meta/mixins/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""Mixins for OpenSTEF-Meta package.""" diff --git a/packages/openstef-meta/src/openstef_meta/mixins/contributions.py b/packages/openstef-meta/src/openstef_meta/mixins/contributions.py deleted file mode 100644 index f00c185b3..000000000 --- a/packages/openstef-meta/src/openstef_meta/mixins/contributions.py +++ /dev/null @@ -1,20 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""ExplainableMetaForecaster Mixin.""" - -from abc import ABC, abstractmethod - -import pandas as pd - -from openstef_core.datasets import ForecastInputDataset - - -class ContributionsMixin(ABC): - """Mixin class for models that support contribution analysis.""" - - @abstractmethod - def predict_contributions(self, X: ForecastInputDataset) -> pd.DataFrame: - """Get feature contributions for the given input data X.""" - raise NotImplementedError("This method should be implemented by subclasses.") diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index b60c07a46..5c1d00bcd 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -268,10 +268,10 @@ def fit( def _combine_datasets( data: ForecastInputDataset, additional_features: ForecastInputDataset ) -> ForecastInputDataset: - """Combine base learner predictions with additional features for final learner input. + """Combine Forecaster learner predictions with additional features for ForecastCombiner input. Args: - data: ForecastInputDataset containing base learner predictions. + data: ForecastInputDataset containing base Forecaster predictions. additional_features: ForecastInputDataset containing additional features. Returns: @@ -507,13 +507,12 @@ def prepare_input( Processed forecast input dataset ready for model prediction. """ logger.debug("Preparing input data for forecaster '%s'.", forecaster_name) - input_data = restore_target(dataset=data, original_dataset=data, target_column=self.target_column) - # Transform the data - input_data = self.common_preprocessing.transform(data=input_data) + input_data = self.common_preprocessing.transform(data=data) if forecaster_name in self.model_specific_preprocessing: logger.debug("Applying model-specific preprocessing for forecaster '%s'.", forecaster_name) input_data = self.model_specific_preprocessing[forecaster_name].transform(data=input_data) + input_data = restore_target(dataset=input_data, original_dataset=data, target_column=self.target_column) # Cut away input history to avoid training on incomplete data input_data_start = cast("pd.Series[pd.Timestamp]", input_data.index).min().to_pydatetime() diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index d64614067..a8cd4864f 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -83,7 +83,7 @@ def with_horizon(self, horizon: LeadTime) -> Self: class ForecastCombiner(Predictor[EnsembleForecastDataset, ForecastDataset]): - """Combines base learner predictions for each quantile into final predictions.""" + """Combines base Forecaster predictions for each quantile into final predictions.""" config: ForecastCombinerConfig @@ -94,7 +94,7 @@ def fit( data_val: EnsembleForecastDataset | None = None, additional_features: ForecastInputDataset | None = None, ) -> None: - """Fit the final learner using base learner predictions. + """Fit the final learner using base Forecaster predictions. Args: data: EnsembleForecastDataset @@ -108,10 +108,10 @@ def predict( data: EnsembleForecastDataset, additional_features: ForecastInputDataset | None = None, ) -> ForecastDataset: - """Generate final predictions based on base learner predictions. + """Generate final predictions based on base Forecaster predictions. Args: - data: EnsembleForecastDataset containing base learner predictions. + data: EnsembleForecastDataset containing base Forecaster predictions. data_val: Optional EnsembleForecastDataset for validation during prediction. Will be ignored additional_features: Optional ForecastInputDataset containing additional features for the final learner. @@ -132,10 +132,10 @@ def predict_contributions( data: EnsembleForecastDataset, additional_features: ForecastInputDataset | None = None, ) -> pd.DataFrame: - """Generate final predictions based on base learner predictions. + """Generate final predictions based on base Forecaster predictions. Args: - data: EnsembleForecastDataset containing base learner predictions. + data: EnsembleForecastDataset containing base Forecaster predictions. data_val: Optional EnsembleForecastDataset for validation during prediction. Will be ignored additional_features: Optional ForecastInputDataset containing additional features for the final learner. diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 6d6025e30..d2b0fac48 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -1,12 +1,13 @@ # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 -"""Hybrid Forecaster (Stacked LightGBM + Linear Model Gradient Boosting). +"""Learned Weights Combiner. -Provides method that attempts to combine the advantages of a linear model (Extraplolation) -and tree-based model (Non-linear patterns). This is acieved by training two base learners, -followed by a small linear model that regresses on the baselearners' predictions. -The implementation is based on sklearn's StackingRegressor. +Forecast combiner that uses a classification approach to learn weights for base forecasters. +It is designed to efficiently combine predictions from multiple base forecasters by learning which +forecaster is likely to perform best under different conditions. The combiner can operate in two modes: +- Hard Selection: Selects the base forecaster with the highest predicted probability for each instance. +- Soft Selection: Uses the predicted probabilities as weights to combine base forecaster predictions. """ import logging @@ -228,7 +229,13 @@ def get_classifier(self) -> Classifier: class WeightsCombiner(ForecastCombiner): - """Combines base learner predictions with a classification approach to determine which base learner to use.""" + """Combines base Forecaster predictions with a classification approach. + + The classifier is used to predict model weights for each base forecaster. + Depending on the `hard_selection` parameter in the configuration, the combiner can either + select the base forecaster with the highest predicted probability (hard selection) or use + the predicted probabilities as weights to combine base forecaster predictions (soft selection). + """ Config = WeightsCombinerConfig LGBMHyperParams = LGBMCombinerHyperParams diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py index 57c44be02..93a12744f 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py @@ -67,7 +67,7 @@ def _validate_hyperparams(v: HyperParams) -> HyperParams: class RulesCombiner(ForecastCombiner): - """Combines base learner predictions per quantile into final predictions using a regression approach.""" + """Combines base Forecaster predictions per quantile into final predictions using hard-coded rules.""" Config = RulesCombinerConfig diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index 8324ca607..d59811453 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -1,12 +1,10 @@ # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 -"""Hybrid Forecaster (Stacked LightGBM + Linear Model Gradient Boosting). +"""Stacking Forecast Combiner. -Provides method that attempts to combine the advantages of a linear model (Extraplolation) -and tree-based model (Non-linear patterns). This is acieved by training two base learners, -followed by a small linear model that regresses on the baselearners' predictions. -The implementation is based on sklearn's StackingRegressor. +This module implements a Stacking Combiner that integrates predictions from multiple base Forecasters. +It uses a regression approach to combine the predictions for each quantile into final forecasts. """ import logging @@ -88,7 +86,7 @@ def validate_forecaster( class StackingCombiner(ForecastCombiner): - """Combines base learner predictions per quantile into final predictions using a regression approach.""" + """Combines base Forecaster predictions per quantile into final predictions using a regression approach.""" Config = StackingCombinerConfig LGBMHyperParams = LGBMHyperParams @@ -128,10 +126,10 @@ def __init__( def _combine_datasets( data: ForecastInputDataset, additional_features: ForecastInputDataset ) -> ForecastInputDataset: - """Combine base learner predictions with additional features for final learner input. + """Combine base Forecaster predictions with additional features for final learner input. Args: - data: ForecastInputDataset containing base learner predictions. + data: ForecastInputDataset containing base Forecaster predictions. additional_features: ForecastInputDataset containing additional features. Returns: diff --git a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py index 79cc44cd5..de44e003c 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py @@ -1,12 +1,11 @@ # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 -"""Hybrid Forecaster (Stacked LightGBM + Linear Model Gradient Boosting). +"""Residual Forecaster. Provides method that attempts to combine the advantages of a linear model (Extraplolation) -and tree-based model (Non-linear patterns). This is acieved by training two base learners, -followed by a small linear model that regresses on the baselearners' predictions. -The implementation is based on sklearn's ResidualRegressor. +and tree-based model (Non-linear patterns). This is achieved by training a primary model, +typically linear, followed by a secondary model that learns to predict the residuals (errors) of the primary model. """ import logging @@ -132,10 +131,10 @@ def _init_secondary_model(self, hyperparams: ResidualBaseForecasterHyperParams) def _init_base_learners( config: ForecasterConfig, base_hyperparams: list[ResidualBaseForecasterHyperParams] ) -> list[ResidualBaseForecaster]: - """Initialize base learners based on provided hyperparameters. + """Initialize base Forecaster based on provided hyperparameters. Returns: - list[Forecaster]: List of initialized base learner forecasters. + list[Forecaster]: List of initialized base Forecaster forecasters. """ base_learners: list[ResidualBaseForecaster] = [] horizons = config.horizons diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index dbe34509b..52568b3a1 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -355,48 +355,28 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin weight_floor=config.sample_weight_floor, weight_scale_percentile=config.sample_weight_scale_percentile, ), + # Remove lags Selector( selection=FeatureSelection( - exclude={ # Fix hardcoded lag features should be replaced by a LagsAdder classmethod - "load_lag_P14D", - "load_lag_P13D", - "load_lag_P12D", - "load_lag_P11D", - "load_lag_P10D", - "load_lag_P9D", - "load_lag_P8D", - # "load_lag_P7D", # Keep 7D lag for weekly seasonality - "load_lag_P6D", - "load_lag_P5D", - "load_lag_P4D", - "load_lag_P3D", - "load_lag_P2D", - } + exclude=set( + LagsAdder( + history_available=config.predict_history, + horizons=config.horizons, + add_trivial_lags=True, + target_column=config.target_column, + ).features_added() + ).difference({"load_lag_P7D"}) ) ), - Selector( # Fix hardcoded holiday features should be replaced by a HolidayFeatureAdder classmethod + # Remove holiday features to avoid linear dependencies + Selector( selection=FeatureSelection( - exclude={ - "is_ascension_day", - "is_christmas_day", - "is_easter_monday", - "is_easter_sunday", - "is_good_friday", - "is_holiday", - "is_king_s_day", - "is_liberation_day", - "is_new_year_s_day", - "is_second_day_of_christmas", - "is_sunday", - "is_week_day", - "is_weekend_day", - "is_whit_monday", - "is_whit_sunday", - "month_of_year", - "quarter_of_year", - } + exclude=set(HolidayFeatureAdder(country_code=config.location.country_code).features_added()) ) ), + Selector( + selection=FeatureSelection(exclude=set(DatetimeFeaturesAdder(onehot_encode=False).features_added())) + ), Imputer( selection=Exclude(config.target_column), imputation_strategy="mean", @@ -435,46 +415,64 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin raise ValueError(msg) # Build combiner - if config.ensemble_type == "learned_weights": - if config.combiner_model == "lgbm": + # Case: Ensemble type, combiner model + match (config.ensemble_type, config.combiner_model): + case ("learned_weights", "lgbm"): combiner_hp = WeightsCombiner.LGBMHyperParams() - elif config.combiner_model == "rf": + combiner_config = WeightsCombiner.Config( + hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles + ) + combiner = WeightsCombiner( + config=combiner_config, + ) + case ("learned_weights", "rf"): combiner_hp = WeightsCombiner.RFHyperParams() - elif config.combiner_model == "xgboost": + combiner_config = WeightsCombiner.Config( + hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles + ) + combiner = WeightsCombiner( + config=combiner_config, + ) + case ("learned_weights", "xgboost"): combiner_hp = WeightsCombiner.XGBHyperParams() - elif config.combiner_model == "logistic": + combiner_config = WeightsCombiner.Config( + hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles + ) + combiner = WeightsCombiner( + config=combiner_config, + ) + case ("learned_weights", "logistic"): combiner_hp = WeightsCombiner.LogisticHyperParams() - else: - msg = f"Unsupported combiner model type: {config.combiner_model}" - raise ValueError(msg) - combiner_config = WeightsCombiner.Config( - hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles - ) - combiner = WeightsCombiner( - config=combiner_config, - ) - elif config.ensemble_type == "stacking": - if config.combiner_model == "lgbm": + combiner_config = WeightsCombiner.Config( + hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles + ) + combiner = WeightsCombiner( + config=combiner_config, + ) + case ("stacking", "lgbm"): combiner_hp = StackingCombiner.LGBMHyperParams() - elif config.combiner_model == "gblinear": + combiner_config = StackingCombiner.Config( + hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles + ) + combiner = StackingCombiner( + config=combiner_config, + ) + case ("stacking", "gblinear"): combiner_hp = StackingCombiner.GBLinearHyperParams(reg_alpha=0.0, reg_lambda=0.0) - else: - msg = f"Unsupported combiner model type for stacking: {config.combiner_model}" + combiner_config = StackingCombiner.Config( + hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles + ) + combiner = StackingCombiner( + config=combiner_config, + ) + case ("rules", _): + combiner_config = RulesCombiner.Config(horizons=config.horizons, quantiles=config.quantiles) + combiner = RulesCombiner( + config=combiner_config, + ) + case _: + msg = f"Unsupported ensemble and combiner combination: {config.ensemble_type}, {config.combiner_model}" raise ValueError(msg) - combiner_config = StackingCombiner.Config( - hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles - ) - combiner = StackingCombiner( - config=combiner_config, - ) - elif config.ensemble_type == "rules": - combiner_config = RulesCombiner.Config(horizons=config.horizons, quantiles=config.quantiles) - combiner = RulesCombiner( - config=combiner_config, - ) - else: - msg = f"Unsupported ensemble type: {config.ensemble_type}" - raise ValueError(msg) postprocessing = [QuantileSorter()] diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index d6dfda8a7..e85c05b09 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -93,7 +93,7 @@ def __init__( self.forecaster_names, self.quantiles = self.get_learner_and_quantile(pd.Index(quantile_feature_names)) n_cols = len(self.forecaster_names) * len(self.quantiles) if len(data.columns) not in {n_cols + 1, n_cols}: - raise ValueError("Data columns do not match the expected number based on base learners and quantiles.") + raise ValueError("Data columns do not match the expected number based on base Forecasters and quantiles.") @property def target_series(self) -> pd.Series | None: @@ -104,16 +104,16 @@ def target_series(self) -> pd.Series | None: @staticmethod def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[str], list[Quantile]]: - """Extract base learner names and quantiles from feature names. + """Extract base Forecaster names and quantiles from feature names. Args: feature_names: Index of feature names in the dataset. Returns: - Tuple containing a list of base learner names and a list of quantiles. + Tuple containing a list of base Forecaster names and a list of quantiles. Raises: - ValueError: If an invalid base learner name is found in a feature name. + ValueError: If an invalid base Forecaster name is found in a feature name. """ forecasters: set[str] = set() quantiles: set[Quantile] = set() @@ -132,13 +132,13 @@ def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[str], list[Q @staticmethod def get_quantile_feature_name(feature_name: str) -> tuple[str, Quantile]: - """Generate the feature name for a given base learner and quantile. + """Generate the feature name for a given base Forecaster and quantile. Args: - feature_name: Feature name string in the format "BaseLearner_Quantile". + feature_name: Feature name string in the format "model_Quantile". Returns: - Tuple containing the base learner name and Quantile object. + Tuple containing the base Forecaster name and Quantile object. """ learner_part, quantile_part = feature_name.split("_", maxsplit=1) return learner_part, Quantile.parse(quantile_part) @@ -192,10 +192,10 @@ def _prepare_classification(data: pd.DataFrame, target: pd.Series, quantile: Qua quantile: Quantile for which to prepare classification data. Returns: - Series with categorical indicators of best-performing base learners. + Series with categorical indicators of best-performing base Forecasters. """ - # Calculate pinball loss for each base learner + # Calculate pinball loss for each base Forecaster def column_pinball_losses(preds: pd.Series) -> pd.Series: return calculate_pinball_errors(y_true=target, y_pred=preds, quantile=quantile) @@ -210,7 +210,7 @@ def select_quantile_classification(self, quantile: Quantile) -> ForecastInputDat quantile: Quantile to select. Returns: - Series containing binary indicators of best-performing base learners for the specified quantile. + Series containing binary indicators of best-performing base Forecasters for the specified quantile. Raises: ValueError: If the target column is not found in the dataset. @@ -258,13 +258,13 @@ def select_quantile(self, quantile: Quantile) -> ForecastInputDataset: ) def select_forecaster(self, forecaster_name: str) -> ForecastDataset: - """Select data for a specific base learner across all quantiles. + """Select data for a specific base Forecaster across all quantiles. Args: - forecaster_name: Name of the base learner to select. + forecaster_name: Name of the base Forecaster to select. Returns: - ForecastDataset containing predictions from the specified base learner. + ForecastDataset containing predictions from the specified base Forecaster. """ selected_columns = [ f"{forecaster_name}_{q.format()}" for q in self.quantiles if f"{forecaster_name}_{q.format()}" in self.data diff --git a/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py index f3d156a13..23835d6e7 100644 --- a/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py @@ -82,7 +82,9 @@ def test_preprocessing( # Check all base models for name, model in base_models.items(): # Ensemble model - common_ensemble = ensemble_model.common_preprocessing.transform(data=sample_timeseries_dataset) + common_ensemble = ensemble_model.common_preprocessing.transform( + data=sample_timeseries_dataset.copy_with(sample_timeseries_dataset.data) + ) ensemble_model.model_specific_preprocessing[name].fit(data=common_ensemble) transformed_ensemble = ensemble_model.model_specific_preprocessing[name].transform(data=common_ensemble) # Base model diff --git a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py index b106eca1f..84f14cef7 100644 --- a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py @@ -64,7 +64,7 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: class SimpleCombiner(ForecastCombiner): - """Simple combiner that averages base learner predictions.""" + """Simple combiner that averages base Forecaster predictions.""" def __init__(self, config: ForecastCombinerConfig): self._config = config diff --git a/packages/openstef-models/src/openstef_models/explainability/mixins.py b/packages/openstef-models/src/openstef_models/explainability/mixins.py index b0fb6fab1..1e82fc413 100644 --- a/packages/openstef-models/src/openstef_models/explainability/mixins.py +++ b/packages/openstef-models/src/openstef_models/explainability/mixins.py @@ -54,7 +54,7 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> p scale: Whether to scale contributions to sum to the prediction value. Returns: - DataFrame with contributions per base learner. + DataFrame with contributions per feature. """ raise NotImplementedError diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py index 73f9d56b3..a0afa77b6 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py @@ -106,8 +106,6 @@ def feature_importances(self) -> pd.DataFrame: @override def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = True) -> pd.DataFrame: - if scale: - pass forecast_index = data.create_forecast_range(horizon=self.config.max_horizon) return pd.DataFrame( diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index 9b230e060..60e7c0a73 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -334,7 +334,7 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = Tru scale: If True, scale contributions to sum to 1.0 per quantile. Returns: - DataFrame with contributions per base learner. + DataFrame with contributions per feature. """ # Get input features for prediction input_data: pd.DataFrame = data.input_data(start=data.forecast_start) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index ed3de0058..5868289d3 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -320,7 +320,7 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> p scale: If True, scale contributions to sum to 1.0 per quantile. Returns: - DataFrame with contributions per base learner. + DataFrame with contributions per feature. """ # Get input features for prediction input_data: pd.DataFrame = data.input_data(start=data.forecast_start) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index b484c8a37..391bcceca 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -323,35 +323,9 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> p scale: If True, scale contributions to sum to 1.0 per quantile. Returns: - DataFrame with contributions per base learner. + DataFrame with contributions per feature. """ raise NotImplementedError("predict_contributions is not yet implemented for LGBMLinearForecaster") - # Get input features for prediction - input_data: pd.DataFrame = data.input_data(start=data.forecast_start) - - contributions: list[pd.DataFrame] = [] - - for i, quantile in enumerate(self.config.quantiles): - # Get model for specific quantile - model: LGBMRegressor = self._lgbmlinear_model.models[i] # type: ignore - - # Generate contributions NOT AVAILABLE FOR LGBM with linear_trees=true - contribs_quantile: np.ndarray[float] = model.predict(input_data, pred_contrib=True)[:, :-1] # type: ignore - - if scale: - # Scale contributions so that they sum to 1.0 per quantile - contribs_quantile = np.abs(contribs_quantile) / np.sum(np.abs(contribs_quantile), axis=1, keepdims=True) - - contributions.append( - pd.DataFrame( - data=contribs_quantile, - index=input_data.index, - columns=[f"{feature}_{quantile.format()}" for feature in input_data.columns], - ) - ) - - # Construct DataFrame - return pd.concat(contributions, axis=1) @property @override diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py index 1469309f6..495fbde6c 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py @@ -428,7 +428,7 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool) -> p scale: If True, scale contributions to sum to 1.0 per quantile. Returns: - DataFrame with contributions per base learner. + DataFrame with contributions per feature. """ # Get input features for prediction input_data: pd.DataFrame = data.input_data(start=data.forecast_start) diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 7d5769f0e..39a415f60 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -100,9 +100,9 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob model_id: ModelIdentifier = Field(description="Unique identifier for the forecasting model.") # Model configuration - model: Literal[ - "xgboost", "gblinear", "flatliner", "stacking", "residual", "learned_weights", "lgbm", "lgbmlinear" - ] = Field(description="Type of forecasting model to use.") # TODO(#652): Implement median forecaster + model: Literal["xgboost", "gblinear", "flatliner", "residual", "lgbm", "lgbmlinear"] = Field( + description="Type of forecasting model to use." + ) # TODO(#652): Implement median forecaster quantiles: list[Quantile] = Field( default=[Q(0.5)], description="List of quantiles to predict for probabilistic forecasting.", diff --git a/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py index 763932268..b95fbc28c 100644 --- a/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py +++ b/packages/openstef-models/src/openstef_models/utils/multi_quantile_regressor.py @@ -41,7 +41,7 @@ def __init__( base_learner: A scikit-learn compatible regressor class that supports quantile regression. quantile_param: The name of the parameter in base_learner that sets the quantile level. quantiles: List of quantiles to predict (e.g., [0.1, 0.5, 0.9]). - hyperparams: Dictionary of hyperparameters to pass to each base learner instance. + hyperparams: Dictionary of hyperparameters to pass to each estimator instance. """ self.quantiles = quantiles self.hyperparams = hyperparams @@ -56,7 +56,7 @@ def _init_model(self, q: float) -> BaseEstimator: base_learner = self.base_learner(**params) if self.quantile_param not in base_learner.get_params(): # type: ignore - msg = f"The base learner does not support the quantile parameter '{self.quantile_param}'." + msg = f"The base estimator does not support the quantile parameter '{self.quantile_param}'." raise ValueError(msg) return base_learner @@ -149,9 +149,9 @@ def models(self) -> list[BaseEstimator]: @property def has_feature_names(self) -> bool: - """Check if the base learners have feature names. + """Check if the base estimators have feature names. Returns: - True if the base learners have feature names, False otherwise. + True if the base estimators have feature names, False otherwise. """ return len(self.model_feature_names) > 0 diff --git a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py index 8b0f6c1b6..5aff3c938 100644 --- a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py @@ -155,7 +155,7 @@ def fit( result = self.model.fit(data=data, data_val=data_val, data_test=data_test) if isinstance(result, EnsembleModelFitResult): - self._logger.info("Discarding EnsembleModelFitResult for compatibility.") + self._logger.debug("Discarding EnsembleModelFitResult for compatibility.") result = result.combiner_fit_result for callback in self.callbacks: From b5a3737c487109d472e0473d999d17fb8e2cc215 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Tue, 16 Dec 2025 14:11:38 +0100 Subject: [PATCH 071/104] Moved example Signed-off-by: Lars van Someren --- .../examples => examples/benchmarks}/liander_2024_residual.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {packages/openstef-meta/src/openstef_meta/examples => examples/benchmarks}/liander_2024_residual.py (100%) diff --git a/packages/openstef-meta/src/openstef_meta/examples/liander_2024_residual.py b/examples/benchmarks/liander_2024_residual.py similarity index 100% rename from packages/openstef-meta/src/openstef_meta/examples/liander_2024_residual.py rename to examples/benchmarks/liander_2024_residual.py From abc67b7156c1a371f76790bd2c1e798c5fc80851 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 17 Dec 2025 15:54:31 +0100 Subject: [PATCH 072/104] Bring 4.1 up to date with release Squashed commit of the following: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 6d140bc374af3bef19923886be70e66d3b47ff9e Author: Lars Schilders <123180911+lschilders@users.noreply.github.com> Date: Wed Dec 17 10:33:19 2025 +0100 feature: add regex pattern matching in FeatureSelection and fix combine bug (#787) commit 32a42bba8a25e602a00531f20ed75cab576d6000 Author: Lars Schilders <123180911+lschilders@users.noreply.github.com> Date: Tue Dec 16 13:50:40 2025 +0100 feature: Selector transform (#786) * feature: add Selector transform * add ForecastInputDataset testcases * add selected_features to presets * add doctest commit d3977b19632b3ce4471f74d3f681a5d5166017a0 Author: Egor Dmitriev Date: Mon Dec 15 09:17:38 2025 +0100 feature: added tutorials for basic functionality. Added convenience method for simple openstef baselines. (#785) * feature: Added tutorial start. Signed-off-by: Egor Dmitriev * feature: Added example notebooks. First draft. Signed-off-by: Egor Dmitriev * chore(examples): add examples workspace project and register it in workspace; update lock * fix(lint): add missing docstrings in baselines package (D104, D103) * chore(examples): add examples workspace project and register it in workspace; update lock * chore(examples): Updated text in examples. --------- Signed-off-by: Egor Dmitriev commit 8a4097c6dffdc0dd3512ffb6a4560aa6fd167a53 Author: Bart Pleiter Date: Wed Dec 10 16:19:48 2025 +0100 fix: exclude stdev column from quantile column checking. (#783) * fix: exclude stdev column from quantile column checking. Signed-off-by: Bart Pleiter * fix: duplicate removed. Signed-off-by: Bart Pleiter * fix: type Signed-off-by: Bart Pleiter --------- Signed-off-by: Bart Pleiter commit 43987fc0048b6678eb97909e486fcd440ff5045f Author: Egor Dmitriev Date: Wed Dec 10 10:24:32 2025 +0100 fix(STEF-2549): Added none check for model end date from mlflow. Added experiment tags. (#782) Signed-off-by: Egor Dmitriev commit 18910098af96452500c401df45a8401a77f9928d Author: Lars Schilders <123180911+lschilders@users.noreply.github.com> Date: Tue Dec 9 14:40:40 2025 +0100 feature: check for model config change and skip model selection (#781) * feature: check for model config change and skip model selection * changed checking model compatibility * check for tag compatibility only * fix tests * rename new methods in callback commit c37ac928c99f1124113b5f30058931324a7c7433 Author: Lars Schilders <123180911+lschilders@users.noreply.github.com> Date: Tue Dec 9 09:22:58 2025 +0100 fix: clip values of wind and solar components to below 0 (#779) * fix: clip values of wind and solar components to below 0 * add test for not all components zero commit 3eb7e69e24e4a13600026ce285f4b39a19101c25 Author: Egor Dmitriev Date: Mon Dec 8 15:55:16 2025 +0100 feat(mlflow): suppress MLflow emoji URL logs (#780) * feat(mlflow): suppress MLflow emoji URL logs Add MLFLOW_SUPPRESS_PRINTING_URL_TO_STDOUT=true environment variable to prevent MLflow from printing 'View run...' messages with emojis that don't comply with ECS JSON logging format. Signed-off-by: Egor Dmitriev * feature: Style fixes. Signed-off-by: Egor Dmitriev --------- Signed-off-by: Egor Dmitriev commit eca628e7832c056059a5df0de476083204aa3c95 Author: Lars Schilders <123180911+lschilders@users.noreply.github.com> Date: Fri Dec 5 16:27:53 2025 +0100 feature: nonzero flatliner preset (#777) * add predict_nonzero_flatliner to presets * remove redundant validate_required_columns commit 61e16999e2bdf37c84c21a2a7eeaa691b7989863 Author: Lars Schilders <123180911+lschilders@users.noreply.github.com> Date: Fri Dec 5 15:37:05 2025 +0100 feature: add standard devation column to ForecastDataset and add it in ConfidenceIntervalApplicator (#778) * feature: add standard devation column to ForecastDataset and add it in ConfidenceIntervalApplicator * simplify code for adding column commit 4f70d00544f7fd926770d10d111976de48609a51 Author: Lars Schilders <123180911+lschilders@users.noreply.github.com> Date: Fri Dec 5 10:41:23 2025 +0100 chore: change radiation unit to Wm-2 (#776) * chore: change expected radiation unit to W/m-2 * change values in test for radiation features adder * fix docs for dni/gti unit * formatting commit 71ac4283129ac3da3eaa2dbfdd360c54ffac420e Author: Bart Pleiter Date: Wed Dec 3 09:45:34 2025 +0100 feature: added use_median option to flatliner forecaster so it predic… (#773) * feature: added use_median option to flatliner forecaster so it predicts the median of the training data. Signed-off-by: Bart Pleiter * feature: improved naming to predict_median. Signed-off-by: Bart Pleiter --------- Signed-off-by: Bart Pleiter commit 45ca37fca0a8ecf3a1581b38cea2910cfe686750 Author: Lars Schilders <123180911+lschilders@users.noreply.github.com> Date: Wed Nov 26 15:45:07 2025 +0100 fix: fixes in EvaluationPipeline and TimeSeriesPlotter (#769) * Remove target column from predictions to avoid duplication for lead_times * get sample_interval class attr commit ee41442b228757dd5adc558101fe02e9e6f5c2be Author: Egor Dmitriev Date: Wed Nov 26 14:01:16 2025 +0100 fix: Improved mlflow to use run names and load proper models for reuse. Fixed time series plotter to use correct sample interval paramter. (#768) * feature: Improved mlflow to use run names and load proper models for reuse. Fixed time series plotter to use correct sample interval paramter. Signed-off-by: Egor Dmitriev * feature(STEF-2551): Fixed path. Changed run_name to step_name in backtester. Signed-off-by: Egor Dmitriev --------- Signed-off-by: Egor Dmitriev commit 7deb69e57fae0372a203ae154ee2ae56fefa749b Author: Bart Pleiter Date: Fri Nov 21 14:42:54 2025 +0100 chore: replaced alliander emails with lfenergy email. (#767) Signed-off-by: Bart Pleiter Signed-off-by: Lars van Someren --- .github/CODE_OF_CONDUCT.md | 6 +- .github/CONTRIBUTING.md | 4 +- .github/pr-labeler.yml | 2 +- .github/workflows/_job_quality_check.yaml | 4 +- .github/workflows/check.yaml | 4 +- .github/workflows/citations.yaml | 4 +- .github/workflows/docs.yaml | 2 +- .github/workflows/pr-labeler.yaml | 4 +- .github/workflows/release-dev.yaml | 2 +- .github/workflows/release-v4.yaml | 4 +- .gitignore | 2 +- CITATION.cff | 4 +- COMMITTERS.md | 2 +- README.md | 3 +- REUSE.toml | 6 +- THIRD_PARTY_LICENSES.md | 3 +- docs/.gitkeep | 4 +- docs/pyproject.toml | 4 +- docs/source/_static/css/custom.css | 4 +- docs/source/_static/versions.json.license | 2 +- docs/source/_templates/custom_class.rst | 2 +- docs/source/_templates/custom_function.rst | 2 +- docs/source/_templates/module_overview.rst | 2 +- docs/source/_templates/package_overview.rst | 2 +- docs/source/api/index.rst | 4 +- docs/source/changelog.rst | 2 +- docs/source/conf.py | 2 +- docs/source/contribute/_getting_help.rst | 4 +- docs/source/contribute/code_of_conduct.rst | 2 +- docs/source/contribute/code_style_guide.rst | 2 +- docs/source/contribute/contributing_guide.rst | 2 +- docs/source/contribute/development_setup.rst | 2 +- .../contribute/development_workflow.rst | 4 +- docs/source/contribute/document.rst | 4 +- docs/source/contribute/index.rst | 4 +- docs/source/examples.rst | 2 +- .../methodology_train_predict.pptx.license | 2 +- .../methodology_train_predict.svg.license | 2 +- .../images/uncertainty_estimation.svg.license | 2 +- docs/source/index.rst | 4 +- docs/source/logos/favicon.ico.license | 2 +- .../logos/logo_openstef_small.png.license | 2 +- .../openstef-horizontal-color.svg.license | 2 +- .../openstef-horizontal-white.svg.license | 2 +- docs/source/project/citing.rst | 4 +- docs/source/project/committee.rst | 6 +- docs/source/project/index.rst | 4 +- docs/source/project/license.rst | 2 +- docs/source/project/maintainers.rst | 4 +- docs/source/project/support.rst | 6 +- docs/source/user_guide/external_resources.rst | 4 +- docs/source/user_guide/index.rst | 3 +- docs/source/user_guide/installation.rst | 6 +- docs/source/user_guide/intro/index.rst | 4 +- .../intro/methodology_train_predict.rst | 4 +- docs/source/user_guide/logging.rst | 2 +- docs/source/user_guide/quick_start.rst | 4 +- docs/source/user_guide/tutorials.rst | 2 +- examples/.gitkeep | 4 +- ...liander_2024_benchmark_xgboost_gblinear.py | 82 +- .../liander_2024_compare_results.py | 2 +- examples/examples/.gitignore | 4 +- .../configuring_model_pipeline_example.py | 2 +- .../examples/forecasting_preset_example.py | 2 +- examples/pyproject.toml | 28 + examples/tutorials/.gitignore | 16 + examples/tutorials/.gitkeep | 0 .../backtesting_openstef_with_beam.ipynb | 460 +++++++++ .../forecasting_with_workflow_presets.ipynb | 972 ++++++++++++++++++ packages/openstef-beam/README.md | 4 +- packages/openstef-beam/pyproject.toml | 4 +- .../src/openstef_beam/__init__.py | 2 +- .../src/openstef_beam/analysis/__init__.py | 2 +- .../analysis/analysis_pipeline.py | 2 +- .../openstef_beam/analysis/models/__init__.py | 2 +- .../analysis/models/target_metadata.py | 2 +- .../models/visualization_aggregation.py | 2 +- .../analysis/models/visualization_output.py | 2 +- .../openstef_beam/analysis/plots/__init__.py | 2 +- .../plots/forecast_time_series_plotter.py | 60 +- .../plots/grouped_target_metric_plotter.py | 2 +- .../plots/precision_recall_curve_plotter.py | 2 +- .../plots/quantile_calibration_box_plotter.py | 2 +- .../plots/quantile_probability_plotter.py | 2 +- .../analysis/plots/summary_table_plotter.py | 2 +- .../analysis/plots/windowed_metric_plotter.py | 2 +- .../analysis/visualizations/__init__.py | 2 +- .../analysis/visualizations/base.py | 2 +- .../grouped_target_metric_visualization.py | 2 +- .../precision_recall_curve_visualization.py | 2 +- .../quantile_calibration_box_visualization.py | 2 +- .../quantile_probability_visualization.py | 2 +- .../summary_table_visualization.py | 2 +- .../timeseries_visualization.py | 7 +- .../windowed_metric_visualization.py | 2 +- .../src/openstef_beam/backtesting/__init__.py | 2 +- .../backtesting/backtest_callback.py | 2 +- .../backtesting/backtest_event.py | 2 +- .../backtesting/backtest_event_generator.py | 2 +- .../backtest_forecaster/__init__.py | 6 +- .../backtest_forecaster/dummy_forecaster.py | 2 +- .../backtesting/backtest_forecaster/mixins.py | 2 +- .../backtesting/backtest_pipeline.py | 2 +- .../restricted_horizon_timeseries.py | 2 +- .../openstef_beam/benchmarking/__init__.py | 2 +- .../benchmarking/baselines/__init__.py | 20 + .../baselines/openstef4.py} | 115 ++- .../benchmark_comparison_pipeline.py | 2 +- .../benchmarking/benchmark_pipeline.py | 2 +- .../benchmarking/benchmarks/__init__.py | 2 +- .../benchmarking/benchmarks/liander2024.py | 2 +- .../benchmarking/callbacks/__init__.py | 2 +- .../benchmarking/callbacks/base.py | 2 +- .../callbacks/strict_execution_callback.py | 2 +- .../benchmarking/models/__init__.py | 2 +- .../benchmarking/models/benchmark_target.py | 2 +- .../benchmarking/storage/__init__.py | 2 +- .../benchmarking/storage/base.py | 2 +- .../benchmarking/storage/local_storage.py | 2 +- .../benchmarking/storage/s3_storage.py | 2 +- .../benchmarking/target_provider.py | 2 +- .../src/openstef_beam/evaluation/__init__.py | 2 +- .../evaluation/evaluation_pipeline.py | 6 +- .../evaluation/metric_providers.py | 2 +- .../evaluation/models/__init__.py | 2 +- .../openstef_beam/evaluation/models/report.py | 2 +- .../openstef_beam/evaluation/models/subset.py | 2 +- .../openstef_beam/evaluation/models/window.py | 2 +- .../evaluation/window_iterators.py | 2 +- .../src/openstef_beam/metrics/__init__.py | 2 +- .../metrics/metrics_deterministic.py | 2 +- .../metrics/metrics_probabilistic.py | 2 +- packages/openstef-beam/tests/__init__.py | 2 +- packages/openstef-beam/tests/unit/__init__.py | 2 +- .../test_forecast_time_series_plotter.py | 48 +- .../test_grouped_target_metric_plotter.py | 2 +- .../test_precision_recall_curve_plotter.py | 2 +- .../test_quantile_calibration_box_plotter.py | 2 +- .../test_quantile_probability_plotter.py | 2 +- .../plots/test_summary_table_plotter.py | 2 +- .../plots/test_windowed_metric_plotter.py | 2 +- .../unit/analysis/test_analysis_pipeline.py | 2 +- .../unit/analysis/visualizations/conftest.py | 2 +- ...est_grouped_target_metric_visualization.py | 2 +- ...st_precision_recall_curve_visualization.py | 2 +- ..._quantile_calibration_box_visualization.py | 2 +- ...test_quantile_probability_visualization.py | 2 +- .../test_summary_table_visualization.py | 2 +- .../test_timeseries_visualization.py | 7 +- .../test_windowed_metric_visualization.py | 2 +- .../test_backtest_event_generator.py | 2 +- .../backtesting/test_backtest_pipeline.py | 2 +- .../unit/backtesting/test_batch_prediction.py | 2 +- .../storage/test_local_storage.py | 2 +- .../benchmarking/storage/test_s3_storage.py | 2 +- .../benchmarking/test_benchmark_pipeline.py | 2 +- .../unit/benchmarking/test_target_provider.py | 2 +- .../tests/unit/evaluation/__init__.py | 2 +- .../tests/unit/evaluation/models/__init__.py | 2 +- .../unit/evaluation/models/test_window.py | 2 +- .../evaluation/test_evaluation_pipeline.py | 2 +- .../unit/evaluation/test_metric_provider.py | 2 +- .../metrics/test_metrics_deterministic.py | 2 +- .../metrics/test_metrics_probabilistic.py | 2 +- packages/openstef-beam/tests/utils/mocks.py | 2 +- packages/openstef-core/README.md | 4 +- packages/openstef-core/pyproject.toml | 4 +- .../src/openstef_core/__init__.py | 2 +- .../src/openstef_core/base_model.py | 2 +- .../src/openstef_core/datasets/__init__.py | 2 +- .../src/openstef_core/datasets/mixins.py | 2 +- .../datasets/timeseries_dataset.py | 2 +- .../datasets/validated_datasets.py | 22 +- .../src/openstef_core/datasets/validation.py | 2 +- .../datasets/versioned_timeseries_dataset.py | 2 +- .../src/openstef_core/exceptions.py | 2 +- .../src/openstef_core/mixins/__init__.py | 2 +- .../src/openstef_core/mixins/predictor.py | 2 +- .../src/openstef_core/mixins/stateful.py | 2 +- .../src/openstef_core/mixins/transform.py | 2 +- .../src/openstef_core/testing.py | 2 +- .../src/openstef_core/transforms/__init__.py | 2 +- .../transforms/dataset_transforms.py | 2 +- .../openstef-core/src/openstef_core/types.py | 2 +- .../src/openstef_core/utils/__init__.py | 2 +- .../src/openstef_core/utils/datetime.py | 2 +- .../src/openstef_core/utils/invariants.py | 2 +- .../src/openstef_core/utils/itertools.py | 2 +- .../openstef_core/utils/multiprocessing.py | 2 +- .../src/openstef_core/utils/pandas.py | 2 +- .../src/openstef_core/utils/pydantic.py | 2 +- packages/openstef-core/tests/__init__.py | 2 +- .../tests/unit/datasets/test_mixins.py | 2 +- .../unit/datasets/test_timeseries_dataset.py | 2 +- .../tests/unit/datasets/test_validation.py | 2 +- .../test_versioned_timeseries_dataset.py | 2 +- .../tests/unit/datasets/utils.py | 2 +- .../tests/unit/mixins/test_stateful.py | 2 +- .../tests/unit/mixins/test_transform.py | 2 +- .../tests/unit/test_base_model.py | 2 +- .../openstef-core/tests/unit/test_types.py | 2 +- .../tests/unit/utils/test_datetime.py | 2 +- .../tests/unit/utils/test_itertools.py | 2 +- .../tests/unit/utils/test_multiprocessing.py | 2 +- packages/openstef-models/README.md | 4 +- packages/openstef-models/pyproject.toml | 6 +- .../src/openstef_models/__init__.py | 2 +- .../explainability/__init__.py | 2 +- .../openstef_models/explainability/mixins.py | 2 +- .../explainability/plotters/__init__.py | 2 +- .../plotters/feature_importance_plotter.py | 2 +- .../openstef_models/integrations/__init__.py | 2 +- .../integrations/joblib/__init__.py | 2 +- .../joblib/joblib_model_serializer.py | 2 +- .../integrations/mlflow/__init__.py | 2 +- .../integrations/mlflow/mlflow_storage.py | 76 +- .../mlflow/mlflow_storage_callback.py | 130 ++- .../src/openstef_models/mixins/__init__.py | 2 +- .../src/openstef_models/mixins/callbacks.py | 2 +- .../mixins/model_serializer.py | 2 +- .../src/openstef_models/models/__init__.py | 2 +- .../models/component_splitting/__init__.py | 2 +- .../component_splitting/component_splitter.py | 2 +- .../constant_component_splitter.py | 2 +- .../linear_component_splitter.py | 15 +- .../linear_component_splitter_model.z.license | 2 +- .../models/component_splitting_model.py | 2 +- .../models/forecasting/__init__.py | 2 +- .../forecasting/base_case_forecaster.py | 2 +- .../forecasting/constant_median_forecaster.py | 2 +- .../forecasting/flatliner_forecaster.py | 27 +- .../models/forecasting/forecaster.py | 2 +- .../models/forecasting/gblinear_forecaster.py | 2 +- .../models/forecasting/xgboost_forecaster.py | 2 +- .../models/forecasting_model.py | 2 +- .../src/openstef_models/presets/__init__.py | 2 +- .../presets/forecasting_workflow.py | 51 +- .../openstef_models/transforms/__init__.py | 2 +- .../transforms/energy_domain/__init__.py | 2 +- .../energy_domain/wind_power_feature_adder.py | 2 +- .../transforms/general/__init__.py | 4 +- .../transforms/general/clipper.py | 2 +- .../general/dimensionality_reducer.py | 2 +- .../general/empty_feature_remover.py | 2 +- .../transforms/general/imputer.py | 2 +- .../transforms/general/nan_dropper.py | 2 +- .../transforms/general/sample_weighter.py | 2 +- .../transforms/general/scaler.py | 2 +- .../transforms/general/selector.py | 84 ++ .../transforms/postprocessing/__init__.py | 2 +- .../confidence_interval_applicator.py | 16 +- .../postprocessing/quantile_sorter.py | 2 +- .../transforms/time_domain/__init__.py | 2 +- .../time_domain/cyclic_features_adder.py | 2 +- .../time_domain/datetime_features_adder.py | 2 +- .../time_domain/holiday_features_adder.py | 2 +- .../transforms/time_domain/lags_adder.py | 2 +- .../time_domain/rolling_aggregates_adder.py | 2 +- .../time_domain/versioned_lags_adder.py | 2 +- .../transforms/validation/__init__.py | 2 +- .../validation/completeness_checker.py | 2 +- .../transforms/validation/flatline_checker.py | 2 +- .../validation/input_consistency_checker.py | 2 +- .../transforms/weather_domain/__init__.py | 2 +- .../atmosphere_derived_features_adder.py | 2 +- .../weather_domain/daylight_feature_adder.py | 2 +- .../radiation_derived_features_adder.py | 21 +- .../src/openstef_models/utils/__init__.py | 2 +- .../src/openstef_models/utils/data_split.py | 2 +- .../utils/evaluation_functions.py | 2 +- .../utils/feature_selection.py | 142 ++- .../openstef_models/utils/loss_functions.py | 2 +- .../src/openstef_models/workflows/__init__.py | 2 +- .../custom_component_split_workflow.py | 2 +- .../workflows/custom_forecasting_workflow.py | 7 +- packages/openstef-models/tests/__init__.py | 2 +- .../tests/integration/test_integration.py | 2 +- .../tests/unit/integrations/__init__.py | 2 +- .../unit/integrations/joblib/__init__.py | 2 +- .../joblib/test_joblib_model_serializer.py | 2 +- .../unit/integrations/mlflow/__init__.py | 2 +- .../mlflow/test_mlflow_storage.py | 19 +- .../mlflow/test_mlflow_storage_callback.py | 39 +- .../models/component_splitting/__init__.py | 2 +- .../test_constant_component_splitter.py | 2 +- .../test_linear_component_splitter.py | 14 +- .../tests/unit/models/forecasting/conftest.py | 2 +- .../forecasting/test_base_case_forecaster.py | 2 +- .../test_constant_median_forecaster.py | 2 +- .../forecasting/test_flatliner_forecaster.py | 24 +- .../forecasting/test_gblinear_forecaster.py | 2 +- .../forecasting/test_xgboost_forecaster.py | 2 +- .../unit/models/test_forecasting_model.py | 2 +- .../tests/unit/test_example.py | 2 +- .../test_wind_power_feature_adder.py | 2 +- .../unit/transforms/general/test_clipper.py | 2 +- .../general/test_dimensionality_reducer.py | 2 +- .../general/test_empty_feature_remover.py | 2 +- .../unit/transforms/general/test_imputer.py | 2 +- .../transforms/general/test_nan_dropper.py | 2 +- .../general/test_sample_weighter.py | 2 +- .../unit/transforms/general/test_scaler.py | 2 +- .../unit/transforms/general/test_selector.py | 82 ++ .../transforms/postprocessing/__init__.py | 2 +- .../test_confidence_interval_applicator.py | 44 +- .../postprocessing/test_quantile_sorter.py | 2 +- .../time_domain/test_cyclic_features_adder.py | 2 +- .../test_datetime_features_adder.py | 2 +- .../test_holiday_features_adder.py | 2 +- .../transforms/time_domain/test_lags_adder.py | 2 +- .../test_rolling_aggregates_adder.py | 2 +- .../time_domain/test_versioned_lags_adder.py | 2 +- .../validation/test_completeness_checker.py | 2 +- .../validation/test_flatline_checker.py | 2 +- .../test_input_consistency_checker.py | 2 +- .../test_atmosphere_derived_features_adder.py | 2 +- .../test_daylight_feature_adder.py | 2 +- .../test_radiation_derived_featuers_adder.py | 39 +- .../tests/unit/utils/__init__.py | 2 +- .../tests/unit/utils/test_data_split.py | 2 +- .../unit/utils/test_feature_selection.py | 125 +++ .../tests/unit/utils/test_loss_functions.py | 2 +- pyproject.toml | 11 +- tools/reuse-fix.py | 2 +- uv.lock | 940 ++++++++++++++++- 325 files changed, 3818 insertions(+), 606 deletions(-) create mode 100644 examples/pyproject.toml create mode 100644 examples/tutorials/.gitignore delete mode 100644 examples/tutorials/.gitkeep create mode 100644 examples/tutorials/backtesting_openstef_with_beam.ipynb create mode 100644 examples/tutorials/forecasting_with_workflow_presets.ipynb create mode 100644 packages/openstef-beam/src/openstef_beam/benchmarking/baselines/__init__.py rename packages/openstef-beam/src/openstef_beam/{backtesting/backtest_forecaster/openstef4_backtest_forecaster.py => benchmarking/baselines/openstef4.py} (52%) create mode 100644 packages/openstef-models/src/openstef_models/transforms/general/selector.py create mode 100644 packages/openstef-models/tests/unit/transforms/general/test_selector.py create mode 100644 packages/openstef-models/tests/unit/utils/test_feature_selection.py diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md index 77ed2fab8..fd2031aea 100644 --- a/.github/CODE_OF_CONDUCT.md +++ b/.github/CODE_OF_CONDUCT.md @@ -1,5 +1,5 @@ @@ -61,7 +61,7 @@ further defined and clarified by project maintainers. ## Conflict Resolution -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at short.term.energy.forecasts@alliander.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at openstef@lfenergy.org. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project’s leadership. @@ -69,4 +69,4 @@ Project maintainers who do not follow or enforce the Code of Conduct in good fai This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at -https://www.contributor-covenant.org/version/1/4/code-of-conduct.html \ No newline at end of file +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index c2282aa47..8f9778107 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -1,5 +1,5 @@ @@ -31,7 +31,7 @@ For information about: - 💬 **Slack**: [LF Energy workspace](https://slack.lfenergy.org/) (#openstef channel) - 🐛 **Issues**: [GitHub Issues](https://github.com/OpenSTEF/openstef/issues) -- 📧 **Email**: short.term.energy.forecasts@alliander.com +- 📧 **Email**: openstef@lfenergy.org - 📖 **Support**: [Support page](https://openstef.github.io/openstef/v4/project/support.html) ## Good First Issues diff --git a/.github/pr-labeler.yml b/.github/pr-labeler.yml index b29c00f13..485395948 100644 --- a/.github/pr-labeler.yml +++ b/.github/pr-labeler.yml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 # config of action in ./workflows/... diff --git a/.github/workflows/_job_quality_check.yaml b/.github/workflows/_job_quality_check.yaml index 388452dc2..f4507d23f 100644 --- a/.github/workflows/_job_quality_check.yaml +++ b/.github/workflows/_job_quality_check.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -62,4 +62,4 @@ jobs: - name: Stop if any quality step failed # All tests are run with always() not to stop on the first error. This step makes the workflow fail if any quality step failed. if: ${{ failure() }} - run: exit 1 \ No newline at end of file + run: exit 1 diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml index b8b40503f..ba85e63c2 100644 --- a/.github/workflows/check.yaml +++ b/.github/workflows/check.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # SPDX-License-Identifier: MPL-2.0 name: Quality Check @@ -18,4 +18,4 @@ permissions: jobs: quality: name: Quality Checks - uses: ./.github/workflows/_job_quality_check.yaml \ No newline at end of file + uses: ./.github/workflows/_job_quality_check.yaml diff --git a/.github/workflows/citations.yaml b/.github/workflows/citations.yaml index f8904588a..8809b15a1 100644 --- a/.github/workflows/citations.yaml +++ b/.github/workflows/citations.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -35,4 +35,4 @@ jobs: run: | sudo apt-get update && sudo apt-get install -y r-base - name: Validate CITATION.cff - uses: dieghernan/cff-validator@v4 \ No newline at end of file + uses: dieghernan/cff-validator@v4 diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 0f5ad1fa4..29cd4570e 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/.github/workflows/pr-labeler.yaml b/.github/workflows/pr-labeler.yaml index 665a18fac..86df8af08 100644 --- a/.github/workflows/pr-labeler.yaml +++ b/.github/workflows/pr-labeler.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2022 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2022 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 # Automatically label PRs, config in ../pr-labler.yml @@ -18,4 +18,4 @@ jobs: with: configuration-path: .github/pr-labeler.yml # optional, .github/pr-labeler.yml is the default value env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-dev.yaml b/.github/workflows/release-dev.yaml index 6cd590ed3..f007ca777 100644 --- a/.github/workflows/release-dev.yaml +++ b/.github/workflows/release-dev.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # SPDX-License-Identifier: MPL-2.0 name: Dev Release diff --git a/.github/workflows/release-v4.yaml b/.github/workflows/release-v4.yaml index 2f6d813c7..6479e85ff 100644 --- a/.github/workflows/release-v4.yaml +++ b/.github/workflows/release-v4.yaml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # SPDX-License-Identifier: MPL-2.0 name: Release V4 @@ -80,4 +80,4 @@ jobs: - name: Publish packages run: uv publish --trusted-publishing always - name: Summary - run: echo "Published version ${{ steps.ver.outputs.version }}" \ No newline at end of file + run: echo "Published version ${{ steps.ver.outputs.version }}" diff --git a/.gitignore b/.gitignore index 5d863242d..f143d21c5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # noqa E501> +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # noqa E501> # SPDX-License-Identifier: MPL-2.0 # Core diff --git a/CITATION.cff b/CITATION.cff index 4c884a641..35c1d6432 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -60,4 +60,4 @@ authors: - name: "Contributors to the OpenSTEF project" contact: - name: "Team mailbox OpenSTEF" - email: "short.term.energy.forecasts@alliander.com" + email: "openstef@lfenergy.org" diff --git a/COMMITTERS.md b/COMMITTERS.md index 0088210d9..94a0581dd 100644 --- a/COMMITTERS.md +++ b/COMMITTERS.md @@ -1,5 +1,5 @@ diff --git a/README.md b/README.md index 55b5024a0..d075f24dc 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ @@ -142,4 +142,3 @@ This project includes third-party libraries licensed under their respective Open - **[GitHub Discussions](https://github.com/OpenSTEF/openstef/discussions)** - community Q&A and discussions - **[Issue Tracker](https://github.com/OpenSTEF/openstef/issues)** - bug reports and feature requests - **[LF Energy OpenSTEF](https://www.lfenergy.org/projects/openstef/)** - project homepage - diff --git a/REUSE.toml b/REUSE.toml index 061010cca..0d4f42da5 100644 --- a/REUSE.toml +++ b/REUSE.toml @@ -5,9 +5,10 @@ path = [ ".github/ISSUE_TEMPLATE/**", ".python-version", "uv.lock", + "examples/*/*.ipynb", ] precedence = "override" -SPDX-FileCopyrightText = "2025 Contributors to the OpenSTEF project " +SPDX-FileCopyrightText = "2025 Contributors to the OpenSTEF project " SPDX-License-Identifier = "MPL-2.0" [[annotations]] @@ -15,6 +16,5 @@ path = [ "**/__pycache__/**", ] precedence = "override" -SPDX-FileCopyrightText = "2025 Contributors to the OpenSTEF project " +SPDX-FileCopyrightText = "2025 Contributors to the OpenSTEF project " SPDX-License-Identifier = "MPL-2.0" - diff --git a/THIRD_PARTY_LICENSES.md b/THIRD_PARTY_LICENSES.md index 2b7d377e4..400680a1b 100644 --- a/THIRD_PARTY_LICENSES.md +++ b/THIRD_PARTY_LICENSES.md @@ -1,5 +1,5 @@ @@ -1289,4 +1289,3 @@ Find a list of packages below - License: MIT - Compatible: True - Size: 22363 - diff --git a/docs/.gitkeep b/docs/.gitkeep index f44dad5bb..72baaab86 100644 --- a/docs/.gitkeep +++ b/docs/.gitkeep @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # -# SPDX-License-Identifier: MPL-2.0 \ No newline at end of file +# SPDX-License-Identifier: MPL-2.0 diff --git a/docs/pyproject.toml b/docs/pyproject.toml index e3ed9083f..f06651cfc 100644 --- a/docs/pyproject.toml +++ b/docs/pyproject.toml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -10,7 +10,7 @@ readme = "README.md" keywords = [ "energy", "forecasting", "machinelearning" ] license = "MPL-2.0" authors = [ - { name = "Alliander N.V", email = "short.term.energy.forecasts@alliander.com" }, + { name = "Alliander N.V", email = "openstef@lfenergy.org" }, ] requires-python = ">=3.12,<4.0" dependencies = [ diff --git a/docs/source/_static/css/custom.css b/docs/source/_static/css/custom.css index 1e115752c..7a1227464 100644 --- a/docs/source/_static/css/custom.css +++ b/docs/source/_static/css/custom.css @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project + * SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project * * SPDX-License-Identifier: MPL-2.0 */ @@ -7,4 +7,4 @@ .navbar-brand.logo { padding-top: 0.75rem; padding-bottom: 0.75rem; -} \ No newline at end of file +} diff --git a/docs/source/_static/versions.json.license b/docs/source/_static/versions.json.license index 37e10dd31..7d320d6e2 100644 --- a/docs/source/_static/versions.json.license +++ b/docs/source/_static/versions.json.license @@ -1,3 +1,3 @@ -SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/_templates/custom_class.rst b/docs/source/_templates/custom_class.rst index f9428029d..6887cc247 100644 --- a/docs/source/_templates/custom_class.rst +++ b/docs/source/_templates/custom_class.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/_templates/custom_function.rst b/docs/source/_templates/custom_function.rst index df6eb6284..b1c091ae0 100644 --- a/docs/source/_templates/custom_function.rst +++ b/docs/source/_templates/custom_function.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/_templates/module_overview.rst b/docs/source/_templates/module_overview.rst index da80ddc57..4978be78e 100644 --- a/docs/source/_templates/module_overview.rst +++ b/docs/source/_templates/module_overview.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/_templates/package_overview.rst b/docs/source/_templates/package_overview.rst index e67a2ad70..5cd4cd993 100644 --- a/docs/source/_templates/package_overview.rst +++ b/docs/source/_templates/package_overview.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/api/index.rst b/docs/source/api/index.rst index 67775642c..8e57fe44e 100644 --- a/docs/source/api/index.rst +++ b/docs/source/api/index.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -85,5 +85,3 @@ BEAM Package (:mod:`openstef_beam`) analysis evaluation benchmarking - - diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst index 02517547a..42ffb2b68 100644 --- a/docs/source/changelog.rst +++ b/docs/source/changelog.rst @@ -1,5 +1,5 @@ .. comment: - SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project + SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 .. _changelog: diff --git a/docs/source/conf.py b/docs/source/conf.py index 5aa8a0d41..932a5d101 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/contribute/_getting_help.rst b/docs/source/contribute/_getting_help.rst index 5bdbb53fa..033647d9f 100644 --- a/docs/source/contribute/_getting_help.rst +++ b/docs/source/contribute/_getting_help.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -9,7 +9,7 @@ If you need assistance: * 💬 **Slack**: Join the `LF Energy Slack workspace `_ (#openstef channel) * 🐛 **Issues**: Check `GitHub Issues `_ or create a new one -* 📧 **Email**: Contact us at ``short.term.energy.forecasts@alliander.com`` +* 📧 **Email**: Contact us at ``openstef@lfenergy.org`` * 🤝 **Community meetings**: Join our four-weekly co-coding sessions For more information, see our :doc:`/project/support` page. diff --git a/docs/source/contribute/code_of_conduct.rst b/docs/source/contribute/code_of_conduct.rst index 1f2c9076f..afa7fe065 100644 --- a/docs/source/contribute/code_of_conduct.rst +++ b/docs/source/contribute/code_of_conduct.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/contribute/code_style_guide.rst b/docs/source/contribute/code_style_guide.rst index 62cf7d506..62855cf75 100644 --- a/docs/source/contribute/code_style_guide.rst +++ b/docs/source/contribute/code_style_guide.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/contribute/contributing_guide.rst b/docs/source/contribute/contributing_guide.rst index 06782b03b..abf42cee7 100644 --- a/docs/source/contribute/contributing_guide.rst +++ b/docs/source/contribute/contributing_guide.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/contribute/development_setup.rst b/docs/source/contribute/development_setup.rst index 7e67cf1f6..56e7b5c2c 100644 --- a/docs/source/contribute/development_setup.rst +++ b/docs/source/contribute/development_setup.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/contribute/development_workflow.rst b/docs/source/contribute/development_workflow.rst index 123dfdcc3..7ac92edda 100644 --- a/docs/source/contribute/development_workflow.rst +++ b/docs/source/contribute/development_workflow.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -252,7 +252,7 @@ The ``reuse --fix`` command automatically adds the correct license header to new .. code-block:: python - # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project + # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/contribute/document.rst b/docs/source/contribute/document.rst index 04962f275..06c42be5a 100644 --- a/docs/source/contribute/document.rst +++ b/docs/source/contribute/document.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -397,4 +397,4 @@ If you need help with documentation specifically: * Check the `Sphinx documentation `_ * Look at existing documentation for examples -* Reference the `Diátaxis framework `_ for guidance on documentation types \ No newline at end of file +* Reference the `Diátaxis framework `_ for guidance on documentation types diff --git a/docs/source/contribute/index.rst b/docs/source/contribute/index.rst index 3f38db89d..9e86f5d60 100644 --- a/docs/source/contribute/index.rst +++ b/docs/source/contribute/index.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -208,4 +208,4 @@ maintainable and user-friendly. :maxdepth: 1 :titlesonly: - code_of_conduct \ No newline at end of file + code_of_conduct diff --git a/docs/source/examples.rst b/docs/source/examples.rst index 4ce99919e..23023da77 100644 --- a/docs/source/examples.rst +++ b/docs/source/examples.rst @@ -1,5 +1,5 @@ .. comment: - SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project + SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 .. _examples: diff --git a/docs/source/images/methodology_train_predict.pptx.license b/docs/source/images/methodology_train_predict.pptx.license index 37e10dd31..7d320d6e2 100644 --- a/docs/source/images/methodology_train_predict.pptx.license +++ b/docs/source/images/methodology_train_predict.pptx.license @@ -1,3 +1,3 @@ -SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/images/methodology_train_predict.svg.license b/docs/source/images/methodology_train_predict.svg.license index 37e10dd31..7d320d6e2 100644 --- a/docs/source/images/methodology_train_predict.svg.license +++ b/docs/source/images/methodology_train_predict.svg.license @@ -1,3 +1,3 @@ -SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/images/uncertainty_estimation.svg.license b/docs/source/images/uncertainty_estimation.svg.license index 37e10dd31..7d320d6e2 100644 --- a/docs/source/images/uncertainty_estimation.svg.license +++ b/docs/source/images/uncertainty_estimation.svg.license @@ -1,3 +1,3 @@ -SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/index.rst b/docs/source/index.rst index 1e0fe3720..06b65ce0a 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -168,4 +168,4 @@ About OpenSTEF * :doc:`project/committee` * :doc:`project/maintainers` * :doc:`project/citing` - * :doc:`project/license` \ No newline at end of file + * :doc:`project/license` diff --git a/docs/source/logos/favicon.ico.license b/docs/source/logos/favicon.ico.license index 57abd4c9e..ce6567d86 100644 --- a/docs/source/logos/favicon.ico.license +++ b/docs/source/logos/favicon.ico.license @@ -1,3 +1,3 @@ -SPDX-FileCopyrightText: 2017-2023 Contributors to the OpenSTEF project +SPDX-FileCopyrightText: 2017-2023 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/logos/logo_openstef_small.png.license b/docs/source/logos/logo_openstef_small.png.license index 854eb6f31..882929b50 100644 --- a/docs/source/logos/logo_openstef_small.png.license +++ b/docs/source/logos/logo_openstef_small.png.license @@ -1,3 +1,3 @@ -SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/logos/openstef-horizontal-color.svg.license b/docs/source/logos/openstef-horizontal-color.svg.license index 37e10dd31..7d320d6e2 100644 --- a/docs/source/logos/openstef-horizontal-color.svg.license +++ b/docs/source/logos/openstef-horizontal-color.svg.license @@ -1,3 +1,3 @@ -SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/logos/openstef-horizontal-white.svg.license b/docs/source/logos/openstef-horizontal-white.svg.license index 37e10dd31..7d320d6e2 100644 --- a/docs/source/logos/openstef-horizontal-white.svg.license +++ b/docs/source/logos/openstef-horizontal-white.svg.license @@ -1,3 +1,3 @@ -SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/project/citing.rst b/docs/source/project/citing.rst index eb7e0a037..6eb509e25 100644 --- a/docs/source/project/citing.rst +++ b/docs/source/project/citing.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -37,4 +37,4 @@ Citation File Format (CFF) .. container:: sphx-glr-download - :download:`Download CFF citation file: CITATION.cff <../../../CITATION.cff>` \ No newline at end of file + :download:`Download CFF citation file: CITATION.cff <../../../CITATION.cff>` diff --git a/docs/source/project/committee.rst b/docs/source/project/committee.rst index 7d01f3c74..2ddfe6ec2 100644 --- a/docs/source/project/committee.rst +++ b/docs/source/project/committee.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -22,8 +22,8 @@ The TSC consists of the following members: 6. Maxime Fortin 7. Bart Pleiter -Any community member or Contributor can ask that something be reviewed by the TSC by contacting the TSC at ``short.term.energy.forecasts@alliander.com``. +Any community member or Contributor can ask that something be reviewed by the TSC by contacting the TSC at ``openstef@lfenergy.org``. More information, meeting notes and meeting links can be found in the `TSC documentation `_. -More information on project governance can be found in the `project governance documentation `_. \ No newline at end of file +More information on project governance can be found in the `project governance documentation `_. diff --git a/docs/source/project/index.rst b/docs/source/project/index.rst index d9fb7385a..61ef24f2e 100644 --- a/docs/source/project/index.rst +++ b/docs/source/project/index.rst @@ -1,5 +1,5 @@ .. comment: - SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project + SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 .. _community: @@ -34,4 +34,4 @@ If you found a bug or would like to request a feature, please `open an issue `_. -.. include:: support.rst \ No newline at end of file +.. include:: support.rst diff --git a/docs/source/project/license.rst b/docs/source/project/license.rst index 6cf4880cb..f2ca4f7c5 100644 --- a/docs/source/project/license.rst +++ b/docs/source/project/license.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/project/maintainers.rst b/docs/source/project/maintainers.rst index b541463c2..8e6e387aa 100644 --- a/docs/source/project/maintainers.rst +++ b/docs/source/project/maintainers.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -24,4 +24,4 @@ The current maintainers of this project are: 6. Egor Dmitriev 7. Lars Schilders -Any community member or Contributor can ask a question or raise an issue to the maintainers by logging a GitHub issue. \ No newline at end of file +Any community member or Contributor can ask a question or raise an issue to the maintainers by logging a GitHub issue. diff --git a/docs/source/project/support.rst b/docs/source/project/support.rst index ce08beb0d..7727cbf55 100644 --- a/docs/source/project/support.rst +++ b/docs/source/project/support.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -9,7 +9,7 @@ Support There are a few ways to connect with the OpenSTEF project: * Join the **#openstef channel**, which is part of the `LF Energy Slack workspace `_. - * Depending on your work e-mail address, you may need to be invited in order to join the Slack workspace. If this is the case, please e-mail ``short.term.energy.forecasts@alliander.com``. We are happy to invite you. + * Depending on your work e-mail address, you may need to be invited in order to join the Slack workspace. If this is the case, please e-mail ``openstef@lfenergy.org``. We are happy to invite you. * Send a **direct message** to one of the most recent `contributors `_. * Submit an **issue** at `GitHub `_. * Join the four-weekly **community meeting**. You can find information, including meeting invite links, on `our wiki page `_. @@ -27,4 +27,4 @@ This project manages bugs and enhancements using the `GitHub issue tracker +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -10,4 +10,4 @@ External resources ====== Videos -====== \ No newline at end of file +====== diff --git a/docs/source/user_guide/index.rst b/docs/source/user_guide/index.rst index 7f119206d..af29e3944 100644 --- a/docs/source/user_guide/index.rst +++ b/docs/source/user_guide/index.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -17,4 +17,3 @@ ========== User Guide ========== - diff --git a/docs/source/user_guide/installation.rst b/docs/source/user_guide/installation.rst index b289ade05..4184dec60 100644 --- a/docs/source/user_guide/installation.rst +++ b/docs/source/user_guide/installation.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -314,7 +314,7 @@ If you encounter issues: 1. Check the `GitHub Issues `_ 2. Review the :doc:`../contribute/index` guide 3. Visit our :ref:`support` page for community resources -4. Contact us at short.term.energy.forecasts@alliander.com +4. Contact us at openstef@lfenergy.org Platform-Specific Notes ======================== @@ -396,4 +396,4 @@ OpenSTEF follows semantic versioning. To stay updated with the latest releases: # Upgrade to latest version pixi upgrade openstef -Subscribe to our `GitHub releases `_ for notifications about new versions and features. \ No newline at end of file +Subscribe to our `GitHub releases `_ for notifications about new versions and features. diff --git a/docs/source/user_guide/intro/index.rst b/docs/source/user_guide/intro/index.rst index 2610b31b7..986f8e6d8 100644 --- a/docs/source/user_guide/intro/index.rst +++ b/docs/source/user_guide/intro/index.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -12,4 +12,4 @@ Intro to Energy Forecasting This page is inherited from OpenSTEF 3.0 and may still be revised. ... -.. include:: methodology_train_predict.rst \ No newline at end of file +.. include:: methodology_train_predict.rst diff --git a/docs/source/user_guide/intro/methodology_train_predict.rst b/docs/source/user_guide/intro/methodology_train_predict.rst index 8e3fb099a..3ab6c744f 100644 --- a/docs/source/user_guide/intro/methodology_train_predict.rst +++ b/docs/source/user_guide/intro/methodology_train_predict.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -26,4 +26,4 @@ on how to the confidence estimations should be used. :alt: Uncertainty estimation -`Source file `__ \ No newline at end of file +`Source file `__ diff --git a/docs/source/user_guide/logging.rst b/docs/source/user_guide/logging.rst index 85dd04c3e..d6d590f0f 100644 --- a/docs/source/user_guide/logging.rst +++ b/docs/source/user_guide/logging.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 diff --git a/docs/source/user_guide/quick_start.rst b/docs/source/user_guide/quick_start.rst index 3f0e15b77..a1dfacc2e 100644 --- a/docs/source/user_guide/quick_start.rst +++ b/docs/source/user_guide/quick_start.rst @@ -1,4 +1,4 @@ -.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +.. SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project .. .. SPDX-License-Identifier: MPL-2.0 @@ -10,4 +10,4 @@ Quick Start .. admonition:: This page is under construction. - Want to help? Check :ref:`Contributing ` for more information. \ No newline at end of file + Want to help? Check :ref:`Contributing ` for more information. diff --git a/docs/source/user_guide/tutorials.rst b/docs/source/user_guide/tutorials.rst index 7ca78f9a7..54c36ceff 100644 --- a/docs/source/user_guide/tutorials.rst +++ b/docs/source/user_guide/tutorials.rst @@ -1,5 +1,5 @@ .. comment: - SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project + SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 .. _tutorials: diff --git a/examples/.gitkeep b/examples/.gitkeep index f44dad5bb..72baaab86 100644 --- a/examples/.gitkeep +++ b/examples/.gitkeep @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # -# SPDX-License-Identifier: MPL-2.0 \ No newline at end of file +# SPDX-License-Identifier: MPL-2.0 diff --git a/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py b/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py index dc41b1744..b18773206 100644 --- a/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py +++ b/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py @@ -6,7 +6,7 @@ The benchmark will evaluate XGBoost and GBLinear models on the dataset from HuggingFace. """ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -18,30 +18,23 @@ import logging import multiprocessing -from datetime import timedelta from pathlib import Path -from pydantic_extra_types.coordinate import Coordinate -from pydantic_extra_types.country import CountryAlpha2 - -from openstef_beam.backtesting.backtest_forecaster import BacktestForecasterConfig, OpenSTEF4BacktestForecaster -from openstef_beam.benchmarking.benchmark_pipeline import BenchmarkContext +from openstef_beam.benchmarking.baselines import ( + create_openstef4_preset_backtest_forecaster, +) from openstef_beam.benchmarking.benchmarks.liander2024 import Liander2024Category, create_liander2024_benchmark_runner from openstef_beam.benchmarking.callbacks.strict_execution_callback import StrictExecutionCallback -from openstef_beam.benchmarking.models.benchmark_target import BenchmarkTarget from openstef_beam.benchmarking.storage.local_storage import LocalBenchmarkStorage from openstef_core.types import LeadTime, Q from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage from openstef_models.presets import ( ForecastingWorkflowConfig, - create_forecasting_workflow, ) -from openstef_models.presets.forecasting_workflow import LocationConfig -from openstef_models.workflows import CustomForecastingWorkflow logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") -OUTPUT_PATH = Path("./benchmark_results") +OUTPUT_PATH = Path("./benchmark_results_test_convenience") BENCHMARK_RESULTS_PATH_XGBOOST = OUTPUT_PATH / "XGBoost" BENCHMARK_RESULTS_PATH_GBLINEAR = OUTPUT_PATH / "GBLinear" @@ -73,11 +66,12 @@ common_config = ForecastingWorkflowConfig( model_id="common_model_", + run_name=None, model="flatliner", horizons=FORECAST_HORIZONS, quantiles=PREDICTION_QUANTILES, - model_reuse_enable=False, - mlflow_storage=None, + model_reuse_enable=True, + mlflow_storage=storage, radiation_column="shortwave_radiation", rolling_aggregate_features=["mean", "median", "max", "min"], wind_speed_column="wind_speed_80m", @@ -91,72 +85,30 @@ gblinear_config = common_config.model_copy(update={"model": "gblinear"}) -# Create the backtest configuration -backtest_config = BacktestForecasterConfig( - requires_training=True, - predict_length=timedelta(days=7), - predict_min_length=timedelta(minutes=15), - predict_context_length=timedelta(days=14), # Context needed for lag features - predict_context_min_coverage=0.5, - training_context_length=timedelta(days=90), # Three months of training data - training_context_min_coverage=0.5, - predict_sample_interval=timedelta(minutes=15), -) - - -def _target_forecaster_factory( - context: BenchmarkContext, - target: BenchmarkTarget, -) -> OpenSTEF4BacktestForecaster: - # Factory function that creates a forecaster for a given target. - prefix = context.run_name - base_config = xgboost_config if context.run_name == "xgboost" else gblinear_config - - def _create_workflow() -> CustomForecastingWorkflow: - # Create a new workflow instance with fresh model. - return create_forecasting_workflow( - config=base_config.model_copy( - update={ - "model_id": f"{prefix}_{target.name}", - "location": LocationConfig( - name=target.name, - description=target.description, - coordinate=Coordinate( - latitude=target.latitude, - longitude=target.longitude, - ), - country_code=CountryAlpha2("NL"), - ), - } - ) - ) - - return OpenSTEF4BacktestForecaster( - config=backtest_config, - workflow_factory=_create_workflow, - debug=False, - cache_dir=OUTPUT_PATH / "cache" / f"{context.run_name}_{target.name}", - ) - - if __name__ == "__main__": # Run for XGBoost model create_liander2024_benchmark_runner( storage=LocalBenchmarkStorage(base_path=BENCHMARK_RESULTS_PATH_XGBOOST), callbacks=[StrictExecutionCallback()], ).run( - forecaster_factory=_target_forecaster_factory, + forecaster_factory=create_openstef4_preset_backtest_forecaster( + workflow_config=xgboost_config, + cache_dir=OUTPUT_PATH / "cache", + ), run_name="xgboost", n_processes=N_PROCESSES, filter_args=BENCHMARK_FILTER, ) - # Run for GBLinear model + # # Run for GBLinear model create_liander2024_benchmark_runner( storage=LocalBenchmarkStorage(base_path=BENCHMARK_RESULTS_PATH_GBLINEAR), callbacks=[StrictExecutionCallback()], ).run( - forecaster_factory=_target_forecaster_factory, + forecaster_factory=create_openstef4_preset_backtest_forecaster( + workflow_config=gblinear_config, + cache_dir=OUTPUT_PATH / "cache", + ), run_name="gblinear", n_processes=N_PROCESSES, filter_args=BENCHMARK_FILTER, diff --git a/examples/benchmarks/liander_2024_compare_results.py b/examples/benchmarks/liander_2024_compare_results.py index 3de16460c..dabbbec49 100644 --- a/examples/benchmarks/liander_2024_compare_results.py +++ b/examples/benchmarks/liander_2024_compare_results.py @@ -1,5 +1,5 @@ """Example for comparing benchmark results from different runs on the Liander 2024 dataset.""" -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/examples/examples/.gitignore b/examples/examples/.gitignore index 1db058dc8..39116e399 100644 --- a/examples/examples/.gitignore +++ b/examples/examples/.gitignore @@ -1,5 +1,5 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 -mlflow_tracking* \ No newline at end of file +mlflow_tracking* diff --git a/examples/examples/configuring_model_pipeline_example.py b/examples/examples/configuring_model_pipeline_example.py index c315e5887..2b8250b1b 100644 --- a/examples/examples/configuring_model_pipeline_example.py +++ b/examples/examples/configuring_model_pipeline_example.py @@ -25,7 +25,7 @@ into a working forecasting system. """ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/examples/examples/forecasting_preset_example.py b/examples/examples/forecasting_preset_example.py index 47e1e42e4..480527252 100644 --- a/examples/examples/forecasting_preset_example.py +++ b/examples/examples/forecasting_preset_example.py @@ -25,7 +25,7 @@ into a working forecasting system. """ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/examples/pyproject.toml b/examples/pyproject.toml new file mode 100644 index 000000000..a347a8b7d --- /dev/null +++ b/examples/pyproject.toml @@ -0,0 +1,28 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +[project] +name = "openstef-examples" +version = "0.0.0" +description = "Examples and tutorials for OpenSTEF" +readme = "README.md" +requires-python = ">=3.12,<4.0" +dependencies = [ + "openstef", + "openstef-beam", + "openstef-core", + "openstef-models", +] + +optional-dependencies.tutorials = [ + "huggingface-hub>=1.2.2", + "jupyter>=1.1.1", + "kaleido" +] + +[tool.uv.sources] +openstef = { workspace = true } +openstef-beam = { workspace = true } +openstef-core = { workspace = true } +openstef-models = { workspace = true } diff --git a/examples/tutorials/.gitignore b/examples/tutorials/.gitignore new file mode 100644 index 000000000..1e97dea7c --- /dev/null +++ b/examples/tutorials/.gitignore @@ -0,0 +1,16 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +# Python-generated files +__pycache__/ +*.py[oc] +build/ +dist/ +wheels/ +*.egg-info + +# Virtual environments +.venv + +liander_dataset/ \ No newline at end of file diff --git a/examples/tutorials/.gitkeep b/examples/tutorials/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/examples/tutorials/backtesting_openstef_with_beam.ipynb b/examples/tutorials/backtesting_openstef_with_beam.ipynb new file mode 100644 index 000000000..9ae4d54fc --- /dev/null +++ b/examples/tutorials/backtesting_openstef_with_beam.ipynb @@ -0,0 +1,460 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "caf13084", + "metadata": {}, + "source": [ + "# 📊 Backtesting OpenSTEF Models with OpenSTEF-BEAM\n", + "\n", + "This tutorial demonstrates how to use **OpenSTEF-BEAM** (Backtesting, Evaluation, Analysis, Metrics) to systematically evaluate forecasting models. You'll learn how to:\n", + "\n", + "1. **Configure benchmark experiments** with multiple model types\n", + "2. **Run parallel backtests** across dozens of energy assets\n", + "3. **Compare model performance** with standardized metrics\n", + "4. **Generate analysis reports** with interactive visualizations\n", + "\n", + "> **BEAM** provides a rigorous framework for model evaluation, ensuring fair comparisons and reproducible results." + ] + }, + { + "cell_type": "markdown", + "id": "329ce2a3", + "metadata": {}, + "source": [ + "## 🔧 Environment Setup\n", + "\n", + "First, we configure thread settings to prevent conflicts with XGBoost's internal parallelization when running multiple processes." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24d53eb6", + "metadata": {}, + "outputs": [], + "source": [ + "# --- Thread Configuration ---\n", + "# Prevent thread contention when running parallel backtests with XGBoost\n", + "import os\n", + "os.environ[\"OMP_NUM_THREADS\"] = \"1\"\n", + "os.environ[\"OPENBLAS_NUM_THREADS\"] = \"1\"\n", + "os.environ[\"MKL_NUM_THREADS\"] = \"1\"\n", + "\n", + "# --- Standard Imports ---\n", + "import logging\n", + "import multiprocessing\n", + "from pathlib import Path\n", + "\n", + "logging.basicConfig(level=logging.INFO, format=\"[%(asctime)s][%(levelname)s] %(message)s\")" + ] + }, + { + "cell_type": "markdown", + "id": "0a2d9aed", + "metadata": {}, + "source": [ + "## ⚙️ Benchmark Configuration\n", + "\n", + "Configure the benchmark parameters:\n", + "- **Output paths** — where to store results for each model\n", + "- **Forecast horizons** — how far ahead to predict (using ISO 8601 duration format)\n", + "- **Quantiles** — prediction intervals for probabilistic evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99c03b80", + "metadata": {}, + "outputs": [], + "source": [ + "# Import types for configuration\n", + "from openstef_core.types import LeadTime, Q # LeadTime: forecast horizon, Q: quantile\n", + "from openstef_beam.benchmarking.benchmarks.liander2024 import Liander2024Category\n", + "\n", + "# --- Output Paths ---\n", + "OUTPUT_PATH = Path(\"./benchmark_results\")\n", + "BENCHMARK_RESULTS_PATH_XGBOOST = OUTPUT_PATH / \"XGBoost\"\n", + "BENCHMARK_RESULTS_PATH_GBLINEAR = OUTPUT_PATH / \"GBLinear\"\n", + "\n", + "# --- Parallelization ---\n", + "N_PROCESSES = multiprocessing.cpu_count() # Use all available CPU cores\n", + "print(f\"🖥️ Running with {N_PROCESSES} parallel processes\")\n", + "\n", + "# --- Forecast Configuration ---\n", + "FORECAST_HORIZONS = [LeadTime.from_string(\"P3D\")] # 3-day ahead forecast (ISO 8601: P3D)\n", + "\n", + "# Quantiles for probabilistic forecasting (7 quantiles covering 5th to 95th percentile)\n", + "PREDICTION_QUANTILES = [\n", + " Q(0.05), Q(0.1), Q(0.3), # Lower quantiles\n", + " Q(0.5), # Median\n", + " Q(0.7), Q(0.9), Q(0.95), # Upper quantiles\n", + "]\n", + "\n", + "# --- Benchmark Filter (optional) ---\n", + "# Set to None to run all categories, or specify categories like:\n", + "# BENCHMARK_FILTER = [Liander2024Category.TRANSFORMER, Liander2024Category.MV_FEEDER]\n", + "BENCHMARK_FILTER: list[Liander2024Category] | None = None" + ] + }, + { + "cell_type": "markdown", + "id": "a3618966", + "metadata": {}, + "source": [ + "## 🛠️ Model Configuration\n", + "\n", + "We define a **common configuration** that both models share, then create model-specific variants. This ensures fair comparison by keeping all settings identical except the model type.\n", + "\n", + "### Available Models:\n", + "- **XGBoost** — Gradient boosting trees (handles complex nonlinear patterns)\n", + "- **GBLinear** — Gradient boosted linear model (better extrapolation, faster)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3a39b756", + "metadata": {}, + "outputs": [], + "source": [ + "# Import workflow configuration\n", + "from openstef_models.presets import ForecastingWorkflowConfig\n", + "\n", + "# Common configuration shared by all models\n", + "# This ensures fair comparison by keeping all settings identical\n", + "common_config = ForecastingWorkflowConfig(\n", + " model_id=\"benchmark_model_\",\n", + " run_name=None,\n", + " model=\"flatliner\", # Placeholder - will be overwritten per model\n", + " \n", + " # Forecast settings\n", + " horizons=FORECAST_HORIZONS,\n", + " quantiles=PREDICTION_QUANTILES,\n", + " \n", + " # Model reuse: reuse trained model for same target (speeds up backtesting)\n", + " model_reuse_enable=True,\n", + " mlflow_storage=None, # Disable MLflow for this demo\n", + " \n", + " # Weather feature column mappings (match dataset column names)\n", + " radiation_column=\"shortwave_radiation\",\n", + " wind_speed_column=\"wind_speed_80m\", # 80m wind speed for better wind park predictions\n", + " pressure_column=\"surface_pressure\",\n", + " temperature_column=\"temperature_2m\",\n", + " relative_humidity_column=\"relative_humidity_2m\",\n", + " \n", + " # Additional features\n", + " energy_price_column=\"EPEX_NL\", # Day-ahead electricity price\n", + " rolling_aggregate_features=[\"mean\", \"median\", \"max\", \"min\"], # Rolling window stats\n", + " \n", + " # Logging\n", + " verbosity=0, # Quiet mode for batch processing\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ed202922", + "metadata": {}, + "outputs": [], + "source": [ + "# Create model-specific configurations by copying common config and updating model type\n", + "xgboost_config = common_config.model_copy(update={\"model\": \"xgboost\"})\n", + "gblinear_config = common_config.model_copy(update={\"model\": \"gblinear\"})\n", + "\n", + "print(\"✅ Model configurations created:\")\n", + "print(f\" - XGBoost: {xgboost_config.model}\")\n", + "print(f\" - GBLinear: {gblinear_config.model}\")" + ] + }, + { + "cell_type": "markdown", + "id": "4425a740", + "metadata": {}, + "source": [ + "## 💾 Storage Configuration\n", + "\n", + "**LocalBenchmarkStorage** manages the file structure for benchmark results:\n", + "```\n", + "benchmark_results/\n", + "├── XGBoost/\n", + "│ ├── backtest/ # Raw predictions\n", + "│ ├── evaluation/ # Metrics per target\n", + "│ └── analysis/ # Visualizations (HTML)\n", + "└── GBLinear/\n", + " └── ...\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c2e44656", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialize storage backends for each model\n", + "from openstef_beam.benchmarking.storage.local_storage import LocalBenchmarkStorage\n", + "\n", + "storage_xgboost = LocalBenchmarkStorage(base_path=BENCHMARK_RESULTS_PATH_XGBOOST)\n", + "storage_gblinear = LocalBenchmarkStorage(base_path=BENCHMARK_RESULTS_PATH_GBLINEAR)\n", + "\n", + "print(f\"📁 XGBoost results: {BENCHMARK_RESULTS_PATH_XGBOOST}\")\n", + "print(f\"📁 GBLinear results: {BENCHMARK_RESULTS_PATH_GBLINEAR}\")" + ] + }, + { + "cell_type": "markdown", + "id": "41e6b2e3", + "metadata": {}, + "source": [ + "## 🚀 Run Backtests\n", + "\n", + "Now we run the **Liander 2024 Benchmark** — a comprehensive evaluation suite that:\n", + "1. Downloads the benchmark dataset from HuggingFace Hub (if needed)\n", + "2. Runs backtests across 5 asset categories (transformers, feeders, solar/wind parks)\n", + "3. Computes metrics and generates analysis visualizations\n", + "\n", + "⚠️ **Note**: This may take several minutes depending on your hardware." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d6aae871", + "metadata": {}, + "outputs": [], + "source": [ + "# Import benchmark components\n", + "from openstef_beam.benchmarking.benchmarks.liander2024 import create_liander2024_benchmark_runner\n", + "from openstef_beam.benchmarking.callbacks.strict_execution_callback import StrictExecutionCallback\n", + "from openstef_beam.benchmarking.baselines import create_openstef4_preset_backtest_forecaster\n", + "\n", + "# --- Run XGBoost Benchmark ---\n", + "print(\"🌲 Running XGBoost benchmark...\")\n", + "create_liander2024_benchmark_runner(\n", + " storage=storage_xgboost,\n", + " callbacks=[StrictExecutionCallback()], # Fail fast on errors\n", + ").run(\n", + " forecaster_factory=create_openstef4_preset_backtest_forecaster(\n", + " workflow_config=xgboost_config,\n", + " ),\n", + " run_name=\"xgboost\",\n", + " n_processes=N_PROCESSES,\n", + " filter_args=BENCHMARK_FILTER,\n", + ")\n", + "print(\"✅ XGBoost benchmark complete!\")\n", + "\n", + "# --- Run GBLinear Benchmark ---\n", + "print(\"\\n📈 Running GBLinear benchmark...\")\n", + "create_liander2024_benchmark_runner(\n", + " storage=storage_gblinear,\n", + " callbacks=[StrictExecutionCallback()],\n", + ").run(\n", + " forecaster_factory=create_openstef4_preset_backtest_forecaster(\n", + " workflow_config=gblinear_config,\n", + " ),\n", + " run_name=\"gblinear\",\n", + " n_processes=N_PROCESSES,\n", + " filter_args=BENCHMARK_FILTER,\n", + ")\n", + "print(\"✅ GBLinear benchmark complete!\")" + ] + }, + { + "cell_type": "markdown", + "id": "d1690a07", + "metadata": {}, + "source": [ + "## 📊 Compare Model Performance\n", + "\n", + "The **BenchmarkComparisonPipeline** generates side-by-side analysis of multiple models:\n", + "- Global metrics across all targets\n", + "- Per-category breakdowns (transformers, feeders, etc.)\n", + "- Time-windowed performance analysis" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0a6bdfcf", + "metadata": {}, + "outputs": [], + "source": [ + "# Run model comparison analysis\n", + "from openstef_beam.benchmarking import BenchmarkComparisonPipeline\n", + "from openstef_beam.benchmarking.benchmarks.liander2024 import LIANDER2024_ANALYSIS_CONFIG\n", + "\n", + "# Create comparison pipeline\n", + "target_provider = create_liander2024_benchmark_runner(\n", + " storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH),\n", + ").target_provider\n", + "\n", + "comparison_pipeline = BenchmarkComparisonPipeline(\n", + " analysis_config=LIANDER2024_ANALYSIS_CONFIG,\n", + " storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH),\n", + " target_provider=target_provider,\n", + ")\n", + "\n", + "# Generate comparison reports\n", + "print(\"📊 Generating comparison analysis...\")\n", + "comparison_pipeline.run(run_data={\n", + " \"xgboost\": storage_xgboost,\n", + " \"gblinear\": storage_gblinear,\n", + "})\n", + "print(\"✅ Comparison analysis complete!\")" + ] + }, + { + "cell_type": "markdown", + "id": "c22c61f4", + "metadata": {}, + "source": [ + "## 📈 View Analysis Results\n", + "\n", + "The benchmark generates interactive HTML visualizations. Let's open the most important ones:\n", + "\n", + "### Key Metrics:\n", + "- **rCRPS** (relative Continuous Ranked Probability Score) — measures probabilistic forecast accuracy\n", + "- **rMAE** (relative Mean Absolute Error) — measures point forecast accuracy\n", + "- Lower values = better performance" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "af09be7e", + "metadata": {}, + "outputs": [], + "source": [ + "# Open key analysis plots in browser\n", + "# HTML visualizations are interactive and best viewed in a browser\n", + "import webbrowser\n", + "import os\n", + "\n", + "# Base path for analysis results\n", + "analysis_base = os.path.abspath('./benchmark_results/analysis/D-1T06:00')\n", + "\n", + "# Define key visualizations to open\n", + "visualizations = [\n", + " (\"rCRPS Grouped by Category\", \"rCRPS_grouped.html\"),\n", + " (\"rCRPS Time-Windowed (7 days)\", \"rCRPS_windowed_7D.html\"),\n", + "]\n", + "\n", + "print(\"🌐 Opening analysis visualizations in browser...\\n\")\n", + "for name, filename in visualizations:\n", + " filepath = os.path.join(analysis_base, filename)\n", + " if os.path.exists(filepath):\n", + " print(f\" 📊 {name}\")\n", + " webbrowser.open(f'file://{filepath}')\n", + " else:\n", + " print(f\" ⚠️ {name} not found at {filepath}\")" + ] + }, + { + "cell_type": "markdown", + "id": "59e8d779", + "metadata": {}, + "source": [ + "### 🔍 Explore Individual Target Results\n", + "\n", + "You can also view time series plots for individual targets. Let's look at a transformer forecast:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea2fd469", + "metadata": {}, + "outputs": [], + "source": [ + "# List available target-specific visualizations\n", + "import glob\n", + "\n", + "# Find all time series plots for individual targets\n", + "target_plots = glob.glob('./benchmark_results/XGBoost/analysis/*/*/time_series_plot*.html')\n", + "\n", + "if target_plots:\n", + " print(\"📊 Available target-specific time series plots:\\n\")\n", + " for i, plot in enumerate(sorted(target_plots)[:5]): # Show first 5\n", + " parts = plot.split('/')\n", + " category = parts[-3] # e.g., \"transformer\"\n", + " target = parts[-2] # e.g., \"OS Apeldoorn\"\n", + " print(f\" {i+1}. {category}/{target}\")\n", + " \n", + " # Open the first transformer plot as an example\n", + " transformer_plots = [p for p in target_plots if 'transformer' in p]\n", + " if transformer_plots:\n", + " example_plot = os.path.abspath(transformer_plots[0])\n", + " print(f\"\\n🌐 Opening example: {transformer_plots[0]}\")\n", + " webbrowser.open(f'file://{example_plot}')\n", + "else:\n", + " print(\"⚠️ No target-specific plots found. Run the benchmark first.\")" + ] + }, + { + "cell_type": "markdown", + "id": "e41df479", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## 🎯 Summary\n", + "\n", + "In this tutorial, you learned how to:\n", + "\n", + "1. ✅ **Configure benchmark experiments** with `ForecastingWorkflowConfig`\n", + "2. ✅ **Run parallel backtests** using the Liander 2024 benchmark\n", + "3. ✅ **Compare models** (XGBoost vs GBLinear) with `BenchmarkComparisonPipeline`\n", + "4. ✅ **Analyze results** with interactive HTML visualizations\n", + "\n", + "### 📁 Output Structure\n", + "\n", + "```\n", + "benchmark_results/\n", + "├── XGBoost/\n", + "│ ├── backtest/ # Raw predictions (parquet)\n", + "│ ├── evaluation/ # Metrics per target\n", + "│ └── analysis/ # HTML visualizations\n", + "├── GBLinear/\n", + "│ └── ...\n", + "└── analysis/ # Comparison analysis (both models)\n", + " └── D-1T06:00/\n", + " ├── rCRPS_grouped.html # Probabilistic accuracy by category\n", + " ├── rMAE_grouped.html # Point forecast accuracy\n", + " └── summary.html # Overall summary\n", + "```\n", + "\n", + "### 🚀 Next Steps\n", + "\n", + "- Experiment with different `FORECAST_HORIZONS` (e.g., `\"PT6H\"`, `\"P7D\"`)\n", + "- Add more quantiles for higher resolution prediction intervals\n", + "- Filter specific categories with `BENCHMARK_FILTER`\n", + "- Integrate MLflow for experiment tracking" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.5" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/tutorials/forecasting_with_workflow_presets.ipynb b/examples/tutorials/forecasting_with_workflow_presets.ipynb new file mode 100644 index 000000000..1a7c298a4 --- /dev/null +++ b/examples/tutorials/forecasting_with_workflow_presets.ipynb @@ -0,0 +1,972 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "65fbd6d9", + "metadata": {}, + "source": [ + "# 🔮 Forecasting with OpenSTEF 4.0 Workflow Presets\n", + "\n", + "This tutorial demonstrates how to use **OpenSTEF 4.0** to create energy load forecasts using the **Workflow Presets** pattern. You'll learn how to:\n", + "\n", + "1. **Load real-world energy data** from the Liander 2024 benchmark dataset\n", + "2. **Configure a forecasting workflow** with weather features and prediction quantiles\n", + "3. **Train a model** and inspect its performance\n", + "4. **Generate probabilistic forecasts** with confidence intervals\n", + "5. **Visualize results** and explain feature importance\n", + "\n", + "> **OpenSTEF** (Short-Term Energy Forecasting) is a modular library for creating accurate energy forecasts in the power grid domain." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "c8a83428", + "metadata": {}, + "outputs": [], + "source": [ + "# --- Setup: Logging and Display Configuration ---\n", + "# Configure logging to see training progress and plotly to render as PNG for VS Code compatibility\n", + "import logging\n", + "import pandas as pd\n", + "import plotly.io as pio\n", + "\n", + "pd.options.plotting.backend = \"plotly\"\n", + "pio.renderers.default = \"png\" # Use PNG for VS Code notebook compatibility\n", + "\n", + "logging.basicConfig(level=logging.INFO, format=\"[%(asctime)s][%(levelname)s] %(message)s\")\n", + "logger = logging.getLogger(__name__)\n", + "logging.getLogger(\"choreographer\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"kaleido\").setLevel(logging.ERROR)\n", + "logging.getLogger(\"choreographer\").disabled = True\n", + "logging.getLogger(\"kaleido\").disabled = True" + ] + }, + { + "cell_type": "markdown", + "id": "f2afee2d", + "metadata": {}, + "source": [ + "## 📦 Step 1: Download the Dataset\n", + "\n", + "We'll use the **Liander 2024 Energy Forecasting Benchmark** dataset from HuggingFace Hub. This dataset contains:\n", + "- **Load measurements** — historical energy consumption from various installations (mv feeders, transformers, etc.)\n", + "- **Weather forecasts** — versioned weather predictions (temperature, radiation, wind, etc.)\n", + "- **EPEX prices** — day-ahead electricity market prices\n", + "- **Profiles** — typical daily/weekly load patterns" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ead642f4", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/egor.dmitriev/projects/openstef/openstef4/.venv/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py:202: UserWarning:\n", + "\n", + "The `local_dir_use_symlinks` argument is deprecated and ignored in `hf_hub_download`. Downloading to a local directory does not use symlinks anymore.\n", + "\n", + "[2025-12-12 14:12:32,556][INFO] HTTP Request: HEAD https://huggingface.co/datasets/OpenSTEF/liander2024-energy-forecasting-benchmark/resolve/main/load_measurements/mv_feeder/OS%20Gorredijk.parquet \"HTTP/1.1 302 Found\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading load_measurements/mv_feeder/OS Gorredijk.parquet...\n", + "✓ load_measurements/mv_feeder/OS Gorredijk.parquet downloaded\n", + "Downloading weather_forecasts_versioned/mv_feeder/OS Gorredijk.parquet...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2025-12-12 14:12:32,672][INFO] HTTP Request: HEAD https://huggingface.co/datasets/OpenSTEF/liander2024-energy-forecasting-benchmark/resolve/main/weather_forecasts_versioned/mv_feeder/OS%20Gorredijk.parquet \"HTTP/1.1 302 Found\"\n", + "[2025-12-12 14:12:32,814][INFO] HTTP Request: HEAD https://huggingface.co/datasets/OpenSTEF/liander2024-energy-forecasting-benchmark/resolve/main/EPEX.parquet \"HTTP/1.1 302 Found\"\n", + "Warning: You are sending unauthenticated requests to the HF Hub. Please set a HF_TOKEN to enable higher rate limits and faster downloads.\n", + "[2025-12-12 14:12:32,815][WARNING] Warning: You are sending unauthenticated requests to the HF Hub. Please set a HF_TOKEN to enable higher rate limits and faster downloads.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✓ weather_forecasts_versioned/mv_feeder/OS Gorredijk.parquet downloaded\n", + "Downloading EPEX.parquet...\n", + "✓ EPEX.parquet downloaded\n", + "Downloading profiles.parquet...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2025-12-12 14:12:32,928][INFO] HTTP Request: HEAD https://huggingface.co/datasets/OpenSTEF/liander2024-energy-forecasting-benchmark/resolve/main/profiles.parquet \"HTTP/1.1 302 Found\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✓ profiles.parquet downloaded\n", + "\n", + "✅ All files downloaded successfully!\n" + ] + } + ], + "source": [ + "# Download dataset from HuggingFace Hub\n", + "# The dataset is stored as parquet files for efficient loading\n", + "from huggingface_hub import hf_hub_download\n", + "from openstef_core.base_model import Path\n", + "\n", + "repo_id = \"OpenSTEF/liander2024-energy-forecasting-benchmark\" # Public benchmark dataset\n", + "local_dir = Path(\"./liander_dataset\")\n", + "target = \"mv_feeder/OS Gorredijk\" # Specific installation to focus on\n", + "\n", + "# Download required files: load measurements, weather, prices, and profiles\n", + "files_to_download = [\n", + " \"load_measurements/mv_feeder/OS Gorredijk.parquet\", # Energy consumption data\n", + " \"weather_forecasts_versioned/mv_feeder/OS Gorredijk.parquet\", # Weather features\n", + " \"EPEX.parquet\", # Electricity prices (optional feature)\n", + " \"profiles.parquet\" # Standard load profiles (optional feature)\n", + "]\n", + "\n", + "for filename in files_to_download:\n", + " print(f\"Downloading {filename}...\")\n", + " hf_hub_download(repo_id=repo_id, filename=filename, repo_type=\"dataset\",\n", + " local_dir=local_dir, local_dir_use_symlinks=False)\n", + " print(f\"✓ {filename} downloaded\")\n", + "\n", + "print(\"\\n✅ All files downloaded successfully!\")" + ] + }, + { + "cell_type": "markdown", + "id": "81d12312", + "metadata": {}, + "source": [ + "## 📊 Step 2: Load and Prepare the Data\n", + "\n", + "OpenSTEF uses **VersionedTimeSeriesDataset** — a specialized data structure that handles:\n", + "- **Time versioning** — tracks when data became available (crucial for realistic backtesting)\n", + "- **Lazy composition** — efficiently combines datasets without O(n²) memory overhead\n", + "- **Temporal alignment** — ensures all features are properly aligned by timestamp" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a5522df3", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2025-12-12 14:12:32,950][WARNING] Parquet file does not contain 'sample_interval' attribute. Using default value of 15 minutes.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2025-12-12 14:12:32,987][WARNING] Parquet file does not contain 'sample_interval' attribute. Using default value of 15 minutes.\n", + "[2025-12-12 14:12:33,038][WARNING] Parquet file does not contain 'sample_interval' attribute. Using default value of 15 minutes.\n", + "[2025-12-12 14:12:33,047][WARNING] Parquet file does not contain 'sample_interval' attribute. Using default value of 15 minutes.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Dataset shape: (35136, 28)\n", + "Date range: 2024-01-01 00:00:00+00:00 to 2024-12-31 23:45:00+00:00\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
loadtemperature_2mrelative_humidity_2msurface_pressurecloud_coverwind_speed_10mwind_speed_80mwind_direction_10mshortwave_radiationdirect_radiation...E1C_AMI_AE2A_AZI_AE2A_AMI_AE2B_AZI_AE2B_AMI_AE3A_AE3B_AE3C_AE3D_AE4A_A
timestamp
2024-01-01 00:00:00+00:00423333.3333337.24350085.025322994.236450100.028.18595343.832863204.9284520.00.0...0.0000640.0000240.0000340.0000530.0000640.0000580.0000580.0000580.0000580.000079
2024-01-01 00:15:00+00:00436666.6666677.28100084.808533994.186523100.028.75338044.976219206.9310150.00.0...0.0000610.0000240.0000340.0000520.0000630.0000580.0000580.0000580.0000580.000079
2024-01-01 00:30:00+00:00410000.0000007.31850184.591743994.136597100.029.32080746.119572208.9335630.00.0...0.0000600.0000230.0000330.0000510.0000630.0000580.0000580.0000580.0000580.000079
2024-01-01 00:45:00+00:00403333.3333337.35600084.374954994.086731100.029.88823347.262924210.9361270.00.0...0.0000570.0000230.0000320.0000510.0000610.0000590.0000590.0000590.0000590.000079
2024-01-01 01:00:00+00:00420000.0000007.39350084.158165994.036804100.030.45566048.406281212.9386900.00.0...0.0000550.0000240.0000320.0000520.0000600.0000570.0000570.0000570.0000570.000079
\n", + "

5 rows × 28 columns

\n", + "
" + ], + "text/plain": [ + " load temperature_2m \\\n", + "timestamp \n", + "2024-01-01 00:00:00+00:00 423333.333333 7.243500 \n", + "2024-01-01 00:15:00+00:00 436666.666667 7.281000 \n", + "2024-01-01 00:30:00+00:00 410000.000000 7.318501 \n", + "2024-01-01 00:45:00+00:00 403333.333333 7.356000 \n", + "2024-01-01 01:00:00+00:00 420000.000000 7.393500 \n", + "\n", + " relative_humidity_2m surface_pressure \\\n", + "timestamp \n", + "2024-01-01 00:00:00+00:00 85.025322 994.236450 \n", + "2024-01-01 00:15:00+00:00 84.808533 994.186523 \n", + "2024-01-01 00:30:00+00:00 84.591743 994.136597 \n", + "2024-01-01 00:45:00+00:00 84.374954 994.086731 \n", + "2024-01-01 01:00:00+00:00 84.158165 994.036804 \n", + "\n", + " cloud_cover wind_speed_10m wind_speed_80m \\\n", + "timestamp \n", + "2024-01-01 00:00:00+00:00 100.0 28.185953 43.832863 \n", + "2024-01-01 00:15:00+00:00 100.0 28.753380 44.976219 \n", + "2024-01-01 00:30:00+00:00 100.0 29.320807 46.119572 \n", + "2024-01-01 00:45:00+00:00 100.0 29.888233 47.262924 \n", + "2024-01-01 01:00:00+00:00 100.0 30.455660 48.406281 \n", + "\n", + " wind_direction_10m shortwave_radiation \\\n", + "timestamp \n", + "2024-01-01 00:00:00+00:00 204.928452 0.0 \n", + "2024-01-01 00:15:00+00:00 206.931015 0.0 \n", + "2024-01-01 00:30:00+00:00 208.933563 0.0 \n", + "2024-01-01 00:45:00+00:00 210.936127 0.0 \n", + "2024-01-01 01:00:00+00:00 212.938690 0.0 \n", + "\n", + " direct_radiation ... E1C_AMI_A E2A_AZI_A \\\n", + "timestamp ... \n", + "2024-01-01 00:00:00+00:00 0.0 ... 0.000064 0.000024 \n", + "2024-01-01 00:15:00+00:00 0.0 ... 0.000061 0.000024 \n", + "2024-01-01 00:30:00+00:00 0.0 ... 0.000060 0.000023 \n", + "2024-01-01 00:45:00+00:00 0.0 ... 0.000057 0.000023 \n", + "2024-01-01 01:00:00+00:00 0.0 ... 0.000055 0.000024 \n", + "\n", + " E2A_AMI_A E2B_AZI_A E2B_AMI_A E3A_A \\\n", + "timestamp \n", + "2024-01-01 00:00:00+00:00 0.000034 0.000053 0.000064 0.000058 \n", + "2024-01-01 00:15:00+00:00 0.000034 0.000052 0.000063 0.000058 \n", + "2024-01-01 00:30:00+00:00 0.000033 0.000051 0.000063 0.000058 \n", + "2024-01-01 00:45:00+00:00 0.000032 0.000051 0.000061 0.000059 \n", + "2024-01-01 01:00:00+00:00 0.000032 0.000052 0.000060 0.000057 \n", + "\n", + " E3B_A E3C_A E3D_A E4A_A \n", + "timestamp \n", + "2024-01-01 00:00:00+00:00 0.000058 0.000058 0.000058 0.000079 \n", + "2024-01-01 00:15:00+00:00 0.000058 0.000058 0.000058 0.000079 \n", + "2024-01-01 00:30:00+00:00 0.000058 0.000058 0.000058 0.000079 \n", + "2024-01-01 00:45:00+00:00 0.000059 0.000059 0.000059 0.000079 \n", + "2024-01-01 01:00:00+00:00 0.000057 0.000057 0.000057 0.000079 \n", + "\n", + "[5 rows x 28 columns]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Load datasets using OpenSTEF's VersionedTimeSeriesDataset\n", + "# This class handles versioned data where each value has an \"available_at\" timestamp\n", + "from openstef_core.datasets import VersionedTimeSeriesDataset\n", + "\n", + "# Load each data source from parquet files\n", + "load_dataset = VersionedTimeSeriesDataset.read_parquet(\n", + " local_dir / \"load_measurements/mv_feeder/OS Gorredijk.parquet\"\n", + ")\n", + "weather_dataset = VersionedTimeSeriesDataset.read_parquet(\n", + " local_dir / \"weather_forecasts_versioned/mv_feeder/OS Gorredijk.parquet\"\n", + ")\n", + "epex_dataset = VersionedTimeSeriesDataset.read_parquet(local_dir / \"EPEX.parquet\")\n", + "profiles_dataset = VersionedTimeSeriesDataset.read_parquet(local_dir / \"profiles.parquet\")\n", + "\n", + "# Combine all datasets using left join (keep all load timestamps, match features where available)\n", + "# select_version() materializes the lazy dataset into a concrete TimeSeriesDataset\n", + "dataset = VersionedTimeSeriesDataset.concat(\n", + " [load_dataset, weather_dataset, epex_dataset, profiles_dataset], \n", + " mode=\"left\" # Left join keeps all timestamps from the first dataset\n", + ").select_version()\n", + "\n", + "# Preview the combined dataset\n", + "print(f\"Dataset shape: {dataset.data.shape}\")\n", + "print(f\"Date range: {dataset.data.index.min()} to {dataset.data.index.max()}\")\n", + "dataset.data.head()" + ] + }, + { + "cell_type": "markdown", + "id": "675e594e", + "metadata": {}, + "source": [ + "## ✂️ Step 3: Split Data into Training and Forecast Periods\n", + "\n", + "We'll use:\n", + "- **90 days** of historical data for training\n", + "- **14 days** as the forecast period (where we'll generate predictions)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "844ac4a2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "📈 Training period: 2024-03-01 to 2024-05-30 (8640 samples)\n", + "🔮 Forecast period: 2024-05-30 to 2024-06-13 (1344 samples)\n" + ] + } + ], + "source": [ + "# Define training and forecast time periods\n", + "from datetime import datetime, timedelta\n", + "\n", + "# Training period: 90 days of historical data\n", + "train_start = datetime.fromisoformat(\"2024-03-01T00:00:00Z\")\n", + "train_end = train_start + timedelta(days=90)\n", + "\n", + "# Forecast period: 14 days after training (this is where we'll predict)\n", + "forecast_start = train_end\n", + "forecast_end = forecast_start + timedelta(days=14)\n", + "\n", + "# Split the dataset using time-based filtering\n", + "train_dataset = dataset.filter_by_range(start=train_start, end=train_end)\n", + "forecast_dataset = dataset.filter_by_range(start=forecast_start, end=forecast_end)\n", + "\n", + "print(f\"📈 Training period: {train_start.date()} to {train_end.date()} ({len(train_dataset.data)} samples)\")\n", + "print(f\"🔮 Forecast period: {forecast_start.date()} to {forecast_end.date()} ({len(forecast_dataset.data)} samples)\")" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "07c5b563", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAArwAAAH0CAYAAADfWf7fAAAQAElEQVR4AexdB4AbxdV+kq7f2adzxRUMoXdC751A6IFAQqgp9CQQSgLhpwVCCyXUQAIOECD0FrrB2PTeuw027u10vqbr/367mtNotSPNSrvSSvfsm52ZN2/ee/PNavfN7OxseID/MQKMACPACDACjAAjwAgwAmWMQJj4HyPACDACjAARMQiMACPACDAC5YoAO7zl2rPcLkaAEWAEGAFGgBFgBHJBoAzrsMNbhp3KTWIEGAFGgBFgBBgBRoARSCLADm8SC04xAoyAPgLMyQgwAowAI8AIlAwC7PCWTFexoYwAI8AIMAKMACMQPATYolJAgB3eUugltpERYAQYAUaAEWAEGAFGIGcE2OHNGTquyAjoI8CcjAAjwAgwAowAI1A8BNjhLR72rJkRYAQYAUaAERhqCHB7GYGiIMAOb1FgZ6WMACPACDACjAAjwAgwAoVCgB3eQiHNevQRYE5GgBFgBBgBRoARYAQ8RIAdXg/BZFGMACPACDACjICXCLAsRoAR8AYBdni9wZGlMAKMACPACDACjAAjwAgEFAF2eAPaMfpmMScjwAgwAowAI8AIMAKMQCYE2OHNhA6XMQKMACPACJQOAmwpI8AIMAIKBNjhVQDDZEaAEWAEGAFGgBFgBBiB8kBgqDm85dFr3ApGgBFgBBgBRoARYAQYAW0E2OHVhooZGQFGgBEoJwS4LYwAI8AIDB0E2OEdOn3NLWUEGAFGgBFgBBgBRmBIIpDR4R2SiHCjGQFGgBFgBBgBRoARYATKCgF2eMuqO7kxjAAj4BMCLJYRYAQYAUaghBFgh7eEO49NZwQYAUaAEWAEGAFGoLAIlKY2dnhLs9/YakaAEWAEGAFGgBFgBBgBTQTY4dUEitkYAUZAHwHmZAQYAUaAEWAEgoQAO7xB6g22hRFgBBgBRoARYATKCQFuS0AQYIc3IB3BZjACjAAjwAgwAowAI8AI+IMAO7z+4MpSGQF9BJiTEWAEGAFGgBFgBHxFgB1eX+Fl4YwAI8AIMAKMACOgiwDzMQJ+IcAOr1/IslxGgBFgBBgBRoARYAQYgUAgwA5vILqBjdBHgDkZAUaAEWAEGAFGgBFwhwA7vO7wYm5GgBFgBBgBRiAYCLAVjAAjoI0AO7zaUDEjI8AIMAKMACPACDACjEApIsAObyn2mr7NzMkIMAKMACPACDACjMCQR4Ad3iF/CjAAjAAjwAgMBQS4jYwAIzCUEWCHdyj3PredEWAEGAFGgBFgBBiBIYAAO7xSJ3OSEWAEGAFGgBFgBBgBRqD8EBiyDm9Xdw+1tXdSb1+f614dGBgw63bGu13X5QqMQKkh8MjTM+m/j71YamZ7Zi+uEbhWdBvXDM+EBl8QW8gIMAKMQFkhEBiHt7mlldbf+RitcM2tD+TdCX+59i7a6scn0uvvfOZa1vxFy8y6x/zur67relVhi71PSMEK+X1+cTZddM2d9PEX3+alBs7Ntbc9mJeMXCrPmrMgpU1O58O8hUtzEV1Sdc66+BYTh6C09bb/PEk33PFISWGYydgXX3nPxNfp/LLTZr75ET097U3z937Tvx/LJJbLGAFGgBFgBAKMQO4Or8eNqqqspAN/tH1KGDu6ydSy63abptDX+cFkk57PYa3VJ9KOW29MI6LDXIupqa4y62620Vqu63pd4YiDd6ef7r8Lbb3ZuhTv6jZn4g4/4UL69wPP5qzqyRfeIDg5OQvIseJA/4BZE/1uPxdEvq62xuQp50NvX7/ZvP4EHmaGD54hMHb0iJTrCc4tIRxpOYweGaXRo6Lm733ViWMFG8eMACPACDACJYZAYBze+roauuSPv0oJm2+0tgnnmScdnkLfe9etTHo+hyMP2ZNuvuw0Wn/t1VyLGTWi0ax79sk/y1gXSx8yMhiFOjwGm+PfCMNZP+e3v6DzTz+arr/kd/T8fX+j6//yW5P3ihvvpRdmvmumS+2wzQ/XT+lv+bxAm1XtyQdLlUwVvZC6VDbIdK/t8UKeFzLkNnqVxm9ePqeQxkAK5xbScsDgeuvN1jN/7wftvYPSBC5gBBgBRoARCDYCgXF43cB0ueHMnX7BTeb6W6wvPPeyf9Jvz/s7LV0eo8eefZWOPPVS2vVQw5nd+Rja62dnEh4Rfznr+xQVTzz3Gp34x2vo+wVLBulCLh6tQz6WCSCcd8Xt1NrWMcjX09Nr1r3lzscHabAD8j776juCHOjdYJdj6VdnXEnfzl04yCcSH38+m35z5lUEHjxGPf2CG007z/nrbYLFdRyJhGnX7Tejf151pln3d+ddTx2dcTONw5U33UeHHX8h7XDgqeYjXdh41S3/peXNK1Fshkuuu4u++GaumUZ7RMAyDhB1ZIAPAY+AUX/6ax8g62lwizf6FOeIaPsvTrmEXn37kxSbhMwFi5bR6+98Spf+/W46+Zxr6a33vzD5sIbzhtsfISwdQb8hBg/aCH4w/f1fD5nnhtNyhAeffNks++TLb8Gad8Aa8r/dcv+gPYf8+ny668HnyD4z7KbPcP6ecPbfzPMS5z7Oy6XLW1zZ+sCT083zDBjhHANG7R3WeYj1sKeee515rjs5xJf+/T8mRh2dXYM6cf4cd9rlBHsQUH/OvMWD5XiygT7AU4mW1na6476nCXaff9Udgzz5JD7/eo5p00uvvT8oRpwr6MuLr7nTvN7ANlyLOozf3KKlKwhpnG+gw54VsdbB+iKRrW2Cj2NGgBFgBEoEgcCaWZIO73sffUXPTn+LjjjpL/Tny/9Fjz7zCk2b+Z75Itmb731G7338FY0fO4r22nlLGtE0nP437Q2Cg7Nw8fLBjsANc8YbH5p1BFHI3f/oc0z5q08eZxY9/NQMusJwFs2MccAjZ9T97OvvjJz19933iwi0Q39zAd35wLNUV1tNeDQPR+iEs682nXOLk2jmmx/T4SdeZDpcW2yyDmHJBmiwc+abHwm2nONtNl+fxGzU519bziuEPf7cqzTbcL7X/sFkExvQ4BycZDj+cESQb1nZPugkL10eMwcRiPsSL/fpyIAchE8Nxw6YLJBwB92L4Abvdz78ktCnOEdWmzSOdthqQ3r/k6/NAQccDmGPkPmHi242Byr/efgFQvm8hUuor6+ffvmHK+nmOx8zl47stsNmNKy+jsCDNi4xsIKcCauMNs+DR59+BdnB0Gvgd90/HyTYsuaUiYP0XBM9vX2ENeS33/cUVVdV0n57bmsOyi674R4674p/pYjV7TM49jh/Zxrn50brrUGbb7y2ea7CgUsRmCGDJwsXXDWVFixeZtoUHd5gYnTob84nOOgVkQhVVlaav0n0gSxq3sKlBu/zBtZ95u8HZTg/Meh48/3PzX7DsoIXX33fdPKXLIuBhXoNLNAH9z46jfY98o90lTGIe3b624S1uiZDnodYS5vZpwsWLR+UJM4VDCDve+xFGhEdTjXVlYRr0bG/v5z2O+ocM43lE6gEe/75nyeRHAw6bRtk5gQjwAgwAoxAXgiUpMMrWtwZ7zIf4b/04LX09H+uoAmrjKJfHbEvvfXULXT3DefS1RecRPfedB5h6QFu2jM1nckTjtqf3nnmVvrvP86nZ+65wrj51hCc3j7D6RG6VTEcoRfuv5oeuf0v9Nx9V9FWm65LuJF/+uV3ZhXMEv7l2jvNNGyceu0fCcsRXnviRpo4brRJ9+Kw4TpTTDGYSTYTxuHWK8+g15+80ZwBBjbADM42Zqm+m7vI4CC64rwTaLMN1zLTD952IYkwecJYk6Yjw2Q0DnB28Ji4tqbKyOn/wWnAzJhTkActkJgN717D0bzo6n+DlR6fegnddf05dMvlf6An7/yrSYMTaiakwzffzjeXiTz9n8vpxQeuMQYkmxGcRgyk9tp5C/Nc+/vFvzXPj0v/9GupJtGPdtnSzP/38RcJTqmZMQ4491YYM3xHHrKH6aAapLz+HjMGeeg3rN9+6J8X02Xn/IYeveMS2sIYQAG/Dz+bNShfp8+AkzgvwY/fDZb84HzRPS9nfTefsHZ83TVXpafuvty0Cb+hX/5sH8IA895HXzBtOnTfncz4Edug4CljYIqCQ/fbGZH5u4HzCnmvPnaD8Xs+2Twf/3L2L83yqf992ozFYfHSZtp0wzXp39f9iWY+er1xDfizKPIt3mPHzWn6Q9eadj1775Xmbxj9ssNWG9HLD19n0lGOwe/015NPOnBNcNM23xrAghkBRoARGCIIlLTDC2dsV+MR/phRUZo8YQxVGTNdmJXFeuCFxqwiZlefeO41WrbCeiQ7V1q+oOpfrOU79biDSThpI40ZYswIgn9FLPnoH3mngLrjxowwizCbtcdOm5vpRUtWmPE3hlOAm92h++5Mm26wpknDobIiQliSgLQXYXLCQf3623mD4uA4hENhgmPy8usf0mPPvkKhcMgsh01mIsvBjQyshYTjIWabs4geLEYfrDpxFXIKEWOGcJDRSGTDGzPcs+YsMF/sw+xunzFoQQA+wP+r2fMIgxBD1ODfv/52pskPHjgq0caGwdnCM088PMVhxUz+YEUjgXMPLxKuMJzbGQbGBsn8w84XSPzkx5azh3Q+4bmX3zarn3zMgRRO9CHOWQzWUDBNWr+t02ezvltAwGmNVcfTdltsABFmwDkciYTNdLbDS69ZDt3xR+5HwxrqBtl/84v9zPRT09404y2NQSBwxSASA1EQsQzjof/NMAeXO2+zCUiDa9CPPXxvUx76DQG/eTB8bNuNBL9TDEQwMz0iOsw4f8aCzddw8rEHEl5sgxKct2gb0qcedxBhrT/SOCewLh1OP56WgPZCon9024Y6HBgBRqD8EOAWFQ6BcOFUeasJNxc4uHapcOawlnH3w/5gPpb+46W30r/ufcpk6zecHTPh8tBoPJZFFazdRewmNA5rMNlF3e++t9YerpfDy3KmIM0D1hCCdbVJqyAyA26yOx38W9r/mHPppD9dYy4HmTbzPbOsf2DAjLMdXjBu1PnKyKZjT2OQgNlvp4DBTab6dry/n7/EZL//8Zdoo92OSwnikboYEJmMxqG2tto4pv7BMcY5N27syNQCh5xwajHLi2IMJma++bExU7yp+RQCtHzDN8bACU6dcKqEvB+sNsFMfjfPmrFHRqfP5s63zsudt7WcTdRzG+DQoY6wAWmEhvpa0/n8/Os5yJoDu8MP2NVMT0ucfx98+o05o/vzg3ajysoKs0zIwxp8ue+23e9ks3z+oqVmLA51tbUiWbR4mNFWKLf/nurrLNs6EmuT3bYNMjkwAowAI8AI5I5AyTq8Tk3GWjs4c7ixHn3oXvSvv51FeMz4wK0XOLFr08IhaxZUu4LEGImk1u3qtj5W0dEZl7i8T8JBg9RN1v8BIvMlLLzEFu/qobNO/pnxuPdcwlKQP//+SLNc54AZ83xl6OjJh8eOd3tnpykOa1wvPONYcgqYwTWZMhzQXwi9fdk/VLL2GpPM2Xu8FIeXIsV63p8ftHsGDaoiZ3prW+egYyhzaPyKdgAAEABJREFUCGdRzFrr9tnKVuulzCmJdeuyTN10V5d1bjsNRLHOGHLEi2r77bkdsvTQUzPM+PFnXzVjbAlmJoxDe7vVd5jFd+q3M0443OAK1l9YMRtunyUvxbYFC2m2hhFgBBgBdwiUlcOLNZZo/q+P2Nd06rb+4XrmmjrMMIEehDBujDVDKGz1wybsSIEX5yAbWzAhnpFYv3z1BScTBgN4nI/ZUuwpjHKd4IUMHT1e8kwaP8YUN2ncaDpk350cQ53G3r5YXgFBWN+LOFsQM5j3Pz6dMNOLdbBbbbZetmra5T+YMoGwZrXL9vUvsXRGtFu3z3AuQLl45I602zBpgoU1lhPJdfuMJyvzFi4zZ3lDIWsAOG7MCHNv27c/+IJwvmJnB6wdlx1u8XQCgzanvttnt61kNSWVLue2lVRHsLGlhQBbywjkgUBZObzLEttrVSUeiQpcPk28MCbyxYzXXWtVUz2+8Ca2K+s1Zg2xzZF4zGky5HDA7BnetP/VH64wa1/1fyeaayKREY5MZWUEWTNAL2bDzYx0aIpayzDEW/CiyI0M1IETg10D3v3oK2SLEjDbCsVT73/WdBCRFgHrRnXf5N/GGDyhHtadAmek585fQg88MR3JtLDbDj80sccuCljPe5TxxEGstU1jzoEg9qjGGnW5+kP/e9nMbrTuGmas22errzre5H/y+ddT1jRjO7qlmtuSCZ3od1NY4jDtlffMnT9+uNHaCYoVHWoMQJD67Z//jogOP9Ba5mBmjMPGiacT+Mqb/AKgUWTKw+w10qUYyrltpdgfbDMjwAiUPwJl5fBuvJ51k7/jv88Q9pPF2t0Tzv4bnXHRzYHpycZh9fT7Xx9i3rD3PepP5vZKG+/2S3M9rVsjV8RazXZiGyjsTfqjn59Fx552GYH+x1N+TvIHOrbY2HI2zr/yDrrh9kfoxjseoUN/fT5hWy273g3XWd0knfWXW+jeR6cRPjO8cMkKciMDAqa/9oG5Lyxm8JDXDa+/+ylhP2KngLZlkJNW1NQ4jM757RGDeGNv4MeMx+do009+dR6dmnC20iraCD8/eHfCmlngteU+J5r7ru59xFnm1nI2VjOLF8jELC8I++6xDSJX4cqb7nXEAYOUYw77kSkLe83eNPVRcxu9C6/+t9mfePFs78Tsp26fYYeTvXbegvDi2nGnX2F+ae+8K26nPQ8/w8TOVJblsOPWGxG2M4PT/MdLb6WnX3zTlHPa+TeYNcULdWbGOGAnA8yuY40z4l2328ygJv/w8tyu221qbiH3k1+eZ273hxfb8NvG/r73PjYtyVxiqXJuW4l1BZvLCDACQwSBEnF4rceg2fpk7TUm0XmnHWXeoO95ZBpd/Y/7CWtZTz72ILNqKJSUI5LhcHYIwmGrXiicyhsOJfOhUIKHrNhUKB1kPVhygW2k8IIQbvT77bkt3fn3c8zlF/aXkCQRjkm0EzNqeOkHb8bjpR/sXoEvyckVfmLMpmF9JJwL7CULx6+mptrcjQB8CfORpJ8duBtBDmbG/3LtXabT0tHRSW5kQFA4ITQRgZQxCD48qodT6hSwjhZCQiEL55AG3lg7i9nuYQ21pqMPRxofKcBjdrwlD3kIoVAIkSHRis1M4gDHGdt/wYnFTiB4MQtrSy844xiTAwMZMyEdsKQGWWwd5lSOMqcQDlv6sd+sEwaYecfuIdj6bq3VJ9KNhsN7+gU3EV7Mw04Ft19zNlVErJl8N312/unHmNvo4WU+DAgwm40txbCjgpOddlooFKJbLjud4Dhj5hkDTcjBco77bjk/7YU9rDfGjhaQ85Mf7zi4MwryIlxpPKU45biDCAMufNDl/668nXDO41zf3ZhFB18oZOGFtB8hFLLkh0JWDB2hkJUOGWcL8iKEE3QRC3ookUgUmzmdtpmMfGAEckKAKzECjICMQNJjk6kBSWNP2E+nTzW3HJNNwt6ebz99i0waTMMhef3JmwgvqmGv1Rf+ezWddPQBBDlnnpR8yQVOMGhwkkVlldxzf3ekWR/rDsGL2TvUvfaiU5A1A2ZtQcPaSpOQOOy185ZmXft6Qzi5N176e3OfTji/qAdnVF7DmBDhGKH90CfCzEevN2XBVmxDZa8EB+iSP/7KfFHt/n9cQNMeuNrco/j804827dtl200Hq2DNM+S89vgNhL1ooWuN1SaYTpSuDAjD/sKwDw408tkCdIA/U4DzBDlu8A6FQuZs94sPXEPY7/jRO/5i7pH61lM30xknHAZxZlDJNAuNA9a5YkCF8wR71WLGcv7CZUYJkdPuDU8l9pX96X7WvrImo8YBznkmDHbaZmNTCpxdOL1iv1ec99hjWB40uen3xuH1BGcZLzM+/K+L6b3nbqPTj/+puRcxzi9TaZYDZFx9wcn05v9uNs9HyMKLoxsm9oW2VxeY44mEvQx5rDE/8agDCH2F/Wwfu+MSesP4fWOf3/0TL77V19WY5/DVF5yEKjkFnOOqNmLggv7AIFAIF3bjdytoiIEXeO2/47NO/plpI7a6Ax+CTtvAx4ERYAQYAUYgfwQC7fDm2rzhDXW03lqrEW46YrYsV1l+1Jv55kfGo+fnCR8HWLBomRn/4cKbTFX77La1Gft1gNOGF9lWGT2CsunADBxu0JiFlnndyJDrBSGNmVZ86QxOYSgk5t30LLvixnvppdfeJ3ytDuut739iujn7DccTM76yFMxS4wMQeDnQaQAi8+abRlugA+e9SpabPgMvBoJiZwWVzEx0DJpgE2Rl4tMtC4VC5n63cDAxu6tbrxT4QqHybVsp4M82MgKMwNBAoCwd3qB3HdZJXvr3/9DPT7qY9jj8DDPGCzh4sUk8pg16G4aiffiK2CnnXEf7JdZeX/i3qeYyFHxtzb7tFHZmAEZYToGYAyPACAQaATaOEWAEyhwBdniL0MH48thNfz2NsAcuHqnjETaWX5xtPPYsgjmsUhMBrI2Gc3vmiYfT/512FGFZw2NTLyHMZNpFrL/WFLr4rONo9x1SX8Sy83GeEWAEGAFGgBFgBPxHgB1eXYw95MNjdazDxNpWvDSF3RSw/MJDFSzKBwTg2B6w13aEHRIOO2BXwpv2WIfppGo3w9E9eJ8dyekjDE78TGMEGAFGgBFgBBgB/xBgh9c/bFkyI8AIMAJliQA3ihFgBBiBUkOAHd5S6zG2lxFgBBgBRoARYAQYAUbAFQI+ObyubGBmRoARYAQYAUaAEWAEGAFGwDcE2OH1DVoWzAgwAowAETEIjAAjwAgwAkVHgB3eoncBG8AIMAKMACPACDACjED5I1DMFrLDW0z0WTcjwAgwAowAI8AIMAKMgO8IsMPrO8SsgBFgBPQRYE5GgBFgBBgBRsB7BNjh9R5TlsgIMAKMACPACDACjEB+CHBtTxFgh9dTOFkYI8AIMAKMACPACDACjEDQEGCHN2g9wvYwAvoIMCcjwAgwAowAI8AIaCDADq8GSMzCCDACjAAjwAgwAkFGgG1jBDIjwA5vZny4lBFgBBgBRoARYAQYAUagxBFgh7fEO5DN10eAORkBRoARYAQYAUZgaCLADu/Q7HduNSPACDACjMDQRYBbzggMOQTY4R1yXc4NZgQYAUaAEWAEGAFGYGghwA7v0Opv/dYyJyPACDACjAAjwAgwAmWCADu8ZdKR3AxGgBFgBBgBfxBgqYwAI1D6CLDDW/p9yC1gBBgBRoARYAQYAUaAEciAADu8GcDRL2JORoARYAQYAUaAEWAEGIGgIsAOb1B7hu1iBBgBRqAUEWCbGQFGgBEIIALs8AawU9gkRoARYAQYAUaAEWAEGAHvECiGw+ud9SyJEWAEGAFGgBFgBBgBRoARyIIAO7xZAOJiRoARYAT8Q4AlMwKMACPACBQCAXZ4C4Ey62AEGAFGgBFgBBgBRoARUCPgcwk7vD4DzOIZAUaAEWAEGAFGgBFgBIqLADu8xcWftTMCjIA+AszJCDACjAAjwAjkhAA7vDnBxpUYAUaAEWAEGAFGgBEoFgKs1y0C7PC6RYz5GQFGgBFgBBgBRoARYARKCgF2eEuqu9hYRkAfAeZkBBgBRoARYAQYAQsBdngtHPjICDACjAAjwAgwAuWJALeKESB2ePkkYAQYAUaAEWAEGAFGgBEoawTY4S3r7uXGaSPAjIwAI8AIMAKMACNQtgiww1u2XcsNYwQYAUaAEWAE3CPANRiBckSAHd5y7FVuEyPACDACjAAjwAgwAozAIALs8A5CwQl9BJiTEWAEGAFGgBFgBBiB0kGAHd7S6Su2lBFgBBgBRiBoCLA9jAAjUBIIsMNbEt3ERjICjAAjwAgwAowAI8AI5IoAO7y5IqdfjzkZAUaAEWAEGAFGgBFgBIqIADu8RQSfVTMCjAAjMLQQ4NYyAowAI1AcBNjhLQ7urJURYAQYAUaAEWAEGAFGoEAIBM7hLVC7WQ0jwAgwAowAI8AIMAKMwBBBgB3eIdLR3ExGgBEoOQTYYEaAEWAEGAGPEGCH1yMgWQwjwAgwAowAI8AIMAKMgB8I5C+THd78MWQJjAAjwAgwAowAI8AIMAIBRoAd3gB3DpvGCDAC+ggwJyPACDACjAAjoEKAHV4VMpr0Bcs7qZxCX/8ALWqOl1WbCt0/C1d00sAAMYZ5/jaWtXRRd28/45gnjrH2HuqI9zKOeeLYbmDYYmBZ6OtJuenr7umnZSu7hvT5qOle5MPGdR0QYIfXARQmMQKMACPACDACjAAjwAiUDwLs8JZPX3JLGAF9BJiTEWAEGAFGgBEYQgiwwzuEOpubyggwAowAI8AIMAKpCHBuaCDADu/Q6GduJSPACDACjAAjwAgwAkMWAXZ4h2zXc8P1EWBORoARYAQYAUaAEShlBNjhLeXeY9sZAUaAEWAEGIFCIsC6GIESRYAd3hLtODabEWAEGAFGgBFgBBiBfBF4dvrb9Po7n2qLuffRaXT6BTdm5D//qjvo1rufyMhT6EJ2eAuNePnr4xYyAowAI8AIMAKMQIkgcPmN99DdDz+vbe3Cxcvp0y+/y8j/1ex59P2CpRl5Cl3IDm+hEWd9jAAjwAgwAkMEAW4mIxB8BB694xK64s8nBN/QPC1khzdPALk6I8AIMAKMACPACDACfiHQ0dlFR556KT3w5PQUFXPmLaZfnHIJvfvRV7Ro6Qo65veX0Q4Hnkrr73wM7XroaXTtbQ9ST2/fYJ3zrridbr/vKZr55kd01sW3mPwtre10za0P0D2PvDDId/oFN9FePzvTlAN5f7z0Vlq8tHmwHImOzjhNvf8Z2v/oc0y+U8+9jpY3r0SRY2ht66BLrrvLtAv2HXfa5fTFN3Mdef0issPrF7KacpmNEWAEGAFGgBFgBBgBFQJ1tdXUFG2gm//9GPX3DwyyPfL0TPpy1ve03lqrUXd3D42IDqNTjj2Irr3oFDpk353ptv88SVP/+/Qg/+dfz6G/3XI/nXD21dRuOKzDh9URGeLgeM6dv2SQr7evlw47YPks27YAABAASURBVBe65sJTTHmvvvUxnXv5PwfLkVgRa6X7H3+J9ttzWzO8+Or79CfDMUaZPfT19dOv/nAlzXjjIzr6pz+iy875DbV3xE0nHo6wnd+vPDu8fiHLchkBRoARYATcIMC8jAAjoEDgsP13NWdZ3/7wC5MDM7cP/e9lOnTfnai2poomTxhLV19wsuGo7krbbr4B7W84optusKYx+/ulyS8OG623Bs189Hq68dLf098v/i01Dq8XRYMx6Mcdvg/ttM3GtNO2mxiytjNfaoPjKpjgXD92xyX06yP2NR3Yk44+gF59+xNauGSFYBmMZ7z5IX3y5bd0xXkn0NGH7mU6yBef/UvCLPGb738+yOd3gh1evxFm+YwAI8AIMAKMACPACOSBwNabrUdjRzfRw0/NMKVg1hWzrD8xHF4Qevv66OY7HzOXDGy5zwnmkoT3P/namEntQvFgWOcHk82Z4EGCQ+LZ6W/RQcf9mTbb89e026GnE5YugK2/vx+RGepqa6iyssJM4wBHGjFeaEMshy+/+d7MXnzNnXTIr883w9l/ucWkLVi0zIwLcSgth7cQiLAORoARYAQYAUaAEWAEAoRAJBKmnx+0Oz35/OsUa2kjzO5utuFatMaq400rb5r6KN1w+yN0xMF70CO3/4Vef/ImcybVLHRxwCzt6RfcZC6TuPem88zZ4AvOOCarhP7EUotQKJ013tVtEn/3q5+QCKcf/1O65fLTaedtNzXLCnFgh7cQKHugY+XKEL30cpiWLnM4mxzkf/1NiP7vogqa+Sp3sQM8TGIESh4BbgAjwAgMLQT233M7s8F48QxrZn9+0G5mHodX3/qEtttiA/rlz/ahtVafSMMb6ijs5H2COUN4+wNrycQFZxxLmLXF0oWKSCRDDavozfc+MxOrTlzFjOXDlMnjzOy4MSNph602SgmTxo82ywpxYG+oECh7oOPd98h0eN95T8/hffNti2/Zciv2wAQWwQgwAowAI8AIMAJFQmDMqCjttfMW9K97nyIsKdh1+80GLdly03Xp/U++oWkz36MPP5tF19/+MD327KuD5boJrPsF738eet5cd4sX067+x/0gpQQsp5j55sf07dyF9M97/kcPPPky/Xi3rR2XS+y+ww/N5Ri/Pe/v9PLrHxJ2l0B8+gU30vTXP0iRm2NGqxo7vFowBYApMVKLx/Uc2O5uPb4AtIxNYAQYAUaAEWAEGAENBA7dd2eTC7O71VWVZhqHww/clbA+F07lz0+6mF5751PaYO0phKUQKEeIhLO7fNttuYHpuF5583102PEXmo7zJuv/ANUHQygUMl84g8O671F/ImxrtvnGa9O5vztykEdO1NfV0D//dhatMnoEnfSna2ifX5xtxtgZYvzYUTKrr+nsrfdVPQtnBBgBRqAACLAKRoARYATKAIFtNl+fPp0+lU77zaEprZmwyii66/pz6IX//o1efOAawvrb//7jfJp67R8H+ZA///SjB/MiAd6LzzrOzGL5AnZTeO3xG+np/1xO0x+6jq6/5HemTvGSGnTDhtcev4GeuvtymvHI3+nmy05L2fFBlgnBq08eR7dfcza9++yt9Oy9V9JbT91CD952Ia29xiQUFySww1sQmPNXEmvhGdv8UWQJjAAjwAgwAoxA+SIwbuxIc/lAphbqlGG7Mmx1Js8Q2+vBAV514lga2TTcXqTM11RX0cRxowmzvkomnwrY4fUJWBbLCDACjAAjwAgwAowAIxAMBNjhDUY/sBWMQIAQYFMYAUaAEWAEGIHyQoAd3vLqz7TWNDenkZjACDACjAAjwAgwAjoIME/ZIMAOb9l0JTeEEWAEGAFGgBFgBBgBRsAJAXZ4nVAJME13xrYYL7l99XWIbrg5QosXB/e06uwM0c23Rujt9zyzMcBnC5vGCDACjAAjwAgwAkCA7/pAgYMnCHz6eZiWLA3R7O88EeeLkMWLB2jhohB9/DHveuELwCyUEWAEhjAC3HRGILgIsMMb3L7xxLK2Nu8du55eohdfDtPcuc6y43FPTPdFSLyLT3lfgGWhjAAjwAgwAoxAgBHgu3+AO8fJNPtSBTiXL0wL07dznJ3P3j4nKfnRvvgyTNMNh3fGK846naQLO5zKCkkDXm71dXXpt9OtbOZnBBgBRoARYARKFYFv5y6kN9//3HPzv/52Hr338VeeymWH11M4Cy/s61khmvFqmD75tHBOWZ8xw4uWfvWN8+nTZ3OyZ80O0UWXVBCcZNQrpfDyzBBdcnmEXn+jcPiq8BkYIFqypPh2qOxjOiPACLhCgJkZgZJHYOabH9E/7nzc83Y8P+Ndmnr/M57KdfZYPFXBwtwg0N5BdPvUCH32hZ5j09dr8S1dasVudPnFO/f7VFtaW618cwl+La6j07I9HoBZ3meeD9MNt0To4wIObvw6R1guI8AIMAKMACNQSATY4fUQ7bb2EL31dph6eoypuBzlfv11mL6bG6L/PR0hciGjtsYFc56snS7X6DbHLIW6O0xY3P4e7UtDVNriccvhVZUXki5sWbYsODYVsv2sixFgBBgBRiDYCLz02vu0/9Hn0Po7H0NHnnopfTV73qDBZ1/yD9rhwFPNMvA8O/3twbIOw7G44KqptMXeJ5g8jz3zymCZVwl2eL1C0pDz9jshevLpML32esTI5ffX2uqu/opmZ/7OeKrz/cVXYTr/4gp6973cbYxncXh1nUlni5na00M0fUaYli3Xc2xnzAyZfbp8hR4/I8wIlCICbDMjwAgQLV9B9OXXAwUPy5ZnR/+bb+fTKedcR7tuvxnddf05NHpkI/3y9Mupo7PLrLzRuqvTVeefRI/dcQntv9d2dPoFN1LLynaz7Mqb/0sz3vyQ/njKz+mGS39Pq6863qR7eQh7KWyoyxKOXn+qj5kzLM2JmVEdAV3dzs5Ol22GcuFCInMt6DKPjNQxrgx5MAs/bXqEsGOF1837/IsQvTg9TB99ovfzXN4cNvv0e9tSEq/tYnmMACPACDACxUXg9Xf66cobegseZr5heznHAYanXnyDJo4bTb//9SG02YZr0bm/O5JWxFrpzfc/M7kPP2A3GlZfSx99Pot6E2+yf79wifFUvJfuf/wlOuXYg+gnP96RNl5vDdrQcI7NSh4e9O6oHir0T9TQkByLpbbTjVMs14zbHGG5zE06nmW2142sbLxd3URYNpKNL9fy5mbnQQPkxa0BKpJm+OjjEL08I0RffqmuYzLmcGjvsGQuXqQ3KIl35qCEqzACjAAjwAiUHAIjm4jW/kGo4GHUCOu+lAmwBYuX06YbrjnIMrJpOI0d3USLlqyg9o44HfP7y+jo311mOMCfUxw3dIOzv6+fFi01pq2N9CYbJOsaWc//2OH1HFJvBIaMc2v4sKSs5uZkOp+U3XHLRxbqdhbQ4f3XHRG68uoI+eFkf/xJiK65PkL2nSQE7nHbAGFFwjnuTbw0CCwQXn4lTI88rv+zWrwkRK22vZLjCUw7bTohH0E8SUAaoTPBjzQHRsBEgA+MACNQlghss0WYzjy1ouBhh22y39dGRofTl9/MHcQdTu7ipc00IjqM3nj3M3ObsRfu/xtdfu7x5iywYFxlzEgzudhwjM2ET4fsLfBJcTmK9WKnBOG8YNnBSsU6XuEQ5YJhXOFE5SKr0HXiXSHz0b3AyEv9yxLrZdPmVAeMkYeDohWK9bLvvhem994P07IVaZLSpCxbRnTjLRF64klnHXGXjuzKlWkqmMAIMAKMACPACBQEge233NB8Se3Z6W8ZEzkdNPW/T5t6sbyhvs56sx6zvVi3e88j08wyHCorIrTbDpvR3Q8/T3PnL6aPP59NL77yHoo8CUIIO7wCCQ/itvb8hchOTott+YKQ7ofDJ2T7Gctt81OPl7LnzneW1tPt7ND2JfYottd6+50wfWFb/tDWZnHZZ2wtKhEcfJHWiY0nQzpszMMIMAKMACPACHiCQCiUnLDZZvP16ZTjDqLTL7iJtt73JJp6/7P094t/S6NHRmnLTdelPXbcnA7+5Xm07f4n0+vvfGLqD4Ws+scetje99f4XtPcRZ9Nxp19BlZUVZrmXB3Z4vUQzQLJiMXfGPPxohC69ooK6e9zVc8Pt1oFTyVYNBOz8b7wVpjffzu8U71es0xfrbO06WxNOrEyHo//EU2G67wHnnTG8wkXWyWk3CDAvI8AIMAKMQC4IHHnInnT7NWcPVj3xqAPo3WdvpWfvvZJef/JGc+YWheFwiK696BR6+eHr6JXHrqfrL/kdfTp96uDLaZtusCa99sSN9Nx9V9EbT95E9950nukso65XIT9vwCsrykxOf781Yilks+zbj2XTLdamCj7sOgDH7Ls5/YJkxp2dIerM8FJU3GGJREuLWTXnw7dzQvT8tHCa3l6X+xs/9UyY/ve08ynuFi+5MXHFUoNmxSBDOMH9qdDKIrXS4uW0eGfq7PLChc7n2+xvQ/Tiy2HimV8teJmJEWAEGAFGwAMEaqqrzN0aKiLpkzyjRjRSU6P0gpKkD0sbJqwyiiIR5/u2xJpT0h+pOZlSPpVUa2/9bKF9+zGVLruja+frkrY36+8boL9eGaHLrqoglZPXFU86X8Ihw/pjWW57u7NDJvPI6U8+DdHMV8P0xlup9doUcpYuo7SZ6WxOngqvZYqPOoi2wU6dJSUdHeC0wvz57n5mbYqlMUJvp22Q0dVt6bEfX38zRHgJ7/vkOwR2Fs4zAowAI8AIMAJDAgF3d+IhAUlpNVK1/jPXVsjOVkvik8BwYJtjqc6nk/xOaeazWZrtdLtXbXLng6ROWZ6se/HiEF1/UwXdcWfqSFL1Alc/GiMLSKRDIctxF7OxCfJgJLdtkJgh0Wmbhc3AmlYk2h+LpRYJNGItIpVabs91dVl8sRZPf+Z2NZxnBBgBRoARYAQCjwDfCX3oorY2y3nyQbRnIttsW2HFYplFyzO5mTnzL5VnU7NJE47l/PmWc5eNv6XFma+jM7efgsoRb2l1tkS1JljmVrV/TpYPS8RtewXLMuU0XqCbv8AZB5nPy/Snn4UIwUuZpSQLbf/uu1KymG1lBBiB7AgwRykhkNtdvpRaWARbe3pSnYkvvwrRK6+lQo2vsV16RYTuuieV7oe57dIjcrE0IfGRk5zU6c4wCuGqZRSwYfqMEGGWVvAiFrOpug4c6iConE+UZQtdms5iNjmiXMzSiryIFy9NPTdAj8VwTAbR/iTFSmVbAxy3LXXo7bXqycdFxow4XqB7abr35x2WkVz81wrHl/P++2CEEGRbhlIabb/jLu/fOh5KGHJbGQFGgBHIBwHv73r5WFOmdZ9+NkLPvYCXsJLOTksLERyUpcu86wKVw7d0KQ1+AjeeeMytglpn/1hVXbf0d94N04vTI/TOe0lcZBnAR847pWMuH9e7ddaddLqh2Xm9mCmPxexSnfNOs7jihbd8d+Po6AjRNKPv5sxN9h2WkfT0EC1QvEQHK51sAr2cQzzEVFeUAAAQAElEQVSx1Eexmsax6TNfDdFrryexdWQKKBGD+1dL1PaAQspmMQKMgAcIeOdteWBMuYpY0Wy1LJ+dASwJuR3fejdCF19aYX4KNzcJ7mu1aDhlCxdZN3R8bUzWMHp05iUhOk6ujn5Zp2oZgcyTa1o1Y5urPN16mRysfB3/r2eFzPPp08/cXUJ6FPsX67apFPly6f/np0XomedT16UXq+14wVO1tt3JJgzunw2I7U72Ma1oCLBiRqCoCLi7WxXV1NJXvmSJN3DDmcNM2pdf68lrS+wNuyKmxy+Qhh6RLmT8fZa1qqrlByp7xQxbpjY4OSVyvaamTLUzl8lyZE4dx13mz5Z247R3SrtIZJPrVL5ihUV12iGitZVo5UprMAMuVftRFqSw3GjT4sXufiNBst8vW26bGqarrqmgDsUuKbLeZo2BrszPaUaAEWAECoUAX90LhbShx+nGH4sZBYq/TA7RCy+F6T/3hunDj5OOhcrhy3c2T2Ge9+SExGzLLjoy7AucEJESOTmzKQyKTK71FOJ8J6vsdaLDUfXLOenrI8IewKLBsv5M57TgL1Z8/c0VdNsdEco0M15o2+T194XWLfR1d4dMTN58J3mtQdnrb4bpxlsi1NmZSkcZgoqOMg6MACPACBQaAXZ4C424g75580L0+ushuuCSClq82IHBgRRPvKCk89a/Q3XXpHiX86nijwOTeUmDa+MVFeKJtZVysRNNLi9kurraGxyC1KZC4udWF35L3d0DFGtxW1Ofv6vLXZ9i/b2+9MJyfv5FiLAcafFi5zap6IW1snS1seWMACPgLQLOXoy3OoaMtFgst6YuWx6iRUvDhBvugkXZuwTOZ7fHuwpks9wPp0m1e0MsZs0YyY/oW2LZLHRfLs88itqq2WWVraoZuLj0QQ4hG3Ffn9U2pLOFri585U6fP5s8e3k8MWiy03Xy2T6E44Stjlwveb7+Jkz/d1EFvfp69t8UdpjwUrdK1udfZLdFrhtXDDRlnkKl4xrXnD5jdr9Q9rAeRoARYATcIODu6utG8hDibWsPmc6qaHJr4oMNIu8mjsWyc8eNmckFC/1xhMTaTLsVjY3Oszjg++CjML3wUvYXbGIxcOsHldOkc+PV16LHGWtxxnuJwzZjkNimWCOr+gof+hT17CHuckbQXj9TPp8dI9o6nPEQ+lTtEeUixtZ07QqsBE+u8TezrZqqPrJKrePKlVbs/zEzbnb9OjguXhI2d4GZK+2YYZeTa37hohDFYlbtuG2A5PSbmDePbykWWnxkBBiBoCHAV6c8ewQ3yiv+FqFHnkw6fHj5xa1YpxlEyFbJUbuf6TWcdoeIxdL5MlFaWtQ36unTwzRjJmYjM0nwrsx+4/VOsv+SQiFnHFXOfa4WxY1BUa517fXw6eYum7wujdk+uxyn/MOPhunyqyqo0+W6bCdZdlrc5qDZy4uRVw14ZFviEtY666w/+2zA3Od71rfO55Ys22168WLvZbq1QYufmRgBRoARyIIAO7xZAMpWLGZEdR6569y8ZH1uH7PGFHvSdnlw489ke3/C6K5u55uj7BzFJVvi0o09ISKvSNV+nb7Jpthp0JCpzopmN0OSTJKcy9qkj4k4cXjlQC9ZSnT9TRX0yOPuLhW6s/DNzdY5s1hz7bpTW1U0pw9vqHhlervxxEbOe5n+ZpbV3kwyXfddYhCli3km3VzGCDACjEC5IuDuLla6KJS85QPCq9RoSSZHMhZLF5CJP51bjyI7n52Skys7jvEu6+bvh349K5Nc2WzQGTTEpXYmJeeXUsnEtnT5Sdar3ZFwrDsUb+I3NzvLUdlt527PsjTCzu8m/8mn1vnlpg54e3v8G6ysbEm1CV9hvPbvEfryq/wvxbqYo41Ooc1w9K+6toLeelvPls5OC6dmxUA7HqD1x07tZRojwAgMLQT0rmxDC5OcWht3OVupM+sYlx4br2xLvVFmMrLTpS0yf4dP6ymFvXEHpzCecHwFjxdxTHETVskWNsgOuYpXRXe7JrbZYfBhl+1Wpr1+prxbjGRZKkdX5il22s32Yjq/Ry/aYx+34qt0K2IhWrDQWXqsJfV3jy/bvTQ9RH19lrPpXMuiov2XXFZBt0218smjc+qbb0KEZVSffJaq05mbqCvxu43FnDniLq9DzlKYyggwAoyANwiww+sNjiQcJo/EmWLiknOIm5dJNA6xlgGKxYxEnn/tDs5tR2LWRlc0nDbhJIo4U10/HbhMenXL/J7JBV66tmTik8+NTHz2sniGWbeu7gHDkUqtEUsMHNpcDLhSJWTOxWKZy4tRKtpcDN3ZdH70SYhemhGhWbOT7wyIOvbBaqzFcEq7ibAsRfBkir06N1U6sINDX6+qlOmMACPACPiLgKPD66/K8pQ+IHukiibGWvRmTuTq384J0WNPhqnHcEYEvVnxxbTWNqLHnojQt98567HPyq1c6cwn9OjGwkkUsW69bHxxxQyRvF1ZNhmZyltydLZkp13Vp3A++vsHCFvOZbJBlMWlwY2gZYplGzLxoSwuPSmIKzBtjoXokssq6V//rkCVtLBseRopJ4L8NCEnAWVWKZa4Jsh9lKmJSxO7guD8svN99VXI/ECEoMcT51RcsRxF8BUqvuWfEbrYmHEulD7WwwgwAoyAjAA7vDIaOaTFjFlLS6rz2OzCmcrkBHzwYZjefS9MqjWUsskfG7M/774fok8/0+tWN06TrEc3vXBR2NyYXmfm10lmPPHI1F6mwktFt9cvRB4z5c++EKG/3xihzz5PPTec9Gfri7jCUZVlqXjiCcdH5rWn5yY+59xmDJrsZbp5HT1xjXbo6tPlsw/0stWznUfZ2LOWY9swwaRqf1yjjyAj0/7bWOQQawGXFcQ5hd8fgkUt3NGO4+LFIeq3r+konDmsiRFgBIY4Anqe0RAHKVPz4y5u4Kobr44MeQJZKSfhILa2ZrJYvyyWeJwtarzxVoRuvCVMC+bj1iqo6viWW8N0820R8nrmV6VRhaPcjrgP21/Bnqefi9DDj6c+Zo4nnBjV7hWopxvszoNTPdUAwYnXTnPzUqS9rsgLB0vk3cRi4Oimjl+8cRe/aR0bFkrrcyG7M48Z147EOaWjV+bRGTDL/E5p1XXHiRc0tBUxB0aAESgXBEq7HezwBrz/3N5k0Jx8HB/UV4XPPidjxtaYtV2a/bSBgwa3GF+PU8nLlR5rSc6YQo8bOW75dWV//kWIYrGkXbr1gsLn5omEbHNNjZxLpt2et147R7m2J9kC71K9PamyFi/GLyOVppuLxXQ5C8cXk36PhdPKmhgBRoARcIdAds/FnTzmzhEBL274sZilvM2jGV5LWvLo5sbWLa05TkpwTuXzuNUL3JytKh5Vxjlmm2X30iovnP9FxmNq2aZipNvaif7vogr639N6l7N580M0bXqY2toLM0D53tAn4/LokxE6/+IKiktrq+XyfNItiWtANhnY41t+apSNf6iUAxfdtl59XYSuvT71qY5uXeZjBBiBwiOgd4covF05a2xt66DmFp88PhdW6d54hMh4YjmCyOcT43OtudZvVzzyl50wWXZccdNe0ax/ahVqyYNstz0tO92ZZgfxOLo/9wk6u9qi5p0GCzIO+RqHlyexjrlb8UGSfOWL+suWWqnFS/Qc2K++CdHLM8KENe9WzcIeV6wImS+XLZKWOqgsUC3BUf3uVHJkOp66XPiXCrrjTmdnzem8kOvnm563IHs/zf42RM9Pi1CuHw/JxcaHHgnTRZfoD0RwTVyR+HBKLvq4TtkjwA0MGAL6XknADLeb02FMV5167nW09b4n0fYHnEo/O+liWraixc42mJ828z1af+dj0kJXdw/NW7jUpB92/IWD/Eh8/vUck/6rM65E1nXABdJ1pQJXcPuFsLhiTWFzs2V4KJT95mZxFv4orxvVcbrfez9Mf70yQs+94OwoyC0Y0STnkmmVo+LFTG4sltSTa8qOg/Gzyipq1qwQ3XVPhD76OPVy8s67YXr1tRB9N8ffc2DeglS9doPtv7tlyyyOuLRWd2VbZhlWDf+OKsdWxn9F4jcFKzoVA1OUZQtiPsCOi8jHFYNvGa9sOuzlzdK5uSyx04SdR86/9nqIZr6K/Yn9PXdknXO+D2sPROT25IOLrL/U0r29A7QwAE94Sg03trd4CBT3Ku9hu+95ZBp9NXsevfTgtfTGkzdRJBym6/75kFLDAA1QXW0NPXX35SmhqjK5LdMnX35Lb73/xaCMqfc/M5jOlmiO5X+hVt0Es+nWLZcdPlHH7Y1U9da1eAlKZ7s2odvLWHYU5LSsw+2NqmWlVXvBwux9+9Usi9d+jMsDBHuhi3wssaVcXHLaXFTXZu1QOFYrE1hA0EefhulrY9b0HWNAgDxCrCVErW1IEclbaMUVTwQsztyObr8615bYUxg2Co2Y9RTpQsZiYKg6R2Vb5s51d7mOrUzyN6/I/7FEXOEIyzZ6le7usX5j/Rof2PBKZ65ydPouV9lBrocZ+Jv/EaFPc/yiYZDbxraVJwLJK2KJt++Zl96iQ/bdicaMitKwhjo68pA96OGnZhgjdvWFvqa6kladODYlhELWhRZwHHHw7vTPe55EkuYvWkZPPv86HbrvzmY+20G+mWbj7etP6pR5VRdSN7Jlefa0G2eprtb5Yxf48pNdLvItrc5tirXkf8rpPHaX2yanYVuuAY/o7XVjMTulQPkBC1/VOSJbkc/AaWXCsYY8GccViUe5en1h2WrJSKabi4UdDEmEb+cQvWrMJiayBY28+C1kNFhx6WtJ4N7enrG2Y+HtUyP077tTn3DMX+DISl5cp5wG5c7amFpoBMQAqKc3+ZvOxwauywj4jUD+3offFmrKnzNvMU2eMHaQe9L4MWZ6ZZvD58TMEqIVsVY656+30YVX/5v+N+0N6sWngBJliH524G706tuf0Kdffkf/eeh5w4ne03SoUeZlmDtXLU3MRqk5ClMSaymMHh0t9sfuok48MXtofxmnIjlpL1g9jV97I0yxWKpIvLTX3aPwOFJZfc/pOMUqI2Rn2UmOqi9kRzgWU0lX0+19KDhVdJTLDjnyOuGppyP07PMRcvrqoE59Ow+ekFxwcQU9/Zz7S+vnX4Zoxit6zkOsRY/Pbp+cdzszjrrfzQ3RrNkhkrHOd432q6+H6Quj7ZBvD7Iee1lQ8osXu+/roNhut2Ou0b8vvRym7m57CecZgdJHoCx+qXhsjjW8NdVVgz1SXVVppjs6nJ/5jh09go49fG+aMnmcyXfWxbfQ5TfcY6bFYUTTcMIs799u+S/9+4Fn6Rc/2UMUDca11amzHaKguipCjfWVZhC02hoy86DX11QIMlVEQhR2uH+B7vSFq1xvApBH0j/YDluG1VlYiaJKsuxGuaB1xcMimRI72Q2GbsXjz1FNkUEM5LqwQwTUR4gbXSdoVRWpNgo6cAYvQn9vmO68u4Iu+EsFVUaS9nbFI1RnnA/1xox+hYE1Ecki0gAAEABJREFUeBHQPiEHeRGAB+iIBQ2xXBd5EZpXJHUJGh7nd3VadLmdVRWhwfbLtlO/My7CRrstKjrsRpD5YTdoCKgnbKw0MAINQbYFeRF6Eo+WUQcyBR15Efq6rXaKPGI4wj2Jl9VkGXL7q8LJPu3oSLa/tqLS3MXg4cciVGGAJ3T29Vj0F1+sGMQQZcL2bkMf8giwFXYgGCJS+CsS50CrMXstbJszJ9kGyIOMXMLcOZWElxrnGvJEfaEPtsgBfSHKED9nON4vvGi0OVQ1yAa6kAN+USDjCHsFvSteOdhWYCroPb1JfEc2JXEXshFDJvhjMRqUIesEjihHqJD6DnTUR5CdP8gDDUHuD8gEDaEyVGEMOML06ONJ+0BHu6FH5gXdz4B2QCeuzU56qivDJOyR2/Phh6m2O9UtFdoXX0UIDu9336b+xoYb9whcQ+V2oH+Bl8BELsuURt82GPe/TDzlXgbcOBQegXDhVXqvMRQKmetx8cKZkC7SdXWGlymIUrzhOlPojBMOo18fsS+df/rRdPFZxxHWAdtneX/xkz3pzfc/p/323JYmjhstSbCSff1WbD/is7I9fQOEIMoWLiIzD1ov7oqJAsxaOc0FzpmXYLBFccMRtJG0stAjM/b0DtC8hWTMbKdqb+3oN+2U22avK+Sk1hRUInxpLJlLpr773sIEGMh1V7ZbOkEX3Ggn8gjNK2XuJI7AWfAD0uXNRLC1uUVQiTDbeN5fBujM8wbMMlGC9kE2gqAh7jX6bcGSAbMe8iKEFb8W6BU8cgw7kJctBy/0Ici2t3U44yLosAmyRBC22+mQiyDTYQdoCKgnZMw1zi/QEGRb5i+WbJGMh0zwxm1bzrV3SkxCuBGLsSb0G1nzT7T/nQ8H6PJrk/WgH7IRZs+1flRfzx4gpEBDWLIUOaJZ3yb7H3TUhXBIQx4BtoKGINNRJtYlwy6UgaenB0crLF+RbD/43QSBL9ou6kGPJTn1CF5R1m3oBzbgmDvPaifSKBdywA8aAngFXbQfdPA40TGDLejfzwenFQQNMWRa1CS+kCdoAivkVfguWoxSK2CwDrkIMj9kgoawMMGPPkFeBLQbUmReUeZXLNqHa7OTDtjSZ1wbUCa3J96V+/kCWTmFhB1e1+3qAuq4J4TMe4CQP/WeATrjzwPU2p5sK/oX3EuWJ88XwZ8pxnmmwjhTPS/Lvpw1QHfeN0Dytc5L+dlkATcOhUdAcQsvvCH5asRa3LnzE1dPQ9j3C5YYR6LhDXVmnO0weqT1Wn2vbU+vyRPG0HmnHUXH/2I/RxHdPX2O9J7efuqI95pBZhC0ru5kvXbD2REXeJm3x+PHSn240kgKHn+a6ErD6Xjn/eQNFsWdhm2wU26bvS74EJzsBl0VZFzkuitifSZW0CvXRR6hpyfVxiXLLX7IE/y4CQmZSMt0PI7HC3ZyOxYvtWRAvuBF/M3sPrr8b0T3PihugaASqZYotLal8lncREKXsAl02AV9CLLtSIOGIPOvbLXOo86uXlQfDN3GeQdeO31FzDrnZDrsAC8C2iyE4EYNGgL0C/qyZZYM0FFX0L/4qo9uunWA3ng7ee6iTOZBXgTRDrl86bIBs5/nzU/tT+iHPgTx2xgw1MCZAw1h8VIL59Z2MmWAhoC60IlBFvIIcvthB2iDIfEiHuxCGeoijRhB7iPUmT2njxCQzhbQL5DRUG/1G/hl2SgTAbyiDE8EhC2gCx4M4CADQabLNor2ow54wIsg0/uNkx80BPCAF0H8jkCHTNAQkEeQeTuM6xTKEFT4ivaAB2nIQFi0OHn+QiZoCG0dSTryIqAuZHTGk79RlM2d30fX30L04SepdJTlGwT+S5Zb56iQN2deL309q9eYGOin7sR1XW4/bBW8pR7j9wnc5T5Cm2It1m9v9nfp14Z5xgANPLqh37gPxRP3GN06XvO9/2E/vfs+0cefJn+nXuqYv7CPPv4siZVdNjDmUHgEysbh3WvnLeiBJ6bTkmUxamvvpLsefJ4O3mdHCoVCJqpT73+Gjjz1UjONA2Zz3/3oK2MWr5sWLV1Bt979BG216bokL4sAH8LhB+w6uPQBeTk0G4//5Lw9jVlKO82ejyse/9v5XORN1uoa6yJlZhwO8cSNX57dAlusGcfUoFoz2BnPrCNVCpFKzrffhujaGyroi6/0TsmeLOtjxdvvdv06+XnzrHPGvn5a1ZcqutDlFiNRL1PcrDjv4NhnqpdP2bfGY/rZ34Xou7l6fSR0dUqOkqDpxDFpJwiZPxaTc8k0llGIXGen1YfI6+Av7ySBOnK46R8RQpBp2dLfz3OHkXC2ssm1l99xV8QYgMhtTXLIvzXMoCZLkinV78jp/Ory6TqVtCY9tWRZKu27OSGaM5fok8/c4ZsqJXPOPtFw4z8q6B//qshcaQiX+nnNcYL1409DdJ1xr1hs2xLtsSfDNPUuvX4S99xs124n/dloS5eG6PqbIvTvuyson/tQNj1c7h4B/64a7m3Jq8bPD9qdVl91PO1yyO9pqx+fSD09vXTqcQcPyly6LEZffGNcKROURcZzmKN+eylt/qPf0G6Hnm6M3vvoorOOS5RaUSiUvJFYFP1jPPFoqNAXA9nC2prc7ZflZErLTkYmPqeyWCxJxR6YK1YQLVyYpGVKtUh1Zb6exON23BgFHXJFWidujjnjFlfc8FUXzVjixaJ8MFLZGzfOL6y1W7go1VY3b97LdgtboS/elbwsdBl6QMsnLJY+RQ2dX34Vom+/S7VbJd+YmFQVZaR3G4+ZBYMKf9gieLqltcqCZo/t29HNmBmmbw0HzM5XyDwGip0dSSzlNsl2qK5Dqt/RbbdH6D/3RWQRRUnHbdv4tdqWNukZ5Y5rqfGIXq4htqxb3ixTk2n5t5Okpqfmfh+i6S+HU5ZVpXMFg6I6X4Jg3TezwrQc9wrbte/d98I0+1uiZsW9oVC2z18QMvwJS1tLixXzMRgIJO9swbAnZyvq62ro5stOo9eeuJFefvg6+u8/zk/ZUeHMkw6nt5++ZVD+6cf/lN599lZ65p4r6NXHbqB7bzpvcI0u1up+On2q43KIk445kP551ZmDclQJ+4Va8GFUOm16hObb9nLVmYWKRoUUb2P7BXuOMYMHG7HuT2iKxUTKn1iMhOHIyRoyzbzJfEh3Gc5uu3TzBw1hztzsDgH4RBAXzFhMUKxY1UdxhSNs1Uo9inamUonsfWAvt+e/+CJsvlzywYepP+HeHjsnkUq2yu64tEa8U3I4xGBE1YZ0zekU6Hz6mYj2xyiaE49S0yWpKbbNVpSMsMWpUNU+2YnGQOOFl8I0U3NXBSc9Mi0Wk3PJdCyWTMsplY3NhiMg8zmlO6X+dSoHDU83MDBB2uugo1/oTDufFZMQb78bNj8v/cZbqb8HIccp7kuupkgpXmi7NqcUamReeyNk2vLeB8nrDqq9adj2ouHwwmFDPshBvgbATqcv3un+zlDfy9CpeGdA6IhL1yxByze+/6EI4WVo24rHfMVy/QIjoH91KLBhuaprHFZPo0Y0alXH8gVsXxZtbNDi94Lpsy9C9PKMEH1pe3SvmoXKR2dNtbvlBkIXHhnBxq++LvzpYb9YLbGWYgvTMsYrlocylqNQ5eSgTAQVjx99JHQuWUrmVlZzJedclDnFwimPazgvTvVzoalwUTnUsZizltTVuxZPXDGTvMI222Zxq48zXw3ThZdU0Cuv53fuvjAtTJgxUuHblVjKs2y5Wo/oI7W1ziUrmrOfx6jZ7MAXW5m9rtwm/MbhuPfndqmAGdTbM0BPPRtOm+1WnRey/pi0L7dMNwUbB6xtNqKsf21tFktnYpmWlVMfH3okQhdeWkHz5mfHSy3FKrEPhFsSfdDSkipbtGXhojzAtlQ6Htvaibx4IuMkfPkKqy2xluT53tZu0Zz4/aJh6c+s2UkbnPSI36ZTmZ22ItEuO92ex3mCp02trfaS9LybAV16bab4iUDmM8dPzUNUttONPRbzB4wa5w0qsirrSrwsh4tLVmaXDJCJi6cfX1CKu5hpdWl2TuwCR7myygmYb9x4X38jTLO+Tb2JxBWOoCwzWzoWy8ZR/PLKitR261i0dBlRh+2mK2aisEWZjgwnHqx3nWE4zjNeCZPbm5dbfll/POFEyx/7QDnWA3dKa5IF7Zrr9ZccqGbFMDDA0oylSwbxh/jBoNMeOD5vvBmmN4zzd7BiDgknXarfi3j/IAc1ZpW5iXX6y5alt/s7l8tU7APh+OAMo7Nj29eXrtM0Ko9DewfRlVdX0r+mpp8TWEphP386Df77HwoTvpAoq40rBtCdDgMJXMtRV9VHKPM6xFqIxDsnTueLk77/PROmJ59ydnUgz6mOitZue4L48swQPf5EqmwVhiqZTC8cAqk9VTi9Za8JM3Y33Byhzz9PhTg+eDF0D0F9nfMFVCWpydp4QlWspKse9SkruCj45BO8cBChl2Y6v1yw1OEG5EK8J6z2GZtcheKmkmtdUc/t+RKX1t8KGYidbligv/9hiG78R4RaFC+IgUcEFS6NwwVH9hgyYrF0vrnfp9MyUVYYj+6vv6mC7ro39fflNOuZSU4sZpXCLitFJJxlnV8bZn1EPcRxhcOAsmwhrrg23HZ7mB55PNVJwvZY2eTJ5dmcA5VDHHfRnsU2p7k7MXCW7fAqna092fRgR49MPLnOzssy5dlQmW4fxLa0hOjjT1PPY5lfJ73UeBKG/ejjtkE/tg775x2RtPPnS+Pp3SeGznfeTdVrr6+ju6Ul9dzUqeMFj+65iaUkb72T2k6hvyUxGy/y2eLe7tSrwrvvR+id98O0zBh8Z6vL5cVHwPksyGQXl2khgBm7JUtD9MVX3lwMolGio4/s09KdL5N49JavHKf6Yg3USoWDJR5NirpffhWhy66KEJaCCJoqjsVUJe7o9hkbd7Xz447lefNQ3QTiipni9z8IE9aVz56dPE9VN3uBi93GFkVfOiEhZDiVuaGJ88c+4xIzZoCc5KhwEbw6dvX0JjES9WBHXOGoCh4v4kLoyNfOFc2pEuT197GW5K2mJsvuMalS9HL9A1bfxFqsWNTCo+jpM1JpKNPpb/D5Eex9iS/NPWDMtmJ5idf62hKP4O06xYBBxPnohaOdT32v68akcy2b7FgsG0dq+SLboE6U2u9bgs5xsBBIXoWCZVfJWCN93E3L5mbbTUGrUoKppjqRKFAUUzgPBVJvqvn0C6IO4zHSItsWNGZhDgd5y6FYS/qNMAeRFIu5q4XdFb51+dhUV0NjY+oMhKg3b36Y8MIMHtcLGuK4Ygbvmusr6HJjoAGebCEWy8bhfXlzLNl38s4UqvZ0GI9w3Vohz/qi7rx5OKYG5Npz3HYNdb0Oy42Z71xkqp4M5CIrW51Fi72/7WDg4aQXTzBenB6h775Lni9OfF7SxDV+wGmxuoOilsR1VtUGexX7UwWUxxROXnuHM9azZ6OW+6BykJuLcA2ItfjTpxDJqs0AABAASURBVE74CqQWLkrFMxazSmIK/DsSX9u0uIj+fXeE/nql89NNwcOxfwik9p5/espW8mJjFjeXxqluzEJWNCpS3sWqC0RlhbOT5J3m7JJUeMRimS9qqplLlcZ2yfFpV3wwQlXXC/pKY8YFDu8rr6rbFYulavp6VohefU3vp9rS4ix35swQYUukr2xPHPAUIlWblcNNu90YaFi5wh5j0s2jU2P2VHbi47ZHusLyN98OE17OEnmdOG0WULFDQKtihrvF1o86OlU8sZaQ1nri1lbn/lfJFXTV70+U22MVznY+pzzOLSd6dfKLx07FGWkqmdgTFRVjLTgWNixekqqvttb5OivOX50na9giDS9l3n2P8/UAH5ORtS5ZauXs/dvVbZ0n4ombxZU8xozzLZlLpuxykiWpKayzRUilkjHojtBrbzvjYOf9zpgUeOHFMPX06vHb6yPv1hH/6psQYTeGGcb1EvXzDfZzYJbxJK3TYT10vnoCUj/wZjj/agJvdukbmOmGUVvrXftGjswua2WON8nskvU5mhM3pG7bGimVhNfeiNAzz4UpruEQqWTo7L2qqpsrHTcs1O1R7PvqdON+/fUwqdagQZYcVBf4eOIG1yHN6GIG02k2I0gXZOEMyG1EulNqxyLbrAvK7QHbVc2YGSbZiZdnhu38Tvm4YllIoWZH41KbnewTtIcejdDsby2HRtBa20KErdREPt+4S9OWhx8J00rNdZKxxDVAts1+LupiIMtAujmWigdoCPJ5hN8DaAh9DqvHMul2Kuu0XZvss4PQg6AapHzwUci8xoFHhBZjwIyXxb76xvnWHe8MGddEwZ2M44qBYLvtpc9kjWTKqW3JUitlx+vyqyrTZjLRl9NfDtHTz+s5sK+/GaIZr4Rp4QLntlqarWNzHoNLuX3iJdjlzc46qyqdbe/U/D1Y1vKxWAg492qxrClDvfKPSbd5a6/l/KNC/WgUR/1gvxA51czFRic5bmhx2yi3K3Fz6DQu2DpyvjBmKl/L861wrLvCCx1+7TearR1xhQMl1wNObe0yJbc0to5CTYGzPY28CCG/rgpCgUPs9hzskm7g8izVyBHOvx04CVArD6h6HPYsBo/YLQFpOcQT56hMQzoesJvdh4aj9OlnqQ7eZ18M0M23RmixYg0i2uFH+ODjMM3SfHzu9JJbl+03Ipx2uc9luyPh1P7P1jdyufzbEGtf8XKkkP/MCyKVHjs5PLIDLdfAb1rO9ydMjtlmVbG7AK5xS6UXolokx07l5DnZIuuT0+J3IdOQlm3UkSfwQl3Y1d0zQGLHFNAQxPVu+YpEg0HMEMS9INdlOnbRcl/LZXL7ZhkzsHKZSIu+XGkMOARNjlWyZR5OFx+BItzait/oQloQl27M0KuzqXlTdICiirWYkOEUKhTLgnLdi9dJh5c0+SIjy1VdgGUer9LLlodo7vcheu/94vwM4goHSm4fcLKfQ3K5PY21zpgZstPbNGZyRB3509LiQi/K/IpVbWxpTd4cgUU2/csT+2ouyXGpEeTr9EtMWnaBOkELvYqtr3L9zLO9fZ2ddoo6v1LzCZIbTNuMGWsnjfhNy3RxXg0bJlOzp4UtK6XlKgtc7p8rO9CyRp3zGPyifntij2HQ3Aa7E61bX9dGJ3ny7wfOr+CRnXVB04nlNsiyVXVVtst02aGX5bRI/S3TRV90ak7IyHVVaaYXHoHi3OkL387AaOzyaauehnrnJq4y1pkuU+ULikzPlq724W1rlU6V46Uzg62SKejyhVDQSjX+/IsQPWw80lY9PtVp10LppSJxodep5wfPkiVJhzcuzaR2SWlZbyxm5XJ5Sc2q6d3x5VcidMElFaTr8HmnmchpaUwm+bJjkolPlMl9IWiq2GnZjIpX0HO9Jon69rhVMTNn5xN5p/MnV4dNyBSxG+xEHafYC3vs11V73klvNlpLS+rThWz82crlc0HnoxIyvnFpUkH+TWS75i9JrH3OZJufv5lMerksdwTY4c0dO7PmwoVm5Pqg2vfStaBEBdVWP5tt1k/bbKX5unBClm5UW+N8YYtGdSXo83UlLlz294bkGRh9ad5whn399eRn44cfOveNXWoolM4Xi9m5UvPyThepJfnl8DIf9gbVkdKZOB90ePPhkZ2KNsVLjqobHx6PYs22/Fg8H1v8rqua9fJbbzb5KnxRr9dhrS3o9iA7TfYyvBz10vTU34HTxERPL2m9OBhXDMaEXjHrLPJOcTYZTnXc0MQMubiuirr2vKCLOBOOgkdle1V1EmPZsV5qOJfnX1xBzz6fekFVyRnUo/GUQXaQV65MlS/kOMU6a5vleipc4tKSnM4s54Usj9P+IKB/Bvijv+Sl4hGy3Ahswp/JEaqstH70XbalDrIMzNbWSl9JC4VCtPqU5EyXzCvSqkd2q00eoK19cniF7kLGjY3ea8t1GcVwFx9cgNVGNyJyFayzxVUVk1n34prLHprt0k4XpjIPDvhIBxze56fl2mIPjMgiokfxomGWaoEqjrWoL/m654yXDcrkzGbS02k4Oy+9HKKLjBn0L74Kk3A4qqoyXychU9aJeng56qUZ6V8oA689QO/z08Jpy6Bk5yye4dpulyfn3cxAyvWQlvtOHqShDEF2MJFf5uKT3fG4+pyBLATVeVVdlfw9L5HWj8elAev8BWHCNXj+giQvZKbgCIItLJS2qnRqs41dmZVtUTIlCnC+JJKmzSKtit3IVslguncIZD+TvdM1JCThhZ9MjlBPT/YLcmUlUY20U8Oqk/vpwP2sqYw113Cere1RvIBTaNAzOfuZbIno3W/SRMQ9GDXjDfY0wT4QcFF3K/bbOSGKxdzWKj1+MaM2QKk3PVVLmmN6fPb6+dwY7bLKKW9/wUi07dvvLJztg7Xv51t0wWePKyvslGR+7vfOtx3ZaUtyO6e6jJkz4WTJM9NLljrLdpZiUTsd1mXGDflWaeqxvZ1o5qthenlmqp6uPK5Ddmc0VSORaKedLuezXQe7JAdTrqdKy/K64s73HLlui2Lt60qXS0lkmSIt96+g5R0PJCV0xaVMkpw15UXbsiphBk8RSP3VeiqaheWKQG2duuZSxad3GxrSf7SZXnxriqp1iJJsF+I6aRZa1BkzOt0OUZYp1l2La38JL27NpmQSnbUslzWGEGq3BTS/w4TxueHrt11eyY/F9CTFWvT4mEsPge/nZXZg7U9WvpmV+daBx/8qzf1iWwIbg+xkyUX4aMR3c9Ptc3KEYjG5ZjLtxCtKnZz9uMJBbHF53sU1HGG3zqiw289YdV2VcZTvD6pr6GJpFrZZ0TfZ2uFmIARZccVgRZbjdD6hrgg6/Ry3fVRC1OU4uAhkvmoF1+6ytqy6MnVELS9vUDV8yx+m1lHxCbrqi1yiHHG2C/Gqq6brrHFwgiHL/oKbzo0A9exh7NjCO3x224VNNYo1zKI8UxxrSb+BZ+IXZZj9F+lMca7yM8ksRplqH95YrPDWqM5Z+QYrpwtvoX8a7YPntiw7B1QrrgPCQmy3JdLZ4kefiNDtU9MfAS1clK1mslx2dpJUK4VH6lYq+zGTHKfabvmdZKhosRbV7VtVIze6rEduT7b7A7Spnmp1dqLUCrGYt9f0uGKwEtcYfFgWkblcIZuDLsuTBwJCBsfBQ6Awv5jgtdtXi6oq8xcvv4QmL29oalLLrsswM4xasRiORNEo0cQJ+V9kamosefZjiNJl19qcw3iOM7MjjPbbH6/a9Xudt9su5NfXp7cTZSoHGWUc3CHQ0emMsTsp3nCrztm4dIOV095ozV+K6neaj2SnWVFZXq3i2iB48NEYLA+QP5ARV8zMqZwmnUFdLCY0Osdff21crVQKnKvkTY3F8haRUYDsoMqMcRcOn1zPj7Rsy0BiGVOmpwJ+2CBkxrv03SB5VlvUR7yiGUcOQUdAv6eD3pIA2Td+XPrMpxvzKipDVJ/FeXWS98czep3IaTQQ9ty9nw4+IE87KyApPUyaOJBG9MLBFkLtj1cFvdCxamBTa3PuvbYrKA51NOrcshrpbWxnjvKiBn12Z8yY3J4m5NNLdbXp1wC7vJWtIWqRvsIWlwYOdl43+WYHhzKucPba2p0lyy+QyRwrHGSjXHaaYjFQ3AeVAx9zuYxC1iyfm/LsbAqPAzYqx06uJ6djLclzTNbj1BdyPaTFQEfnRbpWaT9x2UZ5K0XIdBNU54aTDNWsdlfiS5aqOqKNTuVMKxwC7PD6gHUonPzxuxE/cgTRIQf3mbOvw11ulO5Gj4rXraMyrMFZUr0Dfdiw7DdAZ2np1GKsnU23ongUvx3qfFsW7/Kur/O1pRD15Rt8IfS51bHtlkSrr6Zfa/PN+mmU7ZPkjcPd9WmV9Ha+agIVLwvFYvp2qZwnFV2WHFc/UZLZck67cZrcKmmOOd9PmjWw0zk3nXhUjp2O7W6xiCcGOjo6+3qTWMj8cQenXWWrHTecnwsXJeWq6tnpTrjZeTBw+uvfKuiKqypp8RJ2t+z4FDrPPVBoxDPog1O40QbixiLi1ArVGtvuiBpNihk4UW6PazRmZex17Pm11hygmmo7lRxp6Vx6FPkRrfypWL3a/nO5nZ2vdsDLKyvd2uJGr31dp5u6pcgbi5Wi1ZbNo0dZsc5xow36yf4S7AhjMH7sUX1UV+d8XbLLlZdXTZ9RPreZxUvsLbXyc+cSrZRmqy0qUUuO50x7h3sHTOj0M45JM7n56IknnNx8ZKjqwslUlTnRn30hZH52+2WX52k87iQtlRaLEfX3DVCvEVYsD2afplpc3rnyuRL51U85yB07Ru+mANHyNl7y+tz6eucfx5gxqOVPaGocINkeoWW1VfXbM3F8v6hWkLijADeGesVNXna85cai/yeMS++/6mpnHGulLehkOW7S0agzd7sPe+Y6a/KH2umwZZQ/mnKX6sWWgNmWqcRiudvnVc0pqw2QPHMLuar19NhKMdt1o6fXn9tPu/TYGzZ6GVQv673zfpietn04wa3eWCxZo7fb+VohL51Icqem7DOYojTuo5Mpv3Mi9GWKO13Y4pWTrbJH3EMUm4eoqmnRZdu7FH2qJYiZPEHAnyuOJ6aVrpB119F3+lTbuYwa5SwDjxs32cj5YpgvYpA6ZlS6o4YbXaOLDz6oZpZlhz4fW0ePgqWpEqLR1LyXOQwynAYCkyY69xF0n3dGBe29V6qd66+HkvQQlbAVF8holCgaTecFxYslHbq7PUCf2+A0w+9GhnzDVu3S4Eae37ztLgZd+Oqh004jY0anW6lyJjPtcZsuxVuKXdro0QMkf1xAlNfWEG22Ser5L8pE3G5bPyuvNxU8ucS9Dnudx2LOkpx+1+CMa8zegU8On3+efu28/c4K+vfdFTKbmZbPcZOQOMSzDPBysSshmrCERKR1Yje60luuoyF/Hnyh7YUX9dwYnY+SyBZ1Z1iXC7649JJlDzuzgCTwQe9MCXwzgmUgLva6FmEZgy6v4Dv4wD5lPyrwAAAQAElEQVS68LxekfU0Xmct50vX2NGpzl1FhChsBDfKsVbKDb+Kd5jDnsMq3mhUVaJPx6N7p4FJg2KXBkjGhzRqa1Jv+PjqndOs1+pT+snJuXGiQbZqZnm99VL1gVcV8Nh61Eh9fpUcJ7p8I3Aqd0vr6s5cY2VL5vJ8SmMtqZfIadMjlMvHDYQN667dT+PHiVwy3mOXPrKfG5tsPEAXnJv+Ox+p6Dd5ljhmYLJ4cVK+X6k64+nEOWf3aItXvegJAZ05OJmol09w+l1DXjyHNb8qWctXQKJeWLxUj68QXE79EYs5a16wMORYEGtxpjsy50DEPWXu96k6VJ8+//Kr1N9yNnXNzaly7fxxaZbazaDXLofzhUPA3RmQ1a6hx4C1brW2ta8qh8QJndbW5I9K9VjIiW53hqo92hlguy1DtNmm6Y7QzjsOpNAbhhENVzmeofT6aHuDw8tsoLsN22w9QKto7sdrdzrd6srG78WsNWZ4jz0y3bFpdHjRL9Ngaucd+rOZO1i+6mSi357cR5tmmYUbrOCQUM00y46XQ7WspLjk+KxoHqBLLqugDz5SX6rcPoqU3+7OaoyN4eUZIXr3/VRiZ+cAPfZkmNp8eJTuNKicPGmA8DVH+yz9KsYsMeiwLhYLkcoJQTkCHOx1DAcc6XyCfC2KZXFwphiDO5Wu1lZViTf0Ulge05xwsmLSQKu52Zv2q6S0xFQlyXuTnaNZWYcoLs18Ot27TFnwVM1E6sFJbiyWypMt166YJcd1NltduXyhwz7PeCKgGtiIJmX7Dcg6OF1YBNR3kcLaUbLazjq9j9ZZO938gw+wPgUsSlSPIaNRwUEpL3bJjo3OI2LlhSUpXis1eVKINt0o3XGaOHGA9t6zlxrqZTHOju2G6w8QlkEMM5ximTvTEgCZL1u6uppo3LhUrojiTN54wwGaMG6A8sLHuJKNW8W5rT85sI8yfRlPthIDoTrFemCZT6SbRohUMsaezKrlJRgE+LXcJWlBMlVbFzIeZyfzIlWrGHzpOsJxh9m1XG/6dbbBKGyU3+5G3m2ISzM7qNtl2Pvue2GaNRs5KfiUrDd+g2f8vpfsX90LhYkOOTB94GQ3Q5zJ+I1u8UORs3P5k68zzpl11nLW2ebRevO4Yl9VN7uHdHU5t3/pMmfbnblTqToDrWyDFFmiyrFS0eW6OukOF/2xeEnSOZZ/HwsVM7/NLUkLVL/tLo93e8n2dbWkRVbKCUc4uy0rrXL7cZH0VTl7GeeDgYBxiQyGIeVmBR6Dy23CVl1rr5nuSMqzI1Okl8PgHEWj1sVVx+GVdfmVrq4O0Shp/Wyj5KxXG04o9A4bHiI8zj/2qD7aY9fU9rpph2rLM1L8w6yXU1FdHdHxv+4zZoSdSvVoTU0hEu2z14DesaOtfrKX2fM11f20izFTbqe7zR+4Xx/ttksqtkLGiKZUW+yzgILPq3icMZjIJEt2clWOcKb6omxljrN/VcY5K2R4GTs97h3oT970vdTltayWlqSdlRWp50u+umKx7BJqjd+kE1e+AxEhMx4XqdTYzQxvZyfRHXemr9laKT2RS5WuzglHV6d9b7wVos+/CKcIc1oCA4b2Nnd91+7yCYTOulQ8UcA9DHg1O/R9tBGWWkF2IOfNT56DVmnqccYrIeqUZmpVfZpay8q1tFix/Sj6wU53m29xaCdk6K6Rdlqvj/rlHoLQvtRfVhAsKmMbtt12gLbcPNVRGTXSyuNFr9VWS72AiR/GBBdfRYtGiwPg3nv20RnGrNPIEaltkK3ZaEOrrTINaVwwEctBtU5R5sknbZ8dE7LwJTeRluPKSnW7ZL5safHCUiyWjTN1xl/mxoCicbhMSaajUSstLu6Z+sPi1DtiuzknTqd+kgd7UyY71XJPmzXb+VIl2uleYu414oYz5OYGnEmTcAIaE/2WiTefshHGoA31nZwS0Esl5IN7t8sZw2+/C1FzLLNjpoObk6Pb02PJbXa4DixfkXqt+fAjZy3dCRn2UpyfdhryS5ZZOtttLwuizE1obUty4xG/6okTuFSzqosWozQ1QFZtjUXr67NstXJETgNMUWaPjQdyRr9Z1Lg02+/UDxaXu+PKtlTb3NUmcnM/dyub+TMj4HwXyVyHS3NEADO4222b6vQ1NIRod2O2bo/dUulQsf12/bTzTgPk5VfKIFcEzCIjnavTACe9oY4Ie3yuuw4RRvuQpwr4sIbTSzi4QKnq6NIb6nU5LT7VSwbDFRvsb7dNP+Ey5/eMqWWddVx/vX7Cy4FWzt1RXNxFH+vWxpZqTrzd3U5UonXXtm7OWGohc4hBjIjlMud0blTRTlXtGttWcPINUFVHpi922HdVdfNV0WV5kQjOIpniJp1PXTd63PNm62cdbKAVb93HbUtGQBfBabmLKMsWx1qy3+6iUaJoNClJdT1IchCplqvJPPZ0R7v1u7HTkRfOMNIIuFbdenv6bPMKxctw2bDu7oHUZOhJfMwhrpgZT3JaqeXLrFjn6ObeYi0XsM7xvn4d6dl5dNuUXVKSo9/mjCdLOBV0BMJBN3Ao2LfjDv20wfrpv3C81b/rTn2Eka8TDtGoE1WfVlNt6czmNGSSeNYZvXTqSb1kf3FPVUd+CUfVLlXdTHRsj5Sp3F4mz0Lay5zyq08huvD/emm/fSzMnHi8pmG5xP775qdPzJjo2qbaE7hesSMFZn4vMnARjq/Qc9ih/YSlF3ZHWJRnir0YAAn5cPj3+VE/YX0zaNlugNEouJJh/gL9S6QsO5Lun5hCGxxeRDQLNA6xFg2mIrHY1+vbzZCxEb89p9lN9P2iRUlnsLrKLil73kkuaglHsMVhVhXlTmHu99n7f8ftU9/XcJJjp8VaLMfOTlfl581L53e7vKJPseRGzPgKfFQ26NLjncn+c3tv6ewYoEceD9OMmdlxz2TPTKP+3LnpmM2era7ltk/UkjwoYRG+IJDfWeWLSaUttMY2oxSE1rh1egphc6fxWBgjenyMQeX4qpwGlX2Ztjyy18HstJ2GfFMTjuUT4HDCcc63Rbvv2k8H7Kd/Y19vnf6UXT2gf5RiOy2UySEWk3P5p7fesj/tJUeV1IYGVYk7+gHGQGWXnfIbrLjTWFxu1W84k1WyE6ziO/Qn/YRzWFXuRP/k0zBhOYK9TEcf6uAFXcS6YdXJSQdPt87Dj0Xo5Vfc3X57bDOz0OXk3KvaOXcuaqSH5li6YwiuRYpP4S5fkcov7nlPPxuh3l6ixYuzt0vlCMdWhqi5OVU+bFlh0wlapvDO+2Ga9W26nDfezm6bkKtaGtKpeJnPq52ShH6OvUdAv/e9112WEjGjFLSG+WFTU9TlRR5TNxIwXYm3oJuMGbUx0otwggWO5/ou9pVFvSmr6TsYbpZA+IEf7G1uxjFzGOu8O0TmSrbSXx7TR1v8UA8bp10NIA7LUX7osF0dynSDahY+2+NwXfle8E1SrJdX7dLhhc5ylLHKKtlbpbM8YfRow+Gtzi4rFkvyLDUeuX/gsI3dosVkOmRJTudUPqtOVM6mk6Z+/fGjWb3Z5phiKYXTZMaixSGyXW7N+rEWM9I+PP1smPocbJw7L9VtENfHL74M0bx5RPYrTaf08plQjqUOTtv4NTs4u6jTmsPLgqhnD3FpuYz8Qq2dD3nVjPe8Bc74tmdYpgJ5HIqPQOqZW3x7ysYC8RJKLFY2TUppSKaPLqQwJjJTbC/kJciEm16T4kW3g4wZxWMc9qcVdeU42thPeIy+uaZjt+YP+ujoX6RfzcWjb3l2uSax9EPWV6h0jQdPDOBQTpqoZ/HYsXp8dq5oY/YBEHbpOOsPfWlr0vGBhd/80pgasgv1Ob/TDv20+ab9KS+RACsntXDgsG+tU5lbmsCqWfPaIG7MYsYpnD5x5dYE3/mrjd+McIR0lOW1baCDglgsnYjdD155Nfst76AD+tLO0XRpzhSVk+TEHU8M+lFWX0ek+oQ5yhHss5w9xk/G/tEF8CEI5zYeR84KsRj2kE4/eZYZAwSLI/UIp/n771NpyL3/QYg6JcdxlOL6DV6EaS+l6wR95ivp9D67twxGIzz5VITwslyn1B6DnPjTj4Cz4K5VbJ8oylXxx5+GCDPIolxcM955N3UdUyHf9xC2cJwZgey//sz1uXSIIlBRmX6xyhWKsWOca0YqiDDT61yaTq0w+Ie5eCQ9InGhjsUsWdEo0TZb9dP55/SSzoxeVWLmadz47M6epSH4R3nbOT+sxUAJ/WSXPXGCnUI0fFgqTXYQUktyy2Frt/3360956QgDnp12GCDhZMqSRye2n5P3JxU3O5nP67S4MYubfWNj7hqWLQvRwoW513dTs8ZwenX5a2qS15PpMyMkngChfk1i0BeXnCzQRejsFKlkjCUNTvydkpOZ5E5NYXmG0zmaypU5hxd4K7NcI2X7KquIttwi83Uk7uDsYWmEkyX4nYEe70riivyHDjPfn3wWpjkO613BH1O86Nct4bj66pnt/uobZzfDbhv0LZX280VehO6eAZo1K0wCAzdP6IQMxPMV+wKjDEHIR1q8qNvRkYohynql5SVVVVb5Z1+EDKfcSoPHaQkK6ByKh4DzmVg8e1izCwTETJGLKoFkxedJ/TTMrWw42jo3PLE/5jiFw+5Gb1DWfw2rH6ALzuulIKw/lV9wBJZxhcODMi/Dbrv0kXAyZbnbJ3ZY6Za2g9I5T2QZxU5jdgqPvGEHlhMhFiHWYt2scS6KgaagCZ5CxN/PC5HsxNbUWFpV+5x++ZVlN7gi0noEPDYHTQ75nEPrrR2iJts+17JsOQ2n+bw/SV6RXKhIi/6QMc+2prwjsZ403pUq9PU3U2/tqm0YUQszubMd1ruiTA7RaDLX3JxM48XVSOrkpllo//2aROMgfjPiiYVBGvz7/MsQzV+Q7M/BAlsCW1wCYxvZdTYUSuqav2CA5KUTy402ApslS9Id+pUrk6rkD2Qslz5M4jRoTtbiVDEQSP1VFMMC1hk4BGpqkheBQhi32Wb9tGvAXvDJ9tZ5JlxyudCJR7o93ZkkF6Ys30fmtQkHJV9r118nXwnu69e4tF3sq+xek1VDOGVeYWZJze8ozsX8pLirjaVI4kM7OjNj1YlZNWgZMDcMRIpI9TVHya+xGKXjD9ZIPke33xC7pFnVyZOJRkQLe21c8wcDjp//brRtn2h35N99L9UDHZMclEstJ6qstLKyw4xr34gRFr05ltwDV8y0WyXJY5UxOz1p4oBJaFmZxEc8JRPnuMlgHCZPCBtHopaVVh0zIx1U/S+ecIAVHwI65he9SGoF0Z5YLJV9h+2Tfb9kSZjaEwMIcHUZA+w5c5BKD7JTHo0my+PSvr+NtidUSS5OFQsB68wrlnbWG0gEamqdL0RujdV1HrC7ws4Fdngxi5WpPWIWIhOPqky33U71OxwezzrxBZnm9q16e1vE7N748USbbmwvJapOPOK2l1QbN17QVDNLKMsWalw8hoesNdcYEBLenQAAEABJREFUoA03yO33EjecKdxUISefcwb1SynAWbHbO7yBKOpiqUa0KemoyLKGK5yMPXdP58dsLWY+t906WTa8MbUvOw2nR5avSotBbty2hMCJf/ttU3U48WSjrb66gVfC0YJTKvjHjLFkW0dBJZo8KdnGJJVIrJmNS+3EbO3GiY8E4TPWceM8RR2co8KxRd4prJBeOtsy8T4FznGsNxb8qxoDB6Q7HF5mA10VhB2qctAbGuwtB5VItZPG9ts442LVIpLbAxq+3IlYFVY0J0u22WqADjm4jxqHWzSVI2+V8rEQCLDDWwiUS0mHg63iUZtcpDML5NZ5kOX7ndax328bZPmYVUEej9AQD+XQ1Z2cJTp4v/RL1F57EJ10fPoLh0cf1Ue/OrYvbe2v31jC6YUO+VE08tmCjnOUTUYpljstYYo2ZnY87O08+ADrhUM7fdNNnOXILyuJOo2GI3L8r/roB2sISu5xbeKpWFfCOcwkaUxiLXgmHjdl/dJPYf11nR0+7LICmfZz9KD9rcr25QWYtQW/2KMXaUwCwBlGWieEQsnf8QsvJn/HkyYIuogtaaNGWbH9uJM0C7t0WWodO+8WPxwgNy+X4j6w1x7O5wxki+U/SCPsvKOFF9IiYNAk8O2SlpYAq42MwXAo0fT29sy2C3kc+4dAoiv8U8CSyxMBvHFfni0rTqty/aJartZitibXuvnWq63JT0JFZGDwQxKyJLzlPnmS8w1f5tNJq7Zn06nLPLkh4Oa8gKMyzHBY7Zoi4dz6X/XhHLt8p9lpO0+h8huub7W1tTWpUUxOxGJJWqZUY+JDKJ02R91pfez4cZa+TPJUZXFpBlnFo9oSUH5i09amqp2kY01xMpc95TQYEk+R5ME3JI1owjE1jB5NNDmHvZhTpXCuEAiww1sIlA0d0cbcLxZG9ZL8GzEiRKFQqCRtL7bRfp8vYvZd9QjYz/bnu+TBT9uE7Fy3ZxP1deJC7ZagY4suT021f9cxL84LeamSmEEXyw0ytbG6Wu86tfMOmaQQdXVnLs+3VDimcPi/n2dJi0vrRi1K/kcx+IjbZiyzS9bDMbucJIcYnMu2JEvTU9l2cHjk0Yr0SjbKtls7n+dVlen0EdEkTddGmzrOFggBdnjzAporZ0Jg+PABahyCjn4mTIJWFs5xRixo7YA9TYk1jUgHIYgbtcoW++NSFV+Q6Ko2ZRugiUfp2fjybSscQSFDLC+oTSw3EHSdWOxS0RxLdeDWmEKOW9YJmR3tIuVPHI9bcuUnbIJmlXh7jGvMzMoaYy1ELUYQNDzWF2l7HIvZKc752sQTIbe2OEsjwu4LqrJMdJ2Bk1c2ZrKDy3JHgB3e3LHjmowAI5ADAtVVyRmRHKrnVWVFLNWBcSMs2mhx2986t6jpR3GjTi/JjRIXzk7CAXAjZWSTG+4ceV1Wk7e2cllVyd44nKiiIvc+VgqWCmpzcKCbE86djtMkqTKTwokXs7smUTqo6BJLQZPiPQQMjkaN9P+3vmx5iObNt5oInVYq/RiNWjR5CYhF0Tva+71Pf5MIPQXM5TsCYd81sIKCIpDPDEptDjfSgjZOUobZYyk7ZJPCCQoSAGOzvJRTW1s8a7/7zp3uWGK2Cr8rsTqnSzHr1dfr7809ntgBoEaxk4TYYspdC8uLG2tPVW/q67TUq2tgTbXzuVBrc5ZbEo4wzi+VfWI21/5bF8uR7HSVnELTa4zztEt6AdUv/VjX+3Xi4xa10j0s1pI68AmRc584OcmiPzAoW9mW7iataA7RR5+kyverfSzXOwTSe9I72XZJnA84Al6sodNpohcOT757xerYmYkHb+ZmKve7TFykhRPktz438rNt3eNGVrF5ezVmcdZIfGlqZWtxb4Drrq1+27wYOMrraYuhX6VTrHmVnSPBm+81UDhZXlzjhE2qeGQBZk9VukU7hWOo4tOhC4ddXNN06uTC05h4QmOvC8fcTpPzy5fLOSstf3jCovCxFBBgh7cUeilANuKRYb7mVFc7S6ircx6BO3MXl6rayL1QVmW7SBfKDjd6mhKPFN3UkXl7ewp/fmD2SLbBKb18edIuzAiBJ9qYzfkEl7ehEI+P3VgsHsW7qeMXr+gP9M+gg+Xjk4bG4e5bItvovrZ+jb4+7wdmToMHXYvEoF18eES3XqnxDZ53NcnrRam1odTtZYe31HuwwPbLW8R4rbpK+nqS17Ldyos28kXJCbNc1iA6ycmFNm8+X65ywS1odcRMnpglDJp9Xtgj9l71QpbXMsR612jUO8liZhwDCiG1ri7VsRZlI6PO19YaxTIQIS/Isc7gTqy1FktUgtyegttWIIV8BykQ0H6oyWdUDXsaFF8lQhkHRsAJgVrbGkQnnlxpjYpHjkKe821SlJZG7Cd+hUDAi/2ia6oLP/udDZtCDnCjjVb7hQOYzTZVuRg4xKWtw1S8KfSBVEc0pSxLpjaxRrYr8QKlnV0exFQW8eVUu11+53Wc2O/m5I673/YPFfns8JZwT4tRda5NyLRlTK4yva4XNZwgv2Z+c3nEXldntbCYOw1YFpTuMaLYBrMpqnRp825stNE/2TrG4UtMxx7VRzozQTryisXj9LEHv20RTpZYe+u3vlKRP4iL4iVKuR1ie8hYTKbqpYUTi+uluOd0aujUk+4vV2fCMc/0u6tv8NcGlh4cBNjhDU5fsCUOCODN+CCt7V1rzX465cRe2nP39E9MOpifkbR8RfBG/F68aBRNzGCpGr/jdv1U7Jf+VLb5RR82bICmrKbndHd16VkhHB497tLlEk6WWAPpZ0uG5+j81CQex8dLxBEUGDYmljUIp1bQgxKvPkX9mxG2Z7I1LhxexXsjqFuhGICjTDcU9kquaxXz2RFgh9eOSInnxUi+xJvh2nwvHDWV0lVWSb2cjRlNnuz1met+kCo73dBralPbJOpmmgkRPF7EvIWWGsWOTue+sdeo0XzpqilqPUK31y/1vHik72U71lg9N6yELcLB8tImt7KiUatGs8vZ3HjCWRfOuyWlcMfRo9PP+1Xz+GRva6vaWfa6VdGo1xJZnh8IhP0QyjKLh8APN9G/YNdUubdTXNh1Z5fqEzsv6Dw2iubx2Dmboya+nGRvsfwSlkq/7ocG7LL9yFdprIvT0VudmJGy8zbm8Ha5XYZOfuyYwt2MdOwpFx6dWS83bRXnvvjdu6nrN6/uNcidHcE+L8V6XT/6oytutT0X2VWVVl13WKdy19f3k9OEjbAn3pnKny33zawwvfNu0on2awmfsC+bPVxefATY4S1QH6gcrgKpd1RTm1iP6lioIG6+aR/tsmMfbaLpWO+1Zz/ttUc/jR6lEOgxOeLyjK7VeAkr2uixkXmImzwp9cYiHJI8RKZUxcb9KQSfMhPGeys4FEre2LyQrBr8eCHbrQxhSyzmtmb+/F2JWb/axMtKbiRi8OTnri5ubMmVd2VLrjWz1+vpzs5j5xCzsLn0h12Wl3n7dSlX2atPSa9Zk3jJUazHTedQU1rbQoOFw30azAv7oOj9D0LUl/9qN4ji4AMCLt0DHywoU5FBuyB5BfNo43H+LjsPkM5bqdC5mvFIartt9GedUSefsMGGqQ5hrrImT0za7LEvlatJZr1h9antEw6JWTiED+PHJ/urEDCINY94kUcsp4kn1gsWQn8p6MDgSXwNrBTsdbJxhY+DjPkLvR2kOdnvF81+rg9rSL0u6erV+bCLrqyg8PFHKYLSE+l25Ozw9vT00jffzqfPv55DnfEchqrptpQVJQiPOQYUn1IsB6CbmlIvsFWVVqtUn4+0Sh2OCpJfj78U6jwjNzToOX7VOSxn8cxIHwRVevDiSSazMv2exXKaeFd+DkxVmfVJJjy9LIvF8pcmtlurqMyvD/O3JBgSxPmuWkbQstIdTj2Kj8bMmxeM9upaMWmSM6fAy7mUqUFBwJXDO3/RMrrw6n/TIb8+nzbZ41d0wLHnmunNf/Qb2ucXZ9NZF99Cn3z5bVDaFkg7ajUeoXtleEO9u4uSV3oLISc6POnw4gs99fXutFYW8SMXDbZZWneWZ+YWN+5MXHjZSWyvlomPy5II1ObwSD9ZWy8VpN1I9CzOjyuX5TixmKUTM+tWSu9Ynbju2mcmRe2ttuynrY0wblzyuiLKCh0X4lxTtUnQxWN6+zICMbhbtFhw6sVtbaV7L+qT5hAwEVLr8LKowEsPDeYqFgJaDm9Pbx/dcd/TtOfhZ9DLr39Au26/Gd1+zdn09H8up+fuu4ruvuFcOuLgPWj23IV02PEX0mU33EOtbR3FalOg9YoLRiGMrKyQfqmFUFgkHW7X2MLha0i8TKcyecwY/y7QOrNIfq751lmOEgoV/8av6huvXtxTyS8WPRTK75wTTl2x7Her1+vlONGo2gJx3VXNwq+91gDt86N+yvQOQF0Og55o1LKppUW/b3F9smpZR+Hci2U0FjX1KGZi/Z5pFNeO/qFxazFB/uTTVDepOsMWZ2YFPgQWgdSeVJh5/pW3003/fowuPONYev6+v9FJRx9AW226Lk2eMJYmrDKKNt1gTcPh3Z0evO1CuuXy0+m5l9+mI07+i0La0CHrzLYVAw3xEox73eVRQ1y0M7VmqM222bFYZ207JTj5utqBnI0RM3zy7hw5CwtYReHUOZk1Ipo7Zk7yhiLNaWYvKDiImVg3M42qnVoK0aZo1NLS0qo/ELBqFP44lJz7wqNbWI1aDm9DfR099M8L6ZB9d6JIpiGwYfsOW21Ej9z+F1prDcViF4NnqPzxp3tz7+lSdsoby+CTzcW8GeZ+1mSvKWb4ahOPuO01GhM3Yjud894hoMI4k8PunXYPJRVYlJjF9UptbY1XknKXsyKAH9/JvTVcM+gIaDm85/z2CHM2V7cxjcPq6ar/O1GXnfkYgbwQ8Ovx/4m/6SMEt8aFI+5qZHoMiceZ+Wy+7s6S7Nzh4E/IZG8EcwQSAZ0nL24Mb262uP0cPOO3O268pUf3ODnHuSAxi6urR4dPOL2Z1lP7MRDJ5ylNpnaNHMlPMjLhM9TLtBzeufOX0F0PPmfuyNAnr+Ae6uhlbj+XaiCArYs02IrCMm6VAULwWnlNdepFuSaxz6RKz3bbpPKr+ApBHy69LFgIfYXQIW76XusKR5z7Lcjro73CoLqIL4V60YYa229UJXOVsQNU7XJ3jWwD4rY2lTZv6B3S6zVw2CE103pqrwci0OeHTMhtyHF7NNTlUP4IaDm8La3t5oto2J1h631PotMvuIkeeHI6fTt3YfkjlGghXsJrbmlN5IIbyS+uyOmgWmzfVzaodnppl7jJ6MqscXCIR48JUZ7vOOmqL3s++0tCXjV4uOLmG20MeaUisHLS18AH1lRHw/BkxbHAR2JNrXVeLF1mxX6p6uJdRP2CluUGHAEth3fDdabQW0/dQv+86kw66tA9aeGS5XTBVVNp36P+RDsceCr9+fJ/0RPPvWbQVwS8ue7N6zCeI5167nUER96KObUAABAASURBVH/7A06ln510MS1b0eJeUIFqyI+f5HSB1LtWE9I6A12LLfsK9XUD9Kcze2ndtZ1nEcsFgJFN1s0/FiuXFqW3Y1gR13yL9bSZdgBIt7j8KW4HpV4ggtliL+QEScboUflfn8TEjXjhNJf2uR3ANDVZWnR/F8JGqxYfg4qAtrtRX1dD22y+Pp163MF0703nDTrAh+2/K309ex798dJbafefnk6YDc6lsUGtc88j0+gro30vPXgtvfHkTRQJh+m6fz4UVHPZriGEAG7KNQ57Qg4hCAabKm5QgwSfEmJ3B9ULRLnclLG3p0/mstgCI9CcGJh5uW54+HBrD7DGxgI3RkNdRUVmh7ahQUNIFhYxcSNeOM3CXpRiYWNRlLNSbQS0HV5ZIvblxZ67X87+nj7/Zs7gxyZWnTiWKsrs6v3MS2+Zu1OMGRWlYQ11dOQhe9DDT82ggYHMP3QZL04zAoyAGgEvnQO1Fm9KahO7OxgPfhwFenRTdpRtJ06aODSuQcKZUGFux6UY+drEjge5DHiEvaq6TYmdQ+bMDQnWnOLBwVpXTtUdKw0blp9NjkKZyAj4hICWw9vb10cffz6bpv73GTrxj9fQJrv/kg4/4UK6//GXaPzYkXT1BSfTyw9fR0/dfTlhJtgnW4sids68xSk7VEwaP8a0Y2XiwxrD6iqpsiL5o6+IhAg0BHmNJXhAQzAFJA4yHWVuAuomxFBDbaWpF7GdBpmYDRR02UZBQwx5YcPo+prU77TWVEVM2ZDjRTBUQJ0ZZFuQNonGoaoyqVPmF/Ta6qSNsFvYJbffEEPCdvAgjwB5gh9p0BCgX9DdxNCB+iLUGLZDrp0OmyEXegQvYkFHWgTYC14ElAs60qCJAD6UQR9oKEdeBJSDLtsieEEHnoIXMWgIdhxBQ5BtF7JBl+XABtAQwAO5CLABNASkQUNAXdCEbNgHenVl2HiikvxtCbqQDx4RoAcyEAQfyiATNOhAXpQhBh0BdVGGgHaDJvhBQ4C9oKMe8pCLGHTUQRplCEjDRgSkEcCP+gioA5oIqAO6sAO8KEMedJEHDQFyQZ80LvXyDTsQwCOHqooIRRy2k4Qdgh82IKAeaNCNtAjghU47vSJxvUMseBGDF0HIFPWAK+SDRw5oE8pkGtLgHd4QQZL6+6wYGciGTUiLAF2gIwatpsq6RsA2yAENZaIebAK/UxA8qCNCnTHYAR0yBA2yzfoJx6+r2zpf0RbQBS/ahzrIg440gqBDTiRUCZK5Nh96kIGNDcY9BumQIRp5pFE+bkwYSWpdGSLURwbyRo1Aiqi7s8KUhRzaX5dYI9zXa9VDHZUtTVHLlh6jPWgLZECnSCOPAHsgA3qRh42IEaAT5UhDFwLS4QhROBxCkiBP1JH5zULjgHLQjaTZFiFDpqNMBPDCTpEXMWjCFkGDXtiOOoImYrk94AMdMfgRIy8CeGUZIQqZRdAJftEfJtE4wHYj4j9tBLxjtM78LPI++mw2HX7iRXTlzfeZJyq2HMMjfji45/7uSNpr5y1o1IgAPm/J0q5sxZjFxRreGuk13Ooq60LQ0RE3qw+rraBK6WYSMX7IoCGEpV8GeEBDMCsmDjIdZW4C6ibEUH1NhFAXsZ1m0uusHyHKZBuRFwHyYHJdtXFFEkQjNn+4Rjshx4sg4yLbgrShzvyrNpwdoUvmF3TZRtgteOX2Q9CEsWETF/AgjwB5gh9p0BCgX9DdxMAH9UUQeRELOmyGXOgRNMSCjrQIsBe8CCgXdKRBEwF8KEM7QEM58iKgHHTZFsELOvAUvIhBQ7DjCBqCbLuQDbosBzaAhgAeyEWADaAhIA0aAuqCJmTDPtCrKsIkaMgLupAPmgjQAxkIgg9lqA8adCAvyhCDjoC6KENAu0ET/KAhwF7QUQ95yEUMOuogjTIEpGEjAtII4Ed9BNQBTQTUAV3Y0d9n/VaRBx11BS9iyAUdMfIiwA4EkRdxlTEgrzAcU5EXMewQ/LABAWWgVUZSbw3ghU47PRI2BvjGtQEx6ooAXoQwLigGUdQDrpBvkFL+0JaJq6TqBAN4UQdpESMN2bAJaRGgC3TEoNVUWfJgG+SAhrIaYwCPNGwCv1MQPOBD2HKzMG31wwjBBsigxD/IRn3IAkmUgQ90kUf7RDnoSCMIOuSobJTpQg/sk+uivpAnp4V+yBBpIQN8KltGN0UgjvDEAm1BBjpFGnkEyIIMYYvQgTLoRDnS0IWANHhEGvLEzDZ0Cn7wIaAccpC21xN0lIkAGuwUeRGDZpcNebAddQSfiOX2gA90xOBHjLwI4JVlfJeYiYdO8Iu2Cn60SaQ5LiwC1hUhi85xY0bQj3fbmkZEh9H01z6g629/mP55z5P03Mvv0NLliUVLWWSUYnEoFKK62hrq6u4ZNF+k6+qsZ1itnb3UI23V1tc/QKAh9EvLHsADGsKgMCMh01HmJqCuIcL86w/1mnrb431mHgekhTzZFtlG8IkAeTC5oyspA2Xx7j5TtpCVb6yyBXZBH0JXT/+gTplf0GUbYbewCW1GfRHAhzLwCBrkgYaAtKBDP2huA/ARMhCLvIhBQxC2QA/yIgi6vA4V9go7UG7nFWXgQ1lNtXXeybygoxy8si1oM2gIwBN8IoCGUF2beg6AhiDbLmSDLsuBDaAhgEfIhg2gISAt6KgLmpAN+1DW3dtPgoa8oAv5oCEgQA9kIAg+0FEfNOhAXpQhBh0BdVGGgPMHNMEPGgLsBR31kIdcxKCjDtIoQ0AaNiIgjQB+1EdAHdBEQB3QhR3NK601m8iDjrqCFzHkgo4YeRFgB4LIi7i7d4B6+9KXP4QifST4YQMC6oAG3UiLAJuh006HbaAjFryIQUMQMle2WvojFf2DOsEnAtpSYzvnUAZbRF+IGHTIhk1IiwBdoCMGLd5t4QjbIAc0lIl6aAv4nYLgQR2EphED1GOcj7ABMkBDgGzUhyzkRRn4QBd5tE+Ugy5+6/MWpdvY0TFAQj/kyrYjDzkoFzJhAwLooMlpoR8yRFrIAB9sQT0E1EUMOviRRp2WVutaAJ1oF+giQBZkiLrgF2WQgXLkIRMBafCINOR1doJK1Gf8F/wWhQjlkIO8vZ6go0wE0GCnyIsYNLtscc1EHcEnYrk90At6VaV1jRV50BDAK8vARBno0AlsRFtBQ0CbEHMoPAJ6Du/YkXTFeSfQjEf+Tk/e+Vf65c9+bL6cdvE1/6adf/J72ucXZ9Ml191Fz05/y7iwWj+OwjfFH41Ylzx3/uJB4d8vWGKmhzfUmXFrR49xIRww0zjgxgIaApxH0BB6jJsOaAjIiyDTUeYmoK6Q09tvOLyGLW2dSeccaSFPtkW2UdRHDHn4MbfHe5EdDOYP15AtZOUbq2yBXUJpd08fCT0yv6B3diVthN2CF20WMhCDD2XgQR4B8kBDQBo0BOgHzW0APqgvQtywHXLtdGEL9AhexIKOOsgjwF5hB8pBQ0Ba0BGDD/TKqgETL5QjLwLKwSfbAj2gIQBPwYsYNBGQF0HQZNuFbJTJcmADaAjgETJgA2gISAs66oImZMM+lHUZg54+YwCJNIKgC/mgiQA9kIEg+FAGmaBBB/KiDDHoCKiLMgScP6AJftAQYC/oqIc85CIGHXWQRhkC0rARAWkE8KM+AuqAJgLqgF5fbzk/4oMJjY39Zp+iruBFDLngR4y8CLADQeRF3N1ruBLSoFzQDReOBD9sQEAZaD3G9QppEWAzdNrpsA30Ybb9mUFDEDJb2622hcJ9gzqFbMRoCwLScoAtoi9EjHLIhk1IiwBdoCMGLd5tXSNgI+SAhrLmldY9Cs43+J1C3Bjkg18E6O42HF7QIUPQx4zpN/tI4CLKwA+5Ii/ahrxMjzvYiLXKy2OWjZAr2448dMcN+4RMtA8BdNDkNPSBDhkiLWSAD7agHAF1EYMOfqRRZ8731v2tt7+PwsYgCXQRIAsyRF3wizLIQDnypsx2S05VFVF/4ncNnFCOADmCH3kElEMO0pANOUjLdORFAC+wEXkRg2aXLa6ZqCP4RCy3B3pBxxpo2CjyoCGA10kGdJr8ZLUbvAiwHbFPgcVmQEDL4RX1Q6EQTZk8jn7y4x3p8nOPNx3gWy7/Aw2rryPsZoD9edsTj/pFnVKPsVzjgSem05JlMWpr76S7HnyeDt5nRwqFrMeOpd4+tp8RYASCicAwD95wR8tqavhaBRxEEI/Qvfj4wSpjhVRv4+4A7pU7YUI/GQ88tRpaWRmipsTLdqIClkcgXWfNFSE5JMLw4UOimSXRSFcOL1q0aOkKemram3Th1f+m3X56Op1w9t/MXRrWWHU8HfPTH1GNtN4V/KUefn7Q7rS60bZdDvk9bfXjE6mnp9fcmq3U21Xq9k+YSDR2TOrIudTbVBD7XSiJRl0wlwmr7g29lJo7Ybx7a73eOUM4me4tSdaIJ3YXwExbkppM4RF1MpeaEs5XLJZKL1ROPLbPZKPKFr9tF3h2xr0bGNXXe3dt9rv9KtxBl79KhzyH0kZAy+HF7OZF19xJe/3sTNrt0NPpzItvphdmvEM7bLURXXneiTT9oWvp8X9fSmeedDiJl7pKG5ak9fV1NXTzZafRa0/caO5E8d9/nE/YoizJQSS2zZFpnPYXgcoKovXW9e6i6q+13kmPRr2TVU6SpqxqPTLPt0011tL8fMUUrX5NtXdOixeNEI5ztg38GzXO63jCIautcW5jkPsubr3jbNwrUlEVuIg1vaml/ubCibu/wFPYqNIaT+CvKi80XTjC+ejNVhcz7bFYNi4uLxUEEqd8ZnPnLVxqfklt/bVXo/P/cAxhHe/MR6+nC884lvbZbSsaPVLjapVZReBLG4fVk2onippqZ/OrqoaeQ+aMBFO9QGDKqgMUjXohqfxkCMdK1bJC3BxVugtJr60dMJyq4F134gmHr5BYFEJXtNGbgVYhbBU6enqs82O4yy/8dcWtekJOEOJoNAhWONugOjeGyrXIGZXiUrUc3g3XXZ1ef/JGc7/dn+63s7mOt7hml4b2utrSsJOtVCEQLLpXt5too1eSgoVPqVjjdibS7exfjWIGNBs+sZjFUegbcjhE1DicCOs+LQvK55jLEga0viuxfANpr0NbmwG4ITQUNg7SXzFnm4UZI5pKbwAhbOc4+AjYTnlngysrIlRRZl9Qc24pUxkBRkAgMHoUO8YCC3ssnINoo/MNOtPlsqbauY5dx1DJD28kwuP1CRPK73yrqbF6sb8/ZCU0j60Jp1STvTBsOWiZsprVp/YZ/qYRzsIah2fGyS7HWUr+1EaeFMgfxABK0HJ4P/96Du1/9DlaATsZBLCdgTUpSOt/o6XwI7fvCZNDz9YmbkI5VPWkyhSP1pt6YkwGIbvtkr9jVhLnVAYMci1qMB4X1ya+bJWrDC/r1QTIFi/b5ZesaKO7c19cxzsVj/3b2izHL5O98sx6a2smztzK6muz25CbZHWtVSdbOMa7MjuyagmpJV5vqHcWAAAQAElEQVTJSZXKuaGCgJbD2xnvpllzFpjhB1Mm0Hprr6YMYQzVhwp6HrRTtf7XA9GFFlEQffX1+V84V1ml8Bd+GZympvzbIMvLlBY34kw8qjKsB1WVMT0dgZYWoq6u5Lk1dmwync5dWErtEFpeVVNt4d5ZwHXD4jrepXixq3+gcL951ZlVWaUqYTojMDQQ0HJ41zSc3BOO2t/86tgHn35DG6+3Bp1/+jF02Tm/SQt1tdVDAzluZVEQwKblmRTX5rh+MZPMUi4TN+JSbkOp2I6HD52dxXdsSgUvHTvlWU8dfvCIZQRdHs0qQqb7UNo1ahNPwVQz1qXdOrZ+qCKg5fAOa6gz95596cFr6Oif/ohumvoo7XjQb+m2/zxpfnFtqILH7fYWgaYmd/JCiS/YiBscamNGc/w4a4YHeQ7pCFRUMD7pqBSPImYki2cBaw4aAoVaq6pqt7imqmasVfUKRRf2OelzuxzFSQbTyhMBLYdXNL2hvpaOPnQvmnb/1fTHU35O1972IG2738n05azvBQvHmggwW/4IbLnFAO2yUx/9cJNUB26sT18/yt/i3CSMGk3mSz2TJ+dWH7XkmfGJE0EJbhCzSrU1qf2ai8VrrG6tIdSp21Cvw+U9T6abt/faho7EWCxkNtbtQNqsVOTDosWW7UU2wzf1jcPyEy1moPOTkrk2f0w1Mz6lWOrK4UUDu7p76PHnXqMbpz6CLO27xzY0smm4mR6qhyB9XG4oPcLGJyp32WmAxozRd2pK8RyFI3bBn3tp9136cjZ/3LgkRn7fSqvzXFYiZpWyzXyO0HgisNMO+k5zRWXO8HJFFwg0pe+d6qJ28VmjLl7uFY5Zl2I9cXu79+0RDn6sJUyxmCU/aJiHI5ZdQT2i34YPT712uN23OKhtG8p2aTu8HZ1x+s/Dz9PuPz2dzr/qDtpt+83o6f9cQZefe7zygwxDBVg4XkFpa20t0bA8R8+4OPLo1vsezdcR9N4ifyRiWYk/klOl2m9IqaWcYwSKj0CNcT2GFZ2Kl9mWLM18C8426IPsXIMbxz1XHap6TVFr2B1rsWIVXzZ6ZvRSa7u5LuGpSyiUatvIkakOcKp0zpUCAlrny+y5C2mng39Pl/79P7T3rlubju7pxx9mfmENOzjIwZdGs1BXCGTaA1RX0KiRupz58YnZCF0puBDp8sp8xby4CzvcXHBFnXKKR2W5YWBWpZzay23xBgHxm493eiMvCFJ0B2vjx7OTlam/Vpuij49fTz8bS/yJRSZ8y61My+GNtbQRZnjReMzy7n3EWbT5j37jGFpafXhGA8VlGsIh/R+sLgRw7vDIqDGPlSb19d7bpWt/Jr6aIbJpf7k5x6tNHqCJEzOfU2I2LFP/cxnRUMNADIQ64+XT8pEj9NoyKctvRk9K+XI12pYdlG9LuWVeIKDl8K42aRW68rwTtUKdX8MoL1obQBmNjamPTbww8bij++j8c3rNF528kMcyCo9A2f2MvD/NXXZKZmdbVxgGk+CNxXDkwAj4i4Dtqbq/yjSkT56cfBdAg51ZGAG/EXAlX8vhHREdRvvstpVWqKyscGUAM/uDQNAulP60Mn+pP3DxFn++2mqqi+715duEkq0vPhUai1lNEI4rcnIaeQ6MQKEQaFlZKE3e6NF5UdQbTZaU1Vb1ZqBqSfPv2MTLGvwD10PJWg7vex9/7VplLnVcK+EKjECeCOywfeFmLPjLZXl2VobqEyYoChNkHmokgCinyIflYIWGBx8r8UKn2Le3Os8dUrywxSsZ0SjRlNWcHd6mJme6V7pZTnkioOXw3nHfU3TRNXdSR2dXVhR6+/po6n+foTMuuikrLzMwAjICVZV8EZPx4LSFQHViP16xP69FTT1WltCWYqXwkot4USwV5WDlpthm/9y+ABus1uhZM3+BM1888VW5clv779xaIt2X/lT1ZfpQmp2V2z0U01oO70/334Wef/lt2veoP9IjT890/Loa9uedNvM9OvyEi+jKm++jE47cfyjiqWzzULkQKQHQKCj04zINk5glAAjUJmatuhJbO9XVu5+v1Zn5GspLG7q7Uzu6NvFp2VRqsHKNjcGyx4010Rxt7+52f+67sStf3nj2ObGcVOgMejMJjjYW7kleJju4rLgIaDm8O2y1kbkV2Y923pL+fPm/zK+r/eyki41Z3JvpnL/eRsf8/jLa/oBT6bfn/Z0mTxhDLz5wDcFJLm7TgqW9pjppTzSaTHMqiQD2EE7mCp8qhZu8F6ioZzS8kO6/jMocngQUesDp57lU78MX4To69Pqtuir4T2EE9uIxv2hZNGql8t371ZKS7zE/HIPmwIkBpV9bx9XaBr25oB+kD0TlYj/XyR8BLYcXahrqa+msk39GLz14Ld165Rm0546bU0VFhOJd3bT1ZuvR5ef+xnCKL6erLziZxo5uQhUOPiNQ6Ju4z80puvgxY/K7CRW9AQ4GrLmGQ5uCPUnk0IrcSOEittPPJQHF/CJcsQelOmeC2N6uM/FEQKfOUOapqbauEfE88CqFe9G667qc5R3KJ0WZtl3b4RXtHzMqStttsQEde/jedNk5vzEd3BOO2p923X4zY3Z3rGDjuAAIyLPGBVBX9irCrn8NwYekvsG6mcmWbrTB0LjwD89jH2oZL05nR2DM6PTzLHst5mhtLeKoLAG/GJzFi7DPcXOzZUQ0asV8ZAT8RKAMb/F+wsWy/USgorR2tPMTCl9l5+PY19b4apq3wgvkSzRp3KzXXy+7Q1jK5399nbuuK+a2icLJ0uk3d61SczfHrJPR/mJdX591Xni5VETMtnYWwYFVI1A6JdFo6djKlrpDgB1ed3gxt48I/GCNfuJ1Vj4C7IFo8bjYA1FDSsTY0dln1SdNyM5TLqCtOtly9MqlPfm2w8ulIuLJn2rGVtDFzG6+tlv1sx/FLislNWjO3izmKCEE2OEtoc4qd1NxAa51OVNU7phw+xgBPxFg58NPdIMpO57YwqzQfT+4y0qtNdsdTHTYqnJGgB3eIvduMd62tT9WKwQEhdQhHukVSiccdSddKroTL9MYgUIjUN9AVM7naKzFQjTaWH6zyeg7q3V8LDUERo4sv/OxVPqAHd5S6Sm2UxsB8UgvtUL2i0yuN/+aaudH0Sp6ql2c8wuBcnR0vMRq9CgvpXkjKxr1Rk7ApeRtns5671iLNZMajeatjgV4iMCkidnvRR6qY1ESAuzwSmB4kdRZg1rM7ZK8aGMpyth+uwHKtk6u0I/47DjWJLYHstM5H1wEGjWdicqq4rahrpZvssXtgeJrF0/2YjFnW5oT9FIfKE6cSNTgsDuNc6uZOpQQ0HJ48VnhvX52JumE1jbNHczLFOW118p+Y8n6WcQyxUY0K59dAoQMt3FlBVG19PEPt/Xd8A9Q+jkQjWaXkOsMc3bJzFFsBEaOKK4FtYmN+4trBWvPhoBwSrPxcbkaAcx+jxqpLueSoYuAlsO74TpTaM+dtjDDsIY66untNdOChnhFrJWiwxuoAmfb0MWTRo10frxdbEiCNHs4flwwMfKqj4bn8Olbr3SzHPcIFGJ7qlIZzESjzviNHZs+iJM5/WyfrKfc043DM+NcyPZX11i2iF0d3Ogu9VliN21l3tJBIKxj6kF770B/OOGnZqgxntkfsu/OZlrQEJ950uG0YPEyqohoidRRyzweIhCkG1JlpbW2zMPmBUpUpKJ0HPp8nL0gnVOBOgHK0JhsL4LWKNaxlyEUQ6ZJ4qnAosXW9Vp3+U65A1TDS89Ktotde6dz5i2ivr6+tAZvsv4PCLO833w3P63MXwJLZwQYgWIgsOrE0nHsi4EPdE6YMEBBfDkMtnHIHwF2AvPHsNQk5DPQF7PmpdbmcrHXtcO7/tqr0Z0PPEcdnV0pGLww452UPGcYAUagtBCIRt3ZW1dnPfJELf6QAFBID8f/so9OPak3vcAvSoHlFmLNqXAwcnm0XmA4hoQ6dtpy72Yxa567BK6ZDwKuHd6Tjz3YcHbjtMXex9PpF9xIV//jfjry1EvpxqmP0l47b0nrrrlqPvZwXUZgyCFQ7fBCUTiUdCZLAZDVp2S2t9bHTxIX8zO1Tn0jHDSnMqa5R6AmsVxCfDDBvQSu4SUCtQ7XKy/lsyxGwCsE7HJcO7x4ge3hf11Mu+2wGb39wRf0r3ufoiXLmulXP/8xnf+Ho+3yOc8I5I1AdHjeIgItwGl9ZLnNmNbUqrtA5SCq6HZJjY12SnHzunYX18qk9qANGJKWpaZiMStfCi9E5bM23mpleR3b28qrPdya0kTAtcOLZq69xiT6+8W/pZmPXk+fTp9Kz957JZ32m0OpcVg9ijkwAp4i0DQi8+yhp8pYWMERqFXM/mbbNzl/Q1kCEAjagAE2FTs0ulzeU2x7g66/pzfoFrJ9QwGBnBzeZStaaOabH9Oz099OCz296S+0DQUgS7WNpfQRjHCEaPQY643hUsWb7Q4GAk5rT1WOt2yxDo/Mz+lUBPAbTqVwzgsEOuPWpACfn16gOURlDIFmu3Z4P/psFu108O/ohLP/Zq7hxTpeOXR0xocAbMFqYj4vEQwP6HIBp0eCB+7bT/XSi1K59kKQ9rrMtQ32euJG5/fMlPw4ecwYuxWlmY9ELGchGrXiTK3ItDQjU718ymTM85HjR91ql1s0TXKxs0coxINb3T7riltYldpyGt32MR8j4AUCrh3ef977P/PFtHtuOs/U/8jtf6HXnrjRXNO763ab8rIGE5XCHmrzeYnAuk4W1uAiayvH+2gBHLG0XqurSyP5QnDr8NXXutsubcMNBujwQ/to5x2TDu/Ikcm0L40qglCnteL5mvHDTfzDaeIEflqYS/8IpzfemUttrsMIlC8Crh3eb76dTwfvsyNhezLAgj15sXb32MP2phdffZ+WLEu8WYBCDowAI8AIFBiByir3o7j11h2g2tqk8za8IZkusPm+qavJZ2CssKrK5QyvQowjORJx34+OghLEmoSt5b69WW1iTTw/bE10vC8RCy1FBFw7vH191uxJRSRCE8eNpq9mzzPbPSLxKv28hUvMPB/KB4Foo9Xn5dOiwrRk5CiiUMjbmzbxP0bABQLyXskuqnnGKpwvzwTmIagm4Qh6ub1ZVQ6DqzyaUHJVnZamlUojGur1Br1BOsdLBdti2ena4Z2wyij67KvvTHu33WIDumnqo/TiK+/RP+563KSttfokM+YDIzBUEHDaRxdtH9ZAtOrk0h8s5LNGHDjoBuGQ6PIzX3YEvHbI3PaRaqmN00uD2VsTLI41VqfBdwpK2bErJqqxFmtCINqo51zmaqvqGp1J3ujRmUqTZW5/E8manCo0Aq4d3kP23ZnGjx1p2nniUQeYH6E49c9/p8eefZXOOOEwaqjPsOGmWYsPjEB5IeDH2shCIRQKD2RVVav5KDxfx3gkbz+XtS+KxRBttDTXJmZJrdzQPk6eVPqDWfSgGHwI5xM0j0PRxZXyNbro4JWRC6YFTwAAEABJREFUAa4d3n1224pOOuZAE4Ixo6L00kPX0gO3XkBvPHkTHXv43iadD4xAqSAgbuSlYm+udgpHpaIiVcLGG6auXU0tdZer1XSMVVKrKlUlQ5s+fFjx218KK3N0ZwljMQvPaIkv1WpM7BVczo6q1VOFPYq13oXVytoKgUA4VyVz5i2mF2a+S09Pe5O6unuotrY6V1FlW6+2xGZD5MdyYtRfkM7xUYm4KahUlMKNXGW7Gzoeu/3hd710uhHs9aqrrceKdjrng4FAJOLODrdbhbmTztyMQO4IRKO51/W6pur+jGul17pYXjAQcO3w9vT00jl/vY32+cXZ9Lvzrqc/Xnor/eKUS2j/o88ZfIEtGE0rvhWq9WvFt4wtCDoCuaw5y9amxkaihoB8DLHUZ9eyYe1nuTwwddIzfpwTlWmlggDbmRsCYcObyfbbkCXz/VlGY2ikjVPEXUNvu+d/5nrdU447iO6+4Vx64s6/0oVnHGsK+f3/XU+9fbx3ogkGH0oGgSmrBc9UXnOWvU+qqgayMxWIQ/eN7kKYU1kRHFwK0d5i6dCZCYw2Wn0Ra0m1UlW3XJ6spbY2/5zAMZOkyZMsrDPxuCnT0elGHvMWHwHXDu8zL75JP95ta8ILa5tusCatPnkcHbLvTvSnU48gLHOY8/2i4reqrC3gxnmNwPrr9hGWNowa7e0F02s7WV4qAvX1wVmKMWpUqm2ZcjU+LyGZODH/8zha4utbM+HvVZnqkbiO/Hzq6sgvVR4/93X2ChMelHiFZOHluHZ4sV531Ylj0ywdv4p1xW9pbU8rYwIjkA8CYrZTZ0eBXPRsucUAXXheL00cn7+jkIt+ruM/ArV5vlDnpYXyBy68lCtkRYcPofNYNJrjvBHojFvnjWr2OW8FGgLWW8eyQYOVWRgB1wi4dng33XBNmnr/szRrzgIaGLBOzuaWVvrHndY+vGuvMdm1EVyBEciEwG679NPhP+0n7CiQiY/LGAEVAmLQpCpnevAQiOT4pbWoWEaQ2I3BzbrO4KFQOIu64tYTk9oSe9m6cAixplJHwLXD+7tf/sRsM15S2/Gg39JBx/2Ztj/gVPrftDfovNOOovq6QP1aTFv5UNoIYLuq9dYpjz0vS7snktavtqo12E1SOMUIpCMwfHg6TZey9pr8m9fFyg8+3p4rP1T9ePE4P4u4tmuHd9zYkfTC/X+j3//6ENpik3VplTEj6chD9qT7/3EBHX7ArowoI8AIMAJDDoEgvUCnBr/wJeFQ7gOjVcaWj8ObXC6QOx6F7j3V0oagOsKrjE1iqzNLXVmZ5PcDW36q5Aeq+cl07fBCXeOwevr1EfvS1RecRDdfdhr98ZSfU0VFhE499zrzy2vg4cAIMALli4DODcXv1otH137r0ZEfpBfodOxlnswIZFoGUV+f3VGyO4tiuUBNGWxXL7dNTmdG1P9S+Slgti3H8FXIjTfI3o9urA6FrCUhbuowb2ERSHF481EdW9lGL776PvX09uUjhuvmgEBQR9w5NKUsqkSj+Tcj0w03f+n5S8h2Q8lfQzAl8KxNMPvFb6vkfl97zQEaOYJo3Di1w1RTXT6z05mwralWY5CpnmdlifeI3MqrrQnRJGkbs2jUrYR0/vHjhkafp7e8dCieObyl0+TyszSfUXaQZsnKr2e4RX4gUKDBgKPp5TBD59gwJmZEAA6SYNhk4wH63Sm9NHZMkZ09YdAQjnfYfoDGSUsZiglFZWUxtbNuHQTY4dVBiXkYAUagaAhUebTWjp84Fq0LWTEj4AsCFRVE1R6/J19XywOZ9M4qDwo7vOXRj65awbO6ruAqGDNvaJ4KtZjJHTc+lZ5rrrEx15pcjxEILgLihTisS5WtjMetXNGXHVhmeHoU1wZPhSaEVdfwWtwEFGUXaTm8c+cvpguumpox3HHfU2UHDjeIEWAE9BDwkyvM9x8/4Q28bHZAMneReCFOXnaBGvEu64eTz5I3yCmHUOvxLHA5YDIU26Dl8La2ddLr736aMXw7dxFNHDeawvzcsOzOo7p6/5okvwzinxaWzAgwAjoIZJsNHF6Er7gN5WtEbZnPNorZaZ1zMx8eN06/B+dbPqZyXR8R0HJ41197NXr23iu1wrCGOh/NzS66ta2D8OW37JxDiyOfx+V+Lsbnl4BK6zz081FiaSERPGtrPZjFyuYYNDXx+kanno82+vOGvnC+xHIFHQfRL1uc2p0vTcxOy3KiUTlX+PRmm/A5XnjUC6NRy+EtjCn5aenojJv7AG+970nml99+dtLFtGxFi1LotJnv0fo7H5MWurp7zDq7HnqaWSY7z/Gubtpi7xNMen8//yhMoPiQEYFtt+6nNX8wQD/cRLohZqzBhYxAbghkc1ZVs7cRD+4CjUV2UnJDrHRq1SZmep0cxNJpRWlYyh+RKY1+ysVKDy51uaj1vs49j0yjr2bPo5cevJbeePImioTDdN0/H1IqGqABqqutoafuvjwlVFVWpNS599EXB/P/e+EN/rDGIBqc0EFgnbUH6Mif99FOO7LDq4MX8/iHgMohbmjwb/Cu0ulfK1lyEBBws7OKjr3GrVqHjXkYgYwIlI3D+8xLb9Eh++5EY0ZFCcsqjjxkD3r4qRk0kGFj6prqSlp14tiUEApZC/2B2hEH70533Pc0tXfEqa+vn269+wk64uA9UFT2odKjraDKHqiANbC6Knn+Bsw038yJNvrnsPlmdIAES5c8z61iR8VzSHMWWMjfyYQJ3l6HMn3oRjy5qKjIrFNejlVILHLuMK7oOQJl4/DOmbeYJk8YOwjQpPFjzPTKtg4zdjqsiLXSOX+9jS68+t/0v2lvUG9f6lfitt18A9MZfuh/L9PLb3xI1VWVtMu2mziJcqSV8o9q0oSkE9E43LF5BSKyGjcI1NUNUC3vI+kGMuYtAgLjM3ylrAjmDGmV/tynkvcPv8HdbtsB2nWnPpog3bP81snySxOBwDu8CxYto9v+86QydMa7zVlcrOGtqa4a7AU4p8h0GLOziO1h7OgRdOzhe9OUyePMorMuvoUuv+EeMy0OoVCIfvOL/UzdN019lI4/an8KhVNHkVUVYYpIi+AqjDRoCBGJF2nQECQyyfwocxMgk4x/kJetHngMVvMP9QR/Cj1inQ6VkRCNbEq2s6YqTILfizhFp5GRZZoGGgdgKtPdpCOGTEOE+SfXMwnGAeUy3Yu0IXbwr9I4J5BRyUXbUC6C4JPM1sZb1EEs5FRXh4RoLTmDzEZCyBCxQTL/RF7YLusTZSajcRB5xMDaIJGdvyJinWsog0yTV6KBXmGch2hJBJVBSATwIgg6ipEXAXmwIhY0xKCJYC9DuRxk+1Bn0EZUBCERRB0bOSvuiepmhLqQo9QZSWKFCuBFsPODhmCnRwwFxp/ZB6gvAngRUCZoSINmlwGaCIIXsaAhRl4E5BGAm6AhBg1hh22IVl8t6RSBhgAeEWADaAiChljYaKejnaAhBp8IsAF0fA5Y0BDL8lFuD+ARATKgF7LtfMhDluBFjDzo4EdeBORNeiS1TyOGcNCNSLCaMWgIZgaHRAANIZEdjECD7kGCkYBM0KHDyA7+oU2Z6Kg3yGwkwIsg0yETNASDZfBPpodC+BUPFhGykdTmm+cmZCCMlF6MRB5B1glJoImw5uoh2nO3EFVXhgd/dyp+1EVAuaiPGDQRhtWHTDkRMAmiESMPXhvZ5HVLN8TxXxEQCBdBpyuVPb19hJlYVRgY6Dd+QCFzPa544QwKRLqurgbZtLDhOlPojBMOo18fsS+df/rRdPFZxxHWAdtneXfdflNT9orYStpzpy3S5Ayrq6CxI5Mw1lVHCDSEKuMHKCogDRpCxLiRC7rMjzI3ATIhB/Ky1RNOGPhRT/CjLmgItYZjix/zsLpKgl2U+DfMaKOXQWULdIRxNTT0whbkcwlonyHC/KuvTfaHkI3yXOSq6kCHqSxxaKiJmCkVP9pmMiQOqA9eGRfkdcLwBksX6gp+0U6IF7JFmVMs+BHL5SOjFRBBuNALurBd1ifKUB8VZJ3AGjQ7v3x+QSZkIAavCPU1FYTzU8gQdPAiCDp4kBcBefAiFjTEwj6U2e1BuRxk+8A/YUzI/F0LnaAhiLZCHvII0CPLckqDTwRhp10n8EBdxIIXMWgIdn7QEGR6xPg9oX5lZcTEEvVFAC8C9Asa0qDJMlAGGgLai7wIyDvRQUOAbsGLWPCjDLpAk/FCGjSEieMjJuaog7wIqIf6CIKGGH0DGmLkRYANoMt9hDK0EXRVAI8IkFFlYIjBvxM/ZAlexMiDr0q6B4COPOiQh7wIgm63EbwIMi6oA0wQkBYBPOCFbtCQRyzwgg7kRYAN4FfRUU/wIoY+8Mt01AUNZeARQdBRJg9sYBOuJ9UV1nVL8EMmeBFGjwqbZPAij6DCBWVOQcVvCjYOsj7Uhy6DTI3DQ8b9vNI879AG0ERAHrwq2ZApeBEDEzs/9ICGcg6FR8A6swqvV1sj1tieffLPSBXw4hmEgQ8fyEAa4fsFSxDRcM1t0kaPbDL5ew0H20wkDhWRCP2f4RBfeMaxVGn7kYJl+cpu6ujqRtIMKzt6CDSEzq7kEgmkQUPo6U3ObMj8KHMTIBNKIS9Rb1C3Pd/V0w9WM6CeKEddk2gc2jp7CZtPrGjtJthlkMw/wetVrLIF8vsTa65hC/K5BLTPNNw4NLcm+0PIRnkuclV1oMNQNfjX3NZjplX8aJvJkDigPnhlXJDXCRTuNaWgruAX7UTBQDjZflFujwU/Yrks3ttNW20xQJv/sG/wvBK2y/pEnepq63zvo6ROYA077Pzy+QWZkIEYvCK0tPdQb98ACRmCDl4EQcc5jLwIyIMXsaAhRvtAR7Dbg3I5yPaBH3mUC52gITj1HfSAN1NAXRGEndAhaIiBB2QgRl4E0BDs/KAhyPSa2n5qi/dRV7fRK9J1p6IiNNin0C9kI22XgTLQENBe5EVAHvTOxO42gg4agt12wY8y6AK/jBfSoCEIXsTIi4B6qI8gaIjRN6AhRl4E2AA6+lzQEAMn0FUBPCJARtzAsN3A0okfsgQvYuTBZ7cFedAhD3wiCLpsYzRKg30k42LWifSQHRfwQDZ0gwd5xAIv6EBeBNgAfhUd9QQvYugDv0wfCFnXBpSBRwRBB38fbiqJAtiE33SHgWWCZEaQCV4EgQF4kUcQNJPZOICWKaj4jarmn6wPcqALBaHwwCDmdlyQB69KNmRChgjAxM4PPaAJHo4Li0DgHV5dOPbaeQt64InptGRZjNraO+muB5+ng/fZ0Zz9hYyp9z9DR556KZJmwGzuux99RZ3xblq0dIX5QtpWm65LNdKyCJPROGy3xQa0w1YbGSn+YwRKB4HVVh0wzv/87P3x3n203TbJwVImab86ro/OPavPmEnMxMVlhURAtYe21zsz1NYSrbfuQCGbpqGrdFiaDOcW1lZXeYfhypWQ6G+odX6ASiq6eMHMblVTkzz3ANoAABAASURBVJ1S/HxTok+Kbwlb4BUCZePw/vyg3Wn1VcfTLof8nrb68YnU09NLpx538CBOSw1H+Itv5g7mFy1ZTkf99lLa/Ee/od0OPd2YSeqji846brCcE4wAI+AOgUpj1lBskJ+tZpW0zjgbL5eXBgLrrJWbs+bPS1P5YxaN5i/D7bZsXjh+4UjINLzfNk4N2+72bm0zheZ5KIbOPE3m6mWEgO0nULotq6+roZsvO41ee+JGevnh6+i//zjf3KJMtOjMkw6nt5++RWTp9ON/Su8+eys9c88V9OpjN9C9N51HE8eNHix/8YFraKdtNh7Mi8TWm61Hn06fSuGwdVER9GLGqtF0MW1i3YVFQHyRqbBac9fGO0kksfPCyUlKG1op3QFWsVBxe212w6+agRw2zHng0Wjbbaem2uYRFwukEtNbXZ3EN1w2HlSJdUKO5pZddzUOq6dRIxq14MDyBWxfFm1s0OIPKlOmPQqDanM52VUTgNnKmurSQtTNjb20WlY4axs9mIGUrC3JZG3iC2QlaTwbXZII1ErLOCZPTDq/JdmYIWZ02Tm8Q6z/PG1ukB43DVDpXEgwWzlpUvDsDfJep/K5Fm3kmSZPf8geCGts9EBIkUSoZj6LZE7JqpUdu5JthN+Gh/xWwPK9REDf4fVSK8sKJAJBesTVUF9aV5JIAH9J1SU26xvIH0UWo4LiXI0Z7fGAq7R+fmYvBaUvTGPK4ODmyWG0hAdIZdBV3ARNBAJ4m9a0nNnKGoHKivxn/ULGTXu4bd1aWYOWY+OijR47SznaUQrVhFOVr63RaL4SUuvX1aXmOVdcBGrrjItPcU0oqHZcawuqkJUxAjkgwA5vDqBxFf8QEC8ENAzL/YYhnJJGY9YhiDOv/qFXPpKj0fJpi25LGodgm3WxKQZfPlu3VXm4vVgx2s46GQEXCJQMKzu8BeqqSMRZEd/kUnHZb59+2nuvPho1kmcdU5EJRi4csfpFDEyCYVWwrKiR3uLWsSwblhXWB+8yilK9tMgzvxlhy1jY1pb7oDujYC5kBBiBoiDADm+BYN9wA8tRKJC6klUzedIAbbOVP1i5dURKAcTVpwzQ2msOED4yUQh7NzLO48MO7addd3LRRzkYFqBd/1xbL7+Qp1NZ9XLQyBEWxhusn315T61iScPYMZYMHTv84fFeP14S9cdWlsoIMALljAA7vAXrXe8v/LmaLh75o/648cGxC/b4Gdw6In7a4pXsVScP0BE/66MpqxWuH9dft5/83vd3+PDCtcervvBaToNiP1Wv9XglL+qw28akSUQ6M9RubBgzxg23mrfaxy3NqqvKd3a4ooJ/m+qzKnsJcxQPgXDxVLPmICAwzqObR65tkZ3vXGVwPUaAEQgmApilbvB4m3O3s/+qga6fg7a6utycwkhiyVAwe9Oyavx4Ky73o+q8Kfd2l3P72OEt597ltpUgAmwyI8AIeIlATQl9UWzChNwcZS/xyiZL9SJwNJqtZmmVl9J5U1rIFs9adniLhz1rZgRMBLye5Y42Bv+maTa8hA519f4Zu8nGQ6e/VOuV/UO3MJK9Wh4RhO29og5LUwqDooOWMiBF+XocmF5khzcwXcGGlDIC5XojL+U+8dL2ykovpaXKaooOHYdXfkxcX18+7fZzeUTq2TI0c/k4jdU15XOeDc3e967V7PB6h2WgJelckCORknvRIjCYl9oLRoEBjg0Zsgj8cFM9R8TrJyDFBFx2+HO1gx04d8jV+vhyYm1N0pampmSaU8FEgB3eYPaL51ap9umUFdU36N2A5DqcthDYYH3vsItGLZl89A8BnQGgf9qDJblYN+qKEnhBy+uekh2kFNkh/etHrdKBS5FY8pkal/tZF6PBNbXutFZV6vezO8nMrYMAO7w6KA0RniCsHytVqKesyheyUuo7nQFgKbXHK1trpRkrr2SynMwIbL1lv7mXdmauoVfqxWy4Lmr5LJkQOnS+rrfxRtn31BbyOPYeAXZ4vcc0sBLZsMIgUFNVGD2lpEVnRjXKL8sUvUvdzlh5bXCQZvUKZcs2W/dTrcuZQje4i0FMIR1IN/YFgdeLNfqjRmRvybAGXjaYHSX/ONjh9Q/bFMniopNC5IwvCESbijvbqvrqlS+NdRCa643azxl+nlF16CgPSdEyeRO8gE5ZVvSDZEtWYw0G1e9eDGJqFNuzedXOUnYmdtoh/5nX6mqjExz+Jk3MX7aDWCblgEApn6M5NLewVWqkNUheXVQK2wJ/tOnM9uWjuXGIf6Vr3Ljc0Cs0btFobnZyreIh0KTRZ0F9qSqfQUG4yOuNdWzP9R6jmoxxe50e7tGgy61ep1+D26/BDfPxq4YVEScLmVYMBNjhVaHuAb221vnxxVD5Uo0KQp7tUyHD9KGGQCnujLLzTplnrGrL8KWqsUX+ImUxfhc61+nKKu+fpunozYbH+uv2U2NjNi4uH2oIsMNbhB5fZ82+ImhNqsx1JiApgVN+IiA/GfBTT6nLxmdrS70NpfK4U8bZeRgvc5RfWueFpFxajWVE0RJ2zMaPy83hrfV5b9zVpxCtPiU323LpR65TGgiww1sa/eSplTWKtVyeKmFhOSPAAxI96Ib7+BhSz4LCc+WzXMDN9mO1vFtDQToXs5BweguizCMlXlyfvFi24FFzWMwQQsAjh3cIIcZNZQQYgZJEYMzo0p/xqS3QcgEvnJpsJ0lFpTVXPKwhGyeXywiEqLjnca0Hg6EIr2uVu5TTBUKAHV4fgda5aTRpvATio4kpoqurrBuQTHQzKyTX43QGBKRN5qPRDHxclBGBqmrrfA0rrmLDh6VWr69Pzeebi6i+TJiv4Az1J4z33tnRuU5lMCnnookTBuiwQ/toz90zrwnOWUGZVsSscKamqRzSispMtQpbNnx4YfV5qU35e5Gu69Cn6geUcSgOAopbRXGMKTetpbZ0oK4u/WZa6Df3y+0ccGrPlFWdqN7RKiosWZEy/3WvMmaANtsUwdlhGjky/Xy2kPHm6Oeb3SoL85mlrqxwxqOm2pmussFL+vrrDlBjY/H0e9mWoMhSOWReD/iC0t5C26FyZKO23YFU/VBoe4utL0j6y/yWGCSovbdFrP0q5g3L+1aVv0Tc4MM+zg7usF0//eH3fbSjB3tLBrk3wsZj0QP366ONNmCHSaefZIennAdD7GjonA3B4tHZdi1Xi63nQLnW5nrlhAA7vCXcm5tuPEA779hPG67vPMNVGk0bms7KZJ83Iy+dmXmi6gKtSy2N30NhrBzWUL6/u5oSfikXS9xUS3Sczox8XmJ0kleOtGI8iSlHHMuhTezwlnAvYqZw1537aeLE0m3EpEmlaztb7g0C/Ma2Nzi6ksLTXq7gKiSzm/WttTxYzNo1bgYQWYX5zcDyfUWAHV5f4WXh2RAIh8t3pilb24dC+aqTB2j0KKKJE4ZCa923sc7jF+ncW8A1/ESAl1ekossvQafiwbnCIsAOrwd4T5pMVJnhDdho1AMlLEIbgcrEbhOFnsTaYvN+2tII2oa6Zyy5Gtj8/dSTemnNH6Qvu8Fm/liHXuvBNkcyMF7Lk2V7nd5p+yQu+ABBKFTos9brFhVWnp9rP71oSSkvr/Ci/fnIqK7OpzbXZQTSEQink5jiFgG8ACK/EOK2PvN7i0BD3QBttWU/7bxjYb9ot98+/bSvEXJtTU310Jrt/vUx/XTm6b3k9SxYQwl9kEJezgFfF8uUcj1/ilFPDC6LoZt1FhaBQg8k6+qytY/LGQF3CLDD6w4v5i4RBH78o37adJPSciCH2uM+vHDT4MMjfT9kZjvtvXbas+kLQnlNLREGl0GwhW3wHwGncxwv2fmvmTwfFBfCZtYRPATY4Q1en7BFHiFQamJq+BGeY5fVGo6VY0GAiMWYncfLTdiaTReG4R7PfNd5vBRFtx3MN/QQKPTs8tBDeGi0mB3eodHPrkfIo8eEzJeNsDH8EIGImxkgBMaOSc7OrzJGWuc6RNfDr71WEgPRTVhKNXliEidBV8U/3GyADj4gXY6KPxt90iR93dlkZSuPRrNxcHkWBLiYERjyCLDDO0ROAbcj5Pq6AcLLRlgLO0QgCkAzC+dABKCxaSY0Sk5NsdfvybbIhkYbvXEYN1xvgH6wRj/pPhKuqwvey2y83ZN8ZhQuPdSWPhUOWdZU7giww1vuPazbviLxZdrdokgmFU1tHb+kUTTsC614j9376KgjvHGeC20762MESh2B4cNKvQVsfy4IsMObC2plWKe2tnCzi5FIEsBivGCU1B6sVFVl4fogWC1na4qJwMiRQ/O8y/TUy6v+iESCMzNfW5O7LaWwjl6nz7ATCviG6jmPtg/lwA6vx71fqp9JHTvGYyAyiJs8kWe2MsDDRSWIQENDCRqdMHl4GX9mONFEM7IvH3HadcBk9PDg1RZ5tR68IChvgee2iV7viZuPLW5tl/nHraI/uKuqzn2AIOvkdHAQYIc3p75QVyrWD1ltkVVSLe3xqlqfaHHykREoLgKYhRk/Tv/GVFxrLe2jR/IgzkIiv2MhnzRls9TNWlm7M51NtnC2ca5n40U5toBD7EfQWZfu9Xrt2jxmm/PBoKpKv3aQzkV9q5kzEwLs8GZCx0VZXWJJQFC3lqqtKS0HwgX0rlnd3pxcK+AKeSHQ2Ei09lp8vuYFYiErZ9G16cb6fbnK2CzCyqS4ptoaIDUO18emmE1fdXJp2FlMjFh38BFgh9ejPvr5Yf30m1/1kbzWSSxviEhrVj1Sx2IYgbJDoHbwsW1+N9dw2KofzvBEUsywBQ3EaKNle9DsyseeXXYqzBcPo4odNBql3T/yaUeh69ZIT+W80h2NZpck8HL7G0n+ftU6hvHLYmpwuMR3BArh8PreiCAoGG6M1CeOT71ZYb3QNtv00+67WKP5INjJNjACQUUAN9htt+6n7bdN/R2l2pupzOLcaYcBOu3UPtppR/XvTufmbEnjY5ARKJUZUjuGjVFrNDZmtL0kmR83LpnWSXl9TruVp7PsYtSI7L9fnbYGkaeiwurTINrGNlkIsMNr4eDbce89+mn99dQ3Xt8Us2BGoAQR+NGe/bTl5qm/l9ra5I1Ed1ePpiZ/bqz539RKsFMCbPKIEQE2LoNpTdEBOv+cXtp3H+9mv1UOZ7QMnxpkgNa3IkxqZRI+YcIAVfPSwUwQFb2MHd4idEFdffIGXgT1rJIRKCkEqqRHuxUVxTFdzHbhpqZyLIpjGWstJQSi0rKLiAfnsmrZAzte3p4Vm24yQD/c1BpEV1ZZsV0DXkCsLdLLeHZbSiZfYEPZ4S0w4FBXyfutAgYOBgJjRidnM8eODdHYMQO09v+zdx5wUhTZH3+zOe/sssuSREmiIPHIEpa0gGIABFFBUBADKgoiCKKo6KEomOAQFVFUuBNPOZBgQNBTVPTUU1T+6hkxIoIILLAz8+9XvT3bMzuhZ6Z7prvnx4fqUOHVe9+q7XldXV19Yk2clAX/TUCAp1vEpeaSAAAQAElEQVSwGvyjZtYXU1k/qwctQwHxmgeaEcSxCcY4EU6m0i/9dYLj5U9Ev/Pc6hfU9ZMISfEiAIc3XqRRDwgEIJCrGu3nZXAmX+aiU7sHHkEIUBxRBhHglTz0Xns0ElXZsc72vsQXSUlz5mV7tCxt1atX+Ju9tLTQfx+FGl7M0kIpJ1tLrpo82Rjdq4Gh4xFEgYBeBODw6kUyCjnxHBFQVoyIQk0UAYGkJJAdxOFxUGiHSw9YvAxUsNE7PeTHW8b0qS7iEK7eVA2/SM4C4/mH09PI9DSs6mMk3rjLxm9v3JEHrVDD5SVoWSTESCCeIwJm/SBGjAiJCBJAIL4EOneq7XDlF8RXB6NqM+o6kZfrodyc2txC2ZFtoxHuUHb6p7VtE36U279MNOdwxEJT46c8oXNoSzXqb0pb7cilJgCHV03DxsfZeNxm49aFaUYSSPEbceva2U0cp/5EtpbH9UbqqJZdt66DQq1BTCH+mWl+splGuON5/XQWhmigcEkRpFvJEVO/7BeBicgKAj4E4PD64LDvifriptedq31pGW9ZRrrxdaCG2Ag4q5dzKvBbLD9daru5s6vonOHxGYmL1IocaSQ11BrEkcpDfiL19RM89CeA3yT9mUJibQJweGszsXOMqWwrsMlj4Gig5uZFUwplYiGgOLCxyEimsk5nZNYWRpg/MumJy82j+YmrXa4ZfVfmoNcWDrZeJK0lBw6vtdpLd23z8z1CZqOG8l6cGLxRfhjN9BjYYJMhHgQMIWDnL1cFA6bf3NNgNfjGN24Uv2ujb804swOBeE1RsQMro22Aw2s0YRPJ52WW/Bfub9bUQzOvq6KB/c35eNZE+KAKCJiOgF1eltMKlle2qVsa2AGNtyOsVWe75Qs2txqj0IFbmpfkC5yC2HgTgMMbgrjdkm6YXkU3zqyqZVZODhH+KAn/QMD2BBo2kG9sj2tkTVP55bFUv5cIFUv0nmerZYk0pW477TMzQn/+IytT7kOhbK5THPimJFQZrWn4rdJKCvn8CcDh9Sdi43OeQsDBxiZaxjSMhsS/qU5obNyPcPytia7G0lKiW2/iJzquSAUkNH9WZuxtF2xd5WCG6fGOAT/O9n+qFqy+RMX7j4zzS4+x6JKVTdTIwGkgyjS8WHT0L5udKTv5sd7k4LruT9Zc53B4zdUe0AYEEkYg2+brnjqLYneajG4cu7dBtPyCPUaPRF5BAj5YwaORRr4foUd/0XtknJe2Y7vDtY3TGS5H4PSUaNfck8Tl5QW+BmRle+iC81w0eqS1bgQlk/A/AgL6ObwRVIqsyU2ARz3atfFQl07hH40lN6n4Wl9aN/CPQXy1SO7aeHTMqgTS0+VRMrN+KexIZWL6d2qqzMWIdrVyfzGCRziZSh8NlK9lCw+VlYXvI1ra0+n0raHIAjfbvhrb8wwObwLatbB6XU8jHs0kwJyIq+S7/xHDXNSzBxzeiOEZWCDWx3kGqmY50cmoMH9NjT/K0bsnRsnU7c9c1OexHifr70as3PQqz05xYUFk0iLNH5l05NZKAA6vVlI65uN1HW+eXUUTxuGHQUesEAUCIJBgAqcPcVP7duFHyRKsZlyr5+u9UqH/fFklPpJ9Mt6YKktZRsLJqLwZGUQYsY2IrmkyJ73D6/F4qMoVf8czNZUoJenpm+bvAIpIBPSYJymJsd1/vIiS+CbNzU28DnpooPd8WT10SiYZmRnyzZgjRd4nk+2wVfK5kh3C+pe2U8Xo62ph6DfyWmpdPp5+33/Am1Z55Ch1HnKZiHe78QfjBYMDWxDQ4wWYqECgEAiEIcCjamGyJDxZj5Uk2AjFGdNLHsuMR0ixgBP5lw4e6lfupjattf1+OxyOeKBDHXEikLRjjN/u/pkGnTedZt6xLCTqVc9v8aa/8PJbdOhwpfccByAAAtYk4CzE/HFrtpw+WvMHLPSRVCNFryck/ELvyBEu6t3T2D7KLw/XaB/7UZtTtDmRsdekTUJJSW1+PP+2vLdb89PV/PzaMrTVHn0ulDSOQNI6vA3qldDj999As6eMDUr3guED6LHVG+ngoUpyudy07Ml1dMHwgUHzIwEErEigqMg6WidC10wNa8DyUkzWoWhvTbU8qcjOMvfIHY9AqqdxqJ3p7Cx92k/vwcvCQiKmmpNNhv7TugpIfi5rE5sqKTEsgRZbzShtBIGkdXjTUlOpXmkxFRXmBeXao9MpdHyjMnr2hW207a0PKTMjnfr2aB80PxJAwHgCqCFeBIqL5ZpycsL/cPJHDYqcHrkAtgkloHYOWRG9HESWlaigtsHfvmh0ys4JXMrpDByvJZZfpps53UUTxvt+zTPYTWq0Uzbq1zPv35lyzdDCC3niT8B2Du8PP+2hh59aHzQcrjyqmbLD4aBJY84QspaseJ4uvfBMcvjd8RXmppOdQorDQfk59rJJaZ+06vUwszNTDW2zAomfhDFkHZkZqcT/MtIcIfMpuhu5Zx1YF6O5RGpDblYapabI/ZH148C6RipHyc/9mmVwYNlKfLB9z64pnJWOa6CtjeoUy45xQW5K1G3K9nGl3FfVeinx3G/U8XXryP2opMi3T3M+lsP7bKmvZaQH14lZcF4OnF+RL6HnKBGUON5zHo5knfhc78CyOajlcl0cxzqp4xVdOI3bV0njcyWU1UkX7aG201+OUk69b1iWIkQobcEMsySW6jzq48zqdYiLpScm6nhFdxam1lGdJ9wx/21yeQ58rOQvK5V15PiCvJp+mqsaZWVGSn62m/NmZ5JgwvGsE8dx4HSO48D1cBwHtoHjOKg58rk61CtJoxJnulc2p3FZlsGBZaZK12GWkacageV4zsuB9eW8XI7POah1zMmusbMwxO8vy2Q5SvuxnEhD3VKWQKTWhWWwTE7xj69bksLRVFrHV8cWTeX4ls3SBBuRCZu4E5BbIe7VGlfhsSoX7d13IGjweCKbk9OvZwfKyc6S5P1BFX0611K8ysWrPNgneMgjpm/YzS62p2ULotRUouZNPcTnRgWXW6IoDUKEku9wSBmk3pSRSYbqEkoHJU1SV9KEyC39aShxZti7PTIjl/Q3JhSUNqxrtLqp5XAbhZNzfGMP3XMHUZ+e2tqIb3IkFSkzO/r+xfaxDDZdrZ8Szy/LquO7dJJ17NbFt07Ox3J4zxxDtS2z4Lwc3FJGRX56GsfIQYnjvbtaGd7xud5BrtGXOdfF8dwj1PUpunAat6+SxudKUOLSM5QYkq5yvvKVPOq9wkVpC/7p8Egn6jzq44JCWX4tHTlCTpKurb7tpC4f6lhqlmoJ5PN3qrY/U3UtUY8Ccx5FtqJKkTSSq8QxN0U4pyvxmSpezF+JV7hwGSUu1J7Lcl4Owg6pEtZJQslRInC8IoPTOJLLKXEZGdLFiSOloI5X0gPtWaaUnbieQOla4v7SzsEiyL9OlskJ/vGK7k1O8G3nwQPlv1OeV8z1clmE+BOwncPLUxBmTD6PggV2XiPBzFMfbpo6jm657iJKT5O8Jb/CByuryE6B/5APHXHZxSYfO7p2rSJe/zg1w/g2424Sql906VJFw4e5qUf3xLNWLsBHjiVeFzWzw1I/5B/XQ0dqHpGyruo8kRyr5VQe1d/WvHzpl1xq+Fh0rFtXXiKR7VbbxjIl0XSsyu3Tp9V51MecT8l/5JhbuqkKXo5ZcF4OZWU1+Ro0lO3h+ECyWSd1vF7HXB8HtTyui+P4+qSOV+zkNG5fJY3PlaDEeVJq+pG/HCWPeq9wUdrimMtNzFKdR30cTEclnvVR66guG+7YkVqju/rvVG0/HytyWGeuj0NqWk2bst0cx+lK3szsGtmcrsRTyjHOKgLboMQrXDhBiQu157KclwPrznUflv7+eM9xHDhekcF2cByXU+Lc5CLlRUN1vJIeaF+/wTHi6Q+dOtXYHyhfqDjWy18Xzq/o7q+LojvvOV+wwDIR4k/Adg6vVoR8p37sWBVVSSPCXEYcB1mP99TOp1Cvrm05GwII6EIgVfrLa9/GTbk5NU6FLoKjEKJ8CaqgIPG6RKG+rYqUlca/DQqqRyYZZHpa7fqdTk4xfyio/oKl+TWNXMNo57tyTdIDSt4RUfQ7Z2HtfhG9tNolnRpWTTmxWWQ6OJ1El09yUeuT3bUrRExSEpB+dpPSbvry6x+o/cCJYlmyn3/9XRzfeOejyQkDVic1gX593XTNVS5q2iSyH5R4QePHr/GqKxnr4TmndrCbpyspdijTS5Rz7K1PoKTE+jbAgsQSSFqHt3mThrRz6wqfMH/WJG9rbHlmEfXp3s57rhx069hKlLHjciWKjdgnFwEebS4uMqezm1wtAWuDEYh0hLNQNWodTKZV4+vFaZWCHNVLZbGwSk/HtSUWfiirH4GkdXj1QwhJIAACIGAsAeUlJL45MbYmc0pX7PfXrk4d/xjdzk0pyOkkqlcWvQOZpnoRMZyBejmquUGWQAtXP9JBQG8CcHj1Jgp5IAACIKAzga6dXNSvj4u6d3PrLNna4o5rFJiHszB6p9DaRGTtgzm2ynx9ORe2IJBcBODwRtveKAcCIGBZAgWF8nJDVjGAF7Qv7+Oh9HSraJxYPfUanYzGipQQv6p6vEAWTCf1KHh+hC/wOZ3BpCZ3vHLjkJWV3DdQdukFIf407WIi7AABEAABXwLKOsi+sTiLloDZyuXlhb6hiXROcCT2hRpFVTulkcjUktdIZ1pL/XbMc0prN02+1EUVAwI/STCyH9mRZ6JtgsOb6BZA/SAAArYgYLfH6IkcJQ3WIZQRt2DpWuONcDzzc+VRwFAjvFr10zOfmdoxLVVmpKd9RssqK/NQsLnzWEHGaPr6yo+Tw6uv0pAGAiCQXASUF1/s5lSauRVLis2nXajR00RrmxrBC2F66arF4VKv9V1UpFfNRE5n5LLy8rQ7vPnVH3KJvJb4lTDixil+2idfTXB4k6/NYTEIWI6A+rOweiifnRX6kXcsdYSd7xeL8EjL8qezIi1TnT/Yj3mjhkS8zu0JjbU7L4R/MREojNC5LNDZWUzEKHGwUdWYQKJwUhOAw5vUzQ/jQSA5CRjplGZlRs80I1N2xPPzopfBJXt0c9PwYS7qeaqbT3UNLZq76ZY5VdShvf6ywynasEG4HPZMdxYSNW5Mmr8apve0ijyd1uStV0/u3/ZsJVgVDYF4loHDG0/aqAsEQCAmAsFGHWMSaqLCDep7aOz5LjptiCsmrTIlp7t9Gw+lp9vLwcjIiGxU2emMCaNpCvOI+sTxVTRoYPxvMvSCwCtHBLvRLLRJO+nFCnKMIQCH1xiukAoCIKALAV8h2Vm+55GcxVJWSz3Z2bIzlhrjizktmntImbOspV498zhSzOkgswPPdqam8ja6UGRDp8qRIve56IiELqUXL+Um1b/t9JxPHNoSpIKATAAOr8wBWxAAAZsTUH54jTKzWxcP3TSrinr3NM4JMUp3RW5+gXJkrj1P0ZDZRjbC6f8Dp/ej/kRT4icC0eigxBRBIwAAEABJREFU9ebP6YxGum8ZrXX5lrLqmcF/+1bFYhK9/a8HJlELaoAACICA9QjotWxWoix3kHl/sKNh276dywdlQbVDr2WedVa2PNqdka6NCb/Mx5Xlxjj/mmVoDdEwYdlZ2bw1T7CLU9yxPVHfPm5qdbK2PmOeFkgOTeDwJkc7w8rkIAArwxBo2sRDLZpHNkoYRiSSTUzA6ZSdVn8VW7cO3wfqlXmoYoCLyvtoc164Xw3o56ZePcLL9tdHj3NnYU29wT6IUKc4sC3OwsDxeuilRYbRT1+06KBHnkKJIzu8ZXUTy1MPW+woAw6vHVsVNoEACAQkMH6si8aeX+MYBMyEyIQSuHySizgYqUShxmW7evbwUKOG2p2X3j3d5D/NIJjzaaR92dWj0/51FBXJtsQyzzzSF8z4hTvWw5+D1pFzLsshI4O3RP5y5NhAW8SBgC8BOLy+PHAGAiAAAiCgIhDv9VDr1/MQB5UKlj484QRZ/cws2dmUzxKzbdqEqEN7D7UO8sg9209HPUZ+mzaVbzDL6vranJPje66cNZGewrRv66HmTX15ndpd/szv4ApZnpIfexDQSgAOr1ZSyGc7AjAIBEDAl0Bm9SiaOtYKX7xS62u243plsoOWbeDHTvxtdqqmN6Rn1Ezr4K+uDTvTRc38nEmlvLIahnKuxz4/j2jOrGM07Czf+dTBZPMHTYaf7aLjjvN1eDl/WZlHfPSEjxFAIFICcHgjJYb8IAACCSNgl7l+CQMYpuJgo25hiiUsOTuGZeoSprSOFVdW1jizgcTyqhQNG3gCJfnHGXqenuYg1oVU/6z8t8xOPJuSiC/Qcb0I0RGAwxsdN5QCARBIAIGsTHm0LAFVJ12V6lFCsxrPTlNxMdEJx5tVQ2P1KnKGdmaVVSmM1SI66Va+WenezU39yl1YjSG6pk9YKTi8CUNvsYqhLggkkEBWZugf9gSqhqoTTOCaK6vo4nFVCdYiePX8AQeeQtCyhf43a+zwK6ONag0KC+WznOqPochnJts6rPs3zS8ylvf2YHqFybpUOHXg8IYjhHQQAIGEE8jP8wgdol13VBTGJigBftzs/yWsvDw5e0kdmb18hi0TiDSMG+Oi04fo7/CyHqlpvPUN/BWzqy6vorHna5s361s69Bk78J06uolD6JyhU50Fcr9yhhmlDi0FqSCgnQAcXu2skBMEQCBBBLp1I+LHiPXrJUgBm1c7Y1oVzZjm6xyVlBBdd42Lhp5mjKNmJNKaJwKyU2VkXWaVXVpKlJurTTt2kLXllHOdOdRNvASbfBbbNtdkH8GIzRqUNjMBOLyGtA6EggAI6EmgRTM3DalwU6DRLD3rSVZZ2ZLTkeW3JBWzKJBG4Xj0l4+tFE5p5aE+vd21ViPgR9FsT4MGVrJGm66pqaFfYNMmJf650gOsDBJ/LVBjMhCAw5sMrQwbQQAEQCBRBBJQLy9p1b/cTfn5vpWPGuGiuTdWkfIRA99Ua5/lafyYhrWthPYgED0BOLzRs0NJEAABEAABEEgoAeXDINk6rPMb6dSGhBqOykEgQgJmcHgjVBnZQQAEQAAEQAAEmMCY86po4ngXBZqSwulagrPQEzCb8rJowEREgoDFCMDhtViDQV0QAAE7E4BtIBAZgTp1iBo3DuywapXEy5sFypuWbty84LQ0WXZGemy6B9IbcSAQiAAc3kBUEAcCIAACIAACSUIgp/qLdTWrWxhveIMGHurdy009ultvFRDj6aAGQUDnDRxenYFCHAiAAAiAAAhYiUDLlh7q28dNbdv4jrbWKZad0ZISea+nTbxaxoC+bmraRE+pkAUCwQnA4Q3OBikgAALmJgDtQAAEdCDA83/Z4W0ojbqqxfXu6aFbb6qiRg3VsTgGAWsSgMNrzXaD1iAAAiAAAiAAAiBQTQC7cATg8IYjhHQQAAEQAAEQAAEQAAFLE4DDa+nmg/IgoJ0AcoIACIAACIBAshKAw5usLQ+7QQAEQAAEQCA5CcDqJCQAhzcJGx0mgwAIgAAIgAAIgEAyEYDDm0ytDVu1E0BOEAABEAABEAAB2xCAw2ubpoQhIAACIAACIKA/AUgEATsQgMNrh1aEDSAAAiAAAiAAAiAAAkEJwOENigYJ2gkgJwiAAAiAAAiAAAiYlwAcXvO2DTQDARAAARCwGgHoCwIgYEoCcHhN2SxQCgRAAARAAARAAARAQC8CcHj1IqldDnKCAAiAAAiAAAiAAAjEkQAc3jjCRlUgAAIgAAJqAjgGARAAgfgQgMMbH86oBQRAAARAAARAAARAIEEETO/wJoiL5mob1MkmO4XUFAfVK8qylU3xbp/6xdnkcBAYxvi3UVKYSRlpKeAYI0dnbjrlZKWBY4wccyWGhRLLeF9P7FZfRnoKlRRkJnV/JPxLCAE4vAnBjkpBAARAIGICKAACIAACIBAlATi8UYJDMRAAARAAARAAARAAgUQQiLxOOLyRM0v6Eh6Ph6pcrqAc9uzdT4crjwZNRwIJfj/+speOHD0GHCCQUAL8t/rDT3vI7fYE1SPU33vQQkgAgRgJBOt33Fd/+nUvfmdi5JtsxeHwWrzFv//xV2pdPp7OvfQWH0s+/fwbET/xugU+8XqcrH9pO1WMvq6WqG93/0ynjZlBfYZPoU6DJ9Gcu5bTsargjnEtAQmMWPfim4LXvQ+v8dHiqX++JOIffmq9T3wsJyyrXf8JNGDUVOpYcQlNnbuY9v9xsJbI3ZIT0nnIZbTwoX/USrNahMvlpl5nXyVY8g+Vnvp/8/3PQi7/HagDs9OznkTIMpIb23PV7PvE3+pA6e+5z/Cr6Z6ltfvat7t/Ie6v7BRzGSuGRFwn9+47ELBfvvWfTyyDMBHcFDjB+t0bOz6WfmOupv4jp4q+e/t9K0PerCnysAcBOLw26QMf7/qK3nn/M681K/6xyXus1wE7tIPOm04z71gWUOS8e1fSSc0b07ubltH6J/5Km159hzZteTtgXrNGsjO6/4DsfLKz/uiqDbqr6izMo0cXXi84Pbd8Hu344DN6buPrPvUc+PMQXT5jIR06XOkTb9WT9/77f8QOQLEzn7hf6GlHg3oltOHJO33ChSMHUWmdQj2rSYgsI7mxQS2bNSbug+9tXka3XT+Blq/eQB99+j9OEuG8K26jIRdcL47tsInHdVLhxE/C+HjpndN8+ma7Vs052lIhntwYTLB+x08PJ02/m0ac3od2bFwq+u7Tz71Cazf/m4shBCeAFIkAHF4Jgh3+XzB8AD3ytDwKySODPAo7cmi517R9+/8kvojwqBeH8dfMp11ffudN57RX33yfbl30hMj35Tc/eNOUA3YsHr//Bpo9ZawS5d2zk8h33mPPqaDsrAxq0rg+nT34VHpx2w5vHrMfnNzieOrY5kR6Zt1WoepL296l0mIndTilhTjnzbsf7qIzx80iZsjh+tuWEtvOaV98tVuMtL//8edi1HbMlbdzdK3A7dKtYyvB6cSmjai8Rwd67a0PvfmqXC6aftvfhC6Dyrt446188MIr2+mMih50vtRP127y/XGa/+DTxIF/yHiElrnx6A7byyPvt0l98l8vvkGcvmDJao72CelpqXR8ozJvKCkupDXrt9HlF57lk8+KJ9Fy09oXr7x4GHEfzMrMkPpheyorLaLt7+30orrv1qto1ZI53nOrH4S7TrJ9M25/yPs0gv/WN2+Vr2E8iDBHemrFeZSw5PG1tGjZM8ppwH2j+iXevsn9lK+PATOaODLe3IL1u/9+8qWgdNHoIZSTnSX6Lt/cvvz6eyIeGxAIRQAObyg6Fko77+z+xA7nzl1f01PPvkTseNYtcXotcKQ4aFB5Z3r0num08oFZVLeOk2bPf8SbzheSK2fdR2mpqSIfX0y8idUHnFavtJiKpBHK6ijvbs9v+8Rxo/qlYs+bxg3LiOep8rFVwsTzT6eHVq6TRlaP0MNPraNLxgz1UT1Lcub5YssMly2YRp998S09+vQLIs/hyiPEIyHTbllCzY5vQH1P7SDiQ214FPmNHR9R65ZNvNnuWryajh6tkm4sxnjjDD8wsAKeI7rhlbfp9P7daLDkwP/f/74nDkqVPCXhhZe3U/+eHWnh3CvoD2mE/aGV/xLJv/3+B61eu4VWPb+FunZs5cNJZAiweWLNZuLR3SH9uwZItU5ULNyi6YvcDj//+jvxqK9Cia8hZdLfvHJu9X246yTb1/bkpnT3zVfQ2sdupzMHnSpuXnnK0V/atqR/bniN+EkX5zt4qJIWP/YcdWrXkk+DBp6SdOOdj9ITz2z23hwHzWzShHhzC9bv0tPTBaEUXvdRHBEd16Aufbf7l+oz7EAgOIGU4ElIsRKB4qICukAaPbtn6d/pcenCOmbEQB/1C/Nz6dwz+9HhI0fpw51fUEZGOvE8X3UmfvQ26+oLaPyowVS/brE6KezxH9IjeM7EcnnPIVMaNdq77w8+tEzo1bWtsJ2d1kOHj1BfafRVrfwpkmPas0sbYufg48++osKCPPIfDV/3+F9p8kXDaMJ5p6mLBjyed+8TdODPw+IGhTOsev4V2rb9A1p0y5WUnp7GUZYP/37nv8IGHtXmkX8eSd/wylsiTtnwKM25Z/WTbra6SOzOFiPeyiPhtq2a0VMP3ih4nhbGieVHng8uf46mXXquuHlT5FtxHys3tllrX2Tn7ZqbHiB+wsH9m8vaMYS7TrLNo8/qT/m52fTfT7+kqup3EL778Rdqc1IT4r77zw3y9COemsMj4j06ncLFaoVM6Rp7/rD+xP232Jkv3UCvp/FT/irdzFrvRdV4cqsFUhXRtlVTYpZT5jxAm7e+Q/9Yt5WeWfeqKoc+h5BiTwJweG3UrmNGVNDb738qHh2rR1rZRH7EWTF6Gt1yzwr65PNvqEp6bM7x6pCXm6U+jei4IC9H5D92rErseXNEcq6LnQV8aJmQkuKgSy4YKhyuS8eeQampvn8iG7e8TeUjrqEnpVH07374RaS7/FjmZGdqsnfJiueJH70vXzSDeESDC634+ybx+JNHOO9avIp27vqK3nx3p/ix5HQrhnUvvUlZmel0xwNP0S0LH5dGzyvp2Re2kcvlDmhOk8YNxHzfX3/bL9Jzc7KI20WchNk8Io22801Jv54dwuQ0f3Ks3NhCLX2RR5KvvflB0R4PzLta9Gkua9cQ6jrJjv/4a+bTuCnzxbW0UrqGMQd3dV/lQYWn/vkyHTl6TLoGvEjjpMEB/2sE5+eQJznNPP2LrydTLx1FT9w/SzzZ4KdCnG61EC9uobjwwM2T0s0vT6978tmX6T8f/Z/ot8c1rBuqGNJAQBDw/TUXUdhYlUBj6Y9+zrUX0qVjzqhlwrPSo7hmJzQkHvG55bqLiB9R1coUQ0RJHXn6BDuBipivv/uJIh0pVsomcj+4bxdiZ/f0Ad1rqfG3x9fS5IuGiWkhs64eQ727taVI//GSOjwX9THJuX1m2VwxcqTIuHj0EPGIlF9s48A/pjy/UpgL9dQAABAASURBVLmhUPJZZc9zx195/T9U0aczlRYXisB89+47IH6sAtmx64tvRbSzIFfstW64761c8yJdc8k55HA4tBYzZb54ceMnM5fPXEj8yJ4dMu5zpgSio1KhrpNvvfeJ6Jcv/+MeunP2paIvqaseVC7Pqb/7b6uF83pWxanq5JDHdUuKRDo/ZRMHFtskips/Jp4HPW/GBHENvkn6veMnbPz0yD8fzkHAnwAcXn8iFj8fLT0W5sfG/mbkSaNkfx48THv3/UE//vyb9Bhoq3+WsOf8iJlHcJXHfOK4enST77y7d2pNK9e8RDxi9NW3P9K/XnxTODphBZssQ3p6Gl09YQTxI0l/1Qryc+nX3/bRgT8PiSkhm1/d4Z8l7PlNC5YTvwCzcO5kMSWCXzLkUCWx5Mf6k6QbFiWc1JxfpGtBHB9WsAkzvPz6e+IRJN8cXDH+bOLAbDu3P4k2SKPlisq8/BEz5Rf+ePR8UHlnMe1GSdeyXyyNmLNc7oda8ps5Tzy48ZSdMZPn0S97fqdbr7+YDh6uJO6H6nn3PMf86DH5ETwfczAzN626BbtO8tMElvHTL3vFTQCvAMDnSuAR83OG9iGO532oG4Rt2z8Uj933HzhIhyS29z3yrHjR6qTmjRVxltsH5CZZoSc3SZxYzjJYv+N5/dwPeXnD2+97UlxfRpzem4shgEBIAnB4Q+KxTqLDEXpEa/jpfYQx/Dh+wLnTaM9e+SUzEalx8+XXP1D7gROJlyXjl1v4mF/GUIrPlkY8+RE8r8E79MIbJGe3E/FonpJuh/3l486il197l7oNvYIuvPqvxM6xw1H9ZxSmDRT7d3zwmTi8bMY9Yj1jXtOYw+4f94h4O23Wbn6Dhg3pVesx+VBp9JxXEjkqPRpmeze88rZgyis08I8nO8gcT1K3TtHAlVccWSfdYE2ZOEIUs/omdm4SuDAQ+AaDR8d4PvrwCXO8fXHUpJu9JXucMZkGny8vS8ZrbPc75xpvmhUPHI7QXLp0OJkG9u5EzKPHmZNp+7sfCzMdjppyyjXt3DP7irRgG3bYbrxzOTFDXtFl45a3iKeM8OBAsDJmjXc4auwPpKOe3Fg+MwvW71ZKT3HaD5hAvA7vb7/vpzUP3ypuJLgcAgiEIlD9Sx0qC9LMTIDn6u7cuoICPfK+QhpRe+Tu6UJ9nlqw5uFb6KXVd9OOjQ8Rv6DG5USitOFj9fJbUlSt/82bNCTOpw7zZ03y5uOR5c2rFtCWZxbROxuW0u0zJ7JD6E038wEvmcV8Aun45IOzxbxeTju18yn06rP30qan76I31y0mTvvb/Gs5SUxNYDYOR+gfB2bE+fwDP6oTglSbhXOvIJ7/p4qy1CGvZhFIfx4d27FxqXcUd9KYoaJfbl+/hHhOMy8txobyC5TLFtT+yAmnqUPLZseJvhmuD6vLmPk4Vm78ghX3L4cjeF8sKy0SzDifOrz+/ANeNNxGwdK8mSxwoPU6yXPF7731Str2z/vo32sfoAdunyIYtTm5qddKXg2nbatm1OrEE7xxgQ7Ycd6+frG4HvI18bXn7qduf2kVKKtp4xLBjWGE6neTpCdgfP394OVHxe8Y92MugwAC4QjA4Q1HyGbpPNmfH8sZaRZfgHiUzsg6Eimbl2fjpXB4/ddE6mG3urlfBrpxs5udetsDbnoTJeIbrqLC/FqCebrWY6s30tgRFbXSAkXwtYKvhxwcDr75CJTLPnF6cQtFhPs7rr+hCCEtGAE4vMHIIB4EQCAuBMaMGEi9u7WLS112qgTc4t+afx48RLx044BeHeNfuYVrBDcLN56NVIfDa7LGhDogkGwEeJoIT0lINrtjtRfcYiUYefnSOk4xJ1293njkUpKvBLglX5ub0WI4vGZsFegEAiAAAiAAAiAAAiCgGwE4vLqhhCAQAAEQAAEQAAEQAAEzErC2w2tGoibSidd15TU1+atAwdTas3e/WDfXP50/jsBpvIakfxrOQQAEQAAEQAAEQMBKBODwWqm1ItD14afWU7v+E2jAqKnUseISmjp3sVhIXRHx7e6fidfV7DN8CvG6uXPuWi4W++b07e/upK6nX06c1uOMycSf2vx411ecVCssWvYMtS4fT/zFplqJiAABEIgbAVQEAiAAAiAQnAAc3uBsLJ3CXwB6dOH19O6mZfTc8nm044PP6LmNr3ttmnfvSuIv/nD6+if+SptefYc2VX/5ypHiIP5kI68zy+tH8hJjix973ltWOWB5jzz9gnKKPQiAAAiAAAiAAAgkmkDA+uHwBsRi/ciRQ8upW8dWlJ2VQSc2bUTlPTrQa299KAzjaQq8ePrYcypEOn8w4uzBp9KL23aIdC7HH2LgLwLx+pGDy7uIsjxFQmSQNuxA33H/U3T3TZdLZ/gPAiAAAiAAAiAAAuYlAIfXvG2jm2b83fE3dnxErVs2ETL3/CZ/Vpi/oiMipE3jhmXE832lw1r/33j3Yzq5xfHEi6hzIn+K9Iob7iX+IlGLJo04CgEErEUA2oIACIAACCQVATi8SdDc8+59gg78eZh4RJfNVebbqteSzMzMoL37/uBkn7DuxTeJw7RLR4n4/X8cpEnT76ZrJ40kXgdURGIDAiAAAiAAAiBgSQLJojQcXpu39JIVz9Oa9dto+aIZVLfEKawtyMsR+2PHqsSeN0eOHKViZwEfegNPe5h5xzK6eeo46t6ptYh/6z876fsff6XvfviF7lq8ih5ZJc/hvffhNfTp59+IPNiAAAiAAAiAAAiAgJkIwOE1U2voqAsvK7ZgyWp67O+b6Jllc6nNSfJ0Bq6ipI7s+LLTyuccvv7uJ6pft5gPRdi89R0xkjtvxgQadWZfEceb5ic0pCkTR1BRYR7xi3GK8+wsyKWM9DTOgmA7AjAIBEAABEAABKxNAA6vtdsvqPY3LVhOK/6xiRbOnUyFBXm0+6c9IvCLZ/wyGo/YrlzzkliD96tvf6R/vfgmVfTpLOSt3fwGTZ27hGZeeT516XCyKMflDx2upGaSwztpzBmkhFFn9BVlxp87RKSJE2xAAARAAARAwI4EYJNlCcDhtWzThVZ8xwefiQyXzbiHKkZf5w27f9wj4mdfPYZ27vpKrME79MIbJGe3Ew3u20WkffjJl2I//8GnveVYxuat8ioOIhEbEAABEAABEAABELAIATi8FmmoSNXcvGoB7dy6olY4vlGZEMVLkXEeXmf3nQ1L6faZEym9ekoCr8EbqOywIb1EWfWmeZOGog5laoM6LUmPYTYIgAAIgAAIgIDJCMDhNVmDxFsdXmeXPywR73pRHwiAAAiAgN0JwD4QMA8BOLzmaQtoAgIgAAIgAAIgAAIgYAABOLwGQIVI7QSQEwRAAARAAARAAASMJgCH12jCkA8CIAACIAAC4QkgBwiAgIEE4PAaCBeiQQAEQAAEQAAEQAAEEk8ADm/i20C7BsgJAiAAAiAAAiAAAiAQMQE4vBEjQwEQAAEQAIFEE0D9IAACIBAJATi8kdBCXhAAARAAARAAARAAAcsRsLHDa7m2gMIgAAIgAAIgAAIgAAIGEIDDawBUiAQBEAABUxGAMiAAAiCQ5ATg8CZ5B4D5IAACIAACIAACIGB3AorDa3c7YR8IgAAIgAAIgAAIgECSEoDDm6QND7NBAASCEUA8CIAACICA3QjA4bVbi8IeEAABEAABEAABENCDgI1kwOG1UWPCFBAAARAAARAAARAAgdoE4PDWZoIYEAAB7QRsl/PX3/bRln//J2T4dvcvxOG5ja/T7/sP2I4BDAIBEAABuxGAw2u3FoU9IAACMRHYuetruurG+0OGf7/zEX302f/oxjsfpd0/7YmpPhQGARCwCwHYYWYCcHjN3DrQDQRAIO4E+nRvRx+8/Kg3DOzdiU5ucbz3nNNGn9WPKqT4N9Y+SCc1bxx3HVEhCIAACIBAZATg8EbGC7lBICYCKGx+Ag6Hg9LTUr0hJUW+TPrGOeizL76lK2ffR7/vk6c0/H3tFrrmpgdptbQ/c9ws6jzkMpp5xzLaf+AgLXl8LQ06bzr1G3ktPfL0C3S48qgXxIE/D9Ht960Uaa3Lx9PF194pZHsz4AAEQAAEQCBmAvKVPGYxEAACIAACyUXgD8lRff/jz+nI0WPCcJ7a8NJr79JjqzfSGRU9aPyoQbTuxTepxxmTadOWt+ncs/rS6f2706Jlz9AbOz4SZVwuN02ctoBee+u/NG7UYJo/axIdPFRJY6+6g9gRFpmwAQF7EoBVIBBXAnB444oblYEACNiZQLEzn9auuJ0uuWAoTb5oGPXq2oaaHd+Ann3kVrp49Gk07bJRdErLJpLD+7HA8NrbH9LHu76iu+ZcRuNGDhKO8m0zJtChw5X09vufijzYgAAIgAAIxE4ADm/sDCHBKAKQCwIWI5CTnUVZmRlerUuKnZSdlUnp6WneuLolTvrxZ/lFt11ffCfib1v0BJ1zyc0izJi3VMT9gJfhBAdsQAAEQEAPAnB49aAIGSAAAiAQgEBqau1LrCPF4c1ZeUSeyztl4ghSwtRLR9HSO6dSeY8O3nw4AAEQAAEQiI1A7atxbPJQGgRAAARAQCOBJo3ri5z169ahXl3b+oTjGpSKNGxAAARAAARiJwCHN3aGJpEANUAABKxGYECvv1BZaRFdPed+2rb9Q/rm+5/FfurcxbR1+wdWMwf6ggAIgIBpCcDhNW3TQDEQAAEzE0hxyFMTHA7fvVpnBznUp+I4xZFCDinwSW5OFj1yz/VUr7SYrrhhEZ02ZobY81fcGpSVcBaEaAigDAiAAAj4EUjxO8cpCIAACICAisDCuVfQmodvUcXIh907taadW1dQw3qyY3rtpJG0edUCObF6O/e68fT3h26uPpN39956Jf1t/rXyibRt2rg+LV80g97bvEyUf2fDUlFfy2bHSan4DwIgAAIgoAeBZHV49WAHGSAAAiCgGwFe3aFR/VLiUV/dhEIQCIAACICAIACHV2DABgRAAASSlQDsBgEQAAH7E4DDa/82hoUgAAIgAAIgAAIgkNQENDm8SU0IxoMACIAACIAACIAACFiaABxeSzcflAcBEIgzAVQHAiAAAiBgQQJweC3YaFAZBEAABEAABEAABBJLwFq1w+G1VntBWxAAARAAARAAARAAgQgJwOGNEBiygwAIaCeAnCAAAiAAAiBgBgJweM3QCtABBEAABEAABEDAzgRgW4IJwOFNcAOgehAAARAAARAAARAAAWMJwOE1li+kg4B2AsgJAiAAAiAAAiBgCAE4vIZghVAQAAEQAAEQAIFoCaAcCOhNAA6v3kQhDwRAAARAAARAAARAwFQE4PCaqjmgjHYCyAkCIAACIAACIAAC2gjA4dXGCblAAARAAARAwJwEoBUIgEBYAnB4wyJCBhAAARAAARAAARAAASsTgMNr5dbTrjtyggAIgAAIgAAIgEDSEoDDm7RND8NBAARAIBkJwGYQAIFkJACHNxlbHTaDAAiAAAgkHf1vAAAAZElEQVSAAAiAQBIRgMMboLERBQIgAAIgAAIgAAIgYB8CcHjt05awBARAAAT0JgB5IAACIGALAnB4bdGMMAIEQAAEQAAEQAAEQCAYgdgd3mCSEQ8CIAACIAACIAACIAACJiDw/wAAAP//yK2pCAAAAAZJREFUAwCpfmQvci7rQQAAAABJRU5ErkJggg==" + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Visualize the training data\n", + "# The plot shows the 'load' column (energy consumption in MW) over time\n", + "fig = train_dataset.data[[\"load\"]].plot(title=\"Training Data: Energy Load over Time\")\n", + "fig.update_layout(yaxis_title=\"Load (MW)\", xaxis_title=\"Time\")\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "6c13fe2b", + "metadata": {}, + "source": [ + "## ⚙️ Step 4: Configure the Forecasting Workflow\n", + "\n", + "OpenSTEF uses a **ForecastingWorkflowConfig** to define all aspects of the forecasting pipeline:\n", + "- **Model type** — `gblinear` (gradient boosted linear model) or `xgboost`\n", + "- **Forecast horizons** — how far ahead to predict (e.g., 36 hours)\n", + "- **Quantiles** — prediction intervals for probabilistic forecasts\n", + "- **Feature columns** — which weather variables to use\n", + "\n", + "The **GBLinear** model is particularly good for energy forecasting because:\n", + "1. It can extrapolate beyond training data (important for rare events)\n", + "2. It provides interpretable feature importance\n", + "3. It's fast to train and predict" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "1b5c88dc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "✅ Workflow configured successfully!\n" + ] + } + ], + "source": [ + "# Import workflow components\n", + "from openstef_core.types import LeadTime, Q # LeadTime: forecast horizon, Q: quantile\n", + "from openstef_models.presets import ForecastingWorkflowConfig, create_forecasting_workflow\n", + "from openstef_models.presets.forecasting_workflow import GBLinearForecaster\n", + "\n", + "# Configure the forecasting workflow\n", + "workflow = create_forecasting_workflow(\n", + " config=ForecastingWorkflowConfig(\n", + " # Model identification\n", + " model_id=\"gblinear_demo_v1\",\n", + " model=\"gblinear\", # Use gradient boosted linear model\n", + " \n", + " # Forecast settings\n", + " horizons=[LeadTime.from_string(\"PT36H\")], # Predict up to 36 hours ahead\n", + " quantiles=[Q(0.5), Q(0.1), Q(0.9)], # Median + 80% prediction interval\n", + " \n", + " # Target column (what we're predicting)\n", + " target_column=\"load\",\n", + " \n", + " # Weather feature columns (from the dataset)\n", + " temperature_column=\"temperature_2m\",\n", + " relative_humidity_column=\"relative_humidity_2m\",\n", + " wind_speed_column=\"wind_speed_10m\",\n", + " radiation_column=\"shortwave_radiation\", # Solar radiation\n", + " pressure_column=\"surface_pressure\",\n", + " \n", + " # Training settings\n", + " verbosity=1, # Show progress during training\n", + " mlflow_storage=None, # Disable MLflow tracking for this demo\n", + " \n", + " # Model-specific hyperparameters\n", + " gblinear_hyperparams=GBLinearForecaster.HyperParams(\n", + " n_steps=50 # Number of boosting iterations\n", + " )\n", + " )\n", + ")\n", + "\n", + "print(\"✅ Workflow configured successfully!\")" + ] + }, + { + "cell_type": "markdown", + "id": "8915b7ec", + "metadata": {}, + "source": [ + "## 🏋️ Step 5: Train the Model\n", + "\n", + "The workflow's `fit()` method handles the entire training pipeline:\n", + "1. **Preprocessing** — feature engineering, data validation, scaling\n", + "2. **Training** — fit the model on historical data\n", + "3. **Evaluation** — compute metrics on training data" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "0b941aec", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2025-12-12 14:12:35,855][INFO] 🏋️ Starting model training...\n", + "[2025-12-12 14:12:35,992][WARNING] No aggregation functions specified for RollingAggregatesAdder. Returning original data.\n", + "[2025-12-12 14:12:36,109][WARNING] No aggregation functions specified for RollingAggregatesAdder. Returning original data.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[0]\tvalidation_0-mean_pinball_loss:0.34619\tvalidation_1-mean_pinball_loss:0.36512\n", + "[1]\tvalidation_0-mean_pinball_loss:0.28327\tvalidation_1-mean_pinball_loss:0.28960\n", + "[2]\tvalidation_0-mean_pinball_loss:0.23968\tvalidation_1-mean_pinball_loss:0.24857\n", + "[3]\tvalidation_0-mean_pinball_loss:0.20084\tvalidation_1-mean_pinball_loss:0.21070\n", + "[4]\tvalidation_0-mean_pinball_loss:0.16668\tvalidation_1-mean_pinball_loss:0.17744\n", + "[5]\tvalidation_0-mean_pinball_loss:0.14015\tvalidation_1-mean_pinball_loss:0.15102\n", + "[6]\tvalidation_0-mean_pinball_loss:0.12257\tvalidation_1-mean_pinball_loss:0.13536\n", + "[7]\tvalidation_0-mean_pinball_loss:0.11089\tvalidation_1-mean_pinball_loss:0.12362\n", + "[8]\tvalidation_0-mean_pinball_loss:0.10177\tvalidation_1-mean_pinball_loss:0.11396\n", + "[9]\tvalidation_0-mean_pinball_loss:0.09542\tvalidation_1-mean_pinball_loss:0.10819\n", + "[10]\tvalidation_0-mean_pinball_loss:0.09122\tvalidation_1-mean_pinball_loss:0.10361\n", + "[11]\tvalidation_0-mean_pinball_loss:0.08817\tvalidation_1-mean_pinball_loss:0.10134\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/egor.dmitriev/projects/openstef/openstef4/.venv/lib/python3.13/site-packages/sklearn/utils/validation.py:2749: UserWarning:\n", + "\n", + "X does not have valid feature names, but StandardScaler was fitted with feature names\n", + "\n", + "/Users/egor.dmitriev/projects/openstef/openstef4/.venv/lib/python3.13/site-packages/sklearn/utils/validation.py:2749: UserWarning:\n", + "\n", + "X does not have valid feature names, but StandardScaler was fitted with feature names\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[12]\tvalidation_0-mean_pinball_loss:0.08606\tvalidation_1-mean_pinball_loss:0.09880\n", + "[13]\tvalidation_0-mean_pinball_loss:0.08434\tvalidation_1-mean_pinball_loss:0.09764\n", + "[14]\tvalidation_0-mean_pinball_loss:0.08324\tvalidation_1-mean_pinball_loss:0.09593\n", + "[15]\tvalidation_0-mean_pinball_loss:0.08256\tvalidation_1-mean_pinball_loss:0.09539\n", + "[16]\tvalidation_0-mean_pinball_loss:0.08257\tvalidation_1-mean_pinball_loss:0.09481\n", + "[17]\tvalidation_0-mean_pinball_loss:0.08251\tvalidation_1-mean_pinball_loss:0.09515\n", + "[18]\tvalidation_0-mean_pinball_loss:0.08331\tvalidation_1-mean_pinball_loss:0.09463\n", + "[19]\tvalidation_0-mean_pinball_loss:0.08284\tvalidation_1-mean_pinball_loss:0.09461\n", + "[20]\tvalidation_0-mean_pinball_loss:0.08272\tvalidation_1-mean_pinball_loss:0.09353\n", + "[21]\tvalidation_0-mean_pinball_loss:0.08310\tvalidation_1-mean_pinball_loss:0.09463\n", + "[22]\tvalidation_0-mean_pinball_loss:0.08209\tvalidation_1-mean_pinball_loss:0.09301\n", + "[23]\tvalidation_0-mean_pinball_loss:0.08167\tvalidation_1-mean_pinball_loss:0.09290\n", + "[24]\tvalidation_0-mean_pinball_loss:0.08122\tvalidation_1-mean_pinball_loss:0.09183\n", + "[25]\tvalidation_0-mean_pinball_loss:0.08246\tvalidation_1-mean_pinball_loss:0.09343\n", + "[26]\tvalidation_0-mean_pinball_loss:0.07978\tvalidation_1-mean_pinball_loss:0.09027\n", + "[27]\tvalidation_0-mean_pinball_loss:0.08002\tvalidation_1-mean_pinball_loss:0.09094\n", + "[28]\tvalidation_0-mean_pinball_loss:0.08027\tvalidation_1-mean_pinball_loss:0.09084\n", + "[29]\tvalidation_0-mean_pinball_loss:0.08078\tvalidation_1-mean_pinball_loss:0.09126\n", + "[30]\tvalidation_0-mean_pinball_loss:0.07913\tvalidation_1-mean_pinball_loss:0.08980\n", + "[31]\tvalidation_0-mean_pinball_loss:0.07941\tvalidation_1-mean_pinball_loss:0.08994\n", + "[32]\tvalidation_0-mean_pinball_loss:0.07874\tvalidation_1-mean_pinball_loss:0.08926\n", + "[33]\tvalidation_0-mean_pinball_loss:0.07926\tvalidation_1-mean_pinball_loss:0.09018\n", + "[34]\tvalidation_0-mean_pinball_loss:0.08022\tvalidation_1-mean_pinball_loss:0.09024\n", + "[35]\tvalidation_0-mean_pinball_loss:0.08110\tvalidation_1-mean_pinball_loss:0.09072\n", + "[36]\tvalidation_0-mean_pinball_loss:0.07766\tvalidation_1-mean_pinball_loss:0.08812\n", + "[37]\tvalidation_0-mean_pinball_loss:0.07749\tvalidation_1-mean_pinball_loss:0.08784\n", + "[38]\tvalidation_0-mean_pinball_loss:0.07753\tvalidation_1-mean_pinball_loss:0.08805\n", + "[39]\tvalidation_0-mean_pinball_loss:0.07742\tvalidation_1-mean_pinball_loss:0.08782\n", + "[40]\tvalidation_0-mean_pinball_loss:0.07893\tvalidation_1-mean_pinball_loss:0.08909\n", + "[41]\tvalidation_0-mean_pinball_loss:0.08088\tvalidation_1-mean_pinball_loss:0.09088\n", + "[42]\tvalidation_0-mean_pinball_loss:0.07880\tvalidation_1-mean_pinball_loss:0.08845\n", + "[43]\tvalidation_0-mean_pinball_loss:0.07898\tvalidation_1-mean_pinball_loss:0.08939\n", + "[44]\tvalidation_0-mean_pinball_loss:0.07875\tvalidation_1-mean_pinball_loss:0.08851\n", + "[45]\tvalidation_0-mean_pinball_loss:0.07938\tvalidation_1-mean_pinball_loss:0.08977\n", + "[46]\tvalidation_0-mean_pinball_loss:0.07810\tvalidation_1-mean_pinball_loss:0.08837\n", + "[47]\tvalidation_0-mean_pinball_loss:0.07834\tvalidation_1-mean_pinball_loss:0.08867\n", + "[48]\tvalidation_0-mean_pinball_loss:0.07771\tvalidation_1-mean_pinball_loss:0.08764\n", + "[49]\tvalidation_0-mean_pinball_loss:0.07863\tvalidation_1-mean_pinball_loss:0.08907\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2025-12-12 14:12:36,827][WARNING] No aggregation functions specified for RollingAggregatesAdder. Returning original data.\n", + "[2025-12-12 14:12:37,064][INFO] ✅ Training complete!\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "📊 Training Evaluation Metrics:\n", + " quantile R2 observed_probability\n", + "0 0.5 0.835640 0.588889\n", + "1 0.1 0.546782 0.092940\n", + "2 0.9 0.673923 0.894097\n" + ] + } + ], + "source": [ + "# Train the model on historical data\n", + "logger.info(\"🏋️ Starting model training...\")\n", + "\n", + "result = workflow.fit(train_dataset)\n", + "\n", + "# Display training metrics\n", + "if result is not None:\n", + " logger.info(\"✅ Training complete!\")\n", + " print(\"\\n📊 Training Evaluation Metrics:\")\n", + " print(result.metrics_full.to_dataframe())\n", + " \n", + " if result.metrics_test is not None:\n", + " print(\"\\n📊 Test Set Metrics (held-out validation):\")\n", + " print(result.metrics_test.to_dataframe())" + ] + }, + { + "cell_type": "markdown", + "id": "2f89ec59", + "metadata": {}, + "source": [ + "## 🔮 Step 6: Generate Forecasts\n", + "\n", + "Now we use the trained model to predict energy load for the next 14 days. The output is a **ForecastDataset** containing:\n", + "- **Median prediction** (`quantile_P50`)\n", + "- **Lower bound** (`quantile_P10`) — 10th percentile\n", + "- **Upper bound** (`quantile_P90`) — 90th percentile" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "5e18b079", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2025-12-12 14:12:37,071][INFO] 🔮 Generating forecasts...\n", + "[2025-12-12 14:12:37,120][WARNING] No aggregation functions specified for RollingAggregatesAdder. Returning original data.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "📈 Forecast generated for 1344 timestamps\n", + "📊 Quantiles: [0.5, 0.1, 0.9]\n", + "\n", + "🔍 Last 5 forecast values:\n", + " quantile_P50 quantile_P10 quantile_P90 \\\n", + "timestamp \n", + "2024-06-12 22:45:00+00:00 438055.28125 346233.06250 460536.84375 \n", + "2024-06-12 23:00:00+00:00 417909.34375 335798.56250 438184.90625 \n", + "2024-06-12 23:15:00+00:00 410978.18750 330271.25000 429153.15625 \n", + "2024-06-12 23:30:00+00:00 398514.87500 318843.21875 413887.43750 \n", + "2024-06-12 23:45:00+00:00 381927.81250 306417.43750 397558.40625 \n", + "\n", + " load stdev \n", + "timestamp \n", + "2024-06-12 22:45:00+00:00 326666.666667 36988.759513 \n", + "2024-06-12 23:00:00+00:00 326666.666667 35235.209770 \n", + "2024-06-12 23:15:00+00:00 320000.000000 35235.209770 \n", + "2024-06-12 23:30:00+00:00 310000.000000 35235.209770 \n", + "2024-06-12 23:45:00+00:00 296666.666667 35235.209770 \n" + ] + } + ], + "source": [ + "# Generate probabilistic forecasts for the forecast period\n", + "from openstef_core.datasets import ForecastDataset\n", + "\n", + "logger.info(\"🔮 Generating forecasts...\")\n", + "forecast: ForecastDataset = workflow.predict(forecast_dataset)\n", + "\n", + "# Display forecast summary\n", + "print(f\"\\n📈 Forecast generated for {len(forecast.data)} timestamps\")\n", + "print(f\"📊 Quantiles: {forecast.quantiles}\")\n", + "print(\"\\n🔍 Last 5 forecast values:\")\n", + "print(forecast.data.tail())" + ] + }, + { + "cell_type": "markdown", + "id": "67585d92", + "metadata": {}, + "source": [ + "## 📈 Step 7: Visualize Forecast Results\n", + "\n", + "OpenSTEF-BEAM provides **ForecastTimeSeriesPlotter** for beautiful interactive visualizations:\n", + "- Actual measurements shown as a line\n", + "- Forecast median shown as another line\n", + "- Prediction intervals shown as shaded areas" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "dbdead5d", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAArwAAAH0CAYAAADfWf7fAAAQAElEQVR4AexdBYAcRdb+usdn3SXuSiBAcFnc3Y+Dw91/7IBDD3d3dzk0eICFoCFACHG33az7uPT/XnX37OxmZTaZXZJNJVPuX1V3ffWqulbV5D+JgERAIiARkAhIBCQCEgGJQD9GQIX8JxGQCEgEJAIAJAgSAYmAREAi0F8RkIS3v/asbJdEQCIgEZAISAQkAhKB9UGgH6aRhLcfdqpskkRAIiARkAhIBCQCEgGJQCsCkvC2YiFtEgGJQOIIyJgSAYmAREAiIBHYZBCQhHeT6SpZUYmAREAiIBGQCEgENj4EZI02BQQk4d0UeknWUSIgEZAISAQkAhIBiYBEYL0RkIR3vaGTCSUCiSMgY0oEJAISAYmAREAi8PchIAnv34e9LFkiIBGQCEgEJAKbGwKyvRKBvwUBSXj/FthloRIBiYBEQCIgEZAISAQkAn2FgCS8fYW0LCdxBGRMiYBEQCIgEZAISAQkAklEQBLeJIIps5IISAQkAhIBiUAyEZB5SQQkAslBQBLe5OAoc5EISAQkAhIBiYBEQCIgEdhIEZCEdyPtmMSrJWNKBCQCEgGJgERAIiARkAh0hYAkvF2hI8MkAhIBiYBEYNNBQNZUIiARkAh0goAkvJ0AI70lAhIBiYBEQCIgEZAISAT6BwKbG+HtH70mWyERkAhIBCQCEgGJgERAIpAwApLwJgyVjCgRkAhIBPoTArItEgGJgERg80FAEt7Np69lSyUCEgGJgERAIiARkAhslgh0SXg3S0RkoyUCEgGJgERAIiARkAhIBPoVApLw9qvulI2RCEgEegkBma1EQCIgEZAIbMIISMK7CXeerLpEQCIgEZAISAQkAhKBvkVg0yxNEt5Ns99krSUCEgGJgERAIiARkAhIBBJEQBLeBIGS0SQCEoHEEZAxJQISAYmAREAisDEhIAnvxtQbsi4SAYmAREAiIBGQCPQnBGRbNhIEJOHdSDpCVkMiIBGQCEgEJAISAYmARKB3EJCEt3dwlblKBBJHQMaUCEgEJAISAYmARKBXEZCEt1fhlZlLBCQCEgGJgERAIpAoAjKeRKC3EJCEt7eQlflKBCQCEgGJgERAIiARkAhsFAhIwrtRdIOsROIIyJgSAYmAREAiIBGQCEgEeoaAJLw9w0vGlghIBCQCEgGJwMaBgKyFREAikDACkvAmDJWMKBGQCEgEJAISAYmAREAisCkiIAnvpthriddZxpQISAQkAhIBiYBEQCKw2SOwSRBejbqpvtGLn2fMx+uvfo7H7nsdT9z/Bj585xvMn7cCXm8AGv2naPInEZAISAQkAhKBDhCQXhIBicDmjMBGT3gD4Sh+/WUR7rjyQdx68lV4/so78fmjr+DbJ97FB7e8hMfOexjP3fwaFs5dKSnv5jySZdslAhIBiYBEQCIgEZAIdILARk14W7whfP7Or3jy+sfw69sfYuuBg3DQTjshMwK4QlEUhlIwuCEdwelVeOeGd/DVBz8iEo120tTuvWUMiYBEQCIgEZAISAQkAhKB/ofARkt4/YEwpr4wHVMffRkV8+ZgeN4AHH7EETj0qCOQ6UhFcVYOdpm0E446/Cgcc/RRGFSXg09vnooPX/4CUfrf/7pq82iRzx/Ec298gum/zN48GixbuVEiMPXLn/DyO19slHXro0rJYiQCEgGJQL9CYKMkvHxm9+dv5uObt99HY+UajBg0CkOHjsDUNz/E0/c8glAkgCJ3HibtNhnD/z0RAw4ahi2GboFR7qH4/unv8Mf3iZGlt6eWYkLJKZ2qPY+5tF91dmeNmXLAOdhY2urx+nDvE2/hs29mdFbdmH93/fefu56Lxe2vlh9+nSPwqqiu2yib2NjsiT1fV9365HrXsa/b+cYHX+OOR15b7/rKhBIBiYBEQCKwcSGgrnd1ejFhQ4MHH7/8P9SsWY7i7EEYUTAKVtWNsoYalFVXwOVwAR4FtfNrgLUaolURZGZkYED+AGT60/Hdi98hqnV/tEGLaqIVk8aPwOH777KO2r9kOxG+OWihUHiTa2Z3/bfVhJGbXJt6WuE/5y0VEvGausaeJu2T+F9//3usHJaaerz+mLsnlo29nT1pi4wrEZAISAQkAn2PgNr3RXZf4tzf52HJn7OQZk/DoPwhyHLlwqHZkGFNQX5qHvJdhUh358BbG0T1K5VY/txKVK2uI1JsQ25WLuoW1mH14rKEP2I7/rA9cevVZ6yjrjz/BGwM/zRNJ+YbQ116qw4b0sbO+u+og3Zbp7obUk58ZsnKJz7P/mj/+KufRbOOo2eMLd/+9Ccb/U7JBkkEJAISAYnAxo2AurFVj+Wy82f9Bc3nR0FmIQozi5FidcOh2JBlz8Kw/NEYmDEKac4C+Jo0LJmxGjX1LQgRKYyEFaSmpsMRdGP53BVEeJNHFO989HVcduNjWLqyXJh8DIAVb5s3t3jXgbH0x1k47dI7wXFYXXjtg1i5pjIWzx8I4tyr78fTr04Fb/s+/8anlO+juOGe52Nx/pq/DGddcQ8m7nGq2Ba+7MZHceUtT+Ca258WccKRCDjfy29+HFFDWi0CDO3WB18WZfC5WMNrg4y1VXWi/F0Pv1DU55RL7gBvNcdnWt/YLOIc+q9rRNsnlJyCI067Dq+++yVC4Uh8VGFnQnTc2TeJNvKxirsff0P4J1t7e2opzHL2O+EK3PbQK2gvbTT7mHF979PpuPaOZ3DRfx5CdW2DqA73E2N64D+vEvXldr309ueIRKIi3NRaPPqxjKPPvEFgwOYDT78D89jBbJLKct9zPRgfHh88Vr758Q8zi5j51fTfccbld4MxZ3XShbfhhbc+A4+fd6Z+iw8++17EvfXBV0Rfc75dkUquB8cpr6gR6eK1dz/5TuTx14LlwpulxowJ9yXXk9vNmPC4FBG60apqGvDTzLnYbYctwYsSjv7Rlz+ysY5iDF977yvwmGI8uCx+trgu3bWT47Fqn+kzr30s2hM//u9+7A0xDhhLbhP3wT1PvIna+qb2yaVbIiARkAhIBHqOwEabYqMjvD6PFzVrq2C32uF2psBmdSBK1FXRrMhMz8OuZx+M8YfsAJeaAYuaAovdAVdKKpzuVFgsNlgpncvlQvnKtURE1iVY69sTv89ehM9LZ4AnfzaHDy4SWTFJuIsmUeEwNCav51/zAH75Yz523X4LDBlYgK9/+AM8iTMJ4GhhIn/f/fwnXn//Kxx80tXgSffz0l9hbgFP/+UvHH/uzYJQTtlqLPbceTLYjwnidOODLqvFArfbiU+//gW/kFSc8zUVE3MmEFaLCpfTbnqvt7mqrEq0ncsfN2oIDthze8xduEIQ8g8+/yGWL5N/jtPi9WG7yWOx165bY83aGiKYr+KhZ/4Xi8cWJvtM4OcsXI4dt52AoQMLwdveHJZMdRctVm685wWUV9bgkH13QmZ6KhHwaTjmrBsQT4bMPj7xvP/iujufxftEJplwMoFlQsQElzFlcnbQXjtg0bI1YELIJNKsL8c79JRrxDGDYDCEHbYeh8rqOrGw+fSrX0Q07hvue7fLgX122xaTJ44Ej5ULrnkQ8WSVsWDC/efcpZi8xShMGDMUC5asApM2LsdLi0JWnGkdETYm5qz8gQB7daiKC3LAZU+d9lObcF4wPfL8e5j550KMHDpA4MIknAm93W4Dt9dBJmPy1kelbdJ25vjyu5ki6EAaK6OHDxTPAZdd19As/E2NpeUX0IKQFxNLV5QJzKw0tvnZevT5d8FtZMXxO2rnDHrOWHF4vGKsuLxQOBzz/vCLH7Bs1VqMGTkY+xlHlvh5PY8Wn2FaQMYiSotEQCIgEZAI9CsENjrCy2dJA76AADmMMAIIIqJEYLHZoaouNK1qRrgxSiQuAymuNCJ8pFLccKekwOFwQrVaoKgK/ES4oglOYCwx3ZWklu0VS+ZEReK0c04+FDM/ewpvPnkDPnvtLrhdTvDEHDGkfGvWVgvyyqTwhw8ewX03no93nr4J/73qdJHLC29+KkxTq6yuF2TmxQf/jenvP4xXHrkOTJT++8BLIsorj1yLFx64Gg/fejF+/OhRDCzKE/6mdtyhewjr2yTtExZDe/fj74Tt+MP3EuaGao88964gHnf95xw8dffluOf6c/H+8/8V7f/vAy+DSSGXkZeThQ+evxVfv30/Hr3tEjx0y0WY9ta9yM5MA38IxHFYra2sBRNF9v/89bvxzD1X4Ln7rwLbObwn6sFn3hFSO5bgmuqm+14UWTCBepGksNwfn7xyJ+645izRd6efcKCQuL/+/jQRL17z+QN4+L8X4Zt3HsCnr96FAYW5eOyF94m41uO2f58p+p1x+PXTJwSJ41slWBrKeTz07P9EvIvPOAofvngbuN++ffch0f+52RkcBTttO1Hk/d5z/8UDN18g8HzryRtFWPzi4dX39Lq9RWONcXzizv/Dd+89hCvOO55wd+DkY/bDCUfsLdLde+N5YpzxWDOJnAhop+23x3bC5380PpjkCgdps+YuFvU+4oBd6dmyY9acxWBifvA+O4p8ub1cX67ndrQAoyTd/j40FkK777SViHvovjsLc9r034Rpah9P+1mQcF7UffHGvQIzxu7lh68lkj9svdpp5t3e5LH709RHxXi7jzDj/uVyedG1YlVF++jSLRGQCEgEJAL9BIGNjvC6nE6kZ6QiGA7BE/QgEPXDRn6paVmwIQWrvyhH458tSMlMhyPFQWE2WF0WWEipDhs0FQhFwsjMzYTVZkUi/wrysoi4FK6jigtz2iRncnvhaUcKQsABOVnpQoLL9roGfUt0mjGZn3r8AUhLdZOUOSrUnrtszdHwl7FdLByksQSYycy2W44RpJClwUtIysXE+ZiDSzB54iiKpf9sVgssJLHVXbrO4Zzmc5I+m6SLpZYshWNyvOM2E/SIG6CHaeHAUtsRQ4qFpM/MiongqcftL4jwbyQBZ3+WJo8cNkBsuTOJ+Gr67/ji21+RlZEm4vGxAI5X+tMsNnDwPju1IfFOR8+l0c0tPiG9ZQmuqRqbPCL/b37Uyzn7pENEfwhP0s765yGkA58YUlfhMDQmjdxf+TSGBg/Ih9VqFWSd8Txo7x3E8ZEILXAcdruQdHOyZbSjECacePud4512woHsLZRKCzAmkoeQdJk9eLxx3ozF738tEjsHs+cv5SAsX1UuTNZYyskmS9fZZMX4nnLs/gJPdvdUZaSlgD/Q5PH1B5FaM71JtI88cFfhpRrjrJ6ksVxP4UkaS5nNdpCz0x8f3+H+Zwl/Oj0HHHGf3bdlAyYRFg7Spk77kXTgyvNPiD1b7LE1SbX5eWN7shQvfFRFxVJ6xr796U988Pn3YoHM+TMmbEolEZAISAT6CgFZTt8hoPZdUYmVZHPaMHDYID7EgGavB4FwEClpacjPH4iBxcMweNhIFI8YjIyBmbBlOqGkKtBSgYCT5MHOCHwBP1pCzRg6ZjAURUmo0IvPOJokq9euo5iIdpdBBm2PcxyWRkHcOAAAEABJREFUTLPJEz2bvFU/aa/TYKqdDjmfvVFWUS1MU3O7XKY1Zq5YXSns42kLW1i60BRFwUlH7ytifGBI1KbRVjJvAZ98zL5gsiUCN0BjKTQn76g+o4YP5CBqV40wI0QEH3vxA2yz31lC6spb8nycgKWFHMG8WWG5IU3bYevx7L1B6rpLThLScZaQm4qld5yp2R8jaZue3aZKTXGBFwrzF680vYTpJom93W4TdlOrrKkXViZEW+51eqxPuW+feOlDEVZFcUycthw/AiZZFYHtNCaQl934GHhM8JlctrOUvF00QUzZ77x/3y+Ow/znrufw9Q9/gI8AsP/6KvNjPj6ewHl4aUeFifrEMcMwlrb62W/rLUaLBRif0eZ6nnPVvXj8pQ9gHsnhOF0p81q58aOGEolfK5RKYzWbJP1MtMvizhDz0RDGfVBxfldZJiVsGi1Idz/yIhx6yrVgXK+781l8RYsyzjyqaWxIJRGQCEgEJAL9EIGNjvCqUDBuq3Gwux3QIho8Xi9UuwU5BTkoHFqEopGFyB6RhfSh6UgbnAJXkROWLAuiTg1hWwTNLS2w5lgwdPwQqPS/t/tMpUk8vgyPxyecLJm66fJT0V5dfs7xIrwrLRAMimCvzy/M7rQD99pBROEPw5hwvvnhN8LN0lNh2UCNP5DiLGwk6WQzXllJ6szuQECv8+NEdh99/j2wNJiPD/zvmZvx7bsPgs+AcjxTNTa3COuwwYXC7C3NrFd7EsvlOQxi2x2BNPuBCWH7/jTdk8YPh98f4Gzh6EZKff6/HxBS3RLa6n/ktosx9aXbxXGVbCKDIgNDY2L6zD1XYPvJ48TxCz46wx8pHn/OzQiF1/98Ou8KsBSa8+O2mR/LHWscj+HibdSvH79yJ3ingiXS03/5C4889x72OPoSktjP5Chdqg9IcsoRHn7uXRx88r9jqo4kxuz/RemvbAjFEvq0VJew96bGH9Bd/J+H4Q+EhDSZjwvxsRVeMPVmuTJviYBEQCIgEfj7EVD//iqsW4OxRHjHbDMGNosV4WAIwVAQ7mwnMgamIW1gClKKXEgd5ELGyBSkD0uBM8cBh5MIMvGNFk8zRu4yHJl5metm3Ac+QwfpBI7vgD364N3RXh241/bd1qIoP0fE4e1uYelG421qLocljC+89RlYgsbkhf27SZpQcHFBroi3urxKmPFaRZX+Bw/M4x98fIHDX37kWvDWN0sM+eyqSYw5jFVhXjYbMI9hCEcvaIMG5Itc+cywsBgaLwz4YzqW8ipK1zsBfHSDk9ls1nX6k3FnNXhAAYoL8ziakGYKSwca32LB/cPkmc8477HTZAwbXITO+mrHbSeIs82zpj2Llx66Rhxx4aMC7T/S4vZ0UFyHXoqi4IQj9LPdLN3kGyk44n4lU9iIKT6KcPk5x4nz2LxoueI8fbH2/Jttz6HHEhiWeYtWCII+iSTdnD5eXXDaESKWWSY7xowYJM4Pe0nSzO6uVGft9CawOPzO+NjzvhvPx7+O2U9gmZ+bifU5RtNVHWWYREAi0EsIyGwlAhuAwEZJeFmSeAxNrqpTgY3+B0jKG4r44cy0w1XghD3bCluWFY48G5yk3GkOuCIO+KuCsA9Wsc/Je5FsV90AWNY/6ZbGHzvgL97bS+F4UmYpU3e5jxs9RET5aea8GHkKRyJgkmBu0YsIcdrRB5cI131PviXMYw/R3cKxgZrLaQefffx11gIsXdl6xpTr9JYhTR4/eqgoZa1BgOMl300tXixf3faDoFHD9KMQH335k0hnanzzg2lPhjlp3AiRzdtTS4Vpal99/7s4U7zNpDGmV6cmEyImb0xUpxukKT4y+/OtCYwT30bAblbxcTh89ryl4FsG2J/JM5um4n41pZ+mH58vNscQS1y3mTQ6dmZ4xeq1IhrfOMGWiqpaNhJWB++9o4jL45THJJ8X52MewpM07gc+50pW8eNFy4lH7gM+esDtEJ6daOZxhjNOOEhIiFlKbKpzTz4MTPZ5HC1evkbksNXEkcJ8zfhITzhI42NC00myTFZ01c4i2v1h7HjBx3FZ8dGL+PqzH99gwabNZmFDKB7D7Y+1iACpSQQkAhIBiUC/QkDdWFszabstcfy//4EWfyPQFEGkJgpL1AZbmh3WVAtUtwI1XYElTYFds8NXEUStswLH3XQY8gpz0ZN/fHsA39TQXvGVUz3Jh+PuPGWiuEKMCc9Rp/8HfK0TfxHPVy7xnZ+vf/AVR+tSZaSl4JIzjxaEjLeD+TozPjvK5w07S7jF2GGClHI4b1kzQWV7oooJQ/v2m26fP4hLzzpGZHXKxbeD8froix9x1uX3iKu5jj9sT5jnL3fdfpKIx3e9Pvv6J+LaroP+eRXakyT+gCmbtvCZMPM9wnxFGV/lxucqRQbrp62TarcdJoHJKl/xdfVtT4kr3LisS294RMTlWzeEpRvtuotPEjHOueo+3HL/S2Lx8dQrH4HvSf7nBbeCz/ByhGsu+icbYD8+2sE48ZVz+//jSvCHfSwJ5nbz+OBz3iyR57ttuY9FwjiN72Q++KSrxdlZ/iiRb4N44qUPBOncz7hSiyXonITH6svvfAG+e5YJLPt1pZjA7kcSXT6XzPHMj9XYzmouSWn5nCtjxosFbse/CT9etJ33r8M4SocqEokKbDhwJ3oW2GyvDt1vZ+H1+Tf6sQYmxkyk73/qbfA55Q8+/0Fc48Zj/9V3vxBxu2rn9luPE3HOuepe8JjjM+N89ILPBosAQ5uypb64ueHu58XxDO6fY868QVxRZ0SRhkRAIiARkAj0UwTUjbVdChTscvBuOOamYxBOD8CzKgKt3g4lHEFUCSMKMkMRaI0RVC6uQ01RJQ66bQ8MHDmAUiqJNYu2djkikzGeZNsrJhkc3pUyPwpT1FYo777+XPDWLUs7mYhcf/dzeO29r5CW6sbeu24jslOUrut45okHg8/A8jlPt8sJPh7AW9p89pLJisikncbkjr3+ccTebPRYtW+/6Q6Fw2Aiz1do8flHJnxMhPjuWJbcXXX+CbGyrr7gH0KCx4SOpc1M+PmmCD6HypEURW83n5995t4rwedD+R5hvqJsxh8LBNGPj8f2TpWRV7w0uX1cRVHwxB2XgckdkzYm11wW4/jGEzfAPK7QPl17N99OwFfRTZ44ShD+6+58Fg8+8z9xTzKfT87PzRJJpmw1Fnz1FR+V4I/3GCe+55XvbWZJJkt2H7zlImQT2eebL/hOXf547PxTDhdEVmRiaLyQ4IUIn5297MbHcO8Tb4kx9NAtF8IcA3yTAR8Z4F2ROx55DUwa17T7MNLIbh3jiAN2E3583poXBcJhaONHDxFb/owZE0huB/cTS4LPoLFpRFvH4D8BzHXmmyBY4r1OBPIwn4GPjD9CkZmRCr56jccYnyvmhRb3EY+7fXefQimArtp50lH7ivHJBJfHHBP0E4/cW/hxYn3EAUcdvDu4Xkzy+QM87h+n0wE+/iPimRHZIZVEYJNHQDZAIiARiEeglaXF+24kdoWo65jtRiJU/AdmrrkRs2bdh6/efQCvPflfvPTILfj8hQfwxyf3YVH1NRh9VDUKhuZRisSbdOwhJZhb+kKn6uu3748hwWSH712NeRiWa0nyx3kU5WcbPhBnAnnrdsYnj6P0fw+Ie2l/nvoY+B7YQ427SFPcTlHufTeeF0vX3sIkl8958jVZTH75ui+erPnMZ/u44UgETJyYHO+969btg7t0c7u4DZ0pPsvJGfAfSfjl48fF3bR8J+vvXzwNJlvxH4Tl52bi9cevF23lev/44aPgO1z5jl3OPyM9hbMSasyIQZj25n0iLn+4xRgx0ed4/zXuLRYRO9HM/mOcOokivLnM+248H1x3rhN/qMT3/W5BUnERwdA662MjWBD5Vx65Fr99/hQ+eul2qvu9+POrZ0X7crLSzWiCaHFf873J3K6fqO85bybLHInJ27S37gNjyGrmZ0/hPCK83A/s5jis/u+cY8FjiM/PvvvsLWIsTX3pDvFHOjjcVLzo4DuhWX3/wcNgUmqGdWXylXiMNd952z4eHzvgtnLduA2s2H7j5aeAFyvt45tubhvneevVZ5he65i8yOE43AdmII9pXigwtlwWt/krwujIA3VSzvE6aycfxXjyrv8T/cEfSXIeLGnn/LgcXmhyeqvFAq4X9z/fJ/zV2/fh9cf+gxsu+5d4FvfYaTJHE4rbzmmFQ2oSAYmAREAisMkjkDg7/BuaGo1G8dPHL2HVr29g9NarkDvpN7T438XcBU9gzprHsdLzFEK5byJn0i+oW/0sAk3zsDHdLKQoCvJyMsFE1Zx0E4Vx+i+zaav1S7DErLyiRpj/d9NjIrl5K4NwGNrX3/+Byup68TFOPAE1gjs01sdTVRUMHpAPPqvaGfHhOCzhHDdqCJhsdlWOGZcJT/s7hrtKtz5hTIy4TkzK1ye9mYbP9LLEtqggp8vrx/hoCrfLXDCY6dlk7BhDVi6nnb06VIqiCGkuLw54LDFeHUVUFEUcK+H7jjsKX18/rhu3gRXb1zefRNMxtlwWS7AVRVknmaJ03E5FUcD9wUcfOI91EsZ5cP+zxN78cDIuSFolAhIBiYBEoJ8ioG7M7apaOQ8z37oLWwxPxTaHbYWR+4zGkMmZGDk6G2PHD0Da0HxgoBvZIwsR8lZh6W+PQ4smdpXXxtxurht/1HPbQ6/iH+fdgn2Ov1yYfDbz5GP2w97GsQiOZyrzy3m+ysr0k6ZEQCIgEZAIJISAjCQRkAj0cwTUjbl9mQVDcNTVT2PSCddDHXAYgtatieSWYNROh2Hk5IMwaOzByBxyAtIGnoWh292KIVteDEW1bcxNSrhu/Je5Hrv9Ulx3yUni2MA9158L3uqNPy9rZhYMhnDMwbvjwVsuFFIu01+aEgGJgERAIiARkAhIBCQCwEZNeJ3uNAyeVIIhU05AzsDDkD34WIzd+yrs8a87sNeJ/0XJEVdjwi7nI2fokcgo2Blp2aOgKJbe6dc+zjUjLQW777glTjh8L3G10wF7bi/ua+2oGnyEgc86diT57Si+9JMISAQkAhIBiYBEQCKwOSGgbk6NlW2VCEgEJAISgQ1HQOYgEZAISAQ2NQQk4d3UekzWVyIgEZAISAQkAhIBiYBEoEcI9BLh7VEdZGSJgERAIiARkAhIBCQCEgGJQK8hIAlvr0ErM5YISAQkAgAkCBIBiYBEQCLwtyMgCe/f3gWyAhIBiYBEQCIgEZAISAT6PwJ/Zwsl4f070ZdlSwQkAhIBiYBEQCIgEZAI9DoCkvD2OsSyAImARCBxBGRMiYBEQCIgEZAIJB8BSXiTj6nMUSIgEZAISAQkAhIBicCGISBTJxUBSXiTCqfMTCIgEZAISAQkAhIBiYBEYGNDQBLeja1HZH0kAokjIGNKBCQCEgGJgERAIpAAApLwJgCSjCIRkAhIBCQCEgGJwMaMgKybRKBrBCTh7RofGSoRkAhIBCQCEj9BCfMAABAASURBVAGJgERAIrCJIyAJ7ybegbL6iSMgY0oEJAISAYmAREAisHkiIAnv5tnvstUSAYmAREAisPkiIFsuEdjsEJCEd7PrctlgiYBEQCIgEZAISAQkApsXApLwbl79nXhrZUyJgERAIiARkAhIBCQC/QQBSXj7SUfKZkgEJAISAYlA7yAgc5UISAQ2fQQk4d30+1C2QCIgEZAISAQkAhIBiYBEoAsEJOHtApzEg2RMiYBEQCIgEZAISAQkAhKBjRUBSXg31p6R9ZIISAQkApsiArLOEgGJgERgI0RAEt6NsFNklSQCEgGJgERAIiARkAhIBJKHwN9BeJNXe5mTREAiIBGQCEgEJAISAYmARKAbBCTh7QYgGSwRkAhIBHoPAZmzREAiIBGQCPQFApLw9gXKsgyJgERAIiARkAhIBCQCEoHOEejlEEl4exlgmb1EQCIgEZAISAQkAhIBicDfi4AkvH8v/rJ0iYBEIHEEZEyJgERAIiARkAisFwKS8K4XbDKRREAiIBGQCEgEJAISgb8LAVluTxGQhLeniMn4EgGJgERAIiARkAhIBCQCmxQCkvBuUt0lKysRSBwBGVMiIBGQCEgEJAISAR0BSXh1HKQuEZAISAQkAhIBiUD/REC2SiIASXjlIJAISAQkAhIBiYBEQCIgEejXCEjC26+7VzYuYQRkRImAREAiIBGQCEgE+i0CkvD2266VDZMISAQkAhIBiUDPEZApJAL9EQFJePtjr8o2SQQkAhIBiYBEQCIgEZAIxBCQhDcGhbQkjoCMKRGQCEgEJAISAYmARGDTQUAS3k2nr2RNJQISAYmARGBjQ0DWRyIgEdgkEJCEd5PoJllJiYBEQCIgEZAISAQkAhKB9UVAEt71RS7xdDKmREAiIBGQCEgEJAISAYnA34iAJLx/I/iyaImAREAisHkhIFsrEZAISAT+HgQk4f17cJelSgQkAhIBiYBEQCIgEZAI9BECGx3h7aN2y2IkAhIBiYBEQCIgEZAISAQ2EwQk4d1MOlo2UyIgEdjkEJAVlghIBCQCEoEkISAJb5KAlNlIBCQCEgGJgERAIiARkAj0BgIbnqckvBuOocxBIiARkAhIBCQCEgGJgERgI0ZAEt6NuHNk1SQCEoHEEZAxJQISAYmAREAi0BkCkvB2hsxG6t/QEkQoom2ktdt0q+Xxh+ELRDbdBmykNQ+Eomj2hjbS2m261YpGNdQ1BzfdBmzENa9pDEC+YTfiDpJVSwQBGacDBCTh7QAU6SURkAhIBCQCEgGJgERAItB/EJCEt//0pWyJRCBxBGRMiYBEQCIgEZAIbEYISMK7GXW2bKpEQCIgEZAISAQkAm0RkK7NAwFJeDePfpatlAhIBCQCEgGJgERAIrDZIiAJ72bb9bLhiSMgY0oEJAISAYmAREAisCkjIAnvptx7su4SAYmAREAiIBHoSwRkWRKBTRQBSXg30Y6T1ZYISAQkAhIBiYBEQCIgEUgMAUl4E8NJxkocARlTIiARkAhIBCQCEgGJwEaFgCS8G1V3yMpIBCQCEgGJQP9BYPNsic8fRFVNA8KR5P0xH38giFAovFEByu3jPwLTUaWCwRAqq+uhaev+GZMAhTFGHaXrqZ/H6xdYd1QO51VT14jOympq8aKz+nPa/qYk4e1vPSrbIxGQCEgEJAISgb8Bgc9LZ2C/E67AtvufhT2OvgRb7nU6zrnqXsyet1TU5j93PYcJJafE1K6HX4inX50aI4UfffEj9jzmUhG3vXbG/92N+596u7333+ZmEnnkaf/BJ1/93KYOTDwfe/EDTN73TNGW3Y64CH8a7eeIr733FQ448UocdNJVeOa1j9lLqNr6Jkw54BxBkoVHNxoT6qtufRLbHXiOwHqvYy/DXwuWx1KtKqvEgf+8CrsfebHoD8Y+FG5dgFx7xzM4/NRrRV2m/zI7lo7th/7rmlifxAL6gUUS3r+5E2XxEgGJgERAIiAR6CkCtZ4gav4G1Vk9mchdduNjOOPEg/Djh49i1pfP4J2nb0JeThZ+/n1eLNl+JVMw7a378Omrd+G8Uw7HA0+/g/mLV8bCO5Pi3nLlaTj52P1i8f5Oyz1PvClI5NKV5etUY9bcJXj0+ffw8sPXgDE4fP9dcekNjwhJKktTH3/xfTx+x2V44YF/CwJvtvf5Nz/F0QfvjoK8rHXy7Mjjy+m/4cdf5wiMZ017FrvvuBUuv+kxsPSY4//3gZcxduRgzPzsKUx96XZ89s0MfPb1LxwErvcX387E56/fI/rg1Xe/FP5cvwef+R8uPuNoKIoi/PqTJglvf+pN2RaJgERAIrDpIiBr3gMEpq+ow7fLavtcRaLrbtHz1vitD76Mqy/4B445uAQZ6Smw2awYN2oImKiedPR+sZa5XU4U5Wdj8IB87L3rNsJfVbunIm9/VCoIHidgSfDlNz+OW+5/SUhFT7rwNvzyx3wOEoqlr3c88hpYgszSSiZ07MeBL7/zhZC8TiBJM4c/9sL7MWkm58t5fvjFDzjrintw92NvcJJ11BknHISv3r6vQ3L69fd/YMdtJ2DrLUYLDE46el8htV24dBXKK2tQ19BMbS/AwKI8ke+KNRUi/M0PvsFpxx8g/BLRXidJ8REH7CowtlktuOj0I7FmbTWWLC9DY7MHPxAZ5rJdTjuGDS7C4fvvjC++/VVkPW/hCowZMQicbuigQvw2e7Hw/+aHP4S55y6ThdnftO5HWX9rsWyPREAiIBGQCEgENnEEBqe7MCSj71VHgr+lK8oEmgfutYMw22tMuky/xcvWgKXBz7/xKS645kEiYrsI8mWGd2aupC366tpGEczb/5+StNLlcuDhWy/C8CFFbcjpnUR2//hrMe6+/lxce8lJePXdaZj23UyRtiAvG9ddcjLef/6/uOnyU/EoEd7vfta39DnfNz74Gq+//zW233o8JowZJtK01zIzUlFI+disVrT/t7aqFsOIRJr++bmZwspnmovyc+Amwl9GxLSC4nHA0IGFeOa1qfjHEXuRNDxTnMflc7kc1pViyXD8QiErI01E5/JrahuE3STV7Bg8oABrq+rYirGjBmPh0tWIRKJYXVaFbSaNEvYHn3mHpLtHCenuKvIPJ/EMtij4b9Y2LcL7N4Mli5cISAQkAhIBicDGgECqw4q/Q3XU9qqaeuGdnamTrhaPD7c99EpM8blQEYG06roGzPxzIf5asAyqRSWp40yYkkUKTvjHUtTLzzkOOxAxPeXY/cWxiMYmj/hA6+2ppThs/12QkZaC9FQ3dp4yEV9O1wnvvrtviyEkXV6weBVYusp1ZtMseNL4EXj1ketw+gkH4sC9tje9EzabSLrqdDjaxGeS2+L1wULtPfukQ3DqpXfguHNuwnn/OgyVhN37n/2AU47bHzfd9yKOP/cm7P+PK/AmEW/OpI4kwvc9+RZMNW36b+yNA6hufP75hTc/A5+djpdGs8SdI9ntNjaEcjjsJF1uEvaRQwcQbuOwz/H/hzsffQ3HHron5fEr0gmvrSaMxAnn3SLOXu97/OX4Y44u/RUJN3FN3cTrL6svEZAISAQ2SwT6S6N9oQgC4Wh/ac5m2Y5Cklxyw00JrKIocNjtQrEkdt6i1jO6O207EffdeB6p8/H6Y//BaSccAD4Ty+nXV6W4XSKpLxCAKTl995PvcOuDrwg1n8it1WIRcfiow6GnXEtE+1cigM3goxdRknSKQNJS3E6o6vqfX2XSGAgGKafWn9fnR6pRxzP+cRA+fuVOTH3pDpx/6hF48uWPcOrxB0Ch/299+I3wv/+mC/Di25+LDCIkZeWbFkzV3OIV/v86Zj/859KT8ePMOfjfx9/B6w8If5YiM8lnB0uB2WQVCASRnZnOViiKQpLxiwn/6/HVW/dhtx0mgaW7F55+JGbOXgiLquITquOxh+yxzkd5IoNNVFM30XrLaksEJAISAYnAJo5ASyCMzxZWYX5Vyybeks27+nxGlBF4/7PpbIBJ4/+dcyxYjRo2UPh1po0YMgAr11Qi/gaBzuIm4p+dpZO6m684Fa88cm1M3Xfj+eAjC3yG97n7rxKEjyXEo4d3Xb9EyoyPw4RzxeqKmBcfZWCHebSB7UxI+Zzz8lVrwR+T/fOofbBw2WoipGlwuxwYUJgrMGGinJeTidv+fWZM8bldzkNRFBx/2J546u7LhSrKz6a0TgwfUoxcSsNxVpdXsSEU14njCIeh8QdydpICf/TFj6LM7SePI0n5KgwZWCBiDCJJ+Ox5y4R9I9cSqp4kvAnBJCNJBCQCEgGJQLIR8JNkl7+BavSHkp21zK8PEWACd8W5x5OU8H944a3PwNJIPv/JRJbt8VVhEsdnSVeVVYoPqx557l2U7LQVbFZdAstxmQjGK5+/rcSU43Sm+BgDE7fbH34NXA4T6TkLlwuJKUtzOV15RQ34nOy3P/0Z+2CL/RNV3DZTehoKh9vcD8wffPEHY7//tViQ+Jfe+Vx83DZmxOB1sn/8xQ/ARxwYP75RgY8vsASXcRtBxJWPQqyTyPDgGxWqaxvgJ8ktHxl5+tWPxYdrLqddHOXYcdsJePmdL8URD8byQyK1++4+xUjdavD1Zg9TH7B0l30njB4qPnzj69VWECHfetJo9u4XShLeftGNshESAYlAlwjIwI0SgSBvJWtASyCEIJHfjbKSG2mlclJsyE2x97lSSbLYESR8I8B/rzodL9FW/O5HXizu4OV7YFlKuNOUibEkn5f+ir2PvQwHnHgVrr71SWy71VjcdvWZsXAmfQef/G/EK762jMuNFa0A7DYTmf4KFOF1+zVnITXFJcrZau/TcdzZN6GxqQVMLC87+1hcd+ez4v7ae594E3yGV1H0dJw8Pl+RWQfalbc8ia32OUPcisB5sZ1JJUflM7DnnHwoTrrwVnDZb37wDe694bx1jknwh37f/DhLfKzG6Zio89EGxuyaO57GmScezN6dKn8ggJKjLsE2+52Fa25/Glecdzy4D8wE1170T8wlos93IjOW++6+LfbfYzszOGZ+8MUPYDI+eeIo4bftlmOQlZmKvaiPuK8O2WdH4d8fNEl4+0MvyjZIBCQCEoFNEAE+u0t8F/6whhpv4lK8TbCpSa9yDpPdVCK8faw6awh/kMXb7V+/fT9+nvoYpr15L/786lk8etsl2GLsMJGMryibW/oCTDX9/Ydx/aUng7f3OcIh++4UCzPjsLn1FqPEEYSz/nkIRwN/pMZb+cJBGm/7czzeoienkKg+fsel+O3zp8D14XpcdPpRHCQ+RpvxyRPgu4A/fPE2fP763eAPxjiwfb7s15G678bz1qmneaxDURRceNqRouwv37gHv3z8OEwyibh/I4YOwK+fPiGOIZjel59znDg7+/lrd4OxMP07Mln6y1ejfffeQ2Acjz2kpE00rg+3jdvP7b316jNgSrjjIx5zcAkYK9OPj6M8cef/4X/P3AzGZ/zooWbQJm9KwrvJd6FsgESIkeV4AAAQAElEQVRAIiAR2DQR8IYi4g5U/uurYT7bsGk2Q9a6HQJpqW4UFeTA/FCsXXCfOZ0OuyC/7evBpK79edZkV4rLLi7MXUey2105jF1HxLSjdHw1Wo5xZrmjcPbjRQC3l+09UeY1Zz1Js7HHlYR3Y+8hWT+JQJ8jIAuUCPQNAv5QFFaVpyENHf1Bg76phSxFIiAR2BwQ4DfN5tBO2UaJgERAIiAR2MgQ8ATDsFsUaFQvSXgJBPnb+BCQNeo3CEjC22+6UjZEIiARkAhsGggsqGrBB3MrUOcNYWapE689mI7qGqa9m0b917eWs8obUbq0dn2Ty3QSAYnABiAgCe8GgCeTSgQASBAkAhKBHiKgVlZgzKP3IGX5YjxzVxrefy4VP3yv9PtjDY2+MOp9wX7fzh4OBxldItAnCEjC2ycwy0IkAhIBiYBEwETAtnQJxj5+LzIWzIXHo18JVVYXxLyqZjNKvzQbA2HwB3riOrZ+2ULZKInAxouAJLwbb9/ImkkEJAISgX6JgOLR/7KaxedFY51OeLWQBSwB7ZcNpkb5w1GEo1GyASG+f1jYpCYRkAj0FQKS8PYV0rIcgYDUJAISAYmA0uLRQWj2weCA0IIq6nwh3b8f6k1+XbrLJ5WDEdb7YSNlkyQCGzECkvBuxJ0jq7ZpI9BC25e/rWlErUdeqL9p96SsfbIRULw64U1dtAgaFFyIhxEhCW+YJJ/83CS7vI0hvzpvEFE+z0CVCet3DpNN/iQCEoG+QkAS3r5CWpaz2SHA0qqV9V5UtAQ2u7bLBksEukJAbdGPNGQ21IloRViLhbOt+PRNNxpJEio8+5nGH6spRO6Z8waJ2Pez5snmSAQ2egQk4d2Yu0jWbZNGgCVVLNHxBiObdDtk5SUCyUZA8egSXltNtci6GOWY9rENz9yagRdeFF69rjHp5L/01usFUQGBcBT13jBS7FZyyTO8AgSpSQT6GAFJePsYcFnc5oMAT6a8c+mRhHfz6XTZ0oQQUI2P1uw1FSJ+ASqF6XRpmPZZx9OSiJBErboliC8WVaO6D3Zgypv8CEYisIdsqFijwheSi+AkdqXMSiKQEAJ982ZJqCoykkSgfyHgCUSgUZM8fTy5LaxqQUVzgEqWP4nAxomAKeF11etEd2RuHS66Mogxk8Ko00859HrFvSH+iExDoA8+IOMbGkK0+p36uhsXHlSA19/kN0OvN1EWIBGQCMQh0I8Ib1yrpFUisBEg0BwIi1oEwxH01UcqlSStmlPZDFaicKlJBDZCBCyGhNcS8ovaDXDV4tzLgkhJAVqalD6RgPJZWuKgJHmNijr0puanRa9NVbC2TBHFzPtTP9ogHFKTCEgE+gQBSXj7BGZZyOaGAJ8PDEWjcNut4qL5H1bUoQ8ESWjwhcSX4E1k/ryqfnODvd+1t8lYNPW4YRt5AvOjNbOatmZ9rGZkAD6vCr7CywzrLZM/JuUz9ny+trfKMPP1haOwqtSuRp3wLp1rgzzqZKIjTYlA3yAgCW/f4CxL2cwQWNsUIOIJZNhVcayBryQK9cGX2UKSrEFIlPncoJ+ky/0Z+h+W1+HThVWYVd7U75o5q7wRXy+p6ZOFUl+Dp/q8bYq0NtRBbW5CWpoGX4uCIIte28RIvqPZHxGLUd6BCRAhTX4JrTl6ScJrJQlvk0l459vAfq0xpE0iIBHoLQTMfCXhNZGQpkQgiQgEiGiy9CjLZSPJjiImVr5jNIlFdJhVlIiCxaJiYLoTzLTJ2WG8/uJZ5QmCt4v74sOjvsYsRFsCPIaaA6G+LrrXy2sv4eUC7UsWISNTg7dFRaiXCShBi2BE/3BsRb0Pf67t3QWTLxiBnZ7LlmZdwuslc95CvXxuu1QSAYlA7yMgCW/vYyxL2AwR4PlaVRTYLBYUpTmYeyLUB+wzSlgTvYZF1SfWMM/s5Ncff0EC+a8ZNvxzp0I8cGPKBjRx40waoh0BPmfaHOh7YuQLRVFDi4neQsZi/OGJ+PztSxYjvwAIBoC6Rh7J8aHJtfuCYbEI/ekLJ04pyces3/TnJbml6LlxP4ajfKRBQUszMGa83raZv+nhUpcISAT6BgFJePsGZ1nKZoTAguoWrGr0weCcZOqTabgvCC+VoSgKVCpcA8TRBjL65MfnTet9fSeNDBMbrCqzQEjLfrf3yYKiT4A0CuEFkpDw/g1/iGFBVTO+X17Xa+dMLcY9vEZTheFYyoSXRy2wZo3w6jWNz9RySd+8lYGGGhWz/7D0Wll+WpjRUKXFr4LGegVbbhOBw6VRmf13+vX5g6iqaUDYkKInA1x/IIhQKJyMrGQeXSFAYU0tXnz69S9YW1lLrsR+vLtYUV2XUJ9/XjoD9Y20+usg61VlVaJsr49Wvh2Eb4hX/33iNgSVzTDtnIomMcFthk1PepP5wzEvSZAUKCJvi26AJT3Coxe1CM2sN5+djf23zhClREiyJCx9oM0ub8KPK+r7oCS9CP4w0OfRX2FMfPvb3aYswabuRJM/JM7xRmgxo7e89/XmQJjK1HrtnKnq98YasdI5GsERo2BnwlsYFf7lxm0GwtELGo8VxvbPmTrRXVtuPKS9UJYgvJSvzaKiplpBZpaG4WPCmD9HH7sU1G9+TGT2O+EKbLv/Wdjj6Euw5V6n45yr7sXseUtFG/9z13OYUHJKTO16+IV4+tWpJG3n5Qfw0Rc/Ys9jLhVx22tn/N/duP+pt9t797l7zdrqWP3j2/LA0+/0eV16UiDX+7IbH02IkD5IbXnv0+nIzdbnkTsffX2dNv/zgltjxX/705/Y/qBzsdcxl4k+f+uj0ljYs69/Ah4XMQ+yXHbjY1ixWr+Dm5xtfnk5mXj13Wl44qUP2vgnw9H/nrhkoLKZ5dFIEqTFNV5UewLo7Y83Ngdomdgyz7SQpPWlp204bNd01FSo6AvCwpxo6Twr6mr0CZwJcJIx7zQ7D5H8QDiC9+ZU9Mm1Unxcw9Ost9PboqCquu+3/jsFIwkBphSy2hvE9GU1+HJxTa8R0PbV9RtSSe7T9mHJcMdLeBdnbK0TXj7SUKgTn4q1ySil8zz4PeeJEzDVV6u99u4zyXU4oI/VtAwNxYOjWFvWv6bf1977CkxkzjjxIPz44aOY9eUzeOfpm5CXk4Wff58X64z9SqZg2lv34dNX78J5pxwOJorzF6+MhXcmxb3lytNw8rH7xeL93ZZn7rkCU1+6PaZOPmbjqVtH2DST1Pbz0l+h8STRUQTD709anHxIC4//XnUGbDb9+jyNVoclO22FT165M6buueFckYKl+Zff/DguOO0I/PnVs3jwlgtx070vgAk2R/hz3hIsXZn4A+1y2nH7NWeCifLCpas5i6Sp/vXEESzcqZ2JyilY/jpAYCFtwQuSRnNNC5GWDqJIrx4gEApr4BeEomiYO9uC1StUtDSpJOElgHuQz/pE5W2lcEifWF+5Pw2VdZH1yWa90nhDUbz8QBouOyYXdd7eP9rAR0T83tZX2OJlvY/vegGzHon4eYzQqsmiKiSRidJiNAgmn/znqtcjux4nEdJlaFRm74wfa3MTQplZol5/DtwPwZEk4V22BMUD9T6s6EWJKxfKYyfg158TdtN8Dib5bE+28tNzoRGWQY9OHjIzgdy8KOqq1D49cpTsdsXnx1vgtz74Mq6+4B845uASZKSnCLI0btQQMFE96ej9YtHdLieK8rMxeEA+9t51G+Gvqq3PsfDoQHubpIY//jpHhLAkmEnWLfe/hCkHnIOTLrwNv/wxX4SxxiTsjkdeA0uQD/3XNSQx/BLsx2Evv/MF9iQpMktnOfyxF94X72sO43w5zw+/+AFnXXEP7n7sDfbuUA0szsOwwUUYNlhX2ZlpiESiQmLN+XO9rr7tKTQ2eUT6JcvLcNzZN+GPOYtpYfAoTAnpzD8XCn+Of83tT+OvBctFfNb4SMFlJA3lenKetz30CnujqzasLq/C+dc8IHDhdFfe8gSCwRBYus6JTzjvFlHen0Rs2d1ePfXKR/jHEXshP5cGalxgWqobQwYWxFRhXrYInUG4e31+nHDYnrBaLOA+5Xjf/jRLSHZ/mjkPr783TZR53Z3PijSslf44C0efeYOo571PvBXrHw4bVJwv6vDsax+zM2mq+1GWtKJ6NyMG/MJrH8QOB5+HXQ67ENypNXWNnRb61fTf1xHR8wMQoIHBKxO28+CMz4BXoex/xuV3x3tv8nYmJ26bhR569NoEt8mD1IMG8NnSFLsFA9KdWDBHf8SCfvTJ5MZnPsNhfSJ///lULJiv23tQ/fWKGjGkBl++nYIVC6xYXRZdr3x6kohJoceQ8HK6JUtY7x+KPxpjEpZqt4pxk0Ymu0N99BFibS3w0r3pWLRYJ6BJRdVPDwNlGMzNJ52ei8xsIeFVAgHYVi5HRraGioreHbf8nAT9+rPpcABzZ9rFbR+iQknWfKEILIoK80qyjExNfJzHHK+mMbzepU2fDnz7bd+rjiq8dEWZ8D5wrx2E2V5jqZ3pt3jZGrA0+Pk3PsUF1zyIw/ffBWNGDDKDOzVXllWiulaf02vrm8Q5T5fLgYdvvQjDhxS1Iad3Etn946/FuPv6c3HtJScR4Z2Gad/NFHkXEFG77pKT8f7z/8VNl5+KR4nwfvfzbBHG+b7xwdd4/f2vsf3W4zFhzDDh35H2v4+/AxNPVm9SGo7z7qff4alXpuKckw/DfTeeBya5/7lbJ3k+fwBzFi7H/930GEYMKcYeO0/GqrIq/Ovi27HfHlPw2mPXobggFxf/5yGaizVxXvn0/7sLdQ1NuO3fZ+KGy07BvEW6JLyrNjBhZ8HHK49cS3U4H4qqIEQ7byceuTdXEf939rG4/NzjMHRQoXC31/j4CUvh2/szsWVCzouA32YvigVX1tQLEmy322J+3L6KqnpsNWGU6Ntdt58kyjTrwBG/+eEPnHb8gbj7P+eAMZ/55wL2jqn999he4BXzSIJFf+KTkNHfnQU/QIvoQfrmnQfw89THYFFVPPjM/zqtlkYrbl5pxovo2W43RPickAfnjD9aO+GFtz5j736lvMEIvCTVTXdYCRGNCO/6v4B7AkwdbdOubvD1JMkmEzdEq3yn1QIlbMWi+fojds0/8/Dmq5ZebwPzTibXZkHzW4ev6dUrJkvMaioVmAR0ybJeKaZNpiEif36vgsJinZQtXdq7JMksnMk9f5jYm7cYsLSRW5XrtmFkdgoClW689XgaVqwya9F7JpddX6vSVm0qLj01JfkFtbSIPAMxwpuDwKjRws++dAkys6OoN47kCM9e0LgPQ4aEd8SYCKrLLb12DMdPZMNuVUBCbdESPtJQVBylXR8F8xdGhd/6aAccAJSUACUlQEkJUFIClJQAJSVASQlQUgKUlAAlJUBJCVBSApSUACUlQEkJUFIClJQAJSVASQlQUgKUlAAlJUBJCVBSApSUACUlQEkJUFIClJQAXu+6ta0i0sO+2STlZLPF48NtJI001fRfdELJIceLEwAAEABJREFUYdV1DZhJUs2/FiyDalHxxbczweSHw3qidtx2Ai4/5zjsQMT0lGP3BwukWJrKkty3p5biMCLSGWkpSCfJ5M5TJuLL6Trh3Xf3bTGEpMsLFq/CijUV4DqzaZY9afwIvPrIdTj9hANx4F7bm97rmLPmLhFHNfi4hkkA3/1kOg7eZ0cce0gJmOSdc/KhYOEa18vM4KMXb8f5px4h8p/65Y+CLO64zQSEaZzsvuOWqKyuB2/lM0Yr11QKUs55cRiTWM6nqzZ4fQE4iHxmZaRhylZjcee1ZyPF7cTYkYM5KbbdcozwZ2yER5zGu+N1Dc20gCiO8wUmjB6KIw7YVZDk1WurcPJFtwnpLUdqavaAuRTbTeVw2MG77QV5WcjOSsNAkthyXVjib8a56YpTBb4lO22FPXeZjJ9/az32wnGYNHP7uT/ZnQylz8bJyOlvzuOzb2bg6IN3R35uJlj0ftLR++DdT74TK6XOquZ02MRgY/G7qRSlddLk1cgzr00VycsqajD1y59wDG3XCI8+0FgyEAiv/wsxkSourtG3W7JdNjgsFiK8kUSSbXCcvyqaMXNNI/jL/qqW5H+N2VUFf1hRh0XVeru7ire+YUEiYoqiYMnCto/X2jWtY2t98+4uXfvRsmRxdymSE85nhcuWWWOZLe8Twhslgq2ioFBDZk4UK1f0Pr78F8CmL6vDvMpmrOrFBZs3FKZ3F8TdrS7afXn/DQfefiIVP//cvodjkCfNEqR3TnO9PnZXLrEkLd9YRgbhvfTnM6DQMrtq5BSS8I4WwfzhWhZJeBvq9PKFZy9o+sJQL2PMOF5aAAsW6mayi/OFouKmFlPCm56uYcgQvZSlKzTdsh76p58CpaVAaSlQWgqUlgKlpUBpKVBaCpSWAqWlQGkpUFoKlJYCpaVAaSlQWgqUlgKlpUBpKVBaCpSWAqWlQGkpUFoKlJYCpaVAaSlQWgqUlgKlpYDbvW5lC/NzhKcpgVUUhUiXXSj+2t+QTIo4O207kSSP55E6H68/9h+cdsIBuOeJN0XY+mopbpdI6qNdgoqqWmHn+f/WB18Bq/lEbq00v3EAH3U49JRriWj/StLTZnH0IkpCCg5jlULkUCWpKNu7UnxU49HbLgGru0hKyXHXlFdh0rjhbBWKiSJb+PYCNlm5SSrNJqtVFJ8x4zqy4o/DJk8cBZY0l1fWCiLJ3ITjxquu2nDR6Udh9vyl4qPB/U64Akz+49N2ZWeew+TVSYQ1Pt4h++6EC087Emf98xA8dMtFYDd/1MZx0mlR4fX52RpTgUBQ8LCYRzcWXpR4SQIeH42PxbDb7E+2b6hSNzSDjSU9rwQGDyiIVWcQrSjYwWeL2OxI8UqGRfQ33fciPv7qZ4TbXaFywuF74Ydf52DuwhV49X9f4qSj9xWEOj4v3mbsDcUT62cLq7GgqkVMfGYZXLZpT4bJ10jxhGqllbbNopC0N9KmvGSU0VEefpoEeFvx68U1mLGqAUwSO4qXbL+WQASVzQHwXyGLz5vm3aS1G5SZRdFQV9uWgH33nYKKpkDSyomvv2mPGC/uQUM0uFM0LFui9mp5ZrnBsIZVS2w8PIVasZyoDM3lHE5w9Eod+NoulvCmpGkoGhzBKiK8XF5vKi9tT6+uCuH7zxz47TfqaaONyS7TF4wSZpogvAzo159Z2cDqla39yR7JLpfzYwlvo0F4uYwFi7guSWxrUzNnCw906bHTqSGSQ4TJZoO2ciUySMLb1KCCJfhcn95Q4WgUAZ/+fI6ZEBH1WboUhLmwCjNZ5YbombQQAWxu0stjCe8gg/CuWNFapl5y4vquuwK77973qqMa8jlW9n//s+lsCIni/51zLFiNGjZQ+HWmjRgyADx/h0jC2VmcnvhnZ6WL6DeTBJEloqa678bzBZHkIwjP3X8VHr71YiEhHj286/qJzBLU+FaDpSvLY7HNmwhY2hrzjLPkZWeShHoczDqaJkukc6gdTCTbH81kMtxVG7abPBZfvnEvPnj+Vhx+wC648Z4XsHzVWiiKPv543o2rQhtrES1cuEw+89smoJ2Dz+96vLqgqiA3S+8/WqSb0Xi3vTA/y3Su1yA3uRsf32jNaMNs/YLw8gdC3EnxqxIW6TM0Xm/blQf7sWIQTz3+AHHgnN18sJvP/bDdVPzgsJT3Xlp9vvj25/jnUfuYQTGzjshTb6i1JD3iF+UaMuPzD5P0sNkbRLzf+torOW9PAMTN4CMiyMSkzhtKSt7d1ckbDNMCI0qTWhSCRNR6UNdLWMbXpazeK85E1pBUOd6fpel8GX283/rYKwhT7qNIGGhsIjZkjBRxTvBXOz7+PNy7+DZEkYdq/Hu3L7HjlAaULbdiTa23d8s0+m3VUiuyczXk03btSiK8jJ/HHwLvUrA92arJFxQSXndKFEUDI1izQu39dtK4OXvfAtx/RTamlyq9Vl49PeMqTVD8XM6dQ4sJIvM8lFhizzg2eILgc3psT7aqafajMW6x9ueCQFLb2URbttyWFqSyAZsjKt4/nqKBiP71FwrU1WioUVHV6EtqufE4efxh+A3CO2wUPaxUk8VLIMojK+qNMR2fZn3t/G7hvqqt1d8HNmcEqRl6mcuWR2NlcrmbqmIJ3RXnHi+OEfLRPyZpYRIgMZFle3y7eK5eW1WHVWWVQqD0yHPvgre1bdbW3QQmaPHK5w/GZ9Glnbfqt588Drc//Bq4HCbSfDyR53CbcWSxnHZsPcQNvv3pT/w2O3nbYHvtsg0+IeEZn4PlowmvvTcN40YNQV5ORod13mPnrfD1D3+I69gYLyazb3zwtTj7u+WEEULC++gL74s7jesamsE3WnTXhnufeEvckDBiaDFKdtxKlMt3GA8ZWCjsfBSD8fT6AsIdr+UQyc7OTMPydleG8XVwfE7bxPLVd6dhl+22EEmnbDVWmK+//xXN6RFMm/6bKH93o+yJY4aBy+Tvo7gNInIC2jJaOAwsyiMMHAnETixKvyC8iqIQKE4woGazTbubtidMv3hzi7HDxOruzBMPxg2X/Ut8Sfrae3qHxcf751H7iq8/WYTP4MeHsT0n3YHeUA6HFbUVVvz2ixU1RBrMMqwWBekp9qSU2UQr6igU5KU64HJYkEZlasR601NsMMvrLZOKBajfFEUR2312u6XXy+S2qFaViwVvWdkcrWW6qHw3tZ/jbIjKSLXDoipwOlSEgyrMf7vupU9wZatby9yQcjpLi6gFl+ABnPvyftgz43esWmyFxa6is/jJ8k9xWVFXacGAwVEMGBJB+Sq9nakuGxy23imfJ0iW0qWnKxg0VENVuV5mstrUUT4uGiM+jy4pWbHQRu8da69gCyrCQWPV5bDgp1IH+N+I8WGsWam3MYvGGY/hjuq4oX5OehZaGi1cpFA11XqZG5qvmT5D08mLSXi5/7id/rwCFP8yHUevfQJNJGFOcffee8husyBkXBM2dBjgTtWw1ng2udHZSXyvK4oCh1WB36tjWligwktkcOCwCNaW6dhyme3VpubmHdD/XnU6XiLh0O5HXizuYz3wn1dh0IB87DRlYqw5n5f+ir2PvQwHnHgVrr71SWxLhOm2q8+MhTMpOvjkfyNe8flclXCknx6Png926w7A9FeggP/dfs1ZSE1xiXK22vt0cUNAY1OLOM972dnHgm8L2O7Ac8DCLCZ4iqKn4+Tx+XJePVF8PGPSuBHio3m+VYGlvXdeexYUhfJn1S6zrbcYLbjHzfe/JPDa7YiLBH52uxUsFX7olgvx7U+zxPGEXQ+/kMj5om7bsGL1WjDuE/c4VdzWcOlZxwjS7XLace7Jh+G0S+8U9yT/OXdJu9rozoljh4vjHrpL1/l87aGnXAsTSz5D/C/jiji3iz8cvBh8HIPvXb74Pw/juktOgsmX9tltW1TXNmDrfc/ERdc9pGfYga4ohFGc/+c0TraIOx4SF7TeVnW9U25kCfmcC68YzWrx1Rxs55Unm90pviuQ4/DBcTZNNZge1v9cejLO/uchplefmGHaBvvmAxduOC2HHgB9gkh2watIGummF38KTXDffWXBa0+6xVZeS1Df4kt2eWZ+fiLaLOsYmO7E8Cwqk0g2S0HM8N40m2nxwNvgd16ShamfJr+dIZLAc/0tigqvR3+Az7ooiFPODrE3li8XRq9pgaCGQ/CRyH9MZqUwFyxOfjtFxnFaOKrRhK4gLV1D8aAoKmki5yMHcVGSbg3RM+JpUpGWoWEQkWwuYM68KBu9pupJgm5mvmSuHb31rHhDUdhUffx8/40Fk6dEMWGrMNas0EmTWYfeMIPUl00NKlz6sUisWs1Pa/cl/bamER/P18dcl7GNM7wthoTX5dZjNw8YLCwjWvSPV9b04k0f/NFa0KdPf1x+0aAIVtMOgahAkjV+NhRFQXOjgpRUYG5VC1bQTlPRwCjKV+t1SHKRf0t2FosqPmz6+u37xYfj0968V9zLymdctyABE1eKz73OLX0Bppr+/sO4nuZX87wmC5bMsHhz6y1GiSMIfIaU8+GP1J66+3K2CpWXkyny5I+k2IPNx++4FL99/hS4Pn9+9Sz4bCuHnX7CgZjxyRPgu4A/fPE2fP763TjluP05CO3zFZ7tNCZyXDfz2GR8sNvlxAM3X4AfP3pUlPvJK3dixNABIgpjwOkURX+uhSdpRx64G9XncXz77oMiHacZbBzP3HHbCSKf7957iOI8gZcfvoZSAF21gY9qcLv5A35u+xn/OEikYe2C047AzM+eEuVw3uzXXp154kHEOb5AvGT+zSdvEH366at3ivS3Xn0GCXXssaR77jwZs796Dl+8cQ9mffkMTjh8r1gYH3d577n/gtvw/ANXC3/Ggc8qCwdp1158khgHZBW/cpLA87ENbqfwSJLWb562/UqmgO/p4z9nyF+IvvzOl+CBpCj64OJtlpMuvC0GG0tzf5u9CCza5wPlfPccb4M4Ha2daEY+/rA9Y0cfTL/eNoM0mQeNr4h/n+5ESyDcrsgNd1IRsJHEmHP67EMbHr/LSdQT8PYy4Q2GaQKln5UmdDeRbZtK5JAmeK5HbysPlVO5zIEZXzvx8UfJH/5MwqhpJLUmHA3Ce+k1QWyzfUQ/Z7o8+WXGYxYIRDESS4TX5NWfCLMvbmrgj9Y8zSpSSVLG5JPvGK1v6l2izYS6pUkRZ5UHkYSXG7twSe8S3oYmLgUYOFjDWpK2VtXrCxndN3m6LxTBUiLU11/uwNLFKgbQImLI8ChqK1XUN/duG4PhKFoaVBQUa0ilBcyassTaxX+dLWgs+LpMYRBeD1JENJeLnxigcdBQ4c4J6KS5okI4e0Xj8Ro0JLxut0a7ElGUreqtZ1ODheahpiYgnRZnzcEQeOzmD4igihaGfurrXmnk35gpfzheVJADq6X3F2hdNZPncya/7evBH6YV5Wd3lXSDwvhYBZebaCaKooDP/3K6jtLwUQOuc3wYuztrA7ebP+CPj2/aWdLbWTkch81b7SIAABAASURBVKXO+++xnbi3N0TCKfZjxX3KRJzTs7u94gXPgMJc8RFg+zB2cxt4V47tXSnenb/mjmdw8jH7Ccl0V3F7GtZbT3hP67HB8f9xxN7iKo09jr5E/Im7UCgsvio0M66uacCCJatMJyqqasXVGvwnEPc65jJx9uTmK0+LhbNFUXSyzPa+VvxCDBr3RNZWqfD0wksxrEWJmOltbKjXzfpaBXzBfW+215TwWonofjvNglP3yMdH7/XNUAwQy1+xwCaaV1WtCTOZWjgaFdmpRObbX99TTFKkstU6ziJSL2iBkAIXfCLnoT9/IMwFC4XRq1qIiA5LzlmCNYjIIBe2aEny8eV8TdXs0fNPJ1I2xCC8ixaZob1jNjToZW67o07m//hTdyeztBCNUR5HH7/hwtuv2FBTpSA7R8OQYfrYmrtALzuZZcbnFSDC29xgQVa2hpyCCMrLEhuz/BcbQUtmrn98fuvYPR7h1Yw0YbKElS1/nnohlh1wBFL9dezE2soeYCtSJK4FaOFrvl95zA6kHYJVS6zifH/iuXQfM0hYciyV5pL6OgW8A0IQsRfyi8NoqFVR08sLQ1GY1CQCPUCAr3vjYwuV1fqz2IOkGxy1oqoOh+23M84/5fANzqt9Bmp7j03Vzasd3sLgrQTeGmARfPwK54rzjsevnz4Rax6f42Gx/2ev3YUfPnhEXI/CWxUcgU0WuaenGntt7Gmo86gTnrnnCsPVe0aYCETIkPA21Fh6RerK3Izew6IRTY3CwNM3ZxHh7f0Jlb+AttLo+/FbKxpqVHzwxrqSdb1GydXDNAEtmWcTmVYRkRCWJGph2g7mtrFEh6WPe2X8iqH77ALXTz9gABFB3urn7dQkFtkmK2tVtXCXj9wRqrcFW6fMx7IliREWkXA9tTANJm+LQhIsYOhwPZPFS3WCpruSrzc36XmmpALFRQrsTg1Levk6tAbjQ8RtttOfkb/m6HVIps7SXR5D/ri/IpdFhHf4CJ0ALlikm8ksMz6vIBFuPkOblR1FfmEUlWu7Hz9c5wiNAa53mJ6B+PzWsRsS3hak4vDjwhg0JCo4IH89HsjMQqqnViTpTQlvKBpBwNvarqHDdUwXLkuuxJ4lyYyJqkIcaSjBNzhlymAM/OFrFJCElxu6uB/9hUBuj1SbPgJ8xITv3WUu1Net4eOpXDafwU522fQYJjvLvzc/FtXz1kC7WnTodDrs4HM4mRk0Y3YY4+/zDNHkETAI79pVVlTX6xNsMmvEE5MCRWTZYEh4Z3zjQF0vSxyCROa5UBttd835Ux+Cq5b1zdZXiCbjpfOtXDxtDysI0uQuHEnSeKHCWakEa1Wlgt3dM+D860/k3XcHBg6OgiU6vYVvhNpm4X1TqkDN0MmkA9sWLkPZit5ZMIkCDI1hbGlUxfGCoYa0dUkvE+2GRr1wv8WPRn8QBQMjWNHLhLepiTqWih0+Sn8eVyzVxy95Je3H14Ix/eIFhJlp0YAoxozWXYsX62Zv6bqEVwWT7LxCDTWVKoxHttMil9f5wHXmCMHuIhuEN7XYhdsf9BOp1sC3Joi02blweJrYigr9ZIOwJ1vjU1vBgIr0TA1/VTYR+dSPjM1L8mKC+5LrblFU8NjJcjSzE47GeipTR2z5it5dGIoCpSYRkAgg+W9rCWpSEOBtwYDxUUVLo4JffklKtutkovKdZOTbaBBed6qGuXPJoxd//nAEKvEGVksX6USX/9JRb58d5iYxyV8yTye8ZSusMCckDkuGCtFCRaOpnyW8lWtVFLlqRbbu6aUYmaPbF/aS5JMJr61RJwv1QyaIcrdMX06E14rmYFi4e0vj8cp52+2g7XeIr95X9PIHet4WLhHwqUGsbPChaFAYK3vpwyO9JKCpUbelZwBDRkSwZqUqrl3TfZOj+2i7naWCfBvEkGEazr88iN33DkOlB4bPfS7d4IVE5/Xke7n9oQga64jwZmsoLNJQvdYCvrKvs1Qs3V1a6wGPP6Zw3m7GmtbUDL8lBar+6ItsvfROYIuSm8MGBtvXoqqXCC/Xk49eBHyAzaGJHa20woAoN9nHcOq9IdA6FG6bKiS82Q590GYtXYScwpAoc/kKYUhNIiAR6GUE1F7OX2a/nggESEoSpHfwyNH66n/RAmKI65lXR8n0lz6IeCoiuKZawWnnhaBFFXz/hQNN/t4jSHyuzUHMoejS87Bvw9sYNkpv45xePpv4/Yo6LF+oz7K77KVPNitW6WULEJKghY3smJzwVnChvfUM1A5rPxUlLF3OtEBYk6rx9qm1WWdkzUP0uxFH21eihghLcy/2JzfC69PbNLBxPiw11SgcFCHyqY8tDk+24jHU0qzn70qNgr+DLCYJ+tJ2f90u2eUSVxNZulM0DKdxu3oZLSaS/EEpE0iNFk3cvnETI7jg8iBop1+Uy39gY3kv/gnlX9c0oNkbRcAPZGZpKCjSB3RZud6/ohLttDkVzUT6IxiY7gQT9Voiee2iADfdBHz9tfDWPB74rKmwxM0+vPDk3nQU6XeFDsuoQDW9k0SCJGshjxdjH7sXQ8tnwuGKij2urIKIKGVJkqXndb4grBYFNmpsc5OCDFuLKGfiS48jzak/q6tWCS+pSQQkAr2MQNwrp7Ukafv7EQgRc/J7VRQP0sBfSi9P8tYpn5djKYdCr3uP/g5Gbl4UB4+fh3k/RDCvsrnXQGAJb2r1WmS8/jIuxoMYP0mfbBYv7XxSTUZl6jwhmJLsrbaNiiyXLBVG0jReSFgV/bGqKFeQa21AaNAQeHfcGds/dpEoZ0kvfczFkiRTwhvNyETz0BHI8uoNXLhEb6+oQC9oTHi/xD64/PGtkPbxhzD/EEQvFCWyDBOz8rboOLuIfPIfnykcqC/SyspElF7Rmpr0bPnjo1FjNKxeYkVLNxJNPUXiOhNeG0lzPS0KUmjHJT7lwCGRXrs+i8sJ0ULbZ5xt5Y/J8gv1cdPVTQ38LuG/1pjttoPNWk8H1yjeeCPw1ltcBNDSAm87CW/AkPCmFxeIOINTqlDdC2fsOfMQ7YKMe/xe7L30RVzZcjNS165GmHZmBg6LYMVyhaMkTdUR+XfxxwqUY2MDkGrxkE3/Zf7yI4oHR7Cql3cl9NKkLhGQCKgSgo0TAZ54ggEFQSWEAfRS5EvR/USCk1VbJkdML1UaAeb53b3/fAhvzZyAonkzUFmnT3TJKi8+n0BIg9Orv/h3xg+YuIVe1tLl8bGSbx/01ku46cw8HIu3cOqSWzAQa7AsydLWUCQK5rtVFfrEmYkGRDJJP+k0qCQ2G4OFWJ7kSRXGPybbthadkWmZGWgqGogC70oRmuytWpFpnOb3awJP9rJWV4qJfOVSCzs7VasbfOiQHHWaojUgSDh7mRDCgz0/uBv5f/6KfJIqc4xlyT3Hy1nGFEtd2ZG7ajbGjI+yFb/P0k3hSILGz/nYpx/C2nIVmS5/mxwHDo2iukJF+xtA2kTaAAdLzn0+few6XRoKdIErVq/pPFMmvPw+4RipdgtYqhk2PdjTVOXluq2lGT4LSXit/AYC/OEwvKGIkIS6BxSJOAOc1airppeTcCVXCwcCIsM91r6Ni2tuQd7s34jwahhE2PIRFX6ORIQkaHzjjctqEUdhaA2KtDjCm/3X77QwjKKiTMc7CcXJLCQCEoEuEOidN0oXBcqgxBCw1Nfi3PKbsYXvZ+SR5KpitQX88kwsdfexxEtdA/hVa0o9ixsXgv8xCf3t594bGn4iK3ZPCxcl1IShjcJc0YuElydyq88ryrkc92Dr/92KT3EAVqwQXknTeKJn5Pg4A2eaHm1AND0DgVFj2ImdC+ZjzSpGXTg71DiPVUQGm3q4Vc5HGuwtOpb+1DS0EOHNrlstyljSy9d18V+p5AUEF2atqsSAIVG2ojNpa703iF9XN+CPcr2+InIPtBBJIvkatHGYj51evw/DvpyKvAEhkcNSXagt7MnUeAwd89310OipGbb3zjjku/+I7OfOVYSZLI3Py1qj+q5HlvGRk5n3EONqsoWLdXxN/6SYdXUY99g9SF8wT2TndAGFxZqwl3chNSdhOyEioiHFZhFnVvkssO5DusdDGv0qjUO5wSD8qhO82CZfNPkj8NFi3mGxIGOQTniL7FXgc8Qe/rqMIyVRhQNBkVtqRB97GauWg15JgvBWrLKKuogIG6iFOFPKQyVp/bIlKhpJwpubqu+ctQwfhbSVy1A4IIqqMivFkj+JwKaAwKZdR56bN+0W9MPaB+jln/9DKS5vuhmHlz2P7KIQyldYifDqk2AymmxOl6qiiq+HOc+MlrVsYE98DSbBXA/hkWTNNm8uBr77eizXIlc9BtJ24qoVySUOsQLIQvwIFoPwTsGv5APkqbVYvTK5ZTJZVRUFlSSF40JS/bWIZGUhOGIUOzE5ZT7KuvnrSiz1nElkcG6FPjmKhAlovIgxv3D3OR1EeAfBXV+N/CwPVq+wJP1GivgqheuakIoW4ZX50nM4/4N/CHtnkuUq2vZmgu4LmSNRRE9YCxOZ8DarGA2dyWesWib+sAdnsHCxTtLYnkzFxygsQV06yPkW+FezgYVJPl/vJUw048x1jl3HlKW+VS0BDKbnhAudtyD5bQxV1WAsbfUPWqEvfPkPQuTl6uWUreFSO1YszKUhLwLddisRXq2t5J4IrghsMnYfQiGEFRuI3wrvCC0h2OIkSSjy8tiKfEsN6knC6yPJr/BIohbxt/YhZ5vHEl4tiiG0mGhpUlBRk5z3LOPC6Cm0HChfo0+1mTYPgum0AB43HtmL5tEiLQL+Iy1cD6kkAhKB3kVAfwp7twyZew8RYMmAvU7/on9S3U/IL9ZfwMtWrB856Kj4aFTPS6FAvhCdDLgDjWxgN3yHcGVTUgm2yJi0EM0COb/9hLzPPiSX/hs9/xNcH7kJNSv8ukcv6GFqr0l4zewLomuxuhtpqxk3UZPLOWDb4dj+8QtFEoeXjzRkQXO7ERo0GOOVBahYYwETcBGhA80TDBNpACqa/WC8OojSoZcgvCThrUYeokoEnuKBIt7WBcvAV9utL7kUmXSjqWX6YsmMVlQ+W1g7I5/8RwpYMsh4cb1F5B5oYRpHfKRhgnWBSJUz/y+BaR49K4t76Yw0H6Ngwluj5KFu7BZI/9+bGDe8EcuNm0ZERZKgcZ9HfPrzmWnRFz1rGn1YVu9F1sCgKKE37uINB/W8NWNnweEEVFUFY1perohyO9JMUsdhKkXjLfxqWtCwWygiuMI0z2GQO6TYWwlvWCNKCEwZlC6iBTOzkKdVCfuaCv3dJxxxWi3tEHjWU/obNiS8nF2dPR/5RHh5LA4wjsQsXsYhrcpPpLsmvj2tQV3a+KgHc3m+CYeJNEe2hzwIO93Qtt8BGSuXYki63k4Ok0oiIBHoXQQk4e1dfNcrd57wrD59G3Bg02IMTa8Q+STzoy59iqFsFU1Ic1dhMLJn/wDN6SRPILViNbwh/SMg4ZEkLUCTh5O2vOOMgFUXAAAQAElEQVSzG3zjZThz1S2wr14tSEt8WLLsTJAsfiKQFr19Zr5Nq3ScTfeGmtQ8IUm2rtKlfzYioNH0TKxp8qFq4DAM8i1CS6OC2obOseXtXY1mSuJ04K3/ROvEE6zD24QGZCJqiZKE1yC8GYtRTjsEvSEtg/HPtlbf814zcJLwSVm7EhloxJKlmnC31xp8IXAIEw0mku3Du3OHaMUQDCgYRwsIjutoqMOxWxWjcGAEy5YS62LPJCsu0xoOoFlNx4o9DxC571C8DCuXdH1WWUTsoRY1JLwZVkPCSwOLW8VY5RZFsGRJDzNMIHrYeN5zFs3Ca/gHQgtnYEFNE3IKouiK8PK4MyW8XAz/ufA2RxqI4LI/jKMNChHrIOywWHgEAMR3RbDNok9HwawcZEVqhV9nt0NMX16HhVX6YkBE7IEWiSO8PxYfCKvXg+yFc2gnLSxyWdYO2zWNfnxP5X0wtwLlTX4RJxHNXMgpUGCe4LKHWhB2uZG6+y4ii/EtPwtTav0SAdmojQwB/Q2zkVVqc69OKBIVpMnEYWL998KazI9xeJLSKFdFUdDYoCATDeB/gfFbsAFLfTO86ylBERl0ogWobc7aKkSdLvxQeHibWBmNa1Fdp086bQKS4ODJhyW8DY58THMehMC4CfDZ0xEs63rS/G1NI96fUwHuk0SqYcazVZSJi/vVlma0uFPBk2bTkBEY0KBvF89fFO00OyamFuoXJoNNBvHpNHJcAB8RcHob0QAivCzhHTAY/G+McxnKV1rgJdLE7t5QjkpdwvvRETdi7olniSJ2z/0dyzo4l82LD08wLL7o54i8wGOzJ4pxDtNQGR1diMZho+ApHiSSDyuoRTKfE5GpofEZXmvIj6DqQPn2uwnfUfblaKy1gMOExwZqxOMpBw0Rvy7Z3ObHp+H+7hsw0SXeJD7wKh4SxvJlyX91RwxiOnjWNJyA17Hlj6+J87W5BRFUrlWoXh3//D5g9VJrLDDVZhXPSx1JYYUnEVxh+igiWTQqh480mPfwRjT9WXDa9TxCRHgzgjUUE1hbwW8pYY1pjBHfysELw5hnDyzxRxr+GLiXSFkw61fkDtEl3IuXAyzVFQGk1VA7+H3JSvQD+SXy43pyPEUhwuvR8bMHfAgR4XXuphPekWsl4WWMpJII9AUCyX9r9kWt+3kZIXpTao0+NCIDflsqRq6eIVq8MolnXPnlzZmSgFcQ3jToxG/peP0vdA39q7RXjjQESZzjrKlGy8jxOMHxLta6jL9DS5UZgDL01vVZEWKPVpqZ/aobZw/6EJU33wFXsAlDsQKLl+vkAh38q/eHSOqsoSFB4mmSN8ZzRKY+aQdS00TOjcNGwu2tRy5qsHiZBgjfdTV/KAqnVYXNolC5IST6jyXCLl8jGsCENwotvwD8b6x/NhtY0AXJFhHWU+PFhLtGJ7wrJ+6MP0+9QOS0U8osrOjgRopGwpTrmmKzgOkMpxcJeqCFolGEQwpGRBZh5R77Y+75V4rUEwuq9OMg1TqJEp5J0kK0WLOEQwhb7GgePETkmmdrQEuT0qOjJyJhJ5r5XEaNYwWjv38VxRedjSP22xb/2nYwlaOAb21Z0QuENxrUx1p6zUpRu0k/vivM7PwIEd7Op4pbL0vF1adkirispTgs1AcaFtd42QkQwRWW2JGGMFjCa9X5LaI8GCiCm8Y8GQhlZyPdpz87v/6uYVG1LuXmMHo1orolIMbN+i7IIyYBpwzrsgchMHwkCv+cSYuJqPiIbO0qCypbghQK8Meja0mqy2OUuh89+a6B+5LHt0Iv2ZZmBXxLg0LvoIixi9Y4ejzyKhaJcqQmEZAI9D4Cnb/Fer9sWUInCPDEqjZ7BOGtyB+DzGWLwRf5l61UwdKxTpL1yNuYYwASPASNrcGG8ZMw7/jT8Vf6FIxv/BUsaUSS//nDETirKlAWKcbqlQrmTD4WLfseKEphwrtkOU8RwplcbdEiuGjb3UuE150ChAuMr8GxFgsXd06OAuEoiCuj3quTge4qpTU2iigu+DAkXZ+0A6npDDMaSBLJgfyh1eo1nbfTQ5J1K23vOqwWNPlIjMmJElA8KZuEV6HtYqfdghUHHolxi6eJ1PN74UMnzpjHZGpNOcpRDJtNQ4hmdn/RAGytzEL5KpWjtFEN1CbGNM2uMx5O3yZCAg5Ok9e4Ck740Th0BJTCfJHq0O0r0Ew7FuddsG6ffrKwCr+X6f0jIvdQ4zJtYT9JeJ0IpmXAn5OHCbU/gv+VVSbeTxy/M8Xkj0dG1JDwcjxrxVq4avQbDtIXzEHxkAiqK5SkX00WCbdtg7uxBjv/90pk5kXgJ+7a1MS1WVeFIwrqa1QQlxOBFpJoOmj8ljf5dMm0SXg5lI81BAMIwQbVGBpRTYGiKBwqVCQ3F6meOvAZYpYsL62lwkUIUOsJ4KeVdWDG66OFoeHdIyMSaH2WI6kp8O2wIwpmzSAyG8HQYRoqV1sFqeZMmVzT2goZLq6vgiC9D9g/EcXPI8dTQX3lAVJSNfBRtYiLXkAUECoohKO2OvaxJXlt1j/ZeIlAbyOg9nYBMv+eI8ATq+rxoQWpqC8ahYyVy5BXHEZlmRXrK9VoXwveEmQ/hV7G4apGtmLRMScjNGAAVgzaFlv5ZpDEg6deEZQ0TT/SUI16d7HI0/rwdfj54RcQzMyBILxLhXfStZSHH0DuHzNQYymEy60hXKQTXi6zs5sEuBI8wbGkps2ZRA7oQPEEp4Z0yVCK4sXpJ+iE15eeJWL7ho8S5lgsQFW1sHaoeUMR2IgNuIg0NBqSvg4jtvPk8l3+RjQgU5zh5Y+HvKPHIq1uLfjfksUKG0lXPF5T6yqwBgNhsWki/4YxEzDWNxsNtSoq69oSqeX1XjAhslkVsZjgBZ5I1AMtRKK+4sbFIgUvJGzGX+jK16pw0D+8eO8tKxbrwSIOYxMggsR1FR7rofF2tiC8FgfsKknfh4xAUf0iOF0aapsi65HjukmiDB8prZN+z13wF/IH6Xh2JD1fN8fEfaI07szYv2Eb1A0Zjcxli4jwGuWtooqZEeJMEnoLl3kTATuGZLrBEtGK5gAQJ1GFxwOFCHBAccBiHH0OE6MkODmZUFpOLly1VRg1MYS5v9mxdAnQQLsCHMgLT86XaxLRouA+Yf+eqEjcGV4txYXgTjvDWV0J56oV4mqy1UtsWLxGf46b/GHwmeTBGS5Y6PHpSXlcR66XSo1jCa87RYOFpNxht5u9ES0sgquuGkee1irBFgFSkwhIBHoFAUl4ewXWDcuUX6oWrwdMeD0FQ5FavkpMcpWrLfBHkjWx6q9jFqxEa3XCG0hLBxOR2klbIZVKL5s6nyYUPd6Gtag1dYAmVZ5cau0FwjMzL4w1TX54SNoxxLIGy5cJ7w60DfPSjO3aNdYhcLmAKLVVI1FvMcklV3RyVERId0mUxAjEziN2UQ3eZldIesVRUrQWTBxUy1b409KEqRXobR7grEK77/ZEOGssVdeoTDuRXafNQoSQj1O0SqQ4TmcqQmJTd6ARjcgQpD7dSYxi4CARfUruYqzohW1wzjxEDCS9rhxltGSx2ciHiEHd6AkYVDmLHMCfC3TCxI4wMbpmIhFcNwsPPvLk9GT06MdpBjXp28FNg4fDVawvYKIVlTjtfL/I69U3osKMUAd+StJdXuTxdWbCcz20EGVkj/gRsjqR6XagiSTLBTUL4Pcp+GUGFYIN/8eLK84l2o7wthQNRDg9AzkL5yJ/oI7nkiUENEdOkooYzwhnx2NozQ4lyKTdpcwC/Z2zfKVucni8Cof1epSt1k0Oc1hV2C0KyhupL4I6eWR/1NAiMBRGGFaoNDzZj5FTldapSCMJL/tvPaIGS+bY8PC1Wahi4kye5hEjXsDQcMf6CACiwQDlpP+0NBc0IrzsyvnjVwweqqGJdgg+fMUl7onmo0x8vIjDVSgkBY6yNSHFdRQRqaKeFgU31l+GtJVLETEIrzpoINLWrMQuhzWLaFKTCEgEehcBtXezl7mvDwI8sVpIEuBBCrzF+lnBae+kIlrbAn8o8RduV2UT7xDBJHxApE6XMARSM2Ahj8DuW+lh0/9EZTNNWMKVHC26tkJkVG0rBs3f8If1yduTW4DBtjKUrVJEeLI1jSbZliHD8Gz6xXCThJfzDxUVY5i7HCv1I4vs1UbxwgMakGq3wheOdHvEI0yESKVyzEwsxtVy/tR0qBYFNoPg5bub4Q9SxmbEONNH/UvzI0l4AZdNBfcTS5nioqxrnTcPKCmB44fvYQ/7UA9dopzqsMI6ZJCIPz5zJarLLWgyJGXCM0kak9i0Rl3Ca7NrsEBBzahxIvet8TsR3gg4Dns0+kLUJg0sfTZfPtxGDuuJClEi/ityddY8RFPTkDl8sJ68qgojh1oxfHwIH03VMeY/5MCSel4QsNIj9lznNtgiASK8DmS7rGgYOgquxmrk2Rswc4bZmp7nG5/CrJ8Waksu+cv+4KQtwdevFQ3TCWSyr1+LP9LQjDQ0jxkNm6cZA5U1ooorVgljHc2U8P78vbVNWJrDhrVEVMMBvb4ikPoHRICDWtwtDSThtSh6X3Eca75+PGXCwEp2YtFsG+Yu0d8TtZ4Qsp02jMpxg1Pw/cQiUg80k9gvwmhEsjNgo3aG8/JRMOtXHH+mF3sfGMInr6Vg8Vo/ZpTa8M+9slH6pQUWeobFOyHBsvg5ZsUSXpJfYEv/DFj8fkRdbpGDbfQoYbpWrRRmjzQZWSIgEegxAmqPU8gEvY5AiCYAG70hmfAGBw6MlTcSS1BWya/5mNd6W1iSxC9jhciJVmdIeFNSYLOoyNhuAGqRg9w5v6NmPe6f7LJSFTrhrVILwFt8xO9EdG9eAYqi5SgzLmgXnsnUQiFoFisWhEeT9FPPOJhPJNtajjWrFN2jnc4kidFOd1hI0krbqj590m0XLeZkQqSGWqVHtvIyEeajhYRVVZHqtCKQkYnC1EbMnmETYe01IeGlQm1WlSRkKpgj8xVe7eO1cTOm336L1C8+Fd4snVPIlum2IW30CLIBI20rwMcL+Hyw8EiixiQtraUK5SiG1abBTUS7ZtR4UcJWmIVZM62YV6lLsVhixuPORdJrlRqnEW2JScKQ+L8QSZUzAzVotGZBoTwK0hziQn+lulrgtt3uQfw+wwI+d7qguoViACqVFwoTuIkX0yZmgBY99mgAYasDNlVBy/CRInzfYXMw+zeLsG+oxmd4OY+ov+1YY8KLyZORvXgu6PWA3riaLBq3WPOoqdS+MVwVDG+ZL8xVazrGzlizYt5sVcQztQwa79y3DU1e0wsVS1ZBCwQQgk0QSA7QqActamta94AC9sbEweU459KgsH/8kYo5Fc0kYY2A/7iFnd5TPI74eREReqBF/Hqeh+N92J2ayC+ww07In/0rPNTHJ58TQHODivefSsc3b2aKc+jffWUFL1gDPRg/ES0K/qfSuGMJrxN+doJvqGFLxsSxbCBl9Qph17wWZQAAEABJREFUSq1jBJavWotf/tDHYISeey//WceOo66Xbw3Nf198O3O90spEmxYCrW+ZTave/bq2TJysfi9akIrw0GGxtg7HMlRUx5xdWboN48mCI5HAAa6gQXjTMkHUDmkuG2al7YBhZTPR5gJ5TrCBKspbmpRHrZZDxFNDgF5gCrm9BUXIDa7F2pWWmDSQvJP202hWjlpt4D+2ZnVEMHNNAxpzC3WSvbrjx4ClOYxTqt0CXiDU+YJd1idCkRUi1mYk/tiI7Z6MLJosgUyXBaGUNOQ4m7BmmRV+mlw53FRMsPlCfXbbDQLgoIm9OdCW/HB4G0W7AezOffwhNuC1pcJK6QakOZFjTKqDLavQWKuiJRgRcZKpRVfqor8yDBCEN50Ib9OAweBjI3vn/YGPXkzF1GlB8AdATN65aTaLClVVRDUYW2HpgRaMaEgP1qHBlkdEVhUpAzl5sNVUCfse++jt/N/UMFY3+MFEsnDeLEz+52EAScPxzjsiXk+0EEmV7VE/wnYnuP7hMToh3KVwPhbR1ru/nVS2J3mbcTWyCNWuz8NOF6xTthUSwrTli1E4KIzly/V2U5Kk/CLxhNeaBt9ovX1pyxYhPSuKMn39tk5ZpoR3/lxLmzC3zQIr9XFdoyfmX7VsNfhZDKJVwkuPDSwUz4yUNqBIWHOUKlx8VRBbbB3BT186sZAWLr//4MDhO2Tiu2k2WsTQ+6MHH5GJTEmLkoSZDHAdHK6ouBHFPmI43NVV8AYimLBlBNtsH8Hbz7rxy/d6mz7/yIrmRhWBHpRHrzYuBtxLfIbXTiWyRzQ1hQ1ghL4YTSvTnx/dc9PV9zzmUkwoOUWoKQecg0uufwRVNQ0b3KDpv8zGky/pf6hoBhHfKQecjYZGfVdygzOnDJatXItLb3iEbPLX3xHgZ7G/t3GTax9Lr2wBLzxIgTp8EGov/7doA0t4q2t4OhTODdJo7hbp66oUZKIB/C/oToHVakEWbdcuG7ANRntmw9/YDG8SJnLOX6jaOmFURXJIwgsEDdLnzSsU/nm+NVi9VicrwiNZGhFRzWaD16MgbA2DyWlLbj5yA+WoLOv4MWAyzsXbVBVOi6XbmxpYmqWGWsmptaoSmstFEzNgtajIJwIaIoyzbE2cLRYubtuX/Od2l9V6BRG0GATAaVVR341kGZ5WQsEZR1UrrArbALsK+HILMCi6EjUVFqyo8oO4oh5IOpf3w8p6sm3Ar1Y/q1yHbLCEN4sky5xb8/gtcNjw3zFgUBRP/TcTv5c1gQm9i8bYV59Z8fzjNo7Wpj7CIwGNF4WZoRo02nMEXpwkRP1pr6thK3beUUFKuob3p0Zw45nZuPXsPKTWViNrxo8AScPBx0BEzMS1IDEYBxHeiN0hylSHDkHE4cRWzgUI+BTM+K1tfyaec2tMHkPsirZbmIRpG9y+zdYchMwFc1A0OIKVST6TrZnMlUp5IvUS2DIzEBwwEJlEePlqsvIyY1BRePwvbJzhbaRhVFXRNk4aLX4amn2x6GlLF0ENBBCAA/RICf8oSULN8So8jDO8FuNdse9BYcybacfRk4rw9uNpqKF3FrfdoqjgPhFputRaAxlf1WhnCDbQOkIE2lLdsDc30iJUl8qedl4QtBmD7BwNhx8XpoWyAk+dhcZqFJyHSNSNxu8Y8NOvUlriZw7NL1Jo7lRhorAQEerX1DUrdXc/0K8493h88cY9eOru/8PKNRW4/u5nk9qqLcYNx1tP3ojUVFdS85WZbR4IqJtHM/umlUwkfl5Vj8rmwAYVGKCM7AEPWpBKUlAFyk03oim3GEx4y1ZvUNaxxFEWq5CrrkZFGvTtZn9WDnjbLjfFgZoJkykUsNLqOlnHGniiUBt1cl0TyiLCq8Gkh+HCIlHeFvgLi5bok47wSJLGk7lGMywLQ21OPX8PSXidoRZaVng6PMfLEleNJiwmq25ijt3d1EB8CErcBzG2VSsQyc4RxyEsKsAS12BaOrLDuhRy7ry2jeMtc54kHRzZCHLZLDSpRxCTLB1zDHDffUaoYXCjDKswrFaoNMkKO2n+4gEYENYn1Vl/ActrWwkyt6maxisNOYq5fr9IICgShmCD1Q7kEOFVyKdh3EQ4583BVTf7sXKRFW++YMfT97pw1oE5uO9WO+66yQELbWdHeI+e4vfkF6EVW2qkSUizY3Dl5cFZVwuWtFpoG3mbXQP48HUHZv9sF+dAKxeHWotoaGi1J2gL0nY2k5aowyEkvFkuOxqGjcIQ3wKRw88/C2ODNJMyayHzydCz04hYY/x4sR2eSYR3wJAwmFy273o99vrppoS32Z6N8rQRcNpUhMaMQyZJlLPEXbzcq23z5vPlEapqRpZe80XzaaDHRclwWhH/hx6KvvmcCK8fQc0G1aJH5HcRL7R1F+nUj6TDaixe9j6ACmAPUnyelwxxdzj3e6iHAzdED6kS1vPj8ep0c26kjA/J/EbYnvtF8PIHXnz+sweHHROCjzi716OKZzlAeVCKbn88RhkVK41FDy20HQbhjdIi2EzsGzIUqWtWmc5N3szOSsOAwlxMnjgKxxyyB+YubD2uUV5RgwuvfRAs/T3j8rvxeemvMP+9/M4X2NOQEO96+IV47IX3CWtGz4yhm5U19bj5vhfpraGAjyEcd/ZNiFfX3akTbJ8/iDseeQ2c16H/ugavvvsl2I9z4Y9XX3z781h5tz/8CntLtRkg0PbttBk0uKMmbojfgqqW2MdMlc1+VDYFsJbMDckzRNtmKb56eIiKpacB2W47vEOGYASWYtUSW1L+IITBd1FboyADjSSpcokq26wKspw2+HbS/+Ka7bvZqGkJiLAN1Vgak7Z8icimLFQgbkuI0IRlVVUEt9lW+OuEd90XnQjcEI0IRES1iRxsTHhp7m4iySd7DEBZhyQ7KOpGESlSis0Kniy7+ugrTMQt6m0lVfblyxBJzwBlAytNepQNPENHIK9ax+C3v/SJl/1Z+UJRME8dkW3OwoCNPLivYmdvf/gBWLSIo7cqTyuBZc8oEXtF0evN7lBBETLCuhS2fKkNi2o8ok6N/jDWNPoQhYYgjTmOuz7K3CIOwwqHU0NhmlNkUzt6PNTmJvzr9WOx+94RvPlwOr6d6sKa5RYsW6yKOLxNnCB/EPFZC1BdNaqzzThPq+cEWIpoTBFJagzquO5YovfF4OERQViWz4pwcl3V1+tmD/QgVdQJPyJOO5hs5dJz2Th0JDJWL0JecQQzfulBZp1E5aMXHKQyi2SLoTSnQ9h84yYge/E8FJCElz2W6EOJrRuszDO8LZYM2BwaXDYVyoTxyOSbGvIiqFyrrlMGH/Nhgen4STq207+xtonDizeFdlfYM5KSCmd1JULpmZju2JN2PdgX1JMK4akh9i8lBVEi+DZavLDf0OEannnDh0OP1vuV/Z54wI5nbk9HiJ45dieqgrRQUuOONLhcRrkmCW0hUayR2agxUaSmAenpehxvs97+RJ8VJrxGVgj4AVuENPZITWVdqNDQ4Ugr0xejwqMn2vTp+m4F71j0pUqgjkwqZ81ZjJ2mTBSxQ+EImOSmkST9pYf+jSMP2A2X3fgoyogEc4SCvGxcd8nJeP/5/+Kmy0/Foy+8j+9+ns1BbZSfiOychctpzGjYasJIXHn+CUKddsIBYP8hAwtE/DuJ7P7x12Lcff25uPaSk4jwTsO07/Rzup9+PQN3Pfo6jj5od7z55A3YbYctRRqp9X8E1P7fxN5t4dzKZsxc3SgK+WttM3j1zxOy8FhPLUQvZU7KEt6sLL2LPIOGYox1ifgTnuvzoQbnF69YqsKkoeC7T3AyXhLEjMOtRLDyUu0omOTEXExA7l+/o8qjS/A4fEOUn4hKwQ/foLFkb3FpfkqKJkikhSQ9qUX5aMkvxigsxvJWocCGFNcmLUt1ooo+GTuNSc6TmyficJnLOiiTCY5JVN12PscL8EdXIlEHWpj6LZ7wqk2NiGZkCEmFjbbxOYlv+Ejw3ZsDM+qxYKE+kbI/Kz9NCioUtsaUzaoKe4DChIUneFbCYWjtCa/VBj2VHq7S1nSKvwkkmETtKgf4KieW8vJfsAoxG6dqxPLXk/RIjxjXWYVgE+n4GIaNGGH51jvBN3kbuGb/getu84vjJHxTRE4uFShiAvXVFvBYNJwJGSwFpxkPTHijNhuRJR0zd3Gh2JZuISLPGR19BHDjfV48+KwPQ4ZpaFgTYm9dGccwdEf3OpOX5gbABR8UJry0oMin56SRFjBuWsSN3TKEX37W68G5Maxs9lSZyLQnvIrTJbKKjB6NjJXLMHiYTjB/+MMv/DdIe+wx4IADwGdrAaDZkkljRYOV3gXuyVsKTIellKGxrrV9Znl1tMCLhBXwtdYjR0fx0lP6GDDDeRwozIjJI0g7DWTA1tSAFqSAn3t2Mzmy0aKX7aYK5+SgaOaP0Bbo0vOdSyK44oYAvp/jwQgqh+OFAop4f7A9URWiRQu/Czg+j1d+B7FdrL7Z4l8Xz7QMvVe8zQoPO/B7gaN2p3icWlQVDcbaim9QWbH3wWjYfY/WpCNGIH19jzRQn6GkBCgpAUpKgJISoKQEKCkBSkqAkhKgpAQoKQFKSoCSEqCkBCgpAUpKgJISoKQEKCkBSkqAkhKgpAQoKQFKSoCSEqCkBCgpAUpKgJISoKQE4uXd2oI2ttff/xpX3fokjjz9P1i8fA3OP+VwEf7b7IVYuaYSRx64m3APG1yIiWOG4duf/hTufXffFkMG5GPB4lVYsaYC2ZlpwhSBnWj5uZnYZtJobDlhBN6e+i1KdtoKp59wEHxEit+eWorD9t8FGWkpSCeSvTMR7y+n64T3HQo7ZN+dcB7Vjeuw8xRduNNJMdK7HyEQPy/2o2b1XVN4sq4kCShLer0kReRXY2h9Zzuj2kpjo7B5eFIwesgzeCiKwmtQtTgCD5UjImyAxvXm5AP/+AIs4YxmZrATdmMWys5VMMs5BYNW/Q5viMpsd6ZQRO6hFiTCayVyFho4SJAfvqUhSnnwZMfHKPxEQIvVClRXkWeyf6EgwoaE1+7iXgKqJ22LUFY2jsObWN7Bn8DlS/PvvCwLl5zpJFxUqIqG39fofdNR9ZgURb2tUiiOE0ojwksWu9GPvpGjyQXsXjgPq5dZEH9chK+cs1oUEW5qFiLALOFlMm36oT3h9fliQWzhoxvEVdgqlDUjHTZvC4aOiuDlJx04Z98CvPZuCE2BMFqa9EncXGSJBD3UooGASBG126Aoev2dVgWNeUXw7FICW0U5Bg7WcO5lQRQP0nD0ia3Es4EILxMDkUGCmkk4bNEg1BQb9Yteps24ixdrVoucUlMVHPePKMaOAwqLo6hYEhL+QmtoEEaiGpfp8+qdqLnssFtVkoBa0GL8MZH9hs7HmhUWrCwPCxL2/fJatOmzBAviMcT9rUTajiPF5RQ5WMaPR0pFGYbl6ZLI3+dEhP8GaXyemaSFkZCOT6OaCTtJ6q3cl+MIPKeHKI4AABAASURBVMp8fHQu6cBCev8Ii6HV0WI4GlVgsWqxfm1uQpt/NqMtoaIBMf9IVKU0MSfsRAxbXUCU3gXZC+fA/l1pzDs3TwMvlrKNBVM4yIRXf5ZjkbqxcJ+YBDxECzS3Wx87cLtFSqvfh+a4v8TGnlnZehlN9SotXoFAmN9aHNK14r7kp4u/G+CYtpAXjUNGQCkoZqdQ9tEjYfV6hL3H2qefAqWlQGkpUFoKlJYCpaVAaSlQWgqUlgKlpUBpKVBaCpSWAqWlQGkpUFoKlJYCpaVAaSlQWgqUlgKlpUBpKVBaCpSWAqWlQGkpUFoKlJYCpaUwseqovkX5ORg3agjWrK3BVhNHYbBx4wYfZ+D4Dzz9Dm598BWhbLRr5vMH2FscPzj0lGvxxbe/oq6hGRwWpcWJCOxG4+MPq8uqcNu/z4RKL76KKn03691PvhPlcHnziUhbjbmNifi2k8Z0k6sM7o8IqD1ulEzQBoFR2SnIdFkxr6oZJOATUhF/gi/ENhkZDn6ZWn36CzBod8NmEKDw0GEiRmr5KlTXbfgkx3VNXbUcWZX69njY+JDCLC/dacWSom2R610Ne2Ulqj36i0lUYj01fzgCC00oLE3xegEXzTE8KditCtKcNvhy8lBsIcJbbUxC61lOh8mo7AisIoiPNDhJAsmlNI3fAlvYF2Bluz8+weQ8QGlWLbah1qhPXoqDJDxap0SGSVHU2xanFqcbFnoJF6e7RNnhUaOFuX3GAixfaMNnX4eFm7XKmijq1lrYGlMKkWx2cH+x2aFihhQXELVYYTHGDXvbcrLhaKjDQSe2IK9AQ22lip++sePR2104dddCfPxqCri9HHd9lBbUdwA0krYqxhslxWEFE9lwdo7IUm1pxkVXBvHVrx7sc2Brm+uqLOAxICIlqPGCspHIhxN+qGl2ML4iqXGFn2VtuXDGawXFGjh+zK+hIWZNxBKkRWy4hgYtRdZcDtiMhgZGjCQfYJfshcIs/SGCeav9+O5b4Lel+nMsAhLUzIWoJRpqk8JiHGlI2XkH4Z85/3fkFUWxZqUBuPBdT42lmjSGorSw5Rzq1BwivFF691DeRLDZb0RAl7QuW6WxM6Z4R4v5rJUeLZOI1tXykxWLApPwRopaiV44osJi0WIfibVf6AXOPR9hkmpbly1pzciwMfFla8Cv0NhhW+IqRCRKNYi9Dy6kpxtpjSMNtoAf/mjU8NQNPtbAtqZGwoMsQcqDjG5/PK4VRQFfSWZGjtA2i82m58N+KSRBZ3O91K670sp5975XXVS2ZKctccqx++PxOy7FWx9+I44ScPScrAy4adH2woNX45VHro2p0084ELX1TeAzvM/dfxUevvViXH7OcRg9fCAn61aV/jgLT778ER64+QIhzeUE2Vl6p958xamxcrjM+248n4MxafwIVFTVCbvUNi8EWp+8zavdSWut02ZBUaoTCv230Mstkyb6QGQ9CGkdPYClpQg3NkFlNkg19NtTYVH1LrLR1hd5iXO88xcobN0gxdPWqNefw/gKmpkpp7Dx18AcFr28vBQbqsZPphBgxaPz8PucsLBviBYk0mBvrIeSkgKfV4E7RaMJS4OTZssUuxW+nHwUaBWYN9uyIcV0nDYcRlixijC7M4pUItjsaBo6EqMiRHhXAmubA+wlVJgIAP1AgmE0Nep4W4i4sl+4kwkvSP6Kv23fh10p4HOZgzN1wmsbq0sWxirz0UL5Pni3Xicu9KuP7Th5r1zM/Lm1/Yqil82TJ8cRqv34au+mMWmh8SjikpaSk4kgSZp3P5iI2J8e7LBLBJ+94cZ7z6VQKPDqg2ngugvHemhR40iDRkRbNcpNo+eAyRv/0Q3O0r5sKRtCDR3RSijqqlQxBkRAghqTFsbOBR9Ut42eER0jDB0qchj34uOYWdaIIPW58CCtoDAKJ/xkM34NDYYlMYPLDDcZ48PNJFtPF91iorCMh75wvOsOFf840obrT8vBI/fYRFhPNLOf2x9psBqEDFOmiOzS/vwNg4dHUFejgD8cE57rq/kJF3rnRA28bsq+lwivBiuNd2aE/sJiDG7QCe955wGNLa39F6RnOhxSYLMDOSSB5Spwndg0VZrKbxsgMqBVwhvVFFgsQNgQDtis5EDrv4xzzkDtuC2Q9eN3rZ6GLS9fzy/EEt525NSI0qkRovoqRjs5Ej0qbIAX4WzJn/UrfMEIW9soJtlCwktLXm5zm8BOHLzg41bRWg9ueEWsCN/wYSxihcdk/R0r7P1I23bLMeIs7m0PvQK+VmyriSNF6+569A14fX6h+DjDtOm/CWkuB7IU2OP1g/1/m72YvbpUq0iqe/41D4gzvAOK8tDU4gWn52MM208eh9sffg1ridiGSHDB53v5QzXOkI8wfPTlj/j9r8VYXV6F/32iz4EcJlX/RkDt383r/dbddp0DTITG5aViLCmrRaWJtnVCSLgGP/4I7LEHorS9aG5xBe0uOA1JUsqYESKroViB+XOxwf+YjKhxL/5Qmr4qtlt08lCY6kBoylh6TbvheW82/nOJS5RZS1uYf61tEpfAC48eaAFjcou6UyAEvW7Q9AFwmal2lQhvHgrDZagsU9FC2+09yLr7qCTVCUMnIA4ivHYLhASrfvAIpESaEV5egb8qWvdig0ReOVOezFuadUxUg3zyhy8c1l5x+xTjL5mFHdQ4ihBiMTaZdqtKOpBJuwGNw0Zhon0eDv9HADOn22Pb4EvmWkUcT4swhGalBY9GKPHkKTwiNBkz6xaOjjX+aM2i6nUWMbbZBnzlUsb33wjn8+/4sN+hIWFnLUiSMpbUsX19VNS4pUEcaaC6ch784SObLUS02cx6/ilYKyvYipRU4LJrg0jPAOprLIjq/EWEJaIxKYxE9PaFiWlZjX7B8OFoOfAQDPnmM1hqq1HlaW1j4QZKeHkrPNwUFNVTSNIf60+HDU2Dh0FduADnXunHvN9tmPeHPs5+n0GDTKRIXIuKqBosWtsFptXtFCEgkWTz8FHIIsI7cVIYM75xYk2tXi89wnroTHgpmXmG1xuwwu7QxPNB3vCNHouiSp3wrlhkxcVX6HXjfmhpBC0KFVhp6PIVXhy/tkYf62xn5dYibCAcd6SBvVR61/DCkgPteneyNaaWHPMvpC+aD/f3bQlJHi1eOFKICG9ni08O70iF6LlWwvq4cKdqsDLr5oh8CDktDUW//4yOjvdkZmloqKd20nNlvhs4WVeKigJobHo9CszFVoQIr9MsE/QvlR4GY2eCXP3qd/TBu+Oko/fFOVfdh5raBnFV2fczZotbGvimhstvfhwK/U9PdeOys48F37Cw3YHn4N4n3hRneBVFEXgoim6yI86KT776mb3EB2g7HnweWB3yr38Lv9uvOQupKS7sfexl2Grv08VNDo1N+ot13923JWmzAyddeCv2/8eVqG9oFmmktsEIbPQZtH0zbfTV3fgq+PIzNrzwhE1UrIWem79+sYktLD73KjwT1aJRETMcCsNinMkMOVNgM4hS4YhBCBN5Okt5GosXWERc1ipJKvn+3AqUNfnZmbBizqSGWifKIBEThVI7bDRzkTk4y41BI4P4FVOwHWaAJ3K+VaCBCN3iGg+WkGKCR1ET/oUaGkRcv+oSps0ZEabLpg/DYF6+cKf66/Hxl/qkKjySoCm07xo2jjTYnRpN5hbwh2h1Q0eJ3AtrFyFIfcDndtljFkkImYcFSajnaWFkIBY27BcmKRHHaa+C5K8ZH0z5sotEMN9trBJpFQ7SCtwONA4dgfQ1i3HqOTr+r78VFecCl83Xx5GHJkiK2ubH5EJ4cMcJSxeazQKrRcdUxNpzTwQzszDgc/3ydva78vog9j4wjN32irATq+p9WN+PIaO0mOBM+EiDShMY27OJFCpkqdhqO4Rz85Dx+stIm/o++ei/My8MYsiwKISEN5E26cmEHiKcVZ9X2KNEeC1EQoTD7UbqbbcIa9Ffv6ExbtFUUKTFSIeI0NwsjES1ELGXcKPeX0qKHVajT3NI2svnMu1LFuGiy8J45JUW7LJPEOMmh7BiSetzmmg5vBsQDCg0UsNtkjjSdGk8ezZusTVy/vod47bg0Qh8PN0nxg+HrZcyCK8S0ceCx2+Dw6XBYiy2w2PHI2flQvzrYi922DOIF5+w4/0vg2BMvvyfm3ZrQIRX61TCC4NghuKONBCc9Dxp4pnjOpsLCLabqvLgI+AtKEL2E4+YXsLMMyTJvFCjoSD8EtWYzFroXRCwumF3RWFXeZRS6q23BvbZB9mL5yPIlSOv+F9mtqYTXmJcQWPhHh/ekZ0XqRQdLbRgjhFePtJgUdtGv/HGtu5N1PX12/fj0H13blP7qy/4B+aWvoARQweIq8o+eeVO/PjRo/j23Qcx45PHsdeuhDul4KMNMz55AtPeug8fvngbPn/9bpxy3P4UAkGa+bgDO8aPHirys9Ki4ZyTDxV2zt9UXAeOV5CXBT5W8dvnT4H9/vzqWVx0+lEchNzsDLz77C2Y9ua94PCn7r5c5CMCpdavEWj35PXrtvZK447cvQx33ujA159bcNt1Tlx4YjptF6eAP0DqUYFEtjg+f/FuSnhDTjdNrOwLuIn4/nHxtdhCm40Viy2xCc4XjoAnyV9XN4A/nNNjd6+3l/AGDQmvI25EjJ+k4WfsIAgv5/jKaxoafSExkbPEq6yxZyQ73NzC2cBv0Sdvm0l4je3MaEGhCC9EBb7+WliTonFdVSJlIdhFfg63BhvNRHyMoo6kc+w5Fgtw2RH54AUE32JQ5w3BbbMgRNu1jTpPj30cFY5GOck6iu9/VWhBwAH+XJ3whqkPzTmV/VOdVjQR4U1ZvgSjRysYNi6ED9+zoCUYxtK5No4Cr0GwhYM0leoajheDxknmKRg0AIRhahGrLVZX069634Mx6JtPTSeKB2p4+Dk/dtlDJ1Y1dVH4E5zIY5kYFs040kCrCCIxOoEoSNWxbklJQ6h4oIipejzCNDU+89lQq4KvpjP9EjGZaMGnk09dwhuXasstgcGDMfajt+ANtfYTk+t0uz8uYs+sIWJW0RZa/VAyhdrmpOeRrChId6Bx6Ei4lupbsHvtDTz9chB7HhwQ/VhXx7ESV/xcBqgYG0JtErnT9GeGPb2Tt4GTJNiTM/XzrUvmWbGizstB66cMwrvVLVeJ9D6/RZfwWnVCrUwYD3tTI844biUefsqP7Pworvu3giff8eDLd1IwfHQEx/wzBPOoAV91iLh/Ci3i2RkxFrRsD0essNkUGrp6HzmMdwCHmcrtsGLeCacjddpn4AWF6Z9rHGkI0M4EaEehzbNhRurEDBGZtYRCCCt26AtffbyK6DRu3NUVYFK8tqXtWGHCW1+nQFWVDgkxOvjH72WVFoA87O3Qx2vEZoOdJNttop9+ehtnf3fwkYNcIp2KEoc9NTqFdjGK8rPJlryf02EHk18rEeT4XBVFQVFBDjg83l/a+zcCav9uXu+37pWlu4ny2xH3AAAQAElEQVRCLjvbhffetAp79VoLKnp6F69BYiL0MrYY0qsgSXhNSRJnbOFLecmy7Zx34SGCRFasbQqApX98tQ+/qNkvEcUTq2pIXjh+iIgJm0673ga2Z7isWFywDVLgweET/sD/3lJx9202/N+R+fjs1VQsrfWAr2XzGR+7cJqulOLRJ2Wfqk/eNpK0cnyXzcIGlEKd8G47rAzff6v7iYAN1MI0ySmEb0iziZwcJNmx0aST4bDAm5uPYEoGxmIB+M/9/jBdAd9ecNdlWZj+XqqQXnGiFhIIco1YGBki8sN+7RUTRiWgE5Vgrt6WoMu1Dvn0DRspktoXL8IeB/vx03cWXHm1Ti44gMti01Q0x4L7y3SvY7Yn4EQebJwoLmLLIYfC0dgAy1dfxfkCDofuDBLJ6kiypYd2rWvmR2s0dpgQmLF5dyIY1lB9+rnCS/XoCx7hIC01TSNSqKInhIWSCUJiDQXZiojdDgv1pXCY2lFHobD0CyjNTbTwjAjfkWOiKMoLoMpahOZTzxB+MIie7uhaZ+m/SXiRYo9FTqU2N40YBSuVZamqjPkXkkSZHWvKWvuV3d0plgoGfUobCe+8c/8P2GmnWFLfAQcJ+4TvX4SbHqU1i+1YUuvFL6vr0Rwn1RaREtHa4dDkselkUNGnB/eWk0Qu1oXzkZqq4uQzgpj7mw1XnpaOitUW7HdwRNzCwZEEMWz30Zpi9JVmbX23RDQFqqqBhgdRQpCEuC35Af1LIUK88LDjEaGdrQFnngz3j9OhNtRj55IIDjgsjFBQIboL8f6j6An9+Nm1kIQ3pNiEFNths7SmGzAAtsYGTHnwv6iinbPWAICPNDTWKyT1VmhM6SQd3fzj4xr8PHhoAeuETqCjdgccxmKpm+QyuJ8jIJvX9wiofV9k/yrRtWYZftv/MgwYpL8EU1LpBexXUd7uhdltqyP6xBwhaYgp4Y3QFq097uWojR+PkMWJHZq+Rnm1Hp+vtaLFKsIRjaS+ul+3ZVEElj6EjI9PGnKGoWo7fUJ1xpVXnOZE9YTJFBv45+ifxDVabzxOMyz5PHtXmphcWaq8sLotkaHgDn9Rg/B64RbhViKePM2lEPFkD+fAIjZwTNpUcYSiqj7x9oiEnWhMqBSe5DSriGEnom23Ksh224XbO3wk9h4yH1m5Ubz+vB3z1xB5mObE7dfo9eRITY0KSdu5tgDnx37tVZiJp0F4Q4V6W4KuFErXNmZ49GjhwVKrk04ECgZG8PJjLuHHmrfdkQaFKAEvajgsEaXRJK62e7It+x+AQEYWch9/oE0WNrtOyFhIG6KFQZvABB1Rk/CS9ErlwWikc9LWbZDGdcORxyGSnQ21pe044Y8WvUQGuiTzRl7xRojqqdJzwn58pMGqKmxtVUR42THhzefRuGw5W4VyKT40KpkIUH8LD6qbMBPQgiT9toV1kq05nW1SBIyr5hxLFsX8U4yh02Q8Y7GAbiwsyA8F1RjhDWZkYgET3m23hfnPNXwIyrfbBZlffIzxW0TwyRsurK3QsLrej1qvXkczbkJmHOFt2Gs/NCONCG8UNqs+iNyTtxDZuBYtECaTTbYEicOdeLYfJ52uL/LYj68NqzFuNWE3K4UWmwIzi4WdQkXp0WanOa5dNFZEQJyWRTiHU9Mx++Rz4Zg/F4OPPBCZr70kYlAQ+DsAZrxhBk34dq+JsUP1CcMGB70HbPFjxzhLO/rDt+Cj/m6kXQRWVZ4AMvkML0l4rRQ/0YUh10uhKsUT3ggRXlsHbaVo8icRkAj0MgL6G62XC+nv2U/+9km8+K4P387yYPjIKLwtKlqMs5wJt53JEkUOE/Ow+HRJaIS2eMgr9nPtsB1WDt8GA1CGuXMAH23Z8gu8ON0JlpLypByL3I0lSuHmH0loGTASNVtuC345k3fsNzDDhSPvdMBfUITdHb/iqdd82G3fEHbbR5/gQi1WWGgCWEUTLdcjlrATi2Jct+ZVdDZgd3AtAP6in5OkDB2MlXscgINnP4RilOOzr8LsvcGKJx6VyE0oahV58ccqNppt81Mdos0tw0dgZHQxDjvRj+8/d+CTj9sjAfCkpRpkLmz0lcjM0FjKzdJfJaATjkZqR8sBB6Nmi8nrSHjBF8NSOktTAwoLFRx+AolXyZ1CEk/iN2hs5PLJw/ipqgKW/AknTdZgJRydaFYrrEZdzRi5qTYsOfgYZMz8xfQSpinh5e3hEC2ahGdPNdqV4CSa3UZ46gSa3W6bFcRrxOIvSjsIqsfD3jHF1z0x4WUpX8wzAUuYCI5qkLQoSXjbtxU77wz+SwiTH78bw++4CZaaGpGrEwGSszkRVYzXXgf9KCJ2oHGZlpDeT3DZ28RQxo0VbscP3wmTtZQUvQ8bmlvxYP/uFBPAoE+FDfozFrVYYbEY9YX+jz8IXLPL3khZshDf/JGPKuTjh6N+xB+7voGFi3tWnsjRwJLtlbscwAYRXg12q/68ID0dPnoHuJfohHfwEBFFaFtPiSArp7VMPqZSv46ENwzNahPxTY2lsyrxX+pK4RW/sBcepA3Pc2OHwZn468xLsHaP/RHNzELOYw8ig0iv06UhSBJeioZwD/qRd8EstLMVhJ3aGAXfssN5CEUSXjZt9Fy6qyuxgKTm82s8WFbnhSMtAq8XsNCzyDs5HK87xUd1KDp4xybDQasDSsDXksULFchL/iQCEoE+QqDtm7SPCu1Pxaw+7FgoRFAHLfke+YUa+K/yeJpVEjxoJHHVCV1C7SVCxvH4DK9NiC4Af3oOe8VUYZoDNZO3wQH4FCvmeISEdc5MO267PA2Vq60I9oCwsFQt4mM6AjhSLSChGZhYxQojS7rTKqQ8NVtsjbQ/f8Wue0bw5EsBHHGMni5Dc2MokeIgJV7doL/QKVmnP63FI8JatBRhWhw6PrwtzB7pmWn46Zo72IqbcT1+/Kb7PEXkbrQwzar8ZXYwao/FdFhUcPt4QmoaMAS21Stx6nHNIvyeKzKFefCRYWGy1kJBjI9GPRvsAGfzAz7FOGri23VXrHn+dTQPHk6TJOfQqlx5WWgeMBi5t95EUs9mHH60Xk7R4DB4S/iT9624/T+OWAJ+SLkNMY/2FmPsmN6K3UJbxBbTKcxMpx1Vk7eDSmPL9usM4cea3SgmGARC1I/s1xPF9WJsRRqHFZa44wWF6Q5xUX+Fxw8/bUs7Z/2GnIfuhfs7/baI1FQNLM3msSjSJ6hxPS0hHbMIEV4b9eU6SQ8/XHgN+vR9FB28N9KmfgCH5odfcyLCnc6h7XBjr84Ul2kziH3sHIgReeDgIvhz8pD36INIf+cNuH79Gesv4dUQCiiworV9LFU0ihJGXqodq3fbG3WTtkZqoA55qMbz9Ufi1qbLUP74NBGnR1oc4Q0bi0K7Q4OdB56RUfPIsRj85VRkvPmq8Bk3KSzMgnxhxLQcIr/tJbz80ZpG0v9YJMNisQDmQs58BxhBwuDvFsbkp6GIFvRf3PUUav55Ci1eqvUzvTRu+RgOU+2eDNsgRVbDIQRhg4NIsyN+7BgSXi68cNl88OBVyMFKdekLEP5DKRRA71r93UXBnf7EkQZKzGM80+UT8SJ2J9x2YyEhfKSWEAIykkQgCQjEvdKSkNtmmMX0i64TrU6Z/g0sv/yESZgNT7PC70p4OrjPUUTuSDOkdlGayO2GJNRJW/7xUXkL3rvbFOGlkKRuaa0HH7+Sgo//Z8PqpTRFErETgQlomqbAV68TV1e6heqrkXSO3s7t0qY5LKicMFlsKSpenbAyKeNoDQ2KkJCMf+UJNP3vPfbqWrGIhGJ4orqE10btU+MkkRlEsAMkxVl97D9xOp5FyswfKfaG/1hqpoYjCNJknp7J7QTsNr2tLtr+L996e1HI8F/ew1mXBISdtQP2bcYYLGQr+EgDWyyqinB03cmOJUcaRVBJwutBClwuctCPJ3Qbz+xkN3/ZKXYsO/BI2KoqYFu+DEOHqPj3nS247o4AMql+NVUKSr9sOynyERQzPXVWzCosGpcsbELTrBYYzRNuU2vabgdhVX78XpisOYjYsBmhbXRuA9t7osI05lSDCEYdTlhoFJnpx+anCpdCHr+dciEcC+Yh77YbMfjYQ8V5TD7SQEFobmI9ccX1tDBDpyRRmx3tCSF5A9dfj/Bhh4P/pa5YiqKLzsZWZZ/BByeiioW9gR4RXg2WkF+k01w2YZrayNwUfPrsu4gQqSu+4EwUXHM5skK1IrjZ07ZvhGcXGsEJlrZbDcIbcqfCygC2S+MfNBRfvTQV1dfdjNXHnhMLbf7yr5g9YUsc4W09567Bbm2dHiqPPh5WWtg7Z/0ust3voCgGD9UwaLBwxrQOJbzUVxphE4tElgiNFH4sIlHQGOmggWj9t3VRunB8fchJaNxzXzj/mg2nU0NLk54u3MHzKBJ0oAXDGiz0ruV2MuF1xi3QMGwYYLyjRq5djolFaSioW0v1A9wZYZFbS4MqHr9gOCrcXWkRahy/33h3KM1JK0qKHG2HA3nJn0RAItBHCLS+0fqowP5WjJKdg9rxk5D+/DMoPP9M3PvD7hhX8zPJAQG+QSHh9hov7WgoCJvPh0ZkgPjDOskj2+8k/GzfzcSXX2qY8bVTuL3NKsL0ghWOBDQNGiwR/SWs0dZlWIsScVg3YZ7LgcqJk0WAy5jszD+1WW9sXY5+6iFkfjYVi7o7y2tMJk0RXcJrs0dhi5tweIJ12S34/fSLRXlZ839HtfFlvPDoXOsyJETYKiTVCUTs4EmOIzsUfei7bRaUb7U9ou4UuP/4DQcdFuFgbLdTBLs2fIwFGIuz8SSRMn1yVckIdyLhZd6pBEMkPaJy9G6hyZFwBiUSuera4Aw3lu97qHDwFi1bTv4XsD2tZZiQs3tV3F9+4zKZOLO/UN0QNSYXVk4kIrdqQ0cMQuOIMbD/ot9fySGmhDdM28MsxWS/nqgwjTnFILyqldoa15+8dbv94EzsOCgLFpcOSMQY1Mq338Lp0ksKkkSTFyW6q3udpXQxwtuZhLewENYHH4Dn/AvF86kaizVflCS85iKLxkX3pekxmOBYg/piyJqqt0UP0XXnmFH49ZL/IDhyFBGyPzF49lfgf009JrwawoRHjPCmpBLxNAg6Z2ioFBK/+mkc1F5wKTwP3Y0FFc0odw9HfsNSI0YPjHjCG7GKhHbjbLdwkBY46jg0DBsNxfgrdpdO+RpfvzkH+bSrRcGxH5/hXeeWBnr20I7oafRMMOEV46ft4xHLy7TkpNqx89BsBLJzsXjL7cVuTFa09fqLMK8SzMjdmDx2VKpPQONnVFv3AzKXC5FRo1G8bAG2SVWw/4M3YcxHbyElQxM5NzcS4SVbgMY9GV3++DWh0ljj3aF0hy7h1fjwcZepZKBEQCLQWwjos35v5b4Z5LtlUSrWTtkZ9vpapK1ZCXe4Gfs3vk1EhwhvUCdPCcEQjYpokVAE/NEaSwmdbk34xWvFA3Lxp31rjK36BTecmR0LMQtONwAAEABJREFUCvgUkPAi5u7OwgTDEgmJaJrFSvVVoarrzjwTi9OgbrO1iGf9epqQzA1EmXBXV+rDxxIIgKUm86tawHcCdzYBWWk7nRM2R3QJr9UZIcKr58H+rDIcVjTkFGJtxmhMbJ6J7xc3gm9NWFHvA+db3sP7hjlPnpv4DG+AJnMnbWOyn8PYVsxw2sQNCIGhw5D90jMYM6QZdz7ixxXXBzD0x/c4KnJQiyPvPRBDDtmHFgUKQh1MsAGSIHNkJRhEALTfyg5SGim7tW0byQuRMWOx7OBjkP7e2yg++xT2Eoo/jhEW0irK9f5QadIM8+xJftRRrHep+EiDJX6r1oi9ZXE6WnbYCbl/zICXSKqbJL1733McRmMRdMLLtTUiJ2iEieUzthzdRiSJRhFbY2pYVgqK0hwYu88uqJy8HWadcbH4C1qu6aXg+BwxQgO3J8caQoSFhRYWnDbqsMOirIsvh2HIEKQ88hB+evtL4WTNS4Q3asYnwsh+iSjuc1tYJ7wwyHt8uuHZbiw69DjMfP4d4d1QvkaYLS09w5SfS5bwpqJFpA/RQsym6uNAeBjaMCqP48Zfn+V1ZyM9oEuWjWjrGgsWADfeCFRWtobFEd6gZhP+jnbvnkyXFb7cfDj/+hOun37A4CMPQOGVl4i48Vp2rt7euprWOiu0a6VZbeD3THxci0Wj4azB0kH74uOxnaXoXIfa0ePZiUH1fwjT71XAOAhHN5oeT4MaCSOoWeFwRjs8XhCesAUwZw5w1VXAJ59gy8fuQqa1QeTuabIIk8egsHSi6WWBaD3E+X/zDG/U4UDv/5MlSAQkAh0hoHbkKf0SR2B8QToKr78Kix54ApXb7AC/NQU5oUowefSGekB4aZuNS+UzvFa/l6a7VHoh65MH+5tqQJoTTTtvhz3Vb1CKEmztmA3+xx/KhZjZsSMBRTwFlmhIj0kSXpDE16qqujtO57N124wqRN2YiSh+5D6a6A7EkI+fEzGqK/VJTfH7oBLRY0/eevcY51jZbSomqxZDytYYThHeKRlRWPX5Q7hZy6Ptfr5irXHSttgev2DmT1Z8NbcOv5c14LOFVZi9tof735RpmBYTasCPQMQKhytKPoDTIKFZbn2CX3naucI/+9kncfj+9dhidAtSv/hE+OWiBiCCxZJMbnG4A5yDRMJo+oZKRDKs6HlyYsbZti6s4OMpy/cwPhBasUxIrXLvvAWnLLuZkwlVtlpPyHyAshd+YJJG7dEdhm6MHcMFvqXBpuhpTT/TTNt7DzhpcRb54CNkvfA0Bvz0EXbFdAT9KoIdtMtM15kZpoop4RB8ihvEaag/GaF1Y+dNGIWKj79ExXmXoHKH3ZD7+y84+O3zRcQQSZdNgiA8utG4TGsoKGJpLOG1dlymiEDalEHpCKZlkA3ww4kILSCEoz2OwrNjjSV6JuHloxvtY43Jp4Uh5bsyNUcEuZtpzJCtxbvuM0zenf54LRWgvjAjhNypsFnWbd8WhengIVwRtwAUhDeiEzMz/Trm/PnATTcBq1e3BsV9TBiK6mPXXBiakYozXZh11qVwlq3GgHNPFd4p336NgaeeIOymlmP8UYjJe45F0SXnCW+F+kqz6/kKD0NjCS8NH3Q8Uo1Iccao3FRYx08QPhn1uiSbhe5hBk34dq3x+5F7Q6XnJUjttNPit8PF6BaTgL/+Ap58EthuOzhrqjB2/pci8+YGhUg6vQ66eVb08axBpTHh9SiI3QFt7HCIzKQmEZAI9CkCib5r+rRSm1phucOHYPTFZyPtx+lYUbgl8lGFEL3k+A8YJNyWSERE5TO8SpMXHqTQli+/noV3G8162oFwRP3YHd/i8PTPicRp8JOkA9ASknZwZixRs8YkvBZEKK2FXs4c1l7l0RZu7W57tnr//DPGF9eisqJ1IlaJ9PC8wzXuqN082Vh8+rZevZYl8uJtemc7xjuEJFeca2TPLVGEtfjr1vloPPtpjN7nWBS9/SpaAhGsbtDzEZkkoOmTD5GdsE1gxUmczBbIkuOygctbcdBRaN57f+TdegNGjyzGwFOOh+L3UwxgD6UUWfUrAJIM8QTG0j4REKeJLW9VgUIzcEh1iJAgTaxssZA/m/FqclEafPsfhJV7HgDnn39gxJSJyL3/Lhww76FYtLLVirCrtBBpM6l3Q9QsdpWkZtwTInkbLX3fvYR7ygX/QtqH7wp7HqoRom30jtolInShcb2Y5Adhh8UWRVfcc8uidBw0vhDlp5yDluJBcIQ9ImeGKSpsiWk8brhMjh0lIuWwdP0aK6BFop+2wzm+nwivP2TE7wZHjm8qHkO2sD4e0AlpyaXFE3+86M0vwtavPowncA4am9Cjf/xchgKtSUIpqbC1e0bMUF78Bogx1vv1havfnYkcrQZcBzPOOqbxnjHPqrYP94etwsvp0seecJDGH5BtcfCemHf8qbBWrIV/q23gn7wtUj+dirGFaci/8RqKBWTn6ONOox0Phd4J7KnQIlDj1RA74pRqAe2uRGmsti0rLkob6/iCNOy1G0lfyTc9pEuyaXMJ4QT7kTYSQK85qDTggiTJdjg1ymndX2irrXXPggLg008RzszC0L9KhV9TPY8drWuMKSaPUc6dHl20tACpVuOd1cHuAEWXP4mARKAPEOCntw+K2TyKcNM2uTc9B0wg/B4LOiJ+nSJhvLQj9DJWvD60IBUdHWng9Dn77YHqbXdkK04f9D5Gpq6Gp0nldzlCRj4isAuNJ/CYhJdFLZoiJEadJXH+36WxoNwfSnGc9W3wkQa1uUn48zYh58kSzY4k2yFiwxaSBHNkIfHIAE12gIPLZk9D8Z9qVYl4rx23pfD5sHwXnDLzOkys+B645334aSHRU8Jrfl/iC9vFIkJRFJE3awXpTjbgC2lY9fjzCA0eKtwp332DaFo6AuMmYCvtD2S1rAFP3BZV6VAS6qcJ3kr5KtR/YdUu8ggZLM5hswh3vJbptmNEjgvVEyfDP2BQLMgVaMQuoLaST0zCS3bGloyEfharRriqHcctLkZg2AgR1rzVNsLk8RryqWDSLjx6oIWpXxmXEGywUbNVwqC75JGCfASIRIyd8SZ+xRSEQwo4n+7ScXiAOlOjkW7hS2DJI+p0EGEiSze/7259GDed+SNOw3PwR4z+oL7qJpkINsu0mkw0xSX822s7DctBIY0nZSf92SxUK9HDv2BMz4QGlrZz3ksvuwHf33Av2nxYxQGGGpObBl6k8serlZ4A/KnZyEQDGhM5RmEsPgUbQ+u/gEl4abu/1Ve3DclyYc0x/0Tl1jvg93Mvx6xHX9ADSG+pqScdyMkzBn0oAiWoS+GVcIh2HWwi3NQisEB/9BWoauvziAT+8SIg1V8nYoYCCroRtop4rPGim0ko/EEEaYHmIAkvAA5qo0J77QNsSe+fe+8FsrPRvNseKP5luriBp7FBhaIo6G43RDyvVBi3rKVZQYotIMrQzEPzwiU1iYBEoC8RUPuysM2hrGCGPumEPCpa6KWfcJsNyYtG0hALbTF6kELkrOPUw7JTkPfGy1h85Iko/v1HvNx0DBoaKS69YBN9+WsU1yS8mtVKk4YGJnOUS4e/QWOGwvvhx/j15vsRyMjC9avOxYdf5cSkoA4iD5lOqyDNng7OLvNkY/F5EUlPh5cEey7jjKDLtu4QTHPaUDFyAurOvkDU5Xb8G08UX49tmqfj62csqGyhyZ0IpghMQIsQnhyNpVd2kupYFHa1KjtJ0PgMruZOwdIZf4kPgDx77YuaSy4n0psWi6iQhNdCk11HRYdI0qaqKtRQACbhDRIZ5MRMhNlsr4blpGDuSefgp4uuQ+U2O+Cn2x8RUe44bhpy8zSYEl5F4UmdOkyEktZ+UcOdSd6xn81KUkFrzNneoj76CGaddyWmPvc+vAOHQhDeIE/icWW0T9SJO0wDTqW+D8EGq02D1dIO3A7SMd4hp1uEpKIFESK8giAIn641QYypmhZ6TkRMuy12PEW4O9Eax09CRfE4ERoMG2OuPY4idF0tRG0kjg2T8Cow0qPtv0wa//uPzoPrvf+hfuJWKCLC28TPZdtoXbpoGEHz6JJk1WERca2WjsvjXQom2IxdVbMfobRMFNGuSENTRKTrUKO+Ev4m4TV2MYQfaYGwXqbbrZBr3d92B+6CaU+/jUVb74zFUXssgtbQAP4TwBPevU/34/cZEV3hCDLhbY3LfhoUMOHloav2kPAG0zKQ6tclvJGQhRZLUSTyT/QjRwyGweO1/bENDjKV9tZbwIknCqdvj72QUlmOLVMWokH8tTWgu8VhhBtGqVV6dr20SE+xBshFP1qgkS5/EgGJwN+AQMdv0r+hIhttkT2sWDQjDSxlCXqt4tYEb6Kk15iIBKny+ogGpMLlQuf/RoyA/6qrUbH/IZgcmIGmBn2Cir3UO08pQniKsETDwq5ZiBwpGpiICI9ONPchB2LKfy5B/SidOKRFGzF8T12aZae8Bma4iPBYSFq67oTLnMFKhDfqSoHPq8Sk13arXu/4IvNoazhKE0XlUcdj7bhdUTFuZwy66mCUO4bhH1MvBOe1tsmYQOITdmKPBPS43qAN/DGOqrYd9m67hSZNrU3qNU+9iLrzL4XmcMb8lVAYKlU31AFR8pPkkbm7GgoiYrGLNBFmL2Szd0ICmbBkueyoO+AQfPrEW1i496Hw5eRhmHUVBg+LYs1KvZ4qlRnRuMcoMy6bFVljv0gEWtxgsdhAfRkLXcdiO2B/VF9yBXg73JuRiVzUIEgS3kTHTnyGEZrYuc381btOePU6x8dpb3cQHmFaXLA/E7Qd3roLWLaUnd0qxp57isvkyFGnvcMPjzgsXlGRsNg4JRCMEKAcGI9jRQVAYw7PPIP2/0JRlikDtrAfzUgjomakbx8xzu0dPgqFWoUY6ywhjgvq0hqlMRP2REScCDFCLqmz8cORdh6SicI0B7y0nRBMy2IveLq7LYVjMeF95x0gL49dMeUL0eAhV2dkMJPw3n9sPlLtVkTo2WBF0WH1tCDtg3cx4Im72QmFFocKPS+Zr7wAx9LFBJ4VmtUiwkyNmick2lbV9EnMbBk8DMUVc0XkEK0NCDJh704LUUQargDVK0QLND7D22ka468hcnhwz73ZwL7Kl2ioU2ChBzLALyF0/o+DebSpFJegQYqFKkrRrS436fInEZAI/B0I9PBV83dUcdMqU8tMoymxGXykgV94CR9rMCZfhd6UVl8LPEghCa9BcjqBYIvtJiLlkINEqKu6jIVQYGmP8OhG47OC1riP1ngisBDp7SaZCG4aqm+Js8NSXcUGeFubLVYiDR2R/BC1y0KTrOZ0Cgmv0/h4LI0kdJwuXg3IdEIjPMqGjkbjN5/g3G92x4jjxsGTkgOlxUMEx4JZ5U1C0hufrjN7xBDJ+kJW8deVmPzEx021q+DbBuL9tJRU4YzSpC4spCnhELh9HWEcoDJ4IrREgghZHRQbCPPfTyWbkySuZLOUn2AAABAASURBVHT4O3RCAbYoSodCoaz8mdmw1lRjwCANq1fpj6dKmFKw3reEIdvXUcweDE+FmLdV1dMaXusYY3LdokyW1mejDgEfl65RnXnUrhO9U48QkQiFFmsh2GC1a7AYde00AQUw0Q87XWQDMtCIXd69G8oiIkXCp2uNy+MYlqC+Xd7Rx1Ac3l4xHlbjzuEujzTwg9AucYjGLntZw0EEFAdUdI9RtKAAedFKeGk720+LIU6fiGIJdsQXFlHDKi1EyeaydN6XbiKeo/PSKBbgSU0Xpr+8VpgdavRcCX++IpD6TdjjNFPC6+xku5+j8rGjEbkupDmsCGRlsxfsLc1QidmpZBYWEz6Ud8QbROaLz8C2YhmRXZ1Ii8iGplo0IryABYrhk5hRv8VkFKxdICKHg/Ts0hgUjm40sx8RCIojDU7a7ekmiQhWBw1E0+Dh2DH4HVjCq1J9u5XwGjh7m0UWcKk64VVVwkb3krpEQCLQxwiofVxe/y8uO0W0MVLdJEyewISlO42kdBxFkCqfBx4ivG43+3StLEVFIkJqo0482xM3EdiBRkIr2DSdNIQNcuRoJ4HpIJnw8tLLX1jiNIUkm+xkaY2PpE1sj1c82QgJb0oKvCThZUkrT3NuhyU+mrAPzdIb3hKMCrepMQm10gwyKN0lyF+zXycGZnhnZtQfEEE+lvA6o0JCIzwMLcupT8bVNEEbXjFDi9+CpElcUbjWGoLtJll2M6myECmKsoiVcuCPZDi2zcI6eXTyG52bAk6bTvXwZ+fBxoR3cBTl5kdrVKZGaVmaSgZgjBVhZ41JmtL6KGsuJ5hUclBnajBh/K9tB4njKTlqHYJBBZxN0CB3naVr7x+mid1CC4EwrLCRBNXadVNFcqfNggaSgIacqcLNmta+TezZgeJxxPW0mFeExfdPB/FNLyv1gUXnjwiGjUpS3c3wGKYd1CNIfc1l2qjMIBwwFyCxtB1YlPwCuDQv0ORFgBZDHUTp0Itlu1GD8EaMRYyDH6oOY+uew7JdYkw3G4Q3sFY/36qHttPNNvPCqYO2+oI6SMZ6r13iVufk4kwctUURIgbhddVWwbp6lYjAV5O5Ih6sWRKGeY5Xs9nga3djheDxtMi2qEZ/iNQJaHYbrCGdQAYDCngMJpCK4mm0VNGghEIIwSZ2exJJ57Co4IUo79zVk4TXSm7njJ+BxZ0v0mjIiKz99K5ji1vxI5SaBpu67vuOw6WSCEgEeh+B1lkyKWXJTNTcDAGCVucRBIInaOHRnWZMPgpJBW1+L1qQ2vWRBiM/yxYTha2gZYUoL2y+aYVv5xpLeM0jDVHjJcwv8s5TtIb4RoxsdRg2c2KzEbHoaAs3RBOtxecVW+8+mgQchnQl1aZPsEY2McNptcBPW48xD7IoGamESgsaalVBOnwJHheJElGl5PAQ4bW7QeSSXa0qJ8UhHKsbffC3k8ZF47YgeaKkuU7HmdojEpHG7eWr1Cw0cVsiIUSsdvKFIOVscdlUNrpUJ249APuMzoefCIS9tgaDh0ZF/DIivcR3QTM15Se8yK4ZFsMgNha162WyD3+0xmYiKpiZiRytFgGvXsdgjwkvkQhamDCBYJ7PGHRXroPwmHXmpfj92CtjUSPG+I95dGIxnydrUF+sRWgB1UnUNt4WcllsOqbhKLvII64PRaeSV0e/WJmRAPyKExaLjlVHcWN+hQXCmtJURYRXL1d4dKNtedlZOGb2nSKWSXid9o6fERHJ0HLdDvjT9XdPpLoLwktjRSTxEhk37cJD13xhG2wOLWGpq5adIxKmlq+G668/hf0fR+gS5pb6CBTj2dNsdqwtM3CnWBEqQSUnV8FmEHvyTuxH5NmMGCLCG+HVu+nRidngC2FtS0C8N7hOPF4d1M5OorfxdlhVRBxOuBUfWMJLTmx5wanAffe1iRfv4Dpx2zzN+lhxqgFE7A5Y6B0RH0/aJQISgb5DQH8a+668fl+SLS9NtDFc2yJMc7IUDtL+KGvEJwt1aSw5W3/GhK+QNMgWaDEIb/eSD8fI4ViWsyVOanla5BWTAgpX5xrPEVZDwhs1pIMOnoE6TxILCQxvPdJgerJkmu02C2/4rnttD/NIi98HzZ2CFtrms7uiHB3pzo4nc/6Txu3Jp5qVKghv2SpFkFYfZypy6Vpzzp0jIoRhhc0RhUUwSOEltCFZLkzITwMvApbV6/22uKYF5U1U37iL4hXqI4sxYcUTQya8nJGNwqxEiqIm4dXYFyTVUXVLN7qdovmzc+GorUbRAD3xmlUqTdJ6wkg8QdO9dJ38NWsrjqYkUw/sWo8SYcnS6kBdw5waoQQXTGauYR5IwSCCsIPP8HL/m2GdmSzhVSgwGHfumK+xIq/WH1/8X1ICzJzZ6ke2SJQ0qqlKUmW2WQgzNrtTVqsi6sfxghErG7QiiehmN3qICiVaD1vYjwAT3nbjp6PkGhNeCkhtql5nEUXenf5yfv0JIxpmifCIYhGmkxmWsHWuDc91I5SpH2mIGDcmdBibxrDwJyknDDIq3IbmC9hgJyJos+plG96dGzk64Y2PcOL2+vMmFsFB/co0jRa2a8taO0uDAua5GhAb32RN6GeJJ7y0MyHGYDcpa70h1BDh5Wef68Xj1eHqJpERzAuOsNMJJ/xorAe9e1Q9hJ473bKubr6H62lxzqH80VqESD+/I9gtlURAItD3CBhPbt8X3F9LjJh7gXVNNC2veyaSpZLhjkiF8fLkv7LG2HiQAuKGbO1WzRxzCPbCV0BZDREWwQi6TcPkzhUlKQ/FDJH0ggzYLDz9sK1rFR7ZgYTXkMZaifRRw9f5cI1Jg83nRdTtpq1NBXxbgqIonRY0NNsNi6qiiiYpM5JiSHh5W9FGTKc9ITbjtTeHPXCH8ArRNqbdGYXVuu5kPj4/FRlEvpv8Efxe3ohakgiVU9magQ1noNHkrdJEzSjF92GAFinsZ6X68s0XUWNCjhBR4nSdkXoOi1d2qwp/Vg6szU0YMEjvxzVE7hlSjhcbNiZpYU9TUdmmlY8WmPbuTGu+/tGSjWdyisz9REbCPyYbKvU9Y2u1ajSG1G7Tpth0whmmsWBG5jPbpl2Y9cQsvv0WiPuLYLzIKG/28/CCJRiADy7qS01E706z0mLOatMxDXV1D28HJDBEpF6lsWoNB6AT3gTKzC8UVcr0VhLhjQh7IpoSCsEZ0hddEcJJjevXrtKPzk1BJFc/T4uaLiS8ZibcTuOdY3qx6QtaaFGoQVE0dnavdtgeLcWD28TLfuox4bYhhAg9R+zQiOytrdD7nd2smPCyaVMVNhJWil0/guSiERD0K0hk3eunZ5S6USfX9PyEafGbMOGl5zJCC1+SoYs6Nou7eIEIYyh81tUiXBh5824UGXDAC86Dd8DYLZVEYHNDYGNob/ez08ZQy02oDtEMfVtRa2yml6sCnizjq8/nW82XYbw/6CXMbhuRHTaZ8KYaV3exuys1e6uDRXDhJx+hw7xFaFstPl4oLV0E2q1tJyTh2YHmtlnw8bPvYg98g/lD9C+YlYBfxLTSBM1TZaDdLBQm8ifO8LrcQprodEfB0haRqAONL5ln72X1XpK0BtgKS046slAvbqSw0iTpCSZIJIyJPUSE10GSZU4rMmynFac5wVMvEys2w8Qw4wmvQhOcSuVyMiZ6bLIKcjwN1B7AFg3AvGvTlPJwnESVP0cnoMNsa0SS8jUqVKoVZd/at7xXKkJbNQ43XXa7Ylq7NVnCy5Gcnnqxqx/fLvbvTvE4UgzCS5yGFindl83Xd3G+4RQ3G0KZx06EgzXjeUAggHoiTU2BMJr8YVQ1B6AS+VTDQSFVttu6L4+zs6kKrHYdpZB5pMEsgyN0oXgRQMlhZQmv6oTVonYRWw/SCvOFJd1f06MjDSoReUfYJ9KGFSu1VVgT0qwD9bFjaWzoPL7ZZhrLMJ6L+MjeoBX8wZrV2PVBN/+cV12Jef84TcSK0rPNFseSRWzQ0xaCFjAlvDaUx0l4OUJE059fqyWxPuQ0rCzGgtKKMPSP1qLs3aXij8x4ka+KsRMiKm6Dkxa/XSaKC4w4XXBGdAGBt5n6n57BaBdHqvi50KChsYHiUj5Okg5H7A4kMnYouvxJBCQCvYCA/jT2QsabbZZpqaLpSmMTESAivET0hIehBYRbn3gNL93gCYhsVk8L6UALUhM6w8uRPcNHYiHGYOD309CeYHN4dyqYkkaUCrAlOPG4bSqqJ22LhQN3w1rLQJG9pV6XKtloQqG5AHo7RZDQ+AMui9+PCE2KtTUKUrOisNE2swjsRCtKcxCGQFmTFwurPVAH5sNNkhJvhQd2VYW/iwmnTZbGJB+GVUiWuY5twg3HgDQ78lLsAgvF8Ft+/L8w58ALhUsN+sFkmdvHJEh4kqa3VRP48YeAmvFXpaI04almRhQvkV8wO0dE45saigdq4moy1cikUwLN7SMJpkhIGktayUjoF8nKEvEczXVUWy3hHQKRiLQgjWeVpJJB2GG1EQYJtlchlEPxEt5whHKL+xnPA4JBzFjdgLkVTfCHI2DBWa7bDjUUQAAOmFLCuJQdWm2EoUl4gxGLHodxY9tPPwHz57OtQxWiBY1FVaEfaaAyaYyjm3+WIl3Cmx+ugDcQ7Sa2HswkiXd4nBGP8IjQ86gmUJaITBq/epqRBkuDfoaWvNb98eBlX8aXFdvjFBNem0ODJcHbBPgYzhZjB4scArk64XbMmS3cduX/2XsPQFmWqzp0VVWHmRPuvS8/ZSEhgghGfEQGYUSOJolkojHRBGMwmGAQYECfHIVIlsEEEwS2QRhMlski+IPBgHJ8+d1wwoTurr9WhTl95qSZOVfvSu/NnK5ctatqV3XtVbur+0yASQS84Gb6rtfZED+zTPTVZ6wDMdeB7aoyBAo0mIy5xh4D3EOGnnXf7gQbpcOjuKm1zTQA3o3NrpfjdG9bDVC2o5Bp/5qF6TgX8/wJsYctgWvFcMmQA9uMEACvTZ0OsWtrzYE1Bx5KDtiHsrJHQl0maXiLa1eCdmZeYzZOms/5+KzhrRLg3cUmNk74+Ps8H7cvevwjnoKaj6UlNOfTjwu7/b1Z9GRzM/gXeblKGTeSwLn59g7XxrWiYPb3Ya9eIYi1ATjlfoZEWlMCI9W5020GTeItdzSwBD1MOvF6v6fcisfftAGBnMv7Y7jH3RHzvv5u1mNIxx85OhEzHLZN22B04Vbcg9tRDzzL2sMZUuiWzQE+5K3vwHs88Wa8zaMuhNa9+qY7sPs2/yTlAKK88lB/cqS0R54BZy0qP4bn408GITlsGSf/omaSNLz63Ntj9KWG1xhkzDMb23mgQhCj+iWURxhAmtZF6zNbceyLvZ1QZEJwFzwLWprHegzfcDNREiiVxfG8nSdnCeaavoaXY3QoTwYT1PBO2SYZPR3xnF13bBF0TkcQyC6LQ6VODJSsr6zEJaBp5wDvx30c8JznxLIatOib2dpEqlcFAc/U1CicQrNUbJBVAAAQAElEQVTkYz0DakivbN6OO3EX7r2/OzbPfGQGScNuNyR1xmEZLHhxy+EyLqG+9iBO/OX+aQ5lf8rstRmdbqMeetZ7dh9TMQxvuhi8kwuXgputzWICQ3CpsK8q3PV6I28wLRy8i+NRLbprCSWBoiyDT0cmmokBp0cIn2Zp7mxwsmxWDq4Zh7lTD04rcTjND2qUnHOKnU5jP7ppAvOKnDO6LyxvXO7RQoq+KtGSB4Ml+xoKr601B9YcuC4csNeFyopEHo7F/KW4+OublFzvCIzioq6+ahEEBbb8WbjJHwxBi1w3HslBR9DkKDRD4Axr+2KHfQxhxxPEOk4vIOCUBZFyjjcvyMECcjzk26odtOTfSsB7dVSFOFlFOm9ZsN3zgHdCwKsjDde6DWXFxVtbgscD3oTIY6ynP/4mPOVWgmQy897teEbR3XMPCmsCJ0dpA3FM0RCleoU8X/m2/xR/hHcLnyIqzpj1b876nnLLJtsHgiOP3Wc8E8/Hp0O/ggBWre5XKw1vwfZo6CSEUZfKCo2xZbtDYEFr2tPwPv4JHV77agtLbmt6nDq2HBABpJZAgnJ1wdqYLb04piMpJfsm8M7YhS8d+zCU6lOUqAh4q8IuVFZ8nHKDkTN7PQLIAbltKxs60jDl3NF80vn3wrKjTLEEUiMMCH4YWOBSOQFyZZ20CSVnwCfmKkEmx8mfjHhiWW9FwDuyQ5h+/pRn3hHwv7p9O7dY9+DBK5ox8zmOhrWxU+x2d1UOGgI7t+hNyRLb3Pjeh1tR76wGeCePfwJe0z0mjmPpSHHB69KlkHGcvhIRArRq12DYxo2U51OPV738YG54zWkTNwLVEn0kWRSDSg6k4Z1Sw9t2kU6IPMHS05Gn/Ievxu1f/1Uom33E+XpC5mOi9WRqBnhHcQ4eOXfeK9dyMI0xmBKQK1qbwq6sF15jVWZt1hxYc+D6cuBgBbq+dB+x1FwCENXeFS5uFk1vMZbglqz05I4WRDqza29vHPyumQTX8FmhNcoZgqdaFy4YSPhbasMaLrSnZmaixIOVhof+//YWX4wHkgZzY0GklD+Ef4mg9cHdAanEy169HDwCBwFohlC01C5LRLhvo0ZR4Mjas6ffRmHxTo+7ieIRuDf9J6n6fmp4XSwrEBRrON4Wnw3HoOkiyNFLaxW1fcfnPojVi2YCbw0HrH7ibfgbxM+/GWmyJcg6cTHmFyByBERMQg2OY1WHhM4bgtXFxhDpN73ttuBz99+HxzzO467XGXRtFLAC0CGRbZJ7Lx/T6qsfEuaqpaP2qGONReyqspxpzCCO30c++JMwxmDa69eZhZlB46pzmlMC3uFWx41IHBcmnXpJc9lux7mgjL5LAFcBmQR4O85pcGszphpPY12mOWMYPya3h8PIGxU5zQxKi8yXSdLQIfc11YUTfprLBce3bMcY2xol+XxC1lm05s7u9q24Fffh2jVA83CWeLwH7ZUrh1I6V3ANWax/Kqg2XsYl6Dw2zvrp/pfp5yOD9vcMygHHkXOhn3Sq/2Lc5M9reGs7mRXTJwFbbsZmEfSoCvWu0mRgeNGrqKuQdXMwDUcaFNDaKvdk43Hhr/8Kg//z1yGLp7bXckxDYAGrGwxRTOLZ6glBtop4bsTkHmc6D96JwIQsIFthdJyrrjFYZiNxHOF13JoDaw6szIHFpNPK5B95BQuu4pdrallG1yBcNe5prqZaBcUSLobZq6DM3ogrIz11EvxmUFCwLjY8Fy/6AHiL6ZgAm8RJ57SrY+WGGjLlee1NT8U4fVmCGFtRZ5qNKiKqm25rca2n4bVc1FVYQH3cV4Eysr0SNT1jM2QIiC+tBe+ZlgCqHkVevnhLyDu8fDekFRLuGx3zTy5CpmQJkBkCmta7EKMXcqoFAIsyKx9ZhVtu9UEjpDjxTePa9ITdmH6NuwRhyZwmaXg1EgLCKrew2d6G3ubWGV4daVAf77tbpT1moIn9Ucx9u2O84sF9NOMpPMGRp2a9o5jNj+6V5yxTbMTx+PD9X8CFV71sofnTpyn+Wj7anaDC5naHRT6hpfKFA4pLlbzBdORh8GQr9bEdjSEeTBnWfy0UqAtZJlNMTQUHE4JnWRrLkhpo5Zu9tJYBr1zSV9pxZsK2WSYU3QSNraC2M3jqJT5Mt7ahf1awv2tw1pMI8Of3R7QPrtYWKJcAZQVZ8YC7FZv78Tz9AaWeL4Nc9Vf97iXJq1s4HPvRJFfEIoZzVtlG2xH4yi9Tdwf9ubJfcdvCBiohGZ80vLUVd1PkAk6VAO9G3QTAq/nB/dAZJWPdZjoJ+bqywhKshSdYVcEBV1rd5/L7tpFzrNEm1FIWaJmlop5P30ZouRFeZjyPJbyOXHPgEcGBN0wnl1tp3jBteFhRrZzFqNrCcHIFjgCkr3kQSPKpt1oQkzc4PgmiQiskY0xdLKzduZAAbznhozoKZxY/9ZKmUCAlZOJq3FHwGRMFQog7w6qLOG2k4f1RfBb+79d8fyhh9+LZw5KAcsTH3CEyWZ0+vkv/ntmkDWqRQIFjgn8RS5qR/a0oULevvZ68NSwP7M/Vg7mfeG4omKYJ8MbziXOZTghKOM1AJjVCymYI7gzHtY/n9fJcQaEtsCCBaAalslKB2LGNi/dRhUqOw+imW+Huuxe33+kVhfvvcQQLIBhNWmVJeKZcHTUEgzrVygDjOuPQwoFDyojFLpM0vMpdkE/SVsu/iBHYlfbVTBs0KLBxoVukWMhTcY40bG8IyNL8f9nLgN//fYXY2Sa4XQKB4sQV9re0JsTracHU1XApHCJPsUoCOG12lGXSODmzOjhQyGfooXbE1Jk9JZpy1qJq9zG2AziO0SzxFE+7tYltXMPejoXmyClZQ1I7jmAsBGi1zqJYsH/gr2T+a8VFbI4f5FwRxxg5f3GehCjO41mfQ0S0RvsmHGkoSCvGLGDfcgvufad3w5UnPeVQ5mp0dRa+slPChFk8i4JPL8bVVRqPg6RTfXUCvMNqgmnStuo+P6lQnqfwHTWt45itXK5OneFVwQEB73RiYLTOnrL2qE71d8qnCcTWMHwi0VYVSmdEZm3WHFhz4AZwwN6AOh/WVZaFxajexnZLwEvuauHLHabcxJv/5x/Bu33xZ0CgM8cHVwsoPVoY6YAr48LCrqBQbMpB+GxS/vZroDFnvebKPv7gFQ9AmmYzjS9c6GxdBwtLGnPZTw0q+6XbW7wCT8TLHvfu0M8kcFIQEIwar6iZcaP4OHAXGyFO/3jCWRv8i1hVYdBRYN4zfDwu7t4ViqjfZwEJ8dxQTdu0LpSpNzrURfSHiFOsSnUSILzqyh6KQSrTtgRZIA+7WUlpeCXHxqMozMygCmmewWIZ4MBSZWExuvkWFPfeg1tvizy89+7IpzRFiDE9XnV5H/fsTMgRBrlh8eS5t448skjKKFI7+3JbcTyU0/LpwoS8kn8RI5ChFtrpBBNqeC/erNAiJcHpbdEWxSxzR77iec8DnvGMGKcwfTrSEKjSErQvEz/ddITGlViUv4NUjiQxbdJYkm8KQ26qDxzvEJeseP96FJzwFTWWAWRrsFP6aU67vYGLuILdawaaI6flVZofH2hEFe7Yv9xfhc8yjm28Wt6EW/dfB43Nsflz/9RfmX4mzqEx53A18GGO95NO9RPw/sHzX4BXfOA/OzHb5Z0aHmaWzruIa44P4WGRxiOEzrbKYR0yDUvOuwR4dZ+HyGOsvNYajrOZjEOOjrtCjWkILGLVw5BLgHfETQHvOjTHbI5CJlraKBtjIP1FUXhqeMdBwzusDuY8s62vNQfWHHgIOWAfwroeEVWVzmJ/4yIk6PavWmpaulm/JYS2Xvly3PriP4YWxFmCPNSuyTFaIekxwwKLCnPla4lyBs0uTgMsO5MW+jyP6jZJ2HXUXHYEOgvKcLYsXs5aXLilDYG7r24H144iqC1IrA9Ex1SH6gsNyqSvNMgV4NXRB/kXMRsER55g7PLGnbg0Cs/44ShQzvpvawIshv2bpDO8m1ueGtDFpn1dOEgk37/boNgoQzMNhZxh5HSGPoEJwb3GYPrgXsyTAW8HAgcT4ha1CvJ1/+bbgob3tjtIgAXvvcfAsq/sPsKPgntn3PAxeRvwGZsTo42DgERRLl6nu3QplJXl91v2Jdap8FmG3YYYZDl3JwS8dvFqUTtDwBt5qno8+0QEIW80eX4mraf6KKxW8f5SBkuQPS0GYQ4ofJYZVBFoDDY8pmkuQHWqoFwRl598lpONxllJBadM1Y2Dhrdk23P6aa6/uI2b8CDe/1e/BRc+/VNOyxrS/Ggc3Gx17Gu5BFOZHdeqm0LxyV68F0Ogb6mvCnMez/qvMI13LnwjW+fr9dk/Ri18lax8PBiemP++q/XsiYUyeRh4/slfi7nyLGq4FijroGwg/OoZaHK/6J+/NH7Ko3jdv3K7qgSHVN7FTHoSEgEv2Hqa2Q2JIz+BbA2dpnFFfG45XztqeIfL9vUI5XXEmgNHOLCOWJADS93zC9J8RGcrydGWK9wQ+9jfY4DcmBDw0YEAgoCmnY4xv1Z6rYzMZJKAt7XDomuj8vkEsno4jNQOXwLcWvwD4JWHyZRTaL2FC0s4Ixa8Cgr9C7dELfHdV6KgM3pri+VLrfQUZrN+s7NFOu6w66NGcbjVwdnIHxY586pYnzLtbN6OWyZRw1sXDvuT0wGaBKGhcJ8mDa9oDFwh50xTpzo76k0LbkBUQBsSx/5NG68gBOYluAv2pXj1q0OcG1bBVY6l+crHvKNLNwcN78VLgQzufr0Bq+Q4iSIglBt8wWKQj1bD+V1j2VJL7enpPEHvV7qDMWj3utCfXvKp3gAGmcO2U3Tk6RxWZMrJV81J2wzjXAi5pg2QnjqEMMdMruecClOVfeXFvhlFwzUTtNSAOvIrRJxhDVifsggrTZs0/qkO6N6TUYbsyk+TNbOWvK27fQQNL8eaSWde5tJmyPPW//Cb2PqlXwj+0yw/nge8xay/p5XLaTXHcqeOgLe9994cfdjNwFC8nuurMu7tmvCt6oq0FF7UlJygU657J+V/4HKB2x9zONXbLszrw7ELhKidVa5BMYU00vI3p6h4BT65HCFoeBOPTVXAuTiXVP4s45NWWYA31MlJmY+hHVe24ZpnjCEgN6wHsOMRNbwDrH9rDqw5cOM4YG9c1Q/Pmmvn0NQDaGGc8PEg10XoTX/1tglo1MONRui4ICouG6+MDBhqI+nA8BG6W1DolAXrTNqVVq+Ei8AxRouwZ7xcJMFnWLbzHSwFFpMWvgou5hvbHkUBvPb+KNizhteRlrqTwYKAkfos4le7LdZFn6cQIA36FrqqBFiuXbwTb9n9HS684OegenYmBEqnUGgoCA37J8C7dYGVMi9lHe2zr2FZpG2AQdbwCpSpXh0LEYXQR5JV8+74o19TFOywDG7nPcoieBe2ajJ0/5ZbUdz1+lDm0Y/zuOduy3YYaKMSIkmXV/CCkpzVQ0caOuMi4KVGKSVGAF+ChgAAEABJREFU5xS75BhMirhhaanhFb1ZPaeUU1KYR/S4riHgLQleLEOLXXVhDmUUrwLwzLFpfgrwKir0kVZhYx2uGUPHeEreb0o/y9RFLFfWHpNp9Od7QBsIkA+BxgFjQ3DK+cNqkbFRq3vbpfIhx8mWvWkrJN52+VXBbV4d/3teCBxjdfMaXs6FcsG6RK5gH3YGEfB295/wabLE18DrV8V2qawMbxM5qAect0vUq0Jqp/j09y+7C5c/6VMVdcjcf6XCox5zeMxBNto0nocynxUgX5SlFuAdU1PMivMaq/h5k4fU0KMNa0ivHKu3wbuIZZKG92I9wq5eVSCvu8zLYwi0nDeiPqVOoOByYKmK7vgU7pis66g1B9YceIg4oHvyIarqkVFNScHq6woR8NrQ6QNNZweTtCpzeBdI2iZDjZYKFbXFom8vlwSY2KhULIDp4DnGykAmAJUk3VxhgrwvskQ/ptxxUYWz0It3+m9gr30gCnZ3z93Y+MMXQf8AgzII0n6qrMCh3Y+P+3e6DaSPQhBEKJdynG0yYBnddFvgbfFHf4qK/ZZ2VW/Rn0RBfTXcaAjwDvg4W/kchZXcs8ygjOOnzUi95UJ2Q02kM0CuU2OrXji2pZ20IY/brJHHd7AgIAsFaanK6eY2ffG6/Q6Pe+82IHnk8QMFbaiT/ZCr4wDBJeBt4ZAUYFjkVxUO3/9ZPx+y+tE0zIUp+RUizrBye1zHcqSzzBGVYVEcou6FDGRybLof/N4eNMYySsp16BNhHTtamDhGSjvLWPKrqjymbRxLpHsxuNlPUNSnk3lR7F4L0b4usWiNPmk8t0bxP5/tvi4+mQiEjrO4Ee5He1egdovWBm4+LXY3bgok2vvuC+4RK/dPvP7FXzyU3KWeVYMOFdeFQ4lnBCrdFMwzqga4/GmfRd/h64ErJe54VHsoUmOayx1KOCuQ5s6wHFODGjPnuRhDh+2wmVIU+270nTD6TeVQpDYzeOZl09OIi/UeRvsmaIvBteCkgsS7MNZgOgE057Th15GGk/Kv4x86DqxreuRywD5yu/6G6XnNRbSlNkCAd7xHMMlqBLroILhcdKO/k3Ng+FhaAXd/ElRDClajmLNNSaHoN+qQ0U5GmGi1DaHDluqXkGkEmNIxC184agWBcklNi+ShQN2tt3fUQLKf7PPGn/8pHv/RH4Kb/uh/BeCUAW9DAOXG8YWcy9NN5LflBZoPt/Dk0CAJuf2bbg2Z7KtfGwWWB077NJk0P4Za83FbBM2VCtelk3OmCeftOAYC9uVmEfKbtoE1No4lY0YcNw1pSeHWjVvGAHZQYJT4WzoT4ha16sKi1WvdLGAI9m69rQtHGhic1Sm/hDibIS88B8Ibg9YUEOBNrAppZ1kF67N8mqB87W6ck6edA1e+bAQG1fdBs4OmqGGxeF8H5eEx6MjHADwz8QxAgzotRmru2tRp100CnwrHCRCTz7Q5RHClx6SJYwneB6GQgOaUqrgQOGxNeS+pj5WQC5Ok4S0XZLBPn3xjsXDt3XeC1jWkAm163J6C6FhPaXPobLfgHNjfjOdgugdOqCv/RznxN/c/ke44gvKW1IJXS64HeTwbzsXpox6Nnff7IJGamTFq9PZx4ase3nquO4vPmRmxCxeC92Z/P8YEnwqoXrnHGd0rijfsb35ZtxtUcOSX4hcxJv1XwC1qePe5roOTIrxoeULhhnVZpmlacRjpAzzXyOBZW2sOrDlwQzige/KGVPxwrXSjKriw1QiAl4/b1M+GQkAucR81A628AMFM9ETbc4GMvmgXBCGLCp2gRdysQkFLodnM0QoJtBq1w1Ow0u3SGVRXGK7dfqnFn6RQEiiTFKSBvOcug44akOK18ZFtmf49cnjcz8yq16X+Xp0eAN5lBE5NjCLR+L/f/7PwX/GRKF/3mtBmtWEixrKe4y5pfiToJo2FPkmmPKdPeuWI5ubNCltlAcO/OgNeah4LISf4AECl4QV/jnHdJI7tgx/xLEwFKBhfk090Fr4qaoQFdFRAj19vk4b3HgtL+m0e1z//c/yTR1/E4L57YSm0PQG9txadoYFFXRkVX8hsFBaOc02Zu1HLXoFt7xQ80xALzvKozdYuXu/AGXKVt4HZDDSkRT8O8BoC3oJ0CxtHTXxWAb1A1vIRcWmdggsZS14JzE2mqUzmZ790GrccNWFYQLvgI2nFtYOaTxbkO9sYtq+fa3xv1PT24w75k/Yxx/miQDW3Mchpx7k6gjNO/zURD5zyLV4V5jyW0zeZHTrSUBWR3/300/ylNSF5zEnR3n4Hrn3Ih4dwtqYokZ/sIP8IeAdVkUOLu097GtqNTTxt948w4Rqr8Wm5pp1EQGtETPMwaeNt6gJFmgYx7XTbDuOxn6f7F+Mxr/vzMHe1YdLadlxJxRtjMJ0aXLA7IYuvquCurTUH1hy4MRxYblW7MW1806t1o8IQ+5imM7zTBMgaShSTFuZ2TrhJc9jvqN1YXNg5C7jtOhSXJrVJdYSIniWh4BkOzUn/sEGAl1EgzpKzsJGAE0i549Ee991rCPIHKF8dzwQWeztBIIyTlnNKIVjs6eAbcLXdwMamWsE2JyG5SKWDJJ0GWy1ehcdj8PpXs82GahME4HkSDfHC7e8Frd5ws4MxLHNS5rn4LQrjWwh6pSEaXEjSsZlC2Es9mJKRE/ZNArdgZDeNgHe44aFjHCI34IZC7qKmLi30KFv5DeeIAO8VKusMK/TSRCohGUswRtxIuctExnXGoYOFQB2W+LmN2Ldu1IRSJ0yfkNa3pqxffQ9xBGZ2Cd5upm8VT00ZiocjPdOeljUDsr099shAc7xPv/Yj6HhBIQZgsZ9jXinPp13sb6gzF+W9GbzU3AU3WdLU8/ZCBrwYVqgWBYNzgHd6fw+E/vZvg5MReNGLkH/z4+sJ5oeL1kUi4lG3GYFZl45GMfrwxTELEeLvXF87H++NeiPkWMoacvxVQE9z5IJgPbjJalBgOypmUwzCmOpc7SxiCU+zvY1t7IC3SCjV5PELocNW58MyAdVlEl+0yXNLzFfLjY6ofuV9X4Gv/tNnkaAPx9PGejKhhDnTsFJruDbxltou43EuX78JvrQ21691cM2BN2UO2Dflxr/Rtn1YBw3vXvhKA8EPQZHaqkXQpbOzmHtB5ZB2i5ntwKJyhr6zL4EkQ2CmnKMHJpgJHUX0TMKfBIgd2qmPKZSSnotzQcAWIxazc9tuurXFg/cbdIMDKWl3dijMDGaAl8LIjfYD4Qf2N1HxkakCS8hyDAg+Vabe6CDAW1+7HwVpqhe5HqXPm8yLceMCEDRBhzmf6+SwoxZKqfmltSv3NNQs87ZhxQK1qruQZGOmbtJwmzOEcM40MXtADTGTFr6krc9n/QxByW23syKWvud1Dk2iyWC4DOeSMQYC5IpoYQPgdUUso7hFTDl0Idt0twmuNkbBc4YV5nMCEPpigk18OKNYSN6sYp2tiRo+z80DOxjSIGCfgJkAiiNdCwPLvoI/KekqTCCNWZHiGH3m5Zi3KD3Go1g3OC9nhebAX47f5yamcKydT05CXAI+wX+GZeaONEz6gDf1j49XDqikL7TkCGnNa9adw2e5eiJkhmXI5tM3sUOgb+U+c2757E/prbfBVw+Wmz8qJMBr6NEw0oEvYzvklxmjxsa2ciiUjO04pjYFlnP8YIiB34f20ZoXmosnUcj3B3LjmNEPStZNz4KX2xjMct4xehXs/h4M+TdJa/sskR4up7R9oC9APjBjhnnVaw0vubC+1hy4YRxYbbW5Yc19E6l4sw6AtxmbAK8e905vB3zqp3K99RTbsQ8XfvgHoyfbvcVYUUV6SUr+s0xtLYoLcTFtrxDwxhUX878M/gRofNLwonTwlEPlkjNhwHIAcMvtXahmUkbNkgJ25xoEArP2Q8KoFEph4pXJBoYb9PByZvFKh0nwb2x3eAWeCP0Gr389PP9EX+HjTE4bTQvopTVLXmGJn44YKHuVhPXoWhsEmWdk1PB2AdwzCE/AO+WjW8pi6Nwv2YqBk63UxUxVGLQZLFDjeVv6Fu/l+1yg2adiKXDFQYFFaQM7U4SzkUNulvr5zvK7rSJk0Rlk4b4ZQAixJ1uasqUKMEtHYL9MV8UVjUVjIjAK4KsPApM/AAvS36oKbKQ5R6yBTezCL6NtJQ3NybLqMGljfw8BXvKSWXAojhHqoyNQnlydMMRrI95n9J15mcEBSFJmaXD736dWXL8+P0l1hATAU0tapz6nqFOdYeXQXYgvkHaT8fF503iFzUX2p5ydFgL6VwK8nLcsyicbrRz48jCfpihxIR4vjulcCZ0D750QXNoS4B0S8E4mhhtQA61pOOE3182QSxuDfG+HiDMst7F5NAc3nNNjAG+b5pLlvGkbg00X31/w6VjEUULrmDUH1hx4KDhgH4pKHml1WAJe9VmCWYse2gYgeNHiaLokELxyHJj8Hd4ck1+SyuHTXAlFdzECh8kDY2SQN19mmlZ+rdEJT8DY2JCqWG4qDFL+m26L/RnbA8DrdqXhtRipIjZiStRQkBmeQmN/36AeRpC8jMAxBjDGYNgDvPXr45nhJgkYHPNruti/fIY3jMcx+U6KKp0NSXU60jC61gTAq8iG/ZuwbwJECnfUBragFGcgK2OHBGoMLnwNC4KWBBbMdII7HhXbf+1BC4HoPiHNJWMk7MlPuh3r7mBBVvWznenPcy0faWgTz84qqHG1bTqGQJBuzyowly5OtQTpira6R6h1lB+anMlfXL5MUGRw53aNJ1yKc2x3N/VwUC38FER0C45lWYNPHlQzY1QPnXBlf7pHQhwt8cIYg+m1MUOAWQLw2vSiE9LP8PH31XETQ7m+/twdRWAUMwCdKzAsU1tx9s9ZgwxW/fwTpFw810d3rqtgVMiVN6QhsKA1rMuQc5qIeG6AQkSy3u6dLd7yqT6FgJZz1bBrlrydRS7jofZcGl4uKzDGIN/nx5HQ0SvFm16H3bBA6dI8UuIZptqKc6+fzXAMRxzTg7jo0+2jnhpjsLH/ID7xdd8dEkw1CO7aWnNgzYEbwwF7Y6p9eNdqh3Hx767swnHRC+ssBXj4YkDqel6EU5CPx9rsje7W4oujtLPuQhXKTU/R8OrbkMrUUChJGym/vtIgt2A75S5qKhenzsX039b2zIEGRI+hhYf3qfEUPWJCFKN9dBsb2N8zkKZV8UsqW+GsIeBt8Ro8VsXx2M/4JMI7nCrsBFiUedQUKCsPQ42wwouaknUqr9ukdKZndLWZjamEu74QUaQ8ftIiAzjxmNmxPSjkLGy08RDQUQHDTdKlm7y82L1q0U6a4M+WobDVsIX5xcgWDp5ac7e4HGcpoEpPE9r0lQkJ7JBwhiUAXnJeK5sATuHinFB4EePY0MYm/mhOTptYTB0imFCguvduvPW3Pxtv+aQ7FQxm9GAEnxiUKJaYRJrjruwwmsQ629T2QFR1Bs9hqyEzLJk8vZrq3KwPZzglNHUEU/8AABAASURBVP/SGpopro5SH3M59jt7Ma+VpYZ3lragp6o99jHEJLd3vly/n30/8+UjDYNBnHOMWvgaFnHs2zZOvsmbvwVG7/j0WfnP/vKZd+ax3GxzCszCy3ikLRXgVRnLKvN9rvC84RDGqKRsUMAQ8DoVVGABUxyzCwhHGvLOtkej0ZiSheLI7XuvwEe++odi6pzGP0au7TUH1hx4qDige/KhqusRU49JgmqyM0VYU/noC+MxGi6OQZNFTviesG24IktbwOjZlT/dNYs4xbNBLWJxsQo5OmqiTlr8BVCUSZrJLp3h1aeBFFdTsyh3UVOXluARuHgrtYss9JI73o12vOzeXgAi++nYhIBhMSbg5SM9nbnLig5nY/5FbQlHsgoPDiL4sdQkWzJYcSfRkBZSaU1r4YougGaFFzWVKmXmetvRBkZXpuwbJSxDGjdpeBwBEYNBi9+ZmK+h0DPGBB6FtGWsKo6lmUywte1DyWtXLfL4hQhalgLcso6smutYW0fAa4xh6uJXncB8ux+1tS3bvkjphoy3zSRk9VXBub5cvYU1sw2CaVt4AvxAjP7wyD0EgOrBB2A0cVJ49EDShHJjqQ1Cij7TKZyBbs1p+scTnhr5cF5YJft1K5yMeKFv/06vxDPoZgnAW85peEsB3qzhzfe/+prqwpxWVqAuJy3q1kOPCSo0O+Pji/DeDAkcY7u/F7zZYlTwDhbfa4f8sjTyHE40vlUQkyc/Bfv/zwHg5c0HTk/kX9542iXnai6vIw11G8dkf8dAczGnzbvhiA5vo76G1w7t7D7GAr9y+2BDn7MbjuFYu/kckdyW94W8Wps2xw/IG4wZxvs6BNbWmgNrDjzkHFgScjzk7XvTrNBq+Qcm+10EAdKkUKA2HtTk0gIQbXp4aUG2fcHHuMFGpEHvQld9U1xMm2sTHLf4xzgf3pFp2R5P8B0Ip7aWBAMhvKCVz1PWm00AEX914T1mJc14nB4Xxk93CXQ6Cged69unhrcedEH2CfDMCi3gUX7Jkm2CwD9+wseEEgXrao4ROiGRlt/ZoQ2MpgU1vIjjEWIWs8rChYzVVrxVJjsU6EZR7Fs40uDZ15gGAqjWJO0hG7qiLIcvS1UAMxXgDV7sXbPEtf1Zg/CWuOP46fyrZ2Ud6+4IuBmFZX42vUzjk/bxcC0nU2rYf5e0sr4q4RIbTi5xOKVkgc4WIdIQvHsCwhAQ8urdD/UD6dvUIREY8ymGvJZaOtGQfxFTkTFl7Ykr45h2OkahulT4wQdl88Y83PvOG1jO1uleA/3yC37yn2UKbvD6eRz7OM73Xa43u8zo515ai61kwhKXPgQwIeBt98ZLlIpZ2/T1imOUmTHDGbazFi35lbN1PeSs+eE5R3Oah4FlBy3HJMct5ZK3gzYC9unYQmvrieVf8XK89XO/A8UD98+yFEMHrSeziDM8g5KNnctjqcjInyXsJ7WcQrw4b4DN8eVZkq2PHouYJa49aw6sOfAG54B9g9fwMKng2s4eHrxybaHemKShm+y1BAHUZRBgQoCXwMwmqOs7LYmRXNAIKE8M4vvxrzAgKEzBhZzq1jrmo4Y3gtsYzLbichWqj7I3JBlqauWpi6MLuuJPMrWL+QV6HvM4j7uubc+ymtE+Cgo31ScBv/n7v4uLf/c38HUNJqGmFkqZSwIeuYsaZy1aAoTtix5/efEZoVgx2WfcAS9DZM9S+xScNBZFFYE2lviVFMiG+at0hvfjfvITcfH3fyeM4jVq63Q0JffDNy3aBOA0vAXby6LLX/pfpCxluEmgEz7ntHNVrVDowEjgOvJZjTGsq/WcXcZCUQe5FvAlwNtOWpLyp/KzT01zqkgaPVQOoS39DGf4Sws8613+HLt2CxDDMsjlGEMmlbfUdCdvcMbXmuCC2tZBQSIxdKZdOAN9pWF/dqShY700Knn5suwjpmE7jDFoMuDdLo/kOSnCbae3M1MGbWp1P4Qg6R5yGTCTMe2Dq1jy/lDJwbDDCAP4/ah5V9yxJtfPxPaW+M9cclQ+csSkpS7xt8tEWNL3kLPXnCYfGR0uDwMFHe+vELGstb2Fuoka3nZquNdM43gMHfvSl+GpP/QdcJfTpoZ57EaJcom5MywcppucpyybLx1puIvr7f96+YEWV2mt5jI9lh3cnh6k2d6XHpi8vtYcWHPgIebA4tLiIW4Y3kjq29sf4Qu/+nvwrh/2+XjPj/xCfOLnfyPue+DKqa0zCQy21JhZw6xaACm0p3RNEuq+JxgYjQxumBsdLIYJFCq8iKku1jHb7ggS0jFwYDcE2zmkBdmn86CmMDBMKOnSWfjaql3IK23x7Y/qcNeVA+Fu9/dRsOOeOUYEgTf9+q9i8yV/HzSXxP2oE5gvKRCYZeGrJGBR5gsXgSvTWF/FR7TTHi+V3jdN0pq1cNALS2xWP/lM/4CCTpncxsGtYqiJtOTaA/tTAkRg4KyyIDyGJ+BUoKUlQEdn6StrXHWkQYUF8Pd3DXzepSiSxhAQx/6Q0+Sl+qgzvJZ+Ji9+JcDr9ndDGWmogucMS5r7YpqAVV2gyHw4o1xOrgsbAGjH+W6oLcNkivDTPcK+BT8t2/MziOmVeKTBpe8HK24RU7J9Ve259zQheye6qiuEkjUXbnhzip3dbgSjZfqiRcp9qlP1AJ8yFpz8oyYBszxn+/XxaYXyyXRVHTbL8i9jVOUYNcxI64A/uag6lVLbW24JPm2Y5Nk+jOsUtZBxpNmfO4c0vEVxhIZzfqU+ipC59VZc3L1HXkzHBvn9hBAxZ/XX2pxUblqUcRpgkR+7hq6sDmXVBkYvrd63N4HW1JyY/cYYbDcHINss8Um7TGvtrjmw5sD140CS1NeP4MON0k//0m/hH172GvzOL3w3/vhXfpALtMX3/Ogvnt7N9Eh6vNvBEQB55ZZwpUvYQhsEL11wZQk0GqlDFaCZooQ+bUXvwld3kSiQuTtqoiij6Tt8SXCHdjB6ygxtZ+gD0lNMgrYIYEPkgpYxBg2J3nqbx2se6Gl4J2MUlj1lmrSgbidqxpsiHg7MZ3gFQBasKmRzbLIE6uaWxwOTKJUdQQK7E9KPs4yAFBPEU2n3nIgwvOiVNwKzl6tUkGDFsH871PDSwQa1m4r2BDP6Hq38HfMsCwBVLhhqwuUW99wtB3pgMKFQN705ogTVLSO/jPrYGQsOi4ILG6vn4MxtRuMwv/eo6WXwzEvzyLKfIaOONHB8gn9Bq3IOeqwtoA4Cv9mXSp75TCD/C1zSsgSKdGbXZKcJfrtdB3dRqybgdaXH7jgCl1DfHE8xF9Z9Y8nQ6b62MECxHcsuUmedPhGW80ojvz9tIjiaqyfk4aY4uLT0aTrb4yejFrq0buyDj87H01O1nrmf00s3Y+f9PijQbjoLYTqL1X66n/v3YrdxcO7VVxwr8jFTbuFgWNGSt2MuDtz5KAzG10J4MrLo1xsie1bLudULBq/dLPlQwgX/olbbO6KhMlzhwoZXQ6l5ojgZHVFTgjUeW81VRQVz3ItvIWFtrTmw5sBDwgEuOQ9JPW+ylfyP3/lTfOyHPQO333oJ21sb+JSPfX+84IW/T3lBNHdCr0wRF9Jm3BFAxEx6IcdzFTRpZfYxOthBI9DT3kkY9GRFyHOW5UySjtRIB3pzBQ4W5PTIehoFuE+al9ql8nPlTgtaSuSWgEefzvqHB+/E5X/+6WjuuBNWGt5E74HdCcqrcdFvXRXIDTYj2BcoDhELWhX5qiMEFy54XB5HDW852sdUqPskGmyfkjpqEYvKw0nKKmJBI5CkrG3ik/yGoEU3jgTbpUHsU4hvG3Q2arJab2ZjjyV/Uwrz3Tsfgzu+9itCSWn7BXg5fUI4W6btQn+MBDrb9xVP/1V89GN/F3bJPloO/TV7AcV4l/TAanyu4lRXc6rbb2KeQcH+klAMLWSXBKD6ckaDAkbzP20KMRodKm/J135EuxPTlwGfKl85A53hnTQaPaAj/3gjK+kU48lP3jOjachTp5cXQ+AMq2L/+lkqbr50+z9AjeCs3jQ/Q74e4PXcDLC5IXoZa8AnQyMMYLl5mbQnj6M2K6J796d9Fvae/q7yQk2pht3S4xgK0yrZYN0T9Ibr8qd/Fh74vC8Kfs8N0fxOLGh487oVci1umTtuD5kfi9eAD1zQkrch4hjLi+lz8cV2gYLtnYs+NdgKtPdy6P7jzcKh5PwQ6k1pui88/ZZ9u9T2zg1vDBm7vtYcWHPgnBxYuXhc+Vcu/vAv+MrX3I3HP+aOWUcf9+i40F7diS9M7I0azBsk0DPZpZDUysfFsBtPKFAYkGAntZbC9qDcFP3Hth0syqo7Qlf5Oy7eY2rg5O8brq0YmSFBy4hPhrsjZXfYTpVlcehRtJ+BxE5rNqiMOFKmT/84v2Ol7AZuvrXFa65cwMu/6Xtw7X3eD4aAV/LH07prZ4LyWjwCMjEl9CvKWKeA6nF0T4qzHqGtG9Tw3r8bAa9jXeOmPbbtV3YnwLRRlWjhUFK7J+F3HH19QWNKDe18midoYLWY9EBky0fvnmM6ZdrFqmAVXTC0AuAVnY7owRL0ztNbJDy58068/AM+IjyWFq2C7R6PgD6YCJ2ipA9aJrZF2t/JlOCsAJoT+HFS3YbExnYIN9rjPDDUgB6d08eVnXLwp9c4x1kedYGOk2s+34Qbq5Z8mo9X2HN+WD7W9uQTSKtLgM/vxqMVIhvMlONIj3ghM06f3JKWTnQWNZ5jojnQ8f4iOUx4T+4l8KywzJTtzfSu7U3ZJ4DNRP7qQbFVHjvXcpm+27Hf440LIhuMYz9a8uh+zssxNbCKHBNI5zLgXFacjJfqG+bEuvbHLbz3R9KlhByjhuWTj2sE1pl2dkVbZjRp5FDLacFmBn/bGlQ1t+UeR+jm8qe5llTUJo1RNm3i9ZTutGOG/sWJ13DcT6N5Ulpz862B0u24B5y2aNiJk/KOU19DgWTZDQfD9hxXRln2uV7Op7XpyYvSZQzX8innlO7Lq5wrOX8eG7+7h89sflhZg+n45C/nmXdDhrW15sCaA29QDmiNWqmCKYHES17+WvzdP74S+6PJSjTe2Atp8dYZ3kE646j21tJU0LO3RwQil4Jnb85IO8MkNKMWEnBo6VK4UtYBwaLDhXJWbsKVl2GVkWlQYJNazFl6j76KjwR4e3HKJyQ4NgNUzQhjAbe59B0u4CqrPk0pHDzzqK6utBSrFHBz+UXzLGNJoGG7b76tow949asNGj4eN9TQqY6L1H7u8bF/sbsT0qe2Cm5FLZQ8Apl7S9SrPqrccKvF/aMteYM2WZqs4+kQWCftYEOeFnXHMgQRx9Sp9uqN6+PoGJaapE0MvRDgvXNzgJvyx/bJT5UHeeGthfzEDCvzFQSA4qM+uyZaeslvPDJQ69H7eYIFYh6oLs/Nx3RqAqjX0B7Xj5PiJLD33QaKyT4sO7t7DH/my16hZlcAt6WrJpkBNzPkw3y+MVFOw4k3H68wcTmkdfdUf7hCAAAQAElEQVSBU4D6E2jddZecmTFJ89sQQIgfUwJGJfqyhugcMSe0v+vIH07BEQYqjik3LvucnyGQrEaAN5XXkRXxRnzXvawsmze7hevU/G76WkGOl2FfxbtJqldgTO0vPv5ZuPPnf0pVBNMSHIFIW2nHmX2uARr7+bR64LGPIdxkxI0LN4KpLzlfIE6LQ0UbaAisG7ZJgal3qFj+OLq5/Gmu9RxDALofwzixEvGOUZi4EpwK8gYz4hiUHIuGG+/TaJ6UNr3ltkAnAN6xIeDlJn+ur7msZFXI3LNKanhz+ryrbPNxCreDuMlWuozh+GzwyQqnN3ZH3CSm+sf3P4inPvc7MPzLP1e2mWnKwYlzZ5Zp7VlzYM2BNxgHhFkWJv7au+7Ds7/zP+Fj/+XX4R3e/7PwkZ/x1cH/Th/02fiQf/4V+Lff+EP4m79/+cL03tgzGmOwMRxgTMGY25r9G+mN243aYd6U1Pop/3TUQY/5DAOW2jiBCRvgCahd8LNyOidquiwaQGBjoU8ozdNVWDQG1dE6q8JiRC1d2Y5AzDWjrTIyenzH7sAYE9JBYQT+jDMMs5/H9EPlTjOFAyQcH/1oSjoAl++zMFQx2fEo9HuoR7qsr7x2lamgcB0EVwBOntNoH5tWWRXD1rbHA/tR+FTjPbbBH+mvylelQT5j2sEGcCU+KW3eaJxOSrNkutdb5oi/smuwRX7pv3+pXDZG4JqPohVWzs1jxmm+3uPCNTchrQ7ukohocQoSnGHWF0aHq6CGaaMsQKgJxza2jYErPGpnj+XHcXUpruJAjosN1NNdKmotNDUUf5qpCgNWSS101PDaQYHj5qX6UjDjcbTUTuKFcEcYTiTdIzjmZxPgrXhniB9+dxxyeWrcjqN7UtzFIXlFDWYoTEuzaYP9oHd2kfUz3okvbDrUfkONqTKZzY1Z+kn19OM7tlHlZFw7DeNkWXEldMjImq7yO96HDM4ubx3voZPHcVg53ss40pbtbWCMmpuXMSxpinbf5AoMN+HyO1pOnaTridwEeAfkSb/Mov6BmEc6HdcyjZOM4dxiFIq6QkpWcGZ0/n1R+v18w8c8KtB4L7wIN73y78kLc4QXOb8jMA2Ze1ZxcXBifmXLZftul+5JpctY8vDWYcm6gbI4GKt65zLe5nnfiYt/9WJlm5mNmy+eWucs49qz5sB15MCa1AEH7IH3ZN+Uqpj/+LO/hg/4hC/D7/3RX+F93/Md8ePf9RX4tZ96Dn7jZ78d//n7vxqf/NHvj5e96vX4+M95Nr71+38a+ozXyRTfdFKe8Ng78KrXxpeH1OpXvy6+GXxha0NBbAyKI6asypAGanW1EIILruHjWmMMQUs8O2sYl8uWpYNtG+SfJbArHBfQY2hbCqeawi6XzW5F4TalBrUKgJeL/1zZggsyqw+Ls6XHSA3ICi2Fm8XR/JnuaW5Fmt4Aj3o0CfG6/14HQ3Rm9/eCALi0UYIPSFGnIw2NiXwZ6Jwg23Aa7ePStgax/AYB7x4i/ys+zxRHj8s/ICh1BIVsGqGShZRm4tNxecUfjdVxaY5tbS07ivgT0FTeeeM6gj8K+MvpcfWlzerI3DiO/nzcsHTIb4TXo13oRSSd4bWx+pktQR4EcmEIpCx0dMA6YHPglqq3ZvlJMUDd7KHimOo/As63aT5cVQ7GkCfUaoE/ozrr4ki9FfsiMDdfXuGa81wAvfMWxntI40pSRy43nYS40niI535EPjPGVNWR+kT3JHNxo0ZZeRjOShYP9+KQmyL5s2H3ZzRrts8Yg9IZII1pzfE9if5x8X5QZdIQoBd9Yww3JaTJlNoi1OdSmxgVLu8K1GUR0o6jO+TcNubofbu1YSLgnY4i4O2vAzhYY3yoBcxjUah/ANrOQBpi8fi4Os+K22JdJMN7LY6T6DiuY4or6jKMnfwyFeKYDlnmLLrHpQ+f8BiRwVfhm/E+L3wOrAU0J4/L60LOw5ZlgePyKk45j2uXr2olzYzh2lJpjnBGlUTzKivjOC4h0078bFrw07p4ynrA5PW15sCaA29gDnCZOLuGr/u2H8cP/qf/imd/2Wfgf/7sd+DzP+0j8S5Pe+twtvUxd96Kp73tUwh43w+/8CPPxg8950vxG7/3Z/jkL/imswm/CeT4wPd5On7+v/8u7rnvMnZ29/GTv/A/8dEf8t4wxpzYeltHITfZ94i5aE+TkE6lfK98R80KqBVJSdC/ls2CIsed5UooBw1vMwaxw5HsOmumyDzgHR8lKuwJdiSE5V/WlMZBj3xvu8OHovfcZdDpu0ipr6WzeOyFIYqk4R3bQchXUMvW636IW8SSAFW+4abHLjblRUFtcvAcYwU+JL42KOAKDzbpmJynR1k2tuHj81kuanZm/p7HML5zJfQf5jjiuJMCrpe8sLd2Do1QLksYbppqytnxyBwZV4FEZokXBfiYj3Z13tep8hi7gA0CHotRuYlqyo0KC+vYyz4f7Z9WWMcUNOr5n1WYQYnSLlex5ixxHaGeYd9Ijfw7ts6k4SWiD8ntXrqXBoMQXtSSVl788emu9JobMicQaMMEAjXZBm0C9nbJPnb1QRsNFQecStCGgh2OtaY6oIQYE+yucHAE+CGwhLW1bbCPIdx0DB0rOFQ016VIgjU5qlfaZPmb1qLkvVlYi1V+gzJCy6k/KJ2Pd/mygu/RbVxcIwtzkHcpX1lishnPR7dtLKm1KPoO290xY2zM8hV7buYPUSY/HSKdtJyG5C7zNh17U2TLeVCvutCKwNqsObDmwLk5sNDKtsXHeL/4o88OXytwZyCG93qXt8cv/fg34S2e/LhzN+6NgcAnfdT74UlPeDT+6cd+Cd7lQz8POg/2hZ/50ac2zVFYKYOEpDFaECkBKLS5PsKkxTefV1Q+4d0cr7ApLYUsyyiwoCkpTCZugLIbQ2Bkvphe6nBsi0kLtNdza2WixHFLCnEVk6ECSg42t4CiAC4/SOCSQIhJ/wr2luZAyzFBHfLXdQc2JfiXsQZJGzfYbjEDvNQmg5Bp2nZHSKmLma8tXBDmpbNH8p0V4QxroMn5/LTJ3kOuoaAzhUMGSoOKTDmUY7EASaBN2iT9E4+aIESAdwaSEpnt//t/oi9MLGA6BqTBdNbG+AXtgvmn5Qbqdg/OxLJTTcpTyuc51o0i2rDU8ErLdUqRI0mWk0CbEA+D2hoYAsIjmRhhkobXpDZ1e+wo4712AnSXuQR4c/5O9aX7McehF9aU8kywatt4jF2zRf4Yxix+tel+UAnDpziB1yKc68muxlCZkul4QxXc+KTgwo5A8thyHeDGd6J6+iUzMmScyWnWAhwH8NdSw1sNurABYnDpqypIi6WaXj1I88lvbCD7wZ9uQ3HSqn6GV7n2b4rneMF5IfZ1dI+j0/Xbwwz7ZgjVTe9SV/94igqKh7n5TXuw/rTp6Rn29pUtGN3Pw9IF/9p6I+bAumkPaw7EFeqMLn7VF31y0OaekW2WfHF7E9/+7z9vFn5T9mxuDPDcb/3X+MP//gP4vRd8D/7L874O+kTZaX0qqqi98NSSaWE1EmqNtFL+APBqhU5E9GKb6S/KlaNgtSl1MccRlQnw1u0+jlv4BVAsBTeMWkSaaYE2BJGFW64ulg5X7VzAYOqKFJJjaiE7eZhq90e0AXvlSnAnb/GWuOuWtwr+eoN8gAn+ZayBdaHUxlaHa9gORd14FNqg/oWIntWR75mvLRykvbK5/718Z3mdNWhJK+cbp+/A5rBcNgOFHhkTrWZNU8kxUdqypiZwyG+EGxIWSxPmO0TqKd/+DTGstlnLzZgJfUwYI6YtYBfs36QeYtjtcqMFbh+ARruFU8o2af749PKV2yjYf3NKiaNJjmORAWjF9hsCwqO5AJM3GNxQgD9pvenADOIGSv5FTT04aKPXPSfe9QtrMqdw3rhYzbrJFI0pyR+fUhdz+hpeDhDUZy4LZHKik+vPbiLbFSXzpsASTsF7sikqFO0YR+6JXt8OkeQ4KNy2JhxpsGEGKGY5Iw2vuJvxnkpf/vhPxqte8EJ5DxlbxGBho7uKPU7/MKNLczU5gdQ9O2P8w707wd/NAeGWM9VyzofEJSxfDw7nblvOB3HLo+3V0TI+ZExroPwCvHLXZs2BNQduHAcWWm5e9dp7+Cj/N8IXGdok6G5ck29MzQLxt958caHKHUGPMgoAjdImX0LbMzI/hvb+QCMggGSSMGcWmNJhWUFQEQwK8FbdCOPe4it6MhJ+1gDGqBUgokn1O7PSY37wlzU6I0rwwcDjhb9c4Id/ND5mlGaSWeDScYZ7vurZ+P13+gJFoRx6CorgXcqSQFWB4XYnJ5gifcqpL+xCAq2QK/G5g0XQfpIHWPIXhaPBtNoMJfM/PgiBZKkZGm9Q7Z0F7GDZQUy0tJHIAtJSsygl4YibCfioTU3ZZo4hkNERmckEKEqaBGBmGc7wFJwYDQHvwO8TZEUGTecA2DwJPcJltciA1w4dCs6l+XynhbWR4JNpwisDTUtzIuDVZhEIG0cQK46ihhcraHjLKswKNNUQ/jgNL+nnK4+j2OnJXJ2RL5zNyQu5fS204aZXtILmNYOiE/jsCVydXa4uNSiMpRugpIb3yFOPXl1Gg6cCalCqp+0MKt7Hy/ZRZGS2OPflhiMb8tA0j3s89t79vejjpbro6LIacHrUXjorXZNbbw3ldFt4+vR1Gjrhuntngr+7JwJePzevNI5LTtVA89Wf/6X43a/49uCXZTiGvHXkPbS5CPOKsWZvj3a82hXmaiy5ttccWHPgenHALkLoyrXd8CKavs7wrh/2+fjSr/9B/Pyv/C5e/qrXL1L8EZenqKL6wqHFeI8s5mps0yN+LZJiSJY38guMZmGusCkMiiXVdIUDJnYAvbQ2okYsCFURS0bCz4LAgkZRGV+rrtKwsCKXNFUS/hMu/FJ+3P16g797yUagIs2kPDYB3u7CReztGkVRqLZwWVKEmMWsQWVDxoqPXeUZDy/BEWmSvZi2EcgoPhvhfts0IdigCIC3dJFGiFzQKtjsjmDB8zGzikx2jwJP9c1xvLVZ6RKPbU/Aq9yiRtOnmx1pGEGa6b1dYAZSMPfTZGJderFNGtMyAZi5XCcGnTUEgANsYA9jbtBEbnrcDqJHQS+yglDV709CrB0UqIo470PEApZlRTrS0HEz0k09rADoMeVMOLbChDSWSIDXaCfA6GWuxFZ43l8aU3BcD5XvhQXc2ESQtcCkQWOL2YbgUJlTAn0Nrza9AlpTzVURVrl5V3E0nSvIFU/fcpfm6rSssdlcxVQ3QK/4A71vDpsEAj3HXpslZZu0Lsy1Uo1UxJJmqInLMnFu0HPKxf15SLWBucG7tDW+NR5pMFx/xMZdjlEm0nIcM/e6ubncmoJjanLWhd3xk5+MB/7JOxzkT5tpZy0Bb4ff/Md78cevehBtijc9DW9XVQflHja+dUfWHHjT4oBdpLlv91Zvhj994Q/hR7/9y/GpH/cBqfFaMAAAEABJREFUeP099+Prv/35+LBP/Xd4r3/2hfia5/wY/vtv/CHjH1iE3MM+T5ketUrjNx4dsNhRyIbn7+RAH+BGDW/H2HhZAjuBkBhazK4Li2lRo/ZjSM5dHUWgl0u3lAiimZd5kwQeKCFLmpxvGbdOglHaPv03MJXdI2ySawlE5bqr8UhDt7WFEcGU4qw1sCsIusqSlyw32I6Ac1xuEvBGLUqrTuPwz7PPmd8dbBDmLrUZS/wK5+CZ30t9Sneyd5i3jGLfDDTetiygtqzSP9GRGbC+JoE5MxphOFQs2IPonmRTgYiq9iisOSnLsfGVs2hYiQDvdEIeM1cjUEb3pEvsFk8waUMWt0nAuyRvXWFRVvqOh4EnQZPnJE74aTyZZCZJ43thm6Hlror1qURHxOW5MQSBkcIzk+pQuGOb5DqQnwTbnS1QkFeKW9R4nUdJmQ1pOK4B2uCCIC1EZzcEDqyOm4dVNmcqI8ArSpP+h28ZkftDLw76zb6FCKDtDMJXGnSfpbhlHC5bAO/POXyJ435kf4jWmebgWcGapH8+AS6dmkW7aS6KVBi6MEEBP89jttHRKN8ypiwcULlZEd2bChiuDi3bIHY37LxPZzryUy7l6fJOS4G1WXNgzYEbwgG7aK06y/pu7/Q20AtbP/ODXzsDwB//Ee+Lf3zZa/CV3/zDeL9nfSmkDV6U5sM1X5E+S1agocbMwyR1qpVsSbt/ULAGwUcmaHHuA2BTOmpAmbDEVVJI6dNSOtIgoDcPeKfEJZb0KG9pA12rEGAKC6t2YflfVUYa6keVjrftI6IzaXhF0V6N3+BtL1LDu2dARS/FA3mixBWMaqw3KF1YdlRswCUULR4y6tClOJOEXYMC+i9bxQqCLmIcijWCWVXQ7JGZ8vSM/reGk4ZXAjGPcS99Ga+ObrRSmbPQ8C9fjFvGr6UPBNXqffDOrI0X/S7szjWACCJreJ0zs/RFPOLJdLiJC7iKKTdoniOkMT2t7JRA0cKgTWd4zXAFwMuJx2aHajpvYNNY4YSfyXwdxyMNboWxLKtIvDOWt2AHWjHiGFsbF0WrmmK6j7Hm23KshU+bX9EBAb1jn1kptMkNcVwHgkt+BjdZnhremDdFLOiUpN+W8WZs0qYzF22P468651zIomVKGyZtnkPECpYjfwT+jivq0yZOaapWbqkC8qxgXvnRn4R/cG+FliCzdBY7PQ2vwL0nTY3hjNcM69Jmh2ySdymj+0TjMl/IcS41HL+GHZ+yLbxCFsvNavDQ6mv6GVxfaw6sOXADOGBXqVOPrPTN3b9/2av5CPuVs382oW/WFmnxXIXuw6VMVUWp6giAJuMDFgdwxoUx91OLsfwt4wyFofwytrZLa5JKCo5pMcSg20dJ8PvXd13FL/+fu3D3TgQHEwo7SyljCFKgH8NyLNUyipd/WSNgZlio6Tyyhnc/Ad682Nv00lpLpCtsOtwQnDKwViWx9M+xnCFLNzaBsRmifv1rUd/1egiAzRObUgBRGoboFg7liucTC2sDQDFFEWhN038XC4Fk7e8bQuoGtirQkceWvE5JSzsbBM35DO/t3/A1eNpL/mug0Sbtawgk6/Ef9+GoXvZSgPVNJyZoTAtrsMyvcEC3EUHSWGeFWXj+SAyjDl0NpTqr5CDEIw3mZg7IoRxnB3RsR0caNDaeOzI7ji86nliSGlIdi6z4FEN5CseJIM8Spq67kNtzEnW9+REiZfFelCOjpyIg+Lfkp/6D4bQYwIVOK3VB0wN59Uv+ETf94e+RItBKuywSqT5puBXMpuNcc8sNYygqnuS50/a+EqBEn+qS/5BJfepgg4ZXGv9D6UsEHGlpPTiriHM+ZHF2+TEMBWlNHvUYXLMXycwOJevVfwhkdLg4Pel6tFybfFrrGBEuAd4Vpg7XZM6GY9rLqrn+eKjfWoe6/ASiN5+7ug51r601B9YcuHEcWGi1abhg/PXfvQzP/y//A5/3ld+Fd3i/f4FP+Nxn4+f+2+/g0Xfcgu/8+i8IXzB44X9+DqQJvnHdeeOouazL0BBpeNsJ0UQIASVVKKYndLLmgWsytcBRAChrR8DsKGTlX9QMiFomRY3aj1CkstJy5Bc59JRNNE0iOHvsVpjlhTjib9PZ4GmoeUsKSYwQgVN+3OfykYabbsberoGe8EoDnYqG8stY6psEi/7b2p7ZxOYf/S884YUvCIJtno4AS+Z3C4eq9OQNlv6VsZsggVC22W+D27d0zFTjbWsCXmrtbBqDfp5F/ZYSNIMWlRn6eGzjuHqVno3ka1EC5ZLMrSwB/cYwkJnefZk8MphGxBDijrOU7CzLpe/TZg3/cXlPilMzdebYw0Bz9aR8OV5jee2KQYUJRtUWOHVz0sJuVduQtzUF9NWEIxreQ/cnAQ5zC8TVzR7G5XBp3qIHeM3eLgavf00gOutvqs+znv6lM7zOxrb248/yl86greOG22uH2SswA9mMM1zT6cATWMuV6Qh4B5sd+5hXCcUuZ1R/x/XgrFLcb4QsFdsbPCtYjmWN9eCyCm2+r6WnDSLV8R4MLrcXM14rgsbzsYLlnKN3qUtrrMZlvhBvV0y0wLKuCW+Mlq7yOC0K8tB0dU17fa05sObAjeSAXaTy/+9vX4ZP+LxvwLc992dhrQmfHPudX/huCOB+9Rd/CvTPGRb9gsEi9b3J50lCxFHDKxCS+2PbKSjZQ9BQoyugq0AAZlq1FaAZb12g0FloaJg7XhIcDR9lCvA6jpFntOjHhRgEMB0sV2ZeTOHFhZk2yoEJY4oVfkNqM1WsJa0v/soxPvWzp9jDhqJg9+OBXUvA221thzg9id7c8kEc2BUEjojEvnlsX/CzusprV9FIW6cMPdOwXSYJ9paAt6TMWRYMipxAtlwOSnDanmANEbT29wxraCGNuYRtfgsdK/76j8IHiJpPndE9llwS7korqcEsZoOsmLNNWTh0WxHwtlf3OU8sNCdPKzklv0Mf0z8ZSad4TityJK10FgUBOvQLgEGeUwwnNDEjxI+Gm7vZuJxSZD6pppZfcdLwBo0ntcYKH2ekHczxdbuHaT2Es0uCwQR4r+JCIFVMJmH+t2le5vUAITZkCZa+0rBK/6rCIoMryxsu3/8i2psmh+4+8ULpMhdvaVGThvyrGGdZ/yHqx1Nx7vj4ZWKJd6Fz1YZrqdY/aVfzBl9jp/4Gk3mdiHuOoVPhFF7UKdk37+yR7IoZcf6qLt0XPmnv3SSugZN6E55KjCMF1xFrDqw58JByQPfqmRU+6vab8aHPfFfcfGkbv/uHf4Xv+/EX4Ed/+lfwG7/3Ytx7/+Uzyz/iMri4mv8T/G+Ur3wltbddYIEjSDgQcEAGFVqcTW9R3t+6Ccuux3VF/aKrsI1rQShr8RW0HLNO4j5IoHKdlxNEq2c8I+EJjiy8vEubuojTZ8Luvf3TOnzMJ04xwiDQMUm75HauobsQhf3ujqHCi61i45yNZUPmJSzxRUoxYehdH8F1ee0KYh8PE5Im2HRNiLRJTVutUK8AoYi89LufhyvmIppdblwU0TN61M4RgC1dUBoWZrX+ZZJeqvAUqAm25G3Gx4MtK8Dv45xzVFwWYpIKLGiqwsCnIw3Ty3sEvMAkzY+TSOS52yXwb2BOynpifMEJqSMNyuDTY2D5s2mHEYTnsCGwuXKFcwgjtGXJ/cfyPM64o4OF4T2ney/TDy7nZnBpqY+W98dkDAyxzzoHvLeYsMRlkua8QRFKFem+6DJ/2QYl5PtRfhlpXt2S46hyFeddlzS8ArxTbhIUL9P1wL0hLxUXTA/EbV3qcJ5/kFBwGs5rVEMd85bxMObkOTOf/bhwYTmGJNHx5q9dvO/yOV5tOlVGANQkHissIw2vI5/kX8aUlvcJtcPzZdSNvWl+6uMxTV8bcWmsX/mW74prT337+WLr8JoDaw48xBywi9T3qDtuwf/7tZ+L3/+l78Wv/MS34F984oeGl9O+8bv+E97nY74EH/LPvwL/4Xt+Er/+u39KTVu+8Reh/DDNU0Th9jX4Jjz1N5+H/LLN4OqDcLvx25BCRVnYerLBCMXR1TW5dBHFksJuo7Dh01Iq77gCa8GX7J5SGDQUrqpD8YaLtvIYxsv1FBrumEVcaYsYkfPpEaaOGewTGqhc/wzvTMNLRaXO3gIGlo8iscJP5wvFt4uXPHa7YaBQkqcCtyHQsxTnxATG2SpO9bIn3Bm90JVB8rW3fxp0bribRBDdLzweGQjwutqF7QOHoJ+8vD9pBlVQ57LltlNKd3mOMRziEDvY6GCXrLwqOF+340alu0rAS0pNmh/0HntNWaEzFpZocAdbsPbkth1LgJECVjrSwBbDTCeMOXz5+XnJe0SadB1p6MpqJcCrJwOqpTMO4FOVLmmoFRdMDxwJK6pbevdoE7uYchNS2jiPQt4FLJvGcYw65LbjkW59dAkUgX1SQtA2y5NMRwBnVXkKL+pUpYUfHtQ1TfRVvuOYyZUx6lzwEMSxLnlbOFy8uV16/qhsNrpXtPbk8EmuqmTNJyUvFO/IH8Ph8OxLzU0bHdy/Owll5ZdnKk9aAxSW8axcZeVfxgzIW8zNScOdbt7cehJTVZnPRdLwvuALn4uXftGXM3V9rTmw5sCN5ACXi8WrN8bgzR7/KHzMh743nvPVnxMA8A89599ge3MDP/1LvxW+z7vb+//hi1NeNucbeX4BiNREvz8hvItgYPul/4jiNa9OKZS3Wh0ZEriQIKQ3XJMLN6HkYh4CS1iNntkzf3X5Qdzyp3+A+v57MaJgFfATCpMAtWyN/F0CNAIV1rLQildBAaAjDSq+semRAW/+SoOj9rG9cFHJ2NMZXj5S9uy3M6tVWliKSbJTRyMut1uBbrmzA4HgEOhZirNZyBcEOExzK1RbOlbIshKeeoTajFqGDl97uyBcIFioCgIaj/PwVJT9dtSKy1823CnQMzlBw8sktK2Rg6qW2A3ehS3tBfxm3Dw0V0dw5PGEj2hPI8BpxT4adHx825iSIOm03CenSSPdgYNCOkdyUYt7KI7g7fG/9CP4YPwa2rqG5sKh9AUC6puydRwtgb5wbyjiGKP5Y3i/7OrJhLTK9QAuzYVjsh8bZeMOD/m+cONxyNe1aQ7xXggRc1ZXlJC2di76zGDpLNoEst1kTG1jNyszq5MxeRPODgG8hxnFZcHg1kc38q5sqoKAe4HShkMO8hbn+GkotDEAQW1dOFSMeHA/Pn1pEl+1toLzBr2fJ48cNcy9qIW8pXPoiriOzApws295v5B5syguAMFfjq4Ft93ehqXsDIG1tebAmgM3jANh2Vmm9rvufQAv/K0/wbO/8z/hmc/6UnzuV3xH+ErDk5/waHz6sz4Ig/Q4bRmaD7u8XBhnfRqNqeH1Idh/icEQiHGdDvHzGpHRLbdy8Z5bWEPO060ufQLp0qtegff5rI/D4//kRRBwyULdcdG1BmFtNlngMsIx7nTKJ6eqq0l0Q7h2hKgpNPkM79cjEV4AABAASURBVJUr6C4mwLtnMNyI9Tu25WSqJ6eUEqgUZtLUXZ1uhowS7AInIdCzGgo6S6MoV8VO6sUThZcxGfCqDr204o/R8I7SVxqk4dW4lnbpW+tQkyiT8def86Vob7oZVbsf0vw4zqMQmLNSNzEgf+eSFgqai8OY7+ouHOeEwH2MON6Oj/tBDe8EArzOHJ/vrFjiupjlGMDbB/3KZPhIfvNVL8HtuAd6gbBkOxW/jNGoUDmMDjYcaRBgP1SecyuHWzJV3ZJWeZMa3nY4RL3kuNp0T05QBbJxY+vRapIoJt2H0lIqmI3njaVxyOFT3V7ioLDAINXFtac/jh3XnJzVsG/Zj3QvSts+GLL8LGF5j0CnSk20I5LnBGMIOM0K49cnV7A8L3Rp815wbKbJrzjPzOr/DNwzHC7mK8jf4F/CqjnJpSDoFzEcP7VBdSlervF5RVQMYIoOLvEY69+aA2sO3DAOLLS63XPfZXzDd/0EPvATvxzP/LgvxZd/43Pxm7//YrzXu7w9vu1rPw+/+4vfjf/2n74ZX/75n4B6lbdXblj330AV9xZTM5nMKrnQRe1DiKBgzUIvu4q/hm3Uww5WklYRSxifNDtV0rKXJKKXKaYUdJ50GKQtH0FnErieC/F5FmOVzWf2KEeAYQS8NgFed/Uy2qStHBG36bNkbAQFgOzljfqkUpvbwJXphrywfDQtcBsCPUtKypv+9n+HGFPFqb4KiCidgSEVjZO+w3ks4KUStkADnb1kVo6fSsi3mqk4h/7qX/5rNHc+agZ4m4OpdIRoAxfiamrQg2dJy16I4+b39gPgPesMb8M5ZTl3TDNFY0sYs1p/izJqIAVmjzSZE2r6xCcdRFOb1qQvZHTbG9Dm5yBxMZ/aXFY+AF6QXkvAcqgk78scbniPWGugIw0b2EPLub3s/CmS5jwDXp3h1R3oCd5DPb36QjhZ0vAK0KXgcs5mPNLQ7owxTQBQBDr2V+68yXN2wP3jsv2bp1VLZc9IAU06J16c3oh35IlZzkxQW72z4f5X5oL3qdY7+bvE1ynnqemDeyb6wvHpAD1LXsPKwavh/XKkbcPc90hVAqyzn6Xk/mPlsewTWvvXHFhz4EwOnJZhoTXnNa+/N/wntbd5yyfi6/7Np4dzvC/65e/Ds7/sM/Ahz3wX3HbLpdPqeOSlFcVBn8djmP29EK6m4+BmKy+QPZmE+3ArtEDqLFzOt6jbJcDbXiGyZCGHFmOiPgluBgOQec8Pejc8/cs/F0gaGM/HmedZjAsKnAhZVAOwseExcUOYMREgo/Qd3i5pePep4S3rmNtRODF56UuPTFVouNngWhMBr2kb5D4qLZuGwuhSAryWwsoYk5OWcvWfz1RAWk0B3m7SKnjIqG/OMN45gikfeH0ow5IBJ/6wuZ6P0cvRTig9nQgqBe8Rq03/SGQwPDnPkUK9iOKWYQj5a3tQ1eJdiDjBEqCxelZAzWzU8LKxJ+Q9LboqDfllcdwZXqJoTB/96FlxQ3A6HZHHjGk2N1EXlr7lLsf56kqP1jiI3uwsbSbDOTPzkpXq1d6uQQC8G0MUYk7OsIDrNuIcnYCoh/kNta7OWrS5HvaJ0fDctMnNxjuHPNex7G8j1uXuuh/b3/JNwF/8RaTAtSB6wKdOHfTzhjxM90U5NHDsM87xGxQulB73F7UQc9jS58SM5s/h6KVCWntgDTpuTFSw4GBpvZNf8zO4Ap/zfeIcKFKflWdRY1km/0OYXMY0rZqQg9CabmSlmB1sgUPJe4qNS3FrZ82BNQduDAe42p1d8du99ZPwR7/yA+F7u8/68PcJ53jPLvUIztEDvLan4bV7EfgGznBRzKAiC7+73v6f4tvw5UHDW6wgzJFeVvFXdkMVEj2qQ9o6VseF2UALtCMYnSl7uIhbE7KvZBGvzASOCGxuAWO3gazhtTpfq7MOTNxn9wcRV60sAGpJD9La2PLYIwyhFzoe0iShp3A2UwpdmwCFKR3MigK2TCAn1EGBnj87lOuRK+11CWp4U/vseZhKgmqtBHnLzUK1d5UxwPSYfzwREmhl/FQPIpBh1FJXW8eBMZyjzsZlQfPmJCIt+W0Ilgw1la11sCv2V7eKh4HoqK7Xf89z8apf+BV5Ac5NJI0h9GMn8/np6XATTnFLGmfADSUBr3eQ5q8hYDlEQjdKioh9BHSuWVF6GWzZjWi5uaGiGGEQXH29xNDnc725vuwyTVfHflt5VjB2MwLeZ3zrF+Hmb/tm4MUvDlQ68i94aOmeoQPxuE2cHGxyJFatFPFXJcTcCGjGqGNty3oMzLFpi0YKtBrOu2G7i7e6cxtP/PEfRJ6z4V4loQB85zYThvezYf1MXvpidYfLcCG1JOYZSx0vbV49Pmujoy+RWHe+vpLq+lpzYM2Bc3Jgodu+pJAvkiA/Z32PjOJFMeunm0ZNpyKsEJ88NIYLJTEDfYBPj/9f+bbvh+fi81DzsfRgFcA7iIIOuxHw6vyqFuLdpJG0xsNSG2onY5gEBKXhcXahaRDaOm+VbGfXi9y64DE2A0iwK9ru7qBLL+5Mp8CAGmDFuyOSQ7Fnm6zVG2532MVmKKAjDQInIdCzWgoem/pZ1IaAf7V+1qWDfmG8ygJGHVFEz4zHBuEfRPA+EXaRMO4lL+2tKCA1du2lm1BdfSCUbyeKCd4jVttGgTrYiO6RDGdE+KSJtAS84pL6kEHDcUXVEsv55JoxGlth1fGsiLO9gE8fINnIbx238b25aQgS2/SPLprhEKtoQB1BdFkLmpBPnB/9F7lCP9Xx4AGk0TfGoKXWW1F+Y7B0nVUCvBPEe9NMJ3CsumVfRPPuK9wF0nP0DK9dui6SCZdNRxpCQNZ4LBu+B/xM9rN/996rEUe4N4sev0OhJa1B5TSa6CmTj1AYW64PDrBixJHUxSNKljdsuktHxawHt7Qe+9P4FEBDqTlsOM6HqPIerWkOxS0YMHP8MS23C4aFWTftcPXrG6NGUXmcdz0IhNfWmgPXmwOPMHpcLs7u8d/94yvxEZ/2VQuZnd39swk+3HP0FkUJuNxdk4BtCHM1FiCT3yfpMGkLBVER8AbPkpbPGt6ru6FkoeWfC/HVURPCzlLKUNA5CkCfAEY7GAQBHDKsYJXWQl9dyEU3qSUaCfCyjhwHCpeM9a2N8FjlZulLeMoElIes54/xrth5zJtTU9eioTZ3nozYKoGkeFM5WAp3+Zc1GyX5xkKdBGdRQGd1d64xonftRZZD2k5FF1b26qYsJEWB6YULKHYuB0JtxC3BP2+1Xcw/XPFIQ3fpUiBpOFAFx1SBaRvHSv6+ifEelvnE384WsJxr/TyL+guhFGUWb+UaUuJ8kReWfeqPGedum777Kw3vRhXvl5B3QcuyvqL01MUXYdPXTuO9MSue28EIfX1EI9+kI0Ld5oCxy13m/d8Pz/+TV+E38X6hoBlPYIzBOL34mO9D3kQhPVvS8Gb/sq6bB7x5g9YbT8s2ZLqvu6sM3pqbUWfJ8xBazdpMYzLlWJ1MwXBoPc5XE1A4C9A4tNAvNN0De2GDTw8jtS70ASij4HlzhrIKLGlcj2+hKAGvMSbMfhtc1ktlRkijNQE3g5xEju1kcH2tObDmwA3kAFeMs2vfH03w0le+Lpg3f7PH4Klv+cQTjaUQPJviwzwHtU+5h0Vfw6vn3jmBgDdoDBlukkCaNHE4shaUSctd6b9lIW069GECvbxx/96ECzIBigEsNUtWYLQ7IG2tOQgs6ctatjHpqqj+IcQ+NiBtttHbPoz0ziJ1EenLaSuD7AwEh1sd/hJPw4OPfktIk50/Q8TqZpceZ0qTrghHDS+okZR/FWOMgTR+OgsswHv1ymGejUYx3CawlkHjKnWpTGkpJemZbl1Ecf999IE8jHWEwJzVZsBL0DKXtFAwzjzAcY46zgeKbb3wdGxZ8UHplpYAm2f+I0Dg2JJHI+sa4QwvekBTICbkJM8hEwLR0pGGq+XNePUzPiBGLGmLryU1vK23MARlHQHLIRK8L3NYeyjDvnW78SmNyfdXzrCgWziDP67eC/dvPR7aADv2adpEoO1T/WTlIWpeB/kPxSweKC+Sqf3s6ebr2njTe25ykeqFtbj7riLkrjc5/iZ4V7Zq0lNhaVblHmcqNu8bf/JeGP4dl75onDPM6Sx0P4I/B2nuCXip4c3DGI449OcW85nSoeC40rv0ZefXENJOXQ5rmuXYIi/spD6mhrfmJtSybQyurzUH1hy4gRywi9T9FILcz/3Uj8DGcIC/+j8vwT956pPxdV/66fjWr/rsI2YjaRkXoftIyOOayaybl37ix2d+wxU5r4uGglcJ0zaCnHpFDa8ZRk0NdnZEDhIABVdjaeTCQsxY0zZwkzFcG9tlGKc8dFa6yjSDmihLoe/j7vsBzGhMJNNGmgRvk4lqAvTCkCLLBAzlX8bU1LAaFhhsR9rSihsKcvWR0YeuhvH5SIOhhtdZlTyUZeGAYT89BbShBksCdufaYVrEiYFW5yJ4KMj3ELGiVc00vBcDhU3s4tQjDQnwrrpZshTUO3YbxXiP2rdQJR9L++iZs8O8ZZKxhlrSBuqzdWTQXL5FgrPPkvF+UP5wjMHF+wBsE3p8HO3zcfW1Djtbt+Kud3uGsi9tHNtcVh4R8HbIIHBGKLVDYYE2yzb4a/sKAluD6C5pO2vxVxffHXdtPRkCvEagKdUzq5/AqU/WF4kH/cgF/eaW7cM5J/Fe79I6A+ug/84XMrF/r3ldrKscAs7YEL2qtTGI8z/j6WPpsA5BUys+HJthsUiNJUnBJQ2vYTGxNR9pYJCbNi5MilQgGUOknG6vFLO446zFpNyYFdDabaGaAcs0R36a3liOCXiVuTCRx/KvzZsqB9btflPngF2kA9tbG/jCz/xo/M4vfBc+7VkfhB98/i/jvT/qi/AjP/Ur4T+uLULjkZqnOOk5NBdhacqCFonATPwZpyMN+cUuxS1jsgZKL4qFclx49d+siE3gTIiBoWbJjkdwzThG0C7cQtOAOY9edRLMkyThtrY9dvwG6n/4v7j4C/8lFPAEMMTYwV/wcbI8q1ZZpqZubFKQkdC0c0FT1wYUxojepX5LICnKVQBzyruSUbUtx8lWRdAo7cwdadjfM4FubBVQpJd3QuQK1iAN2PjCxVD6jvpB9J6Uhri+1aQzvMNhbEc/bRG/tQb7djMAXkc/p2cEC8cU1kZC0cwWeO8JoFbVmFUEnx0skOYPCBhAeqLvjaVe7KA/D9yDwHtbcyQPopV1YSO26v9ZNJoNXcdq28NlGZcjWgJEy0C3EwGv3a4ZWv7SMYoh5+vIVzCTKQr2K4MizzpEsX8sKITTxkn+Zc2RYy0J8ILzV7S8Va/koyG/X/e6CFI9g8WqSJBldYkySaKZ+xat0vrGewNnlRsr/8qC5Z1Fgagt19iCM+YQ4OW6YOZvnNKhUFks/yvYuY7r2awk561JAbYGVjdFbw5lwKs5kLKtnTU9+2sDAAAQAElEQVQH1hy4QRzQPbpw1VubQ3zax30gfuvnvhNf+a8+Cd/9I7+Ad//wL8Dfv/Tgv4ctTOwRktG20yM99Rub0CKp4wYCUSYJh/E0DsfmdoZNR4qeGmG36pBu9/aCCy68Qy7sEqaWC7UiJWil4S2aKfbslqIIN1arT4UHFB5ykyylhpePFNsByle9Ajf9xI8pCaCASE9VUVaxLmdiX2OGxe0BAadyD7YiUJlQK24IGlovca2UaCbcSXgKP0OB9FL3FIw2LyCxIGZY0nYs3FBwZsB77ao5TGF3D4rIwlCgRuFVTRbIk63tQOJmexkd+xQCx1hta6FHpxL4xySfGUUIibEbopjswVnD/B46EkLPkUvNELctU8RfASgBAQaXvmYv2YlgKu05X5IX/UEbjUCYynHnJsvCYJVf6SwKguzWF+EoTD4/j2N+U05VDjuQjgi5ixvH5Do7SnNHmvdRVxPwjiN/CcRU0ucbh/eqwjNTRBA6Cy/h2dww2MfwoES6+bQOhEhrgyPLs4OvfV0pL7TBWHUcA4FkOd7brT95fDzrVFuMOTlPInWqoyNb2hu5pOFVrzSN9qYd5KrwVPxNvFZYxnBNrNlG+Zc1lm0Wn3I5w3F7+3d8Ct7hm/4dlKY2wOfagamtQ1Z3vq4GGmtrzYE1B87HgXB/LkNiTA3Ff/uNP8QPPP+XQrEPe/93wy03XQj+tXXAAZ/UtPqCwEFs9HmpmOjVOhyAGhdNBjGeOuiMq11RmLuNSmRg08txWowFeFWPFuOQSADoxmNYAvGpifmdtSFpFWuQVvIgWEhA3+G91kZg4B64nzGEYHxuPZ0daTChd2VhQtqyVv4s1HAzCpV4pIEgiIQadZSurqCFZBYdafi/5qloNzdBWaWklYyzbC+FpB0UKKhRunrF4NFf8Fl483d8q0Bvmv4hQusiUCkTX0LiCtaQoI81YrIdNbw3F1eQ9kXHUms6C4EqAddjM5wR6aiR3i82UU734cQo8i6P6XzRrE23zGfaBvonCatOIdXVgq0mnVAP+031W/BC/h7h/V0wJ8e6ZH5rYp4lbcepXlYe4pchMGla0uvTYFw/aNlHpH/k0g5XP9Iw2Oyw31UwBJ+BJjdPqqdriarpEQikM7t0TGQWWNKjf+6SNYuhKOuU26V15tBxCWvwmtfGOasbpGRYec9jiCeR6zqWjsaUfDXnPNJQa1PAAbWIPFRdju3fm8Yx1YY3rAOp30qX0YaqVCMVWNI4Z+DTPR6KpjVHa63j3LLsFzsfkmRNMuBVnxXxyDHrnq458EbHAd6ii7Vpb3+En3rB/8T7PetL8XXf/h/xzPd8R/zaT/2/eM5Xfw5uvTkK5cUoPTJyaVFVT136ZI782QjwWgq8lgsxcQVM04Sk0bTAYNhRzi88LOj/3PYwBN1ePMOrhVdfGJAMz/BAxxn0WbKyHSEvxqVzodwq1rAsINrSfqq8Pks2QgQG+qcTigOFQD7S4JKGd0V5g1COFXYwQZs8plbcJD42dEN9tBp1mq7Spo2BQI6zLMi4VS7LPnQUbpaAt8IEWcNbvO61gdy/vPwdwe2kcqLP2dXGkEXDVZex/CgdabgN98G3PqQdZ/3NTe8Rvu5hXSx3XJ7T4hz5OS0GqKZ7IZtl+xv2NwTmrBzvrIkAlCCgYH6s8HPkKyHETCMn7ZhPPJwnJ8CrzQYKx3rnUxcLF84hanhtPI6RAOesdJpDbei7J1eYsh954gdxI8eYpS6xZijA2xLwTibUBIJ1dwi/dO/rXg3hZBn2MXmXdjbZzM/Dcw/Ksc4QyOCePA9hWjp7fvmKo48X4wtn6DnfVTiLfP8dS4n3pjS8jhvIY9MXjBRoNWxvmBMqw7ErrMHOuFEoGN0yWmtDIFmeA7JqP63x3OAlfpGeNnwm8dWKf9ZwbnrkX5OUCufsaia3dtccWHPgHBywi5R92atej2d89Jfgm7/3p/DB7/uuAeh+6ed8PPQf1vQFh75ZhN4jIk9RhG46f7D4hghavqqo9uTCSaEq8GCS0B1NXHgszTUTq/zsdnw0WYyoChMBCQAKH3ldj2h57So+5u4fQWPZDiauuvizKIZVXPybqFSBzvDmx6n6Bq/yePJimthQpjO8BYWD0lYxhjCkY98uXPSYNAWCwKGMiSAlUmwo6RjFtAYNigBy3DmkjgSpjqAUQ9IixR29tMY2qDZ75TL+1SgC3saVbB2IyYySVjaDwoayo624mbxkL6NLPAwJc9ZfXHivCHgpkOeSFgo6ZzEuNzBo4twhJiBvu2PL6qkEMQse/ZVfhmfg9+Ctg7Or9VfFPCvL9wA0LxSpmvt+hkf7hmCihSktrF2tvoLltPlpvAugs01gBfmnjtEf+kjXsg35iYkfxg0lo5e6StKQ9n2/qWGmk9BFAXsR8bn+VK/iZA5pERWxhLlwCcj3oIq144kcbpjSeHK8QgStB+531I9a+rgksZ2li/4QsaIlHnt/yvhYC8+/89ZUE+wamhngZXsL9oFOZq+8sF4rQfAGS/OtLlar3Rm2nZumQEiW1gDfsT4PZw0ef2mIx/XOemelQm6XiqzNmgNrDtwYDix011++sgNpeNVEaXk/+JP/Ld7pgz77WHPlWhSYyvuINunYgj0GpfiyguEiTLwLgSjDBVO82p8UAfCeIiqU7URTbkfNqhvvpzxxoX/yLRt4/MWjwnpqy5CvcgtNg5B33tosXYhqtPDTt7F5WNgyClJpTUYmevnoXJ6yiOXkX9ZQrpBvCOB6JMDbtYFE2jdEP/kbPE1HeFqgINA+z4sjTpWCfXBFAF1ZaaY6dHRjA1EL2LnIy4qCWGmrmg1qzlV2lM7w3oTLmFJTjRN+O+MKVd3BUSCfkOXUaEegMK42ULexH45dnaYxnS/YJEa36UU5PSIvLAvMZ1wgTOwjrhL+pPJshzZIKuoJLARO5JfZ32H/wLGunKaUopY26pfmwtQXsG0LbZyOIxI2Tx4wxsPkIw3pmBKW/Ik3OtKwO61hOHEsx8gk3vrs+kh0D1TP0ttpU0x3lcuRh1cQN0oq302mcgIok0d8lStz3wMFORrvRc9y5YrjKFrZiIbWtRzuu3nT4GHg0r2CFX+W7TWFhSVkFwnDe95ZA6+ATPYwXsFstJkQj3J4GVf0oUmbCmmzzSkSQo59kkftkCvTcI3VzC56ZRQ/b9bhNQfWHHjDc8AuUsUTH3cnvu1rP28hszGoFyH5sM/jXWRtiShs+h32VUnAC7RckINgTY81J3w8r386Yblo9/Mv6pcCah9DlON4pMEIUbPwkOAy0KSwZXB2ZQ2vORARs7RFPZQ5YZlPVWFry2OECLxnNKzDNLGhqNhpJlBO0V7tsqxU9QUN79RA2kFRzaBbVAMoC5ENxaGFPvXmTBwTpS9rNJwdNyaeIExj2qg/SZCOdgjCEsHOxjqq5KbopR12EcYYjC5QXcfSt5gHcdoLVvtjh3roWcZjlZ/Go6kGGHYHG9b7d6f4g1c8gHwmMtMNc5ZzppukfpM5Zex2zrKwa9lHT16ZtGkR6BIgmRFgevbraHrB7Ythfc6aHL2UWzrDjYFH01mqNDt0k+ZweYJgRcQ+Ao5zxqUnJn5jqKSljTZ3Arx7TQVpeB/7vO/Bm//kDwc6Pt37RKMxDBNcFEV0V7A1luPB1qykT+eJfH4MwzmcE69eRbg/crgif7J/VbfkeuN5rxxbXveMxlQu59CxeZaJZHs1J0IR0gya1HAL+Bl1m+ZWyENLc4zOSpdl2/OGLBDQhoXmloHDHVmzm+aQ0qcuysPCpnFV5NqsObDmwA3hAFf9s+u9+dI2PuSZ77KQKZNm6myqD+8cpogCa7YYp+5e+9CPQEdNkaFA0GPTjsjNcKFW8n5bQtond2RUlHq2qWsCJILNYpI0vIluLmm4MGe/3PAGMddhy0Vc4VWNtQb5zJ40vHtJS5XpeWq7x+MYckWQRigTf2LscjZlHDcLXTjDuzctAfJSFLSBkCvTsBpeQNtxy1HClR0c26m0VYzKip2emrfCtJjoJTxFkNhkt6Mdr9YWwVMWKw5iKB0tNbcjAOqo5d3GFczJ7Zgp2dLw6h8quBXH0jmLSb2B2o8CRUegNyHv7tmZYD+9BBQSaHHKBjDRdZw8DAsAFD0QxaiFL0sSHXV0GfDCOiDTUl9kErX9PYOq6NAWBYFoilzSqUi7KIGmc+FRd9t1hymkMdW9qYS3+syPx2f/3b+VF9Dkjr6lbGc8Bpsee20NI21rr0+69zVvPUyg2SDOH90zIWIFqyBTd6q4UVJxP42gvkt97W8oJhPLLUSsE2xXUVgVOZepWL/60x1HhePrneX8MSuPIXo/w7osIXuI4jpQBNogfaCkP8RbG5xs+TL1N0cs4Wosfck5msvkmzLNmxBNPgaX1tQOaOPcR5wCkbW15sCaA+figF2k9F/89T8uku1QnlXKHCLwph6o6tCDCvH8XAjQuvbhHwWkBVjCVuABSRBNGgedL3QwzLn8VZYGIwxQTZKWrqdpCNTym/AhAExdxZpWqyuRCI6zNncBOsN7BPAyfTKO9RR87K5C1UIzTzmPmoL0OjJu+4LHeFpQw0tNoyeIaQ9ErHgbSjJfR0BVDjwFbGxDiF/SKiWoWcYLMJkGfVZOCQqZFK6O6fKUFMRyz2NEQmCou3gRF/zVQ3XO092bFBhssKdmNcaW5Gkz3MCT8DI88T3+H/IqgoaOgnyqRuDglzc3M1lPAKHyBzkW9xVsryEgPFSCbQlh8hxMD35aOsNbuRae6c6tNpYFi+keazoLw/suHykg+UNX28X+K09OsBlA5YgF3bpw0EtrE1SwO9cItEk8l2UbwlztTyilsYycVUzJspNh/JydynfjsRxwKIML8i96EI7JcNaEoCe/S0MGhdDqVl3aUDj/++QQOGJ5WBPzHUk6K6KXbjgmFt0spuBN4wPcBenHaB1dib5ks5/Jt7RTOAcd4ckFq5e9FGZunTW9sZwWVcjqgr221hxYc+BGcmChFec//uwL8Q3f9RPY248L52kN1md+nv9f/ge+7Bt+8LRsD/s0U5Whj0PsB3feMlyUJVSlSTIJOUymFtLw2hWFjuTYyAywtX8f9OsL6xCeTuXMTGtLAt5ZcGWP5FvL/oiA/tOaQLf8M0MBs5sweNoHzJJW8RjjAfJIgHdEDZWhwGEMtXayEX5NBml8ZNzCQS/LOROSVrJKmwoXBWHLlBpeBMAE/qZXolaUXrTsq9yNqpBzLlMQEAgMeQ6szh93uU/HUN3ZK1HVHhL4xySfGaVyTT2M+fiY37K/2lQIJEnTGxOi3XIT4ch/z3yKMc7A2QPeY4mf6iUpgMAvF1N/s1/jnP17ux61nYb/7OZCoZyyuFtYi6Ly4dyq0dyYNjj0U4cZIaBPB5pbcq8WN7GPaQ4oYglTOMPNiA9PGlSsT8UQHGkdULxMkzS8xpUKrmTE02sbt8zK+nTfe9YVInu8G08dquEBHNNXQG6VAwAAEABJREFUV3DOX83+isT8vFFc31jdx/2IFfymsJitsRy7gvNWS5FjHzOfDTW/h0gTJB8KLxFwJOp5X+YiN//Y86CdhGHdOU7h7G9sFbxFsZCoDXnX1poDaw68YTiw0F34rI/4p/ifv/dn+LBP/Ur80q+96Nj/rqbv8/7Wi/4Cn/C534Bve+7P4nM/5SPeMC0+m+obRQ5DYHRcQ2aPE7kgC1AEwZrO1k1ah5KgxWlVPa7wGXESW2OTQIvyzi308wC4SefLlPU8xlmLNkltneHdpwjq09Mj771dE6LqYQfL/CGwomUp1Ii5IMC7OypmwFNALJPUY3iyGAJSrQBv4OtC0z2TOOQKtCiiIaB1hEvESkDi73SvUVIwrWUdsashfB5L8yDIUWNRmI48xom/3UkRzvCq+hMznZJQsM1tPqPKzm0TsNvAQBz5WoP4rKS2ZSHRLN3KQFvtbS3HMPFS5OCK4Ajs9s9bjvcI6F0HAWJnTcyzpF0SeEjD23JOaG5wEA9TCAwHNL+UYNJmdOyGsEJSilzSVNZhuNVhhEEoaScHT30MKzp09hyp72nDHAosaZWcghXvs0l6nB4O0L/2tdj+278OlLxjhuADphMDu1kj/Dio1+MMb11G+lMfqB6yvAY8xRfyH0pdPnCIBNtfFw5kKSz9xsQ5YtPalKnf/0/eKXuXdkvyzuf5qdKatzT9eYre5q0pIm+djTxRkbVZc2DNgRvDgYXuwvd6l7cPnyL7oPd5Z3zNc34s/He1T/z8b6QW97n4qm/5EXz6l3wr3vMjvxBf9LXfi8c/5nb89s9/FwSSb0yX3khqLZLgOq45FICmbdFwZeY1A2wjaluihve4QmfHCSCNs5BT9t7Cq+A8YgpHGmwUCiF9RaskQO8Q6WxsgTrtHugWTdaxuxPT603lTBJPaSuYLCi3LxCYwMJevYJn/IuPgfu7v51RuzJuUBPcGIK3hiCipKKFzZylL+uRoFMZgTO9tBaOaCRw1OwcAJiOY2sJUJX3vKYg30IVFJaWgBdNNyPZ1YOZX56dvSJoeBe6oVVgzqiudpBoUiN4aVjirW7bgjDDeA4wCPCqj60OSosOQUDtnHxLG8c+WuOB0NFUPE4VBBChBqTo0b5HaVsINLkVeaw5IcDrwWcs1Jgbzo9EPjrpnmnoqkmGrhImbkAN72rcrcia4UaHCZ8NiJa+hS1XJq8DfZCkeHAeBXcFq+R8qbnB27U61kDWjsbAC16Ap37rv4/UejwV9u4/dSmL1foYCUe7Lthhepu5ecMogHWHcQVg6adzrsv028s1VZ/z8/BwBuAF/earufs93lfRK5nCGnjX4xEniRmNZk8CAtE0Z+RvXdTUV/0ySlibNQfWHHjIOdC7c0+ve2tziH/7BZ+I3/mF78YPf9uX4QPe+51QcGEbjSd413d8Kp7z1Z9NUPwcfOfXfwHuuO2m04k9ElIpdI7r5m//ZoVXvMyExVjaXQkFCT3lneoMLwWVHmUrvKwpKCQntp4Vy8I6R8wLdy3GLrQk51jNlXawo5ZDpavqGMBLMLS/BwyGoBw3rNEo68pGwsxT0Fy85KlrdYHObS/+I/jLDwa/rAnBYcExEA86WEhzXpxD6JQUdOCv45wv+HC6j5PaPQIKpunqVCcFrvznNWpvJyKU2Doy0JOj6IpCKTOzv29Qb3iCssiPWcKCnlBX0vCa3mN+8Vq87JNpuEuz7GN+698Q0bkVeeusBdS/roF+nnNFgFZ+KE0mBIAR+1iaeKQhj0dKWsrRXGjhIkjhPDpUOIUF6hWf781xtUHerjZvS/JmuN0DvJOD+QLeN6GuVK9HrMPWvJHUgBVMYR2qocc1lz5Nxg0M+hPWkOeIv2lreV/6EPBs50bpgv881i0bdejF/rQ7nkwaU5vc4zMtFuu0+KSs+ViBQKkxkY/qmUla+pQN/hy8dVwHPPmbaSGN2yxMj9YcOuFqtdOmT/cRnfW15sCaAzeQA3bZum+/9RLe4+lvi8/4hA/Gt37VZweA+7mf+hF43/d8Rzz+MXcsS+7hm7+MO/v5Dr7qlQ7Xdiy0OLdMJHZAXpD3Jw5l5Vd+PCxQNFlCw9sUFVsgkUDnHFdVWKqRDgjY9D3gHOMpIHauGQwJyDqiNkuhkdNWcQtHuMVmb2weAF7RaQly5coIRIRqqPURuBFfzyN0soamZV8K30AYIgu2dvfgbLTSbQ9QqC2rGkGPloDIW0t41qHNGlUSVBydQ1c16KC3yA9FLhgoHDVXm1HDa/JZT5Z17Mv8WUzx1hiDLrendFj1JcSCdHgRPuPgx/6GgKGtRDq6Rnue/YsaXrVXcauYsjJxo8T5Yo7TQpKo7ks6vDc7OZgWA6itIbCkNSB/hgSgE+h+A0pNnkTDaH4KNPG+UFSDuJHpvxil+GVMVZig7b+C4wGv56Yi0xtPDOo6h2z2nMu9NChQ8B7dnbZH6fTGM2j2j+ZYKsZaTZJURHykd7sqIeBuDNM8OIa0cPDz5/h0Zsl+HdpspjoPAV/es7m2Jh1pyOG1u+bAmgM3jgPXZ4W7ce1/4615TgOXG7o3Krg2ciHmJeDQccHMgHfUFgHw9tfwXG4R13GBHzuqUXPmJERz0PSAjOJaV4FF5D2XKS0BKCmwK7QBO4zASZ/TUoQnL/b3DDY2EN6nPq+gUz87UtIZ3gwQVE/X02KJt5adE2+VR1q9ku1UvlVMYU0o1rkCA7+PZspw6vB0dCDYW6aft39Iv7Lg7ak6WBVxJxpq41ISH4/PUEqI6qjFrvQlCgrkELGkVTkC2C0OkMq1B/1Rt8cEhorORhpe+WfZSkeA4xS1tBH9sEnwqU6Omc+ATOPFcCZ67ZqFPgnnyWOngljtV1aaPYm3h0kAqVOaP4AnWIrtmlRD2BXrrFhuY7vDGHHMbPpqQqiayFr89CHA6hH5aMoqxSzvaHOms/JXsR0LC2CnfinC6zGMPDTTqYWevNB7XdYC0ZGpOXcnc+uP4oNJY8oRCMHzWLaK/Ao0Et3HXhzg0RcGMBw/xWdX/mDmjgOFuAUtx0Z7bnpzdpP7mOoO8bpngwfcKFVsh0mhtbPmwJoDN5IDvH1vZPUP47qz0J7r4v7IoOksH6dSY5eEHdKiOZGGt/ZwWlXnyi0SlKCb9jQKhvT75UwPECq+KUrY/kKtyBVM0PCy3DgJVZM0hZ0QLuOJFLC3i/DJMh1FsMYqdmVTWAkQg60LnrD3gFbXdjOaU/bdUuBZgvys4S0Oss7yLeqpnKHgwuwrDMIQ3LmE4r739RIdabD2HBUFitESUPKs1VuH0jTwvUfE08bETMlWvgG1iGXgTYpcwtHcAUGCitj2QGNdOIt5wNt6wHHe+ASErTSKzqjo0kYas7BB6IEEsL8ipLOevsdLy9Eu/RRd4VCs2E/R1fnWBgV0PxjOD8XNTGpHS5fdBJKWckrAu2IXUZcOF27uMEUZqjG9Iw2Gj9t1rCnPpZCBliHf6ax0lWyo/tHKFX8pltdkTfdmiGDfgktLTZH2mV6AY4rr9BuWjhs0f5Qa68ixxQlr5NFCJ8eQ3EFiWkdzhOXcUV2mF/83n/b5MOcAvJVo0uQ6ZuPWq8P01qGOT9HM9VkOZlWuPWsOPDQcePjVsr4V31BjWhTHUt7bNyFeC/Wb/8QP4TGf8UlwSRjpSAMx6MrC3BEETItBoB+s3qO1EE5fgwh+Wi0XY6uG0H+eqybgUXlpquSardgGn1VHBDC7uwb6bj9xKEG2cq1uChd5ONxqIDCbKbUJ0P/9vTtQWywlTTHZC3mC9rMvqHKhBd26LELONgnp6SQEg9XtjoMrSxpeF5un4LlMUcTbU8BPx1XaHuBtTWxPv4LQxxXrrtiv0R134JV4AorJwaf0BKCPvLRGgW5ZTyfkywaY6mhbGL3QZUWIxua5yvno9S8DVZp+yMhPYwl4remgz0JpA8Kola5qYEQJYD+Q652jJMCrqC6B+msXbkfh4ngofhmzXUf+tGUdipnxKLiyDO/9qE1WiE1C1FianhY2pixuV5w3mguX/YVYiIC3nRxsYkANeUwAruw4XLpZsJAxdrX+seSRS0cK9PRqPkGtaDwnDxPYTNrnvDhvZxR6oFNxsTceFql/jLz8pKdQoUDPilfFOTB7AiEaafPQB9VIcUruOI6xtwqtzZoDaw7cSA7ENeFGtuDhWvdJgHdUcPk10BnerZe/FJt/+sdwvgtcEHgrKw9HABAilrQcwUFDEDsrNicApE2apdHTuahxovdcV11EIZ1fbrKbw0CvSxpeCQhpePWNXs/eW7YzZFjRKlL5LT6x/Rl8Il7zxHcJlDoBGPr+7u4d2oA1wQmAV3wVeIsxy9tlulMEaFWaGILopJMXfiwxHrzw1rHeVHGMWtmWcFVhDwNHoBfqRPy1NoKoGAK5alDpDO+KoKVwBlfe9m3xH/EZmWRwNRenia8hglbcTBh0jWcIsLUJ7ipWwUHScJp0D5B5p5JxHE3Np2LFfop4lY806P6Y6xsUx0wCoY4N89wk/i2eij9410/jGKzeT5KETd+7NeOD3ZKhhjfsG1K9f4an47/8h5/H+K3eWkVWMvpSQTXweGAaAW/QKCf6gSD7FVxaOzsWNxHwNsMNGI4Fo67LtVnaQGd30gRX1ihtSGEiH6OtlNWNOW0esAJhT20qcg26P52Jbctxy7ja9Gj+5TJ92jkOPV630mCk/s7S1541B9YcuCEcWP3OvyHNfROq9ATAO54YeHAlJlYyFHaghidrB1o4FAS8pTUrdbR0NgCuXDjTzWFkgZMimqrGilUlCtHJgDcDo+7SRfzJ8BmYPv6JIYMvHHauGWp4PZGZOXedJfsJ/sphh5fhSbhv+80YImnyUh7WgpZCx6bOia8V+Zo1psqzrKmLeKt0Nrp2dKAFbfcPAK/SXap32Trm8+vxqeI8BaYAb3OAkziHYjuUno0eY6fm5aiFXfWvIkia4vAmqCBB8XOcNJ0iqP+0JszQ5bi+lk0ZljCO3eiMgyWQPVKMm4fW21m07hznGwhw5DkwS1zCU6eX1ozmSwbaubwQEv2tHkXoPmUf/xpvh9c+6R1QrHZbkhpgOYZ2o4r+voaX8zTUleq9H7fgde/w7sB2euEslFjOUl0ay/zSmr664Xu7Jc8xzRR3dwxuvoUjzPYZY3L0ud2s1d5LR0JEcNqwHvJUm16FBR7lnscY02tz4mGmZ1mX/L0c8AxYS0sJKxg9WehccVBSc0ihft09f1tWsEpfm4c7B9b9exPgwPpefEMN0gmAdzR26LgEemmWJFS5YFoKPTVDwCz8R7AVF+TSERIUUdsqekh0g59WEPB089VSw3uexT/TGZQ2iJZpWujHt9yJj7z0O7j2QR8as1gLCVYqkaDHnAzG+BXtrPnUp5dE4rXbbyEHHYWrtI+EvtCnkW5NAKNBAX3fuOwLx1BicWujjEJOPFOpbjRlNV5euL2oUVagtQVHV77zm5p8FRUBFGs9xtnoegkAABAASURBVHpRThE0jSlpH1yaO/XQw604dxx5I8DckFcHVIFC9NjN/rEG8dgR8XZBNcnc7DPtla7SWRjjwxOPQIDtCK4s+pv8zy0UprFdA720FtrF8CqXtP2dRon3R94UtukbxHtJIykQaomLdJ8qb/gvdmzrKvWpjBWxYSUvzLh3pIH3jPjZfwxunQexfsi7qqVzylcRNbx2Oua9wfmaifX64WGgIw2//b0/gVd8/KfmHOd2L22UpAyMuGHIxPamTfRyXOVx4ok85zDG2VNKR2idx1gZO85VzXX5VzFae3Q/zspy/Gb+5DFSZCR/V9YwhhMphdfOmgNrDtw4Dpy2WsxapX8r/IGf+OVYxFzb2ZuVe0R7TgC8e3sEpTAU8EBYiAPgjW+Ct3DQ1wRWFeYFF1ZpU5F/FOjZG9zeQqywtA8sIu+5zCAJnSbVp6MGe7sGyDygRmSf02Jz04d6nLHBXdUqcnE+5tdxzxc/9sMCqZbaOgEVBUrL+uWhEV/ZBBSpnYxa+sp8ajMNass1kiL0YX/1bXKC6di4YxWeIXU5q6/hLdjXpvei2k5FLTriUY5MVVq984B6Y9ijIm6YzN5uIOvIRo1aPq6iSPGYWeET4D0055RhCVPwXgDHijUfW6rtAV7lsb6FAIeAx7EFFogUttWc0P23VcbJ5BPC3B032Jm00MbMGHa+baG82kwUbOcC5I/NYknLbSTe6r89pFyG96T4mYLw5Ic2Z475c9wq7mDD46fwyXj9Mz8StmEfemd4tWHINDtY3HKrx93v8M4YPyE+Kclp53FvSS+u9ufNSPOl16/IjfPUAoSNRCKh8Uze4ChNeNTICjHco1qDwqTACo6ehOgJQy5qsuac8yTH5c3LH9r3wNWt2ziifpa09qw5sObAjeNAXO3PqP/t3urN8AHPeHow21sbmDZN8Oc4uQ9cvoZLF7aIcYozqD1CkjPYm+tug8gfr2drBIiGIM3SVTYJVgm7oicUFL+oEaDrdGYsF/CHF1rDcctJclvmXbUulc9mWLrgbbsoSfR93GtXgU7IgikSEDt8dKp4tYgyh7GrX2UCZQ0FqL7tuzMqA7GOGiQBFXXbGAP9B6SQkPKXzoTgqpa1Fp2QMwk0o44dpKG/biI4pDekO9Yt/3lNVRoKS1ZDetZ49I80vOLCU/Fp9c/OqvDMWQ87nKduAUpfloFmFuTOmRCektfyRMcTJgEzwFtFzaXSlzWFszCGNftUkv7kgy8KNOl+URy5z00iwSjHoEztUvyyRhpe0RXYvFDYULzL/ea9eG3UQP20hJ++7QLg1WbCmZg3FFjSCtVkDW8P8Fo+5Wk5YXP3O3JW2Pu892U9AF6Lx+DqHU+Eaabo2I9Zk128XxX2MLh4k2dPgYLzW3HXw1QW4WlD/8nApGE9rFubC8NKrkt1nAskdexl2DclGK6xcmVUtzvH3BGNzvX4x/FT3CHDOaTwh+KFeNmdT4PtzWnFrw2w5sGaAzeCA1yWzq72oz74vfBvPvdZwQzqCh/7Ye8T/DlO7pd//ifgdXffh4IC7GyKj4AcFNYn9VLAVsJWC7Hb34dc5VW8Hp2WQToqZjmjt7P72jbVcYhCT+h9HZ6NP33bj4Y5lGG1QAa8Mw3vlg+Edi7cib13f080m9vQU1x9pUGfJXP2fLWWLC8KAiWimY/T6p9aZPlDCAVK+dAOTsrgFiwXPCtaKt5mAZs1Oz1a93zdf8C1Ox8Lq4y9+FW9WXOuDYMzzaEj2N5blMMDwQv+6tpzw7nQLc3cR6/Q7jLSNNQKKoeAF/EYMnBpOIc0upZCPCu1Og2CMq9gNCYdAcQ2roXSAiTBk6z5r4ZZakTFj/NoeIdDzQ7OIHUsgaFcr/p2dTSFtK7GGIJF8p2gW8c9CqfU1LAlHfHLbhShVHiJLPgQ7v2m8zCJmR0sx5Dj6FYfR/A3GHja4IahQkFtfdsD2Z79ConJCmd4md1Zk2Kuj1M6O5s3opj/gUmu3zFd8ecx5pQ2K4kQ+9Aa561FQYNz/Poacs+xO0JK84qRTWfDWKodWP/WHFhz4IZzYOlV9ZWvuQttWpz7rX+Ht3lzSMv7kle8th/9yPVTiB/X+Y4CTfGSszZpAmzip9KkfRLIUJ5ljcCDKXpDmuhnOtL0ZP9f4R1w5eZHw84JP6z4s9ZA2lUV15EGufc89T3wqhf8Gq7c8kQFsRmAsGGdOON3enLFupRDmrGtbY+9cQRp7ZSPbpOwkSDMIMKWkSeDBOZUdhUjjV9rIi0+5kB+dJlp7bzvB6ClVrvIeXLCiu6wKkJJdclRw9sJ4YcYkNcG9TAF6GjuSAtZJN4waunLst0m8chM4xtyAi0CDZN0FlM8lzrQWpJP87Y9B+AtneF86DD7qbMM+GHsXKqCMfHSmHpWXhVxzGPscrb+s1gLB0s0bTJgyfcB6786bshfj61XvARuvA/xdkDteXXCPb1I7Y7jYjfjeFpucnMZ9adpO7E0RBn6yhrkSQiubA3T8aEGUWPf9Z/ukH+ZsPomwNuxl87m2Ovj1oVFf87q29gPPuktcJlGNZScb3LPY2yvL/P3Y+6O6feddRYci/PUid48MGkjYThvMk2NqfyeULuo5DOy1mbNgTUHbjAH8pqwcDPe5i2fiJ/4+d/A3v74UJnf/P0XHwo/4gNFFG7zfOhgKdIo1jyo3aEFzFwJ4QB43dLDAv0KFvNVFHAKYx7w9tDDCIOwbmc5H/Kfw3Ik1CZAtkkQKlK7UWmHPZ3nZYRwkcCTs2wow6tepTrKwsQJ4csP+6MIfiR0pJljErlMIZP6G7TeDPJS0srGWYu2SOMqDegcfycu8l7fzF25kl7BmvUpKI2mM22YNwrLtARsPblLuGIx5IbCnUOYa1Q8n+CIPne1wZHlKLjHiZfSRirOMs6n8W43NxW1knGOo8K5Myuc/N46yEx94nfK4CYjiB96Wz5FLe0M6i4cU4B2nWkMZ0c52M8rBLzq55v/6PdjY+fewNuwmUhtW7pCFtAmyG3H+ZFBEqNhWH/G3Ap78lVu4Is8K5r8CewpqkCh66/XvX50vFNuuc0zj0E+M87Adbk2qwL6okcmpnvzN577M/jN735+iHLWBvc8lu1vfMjLQ7Q4tYRDe93l3LE4zz0i+pp/cmX0xEouVFHw0Ep+3aPhBUQj/jJ+1Wtdbs2BNQeuCweWXnG+4DM+mmB3hKd/8OfgS7/+B/Cdz/s5fMoXfjN+4Pm/jA98n3fGWz/lCdelYW/yRDIwmutIRwGjKK2JlsJVfkdNk9wGBfTS2qraq0ICpCcABABFd2ZSfQqPCHjDYnwOgCQ62RQELm0K5JfT9GUGRemTZHK3CMjkFn0JpIgljb4zqiIdIaC0dfqHHQqbroE0kZ4BSyFjhIjpN4UjjKD0o/88l2W7uzSuftpgnr9tGW+n82gC++3TcIY62Xr5HWHaQbpHZw+DQaVVxdE4xS9iAhCoYh9MmpMq55xF/iyZQIv4a4xBYi86fX5DGVcwAlnGHjM2zgGqI08qHPz6j5QPYhf3bWy6AGJNH/C6yLeSfdUZXmkjTbpfdF8K8KYsi1fUy1lYj5mGdyftBJmuNkzSxoFBGN5H4sa575GhRgmY+EpkgZ5W2ZcpjikaOuqz6QMc6w6e62RtcGOqVoyShjU8HejRtrbrhVbzWmNmBY0W1VkIyGl5HVCSNxaFNTjXrzcRZrQ1lzLR5B9xjdU7GVY3b05bu2sOrDlwwzgQpdsS1esFthf82Dfime/1jvizv/q/+LGfeSHuue9BfNYnfSi+7t982hKUHuZZHQX2CV3sQLZTmJq0MNp0HlTxBZVABZNPKHpc9OG4slcv6+gn5iMN/+vH/hC/j/dGUXlcLxnnKHi6pKrKRxr0oprq30vvdFUbnYI4r7zZoOZIhFRfxce/43FiGLWuk4TCDPjXTJUNpqQ/+M5nOWsIMlNdYz7yn9MotYaDxyqUj851uVglpD0qfMMe+RnNzlsClMhPRW5tR3/Cq4pa2qgum+ZPXwup+Zg/LyXAK8LO4ODc6dYWzvPj1DlaPIGEpk38Zg7qC2mTHe4gLkQsaWkz1MGimu7N+lCo86RjuYmS1m6HWl6X5o8n5/XVg6qIoJjZlr7KwqHYjOVND3xq0zvhPLIJFGZelKn/S1eUCqQTIZhmwJtvwpSeHd2rTeNDsLIuuNfL2hrE/u4n+jryZMjLTL84ZsOW0xZ1H/jGb8bPP/kLYnbyMXqy7TmagOkOdk2eDC7POX9Q9PjUHtBG+mUQXGHCe9TDcPOdktbOmgNrDtxADqwkOd7yyY/D937jF+FFv/x9+D+/+3z8+s98G/71Z38cLm6v/mjzBvLgDVP1CYC3o6D1MADBofU+1O0S4G3hUNYedbHSsARaSIBFAT0ulZuNISCUv0mCTY6hxkNx5zUFAUOEXEA8qwvsps/T6t8Ki349iDkKoSVFnMcYQBqjeuAxnkR+qb/5awLGKEMURqawsGzfeapTWZJBK6YxUKCBgCi9s6uxsR3FdagrE7Wk2dFIaJogvmOKMQ62x8dqGOPPY1u229QukDAJgClQOou9SeRlBrxibz7S4HVWRRlXNax3VlSEGfCKsxZN77NkmzV5zjRTRCBF70qXMxYz0JI2neB0ETFH0MRbU14IjMrT8r6UW9uUSYEljTS2xfbRdhsCpmk6Hy2SJtXhkqu4VUweknFXxuKjUXRlOys7mAGX7IZ9VqAXreC5zcU61j3i3NmlEcH8dEZ+i7geyL+qCfdab80DDihdqAs8+aYN5HVWKZ6dLM/JW9h4j4ieyU9C0lquOCR+ek4qSxY4c8DvkL621hxYc+CGcGClO/G+B67gRX/y1/j13/2zI2baRMF4Q3rzxlTpD/wAXv53Lz/Soi4BXr3MYZOwtUmTpDQ9AltdrAKm6gnVtPAi/yhc5Z02ccG2fMxanKcyEUtGoCiv+fr8mKJ3rkXiGfjW6TGrsytNO5GcGUshEjS8fDo7msT+gPycaXhVdeqvNgEKzgqv6CnY7s5F/grwagz7pKYJiFUUqv348/hFSo9hHcFBH/B6ziPrDnpVDkDxinP9nChULtLoAV6BigxYBAY1ztayNQz87cV3wrV3eudYZkXbkNasaAK8MJwj7F7bA7x10YRstkhtDKHlrZL1CfiEkrN7hJUxgrVyW+G5mQEymLGJJ47lsOKv5I12LOCl9jFvIgLpVId4HsIrWkPOBxUdtbxB5Nk/ALzeqpeKBDa2DPKJiro4iI+p57Mv5X8+wacu+9MoF2o+bclUba8dOW5Zt3Bsc54Pmpg9AiWVDlsEvV3SMIckzisVCf5VLdLNRU0b+4Ve3dp4K73jPVoUulMVWps1B9YcuNEc4GqxXBP+v799KZ7x0V+Mz/2K7whneHWOt2/2egvrcpQfmtwPZS1uY+NIdVoEFanP2RRpkbTpjfiWmiQ9olf6qsZUEZCF8jNhHkIzAT41ZYjQum2inA/h81gCeW29zUPcAAAQAElEQVSqbzM94d7dicT30ktrg2EUDj2ctnKVlrCkY2l9iuvgDG+HPuDNR0ZAQX5uIce6RKNNoFaANw0fU+LVuMj7IIRj1LntggMkbacew/eJhXmkBqVIM6xgzgkgBKANAYJIzgQ5AwKIIL/F2/wSkhM4JpDprEPCaMy54tUnYOKcgfpC/7QHeDfKSajAJj6HwAqW4wQ0NCo666fqUwQHteZ88fRnDa/mjzGpXYxf5arIJ3exPlJUT7tVVwZMuZoite9IgQUjNoaxvaM2zkmn//ySy6a+/srWs3D19jdDw42Lkor+OCjinOZSOtIwphJEn7VTi7Z7L9Xa3Nlz1DMwFihjHzMP58n1Aa93DnXvvsEKP5/rY9liskcbyCAX+nEOydF67gR4r0M/RW9t1hxYc+B8HLDLFv/Rn/nV8GLaT//g14aiv/Tj34Q//O8/EM70vu97PG19rCFwJVrF1tHnzJ5AoYNFALzwIePARyCoBbKqBONC9EqWrQ6G1OTHbYnS5XtjPZMmCghpk911WoydpcYv1aP/3CTvbjq7m4FvnV5aK5PAVZ5VjSEfddZSH9ifP9IgeWOZjqSltKWFlWBctbJUrmC7+xre/Eg/JaNNQMzZHHN+1wW+Gli07FGcL+Cv8+wT0+gN1+ZgwvTgXdlyLGnq2Pj8WTJGQePlWfWIj97zpkbTRhp2sgTunLw9rriACVhJ08b2qB1Pf+eJHJge4AgRS1oOnKsEPipmCMa8DryyLoUF7AWINLfyuVpbHLQh5lneFoDVp8J0j/dLx3vUw4jBTMhAXBsdBle+Llz0oex+G0G2nYxDWJYn+Jb7s+6TMHrsmyGPqTatir+eRveMjhmNOXdEd7uOm235K2fknMsUBWlULtBop7HPIdCzPDdmOainJaUmbY5YwTWJfycW5ZOmnKb3JKxlG3PE2l1zYM2BG8aBpVfyl7z8tfjoD3lv6PNkanXbtgHkfsbHfzB++w/+Evfcd1nRa0MO1PqEAN3+1SGyXBjXpIVx0DUhi9Kq+nyLo+kJFJMAXyBO63//aaS9sx8FhNbt67UY1xI8rENaQDqw7OZuOtKwl4Dv7EhDbIayrWxEX49iK8rz/FkybSJy/cIv+eURXzpcD5lTkEgGC0HDO7c3aYqaUAq4nsAh1zmv4W0JeOHsjH9u6ELds4gVPE4AZJAACYFgJhHawIBASyOm0y/+it+ePCkOmsGUFS4NZiqml4qCl3Hy96fwM94ngjbjXMiyquXUT9JX+QDsDTegKQwCT50z9Uw0XUubV2nPPX9K1jnY5BOI9JkwUg1XXgNUb4yIN0fRG9sQv6TF6qAXYPebOJ4u/3cW0Ul93d212Nr26LwJc6c8Z50iPW8G5J1eyhPoNUzcrGTTc52uqnBAWvP6c6VP/tDRI87XMP79DMv6Ved8mU4zJkWmJ10KOQcYc337jPVvzYE1B1biwOmi6hiSbdotF7yTH/uo2/APL3tNyHXzpQvBfc3r7wnu2gL0aHTkNg+xooOFDLgoGm4WlGjGUZA3KJC/Yav4VYyr3UGxHmip//Zv8DZ/+8shbX9SBNfycZuzS0+BUHbeqpMQaJIm58Ilj6zZ3ZsdafChWHEdBKuj4CILof8olQG8IUDR56RUiZHFsBxDrbe9DkKntAYd571oRsDbyTszjXYxDFXXiackxafpBgKVmjX0KSoYAV4XhzGEtZmwQjkhtJpVsN0Cziptek8HKiJa4kAEDS89nk8mlBdaC1jmvON5qN2kp/qDhpf+pjuYzya93GmKXseVeUlTchw9+xSKkaZnPcEvi/3bKB2kvTb5/mReY8KMUo6VTO0sBpseY9SHyutReLwrUjTbJl/NOuWuakr2qd7osDcDvAdneME00W0ag5tu9sjHVOq0aVXa9TID8rLlbTLhvViSB0W6f64X/WHB9auKc6Q3ZQ+R92lNUqS3DnUR82PVX5p/I6SD0nN0NKY5ypUe85vVnLZ21xxYc+Ch5QBXi+UqfMydt+Jv/+EVodC7P/1t8YPP/2X89v/6CzzvJ/9biHuLJz0uuGsLKLnAj4vDi6KHYIuhQCWHkkC95fu+kwEEIHxOzAKbFn8RND0JMPjff4G3f9mvKBr5JS+t20m+hvjzWBnkjYVCSUhvic9eWiPg1XFmfZaISQRxVs65jCMA6cixqgLhVwQjEjSTBPINO2aaJtRhCkehE7znskrS6aQ2I5UAeJO2k0G01O7O+uesoq6LKZ2BHsNKaNI3o+mplet3qixbzqxZ8koeVgXLzYEKj69O5QRTECAJ5I6nLXTe07ImsVbtMRZwHIuQcUXr2OIEJmDCtGEFmW66X859pIEkDfskskHDKj/rCmHOXz12f7ObNlCm+8eWFtYYKH1VUxFMDghAJ+CE7REx3msCIy4IgJrSS17ZWziLsvLYnUYN76EjDUXcMHSwuHiTRz7SULIMrvNvs+TmgX3Uk5ea4NSZVEF2U/A8juX4qHw77eQcMT6tCUrwZHB1zn6aBNp30VNmsI+iHwznUF4nwrfODSdcSFhbaw6sOXAjObD0nfixH/Y+ePQdt4Q2f96nfiT0ktoXfs334r/++h/gyz7347G1OQxpawsB8E7cYX60cPDgas9FEfT1+aQ0a5nWj1zSb/loe1ZEqCQFzGicfMDOOArdsBjbWfS5PLWLxafp0Z7+ycTBGV5gY8ujI0hT75z1MfM5bMksyZh6QIFNngZSvsOE2hxPvtoZjwGBOGtDjnNZlNfwLhIq0PAp9EE//uCDvwpUYgX61Uyqh+C5rJIN9wRjhn3rE2q8hWFajiuHBhbn+xVsd54/k/0D8KApaY3BPoFDxy4ba6CHEgHwsgyvc1Vsil7LWU8gFvpm8jHsEAVqY+WxLk02BVYwBcvncQzfG1Zd7FMgpUlFj97uN+EeZaB05C07Tu+qV02Qqa+XTOYALyfRYZKcX8aYw3ErhMRS/bOMvUkZSpfXrgY3WpG+5z0iDW+bJm5dnI+vkfZhe7OK4FpHGmo2qiCfQ+3+cL5zhepYR9qfHCGlozc5UoCXTcjBlVzD+RMK9scpzZsQT783kZcFNbzc64TotbXmwMOIA2+SXbHLtvpDnvku+PxP/2eh2O23XsLv/OJ34+d/+Ovxx7/yg/iMT/jgEL+2Ige0sE6p+YuhaHsKmWCIHEzSWMUUwG7VMMbk4Erutae8DXYQP5NgeoDXjg8eaY6bKCAog1HgfPXlRurRpfx6FCxXX2roH2nYpDKkSwCiKgplOZexZK5eLNJLax3hiIgJoGTAzWTk/usMr7sO/awIRroibhb0UXm0XtUG85cf8W8J6IM3bHSi7/x2VVh4dsZ4aXAP6uu8he0hzaru4M45dwrWkwFvs3eg4VUvCmsxogat4fNptgiTkSHXO7bNonLnG89DzU4BARPQP+29tNal+WzK89XHYQSCBQhRe/ZbdTF06DIZQRXkNdtyKHHJQO1MKDExdXCzZby2Z4DhJg38GeNhTMzL4MpXyfHSZnB3erg+EcxgnzXh4iWP1sf6dHZZ6dfTXBwejJX+YUxR2EA+1hi857bcINZxsobXH9Rh3YF/RZ8p4ybCm9iXQIbjGFxZXOd8GkMtdTb5lbQ2aw6sOXDjONC7Y5drxCtfczd+80V/jl/7rT/BeDLFcHh0YV2O4sMz93TuSMP2JYk2Gs/+EvTSnl1V2RBEKGEWtbRn9ylvhQdwcyhnssBmyEglR1dXPtIQzvAmQaz485hhWUBCTC+oiM4mNbqzIw07Jmp4k1BPMk/ZVjaFJQ9Zo440dIjT2BKMHSKYNFcCN87GPIfSlwxUBEldEQWmQwtPwSYS0sxXtYCLVxCD4vx1BUK0StVHwRqAUOIfoyGQ4nqqo3rIWA0AnVUvR55m8DDdaw6RUZf00lpLwW7Id/2zMIsORmVc7PehAksEzHHschYCvYeONFDDLLI2HSuRfxVTGM6dpKULm6IAgkwklcZUgXxm2FYGxhhFrWyy1r+xESzNCOX6yNcQx/vxOHaEtCWsknSk4d2dxg3aoaKp7x3vm0s3IWzUzte7Q9QPBS4OSmTaFecKWRnSzXW4HwMhWraO92RvuWPswdX/SoPhvDpIWc1ndE+yaGdivfQeusImJk1qp/ckzjl3DhFfB9YcWHNgZQ4svbZOpw2+6lt+BB/yz78CX/y134ev/OYfxj//V/8BH/FpXzV7gW3l1jwMC07LwaFe1ZvxSIMX2M1CLuVwtYWjUEjBlZwBq5s9Nu0BQNP7LNFueswpuefs0lPg2HbpcaUS1C25OtKwl77OoE+ACgB3Poq+8jrU6SRGPSAtVgunKiFQKI9NAkYaX4VBAXVevopOQRABV8iLAjrSELysvYU+yNEi9i/GXh9bAMGH/rCzPZKdJyDUAKY4Ae7c7xS1tFNQSNuN2L93+vefDCNUm6iUzmIvneFVcyYTEH4bGLLeGZNyreZY0s4lvSVBBl72+y/G637oP6LpaXh9+l61Lc43Z0uVT3UGwMv2Rx6z4v7VtiGkIzHOnq+P0m6K2NTxBpUnGVYNr6ENFmBUj1EEzvWTRl73xl46w3uIWLr/OlhcuokbNQ/Wez6e4oTfTRsHgLsuLaoizi/MHdE5ofhC0XajDPny/jYEelZYa1PYuzi/UnAlx+Q+GHN8eY5lnsdF5WHeMKw9vu517BslB9aNeuPgwNK34o/89K+G87r/6jM/Cv/5+78a//0nvgXP/rLPCL35kn//fRRQUUiEiLWFphoc4oKCRkKNmh2TBGrOoJdM7DlBUz0wmCIKAJP+g5vo294/BNlppA4EpH2wSrwOZquKgqzpfKAmgJs1vDvS8G4ALR/LK1F4Q+55jHOG+kWgpDztKLhFK2t4TeLhZDfORc8KMxhWvlVNZTH7SkNJLvcFqZ5y6oiFMWZV8seWk2bQE6BYSnNDiJkzteyz62lWq0GH8/axdEDRPwPeAyUl27A3adFyfC37ONo3AejDWWgscI6f4VjOihMszPz0TJsDfuaXj5yLc43JK12lsUCq0xBEi79g/wIx3pfBpWWSytAU7ty8JTkYWu1828lPRs++wwtrYIxR1LlMyf6VnBO747gW9In1H8XfdIunhpdzpze3+nnP69c9wy4FMhulAzFv8F9PyyWizQLf4TXOnbvqTKOzB7QM788ZYc4hDxOCqs4azrcQWltrDqw5cCM5sPSd+D9++0/woc98V+iFtae97VPwpMc/Ch/7Yc/Av/vCT4aOObzy1XfdyP680dXd6EOxvVaVA4egZZD2tb9IMk8AvAQQ9K58bRDLTvKLMT1AbfpneNNxXgHe8pz15YYKLxhjuOHpQtThM7zA5qYHfBQCWdsVMq5oVaoQnppVjxHipiL/q+YM/LomtsUEQbv0VD/SMvGqS4/Tg4Y3gRVlLMORhk54RcElzcnZK7YdQWD6Q5nazjDazOKqgYfLyGIWu5ynMAZus8enHvgsGO3J7xG1vJb5RmkOqWklV0uNQgAAEABJREFUwzjHz7iDfmCO1rSv4U0A1NXlOWoDHPsCIRFR0TEJ+efqVRInc3AMH5mXPXATIlewLOds4w4f/dLjb94Y8Ll+a3Ad3ulEXVjUnJM748P1QT/1l2614QK7PQycBhJvmF+d6pOGN9/79jrWN33UnaHhveUuhGUN/+yPceHe+FUhhc11GEdXxfnnT6ClJ0udcaoO+iyZMz7419aaA2sO3FgOaOlfqgU6r/uEx95xpMyj77w1xF25lp5jh9Da6vSsu8eGWtjMOgSNQA9QKEtBAeWy4FPECmaTmtQp4oJsehLgMOA1gXJBRRllcPBfD8uy7dIAitbWBY+rV+QD9vRZsi2PnBZjz2c7F/tgy25GKMYAlgJckd04vnjlKwdrFHM+UzpuVhLDHKg9TlULcOsoiff2fBUcU3pYFvBsvKF2vK/h7bxFFyZTLHTbY1rY1O8Ys7ztCJIKgqBcMoKxGCrZb0+5fW3Cetie6diwNg/LcdCLdTHXarZozEpyDs389Eynhna6BE7ptYWjvfpVFZz4iUZ4CsK+IdXb7zOmTajElCYnh/CqlmXBTo8C6OZLY0q2Iv+TFEPeWpqcvqpbOIt6w2N3wkcgc0TChptxW/HT6Qgvkxq1gpFvgKvm/WdI946tA/CtzROjrst1/7u/J16JJ6BtjvZh+KcEvA+8alaPKTQKs+BKHqP5o5JpzsgL3RzBQyv41WNAX2m4HuNJqo+ca93TNQfeQBywy9J92ts9Bc//uV/HS1/5Ot7jcYF58Mo1PO8n4nd43/LJj1+W5MM6fysk1OthMSDLKYzQeQo5gqZemmShknpRS3sdF+EZ4E1HGh71hZ+N7V9/4YyWztQqoEfiLq7LCp7bOApqASIRChpdekb7CP+AInyHF+wz28foc1+FjaCnLP2MlskAP9XRpkecxlqUjnzH+X4VmZU1vP0jDR0sJAM9H2Vac/560PtJUwfSLPd38QSK9ZzUEPCS3TmIt3+PfbCbs/AqntIY5H88Af3YHzkyhRM0kw9QvTre6wj6Pfla0sSU1WxzSvnDL61FAJo1bKvVBugxO1KdOsPrjYXnZibQ6z91SYBXZ3ivx6g68rBNX/kIdR1jaVqLv8ckLRWleVNWBLyjo4AXaaLo2JGIeloOhvYb5nrXx17Eez7pZuRjT6rJXMf6NoaCzwbdMYD30AaG3TOp7/SufNnChbL+pIHiOtSZOGMc91ZVmmuh0Npac2DNgRvGgXhXLlH9F/+Ljwm59ZLae3/UF+GjPvNr8J4f+YX41d/6Y3ztv/5UbG5IhRmyrC1ywA+GtA+uesilnoKPuwU+yZSoOUgrqw5KOohZ3leQwJ8X74wrQ2rh2y4QGP7Fi+Hujf8Bbx9DjEYmxGsxdtdR8BSRbKCtIw3y7FK7q0+A6j/IeW9Ym1f0eUwoW5hIx/U0vIQtIc0gpnXURoaI0pGvNnjPYxUUlhkYOYE99kb0PF29NNaxXv2DCMVdL6MvPnjWO3jtq/HpeD70G7/t2+PV7okQOFJYxnug6EcocklTFw7uYo2X4M1jSRGNPpRsg4IyJQX9mBpeJRnOt4Jh+Vc1rrCzop5tmAXoSV8io4+3yzRq7AvtLkLMalbQSGcQomMSzgEE+5j/pcpt7VDk/PN5lgg78qnVofN+GYIjZw/6D3qNMf0cK/mroggvdF6b1EfLp/o2LrAypnoYqG14A/3uuDDAk2/enFE3nDP2OvQxE9RmuoU7FvCGdTZnpGu16NE9z+XIW5U3pjdOvc2h0rwGkp6iIHd72Ri1vtYcWHPgBnEgrnhLVP6oO27Bb/7cd+BL/uXH4unv8Na48/Zb8Ckf+wH4ued9PT7hI993CUqPjKx+cFjDIsALCljbNQCFXebC2Aygc6A2CaMcv6zrjMVXbX8n/vIxHwiTAMJM80liLRwBLz289O9oS7f0FGDJ46/HXoqbnRH7tbXtQ6YH74+rvbRJne9gTAyHxHNYud3lINYjUpbaOYVsqsNPowbdlgbX459dCCh1SdhJw2sElgB4GEiR3xHQ57oZfV2uAHhTfzZsPDh717d8B16w+SlIx4lTPZ6uDJ0VL/HU3bKB78EXRwpCt9EHpcmrF/MGhcN0Aor0DpbgpWJYaaua/pSf18hNWoLRTDgBUFdSbZbjVnRtbUJJIy1u4m+I8MGOVjpC4XgL27TBigmr2brVXnPrWx8uTB5beBgaJRgyw8amKbiykRZb9/fufnmEhjZQitxORxo0pmyAoh4SY1iZvw78zI21ZJj4x2UnRx24/z973wEoS1Glfaqqu2fm3vsyGSSIioCKIIigCKhgQl0TKmZ/c9Y157BmV9fsGtawZtTVFRWMYM45r4qi5PCA995NM9P9n6/CTM/c8O6dqb7xzO2KXX1Ona+qq06frq7L+HYTRCoCX+PX8OYl5bncbxXGId+nGmMFGR8n+QkCgsCyIjCQtrNpwyg95sFn0Jte9kR612ufQc978lmU8KT3lBe+hfCf15ZVohXGvOizeKvRBime+RRbBBQrgKG6OWmr8CZK0TA/TMwpv8ps5catEwax0kzQooSmnN6EM4TJwkYieIk2lgrYBYX3isucPLDCYO5RpGyZYb2UJznQMCwrQjhgihAvOBHmUGYQyRJK9EBdHVf3uLwxYtMJtTrGI7QdLLxQfX21bJlYnlIOs0w5CydxGhhzN+qwKDg27MSaGsV9kCjnvsjkuP/kCKyDXJr55tyIY6mxD02aSyq+psbOFhrQ893GXr1r3MlqE+wFSzJHCcsPECZe4UB8UKdS1x9Uc5oKAMmyWVosnw3ZCw80mrVHrVx5zh74SBjXL97mqZ3ri5FRG1eKZQ58DRFwtieG9KDwTocPWMu0+IEbydGOwgueyFkaByRVEY+XQftpRXl790SNl30Y7hprz0CAeSKAa5d589ieEzckTrDTaF8O5RAEBIHlRQBjT5QaXHv9TvrGd39OTW8ViUK0eiLVc6h3Xyleq7ZQe/MWKnjQVQVbH9tdhaIgxQ8Nw088GVsdYCmeytkK5i1iZcUaFt6JCdWRe1gFu0OII5lxdPERjJ/LKSi8WNMb85V/ikmOeeKjEA5oqrGJxi78C8FQZpSvh1/DS4axZYdywzrtSFNKrHzmhSVXcNvhTTWSSvkC9kwcD/0FlBLVQmAdFF7ttyUrYF7mXDMk74ApLOJMjggC2YjzEkOUMu5pommS+5BisKHw6iH5GtPF7Fc/YyaO3UwfQnNuknLf5nCYw7DVH9cr0ET9vUKrWFlBPlxQsHXdUKoVsoZykDMt7YKRN+oE/hr8PV9EjR6eFyqa1XLaQRsQndWNbXB8+MUEGa1nLVNFJlg5znGoGwaND/LDXS9Rj2vIVDw+hvigYdglxHcZS8a/7LFx4oeXnMcElyAy5YIhU0JBQBBYcgSWbpRbctFWBkO9oW4r8u3Tn0+nF+dRvcHqUWIIr9/Lk2tOmqCoJmaeCZ92/8Ore1h4m0VCYcIuzwQttvBOT3XpYPLppoaLZYmyBKZYUcISBiSuvEIjoJExpz9pzEw2ZzgvNcpOKcZ/tHbt5gNJ8axTWLLK+nmTHyo4pjNDw6HKRPyhlKPtLLyOG07hXyfj1XAs+UAzOKUczwxKNmcWnMY83ukqfkJVqlsfLrboI2NMcVHvUgnkOHeTbWN02B7ckJxEH3IKLyeGPEyqOxSuuKwb37WLqF1uOa/RZLWsU37QiMoSeyn+IUuhuXcwpjaDlRUbsof+xAHhLUI4jfSgjp8TSG9y48HkfgfR+G1PJihHrnUdVVsVFx3ar7uXETPoTHgr+qjXhXO+X52SPaNoJRmGweQjGm281coVt6HfhrCHcKk9ke9vFUQHdsa4PlqUlOdWab9oxQ9R+BASm6igbbH948DM5EJBQBCIhoC7c6ORW35CO3aOE3aNWP6auBqoUWfhvWTz4fRjOo4aDSJlFFmrK7QWcr8cCm9WEBRWlzOYz6QJCu90zhMAD7yWSolPixVeTHiNkcKeSjAL29jwXuq1Z2w/FpY0XH6psoRh4S0KRbEUwlriuq7OnFK7fcMBVLv8UsJPOZZE0y0kiVLD1nPGw6WG8rVW9nrDqhh5XK3CxLksHoXznIx3GFf3NCxpYMqs25PPZjsrZ/CR+ImYowMd2gOnvfWTSktu+gliqYGmnHRJWe0vs9C01g5TlL/sCqeIIj7FVuTCPtYgRYTlB4Rf0i2D5CDOBBnxRkprKrzsVBQdcqbp1v5AOU4C2J2zi4+kTKM+mtsLp+sbqLVtDxs3LL8KfI3mPmSzh/agcM1GZNJ/cDi22Z3Fg1rGfF2qel8pbm8+YnFKuP1AMgx3ZbodXH1mEpYj+PQgQaAxutG1JWjk4QNZJLgL5YXiMYcjnEb9OJBDEBAElhkBvRj+K7ks1g5jDfFtznii3TXiQU98JV11jd8IdpaKf/3bP6MjT3nEDId9hlH8Dvd/hj1XVp4np6bpuLs+3ubDKoJyu3PZRmeN2rHTQV3DR1Y88emiTQpaiyeA+S5hhTdRrpzPXnSQ8sQFS/F0O2EFgV+7MwVVmgly0jQxQaQTPsFHpofjxyQ6Rz01Vj2Bwht2abjicmXPj4wWrEsUpHlytxlDeiljCBK1kRwBTZpRSmASpIIMZj/O7Vh42fIca0LX3FDteoPKSxo6DymY5CLJx9XvHMrTtDyRy/JhNYxG03kckB3kRnxQB1Z4YML15TcQSJfdJPoQK7yudctnFh/XLE+46uorQoxsPy1sj3J5nX5cktmdWbxvUoDHBlY8FFkgPY1ZlHzF90oypPUc1FOtqO77q31Wgtx5mzibK5KjCCnmY5Srm80Ywms0HM1+Etdtd63WWdLABRLt8jha+bG5kdKmCFb6UNGEcVS6oHw2C68FOpQkUhHkDGvIFY935H+lIZaI2xQW3vCmxBS+kASCgCCwrAjohXC/6OLL6WVv/OC87gOf6O7zuhCasct87H++Tn/66z/pm5/+D/rBOe8kozW95X2fmZNNwYrRSKNOX/rI63pclvLsVrrq45/7Rif1xa/9gKBYdzIWEClg0uVyu8Yd1Ph4S6faWXg5PxxtMpTVCjZGDjfxJIm22xFNtVPqKAilQX+aMoLlzG2XoyjmL/OTSYv5waIL2lf5JQ1QgDHuaxWHZ1BgmRXYUJNYXkw0YMJti8yO1aWWcH9AzvDOaE15klIjbbIC7+gpfshADB90GURiOywQZpplhTdvK7JvVDHTekyN0jTsTzMN9M/d0YGFl4tSwXjsruzuzl/91GfSdYfc2Bb72U9SutepIzY+NalYpe7K1FmiE8PCW/ct1WoRa5kUOki/RZD4p1ix0VpxrOdYdMIwiTo/+OHCvGC5NDtOaG4/PsUxIvQlpCnCzw891GqMdahNUp3+8mfHd2yToml/A4V7t1OwwsidbrQH3eWme0bjkABYpajF98RMonZA6GSbCH2H/ANXue+HPb8Do5w035+FTSZ+fLAJ8QQBQV7gwt8AABAASURBVGDZEHAj327Y79g5Qd//6W/ndRdedBkdsO+eFGuw3k2VZpw+95s/ovudcTLttcdm2jA2Qg+932n02S99i5USN+jMuIAz6rWUDjpg7x6nVJh6iB58nzvRBz7xZdo1PkltNqm95yNf4LzT+MqFH4X/oGjHDgc1LLyY1BQsSZhsPamCFGE9qtFd/jTAD9fj38xOthJSE+OWgioKG8LLSdM4Z0NZYXCQFc3VUmNp5QURG0Ft/HK/pAEWXmJFdEjxLE14Yb0plExM7HbNMpQ/PqmUw7DgNuMkKX4ISH0e0sM4rVkJSwxlukXBQA/6xD8Wm7SDgFPxjtx/AZgWzmJP3FcgqnZdigo/AYc0DfFTSpHOPGEwodl/sPAaxRbECEzbNzmMpjZs6jD60+8d/8lJ9BjXlji59Vc/Q0AUgadJlaWlmvzg4vGzGd5TYO7j/BRK4QErZA0SYhlOYzQn7IXdgsLLWPMtQZvrKd+VjiIslYb7mEsN52OXBlBoG/eWycbJ0DXbE0QJ/2lt2ltFa9ynbeYq9IAXlM9ZLbx98iQx5PRKc4H28/TLyjYMDTm3KD8X27Oon42IJwisWQRWh2BuZtlNXY887GA67+NvWJCDsrkbcpWc/vs/L6cD99+7Q/sG++1l49fvZO3OxmZ611y7g17wmvfSy9/0IfuPM1p9E/yJx97MKsOf+eIFdMEPfkm1LKVTT7zlTELz5XiFd3yXm2Cxq5XiQdeU1oDi8oKVGLxKNqVBFPmLdZjHYSlutlnzCopBSbFu84QHmknKHIfkBTplV2PFEukWNF6OHHTDgi75p5Mblu2cZUxCnfj8MEfmeeW5oqxO1OZQcfsVTDSIVUy1OEWkWYGLMtExNa343QCbVqHwTu5scw4RJ22I5wq8XrWJiF6OL+KYniEnD0fx1pQNTVyXWo21QkjN9QiCo8CAzrB8sP7byyGQjcz0pqfAj1tUzTy32BxjmAgf5et+9D3Ts30ezmXXbUdALLgLh/B1TdurrdVYcTxgx30IJ5T/L4WIQzlGFREfxiVGEyy8LUoI/RYKE/rshlpCWCoD2gq3LSIRXFinX1Z4C74Hr73GEcczRtPLW0uUy1yFfspjCpovPOD2iJDnPUkV42HCcCMxVWDJgT3yZokP88wLRSmPsTiZ+PKIixMEBIHlQ4BH+uVjvhDOl1x2Fb33o+fM6SYmp9lQWRCWGtRL68KgnIL+OFtnEfa7vffcSo984F3pkAP3taee88p30+ve/jEbD55Sih77kHtY3u/84OfocQ+7J/UPmNePN2k+14Y2RkQ7drgJxaQ5FTzxYQ0vz3p8xh1tVkSh8DbZ4jIvvZxlnWzNyXOqWRD+GcNkKyXQn8JAzAOw40L21T/iYQyej9diz01Pt2Gwoma7IPC99wOnwMq6kY1trk7BLp+z7ovlpxjSZjunLGN+OcvLkzfWVmM/TvBvNduWd5FqmpxszssXlq5JLr+7OkDAnMGDwhv0QShEU8CZTwLq3dFY7PkcTwssSbDwMszEohI27y+04ZCB4PMtxmKxtPvLF0xHM5QcEP31r7YdIRtc/re/U/qtC2we3hJY5Z/599Mopye4ss12QeW8/vgUlwmb+OM+AO9f/EzR9TsKfiw0SPa4ndzH+2ksNp1kDjPih8GcO1LOFjkwQZtC1in/sIQ8xf1nKgLPgu/deiPnx5aEWrmiNitFoD/FfaejLvHY0OZK7E6enTwG4O3GfOXwNgn022HBPicg5/S0k11nRONTjjNXbd77Yz4+y30O/Z5YkcU9ASzLrt0uWOru0VZ6t3Ki9I55xvVd3BdQpmBFGyFcu8VjELcjeOdcEfRjnRQE7jvHpxbEE3TECQKCQHUIrHiFt9lqEyyxc7miyEkpRViPOzXd7CAV4iN9//ghFLj5TQ+hZz3+AfYfaLz0mQ+nVz7nUYR1wC0erEIZhHe43dGW9jXXXk+nn3wcsnpcPTU0n9N4385XXL/TTdww1hmeQDVxvaEd8TkcBSlKeQJqpMn89FjWNNFzlhnNDMHCO9F0WkvCpilV4oOBmPiHrXI005qv7oOc0zyXFiwb+N70yJw5EcHIjTSpghIuMAjd2a5RSvGEUhCMnK3CEOTkLDLMA/zafjLPRjSN1dI5MQNtwxemPIEhPp9LuEzBJl3smJAkVjxShluTlRSk8Op7vusHOafG3P5RpmiBBbG+TVP8LAEZSSlSNpfsa/dB6JevSVgh0GwRB8m93/dOAo/gtn3+bDr0zDMonZ6k6SnFOgb3WuZfvr4/nnJbGK5gf345PcJABuXhZW+YJPwmdmlqTWtuX74YGSWHbcnK1w8SNzVjKepWk9B+iuuJDPQoyJsWbSStU3yPj3D5QfiUrxlJNeE/b7XJUDvXpAEM9xvwY0EdL65Hxvnl62aLZ9znNLf8bOdC3kjDYdfSbiwAA24xZuXyDdMg5eKjbOEN1626MGEkWIy8rXr6K3DVLC3kDq7Gbbk7+VB2vnIZjyUoU3jsEMdDNvjBac7ISRM2hOBq0VgjnXfsqXOdSH7rCQGRdZkQwL25TKwXxhZrbJ/7pAfRXG6kwe+zmRTK4eM6jtrjH5e4z703jo3Y9O68PbdtsUVarGDbiPcS1i5ewgrxy5/1SEoT43O7QcaT2HzOeP47rlP2olGujk4NjdA4mWuutnnw7ADJlspGqmg+ehhjUx7g5yozWkuoVs9puuW0sWR6ihQWXIIJuxa/TuWAWG9DMC+vuXjMl2+UJliLjFZ00yMKy2NsY0FIY+5JGcP5rl/MOcNgFEy0Vi+omRtWeKGkKOal2bHq4q10yYjhSSeZV1Zj+Dp2u+OfcpncGDr2VtO0bSv4ETmlwcla201/2B392c5r34ey3CmDhuUm/iXcxOhVYeJtZGZeGWej3Z+HfmG89TPhd99ot+A0uZ/hBxdYCZVdw6vm5ZlwX9XcF/r5lNMNKNhaW+I3P7qgjZuIJsYVK/WKH51cvj3pvayRzcuzTHuueNpg8JiemRonAm84TvPrItt3TO7blvPSEcUKixqa52g9JcX0WiqhPFekwJMVXuCLhzU+RdoQpUYviBeIzSUf8v2zNrV1BtLWQdm2EfY082nDYMDxDVw3XLMaXT1l0JSiol3QwQ+5L2378PtdG3K/0zw+sHido8ZvAXcnIwrPVyatOzwLtB8Ks2MYOzwVJ/JCcVu6MWGMx+T56OEcyU8QEAQqR0BXzmGJGNz5lOPo7C+cT1dcdS3t3DVB//3pr9J97nZ7UkrZGnzwU+fSQ5/yahuHB2vuT3/1J5qYnKbLrryG8EHa8UcfTnUeEHG+7G573M3opONvUc5acFz7Weca2mqvwcdcmpUAmyh5BSnCkoYUWkwpf7HRjGnjo7WptpvQqTRxg1YrKLyGSPOEgLyYTrEyhFetoLn3vgVt2kyswBRI8tSjCK/BKdJPa57OeGLJasQKb0LKWucLlssxaE47vrDwupzhfWOU3aVhz21NGvF7GRP3saAeJSbCLdVXTbXBWXhDdmG48TiBoEB/QYTTKgJrdAms72ZyZLb7xZ5IwOU5fFKtFuGbLsPqqDKJzRvGS7nPBplAp8G4TrAeOjVJzGEWoZIIPMMuDczQ8ua+xFF+aPIy2r6EHCJr0Y8gZ83zaKuU2qzwQmFSHlPHiX3uP7H6UKNj4XUKGlO392BBClEiY6jlxKVGNjymjujS+4bvSbRh0c5p5Affo+zvF3YrwfdmN0Gkx0bLycHi3I4T++xHU5vdmA4iPc3IDzF4sEhSIqUUyU8QEARWBgKzzCYro2KLrcVZ974T3fCg/ejU+z2djr/7E6jZbNFTHnWfDpkrWRH+w58v6qQvu+JqethTX03H3uWxdMf7P5OwlOEVz3lU53ysyPQtbklH3XaCLqCTLcl6oyDNljibKHk5aavw8txPw/6gAGLABR3FygnC4EI+/i2tpiJkRwsxWbOhpUPv7vdu0q1PDOogyx5DK/PUDfde1hvskoZmnpBqt6xEipQt0ZpyfJMt3QnfnhjCS3myg4UXX/fDGmhJ8aSG9ZmIJxxHGNNN3uNedPnRt55B0kDv5ck11CMGb8PKg8oYWOZmrulVeINyhj41PalIqYIUlxv2SIApTJueEHb0mGAL7+TEHNS5vC86cJB6K7YlUG4z4InM0n1TcJ9NHCQ4M7Cr+fsea2pbuScY+OWFpcusSIU8mzO4t8n/6+BmycKb8zgDF6jCwov42CpWeFPus3iQts9BwK6sfZbjLCi/QGN/yOOEE+jcr/6Erjqy+wGzKhsWmGdeKEr4jV24N4fkuK4vF+EFgVgI+FE3FrnlozM6Uqd3vfYZ9L0vvIMu+Oxb6JP/+VLCFmWhRs9+4gPpx19+d0jSMx93Jv30vPfQuR97PX3382+nj7/zxXZbtVDgG2e/mU4+4aiQ7IS3OeYI+u35HyQNU1gnd+5IwoMx1tSGErU6kU5USHZCTEImLSjGxLplz1bn4zRVWtcMZq1g4dXFgmXAdQt1mpUHfqPXKf7i10zRS183RS1vSkr0TNk7hRcZQeeFNdl+tNZmhRfLUQrqKGHtqZzwq9dRErHhXQL5TEJ5SSFiIGkaEy2TR3tzEPXAes0c5iJPtTntIhpioT7sgGqKJwB3amDfKE2GLVMgoLBQGJHgQsPyhH7GJe+nTa3tRMybhvxh2UOHjjGEXdjGdxFNsJV3VtJWs5n1zIIzE79Lg71AGyr65Cjv0lAw0BGgpSxBK7HFmPm12mg8y9163G1tyPBTjHYEsXCvtfw9j7ycFV6EcJAZH3wppYhFpNX6y7QihpTarYIK7puK3WteXKNjDhkj4jiVfobLlpIDR/vxynPXtiCo2m37ZsKYotOtkS9OEBAElheB3lF3eesShfumDaO0x9ZNC6KF5QvYvmzzJh4YF3TF4gvBolBr5J0L8RpcJzNh56GRoBjX0qRTdtDIDQ5td75uV9NTHTJ/oUPpI+kjbdowG0XKxmN6sLQUs5DNeQoAn0jzDUiR5om6yInqDSJYzDDRFHxGKVeB3K/h9Uk+M/yR8ANMnhiiadY6vVWnYAb473KgnjrWiEZzGRPFK9tAMPfdCRMqYeaF0HySa8X+cEdqNKX84EX48cSNoOM8Y8XK/gOveTdtbG0nxeU75weM4HYAhvZyfnCAhffvF2q7hreYrY+yUmzLDuHVyg9BihsNDvR8myp+Q4QkHBTeLEkQHcplaCumkLN21oaCxOnOXtkeW9yXESBlLkQJ91VEmqpbd+CpqEA2O8V3peb7iKOr+Khx2+AjXMVyabsOJuc3fPzANMFCcX9iv3PEGF9BLOE2LMp907cfzlFe8KEIS4OU4r5lM8UTBASB5UZAL3cF1jr/lGevtFZ0xBxl3VqnM2EvePDEKzA/R3XKDxLZ75Cm3foI15YtVX+im9DnNj8M2cTjNVUxFkNeHu8tj7KXF27gTyIoK4EuDGbQ/fCgYP+VcqtpTwW5wn9aC2l7ckhlA/V4AAAQAElEQVQv4QbCmk7iCU6VJtOc0yCdcnsjjOlqSlOBBvNEPSvqZHkBE2iOvsygAUiZmmsrKLZlOnigsGlWeNPcPUgp5cra/AE9qwiW6GzcWGB5KUF3MWjkfroR+lBtxHSo2oeJwL9w2ardchH2C1ZMI0DLlIg002rplNoFjwHgmXpzerCea0UG+TT8LzGaGqMFNVXWIZYT9yVSLs182u2cjOa6uJxV6aPfY0xVrPDmhvHkG2Rq0slYvkchXKx2NIxhrgqQdC4vx/nxnsc7vJRRytXDFVoCX1gIAoLAnAis7pFuTrFWzomUJ2zsIoAahXlaZV2LC/Lhcp6Isu68hKyBHbYBa2w19noFS6SNsccTm/GssYbX8OTKuVGPBAN8SREMxFs+LylPEuHkgGGS8KtopgvcJtssmLdIBrHa07mlHEuBALGaTihnixKUwQKyIpNBzVmBwdSWJvCRGc/hg6I8KEZMNi8cD/SngoUtjLuNE47z6aGOhGXSmaOvSm8HLFE+h1CxcmZy/3DBfQp5wzr7EAEiTG/LtoKuuVrRxIQinTjZqPxj/MvJQeJhlwZ7rWYeXjbWY2wWHmhchKhgoK1SHjKGCPEGJOc+1FF4mw5HCoy5LY3m+tDwv4T7A6yM09QdWDDOFCVebWaTxmHHlJbnGEk0aX4LAoVXtx2eYdgrPbfYyqU6jrCJYW7cjpYoe3nBXji4L7ULQzjt7qRwQkJBQBBYTgT0cjJfD7wzNsPVGm40xNpEyIwnf4Rl1yZD2F2BIvx4vKXN7h/NUc86TKMp6E2G9cMIrGaQSFk5cNL2nsr9jKB5Eu49M3iK5xwqioKg4MPCC0o33DJCY5khc+UVdMTlF9CkahDwwLkYDssIcgavYEVFsSXJ0uQ6hI9/0N42L6JXTzTl3I8CyemmsVGGmojzC5sigkXPRwcOEm6fpKsf9dJhy67N4DAtpm2UbCVcdChfda/esrWgKy9X1sKLnUu6Z4h+89lzicbGylkDxWt+BwNcXChNBeOIuGKLpw2bTQTO+XMuMZxvuDO2WRPiLkNQpDvUOOO7dFu65Ia3pETrTvYwkcwoAn7Nonuz5/xgfePD3INgwXXBfWl0HH7D1HXYa/H9A6ugjgxjGZoP++O6TOenjImLDefzbcL25KJLxI9vyFCsZbd4PMfSIIYYWeIEAUFgBSCw+ke6FQDifFVIeIANFl7s0ICyehYLb0GYnHB2eGd4At+0r6NTtvAqntiCsq01c1SFKxTRN0y3KA3+gXTTKxL4ACvkDRtaxYAVJeA71TKWHPa8RH566SV0s2u+SzljoRhbezKClzKGBTuCNTkovDyrefEI7R2BTQ8JbDVXpF2lZSr8pyzcvcybZ15bPkXcxgb3YOENSxpmUGFFAnmKZc+KKbrg0DPpwle/CVlDu8IrlQi3soUXBK+6QpFJISR1fpofNjqJISKN0pIGAm5woBdk9MtjbBZbXRHGcEazWsb9Jy8UWb7U/X2HbkeX3OiYaH0oYT5YJjVFtS4TlvOu9/X3Pcex9jzmPdlltNQxRaFrqLxN01OMLxHNr/BygQEPtCPx2BIux4N3iPNTOKF98Z/Wentvp4REBAFBYBkQkPuxYtAznnTCGl5YIsGODTwIZrgsnZE1UIbisX7TPuzx1Zf8uWSpMponUzfZ2SUN5MpQxF+mDWEuD7syBNKF58X6f8gaOmTdAXMLW7GIml7h3fK+dzm6XmEpKG4XTxnDnJ8a+rcly6mwfDNubxuJ7EERDCSnJx2vMMETGpxPJhHA5eYjFo+p+YOtuT7W2S6rNdmmjKZpfGQz9yf3oBHKDByiMXExK5ybtzj5LrtU99aFiEwtzk2iy0KW2yw8xGDHD+b3gH2/Qtff5EiOxTkMtxUsvO28r1+yklaQIuCv+k7RgD8oshBzmrqY5bgfGONAsqCcjfQqJFdvyP0nS9uu/ixf2GCk3cxdHvvtWo3wEMDRoQ88GPYQyYtOEg+Eea4IRgXN9SL5CQKCwIpAINLQuiJkiV6JGARhnav5XRqwoT5oJvWutQ5pOCxpqI8gNrzDpLphD0fnuitbLsK+YmUtzTjCByZWzZMvR6MeQelqliYAMMD2YQiNjtflUu0m6jTLya7hBQOe7BBg0kEI5QLrJhGP4dKELXTG0MZf/ISSyy61JAvGNVi10d42M7IHJTuQnJhyGGJ5RcF1KYApQ6EjtGeN6aX1wInD8nrZ3CkPQeGFApGa7kTPpQc/Qt1ZFqzhBaFL/6koyZysSMMl9ZK1EhkDuiQt0dUAz6d9PRS/lrak04R8lk0O64HV00/5Ct1369eIlOdJRKY5xaqnJuw2kJXy+dTAR8rMMvzXxTzt0ChAWymX5rYmfjpNutVw+avRZ5nS1PVPPAWHJQ2t6a4whTbdxJAxjHMF99VABnfBGbcfoSc9nG8eHoNaZLjvEj9eqFBEQkFAEFhmBNbCULfMEM7PHgoQdhFAqWDh7X9Ni3MFKapho3IkhnSaJzrTcIN7a8dUh5o2ikxC9oe5TmPys6l4HiZZUAsfqSEOh1enCFNWDhHGcIlxk4n9MMdbeJVXypTfVipXhnREOWtcfyiZ/fVvUWGz0N42EtkrT65T/nWt1l0msVanGIbU+IciUFcT2NsJMXYe2+auFm2gHVSwMmxiYatdf2UutHmrwxIfrqFtkRecnuU/IYZziwkNbgB/AbC9/t73o4njbkOq3ba5+CgRkSI1pBSDgkQElyaG8Ko77KVcJlmQYot2QdzFytkDx1MmhOU+k+2uwkuQBY6pTrcLKli0egkLzp7rWOH5imqJazvifop/jIIKh4/XEEc7I4zhNONGZD3CD+POrp2KJrE7BPPPc27LpKBSEZKfICAILC8CennZrw/utYaTs+Y/XtN9ViucbZMh7DZAEX6GR2Pl/3Vqa3y6Q1GxNpN4hRev21QsLanDgQivUZHst/ASj/yYHlA3ivRLE2MpYZ1iM7y2zXObR95Cl2tDivFwmcP7CdPKEw9iIKf5NuIJTilIGDLjhkXaVVqmJh1tq6dYnooU6uCyh/JT7iOmxvIEKvyqPUQVW64Qz5tOscDOEcADeUM7K4yjgo/WXIyo/+FQR1r3Y8pLI5j39fe+P43f+jaBLZFXfIktvJpYcemeGSqWMLQJK0LT04pmU8C0Icp8vx6KEV9cY2ZZvaDJvPsEUyiuAJ/DEdadx+IHmsvmlOKHBXfvb/7Yh+kXPzM0Rjup1Sy1XUn2YeuZ8H1S8FgQ6ODW2LlD0aR9PiyoXWjCg03M8S7wklAQEAQGQ6A7+g12ffcqic2JQC0safCKb89/eSpdldVUKTV4FK/wk4Zr2rys8PIEmHorMr5q1qUBe3BuvVdmqbEZraB42hTxBOAmo9S4evnsoYLUw5VmOeGBwRLzfDsWOq0JeNhzETxYsHMsjOyj1WalSLHry46XDKZ5pjjllzRAOSKWD1N6LFSN1lTepeGQu55CnZ/HNh+fslk5P6FFa0/fltxYFD5aA5PU91fE4ZJYFl5VQizEWWlS47to7CtforB/tcoMmYj3SQJ808L+YwRifpApOPSflM8lkfgliab6CCu87TSwoLLC61qR+CE1gN8ptuoigDILFl5fe+DZu6RB+zPDBzV+SCrA1JPSlNPOHUR4+6L4YSkvFCvgOLn6sYUU4gSBtYBAvBFgLaBRkQwjY1BJiMIuDYYnohmsePA0ypWbcW6RGZoU6YYm/HosvImmxJD9YY6PNK9aesGrGcc3rGkN+W2YQDiRutMcG/7IeNIBFcNKUU6OcLBCBoWlYK1QQ1gUjOBGsoQKVlp6SfHUWhDpKgD1jPLQcJye9pqKh5q4uSnWL2WiZYUXr4c7tIPCu8uZmNtsdU60w71TZtCI6tIZ20DEtwPhl4SnGiTYZZEsvEktYWru6LSnZ7rhy+cQ+Y/WFN8zSilXcBH+XEUT7iOGWVsrfQk77M1bkCLuroRt6CjCD/vTprWcJtrddc9W4fXytPLCcqmZePJZgsvhKc3GePdQTf6nqKCW34vbZjH2NozgJXyflMmAF9IT40RFu2D1VxPWYyvVWyeUEScICALLg0B3llke/uuC64ZNTsyG/ygtmcWSi4lIeaWNhvwZHoyzUde0xWR3lwbN+WnmiOOtvNGujMuJ46eJ4mmbaNoruIGq15UoJksWx5LPeD5vEWsRSAVGYQ0vM9R+gsfpGA5rV8t0CqaP/Uy1io9n4FPmOTXt+GjDkyvLB/5chVB0qNBoRSnj2SHidCKbVEVuQ9pl39sSsfKZxPpojeVwxJ2/596Ocer/CYbLJVLMkyL8EoAX6ATeAcRWi8IbAjZ/RrXwpkZRwg9oYP29b/s+ywlgm/P9jzcvnIx2wMI71SpZeFnWQjnybmEKRVOwHdVl8rntksT3T18FKKHt0pIGjLH+1NCBwS1Yut/BC0Sxhrdo51bhRX2MHQ1xRpwgsG4RWDGC47ZdMZVZqxXZsMkNxPV6YUVMGokNyx6sTIYnw3LeoHHDg38bGi0TyCdKa3gTTcZwJh+aFRXNFhCORj2ChbfNg36ZcOF5ZclM2cvlFhNPjLLTScKvgT9Aj6SpPfZhDdBhPLnDTef4D14Mx2LI7rbsjCUNDCobdchOgru9erACRdJVWib9kgbs0kBQ3JQmHUlIGFTnVAz8w0ThFd6clc8U/AcTqfcqDx7uA5wI63hN33p3g6cbFBjSGa57IFGEPsnKIPKg7Cq/BryopaSUQnYUlzFeQeH9/iwKL5Y0RGHkidTqRBMt/5TLea5tnTxN0oRYlhk+s8oPbqNUt3qEeD69hsauvKib59u3mzF4LNWK/nD/h9GVd7pbD5F/v+KRNPrzH9klVgnDrrlePQUkIQgIAsuGgF42zuuIcViTWPdreGmWgbetEoo17WBpRO4n8e2XdicBzdbXsCaS510yKn7z1zzfvMBU2m3koP8mEVmmnpjh17bgVLvqMsIreDU+Tpv/9zPIotwknf1jbUYML0l6qfCkVhRtMlZ96D0VLVVSeCfwJTgTZj2bCLyJWHVhL8KRsmyFVz7nIqcm3JKGvFYjWCznKrfgfBS0wiDiXFB4kz4LrwlbnbhiA/spK7Kdi/19EJRtKLzhozVVMxRzDXhiiIJSix6DOqhJh2eL31IYfohDXiyHrRB3Nbsme6UUoc8Q/1o5e3yM7qa9uciKP1gqMmlvNZ9Lr6PN2//Wyew82HRyBo+kjBl42r5SImNabr1RQYqsUUFpkp8gIAisDATkblyCdtA8Mt7ncTvo5Du16JLrp+jyXd1lBoG9VjlpngxDepgw4cEYlk3QqJN//cwJxfmsz3CMCNZBg4rZVDxvzFvk2qWv+0Hd2VuJslmUfZwfxNWN676w8IbrVVGQue5a2uPrn3NZrEBo5cq5jOH9wpheIqxEFDzBcdCbHzFVJF2e004/og6UHDGR2jLr1+XboeVYmBDfuYsTfLCVNDVxsFWMH1Mk7pg2CApv4gJOAAAAEABJREFU/1Z9yYZRe35YLwk3AggF7HwDYv13sPAq7s8m1A1lh3Q1xoufwSyV8BocD2nIQB/Cuk/EY7m0VlCLFelALy8NMm3lcrEu3cVWsZ8Y2nplV7kNkuiwIS8yfPsiOqzLlOv3he87oS2TtlN48TADBVypYlhWcv06Q0DErQ4Bd9dWR18oMwKaFZL7PO56OuGkNl09MU3X4f0355ePnCeiRJly1sBxwwN7wfRAYHpXC4F1JtUU5nmc1n6wpgp+/SJijauKqDigyql23bes8EJ5UOGrLi6Us2XU6MiTDtNk0p0DCjA4oJ07mbEjoeGY7viU6yfQu6EIF8BB8YkIR5YkVKBzzEaLHyZs9qSb1ItaQiYS307H5L4LHmEv3p71xHwigawcDnvUShbejuXP88YHa8p/tEas1Guv3AzLE9enRlPor4W/H5R/OGyT4XOKYv4aowVNUdfCS0FGZtIkxytL3H3EWav3YLnGrnT/CKZHCL8MB3lz9mucXKQLDy0F88WlQeHNaBpJQttifXvMvmMJiycICAIDI7AGRrqBZV+yC6Fw5X7gzVlpCINkuQIYNyPN5QQLb544pSjf6ZQT8FKJItZnECXM4WGQthkRPW0UhSUMgaziuRU8QzpGWPcTtcmcFdKuT2TlQZf+WQJ4angxGAYaHtuQxH92QvuinTt5kSNQqh1JounJ3EbtpJsXBP4m0sMLGzQtPcsAXrDqclyBF4cGn6JzWNQyimUdVIo7CNMkf5+E/7ZW/mgNby1s/VBuSKeU59dDx+XBwkvTTnGBlQ5vQ3qKDZGocZ8NCm9OfvhttSzFghXQhl/nbzMieI1Gr4WX0E+87DnfpNrHI7BaVhJKubbrr4Rpu3a0+XOUsecW6dX9QJpzm+FSTTkCqpN7/YK2TLktEzN7vWxh8QQBQWBJEfAj7pLyXHfMDA+KhZca/3FsVksDa7ua7QK+2FBBwgM7lAMQKdiijBDOJIpS/4W4VgUZLof82A6dqmDFvkwXupJmHMp5w8aDZSrxBizldxFQU10lH9Y7E3nOKdKst+qMI9rXRJavh0madpJTTWPjGnJZDxHUwGYP7SnTt64hUPT46nG3pKGod+sUigwcGu0u5fsAkbCkIfPb6yEv90oG4kO7kZEOiXCvkOetWNlV/OBkC9QT1hGBr00N7WXcXkGMnJzMwZqMtOLzQzMpEcAa3mb4pyycX0BGOI63ub+ayPyY7LIcc7VQpx1RK+PwRnRYl/pBpdDuXgz0goUXbZnVc+KhIZySsAoEhKYgsAgE4o0Ai2C63oqavkmlbK0LWOTGUGLiNEfKgzHogXZroonAOpUlxGxsHDpNEomfJVjyDE8CM5c0tEsl4kWVUnjr3CGo2EKoSksaKOUJiZX7ToEYkYRplukwmNDv0/78cpkh4yrpKqGT4aO1pCAoa4WiqBOr6ukXBXV+EJITUAg5IFVaFoD0ME5p3XN5+NCz/NFaO9IODT2MkPC8C+5LSBKUXW/Z1jVDsZZRgHaD78Fg4UW67HLS1AN9+eSAcWyFOFVe0sCyBjnb3F9TowakvLIum7zRjWetkEZb+jMFy+6jQwfG95XcN1iNpugUOp+20dWEH9oyqxXcogWS4gQBQWAFINA7y6yACq3FKhgeFMM/YoAiGCacsqwYP5M+xbh8fjFxw3MYFCFcE16xIc7zG1t4ESPCv72Mxc9R7PqGLdX9w3yLM6rgp5ityXL2iVppnQgKb9fCS6ytkFHano/lqX4LLxRBdmjDWDxm0Em7Cu+UV3i1IbsDBSZyQ5ygOD+tdYeQYrlCQnkl0IzvdFmN1IURfOV5NgktShTW8G7a7NoWLGZ7UET+0C48BXpCqtmipv+wVLGFN4nYsFAww167bd9m4QFNJYrb01ciUgALb6v00ZpSqkMZWweaUrpzYhVG/v6iV85aa5M3u/m+j3UzhosppXikczT2ocvom3QqHUG/sxk5acrqBcUeeyxx8QQBQWAgBLoz20CXy0ULQQCKXlBy8ap/1olbK7buKIrxM1pT7idxWB4CTZUagtKLNE4bhVh8Z1gWrFUuU8bbcFMBQ82TjvErDAqeZKzCO+nW0Vn+LHBi4imDoDmj/bgOaF+0M85X4XTpQ7kgnuG7F3zBz5gCQRRn9Bwdwyu/esrhq2NaeA0Lw7UPUpx4+zb9/rKddOCh3brkJQy4aLQDDwyWGN83CLGGtz3p3kgU9RrpiP22kRoy3WcXsLMPaTbC/PVc2NsCi/ewcuMS2o8uPfwke3FHVk7lpCjyrUHL9TMbNszKWheuHa/f9yDacctbzVpm0Ew0lf1ugAmUDQucpJzHImvhjdh3QHc4J1cLAusbAb2+xV8a6VNWiAInq/CqWWDn0TOZLT9cuIgwZeUhKGUpNTtXQl8Ia3hhZeqqEp0iUSIsCg/4RQ+tgpklJRx6Tg6RYFFZgXCTWgH6rJTpqakORZVqntbzTjpGRGe9ls0cfJlwFfIxWXeUlzSUdmlwGosi7etAEX4KoAY63qprkz5upt1Wd6rhF0/bk8N5iZdvEq9Aekhxx/Hp3JfxyaGDyXTM0dDahR5D1WxSMd2yedjWK+7jElFYjl1wzwQTFf7JBWufXWlxZng30iBreTz7X79CF6sDiIKsTLpQmlJ2tAZ+iW+7flFM4drxd/d5GP31ze/qPz1UWnP7lR8gysRy0mT7jiL5CQKCwApBQK+QeqzpaiT+KX+SJ9GCJS3KCgWn7aEVpb6cTQ/hpUwLH2uBRNnyoDPT2aWB51ZK+xQIlI/hwJ8K1UMKFl/w7MmMkDCMpUmdQlvwJINX8MpbIC35REfDlfxvcv8DaOf+B/oUsahO1ljt1yFciujSkobJ0pKGUESz7CE+bKjnUIKALWibKafwGn7dj3QMl6bGkplq5jbseCVFJg+aYufkcJHJzCm8HaUl8GLFPvf1wHrbJHF1G45b9+rw0Jn7NlN+lwZiZZS7c7dghNiI37YYG5eAn+KxobXPfjR+4u0o525b87hHYLWsJFLDwsxSg0YxbnN1piiWQcESZI+biwpWejk640C+XdKAQjPOSoYgIAgsBwKi8C4B6omfxabafjJXM2EvtKE5xuxF1zBlfmENb3lJg0kUMRvCT+uCp9sC0eguYSUzxxqGEmVYttOgUJTyh41qJpB6Q2OuOGXX8LpX7nyKCtayNfKRiOS23+1e9Md7n9WhVvhJL7Rz50TEiE66VuWg8F4xPkG7Wjnl3N6JmX3CH6QKCfeTznVsMe/GcxtNJp0SYcYym47h1VL3nn8q7H87C9HQp2c5NVBWUHinW9xvQMH3T3yUF3RQ/BvgNB604EIZK1+IjDh9m8iPCwUro4rvSor4C2/6J8YV5YUijDM77nlvuuizX6ZmY5RSL3pElstCCvdeK2j3s9WAx4B0cct+ZqPSk6e5v+RMtyfTJ9pk/BreguQnCAgCKwOBNTLcrQww56pFwgMjzk3517U5K2FIl53SioyO0xwpK5yFp1Ve0mAyzZOtG4AT1i9MRCWJSj/DsoSP9EI2uCaJCclooeXFdhYQLFhZMNdcTbU//B5J6wrW8FOuj01E8tCehcfXklTWJ+S7WHxfZTMV3msmp4mU6zOJrwNF+OmSco09fjsk+WEC8aTpLLy1jd06IX8Yl2XcIZlAq4wrp6mULtJ4/EB6uu40znbheBfKgQiLa84PEigDa6yO3H9qKe4GotENjt/0uFuSo/h+jH1Pjvrd17CTXMEPZsrLMjHteNYquCeB21I7w7fBfEte8DCRKC4UsWIa/cU14QyqwBoW3kS5vjWjgGQIAoLAkiMQdwRY8uovI8NFsE55IkNxP4cS+YH3W/XTqPPjMilG7U7G4JGMFd52rW4JNEx3PatJVXdJQ1KQ0XOM1vbKwb1MawrKQ6BSFES1CvgpBRkUbdhIVPCE3vjJj2jrf749sGWLliZj4nbzhNuq0KbDI/ftGdq5cyJiRCVdfsHCC7aFl1/bMA7DRANTT8tbH5FSQeGddhbeIuJHa+FhyK8kADvrnHw2Sp3O65PDBtN1976/lWtHymOo2i1q+4pgQ440cv/Z76CcXvifV9FNb15Yvp3/hsh8wrIReyKCt9Er1duvI8r5gZCYB8hOt53Cm3FfRnq1O9x77fCqZxZh0I9MuV/PUmaxWaCXl8aBnut5DKzVC8KbtJ58SQgCgsCyIeBH+mXjvy4Yhzllyk8yuXGwX0z7d+RXPBj3KBqdM4uPjCSOPq4cSdgKiAg7PVonTOAcJYzTieqWQ14sl7AsoDXtFSSsXUba+HzEYzn8Aw0Yzmu1wk7ourQH75SqE6w+saVMDCv0PKEFGTCZIo58hFU4nXWXD0w6AyvZnRm8bmq8shaDd2g/0MKDCkLrfCJpuocoFXNfXF//NhrTMvOez7epWs0Gsbxmw1l4z/1iRu99WxdfajYpbN+KNbzdR404nLE37k2PnaRNWxy96YncRbhfGR23t2q+x2uNgnZer+wDofL3YNuzjGXhdQIsn28035Nm7pYqeMCL/eCiVEGFv//6JU9q7oRxQf9pSQsCgsAyIBB3dF0GAVYDyyRxME/7yVwpl27mpQGas2K+lg64jGinnNj0xlHiecFGtSnI+MnPZkT0aqmTq+lN2i1PO+T7ZJTAkOI/IuheOVuwVGmHhmlVI0x0RjG4Ubg5IolWlM8yuaYBXFcsqp/Uusoe5ARxjQ7DE3nBSmFM1ibtvobtWZqSF2BLWdP9pzU1j0XNFlyM5wVokeMx26XlD/dmO7/YvOmG28rq4ktT+uVPuY/4OmBbsnw6pwlqEBRewLxY2vOVTwzOKjKpIvyaE87aWnA/1dy3kBfLpaxx4dX6tduJkWV+nn7Ta/SZYbljMVtGOhmL1p5vyQvfI2lkWVNtCG02m9gJKsQnlF4b+LIocggCqx6BJbobVz1OQwmASYfHY2qyhRdheA3WzLuKBSWGUiysHYpT92I/r1F5SUO21yZWDJ1CAVaRx/8O84QnFyTwzyYQNoOiFCqFzEgOJPGBHF4fFlB9/ddGk3qELM484aSJisTNkcnAlJUTpIpGg3YdfCg4E9oZeVU4lXb7ipWTmWBtslIsG8uYoE6cF+NIU6uRWVK7ri/okn8yD6S8xT5rORNzERaI4tywjmUAiba3IiNuXUkuXbJy23NDeu3RMUsBeO7cwTICS84x11xDh37nE1Rwq6JfpVkXDz499JEwH/TZ8LzQnMwtTZUowhsLivjLWOGFhXcHy5eTJuVv+nbBceZTL7U1J1ftkSaa8vD6ahYpCu5fptSXZimy6CzN9OZUeIOFVy+arFwgCAgCFSEgt2NFwJbJ1rSDue0Vv8K4dPg6HP+h55yTnkk8ZpcvGyqulaI2vzvNVHcf3vaGDRQMk9bCyxP6UEzmuBiTLE41vfIZXlOHfJyL5TRP6AXLAQNoSyWkJ9z60knVoFwZyg3nkVMoYvE03H6YQEHvihe/kn738jciSpxtwwfSkmAAABAASURBVCq8JEs7ZCEvEvhveZrlJ27rbY3ueZwbxiWalT9PYEv7Ktr5x8t9ygX1lvtPa6ZUzp0Zwg/3iOpTLn0+KI+OunXpiMdwLb4fQKdNhrZfo6hgHJGGG7vmn9xrtLXw8ssQivlLjLbfAiY1J+u0t/ASt6X2D1Kx+Bluylo9p+u2s3zECeYN2tN+TUMjdWMR8lazy5KEFd557gFuW29QjyYmHk6KOe6BJHO4JpHbM1rlhZAgsA4RcHflOhR8KUVO/STj3/AT+UGwRc5q9zs6gv558NGUJfGaQ7OiULB2W1fdJQ352AYKb/1YD4zKj0q/zDg58oInWM5vB8sgmHI65mEUJvKCoPDCOKgmJy35KaoR8Tm4NDLfGsvXmeiYR5sKwg/5CKtwJnN9BbQhMULDk22DLb8N7jcHbRtFVhSXcr8pE9rni5+wSbzqtxH2pikjY1w7c3L4g3EEkdy/akfcOjSqjbDHSg370Y5i1GFWkKId17u+WiaOfCxpSBnfcv6w8QT3JhMJ92K7o/Bq1nkLPhPvSBNDWNKwa6eTTykXwsJM/Bst9StOrtoD3y3g4XZOAVjuNHI7Gj13/09qxL2KyPA9SvITBASBORFYyhNz37FLWYs1zgsTHEQMr2uDdTDnV4zIL3hoxORXjzggg1Q7SSjLnQL4/vqTaOrGhxFngSVpNlvFVLAtUe/V+DWp4njLK7rNNiQkqiUF58Y9jOIuzGQxqeceT3CYpLpdv4u1tkZzGWRGcim/eiZtLDXsDevFpNDO9kRkz6Tdj6oK7i8gb0zOin5KdcYb6VguzZxsgV5z53SIdsKmzlg5U5300BHfRrnqaytWVDq0sy4GnbwhIvnGTfbqNhm67lqWpU/Rz7k/Jcwy9n2S6MLy1Uluw+mwpMFoVpD65LclBvcyplmrF7TjOn7jw3KSx7npO23MMWfwWsa5cr5/TFLw/Rq9Hblv5kx3ttonfkmDitucs7GSPEFAEFggAnI7LhCoYYqFgTb3k0zQOlvewluwAoN/QzkMj/5rreWTB+OkcEsaPrj5KdS6wYEEixXKRjZ6gmTHpaw7IMF6LgJqFTZgxaxrpXQ5w/uJdsxg4c2LbneesAov20JZiUm62cMzZAopz2KFVxyIw3buBAztzEWiH8ksSq1JvWA88cZkWAtPRZ5o+3r30KTabZ9D1FRQeJ3cncxhIoyjvbwoaCrcJ8hQrn0Rpb562bwhPLXRWXhzVmyxT20/qQL3ZVZEf5DJuE+Cl/I3SnuyjSSxtsu8lItH8lMovCMF7drh6GrjwrxQ3HV9/6G18ZtX4eV7JAl9LJK4CcYepjsbudqIy83U2sLYSSW+ILA6EZC7cQnarWNF4cHRjpHwmG/OEy0H/EJcUeo3o0c6hsPgjiUNadspK2nNTXRBZ0gqsLaGetf8a1K/TJDC69MqFELjJ/AkzSlnBSXUYbKd0ePP+hN970WvZyUi5MYJ00Rb6zGoFYmhQhWIUqedbSquZ0p73hZeTgNLISZx7lcxuSVp77BQTE458qyMughRU9coYWUqpIcOIQeIsCzTfg9cJIOsiFNkC2+x0e3SYGrGkt+1q1funO9P/OOJJNTNlhreS7nPgIr2z3+tcfdQStyX3V2Ks3FcymNNVs9pYpwf/tBvuO+CMh7SeqVF7up2Oczxc4hQMA6x7090i4L762wst+1X2OyYt4glKN76RkCkHwqBtTbmDQXGUlysMEoaN8G2ShbeLO73OJTxSIs1bemk+8AoaTieKb+ihZy+CohGd/57DWp5jRdLGsBkJPEzPBKRXOI1BGxL1i663XmcRqjeyC2XBJjbWBwPijuWSlhq2hCUB5pj4rNlIngpBPR0cnJyjowWZPlGlq9+1FH0ubO/QeFXTE7TAQ97AI1854KQxQpvyvqZB7+TO0TEd0hYzqd8v7HUyrimqc2K5eltmy2prXvZgCamHa4uRVSQIuxw4HXEkD106HesIuPvxcN/6NZIkzF8RMSUa4q+iiUNHKV30JPowhPuiyhhSUNsuSzhZfSKNJmTe8H36ZwnBzwxyvxgVJjt8pFNbryNPfbMxkvyBAFBYGEI6IUVk1LDIqDZwgAahidw4y08La/w5qzAwJKE87EcJrqOUsZEE78u84ST2vTpX15KNz1m5rpMLhblGAkWXm/5hIUX07iuoLdlrCSg0oYt5EUBLkgRTVKdwkNEbL5Woed2BKeCied5wS1YIFmZS+teO2IOsNVxwOoYy+vrgTQRRQkgX15SEPAhoN55fQ/ttk7J+D7dc2LQhJcDXQbKWCBT1n0p8gPT5gMals0Jp7ZsOOE2+LBxeIVDGNGoLkudMqRcQEf+4lOWPtIYH2wikpcZzfdBYam9i55AF53kFN68KOJa6C2H5fXytHuP9NdE+/7Vnz9MenNj7gcwfCMB2hUMeSArThAQBAZAQO7HAUAb5BKllL1Ms91IGzfT5awm2Uz28J/COIh21IwifFAVCKZ1RVftmqY/Xr0zZFUaKqUoLMXEWl6tnfyxmdb9w0OWFVS28E4lo1QfKyy72FYW0CuCQshtCXVX6YpvpZJ1s2BFDIIxxETgayPIiec0K0qBmp6aIL1jR0jasGlqlIK3TUXwAi1F1CwtaWixYtahns2t0HTKLCKiNm2ypWtj7n6cnHShzWSvTYYU30ccjXrUPLbZCAtboqz5dUXmz5Wyh4pmXH9YqQOR0I2KnNacwltkaRBzRlhExhUM9txQo844gIySK5S2KRgebES8ZUBAWAoCvQi4u7I3T1IVIBCA1qz4KW+pankLLxQYfHQVk23CE12emA7JpG7o6vFp2j7eJOyeoLzS1CkQOWIUK7yFI8oGUGKxXSKyH+Yxk/UqvNe3Rmj/g5uVSVkYhy0eKrD7RmjfyOJ1yfk+g4yCpWJ4SSlF7DlHkX8BWCarp6bI7Ox9UGqZlFLD/Pl8lOP442n6RS+mdlanZtGl2PMeImhr3dNDxZTfhzdpODkmJlwYiBaML5ZJh3SsMGWi4NQsK/NMvNCajFeUOBntCEsaQDDhNyEIc/YSfvjmYM0cRTK3wqsqwBVboWmNlpwJYc59B7laVz4ygI04QUAQWAACcjcuAKQYRcK4B+ug9oNhTg7+ghS/dozBpUsjY2tREayQnH3YkQVB8eQowWqG/wOPeFUO9O3aVmbQZlNvYjTH4h+Z/9Idk3rZwjtOI7TPAW1SevYJCTUZxqkgDzdswYoDB8OQ2/21JYUXhaErWUQVyweHzIhO66RDzTQn2cLbt6TBZGRiYssKb/bKV1C7XqdW6Khcg1bBHh+7Tr0T0d3uxrF4R3HwwfTBH19EV5z1UEt0YtIiauPwCoU044tERFf3invB932ZrOa+XEU/aowWHTaZ1wkL7kC1tNvGnQKrOFLMsaThmvo+dP2ND6tEMlMyKpQZuL5DlOhyrsQFAUFgORGQ23GJ0Dd+cquxVUz7iaZVtvBmcStSZwtkWeF95JNzfuWfWyZ4nWnsZG6TlXhQcIPeUrDsml0VjGqsIUAl0diloUDMcYHCm9RY4XXJ+L5/mChYEWXdgUL7xmfkKTIfH2P1WhEUfFIFEctPVSi8qQnsyDSnSO/c0Ukj0mZrmqmAL5ToFj8ggQec67FE03c6nejOd0ZWNKc9pdFNjCPHx/sUXiLFSj1F/2VaWZozeie3JR6I7cmInu0rnh50QsALiRNfD39q1QdF6rX5PkmuHtmXdtzwJn25cZJp6T4pUywY2wpujzKL2HGhJwiseQTCmL/mBV1uAZVyk9y20Yy0ccpETg7+gidWfpMbtYp1VpDykvWhMJow0YGJ+89gBaKVOc0yFdCsmUPOKlrC/Dka/Ug9XTuRezzBZJwtvCNjBXnYkRXVqdS1IfHEhg+ANIdRGfQT4/YMWSwVKWav0KWsF87EC02JX625i/DhWpl6iy28aQUyG62pWdrvF+u/wTdJWGBEIrpQf/RP/A+KiXEA2mWQK/TibjpWrJE5WfK+tlNs4cWbkVh8Ap2wWwnS2P5wOm8hSngLZCNrxCvmWuPN7Zhwv6pCzKnbn0qX3Ob2M0i7vtPbn2YUkgxBQBBYUgT0knJbx8xgTcHwt/eGGtG++9LrPnsJvZ2ebBHJSVOjVth4LG+EJ888ybrkWMnOUQHOKdj0qherrPB1izkgb+EndFhAU+OZL4bIAsrWM/daNmX82mxnbW3bk845/kX0ZborQdvVPNlRBT9lHN8icValqvGkkvUq5/6i+c41xJgC45JyGktUVaK5cfrqGWTb/ISRplyJGWeGy0A/KQqWy5NpsayIpqnDG/FYDvUHJ+yysZGtvLsme3kU3HcSAB2Loadj+N5T3HZtdj7LBirRrITGx7Th/wkCmCTcXcPOF7UKHiLAY7ncXApvldZWc9qd6J8nnjpD5ML2m2JGvmQIAoLA8iEQf3RdPllWNOeb7bOB9mDr7sFbRsjwq2h8PKLY8hkqXfcfzoT0sGEjSwhW3Q4dVoywllYhg+NK2RhSlThjiMAPxGFZ5rkc0egOH46AaFqDnU7R1F770dlHvYR+vvUU5l/wW/8Cp6M7BQFBlSe2oiioV1XCiciupIAq7jdJUpBGG557LtF3vhOZGZEpNdiW1pUz6Lf5YSpj2WecGDIjNZrcGwhHKPSheqJcRkQ/ZYUW5Pj5jzawwvu7X/UOh1B4ATHKxHaw5Ob9Ci/LnvTlxeC7x7451UfcfYDnpilvNk91fExj1HdgGqV7pEwDbagj99VAf58x7NTQ22/COQ3GISGhICAILDsCs9+py16ttVeBG+0xSnc/fG8rWJoYwr67hZ/cctJkIrfExjorvNpYfpe/5t8pH9tAfoWBzYvMztIse4lmtaxwOQUUNE67VDW+SYhyxjFXhvBvYhsNYq4F6YomHeUn0IIV34LbUcduwH6YRrpmOvCDrlaRaJZzUno9vKm41uaVvTxNKFHlnDjxjDsmFNBArc3Y2ngFwoYmw6PS5s0F9e/SQMyzqv5jtOb+aiXrekZRwq6bESd2+FEtOuK4KUsM90n4hzD11Ni8teKpOba64WYk+18JKxAU46xKePDpo43dWxSpvlxJCgKCwHIiwNPLcrJfq7znlythbSXNCpok9+/VNGsOuoKxMbxuD7UpQoRDXQVDphuOlJVtWD5brdxmIW0jFXhaazIp7IKK2ial8V2K8F/I8KqaT1XAkUgZf+swg5yfJAyr17SEP8MW3gq6TEcCsxvFK09rlMwy0XcIDBipJawIljTe9oB0FnJZ6tsQfXQjK7xtMr2XMcCqovtEK8UKLzMocVSppoT7E0X+Ke6bx506Qced2KZGoyB8FAjO9d20ceRqVE5Oz6Hw5saQLg9+kWuCh95+kjljzq+X+rMlLQgIAsuIgF5G3uuWdWIKaox1R2DN8ywmwNiAFAkTZqIFGHCY86t3DtxRjrucqL7RmGaJmljPwJRTVuo5qORgVpTWCmpRQrlOaXKCyK1bxLQOF58dWKQXAAAQAElEQVTtziNuTp/8+q9p8uZHEaA0S6g8QDFDk2pWmuJL5igaVjxdbHY/z1KCNXb2s4Pn1ozrs5OtliXSpuqGqNQE2oqwhrfos8i1dcIqcDX9B90lRyNaKZ2nGXM+XCKinzCf291jnD782QkaHcM9WVjqo7XEhpV7S8RgLoUXzWoS168qqYqZiSPa1lR4f1YihxAVBNY4AmHEX+NirizxsGVYuUY8H5HpTL7lM8PFO5YHthpB73TTnKNpVLVNn2mnKIT1gjWfdtzj+pqtKcZvqN9WCeFr+zpbsmBh0aosdTy+xebNNLlxE+XswAFKRTzqu6eENaBVNmFWy+atRMHW3awC7ayeOcXE67v8MJG7elSgPIT640Fw40bHpsfnLmwMez2ZcRKG7we8AemhxngmfK/25EVIGH7ALvOChRdka9yGCNeKS2qpFeWqe7l/n2wT7KHrJPA4XsWhjOuzZdoFP6gpbuNynsQFAUFgeRGoVutZmGzrrlRqemEvOF3FgKz8hFbwgDwdNAiPtq5oIvfkqZa4SWDCbzGVsYzhXOzQaE0ZW3hznmRaOrFLGkZHiZUlRVq7elDkX8qYlkniQ8Ryuop425mtLWk0ralwEk9hCrScZvfa9drsJ4bMrbPSBxLTfiuBAuY5ZHAbI4jpwl2Y8wPT5m3wVQ95xQoLHqZ6MiMlEgPuRQ81dNWghPecGDJhWA68hQhkcr9OZCxDHULu6g8zLNxnMcb3P4j97oF2BAbdnLgxZWbB0Sg8b8dlJNQEAUFgKARmuVOHoicXLwCBjAdDFBvd4CY8THSVWCKhFYERD8hhaQGScLpCZQn0Uy/j1HSOJAWZbSKyZzRPLmzFejK9nX541qto1y6iEftVekFKOf6RWRKzZDWJaAqmcyZehWWOyfYeXslmqUixvEr1no6Zynb3nJClMdl1aIVdN6a8wut1s8752BGlFLVzRQcdUtDoxl5ACz5XgZ5tRcDt0VZ9w2+KtD0d1TMsR/kuwH+vU6pX1qgMl4mYabi3Eq3+RjPK3q9U0U/5+7JMPidFSqlylsQFAUFgmRGoZoRdZqFWOvtamhCGwtSNz6QTRcksg+bQcniFt2CNulWAY5eiKvJuooJYxpMMyE55c1LNVNfVlCrsGt6f0TF0+T4390saiPCq2likUZO4Lnz13fQf5SXQgOOymEENlnpkwtCrGU7drzDhZCSX7a4/VqXwpq7FgoU3r6j9AkxupXlBp92tRS99/XTItqHiNk343rGJyF5imLPiRizR1UlvunRqqGiKzmIfzxwZbPWmWTaXWjt+6t865NrhOJ26nU2QrMSgEKBLZz78FazsrkGIg8QSCgJLg0BkLm5kiExUyM2PQM2jXm/ktqBma13i82xGJE+FraVYeWm2Ha+gP2iecCOxmZUM/tMbTkyzQghVu54ZJCtxKSsOaebkm5pUrPASYZcGWEINy14FU+y0AbqTTcc3WYLZrfAT6+atOUEsw5Mq6lCFK29Z9Ru6GW2/4VE9bMC/JyNSYkM9sZT8VrFdNa0iWTVrQ9jNwzLt8/CAURXG6C/ldbVgbVLcKYjFdYlxg8t1Uy3649U7aZLvyYTljstl+alldbfrTdsPcq20YSuljKKExwibqMDTs937WpOuqM9WIIKQFATWBQJuJFwXoq4cIbPMTepjGwtbKQzIRlfQFH6iw8QNfbc8neoKJwAItW00tdPOVMvJWPN1wbnYLkkMaf/R2tQU0a5diuqNwvJn429sdpZelhobhgeJpEL5LCN4xvWbDVsU2SUNyKvIldvr+fQauuARb+3hpDInf09mhMSIvzdarPFiuzBSaj6qQ59jO2tnezC8CekhqBWZitinWhPuSyr/El1ORYsHstdOTNP28Sa1ioLlKqLRXymE0hGv8PqxrZW4NAvLb9AqakgWXuHf13FYPnKug+L+U86TuCAgCCwvAtWMsMsr04rnHtYpjm3O6VT6Jp27/1lsgYg/IGtvEYTFo+mXFiR+EMZEXyVQ2JDdMC9MruATFBnEYztDBSWZm8CnphRNTZLdfonn9comuoBjyy8NSVnW2HL10wsW3vs9YpIe+LRrSVeljTHjWtIdGnLSNNl0yjafskeSxu+vljB76DdNxhXLGgpWDDmrssNAoUZHAQfEEXqnGILEVCNnxnIVfaRNygw975hBwrxAD29bEMJlFckF2svm/BKu3Li+ev2GvWxVYFDY7RIdW3IwTyUzH/4K7ksJu8EoylWCwCAIyDW7Q6CaEXZ3XOU8Kf4b25jT+XQKXbnpQEormIC0H/hhZOW3mIQfJlqEBrM5IhW6emkiqOLr81B1w9iF9dDXbne5+GgtVwXpiiadRCvLaJotkYikZgluJa+4nHiHKTrsmCliscG6EldPNOWZ24mh4L66a8opEYGZTquT12hN7bygqaCIgmlF7Wg08+IHJrCgMj/OUNzGWlUjJ/oss+g5TEUPESnLAUb4END1WqJEz1TSUGZVO7+EK+f+Cjmu2WN/BKS565oguM2J66lZ7n0ovL4acZkJNUFAEBgYAT3wlXLhUAjgQ6uxLW1LA1tqVTEgaz8BTPPk1saaBuYWFOtKP+JgPjhGUte9/HyLrEpcygySWm5pX3uNm9nwYRf0mIr0JKr5SQ7rIcGxIl3FytTx/IdirtcQaf7rnFtAZDFF8IDyf/c8016SM58dk/4LS5vDvCvc0op1bavwYrlIERRO7fqSZx8t2DKS0jSeCEHRGPgdV99AVNV2c8CXgmyeo6kI02DdDA+9YGeUQrC23F570ZXHnkATGzdZucyR+9gQL7rCuGczInvhTVqZLD6cU30PUOXzEhcEBIGlR6CaWWTp5Vh1HA1Pdnvt51QXvI43On5T6NRN4C02NWBO18wjTVxeYuLz62+ETfXUZhld7eSasUJfqxeW17Udhdelq1JYIBukglIGxmmCFGIVOq+QhX8cUPVDy679bmCFKbj/7BpnM5lNOa+q1++gnnEfZQMvtcsKQzmOQpHcWGb4uaigyenWDIqau29V90mN70XqUzq1qeaeDGRbAJW7KR+U9jbnDNlXZcatbkXf+q/P0DWH3cxWf2LbnjYsePwxuhpswUAbN6YiHhwwNgZ+yJFwhSEg1VmHCFQ3CqxDMBcjsuZZaO8DnMILC2/K6cVcv5CyxlsEcx7s23lOPO4Tpndca/omW+TFdpvZeoYhv17x7GpK2F27HRyJat7ia/jBIrZcoLfnWMb6irJfvCNdRfuBbo9LWAPjjHbhblvN7crJyg6lHJYm1bRjoldDQl5VjDMWD8pui99K+CoQg01V/OqJsWTxut++hrYp73HfqRl33udEC0bqLGQfNV3Ra4LEKFLMq8VjQMIyHbR1hA7e7Lbs4uw1deABJbRjzvfLxP4HWvmAgY1U4AXDQpk03kxUNfaU+UhcEBAEFo7AzFF34ddKySEQgOK554Fu308ovEZjShqC4CyXpl7hbfF0B9UaU7fR8IkUT3xU8e/IvTfQHW+8J93nZu7VYlXsMp7QQXtkrKDt3sKbjSGHyHQ0JpdesL+AgtsaXSWwlnTjC7h0oCIqdTxaqrDXG2WDyjyl3fCQNTTtnMp6+CQjri49mZES+GAOeyi3uNN2PlqrqB0bmbsfmtb62QsoFCfjIIgkWZfM1kZGHdl8dnVLGrpy1fjh5ZQbbqND9xj1XNdWkLKobWKPxcr5nvz9U55Dv3jcv1KGdTKcV8WhWbHup9vmjqMr6rP9vCQtCAgCC0NAL6yYlIqNgOEBca/9WnTb06bo8OOmCAN1dB5+DW9LGcJeo7B+pKwlYToI/zghNs9+egdsqvdnRU8H6w3Eve5aSEfU8BZer7NF5wmCh+21wU+tVOmECl7WYTEiR6xyxqHRTlaOVnIEhbexgWj7dUkPj3CuJzNSouatqtN2ZxEvY0XKQ93fD9NsTSbTOxwW2lDoW5FE65AZyxjPvodOnZjO+ZiRGssR6KV9Mob8tRKmCY91XpiCFdGLzrgvXXnzY3h87W1bXyROUMJ05x3v7Ghy26qK/sujY7C0vnATBNYCAhWOAmsBnupk0FRQWiN6yduuo5sdP0lpBRaIsN9vm8WAASvl+TQoujzPc+7aOGp+woGlfPvVygpVG/FWUH/OZkb2bsRWspTpJ+zqFbRff3WVVwS569hTFemAljY87RXqTZuIrrzGLadAPpyqsAPVMzcs2WW1FQs5VmPFkwVq4gbhsP+oVdiuAd/A01S0Dtw4ES2bKh6sLeEV4mXcZ92dT5RrTU28JuC6sWGb/WqOxD+IgvquvfZGQFjSoFnptQnxBAFBYEUg4GaWFVGV9VWJNDGEHa1yt7kAJTw4R0fAD8RNnVBe5JSwwpT4QThRpVlwaMbLSyAxTsmFwntNUHgbbtozFStMZx29Pz2Y3VIgoGDCZkZN/wGX0dXevto4XMc2a/q/a/ehiz77JfrJQf/CNeCjrEVxMuZRT/jJjAk2+ebQxsWpIlmtpRW8cDP29ZWCH2Qydny6kgP3Y5mwyappz3rSvdfTgGeZ8RqK4+G38H0lZwtveI6pZHz1uOnU91FO74B1gUMsh0n6+hNnyyEICALLiEA1I+wyCrRaWBtYIlhxaZNTzCoxJPnJrc16Cwb+hMPwH8K09pr2agFsnnqmDB6LRjy/dUoldSdfUPA7J1ZxRCdOcSnQmCxH1Tfvppqz6o5szOmqKxSNn3gSXV53Ozdo053kuSpRj/CPWfCRFd8mjjbfKy4S18/Qd1gxgXJNHPZTr9LCa5h3mV9ZcSrnDxuvl/jUXBcaluSKvT5NMRL46nEfbfl+E952+TNRA22694K9NUGdO65ih6g4QUAQWBkIVD1nrgwpV2AtDCmr6uZsxUL1stKkhHQU5xWklueVJYYSP6krzovCYwUQqRvXjVP/39ZQpczHjXEPFMhb7U5lTgFt+zY02sldlVyp/0huZBPRzh1EzSbRZNvVQRtTFVsaYYuZYupQHryoRBXKalgxyQvcj4q5lg6lqUrLoDG97ZeUlTWK+0tYRlCs8RiAcK26GveTyS3b6BePeQZdc5MjiNxzL2W9UEcVX/v7EkRz7e6Lgjuuf0GCbHGCgCCwAhCocBhYAdKtzCrYWqUJT7Bsfch5YERGJROrV3hznrjBI+NJL2O+0HXTvskW51erq3k501pXuc1G3ExXfp27WuXr1Nsrme3CKWa6cDJ2zkeOhD4ytsnhesVliqZbbsjQfmKPzNKSG826ZsgqP46zzNjDLdFs87tofy9ylj3warxsHbWZEb1a4pSjQNKkDtuQjhlmiaPdzzMmj5VAq84PS1Obt9IvHvsMuvbQwwjb21VdryTr9lf0GfAr+P7o607IFicICALLiIAbBZexAuuVdepHw7ydk1JOgYmOhVcEW17hrWeG9hhJ6aAtI7SVw+j8longWOa6cbDw1upkredAtZbCX6aKxWbr2xNvBSCV4geY2CzK9FJvohrZ7HKvvkrR4vh5+QAAEABJREFUZMtN7lV+tDZWdzzAVbPFDmGVDnJaxaj/PuxPR65E4h9gAtm0bkI0eph6XjVo99GpD0Mw7rVQeHFvBKo5R6ruQzp1bz2YFRXKjUUF99uk4v4DfuIEAUFg4Qi4u3Ph5ddcyYKtrC1Yd5ZYsjRxwzL+3WdleotXkNo8+EK8Glsd9hyrE/bh3DqSIWtNuaDwNkYKCmv3amYNdXHfnt7Ay6/bq22+xFsFRzc7C+/VV8LC65TRsLyiqhporyx0LLw+XQU/fJiW5yyjv086PPrTnRNxIlnW2zerXNIAVhhx6hV+bBgHleGoNGa53zVx2w5Hdt6rs5LCm3f6jCKje9uX5CcICALLisCKvyOrRuecr36fTn/gs2awucP9n0FHnvII2n7djs65yalpOu6uj7f5doLsnFl8JPETOL9IJTbxUiW/oCCxogv62VqydkKgktM8uQSFd2SECP9ZDqdrnI9wTTg/sebeimR8WJVsqe8/jU2wkxFdfaVmC6+zQuqKeSfGD03Q0iCgv18Qje1S5pWDPlyJuNKBeSkzYrSedC2DIJtmDlvEY7t66vCsZ9XKFLvei6VX8w9p5euUqlZmk7qHQPCEZdeFilKDmDhBQBBYKQi4UXCl1GYJ63HRxZfTnR/0bHreq98zL9ePf+4bnfNf/NoPaHxispMeJgKrEq5vs/FBsUM8uvOvMcMgXF/DCi/mtMSv4R1hC+/4tH2UoPLr8ej4LjVBr4AGtlXr8kniZuyN21wHvfIKRVN+SUNVOwoE2VKvbBpTrbICfnXWTLDzRYFOhIzgKgY4S3uH36QvHapRCgeObhupEfYc3pB2lbOBia3gC2e736vuQqm/TywsoQ8p3flAmOQnCAgCKwKB3hF3RVRpaSqx3z570Ife+nx64dMeOifDB9/nTvSBT3yZdo1PUrud03s+8gV68H1Om7P8Yk5kPMmifLvVru7VdOImNyi8ipmNruHJzvBEEyy8Dbbwak7Dsdhr5/DtSYTWpOpfmTKGxL8289uwERZeReNTrk8ZXe3QEQx1mhUHrgKRrwtV8Asfpk1jfVGJvtKqlIofrfVZdLO+dEyOR+23ke57833X1gPgHAAppUiVzumK27HRqHW4YaxFAmG6xpePQE5xgsDyIbB4ztXOWouvz5JdkRhD++y5lbZsGpuT54nH3owOOmBv+swXL6ALfvBLqmUpnXriLecsv5gTqXLQw6qkVHl4XgyV3ZT1ClLhlzSsqdf7faJrKijxW5FhDW8z57R2GPcVXb1J357k+0vl4nk+LQ637VnQxf/QtGvCKbwqc2FVYNa8tVNXLiRRPXWW7PAPPYJM2t83IR07HMl6lzQk9TXWX2MDtkB6xiu4mvstLlGkEFTneC4JxMNYS9xv06RivoGphIKAILAgBNbcCHvJZVfRez96zpxuYnJ6QcCgkFKKHvuQe1ha7/zg5+hxD7snKd07iI1PtdnqtXhXsIJG/MuLgodjWM4WRoP1OJrk1/UL4nvfB9DP/3EtXX/QDS03rRbOZ0H0B5S9CtqwBAaFt94oqM1AabNweZvtgmDhq6JusWi2lFPMuNvY9pxuFgP1vYXWZ5oxAa92oWjbnjn9+heaWpQgi/nrBfGeaubU4rZYKM9QTvNdAUYFPHZTrcXLGmjtLkw8L76tmFPpYKVld9cOcz5h+iVuRJoWhCl4TnBleehYcHlcs14cw0gY6xSDa/sPA7UY2fmyxeHawhXOFb5NC+5TRXvhfdZdLb4gIAhUiQDGhirpLzntZqtN11y7Y05XLHLv0jvc7mgaadSZ3vV0+snHzZSHB1MawKVhR3SmqO3IXNBC6aD4QstmljjxBMCMBqjnQvksdzkYWZI0ZyGJYOHNWSWzS5YXKLPicgpXc7jcsszJ368VLPjBBVVNEFZZXzBh12a3dVtB2KWhTU7pth+0LYC34nawa9QXULYsd824oal96I1ofKJJ7QedteD7o0xnIfGGXz/R5LqyqJ1D495ZZL0Xwi+U4Tu+wwuRGiy8C+aHK5jCgsuvn7JbRjLa2jBk+IEX97TRenF9h6HFeBDaabch6PM1OAr0GY4gTJj/bq8N7cfXyCEIVIiAkGYEeCRgfw0dWILw3Cc9iOZyUF4XI25iDL3kmQ+nlz/rkZQmbrIvXz9ST2gQt2m0zjYAsi5J9IJpaB5QsfZvoTw3jWaWh2HlaKHXrMZy0I+yGtnf2BhZCy/WZi5UFrRBuoh2WCjdmOWSWmblI25LxbGtY9mC+80g9cj8sgUo2HvuxQoT82x5C+9IY2G8sVbdGLXoeo7WDEHG0QHvr8XIu3U0tbxy1TscGr7fF0NnsWUb9ZQR7R5YC7pQGg3gw/1goeXXU7m7HLYn3f2IfSnlsRLoZou8r3FNYzH9btQPPLhQG/hEWhPG3oXi7i4SXxAQBKpEoHeEr5LTCqNd8JN1s9miFluEUTUbn2M/3tsedzM66fhboFg0B2UsEDN9lqWQHyPEl9mgo3kARrhWXcITTfhPa/YfT7AlH8rWmpI3TasTZzbKrFAhG9aqPbzCGyy8WVC+UaACN+qV7RornRWQ7yEZvuznN9Cd/O03Ppwu+dAnOulKIn33JB66KuGzTokqpazkqsLx1TIwXsnlRFjSQDwejbCizVlyCAKCwApBYN0qvH/52yV0y9Mebbclu/zK7Tb+ote9f8maBVaHwMywBSzEY4eb2VIBmukab2nMLUmWQ1TC9mQF2+xm24TeFlitXpK4mvuJ3CUq9D2fttK0x56FZVTf6DpS1crZ3htqdNDWEdpvQ93yrdJLWPFUSlHTiWhZFZyuXGFhHpaZ90zq29enJRgOAaNdX02rfmgyXYVXeZ6mwjF9OFTk6oUgIGXWJgJuRFibss0r1Y0O2Z9+e/4He9xrX/DYzjXfOPvNdPIJR3XSIXKbY46w12j/uizkDxIGq6th5WyQ6xd6DQbfxKztpjbcHqlfw5t6xbdRW2MKRMnCq5SzXi20DwxUTvs+wwrvMbduW6V3ZLPna8xAJBd60baRzP5HwD3H/DKOhV44YDnuPmT/+US4nmUeqVUrIwV8PU+Ptk9JMCwCiV08TqR8SFX9RkeJTnPbVSp0JOZTuZLNPOQQBASBxSEgY+zi8Ipa2o+NlFRsDbjRllE6hK1lUSu/wojVWHkwNVcpnThTXaMyXB2fJfdf/nL69l+usmwrf01ruTgP1s79Dm7St3+9i/7l8eMuk/F2kbXh417MySvzLBJkDssqOFn50VIJrbXuWjlou2FgPKDpUvTVu9zF1kb5/56XJmvsYdtKJ54gsLoREIV3GdtvY93YKXbriNfUKqrLbQ7eQkftu7Ei6iuDrDGa0tQpuibDvgJEjczQWvtpb9kNYaXyeV5Ywzvt17e3WTGzPM3awhbLGtpFV+ElrWgkS6yolXklRUwnmgzzrIzXOiQc8LS7mVQtv29LtCNYbeQ3FAjXhRMhBYFVgoAovMvYUAdvHqEj9h6jW+63tpXRpYCY9QVKM8dJp2QfJNbiv1ENbwOUlZCq/flJnJSmlnuWoHawXK0xhRfK0dToGI2feDua3rSFKMheJcIlHvjYSZfSVbJdL7RTbexdkiaqepGV46GNe0g6fN9N1fMUDoKAILAoBEThXRRccQvfnK2ux92AJ9e4ZFcatSWpT40VsLBLg/FreTesQStLWIutlsIaePrpdPn/nksT2/akprfwtpQfMtaYcpaxUrT9kBvRTz78Ofrm+86mn7zwtUvSbwMTKLx23+iQIeHQCBi/dreWmKFp7ZYAjz8ocySP6Qhpjd0fVibxBIFVjoCfvVa5FFL9dY9AnRWW1P9rYSxpUEpRptceLNpP4nZj/KrF23tvolNPpbxWp+ncmXjbYSIPlt6q67BE9Pfb2LB7N/91+zhdfshNaOeRcbchnFUMryThHBReLKtAXFwcBAKejXQJFN5735vom98k2uQtuzz+zC6F5AoCgsByIbAGVYLlglL4LicCSaKpo/CyhTdZoxPOhiy1r2m1/yCHKv41GFewyNtEk60WTW7ZRjtuc9vuxI6Ta8DdeNuoxRWiQLdPzBIMjeU+qjQtBUvIt15chgcKRTSWmupF3n9/olNOIQoPguHBsHrOwkEQEAQWiIBeYDkptkQICJvBEMCODDc8cpq+8qcr6dg7TFGa8Ew3GKkVfdWN9hilW+63iY5htxQV3Vh3axJb+EctrYKuutnR9PfPnkt0+OFLwX7JeEDOm+w11lF6l/LtwK59D6DLjz6OdSWH9ZIJvcYZ3WjPBt32oK20x2i2dJKGhxhReJcOc+EkCCwQAVF4FwiUFFvZCNRTpyxgqWkrzyldw+ayo/bbSFB8l6xFeBJv50QTLfaYaWONLjY94cAtFP47X+ot2yxudQfjCuJ/PuP+9PU3fYCWgiX4rSI3VFU31zN7n2TLAaxv26EEkIsFAUEgKgKi8EaFU4gtFwIjmVN48yInvJLOluKjruUSdon5Jjx5t/I2TfODBOzmY9kSvCJeYhkDu9HEybbB96eQX0nIuFq6/uGsvhyKma2AeNEQCG0qFt5okAohQSAWAqtb4Y2FgtBZ9QgEZaHVJlZ4i46lbtULtgIEwIdy7YKoCY/rs8Evc+Domjs2N9waaVgHKxfOK0XaOCW7cn7CoHoEROGtHmPhIAgMiIAovAMCJ5etPARg1J1mSyRqVhMdAjBEcfjaHUsaprzCG6zpUYivMCI32lanm++zgQ7a2qi+Zl450ui4EbgJiRWAgG9TCuEKqJJUQRAQBBwC2gXiCwKrHwEoDsEKOZKlq1+gFSJBwq/ci6KgNiu8iK+QalVSjf02jdAxB2ym8MagEiaBqFeKFOOrFBaLhBMSrloEQjtqmVpXbRtKxdcCArPKIHflrLBI5mpEwGhDTSzg5covicLCfNbDkbFC1i5yxrZN6RJth7YecCXtht/RekZreZkIracf9q4++WSijfLfM9dTs4usqwMBN+KujrpKLQWBeRFI2EiWsyUShcaW4qMjMFoHDv+xqmA5ge2a+a6K5Vn2w1sDD9o6Svc+cp9lr45UIAICJ51EdP75RIccEoGYkBAEBIGYCIjCGxNNobWsCBi/FpL1XhqrySLeWI2BbZ1gOG+z1luXD6xiwUqddZ5e8SX5CQKCgCCwDAisF5ai8K6Xll4HcsLCG8QcFQtvgGLoMPNKbpu13noqQ8bQgAYCQdH1SxtCtoSCgCAgCAgC8RGQ2Ss+pkJxmRBI/PpS6BGyhjdeI2QJbOaOXvgHHy4l/lAIoKOCgCi8QEGcICAICAKVIiAKb6XwCvGlRCBL3DIGLQpEVNjLH6qNpA7jqAzWK7HQT0O4XnEQuQWB1YSA1HXVIiAK76ptOql4PwKJX8NbXtrQX0bSi0egvG53ZA3/l7XFIzPkFSMjRC99KdFtbjMkIblcEBAEBAFBYHcIiMK7O4Tk/KpBIPUKb2qWtVuvGrwWWtF6ktiiWNgwJhZei0UUr9tv5HAAABAASURBVF4netnLiG596yjkhIggIAgIAoLA3AiIZjA3NnJmlSFQS113zrziu8qqv2KrO1IzBGUXFdw8miEQJwgIAoLAAhCQIoLAykHAaQgrpz5SE0FgYARqWlvFrC47NAyM4WwXNhJts2E5H/FxmyGeICAICAKCgCCwShBwM9kqqaxUc+0hEFOizO/SEBS0mLTXM62w48VozS1tWM9YiOyCgCAgCAgCqxMBUXhXZ7tJrWdBoI71pfzufawm3XoWeAbOytiqe/DWETp8r7GBaciFgoAgsFsEpIAgIAhUiIBoBhWCK6SXFoEDt4zQw291A7rlfpuXlvE64HbyDbfRTfYYJfkJAoKAICAICAKrEQFReFdTq0ldBQFBQBAQBAQBQUAQEAQWjYAovIuGTC4QBAQBQUAQWG4EhL8gIAgIAotBQBTexaAlZQUBQUAQEAQEAUFAEBAEVh0Ca1jhXXVtIRUWBAQBQUAQEAQEAUFAEKgAAVF4KwBVSAoCgoAgsKIQkMoIAoKAILDOERCFd513ABFfEBAEBAFBQBAQBASBtY5AUHjXupwinyAgCAgCgoAgIAgIAoLAOkVAFN512vAitiAgCMyFgOQLAoKAICAIrDUEROFday0q8ggCgoAgIAgIAoKAIBADgTVEQxTeNdSYIoogIAgIAoKAICAICAKCwEwEROGdiYnkCAKCwMIRkJKCgCAgCAgCgsCKR0AU3hXfRFJBQUAQEAQEAUFAEFj5CEgNVzICovCu5NaRugkCgoAgIAgIAoKAICAIDI2AKLxDQygEBIGFIyAlBQFBQBAQBAQBQWDpERCFd+kxF46CgCAgCAgCgsB6R0DkFwSWFAFReJcU7uGZbR7LKDVqeEJCoQeB0XpCjZoh+cVFoJZq2jCSxiUq1EhrRVs3ZIJEBQjssalGMsJWAKyQFASWGQFReJe5AYT9PAjIKUFAEBAEBAFBQBAQBCIgIApvBBCFhCAgCAgCgoAgUCUCQlsQEASGQ0AU3uHwW5FXF0VB11y7gy66+Aqamm7OWsc8L6jdzmc9J5nzI7AQfOenIGd3h8DV268nuN2Vk/MLQwD3+2VXXkMTk9MLu0BKzYkA7v9Wuz3reeB7yWVXEfCetYBkCgKCwLIhIApvhdD/89Ir6chTHkEPeNzLe7j8/v/+bvMf/aw39OQPl3BX/+p3f6Hb3/updNK/PIXu+uDn0O3u9RT6ny9/2530Pgbsl7/pg/SKN3/I56yd4Li7Pt5iC9z/fOHF0QVbCL7Rma4Ags982Ts6uH7qC+dXUiMoCe/96Dm276IP3+Ws51TCZyURXQpcv/vj39DJ93kq3fH+z6Rj7/JYetVb/nvNKWR//Ms/Ov0TY0CVbXzOV79Ppz/wWTNYPOWFb7H4nsbngPe/v/tTM8pIhiAgCCwfAqLwLgH2v/njhfSjn/+hw+mDnzq3E48dKZjg0x59P/r2595Gv/ja++lh9z+dXvS693csveed/yOrEH/6nAu45No7zv/Mf9A5H35NZYLtDt/KGC8z4Vc97zH0wy++iw7Yd08ifoNQRXXe/J6z6cNnn0ePf9i9bP/9wodfXQWbFUWzalyvuuY6euyz30j3vfvJ9OMvv5v+57/+jT72P1+nz5/3nRWFw7CVufEhB9j++ernP8aRqsC/6OLL6c4PejY979XvmZX6YYceaPH96XnvoVc+5//Rf33iS/Tr3/911rKSKQgIAkuPgCi8S4D5g+9zJ3rfx86xnC7m112wENz/jFNsOnjPfdV/WssWLJP3fPgL6Lzzf2xPQTl+8ev/y8aD984PfZ6gHIR0OTzqiEPpfmecTFs3b6A0MbT3nlttXGvX1CcdfxSd/d6X0xmnnVC+bM3ER0fqtGFspEeel73xg/Tlb/ywk/fN7/2cgDcyYAW+32NeSsAZkxncp/73mzg1q9sdvrNetAYyG/WMxkYbZIzrR8S/+XDl0/SgJ76S3vORLxDwhdUNFi+88sW5fnfl1ddaBeGZjzuTcL+g/+7Dfbe/3FpLz4br2eecT8AqyHrpFdfYt0Q7d03YrNe+/WP0xnd/kp7wvDcTcH3OK99N/7jkCnuu38MbCeQ98oF3pZFGnW5ywwP4IfjO9LVv/xTZa8ZprWz/bNRrHZmarbbF7e//vLyT984Pfo7++9NfsekvfOV79KxXvIte+eYPWxwf+pRX0w9//nt7bjZvv332oA+99fn0wqc9dLbT9ORH3dviW69ldMqJt+Sxdwt9/6e/nbWsZAoCgsDSI9CdvZae93JyXFLeD/qXOxJeK/72j3+jj37mq/TQ+51Oe+2xuacOtzj8hvTGlz6RPv+BV9E973xbwqvO667fRbe6xWH02S99i2BdwAW7xifpHR/4Hzr2qMOQnNP99Fd/ope+8QP07g9/np73lAdb5ReFRxo1giIxOtJAcl24v/z9Etp+3c6OrNdy/E/8ChQZE5NThCUmP/3lH+lFT38oPfzMu9DL3/Qhum7HLpye082F75wXrMET8+EKcaFs4eHuUQ+8G73hxY+nT3z+G/STX/4Bp2a4X/3OWcJ+84cLCYoHlvv871e+O6Pcesi48urr6O8XX9YRtdlsEt4StfPc5kGB++Tnv0m3u/XN6O2vfhohffYXZl9mkqZuSzitlL0W3g3224v+cfHsCjLOrxVXMF7ADfd4kAnLzC6/artNYo04HoQbPCa+7VVPpRsetC+94Z2fsOdm8xJj7Ni5ZdPYbKd78tAml1+5nWD17TkhCUFAEFg2BPSycV5HjLdu2WitVv/OVpkP8Svbh9z3tBnSP/Bed6QNbEH71e//Qi22TKDAPy69gm5+00Po8BsfxEqvW4d77jd/ZC0HJx57MxSZ0116+dV0xVXXUrPZomuv2zFnOTnhEHjrvz2VTjr+FnTWve9oLeI/+/Wf3Ik5fMF3DmD6sl/+7EfS3e54vLV43eF2R9MPfvq7vhIuiQ+qENtj2yZ65APuwg96N6Hnv/q99MWv/wDZ4voQeOxDzuAx5TQ6/ujD6cx7nkrf/uGv+kq45C2OuKHtz0978dv4rdGPCOuvz/7CN93Jjr9+IycceyQ96/EPoNsccwQ9gh928fALQ8MwiMAo8fSXvI2OuflN+KHk5sOQkmsFAUEgIgI6Ii0hNQ8CD7nv6fZ12T1OP9GtgyyVxQD5iKe/lh7+tNfaMpNT0/Zs7ndRwCvej372a4QdFz7yma9YK2T51bIt3OdhycK7XvsM+o9XPIVe/daPzvnKs+8ySTICWBIxMeHagJOzHoLvrLDMm7lxbITG2aI+V6FDD9qPnvCwe9EdbneMDXGvfPWCn8xVXPI9AmOjdRqfmPKp3mDThlH6yNtfRHgd/5HPfI3wINfmceUG++/VW1BSFN56TUzNjuVCIJqYnKZnvPTtdgect/FD9O7G6YXQlDKCgCAQB4EFKbxxWK1vKgfyBPPiZzyMHveQe8wAAlYvTERf+9S/0+te+Dh6+mPu11Pmzqfc2qbf+K5P0J/++k+61+m3temFeAcdsLctho9XbGQdeFi7BzHDZIO1zNNzbM+GcsO49YYvlCXDr3aBWUxc8TEclkiEtgN9vOlotlqIrnlXxtVozW9mZt/2ahAg0Ef/7bn/j/77bS+gl/AYBJxh0RyE1kq/Bn0m1FExjojjLRfCqt31O8fpCc97E8FC/OG3voA2L2DpQ9V1EvqCgCDQRUAU3i4WlcceeK870CEH7juDz+hI3eZddsU1drDEV9Q2w3tYd4sP0ZCPcL6BFFuQfeM7PyOsQd3BAzA+cMPHKjc6eH9LDRMrJoB2u22XTiCO7aDsyVXsQa4LL7qUYC0/7/wfWSv6wTfYx0p07C1vSud//xcWk9/96W+d5SH25CK93eG7SHKrojjWI45PTFrrINZAnsivgVHxmLgeffMb24+q/vPD/2utY7/83V/sh4a3PW7FvRKG6FHcXLgefbMb009++UfCPtr4yPUDnzx3KH5Yq9pstQnLRl71lo/YJQ73vfvth6K5ki7GWIflWwi/9u2f2KVJqB8eyLCs4Os8HkIZveD7v5xz6QfK785hO0eMl0GptnEeR3HdOFvYH/Kkf6MrrtpOr3jOo2gX3y9oO3xwiPPiBAFBYPkREIV3CdpAqe4HI7Oxu/XRh9Nptz+W7vP/Xkwn3vNJ9P2f/MYWU6p73V1OdVbeB9zzVHtuLg8T21Ne9FY68R5Potuc8UQ6/3s/J7xaw2t6XPOZL15Atzzt0fTpcy6gz537HRv/3LnfxqlV7bAM5IyHPZ9ufbfH0+dZruc95SxSyuF3V8buuut3WkywbGR0pEbBSsmFZpXbXzrj3O7wnXHBGsjALgDYDQDrQPH2Yd+9t1mp5sXVlpjpKeXapP8Mlju89ZVPIaxxv8UdH0VnPfGVdj31mffs3c2k/7rVnJ4LVyj/tz76pnYfbez3ir7bL6dSZRzL8f6SZHcluOWd/h9hH96rt19Hn37vK+zDxcySqzMHHzqeer+n2/sbO1k86RH36giC9eAY807gsfB17/gY7bF1Eyn+swUUUfljvgCpCudtoa73l79dYsdLbEuGD9IwjmLLR5SAcQGWczzEYBxHu8Gd+diX4rQ4QWCNIrC6xBKFt8L2wmva357/QcJk3s/miY/4F3rfG59ts7VW9B+veDJd8Nm30Hc+/zZ626ueRrju5off0J6Hh10ebnHEoXTETQ5Gck535j1OoV9+/f30jbPfTF8/+030tU++iW5zqyM65c9khRm0y+4+d1v91h5YybH/JfDDXqOnnnh0R2ZY1ZEHPL5/zjvo3a/7V570X27P46NAYKGUsml4X/rI6+iudzge0Rlud/jOuGANZHz0nS+yffOCz76VHvPgMzoSzYcrCgFXWCsRh8N2TniljvhsDh8QoX2+8ok32j1jUR5fxs9Wdi3kzYUrLJPvePXT6Vv/81b6ybnvoTe97El2PMB6XMiNtfmPPuvuiFp351OOo/M+/gYbn8177EPuQed+7PX0i6+93/b9vffcMluxVZt32+NuRj845512H973vOFZdKh/mwWB7nC7Y+ibn/kPOx7ivsY48K+PPxOn7EdqKG8T7O25bbPFeS58bnTI/vY8+nVwr33BY/lKIlwT8srhtz/3NntePEFAEFh+BEThXf426NQA1octmzZ00iGCDyE+8Ikv00Pve3rImjeEkoABGNuPQZmet/AaOon9L2fDL4gIPIBNSA8agsZ6whfyom/O1Zfmw3WxGIPX/vvssaYskHNhAFnnw3Xblo3UqGdzXb7gfCyJwlZkUKQXfNEqK4g3WGOjs2+1CJxxv64ykaS6goAgEBkBUXgjA1oFuZ27xukFT30w3emkY6ogLzQFAUFAEBAEBAFBoFoEhPoyIyAK7zI3wELY41Xbve96EmVZupDiUkYQEAQEAUFAEBAEBAFBoISAKLwlMCQqCCwrAsJcEBAEBAFBQBAQBCpBQBTeSmAVooKAICAICAKCgCAwKAJhaIW5AAALt0lEQVRynSAQGwFReGMjugh62P8W/xAC+0fOdRnO46O12c7jOuw/Ods5yRMEBAFBQBAQBAQBQUAQcAiIwutwWHL/+z/5LR1/9yfQyfd5mt0/Ev9a+Dd/vLBTj4suvpzu9pDn2vPH3uWx9OLX/xdhD1gUgBKMcyfe40mE/Sfv+fAX0Be+8j2cmuGw2fqRpzyCENKa+okwgoAgIAgIAoKAICAILAwBUXgXhlP0Ukor+28+v/eFd9g9IrGP7Ds+8LkOn3/7j/+mm97oQLsP5zkffg2d+80f0bnf+KE9D8vwv9zldnafXew/iX9K8Yo3f5j6LcF//Ms/6FmveJe9RjxBQBAQBASBNYqAiCUICAK7RUAU3t1CVE2B2xxzBN3j9BMJm8ljj8i7nHJr+tYPfkmtdpuwVAH/aOKh9zvd7sOJDf7/5S63pa9c8GNbmb322EzYTB77n2L/yXve+bY0PjFJv/+/v9nz8K68+lp6wvPeRC995sPXxZ6mkFmcICAICAKCgCAgCAgCsyEgCu9sqCxD3nd/8hs6/MYHETZJv4qVVVQB/6kNIdyB++9Nc/1f9h//4g8oQgffYF8bwtL75Be8he5z19vTGaedgDxxgoAgIAgIAoKAICAIrFsEROFdAU2P9bdw//o49y8vr985bmtV3ne3Vsvommuvt/ll7/8u/Ce9+q0fpSc87F60dfMGwnKHF772fbT/vnvSEx/xL+WiEhcEBAFBQBAggUAQEATWIwKi8C5zq2PpwvNe/R679OCEY4+0tdk4NmLDZrNlQ3hTU9Os0G5EtOMuvuwqetxz/p3ucLuj6QkPv5fNxwdt553/I9ow1qA3vusT9Pp3fNwud/jUF75J553vlkTYguIJAoKAICAICAKCgCCwThAQhXeWhl6qrPNYMX3ss99I//bc/0dn3vPUDts9tm228X9ccoUN4f3tH5fRvnttRdS6P194MT3w8S+nk46/Bb3qeY8mY1xTjo3W6WmPvi/tv88etHnTmHW4YGy0QSONGqLiBAFBQBAQBAQBQUAQWFcIOC1pXYm8MoT9/HnfpWe+7J30vCefRbc++nCCtRYOH5/hQzZYe//701+1Oy9ceNGl9L9f+R6dfvJxtvLYfeFej3whnXCrI+nRZ92dLr9yu71++3U7WKmt2w/a8FFbcCONOt3tDrexyrElIJ4gIAgIAgtDQEoJAoKAILAmEBCFd5ma8Ze/+4vl/Nq3f4xOf+CzOu48v+zghU99CP32jxcS9uA942HPZ2X3WML2Y7jor3+/BAF98es/oLuc9ZzOta97x8dtvniCgCAgCAgCgoAgIAgIAl0Ehld4u7QktggEXvKMh9Fvz//gDHfvu55kqWArsvM+/gb6xtlvph996d122UKaJvbcXe9w/IzrQOu1L3isPd/v/fjL76aTTziqP1vSgoAgIAgIAoKAICAIrAsEROFd4c2MPXrxTylWeDWleoKAIEBEAoIgIAgIAoLAykRAFN6V2S5SK0FAEBAEBAFBQBAQBFYrAiuu3qLwrrgmkQoJAoKAICAICAKCgCAgCMREQBTemGgKLUFAEFg4AlJSEBAEBAFBQBBYIgRE4V0ioIWNICAICAKCgCAgCAgCsyEgedUjIApv9RgLB0FAEBAEBAFBQBAQBASBZURAFN5lBF9YCwILR0BKCgKCgCAgCAgCgsCgCIjCOyhycp0gIAgIAoKAICAILD0CwlEQGAABUXgHAE0uEQQEAUFAEBAEBAFBQBBYPQiIwrt62kpqunAEpKQgIAgIAoKAICAICAIdBETh7UAhEUFAEBAEBAFBYK0hIPIIAoIAEBCFFyiIEwQEAUFAEBAEBAFBQBBYswiIwrtmm3bhgklJQUAQEAQEAUFAEBAE1jICovCu5dYV2QQBQUAQEAQWg4CUFQQEgTWKgCi8a7RhRSxBQBAQBAQBQUAQEAQEAYeAKLwOh4X7UlIQWCcIXHn1tfSN7/xsXnfRxVcQ3P98+du0/bod6wQZEVMQEAQEAUFgtSEgCu9qazGpryCwRAj89o9/o6e86K3zuu/86Nf06z/8lV70uvfTxZddtUQ1EzYrBQGphyAgCAgCqwUBUXhXS0tJPQWBJUbg5BOOol987f0dd9rtj6XDb3xQJ41zD7zXHeh0zv/u599ON73RgUtcQ2EnCAgCgoAgIAgsDIGKFd6FVUJKCQKCwMpDQClFaWI6Tms3XPTmKfrDny+iJ7/wLbT9Wrek4ZOf/wY9/SVvp09weM+Hv4COu+vj6Xmvfg9dt2MXvfNDn6c7P+jZdIf7P4Pe97Ev0sTkdEfwHTvH6VVv+W977shTHkGPesbrLO1OAYkIAoKAICAICAIDIuBmsAEvlssEAUFAELieFdWf/+b/aGq6acHA0oavfusn9IFPfJnucfqJ9Igz70xf+Mr36MR7PInO/cYP6QH3OpXufscT6M3vOZu+++Nf22va7Zwe/a9voG/94Ff08DPvQq99wWNp1/gkPfQpryYowrbQavek/oKAICAICALLhoAovMsGvTAWBNYuAls3b6DPf/BV9JgHn0FPeuS96aTjb06HHrQffeZ9r6BHPfBu9K+PP5NudtghrPD+xoLwrR/+kn7zxwvp9S9+PD38/ne2ivIrn/v/aHxikn7489/bMuIJAoKAICAIrA0ElkMKUXiXA3XhKQiscQRGGnWq17KOlHts3UyNeo3SNOnk7bXHZrr0cveh2x///A+b/8o3f5ju95iXWvfcf3u3zbtEPoazOIgnCAgCgoAgMDgCovAOjp1cKQgIAgtEwJiZQ43SqnP15JRby/u0R9+XnLsvPfNxZ9K7X/dMOuXEozvlJCIICAKCgCAgCAyCwMxZaBAqco0gIAgIAkMgcMiB+9qr991rG510/C163A3229OeE08QEAQEgXWJgAgdBQFReKPAKEQEAUFgGATudNKtaO89t9BTX/xWuuD7v6S///NyGz7zZe+g87//i2FIy7WCgCAgCAgCggCJwiudQBBY/QgsqwRauaUJSvWG5UopUuWkjWulSbFDYnSkTu/79+fQPntupSc+/810t4c814b4L2777b0HiogTBAQBQUAQEAQGRkAPfKVcKAgIAusKgTe97In06fe+fIbMJxx7JP32/A/S/vs4xfQZj70/nffxN/SUe9mzHkGf/M+X9uT9xyueTO967TM6eTc8cF/6rzc/l3563nvs9T/60rstv8MOvUGnjEQEAUFAEJgfATkrCMyOgCi8s+MiuYKAILBMCGB3hwP23ZNg9V2mKghbQUAQEAQEgTWGgCi8a6xBRZzdIyAlBAFBQBAQBAQBQWB9ISAK7/pqb5FWEBAEBAFBQBAICEgoCKwbBEThXTdNLYIKAoKAICAICAKCgCCwPhEQhXd9tvvCpZaSgoAgIAgIAoKAICAIrHIEROFd5Q0o1RcEBAFBQBBYGgSEiyAgCKxeBEThXb1tJzUXBAQBQUAQEAQEAUFAEFgAAqLwLgCkhReRkoKAICAICAKCgCAgCAgCKw0BUXhXWotIfQQBQUAQWAsIiAyCgCAgCKwgBEThXUGNIVURBAQBQUAQEAQEAUFAEIiPwHIqvPGlEYqCgCAgCAgCgoAgIAgIAoJAHwKi8PYBIklBQBAQBJYeAeEoCAgCgoAgUCUCovBWia7QFgQEAUFAEBAEBAFBQBBYOAIVlRSFtyJghawgIAgIAoKAICAICAKCwMpAQBTeldEOUgtBQBBYOAJSUhAQBAQBQUAQWBQCovAuCi4pLAgIAoKAICAICAKCwEpBQOqxUARE4V0oUlJOEBAEBAFBQBAQBAQBQWBVIiAK76psNqm0ILBwBKSkICAICAKCgCCw3hEQhXe99wCRXxAQBAQBQUAQWB8IiJTrGAFReNdx44vogoAgIAgIAoKAICAIrAcEROFdD60sMi4cASkpCAgCgoAgIAgIAmsOAVF411yTikCCgCAgCAgCgsDwCAgFQWAtISAK71pqTZFFEBAEBAFBQBAQBAQBQWAGAv8fAAD//+i5Y1MAAAAGSURBVAMAxzcqdTPYB18AAAAASUVORK5CYII=" + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Create an interactive forecast visualization\n", + "from openstef_beam.analysis.plots import ForecastTimeSeriesPlotter\n", + "\n", + "fig = (\n", + " ForecastTimeSeriesPlotter()\n", + " # Add actual measurements (ground truth)\n", + " .add_measurements(measurements=forecast_dataset.data[\"load\"])\n", + " # Add model predictions with confidence bands\n", + " .add_model(\n", + " model_name=\"GBLinear\",\n", + " forecast=forecast.median_series, # P50 prediction\n", + " quantiles=forecast.quantiles_data # P10-P90 confidence band\n", + " )\n", + " .plot()\n", + ")\n", + "\n", + "# Update layout for better presentation\n", + "fig.update_layout(\n", + " title=\"🔮 Energy Load Forecast vs Actual\",\n", + " yaxis_title=\"Load (MW)\",\n", + " xaxis_title=\"Time\",\n", + " height=500\n", + ")\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "f1647ef2", + "metadata": {}, + "source": [ + "## 🔍 Step 8: Explain Feature Importance\n", + "\n", + "Understanding **why** the model makes certain predictions is crucial for trust and debugging. GBLinear models provide clear feature importance rankings." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "d5c6859c", + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAArwAAAH0CAYAAADfWf7fAAAQAElEQVR4AeydBWAURxfH/xdP8AR3d3eX4ny4eylWaHFKoUCBtlhxKVKguLu7u7u7ewIkIS7fvgl3XMIluUACkT/Nm52defPmzW+X5t272cXiwoULgQsXLoyxMue//wLr1msYaGVtE2hraxeYNUeuwJ79Bwd26v5rYNsfOkbquubNnx84+Z+ZgZOmzgz8b+48k7anzZgd+Pf4fwL/mTHLZL8x63nzguzN+W9uMN05/80LHDtxWuA/08O3YWzPVH3BggWB02fODhw/aXrgrNn/BZvHlP7XaBs6YkJg7grfB/b+bWTgXI2jMA3Pt3kaqwmTpyv28+cv+Ox1yPURrsY25FqO0/iICPvIZPCvxnzilBmBcgzNrvSN19Ym10muV2h6UdUu65+g+Shc5B4PbR7pk2sV1v0f2lhz24XFmAn/BAoz42tkPL599yHq/hFms+fMVX8n5T4y1mE95v4/ndeO1473AO+ByL4Hzp8/HygBLzTDMVZWLF8OWxsr5C9QEL5+fnjy6CF2bdmEqxfP4NWLZ5G6riWLF2PT+tXYvGE1li1dYtL2ujUrsGvbeqxfs9JkvzHrJUuC7C1ftjSY7vJlS7BjyzqsXxu+DWN7puqLFi3C2tUrsG3zWqxcsSzYPKb0v0bbnj17IH+OHz+OpRpHYRqeb8Jq66a1iv3ixYs+ex1yfYSrsQ25lts1PiLCPjIZrNKYb9m4BnIMza70bdPWJtdJrldoelHVLuvfqvkoXOQeD20e6ZNrFdb9H9pYc9uFxc6t6yHMjK+R8fjLly/L7YONGzdixfKl6u+k3EfGOp9XX/jZ9xXnIzveA7wHeA9E33tAS+7CQv3miMFFYGAg3rxxQZbMGVGqdBl4eXnh+tVL8HR7Cztbmxi8MrpOAiRAAiRAAiRAAt+AQCycMsYHvPpr8vbNG6RMngwVK1ZE8eLFkSpVSri5uem7eYxGBKx0vkhk4QIbC59o5BVdiSkEbHWe6v6xgH9McZl+kgAJkAAJfGMCsSbglUzvu3dvYWmhQ/z48eHt7f2N0XL60AhY6QIQz9IT1jq/0FTYHv0JfDMPbS181f1jqQv8Zj5wYhIgARIggZhFINYEvKFht7S0RKJEiULrZjsJkAAJkAAJkAAJfAEBDo0JBMIMeLdv345du3YFk02bNn3RuuShnNKlS3+RjYgMbtiwIVavXo3UqVNHZFiU6LZu3RoTJkyIEts0SgIkQAIkQAIkQAIkYJpAmAGvDDl9+jQGDBhgkH79+knzZ0vKlCmRIkWKzx4f0YF79+7FqFGj8OrVq4gOjXT9TJkyIXPmzJFulwajPwF6SAIkQAIkQAIk8O0IhBvwPn/+HGfOnDHI9evXodPp8Ntvv2Hr1q3YsWMHpkyZguzZs6tVSCZVXhe0c+dOiKxbtw5ly5ZVfaInlc6dO0MyxQMHDkTlypWxfv16aTaIZGTr1q2rzmfOnInBgwdjzJgxSu/nn38Oc341yKjIkiULevfujYCAANUqvomtLVu2KP/+/fdfVK1aFeKn+DtnzhxDNrhkyZLKz7///huS7ZZ+CZ6VIa2QwH3+/PnKjvSPHTsW1tbWWg/Qo0cPzJo1Cz179sSaNWswb948xSFevHjKpqxf9hoPHTpU2ZZMurAcN26cWp8YERtLlixRdmTu9evXQzLW0ieSMGFCTJ06Fdu2bVPXYdmyZZCgWqcL/frIOAoJkAAJkAAJfCMCnJYEvgmBcANeeeOBBGV6kYD2p59+QqVKlbB582aMHDkSyZMnVwGwrMDf3x/nzp2DBG7Dhw/H+/fv0b9/f+nC8uXL1fHkyZP477//1Hs0ZX+tBIGq40ORIEECJE6cWJ05OTmhQoUKSJMmDeQ9ag8ePEBY86tBRoUElXZ2drCwCFqqvb098uTJgw0bNqitDpJxlay1+DR79mykTZsWLVu2VBbEDxkrc8v79W7evImiRYuiQIECqn/8+PFq7YsXL8bu3btVu2TDpTNp0qQq+JSAXj4kXLx4EeK7r6+vWrus39PTE0+fPlVchK8ErGK7VatWYgJiQ9j6+PhAAn8XFxf88MMPqk+KyZMnI1u2bCrg/ueff9SDelmzZo0QH7FDIQESIAESIAESIIHYTCAoCgxjhRJ4SoCoFwlQq1WrpgK1K1euqJGy7UGCQsluvnjxQgW7EmiWKVMGfn5+kKBRFI8ePSoHnD9/XmVr9S+QV41hFFevXoUEgRIUSpAd1vxhmDF0TZs2TWVNJQMr7+0VvyRzu2rVKty5c0cFrgZlrdKmTRssXboU3bp1gwSsVapUUQG5ZHglUyzBsAT4d+/ehWSFtSHqRwLa+vXrY9CgQZg4cSIePXoECV7Xa5laEflwIEH2tWvXVCAtwapkoiWIVQa0Ql6tJvOuXbsW06dPVyxlW4hkdyU4l8yw2JaMcbt27dR+6y/lo037bX84OwmQAAmQAAmQAAlEIoFwA175Kr1p06bQiwRnEsBKsNenTx+IlCtXDhLcpUqVSgV8siWhY8eOKvsob0n4Un89PDyCmQhr/mCKZpxIAKvT6Qyasg4J3A0NISrv3r1Djhw5IFslpEuCZTmKyAcA47ES7EsAK32hiWRuJRMuHw7kw4W8Xk2fjQ455s2bN6pJgl39FpIDBw6oNuMiMvkY22WdBEiABEjg6xLgbCRAApFDINyA19Q0kqU8fvw46tWrF0wePnyI77//HhKg1qlTB+3bt4d83R/Shk73McCUoDBkf3jnYc0f3tgv7U+SJAnevn2LJ0+eKFP58uVTRylk/2xY65FgVqf7uPb06dOrwHnGjBlo1qyZyiC7u7uLqXDl8ePHSke2QKiKUfEt+Ri5wSoJkAAJkAAJkAAJRAsCnxXwSiZTXi0mD3tJBjd//vzqwTVZkQS7VlZWkGAuZ86caNGihTQbRL6il6/9bW1tIV/NHzx4UPW1bdtW7dOVB9RCy3AqRa0Ia36tO9J/SpUqpR5kk0ysrFcefJOH+WSt8nCdrLNSpUrIlSuX2hIRmgOyB1n2EAsb2QIi2ylEN2PGjHB0dIS8tkyyt9IWnujnr1OnDsqXL6/+sY1ffvkFJUqUwNfmE56v7CcBEiABEiABEiCBb0ngswLeP//8E7Jf9ddff1VvGJCHt9KlS6fWIftipSJHeYOA7OWVc73IHlzJSspRxku2VAJB2Sc7f/58SPAsupINlaOIcV3Ow5pf+o0l5FjjPlN10Rcx7pO9wwsWLFDBpLxJ4fDhw6pb/HBwcFBvSpA3Tkgw/8cff6g+sSGiTj4U8iYHV1dX9dCarFUCXnlYrmbNmlixYoV6WE729erHyVHkw3DDmyb0bb///rt6o4Mc5S0T8oCcbMkQv0K7PnpbPJIACZBArCPABZEACZBAKATCDHhr1KgBeRMAQvyRzGbXrl1Rq1Yt9UYAeVVWgwYNlNaNGzcg2xnklVqNGzeGiGSCVadWzJ07V43r0KED+vbtq7UAkpmUr/Ql6JW9wqIv/0CFdDZp0gQSTEpdL2HNr9fRH/ft26deOyZ7daVNbMvr1KQuIr5LQCt1EXljQ8isdO3atSH+Cg95OE30ROR1bcKgS5cu0Pv++vVr6YLYbNSokarrC/FBeEgmV/ok+JUH2mTt8mCa2JLgd8iQIWpISBv37t1Ta7l165bqlzc/yLaS5s2bq+0jMlbaIsJHGWJBAiRAAiRAAiRAArGYQJgBb3jrlgBOgi/JbIbUlYfb5AGvkO1yLllM2e+rz1RKm7xyS76ml7q5Etb8odj4rGbZlyv+it+mDMibHSLiu7zJQoJdvS1Zu3xQ0J9H9Ojs7KzeABFy3NfiE3JenpMACZAACZAACZBAdCLwRQFvdFpIVPgi78+Vf9TBODCPinlokwRIgAS+PgHOSAIkQAJxhwAD3jCutbw3d8KECWFosIsESIAESIAESIAESCC6Ewgz4I3uztM/EiABEiABEiABEiABEgiPAAPe8AixnwRIgAQAMiABEiABEojBBBjwxuCLR9dJgARIgARIgARI4OsSiJmzMeCNmdeNXpMACZAACZAACZAACZhJINSAV94rS2mj3q9LDrGHg/xLefq/G/L+Yl7bqLm2MZEr7w3eC6bu2/Tp0+v/l6H+VUtTOmyL3feOtbW14R6Q9+bzekfv6224WCEqJgNeuZgZCmbDPd1rChnEmnsgZZ4M6NCpo/orIP8ASIHqJfA2q45CBshUPg86du5kuDeqNamGrNWyUOI4g3y18mHk3yPVfZEhQwb06NsDOapno8QhBhUalEeXn7qoe0D+ga26LWujcJ0CERHqfkVeNbT/d7dt21Zdr5CFyYBXlPZfOIZhCydQyCDW3AM7Tx9AYECA3N5KLjy5hilHF1HIAIfvn0FA4Md746n7Exx5dpgSxxmce3UWvn6+6v8XUnj5eOL4i2OUOMTgvts9BBj93nD2eI3LLhco0ZTBS88X8lfVpIQa8JrUZiMJkEDkE6BFEiABEiABEiCBKCXAgDdK8dI4CZAACZAACZCAuQSoRwJRRYABb1SRpV0SIAESIAESIAESIIFoQeCLA95u9drhfyUqR4vFxDQnLC0sMKnrMGRMkVa5niR+IsS3c1D1iBYOtnaQ8Yj1f6LPAhPaxoeDtV30cYiefHMCAf4Bwfb7hXTIz9sPnm+8IHoh+3geOwnINXd/6Q7Pd16xc4FcVbgEFvddEaqOxzsPPLjwyNB/88ht3Dl5z3D+JZVtk3bB+fGbLzERbOyxFafg8iS4PU9XT6wetkHp6ftDrkl1mlnI2rdO2AnxXViYOcwstS8OeH9v3Qvd6/9g1mTmKG0dsRATugw1RzXG69jb2KFnw44okbOQWsuDJSdwYMIaVQ+rKJu3GB4vOwVbaxuD2vZRS/B0xRnDOSuRS6BbqdZY1nx8MKMHf1yMpSHaginwJNYQ2DN4L3b+uvsTMV6gBDYHRxzG4+NPjJtV/eWVV9j/50Hs/X0/Do06jN2/7cWZOWfh5eqt+s0pRF98eHwiuP0HRx4qv66svhrMzJ1dd1X7zS23VPuR8cfUudg4OPKwajO3CG1uc8fHVr2Tc05jc99tn4jzHRe15JP/ncb2gbuw/+9D2PPnPuwddQDebkHX/N6h+8HH9duGE7NPwdfz40NyykhYhdZ3Z/89Zef0/LPaGX+iAwFfL18EBgQaXPExuqbeHj54/8bD8MHY9ZU7Lu28YtBNniUZkmZwMpybqsgHZpnDVJ9xm5e7t+ZHADzdvBAY+NEfY52QdR9PH/hqH8yl3c/XD8bzZC6aAQ6JPibl3r/1gJ+PP3w+rE/fH3JNYstc8dHmL1S7AArXLYgTq88EC9hlDRJM623JuvR1c45fHPCaM0lEdApmyYOCWXJHZEis0W3y14/4cVL/cNeTxikl0iRNBWtLK4Nuz2lD0PjPHw3nrEQugRzJMiJX8izBjHbf+BcG75wUrI0nsZdAxgoZUKp3SSMpYVjspRVXVDDr4+5jaNNXXt94jfMLRJTYWQAAEABJREFULiBFvuQoP6gsKv1RAYXaF8T7lx7wfO2hVwvzKEGQ800XWDlYaQH1Y5O6T04+hX5+fz9/3D9wX+nJLwmplOpZAslyJ0WSLElQ9tfS0mSWmDO3WYZiqVLiDIlQ/peywSRR2oRqtfGSOqB0txKo+Xc1VOhXFl5vPXF7713VJ4WFlQ6VfiuPiv3Lo2jbQnh18zXuHohYdu/BsYewdrDG8ysvIB+6xC7l6xB4dvM5lvZfjZWD10GyuPI2h/1zD2PzuB1YNWQ97p6+H8yRawduYM0fG7Fz2l4s7bcK7i7uOL/1Il7ee41Vvwfp3z5xF/fOPsCeWQdwec81NV4CuwU9l0H+Xl/cdQXrhm/G+pFbcGLVadUfVrFn5gFsHLUVy35dDQlQH156jB1T96gh8v+GuT8tVvXT688pn9b8sUmtRdaxcuA6LP9trSHjLBlcd2d3+Pv6Kx/Warqbxm5X46XQ9xuv6cq+69D7LjoH5h+BrEHqpiRXhRxIlT0FUmROhgTJ4sPlcdCHx/+6LFKM1g/fouZeo3FcP2KL4izrMGUrZJvZAe+KwTPgvfUuAnY+wvtNtzCsbR+DrRxps+DFqvOq7/7i4yiXt7ih78/vf4HX1jsI3PUY79ZfQ/3S1Q1952bugNjdM2aF6js3YztSOiZHhfyl4L7pJm7OP4QZPUbh7qKjhjEn/9mMjX/OM5y/WXcFnWq1RNbUGeG28YbyQXyU9kZlaym9h0tOYuGvwQOTZyvOYlzn31W/bMuQ+WTcnYVH0KZKI9UeXjG9x0hcmrUbM3uOwus1l3B62laILVmv2BIRHvotC2KvX5MuBh7PVp6TJoOMbD8AP9X9Xp2HZWfuLxOUzotVFxSn9tWboX2N5hjTaZBql+IDd5PchdPO0UvVdRQfT0zdBKcEiWVYnJayGYrgdLc1uP3LTlztvQVnu6/F4EpdUTpDIVTPVhbxbRxwsedGJbKdoW+59mhdqG6cZhaXFm+X2A4JUsU3kgSG5WevlRXlBpSBpc2n/0u9tu4GkmROglwNcsIukR2s7a2RLGdSpZ8ofSKDjbAqz849h04LjvI2zQPXJ26fZIZtE9nCIak97u2/r8w8Pf1MC4JsYO9kr86lsLCygM5CB50OkLq0mSPhzW2OjdisYxPPBglTJQgmVrZByYg89XLDMZMjLK0sES9pPIXBNoGNOuoLaY+fPB5S5ksJaztr6Cws9F3hHt1fvYeH9qGpWPvCQADw/FLor2QK1xgVPouATqdD7X7V0Xp8Mzy/+QLe771Rvdt3qNC+LM5uvhDMZs7y2VH/t1oo1qAQEiZPiIcXn6BgrfxInikpmvxVH5mLZjTo5/kuF65qwaI03Dh8C9lLZ0WAXwAu776GGj2q4H99quHW8TtadvXTD9kyRi8lmxVDs5ENkb5AOm0+0x+WRVeCxkza/C1GN9L8KwwPLXvbYkxjfPdjeVzX5hcdvdw4cguW1pZoNb4J6g8MirP0fXI0XlOeSjnhlC4J7p15oHy9f+4hcpbNJmphykMtMHfTst/p86dTevL/rroDakF8evv8HSr8UAbiq5WNFZwfuSid8Aqz/mZJQNm0Qh1MXT8PDYZ1wNrDW5E7fXaD7Qwp0uLQpRPoM/MPJE3kiLGdB6u+OiWrQLY8nL11GT9N+Q1unu5YM3Q2kmk60P6kdkoBsZstTSbsu3AU207th7vne9x/8QgD/xuN3+eNwcV7V5EpZXoVkCWKlwDFchREzeKVYKHdZBJYJ46fCPvOH4Wfvx/2njuC9uP6oNnwrnj33g0L+0/WZgEu37+O5pXqqTHS0PK7+iqwXrhrtRZcl8TUbsMh7x2WtT15/Rxzfxlv0BX90CRt0lTImyknWlVuiJPXz+Ho1dN46+6KRbvWKB+6TBqgzZMMywdNVybE3zEaG5mj6+TfMG/HCtWuL9IkTYl0yVKr07DsLN+/QekMXThOcRJ2aZOlQirtw4J0hMc9vn08lM9fAnO2LcWfiycqpl3rmn5Rs9iLCxJfC2ZnN/oLOm2xfx+YjQmH58HB2h4pEjjh+su7uPVa+8vq74txh+Yq8fD1RMoESZEqQTJtBH/iAoGnZ57i6rprBrm2/rph2bYJbGHvaA9diGBFfkF5ungiddFUBl19Rf4Hbm7g+fjkE6QqlEoFyhL4Pj31VG/GcJR/KOPB4YfaV5B+uLvrLrJUy2zo+5KKOXN/if2vNzZqZnr78B0urbkcTDxcPAyTSTbsyoZr2P/3QSRMkxDpSwb9AheFAO1r71u7buP6tps4Nv0E5H7IUOpjv+iEJY9OPIZ9EnsVVDtlc8KD44/AP1+XQMLkCaD/mv/hxcdwfeWGI0tP4OL2y3BMkySYM7J1Yb2WbX10+SlkW4JkbIMpGJ2kzJpcy+gG4PVDZ1w7cBN5KuXAizsv4e/jj6PLTqg5UmRJDi+3sPeGx0vioKzaax+2w9N1SGSvdO0S2MEhsQN0Oh3stbqPFsSrjg/Fu+euSJ8vLSwsLD60hH3IXy0vLu26iptHbiNTkQywsQ/+oS/k6GfaB4cDc4+gZu8qsHX4qGsX31b5ZBvPFg4Jg3y1T2gXLgO9fQt9JaxjIi3QlH4Xt7fYceoA2vzdE02Hd5EmJTvPHFBfp09aOwerDmxGgQ9bEvo3+1m9tLt0z3qYsWkRqvVvqQB1r99ejZPi2NUzSN+qOOoP7YCBc0ergPfBi8eYsn4uVhzYhCV71okamlSog5/rtoObhzssLSzRpHxtNChbE14+Xrj99D7ua2Paa8GuBMD1y1SHj58vHOyCgIhdaytrdKjZQtn6telPePL6mRZMX1OZah9fHyzatRp21rb4b9syWFlaoVG5/ynd8AoJ0BPXz4Vag9qix7QhWLxnrfpgUDxnQdQoVhHePj7ImDKdMjOkTW+1byfb92Uxc/MiFayqDhNFWHZ2nj6gRszUmAqne8+D/0/OHO49/hmCntOHqn9UQnjXL11D2YyrReN81WFtYYWu6//A7FOrMOfUanj5eSscLp7vcPfNI/hoAe/Cs+sh4hfgr/pYxB0Cvp5+8HTxMhLPcBev369pp2Vgw1UORcHzjSfcn7ojbfHU0FnokKpgKjwOsY9XhibPnxw22i+HM/+egb9vAFIXTiXNXyTmzv1Fk8TwwYH+AfDQPtQYi/DXL0v2cro+cVUfRPy0e8jXw1ffBcnKvr7ljDf33sBHa/d29cbNXbc/9odTe3jiEdIVT6O00pdIq+x4uwf9f0s1sviqBNLnT4tEKRKi6k+VlBRroGXejTy4oQV833Ush1Ja1jVJ6qBvVW20b3yM98kaqSNvlVzYN+cQHLRgNWHyhJAAV/Qrdiir7JdpVQL2H4JU43Fh1SVQ9HgX9P8uj7dBx7D0TfUlz5IML++9Ul2erp8G3OKj8ZrS5UuD92/e49T6c8hfNY8aF1px9/R97PvvECRrnjS9U2hqn9VuVsA7Uwusrj64iRHt+8Nz6x3IFoGyeYuZnNDZ9Y0KSKUzddIUeKwFllIXufrwlsrEZk/7MfPg6uEmXaGKqxbgik15E0TrKg2x5cQeXNPsdKnTRmVnL94N2uMiWc2Xqy/i744DUSRbfi1otTTYPH/nKp45v0CfRp0hwXv+zLlUUCoKGVKkhQTDs/uMhYhkeyWozpwqvXSHKxJY+xv9Kyzy1oULs3ahc61WyJI6AwICAww8ZK5Hr7RPdmZsHg/LTnhOmcPd2IZk3vUfDozb41I9i2N6BGr/nXx8MS4tm2uNAIEMZdOjSIdCBincPuhh07BMSOZX+j3ffPpLQdrNEdmeIHr39z+A7AWW4MnrrRfcngX/f6dkWzJVzgjXJ27IXCWTCo5l3JeIuXN/yRwxfWySTElQolOxYJIgRXzDsqxsrVDqpxKo9mdl6CwtINlefaeFlU71SX+FX8qiWIcikIcQ5YOGXie0o8v9N5Dg+eW1Vzg97ywen36iVJ9o30SoCouvTiBVjpSQ7OO6vzZhxcC12DMrKDmldyS39vX+run7sWzAGjzSvrKXdskCS6Zf9vzKFgWdTqdlMaUHyF46C1xfuiFftaAgUba85KmcC5vH7lBvRlgzbJN6aCxI23SpWVMdFrqgcM8pvaPKLosPWyfuVH0hC82FkE3qXN+eNk9qSNC8qPcK7J6xX/VJoe83tSbZzuGYJjESp0okqqHK8RWn1LaQLeN3qL3EBxce/URXp9N90mZOQxCBcDTdvTyQp+N3yNKmNH6fPwbJEjsatguENdTF9S30X7OLntQle/r41TM5DVV0uuCLkSxw8ZwFkTNdVoxbNVNtBSiduyhypMuCTcd3KTt/fP8LJHiOXyc7cravgOGLJ6t2fTF90wLkTJ8V438cAn8tOzdh9SzIn1dvnfHizSskrJczmPy9Yrp0R1gkEN93/ggSaVnfAj9Ww5Erpw025EODY4KgT3WGxlAqYdkJ+BAwW1p+DOqNzXwud2Mbca2+584x6LT/0iZKaXLpH5Cb7GMjCYRGwMLKAnaJ7fDk5JNPVNRXmr7hf1MgWwoSpkkA2acrkiRTYsheYWkPaTRdibRIWyKNkpB9ETxX6jKHuXOrASxCJaDT6RA/eTyE9eEnYeqEarzbC3d1DKt4ePwRbOLbIHG6xLBLZId4TvHgkNQB0h7WOPZFHoFU2VOqvbR6izqdDhXalUG9gf9D/cGaaEfpaz+9tRwg+1mbjmiAxsPqovX4ZshXJegB/UZD66JO/5rIVjILitQtiAI18il9WwdbdJjZBpmLfNzbm1cLeBsMro3/9a2ONhObQb5hkKxoSJHXh4mekxbgirFCtfOj0P/yQ6fTQeaTvbdN/qwPvW+SjS5QI6+oIkuxTKjYvqyqO6ZJgga/11H1er/9D0kzOEH8qq+trfGf9dT+4JZ/Nw7WLycyh35Ncv70+nMUqBm0Lo+3HuqBPlM+txzbBD/800rxaa0xKt+2tAw3+CknsnfXIbGDVNV+5rR50qh6eIVZAe/gVj0w9ee/8O69K6asm4sXb17D0zv8jMXy/RtgZ2OnHg6T/a7LBk1T/sjeWVUxUVy+fwP5MuVSmdicWkArKisPbELyxEnxXgu8z9y6hOkbF6isbDw7B+htSbBrY2WDXOmzQV7zNaBFNxlqkLErZyJAy8TKtgbZ6+vr76f65u9cCXlQTh5Aky0NubXxywZOg2SMlUIECx9fX2UvbdJUaFahDioWKGWwINszEjjEx+Sf/kDejDmwVJvH0BmiEpadveeCXin0Q/VmcEqQ2LAnWm/ic7jrx8bV44G7pxCgZePXtp6K/hU6YXWrKeohNT2P4w/PI56NPbI6pUfGJGmg0/7T9/EYNwiorOpTN7jp5dnHoESCV9mvKyQC/QOhr8t5zno58O6hKy6vvKoFO54qK/fq2msc/vsoXB+5ikqoInN5v/NGnqZ5kKt+ToOkLpYaz848/+RVQxJg526UC5JVDGlUfAoMCNTGaN+k+wWE7P7kPKJzf2Igjq01ZEYAABAASURBVDT4vPeB61PXYOLn4wcfDx+cX35Rtfv7+eP1LWc8u/QcSbM5BiPz/vV7uL90h/NtZ5xdfB6S9U2SPnEwnZAncr89PfcUWSplRt6GuQ2Sp14uzdZ7iM2QY3j+9QhYaJl8CQpNzWjrYANrO+tPuuzi2X7SFlaD7K0Nqz+8PvsPe2DD0wurPzwf9GvyeBe0p122fIRlL6r7zAp4Jbj9uV47vF57Ge82XEd8+3joOP4Xg2/ydJ/+RPvfqb6qZWP/xd5zR9C3yY94tOwUyucriVHLpqq9s3ol47HSJhnceHb2eLv+Gk7+s0WaIPuCRW/XmYPq3EMLtm89uacC4Icvn6q2X2eNUEfZTnB86qZP/hEGb18fHLh4TOnInl5V0YoZmxZBHh7rUruN2q5x5b99aFi2pmbbU+sN+0d8EjHWGrF0CnKkzaLWu2zQdEiQLYGU6MjDfg9fPkGPBh1wafYeSNZa2gM+pA/Floi0hWXH2e0tDl06gYldh6lr8n21JtovsUAZpmTcKjO4a1/fK+UPhX7eD6dx7iD37dDdU2FlYYnvC9eHnfbhSfbsevoGfbBbfXkH3ni6YvsPc7C7wzwktk+gmMu4OAcrji74/oEHODbpxEeZeNxA4szsc9g9cC/8vPxwY9NNVXf7EBAnz5MM+VrkxcvLL3Fo1BHsG3YA5+adh72TvRKDEROVJ6eeqsyuvB3CuDtNsTRqrjf33ho3h1k/NvkEXl19jTd33uDwmKNh6kpnZM4t9mKrvH3wDgfHHwkm77QPMvKh+PXN16p9W/+dOD7zJJJlT4ocNbIbUAT4BWLfqIPY//chHPv3pPZByR+lu5WCtcOnAZFhkFZ5deO1phuItEVSa2cff5LnTKYC5kennnxsZC1WE5BMZ+aiGdUbHoyPkpmNLguXh/rq9q8JC4ugkPNb+Rw0ezhUxmtf/1tVT488HSoheeP8SNooH45dO6tGpWhSEDUHtlF1KfrNGg6bmpmkqqTyr83g2CAPyvVuAPv/ZcHAuX+rdilCjpW2nVpQa/+/rGqulE0LShO8fL1hUS0dGv7RSZ1LkeOH8pDtC1IXOXlDy8DVyYbSPeoqH5NpfuqqppUug3zXrxmkTbLEhkat0n5cX8j68nWqjEytS8K2Vmbsv3AUmVOmD1Vke0a9oe0VC82E4Ue2QjjUzoryvRtq/mVT2yRSNAlah+z1zdCqhFpb5jalkLJpIeXPqoOb1fhUzQqj2oCWqh6WHVEo36cRkjbMq/yVAFfYOGrn0icSFnedxmX21qWipiR/56pqy4o6icPFmss7UXhqQ+Se+D90XDMY8p7jKy/uKCIS/Baf1gQVZrVBsWmNVfBbakZztFv1m+pnEbsJVB7+HaqNqfKJ6FddrEuRT/qMg9RUhVLiuz8rotIfFdQ7cKuM/A7FfiwC63jWkHfnhiZZa2RBhUHl9NMYjglTJ1DzOWZOggxl0pvUEeVy/csgR+2gAKtM31JqjKyj/MCykKxjaPNKuzlzyxxxWYp3LIra42t+Ik5ZHCFBa5Uh36H6X1Ug7+CtPqKK2uerz75nKpcx+LixNVGuVxkkTpdIXRtvd2+EJmJf5tXvEddfA52FDrX+roGcNYOuub6dRxIgAcCsgFdABWhZSHno7NU7FzmNkLxxf4fDl0/BW8uymjtQ5pJMrrn6ej0JxD/HR1nf5fs3IG97EFupnVLi1PStoYq8P1j0TIms89DlkwjNf1lbyDcrfI4dyfTq/TU1/nO4m7ITV9oO/bgE53usx+EuS3Hwx8V45+WORec2BFv+E9cXKtgN1sgTEjCTgLW9tdpnaWEV9L/et1qG9vTsswhNXlx6iaj68y3njqo1RUe7EvgmSJnA5NfYofnrcveNlhE+Fao8u/g8tKFsJ4FvTSDazh/0f91o6963c0zeLuGkZUxDE8mwfjvvOHNUEBi2+x9sv3kIpx9fgrxvt+T0pggIDH+vY1T4Qptxg4C8O7V075IITdIUTR1lIL7l3FG2qFhiWLY+yFsbQpN0xYJ/exlLls1lkECUEmDAG6V4aTwmEdh28yAGbB+PXptHYdbJlfALCP8J+pi0vmjjKx0hARIgARIgga9MgAHvVwbO6UiABEiABEiABEhACFC+HgEGvF+PNWciARIgARIgARIgARL4BgQY8H4D6JySBMwnQE0SIAESIAESIIEvJcCA90sJcjwJkAAJkAAJkEDUE+AMJPAFBBjwfgE8DiUBEiABEiABEiABEoj+BBjwRv9rRA/NJ0BNEiABEiABEiABEviEAAPeT5CwgQRIgARIgARiOgH6TwIkYEyAAa8xDdZJgARIgARIgARIgARiHQEGvLHukpq/IGqSAAmQAAmQAAmQQFwgwIA3LlxlrpEESIAESCAsAuwjARKI5QQY8MbyC8zlkQAJkAAJkAAJkEBcJ8CA19w7gHokQAIkQAIkQAIkQAIxkgAD3hh52eg0CZAACXw7ApyZBEiABGIaAQa8Me2K0V8SIAESIAESIAESIIEIEYiigDdCPlCZBEiABEiABEiABEiABKKMAAPeKENLwyRAAiQAgBBIgARIgAS+OQEGvN/8EtABEiABEiABEiABEoj9BL7lChnwfkv6nJsESIAESIAESIAESCDKCTDgjXLEnIAESMB8AtQkARIgARIggcgnwIA38pnSIgmQAAmQAAmQAAl8GQGOjlQCDHgjFSeNkQAJkAAJkAAJkAAJRDcCDHij2xWhPyRgPgFqkgAJkAAJkAAJmEGAAa8ZkKhCAiRAAiRAAiQQnQnQNxIImwAD3rD5sJcESIAESIAESIAESCCGE2DAG8MvIN03nwA1SYAESIAESIAE4iYBBrxx87pz1SRAAiRAAnGXAFdOAnGOAAPeOHfJuWASIAESIAESIAESiFsEGPDGrett/mqpSQIkQAIkQAIkQAKxhAAD3lhyIbkMEiABEiCBqCFAqyRAAjGfAAPemH8NuQISIAESIAESIAESIIEwCDDgDQOO+V3UJAESIAESIAESIAESiK4EGPBG1ytDv0iABEggJhKgzyRAAiQQDQkw4I2GF4UukQAJkAAJkAAJkAAJRB6BbxHwRp73tEQCJEACJEACJEACJEAC4RBgwBsOIHaTAAmQQNQRoGUSIAESIIGvQYAB79egzDlIgARIgARIgARIgARCJxDFPQx4oxgwzZMACZAACZAACZAACXxbAgx4vy1/zk4CJGA+AWqSAAmQAAmQwGcRYMD7Wdg4iARIgARIgARIgAS+FQHOG1ECDHgjSoz6JEACJEACJEACJEACMYoAA94YdbnoLAmYT4CaJEACJEACJEACQQQY8AZxYEkCJEACJEACJBA7CXBVJAAGvLwJSIAESIAESIAESIAEYjUBBryx+vJycWYToCIJkAAJkAAJkECsJcCAN9ZeWi6MBEiABEiABCJOgCNIIDYSYMAbG68q10QCJEACJEACJEACJGAgwIDXgIIV8wlQkwRIgARIgARIgARiDgEGvDHnWtFTEiABEiCB6EaA/pAACcQIAgx4Y8RlopMkQAIkQAIkQAIkQAKfS4AB7+eSM38cNUmABEiABEiABEiABL4hAQa83xA+pyYBEiCBuEWAqyUBEiCBb0OAAe+34c5ZSYAESIAESIAESIAEvhKBaBfwfqV1cxoSIAESIAESIAESIIE4QoABbxy50FwmCZBAjCNAh0mABEiABCKJAAPeSAJJMyRAAiRAAiRAAiRAAlFB4MttMuD9coa0QAIkQAIkQAIkQAIkEI0JMOCNxheHrpEACZhPgJokQAIkQAIkEBoBBryhkWE7CZAACZAACZAACcQ8AvTYBAEGvCagsIkESIAESIAESIAESCD2EGDAG3uuJVdCAuYToCYJkAAJkAAJxCECDHjj0MXmUkmABEiABEiABIIT4FncIMCAN25cZ66SBEiABEiABEiABOIsAQa8cfbSc+HmE6AmCZAACZAACZBATCbAgDcmXz36TgIkQAIkQAJfkwDnIoEYSoABbwy9cHSbBEiABEiABEiABEjAPAIMeM3jRC3zCVCTBEiABEiABEiABKIVAQa80epy0BkSIAESIIHYQ4ArIQESiC4EGPBGlytBP0iABEiABEiABEiABKKEAAPeKMFqvlFqkgAJkAAJkAAJkAAJRC0BBrxRy5fWSYAESIAEzCNALRIgARKIMgIMeKMMLQ2TAAmQAAmQAAmQAAlEBwIxK+CNDsToAwmQAAmQAAmQAAmQQIwiwIA3Rl0uOksCJEACQQRYkgAJkAAJmE+AAa/5rKhJAiRAAiRAAiRAAiQQvQiY5Q0DXrMwUYkESIAESIAESIAESCCmEmDAG1OvHP0mARIwnwA1SYAESIAE4jQBBrxx+vJz8SRAAiRAAiRAAnGJQFxdKwPeuHrluW4SIAESIAESIAESiCMEGPDGkQvNZZKA+QSoaWlhBTtLO0ocZ2BraQvjPzqdjvdEHLsnrLT/FxjfAxYWlrCxsKVEUwaWOkuE9ocBb2hk2E4CJBAnCZw7dw7Z4mdD+2wdKHGcQf30DXDq9Cn198DFxQXOr13QOvP3lDjEoHCSojhx4ri6B86fP4+UNqlRI1VtSjRlkCleVu16nVDXK2TBgDckEZ6TAAnEaQJ37txB4waNUb9uA0pcZ1CnPqZPnq7+Pri5uaFHlx7aPVGfUjfuMKhbuy5Onzyj7oF79+6p/zfU0+4LSn1ERwZ1/lcHN27cUNcrZMGANyQRnpNAxAhQOxYSSJAgASjRg4G9vb3hDrO0tOR1iSb3po2NjeG6SJ1/X77874uFxceQzMHBgff6Z97rhhszROUj3RAdPCUBEiCBuEigYMGCWLh4IRYvXUyJBgxmzJqOlClTqluxe6/uWLJsCa9LNLguk6ZOUtdEislafYnm05Kli8Hj5zFYqt3XPXr1EJxInTo1Zs6eSZafcU8tW74MxYsXVxxDFgx4QxLhOQmQQJwmkDVrVjz0uI/NT9ZSogEDnS2QNGlSdU9my5YVx50PYsvTtZRvzCBb5mzqmkiRVasfcd4PyuczuOx6AVm1+1t4yv2u0xLox10OgRIxBk+9HyFnzpyC8RNhwPsJEjZEJQHaJgESIAESIAESIIGvTYAB79cmzvlIgARIgARIACADEiCBr0iAAe9XhM2pSIAESIAESIAESIAEvj4BBrxfn7n5M1KTBEggxhDwdPOCm7N7jPGXjsY9AoEBgXj7/B2cH7t8lcW/ePASJ7adNmuuOxfu4urx63h4/THuXrpvcszBNUfw9tW7YH13LtwLdh4ZJ8Y29604FBkmv9iGrNvPx++L7YgBr/fa/6vehP//qsicU+b91sKA91tfAc5PAiQQ7QnM67kEs7ss+ETEcXeX91jYdxkW91uB5YPWqOOV/dekK5hsnbJLjb9++GawdnNOJFD5r9siNd7ttVuwIQt6L1PtIf1bN3Kz0lv9xwZD/7KBq1WbucWX+GzuHObqmdJ7cv15d15NAAAQAElEQVQZxEdTfdG57dy2iziz+fxXdTEwMBD/dV+MVdr9sG7UFsztsQQPLz2OEh8C/AO0wPURnt17DldnN8jcz++9UIGsh5sHvD294f4h4BLdNy/ewsPVE+/feSBeQgfES+Sg/JLATILf10+c4ePlA9Fz08Y9uvEYAQEB8PLwxo75u/Hi4UuIrhpkopC5XZ1dVY+Me3rnmRovDa4ubtq87xGazeTpk4mamltsSBAvDX6+fnj3waanuydEpF38fPHwlVSViM9S8fP1h8wl9XevXXH/6kPDGGkLTxb8uQSPbz8NT82s/stHr2LDv1vC1Y3MOY0ne3TzMab/MlvJrIHzsGfZfkgwL+vTtxsf37t6wHjMnMELsGbqBnWPGdsNr86ANzxC7CcBEiABjUD+qnnQ6Pe6Bmn4ex2tVX4Cka1kVjQf3hBtxzdHpiIZcWzlKfh6+UqnEm/tF/OTq09hG88G1w5GPOB9dOUJAvwCYGFlgetHbimb+qJe/1oGnxpp/lXqUE51pcubRh0bDKqNDPnTIVX2lGj6ZwPVZk7xpT6bM8eX6kjw7/zQRQVLXu5eBnMSPL168FoFWvpGH08fdU2833vj5b1X8NMCFul7/+Y93jx9K1WDiC1/LUBxd3GH2JGgzNCpVcKyr3VD5pAPKXKU8S/uvlJzS5+/n7+a782zd8pvuU/EF9GVfhF1rt0zUhc/xB+pi66cS118cn7kAg8tSJTz8ESn06F8m9LoOK0NOvzTGtlLZsHx1UHZV7Epc8hR2Ehd7Hl7+AStXwsu5dxcWTF2DS4cuIy7F4MytWL37N7zkMBzwbClkABm2ZigD1/XT97E+f0XDaaf3nmGh9ceq2Bw3pDFuH/lIdb9swkPrj5SOvuWH8S1EzewdvJGvH/7XgW9YkMfTCqlD4UEt2LjyrFr2PTvNnXN5w9ZosbP+32xCqLF3hrNVmg2j248rqwtHrECku3dv+owzuw6Bwla9yzdr/quHL2GW2fv4Pn9F1gyciUuaOtZPXG96ls9Kej45sUbyFyyPul7cuspjqwPsq0UP6OQ9ZkaFlq76Mp9I8fPEfngImJqrNzXptpNtcl9JfdAva61UbFxOVw9cR3b5u9C0lSOqNO5pkHK1C2pMvoWFjoYj6nUtDwSOibA4lErcFm7tqbmMNVmYaoxZrbRaxIgARKIOgLxHePBMU0SgzilcVSTxXeMj1JNiiFB0gRaQGuLPBVzQoKd53deqn4p7py8BwlWK7Qtg9cPnbVf1B7SbLZcP3QTaXKnRq5y2XHz6O1g4xKnTGTwSXw8vvKUCm6L1Cmo9CytLKGz1EFnAUhdNZpRfKnPZkzxxSqHl52AbCVZOWQ91o/equyd2nBOy7KvxM4Z+7Cg93LcP/9QtcuHkOWD12LhLyuwcdx2TWcVdv27H0sHrsHqvzZi49htSk+KNcM3YZmWrV82aC3WjdyClcPWQ/+LPiz7GzQbizT7C/osxyPtQ4oct07ehZ3T92K+lol/evM5XJ68wZ3T93Hv7AOI30dWnMSt43exafwOmVrJjSO3sXlC0Pm98w+wfPA6rNUy9vN6LsXJdWfU1pkl/VepMeL/lok7Df4pA6EUEuTqtOBBp9NB7hvXD98WyByLNXsL+i7HZs3Wwr4rcGjxMe2bi+XYMGYbFvZZoXH2DMVq8GYJZCQLW+fHmiharXBQpw5wSOCgZemewFv74CFBV+KkiSDB36mdZ1FMrxekrco7F+8hW6Es+K55eWQvkhXQbED7U6tDNVRrWxkuz9/AKbUjEmh//yo0Lovk6YIysZqK4Ueyw+myp0HllhXRamAzFTxnzJNenecslh23z91VuubYtItni3o//U8bWwEPrgcF32qwUXH58FVUaFJW+eelfWBxf/veqPdjVbLAFpYWKFnb9PtiP2qarj249giTu0/H6B8mYGqvmXj+4IVSPLH9NEb9MF61j+08GZcOX1HtUpzZcx5/d5yI0e0nYN+qQ9JktkgAPeGnfzCq3XglkvmVLL0YkGs9d+gi/N1horIveq8ev5auMEX+X5QmSypkL5wVpeuU1DLeD2AXzw7psqc1yLVTN1GwYn7Yx7dXtvRjsuTPhKqtvkPtTjWwceYWyP2kFMIpLMLpZzcJkAAJkIBG4ObxOzi89LhBjiw/obV++vPo8hPV6KgFx6qiFbKNIVvxzEifL50KfG8eCx60aiqh/khW7+Hlx8hROitylMmmZfQ8VdYt5AAJyCTIk/ZqXStBp/sQIUjDZ8iX+PwZ033WkModyyNeEge0m9RCZdiF1fntl9ByVCO0Gt0EJZsUxdmtFw224yWJp2Xhm+GHyS1VmwR/7ae0QttxzfD89ksVSKoOrchSLBM6TGuNZn81gNtrd/VBJTz7Xm5eqNypAtpOaK596EiB1mOa4PsJLdB6bFNkKZoRV/ZdR7IMSSHXUoJP8bvi92W02cL+8fX2VR+k2o5vhsK1C+DK/utIly8tvp/YXK1FgmjxP2wrH3tlHRd3XUFW7Z782Ao0HFgbwiNDgXR4fO0pWv3dBJINtotvi4eXgu5rY31TdWtbK1hZW6kuuSelcvP0bchWhma/NET6XGmlCSX+Vwxb5uyAbGGInyS+ajMuEjkl1IJjb9Uk/qqKUSHXzujUZFXG+WjspFOCIlt7Gy2r6yun8Pbyga2DjarrC3NsWlpaQr5tsbaxgnwNL2PFthxtHWw1+z5ShWTprTQd+fArDRI0yjFFhuRoNaiZFsTZYemoldIUYdk6bycKVSqA3tN+RtaCWbB7WVCmWQLBn8d1wsAFv6DWD9Wwdf5OZfvVk9fYsXA3araril/n9ELh74I+DKtOMwoLCws07lFPjf3l3x7wcPc0BNOb52yHpaUFfp7QGT+P76yumb+/vxlWg1SE4Y3TN5EsbdKghg/lk9tPce/yfVRoVPZDy6eH3CVyqsZnWmZdVcIpGPCGA4jdJEACJCAEfLSvd+WhNGORdmORr69PrD2NXOVzIF5iB9Xlpn0t7vz4DXKUzQb5hSpBhmRsVacZxb1zD5RWxgLp4ZTWUdm9pmV8VaNRcXrjOS1oe4H/9akOG+0Xu1FXhKtf6nOEJ4ykAXe1rKkEGFsn78bKoetxdvMFvH7gDPlKXaZIlsFJC3JsYallvRMmS4CUWZLD0toStlr2zkoL1FxfuUH/J2XW5LDQftEnSp5QcX90+SnCsy9bR1LnSKllqmwhQd/lvdewoM8yzPlpkcrqyj2ktx+Ro2TuJUi2i28HCapuHbuDx1oGedWwDVjz1yZ4vffGg4ums44h55GvnrdM3gkLSwuUafYxwyj3q2R9RT9J6sRInCIRHBLaqw9Och5y24fomRIbWxuk1jJ3835fhB3zdiuVVJlTqi0Jy0avwv3LQRn39DnTKr9L1CqqdEIWabKlhq+3H+YOXoTrp2/BWgseQ+rIeeZ8GbF8zBpI1lPOjUUyxC8evMSSkSsgWxvSatleNxc3iB8vH75CxjwZjNUN9bBs6pUSagG5h5snlmprOrnjrGrOVy4PTm47o9rS50gLOy0AzlIgExb9pWXO/92udG6euY11UzdBHoyTPtUYgcLV2RWyPaJkrWJa0GyvZUdL4KGW8ZWssdxzu5buw7Q+s7D5v+2G+14y3YmTJUL+snnVfZnIKUEEZgxSffnoFSSz+49mW/Ylu711Vx33rz5AmbqlIB9Q4ieKp+4r1RFO4fXeC5INHtNpEtzfvUdNLUA3HrJt/i6UqFFUfSAybjeuW2p/j3XaNxa+Hz7UGPcZ1/V1Brx6EjySAAmQQBgE8n6XCzW7VzFIjZ8rB9N+++Id5GvoVNlToEzzEoY+/RaEizuvYNfMfSoAc3d5D+cnLgadsCrXDt5QgfLe/w6q8ZI5uqVlmyWw04+TYOf8tksqu5gkVWJ982cfv9Tnz574MwYac/By90bC5AlQo3tlJbX7VkfzEQ0h20k+Ma0L3mJhEfqvQ9nPHIhARMT+7VP3cHH3VXW/dJzeBkXrBs+qBQQEBnNAnyUM1hjKiZcW4BZvWEStUdYqWehCNfKFov2xWTKem8Zvh6+nH+r/9j9Y21l/7DSqWeh0kEBC36TT6fRVs441fqiClgObovOYH1C1dSUkSZEYnUa3Q6Ne9dDn325ImtpJ7eN1SGCPdFpgKEYlWCxWvTD0R7keuUvmhNiS/Zpps6VRWwqSpnESdXQa1U4dZbtCwx51tMAcWrB5OpjIvt4f/mytzVsfP/zVWtPRoUnfBmjUuz6a/9oIEjDJNoXQbGbIlc4wj34+WUtjbbxM3m5YKzTsURfdJnVG/vJ5IUFl2yEtIP3ftaggKhD/ZM4OI9oq/3OVyAHZXlGv6/9Un1KKQOH7Ye+5fGCRYbIGOfr7+WPh8KWQDxwt+zdBl787SLMSnU6nBcd2qv45heyT3bPiAGTvbNcxHZCnVC6DGRs7G1jZWBrOza1Y21qj3ZBWkIxx55E/QAJm/dibZ2/D+bkLytYrpW8yeXx86wnk73/arKlN9odsDP1veEhNnpMACZBArCTw5Yt69dBZy7RtRJocqbQApyqMg4Ubh28haXonOGgZX5GU2VJAsonSHt7M8nDUy3uvIZlDGSuSqXBG9ZXqoytBT9i/e+mqAuH8VfMgUyHTGavw5gnZL759rs8hbUXleSIt+yp7eCWQk1/4aXOlgutLN7x59hYJkyZQv+QfXX6iAp2I+iF2AwICcPfMfciWhvR50yAi9uUhNNn3KdlRyew+vf7c4IJkUsVHaRC/JfMs11HGeLp64sGFR9IVqqTJmQpXD1xX65N1ylYK5ydvQtWXDrG9+q+NUoV8ELC0stAyqL4qYFCNkVzY2tsGsyjZRwmO9I2Saa2iBcP6c1PH10+dVSa0ZvuqkO0BpnSkTeymyJAcEiAbiwTK0i+ZVgmgpS5iowVbcgxLxGZY/fo+sa2v648h7YfUsdK+VQhrPXo7po5OKR2RIEl8XDx0GbJl5Ny+C0iVKQVk/6unljXNVTwHkqRIEuwNBlkLZMZzLdP94uFL+Hj7hLoH2dR80uauZXOTJE8MyYjLnGIHH/6I7YNrj6o3SJzefc6QVf7QHerBUsvOOqZMAhs7m2A68ndu+4JdKN+gzCd9ekVPdy9cO3kDy8etUUGxuSwZ8OoJ8kgCJEACYRBQWdnHLnD+ILJvUtTlqfb1IzcjZdYUKNagMORBIMn2ytPz8hT9+7ceqPB9GUjWVy/y9fSt43fVLyyxEZrcPnEXFlpgUrljBcP4cq1KIXHKRJAHmyRY2jJhh/rqWbZK6H2To7xvVeyKTqC/lp8MAKQubWHJl/oclu3I7pN90ikyJ4M8ELbol5VwSueI0s2LY+f0fZjz8yLIQ17CyTBvuInKj1nXI8tOYE7XRdg964DKzsre24jYz14qC2S7xLweSyEPr0lAq/cjU+EM8NDui9ldFyr78uEiZdbkSm9Rv5WQe02va+oo91OAX6Ban9iQB8skoDWl0t7sBgAAEABJREFUq2+T+SVwf3n3NeTBOvFL5H44wbV+vC5cdnpN844pM6aAZFDD0i5dtwTKNSz9yf5OU2MkwJa9wMZibiBkyl50bqvcoiJ2LNqjHkA7vOEYKjUprz7UlatXGisnroU8uHZ4w3HDEiSwLFQxP/77fSHGdZ6i3pZh6AxZMXGev2weeGnBtDyYNrnHjGAPMFZqUg5if+WEtbh/9YEabefw+dnkCwcuwdfHz+SDjOLDyO/HYVL3adi7/IDa31u+YRk1pzkFA15zKFGHBEggzhOQh3zWDt8Evaz5kC3TB75Prz/D6j82YNXQ9Ur2LziCG0dvqz23EpgZA8xeOhvkNVnPb70wbv6kfu3QDfVgkXHGWJTk4TXZxuDh6qHe+CBB9doRmwy+iY8bx24VVawbsVnt73x28zlWDlmn2sIqvtTnsGxHRV/dfjXVQ2etxzZR5vNWygXZQtBieEP1QFfDQbVVuwSJ8mFBnWhFw4G1IdtUtKr6aTepBdLk/PjVaJXOFdBmbFPIg2uF/1dA6Uhhrn3ZR91iRCO0GNkIP0xpiZajGuN/vauJCSRwio/WY5qqh8Kqdamk2ur0raG1NYE8JNb0j/poNLiuas9aLDPEjjr5UMTTvi2QdcnDdy01++JjxgLp4fxE+0AWiiTLmBSd//3+E8lUKD1CzlG0XiHU6Fb5w2xAta6VUKJRUcM5K1+fQM+pPyFj7vRq4twlcuLX2b3QdUxH9P+vNyTzKh2l65RAn+nd0H3ij1pfB/XwmrSL1Pi+KnpP64ZfZvVUD5i1HtAM8h5heVeyKfH29IZ+TnnDRo/JXTWbHdFPGy/tEmSLXbnPa7arquaVrGwCLfssmXVTNqVNsvZZC2ZW+jI+pMjDeH1ndId8I2DcJ2PkYTyR3+b1VWsoXr2IsUq4dQa84SKiAgmQgBGBOFn9YXIrdJr5/SciMHKWzf5Ju+jW6lEVpZsVR8vRQYGY6OolqZaJFB3J7Hm6eaqMialjk6H1Ia8y04/TH2X7QsfpbZHAMYHJucV22/EtlHrjofUMOi1GNoZ8/W9qLn1bycZFw/RZ3uerDEejQj109uHNAOKWTqeDvJEhtD2qomOO2Ce0h6X21WtIXZ3OfPsS3Mp+xZA25Fw9FGbxMXXqkMgBkhWWPnNE1ifrlK/r5cGdC9svIzSRvY7m2KROzCAg96XsGZZrb+yxbG2Inzi+cZOhbh/fDsbbLeS1cBcOXoIpcXV2M4zTV2S+kIHo5aPX1CvJ5AG0VRPXoaKW8fXz8zdpU+aR/bl6e1/7yID3axPnfCRAAiTwgcDlfdewddKuUOXN87cfNCPv8C3mjDzvv44leThR3rbwdWaLnFkk0/Zdh/IITfQPOUXObLQSRCBml7LfVzK/piTka8JCW2nRKoXQTcso/zC0lcq65iuTRwXVpmxKW+naHx/oDc1mVLUz4I0qsrRLAiRAAuEQKFQzPxr9XjdUcfrwj1uEYyZC3d9izgg5GA2UHdMkgbz+Kxq4QhdIINoTkC0M8qBcdHeUAW90v0L0L0YToPMkQAIkQAIkQALfngAD3m9/DegBCZAACZAACcR2AlwfCXxTAgx4vyl+Tk4CJEACJEACJEACJBDVBBjwRjVh2jefADVJgARIgARIgARIIAoIMOCNAqg0SQIkQAIkQAJfQoBjSYAEIpcAA97I5UlrJEACJEACJEACJEAC0YwAA95odkHMd4eaJEACJEACJEACJEAC5hBgwGsOJeqQAAmQAAlEXwL0jARIgATCIcCANxxA7CYBEiABEiABEiABEojZBOJKwBuzrxK9JwESIAESIAESIAES+GwCDHg/Gx0HkgAJkEBMJECfSYAESCDuEWDAG/euOVdMAiRAAiRAAiRAAnGKgMmAN04R4GJJgARIgARIgARIgARiNQEGvLH68nJxJEACX0iAw0mABEiABGIBAQa8seAicgkkQAIkQAIkQAIkELUEYrZ1Brwx+/rRexIgARIgARIgARIggXAIMOANBxC7SYAEzCdATRIgARIgARKIjgQY8EbHq0KfSIAESIAESIAEYjIB+h7NCDDgjWYXhO6QAAmQAAmQAAmQAAlELgEGvJHLk9ZIwHwC1CQBEiABEiABEvgqBBjwfhXMnIQESIAESIAESCA0AmwngagmwIA3qgnTPgmQAAmQAAmQAAmQwDclwID3m+Ln5OYToCYJkAAJkAAJkAAJfB4BBryfx42jSIAESIAESODbEOCsJEACESbAgDfCyDiABEiABEiABEiABEggJhFgwBuTrpb5vlKTBEiABEiABEiABEjgAwEGvB9A8EACJEACJBAbCXBNJEACJAAw4OVdQAIkQAIkQAIkQAIkEKsJMOAFEKuvMBdHAiRAAiRAAiRAAnGcAAPeOH4DcPkkQAIkYESAVRIgARKIlQQY8MbKy8pFkQAJkAAJkAAJkAAJ6AlEPODVj+SRBEiABEiABEiABEiABGIAAQa8MeAi0UUSIIHoSYBekQAJkAAJxAwCDHhjxnWilyRAAiRAAiRAAiQQXQlEe78Y8Eb7S0QHSYAESIAESIAESIAEvoQAA94vocexJEAC5hOgJgmQAAmQAAl8IwIMeL8ReE5LAiRAAiRAAiQQNwlw1V+fAAPer8+cM5IACZAACZAACZAACXxFAgx4vyJsTkUC5hOgJgmQAAmQAAmQQGQRYMAbWSRphwRIgARIgARIIPIJ0CIJRAIBBryRAJEmSIAESIAESIAESIAEoi8BBrzR99rQM/MJUJMESIAESIAESIAEQiXAgDdUNOwgARIgARIggZhGgP6SAAmYIsCA1xQVtpEACZAACZAACZAACcQaAqEGvFlSZ0T1ohUosZBBXL2uOdJmgU6nizV/ebkQEiABEiABEiAB8wiYDHi3bt2KXEkzYWijHhQyiDX3QKG0ubBl8xbz/mZQiwRIIC4Q4BpJgATiCAGTAa+zszMG/PIrhvw2mEIGseYe+O2X/njy5Ekc+avNZZIACZAACZAACegJmAx4pbNo0aKgFEVRcohV94GFRai3vNz2FBIgARIgARIggVhIwORv/xo1aqBtl/ao0uR/FDKINfdAq87tUK9+vVj415hLIoGvQ4CzkAAJkEBMJWAy4E2WLBlWHtuCKn+0ppBBrLkH5u9bjaRJk6q/q69evUKFjMWwtOFYChmgSe7qePHihbo3WJAACZAACcQ+AiYD3s9fJkeSQMwgcODAAUwbPRkrpy+ikAFmjpmKzRs2xYybl16SAAmQAAlEmAAD3ggj44DYQuDy5cu4cOEChQzUPRAQEBC5tzatkQAJkAAJRBsCDHijzaWgI1+TgGxtGDNpHChkIPfAxH8mI3369F/zFuRcJEACJBBnCESHhX52wNu7bgdUK1hOrcHBxg5J4iVETP9jaWGBSR2HIGPytGopsqb4dg6qHtEitjCJ6Lpjin7FihXx1OINxl9ZTCEDXPS6gxr/qxlTbl/6SQIkQAIkEEECnx3wDmvRC52qtVDTbR+2EE/nn1L1yCy2DpmHCe0HR6bJMG3Za4F7zzrtUSJ7QaX34L+jODBypaqHVZTNVRSP5x6HrZWNQS2qmBgmYOWLCbxwe42Tjy9SogWDb3sdHr59hsDAwC++p2iABEiABEggehL47IDXeDk95/yBxn93MW6KlHrBTHlQMHPuSLH1OUaa/P0Tfpz+W7hD0zilhIi1lZVBN6qYGCZghQRIgARIgARIIPYR4IqihIDZAW+FPCXwevE5BG64D+81N5HAPr7BofZVmmJMu4HqvE6xynBfcRWdteyvZD1dl19W7aVyFMYzLQscsP4enBefx9gP+tIpGdWHWjbVf91d+K69jQuTt6nMbsokyVAhT0ll7+aMfaIappiau9v/2sJr9Q3IvCL3Zx82bFkQY/0adFb9si7xT9r0MrLNr/ipZht1GpaduT3GKp0XC88oX4WHyBijNf7Zso9hnnfLLqF+iWpqjBRuy69g5x+L8H7lNeXnibHr4ZQgsXRRSIAESIAESIAESIAEvpCAWQGvnbUtdv65CDZW1hi6dAIGLx4H/wB/w9RptQxnqiTJ1bmjFqjFs3PAtC5/4faz+zh89RRkP+v+kcvh4e2JFuO6Y/H+dfhFCzTL5CoC2SN7cNRKFUAP0Wz/tWIKMqVIhyUH1sPd6z3uv3yMgYvG4Pcl45X9sApTc791d8WifevQbOzP6DJ9IFJqQfTyflOVmXK5i6lA/YnzC3SdMQjz9qxS7foijVMKpEuWWp2GZWf5oY1KR9iIr/suHkNaIyYSiP/erAfO3rmCn2YOhpvne6wZMBPJEjqqcfHt46F8nuKYs3M5/lwxGcWyFUDXmq1VHwsSMEGATSRAAiRAAiRAAhEgYFbA26FqMy3YtUHj0V21gGwKxq6bpYLXsOZJ36EUKg5qjlp//oAfa7RS4ydunKOGHL1+Bu+9PCD7Zbt86Gvyd1eMWPWPsp+weV6cuXMZ7p4eePDqMaZsno8VhzerseYUxnMvPrAeUzfPR/FsBVGjcAV4+/oYMrxDmveEvIooW5cKmLl9iQqsQ7Mflp2d5w6pYWJDfL338pE61xf9G3WFr58vSvdviBnbFqPa0NaQf+K2e+12ehX0mDUMsg1i2LJJas31S1Q39LHybQi0K9wQZTIUVpPbWdkioe3HbzWg/amdoyJGVOuNIqnzaGdAgZQ58GeVnmicN2Zeu/DWqxZpRmFlYYkk9onM0KQKCZAACXwpAY4nAfMIWJijli9DDvVAx87zQYGdOWNevH1tUJPxciJbBGZ3Gw2RgMAAld3Nkz67sr37whFRiRQxnlveunBhynZ0rt4CWVJlQEBgICy1X8gyUYZkafDo9VPVJudhSVh2whonfakdU+Cx83OpKrn66Db8/P2QPXUmdR6ykAywg619yGaef2UCPUq3QbN8tdSs/zUagSM/LlN1KfZ0WIBxtfqjfMZiKJexKEZV64NVLaegStZSqJ2zkqjEOAlrvWEtZm/HhaicpZRB5fvCDXCi60qkTxz07YihgxUSIAESIAES+EYEzAp4ZVuCTqdDikRJP8vNB6+eqKA2eZsikOytXiT7K306nQ6502U1aVsHncl2cxslg7zv0lEkapEPBXrWxJFrpw1Drz66BdkGYWgIoxKWnQAteJeh+kBa6sbi4vYWqT5s+ZD2VEmSwcrSKlgQLO2UqCEQGVaH75uB7pv+UqacHBIjXaKU6LbxL5Sb1RKTji5A1WxlsPnGfpSe2RztVg9QejG5MF5veOtImzAF0mii19t8fR/6bB2NZ26v9E08kgAJkAAJkMA3JWBWwLvqyFYVsK4bOAsFM+XGyDb91J5bcz1fuHetUj0+dp0KmiVwHtq8J0a1+RVLD2xQtlf3n6ls50yTBUdGr1H6lx/eQL6MOZHIIQGkXTVGsPDx80XKxMnVntpmZWujYt6SBguyT1gevpvccSjyapnmpX2nGPpCVsKys/fiUaX+Q+UmkI48VsMAABAASURBVIfN9HtzVaNWyB5fOxtbjPthkPJj2S9Be4gX7g1ap6bCn2hAoFjafDjx0yrc7LMDV3puRjybj1l22abwa/mOystd7eep4/j/DcD57huwtNl4td2heray6rxWjgrY1m4OBlXsovSk6Fm6LSQTKnXZGrGp7Uxc770NN/psx8mfVhu2ThRMlUtlkvXt+jllXHhyrvt6yDxbv58FqYvPchRbIqd/XgPxUW/H3PWmT5xa2RMbIsZ2VrWYrMz1r9BJrX3C/35DzmSZMbxqL+3vdYDqq5CpGE5paxSulzWuv5brqNql2NB6OlZrmfFjXVYoFvu0bHHRNHmli0ICJBD5BGiRBOIsAbMCXm8/H6w6sgWlchbGuUlb0btuR/XQWiCC3ltp/P5K2RMbkqbsaZUHxnJpWdznC09DZGhz7ReiNv7m03vov2AUsqXOqGxfm74HeTNkVybGrZuFeLYOeLvsEk6O36DawipMzT1i5T/IkSYzHs09jmVaoCnZWBGxs/bYdjzUss896vyAS1N3onj2AtIMfX+gtjz92sKy46xlcA9dOYmJHYfg9eLz+P67Rtov+0BlS4px62dDguK+9TspP8rnKYFRq6fh4oPr0q1Ez1KdaEXIc62JP1FIwMbSGvMbjYKNhRUmH12IiUcWaPd4UMAm06aMnwzJ4jlKFWuv7FLHWSdXYNyh//DvyeXanRyI4w/PY8LhuTj39CpSxHdCck2UolZIPZlDEq0GTNAC5WxOGTB41yT02zYGt50fIItjOsg+4SVNx8LLzxu9t4zCxmt70LFoExRObd6r+eJZ2+Pnkq3gG+CvfHnj6aqOv+0Yj56bR8DN+z3G1PxV+RCR9coDqrI2U3ZmamsXgwfunVRrX3p+ExLYxoeDtR10OgvEt3HAjHp/wMffF3/unabYdCzWBPKhQMYJl3wps+P0k0sYuf9ftfe3f/lO0kUhARIgARIggUgjYFbAK7M1G9sNKdsWRdE+tWHfJAesG2ZF0zE/Sxcaju4Cx1ZBweKi/eugq5dRCxoDVZ++mLVzGWwbZUemTmWRp1sVWDXIjIGLxqpueQhO7El76nbF1PYDaH92nj+k5pL2VN8XQ+YU6UIV2SZgau6/186EQ5OcKP9bE8RvmkttqUihrUMzrwKaDB3LKH8ydyqHlNoc4rtktKU/leZLtaFtpIqw7IhC+YFNkbR1QbW+cVqAa8xE+iv/3hKOLfOj3IDGsG+cw7B26ZM5Z+9cLlUl+XvU0HyqqupfvYijEzbJVwPWWtDbbdNfmHZ8CeacXqUCT1M4JLiT9tWXt2Px+Y04cO+UnOL0k8tYeG5DuF/l22vBoH9gAFy93LHp2j60XNFXjWue/3/Kh3lngr4ROasFzh6+XpA9sWoCM4oJh+eh3qKu6LphGPbcOQYJUiUArZq1NHz9/WBvZausRGS9T1xfhGpH5hCDxx9eUGsQBnKulxYFakMeYhN/hFWbVb+q4FcCeb3O4ftn0H3TcCw4uw7bbx5UGWJ9H48kQAIkQAIkEBkEzA54ZbIX715D3p4g9c8Vec2YPLQVIOnTEEak/dmbVyFaAWlPEj8RTo3fFKqs6Dftk3H6Bm8tQ33o6il4+Hjpm4Idxb5koYM1mjgJz45kemV9JoaqpjfvXXH42mmIHdXAItoQyJE0k8rSHnlwNsp9+vvAbO1e9MQ/dYfgep9tkG0B8laDHEkzqrn7lv0BI6r2UhKoBcYOWuZWdZhRSFZXr1Ypcwkc77oSv5brgDwpshke1pT+HBFYb1h2xFZYkjlJWu3DbwAuPr9hUHvs+lzLlgdluw2NHyriv7zB5MMpDyTwTQlwchIggdhDIEIB77dctrzlwEnLoIYmkmH9lv5x7phNQDKTOuggD6RFxkpkG4CtpY1JUxL8FZveGE2W9sCKi9uQP1UOFZQ+cXupgu4SM5qi0D8NDNJp3WCTdsJrlP287j4eKDC1HmrM64jpJ5YahkRkvWHZ0Ru00Jn+X8lz99eQvnSJUupVkSKeE956uRnOWSEBEiABEiCBqCZg+rdUVM8aa+xzIbGFwLabh1SwOb3uUORKlhl9tCyr8UNrEV3ntVd3UTRtXsgbDOR9vTWzlzeYmFpnMOSr/quajuz/9Q8IgASm6z/sDV7VcooKvCX47laqNfqWbW8YG5GK2JRtGlkc00PeEfxjsWaG4RFZb1h2xOA7LXitpGWTZQ+ycWArfRuu7VFcJ9T6DakSJEOvMt8jno0Ddtw6LN0UEiABEiABEvgqBBjwfhXMnCS6E/D198W2GwdRKHVubGgzAz8UbggJRAO1cE181x9V/cN2nJDbcgI/tIuOBLKS4ZU3M8jbHMR+wAdbNlrm94/K3dWbIPZ2XADZIzvm0H947PoCQ3ZNQVbHdJC3Foh01wLewA/jxG54Yqw75mDQP/Syqe1M9Y7gRHbxDcPFH3PXG5YdMbjs4haUSJcfF3tsxOgavwBGHO6/eYIpRxepLPaBTovxU4mWaq/zP8cWy1Alxj4b11Uni5hDgJ6SAAmQQDQmwIA3Gl8cuvZ1CfTaMhKlZjZDg8XdkG9KHeSeVEu93UC86LbxT8g2BKkffnAG2SdUx6v3LnKqJMeEGtC/sUAajjw4q2w0XNINeSfXgWxTKDClrnThx/VDtLbaah7pqzr3B0gAKp0rLm1Fnsm1UWlOW9Sc3wk5J9SEPIiWIn5S9e5fyaCGFMkiy1jxaeWlbVJVIlsnZM6my3qp+cUH0VGdWmHuesOzI/7l1XwWf1uv6Ad5H7HMo1+TPASYZ9L/0Hx5bxSf3lg9pKdNr36Ed8e1g1VdCgmuRVfqFBIgARIgARKILAJfM+CNLJ9phwSijICzx1tceXkr0uxffnHLEMwaG5XXdMk8+qDQuE/qkvW94/JQy+0Gve1kfuPRWNPqH5MiWyBkTGhy/tk1vPF8Z7I7IusNy45fgD+M/Q05mfTLWye4dzckGZ6TAAmQAAl8DQIMeL8GZc5BAl9IoOb8jio7KhnSkCJZ0i80z+FfnQAnJAESIAES+JoEGPB+TdqciwRIgARIgARIgARI4COBr1RjwPuVQHMaEiABEiABEiABEiCBb0OAAe+34c5ZSYAEzCdATRIgARIgARL4IgIMeL8IHweTAAmQAAmQAAmQwNciwHk+lwAD3s8lx3EkQAIkQAIkQAIkQAIxggAD3hhxmegkCZhPgJokQAIkQAIkQALBCTDgDc6DZyRAAiRAAiRAArGDAFdBAgYCDHgNKFghARIgARIgARIgARKIjQQY8MbGq8o1mU+AmiRAAiRAAiRAArGeAAPeWH+JuUASIAESIAESCJ8ANUggNhNgwBubry7XRgIkQAIkQAIkQAIkAAa8vAkiQICqJEACJEACJEACJBDzCDDgjXnXjB6TAAmQAAl8awKcnwRIIEYRYMAboy4XnSUBEohqAs7OzkhmkxzFE5ahRAMGOn8LvH//Xl12lzdvkMMuL4on0K4N5ZtyeOX8Ul0TKV67vEJOu3yUL2CQziYj3ri4CE51v+v8dchum4cSQQaJLZzw6tUrxTFkwYA3JJHIO6clEiCBGEhg3759GDt8HP6dMIsSDRgMHzoC9+7dU3fSmJFjMHXsP5ip+UWZ9U05/NKnn7omUvTt/QumjZtO+QIGk0ZPxpjRYwUn7ty5g7+GDsc/46ZRIshgzPAx2LZtm+IYsmDAG5IIz0mABOI8gXPnzuH06dOUSGPw+Sxv3rxpuB/d3Nx4TaLBNZG/G0+fPjVcF6lLG+Xz73NhJ/e3HuqNGzd4r3/mva5nGPLIgDckEZ6TAAnEaQKOjo6YMGkCJk6hRDaDHr26w8rKSt1fTZo2weQpkzBpykSKCQZT/pmMYsWLKVbx48fHn8P/1DgJL8okdd+Yx2HqtKkoULCA4pggQQKMGDVC3Xdy78VVmfLPFDg5OSkmKVKkwLTp0zQmk2OFyFqSJk2q1hayiDYBb0jHeE4CJEAC34LAd999h+QZkyIwpS8lkhlUrlwZiRMnVpe1RYsWcEhjA9vUlhQTDBKnTYCmzZpC/uTPnx8582RHvHQ2lAgycEqfCHqOhQsXRvZc2ZAoQ4I4LWkypULVqlXl1kLdunUh58kyJUFsEFlLrVq11NpCFgx4QxLhOQmQQJwn4OXniTfezt9KYu28AQEBhnsrIDAAbn6ucPV9SzHBwMP/PQL8/Q28fP194e7rSokgAy9/TwQEGHP0gYefe5wW3wAfw30VGBgIvwBfCKfYIH4Bfoa1haww4A1JhOckQAIkQAIkQAIkQALRgEDkucCAN/JY0hIJkAAJkAAJkAAJkEA0JMCANxpeFLpEAiRgPgFqkgAJkAAJRA4BP7+gLQH+/v4ICAjAu7eu6ijWpc3Tw1OqSry9vNVR2qUScqy06UVsSb/xGGmTfn2b1N3d3kP0jOth2ffy9BJVs4QBr1mYqEQCJEACJBDTCfj5+sHXx9ewDKnPGDYH7u/cDW1fWjl35CKePXj+pWYiPP7Rnce4fv7jK9wibCASBwhnb6+P+0Qj0XSkm1owZTFG9P07mFw+e9Uwz4M7DzFr7H+Gc6ns33rQoD9mwAQsnbkCnu8/BoKiE5pIcDe63zis/G9NMJVl/6402DT2R+7R7Wt2Gvr2bzsYbFyIE8Op3sdnj54b2sypzJ2yAL6+vpg7dSEun72CI3uP4T+tTfwe3H0YVsxfjWMHTmDFvNXYtGordm/Zi8X/LlOmF81cCr3ehuWbVJu+uHDqIv4ZPRMzxs3Gy2cvMX7oJCydvQL7tu3Hzo27MX/6Ihw/eBIbtXG7N+8NVg/N/oblm7Fr0x5sXbtDP02YRwa8YeJhJwmQAAkAoztOxF9txnwiwub1U+dg7WN/nIx9qw5Jl5LQxvr7B2BSzxmY/9dSpacvLh6+rOy9eGT6XwvS68lx+fjVSnfrvJ1yqsTDzVO1iV/SIDoiUo/rsmXJDiyetMKAIUC7BvdvPISvT1BWy9DxBZXju07i8b2nX2Dh84aeOngW21ft/rzBkTxq/YLNGNlzrFlWNy3eimN7TpqlGxVKd2/cQ9ZcWVC3ZW2DpM2YRk31z18zMHbARDy880id64sXT1/Czt4OTX5oiGoNquDq+WtYPX+dvjvM49mj5/Hi6QtIQOrh7mHQLfVdCcP84otOp8N7Nw9YWlmi1HclkTBRQqROlwpFyxYxjAmrsmXFNrx+/hqHdh4JS+2TvhLli2sB6B44JXPEowdPNB/c8ea1i9LLWygPWnVqjivaet1c3dG4TQPcv/1Q9cnDb6qiFaLXoGU9rRb8p1KNCqjyv0q4ff0OkqVMhtY/tsCNK7eg0+lgb2+PtBnSwF0yvNoH03TaNdDXxYop+5fOXlZjbWytRSVcYcAbLiIqkEAsIsClfDaBUrWK48eRPxiks1Y3NtaiX2NIW7n6pXF44zGc3HnG0G1qrKWlBZr0qIdHNx/j/MFLStftrTs2zt6GCo3KIkW6ZKotvMLS2hL2sMqnAAAQAElEQVRn9p6Hq4ubUjX+xaAaVKFTZXQuXj55BcmMPr77BIe3HcPD24+Vu/euP8CR7cfh+sZVneuLN6/e4pgWXJ7efxburu/1zdDbeXL/mRr3+pmz6pPgwvm5C1xdXHHt7A2IXdWhFZKVOnf4Ai4cu6R9nfrxiX6tK9QfyWJePnlVzXHt3A3IuSjXaF4V2fNnlaqa483rt7h+7iaO7z4FD3dP1a4vXN+44ZTm/4Vjl82eVz82Oh+/q1cBnQa0M8vFO9fu49nD52bpRpVShizpkK9IHoMkdkykpmr9c0s079RE1UMWjkmTIHvebChYIj8yZssIC+3vc0gdU+cSgP6vaU04JkuCk4dOG1QyZstgmN/LwwtPHzxD7z+7wcLCAomSJIRDPHvESxAP8TUxDAqlcvvaHXh7+6DVTy1wbO9xmP5/gunB+YvkhWRYK1YvB1e1nSEQvloAKto3Lt/EnEnzUaRkQQgjyfJm1taeNWdmLPp3KW5cuSlqocqh3Yexbd1O5Mib3aBTpFQhzb4v/P38tA8CL7VAOClevXiN508+1kOzn79IPkjiIDAg0GAvrIpFWJ3sIwESIAESCCKQ0CkBkmtBqF5CBqSJnBKqILVkzWJIlSkl7l66FzRQKxOGMjZNltQoXr0INv+3Xfta/T3WTduEZGmSomzdktoo837ylMiJRNrce1bsN29ANNU6suM4pgycgdkjFuD0gXOY9vss7Xwm5o9dgpP7zmDET+MMgZEExWN6TcShLUexbfkujOo2DvJhQZamtzNz2H84e+g8xvaZjCunrym+8rX/y6evsGfdAdUn+iJTB83Evg2HsH7uZkz4Zao0hSmy93C8prdxwVbcvHgbK6evxe41+9WYVTPXqYBaTqQ+pudErJ2zUfn6R6dRhuD8sRbY/91zggqYD2w6jKHth2sZuaDgXMaGJ/IV+n9jF6Jv89/Qr9VgjdnZYEOunruOP38arfr/HTEXt6/eVf0LJy3Fjg+ZYFnHqN7jceXMNdUnX3/LGG9PbzzS/BvWZRTWzd+E/m2HKDsbFm5RelI8uf8Uf/eZiN5NB+D3TsO1a3RampXIB4Bda/equt7O1uU7MLDdMCV71gexkszuIy17emz3SeXr7NHz1ZivVejnWbdoE0b0+dsgV7UMpvRJUBc/UXypfiKy7WHyH9PUmPPHL6BK3e8+0QnZ8NblHe7feoASFYuhTJVSOLLrWEgV7duBJ5BtFj8N6qwFf+Z96A1p5Mju4yhWrggKFM8P+fbixsWbIVVCPdfpdJg4bwwckzqiRYemqN+iDgb93V/p59QC1U69f0ChEgUh2d1Gberju1oVUea70kp39IzhsLWzVVlg51fO2L/9oEHeu79HWU2v77CeWvbYCT90a6tsFitTFHWb1UZzbS4Jfv/XuJbqM66bsi+D6zb7H2o1qo6qdSrLabjCgDdcRFQgARIgAeDi4SvYOn+nQbYvNP31sWQcXJ1d4ZDQwYAtrLGVm1VA/MTxMXPAf3ioZXub9KyvsjqGweFUdFoGqHqbyrh89BpcXrwJRzt6d6fOmAq//9sfvUb/hMy5M8HaxgpDZvVH779/RrqsaXH1zA21gOO7TiFv8Tz4dVIvDJ7xK5xSOKnsrOrUilQZUmHo7AHoPqILilYohPNHLiF5mmQoWCY/subNgm5/dUajTvU0zaCfdv1ao8/Ybug7vgecX7hoX+G+DeoIpXR5+QYi7fq1wg+/tlY+l6xazKR22VqlMXDaL+g3sScSJ01kCIbX/bcJxSoVUWvtMbKL8uv80UsmbZhqXDZjNW5euo3arWqi828/wCmlk0HtxeOXmD1qPgqVKYBeI37W+DhCgsnAwEAk1LKFZ49eULq3r9xVGfGju04EnV++q2XM/GFrbwsfL2+80wK0549eoHmXxihSrhD2bz4EOZf9uVOGzNT0bNDpt3YoWCofxJ97N+4rO++0bPzLJ69VXW/npuZrk84NUaF2OWxeuh1u79yRPV9WOCZ3RNbcmdG4Y31UaVAR3+JPkTKFUL9NXYOkTp86XDeSpUyKijXLo0bjaiqAlYDZW2MW1sDj+04gVbqU8PH2RbY82fDkwVO8fvHxQ45sE5g4dCrqtKiF3AVzhWUq1D5fLRt76uBp5CqQA67adciRLzsO7ToSqn54HRLAio6NrQ0atq4Pqw//UqK0Gdetra2lySBOyZxQsUZ5g8hWiTyFcpv8f5ulpaWh3cbmox3jekj7+olCa9f3Gx8tjE9YJwESMCbAOgl8JCBfM7599Q4fJXhQdGrXWexYvAcz+s9RX12Xqf0xSxvWWCtrK1RqXBae7l7IVSwHHFMk+TipmbUcRbLBKZUjdi/bZ+aI6KmWJFli7ReqpXJOgsPESRNDfhlKQxKt/kYLNKV+6eRV5C6SQ6pqD1+uwtlhHCw6JtfsaFxFwSmloxacukg1VEn6IViMnzCe9otXh7evg1/bkANFP0Xa5JDM8Ixhc3Bs50nI2JB6cp401cdANLFTIujX8PT+M5zYfQq/txuu5OaFW7hh5kNnsn3i0skrqNm0KirVKY9sWhCfKUcGmU7J4Z3H4BDfHvmK5VYBbKHS+eHj5YMHtx4hV6EcKsiVh6HOH72IdFnS4vr5G2pLxp1rd5EjfzZlQ190GdwBMr7lz02VzesXbuKGJmKvdffmyF0oJxq1rwfJhB7TMrX6cSGPPYf/pOxUb1wZcs9LAOykBbvyNX2y1EmRu3BOZMqRMeSwr3KePnNa5CmUyyCS2Q1v4hSpk2sZ1HwoUroQ2vzcUqnfunpHHUMrDu86ilfPX2PkL2MwY9Qs7V6zwNE9x5W6vIlg6p/TkSNfNlRvWFW1fU5x6dRl9VaFZbNWqXnu3riLCycuIbxgPLy5dDod7OztwlMLtV8CU5FQFb5CBwPerwCZU5AACcR8AsWrFUHLfk0M0rxv42CLenz7KZyfuiB7oazoPr6zCkD1CmGNla8c9648qLYlXD1xHS/NeFhNb9f4WPP7qrhx5jZefcisGffFxLqF9gvW2G851VnoVFO8BA7qQ4U60QrZGyttWvWTH50uan7N9fr7J0hmOH22dNi6dAe2L9/1ydwhG2Q/pr7NSsuINe3aEJLBFvlz3mD8OKSDvjvMo7OWhRYFCXTlGFJePXsN+ZC15J+VEFk+cw0kSJeMX5ZcmZS6BL/nj13UgtW6sNW+hpZA9t6NB8hZMLvq/6TQGlKmTYH7Nx/A5dUbFbSKTa1Z/WTUOOj9Ug1hFDa21l8cgIVhPsJd8q2Mn58f9CKZcHONyAeH04fPqA8MKVInC3XYo3uPFbcx80Zi/MLRSpp1aowju4O2NSyfvQqeHl5o83Mrgx/iT6gGQ+mQbG6Vet8p+zLPhMVjIBn7c8eDsvqhDIsTzVHzf4I4gY6LJAESIIGPBBp0rY2WvzZBtVbfIZH21fXHnrBrO7WssL+fP34c3R5psqTCysnrtKxcQNiDTPRmypNB7R0Weya6Y1WTbE04see09pW7q8pWysNjRcoXDHeNKdOnUPqyd1UylOEOCEXh9XNnnNx7BlnyZkatltWQRftK3vO9VyjappuzF8iCXav3QcZZadnoV09f49KJK6aVQ7Q6pXBULS6vTGeiEzkmUpnbQVP6wVjyl8irAtU0GVNhz4d9tBmypUfhsgVxcNsR7StwN2TXMozKuIni8f2naktEgsQJVIAn2xL0as8fv1R9+vOIHCMSYEbErjm6Op0OS2YsR49mfQ1yfN9JNfT3n/7EnHHzVKDao3lfnD5yVrXrdDqcOnQGPzXqiZ4tfsGa+evRrmebMPfcHtt7AnmL5IGdva2yIYVkh93eueH+rQc4sf8UnF84o2+b/gY/xKfnj59DXll25ug57N96EFtWbpehJsXD3QOyX1fe+KBX0Ol0KFGhGI5o2WV9W1w9MuCNq1c+8tdNiyQQqwm4OrvhxcOXBolIJja0sfeuPMDZfRfQqHs92NrZoGG3umrLxMG1Rz6LZY02lTX/Xn0y1sPdU2v/PN8/MRZFDTqdDjozs7ElqhRVAdfIn8dBHh6T/bm5CudQnul0odvJUySn0vmt1TD88/ss6HQ6dY4Ph6CT8EsJ0LYs3oFBbf7AwNbDIHtWqzSuZHKgTmfaeIMOdZHIMSFG/DQWA1oOxeTfZuCNljk1aSREowTIEqjKg2HyoNmdq3dx0ShYLlQqv9q+cGj7US1b6I93Lq4QXXmATEzJ9gHJ6MoeXzkvXLYAbl26A9k2EnJrhjxc5/bWDZuWbFPbIopowXG2PJlhoWXbNy/dhvdu73H64FnIh4B8xXOLuQhJxuzpcffafS3j66P8jNDgSFAe9s9gTF8zOZjoA8a/pg/BP6smqr4py8ejaJnCasamHRqpNv24UXP+QvHyRfFeCzgleDUl1RpUwU8DO6vx+sIhvoOyI29oEPt6e8bHlGlTosWPTZWetNdoVFUFyKbmcNWCZ9FJpY3RzyFH8bfviF5SjdPCgDdOX34ungRIwFwCx7aexKxB8w3y78B55g6FqbHy4MrqKetRoHxeZMqdQdmSfatVWlSEvNbshdlbGwLVWCnSZkuDDDnTSTWYPLn91OC3rEF8lyxnMKVvfFL/h9po1bOpwYsmXRqgRbeP20Za9mhqeNBMXtP066Re6mEwecit67COsLaxVmND2qlUr5x6eE064yeKj/6Te+P3mb+ix8gusNE+ZPy97E8V6Em/yKglfyBTzozqwbXXz5xhSuIliIe/5g9WdobM/k09VCfBoowXv0pWKSZV9VBd8UpFVF2KH4e0R7WmQU+US2Ap58MX/I4BU/tg5OKh+K5+BVEzS2o2q4on955gTN9JmP7nbFhZBe19lsGyLaFe2/9h/fxN6NdyEIZ1GQl54MzKOkgn54dtC0XKBWXFZe+ssMhZ4NPtDPJw2pDOI7B3wwGITQm05cG31j2aqzdDDO7wl9o2UbpqCRQuE2RPp/sY5H+8O8Wzj6LTBekUr1QU7q7uGNB2CP4Z9u9HhRhYe+v8FicOnDIp8oBaZCzJz9fPpH2Z9/qFG5ExRay1wYA31l5aLowESCCyCAyYowVJi37F7yFE7CdN7aTa5XVich5SQhtrY2uNfv/2RN1OtYINKVmjmLLnlDIJ3rt6hCrylbzsIw45vu2gFmq8+CWGRSek33JuvJ9U9GKiSIZUAseI+i6Br5W1VZjDDm4+gk2LtpmUGxduqbFix97h8x/kESMSqEuwrH84T9rMEXm47O9Ff+G3SX0xdukIDP7nV/Qa8ZNhaMXa5TBu2UgMmT4Af835HSPmDlVvCBCFzDkzYeLK0ZCjnOt0Ovy98E80/bGhnAaT0Qv/wLCZAzF++UiITX1nodIF1Lwy/+gFf6BJpwb6LvUwXf8JvdV51tyZ1Vw6XVCAK43iS6nKxaWK1OlT4o9/B6k5+o8PGqM6YmCRJkNqNOvY2KTIQ3GRsSRbO1uT9mXeirXKR8YUsdYGA95vdGk5cvcVQQAAEABJREFULQmQAAmEReDhjcdYPHpFqHLtFLM5YfH70r567f4HeeWYKSlUJv+Xmo+U8RK0J0+dDKF9eNHpdCp7/TkfCvQOim35YCFHfZv+KG0yvzwUpW/73KPMIev53PEcRwLhEWDAGx4h9pMACZDANyCQOW9GGP/LbiHrBcrl+wZeRcmUNBoNCSRNmRT1v68Nne5jZjYaukmXSMBsAgx4zUZFRRIgARIgARKIGwQk41rhf2XjxmK5yjhBIGYEvHHiUnCRJEACJEACJEACJEACUUGAAW9UUKVNEiABEogiAjRLAiRAAiQQcQIMeCPOjCNIgARIgARIgARIgAS+LYEIzc6AN0K4qEwCJEACJEACJEACJBDTCDDgjWlXjP6SAAmYT4CaJEACJEACJKARYMCrQeAPCZAACZAACZAACcRmAnF9bQx44/odwPWTAAmQAAmQAAmQQCwnwIA3ll9gLo8EzCdATRIgARIgARKInQQY8MbO68pVkQAJkAAJkAAJfC4Bjot1BBjwxrpLygWRAAmQAAmQAAmQAAkYE2DAa0yDdRIwnwA1SYAESIAESIAEYggBBrwx5ELRTRIgARIgARKIngToFQlEfwIMeKP/NaKHJEACJEACJEACJEACX0CAAe8XwONQ8wlQkwRIgARIgARIgAS+FQEGvN+KPOclARIgARKIiwS4ZhIggW9AgAHvN4DOKUmABEiABEiABEiABL4eAQa8X4+1+TNRkwRIgARIgARIgARIINIIMOCNNJQ0RAIkQAIkENkEaI8ESIAEIoMAA97IoEgbJEACJEACJEACJEAC0ZZALAh4oy1bOkYCJEACJEACJEACJBANCDDgjQYXgS6QAAmQQKQQoBESIAESIAGTBBjwmsTCRhIgARIgARIgARIggZhKIKTfDHhDEuE5CZAACZAACZAACZBArCLAgDdWXU4uhgRIwHwC1CQBEiABEogrBBjwxpUrzXWSAAmQAAmQAAmQgCkCcaCNAW8cuMhcIgmQAAmQAAmQAAnEZQIMeOPy1efaScB8AtQkARIgARIggRhLgAFvjL10dJwESIAESIAESODrE+CMMZEAA96YeNXoMwmQAAmQAAmQAAmQgNkEGPCajYqKJGA+AWqSAAmQAAmQAAlEHwIMeKPPtaAnJEACJEACJBDbCHA9JBAtCDDgjRaXgU6QAAmQAAmQAAmQAAlEFQEGvFFFlnbNJ0BNEiABEiABEiABEohCAgx4oxAuTZMACZAACZBARAhQlwRIIGoIMOCNGq60SgIkQAIkQAIkQAIkEE0IMOCNJhfCfDeoSQIkQAIkQAIkQAIkEBECDHgjQou6JEACJEAC0YcAPSEBEiABMwkw4DUTFNVIgARIgARIgARIgARiJoHYHvDGzKtCr0mABEiABEiABEiABCKNAAPeSENJQyRAAiQQnQnQNxIgARKIuwQY8Mbda8+VkwAJkAAJkAAJkECcIBAs4I0TK+YiSYAESIAESIAESIAE4hQBBrxx6nJzsSRAAmYSoBoJkAAJkEAsIsCANxZdTC6FBEiABEiABEiABCKXQOywxoA3dlxHroIESIAESIAESIAESCAUAgx4QwHDZhIgAfMJUJMESIAESIAEojMBBrzR+erQNxIgARIgARIggZhEgL5GUwIMeKPphaFbJEACJEACJEACJEACkUOAAW/kcKQVEjCfADVJgARIgARIgAS+KgEGvF8VNycjARIgARIgARLQE+CRBL4WAQa8X4s05yEBEiABEiABEiABEvgmBBjwfhPsnNR8AtQkARIgARIgARIggS8jwID3y/hxNAmQAAmQAAl8HQKchQRI4LMJMOD9bHQcSAIkQAIkQAJfh4CdtT2cbJNTIsgggXUiWFpaQv9HOCa2cURcFjtLez0O6HQ62FjaQTjFBrGxtDWsLWTFImQDz2M0ATpPAiRAAiQQywgcP34cq5evwbHtpygRZLB/6yFMmjBZ3RGHDx/GmhVrcWjr0Tgt2zfuxJo1axSThQsXYsemndi7+UCsEFnLsmXL1NpCFgx4QxLhOQmQAAmQQCwgEHuWEBAQAPklPmPGDFAixmDatGl4+fKluhn8/f2xZMmSOM9w5syZ8PX1VUy8vb0hjGLLfSVr0a9NLdCoYMBrBINVEiABEiABEoiOBL777js0aNAg1kuGDBkM+AsVKhTr1/sl1zRJkiQGVhUqVCCrD38/DFBCVOJ0wBuCBU9JgARIgARIINoRKF68OHr07o62nVvHavm+cxuMGz9O8Xd0dMSwP4ZB2ihtPuHQ/sd2GDpsiGJVsGBB9O7bG+06t43z0qlLRzRv3lxxCVkw4A1JhOckQAIkEPcIcMXRmICVlRW8fb3xzsclVour71vDA2Y6nQ7QfqSN8hYhGXj4vYdOpwECIPeHj5/PJzohx8SFc29/L9jY2GhUPv2x+LSJLSRAAiRAAiRAAiRAAiQQewiYH/DGnjVzJSRAAiRAAiRAAiRAAnGIAAPeOHSxuVQSIIHIIUArJEAC347A/dv38fzpi0hxIDAwECcOn8ShPUcixZ4Y8fTwxPlTF6Rqtty+ccds3chWvHf7Hjat3hLZZqOdPQa80e6S0CESIAESIAESCJvAon+XYvakuQa5cv6qYcCODbvg/MrZcC4Vl9cuSvfxgydyGq6Ivf+mzDe8vkoGyJzGgdy5k+excOYS6cLOjbuV/XnTFqrzsApTtsPSN+6TV4tdvXQdjx88Vs3yyjZViUBhPObm1Vs4e+I8MmfLGAELgATKIsaD9Hbd3dxxZN9R4y6Tdb2+dN67dU8OBjHuk8aQ59IWGSKv8JqrXbNsObMqcyHXpBrDLmJMLwPeGHOp6CgJkAAJkAAJBBHYt/0AChbLj6KlCytJnio53N3eY1D3oVg2dyVc37kFKX4o9+84iNPHzqrA9ENTmIfVi9bh5JHTOKcFg3pFmXP+tEWQ4Etk4cylELvSX0DzRewXLV1ETsMUU7bDHPChc+4/8zG0z5/YtXmPatm0agsm/DUZv3UbjEf3H2PKqGl49uS5CtL/+GW40glZnD52BmOHTsDfv49TY7as2YqH9x7i8cOnIVUxbexMvH75Gkv/W64ywGdPnNPm3q2ytzLX30PG4c7Nu3j14pWae9KIqdi9da/BzotnLzF8wCjturgb2vSVudMWYPyfkzCk9x/KX2Enff1+HIBRg8bg917D1Nyh2Rbd0OTJo6f4XbM7Zug47Ny8W8mF0xeVurQJowEasz/6DdfulRXaPK9w5eJVbFy1GWOHjcfAHr/jqWZD5h77xwSMHPS3wcakkVMhNu7dvq/shSz6dRmAEQNHY1DPIYrLtLEzIDbkKD4Yjz964DjEh7/6j1Qc501fqGyL7w+0a7Js3gplfvaU//DG5Y12PUK3pRTDKRjwhgOI3SRAAl9IgMNJgASihECufDmRO38uJY5JkyB+gngYMfUPpM+U7pP5Du46jN6Du+OYFmRIlvQTBaOGNy5v8eLZC7Tt0gr7dx4y9FhaWiBrrqw4fuAETh05gxx5sxv6UmgBt5W1FdKkS2VoM1UJzbYpXeM217euuKtlQYdP/gNVa1dWXUVKFkLqtKlgbW2Ny+evoFL1Cti+fgeOHzwJ6VNKIYota7ah79BeaNauiQpeC5cohBp1q6FE2WIhNIEcebLj6sXreHj/EU4dPY0rF64he+7sWLdsAzJmyYDUaVJh3/b92LNtP+zj2SN7rmzYsWGnsvPy+StMGzMDP/frol2X+KrNuLirBcoFi+ZHz4HdlP+yDUL6be3t8NuIX1GtTlVcOnvZpG3RC0vk2tnYWuN/DWuhSq3v4OPtAz9/PzVEPhQFBgQgZeoUGDp2MGo1qIE8BfKgUcsGKFqqCNJlTAcLCwtcvnBVBe//a1ATA0f0V2zXLF2nZcIzIUWqFNirrVsZDFHY2dtj0MgBqNukNo4fOgkPD0+07dxK49AVIcffvn5b2fu+SxtkypoRt67fQuEShdFrUHcgEPD28lbWPT284O8fEKYtpRhOYRFOP7tJgARIgARIgASiIYHBPYahf5dBSg7vPRqqh5KFtLW3Rc58OZAmfRpcOH0pVF3pOLT7MEpXKqUFQIVxS/vKX4IkaRdp2LIe1izZgI0rN6N+8zrSFCEJy3ZYhiwsLREvfrxgKpJRrVC1HGo3rqW2GOQrnBfXr9zEjk278F3NSsF09ScS7Ot0OhXUBWiBH8L4U6BIPmxZuw258+VSr0uTIFU+TEhgX0zLZFerWxWNWjWAr48PChcvqGXai+CXYX2URcmwy1xyVA0fCv1hwPBf1euzhg8YDQnm9e0Wmm9St7G1gZ+fn0nb0h+WFCpWEE3aNMLRA8ewZM4yiL9+vkEBb6AWSMrYePEd5BBMJg6fjLKVSmuBck0IG+dXLmqsKMmrzyy1ayDrrFq7Chq2qCfNn8gH9+GrzSf/ipsoOMQLum4hx7fs0Fx9qJg7bT7On76A34b3V9dFMr4S7Mp2CxlvLKHZMtYJrc6ANzQybCcBEiABEiCBaExgzKxRmDR/nBIJ/EJz9cCOQ5As2cS/puKdlindt/1AaKqq/cDOw7hx+Samj52lzo/uO6aOUqTWMrgpUidHqrSp4JTMUZoiJGHZDsuQZK8TJkqgtgjs3LhLqWbInB7L5q3E6kVr1bkUpSuURPqMaeEQ79OATvqr162KscMmYOG/i1FZy34ijD/JUiSD6ztXlK5YEsW1DHC8BPHUu29r1q+OVdqcS7Vg8qD24aDK/yrjwM6DWLN4LZbNXaEsZs2ZRQW/08f9q2XLX6o240KCy6uXriNZiqSwtbNVdqVfpwt6t67URUzZlvawRLYKbFy5Ca5v3VTGNl/BPNignctWBNe370IdmiFzBqxctAZrlqxTOjXqVcOsSXMwcfgU7NqyB7Lu1VqfrPFwKHuUnV+7KP31yzegYrUKyo6+CDle7Mi2GUsrSyRLngyTRk7BjSs3kDxlMqTPnA737zzA2D8m4IqWbdbb0B9D2tK3h3VkwBsWHfaRwFcnwAlJgARIIPIISJbxxOFT+Knfj6jfoo527Iwr56/C472HyUke3nuksnvtu3+v9Ft0aIZ9O4IHyF36dES7rq1Njg+r0RzbYY3/+deu+GVob0yaN17LPheBnHfp0xljZo5SX83LWNlbWkv7Gl7q+zW/ZfuBXuS8jJbB7DukFwaNGqC+Rq9YvQIqVCuPy+evqK0Ket31yzeqDOuMJVORNHlSlCpfQs0tdouWKoIev/2sseyCes3qIFWalOij2ezQ/Qf1dXwyLVCWrQxJHBPj7+kjcO3itWC2xY/Bo3/TGLbBwJH9VcD7x4QhYhr6o8wnmdSQtpVSOEWZiqU0/7qr7RIVtbVJVv+P8UPQ5/demDxvAlKnS41OPTooK7K2bhpXOZHjj706Ydy/f6ttHllzZMGYGaPQtW9nVNOyusXLFEPP37pB1ibBsJ6V/nhVW6dTUkd0/eVHjJ05GkmTORCscXAAABAASURBVKHf0D5IkDC+mIfx+DqN/4c2nVuhQ7d2+H30QC0wT4vBo4RJW7WFws7ODsMn/YHuv/6Ef5dNC9eWmiCcwiKcfnaTAAmQAAmQAAlEQwK92vXFTy17KJGsrWw9GNR9KB4/eIJ/Rs+EPNx14fQlpEmfGjnzZlcBXrZcWZE7f06cOHTK5IoO7DykBYDllK7sq6xYvTxev3gdLEuZMHFCxP8QxOiNLJ69DO/d3mOClkXWt4U8mmM72BgTJ3b2dsFaJfNr3FC8TFGkzZBGNZX5rrQKhGs1qKGOci4d1tbWhoyqnIvkzJtD6eh1azWsAfkaX/pMiU6nQ0hfQp7rx8m8BruaL3Ku0+lg72CvVwn3GJrt0AbaaVljY/9lzcbnoY0LyVP28xrPrdPp1LrFnmwlMRbZ81y3SW3I3Dpd8Ey1fj6dLmi8/tyYgU6nC8ZEp9PBeG79GP1Rpwu7X6+nP1roKzySAAmQAAmQwNciYKGzQELrxEhs40gxwSCeVQJYWFiGejnmrvsX05dOMUilGhUgwYo8tCZ94+eMVg93FS5REEPGDgxm55c/eiNLjswq6JXA11gatKgL4725FhYWmL16BlKkSq6OxoYk6JG5pK11pxaYv2E2hk8eCsnkGtvU18OyLTYiQ0pr2U29HfHPVgv89CLn+r6QRwkG9XpyDO2fpw05zpxzmVds6kXOzRkX3XVkHcZiaWmpMu/R1W8GvNH1ytAvcwhQhwQincDz58+RyCoJcjkUoEQyAzd3N/j4+KhrNm/BPDjffYe3995TTDB4cecVFi9erFhFRSH5N0srS4SUyJgrKm1Hhn+0ETcJWMTNZXPVJEACJGCawOHDhzHo18H4a/BwSiQz+LVvf7i6uirwm9ZvQv9f+lNCYfBLn364cP6CYmVeETGtdJnSadm4wp9IyK0KEbMapB2VtoNmYEkCESfAgDfizDiCBEgglhO4efMmrl27RolkBpI91986KVKkQNmyZSlhMIj34XVOemY8kgAJfD4BBryfzy7GjaTDJEAC4ROQQGzmrJn4dzYlshkMGDRAvWRfrsKov0ehc7eO6Phze4oJBj/36orefXoJKgoJkEAkEGDAGwkQaYIESCD2EChXrhziJbODWwIXSiQzKFmyJBIlSqRuloQJE+KN5Su8tnj2LSTaz+lm8RYJNEYKFgsSIIEvJmDxxRZogARIgARiGQEffx+4+bpSIplBYDj/slUsu424HBIggWhEgAFvaBeD7SRAAiRAAiRAAiRAArGCAAPeWHEZuQgSIAESiDoCtEwCJEACMZ0AA96YfgXpPwmQAAmQAAmQAAmQQJgEIingDXMOdpIACZBAnCDg7x+AgDD2qXp7+eDd63cQvTgBJAKLvHHhFuaPW/KJPL77JJiVw9uP4eyh88Ha9CfXzt5Q46+fv6lvMvvo8tJFjd2/6VCwMbcu3VHtpnzz8/VTvuj7lkxdGWxseCd71x9Qtt+8fhueKvtJgAS+kAAD3i8EyOEkQAKxn8DIjuPxR5vRn4jxyiWYndjjH5zZ+2kwdv3MTYz7eQpGd5qASb1nYHi7MVg8ZgXc3rgZmwizLvriw9l954PprZi09hO/RE9EFHcs2ROs39PdU5rNkmPbTqqxKyavNUv/S5SePXyOq2euwSmFYzCxtrVWZu9cvQcJKNfP24xHIYJgpaAV21bswuVTV7F9xW7tLGI/x3efUmO3Lt0BXx9fw2AbO5tg/oh/d6/dU75Cp0Oy1Mm0DzD+kCA7e74shnHhVXy0Dz/bP/h7Ys+p8NTZTwIk8IUEGPB+IUAOJwESiBsEStcqji4j2wcT/crX/7tZBbPvXT30TYbj7Yt3IUFp7uI50XvKz+j/by+0/KUJXj91hsuLtwa9sCqe771w59I92Me3w+kQAXWNNlWC+dRhaBtYWlsidaaUymTVFpUgOjoLHQbN/UWzYa/azSlO7z2n6dvhxtlbkADNnDFfohM/UXzUaVMzmKRIk1yZlH8CN1W6FBAd1RCikCzp0/vPULdtLUhW+J1L0L/oFkIt1NPjWtBZrUll1X/pxBV1lCJDtnTB/MmYIz08tA8NHQZ8DysrS6TLkgZZcmeCnYMdilUsIkPMkgsnLsPC0hIVapfFyX1nzBpDJRKIaQSik78W0ckZ+kICJEAC0ZVAQqeESJEueTDR+1qleSX0mNAF+mykvl2OW+bvQIac6VHr+2pImCSBCoyyFcii9NNkSSUq4crlY1dVEFuv0//w7P7zYJnhRCH8OrjhKKytrdCqXzNl18LCQp1D+2OltWsHs36cn7vA5fkbNO/VCIEBgbh2OuLbBMyayEjJy9Mbl09eDSZu79yVRsbs6fFd/QpwSp5EnYcsTu0/iyRJE6NcrdKw14LPU/vNDyLv3XiggthSVYsjV+GcOLb7ZEjz6vz5oxdYNHEZarWohhwFsqm2zy2O7TyBwmULoGTlYnDVMv0Pbz/6XFMcRwIkYAYBBrxmQKIKCZBAVBGIOXYvHr4MCV71sm3hLoPz8RPFQ5JkiWFpGfx/qf5+/nj76h0Kls9n0NVXLLRA1NwA9Oz+88hfOg+yFcwCSy17e/7gJb2ZYMfDm47h1vk7aPtbSzgksA/WF9GTcwcuQoLp9DnSIVOeDAi5lSKi9szRlyzy6jkbYCyy1cGcsce1ILVE5aLQ6XQoUr4QTuw5bc4wpXNi9ylkzJEBCbQMc4nviuLe9Qdw/xBoKwWt8HD3wPRhs5GnaC4VeGtNn/0j2eeHtx+jZJViSJY6KZKmcsJxzYfPNsiBJEAC4RII/n/ncNWpQAIkQAJxk4CXh7cKXiWAFXnzKvztCG5vg7KTCR0TfDa0t6/f4fmDlyhUoQAkSM5XKjfOhNjHK8bvXrmPPSsPoF7n/yFVxhTS9EUiQXahCvmVjULa3A9vPoapLRtKIZIKyYAPm/UbjCV7vqzhWn9054nKkjq/cMGOlbshWWHZ4vDk/rNwx/ppH0rOHj6vZbED1Nj7Nx+qMacPnlNHKeRBxDmjFmgfIhzQqkdQ5lzaP1dOHzirhl49c13NaWNjjbOHL6i9wKqDRdwkwFVHKQEGvFGKl8ZJgARiC4Hi1YqgVb+mBmnZt0m4S0uQOL7SkQBZVT6juHAoKJt7dMsJtRf42b3neOfsihePXhqsSVC8dPwqFKlUEAXLfZpNNiiaWXl06wk83b1w60LQ/mPJbsvQi0evyCHayfE9J+EQ315tS3j64Dnk7Qk2djZalvdUuL5eO3sdAQGBam+wjH3x+CUckycJlnHdMH8LnmvtXYd0hFUEtoWENrlkc5OnToZnD19A5kySLIny+drZG6ENYTsJkMAXEmDA+4UAOZwEviIBThXDCFhaWaptAecOXPjEc3k1ma+P3yftIRvO7r+gZWxTQrLEIrIfWPYKS7voSnC3aPRy7atxJ9RqV02avljO7j+PeAkdkCZzKjWvY4okcEyZBGf3nv9i22EZkLW8evYaxiJvvzAeExgYaHyqBasBOHPwPKo3rYIf+rU2SMU65SBZ2oAwXhMnho7uOKG2KRiPbdm9KV4/d1ZBrmRej+w4jgbt68DH2yeYb/7+/mIiQvL0wTNI9tl4PqnLw3ESCEfIGJVJgATMJsCA12xUVCQBEojLBFy1rOrzhy+hF+MMqwSvEqwJH9m3q6/Lubwh4fHtp9gwewskEyuvBbt5/jam9p2Jp3fD/sr9+YMXcHVxQ73OtVCzbVWDFKqQH5J1leBv45ytcHnxBlVbfIeXj18b/BM/xS8J+Hx9gwJrY7/EN1MiYy4du4oytUsa5pO5q7eqjNfPnNVcpsZFRpuHuyf+7jUxmNy8eEuZ3rZ8F35pNgiy9/XQ1qOqfvvyHdw4f0tlRwuVCdp+oZS1omiFQpA9wfIeXe3U5I+nhxduaTaKVyoSrD9j9vQqY3xy72lcOxeUdV0xfU0wv8TPV09fQ16TtnnxdrXn99cWg4PZMXVycu8ZJE3ppH1ASRqsu0TlYurVZt6e3sHaeRIaAbaTQMQIMOCNGC9qkwAJxFECR7eexL+D5hpk5sC5BhKSYR3Rfhxkn6+891bq+oA4Z5HsaNi1Dq6fvonJvWdgTNfJWDZ+NZIkT4wkKRIbbJiqnDt4UWVY5e0Qxv2FyudXcz248Qj3rgbtORUfjP2TuvsbN+xatg/bF+2GvGlB/JKA29hWyPqdS3fh7+uPfKXzBOvKmj9zmA/MBVP+jBPJyI5bMQIhJV/xID9qNq/6SV/WvFmQq3AO1e4Q3yHYrI7Jkqh2eW3YW+d3MCUSXMp88iBasMHayZ//DVavOGulZXtFx5SkTJcC9X+oreaR/jHLhkMy0qbm0rfVbl0DAyb30WYI/iNBt9iwtbcN3sEzEiCBSCHAgDdSMNJIdCRAn0ggsggMnNMXQxcN+ET09tsNavlJn3GQKsFj/39749eZvdB93I/qfbjfD2ypsojvXd8jNKncpAJ6T/5ZP43hmDJDCjVfxpzp0XdqN1U35V+ipIkgmVnjPtkOEdp80i42RT9+oniG+aQiD8wNntsP3zUpL6cxRuR1ZfIqsdBEMuCRuZhHtx+pV5eFNt+Lxx/3XkfmvLRFAiQQNgEGvGHzYS8JkAAJRBoB+3h2kP2w+gefHt54DMnMhiZXTwV9nR5pDmiGvsWc2rTf7Ef+YYfuw7sgNJFAPjKdk6xzaHNJe5pMqSNzOnNtUY8E4jwBBrxx/hYgABIggW9FIHPejOgyskOoEhlvXAi5tm8xZ0gfeE4CJEACX5sAA96vTTy6zke/SIAESIAESIAESCCWEmDAG0svLJdFAiRAAiTweQQ4igRIIPYRYMAb+64pV0QCJEACJEACJEACJGBEgAGvEQzzq9QkARIgARIgARIgARKIKQQY8MaUK0U/SYAESCA6EqBPJEACJBADCDDgjQEXiS6SAAmQAAmQAAmQAAl8PoGvEfB+vnccSQIkQAIkQAIkQAIkQAJfSIAB7xcC5HASIAESMJ8ANUmABEiABL4FAZMB7/Xr19GnTkc4LzxPIYNYcw8MbdYTN2/c/BZ/zzgnCZAACZAACZCAMYGvXLcwNd+pU6fQru336NS+A4UMYs09IPf0gQMHTN3ybCMBEiABEohmBCwtLOFgGY9igoGtpR0sLS0NV8zayhr2ml5cF2sLawOTkBWTAa8oubq6gkIGse0ekHubEmMI0FESIIE4SsDFxQVzZs/B0T0nKCYYHN5zFJMnTVF3x4ULF7B+zXqN0/E4Lwf3Hsbq1asVl5CFyYA3e/bsWLN+LVasXUUhg1hzD6zW7ulChQqF/DvAcxIgARIggWhGIDAwEOvXr8eECRMoikFwDuPGjcOtW7fUVfP19cWCBQvISeM0ZswYeHh4KC4hC5MBb4kSJTD75BrkGVOfQgax5h6YdGgRipYoGvLvAM9JgARIgASiIYG8efOiRo0aMVpy5sxpIJshQ4YYt5b48eMb/C9TpkyM8N/gcIiKyYBXdDx8vOAMlI0iAAAQAElEQVT8/i2FDGLNPfDex1Nu7VgrXBgJkAAJxBYCSZIkwfARf+HnXj/FaPn779FwcHBQl2XCxAno1uvnGCM9enfHr7/+qnwvVqwY+g/4Ndr73rN3DzRq1Ej5HLIINeANqchzEiABEiABEiABEvgaBCwsLGBhYQl333efI9FmjJW1NXQ6nUJma2MLDz93vPdzixHi7e8FaxvrIN9tbeHj66357xatxTfAB/HixVM+hywY8IYkwnMSIAESIAESIAESIIFYRYABb6y6nFyM2QSoSAIkQAIkQAIkEGcIfHbA26NCa1TJUcosUEnjJYGNZVBa3KwBVCIBEiABEiABEvgqBDjJtyEgb6LQzxyyHvLclF5AQIC++ZOjfrz+KAr6+r079/HowSNpgrENfb/qiIXFZwe8Q6p3QYeSDcNF4uiQCE//2oeB1TqFq0sFEiABEiABEiABEvhcAl5eXnj6+CneOL/5XBNfZdzrl68xfuREzPt3AZ4+eYaFcxZj2sQZKgCdP2uh6nv54iXGjZiARf8tUTrSP3PyLDx7+hyjhv6NBbMXYcPqTcH8lQB2xqR/ITauXLqGf6fMhpy/evlK2ZT5XJxd8OqVM/r3+A3TJszAVU3v2KHjWLV0DVYsXhXMXlgnwtrDwxPG4uvraxgia7x14zbcXN0MbcYVT08vPLz/EO/evjNuDrcuc8ic4SqGUPjsgDeEnVBP33q6oc2iAfjv2NpQddgR3QnQPxIgARIgARL4tgS2b9qJFnXafCLLF65Ujo35czx+aNIJfbv2x0/teqD3j/3w9k3wYGrL+m1q/MRRk9WYiBZfOl4/n0M8B8PDVWdOnIGVlSUSJUqoZV4fawGkBywsLPDsyXOkTpsa793f49WLV8iWIytqN6iFc6fOIVnypPjhx+/x8vlLvUl1vHvrLjJlzaT6bl2/hfpN6yFDpvQah7eG+SQoDvD3R/5C+dC8TVNIUHpa88HGxgb+fn7KjjnF7/2GoV3TDsFkhhaQy9hOrbvip/Y9MGLIaHRo+SNWL/sYAzq/dsGw3/7C903aY/AvQyG6PTr1xvNnL2RouDKo7xA15x1treEqGylYGNXDrJbLUgTP/toPnwnn4T72FBLYfnwKbmqjgbj9+zac7bca3uPP4fXIw+hevpWyFxAYgOlNf0fJjAXUOQsSiC4EUiZIhlLpC1LIABkSp4ZO+y+63Jv0IxoToGvflIC1tTX+njoymNSsW0P5lDJVCgz9ezAWrp2LsdNGw1nLYm5as1n16Yvd2/YifoL4OH38LCRDqW839/il4/XzvHF5g0SJE0KO+bTA08fHF/7+AZCA093VHb7aueh6vg/6RxTE52uXr2Pzuq0oUDi/dCn58AIIVZdCAmQJoBfPXYIs2TJjo5YBlsDQzs7OMJ+/FuyKrohOp5MDChUtCD8tO2thaanOzS1aft8cyzcuNkj3vj+poa3atcCSdQswf8UcdPzpB0PAK9smRg0bo9b339J/sXD1PMxbPlsF3+/evFVjwyoka3z/7gOkSp0Su7fvDUv1kz6zAl5bKxts6zITNlbW+GP7dAzZ+g/8A/0NxtIkToH0SVLBy9cL/TaOxzPX1/irVndDvwTHTvESG85ZIYFvTWDfvn1I4ZcQ3XM0p5ABcttmxNbNW771bcn5SYAEzCCQPmM6GIsEjjKsbafWyJk7ByQoTpEquTQhYeKE6ijFs6fP8VyTvoN7qa0Dp46dlmaz5UvHG0+UJl0aNGnVGH1+64WMmTKgXee2aPVDC2TWgtQ+A3vh1yG/qMC2nZbF/bFHJxW89vy1Ozp37wgZ2/HnDspc+64/YM+OvQaRAHrgnwPQTMvc5iuYFx20YLP7Lz8jXYZ0hvlKlimBoiWKaPO1RFItU1yvcR2UrVgGDZrVR1PNJ2XYzEKn06lstGSkRXS6oAC6YpXy6jrI9gPJIGfMnAHy58bVm2obQ9+BvZEgYQJpQrz48bSguD1yaNdONYRR7N21X7EQPw/uPaSuYxjqwbrMCnh/KNFAPXTWbP4vGLFzFibsWwD5hymMLbl4vEPpSa0x5cBi9Fn3Nxxs7JDJMY2xSlypc50xgICzszMG9PkVv1HIQLsHfuneG48fP44Bdy5dJIG4TUACqLkz5sNYLpy9aIDi4+Oj9sP+0rU/JMiqXL2SoW//zgNImsxJBcV5C+TBnu37DH3mVL50fMg5JDDXt+l0OlhZWalTadfpggJHSy3jKoGk6tAKOdcOhh/pq1z9O+hFgmGdTqeCTVEy1he70haaiK7YC63fVPuS+cvQvG5rg/w7dY5B7eSxU2jV4HscPXgcXXt2Vu0PHzxUGXZHpyTqPKKFBPdValZGsVJFIZnqc6fPm23CrIA3X6pskDT07hvHzDL80s1F6SWJl0gdWZBAdCSQP39+FCpUiEIG6h6I6P/oo+M9Hf18okckEPkEXr54BWPx9PA0TCLbAu7feQB5qOn9+/dwd39v6Nu7cz8qVq2gzitVqwDJNrq+c1Xn5hRfOt6cOWKaTt1GtTF19kSDtPy+mWEJskd47NRRKFuhNP4YOEJlY3W6oEDeoBSBimzNeOPyVu11lqxxei3Tv2vbHrMtmBXwHrl3DjqdDsnjO5ptmIokEJ0JVKxYET0H9kLrnu0oZIBuA3qgXoP60fmWpW8kQAIaAclSDhjWD8ZSsmwJrSfox97eDkNGDcKsJdNVxnTh7MWq4+b1W3B3c8f5MxcwYeQkHNx7WLUf3ndUHcMrvnR8ePZjan+CBAmQLEUyg+i3Kch6ZN9whkwZ1PYKYS8fUtJnSKeuw8sXL0UlQiJ7du3sbFUGX94+IR9mzp46pz7cmGPIrIB39fmdKsO7uv1EFEidQ+3PTWD00Jo5E4Wmw3YS+BYEkiZNigtvrmHkxakUMsCBlyeQLHmyb3Erck4SIIEoIKDT6dQbDuTBNTG/d8d+9dCWPMjl6OQIecAtZeqUkK/IpT88+dLx4dmPqf2yhUSCWb3Ig4CShd23+wBk+4nsDhDG8g2aU1JHZM+VHekzpsfoP8ZpWfqgoPf1K2dMGTcN8nq00DjI9gXZs9ut70+YomWURf6ZMwkSAB89aN6HFrMCXh9/X6y+sFO9aeHULyvQs0Jr9dCaLESck6OI1EXkzQxy1KJkdZDC0CYnFBIgARIggZAEeE4CoRKw0FnAQmcZ6yVUAEYdD+49hLG8e+uqsoYzJ89W7RJoXbl4FSePnkKe/LnVXs+jB46hjvb1e7sf20IvbTu2wtPHzxDe67Ak2PqS8Uaux6qqTqdT7+5t36Iz9DLrn/+0QNcH82bOV/t3W9Rrg/WrNqL3gJ5qX7FOp8PAP/ojSZLE6Nahl9r7+9MP3fH44WMt6ZA0VD6SmZfrULhYIYOOBNHlKpU1+20NFoaR4VRaLeyPtEO+Q8kJLZCwfwk4/FIULRf+qkY1mtsLqX6vqOpSXHp2CzZ9CuLs42tyquqzjq5SdRYkQAIkQAIkQALmE7h9+zY83b2QMDBJrJb4AYlw6tTJMMFIMDugxyAYy7aN29W2y0vnLqv2tg3bY/igUchXKC+atmmMi2cvaUGYL8pUKB3MdoEi+VUQdmD3wWDtIU++dHxIe7HlfMyUUVi5eWkw6fHLz0ieIrl63djsxTO0bOwELFozDyVKFzMsWx5Y+33EQCxdv1Dt/V2wai7ElqWVlXqDw8P7D/HwfnDJmj2LevWZ/sE+vbFOP3fA6Ekj9KdhHs0OeMXKS3cXQxAr5xQSIAESIAESIIGoJfDy5Uu0bdMW9es1iOVSH6NGjg4VZo061bBs06JPpHnbpurVVtPmT8acZTPVO3jnrpgF2edrZ2eHQsUKqjGJkwR/kF4yhPLO3obN6+OdliUOTXLlyxnm+GZtmoTqc1zuSJQ4kQp+hbMpDhK8yv5f2Xct/ZcvXMb61ZtMyvWrN0XliyRCAe8XzcTBJBCDCHgcc4HP3aCnez3PvEXAe78v9t7f3Q8B3h/fX/3FBqPQgOu6pwjQ/A1risjiop/D380PrhueqVO/F17wvumm6qEVbCeBuEQgUaJEcHJyivVia2v7RZdV3umaNn0a2DvYm23n+pUbGPn76FDlxJFTZtui4ucTKP9dOUiG2JQYZ4g/dwYGvJ9LjuNiNQG/517wf+er1ijBV6BfoKp/SfF+z0t4X40ZQZzvY08E+gSEudzI4qKfJNA3AL4PPdRpgLs/At5++YcMZYwFCcRwAlmyZMHMf2fiv7lzYr2MGDniq18t+QcaQv7rbcbnFSqX++o+RXBCqptBgAGvGZCoEjcI+D7zgmQ2Xdc8gb+zj2HRFg6WgLUOkuV9f/A1fB554P3eVxD9AB9/eF16B9f1T+F11RWB/kFBYqBfALyvuMJ17VO47xddT/g984bXZVe473gBv9feBvshK2Lf4/QbvNfGyXif2+4GlQAtQ+yp9blufAbfR56GdtEVf94feKX88Lr4Lsivzc8gWVM/bT5Pbdy71U/gef4t9A+Zepxwgau2Xldp1zLZBoNmVMLiEtIfH20NwvbdyidqXbIOmSJQ4+VxxBnvVjyGx2FnaVKis7WAzs5C1UMbq9aordNt63O4bXkOv5deSl8KP2297rtfQjj5aHNLm2TY32tzuW1+Dp8PgbW0U0gguhNIkSIFLG10eAfnWC1uurfInDlzdL8c9C+GEgj6jRJDnafbJBBZBPxdffFu+WNYJraGXYHg+7y8LrkCXgEI1MTr9Ft47H8NaL98dNrfHtc1T7VAy1uN8dICRn0G992qJ/C+4Q7b/AkB30D4e/hDAkSrpLawzuigBXOWCO2P/2sfeBxyhgR9NtniwXXTcwR8yLa6agGj30tv2GaLDwn0vK8HZYy9LryDm2wH0BLROksL+Nx+D48jLrBJr82lBY/vFj6CnxbE2+VNqGz7azZkfp2lTvluWygRJPj1ufUxuJb+sCQsLl4h/AkMCIRNjvhwKJkEfi+84XHQWZl23/MKsga7/IlglezjV5l+r7zhcydoS0loY9Uaj7rAOq09LOJZajxeKJu+Tz3xbskjWDpawzZLPHhrdgK0DyBv5z2AXAOb7PEUK/8PGXw1iAUJkAAJkECsJqD9yo7V6+PiSMAsAj433WHlZIN4lZLBJmt8WGp1kwO1TG/iNukRr2xSWCSwgt9zb1jE045aECdH72vuWnDrp9rj104BW81W/KrJYZclPiwSWcEqpS1scySAZXwrk+b1jTZaoOZQygl2+RLBMpE1/LQgLuC9ZlcLhuNXTw7bXAlgXzxJsC0SiZqmQbyKyTT78ZUZ+1KOkEDSoVgSwBJIUF3zJ2cCWGdwgO8TL6VjnclBy3Z6wvuCFtT7B6qgWHVEtDDiYpXCTo029sdaC7wDtADT8/w7BLj7wd/FR+n4XHNDvCpB67HRAmLVGKIIbayo6ddoX9oRAS6+KsMuWXS7golgXzgJbPMkVOv2u+cBnY0FAr0DtLl91TXz0T6QiA0KCZBA3CLAIA04KwAAEABJREFU1cZNAgx44+Z156pDEAj0DYBFYmuE90dnoTOoBLj5Qaf9DZLsoohkY+2LJEaAFpTqrHSwsNGiTHz5H7GFAMDf1U+zaQGdpYUyKkGzv5aZVidSBDVLLbiEdEP0tIyrfMX/Vsv8StbTvkQSlSkNPtD8M50RF8MomefDyTst4+rv7As7LeMtQTwCgUDNB/hDZdU/qJk8mBobUlFnrZ9MB3/58JEo+LX00wJsnYMl5DqJ2BdLDOsM9iHN8JwEYgSBgIAAvHf3gJenl5Ir56/C28sbfn5+wcTd1V29juv86Yu4e+ueWpuPjy9kvPxzvIGB2l9E1Qo1Tqre3t5wd3tv0JE2eRWY6Orty3h9m/RLu/gidWn39/dX/si53pb4K3p6O6IndfFb9CgkENUELKJ6AtongZhAwDpjPPg+9kSAhz8CvP0ND6whjD9WKeygs7OEhaONysTa5k4I2OhgmVT7at43ED73gr6S93/rowWrWlZRy+oGaNlNfOYfyQ5LkOirZXvl6KVlR221bPFnmoNkXHUWUJliqxS2CPTUos/PNRbGOPE14J0f7EsmgY3GOcDTT2nrtCDZKrWd2tIgDf6vPt3XHNpY0Q9NrFJqNq+4qm0gMt5Xu66SEZcPKDZapl0CbpvM8QBt/tBssJ0EojOBHRt2Ye/2/Th55DT++XsGbt+4o/7xhIO7D0Nk7ZINuH/7ATav2Yblc1fi/KkLuH39Do7uP44DOw+qcbu27MW+HQcMyzyw8xDOnjiHcyfOazan48LpS9i1eQ92b96LjSu3QOYU28cOnMC9W/dVm4+3jxq/b/sBHNFsX714HfOmLcL2DTsxe/JcHNxzGKsWrsH65ZuUvzLexfmNVj+gxu/T1iB68o9IKEMsSCAKCWi/7qLQOk2TQAwhIAGf7Hd1+fce3sy8h8D3WvCn++h8oFYPlLTkxyZVi699He+x7zVknPOUO/C9/R6SMXWonAyu65/BefpdvF3wEIFe/rDVAmLvi65wmXYX3nfc1XhzC5lbp9MhXpVkcNv0XNmAlpW2y6cF2aaMGP/N1saZUrHSgk3LpDZwnnxH2fN7G/RWCr2urFlfD+0oOuJbaP3SLoGtXZFEeLvokTbXbfjcei/NSmRLgte5d3g98TY8DgXt61UdUmgBaVhjYbxG0dckUMtYOZRxhGSOhbPL1DvwuvAOlgmt4VA0idqn7aJdX5dZ9+GvZX3BPyQQAwlkyJIBD+89wpNHT5HYMTFSpEoBb09vvHz2Ck8fPUPSFE7IkCW9lgV+r17P5eDgAPm7YWVlCT9fP1haWCJfoTzBVl6sdBEcO3gS7966IXO2zJBMr2SDLayC/qJZamN9vHxw7dINNa5wiYKwtdM+3KszwEfLMFtbWyFrzsxImTol8hfJh+QpksFKa0vsmEj5a2dvh5OHgl7xJeMTJUms9OLH1z6AfrDDAwlEFYGgOzmqrNMuCcQQAhJYJaidEo4/ZoRjt8xw/Dkz7AsmVt47dc8CqyQ2sHKyVe2q8UMhe20Tt06HxN+nh1OPLIhXKZnqsS+QCE49syCJ1u4o45PbwTqVHZJ0zYTEP6SHpZYVlszjJ/LEE/aFEiNh3VTQ/0ncNj1sMwfty7XLlVDzMROSdMqIhI3SwELLGoueU8+ssExsA/2fhPVTQ3yQc8tE1povWaWqJGHtVLAvmgQ6nQ6JW6WHo2ZL1uv0U2Y4lHRUOrJmGfeJf1q2VNrknbmiExoXpxD+xKuQDI5dNL87a9IxIxI1T6vmscnggCSdM8JRkyRau9iUDnthUC+IQWhjjdeo0+ng1CcrLLRfzhYOVhBmjj9lQhJtzgT/SykmVSY7SfsMSNwmncYjC74kO64MsiCBb0Qgd/6c6Nq3E5p93xgdurdDyfLFtUAzC5p+3witO7XAdzUqwtraGu1+aoPGbRpq0gDV61ZF8bLFUL1eVRQrUwSZsmZEpeoVIP86mYh/QAB+7vcjqtetgkat6yOjFlSXrlBS2WrQoi6q1q4MGduxRztkyZFZ9cs4EXnnrfRly5VV6RcpWQhlvyuNnHlzoGWHZqpN/BV7oldNsyXz6/WckjmBf0ggqglYRPUEtB/rCcSqBUqwpLOM+F8LCztL6Cx1wVjotAylRTwryFHfIXWZI0DLpvo+8lSvFgt21AJevW5YR5kvrP6I9EnQrNMCxZBjAv0CPvXvg88Bn7E1w8LeEiIh51FMNE4h243PZZyIcVt4dQtbbT5NQuqFvCYh+3lOAjGBgIWFBUR0uqD/70hdxNh3S0tLdWrcblzX6XRapjevkiSOQR/wZYCMS5k6BVKnSyWnah6p6HS6YPV8hYLGSnBrbBfaH/252NJODeP07dImEvJc2igkEBUELKLCKG2SAAmETcAmUzw4lHL8VIo7hj3wK/bKQ3cmfdT8ttay1V/RFU5FAjGEAN0kARKIrgQY8EbXK0O/SIAESIAESIAESIAEIoUAA95IwWi+EWqSAAmQAAmQAAmQAAl8XQIMeL8ub85GAiRAAiQQRIAlCZAACXw1Agx4vxpqTkQCJEACJEACJEACJPAtCETvgPdbEOGcJEACJEACJEACJEACsYoAA95YdTm5GBIggdhKgOsiARIgARL4fAIMeD+fHUeSAAmQAAmQAAmQAAl8XQKfNRsD3s/CxkEkQAIkQAIkQAIkQAIxhQAD3phypegnCZCA+QSoSQIkQAIkQAJGBBjwGsFglQRIgARIgARIgARiEwGuJYgAA94gDixJgARIgARIgARIgARiKQEGvLH0wnJZJGA+AWqSAAmQAAmQQOwmwIA3dl9fro4ESIAESIAESMBcAtSLtQQY8MbaS8uFkQAJkAAJkAAJkAAJCAEGvEKBQgLmE6AmCZAACZAACUSYwPUb15HYxglJbJLGCIlnlQCXLl5S63zw4AEQaKH5nzRai52lA86dO6d8Dlkw4A1JhOckQAIkQAIkQAJmEKBKRAj80vcXVKtWLUbJ0qVL1RIfPXqExo0axwjfL10KCtKV40YFA14jGKySAAmQAAmQAAmQQFQRsLCwQEwUnU4XDEl0XkMwR41OGPAawWA18gnQIgmQAAmQAAmQADBh4gRs3749Rsq2bVuRN29edRmLFy8erddQsGBB5WfIggFvSCI8JwESIAESIIHIJ0CLcZxA9mzZ4errgne+zjFOvAO8DAFvgQIF4B3gqa3lTbQT8TN//vwm7zQGvCaxsJEESIAESIAESOBbE7BAzP7vW/Pj/B8JMOD9yOLb1+gBCZAACZAACZAA3N3dce/ePdgGxIvRcvPmDXh7e/OKRgMCDHijwUWgCyRAAiRAAsEJ8CxuE5AgsWePnqhfr36Mlt69+sDPz8/si+nv7w9PT88w9e/duR9m/9fufPf2HR7cf6hEfH/65CkePXxscMPLywsuzi6G85CVZ0+eGZpkrJwIs5fPX8Ldzd1gV9qNJTy7xrpSZ8ArFCgkQAIkQAIkQALRikC8ePHg5OQUo0XWEBGor168wuH9R8McMnfm/DD7v3bn1o3bcOLoSVy/cl0LUN/j8IGjOHLgCDZv2Kpc2bRuC8aOGK/qIYsb126iX48BeKcFzdD+9OrSF48fPcGubbsxaewUzZ47pk2cAW+vT7PkYdnVTH3yY/FJS4xpoKMkQAIkQAIkQAKxkYAEilOnTsV/c/+L0TJl6mTY2Nh8comeP32OVUvX4I3LG0we+w8CAwMxfdJMpSfbOQ7tO4zhv4/C778Ow6H9R1SWeNSwMfhz0Ag808YqxRCFjJH+Ab0G4cjBYwgICMC/U2dj5JDRkD6xc/7MBZw6fhrbNm5XmdMtG7Zp2dc3au4xf43H1UvXlO7E0ZMxdMCfIWYI/fSxltF9oGV5JTPftGVjZMmeBb4+PmrAg3sPUKhoIdy/e1+dGxcH9h5Ez1+7Y9/u/aq5aIki2LBmo8oQJ02WFClTp0TKVCmQOEli1W9chGXXWE9fZ8CrJ8EjCZAACcRUAvSbBGIZATs7O0jA42nhhpgsKVOmgrW19SdXJ4UWxEnwefzwCdy+cRvXr96Ag4MDvLUg0fm1C968eYtKVSvij9FDsHPrLi2DegrpM6TDkBGDkEoLAmHij4ypUuM7jJzwFzat3YzLF67A0ckRA/8cgO2bdyJtujQ4ffIsjh46jlMnzqh+sbV98w4kTJgQ+Qvmxerla9XcufPlVnObmMZkU+t2LdH5545qjlvaeo5qAXfdhnVw9/ZdyJYFCYj37NwbbKxsW7h47hIunruIw1pQL53x4sfTMvqOKFexrJyGKmHZDW2QRWgdbCcBEiABEiCBqCJw6+ZNZIuXG7kTFKSYYJDWPhOuX7seVfhp9xsT0Ol0iJ8gPs6fvYi2HVtjzvR5KFikQDCv7Oxs1T9SgUCobK2jU5Jg/aZObG1todPp8N79vXrwzyGeg0Etbfo0uHvrLiwtLZFGC34P7j2E3Plywc3VDZmzZVJZ2R9+/F7pJ03mpI7mFovnL8WsaXNw4/pN/DHwL9g72GPHll3Yu2sfBv35G/oM6IXnT1+odehtntGC72atm6BDl/bIXyi/yupKX/M2zZArT06p4s6tO7ityfMQWe2w7KqBJgoGvCagsIkESIAESCBqCQwaOBgNGzaihMagQUPMnxe99mpG7R0R96wXLJwfTkkdta/7C6osqASfoVEQ3T0790O2Ncge19D0Vi5ZjQG9BqFW3RooXKwQJKgcO3wC8mnZW8k0x9cyqMVLFUX5SmXh5+cPyaTXqFNdy7AexdaN27Fr6+7QTIfa3qJtcxXQSoY3R87sWLxmgcr2ig8du3ZQWWYZPGDoryrTLPOIOGhBsT6T27ZDa6RLnxY/9+oqqkokSM6SLQum/zcV7965Kv9knEgrLaMs2WtRlIBa/uU3qYclFmF1so8ESIAEYh8Brig6EJD9hfKUNcULoTGQfZ3R4VpFRx883nvg2uVPM+BvnN/g9PEzX+zymRNn4fL645sF5Fps27Ad61du0AJFP7UdYPO6rXj31vWz56qpBaWdu3WElZUVlq5fqPb6ZsiYHh27/oC6DWujSPHCyvZfY4chQcIEGDNlJHoP6IF5y2dDtkMsX7QSetm4drPSbdyiIYaP+wNiW4JZ2QLR89duaN6mqeqX7Q3FSxVDthxZMX7a36otgzbnr7/3ReduHdC+S7tgcyuFSCoksyxBsF7yFcxntuUcubKrIF4/1t7e3uyxekUGvHoSPJIACZAACXw1AkOGDcHWrVsoYTBo1qLZV7se0X2iB/ce4tb121ixaBXu3roHOXdzdcf8WYuwcPZidb5h9SasXbEeb1zeBlvOnu17sWrJGm38LcyY+C/m/7sQHh6eEP2Vi1err9n1dV9fX8ydMV9lOn20ut6Qu5s7bmrzBwYC8rosebircvVKWmb0iPJh3swFWDZ/hapLcKwfF5lHyWJKECs2ZfuDBLF6kQC5UNGCyJw18yd7hk09NCc2lBgVnxNEGg2P9vlwS08AABAASURBVFUGvNH+EtFBEiCBr03A3toBSe1SUCKZgfzCxoc/BfLnx53313Hd7RLFBIOnXo9QuHBQhu8Dsjh9kIe89uzYh9s37mCn9rW7Y1JHvHzxEtZWVmjYvD7OnT4PeaVXnUa1P+Hk6all0TWxtbND+kzpUaBwfhzaewhPHj6BvO7qwpkLhvrZU+eQI3d2FC9dLJgd+fpfth+8eP4S/v4B8JftAPZ2cH3npnz4X4NacErmBHmjgHFmOJiRKD6RLQHiYxRPE2PNM+CNsZeOjpPAVyEQ5ybZvn077l69D7e7HpRIZrBp8ya4uLioe0pLlCGA/4VJQIFioQjIA1zPnzzHT71/xNNHT9XeU9WhFfJBKjAgEBKIbl2/TWsJ/iNbBnQ6HV48e4Hzpy9g3679KFSkIOSBLpF0GdIb6um1+unjZ3Hs0IlgRqytrbTg1hV2WpBrYaGDQ3wHrF2+HoWKBT1oJm0yQKfTIVD7T+qU6EWAAW/0uh70hgRI4BsTkHdgDv5tMAb2H0SJZAZz/v1PfX38jS8xp4+hBIaN+R1JnJLgz3FDkShxQtTWsqqt2rdQwWqDZvXQuXsHtOnYCpVrVIJ67+y+w+pYvnI5NP++KbLnyo6CRQug14AeSJ4qOdr92BZ1G9dG0uROhnqqNCnR7ZeuGDR8ALy0rLDejjwo1qVXZ7Tp0FK94aBe4zqo37QucubOAfFBXqFW7X9VUL12VfU6tRiKOFa7zYA3Vl9eLo4ESOBzCJQtWxYVKlSgRDKD7Nmzf87l4BgSMIuATqdTr9ySjG+5SmWhF3kbgGR5JUj+X/2aQa/6QtAfa+uP78jV1+XhKunNmDmDwYYEtmJX3yf9xnU5p0RvAgx4o/f1oXcxjADdjfkEqlevju69u6Htj20okczgz+F/IHHixDH/JuEKSIAEYhwBBrwx7pLRYRIggagkkCBBAnjgPV7gESWSGchDQ5JpM3X9bly4ifnjFn8ij+8+UeoHNh/Gv3/9h7F9J2H9vE14cv+papfCeOyiScuwZs563LvxQLrMFpeXLmru/ZsOBRtz69Id1W7KNz9fP5w9dN7Qv2TqimBjwzvZu/6AGvvm9dvwVGNiP30mgWhFgAFvtLocdIYESIAE4iaBZw9f4OqZa3BK4RhMrG2DvnLet/EgUqZPgdJVS+Dh7ceYOngmJOAUWvqxqdKnhFNyRzx/9ALThvwbLCgWvbDk+O5TuHzqKrYu3Q5fH1+Dqo2dNUL6dPfaPeUrtK/Qk6VOCn9/f1w/fxPZ82U1jAuv4uPlg+0rdqo5T+w5FZ46+0mABL6QAAPeLwTI4V9AgENJgARIwIhA/ETxUadNrWCSIk1ypTFk5gDU+742ytQohY6/fa+C3Wvnbqg+KWRs9aZVUKtldfw0rLM04cXjl+poTnF8z0lUa1JZqV46cUUdpciQLX0wfzLmyAAPd090GPA9rKwskS5LWmTJnRl2DnYoVrGIDDFLLpy4DAtLS1SoXRYn9502awyVSIAEPp8AA97PZ8eRJEACJEACkUjAy9Mbl09eCSZu79zVDPLAkKpohfOLoFebOcR30M6Cfny9fXH/5kPcuXoPG+ZvhkN8e+QsmD2oM5xStj9IEFtKyx7nKpwTx3afMDlCMseLJi5FrRbVkaOAebZNGtIaj+08gcJlC6Bk5eJwfeOGh7cfaa38IQESiCoCDHijiiztkgAJkAAJRIiAfM2/es4GGMuzh8+D2RCdJVNWIEXa5FpmNZOhz9PDC/+Nmo///l6AozuPw8raCk/vPzP0h1U5sfskJHObIFF8lPiuGO5dfwD3D4G2fpyHuwemD5uFPEVz47v6FfTNn3V85+IKCXBLVikO2RKRNJUTjms+fJYxDiIBEjCLAANeszBFByX6QAIkEJsJuL11w1ujh5dkf+rIruOgz2Z+q7Uf2HgYty/f/SrTJ0ySAMNmDQwmxvtihcmskfMgwWfnQe2D+SRj/5o3BCMXDMPoJX9pgWkuLNSyscGUTJzIv6B19vB5BAYEYMfK3SpLLGqnD56Tg5IArW/OqAVwSOCAVj2aqbYvKU4fOKuGXz1zXc1pY2ONs4cvqL3AqoMFCZBApBNgwBvpSGmQBEiABCJO4NCWY1gza6NhYGBgoJZlfA/5J0wNjd+g8vLxK7i/DdpW8A2mN0zp6eGJf4b8i3fO7/DLuF5I5JjQ0BeyItsfchfJpQXGnpBxIfuNz6+dvY6AgEDIHuCnD57hxeMXcEyeJFjGdcP8LXiutXcd0klljo3Hf05dsrnJUyeDZK9lziTJkgTtST77cU/y59jlGBIggdAJMOANnQ17SIAESEALhgJwfNcpvH7mDMl27lq1Fy4v30AysnJ+aPNRFVgZo7p+7ia2LN4B1efmYei6cuo6nj14jrMHz2P7st14/vCF6nN/9x6Pbj+B83MXNdeFo5dUuxTvtfFiZ/+GQ9qc5gee4qOM27liD8SfAC1LKfaeavPvWrUPu1fv04K7V9KkRPrPH7mEbUt2qXXqfctTPBdSZ0qldB7dfow7Wrb35sXbSk+OEpirTq2QPbhHth3HjuW7g9nWusz6kQzuq2evYSzeXj7wfO+JcX0n473re7Tv3xbyVgR5lZeb0bYDGStbBVxevVH7eLcu3aECV3sH+zDnPrrjuMoG/9CvDfTSsnszvH7urILcs4fP48iOY2jQvi58vH2C+SZ+hGncRKcEuOK7fi79MUO2dMGCbBND2UQCJPAFBGJrwPsFSDiUBEiABD4SkAzrxnlbMbHfNPXVvgSFU/rPwNiek3Hzwm2c3HMGk7Q+/QgJgheOXYZXT17j0JajGNd7KsSG9G9ftgtTf/sXh7cex5N7TzFlwEwVAHt7eePNq7fw8vCGvPf14c3Hoq5kzvAFKmCVecf2nKQCcNURRiF7UMf3maoFUKfg/OINlk5aiYe3HuPBjYf4R5v/5vlbuHr6Bib/Oh0SAIupFf+sxUZtnV6eXrh18Q4WTVguzVrwuketU07Eh/9GLsLqGevx8ukrLBizFIe1zLT0ebh7KibCQwL4Gb/Pxp41B6TLbBEbf/eaAGO5efGWCvQlmJVAcXy/KRjx8xgl/41eYLAtY//qOhoju43F7JHzkCBxAnQZ0tHQb6oi2d9bl++geKWiwbozZk8Peejt5N7T0L8JYsX01cH8Eh9fPX2N9fM2YfPibXDXgu9fWwwKZsfUidhMmtJJ7d017i9RuZh6tZm3p7dxM+skQAKRRIABbySBpBkSIIHYTaD9b23QYWBbdBnWQcv0+aJV72bo9Hs7/DSikxbsvNcCSxcF4NjOU2jYuS7a9W+FAdP6aF9V+0KCWNWpFeVql0aP0V2UrXRZ0+DyyWvqPa/5S+VBmsyp0KZPc9RpV1PTDPr5YUBrNU/3UT/C3z8AD26G/zT/pvnbkDVPZvSb1BMtejTW/OiL5GmS4fT+c8hXMg9+HtFZ+ZC9QFac0dpkJglyS1YrhgYd66Dj4O/x81+dpPkTSaNle2Vd3/drie8aVMDF41eUzsFNR+AQzx69x/2MRj/WQ9OfG2ofBk6rPnOKinXKYdyKkZ9IvuJ5lO+m+nqN+lmZDjl29OI/0XnQD3BMlkTLvnvgrfNbkyLBpdjNUzSXsmNc/Pnf76jb9n9opWV7RceUpEyXAvV/qGPwecyyEZCMdGjzSXvt1jUxYHJf46lUXYJumcPW3ladsyABEohcAgx4I5cnrZEACcRSAgkSx1cr0x/jJ4qnzu0d7NTR7a27CnpdXVyRLX8W1SZ7SbPkzYxLx6+qcykSOX3ce5o4aWK8ff1OmkMVeRhLOi0tLdW7Xt85u8ppmPLi8UvkLfExiJNspYhsqchRMKthbHatfvlEkG9lapbE/vWHMOqn8VilZXDlrQcGRaNKvITxIOuSpoSOCSDrlbpkrJ1fuGBQ6z+VLJm0UmVmJfMq/d9KTu0/i0UTl4UqspUjMn17dPtRqHOJH3JtInM+2iIBEjCPgAp4zVOlFgmQAAmQQFgEJBiUfgl+5Sji6uKGxEk/BrnSFtVibW0F1zef7vdNoAXt7zR/9PO7vXFDwg8Pf1VuVAF9JnRDpQbl1TaL6b/P0auFetQHvqJga2eLAmXyYcTiIQYZuXSo2hog/d9K5B926D68K0IT4zVEho9Z82YJdS7xIU2m1JExDW2QAAlEkAAD3ggCozoJkECsJvBFi7PTvo7OkCM9Dm48rB60ktd5yUNqBUrnC9duyvQpIG9EkIevvL5wH6dsVTi55zQe332q9vyePXQBL5+8QsGy+XFay3hKJlYewjt3+BIKlcuvfNu2dBckaC1ZtRhK1ygBLw8viC+q04wid7GcuHDkEu5eva+0Xd+4qgfj1AkLEiABEvjGBBjwfuMLwOlJgASiNwGdTmfSQZ3OdHvNFlXUQ15/dRqDuSMXoUjFQmoPqhgxlU20sAiyk7NQdtjY2mDI9yMwse8/0OmC2mWcsYTSbKyCuj/8T8sqJ8L0wbMxuPVf2Lxgu7JXrFJhWFlZYXzvqZigzREvgQMKlgkKeG+cv6W2M8iWBHmzQ6POdWGlZYp1mn863Udf5Nx4Mp1F0K+RwuUKoNz/SkMeshvY8g+M/nmiejDOWJd1EiCBmEQgdvka9H+q2LUmroYESIAEIo2ABH3y1bw89KU3KuepM6bSn0LOM2qZXWlInz0dhs79Db9M6qEdB0ACR2kXkQe6SlcvIdX/s3cmgDVd+R//3veSCM2OlNS+rxGE2netpbRVe8cyY/ynarqhtJ2ZMq3OqI5BS6dSrdqXxhZqSxCEEAlB7bRkQcguIUjkf89J3+t7z0tE8iLvvXxf/c4953d+53d+53Mvfj3Ou5Ey4u3BEF/wEg1xxnby3Lfw0f+m4P3578hkU/g1nPcfAVMhdotTk9Lkq9HEq8dMJSMtE+J88YRP/izn/2Dhe/h48TT5VgChFzEI3YdfT4b4IpyYV8z/7uw3pf20r96TMbTq4ifUEHpxvlc0BozpC/FlNVEX0rqrHz5Y8J6oSun7em/MXPEPTP3yXelLfDlPdrAgARIggVImwIS3lG8ApycBErA/AoqiyHfAiiMCT7o6kZSKJLugcbsDQ7Fx8RazEh12Uj9UzO/m+ej5YaET53n1hr9V8uxdf2sV7SJ2sT0qucvjEUXzwFGmBLKzs+HkWA7uTp52La6OHijKu41hIx9FUaBVHOFgg6JRtDD8aKC14Dosx0Sj5J/W5t9juDLWSYAESIAErIaA2BUe99Fo+Woz02un/u2tJk4GYhkCERER+HLuV1j27Uo7l+WYMnmKZaBZoZe169Yi7pd4Va7ZnFz95SqCg4Ml1aCgIMRciUWsuhZrk5hfY7Ft2zYZp2nBhNeUCNskQAJWR4ABkUBZJ7Bnzx5s2LDB7uXq1at2e6uXLV2Gt/76tk3KXye+haSkJHlvEhISMOGNCeo63rI6EXElJibKOE0LJrymRNgmARIgARIgASsiII6JjBgxAhOx8GUuAAAQAElEQVQmTLB78fPzsyLylg3F29sb/v7+xZWnMr5KlSr6xfv4+Fh8zpo1a0L3qVixokX96/yaXpnwmhJhmwRIgARIgASsiEC7du0wbMRQ9Hypu11Lr5d6YMY/Z1gRecuG8p85/8G0D6fahMybP08uXlEUzJs3Dx989IFFZfYXs+Hu7i7nmD17Nj7824cW8f/x9I/Rt29f6de00Jgq2CYBErBxAgyfBEjA7gjcz76HzOx0u5Y7ORmw9E++s6YHwcvTCxpnQHHOtXoRsUL9KIoCDw8PaMsrFpXy5cvD0dER4uPp6QmtswKLzOGkQeXKlWHuw4TXHBXqSIAESIAESIAEbJ4AF0ACOgJMeHUkeCUBEiABEiABEiCBp0zg5o2bRjPGx8Ubtc017ty5g5TkFNmVlJiMu3fvyrooHtc2nE83V2pKqhj6WMnKyoIQU0PdeBGXaZ85e1MbXVvYXr1yFbExsbhx/YZcY1JiEs6fuyBN4uOuyb709NvIyMhAXEwcYq7EyL7HFUx4H0eI/XZOgMsjARIgARIggadL4Ozpc9izay+OHIpAwo0EBG8PwYHQML1uW9AONbGLw7pVgTh5/CS2bNyKsH0HsSlwM06fOo0dW3fi1s28txGIxFCMjzoShRvXbiA6Klr6E3WxKl3/mVNn5fiLFy4h6ugxBK7ZAKHbunkb9u89AMNEWIwzJ5cvXsbBA4cQtHELDuwLkwlpZEQUTv98RpqLONeuXAexFqEI2bkbhw8dwe7gPfqkVejzk0tqbPfu3cel85ekBG3YAqG7rSa4Ysz5s+fV9Z/BBfUaeSRS9bsXqampouuxwoT3sYhoQAIkQAIkQALWRWDZopUImPe9Xk5Hn0HslTgEzP0OX0yfi+Ctu/HgwQMZtKgL28VfLsH6lZtwO+221D9SGCiEP2Gv8yG6xJzRR0+IqpTjEdFY+s0KWd8ZFCJjWbJwmWwXVJjzXZC9PfZVq/4cbibchEh809LSUb5CeblMsUOamwvk5GSrid5lZKiJXuKtJNxKuIXMjEzcv/8AqSlpcHBwkPa6wrelL5LVHV+RXIpdUkVRIPwa9l9Vd07F+LTUNNzLugeRRIp5PTzcUbFSxUKdn27avCl69u6Bga8OQOeunaT7Zr5N0bFzB7meISMGY+jIIfBr3UK2e/Tujm49usoxDRs1kPYFFc18m6F+g3roro7r2LUjRo8bhbbt20pdXGw8uvfqhj79X4T/8/7o1rMbRv/pD/D18y3Ipb6PCa8eBSskQAIkQAIkYBsE9u7YB782vmjToZUU76reMilq0qIRXh72EvYFh+FQ6GG5mGNqYlqxshfadPRXdwAT1MT0O6kvqPhx+QZEHIzEsSPRejMxp0hoxRfLhCz9ZiVCd+6X/SKWyPAo+KvxSEUBhTnfBZjbZZermyuGjxqGseNHo5Oa2Anp3K0T+g3si9eGvYr+L/dTk7uu6NClA3q80B1j/28MXuz/AgYPH4SOqu6lV/rD08tTJsyVvSujVu2a6N23FwYOGiDHCj9eFb2M+vsO6CPHi77adWqhuV8z/dzCp3cV7ydmLWJwdnaW47yf9YZ4hZ6iKHB1dYWuLTufoFAURVorigJFyRN3NSkX/5OgKIrs0xWKYtzW6c1dmfCao0JdfgSoJwESIAESsBICTZo3QhPfxlK8Knmi1fMt0alHRzRoUh/N/Jrg0rnL+kjrNqgN31bNUK2GD9w93fV6c5WU5FQkXL+J0W+8jtBdeQmtsNNqNajfuB7C9x3BUTUZbtTs9x27Z9WE28HRAT7VfYRpvpKf73wHlLEOjSYvLdNdGzVpKAno2obXyt6V0LhpIzWx/P2tBLp+rVaLgvqrPlcV/m1bw/CjG2uos6d6Hll7WhHXQgIkQAIkQAIlTqD0J/jb29Mx9Y2PpITtOaQPKEn9J/B9wQfQ7cUuet2iud/h7bGTsWnNFlTyrqjXm6vsDwlDx+7t0aZ9a1w8cxEZtzP1ZoNGvgJxLGLzuq14ZfhAvb6wlYJ8F9YH7UigKASY8BaFGseQAAmQAAmQQCkT+CJgFub/MEdK196dZTRiB/XTabMw/I9DULdBHakTxV/eG4evlv4Xi9YuxO7toYi9EifUZmXfrgM49/MFLPxikew/uPf3ZNqnelU86+MNn2pVIY5JSIMnKAry/QRuaEoCT0yACe8TIyv8AFqSAAmQAAmQwNMiEHc1HjMmfYoRfxoKXQJsOvfDnBzkqJKdnW3aJdsxv8bKLy+Ne2sMXh0xECPGDcfenftkn66YMGk8xk4YpWsW+loY34V2RkMSeEICTHifEBjNSYAESIAEnpgAB5QAgXfGTsaEkW9L2bMjFFvXb0N62m2s+HY13hozCX9/55/6Wb/+T4DUvTduKnq82BW169XS9xlWxJndbi90lv3CpvuLXZCYkCjP9Ors3Dzc4OLmomvKq5gz83Ym5n76pWybKwrj29w46kjAEgSY8FqCIn2QAAmQAAmQwFMksGRjAP636ku99OjTDW+oO69CL44uCJk5f7qMaNonkxGwdqE80iDGDB41CGK39ciBCJjKoBEvG53NFV9kWhz4DZ6t6g1xlQ5/K8SPhhXzieYfxo/A0s2LMXP+jCL5Fj4oJFCSBKwn4S3JVdI3CZAACZAACZCAEQGtgwNMxcigGA1Tv6JdDHccSgLFJsCEt9gI6YAESIAELEuA3kigpAnUqF0d/u1bPSKmRxWKEoelfCuKAgfF0aalKPw4pmQIMOEtGa70SgIkQAIkQAIkUEQC6enpCA8PR9qN2zYt+/ftx927d4tIgcMAWAwCE16LoaQjEiABEiABEiABSxAQP9L4X5/9C3/+83ibls8//1y+9cISTOijeASY8BaPH0eTAAmUNgHOTwIkYJcEqlSpgkaNGtm0eHt72+W9scVFMeG1xbvGmEmABEiABEjAjgm4urriv3Pn4F+zPrNpmTd/LpydnZ/aneJE+RNgwps/G/aQAAmQAAmQAAmUAgEnJye4uLjivkOWTYu7uwe0Wi34KX0CTHhL/x4wAhJ4igQ4FQmQAAmQAAmUPQJMeMvePeeKSYAESIAESIAESKBMEWDCW6ZuNxdLAiRAAiRAArZPION2xiOLSLqVhNzcXCN9VlYWhK0Qow6Dhm5MQTYG5rh7N0s2s7OzcfcOXzkmYdhAwYTXBm4SQyw1ApyYBEiABEjACgiIZPZO5h3EXo1FfGw8zpw6i18u/oL9uw/g/v37OPvzOSxZtBQ5OTn6aG9cT0BQ4BZcPH9J2p85dUaOOR4ZLX2kJKdiX8h+rFm2DgdDD0mb5MRkHA2PlK8SO3b0OCIOHdX7ExUx/1rVXtQ3rQvC5sAgZGZk4tD+cJw4dhIH9oRBJMKin2JdBJjwWtf9YDQkQAIkQAIkYIUESjekB+pu6roVgQjethvHI0/gevx1HD0ciWo1q+H40WhEqvX6DesZBRmyfTdatWkpdcL+cFgEstTdWWEv/IRsD0HLNn5w93CDb6vm0ueeXaFwcXXBqeifZQKceCvRaBe3es3q8PB0lz6dnBxRp14dXFYTb7GTLPx6VfLEhbMXZT8L6yLAhNe67gejIQESIAESIAESMCFQpeqziIuJQ936deBV0VP2itd9ubu7QfyQinv38nZ5ZcdvxTPPVMDhgxG/tYCKlbxQtZoP3NQx9RrUlccfwkIPwtPLExHhR6VdhWfKq8nuKVT2rgRXNfGtoPoQO8iyUy3EDu95NaEVu8PiKMSlC5elXzc3N9WPh5osu8p4VFP+sjICTHit7IbYcjiMnQRIgARIgARKisDfP/sIXXt1QYcu7fHykIFSKlauiE7dOuJPE8Zi2oz35e5q5OEoRB2JwqvDXsGIMcPQ0t9P2ooxnl4eGDT8FXTo2h5D/zAEL/Tvjfad26Fbr67Spt/LfTF45GvwURNjYS/08bHX1B3kPJ9ih3fa9CkyuRX9w0cPxXPVn4N/u9ZyfM3aNdCilW9JIaDfYhBgwlsMeBxKAiRAAiRAAmYIUPWUCWg0GiiKgibNG8vks/XzrWUE5t6BK2yFCAMHBwdxMXpXrq5PdqiFqU9Vpf9laqvvYMXqCGisLiIGRAIkQAIkQAIkQAJ2RiDhZgI02Y7QZjtZvYhY7Qw/mPCW1h3lvCRAAiRAAiRAAmWGwORJkzHt/Wk2Ie+8/Y7d3RcmvHZ3S7kgEiABErAtAoyWBMoCgdTUVFy4cMEmJDk52e5uCRNeu7ulXBAJkAAJkAAJkIC1EZg48U18//13NiGTJk2S+B4+fIjY2Fg45DhZVFLUhPru3bwf2vHrlV+hzXG0iH8lR4Pz58/L2E0LG0l4TcNmmwRIgARIgARIgARsh0C/fv3gVcVDFXerlz59+ujBTpw4EaNHjbaoTJjwJjIzM+UcH37wocV8v/766zhy5Ij0a1ow4TUlwjYJkAAJWDMBxkYCJGCTBHJzgZzcHJsQQ8Dih2qkpKTAknLnzh39FOI9x5b0rXdsUmHCawKETRIgARIgARIgARIggTwCs7+YjU2bNllM1m9Yjzp16kjnvr6+CAoKKrLvTSZxCV8NGjSQvk0LJrymRNgmARIgARIgARIgARKAeM+wXws/KOVyLSZOzo5o3TrvPckdO3aEQzktNM6KRcShnAPatWsHcx+NOSV1JEACJGAfBLgKEiABEiCB4hLIhWX/ezSeXFVlKVFdmfnFhNcMFKpIgARIgARIgARIoCQJxMXG40Bo2CPy4MEDo2lFW9ilp6Ub6dNS0+RY4ceoI7+Ggb44Y7OzsyHe3iBEnO81cCurOr04p5umxizasqOUCya8pXwDOD0JkAAJkAAJkEDZI3DoQDi+nLMQ69duNBKR4BrSmPv5fMz74ivEx10zVGPLpp+k/puvAoz0hWkUdezli5exa/suREUek9eU5BScO3seQRu3YPvW7QjeEYK9u0Mh9MI2LiYWe0P2ImxfWGHCwoVzF3D92nVcuvQLrl6JkWNiruZdZaMYBRPeYsDjUBKwMwJcDgmQAAmQwFMkUL1GNcz73xwjqVChgj6C5UtWqYlfrL5tWAnesRudu3XC2dPnkKbu9hr2Pa5e1LF169dFvwH90KatP/r074Py5cujVu2a6NG7O/q+1Be9+/RCz949pL5+w/po2rwper7QE526dnpcSLL/5IlTSE9Px6XzF5CQkCB10cdO4NbNW7JenIIJb3HocSwJkAAJkAAJkIAdEng6S7p3776azCUaie4IwN6QfQjeHoxPP5/+SDDnzpxHxu0MjH9zHFxcXRC6Z/8jNvkpijPW1KeHpwecnZ3h4uJi1KXTGykL0Rg87DU0aNhAJtO+LZrL3d6Brw5AZe/KhRhdsImm4G72kgAJkAAJkAAJkAAJlASBG9dv4I0/TjSSzMw7OH3qDBYt+BaffD4DXhW9Hpk6eEcIWrb2wzPPVFB3VLtj17aQR2zyUxRnbH4+LalXFEW6E4l0VZ+qsm6JggmvJSjSR5kkwEWTp6AHngAABq1JREFUAAmQAAmQQHEI1KxVA+t/WmskruqO7ezP5qBdx+dxLe4axFlfMceJ4ydxLf4axJfG9u8Ng9jZFbvAiqJAJM6xMXHCrEApztgCHRejU6to4ahxsogIX7odctOQmPCaEmGbBEiABEiABEjgSQjQ1sIEfP2aIzkpGTt+2iVFuI8IP4qYq3GIOBwpmki4kYBd24Nx5vRZODo6ImTHbqkvqCjO2IL8FrUvICAAy5Yux4qlKy0i0teKFWbDYcJrFguVJEACJEACJEACJFCyBO7ffyATW5Hc6iQ3NxeTP3gXn8yarhcRhTiv265DW5nkduzSAf+eM1Mvw14fjL0hoRBjhW1+IhLkoo7Nz2dx9Dk5OVi5cqVFJb94mPDmR4Z6yxKgNxIgARIgARIgAT0BRVHkl7LGj54AQ8nMyNTbmFYy1L5T0T+j14s9jLq69ugC3dlfow6DRnHGGrixaFXsTI8dOxZjzYh4A4RusqFDh5q1MTdOURTdMKMrE14jHGyQAAmQAAmQQMkSoHcSEASGjHjN6Oyu7iyvOJsr+g1F9DVu2gguLs/IMc18mxp2yy+2CRvxmrPr8ddhTu5l3StwrKlPowlKqDF+/HgMHzkcQ0cMMZLhI4fh3XffkbP2798fY8aO0fc3aFQfTX2boN+AvmjRqgVatPRF0+ZN0LBxA/Tt3xe9evWSPxjDdLdbI72xIAESIAESIAESIAESsGkC4ozukm+XwZxs27LD6tYmktKHuTnIzn1gJEKXk/NQH++D7Pv6/mZ+TVGnfm3kah6iVt0aqFGnOho2bYCmLZrAwVGLqlWrQqPRQFGMd3o1em+sWBEBhkICJEACJEACJEACT0agd5+e+GjGNLMy6o8jn8xZKViLM72FmVYchRBHHrRarfzCnm6M0OnqplcmvKZE2CYBEiABErAeAoyEBEigzBAQCWxJLZYJb0mRpV8SIAESIAESIAESIIFiE1AUDZRC/AfVBvl8NPnobUnNWEmABEiABEiABEiABOyQwLFjx5CWkobM1LuPl/RM7N+/3ywFJrxmsVBJAiRAArZIgDGTAAmQgH0RuH79OkaPHo3hw4c/VoYMGYIrV66YBcCE1ywWKkmABEiABEiABEiABKyBgHjzQrVq1WBOKleurA/RyclJX8/NzdXXRYUJr6BAIQESIAESIAESIAESsDoCLVu2xNdff40FCxfkK7Vq1ZJxz5o1C4MGDZJ1RVHkVVcw4dWR4JUESKCsEeB6SYAESIAErJyAj48PFK0Cbbk8OX4iCkejjiDqeKQqR5GcnCxfTfbw4UP5Dt569erB3IcJrzkq1JEACZAACZAACZBAmSFgOwv1b+uPdh3aoW27Nqq0hZubGypUqABzP2zCcFVMeA1psE4CJEACJEACJEACJGAzBMQPmxDJrgh42bJlWL16tag+IvkmvF3q+uPvL/yFQgZ28wz0atgeivL7mZ56nrXxWr3+lEIysGdWvpUaGz0bbuXc4FOhBsXCDAxfKq/VaFHJuQqeLe9DMcPAw8kLDg4O+r+0yzk6w9XR3a7FxcFN7tLpFi2ekfLaZ2DLoii//52j0Sgop3VWpbzVi+4e6K6OGidYShw0jjq38s9djaKFg+JoJEKn1Wr1do4Oxv2G9qJPZ7ht2zbExsbqmkZXjVHrt8by5csRE34WtW+5UMjAbp6BhIjL+P7b7+VTHhgYiPDNB5B1OIVCBji5PQrfBSzWPxuBKzdgz4Z9FAsz+PfMfyMxMVFyfn/K+9j5YzCCA/dQzDDYum4bZn4yU7I6dOgQvl20GJvWbLFr2bhmM6ZMniLXnJSUhJkzZ2L96g3WKIWO6eN/fIzMzEy5psnq2tatCoQtyOTJk2XM4lzs1KlTsXblOovJ8qXL8eOPP0r/AQEBWLViFdasXGskq1asxty5c6XNTz/9hB+W/IDVK9eYlQULFuD48ePStqDCbMIrBohtYcoykIF9McjOzhaPtxTxzx68v/Z1f4tzP/lslPyzEB4eLn/vieLSpUv881X959eCntmUlBSBSsrWrVvLBC/xXMgFq4V4XgriYwt9kZGR6kryfp09e9Zm7uGpU6fyglbL6Ohoi8Yt/u5V3cpf4s/d/O5jVlaWtBGFSJDzs9u5c6cweazkm/A+diQNSIAE8ifAHhIgARIgARIgAashwITXam4FAyEBEiABEiAB+yPAFZGANRBgwmsNd4ExkAAJkAAJkAAJkAAJlBgBja+vL0aNGkUhg1J8Bvj88fcgnwE+A3wG+AzwGeAzUDLPQIsWLfD/AAAA//810PPFAAAABklEQVQDAKkTQqSLlWq+AAAAAElFTkSuQmCC" + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Visualize feature importance using the ExplainableForecaster interface\n", + "from typing import cast\n", + "from openstef_models.explainability import ExplainableForecaster\n", + "\n", + "# The GBLinear model implements ExplainableForecaster, providing feature importance\n", + "explainable_model = cast(ExplainableForecaster, workflow.model.forecaster)\n", + "\n", + "# Create an interactive treemap of feature importances\n", + "# Larger boxes = more important features\n", + "fig = explainable_model.plot_feature_importances()\n", + "fig.update_layout(title=\"🔍 Feature Importance Treemap\")\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "01c28d0d", + "metadata": {}, + "source": [ + "---\n", + "\n", + "## 🎯 Summary\n", + "\n", + "In this tutorial, you learned how to:\n", + "\n", + "1. ✅ **Load energy data** from the Liander 2024 benchmark dataset\n", + "2. ✅ **Configure a workflow** with `ForecastingWorkflowConfig`\n", + "3. ✅ **Train a GBLinear model** for probabilistic forecasting\n", + "4. ✅ **Generate forecasts** with confidence intervals\n", + "5. ✅ **Visualize results** and feature importance\n", + "\n", + "### 🚀 Next Steps\n", + "\n", + "- Try different models: `\"xgboost\"` for more complex patterns\n", + "- Experiment with more quantiles for narrower prediction intervals\n", + "- Use the **backtesting notebook** to evaluate model performance systematically\n", + "- Explore MLflow integration for experiment tracking" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/packages/openstef-beam/README.md b/packages/openstef-beam/README.md index aac731009..526cccec5 100644 --- a/packages/openstef-beam/README.md +++ b/packages/openstef-beam/README.md @@ -1,7 +1,7 @@ -# openstef-beam \ No newline at end of file +# openstef-beam diff --git a/packages/openstef-beam/pyproject.toml b/packages/openstef-beam/pyproject.toml index ae12687ec..5c33fb373 100644 --- a/packages/openstef-beam/pyproject.toml +++ b/packages/openstef-beam/pyproject.toml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -15,7 +15,7 @@ readme = "README.md" keywords = [ "energy", "forecasting", "machinelearning" ] license = "MPL-2.0" authors = [ - { name = "Alliander N.V", email = "short.term.energy.forecasts@alliander.com" }, + { name = "Alliander N.V", email = "openstef@lfenergy.org" }, ] requires-python = ">=3.12,<4.0" classifiers = [ diff --git a/packages/openstef-beam/src/openstef_beam/__init__.py b/packages/openstef-beam/src/openstef_beam/__init__.py index 600748532..9507dc9f1 100644 --- a/packages/openstef-beam/src/openstef_beam/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/__init__.py b/packages/openstef-beam/src/openstef_beam/analysis/__init__.py index d7dcbfcdc..6f3cca7c6 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/analysis_pipeline.py b/packages/openstef-beam/src/openstef_beam/analysis/analysis_pipeline.py index 51bfdcdfe..78b6954c3 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/analysis_pipeline.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/analysis_pipeline.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/models/__init__.py b/packages/openstef-beam/src/openstef_beam/analysis/models/__init__.py index 0f762926c..fc43186fe 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/models/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/models/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/models/target_metadata.py b/packages/openstef-beam/src/openstef_beam/analysis/models/target_metadata.py index 6fd670638..d545fc09e 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/models/target_metadata.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/models/target_metadata.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/models/visualization_aggregation.py b/packages/openstef-beam/src/openstef_beam/analysis/models/visualization_aggregation.py index dee42a6a7..665600696 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/models/visualization_aggregation.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/models/visualization_aggregation.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/models/visualization_output.py b/packages/openstef-beam/src/openstef_beam/analysis/models/visualization_output.py index 3c5106b22..00a7d1238 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/models/visualization_output.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/models/visualization_output.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/plots/__init__.py b/packages/openstef-beam/src/openstef_beam/analysis/plots/__init__.py index 424c903e9..92892f7d9 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/plots/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/plots/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/plots/forecast_time_series_plotter.py b/packages/openstef-beam/src/openstef_beam/analysis/plots/forecast_time_series_plotter.py index 3e37cc324..28ee40367 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/plots/forecast_time_series_plotter.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/plots/forecast_time_series_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -8,12 +8,16 @@ forecasts, measurements, and uncertainty quantiles across multiple models. """ +from datetime import timedelta from typing import Any, ClassVar, Self, TypedDict, cast import numpy as np import pandas as pd import plotly.express as px import plotly.graph_objects as go +from pydantic import Field, PrivateAttr + +from openstef_core.base_model import BaseConfig class ModelData(TypedDict): @@ -53,7 +57,7 @@ class QuantilePolygonStyle(TypedDict): legendgroup: str -class ForecastTimeSeriesPlotter: +class ForecastTimeSeriesPlotter(BaseConfig): """Creates interactive time series charts comparing forecasts, measurements, and uncertainty bands. This plotter visualizes forecast performance over time by overlaying multiple models' @@ -111,17 +115,21 @@ class ForecastTimeSeriesPlotter: stroke_opacity: float = 0.8 stroke_width: float = 1.5 - def __init__(self, *, connect_gaps: bool = True): - """Initialize the ForecastTimeSeriesPlotter. - - Args: - connect_gaps: If True, connects data points across missing timestamps with lines. - If False, leaves gaps where data is missing (no interpolation). - """ - self.measurements: pd.Series | None = None - self.models_data: list[ModelData] = [] - self.limits: list[dict[str, Any]] = [] - self.connect_gaps = connect_gaps + sample_interval: timedelta = Field( + default=timedelta(minutes=15), + description="Expected interval between consecutive samples in the time series data.", + ) + connect_gaps: bool = Field( + default=True, + description=( + "If True, connects data points across missing timestamps with lines. " + "If False, leaves gaps where data is missing (no interpolation)." + ), + ) + + _measurements: pd.Series | None = PrivateAttr(default=None) + _models_data: list[ModelData] = PrivateAttr(default_factory=list[ModelData]) + _limits: list[dict[str, Any]] = PrivateAttr(default_factory=list[dict[str, Any]]) def _insert_gaps_for_missing_timestamps(self, series: pd.Series, sample_interval: pd.Timedelta) -> pd.Series: """Insert NaN values where there are temporal gaps larger than the expected sample interval. @@ -158,7 +166,7 @@ def add_measurements(self, measurements: pd.Series) -> Self: Returns: ForecastTimeSeriesPlotter: The current instance for method chaining. """ - self.measurements = measurements + self._measurements = measurements return self def add_model( @@ -200,7 +208,7 @@ def add_model( "quantiles": quantiles, } - self.models_data.append(model_data) + self._models_data.append(model_data) return self def add_limit( @@ -219,9 +227,9 @@ def add_limit( ForecastTimeSeriesPlotter: The current instance for method chaining. """ if name is None: - name = f"Limit {len(self.limits) + 1}" + name = f"Limit {len(self._limits) + 1}" - self.limits.append({ + self._limits.append({ "value": value, "name": name, }) @@ -373,7 +381,7 @@ def _prepare_quantile_bands(self) -> list[BandData]: List of BandData dictionaries with quantile band information. """ bands: list[BandData] = [] - for model_index, model_data in enumerate(self.models_data): + for model_index, model_data in enumerate(self._models_data): if model_data["quantiles"] is None: continue @@ -408,7 +416,7 @@ def _prepare_forecast_lines(self) -> list[LineData]: List of LineData dictionaries with forecast line information. """ lines: list[LineData] = [] - for model_index, model_data in enumerate(self.models_data): + for model_index, model_data in enumerate(self._models_data): model_name = model_data["model_name"] forecast = model_data["forecast"] @@ -430,7 +438,7 @@ def _prepare_quantile_50th_lines(self) -> list[LineData]: List of LineData dictionaries with 50th quantile line information. """ lines: list[LineData] = [] - for model_index, model_data in enumerate(self.models_data): + for model_index, model_data in enumerate(self._models_data): model_name = model_data["model_name"] quantiles = model_data["quantiles"] forecast = model_data["forecast"] @@ -499,17 +507,17 @@ def _add_lines_to_figure(self, figure: go.Figure, lines: list[LineData]) -> None def _add_measurements_to_figure(self, figure: go.Figure) -> None: """Add measurements to the figure.""" - if self.measurements is not None: + if self._measurements is not None: if self.connect_gaps: # Original behavior - use data as-is - measurements_data = self.measurements + measurements_data = self._measurements x_data = measurements_data.index y_data = measurements_data else: # Process data to insert gaps for missing timestamps - measurements_data = self.measurements + measurements_data = self._measurements processed_data = self._insert_gaps_for_missing_timestamps( - measurements_data, pd.Timedelta(self.measurements.sample_interval) + measurements_data, pd.Timedelta(self.sample_interval) ) x_data = processed_data.index y_data = processed_data @@ -529,7 +537,7 @@ def _add_measurements_to_figure(self, figure: go.Figure) -> None: def _add_limits_to_figure(self, figure: go.Figure) -> None: """Add horizontal limit lines to the figure.""" - for limit in self.limits: + for limit in self._limits: figure.add_hline( # type: ignore[reportUnknownMemberType] y=limit["value"], line_dash="dot", @@ -560,7 +568,7 @@ def plot(self, title: str = "Time Series Plots") -> go.Figure: Raises: ValueError: If no data has been added to the plotter. """ - if not self.models_data and self.measurements is None: + if not self._models_data and self._measurements is None: msg = "No data has been added. Use add_measurements or add_model first." raise ValueError(msg) diff --git a/packages/openstef-beam/src/openstef_beam/analysis/plots/grouped_target_metric_plotter.py b/packages/openstef-beam/src/openstef_beam/analysis/plots/grouped_target_metric_plotter.py index a1ba48a6f..81fd989e5 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/plots/grouped_target_metric_plotter.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/plots/grouped_target_metric_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/plots/precision_recall_curve_plotter.py b/packages/openstef-beam/src/openstef_beam/analysis/plots/precision_recall_curve_plotter.py index 82bcc2b96..c4cc00453 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/plots/precision_recall_curve_plotter.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/plots/precision_recall_curve_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/plots/quantile_calibration_box_plotter.py b/packages/openstef-beam/src/openstef_beam/analysis/plots/quantile_calibration_box_plotter.py index d293d0f59..785734cb6 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/plots/quantile_calibration_box_plotter.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/plots/quantile_calibration_box_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/plots/quantile_probability_plotter.py b/packages/openstef-beam/src/openstef_beam/analysis/plots/quantile_probability_plotter.py index b167239c3..8f3f2ea4d 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/plots/quantile_probability_plotter.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/plots/quantile_probability_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/plots/summary_table_plotter.py b/packages/openstef-beam/src/openstef_beam/analysis/plots/summary_table_plotter.py index 736ec1484..3c573cca6 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/plots/summary_table_plotter.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/plots/summary_table_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/plots/windowed_metric_plotter.py b/packages/openstef-beam/src/openstef_beam/analysis/plots/windowed_metric_plotter.py index a3192fe0d..33c75a183 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/plots/windowed_metric_plotter.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/plots/windowed_metric_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/__init__.py b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/__init__.py index 8a5f82722..bb1b95397 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/base.py b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/base.py index 5294db8a4..1fb1105f0 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/base.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/base.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/grouped_target_metric_visualization.py b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/grouped_target_metric_visualization.py index c0717a180..42af4584d 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/grouped_target_metric_visualization.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/grouped_target_metric_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/precision_recall_curve_visualization.py b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/precision_recall_curve_visualization.py index 9fc9389ad..f1d16156e 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/precision_recall_curve_visualization.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/precision_recall_curve_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/quantile_calibration_box_visualization.py b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/quantile_calibration_box_visualization.py index 7b316a877..8fe8f07a0 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/quantile_calibration_box_visualization.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/quantile_calibration_box_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/quantile_probability_visualization.py b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/quantile_probability_visualization.py index eb0fedaa1..04cbb7b3f 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/quantile_probability_visualization.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/quantile_probability_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/summary_table_visualization.py b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/summary_table_visualization.py index e952e06ab..280c9ca7c 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/summary_table_visualization.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/summary_table_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/timeseries_visualization.py b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/timeseries_visualization.py index 3a444f3a2..ab80ec6f5 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/timeseries_visualization.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/timeseries_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -102,7 +102,10 @@ def create_by_none( report: EvaluationSubsetReport, metadata: TargetMetadata, ) -> VisualizationOutput: - plotter = ForecastTimeSeriesPlotter(connect_gaps=self.connect_gaps) + plotter = ForecastTimeSeriesPlotter( + connect_gaps=self.connect_gaps, + sample_interval=report.subset.sample_interval, + ) # Add measurements as the baseline plotter.add_measurements(report.get_measurements()) diff --git a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/windowed_metric_visualization.py b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/windowed_metric_visualization.py index a0f294dad..249b8d5da 100644 --- a/packages/openstef-beam/src/openstef_beam/analysis/visualizations/windowed_metric_visualization.py +++ b/packages/openstef-beam/src/openstef_beam/analysis/visualizations/windowed_metric_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/__init__.py b/packages/openstef-beam/src/openstef_beam/backtesting/__init__.py index 61d352e03..600bdf236 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_callback.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_callback.py index 51da8b7c1..723a940ac 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_callback.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_callback.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event.py index 2917520af..9e1700e10 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event_generator.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event_generator.py index 32638dc30..cf028e495 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event_generator.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_event_generator.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/__init__.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/__init__.py index 1f6689bbf..95bf12ced 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -16,14 +16,10 @@ BacktestForecasterConfig, BacktestForecasterMixin, ) -from openstef_beam.backtesting.backtest_forecaster.openstef4_backtest_forecaster import ( - OpenSTEF4BacktestForecaster, -) __all__ = [ "BacktestBatchForecasterMixin", "BacktestForecasterConfig", "BacktestForecasterMixin", "DummyForecaster", - "OpenSTEF4BacktestForecaster", ] diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/dummy_forecaster.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/dummy_forecaster.py index 9b931ad6d..85314e638 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/dummy_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/dummy_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/mixins.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/mixins.py index 5c02a6fa5..709242440 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/mixins.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/mixins.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_pipeline.py b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_pipeline.py index 00a89a63a..65a8393af 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_pipeline.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/backtest_pipeline.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/restricted_horizon_timeseries.py b/packages/openstef-beam/src/openstef_beam/backtesting/restricted_horizon_timeseries.py index 5040a038b..1950b28f4 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/restricted_horizon_timeseries.py +++ b/packages/openstef-beam/src/openstef_beam/backtesting/restricted_horizon_timeseries.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/__init__.py b/packages/openstef-beam/src/openstef_beam/benchmarking/__init__.py index 6015b6941..312814fe1 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/__init__.py b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/__init__.py new file mode 100644 index 000000000..19124fc22 --- /dev/null +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/__init__.py @@ -0,0 +1,20 @@ +"""Benchmarks baselines used by the OpenSTEF Beam benchmarking utilities. + +This package exposes baseline forecasters for use in backtesting. +""" + +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from openstef_beam.benchmarking.baselines.openstef4 import ( + OpenSTEF4BacktestForecaster, + WorkflowCreationContext, + create_openstef4_preset_backtest_forecaster, +) + +__all__ = [ + "OpenSTEF4BacktestForecaster", + "WorkflowCreationContext", + "create_openstef4_preset_backtest_forecaster", +] diff --git a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py similarity index 52% rename from packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py rename to packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py index 56dad935f..d4e7cc355 100644 --- a/packages/openstef-beam/src/openstef_beam/backtesting/backtest_forecaster/openstef4_backtest_forecaster.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -6,20 +6,34 @@ import logging from collections.abc import Callable +from datetime import timedelta +from functools import partial from pathlib import Path -from typing import Any, override +from typing import Any, cast, override from pydantic import Field, PrivateAttr +from pydantic_extra_types.coordinate import Coordinate from openstef_beam.backtesting.backtest_forecaster.mixins import BacktestForecasterConfig, BacktestForecasterMixin from openstef_beam.backtesting.restricted_horizon_timeseries import RestrictedHorizonVersionedTimeSeries -from openstef_core.base_model import BaseModel +from openstef_beam.benchmarking.benchmark_pipeline import BenchmarkContext, BenchmarkTarget, ForecasterFactory +from openstef_core.base_model import BaseConfig, BaseModel from openstef_core.datasets import TimeSeriesDataset from openstef_core.exceptions import FlatlinerDetectedError, NotFittedError from openstef_core.types import Q +from openstef_models.presets import ForecastingWorkflowConfig from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow +class WorkflowCreationContext(BaseConfig): + """Context information for workflow execution within backtesting.""" + + step_name: str | None = Field( + default=None, + description="Name of the current backtesting step.", + ) + + class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): """Forecaster that allows using a ForecastingWorkflow to be used in backtesting, specifically for OpenSTEF4 models. @@ -30,7 +44,7 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): config: BacktestForecasterConfig = Field( description="Configuration for the backtest forecaster interface", ) - workflow_factory: Callable[[], CustomForecastingWorkflow] = Field( + workflow_factory: Callable[[WorkflowCreationContext], CustomForecastingWorkflow] = Field( description="Factory function that creates a new CustomForecastingWorkflow instance", ) cache_dir: Path = Field( @@ -56,14 +70,15 @@ def model_post_init(self, context: Any) -> None: def quantiles(self) -> list[Q]: # Create a workflow instance if needed to get quantiles if self._workflow is None: - self._workflow = self.workflow_factory() + self._workflow = self.workflow_factory(WorkflowCreationContext()) # Extract quantiles from the workflow's model return self._workflow.model.forecaster.config.quantiles @override def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: # Create a new workflow for this training cycle - workflow = self.workflow_factory() + context = WorkflowCreationContext(step_name=data.horizon.isoformat()) + workflow = self.workflow_factory(context) # Extract the dataset for training training_data = data.get_window( @@ -124,4 +139,90 @@ def predict(self, data: RestrictedHorizonVersionedTimeSeries) -> TimeSeriesDatas return forecast -__all__ = ["OpenSTEF4BacktestForecaster"] +class OpenSTEF4PresetBacktestForecaster(OpenSTEF4BacktestForecaster): + pass + + +def _preset_target_forecaster_factory( + base_config: ForecastingWorkflowConfig, + backtest_config: BacktestForecasterConfig, + cache_dir: Path, + context: BenchmarkContext, + target: BenchmarkTarget, +) -> OpenSTEF4BacktestForecaster: + from openstef_models.presets import create_forecasting_workflow # noqa: PLC0415 + from openstef_models.presets.forecasting_workflow import LocationConfig # noqa: PLC0415 + + # Factory function that creates a forecaster for a given target. + prefix = context.run_name + + def _create_workflow(context: WorkflowCreationContext) -> CustomForecastingWorkflow: + # Create a new workflow instance with fresh model. + return create_forecasting_workflow( + config=base_config.model_copy( + update={ + "model_id": f"{prefix}_{target.name}", + "run_name": context.step_name, + "location": LocationConfig( + name=target.name, + description=target.description, + coordinate=Coordinate( + latitude=target.latitude, + longitude=target.longitude, + ), + ), + } + ) + ) + + return OpenSTEF4BacktestForecaster( + config=backtest_config, + workflow_factory=_create_workflow, + debug=False, + cache_dir=cache_dir / f"{context.run_name}_{target.name}", + ) + + +def create_openstef4_preset_backtest_forecaster( + workflow_config: ForecastingWorkflowConfig, + backtest_config: BacktestForecasterConfig | None = None, + cache_dir: Path = Path("cache"), +) -> ForecasterFactory[BenchmarkTarget]: + """Create a factory that returns an OpenSTEF4BacktestForecaster for a benchmark target. + + Args: + workflow_config: The configured `ForecastingWorkflowConfig` that will be cloned and + assigned to a target-specific workflow instance. + backtest_config: Optional `BacktestForecasterConfig` to control training/prediction windows. + If None, a sensible default is created. + cache_dir: Directory to store cached artifacts for created forecasters. A subdirectory will be + created per benchmark run and target. + + Returns: + A `ForecasterFactory[BenchmarkTarget]` partial which accepts a `BenchmarkContext` and a + `BenchmarkTarget` and returns a configured `OpenSTEF4BacktestForecaster`. + """ + if backtest_config is None: + backtest_config = BacktestForecasterConfig( + requires_training=True, + predict_length=timedelta(days=7), + predict_min_length=timedelta(minutes=15), + predict_context_length=timedelta(days=14), # Context needed for lag features + predict_context_min_coverage=0.5, + training_context_length=timedelta(days=90), # Three months of training data + training_context_min_coverage=0.5, + predict_sample_interval=timedelta(minutes=15), + ) + + return cast( + ForecasterFactory[BenchmarkTarget], + partial( + _preset_target_forecaster_factory, + workflow_config, + backtest_config, + cache_dir, + ), + ) + + +__all__ = ["OpenSTEF4BacktestForecaster", "WorkflowCreationContext", "create_openstef4_preset_backtest_forecaster"] diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_comparison_pipeline.py b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_comparison_pipeline.py index 406f9d162..2aaac8f5c 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_comparison_pipeline.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_comparison_pipeline.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py index 7d4649def..f4220e0fa 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmark_pipeline.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmarks/__init__.py b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmarks/__init__.py index c28bb079f..be557d247 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmarks/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmarks/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmarks/liander2024.py b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmarks/liander2024.py index 4018016a9..3315f3401 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/benchmarks/liander2024.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/benchmarks/liander2024.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/__init__.py b/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/__init__.py index 6cea43259..eeda01bff 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/base.py b/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/base.py index 4b9eef430..56d4b6f4e 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/base.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/base.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/strict_execution_callback.py b/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/strict_execution_callback.py index d8c3efe9b..834fb682a 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/strict_execution_callback.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/callbacks/strict_execution_callback.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/models/__init__.py b/packages/openstef-beam/src/openstef_beam/benchmarking/models/__init__.py index 438ad0acd..1480ae8ce 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/models/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/models/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/models/benchmark_target.py b/packages/openstef-beam/src/openstef_beam/benchmarking/models/benchmark_target.py index d164d5682..10417dc27 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/models/benchmark_target.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/models/benchmark_target.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/storage/__init__.py b/packages/openstef-beam/src/openstef_beam/benchmarking/storage/__init__.py index 7ff55f500..c783d91f8 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/storage/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/storage/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/storage/base.py b/packages/openstef-beam/src/openstef_beam/benchmarking/storage/base.py index 0b64e06c5..5b42dd2ed 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/storage/base.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/storage/base.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/storage/local_storage.py b/packages/openstef-beam/src/openstef_beam/benchmarking/storage/local_storage.py index 1f0c26639..892cd09ec 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/storage/local_storage.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/storage/local_storage.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/storage/s3_storage.py b/packages/openstef-beam/src/openstef_beam/benchmarking/storage/s3_storage.py index 477391903..e51232bfb 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/storage/s3_storage.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/storage/s3_storage.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/target_provider.py b/packages/openstef-beam/src/openstef_beam/benchmarking/target_provider.py index bbd2c7bb2..c9f084f0e 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/target_provider.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/target_provider.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/evaluation/__init__.py b/packages/openstef-beam/src/openstef_beam/evaluation/__init__.py index 1a378f004..d2474a9cf 100644 --- a/packages/openstef-beam/src/openstef_beam/evaluation/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/evaluation/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/evaluation/evaluation_pipeline.py b/packages/openstef-beam/src/openstef_beam/evaluation/evaluation_pipeline.py index 6355735d5..d7bbe7963 100644 --- a/packages/openstef-beam/src/openstef_beam/evaluation/evaluation_pipeline.py +++ b/packages/openstef-beam/src/openstef_beam/evaluation/evaluation_pipeline.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -237,6 +237,10 @@ def _iterate_subsets( if evaluation_mask is not None: predictions_filtered = predictions_filtered.filter_index(evaluation_mask) + # Remove target column from predictions to avoid duplication + if target_column in predictions_filtered.data.columns: + predictions_filtered = predictions_filtered.pipe_pandas(lambda df: df.drop(columns=[target_column])) + yield ( lead_time, ForecastDataset( diff --git a/packages/openstef-beam/src/openstef_beam/evaluation/metric_providers.py b/packages/openstef-beam/src/openstef_beam/evaluation/metric_providers.py index d1b7abc6f..95d79e00f 100644 --- a/packages/openstef-beam/src/openstef_beam/evaluation/metric_providers.py +++ b/packages/openstef-beam/src/openstef_beam/evaluation/metric_providers.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/evaluation/models/__init__.py b/packages/openstef-beam/src/openstef_beam/evaluation/models/__init__.py index d93e0acd8..198bbc210 100644 --- a/packages/openstef-beam/src/openstef_beam/evaluation/models/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/evaluation/models/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/evaluation/models/report.py b/packages/openstef-beam/src/openstef_beam/evaluation/models/report.py index 7766d5947..1818f39e6 100644 --- a/packages/openstef-beam/src/openstef_beam/evaluation/models/report.py +++ b/packages/openstef-beam/src/openstef_beam/evaluation/models/report.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/evaluation/models/subset.py b/packages/openstef-beam/src/openstef_beam/evaluation/models/subset.py index 0a1414899..16a5e7f63 100644 --- a/packages/openstef-beam/src/openstef_beam/evaluation/models/subset.py +++ b/packages/openstef-beam/src/openstef_beam/evaluation/models/subset.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/evaluation/models/window.py b/packages/openstef-beam/src/openstef_beam/evaluation/models/window.py index 318802d0e..2212af3d0 100644 --- a/packages/openstef-beam/src/openstef_beam/evaluation/models/window.py +++ b/packages/openstef-beam/src/openstef_beam/evaluation/models/window.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/evaluation/window_iterators.py b/packages/openstef-beam/src/openstef_beam/evaluation/window_iterators.py index 1e5f2045c..7188bf3f0 100644 --- a/packages/openstef-beam/src/openstef_beam/evaluation/window_iterators.py +++ b/packages/openstef-beam/src/openstef_beam/evaluation/window_iterators.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/metrics/__init__.py b/packages/openstef-beam/src/openstef_beam/metrics/__init__.py index 96758954d..ea4ccf7ce 100644 --- a/packages/openstef-beam/src/openstef_beam/metrics/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/metrics/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py b/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py index 2de97042d..f77f55579 100644 --- a/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py +++ b/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/src/openstef_beam/metrics/metrics_probabilistic.py b/packages/openstef-beam/src/openstef_beam/metrics/metrics_probabilistic.py index 79bfa85f7..a49f2386c 100644 --- a/packages/openstef-beam/src/openstef_beam/metrics/metrics_probabilistic.py +++ b/packages/openstef-beam/src/openstef_beam/metrics/metrics_probabilistic.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/__init__.py b/packages/openstef-beam/tests/__init__.py index 81747127d..72baaab86 100644 --- a/packages/openstef-beam/tests/__init__.py +++ b/packages/openstef-beam/tests/__init__.py @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/__init__.py b/packages/openstef-beam/tests/unit/__init__.py index 81747127d..72baaab86 100644 --- a/packages/openstef-beam/tests/unit/__init__.py +++ b/packages/openstef-beam/tests/unit/__init__.py @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/plots/test_forecast_time_series_plotter.py b/packages/openstef-beam/tests/unit/analysis/plots/test_forecast_time_series_plotter.py index b860adeb2..98acb3111 100644 --- a/packages/openstef-beam/tests/unit/analysis/plots/test_forecast_time_series_plotter.py +++ b/packages/openstef-beam/tests/unit/analysis/plots/test_forecast_time_series_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 # pyright: basic, reportAttributeAccessIssue=false @@ -23,11 +23,11 @@ def test_add_model_with_forecast_only(): plotter.add_model(model_name="Model A", forecast=forecast) # Assert - assert len(plotter.models_data) == 1 - assert plotter.models_data[0]["model_name"] == "Model A" - assert plotter.models_data[0]["forecast"] is not None - pd.testing.assert_series_equal(plotter.models_data[0]["forecast"], forecast) - assert plotter.models_data[0]["quantiles"] is None + assert len(plotter._models_data) == 1 + assert plotter._models_data[0]["model_name"] == "Model A" + assert plotter._models_data[0]["forecast"] is not None + pd.testing.assert_series_equal(plotter._models_data[0]["forecast"], forecast) + assert plotter._models_data[0]["quantiles"] is None def test_add_model_with_quantiles_only(): @@ -42,11 +42,11 @@ def test_add_model_with_quantiles_only(): plotter.add_model(model_name="Model B", quantiles=quantiles) # Assert - assert len(plotter.models_data) == 1 - assert plotter.models_data[0]["model_name"] == "Model B" - assert plotter.models_data[0]["forecast"] is None - assert plotter.models_data[0]["quantiles"] is not None - pd.testing.assert_frame_equal(plotter.models_data[0]["quantiles"], quantiles) + assert len(plotter._models_data) == 1 + assert plotter._models_data[0]["model_name"] == "Model B" + assert plotter._models_data[0]["forecast"] is None + assert plotter._models_data[0]["quantiles"] is not None + pd.testing.assert_frame_equal(plotter._models_data[0]["quantiles"], quantiles) def test_add_model_with_forecast_and_quantiles(): @@ -64,12 +64,12 @@ def test_add_model_with_forecast_and_quantiles(): plotter.add_model(model_name="Model C", forecast=forecast, quantiles=quantiles) # Assert - assert len(plotter.models_data) == 1 - assert plotter.models_data[0]["model_name"] == "Model C" - assert plotter.models_data[0]["forecast"] is not None - pd.testing.assert_series_equal(plotter.models_data[0]["forecast"], forecast) - assert plotter.models_data[0]["quantiles"] is not None - pd.testing.assert_frame_equal(plotter.models_data[0]["quantiles"], quantiles) + assert len(plotter._models_data) == 1 + assert plotter._models_data[0]["model_name"] == "Model C" + assert plotter._models_data[0]["forecast"] is not None + pd.testing.assert_series_equal(plotter._models_data[0]["forecast"], forecast) + assert plotter._models_data[0]["quantiles"] is not None + pd.testing.assert_frame_equal(plotter._models_data[0]["quantiles"], quantiles) def test_method_chaining(): @@ -88,11 +88,11 @@ def test_method_chaining(): ) # Assert - assert plotter.measurements is not None - pd.testing.assert_series_equal(plotter.measurements, measurements) - assert len(plotter.models_data) == 2 - assert plotter.models_data[0]["model_name"] == "Model 1" - assert plotter.models_data[1]["model_name"] == "Model 2" + assert plotter._measurements is not None + pd.testing.assert_series_equal(plotter._measurements, measurements) + assert len(plotter._models_data) == 2 + assert plotter._models_data[0]["model_name"] == "Model 1" + assert plotter._models_data[1]["model_name"] == "Model 2" def test_add_model_raises_if_no_data(): @@ -126,8 +126,8 @@ def test_add_measurements(): plotter.add_measurements(measurements) # Assert - assert plotter.measurements is not None - pd.testing.assert_series_equal(plotter.measurements, measurements) + assert plotter._measurements is not None + pd.testing.assert_series_equal(plotter._measurements, measurements) def test_plot_with_no_data_raises(): diff --git a/packages/openstef-beam/tests/unit/analysis/plots/test_grouped_target_metric_plotter.py b/packages/openstef-beam/tests/unit/analysis/plots/test_grouped_target_metric_plotter.py index be9aa516b..227d54a83 100644 --- a/packages/openstef-beam/tests/unit/analysis/plots/test_grouped_target_metric_plotter.py +++ b/packages/openstef-beam/tests/unit/analysis/plots/test_grouped_target_metric_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/plots/test_precision_recall_curve_plotter.py b/packages/openstef-beam/tests/unit/analysis/plots/test_precision_recall_curve_plotter.py index 79382fe1c..79853e716 100644 --- a/packages/openstef-beam/tests/unit/analysis/plots/test_precision_recall_curve_plotter.py +++ b/packages/openstef-beam/tests/unit/analysis/plots/test_precision_recall_curve_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/plots/test_quantile_calibration_box_plotter.py b/packages/openstef-beam/tests/unit/analysis/plots/test_quantile_calibration_box_plotter.py index dd0b753a8..61782eff6 100644 --- a/packages/openstef-beam/tests/unit/analysis/plots/test_quantile_calibration_box_plotter.py +++ b/packages/openstef-beam/tests/unit/analysis/plots/test_quantile_calibration_box_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 # pyright: basic diff --git a/packages/openstef-beam/tests/unit/analysis/plots/test_quantile_probability_plotter.py b/packages/openstef-beam/tests/unit/analysis/plots/test_quantile_probability_plotter.py index 521093428..84bf048d8 100644 --- a/packages/openstef-beam/tests/unit/analysis/plots/test_quantile_probability_plotter.py +++ b/packages/openstef-beam/tests/unit/analysis/plots/test_quantile_probability_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/plots/test_summary_table_plotter.py b/packages/openstef-beam/tests/unit/analysis/plots/test_summary_table_plotter.py index 756b50dc8..1c38373e5 100644 --- a/packages/openstef-beam/tests/unit/analysis/plots/test_summary_table_plotter.py +++ b/packages/openstef-beam/tests/unit/analysis/plots/test_summary_table_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/plots/test_windowed_metric_plotter.py b/packages/openstef-beam/tests/unit/analysis/plots/test_windowed_metric_plotter.py index f5a54c6ed..8b1097f09 100644 --- a/packages/openstef-beam/tests/unit/analysis/plots/test_windowed_metric_plotter.py +++ b/packages/openstef-beam/tests/unit/analysis/plots/test_windowed_metric_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/test_analysis_pipeline.py b/packages/openstef-beam/tests/unit/analysis/test_analysis_pipeline.py index 0c4f25b82..e6b6dfe6c 100644 --- a/packages/openstef-beam/tests/unit/analysis/test_analysis_pipeline.py +++ b/packages/openstef-beam/tests/unit/analysis/test_analysis_pipeline.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/visualizations/conftest.py b/packages/openstef-beam/tests/unit/analysis/visualizations/conftest.py index 126c9b923..50220bd94 100644 --- a/packages/openstef-beam/tests/unit/analysis/visualizations/conftest.py +++ b/packages/openstef-beam/tests/unit/analysis/visualizations/conftest.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/visualizations/test_grouped_target_metric_visualization.py b/packages/openstef-beam/tests/unit/analysis/visualizations/test_grouped_target_metric_visualization.py index 5b1bed99d..adf965f44 100644 --- a/packages/openstef-beam/tests/unit/analysis/visualizations/test_grouped_target_metric_visualization.py +++ b/packages/openstef-beam/tests/unit/analysis/visualizations/test_grouped_target_metric_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/visualizations/test_precision_recall_curve_visualization.py b/packages/openstef-beam/tests/unit/analysis/visualizations/test_precision_recall_curve_visualization.py index 11a0d176f..df6740b88 100644 --- a/packages/openstef-beam/tests/unit/analysis/visualizations/test_precision_recall_curve_visualization.py +++ b/packages/openstef-beam/tests/unit/analysis/visualizations/test_precision_recall_curve_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/visualizations/test_quantile_calibration_box_visualization.py b/packages/openstef-beam/tests/unit/analysis/visualizations/test_quantile_calibration_box_visualization.py index f28d414b2..480860433 100644 --- a/packages/openstef-beam/tests/unit/analysis/visualizations/test_quantile_calibration_box_visualization.py +++ b/packages/openstef-beam/tests/unit/analysis/visualizations/test_quantile_calibration_box_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/visualizations/test_quantile_probability_visualization.py b/packages/openstef-beam/tests/unit/analysis/visualizations/test_quantile_probability_visualization.py index 99532b06d..8d9f572c8 100644 --- a/packages/openstef-beam/tests/unit/analysis/visualizations/test_quantile_probability_visualization.py +++ b/packages/openstef-beam/tests/unit/analysis/visualizations/test_quantile_probability_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/visualizations/test_summary_table_visualization.py b/packages/openstef-beam/tests/unit/analysis/visualizations/test_summary_table_visualization.py index 8e335b0da..dd733592e 100644 --- a/packages/openstef-beam/tests/unit/analysis/visualizations/test_summary_table_visualization.py +++ b/packages/openstef-beam/tests/unit/analysis/visualizations/test_summary_table_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/analysis/visualizations/test_timeseries_visualization.py b/packages/openstef-beam/tests/unit/analysis/visualizations/test_timeseries_visualization.py index 768595157..eef82d89e 100644 --- a/packages/openstef-beam/tests/unit/analysis/visualizations/test_timeseries_visualization.py +++ b/packages/openstef-beam/tests/unit/analysis/visualizations/test_timeseries_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -146,12 +146,10 @@ def test_connect_gaps_parameter_integration( ): """Test that connect_gaps parameter works end-to-end for both True and False values.""" # Arrange - viz = TimeSeriesVisualization(name="test_viz") - viz.connect_gaps = connect_gaps_value + viz = TimeSeriesVisualization(name="test_viz", connect_gaps=connect_gaps_value) # Act with ( - patch.object(ForecastTimeSeriesPlotter, "__init__", return_value=None) as mock_init, patch.object(ForecastTimeSeriesPlotter, "plot", return_value=mock_plotly_figure), patch.object(ForecastTimeSeriesPlotter, "add_measurements"), patch.object(ForecastTimeSeriesPlotter, "add_model"), @@ -160,6 +158,5 @@ def test_connect_gaps_parameter_integration( result = viz.create_by_none(sample_evaluation_report, simple_target_metadata) # Assert - mock_init.assert_called_once_with(connect_gaps=connect_gaps_value) assert result.name == viz.name assert result.figure == mock_plotly_figure diff --git a/packages/openstef-beam/tests/unit/analysis/visualizations/test_windowed_metric_visualization.py b/packages/openstef-beam/tests/unit/analysis/visualizations/test_windowed_metric_visualization.py index a5d97f5d9..0d905fc67 100644 --- a/packages/openstef-beam/tests/unit/analysis/visualizations/test_windowed_metric_visualization.py +++ b/packages/openstef-beam/tests/unit/analysis/visualizations/test_windowed_metric_visualization.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/backtesting/test_backtest_event_generator.py b/packages/openstef-beam/tests/unit/backtesting/test_backtest_event_generator.py index 7d86f4004..b5497629d 100644 --- a/packages/openstef-beam/tests/unit/backtesting/test_backtest_event_generator.py +++ b/packages/openstef-beam/tests/unit/backtesting/test_backtest_event_generator.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/backtesting/test_backtest_pipeline.py b/packages/openstef-beam/tests/unit/backtesting/test_backtest_pipeline.py index 8fbea70b0..9e68fd5a1 100644 --- a/packages/openstef-beam/tests/unit/backtesting/test_backtest_pipeline.py +++ b/packages/openstef-beam/tests/unit/backtesting/test_backtest_pipeline.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/backtesting/test_batch_prediction.py b/packages/openstef-beam/tests/unit/backtesting/test_batch_prediction.py index e7ff1ddd4..09e6ac4f0 100644 --- a/packages/openstef-beam/tests/unit/backtesting/test_batch_prediction.py +++ b/packages/openstef-beam/tests/unit/backtesting/test_batch_prediction.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/benchmarking/storage/test_local_storage.py b/packages/openstef-beam/tests/unit/benchmarking/storage/test_local_storage.py index d789bbd4e..a666a79d6 100644 --- a/packages/openstef-beam/tests/unit/benchmarking/storage/test_local_storage.py +++ b/packages/openstef-beam/tests/unit/benchmarking/storage/test_local_storage.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/benchmarking/storage/test_s3_storage.py b/packages/openstef-beam/tests/unit/benchmarking/storage/test_s3_storage.py index e2edd2087..88c8bbec0 100644 --- a/packages/openstef-beam/tests/unit/benchmarking/storage/test_s3_storage.py +++ b/packages/openstef-beam/tests/unit/benchmarking/storage/test_s3_storage.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/benchmarking/test_benchmark_pipeline.py b/packages/openstef-beam/tests/unit/benchmarking/test_benchmark_pipeline.py index 1f18c83b8..fb73e9f0c 100644 --- a/packages/openstef-beam/tests/unit/benchmarking/test_benchmark_pipeline.py +++ b/packages/openstef-beam/tests/unit/benchmarking/test_benchmark_pipeline.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/benchmarking/test_target_provider.py b/packages/openstef-beam/tests/unit/benchmarking/test_target_provider.py index 7ee94db89..1692f92b4 100644 --- a/packages/openstef-beam/tests/unit/benchmarking/test_target_provider.py +++ b/packages/openstef-beam/tests/unit/benchmarking/test_target_provider.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/evaluation/__init__.py b/packages/openstef-beam/tests/unit/evaluation/__init__.py index 81747127d..72baaab86 100644 --- a/packages/openstef-beam/tests/unit/evaluation/__init__.py +++ b/packages/openstef-beam/tests/unit/evaluation/__init__.py @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/evaluation/models/__init__.py b/packages/openstef-beam/tests/unit/evaluation/models/__init__.py index 81747127d..72baaab86 100644 --- a/packages/openstef-beam/tests/unit/evaluation/models/__init__.py +++ b/packages/openstef-beam/tests/unit/evaluation/models/__init__.py @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/evaluation/models/test_window.py b/packages/openstef-beam/tests/unit/evaluation/models/test_window.py index dd6bf378b..084f67766 100644 --- a/packages/openstef-beam/tests/unit/evaluation/models/test_window.py +++ b/packages/openstef-beam/tests/unit/evaluation/models/test_window.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/evaluation/test_evaluation_pipeline.py b/packages/openstef-beam/tests/unit/evaluation/test_evaluation_pipeline.py index 8e70dd7ae..256a176b5 100644 --- a/packages/openstef-beam/tests/unit/evaluation/test_evaluation_pipeline.py +++ b/packages/openstef-beam/tests/unit/evaluation/test_evaluation_pipeline.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/evaluation/test_metric_provider.py b/packages/openstef-beam/tests/unit/evaluation/test_metric_provider.py index 9ecabdaa6..afd3acca6 100644 --- a/packages/openstef-beam/tests/unit/evaluation/test_metric_provider.py +++ b/packages/openstef-beam/tests/unit/evaluation/test_metric_provider.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py b/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py index 66960c087..b3b9ac35d 100644 --- a/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py +++ b/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/unit/metrics/test_metrics_probabilistic.py b/packages/openstef-beam/tests/unit/metrics/test_metrics_probabilistic.py index a05bfbd7f..c9bb5d63c 100644 --- a/packages/openstef-beam/tests/unit/metrics/test_metrics_probabilistic.py +++ b/packages/openstef-beam/tests/unit/metrics/test_metrics_probabilistic.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-beam/tests/utils/mocks.py b/packages/openstef-beam/tests/utils/mocks.py index 6943e49db..9d46a7b77 100644 --- a/packages/openstef-beam/tests/utils/mocks.py +++ b/packages/openstef-beam/tests/utils/mocks.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/README.md b/packages/openstef-core/README.md index b2d10e0ab..ed6c7ed13 100644 --- a/packages/openstef-core/README.md +++ b/packages/openstef-core/README.md @@ -1,7 +1,7 @@ -# openstef-core \ No newline at end of file +# openstef-core diff --git a/packages/openstef-core/pyproject.toml b/packages/openstef-core/pyproject.toml index 75f531b99..cc436ee40 100644 --- a/packages/openstef-core/pyproject.toml +++ b/packages/openstef-core/pyproject.toml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -15,7 +15,7 @@ readme = "README.md" keywords = [ "energy", "forecasting", "machinelearning" ] license = "MPL-2.0" authors = [ - { name = "Alliander N.V", email = "short.term.energy.forecasts@alliander.com" }, + { name = "Alliander N.V", email = "openstef@lfenergy.org" }, ] requires-python = ">=3.12,<4.0" classifiers = [ diff --git a/packages/openstef-core/src/openstef_core/__init__.py b/packages/openstef-core/src/openstef_core/__init__.py index a174d8d83..b5c91ce93 100644 --- a/packages/openstef-core/src/openstef_core/__init__.py +++ b/packages/openstef-core/src/openstef_core/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 """Core functionality for OpenSTEF, a framework for short-term energy forecasting.""" diff --git a/packages/openstef-core/src/openstef_core/base_model.py b/packages/openstef-core/src/openstef_core/base_model.py index eb9b9fac5..0f44e6137 100644 --- a/packages/openstef-core/src/openstef_core/base_model.py +++ b/packages/openstef-core/src/openstef_core/base_model.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/datasets/__init__.py b/packages/openstef-core/src/openstef_core/datasets/__init__.py index fd6cd4b40..153ed6611 100644 --- a/packages/openstef-core/src/openstef_core/datasets/__init__.py +++ b/packages/openstef-core/src/openstef_core/datasets/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/datasets/mixins.py b/packages/openstef-core/src/openstef_core/datasets/mixins.py index 8c6feb579..613906f07 100644 --- a/packages/openstef-core/src/openstef_core/datasets/mixins.py +++ b/packages/openstef-core/src/openstef_core/datasets/mixins.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/datasets/timeseries_dataset.py b/packages/openstef-core/src/openstef_core/datasets/timeseries_dataset.py index 101bbb337..d4dfb63d2 100644 --- a/packages/openstef-core/src/openstef_core/datasets/timeseries_dataset.py +++ b/packages/openstef-core/src/openstef_core/datasets/timeseries_dataset.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/datasets/validated_datasets.py b/packages/openstef-core/src/openstef_core/datasets/validated_datasets.py index dd39f3ba4..bb06368d6 100644 --- a/packages/openstef-core/src/openstef_core/datasets/validated_datasets.py +++ b/packages/openstef-core/src/openstef_core/datasets/validated_datasets.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -250,12 +250,14 @@ def __init__( *, horizon_column: str = "horizon", available_at_column: str = "available_at", + standard_deviation_column: str = "stdev", ) -> None: if "forecast_start" in data.attrs: self.forecast_start = datetime.fromisoformat(data.attrs["forecast_start"]) else: self.forecast_start = forecast_start if forecast_start is not None else data.index.min().to_pydatetime() self.target_column = data.attrs.get("target_column", target_column) + self.standard_deviation_column = data.attrs.get("standard_deviation_column", standard_deviation_column) super().__init__( data=data, @@ -264,7 +266,8 @@ def __init__( available_at_column=available_at_column, ) - quantile_feature_names = [col for col in self.feature_names if col != target_column] + exclude_columns = {target_column, standard_deviation_column} + quantile_feature_names = [col for col in self.feature_names if col not in exclude_columns] if not all(Quantile.is_valid_quantile_string(col) for col in quantile_feature_names): raise ValueError("All feature names must be valid quantile strings.") @@ -296,6 +299,20 @@ def median_series(self) -> pd.Series: raise MissingColumnsError(missing_columns=[median_col]) return self.data[median_col] + @property + def standard_deviation_series(self) -> pd.Series: + """Extract the standard deviation series if it exists. + + Returns: + Time series containing standard deviation values with original datetime index. + + Raises: + MissingColumnsError: If the standard deviation column is not found. + """ + if self.standard_deviation_column not in self.data.columns: + raise MissingColumnsError(missing_columns=[self.standard_deviation_column]) + return self.data[self.standard_deviation_column] # pyright: ignore[reportUnknownVariableType] + @property def quantiles_data(self) -> pd.DataFrame: """Extract DataFrame containing only the quantile forecast columns. @@ -331,6 +348,7 @@ def to_pandas(self) -> pd.DataFrame: df = super().to_pandas() df.attrs["target_column"] = self.target_column df.attrs["forecast_start"] = self.forecast_start.isoformat() + df.attrs["standard_deviation_column"] = self.standard_deviation_column return df @classmethod diff --git a/packages/openstef-core/src/openstef_core/datasets/validation.py b/packages/openstef-core/src/openstef_core/datasets/validation.py index f4d176862..2f62abb5c 100644 --- a/packages/openstef-core/src/openstef_core/datasets/validation.py +++ b/packages/openstef-core/src/openstef_core/datasets/validation.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/datasets/versioned_timeseries_dataset.py b/packages/openstef-core/src/openstef_core/datasets/versioned_timeseries_dataset.py index 280db518f..e430e4a49 100644 --- a/packages/openstef-core/src/openstef_core/datasets/versioned_timeseries_dataset.py +++ b/packages/openstef-core/src/openstef_core/datasets/versioned_timeseries_dataset.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/exceptions.py b/packages/openstef-core/src/openstef_core/exceptions.py index 061f0b824..29ef4bcc6 100644 --- a/packages/openstef-core/src/openstef_core/exceptions.py +++ b/packages/openstef-core/src/openstef_core/exceptions.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/mixins/__init__.py b/packages/openstef-core/src/openstef_core/mixins/__init__.py index 6fc5a4605..0da051876 100644 --- a/packages/openstef-core/src/openstef_core/mixins/__init__.py +++ b/packages/openstef-core/src/openstef_core/mixins/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/mixins/predictor.py b/packages/openstef-core/src/openstef_core/mixins/predictor.py index 18b2b811e..4e237b234 100644 --- a/packages/openstef-core/src/openstef_core/mixins/predictor.py +++ b/packages/openstef-core/src/openstef_core/mixins/predictor.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/mixins/stateful.py b/packages/openstef-core/src/openstef_core/mixins/stateful.py index 49d3e45e3..71fe48286 100644 --- a/packages/openstef-core/src/openstef_core/mixins/stateful.py +++ b/packages/openstef-core/src/openstef_core/mixins/stateful.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/mixins/transform.py b/packages/openstef-core/src/openstef_core/mixins/transform.py index 1254c487b..0d3eaabc6 100644 --- a/packages/openstef-core/src/openstef_core/mixins/transform.py +++ b/packages/openstef-core/src/openstef_core/mixins/transform.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/testing.py b/packages/openstef-core/src/openstef_core/testing.py index 31486ebc4..692c8597f 100644 --- a/packages/openstef-core/src/openstef_core/testing.py +++ b/packages/openstef-core/src/openstef_core/testing.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/transforms/__init__.py b/packages/openstef-core/src/openstef_core/transforms/__init__.py index 76f9f4e72..06d309f12 100644 --- a/packages/openstef-core/src/openstef_core/transforms/__init__.py +++ b/packages/openstef-core/src/openstef_core/transforms/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/transforms/dataset_transforms.py b/packages/openstef-core/src/openstef_core/transforms/dataset_transforms.py index 659070d6f..87b7dba4e 100644 --- a/packages/openstef-core/src/openstef_core/transforms/dataset_transforms.py +++ b/packages/openstef-core/src/openstef_core/transforms/dataset_transforms.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/types.py b/packages/openstef-core/src/openstef_core/types.py index 030bff083..6c989c84d 100644 --- a/packages/openstef-core/src/openstef_core/types.py +++ b/packages/openstef-core/src/openstef_core/types.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/utils/__init__.py b/packages/openstef-core/src/openstef_core/utils/__init__.py index e39625757..b94b9e6b5 100644 --- a/packages/openstef-core/src/openstef_core/utils/__init__.py +++ b/packages/openstef-core/src/openstef_core/utils/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/utils/datetime.py b/packages/openstef-core/src/openstef_core/utils/datetime.py index 68fd65c49..8258798d7 100644 --- a/packages/openstef-core/src/openstef_core/utils/datetime.py +++ b/packages/openstef-core/src/openstef_core/utils/datetime.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/utils/invariants.py b/packages/openstef-core/src/openstef_core/utils/invariants.py index cf912f8d5..93c5745e8 100644 --- a/packages/openstef-core/src/openstef_core/utils/invariants.py +++ b/packages/openstef-core/src/openstef_core/utils/invariants.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/utils/itertools.py b/packages/openstef-core/src/openstef_core/utils/itertools.py index 89e4681a8..02a4d592b 100644 --- a/packages/openstef-core/src/openstef_core/utils/itertools.py +++ b/packages/openstef-core/src/openstef_core/utils/itertools.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/utils/multiprocessing.py b/packages/openstef-core/src/openstef_core/utils/multiprocessing.py index 517d51bfe..65394ff01 100644 --- a/packages/openstef-core/src/openstef_core/utils/multiprocessing.py +++ b/packages/openstef-core/src/openstef_core/utils/multiprocessing.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/utils/pandas.py b/packages/openstef-core/src/openstef_core/utils/pandas.py index 5388079e3..f714962e7 100644 --- a/packages/openstef-core/src/openstef_core/utils/pandas.py +++ b/packages/openstef-core/src/openstef_core/utils/pandas.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/src/openstef_core/utils/pydantic.py b/packages/openstef-core/src/openstef_core/utils/pydantic.py index 42316515a..e74eb81cc 100644 --- a/packages/openstef-core/src/openstef_core/utils/pydantic.py +++ b/packages/openstef-core/src/openstef_core/utils/pydantic.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/__init__.py b/packages/openstef-core/tests/__init__.py index 81747127d..72baaab86 100644 --- a/packages/openstef-core/tests/__init__.py +++ b/packages/openstef-core/tests/__init__.py @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/datasets/test_mixins.py b/packages/openstef-core/tests/unit/datasets/test_mixins.py index 053339759..80179aa49 100644 --- a/packages/openstef-core/tests/unit/datasets/test_mixins.py +++ b/packages/openstef-core/tests/unit/datasets/test_mixins.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/datasets/test_timeseries_dataset.py b/packages/openstef-core/tests/unit/datasets/test_timeseries_dataset.py index b9e034ae8..8a72ca53c 100644 --- a/packages/openstef-core/tests/unit/datasets/test_timeseries_dataset.py +++ b/packages/openstef-core/tests/unit/datasets/test_timeseries_dataset.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/datasets/test_validation.py b/packages/openstef-core/tests/unit/datasets/test_validation.py index 35fa21d70..7f751e4b8 100644 --- a/packages/openstef-core/tests/unit/datasets/test_validation.py +++ b/packages/openstef-core/tests/unit/datasets/test_validation.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/datasets/test_versioned_timeseries_dataset.py b/packages/openstef-core/tests/unit/datasets/test_versioned_timeseries_dataset.py index 98bc23cd7..fae83f8b1 100644 --- a/packages/openstef-core/tests/unit/datasets/test_versioned_timeseries_dataset.py +++ b/packages/openstef-core/tests/unit/datasets/test_versioned_timeseries_dataset.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/datasets/utils.py b/packages/openstef-core/tests/unit/datasets/utils.py index f30c08f94..40ed1bb7b 100644 --- a/packages/openstef-core/tests/unit/datasets/utils.py +++ b/packages/openstef-core/tests/unit/datasets/utils.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/mixins/test_stateful.py b/packages/openstef-core/tests/unit/mixins/test_stateful.py index a9d179ccb..75d118603 100644 --- a/packages/openstef-core/tests/unit/mixins/test_stateful.py +++ b/packages/openstef-core/tests/unit/mixins/test_stateful.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/mixins/test_transform.py b/packages/openstef-core/tests/unit/mixins/test_transform.py index d5c3bd253..0f2351b3d 100644 --- a/packages/openstef-core/tests/unit/mixins/test_transform.py +++ b/packages/openstef-core/tests/unit/mixins/test_transform.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/test_base_model.py b/packages/openstef-core/tests/unit/test_base_model.py index f83eef5b9..11cf42441 100644 --- a/packages/openstef-core/tests/unit/test_base_model.py +++ b/packages/openstef-core/tests/unit/test_base_model.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/test_types.py b/packages/openstef-core/tests/unit/test_types.py index 0eaf393bb..b1f3af49f 100644 --- a/packages/openstef-core/tests/unit/test_types.py +++ b/packages/openstef-core/tests/unit/test_types.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/utils/test_datetime.py b/packages/openstef-core/tests/unit/utils/test_datetime.py index 2ff5b7406..c99bf9dd4 100644 --- a/packages/openstef-core/tests/unit/utils/test_datetime.py +++ b/packages/openstef-core/tests/unit/utils/test_datetime.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/utils/test_itertools.py b/packages/openstef-core/tests/unit/utils/test_itertools.py index dfc5472e9..565c4082f 100644 --- a/packages/openstef-core/tests/unit/utils/test_itertools.py +++ b/packages/openstef-core/tests/unit/utils/test_itertools.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-core/tests/unit/utils/test_multiprocessing.py b/packages/openstef-core/tests/unit/utils/test_multiprocessing.py index fead7f859..edcbbebb4 100644 --- a/packages/openstef-core/tests/unit/utils/test_multiprocessing.py +++ b/packages/openstef-core/tests/unit/utils/test_multiprocessing.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/README.md b/packages/openstef-models/README.md index 59f9ae249..8f42bd62f 100644 --- a/packages/openstef-models/README.md +++ b/packages/openstef-models/README.md @@ -1,7 +1,7 @@ -# openstef-model \ No newline at end of file +# openstef-model diff --git a/packages/openstef-models/pyproject.toml b/packages/openstef-models/pyproject.toml index 2b6f727bf..1b2f9d54b 100644 --- a/packages/openstef-models/pyproject.toml +++ b/packages/openstef-models/pyproject.toml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -15,7 +15,7 @@ readme = "README.md" keywords = [ "energy", "forecasting", "machinelearning" ] license = "MPL-2.0" authors = [ - { name = "Alliander N.V", email = "short.term.energy.forecasts@alliander.com" }, + { name = "Alliander N.V", email = "openstef@lfenergy.org" }, ] requires-python = ">=3.12,<4.0" classifiers = [ @@ -35,7 +35,7 @@ dependencies = [ "openstef-core>=4.0.0.dev0,<5", "pvlib>=0.13", "pycountry>=24.6.1", - "scikit-learn>=1.7.1,<2", + "scikit-learn>=1.7.1,<1.8", "scipy>=1.16.3,<2", "skops>=0.13", ] diff --git a/packages/openstef-models/src/openstef_models/__init__.py b/packages/openstef-models/src/openstef_models/__init__.py index e659c6c12..b4e55ee27 100644 --- a/packages/openstef-models/src/openstef_models/__init__.py +++ b/packages/openstef-models/src/openstef_models/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 """Core models for OpenSTEF.""" diff --git a/packages/openstef-models/src/openstef_models/explainability/__init__.py b/packages/openstef-models/src/openstef_models/explainability/__init__.py index 6f0cf7494..a444c4f38 100644 --- a/packages/openstef-models/src/openstef_models/explainability/__init__.py +++ b/packages/openstef-models/src/openstef_models/explainability/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/explainability/mixins.py b/packages/openstef-models/src/openstef_models/explainability/mixins.py index 9969b4993..dda56059b 100644 --- a/packages/openstef-models/src/openstef_models/explainability/mixins.py +++ b/packages/openstef-models/src/openstef_models/explainability/mixins.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/explainability/plotters/__init__.py b/packages/openstef-models/src/openstef_models/explainability/plotters/__init__.py index e38073f6b..b3d603eec 100644 --- a/packages/openstef-models/src/openstef_models/explainability/plotters/__init__.py +++ b/packages/openstef-models/src/openstef_models/explainability/plotters/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/explainability/plotters/feature_importance_plotter.py b/packages/openstef-models/src/openstef_models/explainability/plotters/feature_importance_plotter.py index a2a9ca8d0..98dc15de6 100644 --- a/packages/openstef-models/src/openstef_models/explainability/plotters/feature_importance_plotter.py +++ b/packages/openstef-models/src/openstef_models/explainability/plotters/feature_importance_plotter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/integrations/__init__.py b/packages/openstef-models/src/openstef_models/integrations/__init__.py index 7cb6b280f..261a35cca 100644 --- a/packages/openstef-models/src/openstef_models/integrations/__init__.py +++ b/packages/openstef-models/src/openstef_models/integrations/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/integrations/joblib/__init__.py b/packages/openstef-models/src/openstef_models/integrations/joblib/__init__.py index f7e538ab2..10fddd524 100644 --- a/packages/openstef-models/src/openstef_models/integrations/joblib/__init__.py +++ b/packages/openstef-models/src/openstef_models/integrations/joblib/__init__.py @@ -6,7 +6,7 @@ single-machine deployments. """ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/integrations/joblib/joblib_model_serializer.py b/packages/openstef-models/src/openstef_models/integrations/joblib/joblib_model_serializer.py index 100641ebe..01eabc5ce 100644 --- a/packages/openstef-models/src/openstef_models/integrations/joblib/joblib_model_serializer.py +++ b/packages/openstef-models/src/openstef_models/integrations/joblib/joblib_model_serializer.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 """Local model storage implementation using joblib serialization. diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py index 78da566cb..a64f59c51 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage.py index 8fe559c03..3d17004d8 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -19,9 +19,11 @@ from mlflow import MlflowClient from mlflow.entities import Metric, Param, Run +from mlflow.exceptions import MlflowException from pydantic import Field, PrivateAttr from openstef_core.base_model import BaseConfig +from openstef_core.exceptions import ModelNotFoundError from openstef_core.mixins import HyperParams from openstef_models.integrations.joblib import JoblibModelSerializer from openstef_models.mixins import ModelIdentifier, ModelSerializer @@ -35,12 +37,17 @@ class MLFlowStorage(BaseConfig): before uploading to MLflow tracking server. """ - tracking_uri: str = Field(default="./mlflow") - local_artifacts_path: Path = Field(default=Path("./mlflow_artifacts_local")) - experiment_name_prefix: str = Field(default="") + tracking_uri: str = Field(default="./mlflow", description="MLflow tracking server URI.") + local_artifacts_path: Path = Field( + default=Path("./mlflow_artifacts_local"), description="Local path for storing MLflow artifacts before upload." + ) + experiment_name_prefix: str = Field(default="", description="Prefix for MLflow experiment names.") # Artifact subdirectories - data_path: str = Field(default="data") - model_path: str = Field(default="model") + data_path: str = Field(default="data", description="Subdirectory for storing training data artifacts.") + model_path: str = Field(default="model", description="Subdirectory for storing model artifacts.") + enable_mlflow_stdout: bool = Field( + default=False, description="Keep MLflow stdout messages which circumvent standard logging." + ) model_serializer: ModelSerializer = Field(default_factory=JoblibModelSerializer) @@ -49,7 +56,10 @@ class MLFlowStorage(BaseConfig): @override def model_post_init(self, context: Any) -> None: - os.environ.setdefault("MLFLOW_ENABLE_ARTIFACTS_PROGRESS_BAR", "false") + if not self.enable_mlflow_stdout: + # Suppress MLflow's stdout messages (emoji URLs) + os.environ.setdefault("MLFLOW_SUPPRESS_PRINTING_URL_TO_STDOUT", "true") + os.environ.setdefault("MLFLOW_ENABLE_ARTIFACTS_PROGRESS_BAR", "false") self._client = MlflowClient(tracking_uri=self.tracking_uri) def create_run( @@ -173,6 +183,40 @@ def search_latest_runs( max_results=limit, ) + def search_run( + self, + model_id: ModelIdentifier, + run_name: str, + ) -> Run | None: + """Search for a specific run of a model by its name in MLflow. + + Queries MLflow for a run matching the provided run name. + Returns None if no experiment or run exists for the model. + + Args: + model_id: Model identifier to search runs for. + run_name: Name of the run to search for. + + Returns: + The matching Run object if found, otherwise None. + """ + # Get related experiment + experiment = self._client.get_experiment_by_name(name=f"{self.experiment_name_prefix}{model_id}") + if experiment is None: + return None + + # Search for the run by name + runs = self._client.search_runs( + experiment_ids=[experiment.experiment_id], + filter_string=f"attribute.run_name = '{run_name}'", + order_by=["start_time DESC"], + max_results=1, + ) + + if runs: + return runs[0] + return None + def save_run_model(self, model_id: ModelIdentifier, run_id: str, model: object) -> None: """Save a trained model to local artifacts directory for the run. @@ -193,7 +237,7 @@ def save_run_model(self, model_id: ModelIdentifier, run_id: str, model: object) with Path(model_path / f"model.{self.model_serializer.extension}").open("wb") as f: self.model_serializer.serialize(model, file=f) - def load_run_model(self, run_id: str) -> object: + def load_run_model(self, run_id: str, model_id: ModelIdentifier) -> object: """Load a trained model from MLflow artifacts. Downloads model artifacts from MLflow and deserializes them into the @@ -201,15 +245,21 @@ def load_run_model(self, run_id: str) -> object: Args: run_id: MLflow run ID containing the model artifacts. + model_id: Model identifier for locating artifact paths. Returns: Model instance with restored state from the run. + + Raises: + ModelNotFoundError: If the model artifacts cannot be found in MLflow. """ - # Download and load the model - with TemporaryDirectory() as tmpdir: - self._client.download_artifacts(run_id=run_id, path=self.model_path, dst_path=tmpdir) - with (Path(tmpdir) / self.model_path / f"model.{self.model_serializer.extension}").open("rb") as f: - model = cast(Any, self.model_serializer.deserialize(file=f)) + try: + with TemporaryDirectory() as tmpdir: + self._client.download_artifacts(run_id=run_id, path=self.model_path, dst_path=tmpdir) + with (Path(tmpdir) / self.model_path / f"model.{self.model_serializer.extension}").open("rb") as f: + model = cast(Any, self.model_serializer.deserialize(file=f)) + except (MlflowException, FileNotFoundError) as e: + raise ModelNotFoundError(model_id=model_id) from e return model diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index fd59cd600..78f4abb4a 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -20,7 +20,11 @@ from openstef_core.base_model import BaseConfig from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset from openstef_core.datasets.versioned_timeseries_dataset import VersionedTimeSeriesDataset -from openstef_core.exceptions import ModelNotFoundError, SkipFitting +from openstef_core.exceptions import ( + MissingColumnsError, + ModelNotFoundError, + SkipFitting, +) from openstef_core.types import Q, QuantileOrGlobal from openstef_models.explainability import ExplainableForecaster from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage @@ -72,20 +76,29 @@ def on_fit_start( return # Find the latest successful run for this model - runs = self.storage.search_latest_runs(model_id=context.workflow.model_id) - run = next(iter(runs), None) + if context.workflow.run_name is not None: + run = self.storage.search_run( + model_id=context.workflow.model_id, + run_name=context.workflow.run_name, + ) + else: + runs = self.storage.search_latest_runs(model_id=context.workflow.model_id) + run = next(iter(runs), None) if run is not None: # Check if the run is recent enough to skip re-fitting now = datetime.now(tz=UTC) - run_end_datetime = datetime.fromtimestamp(cast(float, run.info.end_time) / 1000, tz=UTC) + end_time_millis = cast(float | None, run.info.end_time) + run_end_datetime = ( + datetime.fromtimestamp(end_time_millis / 1000, tz=UTC) if end_time_millis is not None else None + ) self._logger.info( "Found previous MLflow run %s for model %s ended at %s", cast(str, run.info.run_id), context.workflow.model_id, run_end_datetime, ) - if (now - run_end_datetime) <= self.model_reuse_max_age: + if run_end_datetime is not None and (now - run_end_datetime) <= self.model_reuse_max_age: raise SkipFitting("Model is recent enough, skipping re-fit.") @override @@ -102,6 +115,8 @@ def on_fit_end( model_id=context.workflow.model_id, tags=context.workflow.model.tags, hyperparams=context.workflow.model.forecaster.hyperparams, + run_name=context.workflow.run_name, + experiment_tags=context.workflow.experiment_tags, ) run_id: str = run.info.run_id self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) @@ -149,7 +164,9 @@ def on_predict_start( # Load the model from the latest run run_id: str = run.info.run_id - old_model = self.storage.load_run_model(run_id=run_id) + + old_model = self.storage.load_run_model(run_id=run_id, model_id=context.workflow.model_id) + if not isinstance(old_model, ForecastingModel): self._logger.warning( "Loaded model from run %s is not a ForecastingModel, cannot use for prediction", @@ -167,19 +184,34 @@ def _run_model_selection(self, workflow: CustomForecastingWorkflow, result: Mode if run is None: return - # Backup the new model + run_id = cast(str, run.info.run_id) + + if not self._check_tags_compatible( + run_tags=run.data.tags, # pyright: ignore[reportUnknownMemberType, reportUnknownArgumentType] + new_tags=workflow.model.tags, + run_id=run_id, + ): + return + new_model = workflow.model new_metrics = result.metrics_full - # Restore the old model and evaluate - old_model = self.storage.load_run_model(run_id=cast(str, run.info.run_id)) - if not isinstance(old_model, ForecastingModel): - self._logger.warning( - "Loaded old model from run %s is not a ForecastingModel, skipping model selection", - cast(str, run.info.run_id), - ) + old_model = self._try_load_model( + run_id=run_id, + workflow=workflow, + ) + + if old_model is None: + return + + old_metrics = self._try_evaluate_model( + run_id=run_id, + old_model=old_model, + input_data=result.input_dataset, + ) + + if old_metrics is None: return - old_metrics = old_model.score(result.input_dataset) if self._check_is_new_model_better(old_metrics=old_metrics, new_metrics=new_metrics): workflow.model = new_model @@ -188,10 +220,74 @@ def _run_model_selection(self, workflow: CustomForecastingWorkflow, result: Mode self._logger.info( "New model did not improve %s metric from previous run %s, reusing old model", self.model_selection_metric, - cast(str, run.info.run_id), + run_id, ) raise SkipFitting("New model did not improve monitored metric, skipping re-fit.") + def _try_load_model( + self, + run_id: str, + workflow: CustomForecastingWorkflow, + ) -> ForecastingModel | None: + try: + old_model = self.storage.load_run_model(run_id=run_id, model_id=workflow.model_id) + except ModelNotFoundError: + self._logger.warning( + "Could not load model from previous run %s for model %s, skipping model selection", + run_id, + workflow.model_id, + ) + return None + + if not isinstance(old_model, ForecastingModel): + self._logger.warning( + "Loaded old model from run %s is not a ForecastingModel, skipping model selection", + run_id, + ) + return None + + return old_model + + def _try_evaluate_model( + self, + run_id: str, + old_model: ForecastingModel, + input_data: TimeSeriesDataset, + ) -> SubsetMetric | None: + try: + return old_model.score(input_data) + except (MissingColumnsError, ValueError) as e: + self._logger.warning( + "Could not evaluate old model from run %s, skipping model selection: %s", + run_id, + e, + ) + return None + + def _check_tags_compatible(self, run_tags: dict[str, str], new_tags: dict[str, str], run_id: str) -> bool: + """Check if model tags are compatible, excluding mlflow.runName. + + Returns: + True if tags are compatible, False otherwise. + """ + old_tags = {k: v for k, v in run_tags.items() if k != "mlflow.runName"} + + if old_tags == new_tags: + return True + + differences = { + k: (old_tags.get(k), new_tags.get(k)) + for k in old_tags.keys() | new_tags.keys() + if old_tags.get(k) != new_tags.get(k) + } + + self._logger.info( + "Model tags changed since run %s, skipping model selection. Changes: %s", + run_id, + differences, + ) + return False + def _check_is_new_model_better( self, old_metrics: SubsetMetric, diff --git a/packages/openstef-models/src/openstef_models/mixins/__init__.py b/packages/openstef-models/src/openstef_models/mixins/__init__.py index f505ac4b1..cc3a0ac25 100644 --- a/packages/openstef-models/src/openstef_models/mixins/__init__.py +++ b/packages/openstef-models/src/openstef_models/mixins/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/mixins/callbacks.py b/packages/openstef-models/src/openstef_models/mixins/callbacks.py index acc399cda..5f744e47c 100644 --- a/packages/openstef-models/src/openstef_models/mixins/callbacks.py +++ b/packages/openstef-models/src/openstef_models/mixins/callbacks.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/mixins/model_serializer.py b/packages/openstef-models/src/openstef_models/mixins/model_serializer.py index 40e74a52a..31bf67cff 100644 --- a/packages/openstef-models/src/openstef_models/mixins/model_serializer.py +++ b/packages/openstef-models/src/openstef_models/mixins/model_serializer.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/__init__.py b/packages/openstef-models/src/openstef_models/models/__init__.py index 9d4b0e8d2..766194fe5 100644 --- a/packages/openstef-models/src/openstef_models/models/__init__.py +++ b/packages/openstef-models/src/openstef_models/models/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/component_splitting/__init__.py b/packages/openstef-models/src/openstef_models/models/component_splitting/__init__.py index aa39c8b7d..d70b53096 100644 --- a/packages/openstef-models/src/openstef_models/models/component_splitting/__init__.py +++ b/packages/openstef-models/src/openstef_models/models/component_splitting/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/component_splitting/component_splitter.py b/packages/openstef-models/src/openstef_models/models/component_splitting/component_splitter.py index 7aa0182f2..43a0f8f72 100644 --- a/packages/openstef-models/src/openstef_models/models/component_splitting/component_splitter.py +++ b/packages/openstef-models/src/openstef_models/models/component_splitting/component_splitter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/component_splitting/constant_component_splitter.py b/packages/openstef-models/src/openstef_models/models/component_splitting/constant_component_splitter.py index 0bf4a1784..d841fc082 100644 --- a/packages/openstef-models/src/openstef_models/models/component_splitting/constant_component_splitter.py +++ b/packages/openstef-models/src/openstef_models/models/component_splitting/constant_component_splitter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/component_splitting/linear_component_splitter.py b/packages/openstef-models/src/openstef_models/models/component_splitting/linear_component_splitter.py index db0618d24..029f42c41 100644 --- a/packages/openstef-models/src/openstef_models/models/component_splitting/linear_component_splitter.py +++ b/packages/openstef-models/src/openstef_models/models/component_splitting/linear_component_splitter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -136,13 +136,6 @@ def _create_input_features(self, data: TimeSeriesDataset) -> pd.DataFrame: radiation_col = self.config.radiation_column wind_col = self.config.windspeed_100m_column - # Validate required columns - required_cols = [source_col, radiation_col, wind_col] - missing_cols = [col for col in required_cols if col not in df.columns] - if missing_cols: - error_msg = f"Missing required columns for linear model prediction: {missing_cols}" - raise ValueError(error_msg) - # Create feature dataframe with the expected column names input_df = pd.DataFrame( { @@ -196,9 +189,9 @@ def predict(self, data: TimeSeriesDataset) -> EnergyComponentDataset: index=input_df.index, ) - # Clip wind and solar components to be non-negative - forecasts[EnergyComponentType.SOLAR] = forecasts[EnergyComponentType.SOLAR].clip(lower=0.0) - forecasts[EnergyComponentType.WIND] = forecasts[EnergyComponentType.WIND].clip(lower=0.0) + # Clip wind and solar components to be strictly negative + forecasts[EnergyComponentType.SOLAR] = forecasts[EnergyComponentType.SOLAR].clip(upper=0.0) + forecasts[EnergyComponentType.WIND] = forecasts[EnergyComponentType.WIND].clip(upper=0.0) # Calculate "other" component as residual forecasts[EnergyComponentType.OTHER] = ( diff --git a/packages/openstef-models/src/openstef_models/models/component_splitting/linear_component_splitter_model/linear_component_splitter_model.z.license b/packages/openstef-models/src/openstef_models/models/component_splitting/linear_component_splitter_model/linear_component_splitter_model.z.license index 37e10dd31..7d320d6e2 100644 --- a/packages/openstef-models/src/openstef_models/models/component_splitting/linear_component_splitter_model/linear_component_splitter_model.z.license +++ b/packages/openstef-models/src/openstef_models/models/component_splitting/linear_component_splitter_model/linear_component_splitter_model.z.license @@ -1,3 +1,3 @@ -SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/component_splitting_model.py b/packages/openstef-models/src/openstef_models/models/component_splitting_model.py index 653d60eb7..4fbb6ecc5 100644 --- a/packages/openstef-models/src/openstef_models/models/component_splitting_model.py +++ b/packages/openstef-models/src/openstef_models/models/component_splitting_model.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/__init__.py b/packages/openstef-models/src/openstef_models/models/forecasting/__init__.py index adb81d012..1623e576e 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/__init__.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py index d7e01c965..215e85344 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py index 930881a55..9461cdddb 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py index 51a5b5ed0..fa7f141d3 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -11,6 +11,7 @@ from typing import override import pandas as pd +from pydantic import Field from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_models.explainability.mixins import ExplainableForecaster @@ -20,18 +21,24 @@ class FlatlinerForecasterConfig(ForecasterConfig): """Configuration for flatliner forecaster.""" + predict_median: bool = Field( + default=False, + description="If True, predict the median of load measurements instead of zero.", + ) + MODEL_CODE_VERSION = 1 class FlatlinerForecaster(Forecaster, ExplainableForecaster): - """Flatliner forecaster that predicts a flatline of zeros. + """Flatliner forecaster that predicts a flatline of zeros or median. - A simple forecasting model that always predicts zero for all horizons and quantiles. + A simple forecasting model that always predicts zero (or the median of historical + load measurements if configured) for all horizons and quantiles. Invariants: - Configuration quantiles determine the number of prediction outputs - - Zeros are predicted for all horizons and quantiles + - Zeros (or median values) are predicted for all horizons and quantiles Example: >>> from openstef_core.types import LeadTime, Quantile @@ -52,6 +59,7 @@ class FlatlinerForecaster(Forecaster, ExplainableForecaster): Config = FlatlinerForecasterConfig _config: FlatlinerForecasterConfig + _median_value: float | None def __init__( self, @@ -63,6 +71,7 @@ def __init__( config: Configuration specifying quantiles and horizons. """ self._config = config or FlatlinerForecasterConfig() + self._median_value = None @property @override @@ -72,6 +81,9 @@ def config(self) -> FlatlinerForecasterConfig: @property @override def is_fitted(self) -> bool: + # When predict_median is True, the model needs to be fitted to compute the median + if self._config.predict_median: + return self._median_value is not None return True @override @@ -80,15 +92,18 @@ def fit( data: ForecastInputDataset, data_val: ForecastInputDataset | None = None, ) -> None: - pass + if self._config.predict_median: + self._median_value = float(data.target_series.median()) @override def predict(self, data: ForecastInputDataset) -> ForecastDataset: forecast_index = data.create_forecast_range(horizon=self.config.max_horizon) + prediction_value = self._median_value if self._config.predict_median else 0.0 + return ForecastDataset( data=pd.DataFrame( - data={quantile.format(): 0.0 for quantile in self.config.quantiles}, + data={quantile.format(): prediction_value for quantile in self.config.quantiles}, index=forecast_index, ), sample_interval=data.sample_interval, diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py index 9628c61e3..54e96a723 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index 92c3981a3..74c904364 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py index 2c673c68b..0e371a339 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/models/forecasting_model.py b/packages/openstef-models/src/openstef_models/models/forecasting_model.py index 9d9e47498..f2de3c4b3 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting_model.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting_model.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/presets/__init__.py b/packages/openstef-models/src/openstef_models/presets/__init__.py index b0a0929ca..0615a0ea4 100644 --- a/packages/openstef-models/src/openstef_models/presets/__init__.py +++ b/packages/openstef-models/src/openstef_models/presets/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 1a33b4622..57afe7847 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -35,7 +35,15 @@ from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster from openstef_models.transforms.energy_domain import WindPowerFeatureAdder -from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, Imputer, NaNDropper, SampleWeighter, Scaler +from openstef_models.transforms.general import ( + Clipper, + EmptyFeatureRemover, + Imputer, + NaNDropper, + SampleWeighter, + Scaler, + Selector, +) from openstef_models.transforms.postprocessing import ConfidenceIntervalApplicator, QuantileSorter from openstef_models.transforms.time_domain import ( CyclicFeaturesAdder, @@ -98,6 +106,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob """ model_id: ModelIdentifier = Field(description="Unique identifier for the forecasting model.") + run_name: str | None = Field(default=None, description="Optional name for this workflow run.") # Model configuration model: Literal["xgboost", "gblinear", "flatliner", "hybrid", "lgbm", "lgbmlinear"] = Field( @@ -160,6 +169,11 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob default="relative_humidity", description="Name of the relative humidity column in datasets.", ) + selected_features: FeatureSelection = Field( + default=FeatureSelection.ALL, + description="Feature selection for which features to include/exclude.", + ) + predict_history: timedelta = Field( default=timedelta(days=14), description="Amount of historical data available at prediction time.", @@ -187,6 +201,12 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob default=False, description="If True, flatliners are also detected on non-zero values (median of the load).", ) + predict_nonzero_flatliner: bool = Field( + default=False, + description="If True, predict the median of load measurements instead of zero (only for flatliner model).", + ) + + # Feature engineering rolling_aggregate_features: list[AggregationFunction] = Field( default=[], description="If not None, rolling aggregate(s) of load will be used as features in the model.", @@ -266,7 +286,11 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob # Metadata tags: dict[str, str] = Field( default_factory=dict, - description="Optional metadata tags for the model.", + description="Optional metadata tags for the model run.", + ) + experiment_tags: dict[str, str] = Field( + default_factory=dict, + description="Optional metadata tags for experiment tracking.", ) @@ -288,6 +312,7 @@ def create_forecasting_workflow( ValueError: If an unsupported model type is specified. """ checks = [ + Selector(selection=config.selected_features), InputConsistencyChecker(), FlatlineChecker( load_column=config.target_column, @@ -358,7 +383,13 @@ def create_forecasting_workflow( verbosity=config.verbosity, ) ) - postprocessing = [QuantileSorter()] + postprocessing = [ + QuantileSorter(), + ConfidenceIntervalApplicator( + quantiles=config.quantiles, + add_quantiles_from_std=False, + ), + ] elif config.model == "lgbmlinear": preprocessing = [ *checks, @@ -413,16 +444,24 @@ def create_forecasting_workflow( verbosity=config.verbosity, ), ) - postprocessing = [QuantileSorter()] + postprocessing = [ + QuantileSorter(), + ConfidenceIntervalApplicator( + quantiles=config.quantiles, + add_quantiles_from_std=False, + ), + ] elif config.model == "flatliner": preprocessing = [] forecaster = FlatlinerForecaster( config=FlatlinerForecaster.Config( quantiles=[Q(0.5)], horizons=config.horizons, + predict_median=config.predict_nonzero_flatliner, ) ) postprocessing = [ + QuantileSorter(), ConfidenceIntervalApplicator(quantiles=config.quantiles), ] elif config.model == "hybrid": @@ -484,5 +523,7 @@ def create_forecasting_workflow( tags=tags, ), model_id=config.model_id, + run_name=config.run_name, callbacks=callbacks, + experiment_tags=config.experiment_tags, ) diff --git a/packages/openstef-models/src/openstef_models/transforms/__init__.py b/packages/openstef-models/src/openstef_models/transforms/__init__.py index 427ca37d6..fde29d25e 100644 --- a/packages/openstef-models/src/openstef_models/transforms/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/energy_domain/__init__.py b/packages/openstef-models/src/openstef_models/transforms/energy_domain/__init__.py index 428f2ec02..333f524f3 100644 --- a/packages/openstef-models/src/openstef_models/transforms/energy_domain/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/energy_domain/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/energy_domain/wind_power_feature_adder.py b/packages/openstef-models/src/openstef_models/transforms/energy_domain/wind_power_feature_adder.py index d89f0050c..4d9e6175a 100644 --- a/packages/openstef-models/src/openstef_models/transforms/energy_domain/wind_power_feature_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/energy_domain/wind_power_feature_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py index 79e59f58b..32a8b979c 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 """General feature transforms for time series data. @@ -17,6 +17,7 @@ from openstef_models.transforms.general.nan_dropper import NaNDropper from openstef_models.transforms.general.sample_weighter import SampleWeighter from openstef_models.transforms.general.scaler import Scaler +from openstef_models.transforms.general.selector import Selector __all__ = [ "Clipper", @@ -26,4 +27,5 @@ "NaNDropper", "SampleWeighter", "Scaler", + "Selector", ] diff --git a/packages/openstef-models/src/openstef_models/transforms/general/clipper.py b/packages/openstef-models/src/openstef_models/transforms/general/clipper.py index eb3a21446..a148d5c58 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/clipper.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/clipper.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/general/dimensionality_reducer.py b/packages/openstef-models/src/openstef_models/transforms/general/dimensionality_reducer.py index 32764151d..2bca878b2 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/dimensionality_reducer.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/dimensionality_reducer.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/general/empty_feature_remover.py b/packages/openstef-models/src/openstef_models/transforms/general/empty_feature_remover.py index 3147427ff..c0914e9e1 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/empty_feature_remover.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/empty_feature_remover.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/general/imputer.py b/packages/openstef-models/src/openstef_models/transforms/general/imputer.py index 2a08dcf05..addc8df54 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/imputer.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/imputer.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/general/nan_dropper.py b/packages/openstef-models/src/openstef_models/transforms/general/nan_dropper.py index 0d8cef10c..e9144bba9 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/nan_dropper.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/nan_dropper.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/general/sample_weighter.py b/packages/openstef-models/src/openstef_models/transforms/general/sample_weighter.py index 4dbfe1ea2..f820008d3 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/sample_weighter.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/sample_weighter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/general/scaler.py b/packages/openstef-models/src/openstef_models/transforms/general/scaler.py index a474ed763..fbbb4c215 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/scaler.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/scaler.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/general/selector.py b/packages/openstef-models/src/openstef_models/transforms/general/selector.py new file mode 100644 index 000000000..f6ce646ee --- /dev/null +++ b/packages/openstef-models/src/openstef_models/transforms/general/selector.py @@ -0,0 +1,84 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Transform for dropping for dropping features from dataset based on FeatureSelection. + +This transform allows selecting a subset of features from a TimeSeriesDataset based on a specified +FeatureSelection strategy. It can be used to exclude certain features before model training +or inference. +""" + +from typing import override + +from pydantic import Field, PrivateAttr + +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import TimeSeriesDataset +from openstef_core.datasets.validated_datasets import ForecastInputDataset +from openstef_core.transforms import TimeSeriesTransform +from openstef_models.utils.feature_selection import FeatureSelection + + +class Selector(BaseConfig, TimeSeriesTransform): + """Selects features based on FeatureSelection. + + Example: + >>> import pandas as pd + >>> from datetime import timedelta + >>> from openstef_core.datasets import TimeSeriesDataset + >>> from openstef_models.transforms.general import Selector + >>> from openstef_models.utils.feature_selection import FeatureSelection + >>> + >>> # Create sample dataset + >>> data = pd.DataFrame( + ... { + ... "load": [100.0, 110.0, 120.0], + ... "temperature": [20.0, 22.0, 23.0], + ... "humidity": [60.0, 65.0, 70.0], + ... }, + ... index=pd.date_range("2025-01-01", periods=3, freq="1h"), + ... ) + >>> dataset = TimeSeriesDataset(data, timedelta(hours=1)) + >>> + >>> # Select specific features + >>> selector = Selector(selection=FeatureSelection(include={'load', 'temperature'})) + >>> transformed = selector.transform(dataset) + >>> transformed.feature_names + ['load', 'temperature'] + """ + + selection: FeatureSelection = Field( + default=FeatureSelection.ALL, + description="Feature selection for efficient model specific preprocessing.", + ) + _is_fitted: bool = PrivateAttr(default=False) + + @property + @override + def is_fitted(self) -> bool: + return self._is_fitted + + @override + def fit(self, data: TimeSeriesDataset) -> None: + if ( + isinstance(data, ForecastInputDataset) + and self.selection.include is not None + and (data.target_column not in self.selection.include) + ): + self.selection.include.add(data.target_column) + + self._is_fitted = True + + @override + def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: + + features = self.selection.resolve(data.feature_names) + + transformed_data = data.data.drop(columns=[col for col in data.feature_names if col not in features]) + + return data.copy_with(data=transformed_data, is_sorted=True) + + @override + def features_added(self) -> list[str]: + return [] diff --git a/packages/openstef-models/src/openstef_models/transforms/postprocessing/__init__.py b/packages/openstef-models/src/openstef_models/transforms/postprocessing/__init__.py index 72e77049f..6d0e75c54 100644 --- a/packages/openstef-models/src/openstef_models/transforms/postprocessing/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/postprocessing/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/postprocessing/confidence_interval_applicator.py b/packages/openstef-models/src/openstef_models/transforms/postprocessing/confidence_interval_applicator.py index 857c448f0..09748428b 100644 --- a/packages/openstef-models/src/openstef_models/transforms/postprocessing/confidence_interval_applicator.py +++ b/packages/openstef-models/src/openstef_models/transforms/postprocessing/confidence_interval_applicator.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -72,6 +72,11 @@ class ConfidenceIntervalApplicator(BaseModel, Transform[ForecastDataset, Forecas """ quantiles: list[Quantile] | None = Field(default=None) + add_quantiles_from_std: bool = Field( + default=True, + description="If True, adds quantiles based on computed standard deviation. " + "If False, only computes standard deviation without adding quantiles.", + ) _standard_deviation: pd.DataFrame = PrivateAttr(default_factory=pd.DataFrame) _is_fitted: bool = PrivateAttr(default=False) @@ -149,8 +154,15 @@ def transform(self, data: ForecastDataset) -> ForecastDataset: # Compute standard deviation series stdev_series = self._compute_stdev_series(data) + # Add standard deviation column + stdev_column = data.standard_deviation_column + data = data.pipe_pandas(lambda df: df.assign(**{stdev_column: stdev_series})) + # Add quantiles based on standard deviation - return self._add_quantiles_from_stdev(forecast=data, stdev_series=stdev_series, quantiles=self.quantiles) + if self.add_quantiles_from_std: + return self._add_quantiles_from_stdev(forecast=data, stdev_series=stdev_series, quantiles=self.quantiles) + + return data def _calculate_hourly_std(errors: pd.Series) -> pd.Series: diff --git a/packages/openstef-models/src/openstef_models/transforms/postprocessing/quantile_sorter.py b/packages/openstef-models/src/openstef_models/transforms/postprocessing/quantile_sorter.py index 81fac5c30..14f44e2ec 100644 --- a/packages/openstef-models/src/openstef_models/transforms/postprocessing/quantile_sorter.py +++ b/packages/openstef-models/src/openstef_models/transforms/postprocessing/quantile_sorter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/time_domain/__init__.py b/packages/openstef-models/src/openstef_models/transforms/time_domain/__init__.py index 75f8e1e00..f587914ba 100644 --- a/packages/openstef-models/src/openstef_models/transforms/time_domain/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/time_domain/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/time_domain/cyclic_features_adder.py b/packages/openstef-models/src/openstef_models/transforms/time_domain/cyclic_features_adder.py index 690ba118c..5d3b0b948 100644 --- a/packages/openstef-models/src/openstef_models/transforms/time_domain/cyclic_features_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/time_domain/cyclic_features_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/time_domain/datetime_features_adder.py b/packages/openstef-models/src/openstef_models/transforms/time_domain/datetime_features_adder.py index e269a6b7d..bc0520cb9 100644 --- a/packages/openstef-models/src/openstef_models/transforms/time_domain/datetime_features_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/time_domain/datetime_features_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/time_domain/holiday_features_adder.py b/packages/openstef-models/src/openstef_models/transforms/time_domain/holiday_features_adder.py index 7f300de97..59e39f2cc 100644 --- a/packages/openstef-models/src/openstef_models/transforms/time_domain/holiday_features_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/time_domain/holiday_features_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/time_domain/lags_adder.py b/packages/openstef-models/src/openstef_models/transforms/time_domain/lags_adder.py index 53d2a707f..33c881280 100644 --- a/packages/openstef-models/src/openstef_models/transforms/time_domain/lags_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/time_domain/lags_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py b/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py index a4e95a1d8..5c5dc4fde 100644 --- a/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/time_domain/versioned_lags_adder.py b/packages/openstef-models/src/openstef_models/transforms/time_domain/versioned_lags_adder.py index 7833945df..934f536ad 100644 --- a/packages/openstef-models/src/openstef_models/transforms/time_domain/versioned_lags_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/time_domain/versioned_lags_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/validation/__init__.py b/packages/openstef-models/src/openstef_models/transforms/validation/__init__.py index c6dfb5151..32b14f592 100644 --- a/packages/openstef-models/src/openstef_models/transforms/validation/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/validation/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/validation/completeness_checker.py b/packages/openstef-models/src/openstef_models/transforms/validation/completeness_checker.py index ff563452f..79ae2b3e4 100644 --- a/packages/openstef-models/src/openstef_models/transforms/validation/completeness_checker.py +++ b/packages/openstef-models/src/openstef_models/transforms/validation/completeness_checker.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/validation/flatline_checker.py b/packages/openstef-models/src/openstef_models/transforms/validation/flatline_checker.py index c3269603b..aba3b0148 100644 --- a/packages/openstef-models/src/openstef_models/transforms/validation/flatline_checker.py +++ b/packages/openstef-models/src/openstef_models/transforms/validation/flatline_checker.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/validation/input_consistency_checker.py b/packages/openstef-models/src/openstef_models/transforms/validation/input_consistency_checker.py index ad4fd07f9..2f16d720b 100644 --- a/packages/openstef-models/src/openstef_models/transforms/validation/input_consistency_checker.py +++ b/packages/openstef-models/src/openstef_models/transforms/validation/input_consistency_checker.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/weather_domain/__init__.py b/packages/openstef-models/src/openstef_models/transforms/weather_domain/__init__.py index f4f1e8fc7..33dd06560 100644 --- a/packages/openstef-models/src/openstef_models/transforms/weather_domain/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/weather_domain/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/weather_domain/atmosphere_derived_features_adder.py b/packages/openstef-models/src/openstef_models/transforms/weather_domain/atmosphere_derived_features_adder.py index c6a03ed53..9b313477d 100644 --- a/packages/openstef-models/src/openstef_models/transforms/weather_domain/atmosphere_derived_features_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/weather_domain/atmosphere_derived_features_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/weather_domain/daylight_feature_adder.py b/packages/openstef-models/src/openstef_models/transforms/weather_domain/daylight_feature_adder.py index c70ebdac0..af6a79c2b 100644 --- a/packages/openstef-models/src/openstef_models/transforms/weather_domain/daylight_feature_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/weather_domain/daylight_feature_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/transforms/weather_domain/radiation_derived_features_adder.py b/packages/openstef-models/src/openstef_models/transforms/weather_domain/radiation_derived_features_adder.py index 299079ed6..3e242cd70 100644 --- a/packages/openstef-models/src/openstef_models/transforms/weather_domain/radiation_derived_features_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/weather_domain/radiation_derived_features_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -26,15 +26,14 @@ class RadiationDerivedFeaturesAdder(BaseConfig, TimeSeriesTransform): """Transform that adds radiation derived features to time series data. - Computes features that are derived from radiation data (in J/m²) based on geographical coordinates + Computes features that are derived from radiation data (in W/m²) based on geographical coordinates (latitude and longitude) and solar position. The features added can include: - - dni: Direct Normal Irradiance (DNI) in kWh/m². - - gti: Global Tilted Irradiance (GTI) in kWh/m² on a tilted surface. + - dni: Direct Normal Irradiance (DNI) in W/m². + - gti: Global Tilted Irradiance (GTI) in W/m² on a tilted surface. Note: - The input radiation data must be in J/m² units. The transform will automatically - convert this to kWh/m² for internal calculations. + The input radiation data must be in W/m² units. Example: >>> import pandas as pd @@ -45,9 +44,9 @@ class RadiationDerivedFeaturesAdder(BaseConfig, TimeSeriesTransform): ... ) >>> from pydantic_extra_types.coordinate import Coordinate, Latitude, Longitude >>> - >>> # Create sample dataset with radiation data in J/m² + >>> # Create sample dataset with radiation data in W/m² >>> data = pd.DataFrame({ - ... 'radiation': [3600000, 7200000, 5400000] # Corresponds to 1, 2, and 1.5 kWh/m² + ... 'radiation': [1000, 2000, 1500] ... }, index=pd.date_range('2025-06-01', periods=3, freq='D', tz='Europe/Amsterdam')) >>> dataset = TimeSeriesDataset(data, sample_interval=timedelta(minutes=15)) >>> @@ -92,7 +91,7 @@ class RadiationDerivedFeaturesAdder(BaseConfig, TimeSeriesTransform): ) radiation_column: str = Field( default="radiation", - description="Name of the column in the dataset containing radiation data in J/m².", + description="Name of the column in the dataset containing radiation data in W/m².", ) _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) @@ -115,8 +114,8 @@ def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: ) return data - # Convert radiation from J/m² to kWh/m² and rename to 'ghi' - ghi = (data.data[self.radiation_column] / 3600).rename("ghi") + # Rename radiation column to 'ghi' + ghi = data.data[self.radiation_column].rename("ghi") location = pvlib.location.Location( latitude=self.coordinate.latitude, diff --git a/packages/openstef-models/src/openstef_models/utils/__init__.py b/packages/openstef-models/src/openstef_models/utils/__init__.py index 9ca0c6544..fe29ec223 100644 --- a/packages/openstef-models/src/openstef_models/utils/__init__.py +++ b/packages/openstef-models/src/openstef_models/utils/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/utils/data_split.py b/packages/openstef-models/src/openstef_models/utils/data_split.py index 908203fda..f27ca83e5 100644 --- a/packages/openstef-models/src/openstef_models/utils/data_split.py +++ b/packages/openstef-models/src/openstef_models/utils/data_split.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/utils/evaluation_functions.py b/packages/openstef-models/src/openstef_models/utils/evaluation_functions.py index 7d568af13..d7753ebad 100644 --- a/packages/openstef-models/src/openstef_models/utils/evaluation_functions.py +++ b/packages/openstef-models/src/openstef_models/utils/evaluation_functions.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 """Utility functions for evaluation metrics in forecasting models.""" diff --git a/packages/openstef-models/src/openstef_models/utils/feature_selection.py b/packages/openstef-models/src/openstef_models/utils/feature_selection.py index ae260fa87..c9405822b 100644 --- a/packages/openstef-models/src/openstef_models/utils/feature_selection.py +++ b/packages/openstef-models/src/openstef_models/utils/feature_selection.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 """Feature selection utilities for transforms. @@ -7,6 +7,7 @@ Transforms use this to consistently specify which features to operate on. """ +import re from typing import ClassVar, Self from pydantic import Field @@ -17,29 +18,114 @@ class FeatureSelection(BaseConfig): """Standardized feature selection with include/exclude patterns. - Defines which features a transform should operate on. Features can be - specified by inclusion (whitelist) or exclusion (blacklist), or both. + Supports both exact matching and regex pattern matching for feature selection. + Features can be specified by inclusion (whitelist) or exclusion (blacklist), or both. When both are specified, inclusion is applied first, then exclusion. Use `FeatureSelection.ALL` to select all available features. + + Example: + >>> from openstef_models.utils.feature_selection import ( + ... FeatureSelection, + ... Include, + ... Exclude, + ... ) + >>> + >>> # Select all features + >>> all_features = FeatureSelection.ALL + >>> all_features.resolve(['a', 'b', 'c']) + ['a', 'b', 'c'] + >>> + >>> # Include only specific features (exact match) + >>> include_only = Include('a', 'b') + >>> include_only.resolve(['a', 'b', 'c', 'd']) + ['a', 'b'] + >>> + >>> # Exclude specific features (exact match) + >>> exclude_some = Exclude('b', 'd') + >>> exclude_some.resolve(['a', 'b', 'c', 'd']) + ['a', 'c'] + >>> + >>> # Regex matching + >>> regex_sel = FeatureSelection(include_regex={r'^b_.*'}) + >>> regex_sel.resolve(['b_1', 'b_2', 'c_1']) + ['b_1', 'b_2'] + >>> + >>> # Combine exact and regex + >>> combined = FeatureSelection(include={'a'}, include_regex={r'^b.*'}) + >>> combined.resolve(['a', 'b1', 'b2', 'c']) + ['a', 'b1', 'b2'] """ include: set[str] | None = Field( default=None, - description=("List of feature names to include. Use None to include all features from the input dataset."), + description="Set of exact feature names to include. Use None to include all features.", + frozen=True, + ) + include_regex: set[str] | None = Field( + default=None, + description="Set of regex patterns to include features. Use None to include all features.", frozen=True, ) exclude: set[str] | None = Field( default=None, - description="List of feature names to exclude. Use None to exclude no features.", + description="Set of exact feature names to exclude. Use None to exclude no features.", + frozen=True, + ) + exclude_regex: set[str] | None = Field( + default=None, + description="Set of regex patterns to exclude features. Use None to exclude no features.", frozen=True, ) ALL: ClassVar[Self] NONE: ClassVar[Self] + @staticmethod + def _matches_regex(feature: str, patterns: set[str]) -> bool: + """Check if a feature matches any regex pattern in the set. + + Args: + feature: Feature name to check. + patterns: Set of regex patterns to match against. + + Returns: + True if feature matches any regex pattern. + """ + return any(re.match(pattern, feature) for pattern in patterns) + + def _should_include_feature(self, feature: str) -> bool: + """Check if a feature should be included based on include filters. + + Args: + feature: Feature name to check. + + Returns: + True if feature should be included. + """ + if self.include is None and self.include_regex is None: + return True + exact_match = self.include is not None and feature in self.include + regex_match = self.include_regex is not None and self._matches_regex(feature, self.include_regex) + return exact_match or regex_match + + def _should_exclude_feature(self, feature: str) -> bool: + """Check if a feature should be excluded based on exclude filters. + + Args: + feature: Feature name to check. + + Returns: + True if feature should be excluded. + """ + if self.exclude is None and self.exclude_regex is None: + return False + exact_match = self.exclude is not None and feature in self.exclude + regex_match = self.exclude_regex is not None and self._matches_regex(feature, self.exclude_regex) + return exact_match or regex_match + def resolve(self, features: list[str]) -> list[str]: - """Resolve the final list of features based on include and exclude lists. + """Resolve the final list of features based on include and exclude filters. Args: features: List of all available feature names. @@ -50,8 +136,7 @@ def resolve(self, features: list[str]) -> list[str]: return [ feature for feature in features - if (self.include is None or feature in self.include) - and (self.exclude is None or feature not in self.exclude) + if self._should_include_feature(feature) and not self._should_exclude_feature(feature) ] def combine(self, other: Self | None) -> Self: @@ -66,14 +151,19 @@ def combine(self, other: Self | None) -> Self: if other is None: return self + def _union(a: set[str] | None, b: set[str] | None) -> set[str] | None: + return None if a is None and b is None else (a or set()) | (b or set()) + return self.__class__( - include=((self.include or set()) | (other.include or set())), - exclude=((self.exclude or set()) | (other.exclude or set())), + include=_union(self.include, other.include), + include_regex=_union(self.include_regex, other.include_regex), + exclude=_union(self.exclude, other.exclude), + exclude_regex=_union(self.exclude_regex, other.exclude_regex), ) -FeatureSelection.ALL = FeatureSelection(include=None, exclude=None) -FeatureSelection.NONE = FeatureSelection(include=set(), exclude=None) +FeatureSelection.ALL = FeatureSelection(include=None, include_regex=None, exclude=None, exclude_regex=None) +FeatureSelection.NONE = FeatureSelection(include=set(), include_regex=set(), exclude=None, exclude_regex=None) def Include(*features: str) -> FeatureSelection: # noqa: N802 @@ -85,7 +175,7 @@ def Include(*features: str) -> FeatureSelection: # noqa: N802 Returns: FeatureSelection instance with specified features included. """ - return FeatureSelection(include={*features}, exclude=None) + return FeatureSelection(include={*features}, include_regex=None, exclude=None, exclude_regex=None) def Exclude(*features: str) -> FeatureSelection: # noqa: N802 @@ -97,4 +187,28 @@ def Exclude(*features: str) -> FeatureSelection: # noqa: N802 Returns: FeatureSelection instance with specified features excluded. """ - return FeatureSelection(include=None, exclude={*features}) + return FeatureSelection(include=None, include_regex=None, exclude={*features}, exclude_regex=None) + + +def IncludeRegex(*patterns: str) -> FeatureSelection: # noqa: N802 + """Helper to create a FeatureSelection that includes features matching regex patterns. + + Args: + *patterns: Regex patterns to include. + + Returns: + FeatureSelection instance with specified patterns included. + """ + return FeatureSelection(include=None, include_regex={*patterns}, exclude=None, exclude_regex=None) + + +def ExcludeRegex(*patterns: str) -> FeatureSelection: # noqa: N802 + """Helper to create a FeatureSelection that excludes features matching regex patterns. + + Args: + *patterns: Regex patterns to exclude. + + Returns: + FeatureSelection instance with specified patterns excluded. + """ + return FeatureSelection(include=None, include_regex=None, exclude=None, exclude_regex={*patterns}) diff --git a/packages/openstef-models/src/openstef_models/utils/loss_functions.py b/packages/openstef-models/src/openstef_models/utils/loss_functions.py index 60b8dddfe..294e6694c 100644 --- a/packages/openstef-models/src/openstef_models/utils/loss_functions.py +++ b/packages/openstef-models/src/openstef_models/utils/loss_functions.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/workflows/__init__.py b/packages/openstef-models/src/openstef_models/workflows/__init__.py index 3d66390fe..5ac3d10b2 100644 --- a/packages/openstef-models/src/openstef_models/workflows/__init__.py +++ b/packages/openstef-models/src/openstef_models/workflows/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/workflows/custom_component_split_workflow.py b/packages/openstef-models/src/openstef_models/workflows/custom_component_split_workflow.py index 5c535793b..2e1cf24ef 100644 --- a/packages/openstef-models/src/openstef_models/workflows/custom_component_split_workflow.py +++ b/packages/openstef-models/src/openstef_models/workflows/custom_component_split_workflow.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py index a740ac7c0..d2f517c15 100644 --- a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -122,6 +122,11 @@ class CustomForecastingWorkflow(BaseModel): default_factory=list[ForecastingCallback], description="List of callbacks to execute during workflow events." ) model_id: ModelIdentifier = Field(...) + run_name: str | None = Field(default=None, description="Optional name for this workflow run.") + experiment_tags: dict[str, str] = Field( + default_factory=dict, + description="Optional metadata tags for experiment tracking.", + ) _logger: logging.Logger = PrivateAttr(default_factory=lambda: logging.getLogger(__name__)) diff --git a/packages/openstef-models/tests/__init__.py b/packages/openstef-models/tests/__init__.py index 81747127d..72baaab86 100644 --- a/packages/openstef-models/tests/__init__.py +++ b/packages/openstef-models/tests/__init__.py @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/integration/test_integration.py b/packages/openstef-models/tests/integration/test_integration.py index 89c46bce1..c2cbe61f0 100644 --- a/packages/openstef-models/tests/integration/test_integration.py +++ b/packages/openstef-models/tests/integration/test_integration.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/integrations/__init__.py b/packages/openstef-models/tests/unit/integrations/__init__.py index 63d543f53..1c11ceb54 100644 --- a/packages/openstef-models/tests/unit/integrations/__init__.py +++ b/packages/openstef-models/tests/unit/integrations/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/integrations/joblib/__init__.py b/packages/openstef-models/tests/unit/integrations/joblib/__init__.py index 63d543f53..1c11ceb54 100644 --- a/packages/openstef-models/tests/unit/integrations/joblib/__init__.py +++ b/packages/openstef-models/tests/unit/integrations/joblib/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/integrations/joblib/test_joblib_model_serializer.py b/packages/openstef-models/tests/unit/integrations/joblib/test_joblib_model_serializer.py index 861eb244d..0bd2649da 100644 --- a/packages/openstef-models/tests/unit/integrations/joblib/test_joblib_model_serializer.py +++ b/packages/openstef-models/tests/unit/integrations/joblib/test_joblib_model_serializer.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/integrations/mlflow/__init__.py b/packages/openstef-models/tests/unit/integrations/mlflow/__init__.py index 60a258f81..7b9e0469f 100644 --- a/packages/openstef-models/tests/unit/integrations/mlflow/__init__.py +++ b/packages/openstef-models/tests/unit/integrations/mlflow/__init__.py @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage.py b/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage.py index 25e0d8a90..079341b3b 100644 --- a/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage.py +++ b/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -107,7 +107,7 @@ def test_model_roundtrip(storage: MLFlowStorage, model_id: str): # Act storage.save_run_model(model_id=model_id, run_id=run_id, model=original_model) storage.finalize_run(model_id=model_id, run_id=run_id) - loaded_model = storage.load_run_model(run_id=run_id) + loaded_model = storage.load_run_model(model_id=model_id, run_id=run_id) # Assert assert isinstance(loaded_model, SimpleStatefulModel) @@ -145,3 +145,18 @@ def test_search_latest_runs__no_experiment(storage: MLFlowStorage): # Assert assert latest_runs == [] + + +def test_search_run__returns_matching_run(storage: MLFlowStorage, model_id: str): + """Test that search_run finds a run by its name.""" + # Arrange + run_name = "my_training_run" + created_run = storage.create_run(model_id=model_id, run_name=run_name) + created_run_id = cast(str, created_run.info.run_id) + + # Act + found_run = storage.search_run(model_id=model_id, run_name=run_name) + + # Assert + assert found_run is not None + assert cast(str, found_run.info.run_id) == created_run_id diff --git a/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py b/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py index e28725ee3..9561f4d03 100644 --- a/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py +++ b/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -128,7 +128,7 @@ def test_mlflow_storage_callback__on_fit_end__stores_model_and_metrics( # Assert - Model can be loaded from the run run_id = cast(str, runs[0].info.run_id) - loaded_model = callback.storage.load_run_model(run_id=run_id) + loaded_model = callback.storage.load_run_model(model_id=workflow.model_id, run_id=run_id) assert isinstance(loaded_model, ForecastingModel) assert loaded_model.is_fitted @@ -255,3 +255,38 @@ def test_mlflow_storage_callback__model_selection__keeps_better_model( # Act & Assert - Should raise SkipFitting because new model is worse with pytest.raises(SkipFitting, match="New model did not improve"): callback.on_fit_end(context=worse_context, result=worse_result) + + +def test_mlflow_storage_callback__model_selection__skips_on_tag_change( + storage: MLFlowStorage, + workflow: CustomForecastingWorkflow, + fit_result: ModelFitResult, + sample_dataset: TimeSeriesDataset, +): + """Test that model selection keeps the better performing model.""" + # Arrange - Create callback with R2 metric (capital letters) + callback = MLFlowStorageCallback( + storage=storage, + model_selection_metric=(Q(0.5), "R2", "higher_is_better"), + ) + + # Store an initial model + context = WorkflowContext(workflow=workflow) + callback.on_fit_end(context=context, result=fit_result) + + # Create a new result by fitting with a model with a different tag + new_model = ForecastingModel( + forecaster=SimpleTestForecaster( + config=ForecasterConfig(horizons=[LeadTime(timedelta(hours=6))], quantiles=[Q(0.5)]) + ), + tags={"version": "2.0"}, + ) + new_workflow = CustomForecastingWorkflow(model_id="test_model", model=new_model) + new_result = new_model.fit(sample_dataset) + + # Act + result = callback._run_model_selection(workflow=new_workflow, result=new_result) + + # Assert - Should not raise SkipFitting because model changed + assert result is None + assert new_workflow.model == new_model diff --git a/packages/openstef-models/tests/unit/models/component_splitting/__init__.py b/packages/openstef-models/tests/unit/models/component_splitting/__init__.py index 60a258f81..7b9e0469f 100644 --- a/packages/openstef-models/tests/unit/models/component_splitting/__init__.py +++ b/packages/openstef-models/tests/unit/models/component_splitting/__init__.py @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/models/component_splitting/test_constant_component_splitter.py b/packages/openstef-models/tests/unit/models/component_splitting/test_constant_component_splitter.py index e18c3a679..66dfd2423 100644 --- a/packages/openstef-models/tests/unit/models/component_splitting/test_constant_component_splitter.py +++ b/packages/openstef-models/tests/unit/models/component_splitting/test_constant_component_splitter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/models/component_splitting/test_linear_component_splitter.py b/packages/openstef-models/tests/unit/models/component_splitting/test_linear_component_splitter.py index 19df47b67..a9e0197fb 100644 --- a/packages/openstef-models/tests/unit/models/component_splitting/test_linear_component_splitter.py +++ b/packages/openstef-models/tests/unit/models/component_splitting/test_linear_component_splitter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -24,8 +24,8 @@ def sample_timeseries_dataset() -> TimeSeriesDataset: """ data = pd.DataFrame( { - "load": [100.0, 150.0, 200.0, 250.0, 300.0], - "radiation": [0.0, 0.0, 10.0, 50.0, 100.0], + "load": [10.0, 15.0, 20.0, 25.0, 30.0], + "radiation": [0.0, 0.0, 1000.0, 5000.0, 1000.0], "windspeed_100m": [10.0, 5.0, 0.0, 3.0, 7.0], }, index=pd.date_range(datetime.fromisoformat("2025-01-01T00:00:00"), periods=5, freq="1h"), @@ -63,12 +63,12 @@ def test_linear_component_splitter__predict_returns_correct_components( # Check that result has same index as input pd.testing.assert_index_equal(result.data.index, sample_timeseries_dataset.data.index) - # Check that components are non-negative - assert (result.data[EnergyComponentType.SOLAR] >= 0).all() - assert (result.data[EnergyComponentType.WIND] >= 0).all() + # Check that components are negative + assert (result.data[EnergyComponentType.SOLAR] <= 0).all() + assert (result.data[EnergyComponentType.WIND] <= 0).all() # Check that not all components are zero - assert (result.data[EnergyComponentType.SOLAR] > 0).any() or (result.data[EnergyComponentType.WIND] > 0).any() + assert (result.data[EnergyComponentType.SOLAR] < 0).any() or (result.data[EnergyComponentType.WIND] < 0).any() def test_linear_component_splitter__create_input_features( diff --git a/packages/openstef-models/tests/unit/models/forecasting/conftest.py b/packages/openstef-models/tests/unit/models/forecasting/conftest.py index 968e68d8c..0823f4ebc 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/conftest.py +++ b/packages/openstef-models/tests/unit/models/forecasting/conftest.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_base_case_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_base_case_forecaster.py index d5ab45e7d..a9898cb5c 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_base_case_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_base_case_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_constant_median_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_constant_median_forecaster.py index 68557cd45..d4d037ca8 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_constant_median_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_constant_median_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_flatliner_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_flatliner_forecaster.py index 6d0dc30ba..b657061e1 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_flatliner_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_flatliner_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -30,3 +30,25 @@ def test_predict_returns_zeros(config: FlatlinerForecasterConfig, sample_forecas def test_is_fitted_always_true(config: FlatlinerForecasterConfig): forecaster = FlatlinerForecaster(config) assert forecaster.is_fitted + + +def test_predict_returns_median_when_predict_median_is_true(sample_forecast_input_dataset: ForecastInputDataset): + """Test that the forecaster predicts the median of load measurements when predict_median is True.""" + # Arrange + config = FlatlinerForecasterConfig( + quantiles=[Quantile(0.5), Quantile(0.9)], + horizons=[LeadTime(timedelta(hours=1))], + predict_median=True, + ) + forecaster = FlatlinerForecaster(config) + + # Act + forecaster.fit(sample_forecast_input_dataset) + result = forecaster.predict(sample_forecast_input_dataset) + + # Assert + expected_median = sample_forecast_input_dataset.target_series.median() + assert forecaster.is_fitted + assert isinstance(result.data, pd.DataFrame) + assert (result.data == expected_median).all().all() + assert set(result.data.columns) == {q.format() for q in config.quantiles} diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py index 1eba577f5..58ca98159 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_gblinear_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py index dd0e80058..9e51d1047 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_xgboost_forecaster.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/models/test_forecasting_model.py b/packages/openstef-models/tests/unit/models/test_forecasting_model.py index b99a01c9b..9e2d43f30 100644 --- a/packages/openstef-models/tests/unit/models/test_forecasting_model.py +++ b/packages/openstef-models/tests/unit/models/test_forecasting_model.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/test_example.py b/packages/openstef-models/tests/unit/test_example.py index f410538a5..97c6f72a4 100644 --- a/packages/openstef-models/tests/unit/test_example.py +++ b/packages/openstef-models/tests/unit/test_example.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/energy_domain/test_wind_power_feature_adder.py b/packages/openstef-models/tests/unit/transforms/energy_domain/test_wind_power_feature_adder.py index ff6b8efb3..55a014003 100644 --- a/packages/openstef-models/tests/unit/transforms/energy_domain/test_wind_power_feature_adder.py +++ b/packages/openstef-models/tests/unit/transforms/energy_domain/test_wind_power_feature_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/general/test_clipper.py b/packages/openstef-models/tests/unit/transforms/general/test_clipper.py index c2aaf1b02..a45769083 100644 --- a/packages/openstef-models/tests/unit/transforms/general/test_clipper.py +++ b/packages/openstef-models/tests/unit/transforms/general/test_clipper.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/general/test_dimensionality_reducer.py b/packages/openstef-models/tests/unit/transforms/general/test_dimensionality_reducer.py index 7dcc45396..400c2bf3c 100644 --- a/packages/openstef-models/tests/unit/transforms/general/test_dimensionality_reducer.py +++ b/packages/openstef-models/tests/unit/transforms/general/test_dimensionality_reducer.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/general/test_empty_feature_remover.py b/packages/openstef-models/tests/unit/transforms/general/test_empty_feature_remover.py index 8c89b27d8..a1fb1fe51 100644 --- a/packages/openstef-models/tests/unit/transforms/general/test_empty_feature_remover.py +++ b/packages/openstef-models/tests/unit/transforms/general/test_empty_feature_remover.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/general/test_imputer.py b/packages/openstef-models/tests/unit/transforms/general/test_imputer.py index 049fbde48..d72d4f3e0 100644 --- a/packages/openstef-models/tests/unit/transforms/general/test_imputer.py +++ b/packages/openstef-models/tests/unit/transforms/general/test_imputer.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/general/test_nan_dropper.py b/packages/openstef-models/tests/unit/transforms/general/test_nan_dropper.py index 7c6c1d5c3..5a35e6e8b 100644 --- a/packages/openstef-models/tests/unit/transforms/general/test_nan_dropper.py +++ b/packages/openstef-models/tests/unit/transforms/general/test_nan_dropper.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/general/test_sample_weighter.py b/packages/openstef-models/tests/unit/transforms/general/test_sample_weighter.py index 3417085e1..c1bb81f6e 100644 --- a/packages/openstef-models/tests/unit/transforms/general/test_sample_weighter.py +++ b/packages/openstef-models/tests/unit/transforms/general/test_sample_weighter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/general/test_scaler.py b/packages/openstef-models/tests/unit/transforms/general/test_scaler.py index 08cc124ae..cad9fdc67 100644 --- a/packages/openstef-models/tests/unit/transforms/general/test_scaler.py +++ b/packages/openstef-models/tests/unit/transforms/general/test_scaler.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/general/test_selector.py b/packages/openstef-models/tests/unit/transforms/general/test_selector.py new file mode 100644 index 000000000..c6fd78081 --- /dev/null +++ b/packages/openstef-models/tests/unit/transforms/general/test_selector.py @@ -0,0 +1,82 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pandas as pd +import pytest + +from openstef_core.datasets import TimeSeriesDataset +from openstef_core.datasets.validated_datasets import ForecastInputDataset +from openstef_models.transforms.general import Selector +from openstef_models.utils.feature_selection import FeatureSelection + + +@pytest.mark.parametrize( + ("timeseries_type", "feature_selection", "expected_features"), + [ + pytest.param( + TimeSeriesDataset, + FeatureSelection(include={"temperature"}), + {"temperature"}, + id="include_subset", + ), + pytest.param( + TimeSeriesDataset, + FeatureSelection(exclude={"humidity"}), + {"load", "temperature"}, + id="exclude_subset", + ), + pytest.param( + TimeSeriesDataset, + FeatureSelection.ALL, + {"load", "temperature", "humidity"}, + id="all_features", + ), + pytest.param( + TimeSeriesDataset, + FeatureSelection.NONE, + set(), + id="no_features", + ), + pytest.param( + ForecastInputDataset, + FeatureSelection.NONE, + {"load"}, + id="forecast_input_no_features_keep_target", + ), + pytest.param( + ForecastInputDataset, + FeatureSelection(include={"humidity"}), + {"load", "humidity"}, + id="forecast_input_include_subset_keep_target", + ), + ], +) +def test_selector__selects_specified_features( + timeseries_type: type[TimeSeriesDataset], + feature_selection: FeatureSelection, + expected_features: set[str], +) -> None: + """Test that Selector selects only the specified features.""" + # Arrange + data = pd.DataFrame( + { + "load": [100.0, 110.0, 120.0], + "temperature": [20.0, 22.0, 23.0], + "humidity": [60.0, 65.0, 70.0], + }, + index=pd.date_range("2025-01-01", periods=3, freq="1h"), + ) + dataset = timeseries_type(data, timedelta(hours=1)) + + selector = Selector(selection=feature_selection) + + # Act + transformed = selector.fit_transform(dataset) + + # Assert + assert set(transformed.feature_names) == expected_features + for feature in expected_features: + pd.testing.assert_series_equal(transformed.data[feature], dataset.data[feature]) diff --git a/packages/openstef-models/tests/unit/transforms/postprocessing/__init__.py b/packages/openstef-models/tests/unit/transforms/postprocessing/__init__.py index 2a7b1e830..6e0512288 100644 --- a/packages/openstef-models/tests/unit/transforms/postprocessing/__init__.py +++ b/packages/openstef-models/tests/unit/transforms/postprocessing/__init__.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/postprocessing/test_confidence_interval_applicator.py b/packages/openstef-models/tests/unit/transforms/postprocessing/test_confidence_interval_applicator.py index 7111d93db..c07399345 100644 --- a/packages/openstef-models/tests/unit/transforms/postprocessing/test_confidence_interval_applicator.py +++ b/packages/openstef-models/tests/unit/transforms/postprocessing/test_confidence_interval_applicator.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -89,6 +89,47 @@ def test_single_horizon_workflow( assert not result.data["quantile_P10"].isna().any() assert not result.data["quantile_P50"].isna().any() assert not result.data["quantile_P90"].isna().any() + assert not result.data["stdev"].isna().any() + + +def test_no_add_quantiles_from_std( + validation_predictions: ForecastDataset, + predictions: ForecastDataset, +): + """Test complete single-horizon workflow with quantile generation.""" + # Arrange + quantiles = [Quantile(0.1), Quantile(0.5), Quantile(0.9)] + applicator = ConfidenceIntervalApplicator(quantiles=quantiles, add_quantiles_from_std=False) + + # Act + applicator.fit(validation_predictions) + result = applicator.transform(predictions) + + # Assert + assert "quantile_P10" not in result.data.columns + assert "quantile_P90" not in result.data.columns + assert "quantile_P50" in result.data.columns + assert "stdev" in result.data.columns + assert not result.data["quantile_P50"].isna().any() + assert not result.data["stdev"].isna().any() + + +@pytest.mark.parametrize("add_quantiles_from_std", [True, False]) +def test_quantiles_none( + add_quantiles_from_std: bool, + validation_predictions: ForecastDataset, + predictions: ForecastDataset, +): + """Test complete single-horizon workflow with quantile generation.""" + # Arrange + applicator = ConfidenceIntervalApplicator(quantiles=None, add_quantiles_from_std=add_quantiles_from_std) + + # Act + applicator.fit(validation_predictions) + result = applicator.transform(predictions) + + # Assert + pd.testing.assert_frame_equal(result.data, predictions.data) def test_multi_horizon_workflow( @@ -113,6 +154,7 @@ def test_multi_horizon_workflow( assert "quantile_P10" in result.data.columns assert "quantile_P50" in result.data.columns assert "quantile_P90" in result.data.columns + assert "stdev" in result.data.columns # Quantiles should follow normal distribution properties assert (result.data["quantile_P10"] <= result.data["quantile_P50"]).all() diff --git a/packages/openstef-models/tests/unit/transforms/postprocessing/test_quantile_sorter.py b/packages/openstef-models/tests/unit/transforms/postprocessing/test_quantile_sorter.py index 843f21e3e..cece8aaaf 100644 --- a/packages/openstef-models/tests/unit/transforms/postprocessing/test_quantile_sorter.py +++ b/packages/openstef-models/tests/unit/transforms/postprocessing/test_quantile_sorter.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/time_domain/test_cyclic_features_adder.py b/packages/openstef-models/tests/unit/transforms/time_domain/test_cyclic_features_adder.py index 568aef7ef..5e376f713 100644 --- a/packages/openstef-models/tests/unit/transforms/time_domain/test_cyclic_features_adder.py +++ b/packages/openstef-models/tests/unit/transforms/time_domain/test_cyclic_features_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/time_domain/test_datetime_features_adder.py b/packages/openstef-models/tests/unit/transforms/time_domain/test_datetime_features_adder.py index 5fe9a2552..79b52f2e4 100644 --- a/packages/openstef-models/tests/unit/transforms/time_domain/test_datetime_features_adder.py +++ b/packages/openstef-models/tests/unit/transforms/time_domain/test_datetime_features_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/time_domain/test_holiday_features_adder.py b/packages/openstef-models/tests/unit/transforms/time_domain/test_holiday_features_adder.py index 83866b492..8b391f635 100644 --- a/packages/openstef-models/tests/unit/transforms/time_domain/test_holiday_features_adder.py +++ b/packages/openstef-models/tests/unit/transforms/time_domain/test_holiday_features_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/time_domain/test_lags_adder.py b/packages/openstef-models/tests/unit/transforms/time_domain/test_lags_adder.py index bcdaf7d23..430f7b396 100644 --- a/packages/openstef-models/tests/unit/transforms/time_domain/test_lags_adder.py +++ b/packages/openstef-models/tests/unit/transforms/time_domain/test_lags_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/time_domain/test_rolling_aggregates_adder.py b/packages/openstef-models/tests/unit/transforms/time_domain/test_rolling_aggregates_adder.py index 5bda95ea5..b74a42317 100644 --- a/packages/openstef-models/tests/unit/transforms/time_domain/test_rolling_aggregates_adder.py +++ b/packages/openstef-models/tests/unit/transforms/time_domain/test_rolling_aggregates_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/time_domain/test_versioned_lags_adder.py b/packages/openstef-models/tests/unit/transforms/time_domain/test_versioned_lags_adder.py index d7c5ba6b9..a93f949c9 100644 --- a/packages/openstef-models/tests/unit/transforms/time_domain/test_versioned_lags_adder.py +++ b/packages/openstef-models/tests/unit/transforms/time_domain/test_versioned_lags_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/validation/test_completeness_checker.py b/packages/openstef-models/tests/unit/transforms/validation/test_completeness_checker.py index 2ea46f059..d0bbe7b99 100644 --- a/packages/openstef-models/tests/unit/transforms/validation/test_completeness_checker.py +++ b/packages/openstef-models/tests/unit/transforms/validation/test_completeness_checker.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/validation/test_flatline_checker.py b/packages/openstef-models/tests/unit/transforms/validation/test_flatline_checker.py index 71e7596c3..4b9ff23cf 100644 --- a/packages/openstef-models/tests/unit/transforms/validation/test_flatline_checker.py +++ b/packages/openstef-models/tests/unit/transforms/validation/test_flatline_checker.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/validation/test_input_consistency_checker.py b/packages/openstef-models/tests/unit/transforms/validation/test_input_consistency_checker.py index 549b6c4f9..41e49a57c 100644 --- a/packages/openstef-models/tests/unit/transforms/validation/test_input_consistency_checker.py +++ b/packages/openstef-models/tests/unit/transforms/validation/test_input_consistency_checker.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/weather_domain/test_atmosphere_derived_features_adder.py b/packages/openstef-models/tests/unit/transforms/weather_domain/test_atmosphere_derived_features_adder.py index c5a9ffe0b..d250b4fbc 100644 --- a/packages/openstef-models/tests/unit/transforms/weather_domain/test_atmosphere_derived_features_adder.py +++ b/packages/openstef-models/tests/unit/transforms/weather_domain/test_atmosphere_derived_features_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/weather_domain/test_daylight_feature_adder.py b/packages/openstef-models/tests/unit/transforms/weather_domain/test_daylight_feature_adder.py index 3e0dbfffe..84b31ca38 100644 --- a/packages/openstef-models/tests/unit/transforms/weather_domain/test_daylight_feature_adder.py +++ b/packages/openstef-models/tests/unit/transforms/weather_domain/test_daylight_feature_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/transforms/weather_domain/test_radiation_derived_featuers_adder.py b/packages/openstef-models/tests/unit/transforms/weather_domain/test_radiation_derived_featuers_adder.py index 088844f12..5720d9936 100644 --- a/packages/openstef-models/tests/unit/transforms/weather_domain/test_radiation_derived_featuers_adder.py +++ b/packages/openstef-models/tests/unit/transforms/weather_domain/test_radiation_derived_featuers_adder.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -24,7 +24,7 @@ def sample_dataset() -> TimeSeriesDataset: """Create a sample TimeSeriesDataset with radiation data for testing.""" data = pd.DataFrame( - {"radiation": [3600000, 7200000, 5400000, 1800000, 0]}, # J/m² values + {"radiation": [1000, 2000, 3000, 500, 0]}, # W/m² values index=pd.date_range("2025-06-01 08:00", periods=5, freq="h", tz="Europe/Amsterdam"), ) return TimeSeriesDataset(data, timedelta(hours=1)) @@ -33,9 +33,7 @@ def sample_dataset() -> TimeSeriesDataset: @pytest.fixture def sample_dataset_no_tz() -> TimeSeriesDataset: """Create a sample TimeSeriesDataset without timezone for testing error cases.""" - data = pd.DataFrame( - {"radiation": [3600000, 7200000, 5400000]}, index=pd.date_range("2025-06-01", periods=3, freq="h") - ) + data = pd.DataFrame({"radiation": [1000, 2000, 3000]}, index=pd.date_range("2025-06-01", periods=3, freq="h")) return TimeSeriesDataset(data, timedelta(hours=1)) @@ -147,27 +145,6 @@ def test_transform_preserves_original_data_and_metadata( pd.testing.assert_series_equal(result.data[feature], sample_dataset.data[feature]) -def test_transform_radiation_unit_conversion(): - """Test that radiation is correctly converted from J/m² to kWh/m² for pvlib.""" - # Arrange - data = pd.DataFrame( - {"radiation": [3600000, 7200000]}, # 1000 and 2000 kWh/m² when divided by 3.6e6 - index=pd.date_range("2025-06-01 12:00", periods=2, freq="1h", tz="Europe/Amsterdam"), - ) - dataset = TimeSeriesDataset(data, timedelta(hours=1)) - transform = RadiationDerivedFeaturesAdder(coordinate=Coordinate(latitude=Latitude(52.0), longitude=Longitude(5.0))) - - # Act - result = transform.transform(dataset) - - # Assert - # The exact values depend on solar calculations, but we can verify the result is reasonable - assert "dni" in result.data.columns - assert "gti" in result.data.columns - assert (result.data["dni"] >= 0).all() - assert (result.data["gti"] >= 0).all() - - def test_transform_with_empty_dataset(): """Test handling of empty dataset.""" # Arrange @@ -222,7 +199,7 @@ def test_transform_custom_radiation_column(): """Test transform with custom radiation column name.""" # Arrange data = pd.DataFrame( - {"solar_irradiance": [3600000, 7200000]}, + {"solar_irradiance": [1000, 2000]}, index=pd.date_range("2025-06-01 12:00", periods=2, freq="1h", tz="Europe/Amsterdam"), ) dataset = TimeSeriesDataset(data, timedelta(hours=1)) @@ -243,7 +220,7 @@ def test_transform_handles_missing_columns(): # Arrange dataset = TimeSeriesDataset( data=pd.DataFrame( - {"radiation": [3600000, 7200000]}, + {"radiation": [1000, 2000]}, index=pd.date_range("2025-06-01 12:00", periods=2, freq="1h", tz="Europe/Amsterdam"), ), sample_interval=timedelta(hours=1), @@ -271,7 +248,7 @@ def test_pvlib_integration_different_locations(latitude: float, longitude: float """Test RadiationDerivedFeaturesAdder with real pvlib calls across different locations.""" # Arrange data = pd.DataFrame( - {"radiation": [7200000, 5400000]}, # J/m² values + {"radiation": [2000, 3000]}, index=pd.date_range("2025-06-01 12:00", periods=2, freq="1h", tz=timezone), ) dataset = TimeSeriesDataset(data, timedelta(hours=1)) @@ -302,7 +279,7 @@ def test_pvlib_integration_summer_midday(): """Test that solar calculations produce reasonable results during summer midday.""" # Arrange - Use summer midday for better solar radiation data = pd.DataFrame( - {"radiation": [7200000, 10800000, 14400000]}, # High radiation values for summer + {"radiation": [3000, 4000, 5000]}, # High radiation values for summer index=pd.date_range("2025-06-21 11:00", periods=3, freq="1h", tz="Europe/Amsterdam"), ) dataset = TimeSeriesDataset(data, timedelta(hours=1)) @@ -332,7 +309,7 @@ def test_pvlib_integration_surface_orientations(): """Test different surface orientations with real pvlib calculations.""" # Arrange data = pd.DataFrame( - {"radiation": [7200000, 7200000]}, + {"radiation": [2000, 2000]}, index=pd.date_range("2025-06-01 12:00", periods=2, freq="1h", tz="Europe/Amsterdam"), ) dataset = TimeSeriesDataset(data, timedelta(hours=1)) diff --git a/packages/openstef-models/tests/unit/utils/__init__.py b/packages/openstef-models/tests/unit/utils/__init__.py index 60a258f81..7b9e0469f 100644 --- a/packages/openstef-models/tests/unit/utils/__init__.py +++ b/packages/openstef-models/tests/unit/utils/__init__.py @@ -1,3 +1,3 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/utils/test_data_split.py b/packages/openstef-models/tests/unit/utils/test_data_split.py index a59aa8c79..1ca673d19 100644 --- a/packages/openstef-models/tests/unit/utils/test_data_split.py +++ b/packages/openstef-models/tests/unit/utils/test_data_split.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-models/tests/unit/utils/test_feature_selection.py b/packages/openstef-models/tests/unit/utils/test_feature_selection.py new file mode 100644 index 000000000..9d2ef2d0a --- /dev/null +++ b/packages/openstef-models/tests/unit/utils/test_feature_selection.py @@ -0,0 +1,125 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Unit tests for the FeatureSelection utility.""" + +from openstef_models.utils.feature_selection import ( + Exclude, + ExcludeRegex, + FeatureSelection, + Include, + IncludeRegex, +) + + +def test_feature_selection_all(): + """Test FeatureSelection.ALL selects all features.""" + features = ["a", "b", "c"] + assert FeatureSelection.ALL.resolve(features) == features + + +def test_feature_selection_none(): + """Test FeatureSelection.NONE selects no features.""" + features = ["a", "b", "c"] + assert FeatureSelection.NONE.resolve(features) == [] + + +def test_feature_selection_include(): + """Test including specific features.""" + selection = Include("a", "c") + assert selection.resolve(["a", "b", "c", "d"]) == ["a", "c"] + + +def test_feature_selection_exclude(): + """Test excluding specific features.""" + selection = Exclude("b", "d") + assert selection.resolve(["a", "b", "c", "d"]) == ["a", "c"] + + +def test_feature_selection_include_and_exclude(): + """Test combination of include and exclude.""" + selection = FeatureSelection(include={"a", "b", "c"}, exclude={"b"}) + assert selection.resolve(["a", "b", "c", "d"]) == ["a", "c"] + + +def test_feature_selection_combine_both_none(): + """Test combining two ALL selections preserves None.""" + combined = FeatureSelection.ALL.combine(FeatureSelection.ALL) + assert combined.include is None + assert combined.exclude is None + assert combined.resolve(["a", "b", "c"]) == ["a", "b", "c"] + + +def test_feature_selection_combine_include_sets(): + """Test combining include sets.""" + sel1 = Include("a", "b") + sel2 = Include("c", "d") + combined = sel1.combine(sel2) + assert set(combined.resolve(["a", "b", "c", "d", "e"])) == {"a", "b", "c", "d"} + + +def test_feature_selection_combine_mixed(): + """Test combining selections with different patterns.""" + sel1 = FeatureSelection(include={"a", "b"}, exclude={"b"}) + sel2 = FeatureSelection(include={"c"}, exclude={"a"}) + combined = sel1.combine(sel2) + assert combined.include == {"a", "b", "c"} + assert combined.exclude == {"a", "b"} + assert combined.resolve(["a", "b", "c", "d"]) == ["c"] # exclusion applied last + + +def test_regex_include_pattern(): + """Test including features by regex pattern.""" + selection = IncludeRegex(r"^temp_.*") + features = ["temp_sensor", "temp_valve", "pressure_sensor", "humidity"] + assert selection.resolve(features) == ["temp_sensor", "temp_valve"] + + +def test_regex_exclude_pattern(): + """Test excluding features by regex pattern.""" + selection = ExcludeRegex(r".*_old$") + features = ["temp_new", "pressure_old", "humidity_current", "wind_old"] + assert selection.resolve(features) == ["temp_new", "humidity_current"] + + +def test_regex_include_and_exclude(): + """Test combination of include and exclude regex patterns.""" + selection = FeatureSelection(include_regex={r"^temp_.*", r"^pressure_.*"}, exclude_regex={r".*_old$"}) + features = ["temp_sensor", "temp_old", "pressure_valve", "humidity_sensor", "pressure_old"] + assert selection.resolve(features) == ["temp_sensor", "pressure_valve"] + + +def test_exact_and_regex_include(): + """Test combining exact and regex include patterns.""" + selection = FeatureSelection(include={"a"}, include_regex={r"^b.*"}) + features = ["a", "b1", "b2", "c"] + assert set(selection.resolve(features)) == {"a", "b1", "b2"} + + +def test_exact_and_regex_exclude(): + """Test combining exact and regex exclude patterns.""" + selection = FeatureSelection(exclude={"a"}, exclude_regex={r"^b.*"}) + features = ["a", "b1", "b2", "c"] + assert selection.resolve(features) == ["c"] + + +def test_combine_exact_and_regex(): + """Test combining selections with exact and regex patterns.""" + sel1 = Include("a", "b") + sel2 = IncludeRegex(r"^c.*") + combined = sel1.combine(sel2) + features = ["a", "b", "c1", "c2", "d"] + assert set(combined.resolve(features)) == {"a", "b", "c1", "c2"} + + +def test_combine_all_types(): + """Test combining all types of patterns.""" + sel1 = FeatureSelection(include={"a"}, exclude_regex={r".*_old$"}) + sel2 = FeatureSelection(include_regex={r"^temp_.*"}, exclude={"a"}) + combined = sel1.combine(sel2) + features = ["a", "temp_sensor", "temp_old", "pressure"] + # include: {a} + regex temp_.* + # exclude: {a} + regex .*_old$ + # So: a excluded, temp_sensor included, temp_old excluded + assert combined.resolve(features) == ["temp_sensor"] diff --git a/packages/openstef-models/tests/unit/utils/test_loss_functions.py b/packages/openstef-models/tests/unit/utils/test_loss_functions.py index c5b2c6367..587360413 100644 --- a/packages/openstef-models/tests/unit/utils/test_loss_functions.py +++ b/packages/openstef-models/tests/unit/utils/test_loss_functions.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/pyproject.toml b/pyproject.toml index 87ef62841..b81bb5eba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 @@ -15,7 +15,7 @@ readme = "README.md" keywords = [ "energy", "forecasting", "machinelearning" ] license = "MPL-2.0" authors = [ - { name = "Alliander N.V", email = "short.term.energy.forecasts@alliander.com" }, + { name = "Alliander N.V", email = "openstef@lfenergy.org" }, ] requires-python = ">=3.12,<4.0" classifiers = [ @@ -81,10 +81,11 @@ microsoft-python-type-stubs = { git = "git+https://github.com/microsoft/python-t [tool.uv.workspace] members = [ - "packages/openstef-models", - "packages/openstef-beam", "docs", + "examples", + "packages/openstef-beam", "packages/openstef-core", + "packages/openstef-models", ] [tool.ruff] @@ -265,7 +266,7 @@ help = "Check REUSE compliance (with optional fix)" args = [ { name = "fix", type = "boolean", help = "Automatically fix REUSE compliance issues before lint check" } ] control.expr = "fix" switch = [ - { case = "True", cmd = "uv run tools/reuse-fix.py --license \"MPL-2.0\" --copyright \"Contributors to the OpenSTEF project \"" }, + { case = "True", cmd = "uv run tools/reuse-fix.py --license \"MPL-2.0\" --copyright \"Contributors to the OpenSTEF project \"" }, { cmd = "reuse lint" }, ] diff --git a/tools/reuse-fix.py b/tools/reuse-fix.py index 4fc5b8f72..50785c03e 100755 --- a/tools/reuse-fix.py +++ b/tools/reuse-fix.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 diff --git a/uv.lock b/uv.lock index 013babc38..92b9273aa 100644 --- a/uv.lock +++ b/uv.lock @@ -1,6 +1,10 @@ version = 1 revision = 3 requires-python = ">=3.12, <4.0" +resolution-markers = [ + "python_full_version >= '3.14'", + "python_full_version < '3.14'", +] [manifest] members = [ @@ -8,6 +12,7 @@ members = [ "openstef-beam", "openstef-core", "openstef-docs", + "openstef-examples", "openstef-models", ] @@ -216,6 +221,71 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3b/00/2344469e2084fb287c2e0b57b72910309874c3245463acd6cf5e3db69324/appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128", size = 9566, upload-time = "2020-05-11T07:59:49.499Z" }, ] +[[package]] +name = "appnope" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/35/5d/752690df9ef5b76e169e68d6a129fa6d08a7100ca7f754c89495db3c6019/appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee", size = 4170, upload-time = "2024-02-06T09:43:11.258Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/29/5ecc3a15d5a33e31b26c11426c45c501e439cb865d0bff96315d86443b78/appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c", size = 4321, upload-time = "2024-02-06T09:43:09.663Z" }, +] + +[[package]] +name = "argon2-cffi" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "argon2-cffi-bindings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0e/89/ce5af8a7d472a67cc819d5d998aa8c82c5d860608c4db9f46f1162d7dab9/argon2_cffi-25.1.0.tar.gz", hash = "sha256:694ae5cc8a42f4c4e2bf2ca0e64e51e23a040c6a517a85074683d3959e1346c1", size = 45706, upload-time = "2025-06-03T06:55:32.073Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/d3/a8b22fa575b297cd6e3e3b0155c7e25db170edf1c74783d6a31a2490b8d9/argon2_cffi-25.1.0-py3-none-any.whl", hash = "sha256:fdc8b074db390fccb6eb4a3604ae7231f219aa669a2652e0f20e16ba513d5741", size = 14657, upload-time = "2025-06-03T06:55:30.804Z" }, +] + +[[package]] +name = "argon2-cffi-bindings" +version = "25.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/2d/db8af0df73c1cf454f71b2bbe5e356b8c1f8041c979f505b3d3186e520a9/argon2_cffi_bindings-25.1.0.tar.gz", hash = "sha256:b957f3e6ea4d55d820e40ff76f450952807013d361a65d7f28acc0acbf29229d", size = 1783441, upload-time = "2025-07-30T10:02:05.147Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/97/3c0a35f46e52108d4707c44b95cfe2afcafc50800b5450c197454569b776/argon2_cffi_bindings-25.1.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:3d3f05610594151994ca9ccb3c771115bdb4daef161976a266f0dd8aa9996b8f", size = 54393, upload-time = "2025-07-30T10:01:40.97Z" }, + { url = "https://files.pythonhosted.org/packages/9d/f4/98bbd6ee89febd4f212696f13c03ca302b8552e7dbf9c8efa11ea4a388c3/argon2_cffi_bindings-25.1.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:8b8efee945193e667a396cbc7b4fb7d357297d6234d30a489905d96caabde56b", size = 29328, upload-time = "2025-07-30T10:01:41.916Z" }, + { url = "https://files.pythonhosted.org/packages/43/24/90a01c0ef12ac91a6be05969f29944643bc1e5e461155ae6559befa8f00b/argon2_cffi_bindings-25.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3c6702abc36bf3ccba3f802b799505def420a1b7039862014a65db3205967f5a", size = 31269, upload-time = "2025-07-30T10:01:42.716Z" }, + { url = "https://files.pythonhosted.org/packages/d4/d3/942aa10782b2697eee7af5e12eeff5ebb325ccfb86dd8abda54174e377e4/argon2_cffi_bindings-25.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a1c70058c6ab1e352304ac7e3b52554daadacd8d453c1752e547c76e9c99ac44", size = 86558, upload-time = "2025-07-30T10:01:43.943Z" }, + { url = "https://files.pythonhosted.org/packages/0d/82/b484f702fec5536e71836fc2dbc8c5267b3f6e78d2d539b4eaa6f0db8bf8/argon2_cffi_bindings-25.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e2fd3bfbff3c5d74fef31a722f729bf93500910db650c925c2d6ef879a7e51cb", size = 92364, upload-time = "2025-07-30T10:01:44.887Z" }, + { url = "https://files.pythonhosted.org/packages/c9/c1/a606ff83b3f1735f3759ad0f2cd9e038a0ad11a3de3b6c673aa41c24bb7b/argon2_cffi_bindings-25.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4f9665de60b1b0e99bcd6be4f17d90339698ce954cfd8d9cf4f91c995165a92", size = 85637, upload-time = "2025-07-30T10:01:46.225Z" }, + { url = "https://files.pythonhosted.org/packages/44/b4/678503f12aceb0262f84fa201f6027ed77d71c5019ae03b399b97caa2f19/argon2_cffi_bindings-25.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ba92837e4a9aa6a508c8d2d7883ed5a8f6c308c89a4790e1e447a220deb79a85", size = 91934, upload-time = "2025-07-30T10:01:47.203Z" }, + { url = "https://files.pythonhosted.org/packages/f0/c7/f36bd08ef9bd9f0a9cff9428406651f5937ce27b6c5b07b92d41f91ae541/argon2_cffi_bindings-25.1.0-cp314-cp314t-win32.whl", hash = "sha256:84a461d4d84ae1295871329b346a97f68eade8c53b6ed9a7ca2d7467f3c8ff6f", size = 28158, upload-time = "2025-07-30T10:01:48.341Z" }, + { url = "https://files.pythonhosted.org/packages/b3/80/0106a7448abb24a2c467bf7d527fe5413b7fdfa4ad6d6a96a43a62ef3988/argon2_cffi_bindings-25.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b55aec3565b65f56455eebc9b9f34130440404f27fe21c3b375bf1ea4d8fbae6", size = 32597, upload-time = "2025-07-30T10:01:49.112Z" }, + { url = "https://files.pythonhosted.org/packages/05/b8/d663c9caea07e9180b2cb662772865230715cbd573ba3b5e81793d580316/argon2_cffi_bindings-25.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:87c33a52407e4c41f3b70a9c2d3f6056d88b10dad7695be708c5021673f55623", size = 28231, upload-time = "2025-07-30T10:01:49.92Z" }, + { url = "https://files.pythonhosted.org/packages/1d/57/96b8b9f93166147826da5f90376e784a10582dd39a393c99bb62cfcf52f0/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:aecba1723ae35330a008418a91ea6cfcedf6d31e5fbaa056a166462ff066d500", size = 54121, upload-time = "2025-07-30T10:01:50.815Z" }, + { url = "https://files.pythonhosted.org/packages/0a/08/a9bebdb2e0e602dde230bdde8021b29f71f7841bd54801bcfd514acb5dcf/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2630b6240b495dfab90aebe159ff784d08ea999aa4b0d17efa734055a07d2f44", size = 29177, upload-time = "2025-07-30T10:01:51.681Z" }, + { url = "https://files.pythonhosted.org/packages/b6/02/d297943bcacf05e4f2a94ab6f462831dc20158614e5d067c35d4e63b9acb/argon2_cffi_bindings-25.1.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:7aef0c91e2c0fbca6fc68e7555aa60ef7008a739cbe045541e438373bc54d2b0", size = 31090, upload-time = "2025-07-30T10:01:53.184Z" }, + { url = "https://files.pythonhosted.org/packages/c1/93/44365f3d75053e53893ec6d733e4a5e3147502663554b4d864587c7828a7/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e021e87faa76ae0d413b619fe2b65ab9a037f24c60a1e6cc43457ae20de6dc6", size = 81246, upload-time = "2025-07-30T10:01:54.145Z" }, + { url = "https://files.pythonhosted.org/packages/09/52/94108adfdd6e2ddf58be64f959a0b9c7d4ef2fa71086c38356d22dc501ea/argon2_cffi_bindings-25.1.0-cp39-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d3e924cfc503018a714f94a49a149fdc0b644eaead5d1f089330399134fa028a", size = 87126, upload-time = "2025-07-30T10:01:55.074Z" }, + { url = "https://files.pythonhosted.org/packages/72/70/7a2993a12b0ffa2a9271259b79cc616e2389ed1a4d93842fac5a1f923ffd/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c87b72589133f0346a1cb8d5ecca4b933e3c9b64656c9d175270a000e73b288d", size = 80343, upload-time = "2025-07-30T10:01:56.007Z" }, + { url = "https://files.pythonhosted.org/packages/78/9a/4e5157d893ffc712b74dbd868c7f62365618266982b64accab26bab01edc/argon2_cffi_bindings-25.1.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1db89609c06afa1a214a69a462ea741cf735b29a57530478c06eb81dd403de99", size = 86777, upload-time = "2025-07-30T10:01:56.943Z" }, + { url = "https://files.pythonhosted.org/packages/74/cd/15777dfde1c29d96de7f18edf4cc94c385646852e7c7b0320aa91ccca583/argon2_cffi_bindings-25.1.0-cp39-abi3-win32.whl", hash = "sha256:473bcb5f82924b1becbb637b63303ec8d10e84c8d241119419897a26116515d2", size = 27180, upload-time = "2025-07-30T10:01:57.759Z" }, + { url = "https://files.pythonhosted.org/packages/e2/c6/a759ece8f1829d1f162261226fbfd2c6832b3ff7657384045286d2afa384/argon2_cffi_bindings-25.1.0-cp39-abi3-win_amd64.whl", hash = "sha256:a98cd7d17e9f7ce244c0803cad3c23a7d379c301ba618a5fa76a67d116618b98", size = 31715, upload-time = "2025-07-30T10:01:58.56Z" }, + { url = "https://files.pythonhosted.org/packages/42/b9/f8d6fa329ab25128b7e98fd83a3cb34d9db5b059a9847eddb840a0af45dd/argon2_cffi_bindings-25.1.0-cp39-abi3-win_arm64.whl", hash = "sha256:b0fdbcf513833809c882823f98dc2f931cf659d9a1429616ac3adebb49f5db94", size = 27149, upload-time = "2025-07-30T10:01:59.329Z" }, +] + +[[package]] +name = "arrow" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/33/032cdc44182491aa708d06a68b62434140d8c50820a087fac7af37703357/arrow-1.4.0.tar.gz", hash = "sha256:ed0cc050e98001b8779e84d461b0098c4ac597e88704a655582b21d116e526d7", size = 152931, upload-time = "2025-10-18T17:46:46.761Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/c9/d7977eaacb9df673210491da99e6a247e93df98c715fc43fd136ce1d3d33/arrow-1.4.0-py3-none-any.whl", hash = "sha256:749f0769958ebdc79c173ff0b0670d59051a535fa26e8eba02953dc19eb43205", size = 68797, upload-time = "2025-10-18T17:46:45.663Z" }, +] + [[package]] name = "asttokens" version = "3.0.1" @@ -225,6 +295,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/39/e7eaf1799466a4aef85b6a4fe7bd175ad2b1c6345066aa33f1f58d4b18d0/asttokens-3.0.1-py3-none-any.whl", hash = "sha256:15a3ebc0f43c2d0a50eeafea25e19046c68398e487b9f1f5b517f7c0f40f976a", size = 27047, upload-time = "2025-11-15T16:43:16.109Z" }, ] +[[package]] +name = "async-lru" +version = "2.0.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/4d/71ec4d3939dc755264f680f6c2b4906423a304c3d18e96853f0a595dfe97/async_lru-2.0.5.tar.gz", hash = "sha256:481d52ccdd27275f42c43a928b4a50c3bfb2d67af4e78b170e3e0bb39c66e5bb", size = 10380, upload-time = "2025-03-16T17:25:36.919Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/49/d10027df9fce941cb8184e78a02857af36360d33e1721df81c5ed2179a1a/async_lru-2.0.5-py3-none-any.whl", hash = "sha256:ab95404d8d2605310d345932697371a5f40def0487c03d6d0ad9138de52c9943", size = 6069, upload-time = "2025-03-16T17:25:35.422Z" }, +] + [[package]] name = "attrs" version = "23.2.0" @@ -284,6 +363,23 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/94/fe/3aed5d0be4d404d12d36ab97e2f1791424d9ca39c2f754a6285d59a3b01d/beautifulsoup4-4.14.2-py3-none-any.whl", hash = "sha256:5ef6fa3a8cbece8488d66985560f97ed091e22bbc4e9c2338508a9d5de6d4515", size = 106392, upload-time = "2025-09-29T10:05:43.771Z" }, ] +[[package]] +name = "bleach" +version = "6.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "webencodings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/07/18/3c8523962314be6bf4c8989c79ad9531c825210dd13a8669f6b84336e8bd/bleach-6.3.0.tar.gz", hash = "sha256:6f3b91b1c0a02bb9a78b5a454c92506aa0fdf197e1d5e114d2e00c6f64306d22", size = 203533, upload-time = "2025-10-27T17:57:39.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cd/3a/577b549de0cc09d95f11087ee63c739bba856cd3952697eec4c4bb91350a/bleach-6.3.0-py3-none-any.whl", hash = "sha256:fe10ec77c93ddf3d13a73b035abaac7a9f5e436513864ccdad516693213c65d6", size = 164437, upload-time = "2025-10-27T17:57:37.538Z" }, +] + +[package.optional-dependencies] +css = [ + { name = "tinycss2" }, +] + [[package]] name = "blinker" version = "1.9.0" @@ -492,6 +588,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, ] +[[package]] +name = "choreographer" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "logistro" }, + { name = "simplejson" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/47/64a035c6f764450ea9f902cbeba14c8c70316c2641125510066d8f912bfa/choreographer-1.2.1.tar.gz", hash = "sha256:022afd72b1e9b0bcb950420b134e70055a294c791b6f36cfb47d89745b701b5f", size = 43399, upload-time = "2025-11-09T23:04:44.749Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/9f/d73dfb85d7a5b1a56a99adc50f2074029468168c970ff5daeade4ad819e4/choreographer-1.2.1-py3-none-any.whl", hash = "sha256:9af5385effa3c204dbc337abf7ac74fd8908ced326a15645dc31dde75718c77e", size = 49338, upload-time = "2025-11-09T23:04:43.154Z" }, +] + [[package]] name = "click" version = "8.3.1" @@ -522,6 +631,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] +[[package]] +name = "comm" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/13/7d740c5849255756bc17888787313b61fd38a0a8304fc4f073dfc46122aa/comm-0.2.3.tar.gz", hash = "sha256:2dc8048c10962d55d7ad693be1e7045d891b7ce8d999c97963a5e3e99c055971", size = 6319, upload-time = "2025-07-25T14:02:04.452Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/97/891a0971e1e4a8c5d2b20bbe0e524dc04548d2307fee33cdeba148fd4fc7/comm-0.2.3-py3-none-any.whl", hash = "sha256:c615d91d75f7f04f095b30d1c1711babd43bdc6419c1be9886a85f2f4e489417", size = 7294, upload-time = "2025-07-25T14:02:02.896Z" }, +] + [[package]] name = "contourpy" version = "1.3.3" @@ -741,6 +859,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a7/27/b822b474aaefb684d11df358d52e012699a2a8af231f9b47c54b73f280cb/databricks_sdk-0.73.0-py3-none-any.whl", hash = "sha256:a4d3cfd19357a2b459d2dc3101454d7f0d1b62865ce099c35d0c342b66ac64ff", size = 753896, upload-time = "2025-11-05T06:52:56.451Z" }, ] +[[package]] +name = "debugpy" +version = "1.8.18" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/62/1a/7cb5531840d7ba5d9329644109e62adee41f2f0083d9f8a4039f01de58cf/debugpy-1.8.18.tar.gz", hash = "sha256:02551b1b84a91faadd2db9bc4948873f2398190c95b3cc6f97dc706f43e8c433", size = 1644467, upload-time = "2025-12-10T19:48:07.236Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/01/439626e3572a33ac543f25bc1dac1e80bc01c7ce83f3c24dc4441302ca13/debugpy-1.8.18-cp312-cp312-macosx_15_0_universal2.whl", hash = "sha256:530c38114725505a7e4ea95328dbc24aabb9be708c6570623c8163412e6d1d6b", size = 2549961, upload-time = "2025-12-10T19:48:21.73Z" }, + { url = "https://files.pythonhosted.org/packages/cd/73/1eeaa15c20a2b627be57a65bc1ebf2edd8d896950eac323588b127d776f2/debugpy-1.8.18-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:a114865099283cbed4c9330cb0c9cb7a04cfa92e803577843657302d526141ec", size = 4309855, upload-time = "2025-12-10T19:48:23.41Z" }, + { url = "https://files.pythonhosted.org/packages/e4/6f/2da8ded21ae55df7067e57bd7f67ffed7e08b634f29bdba30c03d3f19918/debugpy-1.8.18-cp312-cp312-win32.whl", hash = "sha256:4d26736dfabf404e9f3032015ec7b0189e7396d0664e29e5bdbe7ac453043c95", size = 5280577, upload-time = "2025-12-10T19:48:25.386Z" }, + { url = "https://files.pythonhosted.org/packages/f5/8e/ebe887218c5b84f9421de7eb7bb7cdf196e84535c3f504a562219297d755/debugpy-1.8.18-cp312-cp312-win_amd64.whl", hash = "sha256:7e68ba950acbcf95ee862210133681f408cbb78d1c9badbb515230ec55ed6487", size = 5322458, upload-time = "2025-12-10T19:48:28.049Z" }, + { url = "https://files.pythonhosted.org/packages/fe/3f/45af037e91e308274a092eb6a86282865fb1f11148cdb7616e811aae33d7/debugpy-1.8.18-cp313-cp313-macosx_15_0_universal2.whl", hash = "sha256:75d14dd04b617ee38e46786394ec0dd5e1ac5e3d10ffb034fd6c7b72111174c2", size = 2538826, upload-time = "2025-12-10T19:48:29.434Z" }, + { url = "https://files.pythonhosted.org/packages/cc/f4/2de6bf624de05134d1bbe0a8750d484363cd212c3ade3d04f5c77d47d0ce/debugpy-1.8.18-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:1b224887af5121fa702f9f542968170d104e3f9cac827d85fdefe89702dc235c", size = 4292542, upload-time = "2025-12-10T19:48:30.836Z" }, + { url = "https://files.pythonhosted.org/packages/93/54/89de7ef84d5ac39fc64a773feaedd902536cc5295814cd22d19c6d9dea35/debugpy-1.8.18-cp313-cp313-win32.whl", hash = "sha256:636a5445a3336e4aba323a3545ca2bb373b04b0bc14084a4eb20c989db44429f", size = 5280460, upload-time = "2025-12-10T19:48:32.696Z" }, + { url = "https://files.pythonhosted.org/packages/4f/59/651329e618406229edbef6508a5aa05e43cd027f042740c5b27e46854b23/debugpy-1.8.18-cp313-cp313-win_amd64.whl", hash = "sha256:6da217ac8c1152d698b9809484d50c75bef9cc02fd6886a893a6df81ec952ff8", size = 5322399, upload-time = "2025-12-10T19:48:35.057Z" }, + { url = "https://files.pythonhosted.org/packages/36/59/5e8bf46a66ca9dfcd0ce4f35c07085aeb60d99bf5c52135973a4e197ed41/debugpy-1.8.18-cp314-cp314-macosx_15_0_universal2.whl", hash = "sha256:be7f622d250fe3429571e84572eb771023f1da22c754f28d2c60a10d74a4cc1b", size = 2537336, upload-time = "2025-12-10T19:48:36.463Z" }, + { url = "https://files.pythonhosted.org/packages/a1/5a/3b37cc266a69da83a4febaa4267bb2062d4bec5287036e2f23d9a30a788c/debugpy-1.8.18-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:df8bf7cd78019d5d155213bf5a1818b36403d0c3758d669e76827d4db026b840", size = 4268696, upload-time = "2025-12-10T19:48:37.855Z" }, + { url = "https://files.pythonhosted.org/packages/de/4b/1e13586444440e5754b70055449b70afa187aaa167fa4c20c0c05d9c3b80/debugpy-1.8.18-cp314-cp314-win32.whl", hash = "sha256:32dd56d50fe15c47d0f930a7f0b9d3e5eb8ed04770bc6c313fba6d226f87e1e8", size = 5280624, upload-time = "2025-12-10T19:48:39.28Z" }, + { url = "https://files.pythonhosted.org/packages/7a/21/f8c12baa16212859269dc4c3e4b413778ec1154d332896d3c4cca96ac660/debugpy-1.8.18-cp314-cp314-win_amd64.whl", hash = "sha256:714b61d753cfe3ed5e7bf0aad131506d750e271726ac86e3e265fd7eeebbe765", size = 5321982, upload-time = "2025-12-10T19:48:41.086Z" }, + { url = "https://files.pythonhosted.org/packages/dc/0d/bf7ac329c132436c57124202b5b5ccd6366e5d8e75eeb184cf078c826e8d/debugpy-1.8.18-py2.py3-none-any.whl", hash = "sha256:ab8cf0abe0fe2dfe1f7e65abc04b1db8740f9be80c1274acb625855c5c3ece6e", size = 5286576, upload-time = "2025-12-10T19:48:56.071Z" }, +] + [[package]] name = "decorator" version = "5.2.1" @@ -750,6 +889,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, ] +[[package]] +name = "defusedxml" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/d5/c66da9b79e5bdb124974bfe172b4daf3c984ebd9c2a06e2b8a4dc7331c72/defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69", size = 75520, upload-time = "2021-03-08T10:59:26.269Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/6c/aa3f2f849e01cb6a001cd8554a88d4c77c5c1a31c95bdf1cf9301e6d9ef4/defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61", size = 25604, upload-time = "2021-03-08T10:59:24.45Z" }, +] + [[package]] name = "docker" version = "7.1.0" @@ -831,6 +979,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/eb/23/dfb161e91db7c92727db505dc72a384ee79681fe0603f706f9f9f52c2901/fastapi-0.121.2-py3-none-any.whl", hash = "sha256:f2d80b49a86a846b70cc3a03eb5ea6ad2939298bf6a7fe377aa9cd3dd079d358", size = 109201, upload-time = "2025-11-13T17:05:52.718Z" }, ] +[[package]] +name = "fastjsonschema" +version = "2.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/b5/23b216d9d985a956623b6bd12d4086b60f0059b27799f23016af04a74ea1/fastjsonschema-2.21.2.tar.gz", hash = "sha256:b1eb43748041c880796cd077f1a07c3d94e93ae84bba5ed36800a33554ae05de", size = 374130, upload-time = "2025-08-14T18:49:36.666Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/a8/20d0723294217e47de6d9e2e40fd4a9d2f7c4b6ef974babd482a59743694/fastjsonschema-2.21.2-py3-none-any.whl", hash = "sha256:1c797122d0a86c5cace2e54bf4e819c36223b552017172f32c5c024a6b77e463", size = 24024, upload-time = "2025-08-14T18:49:34.776Z" }, +] + [[package]] name = "fhconfparser" version = "2024.1" @@ -924,6 +1081,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c7/93/0dd45cd283c32dea1545151d8c3637b4b8c53cdb3a625aeb2885b184d74d/fonttools-4.60.1-py3-none-any.whl", hash = "sha256:906306ac7afe2156fcf0042173d6ebbb05416af70f6b370967b47f8f00103bbb", size = 1143175, upload-time = "2025-09-29T21:13:24.134Z" }, ] +[[package]] +name = "fqdn" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/3e/a80a8c077fd798951169626cde3e239adeba7dab75deb3555716415bd9b0/fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f", size = 6015, upload-time = "2021-03-11T07:16:29.08Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/58/8acf1b3e91c58313ce5cb67df61001fc9dcd21be4fadb76c1a2d540e09ed/fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014", size = 9121, upload-time = "2021-03-11T07:16:28.351Z" }, +] + [[package]] name = "frozenlist" version = "1.8.0" @@ -1184,7 +1350,7 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "1.1.4" +version = "1.2.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -1198,9 +1364,9 @@ dependencies = [ { name = "typer-slim" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/44/8a/3cba668d9cd1b4e3eb6c1c3ff7bf0f74a7809bdbb5c327bcdbdbac802d23/huggingface_hub-1.1.4.tar.gz", hash = "sha256:a7424a766fffa1a11e4c1ac2040a1557e2101f86050fdf06627e7b74cc9d2ad6", size = 606842, upload-time = "2025-11-13T10:51:57.602Z" } +sdist = { url = "https://files.pythonhosted.org/packages/67/51/6db95c854e5eb3af8e0edfbfad7588983f63be39662054a49d5e116fb65d/huggingface_hub-1.2.2.tar.gz", hash = "sha256:b5b97bd37f4fe5b898a467373044649c94ee32006c032ce8fb835abe9d92ea28", size = 614598, upload-time = "2025-12-10T14:51:50.208Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/33/3f/969137c9d9428ed8bf171d27604243dd950a47cac82414826e2aebbc0a4c/huggingface_hub-1.1.4-py3-none-any.whl", hash = "sha256:867799fbd2ef338b7f8b03d038d9c0e09415dfe45bb2893b48a510d1d746daa5", size = 515580, upload-time = "2025-11-13T10:51:55.742Z" }, + { url = "https://files.pythonhosted.org/packages/71/40/eb2f3a2c09bebf2fc989ba8bf701ce1f56b2f054b51e1a0fcb3e5d23f13a/huggingface_hub-1.2.2-py3-none-any.whl", hash = "sha256:0f55d7d22058fbf8b29d8095aeee80a7b695aa764f906a21e886c1f87223718f", size = 520964, upload-time = "2025-12-10T14:51:48.206Z" }, ] [[package]] @@ -1242,6 +1408,30 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, ] +[[package]] +name = "ipykernel" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "appnope", marker = "sys_platform == 'darwin'" }, + { name = "comm" }, + { name = "debugpy" }, + { name = "ipython" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "matplotlib-inline" }, + { name = "nest-asyncio" }, + { name = "packaging" }, + { name = "psutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/a4/4948be6eb88628505b83a1f2f40d90254cab66abf2043b3c40fa07dfce0f/ipykernel-7.1.0.tar.gz", hash = "sha256:58a3fc88533d5930c3546dc7eac66c6d288acde4f801e2001e65edc5dc9cf0db", size = 174579, upload-time = "2025-10-27T09:46:39.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/17/20c2552266728ceba271967b87919664ecc0e33efca29c3efc6baf88c5f9/ipykernel-7.1.0-py3-none-any.whl", hash = "sha256:763b5ec6c5b7776f6a8d7ce09b267693b4e5ce75cb50ae696aaefb3c85e1ea4c", size = 117968, upload-time = "2025-10-27T09:46:37.805Z" }, +] + [[package]] name = "ipython" version = "9.7.0" @@ -1275,6 +1465,34 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" }, ] +[[package]] +name = "ipywidgets" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "comm" }, + { name = "ipython" }, + { name = "jupyterlab-widgets" }, + { name = "traitlets" }, + { name = "widgetsnbextension" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4c/ae/c5ce1edc1afe042eadb445e95b0671b03cee61895264357956e61c0d2ac0/ipywidgets-8.1.8.tar.gz", hash = "sha256:61f969306b95f85fba6b6986b7fe45d73124d1d9e3023a8068710d47a22ea668", size = 116739, upload-time = "2025-11-01T21:18:12.393Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/56/6d/0d9848617b9f753b87f214f1c682592f7ca42de085f564352f10f0843026/ipywidgets-8.1.8-py3-none-any.whl", hash = "sha256:ecaca67aed704a338f88f67b1181b58f821ab5dc89c1f0f5ef99db43c1c2921e", size = 139808, upload-time = "2025-11-01T21:18:10.956Z" }, +] + +[[package]] +name = "isoduration" +version = "20.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "arrow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/1a/3c8edc664e06e6bd06cce40c6b22da5f1429aa4224d0c590f3be21c91ead/isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9", size = 11649, upload-time = "2020-11-01T11:00:00.312Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/55/e5326141505c5d5e34c5e0935d2908a74e4561eca44108fbfb9c13d2911a/isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042", size = 11321, upload-time = "2020-11-01T10:59:58.02Z" }, +] + [[package]] name = "itsdangerous" version = "2.2.0" @@ -1338,6 +1556,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/79/ee/5134fa786f6c4090ac5daec7d18656ca825f7a7754139e38aaad95e544a2/joserfc-1.4.2-py3-none-any.whl", hash = "sha256:b15a5ea3a464c37e8006105665c159a288892fa73856fa40be60266dbc20b49d", size = 66435, upload-time = "2025-11-17T09:03:14.46Z" }, ] +[[package]] +name = "json5" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/ae/929aee9619e9eba9015207a9d2c1c54db18311da7eb4dcf6d41ad6f0eb67/json5-0.12.1.tar.gz", hash = "sha256:b2743e77b3242f8d03c143dd975a6ec7c52e2f2afe76ed934e53503dd4ad4990", size = 52191, upload-time = "2025-08-12T19:47:42.583Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/e2/05328bd2621be49a6fed9e3030b1e51a2d04537d3f816d211b9cc53c5262/json5-0.12.1-py3-none-any.whl", hash = "sha256:d9c9b3bc34a5f54d43c35e11ef7cb87d8bdd098c6ace87117a7b7e83e705c1d5", size = 36119, upload-time = "2025-08-12T19:47:41.131Z" }, +] + [[package]] name = "jsonpatch" version = "1.33" @@ -1386,6 +1613,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, ] +[package.optional-dependencies] +format-nongpl = [ + { name = "fqdn" }, + { name = "idna" }, + { name = "isoduration" }, + { name = "jsonpointer" }, + { name = "rfc3339-validator" }, + { name = "rfc3986-validator" }, + { name = "rfc3987-syntax" }, + { name = "uri-template" }, + { name = "webcolors" }, +] + [[package]] name = "jsonschema-path" version = "0.3.4" @@ -1413,6 +1653,220 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, ] +[[package]] +name = "jupyter" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ipykernel" }, + { name = "ipywidgets" }, + { name = "jupyter-console" }, + { name = "jupyterlab" }, + { name = "nbconvert" }, + { name = "notebook" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/f3/af28ea964ab8bc1e472dba2e82627d36d470c51f5cd38c37502eeffaa25e/jupyter-1.1.1.tar.gz", hash = "sha256:d55467bceabdea49d7e3624af7e33d59c37fff53ed3a350e1ac957bed731de7a", size = 5714959, upload-time = "2024-08-30T07:15:48.299Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/64/285f20a31679bf547b75602702f7800e74dbabae36ef324f716c02804753/jupyter-1.1.1-py2.py3-none-any.whl", hash = "sha256:7a59533c22af65439b24bbe60373a4e95af8f16ac65a6c00820ad378e3f7cc83", size = 2657, upload-time = "2024-08-30T07:15:47.045Z" }, +] + +[[package]] +name = "jupyter-client" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-core" }, + { name = "python-dateutil" }, + { name = "pyzmq" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/27/d10de45e8ad4ce872372c4a3a37b7b35b6b064f6f023a5c14ffcced4d59d/jupyter_client-8.7.0.tar.gz", hash = "sha256:3357212d9cbe01209e59190f67a3a7e1f387a4f4e88d1e0433ad84d7b262531d", size = 344691, upload-time = "2025-12-09T18:37:01.953Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/f5/fddaec430367be9d62a7ed125530e133bfd4a1c0350fe221149ee0f2b526/jupyter_client-8.7.0-py3-none-any.whl", hash = "sha256:3671a94fd25e62f5f2f554f5e95389c2294d89822378a5f2dd24353e1494a9e0", size = 106215, upload-time = "2025-12-09T18:37:00.024Z" }, +] + +[[package]] +name = "jupyter-console" +version = "6.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ipykernel" }, + { name = "ipython" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "pyzmq" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bd/2d/e2fd31e2fc41c14e2bcb6c976ab732597e907523f6b2420305f9fc7fdbdb/jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539", size = 34363, upload-time = "2023-03-06T14:13:31.02Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/77/71d78d58f15c22db16328a476426f7ac4a60d3a5a7ba3b9627ee2f7903d4/jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485", size = 24510, upload-time = "2023-03-06T14:13:28.229Z" }, +] + +[[package]] +name = "jupyter-core" +version = "5.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "platformdirs" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/02/49/9d1284d0dc65e2c757b74c6687b6d319b02f822ad039e5c512df9194d9dd/jupyter_core-5.9.1.tar.gz", hash = "sha256:4d09aaff303b9566c3ce657f580bd089ff5c91f5f89cf7d8846c3cdf465b5508", size = 89814, upload-time = "2025-10-16T19:19:18.444Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/e7/80988e32bf6f73919a113473a604f5a8f09094de312b9d52b79c2df7612b/jupyter_core-5.9.1-py3-none-any.whl", hash = "sha256:ebf87fdc6073d142e114c72c9e29a9d7ca03fad818c5d300ce2adc1fb0743407", size = 29032, upload-time = "2025-10-16T19:19:16.783Z" }, +] + +[[package]] +name = "jupyter-events" +version = "0.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonschema", extra = ["format-nongpl"] }, + { name = "packaging" }, + { name = "python-json-logger" }, + { name = "pyyaml" }, + { name = "referencing" }, + { name = "rfc3339-validator" }, + { name = "rfc3986-validator" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9d/c3/306d090461e4cf3cd91eceaff84bede12a8e52cd821c2d20c9a4fd728385/jupyter_events-0.12.0.tar.gz", hash = "sha256:fc3fce98865f6784c9cd0a56a20644fc6098f21c8c33834a8d9fe383c17e554b", size = 62196, upload-time = "2025-02-03T17:23:41.485Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/48/577993f1f99c552f18a0428731a755e06171f9902fa118c379eb7c04ea22/jupyter_events-0.12.0-py3-none-any.whl", hash = "sha256:6464b2fa5ad10451c3d35fabc75eab39556ae1e2853ad0c0cc31b656731a97fb", size = 19430, upload-time = "2025-02-03T17:23:38.643Z" }, +] + +[[package]] +name = "jupyter-lsp" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-server" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/5a/9066c9f8e94ee517133cd98dba393459a16cd48bba71a82f16a65415206c/jupyter_lsp-2.3.0.tar.gz", hash = "sha256:458aa59339dc868fb784d73364f17dbce8836e906cd75fd471a325cba02e0245", size = 54823, upload-time = "2025-08-27T17:47:34.671Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/60/1f6cee0c46263de1173894f0fafcb3475ded276c472c14d25e0280c18d6d/jupyter_lsp-2.3.0-py3-none-any.whl", hash = "sha256:e914a3cb2addf48b1c7710914771aaf1819d46b2e5a79b0f917b5478ec93f34f", size = 76687, upload-time = "2025-08-27T17:47:33.15Z" }, +] + +[[package]] +name = "jupyter-server" +version = "2.17.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "argon2-cffi" }, + { name = "jinja2" }, + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "jupyter-events" }, + { name = "jupyter-server-terminals" }, + { name = "nbconvert" }, + { name = "nbformat" }, + { name = "packaging" }, + { name = "prometheus-client" }, + { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "pyzmq" }, + { name = "send2trash" }, + { name = "terminado" }, + { name = "tornado" }, + { name = "traitlets" }, + { name = "websocket-client" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/ac/e040ec363d7b6b1f11304cc9f209dac4517ece5d5e01821366b924a64a50/jupyter_server-2.17.0.tar.gz", hash = "sha256:c38ea898566964c888b4772ae1ed58eca84592e88251d2cfc4d171f81f7e99d5", size = 731949, upload-time = "2025-08-21T14:42:54.042Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/80/a24767e6ca280f5a49525d987bf3e4d7552bf67c8be07e8ccf20271f8568/jupyter_server-2.17.0-py3-none-any.whl", hash = "sha256:e8cb9c7db4251f51ed307e329b81b72ccf2056ff82d50524debde1ee1870e13f", size = 388221, upload-time = "2025-08-21T14:42:52.034Z" }, +] + +[[package]] +name = "jupyter-server-terminals" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "terminado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/d5/562469734f476159e99a55426d697cbf8e7eb5efe89fb0e0b4f83a3d3459/jupyter_server_terminals-0.5.3.tar.gz", hash = "sha256:5ae0295167220e9ace0edcfdb212afd2b01ee8d179fe6f23c899590e9b8a5269", size = 31430, upload-time = "2024-03-12T14:37:03.049Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/2d/2b32cdbe8d2a602f697a649798554e4f072115438e92249624e532e8aca6/jupyter_server_terminals-0.5.3-py3-none-any.whl", hash = "sha256:41ee0d7dc0ebf2809c668e0fc726dfaf258fcd3e769568996ca731b6194ae9aa", size = 13656, upload-time = "2024-03-12T14:37:00.708Z" }, +] + +[[package]] +name = "jupyterlab" +version = "4.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "async-lru" }, + { name = "httpx" }, + { name = "ipykernel" }, + { name = "jinja2" }, + { name = "jupyter-core" }, + { name = "jupyter-lsp" }, + { name = "jupyter-server" }, + { name = "jupyterlab-server" }, + { name = "notebook-shim" }, + { name = "packaging" }, + { name = "setuptools" }, + { name = "tornado" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/e5/4fa382a796a6d8e2cd867816b64f1ff27f906e43a7a83ad9eb389e448cd8/jupyterlab-4.5.0.tar.gz", hash = "sha256:aec33d6d8f1225b495ee2cf20f0514f45e6df8e360bdd7ac9bace0b7ac5177ea", size = 23989880, upload-time = "2025-11-18T13:19:00.365Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6c/1e/5a4d5498eba382fee667ed797cf64ae5d1b13b04356df62f067f48bb0f61/jupyterlab-4.5.0-py3-none-any.whl", hash = "sha256:88e157c75c1afff64c7dc4b801ec471450b922a4eae4305211ddd40da8201c8a", size = 12380641, upload-time = "2025-11-18T13:18:56.252Z" }, +] + +[[package]] +name = "jupyterlab-pygments" +version = "0.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/90/51/9187be60d989df97f5f0aba133fa54e7300f17616e065d1ada7d7646b6d6/jupyterlab_pygments-0.3.0.tar.gz", hash = "sha256:721aca4d9029252b11cfa9d185e5b5af4d54772bb8072f9b7036f4170054d35d", size = 512900, upload-time = "2023-11-23T09:26:37.44Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/dd/ead9d8ea85bf202d90cc513b533f9c363121c7792674f78e0d8a854b63b4/jupyterlab_pygments-0.3.0-py3-none-any.whl", hash = "sha256:841a89020971da1d8693f1a99997aefc5dc424bb1b251fd6322462a1b8842780", size = 15884, upload-time = "2023-11-23T09:26:34.325Z" }, +] + +[[package]] +name = "jupyterlab-server" +version = "2.28.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "babel" }, + { name = "jinja2" }, + { name = "json5" }, + { name = "jsonschema" }, + { name = "jupyter-server" }, + { name = "packaging" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/2c/90153f189e421e93c4bb4f9e3f59802a1f01abd2ac5cf40b152d7f735232/jupyterlab_server-2.28.0.tar.gz", hash = "sha256:35baa81898b15f93573e2deca50d11ac0ae407ebb688299d3a5213265033712c", size = 76996, upload-time = "2025-10-22T13:59:18.37Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/07/a000fe835f76b7e1143242ab1122e6362ef1c03f23f83a045c38859c2ae0/jupyterlab_server-2.28.0-py3-none-any.whl", hash = "sha256:e4355b148fdcf34d312bbbc80f22467d6d20460e8b8736bf235577dd18506968", size = 59830, upload-time = "2025-10-22T13:59:16.767Z" }, +] + +[[package]] +name = "jupyterlab-widgets" +version = "3.0.16" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/2d/ef58fed122b268c69c0aa099da20bc67657cdfb2e222688d5731bd5b971d/jupyterlab_widgets-3.0.16.tar.gz", hash = "sha256:423da05071d55cf27a9e602216d35a3a65a3e41cdf9c5d3b643b814ce38c19e0", size = 897423, upload-time = "2025-11-01T21:11:29.724Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/b5/36c712098e6191d1b4e349304ef73a8d06aed77e56ceaac8c0a306c7bda1/jupyterlab_widgets-3.0.16-py3-none-any.whl", hash = "sha256:45fa36d9c6422cf2559198e4db481aa243c7a32d9926b500781c830c80f7ecf8", size = 914926, upload-time = "2025-11-01T21:11:28.008Z" }, +] + +[[package]] +name = "kaleido" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "choreographer" }, + { name = "logistro" }, + { name = "orjson" }, + { name = "packaging" }, + { name = "pytest-timeout" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/ad/76eec859b71eda803a88ea50ed3f270281254656bb23d19eb0a39aa706a0/kaleido-1.2.0.tar.gz", hash = "sha256:fa621a14423e8effa2895a2526be00af0cf21655be1b74b7e382c171d12e71ef", size = 64160, upload-time = "2025-11-04T21:24:23.833Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/97/f6de8d4af54d6401d6581a686cce3e3e2371a79ba459a449104e026c08bc/kaleido-1.2.0-py3-none-any.whl", hash = "sha256:c27ed82b51df6b923d0e656feac221343a0dbcd2fb9bc7e6b1db97f61e9a1513", size = 68997, upload-time = "2025-11-04T21:24:21.704Z" }, +] + [[package]] name = "kiwisolver" version = "1.4.9" @@ -1485,6 +1939,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/80/be/3578e8afd18c88cdf9cb4cffde75a96d2be38c5a903f1ed0ceec061bd09e/kiwisolver-1.4.9-cp314-cp314t-win_arm64.whl", hash = "sha256:4a48a2ce79d65d363597ef7b567ce3d14d68783d2b2263d98db3d9477805ba32", size = 70260, upload-time = "2025-08-10T21:27:36.606Z" }, ] +[[package]] +name = "lark" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/da/34/28fff3ab31ccff1fd4f6c7c7b0ceb2b6968d8ea4950663eadcb5720591a0/lark-1.3.1.tar.gz", hash = "sha256:b426a7a6d6d53189d318f2b6236ab5d6429eaf09259f1ca33eb716eed10d2905", size = 382732, upload-time = "2025-10-27T18:25:56.653Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/3d/14ce75ef66813643812f3093ab17e46d3a206942ce7376d31ec2d36229e7/lark-1.3.1-py3-none-any.whl", hash = "sha256:c629b661023a014c37da873b4ff58a817398d12635d3bbb2c5a03be7fe5d1e12", size = 113151, upload-time = "2025-10-27T18:25:54.882Z" }, +] + [[package]] name = "lazy-object-proxy" version = "1.12.0" @@ -1552,6 +2015,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/65/bd/606e2f7eb0da042bffd8711a7427f7a28ca501aa6b1e3367ae3c7d4dc489/licensecheck-2025.1.0-py3-none-any.whl", hash = "sha256:eb20131cd8f877e5396958fd7b00cdb2225436c37a59dba4cf36d36079133a17", size = 26681, upload-time = "2025-03-26T22:58:03.145Z" }, ] +[[package]] +name = "logistro" +version = "2.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/08/90/bfd7a6fab22bdfafe48ed3c4831713cb77b4779d18ade5e248d5dbc0ca22/logistro-2.0.1.tar.gz", hash = "sha256:8446affc82bab2577eb02bfcbcae196ae03129287557287b6a070f70c1985047", size = 8398, upload-time = "2025-11-01T02:41:18.81Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl", hash = "sha256:06ffa127b9fb4ac8b1972ae6b2a9d7fde57598bf5939cd708f43ec5bba2d31eb", size = 8555, upload-time = "2025-11-01T02:41:17.587Z" }, +] + [[package]] name = "lightgbm" version = "4.6.0" @@ -1758,6 +2230,15 @@ name = "microsoft-python-type-stubs" version = "0" source = { git = "https://github.com/microsoft/python-type-stubs.git#692c37c3969d22612b295ddf7e7af5907204a386" } +[[package]] +name = "mistune" +version = "3.1.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/02/a7fb8b21d4d55ac93cdcde9d3638da5dd0ebdd3a4fed76c7725e10b81cbe/mistune-3.1.4.tar.gz", hash = "sha256:b5a7f801d389f724ec702840c11d8fc48f2b33519102fc7ee739e8177b672164", size = 94588, upload-time = "2025-08-29T07:20:43.594Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/f0/8282d9641415e9e33df173516226b404d367a0fc55e1a60424a152913abc/mistune-3.1.4-py3-none-any.whl", hash = "sha256:93691da911e5d9d2e23bc54472892aff676df27a75274962ff9edc210364266d", size = 53481, upload-time = "2025-08-29T07:20:42.218Z" }, +] + [[package]] name = "mlflow-skinny" version = "3.6.0" @@ -1973,6 +2454,70 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ef/82/7a9d0550484a62c6da82858ee9419f3dd1ccc9aa1c26a1e43da3ecd20b0d/natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c", size = 38268, upload-time = "2023-06-20T04:17:17.522Z" }, ] +[[package]] +name = "nbclient" +version = "0.10.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-client" }, + { name = "jupyter-core" }, + { name = "nbformat" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/87/66/7ffd18d58eae90d5721f9f39212327695b749e23ad44b3881744eaf4d9e8/nbclient-0.10.2.tar.gz", hash = "sha256:90b7fc6b810630db87a6d0c2250b1f0ab4cf4d3c27a299b0cde78a4ed3fd9193", size = 62424, upload-time = "2024-12-19T10:32:27.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/6d/e7fa07f03a4a7b221d94b4d586edb754a9b0dc3c9e2c93353e9fa4e0d117/nbclient-0.10.2-py3-none-any.whl", hash = "sha256:4ffee11e788b4a27fabeb7955547e4318a5298f34342a4bfd01f2e1faaeadc3d", size = 25434, upload-time = "2024-12-19T10:32:24.139Z" }, +] + +[[package]] +name = "nbconvert" +version = "7.16.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "bleach", extra = ["css"] }, + { name = "defusedxml" }, + { name = "jinja2" }, + { name = "jupyter-core" }, + { name = "jupyterlab-pygments" }, + { name = "markupsafe" }, + { name = "mistune" }, + { name = "nbclient" }, + { name = "nbformat" }, + { name = "packaging" }, + { name = "pandocfilters" }, + { name = "pygments" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/59/f28e15fc47ffb73af68a8d9b47367a8630d76e97ae85ad18271b9db96fdf/nbconvert-7.16.6.tar.gz", hash = "sha256:576a7e37c6480da7b8465eefa66c17844243816ce1ccc372633c6b71c3c0f582", size = 857715, upload-time = "2025-01-28T09:29:14.724Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/9a/cd673b2f773a12c992f41309ef81b99da1690426bd2f96957a7ade0d3ed7/nbconvert-7.16.6-py3-none-any.whl", hash = "sha256:1375a7b67e0c2883678c48e506dc320febb57685e5ee67faa51b18a90f3a712b", size = 258525, upload-time = "2025-01-28T09:29:12.551Z" }, +] + +[[package]] +name = "nbformat" +version = "5.10.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fastjsonschema" }, + { name = "jsonschema" }, + { name = "jupyter-core" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/fd/91545e604bc3dad7dca9ed03284086039b294c6b3d75c0d2fa45f9e9caf3/nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a", size = 142749, upload-time = "2024-04-04T11:20:37.371Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/82/0340caa499416c78e5d8f5f05947ae4bc3cba53c9f038ab6e9ed964e22f1/nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b", size = 78454, upload-time = "2024-04-04T11:20:34.895Z" }, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418, upload-time = "2024-01-21T14:25:19.227Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, +] + [[package]] name = "networkx" version = "3.5" @@ -1991,6 +2536,34 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, ] +[[package]] +name = "notebook" +version = "7.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-server" }, + { name = "jupyterlab" }, + { name = "jupyterlab-server" }, + { name = "notebook-shim" }, + { name = "tornado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/89/ac/a97041621250a4fc5af379fb377942841eea2ca146aab166b8fcdfba96c2/notebook-7.5.0.tar.gz", hash = "sha256:3b27eaf9913033c28dde92d02139414c608992e1df4b969c843219acf2ff95e4", size = 14052074, upload-time = "2025-11-19T08:36:20.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/96/00df2a4760f10f5af0f45c4955573cae6189931f9a30265a35865f8c1031/notebook-7.5.0-py3-none-any.whl", hash = "sha256:3300262d52905ca271bd50b22617681d95f08a8360d099e097726e6d2efb5811", size = 14460968, upload-time = "2025-11-19T08:36:15.869Z" }, +] + +[[package]] +name = "notebook-shim" +version = "0.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-server" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/54/d2/92fa3243712b9a3e8bafaf60aac366da1cada3639ca767ff4b5b3654ec28/notebook_shim-0.2.4.tar.gz", hash = "sha256:b4b2cfa1b65d98307ca24361f5b30fe785b53c3fd07b7a47e89acb5e6ac638cb", size = 13167, upload-time = "2024-02-14T23:35:18.353Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/33/bd5b9137445ea4b680023eb0469b2bb969d61303dedb2aac6560ff3d14a1/notebook_shim-0.2.4-py3-none-any.whl", hash = "sha256:411a5be4e9dc882a074ccbcae671eda64cceb068767e9a3419096986560e1cef", size = 13307, upload-time = "2024-02-14T23:35:16.286Z" }, +] + [[package]] name = "numpy" version = "2.3.5" @@ -2276,6 +2849,36 @@ requires-dist = [ { name = "sphinx-pyproject", specifier = ">=0.3.0" }, ] +[[package]] +name = "openstef-examples" +version = "0.0.0" +source = { virtual = "examples" } +dependencies = [ + { name = "openstef" }, + { name = "openstef-beam" }, + { name = "openstef-core" }, + { name = "openstef-models" }, +] + +[package.optional-dependencies] +tutorials = [ + { name = "huggingface-hub" }, + { name = "jupyter" }, + { name = "kaleido" }, +] + +[package.metadata] +requires-dist = [ + { name = "huggingface-hub", marker = "extra == 'tutorials'", specifier = ">=1.2.2" }, + { name = "jupyter", marker = "extra == 'tutorials'", specifier = ">=1.1.1" }, + { name = "kaleido", marker = "extra == 'tutorials'" }, + { name = "openstef", editable = "." }, + { name = "openstef-beam", editable = "packages/openstef-beam" }, + { name = "openstef-core", editable = "packages/openstef-core" }, + { name = "openstef-models", editable = "packages/openstef-models" }, +] +provides-extras = ["tutorials"] + [[package]] name = "openstef-models" version = "0.0.0" @@ -2311,7 +2914,7 @@ requires-dist = [ { name = "openstef-core", editable = "packages/openstef-core" }, { name = "pvlib", specifier = ">=0.13" }, { name = "pycountry", specifier = ">=24.6.1" }, - { name = "scikit-learn", specifier = ">=1.7.1,<2" }, + { name = "scikit-learn", specifier = ">=1.7.1,<1.8" }, { name = "scipy", specifier = ">=1.16.3,<2" }, { name = "skops", specifier = ">=0.13" }, { name = "xgboost", marker = "sys_platform == 'darwin' and extra == 'xgb-cpu'", specifier = ">=3,<4" }, @@ -2390,6 +2993,59 @@ numpy = [ { name = "numpy-typing-compat" }, ] +[[package]] +name = "orjson" +version = "3.11.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/04/b8/333fdb27840f3bf04022d21b654a35f58e15407183aeb16f3b41aa053446/orjson-3.11.5.tar.gz", hash = "sha256:82393ab47b4fe44ffd0a7659fa9cfaacc717eb617c93cde83795f14af5c2e9d5", size = 5972347, upload-time = "2025-12-06T15:55:39.458Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a4/8052a029029b096a78955eadd68ab594ce2197e24ec50e6b6d2ab3f4e33b/orjson-3.11.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:334e5b4bff9ad101237c2d799d9fd45737752929753bf4faf4b207335a416b7d", size = 245347, upload-time = "2025-12-06T15:54:22.061Z" }, + { url = "https://files.pythonhosted.org/packages/64/67/574a7732bd9d9d79ac620c8790b4cfe0717a3d5a6eb2b539e6e8995e24a0/orjson-3.11.5-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:ff770589960a86eae279f5d8aa536196ebda8273a2a07db2a54e82b93bc86626", size = 129435, upload-time = "2025-12-06T15:54:23.615Z" }, + { url = "https://files.pythonhosted.org/packages/52/8d/544e77d7a29d90cf4d9eecd0ae801c688e7f3d1adfa2ebae5e1e94d38ab9/orjson-3.11.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed24250e55efbcb0b35bed7caaec8cedf858ab2f9f2201f17b8938c618c8ca6f", size = 132074, upload-time = "2025-12-06T15:54:24.694Z" }, + { url = "https://files.pythonhosted.org/packages/6e/57/b9f5b5b6fbff9c26f77e785baf56ae8460ef74acdb3eae4931c25b8f5ba9/orjson-3.11.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a66d7769e98a08a12a139049aac2f0ca3adae989817f8c43337455fbc7669b85", size = 130520, upload-time = "2025-12-06T15:54:26.185Z" }, + { url = "https://files.pythonhosted.org/packages/f6/6d/d34970bf9eb33f9ec7c979a262cad86076814859e54eb9a059a52f6dc13d/orjson-3.11.5-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86cfc555bfd5794d24c6a1903e558b50644e5e68e6471d66502ce5cb5fdef3f9", size = 136209, upload-time = "2025-12-06T15:54:27.264Z" }, + { url = "https://files.pythonhosted.org/packages/e7/39/bc373b63cc0e117a105ea12e57280f83ae52fdee426890d57412432d63b3/orjson-3.11.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a230065027bc2a025e944f9d4714976a81e7ecfa940923283bca7bbc1f10f626", size = 139837, upload-time = "2025-12-06T15:54:28.75Z" }, + { url = "https://files.pythonhosted.org/packages/cb/aa/7c4818c8d7d324da220f4f1af55c343956003aa4d1ce1857bdc1d396ba69/orjson-3.11.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b29d36b60e606df01959c4b982729c8845c69d1963f88686608be9ced96dbfaa", size = 137307, upload-time = "2025-12-06T15:54:29.856Z" }, + { url = "https://files.pythonhosted.org/packages/46/bf/0993b5a056759ba65145effe3a79dd5a939d4a070eaa5da2ee3180fbb13f/orjson-3.11.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c74099c6b230d4261fdc3169d50efc09abf38ace1a42ea2f9994b1d79153d477", size = 139020, upload-time = "2025-12-06T15:54:31.024Z" }, + { url = "https://files.pythonhosted.org/packages/65/e8/83a6c95db3039e504eda60fc388f9faedbb4f6472f5aba7084e06552d9aa/orjson-3.11.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e697d06ad57dd0c7a737771d470eedc18e68dfdefcdd3b7de7f33dfda5b6212e", size = 141099, upload-time = "2025-12-06T15:54:32.196Z" }, + { url = "https://files.pythonhosted.org/packages/b9/b4/24fdc024abfce31c2f6812973b0a693688037ece5dc64b7a60c1ce69e2f2/orjson-3.11.5-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e08ca8a6c851e95aaecc32bc44a5aa75d0ad26af8cdac7c77e4ed93acf3d5b69", size = 413540, upload-time = "2025-12-06T15:54:33.361Z" }, + { url = "https://files.pythonhosted.org/packages/d9/37/01c0ec95d55ed0c11e4cae3e10427e479bba40c77312b63e1f9665e0737d/orjson-3.11.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e8b5f96c05fce7d0218df3fdfeb962d6b8cfff7e3e20264306b46dd8b217c0f3", size = 151530, upload-time = "2025-12-06T15:54:34.6Z" }, + { url = "https://files.pythonhosted.org/packages/f9/d4/f9ebc57182705bb4bbe63f5bbe14af43722a2533135e1d2fb7affa0c355d/orjson-3.11.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ddbfdb5099b3e6ba6d6ea818f61997bb66de14b411357d24c4612cf1ebad08ca", size = 141863, upload-time = "2025-12-06T15:54:35.801Z" }, + { url = "https://files.pythonhosted.org/packages/0d/04/02102b8d19fdcb009d72d622bb5781e8f3fae1646bf3e18c53d1bc8115b5/orjson-3.11.5-cp312-cp312-win32.whl", hash = "sha256:9172578c4eb09dbfcf1657d43198de59b6cef4054de385365060ed50c458ac98", size = 135255, upload-time = "2025-12-06T15:54:37.209Z" }, + { url = "https://files.pythonhosted.org/packages/d4/fb/f05646c43d5450492cb387de5549f6de90a71001682c17882d9f66476af5/orjson-3.11.5-cp312-cp312-win_amd64.whl", hash = "sha256:2b91126e7b470ff2e75746f6f6ee32b9ab67b7a93c8ba1d15d3a0caaf16ec875", size = 133252, upload-time = "2025-12-06T15:54:38.401Z" }, + { url = "https://files.pythonhosted.org/packages/dc/a6/7b8c0b26ba18c793533ac1cd145e131e46fcf43952aa94c109b5b913c1f0/orjson-3.11.5-cp312-cp312-win_arm64.whl", hash = "sha256:acbc5fac7e06777555b0722b8ad5f574739e99ffe99467ed63da98f97f9ca0fe", size = 126777, upload-time = "2025-12-06T15:54:39.515Z" }, + { url = "https://files.pythonhosted.org/packages/10/43/61a77040ce59f1569edf38f0b9faadc90c8cf7e9bec2e0df51d0132c6bb7/orjson-3.11.5-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:3b01799262081a4c47c035dd77c1301d40f568f77cc7ec1bb7db5d63b0a01629", size = 245271, upload-time = "2025-12-06T15:54:40.878Z" }, + { url = "https://files.pythonhosted.org/packages/55/f9/0f79be617388227866d50edd2fd320cb8fb94dc1501184bb1620981a0aba/orjson-3.11.5-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:61de247948108484779f57a9f406e4c84d636fa5a59e411e6352484985e8a7c3", size = 129422, upload-time = "2025-12-06T15:54:42.403Z" }, + { url = "https://files.pythonhosted.org/packages/77/42/f1bf1549b432d4a78bfa95735b79b5dac75b65b5bb815bba86ad406ead0a/orjson-3.11.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:894aea2e63d4f24a7f04a1908307c738d0dce992e9249e744b8f4e8dd9197f39", size = 132060, upload-time = "2025-12-06T15:54:43.531Z" }, + { url = "https://files.pythonhosted.org/packages/25/49/825aa6b929f1a6ed244c78acd7b22c1481fd7e5fda047dc8bf4c1a807eb6/orjson-3.11.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ddc21521598dbe369d83d4d40338e23d4101dad21dae0e79fa20465dbace019f", size = 130391, upload-time = "2025-12-06T15:54:45.059Z" }, + { url = "https://files.pythonhosted.org/packages/42/ec/de55391858b49e16e1aa8f0bbbb7e5997b7345d8e984a2dec3746d13065b/orjson-3.11.5-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cce16ae2f5fb2c53c3eafdd1706cb7b6530a67cc1c17abe8ec747f5cd7c0c51", size = 135964, upload-time = "2025-12-06T15:54:46.576Z" }, + { url = "https://files.pythonhosted.org/packages/1c/40/820bc63121d2d28818556a2d0a09384a9f0262407cf9fa305e091a8048df/orjson-3.11.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e46c762d9f0e1cfb4ccc8515de7f349abbc95b59cb5a2bd68df5973fdef913f8", size = 139817, upload-time = "2025-12-06T15:54:48.084Z" }, + { url = "https://files.pythonhosted.org/packages/09/c7/3a445ca9a84a0d59d26365fd8898ff52bdfcdcb825bcc6519830371d2364/orjson-3.11.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d7345c759276b798ccd6d77a87136029e71e66a8bbf2d2755cbdde1d82e78706", size = 137336, upload-time = "2025-12-06T15:54:49.426Z" }, + { url = "https://files.pythonhosted.org/packages/9a/b3/dc0d3771f2e5d1f13368f56b339c6782f955c6a20b50465a91acb79fe961/orjson-3.11.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75bc2e59e6a2ac1dd28901d07115abdebc4563b5b07dd612bf64260a201b1c7f", size = 138993, upload-time = "2025-12-06T15:54:50.939Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a2/65267e959de6abe23444659b6e19c888f242bf7725ff927e2292776f6b89/orjson-3.11.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:54aae9b654554c3b4edd61896b978568c6daa16af96fa4681c9b5babd469f863", size = 141070, upload-time = "2025-12-06T15:54:52.414Z" }, + { url = "https://files.pythonhosted.org/packages/63/c9/da44a321b288727a322c6ab17e1754195708786a04f4f9d2220a5076a649/orjson-3.11.5-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:4bdd8d164a871c4ec773f9de0f6fe8769c2d6727879c37a9666ba4183b7f8228", size = 413505, upload-time = "2025-12-06T15:54:53.67Z" }, + { url = "https://files.pythonhosted.org/packages/7f/17/68dc14fa7000eefb3d4d6d7326a190c99bb65e319f02747ef3ebf2452f12/orjson-3.11.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a261fef929bcf98a60713bf5e95ad067cea16ae345d9a35034e73c3990e927d2", size = 151342, upload-time = "2025-12-06T15:54:55.113Z" }, + { url = "https://files.pythonhosted.org/packages/c4/c5/ccee774b67225bed630a57478529fc026eda33d94fe4c0eac8fe58d4aa52/orjson-3.11.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c028a394c766693c5c9909dec76b24f37e6a1b91999e8d0c0d5feecbe93c3e05", size = 141823, upload-time = "2025-12-06T15:54:56.331Z" }, + { url = "https://files.pythonhosted.org/packages/67/80/5d00e4155d0cd7390ae2087130637671da713959bb558db9bac5e6f6b042/orjson-3.11.5-cp313-cp313-win32.whl", hash = "sha256:2cc79aaad1dfabe1bd2d50ee09814a1253164b3da4c00a78c458d82d04b3bdef", size = 135236, upload-time = "2025-12-06T15:54:57.507Z" }, + { url = "https://files.pythonhosted.org/packages/95/fe/792cc06a84808dbdc20ac6eab6811c53091b42f8e51ecebf14b540e9cfe4/orjson-3.11.5-cp313-cp313-win_amd64.whl", hash = "sha256:ff7877d376add4e16b274e35a3f58b7f37b362abf4aa31863dadacdd20e3a583", size = 133167, upload-time = "2025-12-06T15:54:58.71Z" }, + { url = "https://files.pythonhosted.org/packages/46/2c/d158bd8b50e3b1cfdcf406a7e463f6ffe3f0d167b99634717acdaf5e299f/orjson-3.11.5-cp313-cp313-win_arm64.whl", hash = "sha256:59ac72ea775c88b163ba8d21b0177628bd015c5dd060647bbab6e22da3aad287", size = 126712, upload-time = "2025-12-06T15:54:59.892Z" }, + { url = "https://files.pythonhosted.org/packages/c2/60/77d7b839e317ead7bb225d55bb50f7ea75f47afc489c81199befc5435b50/orjson-3.11.5-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e446a8ea0a4c366ceafc7d97067bfd55292969143b57e3c846d87fc701e797a0", size = 245252, upload-time = "2025-12-06T15:55:01.127Z" }, + { url = "https://files.pythonhosted.org/packages/f1/aa/d4639163b400f8044cef0fb9aa51b0337be0da3a27187a20d1166e742370/orjson-3.11.5-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:53deb5addae9c22bbe3739298f5f2196afa881ea75944e7720681c7080909a81", size = 129419, upload-time = "2025-12-06T15:55:02.723Z" }, + { url = "https://files.pythonhosted.org/packages/30/94/9eabf94f2e11c671111139edf5ec410d2f21e6feee717804f7e8872d883f/orjson-3.11.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82cd00d49d6063d2b8791da5d4f9d20539c5951f965e45ccf4e96d33505ce68f", size = 132050, upload-time = "2025-12-06T15:55:03.918Z" }, + { url = "https://files.pythonhosted.org/packages/3d/c8/ca10f5c5322f341ea9a9f1097e140be17a88f88d1cfdd29df522970d9744/orjson-3.11.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3fd15f9fc8c203aeceff4fda211157fad114dde66e92e24097b3647a08f4ee9e", size = 130370, upload-time = "2025-12-06T15:55:05.173Z" }, + { url = "https://files.pythonhosted.org/packages/25/d4/e96824476d361ee2edd5c6290ceb8d7edf88d81148a6ce172fc00278ca7f/orjson-3.11.5-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9df95000fbe6777bf9820ae82ab7578e8662051bb5f83d71a28992f539d2cda7", size = 136012, upload-time = "2025-12-06T15:55:06.402Z" }, + { url = "https://files.pythonhosted.org/packages/85/8e/9bc3423308c425c588903f2d103cfcfe2539e07a25d6522900645a6f257f/orjson-3.11.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92a8d676748fca47ade5bc3da7430ed7767afe51b2f8100e3cd65e151c0eaceb", size = 139809, upload-time = "2025-12-06T15:55:07.656Z" }, + { url = "https://files.pythonhosted.org/packages/e9/3c/b404e94e0b02a232b957c54643ce68d0268dacb67ac33ffdee24008c8b27/orjson-3.11.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa0f513be38b40234c77975e68805506cad5d57b3dfd8fe3baa7f4f4051e15b4", size = 137332, upload-time = "2025-12-06T15:55:08.961Z" }, + { url = "https://files.pythonhosted.org/packages/51/30/cc2d69d5ce0ad9b84811cdf4a0cd5362ac27205a921da524ff42f26d65e0/orjson-3.11.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1863e75b92891f553b7922ce4ee10ed06db061e104f2b7815de80cdcb135ad", size = 138983, upload-time = "2025-12-06T15:55:10.595Z" }, + { url = "https://files.pythonhosted.org/packages/0e/87/de3223944a3e297d4707d2fe3b1ffb71437550e165eaf0ca8bbe43ccbcb1/orjson-3.11.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d4be86b58e9ea262617b8ca6251a2f0d63cc132a6da4b5fcc8e0a4128782c829", size = 141069, upload-time = "2025-12-06T15:55:11.832Z" }, + { url = "https://files.pythonhosted.org/packages/65/30/81d5087ae74be33bcae3ff2d80f5ccaa4a8fedc6d39bf65a427a95b8977f/orjson-3.11.5-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:b923c1c13fa02084eb38c9c065afd860a5cff58026813319a06949c3af5732ac", size = 413491, upload-time = "2025-12-06T15:55:13.314Z" }, + { url = "https://files.pythonhosted.org/packages/d0/6f/f6058c21e2fc1efaf918986dbc2da5cd38044f1a2d4b7b91ad17c4acf786/orjson-3.11.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:1b6bd351202b2cd987f35a13b5e16471cf4d952b42a73c391cc537974c43ef6d", size = 151375, upload-time = "2025-12-06T15:55:14.715Z" }, + { url = "https://files.pythonhosted.org/packages/54/92/c6921f17d45e110892899a7a563a925b2273d929959ce2ad89e2525b885b/orjson-3.11.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bb150d529637d541e6af06bbe3d02f5498d628b7f98267ff87647584293ab439", size = 141850, upload-time = "2025-12-06T15:55:15.94Z" }, + { url = "https://files.pythonhosted.org/packages/88/86/cdecb0140a05e1a477b81f24739da93b25070ee01ce7f7242f44a6437594/orjson-3.11.5-cp314-cp314-win32.whl", hash = "sha256:9cc1e55c884921434a84a0c3dd2699eb9f92e7b441d7f53f3941079ec6ce7499", size = 135278, upload-time = "2025-12-06T15:55:17.202Z" }, + { url = "https://files.pythonhosted.org/packages/e4/97/b638d69b1e947d24f6109216997e38922d54dcdcdb1b11c18d7efd2d3c59/orjson-3.11.5-cp314-cp314-win_amd64.whl", hash = "sha256:a4f3cb2d874e03bc7767c8f88adaa1a9a05cecea3712649c3b58589ec7317310", size = 133170, upload-time = "2025-12-06T15:55:18.468Z" }, + { url = "https://files.pythonhosted.org/packages/8f/dd/f4fff4a6fe601b4f8f3ba3aa6da8ac33d17d124491a3b804c662a70e1636/orjson-3.11.5-cp314-cp314-win_arm64.whl", hash = "sha256:38b22f476c351f9a1c43e5b07d8b5a02eb24a6ab8e75f700f7d479d4568346a5", size = 126713, upload-time = "2025-12-06T15:55:19.738Z" }, +] + [[package]] name = "packaging" version = "25.0" @@ -2459,6 +3115,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/40/96/1e4a035eaf4dce9610aac6e43026d0c6baa05773daf6d21e635a4fe19e21/pandas_stubs-2.3.2.250926-py3-none-any.whl", hash = "sha256:81121818453dcfe00f45c852f4dceee043640b813830f6e7bd084a4ef7ff7270", size = 159995, upload-time = "2025-09-26T19:50:38.241Z" }, ] +[[package]] +name = "pandocfilters" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/70/6f/3dd4940bbe001c06a65f88e36bad298bc7a0de5036115639926b0c5c0458/pandocfilters-1.5.1.tar.gz", hash = "sha256:002b4a555ee4ebc03f8b66307e287fa492e4a77b4ea14d3f934328297bb4939e", size = 8454, upload-time = "2024-01-18T20:08:13.726Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/af/4fbc8cab944db5d21b7e2a5b8e9211a03a79852b1157e2c102fcc61ac440/pandocfilters-1.5.1-py2.py3-none-any.whl", hash = "sha256:93be382804a9cdb0a7267585f157e5d1731bbe5545a85b268d6f5fe6232de2bc", size = 8663, upload-time = "2024-01-18T20:08:11.28Z" }, +] + [[package]] name = "parso" version = "0.8.5" @@ -2620,6 +3285,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/92/1b/5337af1a6a478d25a3e3c56b9b4b42b0a160314e02f4a0498d5322c8dac4/poethepoet-0.37.0-py3-none-any.whl", hash = "sha256:861790276315abcc8df1b4bd60e28c3d48a06db273edd3092f3c94e1a46e5e22", size = 90062, upload-time = "2025-08-11T18:00:27.595Z" }, ] +[[package]] +name = "prometheus-client" +version = "0.23.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/23/53/3edb5d68ecf6b38fcbcc1ad28391117d2a322d9a1a3eff04bfdb184d8c3b/prometheus_client-0.23.1.tar.gz", hash = "sha256:6ae8f9081eaaaf153a2e959d2e6c4f4fb57b12ef76c8c7980202f1e57b48b2ce", size = 80481, upload-time = "2025-09-18T20:47:25.043Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b8/db/14bafcb4af2139e046d03fd00dea7873e48eafe18b7d2797e73d6681f210/prometheus_client-0.23.1-py3-none-any.whl", hash = "sha256:dd1913e6e76b59cfe44e7a4b83e01afc9873c1bdfd2ed8739f1e76aeca115f99", size = 61145, upload-time = "2025-09-18T20:47:23.875Z" }, +] + [[package]] name = "prettytable" version = "3.17.0" @@ -2743,6 +3417,32 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/08/b4/46310463b4f6ceef310f8348786f3cff181cea671578e3d9743ba61a459e/protobuf-6.33.1-py3-none-any.whl", hash = "sha256:d595a9fd694fdeb061a62fbe10eb039cc1e444df81ec9bb70c7fc59ebcb1eafa", size = 170477, upload-time = "2025-11-13T16:44:17.633Z" }, ] +[[package]] +name = "psutil" +version = "7.1.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e1/88/bdd0a41e5857d5d703287598cbf08dad90aed56774ea52ae071bae9071b6/psutil-7.1.3.tar.gz", hash = "sha256:6c86281738d77335af7aec228328e944b30930899ea760ecf33a4dba66be5e74", size = 489059, upload-time = "2025-11-02T12:25:54.619Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/93/0c49e776b8734fef56ec9c5c57f923922f2cf0497d62e0f419465f28f3d0/psutil-7.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0005da714eee687b4b8decd3d6cc7c6db36215c9e74e5ad2264b90c3df7d92dc", size = 239751, upload-time = "2025-11-02T12:25:58.161Z" }, + { url = "https://files.pythonhosted.org/packages/6f/8d/b31e39c769e70780f007969815195a55c81a63efebdd4dbe9e7a113adb2f/psutil-7.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19644c85dcb987e35eeeaefdc3915d059dac7bd1167cdcdbf27e0ce2df0c08c0", size = 240368, upload-time = "2025-11-02T12:26:00.491Z" }, + { url = "https://files.pythonhosted.org/packages/62/61/23fd4acc3c9eebbf6b6c78bcd89e5d020cfde4acf0a9233e9d4e3fa698b4/psutil-7.1.3-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95ef04cf2e5ba0ab9eaafc4a11eaae91b44f4ef5541acd2ee91d9108d00d59a7", size = 287134, upload-time = "2025-11-02T12:26:02.613Z" }, + { url = "https://files.pythonhosted.org/packages/30/1c/f921a009ea9ceb51aa355cb0cc118f68d354db36eae18174bab63affb3e6/psutil-7.1.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1068c303be3a72f8e18e412c5b2a8f6d31750fb152f9cb106b54090296c9d251", size = 289904, upload-time = "2025-11-02T12:26:05.207Z" }, + { url = "https://files.pythonhosted.org/packages/a6/82/62d68066e13e46a5116df187d319d1724b3f437ddd0f958756fc052677f4/psutil-7.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:18349c5c24b06ac5612c0428ec2a0331c26443d259e2a0144a9b24b4395b58fa", size = 249642, upload-time = "2025-11-02T12:26:07.447Z" }, + { url = "https://files.pythonhosted.org/packages/df/ad/c1cd5fe965c14a0392112f68362cfceb5230819dbb5b1888950d18a11d9f/psutil-7.1.3-cp313-cp313t-win_arm64.whl", hash = "sha256:c525ffa774fe4496282fb0b1187725793de3e7c6b29e41562733cae9ada151ee", size = 245518, upload-time = "2025-11-02T12:26:09.719Z" }, + { url = "https://files.pythonhosted.org/packages/2e/bb/6670bded3e3236eb4287c7bcdc167e9fae6e1e9286e437f7111caed2f909/psutil-7.1.3-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b403da1df4d6d43973dc004d19cee3b848e998ae3154cc8097d139b77156c353", size = 239843, upload-time = "2025-11-02T12:26:11.968Z" }, + { url = "https://files.pythonhosted.org/packages/b8/66/853d50e75a38c9a7370ddbeefabdd3d3116b9c31ef94dc92c6729bc36bec/psutil-7.1.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:ad81425efc5e75da3f39b3e636293360ad8d0b49bed7df824c79764fb4ba9b8b", size = 240369, upload-time = "2025-11-02T12:26:14.358Z" }, + { url = "https://files.pythonhosted.org/packages/41/bd/313aba97cb5bfb26916dc29cf0646cbe4dd6a89ca69e8c6edce654876d39/psutil-7.1.3-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f33a3702e167783a9213db10ad29650ebf383946e91bc77f28a5eb083496bc9", size = 288210, upload-time = "2025-11-02T12:26:16.699Z" }, + { url = "https://files.pythonhosted.org/packages/c2/fa/76e3c06e760927a0cfb5705eb38164254de34e9bd86db656d4dbaa228b04/psutil-7.1.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fac9cd332c67f4422504297889da5ab7e05fd11e3c4392140f7370f4208ded1f", size = 291182, upload-time = "2025-11-02T12:26:18.848Z" }, + { url = "https://files.pythonhosted.org/packages/0f/1d/5774a91607035ee5078b8fd747686ebec28a962f178712de100d00b78a32/psutil-7.1.3-cp314-cp314t-win_amd64.whl", hash = "sha256:3792983e23b69843aea49c8f5b8f115572c5ab64c153bada5270086a2123c7e7", size = 250466, upload-time = "2025-11-02T12:26:21.183Z" }, + { url = "https://files.pythonhosted.org/packages/00/ca/e426584bacb43a5cb1ac91fae1937f478cd8fbe5e4ff96574e698a2c77cd/psutil-7.1.3-cp314-cp314t-win_arm64.whl", hash = "sha256:31d77fcedb7529f27bb3a0472bea9334349f9a04160e8e6e5020f22c59893264", size = 245756, upload-time = "2025-11-02T12:26:23.148Z" }, + { url = "https://files.pythonhosted.org/packages/ef/94/46b9154a800253e7ecff5aaacdf8ebf43db99de4a2dfa18575b02548654e/psutil-7.1.3-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2bdbcd0e58ca14996a42adf3621a6244f1bb2e2e528886959c72cf1e326677ab", size = 238359, upload-time = "2025-11-02T12:26:25.284Z" }, + { url = "https://files.pythonhosted.org/packages/68/3a/9f93cff5c025029a36d9a92fef47220ab4692ee7f2be0fba9f92813d0cb8/psutil-7.1.3-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31fa00f1fbc3c3802141eede66f3a2d51d89716a194bf2cd6fc68310a19880", size = 239171, upload-time = "2025-11-02T12:26:27.23Z" }, + { url = "https://files.pythonhosted.org/packages/ce/b1/5f49af514f76431ba4eea935b8ad3725cdeb397e9245ab919dbc1d1dc20f/psutil-7.1.3-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bb428f9f05c1225a558f53e30ccbad9930b11c3fc206836242de1091d3e7dd3", size = 263261, upload-time = "2025-11-02T12:26:29.48Z" }, + { url = "https://files.pythonhosted.org/packages/e0/95/992c8816a74016eb095e73585d747e0a8ea21a061ed3689474fabb29a395/psutil-7.1.3-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56d974e02ca2c8eb4812c3f76c30e28836fffc311d55d979f1465c1feeb2b68b", size = 264635, upload-time = "2025-11-02T12:26:31.74Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/c3ed1a622b6ae2fd3c945a366e64eb35247a31e4db16cf5095e269e8eb3c/psutil-7.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:f39c2c19fe824b47484b96f9692932248a54c43799a84282cfe58d05a6449efd", size = 247633, upload-time = "2025-11-02T12:26:33.887Z" }, + { url = "https://files.pythonhosted.org/packages/c9/ad/33b2ccec09bf96c2b2ef3f9a6f66baac8253d7565d8839e024a6b905d45d/psutil-7.1.3-cp37-abi3-win_arm64.whl", hash = "sha256:bd0d69cee829226a761e92f28140bec9a5ee9d5b4fb4b0cc589068dbfff559b1", size = 244608, upload-time = "2025-11-02T12:26:36.136Z" }, +] + [[package]] name = "ptyprocess" version = "0.7.0" @@ -3070,6 +3770,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/ee/49/1377b49de7d0c1ce41292161ea0f721913fa8722c19fb9c1e3aa0367eecb/pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861", size = 22424, upload-time = "2025-09-09T10:57:00.695Z" }, ] +[[package]] +name = "pytest-timeout" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ac/82/4c9ecabab13363e72d880f2fb504c5f750433b2b6f16e99f4ec21ada284c/pytest_timeout-2.4.0.tar.gz", hash = "sha256:7e68e90b01f9eff71332b25001f85c75495fc4e3a836701876183c4bcfd0540a", size = 17973, upload-time = "2025-05-05T19:44:34.99Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fa/b6/3127540ecdf1464a00e5a01ee60a1b09175f6913f0644ac748494d9c4b21/pytest_timeout-2.4.0-py3-none-any.whl", hash = "sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2", size = 14382, upload-time = "2025-05-05T19:44:33.502Z" }, +] + [[package]] name = "pytest-xdist" version = "3.8.0" @@ -3116,6 +3828,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" }, ] +[[package]] +name = "python-json-logger" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/29/bf/eca6a3d43db1dae7070f70e160ab20b807627ba953663ba07928cdd3dc58/python_json_logger-4.0.0.tar.gz", hash = "sha256:f58e68eb46e1faed27e0f574a55a0455eecd7b8a5b88b85a784519ba3cff047f", size = 17683, upload-time = "2025-10-06T04:15:18.984Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/e5/fecf13f06e5e5f67e8837d777d1bc43fac0ed2b77a676804df5c34744727/python_json_logger-4.0.0-py3-none-any.whl", hash = "sha256:af09c9daf6a813aa4cc7180395f50f2a9e5fa056034c9953aec92e381c5ba1e2", size = 15548, upload-time = "2025-10-06T04:15:17.553Z" }, +] + [[package]] name = "python-magic" version = "0.4.27" @@ -3150,6 +3871,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, ] +[[package]] +name = "pywinpty" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/bb/a7cc2967c5c4eceb6cc49cfe39447d4bfc56e6c865e7c2249b6eb978935f/pywinpty-3.0.2.tar.gz", hash = "sha256:1505cc4cb248af42cb6285a65c9c2086ee9e7e574078ee60933d5d7fa86fb004", size = 30669, upload-time = "2025-10-03T21:16:29.205Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/4e/1098484e042c9485f56f16eb2b69b43b874bd526044ee401512234cf9e04/pywinpty-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:99fdd9b455f0ad6419aba6731a7a0d2f88ced83c3c94a80ff9533d95fa8d8a9e", size = 2050391, upload-time = "2025-10-03T21:19:01.642Z" }, + { url = "https://files.pythonhosted.org/packages/fc/19/b757fe28008236a4a713e813283721b8a40aa60cd7d3f83549f2e25a3155/pywinpty-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:18f78b81e4cfee6aabe7ea8688441d30247b73e52cd9657138015c5f4ee13a51", size = 2050057, upload-time = "2025-10-03T21:19:26.732Z" }, + { url = "https://files.pythonhosted.org/packages/cb/44/cbae12ecf6f4fa4129c36871fd09c6bef4f98d5f625ecefb5e2449765508/pywinpty-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:663383ecfab7fc382cc97ea5c4f7f0bb32c2f889259855df6ea34e5df42d305b", size = 2049874, upload-time = "2025-10-03T21:18:53.923Z" }, + { url = "https://files.pythonhosted.org/packages/ca/15/f12c6055e2d7a617d4d5820e8ac4ceaff849da4cb124640ef5116a230771/pywinpty-3.0.2-cp314-cp314-win_amd64.whl", hash = "sha256:28297cecc37bee9f24d8889e47231972d6e9e84f7b668909de54f36ca785029a", size = 2050386, upload-time = "2025-10-03T21:18:50.477Z" }, + { url = "https://files.pythonhosted.org/packages/de/24/c6907c5bb06043df98ad6a0a0ff5db2e0affcecbc3b15c42404393a3f72a/pywinpty-3.0.2-cp314-cp314t-win_amd64.whl", hash = "sha256:34b55ae9a1b671fe3eae071d86618110538e8eaad18fcb1531c0830b91a82767", size = 2049834, upload-time = "2025-10-03T21:19:25.688Z" }, +] + [[package]] name = "pyyaml" version = "6.0.3" @@ -3196,6 +3930,49 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, ] +[[package]] +name = "pyzmq" +version = "27.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "implementation_name == 'pypy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/0b/3c9baedbdf613ecaa7aa07027780b8867f57b6293b6ee50de316c9f3222b/pyzmq-27.1.0.tar.gz", hash = "sha256:ac0765e3d44455adb6ddbf4417dcce460fc40a05978c08efdf2948072f6db540", size = 281750, upload-time = "2025-09-08T23:10:18.157Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/e7/038aab64a946d535901103da16b953c8c9cc9c961dadcbf3609ed6428d23/pyzmq-27.1.0-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:452631b640340c928fa343801b0d07eb0c3789a5ffa843f6e1a9cee0ba4eb4fc", size = 1306279, upload-time = "2025-09-08T23:08:03.807Z" }, + { url = "https://files.pythonhosted.org/packages/e8/5e/c3c49fdd0f535ef45eefcc16934648e9e59dace4a37ee88fc53f6cd8e641/pyzmq-27.1.0-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1c179799b118e554b66da67d88ed66cd37a169f1f23b5d9f0a231b4e8d44a113", size = 895645, upload-time = "2025-09-08T23:08:05.301Z" }, + { url = "https://files.pythonhosted.org/packages/f8/e5/b0b2504cb4e903a74dcf1ebae157f9e20ebb6ea76095f6cfffea28c42ecd/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3837439b7f99e60312f0c926a6ad437b067356dc2bc2ec96eb395fd0fe804233", size = 652574, upload-time = "2025-09-08T23:08:06.828Z" }, + { url = "https://files.pythonhosted.org/packages/f8/9b/c108cdb55560eaf253f0cbdb61b29971e9fb34d9c3499b0e96e4e60ed8a5/pyzmq-27.1.0-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:43ad9a73e3da1fab5b0e7e13402f0b2fb934ae1c876c51d0afff0e7c052eca31", size = 840995, upload-time = "2025-09-08T23:08:08.396Z" }, + { url = "https://files.pythonhosted.org/packages/c2/bb/b79798ca177b9eb0825b4c9998c6af8cd2a7f15a6a1a4272c1d1a21d382f/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0de3028d69d4cdc475bfe47a6128eb38d8bc0e8f4d69646adfbcd840facbac28", size = 1642070, upload-time = "2025-09-08T23:08:09.989Z" }, + { url = "https://files.pythonhosted.org/packages/9c/80/2df2e7977c4ede24c79ae39dcef3899bfc5f34d1ca7a5b24f182c9b7a9ca/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:cf44a7763aea9298c0aa7dbf859f87ed7012de8bda0f3977b6fb1d96745df856", size = 2021121, upload-time = "2025-09-08T23:08:11.907Z" }, + { url = "https://files.pythonhosted.org/packages/46/bd/2d45ad24f5f5ae7e8d01525eb76786fa7557136555cac7d929880519e33a/pyzmq-27.1.0-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f30f395a9e6fbca195400ce833c731e7b64c3919aa481af4d88c3759e0cb7496", size = 1878550, upload-time = "2025-09-08T23:08:13.513Z" }, + { url = "https://files.pythonhosted.org/packages/e6/2f/104c0a3c778d7c2ab8190e9db4f62f0b6957b53c9d87db77c284b69f33ea/pyzmq-27.1.0-cp312-abi3-win32.whl", hash = "sha256:250e5436a4ba13885494412b3da5d518cd0d3a278a1ae640e113c073a5f88edd", size = 559184, upload-time = "2025-09-08T23:08:15.163Z" }, + { url = "https://files.pythonhosted.org/packages/fc/7f/a21b20d577e4100c6a41795842028235998a643b1ad406a6d4163ea8f53e/pyzmq-27.1.0-cp312-abi3-win_amd64.whl", hash = "sha256:9ce490cf1d2ca2ad84733aa1d69ce6855372cb5ce9223802450c9b2a7cba0ccf", size = 619480, upload-time = "2025-09-08T23:08:17.192Z" }, + { url = "https://files.pythonhosted.org/packages/78/c2/c012beae5f76b72f007a9e91ee9401cb88c51d0f83c6257a03e785c81cc2/pyzmq-27.1.0-cp312-abi3-win_arm64.whl", hash = "sha256:75a2f36223f0d535a0c919e23615fc85a1e23b71f40c7eb43d7b1dedb4d8f15f", size = 552993, upload-time = "2025-09-08T23:08:18.926Z" }, + { url = "https://files.pythonhosted.org/packages/60/cb/84a13459c51da6cec1b7b1dc1a47e6db6da50b77ad7fd9c145842750a011/pyzmq-27.1.0-cp313-cp313-android_24_arm64_v8a.whl", hash = "sha256:93ad4b0855a664229559e45c8d23797ceac03183c7b6f5b4428152a6b06684a5", size = 1122436, upload-time = "2025-09-08T23:08:20.801Z" }, + { url = "https://files.pythonhosted.org/packages/dc/b6/94414759a69a26c3dd674570a81813c46a078767d931a6c70ad29fc585cb/pyzmq-27.1.0-cp313-cp313-android_24_x86_64.whl", hash = "sha256:fbb4f2400bfda24f12f009cba62ad5734148569ff4949b1b6ec3b519444342e6", size = 1156301, upload-time = "2025-09-08T23:08:22.47Z" }, + { url = "https://files.pythonhosted.org/packages/a5/ad/15906493fd40c316377fd8a8f6b1f93104f97a752667763c9b9c1b71d42d/pyzmq-27.1.0-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:e343d067f7b151cfe4eb3bb796a7752c9d369eed007b91231e817071d2c2fec7", size = 1341197, upload-time = "2025-09-08T23:08:24.286Z" }, + { url = "https://files.pythonhosted.org/packages/14/1d/d343f3ce13db53a54cb8946594e567410b2125394dafcc0268d8dda027e0/pyzmq-27.1.0-cp313-cp313t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:08363b2011dec81c354d694bdecaef4770e0ae96b9afea70b3f47b973655cc05", size = 897275, upload-time = "2025-09-08T23:08:26.063Z" }, + { url = "https://files.pythonhosted.org/packages/69/2d/d83dd6d7ca929a2fc67d2c3005415cdf322af7751d773524809f9e585129/pyzmq-27.1.0-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d54530c8c8b5b8ddb3318f481297441af102517602b569146185fa10b63f4fa9", size = 660469, upload-time = "2025-09-08T23:08:27.623Z" }, + { url = "https://files.pythonhosted.org/packages/3e/cd/9822a7af117f4bc0f1952dbe9ef8358eb50a24928efd5edf54210b850259/pyzmq-27.1.0-cp313-cp313t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6f3afa12c392f0a44a2414056d730eebc33ec0926aae92b5ad5cf26ebb6cc128", size = 847961, upload-time = "2025-09-08T23:08:29.672Z" }, + { url = "https://files.pythonhosted.org/packages/9a/12/f003e824a19ed73be15542f172fd0ec4ad0b60cf37436652c93b9df7c585/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c65047adafe573ff023b3187bb93faa583151627bc9c51fc4fb2c561ed689d39", size = 1650282, upload-time = "2025-09-08T23:08:31.349Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4a/e82d788ed58e9a23995cee70dbc20c9aded3d13a92d30d57ec2291f1e8a3/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:90e6e9441c946a8b0a667356f7078d96411391a3b8f80980315455574177ec97", size = 2024468, upload-time = "2025-09-08T23:08:33.543Z" }, + { url = "https://files.pythonhosted.org/packages/d9/94/2da0a60841f757481e402b34bf4c8bf57fa54a5466b965de791b1e6f747d/pyzmq-27.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:add071b2d25f84e8189aaf0882d39a285b42fa3853016ebab234a5e78c7a43db", size = 1885394, upload-time = "2025-09-08T23:08:35.51Z" }, + { url = "https://files.pythonhosted.org/packages/4f/6f/55c10e2e49ad52d080dc24e37adb215e5b0d64990b57598abc2e3f01725b/pyzmq-27.1.0-cp313-cp313t-win32.whl", hash = "sha256:7ccc0700cfdf7bd487bea8d850ec38f204478681ea02a582a8da8171b7f90a1c", size = 574964, upload-time = "2025-09-08T23:08:37.178Z" }, + { url = "https://files.pythonhosted.org/packages/87/4d/2534970ba63dd7c522d8ca80fb92777f362c0f321900667c615e2067cb29/pyzmq-27.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8085a9fba668216b9b4323be338ee5437a235fe275b9d1610e422ccc279733e2", size = 641029, upload-time = "2025-09-08T23:08:40.595Z" }, + { url = "https://files.pythonhosted.org/packages/f6/fa/f8aea7a28b0641f31d40dea42d7ef003fded31e184ef47db696bc74cd610/pyzmq-27.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:6bb54ca21bcfe361e445256c15eedf083f153811c37be87e0514934d6913061e", size = 561541, upload-time = "2025-09-08T23:08:42.668Z" }, + { url = "https://files.pythonhosted.org/packages/87/45/19efbb3000956e82d0331bafca5d9ac19ea2857722fa2caacefb6042f39d/pyzmq-27.1.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:ce980af330231615756acd5154f29813d553ea555485ae712c491cd483df6b7a", size = 1341197, upload-time = "2025-09-08T23:08:44.973Z" }, + { url = "https://files.pythonhosted.org/packages/48/43/d72ccdbf0d73d1343936296665826350cb1e825f92f2db9db3e61c2162a2/pyzmq-27.1.0-cp314-cp314t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:1779be8c549e54a1c38f805e56d2a2e5c009d26de10921d7d51cfd1c8d4632ea", size = 897175, upload-time = "2025-09-08T23:08:46.601Z" }, + { url = "https://files.pythonhosted.org/packages/2f/2e/a483f73a10b65a9ef0161e817321d39a770b2acf8bcf3004a28d90d14a94/pyzmq-27.1.0-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7200bb0f03345515df50d99d3db206a0a6bee1955fbb8c453c76f5bf0e08fb96", size = 660427, upload-time = "2025-09-08T23:08:48.187Z" }, + { url = "https://files.pythonhosted.org/packages/f5/d2/5f36552c2d3e5685abe60dfa56f91169f7a2d99bbaf67c5271022ab40863/pyzmq-27.1.0-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01c0e07d558b06a60773744ea6251f769cd79a41a97d11b8bf4ab8f034b0424d", size = 847929, upload-time = "2025-09-08T23:08:49.76Z" }, + { url = "https://files.pythonhosted.org/packages/c4/2a/404b331f2b7bf3198e9945f75c4c521f0c6a3a23b51f7a4a401b94a13833/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:80d834abee71f65253c91540445d37c4c561e293ba6e741b992f20a105d69146", size = 1650193, upload-time = "2025-09-08T23:08:51.7Z" }, + { url = "https://files.pythonhosted.org/packages/1c/0b/f4107e33f62a5acf60e3ded67ed33d79b4ce18de432625ce2fc5093d6388/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:544b4e3b7198dde4a62b8ff6685e9802a9a1ebf47e77478a5eb88eca2a82f2fd", size = 2024388, upload-time = "2025-09-08T23:08:53.393Z" }, + { url = "https://files.pythonhosted.org/packages/0d/01/add31fe76512642fd6e40e3a3bd21f4b47e242c8ba33efb6809e37076d9b/pyzmq-27.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cedc4c68178e59a4046f97eca31b148ddcf51e88677de1ef4e78cf06c5376c9a", size = 1885316, upload-time = "2025-09-08T23:08:55.702Z" }, + { url = "https://files.pythonhosted.org/packages/c4/59/a5f38970f9bf07cee96128de79590bb354917914a9be11272cfc7ff26af0/pyzmq-27.1.0-cp314-cp314t-win32.whl", hash = "sha256:1f0b2a577fd770aa6f053211a55d1c47901f4d537389a034c690291485e5fe92", size = 587472, upload-time = "2025-09-08T23:08:58.18Z" }, + { url = "https://files.pythonhosted.org/packages/70/d8/78b1bad170f93fcf5e3536e70e8fadac55030002275c9a29e8f5719185de/pyzmq-27.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:19c9468ae0437f8074af379e986c5d3d7d7bfe033506af442e8c879732bedbe0", size = 661401, upload-time = "2025-09-08T23:08:59.802Z" }, + { url = "https://files.pythonhosted.org/packages/81/d6/4bfbb40c9a0b42fc53c7cf442f6385db70b40f74a783130c5d0a5aa62228/pyzmq-27.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:dc5dbf68a7857b59473f7df42650c621d7e8923fb03fa74a526890f4d33cc4d7", size = 575170, upload-time = "2025-09-08T23:09:01.418Z" }, +] + [[package]] name = "referencing" version = "0.36.2" @@ -3373,6 +4150,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7b/44/4e421b96b67b2daff264473f7465db72fbdf36a07e05494f50300cc7b0c6/rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa", size = 3490, upload-time = "2021-05-12T16:37:52.536Z" }, ] +[[package]] +name = "rfc3986-validator" +version = "0.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/da/88/f270de456dd7d11dcc808abfa291ecdd3f45ff44e3b549ffa01b126464d0/rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055", size = 6760, upload-time = "2019-10-28T16:00:19.144Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/51/17023c0f8f1869d8806b979a2bffa3f861f26a3f1a66b094288323fba52f/rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9", size = 4242, upload-time = "2019-10-28T16:00:13.976Z" }, +] + +[[package]] +name = "rfc3987-syntax" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "lark" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2c/06/37c1a5557acf449e8e406a830a05bf885ac47d33270aec454ef78675008d/rfc3987_syntax-1.1.0.tar.gz", hash = "sha256:717a62cbf33cffdd16dfa3a497d81ce48a660ea691b1ddd7be710c22f00b4a0d", size = 14239, upload-time = "2025-07-18T01:05:05.015Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/71/44ce230e1b7fadd372515a97e32a83011f906ddded8d03e3c6aafbdedbb7/rfc3987_syntax-1.1.0-py3-none-any.whl", hash = "sha256:6c3d97604e4c5ce9f714898e05401a0445a641cfa276432b0a648c80856f6a3f", size = 8046, upload-time = "2025-07-18T01:05:03.843Z" }, +] + [[package]] name = "rich" version = "14.2.0" @@ -3660,6 +4458,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3d/03/941d4c31846e8a56f36e824a2b183d8a0896d54d2c5cdc59fa87b588e6d7/scoringrules-0.8.0-py3-none-any.whl", hash = "sha256:98eafc66fe83143d88da4b53d544896823390fdad01c9fd49424469eced3a716", size = 77078, upload-time = "2025-05-30T08:59:35.48Z" }, ] +[[package]] +name = "send2trash" +version = "1.8.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fd/3a/aec9b02217bb79b87bbc1a21bc6abc51e3d5dcf65c30487ac96c0908c722/Send2Trash-1.8.3.tar.gz", hash = "sha256:b18e7a3966d99871aefeb00cfbcfdced55ce4871194810fc71f4aa484b953abf", size = 17394, upload-time = "2024-04-07T00:01:09.267Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/b0/4562db6223154aa4e22f939003cb92514c79f3d4dccca3444253fd17f902/Send2Trash-1.8.3-py3-none-any.whl", hash = "sha256:0c31227e0bd08961c7665474a3d1ef7193929fedda4233843689baa056be46c9", size = 18072, upload-time = "2024-04-07T00:01:07.438Z" }, +] + [[package]] name = "setuptools" version = "80.9.0" @@ -3678,6 +4485,41 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, ] +[[package]] +name = "simplejson" +version = "3.20.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/41/f4/a1ac5ed32f7ed9a088d62a59d410d4c204b3b3815722e2ccfb491fa8251b/simplejson-3.20.2.tar.gz", hash = "sha256:5fe7a6ce14d1c300d80d08695b7f7e633de6cd72c80644021874d985b3393649", size = 85784, upload-time = "2025-09-26T16:29:36.64Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9d/9e/1a91e7614db0416885eab4136d49b7303de20528860ffdd798ce04d054db/simplejson-3.20.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4376d5acae0d1e91e78baeba4ee3cf22fbf6509d81539d01b94e0951d28ec2b6", size = 93523, upload-time = "2025-09-26T16:28:00.356Z" }, + { url = "https://files.pythonhosted.org/packages/5e/2b/d2413f5218fc25608739e3d63fe321dfa85c5f097aa6648dbe72513a5f12/simplejson-3.20.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f8fe6de652fcddae6dec8f281cc1e77e4e8f3575249e1800090aab48f73b4259", size = 75844, upload-time = "2025-09-26T16:28:01.756Z" }, + { url = "https://files.pythonhosted.org/packages/ad/f1/efd09efcc1e26629e120fef59be059ce7841cc6e1f949a4db94f1ae8a918/simplejson-3.20.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25ca2663d99328d51e5a138f22018e54c9162438d831e26cfc3458688616eca8", size = 75655, upload-time = "2025-09-26T16:28:03.037Z" }, + { url = "https://files.pythonhosted.org/packages/97/ec/5c6db08e42f380f005d03944be1af1a6bd501cc641175429a1cbe7fb23b9/simplejson-3.20.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12a6b2816b6cab6c3fd273d43b1948bc9acf708272074c8858f579c394f4cbc9", size = 150335, upload-time = "2025-09-26T16:28:05.027Z" }, + { url = "https://files.pythonhosted.org/packages/81/f5/808a907485876a9242ec67054da7cbebefe0ee1522ef1c0be3bfc90f96f6/simplejson-3.20.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac20dc3fcdfc7b8415bfc3d7d51beccd8695c3f4acb7f74e3a3b538e76672868", size = 158519, upload-time = "2025-09-26T16:28:06.5Z" }, + { url = "https://files.pythonhosted.org/packages/66/af/b8a158246834645ea890c36136584b0cc1c0e4b83a73b11ebd9c2a12877c/simplejson-3.20.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db0804d04564e70862ef807f3e1ace2cc212ef0e22deb1b3d6f80c45e5882c6b", size = 148571, upload-time = "2025-09-26T16:28:07.715Z" }, + { url = "https://files.pythonhosted.org/packages/20/05/ed9b2571bbf38f1a2425391f18e3ac11cb1e91482c22d644a1640dea9da7/simplejson-3.20.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:979ce23ea663895ae39106946ef3d78527822d918a136dbc77b9e2b7f006237e", size = 152367, upload-time = "2025-09-26T16:28:08.921Z" }, + { url = "https://files.pythonhosted.org/packages/81/2c/bad68b05dd43e93f77994b920505634d31ed239418eb6a88997d06599983/simplejson-3.20.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a2ba921b047bb029805726800819675249ef25d2f65fd0edb90639c5b1c3033c", size = 150205, upload-time = "2025-09-26T16:28:10.086Z" }, + { url = "https://files.pythonhosted.org/packages/69/46/90c7fc878061adafcf298ce60cecdee17a027486e9dce507e87396d68255/simplejson-3.20.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:12d3d4dc33770069b780cc8f5abef909fe4a3f071f18f55f6d896a370fd0f970", size = 151823, upload-time = "2025-09-26T16:28:11.329Z" }, + { url = "https://files.pythonhosted.org/packages/ab/27/b85b03349f825ae0f5d4f780cdde0bbccd4f06c3d8433f6a3882df887481/simplejson-3.20.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:aff032a59a201b3683a34be1169e71ddda683d9c3b43b261599c12055349251e", size = 158997, upload-time = "2025-09-26T16:28:12.917Z" }, + { url = "https://files.pythonhosted.org/packages/71/ad/d7f3c331fb930638420ac6d236db68e9f4c28dab9c03164c3cd0e7967e15/simplejson-3.20.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:30e590e133b06773f0dc9c3f82e567463df40598b660b5adf53eb1c488202544", size = 154367, upload-time = "2025-09-26T16:28:14.393Z" }, + { url = "https://files.pythonhosted.org/packages/f0/46/5c67324addd40fa2966f6e886cacbbe0407c03a500db94fb8bb40333fcdf/simplejson-3.20.2-cp312-cp312-win32.whl", hash = "sha256:8d7be7c99939cc58e7c5bcf6bb52a842a58e6c65e1e9cdd2a94b697b24cddb54", size = 74285, upload-time = "2025-09-26T16:28:15.931Z" }, + { url = "https://files.pythonhosted.org/packages/fa/c9/5cc2189f4acd3a6e30ffa9775bf09b354302dbebab713ca914d7134d0f29/simplejson-3.20.2-cp312-cp312-win_amd64.whl", hash = "sha256:2c0b4a67e75b945489052af6590e7dca0ed473ead5d0f3aad61fa584afe814ab", size = 75969, upload-time = "2025-09-26T16:28:17.017Z" }, + { url = "https://files.pythonhosted.org/packages/5e/9e/f326d43f6bf47f4e7704a4426c36e044c6bedfd24e072fb8e27589a373a5/simplejson-3.20.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:90d311ba8fcd733a3677e0be21804827226a57144130ba01c3c6a325e887dd86", size = 93530, upload-time = "2025-09-26T16:28:18.07Z" }, + { url = "https://files.pythonhosted.org/packages/35/28/5a4b8f3483fbfb68f3f460bc002cef3a5735ef30950e7c4adce9c8da15c7/simplejson-3.20.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:feed6806f614bdf7f5cb6d0123cb0c1c5f40407ef103aa935cffaa694e2e0c74", size = 75846, upload-time = "2025-09-26T16:28:19.12Z" }, + { url = "https://files.pythonhosted.org/packages/7a/4d/30dfef83b9ac48afae1cf1ab19c2867e27b8d22b5d9f8ca7ce5a0a157d8c/simplejson-3.20.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6b1d8d7c3e1a205c49e1aee6ba907dcb8ccea83651e6c3e2cb2062f1e52b0726", size = 75661, upload-time = "2025-09-26T16:28:20.219Z" }, + { url = "https://files.pythonhosted.org/packages/09/1d/171009bd35c7099d72ef6afd4bb13527bab469965c968a17d69a203d62a6/simplejson-3.20.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:552f55745044a24c3cb7ec67e54234be56d5d6d0e054f2e4cf4fb3e297429be5", size = 150579, upload-time = "2025-09-26T16:28:21.337Z" }, + { url = "https://files.pythonhosted.org/packages/61/ae/229bbcf90a702adc6bfa476e9f0a37e21d8c58e1059043038797cbe75b8c/simplejson-3.20.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2da97ac65165d66b0570c9e545786f0ac7b5de5854d3711a16cacbcaa8c472d", size = 158797, upload-time = "2025-09-26T16:28:22.53Z" }, + { url = "https://files.pythonhosted.org/packages/90/c5/fefc0ac6b86b9108e302e0af1cf57518f46da0baedd60a12170791d56959/simplejson-3.20.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f59a12966daa356bf68927fca5a67bebac0033cd18b96de9c2d426cd11756cd0", size = 148851, upload-time = "2025-09-26T16:28:23.733Z" }, + { url = "https://files.pythonhosted.org/packages/43/f1/b392952200f3393bb06fbc4dd975fc63a6843261705839355560b7264eb2/simplejson-3.20.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:133ae2098a8e162c71da97cdab1f383afdd91373b7ff5fe65169b04167da976b", size = 152598, upload-time = "2025-09-26T16:28:24.962Z" }, + { url = "https://files.pythonhosted.org/packages/f4/b4/d6b7279e52a3e9c0fa8c032ce6164e593e8d9cf390698ee981ed0864291b/simplejson-3.20.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7977640af7b7d5e6a852d26622057d428706a550f7f5083e7c4dd010a84d941f", size = 150498, upload-time = "2025-09-26T16:28:26.114Z" }, + { url = "https://files.pythonhosted.org/packages/62/22/ec2490dd859224326d10c2fac1353e8ad5c84121be4837a6dd6638ba4345/simplejson-3.20.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b530ad6d55e71fa9e93e1109cf8182f427a6355848a4ffa09f69cc44e1512522", size = 152129, upload-time = "2025-09-26T16:28:27.552Z" }, + { url = "https://files.pythonhosted.org/packages/33/ce/b60214d013e93dd9e5a705dcb2b88b6c72bada442a97f79828332217f3eb/simplejson-3.20.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bd96a7d981bf64f0e42345584768da4435c05b24fd3c364663f5fbc8fabf82e3", size = 159359, upload-time = "2025-09-26T16:28:28.667Z" }, + { url = "https://files.pythonhosted.org/packages/99/21/603709455827cdf5b9d83abe726343f542491ca8dc6a2528eb08de0cf034/simplejson-3.20.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f28ee755fadb426ba2e464d6fcf25d3f152a05eb6b38e0b4f790352f5540c769", size = 154717, upload-time = "2025-09-26T16:28:30.288Z" }, + { url = "https://files.pythonhosted.org/packages/3c/f9/dc7f7a4bac16cf7eb55a4df03ad93190e11826d2a8950052949d3dfc11e2/simplejson-3.20.2-cp313-cp313-win32.whl", hash = "sha256:472785b52e48e3eed9b78b95e26a256f59bb1ee38339be3075dad799e2e1e661", size = 74289, upload-time = "2025-09-26T16:28:31.809Z" }, + { url = "https://files.pythonhosted.org/packages/87/10/d42ad61230436735c68af1120622b28a782877146a83d714da7b6a2a1c4e/simplejson-3.20.2-cp313-cp313-win_amd64.whl", hash = "sha256:a1a85013eb33e4820286139540accbe2c98d2da894b2dcefd280209db508e608", size = 75972, upload-time = "2025-09-26T16:28:32.883Z" }, + { url = "https://files.pythonhosted.org/packages/05/5b/83e1ff87eb60ca706972f7e02e15c0b33396e7bdbd080069a5d1b53cf0d8/simplejson-3.20.2-py3-none-any.whl", hash = "sha256:3b6bb7fb96efd673eac2e4235200bfffdc2353ad12c54117e1e4e2fc485ac017", size = 57309, upload-time = "2025-09-26T16:29:35.312Z" }, +] + [[package]] name = "six" version = "1.17.0" @@ -3967,6 +4809,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" }, ] +[[package]] +name = "terminado" +version = "0.18.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess", marker = "os_name != 'nt'" }, + { name = "pywinpty", marker = "os_name == 'nt'" }, + { name = "tornado" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/11/965c6fd8e5cc254f1fe142d547387da17a8ebfd75a3455f637c663fb38a0/terminado-0.18.1.tar.gz", hash = "sha256:de09f2c4b85de4765f7714688fff57d3e75bad1f909b589fde880460c753fd2e", size = 32701, upload-time = "2024-03-12T14:34:39.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/9e/2064975477fdc887e47ad42157e214526dcad8f317a948dee17e1659a62f/terminado-0.18.1-py3-none-any.whl", hash = "sha256:a4468e1b37bb318f8a86514f65814e1afc977cf29b3992a4500d9dd305dcceb0", size = 14154, upload-time = "2024-03-12T14:34:36.569Z" }, +] + [[package]] name = "threadpoolctl" version = "3.6.0" @@ -3976,6 +4832,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb", size = 18638, upload-time = "2025-03-13T13:49:21.846Z" }, ] +[[package]] +name = "tinycss2" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "webencodings" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7a/fd/7a5ee21fd08ff70d3d33a5781c255cbe779659bd03278feb98b19ee550f4/tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7", size = 87085, upload-time = "2024-10-24T14:58:29.895Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610, upload-time = "2024-10-24T14:58:28.029Z" }, +] + [[package]] name = "toml-fmt-common" version = "1.1.0" @@ -4035,6 +4903,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, ] +[[package]] +name = "tornado" +version = "6.5.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7f/2e/3d22d478f27cb4b41edd4db7f10cd7846d0a28ea443342de3dba97035166/tornado-6.5.3.tar.gz", hash = "sha256:16abdeb0211796ffc73765bc0a20119712d68afeeaf93d1a3f2edf6b3aee8d5a", size = 513348, upload-time = "2025-12-11T04:16:42.225Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d3/e9/bf22f66e1d5d112c0617974b5ce86666683b32c09b355dfcd59f8d5c8ef6/tornado-6.5.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2dd7d7e8d3e4635447a8afd4987951e3d4e8d1fb9ad1908c54c4002aabab0520", size = 443860, upload-time = "2025-12-11T04:16:26.638Z" }, + { url = "https://files.pythonhosted.org/packages/ca/9c/594b631f0b8dc5977080c7093d1e96f1377c10552577d2c31bb0208c9362/tornado-6.5.3-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:5977a396f83496657779f59a48c38096ef01edfe4f42f1c0634b791dde8165d0", size = 442118, upload-time = "2025-12-11T04:16:28.32Z" }, + { url = "https://files.pythonhosted.org/packages/78/f6/685b869f5b5b9d9547571be838c6106172082751696355b60fc32a4988ed/tornado-6.5.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f72ac800be2ac73ddc1504f7aa21069a4137e8d70c387172c063d363d04f2208", size = 445700, upload-time = "2025-12-11T04:16:29.64Z" }, + { url = "https://files.pythonhosted.org/packages/91/4c/f0d19edf24912b7f21ae5e941f7798d132ad4d9b71441c1e70917a297265/tornado-6.5.3-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c43c4fc4f5419c6561cfb8b884a8f6db7b142787d47821e1a0e1296253458265", size = 445041, upload-time = "2025-12-11T04:16:30.799Z" }, + { url = "https://files.pythonhosted.org/packages/eb/2b/e02da94f4a4aef2bb3b923c838ef284a77548a5f06bac2a8682b36b4eead/tornado-6.5.3-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de8b3fed4b3afb65d542d7702ac8767b567e240f6a43020be8eaef59328f117b", size = 445270, upload-time = "2025-12-11T04:16:32.316Z" }, + { url = "https://files.pythonhosted.org/packages/58/e2/7a7535d23133443552719dba526dacbb7415f980157da9f14950ddb88ad6/tornado-6.5.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dbc4b4c32245b952566e17a20d5c1648fbed0e16aec3fc7e19f3974b36e0e47c", size = 445957, upload-time = "2025-12-11T04:16:33.913Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1f/9ff92eca81ff17a86286ec440dcd5eab0400326eb81761aa9a4eecb1ffb9/tornado-6.5.3-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:db238e8a174b4bfd0d0238b8cfcff1c14aebb4e2fcdafbf0ea5da3b81caceb4c", size = 445371, upload-time = "2025-12-11T04:16:35.093Z" }, + { url = "https://files.pythonhosted.org/packages/70/b1/1d03ae4526a393b0b839472a844397337f03c7f3a1e6b5c82241f0e18281/tornado-6.5.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:892595c100cd9b53a768cbfc109dfc55dec884afe2de5290611a566078d9692d", size = 445348, upload-time = "2025-12-11T04:16:36.679Z" }, + { url = "https://files.pythonhosted.org/packages/4b/7d/7c181feadc8941f418d0d26c3790ee34ffa4bd0a294bc5201d44ebd19c1e/tornado-6.5.3-cp39-abi3-win32.whl", hash = "sha256:88141456525fe291e47bbe1ba3ffb7982549329f09b4299a56813923af2bd197", size = 446433, upload-time = "2025-12-11T04:16:38.332Z" }, + { url = "https://files.pythonhosted.org/packages/34/98/4f7f938606e21d0baea8c6c39a7c8e95bdf8e50b0595b1bb6f0de2af7a6e/tornado-6.5.3-cp39-abi3-win_amd64.whl", hash = "sha256:ba4b513d221cc7f795a532c1e296f36bcf6a60e54b15efd3f092889458c69af1", size = 446842, upload-time = "2025-12-11T04:16:39.867Z" }, + { url = "https://files.pythonhosted.org/packages/7a/27/0e3fca4c4edf33fb6ee079e784c63961cd816971a45e5e4cacebe794158d/tornado-6.5.3-cp39-abi3-win_arm64.whl", hash = "sha256:278c54d262911365075dd45e0b6314308c74badd6ff9a54490e7daccdd5ed0ea", size = 445863, upload-time = "2025-12-11T04:16:41.099Z" }, +] + [[package]] name = "tqdm" version = "4.67.1" @@ -4108,6 +4995,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, ] +[[package]] +name = "uri-template" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/31/c7/0336f2bd0bcbada6ccef7aaa25e443c118a704f828a0620c6fa0207c1b64/uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7", size = 21678, upload-time = "2023-06-21T01:49:05.374Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/00/3fca040d7cf8a32776d3d81a00c8ee7457e00f80c649f1e4a863c8321ae9/uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363", size = 11140, upload-time = "2023-06-21T01:49:03.467Z" }, +] + [[package]] name = "url-normalize" version = "2.2.1" @@ -4247,6 +5143,33 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" }, ] +[[package]] +name = "webcolors" +version = "25.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/7a/eb316761ec35664ea5174709a68bbd3389de60d4a1ebab8808bfc264ed67/webcolors-25.10.0.tar.gz", hash = "sha256:62abae86504f66d0f6364c2a8520de4a0c47b80c03fc3a5f1815fedbef7c19bf", size = 53491, upload-time = "2025-10-31T07:51:03.977Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/cc/e097523dd85c9cf5d354f78310927f1656c422bd7b2613b2db3e3f9a0f2c/webcolors-25.10.0-py3-none-any.whl", hash = "sha256:032c727334856fc0b968f63daa252a1ac93d33db2f5267756623c210e57a4f1d", size = 14905, upload-time = "2025-10-31T07:51:01.778Z" }, +] + +[[package]] +name = "webencodings" +version = "0.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/02/ae6ceac1baeda530866a85075641cec12989bd8d31af6d5ab4a3e8c92f47/webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923", size = 9721, upload-time = "2017-04-05T20:21:34.189Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774, upload-time = "2017-04-05T20:21:32.581Z" }, +] + +[[package]] +name = "websocket-client" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/41/aa4bf9664e4cda14c3b39865b12251e8e7d239f4cd0e3cc1b6c2ccde25c1/websocket_client-1.9.0.tar.gz", hash = "sha256:9e813624b6eb619999a97dc7958469217c3176312b3a16a4bd1bc7e08a46ec98", size = 70576, upload-time = "2025-10-07T21:16:36.495Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/db/b10e48aa8fff7407e67470363eac595018441cf32d5e1001567a7aeba5d2/websocket_client-1.9.0-py3-none-any.whl", hash = "sha256:af248a825037ef591efbf6ed20cc5faa03d3b47b9e5a2230a529eeee1c1fc3ef", size = 82616, upload-time = "2025-10-07T21:16:34.951Z" }, +] + [[package]] name = "websockets" version = "15.0.1" @@ -4290,6 +5213,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498, upload-time = "2024-11-08T15:52:16.132Z" }, ] +[[package]] +name = "widgetsnbextension" +version = "4.0.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bd/f4/c67440c7fb409a71b7404b7aefcd7569a9c0d6bd071299bf4198ae7a5d95/widgetsnbextension-4.0.15.tar.gz", hash = "sha256:de8610639996f1567952d763a5a41af8af37f2575a41f9852a38f947eb82a3b9", size = 1097402, upload-time = "2025-11-01T21:15:55.178Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/0e/fa3b193432cfc60c93b42f3be03365f5f909d2b3ea410295cf36df739e31/widgetsnbextension-4.0.15-py3-none-any.whl", hash = "sha256:8156704e4346a571d9ce73b84bee86a29906c9abfd7223b7228a28899ccf3366", size = 2196503, upload-time = "2025-11-01T21:15:53.565Z" }, +] + [[package]] name = "win32-setctime" version = "1.2.0" From 297f186d7a9e4ff131804f9efee40f4ea6193a5e Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Wed, 17 Dec 2025 16:25:59 +0100 Subject: [PATCH 073/104] Integrated changes to beam structure Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_ensemble.py | 56 +++-------------- .../benchmarking/baselines/openstef4.py | 63 +++++++++---------- .../models/ensemble_forecasting_model.py | 4 +- 3 files changed, 43 insertions(+), 80 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 6cfe6f901..29ecb9b25 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -22,23 +22,18 @@ from datetime import timedelta from pathlib import Path -from pydantic_extra_types.coordinate import Coordinate -from pydantic_extra_types.country import CountryAlpha2 - -from openstef_beam.backtesting.backtest_forecaster import BacktestForecasterConfig, OpenSTEF4BacktestForecaster -from openstef_beam.benchmarking.benchmark_pipeline import BenchmarkContext +from openstef_beam.backtesting.backtest_forecaster import BacktestForecasterConfig +from openstef_beam.benchmarking.baselines import ( + create_openstef4_preset_backtest_forecaster, +) from openstef_beam.benchmarking.benchmarks.liander2024 import Liander2024Category, create_liander2024_benchmark_runner from openstef_beam.benchmarking.callbacks.strict_execution_callback import StrictExecutionCallback -from openstef_beam.benchmarking.models.benchmark_target import BenchmarkTarget from openstef_beam.benchmarking.storage.local_storage import LocalBenchmarkStorage from openstef_core.types import LeadTime, Q from openstef_meta.presets import ( EnsembleWorkflowConfig, - create_ensemble_workflow, ) from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage -from openstef_models.presets.forecasting_workflow import LocationConfig -from openstef_models.workflows import CustomForecastingWorkflow logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") @@ -78,7 +73,7 @@ else: storage = None -common_config = EnsembleWorkflowConfig( +workflow_config = EnsembleWorkflowConfig( model_id="common_model_", ensemble_type=ensemble_type, base_models=base_models, # type: ignore @@ -112,42 +107,6 @@ ) -def _target_forecaster_factory( - context: BenchmarkContext, - target: BenchmarkTarget, -) -> OpenSTEF4BacktestForecaster: - # Factory function that creates a forecaster for a given target. - prefix = context.run_name - base_config = common_config - - def _create_workflow() -> CustomForecastingWorkflow: - # Create a new workflow instance with fresh model. - return create_ensemble_workflow( - config=base_config.model_copy( - update={ - "model_id": f"{prefix}_{target.name}", - "location": LocationConfig( - name=target.name, - description=target.description, - coordinate=Coordinate( - latitude=target.latitude, - longitude=target.longitude, - ), - country_code=CountryAlpha2("NL"), - ), - } - ) - ) - - return OpenSTEF4BacktestForecaster( - config=backtest_config, - workflow_factory=_create_workflow, - debug=False, - contributions=False, - cache_dir=OUTPUT_PATH / "cache" / f"{context.run_name}_{target.name}", - ) - - if __name__ == "__main__": start_time = time.time() create_liander2024_benchmark_runner( @@ -155,7 +114,10 @@ def _create_workflow() -> CustomForecastingWorkflow: data_dir=Path("../data/liander2024-energy-forecasting-benchmark"), callbacks=[StrictExecutionCallback()], ).run( - forecaster_factory=_target_forecaster_factory, + forecaster_factory=create_openstef4_preset_backtest_forecaster( + workflow_config=workflow_config, + cache_dir=OUTPUT_PATH / "cache", + ), run_name=model, n_processes=N_PROCESSES, filter_args=BENCHMARK_FILTER, diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py index 82fc162cd..9019a2156 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py @@ -32,6 +32,7 @@ from openstef_core.exceptions import FlatlinerDetectedError, NotFittedError from openstef_core.types import Q from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel +from openstef_meta.presets import EnsembleWorkflowConfig, create_ensemble_workflow from openstef_models.presets import ForecastingWorkflowConfig from openstef_models.workflows.custom_forecasting_workflow import ( CustomForecastingWorkflow, @@ -57,10 +58,8 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): config: BacktestForecasterConfig = Field( description="Configuration for the backtest forecaster interface", ) - workflow_factory: Callable[[WorkflowCreationContext], CustomForecastingWorkflow] = ( - Field( - description="Factory function that creates a new CustomForecastingWorkflow instance", - ) + workflow_factory: Callable[[WorkflowCreationContext], CustomForecastingWorkflow] = Field( + description="Factory function that creates a new CustomForecastingWorkflow instance", ) cache_dir: Path = Field( description="Directory to use for caching model artifacts during backtesting", @@ -112,9 +111,7 @@ def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: if self.debug: id_str = data.horizon.strftime("%Y%m%d%H%M%S") - training_data.to_parquet( - path=self.cache_dir / f"debug_{id_str}_training.parquet" - ) + training_data.to_parquet(path=self.cache_dir / f"debug_{id_str}_training.parquet") try: # Use the workflow's fit method @@ -134,9 +131,7 @@ def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: ) @override - def predict( - self, data: RestrictedHorizonVersionedTimeSeries - ) -> TimeSeriesDataset | None: + def predict(self, data: RestrictedHorizonVersionedTimeSeries) -> TimeSeriesDataset | None: if self._is_flatliner_detected: self._logger.info("Skipping prediction due to prior flatliner detection") return None @@ -147,8 +142,7 @@ def predict( # Extract the dataset including both historical context and forecast period predict_data = data.get_window( start=data.horizon - self.config.predict_context_length, - end=data.horizon - + self.config.predict_length, # Include the forecast period + end=data.horizon + self.config.predict_length, # Include the forecast period available_before=data.horizon, # Only use data available at prediction time (prevents lookahead bias) ) @@ -163,23 +157,13 @@ def predict( if self.debug: id_str = data.horizon.strftime("%Y%m%d%H%M%S") - predict_data.to_parquet( - path=self.cache_dir / f"debug_{id_str}_predict.parquet" - ) - forecast.to_parquet( - path=self.cache_dir / f"debug_{id_str}_forecast.parquet" - ) + predict_data.to_parquet(path=self.cache_dir / f"debug_{id_str}_predict.parquet") + forecast.to_parquet(path=self.cache_dir / f"debug_{id_str}_forecast.parquet") - if self.contributions and isinstance( - self._workflow.model, EnsembleForecastingModel - ): + if self.contributions and isinstance(self._workflow.model, EnsembleForecastingModel): contr_str = data.horizon.strftime("%Y%m%d%H%M%S") - contributions = self._workflow.model.predict_contributions( - predict_data, forecast_start=data.horizon - ) - df = pd.concat( - [contributions, forecast.data.drop(columns=["load"])], axis=1 - ) + contributions = self._workflow.model.predict_contributions(predict_data, forecast_start=data.horizon) + df = pd.concat([contributions, forecast.data.drop(columns=["load"])], axis=1) df.to_parquet(path=self.cache_dir / f"contrib_{contr_str}_predict.parquet") return forecast @@ -190,7 +174,7 @@ class OpenSTEF4PresetBacktestForecaster(OpenSTEF4BacktestForecaster): def _preset_target_forecaster_factory( - base_config: ForecastingWorkflowConfig, + base_config: ForecastingWorkflowConfig | EnsembleWorkflowConfig, backtest_config: BacktestForecasterConfig, cache_dir: Path, context: BenchmarkContext, @@ -204,6 +188,23 @@ def _preset_target_forecaster_factory( def _create_workflow(context: WorkflowCreationContext) -> CustomForecastingWorkflow: # Create a new workflow instance with fresh model. + if isinstance(base_config, EnsembleWorkflowConfig): + return create_ensemble_workflow( + config=base_config.model_copy( + update={ + "model_id": f"{prefix}_{target.name}", + "location": LocationConfig( + name=target.name, + description=target.description, + coordinate=Coordinate( + latitude=target.latitude, + longitude=target.longitude, + ), + ), + } + ) + ) + return create_forecasting_workflow( config=base_config.model_copy( update={ @@ -230,7 +231,7 @@ def _create_workflow(context: WorkflowCreationContext) -> CustomForecastingWorkf def create_openstef4_preset_backtest_forecaster( - workflow_config: ForecastingWorkflowConfig, + workflow_config: ForecastingWorkflowConfig | EnsembleWorkflowConfig, backtest_config: BacktestForecasterConfig | None = None, cache_dir: Path = Path("cache"), ) -> ForecasterFactory[BenchmarkTarget]: @@ -253,9 +254,7 @@ def create_openstef4_preset_backtest_forecaster( requires_training=True, predict_length=timedelta(days=7), predict_min_length=timedelta(minutes=15), - predict_context_length=timedelta( - days=14 - ), # Context needed for lag features + predict_context_length=timedelta(days=14), # Context needed for lag features predict_context_min_coverage=0.5, training_context_length=timedelta(days=90), # Three months of training data training_context_min_coverage=0.5, diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 5c1d00bcd..9299e879d 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -605,7 +605,9 @@ def _predict_contributions_combiner( ) -> pd.DataFrame: features = self._transform_combiner_data(data=original_data) - return self.combiner.predict_contributions(ensemble_dataset, additional_features=features) + predictions = self.combiner.predict_contributions(ensemble_dataset, additional_features=features) + predictions[ensemble_dataset.target_column] = ensemble_dataset.target_series + return predictions def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: """Generate forecasts for the provided dataset. From 0ac62c89f25f5085389f74d954d01d95b2459f74 Mon Sep 17 00:00:00 2001 From: Lars van Someren Date: Thu, 18 Dec 2025 09:44:05 +0100 Subject: [PATCH 074/104] make PR compliant Signed-off-by: Lars van Someren --- examples/benchmarks/liander_2024_residual.py | 55 +++------------- .../models/ensemble_forecasting_model.py | 2 +- .../mlflow/mlflow_storage_callback.py | 65 +++++-------------- .../models/forecasting_model.py | 2 +- .../transforms/general/selector.py | 4 +- 5 files changed, 28 insertions(+), 100 deletions(-) diff --git a/examples/benchmarks/liander_2024_residual.py b/examples/benchmarks/liander_2024_residual.py index a8a42b113..aecb3de9e 100644 --- a/examples/benchmarks/liander_2024_residual.py +++ b/examples/benchmarks/liander_2024_residual.py @@ -22,23 +22,18 @@ from datetime import timedelta from pathlib import Path -from pydantic_extra_types.coordinate import Coordinate -from pydantic_extra_types.country import CountryAlpha2 - -from openstef_beam.backtesting.backtest_forecaster import BacktestForecasterConfig, OpenSTEF4BacktestForecaster -from openstef_beam.benchmarking.benchmark_pipeline import BenchmarkContext +from openstef_beam.backtesting.backtest_forecaster import BacktestForecasterConfig +from openstef_beam.benchmarking.baselines import ( + create_openstef4_preset_backtest_forecaster, +) from openstef_beam.benchmarking.benchmarks.liander2024 import Liander2024Category, create_liander2024_benchmark_runner from openstef_beam.benchmarking.callbacks.strict_execution_callback import StrictExecutionCallback -from openstef_beam.benchmarking.models.benchmark_target import BenchmarkTarget from openstef_beam.benchmarking.storage.local_storage import LocalBenchmarkStorage from openstef_core.types import LeadTime, Q from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage from openstef_models.presets import ( ForecastingWorkflowConfig, - create_forecasting_workflow, ) -from openstef_models.presets.forecasting_workflow import LocationConfig -from openstef_models.workflows import CustomForecastingWorkflow logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") @@ -104,50 +99,18 @@ ) -def _target_forecaster_factory( - context: BenchmarkContext, - target: BenchmarkTarget, -) -> OpenSTEF4BacktestForecaster: - # Factory function that creates a forecaster for a given target. - prefix = context.run_name - base_config = common_config - - def _create_workflow() -> CustomForecastingWorkflow: - # Create a new workflow instance with fresh model. - return create_forecasting_workflow( - config=base_config.model_copy( - update={ - "model_id": f"{prefix}_{target.name}", - "location": LocationConfig( - name=target.name, - description=target.description, - coordinate=Coordinate( - latitude=target.latitude, - longitude=target.longitude, - ), - country_code=CountryAlpha2("NL"), - ), - } - ) - ) - - return OpenSTEF4BacktestForecaster( - config=backtest_config, - workflow_factory=_create_workflow, - debug=False, - cache_dir=OUTPUT_PATH / "cache" / f"{context.run_name}_{target.name}", - ) - - if __name__ == "__main__": start_time = time.time() + # Run for XGBoost model create_liander2024_benchmark_runner( storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH / model), - data_dir=Path("../data/liander2024-energy-forecasting-benchmark"), # adjust path as needed callbacks=[StrictExecutionCallback()], ).run( - forecaster_factory=_target_forecaster_factory, + forecaster_factory=create_openstef4_preset_backtest_forecaster( + workflow_config=common_config, + cache_dir=OUTPUT_PATH / "cache", + ), run_name=model, n_processes=N_PROCESSES, filter_args=BENCHMARK_FILTER, diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 9299e879d..5394ab476 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -732,7 +732,7 @@ def restore_target[T: TimeSeriesDataset]( target_series = original_dataset.select_features([target_column]).select_version().data[target_column] def _transform_restore_target(df: pd.DataFrame) -> pd.DataFrame: - return df.assign(**{str(target_series.name): df.index.map(target_series)}) # pyright: ignore[reportUnknownMemberType] + return df.assign(**{str(target_series.name): df.index.map(target_series)}) # type: ignore return dataset.pipe_pandas(_transform_restore_target) diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index 0d66be6e1..91be9fcab 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -93,9 +93,7 @@ def on_fit_start( now = datetime.now(tz=UTC) end_time_millis = cast(float | None, run.info.end_time) run_end_datetime = ( - datetime.fromtimestamp(end_time_millis / 1000, tz=UTC) - if end_time_millis is not None - else None + datetime.fromtimestamp(end_time_millis / 1000, tz=UTC) if end_time_millis is not None else None ) self._logger.info( "Found previous MLflow run %s for model %s ended at %s", @@ -103,10 +101,7 @@ def on_fit_start( context.workflow.model_id, run_end_datetime, ) - if ( - run_end_datetime is not None - and (now - run_end_datetime) <= self.model_reuse_max_age - ): + if run_end_datetime is not None and (now - run_end_datetime) <= self.model_reuse_max_age: raise SkipFitting("Model is recent enough, skipping re-fit.") @override @@ -132,23 +127,17 @@ def on_fit_end( experiment_tags=context.workflow.experiment_tags, ) run_id: str = run.info.run_id - self._logger.info( - "Created MLflow run %s for model %s", run_id, context.workflow.model_id - ) + self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) # Store the model input - run_path = self.storage.get_artifacts_path( - model_id=context.workflow.model_id, run_id=run_id - ) + run_path = self.storage.get_artifacts_path(model_id=context.workflow.model_id, run_id=run_id) data_path = run_path / self.storage.data_path data_path.mkdir(parents=True, exist_ok=True) result.input_dataset.to_parquet(path=data_path / "data.parquet") self._logger.info("Stored training data at %s for run %s", data_path, run_id) # Store feature importance plot if enabled - if self.store_feature_importance_plot and isinstance( - context.workflow.model.forecaster, ExplainableForecaster - ): + if self.store_feature_importance_plot and isinstance(context.workflow.model.forecaster, ExplainableForecaster): fig = context.workflow.model.forecaster.plot_feature_importances() fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] @@ -166,17 +155,11 @@ def on_fit_end( if result.metrics_val is not None: metrics.update(_metrics_to_dict(metrics=result.metrics_val, prefix="val_")) if result.metrics_test is not None: - metrics.update( - _metrics_to_dict(metrics=result.metrics_test, prefix="test_") - ) + metrics.update(_metrics_to_dict(metrics=result.metrics_test, prefix="test_")) # Mark the run as finished - self.storage.finalize_run( - model_id=context.workflow.model_id, run_id=run_id, metrics=metrics - ) - self._logger.info( - "Stored MLflow run %s for model %s", run_id, context.workflow.model_id - ) + self.storage.finalize_run(model_id=context.workflow.model_id, run_id=run_id, metrics=metrics) + self._logger.info("Stored MLflow run %s for model %s", run_id, context.workflow.model_id) @override def on_predict_start( @@ -196,9 +179,7 @@ def on_predict_start( # Load the model from the latest run run_id: str = run.info.run_id - old_model = self.storage.load_run_model( - run_id=run_id, model_id=context.workflow.model_id - ) + old_model = self.storage.load_run_model(run_id=run_id, model_id=context.workflow.model_id) if not isinstance(old_model, ForecastingModel): self._logger.warning( @@ -214,9 +195,7 @@ def on_predict_start( context.workflow.model_id, ) - def _run_model_selection( - self, workflow: CustomForecastingWorkflow, result: ModelFitResult - ) -> None: + def _run_model_selection(self, workflow: CustomForecastingWorkflow, result: ModelFitResult) -> None: # Find the latest successful run for this model runs = self.storage.search_latest_runs(model_id=workflow.model_id) run = next(iter(runs), None) @@ -252,9 +231,7 @@ def _run_model_selection( if old_metrics is None: return - if self._check_is_new_model_better( - old_metrics=old_metrics, new_metrics=new_metrics - ): + if self._check_is_new_model_better(old_metrics=old_metrics, new_metrics=new_metrics): workflow.model = new_model else: workflow.model = old_model @@ -263,9 +240,7 @@ def _run_model_selection( self.model_selection_metric, run_id, ) - raise SkipFitting( - "New model did not improve monitored metric, skipping re-fit." - ) + raise SkipFitting("New model did not improve monitored metric, skipping re-fit.") def _try_load_model( self, @@ -273,9 +248,7 @@ def _try_load_model( workflow: CustomForecastingWorkflow, ) -> ForecastingModel | None: try: - old_model = self.storage.load_run_model( - run_id=run_id, model_id=workflow.model_id - ) + old_model = self.storage.load_run_model(run_id=run_id, model_id=workflow.model_id) except ModelNotFoundError: self._logger.warning( "Could not load model from previous run %s for model %s, skipping model selection", @@ -309,9 +282,7 @@ def _try_evaluate_model( ) return None - def _check_tags_compatible( - self, run_tags: dict[str, str], new_tags: dict[str, str], run_id: str - ) -> bool: + def _check_tags_compatible(self, run_tags: dict[str, str], new_tags: dict[str, str], run_id: str) -> bool: """Check if model tags are compatible, excluding mlflow.runName. Returns: @@ -363,13 +334,9 @@ def _check_is_new_model_better( ) match direction: - case "higher_is_better" if ( - new_metric >= old_metric / self.model_selection_old_model_penalty - ): + case "higher_is_better" if new_metric >= old_metric / self.model_selection_old_model_penalty: return True - case "lower_is_better" if ( - new_metric <= old_metric / self.model_selection_old_model_penalty - ): + case "lower_is_better" if new_metric <= old_metric / self.model_selection_old_model_penalty: return True case _: return False diff --git a/packages/openstef-models/src/openstef_models/models/forecasting_model.py b/packages/openstef-models/src/openstef_models/models/forecasting_model.py index f2de3c4b3..9acea87fa 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting_model.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting_model.py @@ -381,7 +381,7 @@ def restore_target[T: TimeSeriesDataset]( target_series = original_dataset.select_features([target_column]).select_version().data[target_column] def _transform_restore_target(df: pd.DataFrame) -> pd.DataFrame: - return df.assign(**{str(target_series.name): df.index.map(target_series)}) # pyright: ignore[reportUnknownMemberType] + return df.assign(**{str(target_series.name): df.index.map(target_series)}) # type: ignore return dataset.pipe_pandas(_transform_restore_target) diff --git a/packages/openstef-models/src/openstef_models/transforms/general/selector.py b/packages/openstef-models/src/openstef_models/transforms/general/selector.py index 00afbd68a..38f7c68bc 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/selector.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/selector.py @@ -74,9 +74,7 @@ def fit(self, data: TimeSeriesDataset) -> None: def transform(self, data: TimeSeriesDataset) -> TimeSeriesDataset: features = self.selection.resolve(data.feature_names) - transformed_data = data.data.drop( - columns=[col for col in data.feature_names if col not in features] - ) + transformed_data = data.data.drop(columns=[col for col in data.feature_names if col not in features]) return data.copy_with(data=transformed_data, is_sorted=True) From 10f7c7077443834da06a3c4b0ac2e8d0f8e3e8a7 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Tue, 17 Feb 2026 12:18:22 +0100 Subject: [PATCH 075/104] Fix when aggregation functions empty. Signed-off-by: Marnix van Lieshout --- .../transforms/time_domain/rolling_aggregates_adder.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py b/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py index d675d72e7..a874b9774 100644 --- a/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py +++ b/packages/openstef-models/src/openstef_models/transforms/time_domain/rolling_aggregates_adder.py @@ -94,6 +94,11 @@ def _compute_rolling_aggregates(self, series: pd.Series) -> pd.DataFrame: @override def fit(self, data: TimeSeriesDataset) -> None: """Compute and store last valid aggregates from training data for fallback.""" + if not self.aggregation_functions: + self._logger.warning("No aggregation functions specified. Skipping fit.") + self._is_fitted = True + return + validate_required_columns(df=data.data, required_columns=[self.feature]) rolling_df = self._compute_rolling_aggregates(data.data[self.feature]) From d2d2c3a726a573b495a671d0a237eaf9ebb3d844 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Tue, 17 Feb 2026 12:20:47 +0100 Subject: [PATCH 076/104] Improve naming Signed-off-by: Marnix van Lieshout --- .../src/openstef_beam/benchmarking/baselines/openstef4.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py index 3d6c0f8a1..6becf0fbf 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py @@ -161,11 +161,11 @@ def predict(self, data: RestrictedHorizonVersionedTimeSeries) -> TimeSeriesDatas forecast.to_parquet(path=self.cache_dir / f"debug_{id_str}_forecast.parquet") if self.contributions and isinstance(self._workflow.model, EnsembleForecastingModel): - contr_str = data.horizon.strftime("%Y%m%d%H%M%S") # TODO: Change variable naming... + id_str = data.horizon.strftime("%Y%m%d%H%M%S") contributions = self._workflow.model.predict_contributions(predict_data, forecast_start=data.horizon) df = pd.concat([contributions, forecast.data.drop(columns=["load"])], axis=1) - df.to_parquet(path=self.cache_dir / f"contrib_{contr_str}_predict.parquet") + df.to_parquet(path=self.cache_dir / f"contrib_{id_str}_predict.parquet") return forecast From e132e069bec3d85ef623005828a37cd8628ad242 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Tue, 17 Feb 2026 14:17:18 +0100 Subject: [PATCH 077/104] Cleaning up Signed-off-by: Marnix van Lieshout --- ...liander_2024_benchmark_xgboost_gblinear.py | 3 +- .../benchmarking/baselines/openstef4.py | 50 +++++++------------ .../src/openstef_meta/examples/__init__.py | 5 -- .../models/ensemble_forecasting_model.py | 2 +- .../forecast_combiners/forecast_combiner.py | 2 +- .../learned_weights_combiner.py | 8 +-- .../models/forecasting/residual_forecaster.py | 11 ++-- .../src/openstef_meta/utils/datasets.py | 2 +- 8 files changed, 31 insertions(+), 52 deletions(-) delete mode 100644 packages/openstef-meta/src/openstef_meta/examples/__init__.py diff --git a/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py b/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py index 00c07bdb7..7ce3e33e1 100644 --- a/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py +++ b/examples/benchmarks/liander_2024_benchmark_xgboost_gblinear.py @@ -34,13 +34,12 @@ logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") -OUTPUT_PATH = Path("./benchmark_results_test_convenience") +OUTPUT_PATH = Path("./benchmark_results") BENCHMARK_RESULTS_PATH_XGBOOST = OUTPUT_PATH / "XGBoost" BENCHMARK_RESULTS_PATH_GBLINEAR = OUTPUT_PATH / "GBLinear" N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark - # Model configuration FORECAST_HORIZONS = [LeadTime.from_string("P3D")] # Forecast horizon(s) PREDICTION_QUANTILES = [ diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py index 6becf0fbf..d7f99e0d1 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py @@ -70,7 +70,7 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): ) contributions: bool = Field( default=False, - description="When True, saves base Forecaster prediction contributions for ensemble models in cache_dir", + description="When True, saves base forecaster prediction contributions for ensemble models", ) _workflow: CustomForecastingWorkflow | None = PrivateAttr(default=None) @@ -188,39 +188,25 @@ def _preset_target_forecaster_factory( def _create_workflow(context: WorkflowCreationContext) -> CustomForecastingWorkflow: # Create a new workflow instance with fresh model. + location = LocationConfig( + name=target.name, + description=target.description, + coordinate=Coordinate( + latitude=target.latitude, + longitude=target.longitude, + ), + ) + + update = { + "model_id": f"{prefix}_{target.name}", + "location": location, + "run_name": context.step_name, + } + if isinstance(base_config, EnsembleWorkflowConfig): - return create_ensemble_workflow( - config=base_config.model_copy( - update={ - "model_id": f"{prefix}_{target.name}", - "location": LocationConfig( - name=target.name, - description=target.description, - coordinate=Coordinate( - latitude=target.latitude, - longitude=target.longitude, - ), - ), - } - ) - ) + return create_ensemble_workflow(config=base_config.model_copy(update=update)) - return create_forecasting_workflow( - config=base_config.model_copy( - update={ - "model_id": f"{prefix}_{target.name}", - "run_name": context.step_name, - "location": LocationConfig( - name=target.name, - description=target.description, - coordinate=Coordinate( - latitude=target.latitude, - longitude=target.longitude, - ), - ), - } - ) - ) + return create_forecasting_workflow(config=base_config.model_copy(update=update)) return OpenSTEF4BacktestForecaster( config=backtest_config, diff --git a/packages/openstef-meta/src/openstef_meta/examples/__init__.py b/packages/openstef-meta/src/openstef_meta/examples/__init__.py deleted file mode 100644 index 765b7c107..000000000 --- a/packages/openstef-meta/src/openstef_meta/examples/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""Examples for OpenSTEF Meta.""" diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 5394ab476..462fed13d 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -495,7 +495,7 @@ def prepare_input( forecaster_name: str = "", forecast_start: datetime | None = None, ) -> ForecastInputDataset: - """Prepare input data for forecastingfiltering. + """Prepare input data for forecasting and filtering. Args: data: Raw time series dataset to prepare for forecasting. diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index a8cd4864f..cb5a6bd2a 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -5,7 +5,7 @@ Provides the fundamental building blocks for implementing meta models in OpenSTEF. These mixins establish contracts that ensure consistent behavior across different meta model types -while ensuring full compatability with regular Forecasters. +while ensuring full compatibility with regular Forecasters. """ from abc import abstractmethod diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index d2b0fac48..00924b0b8 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -212,7 +212,7 @@ class WeightsCombinerConfig(ForecastCombinerConfig): ), ) - @property + @property # TODO: Check if this should be a property or a method def get_classifier(self) -> Classifier: """Returns the classifier instance from hyperparameters. @@ -238,18 +238,18 @@ class WeightsCombiner(ForecastCombiner): """ Config = WeightsCombinerConfig - LGBMHyperParams = LGBMCombinerHyperParams + LGBMHyperParams = LGBMCombinerHyperParams # TODO: do we need all these hyperparams here? RFHyperParams = RFCombinerHyperParams XGBHyperParams = XGBCombinerHyperParams LogisticHyperParams = LogisticCombinerHyperParams def __init__(self, config: WeightsCombinerConfig) -> None: - """Initialize the Weigths Combiner.""" + """Initialize the WeightsCombiner.""" self.quantiles = config.quantiles self.config = config self.hyperparams = config.hyperparams self._is_fitted: bool = False - self._is_fitted = False + self._label_encoder = LabelEncoder() self.hard_selection = config.hard_selection diff --git a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py index de44e003c..cebdf5069 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py @@ -3,9 +3,8 @@ # SPDX-License-Identifier: MPL-2.0 """Residual Forecaster. -Provides method that attempts to combine the advantages of a linear model (Extraplolation) -and tree-based model (Non-linear patterns). This is achieved by training a primary model, -typically linear, followed by a secondary model that learns to predict the residuals (errors) of the primary model. +This module implements a residual forecasting model that combines two forecasters: +A primary model followed by a secondary model that learns to predict the residuals (errors) of the primary model. """ import logging @@ -241,7 +240,7 @@ def _prepare_secondary_input( return predictions_quantiles - def _predict_secodary_model(self, data: ForecastInputDataset) -> ForecastDataset: + def _predict_secondary_model(self, data: ForecastInputDataset) -> ForecastDataset: predictions: dict[str, pd.Series] = {} for model in self._secondary_model: pred = model.predict(data=data) @@ -270,7 +269,7 @@ def predict(self, data: ForecastInputDataset) -> ForecastDataset: primary_predictions = self._primary_model.predict(data=data).data - secondary_predictions = self._predict_secodary_model(data=data).data + secondary_predictions = self._predict_secondary_model(data=data).data final_predictions = primary_predictions + secondary_predictions @@ -291,7 +290,7 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = Tru """ primary_predictions = self._primary_model.predict(data=data).data - secondary_predictions = self._predict_secodary_model(data=data).data + secondary_predictions = self._predict_secondary_model(data=data).data if not scale: primary_contributions = primary_predictions diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index e85c05b09..7dfaf678d 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -5,7 +5,7 @@ Validated dataset for ensemble forecasters first stage output. Implements methods to select quantile-specific ForecastInputDatasets for final learners. -Also supports constructing classifation targets based on pinball loss. +Also supports constructing classification targets based on pinball loss. """ from datetime import datetime, timedelta From 445cc5fbfe1fbfac2c6b4cb6cb05fbe3659dca1c Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Tue, 17 Feb 2026 15:18:23 +0100 Subject: [PATCH 078/104] Change how config's are made and used for combiner and stacking models. Signed-off-by: Marnix van Lieshout --- .../forecast_combiners/forecast_combiner.py | 13 +- .../learned_weights_combiner.py | 4 - .../forecast_combiners/stacking_combiner.py | 5 +- .../presets/forecasting_workflow.py | 114 +++++++++++------- .../test_learned_weights_combiner.py | 12 +- .../test_stacking_combiner.py | 6 +- .../presets/forecasting_workflow.py | 1 + 7 files changed, 88 insertions(+), 67 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index cb5a6bd2a..44a8e885e 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -1,11 +1,10 @@ # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 -"""Core meta model interfaces and configurations. +"""Forecast combiner base classes and configurations. -Provides the fundamental building blocks for implementing meta models in OpenSTEF. -These mixins establish contracts that ensure consistent behavior across different meta model types -while ensuring full compatibility with regular Forecasters. +Provides abstract base classes and configuration schemas for implementing +forecast combiners that aggregate predictions from multiple base forecasters. """ from abc import abstractmethod @@ -19,12 +18,6 @@ from openstef_core.mixins import HyperParams, Predictor from openstef_core.types import LeadTime, Quantile from openstef_meta.utils.datasets import EnsembleForecastDataset -from openstef_models.transforms.general.selector import Selector -from openstef_models.utils.feature_selection import FeatureSelection - -SELECTOR = Selector( - selection=FeatureSelection(include=None), -) class ForecastCombinerConfig(BaseConfig): diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 00924b0b8..65214365a 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -238,10 +238,6 @@ class WeightsCombiner(ForecastCombiner): """ Config = WeightsCombinerConfig - LGBMHyperParams = LGBMCombinerHyperParams # TODO: do we need all these hyperparams here? - RFHyperParams = RFCombinerHyperParams - XGBHyperParams = XGBCombinerHyperParams - LogisticHyperParams = LogisticCombinerHyperParams def __init__(self, config: WeightsCombinerConfig) -> None: """Initialize the WeightsCombiner.""" diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index d59811453..c4f234771 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -27,7 +27,7 @@ GBLinearForecaster, GBLinearHyperParams, ) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMHyperParams +from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams if TYPE_CHECKING: from openstef_models.models.forecasting.forecaster import Forecaster @@ -35,7 +35,6 @@ logger = logging.getLogger(__name__) ForecasterHyperParams = GBLinearHyperParams | LGBMHyperParams -ForecasterType = GBLinearForecaster | LGBMForecaster class StackingCombinerConfig(ForecastCombinerConfig): @@ -89,8 +88,6 @@ class StackingCombiner(ForecastCombiner): """Combines base Forecaster predictions per quantile into final predictions using a regression approach.""" Config = StackingCombinerConfig - LGBMHyperParams = LGBMHyperParams - GBLinearHyperParams = GBLinearHyperParams def __init__( self, diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 52568b3a1..df42c61da 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -24,9 +24,17 @@ from openstef_core.mixins.transform import Transform, TransformPipeline from openstef_core.types import LeadTime, Q, Quantile, QuantileOrGlobal from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel -from openstef_meta.models.forecast_combiners.learned_weights_combiner import WeightsCombiner +from openstef_meta.models.forecast_combiners.learned_weights_combiner import ( + LGBMCombinerHyperParams, + LogisticCombinerHyperParams, + RFCombinerHyperParams, + WeightsCombiner, + XGBCombinerHyperParams, +) from openstef_meta.models.forecast_combiners.rules_combiner import RulesCombiner -from openstef_meta.models.forecast_combiners.stacking_combiner import StackingCombiner +from openstef_meta.models.forecast_combiners.stacking_combiner import ( + StackingCombiner, +) from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster from openstef_models.integrations.mlflow import MLFlowStorage from openstef_models.mixins.model_serializer import ModelIdentifier @@ -118,6 +126,34 @@ class EnsembleWorkflowConfig(BaseConfig): description="Hyperparameters for Residual forecaster.", ) + # Learned weights combiner hyperparameters + lgbm_combiner_hyperparams: LGBMCombinerHyperParams = Field( + default=LGBMCombinerHyperParams(), + description="Hyperparameters for LightGBM combiner.", + ) + rf_combiner_hyperparams: RFCombinerHyperParams = Field( + default=RFCombinerHyperParams(), + description="Hyperparameters for Random Forest combiner.", + ) + xgboost_combiner_hyperparams: XGBCombinerHyperParams = Field( + default=XGBCombinerHyperParams(), + description="Hyperparameters for XGBoost combiner.", + ) + logistic_combiner_hyperparams: LogisticCombinerHyperParams = Field( + default=LogisticCombinerHyperParams(), + description="Hyperparameters for Logistic Regression combiner.", + ) + + # Stacking combiner hyperparameters + stacking_lgbm_combiner_hyperparams: LGBMForecaster.HyperParams = Field( + default=LGBMForecaster.HyperParams(), + description="Hyperparameters for LightGBM stacking combiner.", + ) + stacking_gblinear_combiner_hyperparams: GBLinearForecaster.HyperParams = Field( + default=GBLinearForecaster.HyperParams(), + description="Hyperparameters for GBLinear stacking combiner.", + ) + # Data properties target_column: str = Field(default="load", description="Name of the target variable column in datasets.") energy_price_column: str = Field( @@ -167,6 +203,7 @@ class EnsembleWorkflowConfig(BaseConfig): default=FeatureSelection(include=None, exclude=None), description="Feature selection for which features to clip.", ) + # TODO: Add sample weight method parameter sample_weight_scale_percentile: int = Field( default=95, description="Percentile of target values used as scaling reference. " @@ -178,6 +215,10 @@ class EnsembleWorkflowConfig(BaseConfig): "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " "Note: Defaults to 1.0 for gblinear congestion models.", ) + sample_weight_floor: float = Field( + default=0.1, + description="Minimum weight value to ensure all samples contribute to training.", + ) forecast_combiner_sample_weight_exponent: float = Field( default=0, @@ -185,11 +226,6 @@ class EnsembleWorkflowConfig(BaseConfig): "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values.", ) - sample_weight_floor: float = Field( - default=0.1, - description="Minimum weight value to ensure all samples contribute to training.", - ) - # Data splitting strategy data_splitter: DataSplitter = Field( default=DataSplitter( @@ -304,7 +340,7 @@ def feature_standardizers(config: EnsembleWorkflowConfig) -> list[Transform[Time ) -def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: # noqa: C901, PLR0912, PLR0915 +def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: # noqa: C901, PLR0912 """Create an ensemble forecasting workflow from configuration. Args: @@ -415,61 +451,53 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin raise ValueError(msg) # Build combiner - # Case: Ensemble type, combiner model match (config.ensemble_type, config.combiner_model): case ("learned_weights", "lgbm"): - combiner_hp = WeightsCombiner.LGBMHyperParams() - combiner_config = WeightsCombiner.Config( - hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles - ) combiner = WeightsCombiner( - config=combiner_config, + config=WeightsCombiner.Config( + hyperparams=config.lgbm_combiner_hyperparams, horizons=config.horizons, quantiles=config.quantiles + ) ) case ("learned_weights", "rf"): - combiner_hp = WeightsCombiner.RFHyperParams() - combiner_config = WeightsCombiner.Config( - hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles - ) combiner = WeightsCombiner( - config=combiner_config, + config=WeightsCombiner.Config( + hyperparams=config.rf_combiner_hyperparams, horizons=config.horizons, quantiles=config.quantiles + ) ) case ("learned_weights", "xgboost"): - combiner_hp = WeightsCombiner.XGBHyperParams() - combiner_config = WeightsCombiner.Config( - hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles - ) combiner = WeightsCombiner( - config=combiner_config, + config=WeightsCombiner.Config( + hyperparams=config.xgboost_combiner_hyperparams, + horizons=config.horizons, + quantiles=config.quantiles, + ) ) case ("learned_weights", "logistic"): - combiner_hp = WeightsCombiner.LogisticHyperParams() - combiner_config = WeightsCombiner.Config( - hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles - ) combiner = WeightsCombiner( - config=combiner_config, + config=WeightsCombiner.Config( + hyperparams=config.logistic_combiner_hyperparams, + horizons=config.horizons, + quantiles=config.quantiles, + ) ) case ("stacking", "lgbm"): - combiner_hp = StackingCombiner.LGBMHyperParams() - combiner_config = StackingCombiner.Config( - hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles - ) combiner = StackingCombiner( - config=combiner_config, + config=StackingCombiner.Config( + hyperparams=config.stacking_lgbm_combiner_hyperparams, + horizons=config.horizons, + quantiles=config.quantiles, + ) ) case ("stacking", "gblinear"): - combiner_hp = StackingCombiner.GBLinearHyperParams(reg_alpha=0.0, reg_lambda=0.0) - combiner_config = StackingCombiner.Config( - hyperparams=combiner_hp, horizons=config.horizons, quantiles=config.quantiles - ) combiner = StackingCombiner( - config=combiner_config, + config=StackingCombiner.Config( + hyperparams=config.stacking_gblinear_combiner_hyperparams, + horizons=config.horizons, + quantiles=config.quantiles, + ) ) case ("rules", _): - combiner_config = RulesCombiner.Config(horizons=config.horizons, quantiles=config.quantiles) - combiner = RulesCombiner( - config=combiner_config, - ) + combiner = RulesCombiner(config=RulesCombiner.Config(horizons=config.horizons, quantiles=config.quantiles)) case _: msg = f"Unsupported ensemble and combiner combination: {config.ensemble_type}, {config.combiner_model}" raise ValueError(msg) diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py index ac7a4c380..704f9b7d0 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py @@ -9,8 +9,12 @@ from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q from openstef_meta.models.forecast_combiners.learned_weights_combiner import ( + LGBMCombinerHyperParams, + LogisticCombinerHyperParams, + RFCombinerHyperParams, WeightsCombiner, WeightsCombinerConfig, + XGBCombinerHyperParams, ) from openstef_meta.utils.datasets import EnsembleForecastDataset @@ -25,13 +29,13 @@ def classifier(request: pytest.FixtureRequest) -> str: def config(classifier: str) -> WeightsCombinerConfig: """Fixture to create WeightsCombinerConfig based on the classifier type.""" if classifier == "lgbm": - hp = WeightsCombiner.LGBMHyperParams(n_leaves=5, n_estimators=10) + hp = LGBMCombinerHyperParams(n_leaves=5, n_estimators=10) elif classifier == "xgboost": - hp = WeightsCombiner.XGBHyperParams(n_estimators=10) + hp = XGBCombinerHyperParams(n_estimators=10) elif classifier == "rf": - hp = WeightsCombiner.RFHyperParams(n_estimators=10, n_leaves=5) + hp = RFCombinerHyperParams(n_estimators=10, n_leaves=5) elif classifier == "logistic": - hp = WeightsCombiner.LogisticHyperParams() + hp = LogisticCombinerHyperParams() else: msg = f"Unsupported classifier type: {classifier}" raise ValueError(msg) diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py index cb182e242..5504ee074 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py @@ -14,6 +14,8 @@ StackingCombinerConfig, ) from openstef_meta.utils.datasets import EnsembleForecastDataset +from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster +from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster @pytest.fixture(params=["lgbm", "gblinear"]) @@ -26,9 +28,9 @@ def regressor(request: pytest.FixtureRequest) -> str: def config(regressor: str) -> StackingCombinerConfig: """Fixture to create StackingCombinerConfig based on the regressor type.""" if regressor == "lgbm": - hp = StackingCombiner.LGBMHyperParams(num_leaves=5, n_estimators=10) + hp = LGBMForecaster.HyperParams(num_leaves=5, n_estimators=10) elif regressor == "gblinear": - hp = StackingCombiner.GBLinearHyperParams(n_steps=10) + hp = GBLinearForecaster.HyperParams(n_steps=10) else: msg = f"Unsupported regressor type: {regressor}" raise ValueError(msg) diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index d80363193..f1ad1f7dc 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -217,6 +217,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob default=FeatureSelection(include=None, exclude=None), description="Feature selection for which features to clip.", ) + # TODO: Add sample weight method parameter sample_weight_scale_percentile: int = Field( default=95, description="Percentile of target values used as scaling reference. " From f3bd62337cf0fe402362b755bc64ca2214ccdb42 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Tue, 17 Feb 2026 17:19:10 +0100 Subject: [PATCH 079/104] Cleaning up Signed-off-by: Marnix van Lieshout --- .../models/ensemble_forecasting_model.py | 10 ++- .../learned_weights_combiner.py | 16 ++-- .../forecast_combiners/rules_combiner.py | 10 ++- .../forecast_combiners/stacking_combiner.py | 12 +-- .../src/openstef_meta/utils/datasets.py | 73 +++++++------------ .../src/openstef_meta/utils/decision_tree.py | 3 +- .../models/forecast_combiners/conftest.py | 4 +- .../test_learned_weights_combiner.py | 12 ++- .../forecast_combiners/test_rules_combiner.py | 2 +- .../test_stacking_combiner.py | 12 ++- .../tests/unit/utils/test_datasets.py | 10 +-- 11 files changed, 73 insertions(+), 91 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 462fed13d..4f5ac1966 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -587,11 +587,15 @@ def _fit_combiner( return ModelFitResult( input_dataset=train_ensemble_dataset, - input_data_train=train_ensemble_dataset.select_quantile(quantile=self.config[0].quantiles[0]), - input_data_val=val_ensemble_dataset.select_quantile(quantile=self.config[0].quantiles[0]) + input_data_train=train_ensemble_dataset.get_base_predictions_for_quantile( + quantile=self.config[0].quantiles[0] + ), + input_data_val=val_ensemble_dataset.get_base_predictions_for_quantile(quantile=self.config[0].quantiles[0]) if val_ensemble_dataset else None, - input_data_test=test_ensemble_dataset.select_quantile(quantile=self.config[0].quantiles[0]) + input_data_test=test_ensemble_dataset.get_base_predictions_for_quantile( + quantile=self.config[0].quantiles[0] + ) if test_ensemble_dataset else None, metrics_train=metrics_train, diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 65214365a..79c211889 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -41,7 +41,6 @@ # Base classes for Learned Weights Final Learner Classifier = LGBMClassifier | XGBClassifier | LogisticRegression | DummyClassifier -ClassifierNames = Literal["lgbm", "xgb", "logistic_regression", "dummy"] class ClassifierParamsMixin: @@ -212,7 +211,6 @@ class WeightsCombinerConfig(ForecastCombinerConfig): ), ) - @property # TODO: Check if this should be a property or a method def get_classifier(self) -> Classifier: """Returns the classifier instance from hyperparameters. @@ -250,7 +248,7 @@ def __init__(self, config: WeightsCombinerConfig) -> None: self.hard_selection = config.hard_selection # Initialize a classifier per quantile - self.models: list[Classifier] = [config.get_classifier for _ in self.quantiles] + self.models: list[Classifier] = [config.get_classifier() for _ in self.quantiles] @override def fit( @@ -264,10 +262,10 @@ def fit( for i, q in enumerate(self.quantiles): # Data preparation - dataset = data.select_quantile_classification(quantile=q) + dataset = data.get_best_forecaster_labels(quantile=q) combined_data = combine_forecast_input_datasets( - dataset=dataset, - other=additional_features, + input_data=dataset, + additional_features=additional_features, ) input_data = combined_data.input_data() labels = combined_data.target_series @@ -277,7 +275,7 @@ def fit( # Balance classes, adjust with sample weights weights = compute_sample_weight("balanced", labels) * combined_data.sample_weight_series - self.models[i].fit(X=input_data, y=labels, sample_weight=weights) # type: ignore + self.models[i].fit(X=input_data, y=labels, sample_weight=weights) # pyright: ignore[reportUnknownMemberType] self._is_fitted = True @staticmethod @@ -354,7 +352,7 @@ def predict( # Generate predictions predictions = pd.DataFrame({ Quantile(q).format(): self._generate_predictions_quantile( - dataset=data.select_quantile(quantile=Quantile(q)), + dataset=data.get_base_predictions_for_quantile(quantile=Quantile(q)), additional_features=additional_features, model_index=i, ) @@ -383,7 +381,7 @@ def predict_contributions( # Generate predictions contribution_list = [ self._generate_contributions_quantile( - dataset=data.select_quantile(quantile=Quantile(q)), + dataset=data.get_base_predictions_for_quantile(quantile=Quantile(q)), additional_features=additional_features, model_index=i, ) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py index 93a12744f..e9a3b0364 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py @@ -118,13 +118,14 @@ def predict( raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") decisions = self._predict_tree( - additional_features.data, columns=data.select_quantile(quantile=self.quantiles[0]).data.columns + additional_features.data, + columns=data.get_base_predictions_for_quantile(quantile=self.quantiles[0]).data.columns, ) # Generate predictions predictions: list[pd.DataFrame] = [] for q in self.quantiles: - dataset = data.select_quantile(quantile=q) + dataset = data.get_base_predictions_for_quantile(quantile=q) preds = dataset.input_data().multiply(decisions).sum(axis=1) predictions.append(preds.to_frame(name=Quantile(q).format())) @@ -147,13 +148,14 @@ def predict_contributions( raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") decisions = self._predict_tree( - additional_features.data, columns=data.select_quantile(quantile=self.quantiles[0]).data.columns + additional_features.data, + columns=data.get_base_predictions_for_quantile(quantile=self.quantiles[0]).data.columns, ) # Generate predictions predictions: list[pd.DataFrame] = [] for q in self.quantiles: - dataset = data.select_quantile(quantile=q) + dataset = data.get_base_predictions_for_quantile(quantile=q) preds = dataset.input_data().multiply(decisions).sum(axis=1) predictions.append(preds.to_frame(name=Quantile(q).format())) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index c4f234771..5a2ad7fd6 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -154,13 +154,13 @@ def fit( for i, q in enumerate(self.quantiles): if additional_features is not None: - dataset = data.select_quantile(quantile=q) + dataset = data.get_base_predictions_for_quantile(quantile=q) input_data = self._combine_datasets( data=dataset, additional_features=additional_features, ) else: - input_data = data.select_quantile(quantile=q) + input_data = data.get_base_predictions_for_quantile(quantile=q) # Prepare input data by dropping rows with NaN target values target_dropna = partial(pd.DataFrame.dropna, subset=[input_data.target_column]) # pyright: ignore[reportUnknownMemberType] @@ -182,11 +182,11 @@ def predict( for i, q in enumerate(self.quantiles): if additional_features is not None: input_data = self._combine_datasets( - data=data.select_quantile(quantile=q), + data=data.get_base_predictions_for_quantile(quantile=q), additional_features=additional_features, ) else: - input_data = data.select_quantile(quantile=q) + input_data = data.get_base_predictions_for_quantile(quantile=q) if isinstance(self.models[i], GBLinearForecaster): feature_cols = [x for x in input_data.data.columns if x != data.target_column] @@ -215,11 +215,11 @@ def predict_contributions( for i, q in enumerate(self.quantiles): if additional_features is not None: input_data = self._combine_datasets( - data=data.select_quantile(quantile=q), + data=data.get_base_predictions_for_quantile(quantile=q), additional_features=additional_features, ) else: - input_data = data.select_quantile(quantile=q) + input_data = data.get_base_predictions_for_quantile(quantile=q) model = self.models[i] if not isinstance(model, ExplainableForecaster): raise NotImplementedError( diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index 7dfaf678d..8eeaa57bd 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -21,39 +21,39 @@ def combine_forecast_input_datasets( - dataset: ForecastInputDataset, other: ForecastInputDataset | None, join: str = "inner" + input_data: ForecastInputDataset, additional_features: ForecastInputDataset | None, join: str = "inner" ) -> ForecastInputDataset: - """Combine multiple TimeSeriesDatasets into a single dataset. + """Combine base forecaster predictions with optional additional features. Args: - dataset: First ForecastInputDataset. - other: Second ForecastInputDataset or None. + input_data: ForecastInputDataset containing base forecaster predictions. + additional_features: Optional ForecastInputDataset containing additional features to combine. join: Type of join to perform on the datasets. Defaults to "inner". Returns: - Combined ForecastDataset. + Combined ForecastInputDataset containing both input data and additional features. """ - if not isinstance(other, ForecastInputDataset): - return dataset + if not isinstance(additional_features, ForecastInputDataset): + return input_data if join != "inner": raise NotImplementedError("Only 'inner' join is currently supported.") - df_other = other.data - if dataset.target_column in df_other.columns: - df_other = df_other.drop(columns=[dataset.target_column]) + df_additional = additional_features.data + if input_data.target_column in df_additional.columns: + df_additional = df_additional.drop(columns=[input_data.target_column]) - df_one = dataset.data + df_input = input_data.data df = pd.concat( - [df_one, df_other], + [df_input, df_additional], axis=1, join="inner", ) return ForecastInputDataset( data=df, - sample_interval=dataset.sample_interval, - target_column=dataset.target_column, - sample_weight_column=dataset.sample_weight_column, - forecast_start=dataset.forecast_start, + sample_interval=input_data.sample_interval, + target_column=input_data.target_column, + sample_weight_column=input_data.sample_weight_column, + forecast_start=input_data.forecast_start, ) @@ -203,14 +203,19 @@ def column_pinball_losses(preds: pd.Series) -> pd.Series: return pinball_losses.idxmin(axis=1) - def select_quantile_classification(self, quantile: Quantile) -> ForecastInputDataset: - """Select classification target for a specific quantile. + def get_best_forecaster_labels(self, quantile: Quantile) -> ForecastInputDataset: + """Get labels indicating the best-performing base forecaster for each sample at a specific quantile. + + Creates a dataset where each sample's target is labeled with the name of the base forecaster + that performed best, determined by pinball loss. Used as classification target for training + the final learner. Args: quantile: Quantile to select. Returns: - Series containing binary indicators of best-performing base Forecasters for the specified quantile. + ForecastInputDataset where the target column contains labels of the best-performing + base forecaster for each sample. Raises: ValueError: If the target column is not found in the dataset. @@ -236,14 +241,14 @@ def select_quantile_classification(self, quantile: Quantile) -> ForecastInputDat forecast_start=self.forecast_start, ) - def select_quantile(self, quantile: Quantile) -> ForecastInputDataset: - """Select data for a specific quantile. + def get_base_predictions_for_quantile(self, quantile: Quantile) -> ForecastInputDataset: + """Get base forecaster predictions for a specific quantile. Args: quantile: Quantile to select. Returns: - ForecastInputDataset containing base predictions for the specified quantile. + ForecastInputDataset containing predictions from all base forecasters at the specified quantile. """ selected_columns = [f"{learner}_{quantile.format()}" for learner in self.forecaster_names] selected_columns.append(self.target_column) @@ -256,27 +261,3 @@ def select_quantile(self, quantile: Quantile) -> ForecastInputDataset: target_column=self.target_column, forecast_start=self.forecast_start, ) - - def select_forecaster(self, forecaster_name: str) -> ForecastDataset: - """Select data for a specific base Forecaster across all quantiles. - - Args: - forecaster_name: Name of the base Forecaster to select. - - Returns: - ForecastDataset containing predictions from the specified base Forecaster. - """ - selected_columns = [ - f"{forecaster_name}_{q.format()}" for q in self.quantiles if f"{forecaster_name}_{q.format()}" in self.data - ] - prediction_data = self.data[selected_columns].copy() - prediction_data.columns = [q.format() for q in self.quantiles] - - prediction_data[self.target_column] = self.data[self.target_column] - - return ForecastDataset( - data=prediction_data, - sample_interval=self.sample_interval, - forecast_start=self.forecast_start, - target_column=self.target_column, - ) diff --git a/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py b/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py index 8e3940dfa..d8ee1fd82 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py +++ b/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py @@ -140,4 +140,5 @@ def get_decision(self, row: pd.Series) -> str: msg = f"Invalid node type at index {current_idx}." raise TypeError(msg) - __all__ = ["Node", "Rule", "Decision", "DecisionTree"] + +__all__ = ["Decision", "DecisionTree", "Node", "Rule"] diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py index cf4edb982..5d251833b 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py @@ -15,7 +15,7 @@ @pytest.fixture def forecast_dataset_factory() -> Callable[[], ForecastDataset]: - def _make() -> ForecastDataset: + def _make_forecast_dataset() -> ForecastDataset: rng = np.random.default_rng() coef = rng.normal(0, 1, 3) @@ -46,7 +46,7 @@ def _make() -> ForecastDataset: target_column="load", ) - return _make + return _make_forecast_dataset @pytest.fixture diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py index 704f9b7d0..556937121 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py @@ -45,13 +45,11 @@ def config(classifier: str) -> WeightsCombinerConfig: ) -@pytest.fixture -def forecaster(config: WeightsCombinerConfig) -> WeightsCombiner: - return WeightsCombiner(config) - - -def test_initialization(forecaster: WeightsCombiner): - assert isinstance(forecaster, WeightsCombiner) +def test_initialization(config: WeightsCombinerConfig): + forecaster = WeightsCombiner(config) + assert forecaster.is_fitted is False + assert len(forecaster.models) == len(config.quantiles) + assert forecaster.quantiles == config.quantiles def test_quantile_weights_combiner__fit_predict( diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py index aa08bf59a..bcf95b42d 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py @@ -40,7 +40,7 @@ def test_quantile_weights_combiner__fit_predict( # Arrange expected_quantiles = config.quantiles forecaster = RulesCombiner(config=config) - additional_features = ensemble_dataset.select_quantile(Q(0.5)) + additional_features = ensemble_dataset.get_base_predictions_for_quantile(Q(0.5)) additional_features.data = additional_features.data.drop(columns=additional_features.target_column) additional_features.data.columns = ["feature1", "feature2"] diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py index 5504ee074..a506abf34 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py @@ -40,13 +40,11 @@ def config(regressor: str) -> StackingCombinerConfig: ) -@pytest.fixture -def forecaster(config: StackingCombinerConfig) -> StackingCombiner: - return StackingCombiner(config) - - -def test_initialization(forecaster: StackingCombiner): - assert isinstance(forecaster, StackingCombiner) +def test_initialization(config: StackingCombinerConfig): + forecaster = StackingCombiner(config) + assert forecaster.is_fitted is False + assert len(forecaster.models) == len(config.quantiles) + assert forecaster.quantiles == config.quantiles def test_quantile_weights_combiner__fit_predict( diff --git a/packages/openstef-meta/tests/unit/utils/test_datasets.py b/packages/openstef-meta/tests/unit/utils/test_datasets.py index efb64f3ea..1002bf5fd 100644 --- a/packages/openstef-meta/tests/unit/utils/test_datasets.py +++ b/packages/openstef-meta/tests/unit/utils/test_datasets.py @@ -100,17 +100,17 @@ def test_from_ensemble_output(ensemble_dataset: EnsembleForecastDataset): assert set(ensemble_dataset.quantiles) == {Quantile(0.1), Quantile(0.5), Quantile(0.9)} -def test_select_quantile(ensemble_dataset: EnsembleForecastDataset): +def test_get_base_predictions_for_quantile(ensemble_dataset: EnsembleForecastDataset): - dataset = ensemble_dataset.select_quantile(Quantile(0.5)) + dataset = ensemble_dataset.get_base_predictions_for_quantile(Quantile(0.5)) assert isinstance(dataset, ForecastInputDataset) assert dataset.data.shape == (3, 3) # 3 timestamps, 2 learners * 1 quantiles + target -def test_select_quantile_classification(ensemble_dataset: EnsembleForecastDataset): - - dataset = ensemble_dataset.select_quantile_classification(Quantile(0.5)) +def test_get_best_forecaster_labels(ensemble_dataset: EnsembleForecastDataset): + """Test get_best_forecaster_labels.""" + dataset = ensemble_dataset.get_best_forecaster_labels(Quantile(0.5)) assert isinstance(dataset, ForecastInputDataset) assert dataset.data.shape == (3, 3) # 3 timestamps, 2 learners * 1 quantiles + target From 38ff7f4992e9c2fc0c174e6387eee2dce8c1c489 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Wed, 18 Feb 2026 11:26:33 +0100 Subject: [PATCH 080/104] Move skops code to separate branch Signed-off-by: Marnix van Lieshout --- packages/openstef-models/pyproject.toml | 1 - .../integrations/skops/__init__.py | 15 --- .../skops/skops_model_serializer.py | 105 ------------------ .../mixins/model_serializer.py | 1 - .../tests/unit/integrations/skops/__init__.py | 5 - .../skops/test_skops_model_serializer.py | 72 ------------ uv.lock | 88 ++++++--------- 7 files changed, 32 insertions(+), 255 deletions(-) delete mode 100644 packages/openstef-models/src/openstef_models/integrations/skops/__init__.py delete mode 100644 packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py delete mode 100644 packages/openstef-models/tests/unit/integrations/skops/__init__.py delete mode 100644 packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py diff --git a/packages/openstef-models/pyproject.toml b/packages/openstef-models/pyproject.toml index 1b2f9d54b..e34f59a34 100644 --- a/packages/openstef-models/pyproject.toml +++ b/packages/openstef-models/pyproject.toml @@ -37,7 +37,6 @@ dependencies = [ "pycountry>=24.6.1", "scikit-learn>=1.7.1,<1.8", "scipy>=1.16.3,<2", - "skops>=0.13", ] optional-dependencies.xgb-cpu = [ diff --git a/packages/openstef-models/src/openstef_models/integrations/skops/__init__.py b/packages/openstef-models/src/openstef_models/integrations/skops/__init__.py deleted file mode 100644 index 16fcbd789..000000000 --- a/packages/openstef-models/src/openstef_models/integrations/skops/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Joblib-based model storage integration. - -Provides local file-based model persistence using Skops for serialization. -This integration provides a safe way for storing and loading ForecastingModel instances on -the local filesystem, making it suitable for development, testing, and -single-machine deployments. -""" - -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -from .skops_model_serializer import SkopsModelSerializer - -__all__ = ["SkopsModelSerializer"] diff --git a/packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py b/packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py deleted file mode 100644 index 6296d3abb..000000000 --- a/packages/openstef-models/src/openstef_models/integrations/skops/skops_model_serializer.py +++ /dev/null @@ -1,105 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Local model storage implementation using joblib serialization. - -Provides file-based persistence for ForecastingModel instances using joblib's -pickle-based serialization. This storage backend is suitable for development, -testing, and single-machine deployments where models need to be persisted -to the local filesystem. -""" - -from typing import BinaryIO, ClassVar, override - -from openstef_core.exceptions import MissingExtraError -from openstef_models.mixins.model_serializer import ModelSerializer - -try: - from skops.io import dump, get_untrusted_types, load -except ImportError as e: - raise MissingExtraError("joblib", package="openstef-models") from e - - -class SkopsModelSerializer(ModelSerializer): - """File-based model storage using joblib serialization. - - Provides persistent storage for ForecastingModel instances on the local - filesystem. Models are serialized using joblib and stored as pickle files - in the specified directory. - - This storage implementation is suitable for development, testing, and - single-machine deployments where simple file-based persistence is sufficient. - - Note: - joblib.dump() and joblib.load() are based on the Python pickle serialization model, - which means that arbitrary Python code can be executed when loading a serialized object - with joblib.load(). - - joblib.load() should therefore never be used to load objects from an untrusted source - or otherwise you will introduce a security vulnerability in your program. - - Invariants: - - Models are stored as .pkl files in the configured storage directory - - Model files use the pattern: {model_id}.pkl - - Storage directory is created automatically if it doesn't exist - - Load operations fail with ModelNotFoundError if model file doesn't exist - - Example: - Basic usage with model persistence: - - >>> from pathlib import Path - >>> from openstef_models.models.forecasting_model import ForecastingModel - >>> storage = LocalModelStorage(storage_dir=Path("./models")) # doctest: +SKIP - >>> storage.save_model("my_model", my_forecasting_model) # doctest: +SKIP - >>> loaded_model = storage.load_model("my_model") # doctest: +SKIP - """ - - extension: ClassVar[str] = ".skops" - - @override - def serialize(self, model: object, file: BinaryIO) -> None: - dump(model, file) # type: ignore[reportUnknownMemberType] - - @staticmethod - def _get_stateful_types() -> set[str]: - return { - "tests.unit.integrations.skops.test_skops_model_serializer.SimpleSerializableModel", - "openstef_core.mixins.predictor.BatchPredictor", - "openstef_models.models.forecasting.forecaster.Forecaster", - "openstef_models.models.forecasting.xgboost_forecaster.XGBoostForecaster", - "openstef_models.models.component_splitting_model.ComponentSplittingModel", - "openstef_core.mixins.transform.TransformPipeline", - "openstef_core.mixins.transform.TransformPipeline[EnergyComponentDataset]", - "openstef_core.mixins.transform.TransformPipeline[TimeSeriesDataset]", - "openstef_models.models.forecasting.lgbm_forecaster.LGBMForecaster", - "openstef_models.models.component_splitting.component_splitter.ComponentSplitter", - "openstef_models.models.forecasting_model.ForecastingModel", - "openstef_core.mixins.transform.Transform", - "openstef_core.mixins.transform.TransformPipeline[ForecastDataset]", - "openstef_core.mixins.predictor.Predictor", - "openstef_models.models.forecasting.lgbmlinear_forecaster.LGBMLinearForecaster", - } - - @override - def deserialize(self, file: BinaryIO) -> object: - """Load a model's state from a binary file and restore it. - - Returns: - The restored model instance. - - Raises: - ValueError: If no safe types are found in the serialized model. - """ - safe_types = self._get_stateful_types() - - # Weak security measure that checks a safe class is present. - # Can be improved to ensure no unsafe classes are present. - model_types: set[str] = set(get_untrusted_types(file=file)) # type: ignore - - if len(safe_types.intersection(model_types)) == 0: - raise ValueError("Deserialization aborted: No safe types found in the serialized model.") - - return load(file, trusted=list(model_types)) # type: ignore[reportUnknownMemberType] - - -__all__ = ["SkopsModelSerializer"] diff --git a/packages/openstef-models/src/openstef_models/mixins/model_serializer.py b/packages/openstef-models/src/openstef_models/mixins/model_serializer.py index 31bf67cff..4f6f15452 100644 --- a/packages/openstef-models/src/openstef_models/mixins/model_serializer.py +++ b/packages/openstef-models/src/openstef_models/mixins/model_serializer.py @@ -34,7 +34,6 @@ class ModelSerializer(BaseConfig, ABC): See Also: JoblibModelSerializer: Concrete implementation using joblib. - SkopsModelSerializer: Concrete implementation using skops. """ extension: ClassVar[str] diff --git a/packages/openstef-models/tests/unit/integrations/skops/__init__.py b/packages/openstef-models/tests/unit/integrations/skops/__init__.py deleted file mode 100644 index 63d543f53..000000000 --- a/packages/openstef-models/tests/unit/integrations/skops/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -__all__ = [] diff --git a/packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py b/packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py deleted file mode 100644 index 8d4bb9eb7..000000000 --- a/packages/openstef-models/tests/unit/integrations/skops/test_skops_model_serializer.py +++ /dev/null @@ -1,72 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -from __future__ import annotations - -from io import BytesIO -from typing import TYPE_CHECKING - -import pytest - -from openstef_core.mixins import Stateful -from openstef_core.types import LeadTime, Q -from openstef_models.integrations.skops.skops_model_serializer import SkopsModelSerializer -from openstef_models.models.forecasting.forecaster import ForecasterConfig -from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster -from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster -from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster - -if TYPE_CHECKING: - from openstef_models.models.forecasting.forecaster import Forecaster - - -class SimpleSerializableModel(Stateful): - """A simple model class that can be pickled for testing.""" - - def __init__(self) -> None: - self.target_column = "load" - self.is_fitted = True - - -def test_skops_model_serializer__roundtrip__preserves_model_integrity(): - """Test complete serialize/deserialize roundtrip preserves model state.""" - # Arrange - buffer = BytesIO() - serializer = SkopsModelSerializer() - model = SimpleSerializableModel() - - # Act - Serialize then deserialize - serializer.serialize(model, buffer) - buffer.seek(0) - restored_model = serializer.deserialize(buffer) - - # Assert - Model state should be identical - assert isinstance(restored_model, SimpleSerializableModel) - assert restored_model.target_column == model.target_column - assert restored_model.is_fitted == model.is_fitted - - -@pytest.mark.parametrize( - "forecaster_class", - [ - XGBoostForecaster, - LGBMForecaster, - LGBMLinearForecaster, - ], -) -def test_skops_works_with_different_forecasters(forecaster_class: type[Forecaster]): - buffer = BytesIO() - serializer = SkopsModelSerializer() - - config: ForecasterConfig = forecaster_class.Config(horizons=[LeadTime.from_string("PT12H")], quantiles=[Q(0.5)]) # type: ignore - assert isinstance(config, ForecasterConfig) - forecaster = forecaster_class(config=config) - - # Act - Serialize then deserialize - serializer.serialize(forecaster, buffer) - buffer.seek(0) - restored_model = serializer.deserialize(buffer) - - # Assert - Model state should be identical - assert isinstance(restored_model, forecaster.__class__) diff --git a/uv.lock b/uv.lock index dde23531c..e388a8f87 100644 --- a/uv.lock +++ b/uv.lock @@ -2016,15 +2016,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/65/bd/606e2f7eb0da042bffd8711a7427f7a28ca501aa6b1e3367ae3c7d4dc489/licensecheck-2025.1.0-py3-none-any.whl", hash = "sha256:eb20131cd8f877e5396958fd7b00cdb2225436c37a59dba4cf36d36079133a17", size = 26681, upload-time = "2025-03-26T22:58:03.145Z" }, ] -[[package]] -name = "logistro" -version = "2.0.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/08/90/bfd7a6fab22bdfafe48ed3c4831713cb77b4779d18ade5e248d5dbc0ca22/logistro-2.0.1.tar.gz", hash = "sha256:8446affc82bab2577eb02bfcbcae196ae03129287557287b6a070f70c1985047", size = 8398, upload-time = "2025-11-01T02:41:18.81Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl", hash = "sha256:06ffa127b9fb4ac8b1972ae6b2a9d7fde57598bf5939cd708f43ec5bba2d31eb", size = 8555, upload-time = "2025-11-01T02:41:17.587Z" }, -] - [[package]] name = "lightgbm" version = "4.6.0" @@ -2042,6 +2033,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5e/23/f8b28ca248bb629b9e08f877dd2965d1994e1674a03d67cd10c5246da248/lightgbm-4.6.0-py3-none-win_amd64.whl", hash = "sha256:37089ee95664b6550a7189d887dbf098e3eadab03537e411f52c63c121e3ba4b", size = 1451509, upload-time = "2025-02-15T04:03:01.515Z" }, ] +[[package]] +name = "logistro" +version = "2.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/08/90/bfd7a6fab22bdfafe48ed3c4831713cb77b4779d18ade5e248d5dbc0ca22/logistro-2.0.1.tar.gz", hash = "sha256:8446affc82bab2577eb02bfcbcae196ae03129287557287b6a070f70c1985047", size = 8398, upload-time = "2025-11-01T02:41:18.81Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/6aa79ba3570bddd1bf7e951c6123f806751e58e8cce736bad77b2cf348d7/logistro-2.0.1-py3-none-any.whl", hash = "sha256:06ffa127b9fb4ac8b1972ae6b2a9d7fde57598bf5939cd708f43ec5bba2d31eb", size = 8555, upload-time = "2025-11-01T02:41:17.587Z" }, +] + [[package]] name = "loguru" version = "0.7.3" @@ -2852,23 +2852,6 @@ requires-dist = [ { name = "sphinx-pyproject", specifier = ">=0.3.0" }, ] -[[package]] -name = "openstef-meta" -version = "0.0.0" -source = { editable = "packages/openstef-meta" } -dependencies = [ - { name = "openstef-beam" }, - { name = "openstef-core" }, - { name = "openstef-models" }, -] - -[package.metadata] -requires-dist = [ - { name = "openstef-beam", editable = "packages/openstef-beam" }, - { name = "openstef-core", editable = "packages/openstef-core" }, - { name = "openstef-models", editable = "packages/openstef-models" }, -] - [[package]] name = "openstef-examples" version = "0.0.0" @@ -2899,6 +2882,23 @@ requires-dist = [ ] provides-extras = ["tutorials"] +[[package]] +name = "openstef-meta" +version = "0.0.0" +source = { editable = "packages/openstef-meta" } +dependencies = [ + { name = "openstef-beam" }, + { name = "openstef-core" }, + { name = "openstef-models" }, +] + +[package.metadata] +requires-dist = [ + { name = "openstef-beam", editable = "packages/openstef-beam" }, + { name = "openstef-core", editable = "packages/openstef-core" }, + { name = "openstef-models", editable = "packages/openstef-models" }, +] + [[package]] name = "openstef-models" version = "0.0.0" @@ -2913,7 +2913,6 @@ dependencies = [ { name = "pycountry" }, { name = "scikit-learn" }, { name = "scipy" }, - { name = "skops" }, ] [package.optional-dependencies] @@ -2936,7 +2935,6 @@ requires-dist = [ { name = "pycountry", specifier = ">=24.6.1" }, { name = "scikit-learn", specifier = ">=1.7.1,<1.8" }, { name = "scipy", specifier = ">=1.16.3,<2" }, - { name = "skops", specifier = ">=0.13" }, { name = "xgboost", marker = "sys_platform == 'darwin' and extra == 'xgb-cpu'", specifier = ">=3,<4" }, { name = "xgboost", marker = "extra == 'xgb-gpu'", specifier = ">=3,<4" }, { name = "xgboost-cpu", marker = "(sys_platform == 'linux' and extra == 'xgb-cpu') or (sys_platform == 'win32' and extra == 'xgb-cpu')", specifier = ">=3,<4" }, @@ -3314,18 +3312,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b8/db/14bafcb4af2139e046d03fd00dea7873e48eafe18b7d2797e73d6681f210/prometheus_client-0.23.1-py3-none-any.whl", hash = "sha256:dd1913e6e76b59cfe44e7a4b83e01afc9873c1bdfd2ed8739f1e76aeca115f99", size = 61145, upload-time = "2025-09-18T20:47:23.875Z" }, ] -[[package]] -name = "prettytable" -version = "3.17.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "wcwidth" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/79/45/b0847d88d6cfeb4413566738c8bbf1e1995fad3d42515327ff32cc1eb578/prettytable-3.17.0.tar.gz", hash = "sha256:59f2590776527f3c9e8cf9fe7b66dd215837cca96a9c39567414cbc632e8ddb0", size = 67892, upload-time = "2025-11-14T17:33:20.212Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ee/8c/83087ebc47ab0396ce092363001fa37c17153119ee282700c0713a195853/prettytable-3.17.0-py3-none-any.whl", hash = "sha256:aad69b294ddbe3e1f95ef8886a060ed1666a0b83018bbf56295f6f226c43d287", size = 34433, upload-time = "2025-11-14T17:33:19.093Z" }, -] - [[package]] name = "prompt-toolkit" version = "3.0.52" @@ -3740,6 +3726,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e9/0b/2bdac7f9f7cddcd8096af44338548f1b7d5b797e3bcee27831c3752c9168/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97b6ba9923975667fab130c23bfd8ead66c4cdea4b66ae238de860a06afbb108", size = 1539351, upload-time = "2025-11-05T12:53:37.836Z" }, { url = "https://files.pythonhosted.org/packages/06/fc/48b4932570097a08ed6abc3a7455aacf9a15271ff0099c33d48e7f745eaa/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b16ce0874ef2aee219a2c0dacd7c0ce374562c19937bd9c767093ade91e5e452", size = 1429957, upload-time = "2025-11-05T12:53:39.382Z" }, { url = "https://files.pythonhosted.org/packages/f7/8d/52f52e039e5e1cfb33cf0f79651edd4d8ff7f6a83d1fb5dddf19bca9993a/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2daf29e4958c310c27ce7750741ef60f79b2f4164df26b1f2bdd063f2beddf4c", size = 1375776, upload-time = "2025-11-05T12:53:40.659Z" }, + { url = "https://files.pythonhosted.org/packages/58/7b/253e8c1d6bef9b5d041f8f104ae5ca70afc6a8bb23042b4272d30a67a2f9/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:e5da4b06eea58f89a93d7ae4426e31261d8c571dc5f71d8e13b9c7cdd8e8b253", size = 1317349, upload-time = "2026-01-07T23:30:20.34Z" }, + { url = "https://files.pythonhosted.org/packages/fa/40/8b876c4244fd8cace8e85afc9c30b806f940dde713d81d15318f98179b39/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_24_armv7l.whl", hash = "sha256:a28c3425785fb28cee1316fdf860eab403274626f2cc0b6df14ec7dcce0f66d2", size = 1271918, upload-time = "2026-01-07T23:30:21.581Z" }, + { url = "https://files.pythonhosted.org/packages/69/26/9c7ac96a390ec246892e427be7937400e9f8fe4e1f1cfa412bb919175f90/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_24_i686.whl", hash = "sha256:584e957a951330f777e632ccd12e87d6e8b2a7d113d4a2abbe1f84b8d85fce06", size = 1430842, upload-time = "2026-01-07T23:30:23.589Z" }, + { url = "https://files.pythonhosted.org/packages/ce/cf/a619fbb8b19cafe78b471e69549e306c6f42e1a1296f12899a12403e474a/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_24_ppc64le.whl", hash = "sha256:ad8d5c0825a37ebe3414b0590336700600ee718bf6a461fbd1be01dba68e7a99", size = 1573886, upload-time = "2026-01-07T23:30:25.191Z" }, + { url = "https://files.pythonhosted.org/packages/ab/19/da02ed5b31c71d617273459a34e9401c4303c1c2eb00487faa0b55f0c64c/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_24_s390x.whl", hash = "sha256:a5fd2882b6ae5f0b1651e9e30cb2ba1ad07d99359dc1d056999766ec20706400", size = 1447873, upload-time = "2026-01-07T23:30:27.691Z" }, + { url = "https://files.pythonhosted.org/packages/d4/2a/7eb3f9c1848d72186a9ace63429375e30544f69795b05be0998523e31ff0/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:56abdaa2afafe86436302220ea90bde025dcec066e348e7f9001a7663dc398f8", size = 1416637, upload-time = "2026-01-07T23:30:29.388Z" }, { url = "https://files.pythonhosted.org/packages/3b/24/bab927c42d88befbb063b229b44c9ce9b8a894f650ca14348969858878f5/pyproject_fmt-2.11.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:44b1edad216b33817d2651a15fb2793807fd7c9cfff1ce66d565c4885b89640e", size = 1379396, upload-time = "2025-11-05T12:53:41.857Z" }, { url = "https://files.pythonhosted.org/packages/09/fe/b98c2156775067e079ca8f2badbe93a5de431ccc061435534b76f11abc73/pyproject_fmt-2.11.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:08ccf565172179fc7f35a90f4541f68abcdbef7e7a4ea35fcead44f8cabe3e3a", size = 1506485, upload-time = "2025-11-05T12:53:43.108Z" }, { url = "https://files.pythonhosted.org/packages/8e/2f/bf0df9df04a1376d6d1dad6fc49eb41ffafe0c3e63565b2cde8b67a49886/pyproject_fmt-2.11.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:27a9af1fc8d2173deb7a0bbb8c368a585e7817bcbba6acf00922b73c76c8ee23", size = 1546050, upload-time = "2025-11-05T12:53:44.491Z" }, @@ -4549,22 +4541,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] -[[package]] -name = "skops" -version = "0.13.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "numpy" }, - { name = "packaging" }, - { name = "prettytable" }, - { name = "scikit-learn" }, - { name = "scipy" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/b5/0c/5ec987633e077dd0076178ea6ade2d6e57780b34afea0b497fb507d7a1ed/skops-0.13.0.tar.gz", hash = "sha256:66949fd3c95cbb5c80270fbe40293c0fe1e46cb4a921860e42584dd9c20ebeb1", size = 581312, upload-time = "2025-08-06T09:48:14.916Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/04/e8/6a2b2030f0689f894432b9c2f0357f2f3286b2a00474827e04b8fe9eea13/skops-0.13.0-py3-none-any.whl", hash = "sha256:55e2cccb18c86f5916e4cfe5acf55ed7b0eecddf08a151906414c092fa5926dc", size = 131200, upload-time = "2025-08-06T09:48:13.356Z" }, -] - [[package]] name = "smmap" version = "5.0.2" From 17f25285d0ad4b19f4af89cc1f6cccc3d441d76e Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Wed, 18 Feb 2026 11:40:16 +0100 Subject: [PATCH 081/104] Move rules combiner code to separate branch Signed-off-by: Marnix van Lieshout --- .../models/forecast_combiners/__init__.py | 3 - .../forecast_combiners/rules_combiner.py | 176 ------------------ .../presets/forecasting_workflow.py | 3 - .../src/openstef_meta/utils/__init__.py | 4 - .../src/openstef_meta/utils/decision_tree.py | 144 -------------- .../forecast_combiners/test_rules_combiner.py | 62 ------ .../tests/unit/utils/test_decision_tree.py | 45 ----- 7 files changed, 437 deletions(-) delete mode 100644 packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py delete mode 100644 packages/openstef-meta/src/openstef_meta/utils/decision_tree.py delete mode 100644 packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py delete mode 100644 packages/openstef-meta/tests/unit/utils/test_decision_tree.py diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py index 56a4cadff..90c9f3509 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/__init__.py @@ -13,7 +13,6 @@ WeightsCombinerConfig, XGBCombinerHyperParams, ) -from .rules_combiner import RulesCombiner, RulesCombinerConfig from .stacking_combiner import StackingCombiner, StackingCombinerConfig __all__ = [ @@ -22,8 +21,6 @@ "LGBMCombinerHyperParams", "LogisticCombinerHyperParams", "RFCombinerHyperParams", - "RulesCombiner", - "RulesCombinerConfig", "StackingCombiner", "StackingCombinerConfig", "WeightsCombiner", diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py deleted file mode 100644 index e9a3b0364..000000000 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/rules_combiner.py +++ /dev/null @@ -1,176 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Rules-based Meta Forecaster Module.""" - -import logging -from typing import cast, override - -import pandas as pd -from pydantic import Field, field_validator - -from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_core.mixins import HyperParams -from openstef_core.types import LeadTime, Quantile -from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig -from openstef_meta.utils.datasets import EnsembleForecastDataset -from openstef_meta.utils.decision_tree import Decision, DecisionTree - -logger = logging.getLogger(__name__) - - -class RulesLearnerHyperParams(HyperParams): - """HyperParams for Stacking Final Learner.""" - - decision_tree: DecisionTree = Field( - description="Decision tree defining the rules for the final learner.", - default=DecisionTree( - nodes=[Decision(idx=0, decision="LGBMForecaster")], - outcomes={"LGBMForecaster"}, - ), - ) - - -class RulesCombinerConfig(ForecastCombinerConfig): - """Configuration for Rules-based Forecast Combiner.""" - - hyperparams: HyperParams = Field( - description="Hyperparameters for the Rules-based final learner.", - default=RulesLearnerHyperParams(), - ) - - quantiles: list[Quantile] = Field( - default=[Quantile(0.5)], - description=( - "Probability levels for uncertainty estimation. Each quantile represents a confidence level " - "(e.g., 0.1 = 10th percentile, 0.5 = median, 0.9 = 90th percentile). " - "Models must generate predictions for all specified quantiles." - ), - min_length=1, - ) - - horizons: list[LeadTime] = Field( - default=..., - description=( - "Lead times for predictions, accounting for data availability and versioning cutoffs. " - "Each horizon defines how far ahead the model should predict." - ), - min_length=1, - ) - - @field_validator("hyperparams", mode="after") - @staticmethod - def _validate_hyperparams(v: HyperParams) -> HyperParams: - if not isinstance(v, RulesLearnerHyperParams): - raise TypeError("hyperparams must be an instance of RulesLearnerHyperParams.") - return v - - -class RulesCombiner(ForecastCombiner): - """Combines base Forecaster predictions per quantile into final predictions using hard-coded rules.""" - - Config = RulesCombinerConfig - - def __init__(self, config: RulesCombinerConfig) -> None: - """Initialize the Rules Learner. - - Args: - config: Configuration for the Rules Combiner. - """ - hyperparams = cast(RulesLearnerHyperParams, config.hyperparams) - self.tree = hyperparams.decision_tree - self.quantiles = config.quantiles - self.config = config - - @override - def fit( - self, - data: EnsembleForecastDataset, - data_val: EnsembleForecastDataset | None = None, - additional_features: ForecastInputDataset | None = None, - ) -> None: - # No fitting needed for rule-based final learner - # Check that additional features are provided - if additional_features is None: - raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") - - def _predict_tree(self, data: pd.DataFrame, columns: pd.Index) -> pd.DataFrame: - """Predict using the decision tree rules. - - Args: - data: DataFrame containing the additional features. - columns: Expected columns for the output DataFrame. - - Returns: - DataFrame with predictions for each quantile. - """ - predictions = data.apply(self.tree.get_decision, axis=1) - - return pd.get_dummies(predictions).reindex(columns=columns) - - @override - def predict( - self, - data: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None = None, - ) -> ForecastDataset: - if additional_features is None: - raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") - - decisions = self._predict_tree( - additional_features.data, - columns=data.get_base_predictions_for_quantile(quantile=self.quantiles[0]).data.columns, - ) - - # Generate predictions - predictions: list[pd.DataFrame] = [] - for q in self.quantiles: - dataset = data.get_base_predictions_for_quantile(quantile=q) - preds = dataset.input_data().multiply(decisions).sum(axis=1) - - predictions.append(preds.to_frame(name=Quantile(q).format())) - - # Concatenate predictions along columns to form a DataFrame with quantile columns - df = pd.concat(predictions, axis=1) - - return ForecastDataset( - data=df, - sample_interval=data.sample_interval, - ) - - @override - def predict_contributions( - self, - data: EnsembleForecastDataset, - additional_features: ForecastInputDataset | None = None, - ) -> pd.DataFrame: - if additional_features is None: - raise ValueError("Additional features must be provided for RulesForecastCombiner prediction.") - - decisions = self._predict_tree( - additional_features.data, - columns=data.get_base_predictions_for_quantile(quantile=self.quantiles[0]).data.columns, - ) - - # Generate predictions - predictions: list[pd.DataFrame] = [] - for q in self.quantiles: - dataset = data.get_base_predictions_for_quantile(quantile=q) - preds = dataset.input_data().multiply(decisions).sum(axis=1) - - predictions.append(preds.to_frame(name=Quantile(q).format())) - - # Concatenate predictions along columns to form a DataFrame with quantile columns - return pd.concat(predictions, axis=1) - - @property - def is_fitted(self) -> bool: - """Check the Rules Final Learner is fitted.""" - return True - - -__all__ = [ - "RulesCombiner", - "RulesCombinerConfig", - "RulesLearnerHyperParams", -] diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index df42c61da..b3ab224ec 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -31,7 +31,6 @@ WeightsCombiner, XGBCombinerHyperParams, ) -from openstef_meta.models.forecast_combiners.rules_combiner import RulesCombiner from openstef_meta.models.forecast_combiners.stacking_combiner import ( StackingCombiner, ) @@ -496,8 +495,6 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin quantiles=config.quantiles, ) ) - case ("rules", _): - combiner = RulesCombiner(config=RulesCombiner.Config(horizons=config.horizons, quantiles=config.quantiles)) case _: msg = f"Unsupported ensemble and combiner combination: {config.ensemble_type}, {config.combiner_model}" raise ValueError(msg) diff --git a/packages/openstef-meta/src/openstef_meta/utils/__init__.py b/packages/openstef-meta/src/openstef_meta/utils/__init__.py index a6b9e93a4..616ee56d6 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/utils/__init__.py @@ -4,12 +4,8 @@ """Utility functions and classes for OpenSTEF Meta.""" -from .decision_tree import Decision, DecisionTree, Rule from .pinball_errors import calculate_pinball_errors __all__ = [ - "Decision", - "DecisionTree", - "Rule", "calculate_pinball_errors", ] diff --git a/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py b/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py deleted file mode 100644 index d8ee1fd82..000000000 --- a/packages/openstef-meta/src/openstef_meta/utils/decision_tree.py +++ /dev/null @@ -1,144 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""A simple decision tree implementation for making decisions based on feature rules.""" - -from typing import Literal - -import pandas as pd -from pydantic import BaseModel, Field, model_validator - - -class Node(BaseModel): - """A node in the decision tree, either a rule or a decision.""" - - idx: int = Field( - description="Index of the rule in the decision tree.", - ) - - -class Rule(Node): - """A single rule in the decision tree.""" - - idx: int = Field( - description="Index of the decision in the decision tree.", - ) - - rule_type: Literal["greater_than", "less_than"] = Field( - ..., - description="Type of the rule to apply.", - ) - feature_name: str = Field( - ..., - description="Name of the feature to which the rule applies.", - ) - - threshold: float | int = Field( - ..., - description="Threshold value for the rule.", - ) - - next_true: int = Field( - ..., - description="Index of the next rule if the condition is true.", - ) - - next_false: int = Field( - ..., - description="Index of the next rule if the condition is false.", - ) - - -class Decision(Node): - """A leaf decision in the decision tree.""" - - idx: int = Field( - description="Index of the decision in the decision tree.", - ) - - decision: str = Field( - ..., - description="The prediction value at this leaf.", - ) - - -class DecisionTree(BaseModel): - """A simple decision tree defined by a list of rules.""" - - nodes: list[Node] = Field( - ..., - description="List of rules that define the decision tree.", - ) - - outcomes: set[str] = Field( - ..., - description="Set of possible outcomes from the decision tree.", - ) - - @model_validator(mode="after") - def validate_tree_structure(self) -> "DecisionTree": - """Validate that the tree structure is correct. - - Raises: - ValueError: If tree is not built correctly. - - Returns: - The validated DecisionTree instance. - """ - node_idx = {node.idx for node in self.nodes} - if node_idx != set(range(len(self.nodes))): - raise ValueError("Rule indices must be consecutive starting from 0.") - - for node in self.nodes: - if isinstance(node, Rule): - if node.next_true not in node_idx: - msg = f"next_true index {node.next_true} not found in nodes." - raise ValueError(msg) - if node.next_false not in node_idx: - msg = f"next_false index {node.next_false} not found in nodes." - raise ValueError(msg) - if isinstance(node, Decision) and node.decision not in self.outcomes: - msg = f"Decision '{node.decision}' not in defined outcomes {self.outcomes}." - raise ValueError(msg) - - return self - - def get_decision(self, row: pd.Series) -> str: - """Get decision from the decision tree based on input features. - - Args: - row: Series containing feature values. - - Returns: - The decision outcome as a string. - - Raises: - ValueError: If the tree structure is invalid. - TypeError: If a node type is invalid. - """ - current_idx = 0 - while True: - current_node = self.nodes[current_idx] - if isinstance(current_node, Decision): - return current_node.decision - if isinstance(current_node, Rule): - feature_value = row[current_node.feature_name] - if current_node.rule_type == "greater_than": - if feature_value > current_node.threshold: - current_idx = current_node.next_true - else: - current_idx = current_node.next_false - elif current_node.rule_type == "less_than": - if feature_value < current_node.threshold: - current_idx = current_node.next_true - else: - current_idx = current_node.next_false - else: - msg = f"Invalid rule type '{current_node.rule_type}' at index {current_idx}." - raise ValueError(msg) - else: - msg = f"Invalid node type at index {current_idx}." - raise TypeError(msg) - - -__all__ = ["Decision", "DecisionTree", "Node", "Rule"] diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py deleted file mode 100644 index bcf95b42d..000000000 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_rules_combiner.py +++ /dev/null @@ -1,62 +0,0 @@ -# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# # -# # SPDX-License-Identifier: MPL-2.0 - -from datetime import timedelta - -import pytest - -from openstef_core.types import LeadTime, Q -from openstef_meta.models.forecast_combiners.rules_combiner import ( - RulesCombiner, - RulesCombinerConfig, -) -from openstef_meta.utils.datasets import EnsembleForecastDataset - - -@pytest.fixture -def config() -> RulesCombinerConfig: - """Fixture to create RulesCombinerConfig.""" - return RulesCombiner.Config( - quantiles=[Q(0.1), Q(0.5), Q(0.9)], - horizons=[LeadTime(timedelta(days=1))], - ) - - -@pytest.fixture -def forecaster(config: RulesCombinerConfig) -> RulesCombiner: - return RulesCombiner(config=config) - - -def test_initialization(forecaster: RulesCombiner): - assert isinstance(forecaster, RulesCombiner) - - -def test_quantile_weights_combiner__fit_predict( - ensemble_dataset: EnsembleForecastDataset, - config: RulesCombinerConfig, -): - """Test basic fit and predict workflow with comprehensive output validation.""" - # Arrange - expected_quantiles = config.quantiles - forecaster = RulesCombiner(config=config) - additional_features = ensemble_dataset.get_base_predictions_for_quantile(Q(0.5)) - additional_features.data = additional_features.data.drop(columns=additional_features.target_column) - additional_features.data.columns = ["feature1", "feature2"] - - # Act - forecaster.fit(ensemble_dataset, additional_features=additional_features) - result = forecaster.predict(ensemble_dataset, additional_features=additional_features) - - # Assert - # Basic functionality - assert forecaster.is_fitted, "Model should be fitted after calling fit()" - - # Check that necessary quantiles are present - expected_columns = [q.format() for q in expected_quantiles] - assert list(result.data.columns) == expected_columns, ( - f"Expected columns {expected_columns}, got {list(result.data.columns)}" - ) - - # Forecast data quality - assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" diff --git a/packages/openstef-meta/tests/unit/utils/test_decision_tree.py b/packages/openstef-meta/tests/unit/utils/test_decision_tree.py deleted file mode 100644 index f40bdb220..000000000 --- a/packages/openstef-meta/tests/unit/utils/test_decision_tree.py +++ /dev/null @@ -1,45 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -import pandas as pd -import pytest - -from openstef_meta.utils.decision_tree import Decision, DecisionTree, Node, Rule - - -@pytest.fixture -def sample_dataset() -> pd.DataFrame: - data = { - "feature_1": [1, 2, 3, 4, 5], - "feature_2": [10, 20, 30, 40, 50], - } - return pd.DataFrame(data) - - -@pytest.fixture -def simple_decision_tree() -> DecisionTree: - nodes: list[Node] = [ - Rule( - idx=0, - rule_type="less_than", - feature_name="feature_1", - threshold=3, - next_true=1, - next_false=2, - ), - Decision(idx=1, decision="Class_A"), - Decision(idx=2, decision="Class_B"), - ] - return DecisionTree(nodes=nodes, outcomes={"Class_A", "Class_B"}) - - -def test_decision_tree_prediction(sample_dataset: pd.DataFrame, simple_decision_tree: DecisionTree): - - decisions = sample_dataset.apply(simple_decision_tree.get_decision, axis=1) - - expected_decisions = pd.Series( - ["Class_A", "Class_A", "Class_B", "Class_B", "Class_B"], - ) - - pd.testing.assert_series_equal(decisions, expected_decisions) From ba8d99f1804ad9c7d74d97ed7eee9d5c5995c409 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Wed, 18 Feb 2026 13:11:52 +0100 Subject: [PATCH 082/104] Cleaning up Signed-off-by: Marnix van Lieshout --- .../forecast_combiners/forecast_combiner.py | 1 + .../src/openstef_meta/utils/pinball_errors.py | 2 +- .../test_ensemble_forecasting_model.py | 14 +-- .../test_forecast_combiner.py | 101 ++++++++++++++++++ .../test_learned_weights_combiner.py | 9 +- .../test_stacking_combiner.py | 9 +- .../forecasting/test_residual_forecaster.py | 6 +- .../models/test_ensemble_forecasting_model.py | 25 ++--- .../tests/unit/utils/test_datasets.py | 38 ++----- .../tests/unit/utils/test_pinball_errors.py | 96 +++++++++++++++++ .../forecasting/test_lgbm_forecaster.py | 9 +- .../forecasting/test_lgbmlinear_forecaster.py | 9 +- .../utils/test_multi_quantile_regressor.py | 13 ++- 13 files changed, 257 insertions(+), 75 deletions(-) create mode 100644 packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py create mode 100644 packages/openstef-meta/tests/unit/utils/test_pinball_errors.py diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index 44a8e885e..0d45d149e 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -96,6 +96,7 @@ def fit( """ raise NotImplementedError("Subclasses must implement the fit method.") + @abstractmethod def predict( self, data: EnsembleForecastDataset, diff --git a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py index 08e1c7704..bff8419ef 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py +++ b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py @@ -10,7 +10,7 @@ import numpy as np import pandas as pd - +# TODO: Replace by openstef-beam implementation? def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, quantile: float) -> pd.Series: """Calculate pinball loss for given true and predicted values. diff --git a/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py index 23835d6e7..2ddb8d542 100644 --- a/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py @@ -70,27 +70,27 @@ def create_models( return ensemble_model, base_models -def test_preprocessing( +def test_preprocessing( # TODO: Move this to unit/models/test_ensemble_forecasting_model.py? sample_timeseries_dataset: TimeSeriesDataset, create_models: tuple[EnsembleForecastingModel, dict[str, ForecastingModel]], ) -> None: - + # Arrange ensemble_model, base_models = create_models - ensemble_model.common_preprocessing.fit(data=sample_timeseries_dataset) - # Check all base models for name, model in base_models.items(): - # Ensemble model + # Act - Transform through ensemble pipeline common_ensemble = ensemble_model.common_preprocessing.transform( data=sample_timeseries_dataset.copy_with(sample_timeseries_dataset.data) ) ensemble_model.model_specific_preprocessing[name].fit(data=common_ensemble) transformed_ensemble = ensemble_model.model_specific_preprocessing[name].transform(data=common_ensemble) - # Base model + + # Act - Transform through base model pipeline model.preprocessing.fit(data=sample_timeseries_dataset) transformed_base = model.preprocessing.transform(data=sample_timeseries_dataset) - # Compare + + # Assert pd.testing.assert_frame_equal( transformed_ensemble.data, transformed_base.data, diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py new file mode 100644 index 000000000..923828c9a --- /dev/null +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py @@ -0,0 +1,101 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +from datetime import timedelta + +import pytest + +from openstef_core.mixins import HyperParams +from openstef_core.types import LeadTime, Q +from openstef_meta.models.forecast_combiners.forecast_combiner import ( + ForecastCombiner, + ForecastCombinerConfig, +) + + +@pytest.fixture +def horizons() -> list[LeadTime]: + return [LeadTime(timedelta(hours=6)), LeadTime(timedelta(hours=12)), LeadTime(timedelta(days=1))] + + +@pytest.fixture +def config(horizons: list[LeadTime]) -> ForecastCombinerConfig: + return ForecastCombinerConfig( + hyperparams=HyperParams(), + quantiles=[Q(0.1), Q(0.5), Q(0.9)], + horizons=horizons, + ) + + +def test_config_max_horizon(config: ForecastCombinerConfig, horizons: list[LeadTime]): + """max_horizon returns the largest configured horizon.""" + # Act + result = config.max_horizon + + # Assert + assert result == max(horizons) + + +def test_config_with_horizon_returns_new_instance(config: ForecastCombinerConfig): + """with_horizon creates a new config with the specified single horizon.""" + # Arrange + new_horizon = LeadTime(timedelta(hours=3)) + + # Act + new_config = config.with_horizon(new_horizon) + + # Assert + assert new_config.horizons == [new_horizon] + assert len(config.horizons) == 3 # Original is unchanged + + +def test_config_with_horizon_preserves_other_fields(config: ForecastCombinerConfig): + """with_horizon preserves quantiles and hyperparams.""" + # Act + new_config = config.with_horizon(LeadTime(timedelta(hours=1))) + + # Assert + assert new_config.quantiles == config.quantiles + assert new_config.hyperparams == config.hyperparams + + +def test_config_requires_at_least_one_quantile(): + """Config validation rejects empty quantiles list.""" + # Act & Assert + with pytest.raises(ValueError): + ForecastCombinerConfig( + hyperparams=HyperParams(), + quantiles=[], + horizons=[LeadTime(timedelta(hours=1))], + ) + + +def test_config_requires_at_least_one_horizon(): + """Config validation rejects empty horizons list.""" + # Act & Assert + with pytest.raises(ValueError): + ForecastCombinerConfig( + hyperparams=HyperParams(), + quantiles=[Q(0.5)], + horizons=[], + ) + + +def test_forecast_combiner_methods_raise_not_implemented(): + """ForecastCombiner base methods raise NotImplementedError.""" + # Arrange + combiner = ForecastCombiner() # type: ignore[abstract] + + # Act & Assert + with pytest.raises(NotImplementedError): + combiner.fit(data=None) # type: ignore[arg-type] + + with pytest.raises(NotImplementedError): + combiner.predict(data=None) # type: ignore[arg-type] + + with pytest.raises(NotImplementedError): + _ = combiner.is_fitted + + with pytest.raises(NotImplementedError): + combiner.predict_contributions(data=None) # type: ignore[arg-type] diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py index 556937121..17fef9043 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py @@ -1,6 +1,6 @@ -# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# # -# # SPDX-License-Identifier: MPL-2.0 +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 from datetime import timedelta @@ -46,7 +46,10 @@ def config(classifier: str) -> WeightsCombinerConfig: def test_initialization(config: WeightsCombinerConfig): + # Act forecaster = WeightsCombiner(config) + + # Assert assert forecaster.is_fitted is False assert len(forecaster.models) == len(config.quantiles) assert forecaster.quantiles == config.quantiles diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py index a506abf34..213b0aeb6 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py @@ -1,6 +1,6 @@ -# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# # -# # SPDX-License-Identifier: MPL-2.0 +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 from datetime import timedelta @@ -41,7 +41,10 @@ def config(regressor: str) -> StackingCombinerConfig: def test_initialization(config: StackingCombinerConfig): + # Act forecaster = StackingCombiner(config) + + # Assert assert forecaster.is_fitted is False assert len(forecaster.models) == len(config.quantiles) assert forecaster.quantiles == config.quantiles diff --git a/packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py b/packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py index 0f319552e..cf2cfef0d 100644 --- a/packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py +++ b/packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py @@ -1,6 +1,6 @@ -# # SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# # -# # SPDX-License-Identifier: MPL-2.0 +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 from datetime import timedelta diff --git a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py index 84f14cef7..b44fee5c7 100644 --- a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py @@ -146,8 +146,9 @@ def model() -> EnsembleForecastingModel: def test_forecasting_model__init__uses_defaults(model: EnsembleForecastingModel): """Test initialization uses default preprocessing and postprocessing when not provided.""" + # Arrange & Act - model created by fixture - # Assert - Check that components are assigned correctly + # Assert assert model.common_preprocessing is not None assert model.postprocessing is not None assert model.target_column == "load" # Default value @@ -156,26 +157,24 @@ def test_forecasting_model__init__uses_defaults(model: EnsembleForecastingModel) def test_forecasting_model__fit(sample_timeseries_dataset: TimeSeriesDataset, model: EnsembleForecastingModel): """Test that fit correctly orchestrates preprocessing and forecaster calls, and returns metrics.""" - # Act result = model.fit(data=sample_timeseries_dataset) - # Assert - Model is fitted and returns metrics + # Assert assert model.is_fitted assert result is not None def test_forecasting_model__predict(sample_timeseries_dataset: TimeSeriesDataset, model: EnsembleForecastingModel): """Test that predict correctly orchestrates preprocessing and forecaster calls.""" - - # Fit the model first + # Arrange model.fit(data=sample_timeseries_dataset) forecast_start = datetime.fromisoformat("2025-01-01T12:00:00") # Act result = model.predict(data=sample_timeseries_dataset, forecast_start=forecast_start) - # Assert - Prediction returns a forecast dataset with expected properties + # Assert assert isinstance(result, ForecastDataset) assert result.sample_interval == sample_timeseries_dataset.sample_interval assert result.quantiles == [Q(0.3), Q(0.5), Q(0.7)] @@ -188,7 +187,6 @@ def test_forecasting_model__predict__raises_error_when_not_fitted( sample_timeseries_dataset: TimeSeriesDataset, model: EnsembleForecastingModel ): """Test predict raises NotFittedError when model is not fitted.""" - # Act & Assert with pytest.raises(NotFittedError): model.predict(data=sample_timeseries_dataset) @@ -198,16 +196,15 @@ def test_forecasting_model__score__returns_metrics( sample_timeseries_dataset: TimeSeriesDataset, model: EnsembleForecastingModel ): """Test that score evaluates model and returns metrics.""" - + # Arrange model.fit(data=sample_timeseries_dataset) # Act metrics = model.score(data=sample_timeseries_dataset) - # Assert - Metrics are calculated for the median quantile + # Assert assert metrics.metrics is not None assert all(x in metrics.metrics for x in [Q(0.3), Q(0.5), Q(0.7)]) - # R2 metric should be present (default evaluation metric) assert "R2" in metrics.metrics[Q(0.5)] @@ -217,15 +214,13 @@ def test_forecasting_model__pickle_roundtrip(): This verifies that the entire forecasting pipeline, including transforms and forecaster, can be serialized and deserialized while maintaining functionality. """ - # Arrange - create synthetic dataset + # Arrange dataset = create_synthetic_forecasting_dataset( length=timedelta(days=30), sample_interval=timedelta(hours=1), random_seed=42, ) - # Create forecasting model with preprocessing and postprocessing - # Arrange horizons = [LeadTime(timedelta(hours=1))] quantiles = [Q(0.3), Q(0.5), Q(0.7)] config = ForecasterConfig(quantiles=quantiles, horizons=horizons) @@ -264,11 +259,11 @@ def test_forecasting_model__pickle_roundtrip(): # Get predictions from original model expected_predictions = original_model.predict(data=dataset) - # Act - pickle and unpickle the model + # Act pickled = pickle.dumps(original_model) restored_model = pickle.loads(pickled) # noqa: S301 - Controlled test - # Assert - verify the restored model is the correct type + # Assert assert isinstance(restored_model, EnsembleForecastingModel) assert restored_model.is_fitted assert restored_model.target_column == original_model.target_column diff --git a/packages/openstef-meta/tests/unit/utils/test_datasets.py b/packages/openstef-meta/tests/unit/utils/test_datasets.py index 1002bf5fd..7e77d39e5 100644 --- a/packages/openstef-meta/tests/unit/utils/test_datasets.py +++ b/packages/openstef-meta/tests/unit/utils/test_datasets.py @@ -9,40 +9,11 @@ import pandas as pd import pytest -from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset +from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_core.types import Quantile from openstef_meta.utils.datasets import EnsembleForecastDataset -@pytest.fixture -def simple_dataset() -> TimeSeriesDataset: - return TimeSeriesDataset( - data=pd.DataFrame( - data={ - "available_at": pd.to_datetime([ - "2023-01-01T09:50:00", # lead time = 10:00 - 09:50 = +10min - "2023-01-01T10:55:00", # lead time = 11:00 - 10:55 = +5min - "2023-01-01T12:10:00", # lead time = 12:00 - 12:10 = -10min - "2023-01-01T13:20:00", # lead time = 13:00 - 13:20 = -20min - "2023-01-01T14:15:00", # lead time = 14:00 - 14:15 = -15min - "2023-01-01T14:30:00", # lead time = 14:00 - 14:30 = -30min - ]), - "value1": [10, 20, 30, 40, 50, 55], # 55 should override 50 for 14:00 - }, - index=pd.to_datetime([ - "2023-01-01T10:00:00", - "2023-01-01T11:00:00", - "2023-01-01T12:00:00", - "2023-01-01T13:00:00", - # Duplicate timestamp with different availability - "2023-01-01T14:00:00", - "2023-01-01T14:00:00", - ]), - ), - sample_interval=timedelta(hours=1), - ) - - @pytest.fixture def forecast_dataset_factory() -> Callable[[], ForecastDataset]: def _make() -> ForecastDataset: @@ -93,7 +64,7 @@ def ensemble_dataset(base_predictions: dict[str, ForecastDataset]) -> EnsembleFo def test_from_ensemble_output(ensemble_dataset: EnsembleForecastDataset): - + # Assert assert isinstance(ensemble_dataset, EnsembleForecastDataset) assert ensemble_dataset.data.shape == (3, 7) # 3 timestamps, 2 learners * 3 quantiles + target assert set(ensemble_dataset.forecaster_names) == {"model_1", "model_2"} @@ -101,17 +72,20 @@ def test_from_ensemble_output(ensemble_dataset: EnsembleForecastDataset): def test_get_base_predictions_for_quantile(ensemble_dataset: EnsembleForecastDataset): - + # Act dataset = ensemble_dataset.get_base_predictions_for_quantile(Quantile(0.5)) + # Assert assert isinstance(dataset, ForecastInputDataset) assert dataset.data.shape == (3, 3) # 3 timestamps, 2 learners * 1 quantiles + target def test_get_best_forecaster_labels(ensemble_dataset: EnsembleForecastDataset): """Test get_best_forecaster_labels.""" + # Act dataset = ensemble_dataset.get_best_forecaster_labels(Quantile(0.5)) + # Assert assert isinstance(dataset, ForecastInputDataset) assert dataset.data.shape == (3, 3) # 3 timestamps, 2 learners * 1 quantiles + target assert all(dataset.target_series.apply(lambda x: x in {"model_1", "model_2"})) # type: ignore diff --git a/packages/openstef-meta/tests/unit/utils/test_pinball_errors.py b/packages/openstef-meta/tests/unit/utils/test_pinball_errors.py new file mode 100644 index 000000000..e6ce6aab8 --- /dev/null +++ b/packages/openstef-meta/tests/unit/utils/test_pinball_errors.py @@ -0,0 +1,96 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +import numpy as np +import pandas as pd +import pytest + +from openstef_meta.utils.pinball_errors import calculate_pinball_errors + + +@pytest.fixture +def index() -> pd.DatetimeIndex: + return pd.to_datetime(["2023-01-01", "2023-01-02", "2023-01-03", "2023-01-04"]) + + +@pytest.fixture +def y_true(index: pd.DatetimeIndex) -> pd.Series: + return pd.Series([10.0, 20.0, 30.0, 40.0], index=index) + + +def test_perfect_predictions_zero_loss(y_true: pd.Series): + """When predictions match actual values exactly, pinball loss is zero everywhere.""" + # Act + result = calculate_pinball_errors(y_true, y_true, quantile=0.5) + + # Assert + assert (result == 0).all() + + +def test_under_prediction_penalized_by_quantile(y_true: pd.Series, index: pd.DatetimeIndex): + """Under-prediction (y_true > y_pred) is penalized by quantile * error.""" + # Arrange + y_pred = pd.Series([5.0, 15.0, 25.0, 35.0], index=index) # all under-predict by 5 + quantile = 0.9 + + # Act + result = calculate_pinball_errors(y_true, y_pred, quantile=quantile) + + # Assert — errors = y_true - y_pred = 5, pinball = 0.9 * 5 = 4.5 + expected = pd.Series([4.5, 4.5, 4.5, 4.5], index=index) + pd.testing.assert_series_equal(result, expected) + + +def test_over_prediction_penalized_by_complement(y_true: pd.Series, index: pd.DatetimeIndex): + """Over-prediction (y_true < y_pred) is penalized by (1 - quantile) * |error|.""" + # Arrange + y_pred = pd.Series([15.0, 25.0, 35.0, 45.0], index=index) # all over-predict by 5 + quantile = 0.9 + + # Act + result = calculate_pinball_errors(y_true, y_pred, quantile=quantile) + + # Assert — errors = y_true - y_pred = -5, pinball = (0.9 - 1) * (-5) = 0.5 + expected = pd.Series([0.5, 0.5, 0.5, 0.5], index=index) + pd.testing.assert_series_equal(result, expected) + + +def test_median_quantile_symmetric(y_true: pd.Series, index: pd.DatetimeIndex): + """At quantile 0.5, under- and over-prediction penalties are symmetric.""" + # Arrange + y_under = pd.Series([5.0, 15.0, 25.0, 35.0], index=index) + y_over = pd.Series([15.0, 25.0, 35.0, 45.0], index=index) + + # Act + loss_under = calculate_pinball_errors(y_true, y_under, quantile=0.5) + loss_over = calculate_pinball_errors(y_true, y_over, quantile=0.5) + + # Assert + pd.testing.assert_series_equal(loss_under, loss_over) + + +def test_result_preserves_index(y_true: pd.Series, index: pd.DatetimeIndex): + """Output Series has the same index as the input y_true.""" + # Arrange + y_pred = pd.Series([10.0, 20.0, 30.0, 40.0], index=index) + + # Act + result = calculate_pinball_errors(y_true, y_pred, quantile=0.5) + + # Assert + pd.testing.assert_index_equal(result.index, index) + + +def test_pinball_loss_is_non_negative(y_true: pd.Series, index: pd.DatetimeIndex): + """Pinball loss should always be >= 0 for any quantile.""" + # Arrange + rng = np.random.default_rng(42) + y_pred = pd.Series(rng.normal(25, 15, size=len(y_true)), index=index) + + for q in [0.1, 0.25, 0.5, 0.75, 0.9]: + # Act + result = calculate_pinball_errors(y_true, y_pred, quantile=q) + + # Assert + assert (result >= 0).all(), f"Negative pinball loss found at quantile {q}" diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py index 886da0ce6..ffc37c9fe 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbm_forecaster.py @@ -30,12 +30,11 @@ def base_config() -> LGBMForecasterConfig: ) -@pytest.fixture -def forecaster(base_config: LGBMForecasterConfig) -> LGBMForecaster: - return LGBMForecaster(base_config) - +def test_initialization(base_config: LGBMForecasterConfig): + # Act + forecaster = LGBMForecaster(base_config) -def test_initialization(forecaster: LGBMForecaster): + # Assert assert isinstance(forecaster, LGBMForecaster) assert forecaster.config.hyperparams.n_estimators == 100 # type: ignore diff --git a/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py b/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py index cc4b4701e..b7ebfe3de 100644 --- a/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py +++ b/packages/openstef-models/tests/unit/models/forecasting/test_lgbmlinear_forecaster.py @@ -30,12 +30,11 @@ def base_config() -> LGBMLinearForecasterConfig: ) -@pytest.fixture -def forecaster(base_config: LGBMLinearForecasterConfig) -> LGBMLinearForecaster: - return LGBMLinearForecaster(base_config) - +def test_initialization(base_config: LGBMLinearForecasterConfig): + # Act + forecaster = LGBMLinearForecaster(base_config) -def test_initialization(forecaster: LGBMLinearForecaster): + # Assert assert isinstance(forecaster, LGBMLinearForecaster) assert forecaster.config.hyperparams.n_estimators == 100 # type: ignore diff --git a/packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py b/packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py index d2e8ad7be..078ef0f9e 100644 --- a/packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py +++ b/packages/openstef-models/tests/unit/utils/test_multi_quantile_regressor.py @@ -65,8 +65,10 @@ def baselearner_config(request: pytest.FixtureRequest) -> BaseLearnerConfig: # def test_init_sets_quantiles_and_models(baselearner_config: BaseLearnerConfig): + # Arrange quantiles = [0.1, 0.5, 0.9] + # Act model = MultiQuantileRegressor( base_learner=baselearner_config.base_learner, quantile_param=baselearner_config.quantile_param, @@ -74,13 +76,14 @@ def test_init_sets_quantiles_and_models(baselearner_config: BaseLearnerConfig): hyperparams=baselearner_config.hyperparams, ) + # Assert assert model.quantiles == quantiles assert len(model._models) == len(quantiles) def test_fit_and_predict_shape(dataset: tuple[pd.DataFrame, pd.Series], baselearner_config: BaseLearnerConfig): + # Arrange quantiles = [0.1, 0.5, 0.9] - X, y = dataset[0], dataset[1] model = MultiQuantileRegressor( base_learner=baselearner_config.base_learner, @@ -89,12 +92,16 @@ def test_fit_and_predict_shape(dataset: tuple[pd.DataFrame, pd.Series], baselear hyperparams=baselearner_config.hyperparams, ) + # Act model.fit(X, y) preds = model.predict(X) + + # Assert assert preds.shape == (X.shape[0], len(quantiles)) def test_is_fitted_true_after_fit(dataset: tuple[pd.DataFrame, pd.Series], baselearner_config: BaseLearnerConfig): + # Arrange quantiles = [0.1, 0.5, 0.9] X, y = dataset[0], dataset[1] model = MultiQuantileRegressor( @@ -103,5 +110,9 @@ def test_is_fitted_true_after_fit(dataset: tuple[pd.DataFrame, pd.Series], basel quantiles=quantiles, hyperparams=baselearner_config.hyperparams, ) + + # Act model.fit(X, y) + + # Assert assert model.is_fitted From 51e84900e524ebd64a4af48746520518b1ad22d2 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Wed, 18 Feb 2026 13:38:51 +0100 Subject: [PATCH 083/104] Cleaning up Signed-off-by: Marnix van Lieshout --- .../src/openstef_beam/metrics/__init__.py | 2 + .../metrics/metrics_deterministic.py | 57 +++++++++-- .../metrics/test_metrics_deterministic.py | 69 +++++++++++++ .../src/openstef_meta/utils/__init__.py | 6 +- .../src/openstef_meta/utils/datasets.py | 29 +++--- .../src/openstef_meta/utils/pinball_errors.py | 32 ------- .../tests/unit/utils/test_pinball_errors.py | 96 ------------------- 7 files changed, 134 insertions(+), 157 deletions(-) delete mode 100644 packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py delete mode 100644 packages/openstef-meta/tests/unit/utils/test_pinball_errors.py diff --git a/packages/openstef-beam/src/openstef_beam/metrics/__init__.py b/packages/openstef-beam/src/openstef_beam/metrics/__init__.py index ea4ccf7ce..94e946aea 100644 --- a/packages/openstef-beam/src/openstef_beam/metrics/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/metrics/__init__.py @@ -22,6 +22,7 @@ confusion_matrix, fbeta, mape, + pinball_losses, precision_recall, r2, relative_pinball_loss, @@ -44,6 +45,7 @@ "mape", "mean_absolute_calibration_error", "observed_probability", + "pinball_losses", "precision_recall", "r2", "rcrps", diff --git a/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py b/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py index f77f55579..1a5a1b97d 100644 --- a/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py +++ b/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py @@ -463,6 +463,48 @@ def r2( return float(r2_score(y_true, y_pred, sample_weight=sample_weights)) +def pinball_losses( + y_true: npt.ArrayLike, + y_pred: npt.ArrayLike, + *, + quantile: float, +) -> npt.NDArray[np.floating]: + """Calculate the per-sample Pinball Loss (also known as Quantile Loss). + + The pinball loss asymmetrically penalizes over- and under-predictions based on + the target quantile. For quantiles above 0.5, under-predictions are penalized more + heavily; for quantiles below 0.5, over-predictions receive higher penalties. + + Args: + y_true: Ground truth values with shape (num_samples,). + y_pred: Predicted quantile values with shape (num_samples,). + quantile: The quantile level being predicted (e.g., 0.1, 0.5, 0.9). + Must be in [0, 1]. + + Returns: + An array of per-sample pinball losses with shape (num_samples,). + + Example: + Basic usage for 90th percentile predictions: + + >>> import numpy as np + >>> y_true = np.array([100.0, 120.0, 110.0]) + >>> y_pred = np.array([95.0, 125.0, 110.0]) + >>> losses = pinball_losses(y_true, y_pred, quantile=0.9) + >>> losses + array([4.5, 0.5, 0. ]) + """ + y_true = np.asarray(y_true) + y_pred = np.asarray(y_pred) + + errors = y_true - y_pred + return np.where( + errors >= 0, + quantile * errors, # Under-prediction + (quantile - 1) * errors, # Over-prediction + ) + + def relative_pinball_loss( y_true: npt.NDArray[np.floating], y_pred: npt.NDArray[np.floating], @@ -506,21 +548,16 @@ def relative_pinball_loss( 0.0167 """ # Ensure inputs are numpy arrays - y_true = np.array(y_true) - y_pred = np.array(y_pred) + y_true = np.asarray(y_true) + y_pred = np.asarray(y_pred) if y_true.size == 0 or y_pred.size == 0: return float("NaN") - # Calculate pinball loss for each sample - errors = y_true - y_pred - pinball_losses = np.where( - errors >= 0, - quantile * errors, # Under-prediction - (quantile - 1) * errors, # Over-prediction - ) + # Calculate per-sample pinball losses + losses = pinball_losses(y_true, y_pred, quantile=quantile) # Calculate mean pinball loss (weighted if weights provided) - mean_pinball_loss = np.average(pinball_losses, weights=sample_weights) + mean_pinball_loss = np.average(losses, weights=sample_weights) # Calculate measurement range for normalization y_range = np.quantile(y_true, q=measurement_range_upper_q) - np.quantile(y_true, q=measurement_range_lower_q) diff --git a/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py b/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py index b3b9ac35d..00416422e 100644 --- a/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py +++ b/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py @@ -13,6 +13,7 @@ confusion_matrix, fbeta, mape, + pinball_losses, precision_recall, relative_pinball_loss, riqd, @@ -413,6 +414,74 @@ def test_riqd_returns_nan_when_inputs_empty() -> None: assert np.isnan(result) +def test_pinball_losses_perfect_predictions_zero_loss() -> None: + """When predictions match actual values exactly, pinball loss is zero everywhere.""" + # Arrange + y = np.array([10.0, 20.0, 30.0, 40.0]) + + # Act + result = pinball_losses(y, y, quantile=0.5) + + # Assert + np.testing.assert_array_equal(result, np.zeros(4)) + + +def test_pinball_losses_under_prediction_penalized_by_quantile() -> None: + """Under-prediction (y_true > y_pred) is penalized by quantile * error.""" + # Arrange + y_true = np.array([10.0, 20.0, 30.0, 40.0]) + y_pred = np.array([5.0, 15.0, 25.0, 35.0]) # all under-predict by 5 + + # Act + result = pinball_losses(y_true, y_pred, quantile=0.9) + + # Assert — errors = 5, pinball = 0.9 * 5 = 4.5 + np.testing.assert_array_almost_equal(result, np.full(4, 4.5)) + + +def test_pinball_losses_over_prediction_penalized_by_complement() -> None: + """Over-prediction (y_true < y_pred) is penalized by (1 - quantile) * |error|.""" + # Arrange + y_true = np.array([10.0, 20.0, 30.0, 40.0]) + y_pred = np.array([15.0, 25.0, 35.0, 45.0]) # all over-predict by 5 + + # Act + result = pinball_losses(y_true, y_pred, quantile=0.9) + + # Assert — errors = -5, pinball = (0.9 - 1) * (-5) = 0.5 + np.testing.assert_array_almost_equal(result, np.full(4, 0.5)) + + +def test_pinball_losses_median_quantile_symmetric() -> None: + """At quantile 0.5, under- and over-prediction penalties are symmetric.""" + # Arrange + y_true = np.array([10.0, 20.0, 30.0, 40.0]) + y_under = np.array([5.0, 15.0, 25.0, 35.0]) + y_over = np.array([15.0, 25.0, 35.0, 45.0]) + + # Act + loss_under = pinball_losses(y_true, y_under, quantile=0.5) + loss_over = pinball_losses(y_true, y_over, quantile=0.5) + + # Assert + np.testing.assert_array_almost_equal(loss_under, loss_over) + + +def test_pinball_losses_is_non_negative() -> None: + """Pinball loss should always be >= 0 for any quantile.""" + # Arrange + rng = np.random.default_rng(42) + y_true = np.array([10.0, 20.0, 30.0, 40.0]) + y_pred = rng.normal(25, 15, size=len(y_true)) + + for q in [0.1, 0.25, 0.5, 0.75, 0.9]: + # Act + result = pinball_losses(y_true, y_pred, quantile=q) + + # Assert + assert (result >= 0).all(), f"Negative pinball loss found at quantile {q}" + + @pytest.mark.parametrize( ( "y_true", diff --git a/packages/openstef-meta/src/openstef_meta/utils/__init__.py b/packages/openstef-meta/src/openstef_meta/utils/__init__.py index 616ee56d6..6aa87b01f 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/utils/__init__.py @@ -4,8 +4,4 @@ """Utility functions and classes for OpenSTEF Meta.""" -from .pinball_errors import calculate_pinball_errors - -__all__ = [ - "calculate_pinball_errors", -] +__all__: list[str] = [] diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index 8eeaa57bd..8f6ed8078 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -11,11 +11,13 @@ from datetime import datetime, timedelta from typing import Self, override +import numpy as np +import numpy.typing as npt import pandas as pd +from openstef_beam.metrics import pinball_losses from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset from openstef_core.types import Quantile -from openstef_meta.utils.pinball_errors import calculate_pinball_errors DEFAULT_TARGET_COLUMN = {Quantile(0.5): "load"} @@ -93,7 +95,7 @@ def __init__( self.forecaster_names, self.quantiles = self.get_learner_and_quantile(pd.Index(quantile_feature_names)) n_cols = len(self.forecaster_names) * len(self.quantiles) if len(data.columns) not in {n_cols + 1, n_cols}: - raise ValueError("Data columns do not match the expected number based on base Forecasters and quantiles.") + raise ValueError("Data columns do not match the expected number based on base forecasters and quantiles.") @property def target_series(self) -> pd.Series | None: @@ -104,16 +106,16 @@ def target_series(self) -> pd.Series | None: @staticmethod def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[str], list[Quantile]]: - """Extract base Forecaster names and quantiles from feature names. + """Extract base forecaster names and quantiles from feature names. Args: feature_names: Index of feature names in the dataset. Returns: - Tuple containing a list of base Forecaster names and a list of quantiles. + Tuple containing a list of base forecaster names and a list of quantiles. Raises: - ValueError: If an invalid base Forecaster name is found in a feature name. + ValueError: If an invalid base forecaster name is found in a feature name. """ forecasters: set[str] = set() quantiles: set[Quantile] = set() @@ -132,13 +134,13 @@ def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[str], list[Q @staticmethod def get_quantile_feature_name(feature_name: str) -> tuple[str, Quantile]: - """Generate the feature name for a given base Forecaster and quantile. + """Generate the feature name for a given base forecaster and quantile. Args: feature_name: Feature name string in the format "model_Quantile". Returns: - Tuple containing the base Forecaster name and Quantile object. + Tuple containing the base forecaster name and Quantile object. """ learner_part, quantile_part = feature_name.split("_", maxsplit=1) return learner_part, Quantile.parse(quantile_part) @@ -192,16 +194,15 @@ def _prepare_classification(data: pd.DataFrame, target: pd.Series, quantile: Qua quantile: Quantile for which to prepare classification data. Returns: - Series with categorical indicators of best-performing base Forecasters. + Series with categorical indicators of best-performing base forecasters. """ + # Calculate pinball loss for each base forecaster + def _column_losses(preds: pd.Series) -> npt.NDArray[np.floating]: + return pinball_losses(y_true=np.asarray(target), y_pred=np.asarray(preds), quantile=quantile) - # Calculate pinball loss for each base Forecaster - def column_pinball_losses(preds: pd.Series) -> pd.Series: - return calculate_pinball_errors(y_true=target, y_pred=preds, quantile=quantile) + losses_per_forecaster = data.apply(_column_losses) - pinball_losses = data.apply(column_pinball_losses) - - return pinball_losses.idxmin(axis=1) + return losses_per_forecaster.idxmin(axis=1) def get_best_forecaster_labels(self, quantile: Quantile) -> ForecastInputDataset: """Get labels indicating the best-performing base forecaster for each sample at a specific quantile. diff --git a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py b/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py deleted file mode 100644 index bff8419ef..000000000 --- a/packages/openstef-meta/src/openstef_meta/utils/pinball_errors.py +++ /dev/null @@ -1,32 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""Utility functions for calculating pinball loss errors. - -This module provides a function to compute the pinball loss for quantile regression. -""" - -import numpy as np -import pandas as pd - -# TODO: Replace by openstef-beam implementation? -def calculate_pinball_errors(y_true: pd.Series, y_pred: pd.Series, quantile: float) -> pd.Series: - """Calculate pinball loss for given true and predicted values. - - Args: - y_true: True values as a pandas Series. - y_pred: Predicted values as a pandas Series. - quantile: Quantile value. - - Returns: - A pandas Series containing the pinball loss for each sample. - """ - errors = y_true - y_pred - pinball_loss = np.where( - errors >= 0, - quantile * errors, # Under-prediction - (quantile - 1) * errors, # Over-prediction - ) - - return pd.Series(pinball_loss, index=y_true.index) diff --git a/packages/openstef-meta/tests/unit/utils/test_pinball_errors.py b/packages/openstef-meta/tests/unit/utils/test_pinball_errors.py deleted file mode 100644 index e6ce6aab8..000000000 --- a/packages/openstef-meta/tests/unit/utils/test_pinball_errors.py +++ /dev/null @@ -1,96 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -import numpy as np -import pandas as pd -import pytest - -from openstef_meta.utils.pinball_errors import calculate_pinball_errors - - -@pytest.fixture -def index() -> pd.DatetimeIndex: - return pd.to_datetime(["2023-01-01", "2023-01-02", "2023-01-03", "2023-01-04"]) - - -@pytest.fixture -def y_true(index: pd.DatetimeIndex) -> pd.Series: - return pd.Series([10.0, 20.0, 30.0, 40.0], index=index) - - -def test_perfect_predictions_zero_loss(y_true: pd.Series): - """When predictions match actual values exactly, pinball loss is zero everywhere.""" - # Act - result = calculate_pinball_errors(y_true, y_true, quantile=0.5) - - # Assert - assert (result == 0).all() - - -def test_under_prediction_penalized_by_quantile(y_true: pd.Series, index: pd.DatetimeIndex): - """Under-prediction (y_true > y_pred) is penalized by quantile * error.""" - # Arrange - y_pred = pd.Series([5.0, 15.0, 25.0, 35.0], index=index) # all under-predict by 5 - quantile = 0.9 - - # Act - result = calculate_pinball_errors(y_true, y_pred, quantile=quantile) - - # Assert — errors = y_true - y_pred = 5, pinball = 0.9 * 5 = 4.5 - expected = pd.Series([4.5, 4.5, 4.5, 4.5], index=index) - pd.testing.assert_series_equal(result, expected) - - -def test_over_prediction_penalized_by_complement(y_true: pd.Series, index: pd.DatetimeIndex): - """Over-prediction (y_true < y_pred) is penalized by (1 - quantile) * |error|.""" - # Arrange - y_pred = pd.Series([15.0, 25.0, 35.0, 45.0], index=index) # all over-predict by 5 - quantile = 0.9 - - # Act - result = calculate_pinball_errors(y_true, y_pred, quantile=quantile) - - # Assert — errors = y_true - y_pred = -5, pinball = (0.9 - 1) * (-5) = 0.5 - expected = pd.Series([0.5, 0.5, 0.5, 0.5], index=index) - pd.testing.assert_series_equal(result, expected) - - -def test_median_quantile_symmetric(y_true: pd.Series, index: pd.DatetimeIndex): - """At quantile 0.5, under- and over-prediction penalties are symmetric.""" - # Arrange - y_under = pd.Series([5.0, 15.0, 25.0, 35.0], index=index) - y_over = pd.Series([15.0, 25.0, 35.0, 45.0], index=index) - - # Act - loss_under = calculate_pinball_errors(y_true, y_under, quantile=0.5) - loss_over = calculate_pinball_errors(y_true, y_over, quantile=0.5) - - # Assert - pd.testing.assert_series_equal(loss_under, loss_over) - - -def test_result_preserves_index(y_true: pd.Series, index: pd.DatetimeIndex): - """Output Series has the same index as the input y_true.""" - # Arrange - y_pred = pd.Series([10.0, 20.0, 30.0, 40.0], index=index) - - # Act - result = calculate_pinball_errors(y_true, y_pred, quantile=0.5) - - # Assert - pd.testing.assert_index_equal(result.index, index) - - -def test_pinball_loss_is_non_negative(y_true: pd.Series, index: pd.DatetimeIndex): - """Pinball loss should always be >= 0 for any quantile.""" - # Arrange - rng = np.random.default_rng(42) - y_pred = pd.Series(rng.normal(25, 15, size=len(y_true)), index=index) - - for q in [0.1, 0.25, 0.5, 0.75, 0.9]: - # Act - result = calculate_pinball_errors(y_true, y_pred, quantile=q) - - # Assert - assert (result >= 0).all(), f"Negative pinball loss found at quantile {q}" From f571ef8fe8ac6e203c8765d203495f2b4e264392 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Wed, 18 Feb 2026 14:27:40 +0100 Subject: [PATCH 084/104] Improve hyperparam naming Signed-off-by: Marnix van Lieshout --- .../learned_weights_combiner.py | 3 -- .../presets/forecasting_workflow.py | 42 +++++++++++-------- .../mlflow/mlflow_storage_callback.py | 2 +- .../forecasting/base_case_forecaster.py | 3 +- .../forecasting/constant_median_forecaster.py | 3 +- .../forecasting/flatliner_forecaster.py | 6 +-- .../models/forecasting/lgbm_forecaster.py | 18 +++++++- .../forecasting/lgbmlinear_forecaster.py | 18 +++++++- .../models/forecasting_model.py | 2 +- 9 files changed, 68 insertions(+), 29 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 79c211889..8f8361bc1 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -38,8 +38,6 @@ logger = logging.getLogger(__name__) -# Base classes for Learned Weights Final Learner - Classifier = LGBMClassifier | XGBClassifier | LogisticRegression | DummyClassifier @@ -131,7 +129,6 @@ def get_classifier(self) -> LGBMClassifier: ) -# 3 XGB Classifier class XGBCombinerHyperParams(HyperParams, ClassifierParamsMixin): """Hyperparameters for Learned Weights Final Learner with LGBM Random Forest Classifier.""" diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index b3ab224ec..29874cb2c 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -126,29 +126,29 @@ class EnsembleWorkflowConfig(BaseConfig): ) # Learned weights combiner hyperparameters - lgbm_combiner_hyperparams: LGBMCombinerHyperParams = Field( + combiner_lgbm_hyperparams: LGBMCombinerHyperParams = Field( default=LGBMCombinerHyperParams(), description="Hyperparameters for LightGBM combiner.", ) - rf_combiner_hyperparams: RFCombinerHyperParams = Field( + combiner_rf_hyperparams: RFCombinerHyperParams = Field( default=RFCombinerHyperParams(), description="Hyperparameters for Random Forest combiner.", ) - xgboost_combiner_hyperparams: XGBCombinerHyperParams = Field( + combiner_xgboost_hyperparams: XGBCombinerHyperParams = Field( default=XGBCombinerHyperParams(), description="Hyperparameters for XGBoost combiner.", ) - logistic_combiner_hyperparams: LogisticCombinerHyperParams = Field( + combiner_logistic_hyperparams: LogisticCombinerHyperParams = Field( default=LogisticCombinerHyperParams(), description="Hyperparameters for Logistic Regression combiner.", ) # Stacking combiner hyperparameters - stacking_lgbm_combiner_hyperparams: LGBMForecaster.HyperParams = Field( + combiner_stacking_lgbm_hyperparams: LGBMForecaster.HyperParams = Field( default=LGBMForecaster.HyperParams(), description="Hyperparameters for LightGBM stacking combiner.", ) - stacking_gblinear_combiner_hyperparams: GBLinearForecaster.HyperParams = Field( + combiner_stacking_gblinear_hyperparams: GBLinearForecaster.HyperParams = Field( default=GBLinearForecaster.HyperParams(), description="Hyperparameters for GBLinear stacking combiner.", ) @@ -202,7 +202,7 @@ class EnsembleWorkflowConfig(BaseConfig): default=FeatureSelection(include=None, exclude=None), description="Feature selection for which features to clip.", ) - # TODO: Add sample weight method parameter + # TODO: Add sample weight method parameters sample_weight_scale_percentile: int = Field( default=95, description="Percentile of target values used as scaling reference. " @@ -368,7 +368,9 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin for model_type in config.base_models: if model_type == "lgbm": forecasters[model_type] = LGBMForecaster( - config=LGBMForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) + config=LGBMForecaster.Config( + hyperparams=config.lgbm_hyperparams, quantiles=config.quantiles, horizons=config.horizons + ) ) forecaster_preprocessing[model_type] = [ SampleWeighter( @@ -381,7 +383,9 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin elif model_type == "gblinear": forecasters[model_type] = GBLinearForecaster( - config=GBLinearForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) + config=GBLinearForecaster.Config( + hyperparams=config.gblinear_hyperparams, quantiles=config.quantiles, horizons=config.horizons + ) ) forecaster_preprocessing[model_type] = [ SampleWeighter( @@ -423,7 +427,9 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin ] elif model_type == "xgboost": forecasters[model_type] = XGBoostForecaster( - config=XGBoostForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) + config=XGBoostForecaster.Config( + hyperparams=config.xgboost_hyperparams, quantiles=config.quantiles, horizons=config.horizons + ) ) forecaster_preprocessing[model_type] = [ SampleWeighter( @@ -435,7 +441,9 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin ] elif model_type == "lgbm_linear": forecasters[model_type] = LGBMLinearForecaster( - config=LGBMLinearForecaster.Config(quantiles=config.quantiles, horizons=config.horizons) + config=LGBMLinearForecaster.Config( + hyperparams=config.lgbmlinear_hyperparams, quantiles=config.quantiles, horizons=config.horizons + ) ) forecaster_preprocessing[model_type] = [ SampleWeighter( @@ -454,19 +462,19 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin case ("learned_weights", "lgbm"): combiner = WeightsCombiner( config=WeightsCombiner.Config( - hyperparams=config.lgbm_combiner_hyperparams, horizons=config.horizons, quantiles=config.quantiles + hyperparams=config.combiner_lgbm_hyperparams, horizons=config.horizons, quantiles=config.quantiles ) ) case ("learned_weights", "rf"): combiner = WeightsCombiner( config=WeightsCombiner.Config( - hyperparams=config.rf_combiner_hyperparams, horizons=config.horizons, quantiles=config.quantiles + hyperparams=config.combiner_rf_hyperparams, horizons=config.horizons, quantiles=config.quantiles ) ) case ("learned_weights", "xgboost"): combiner = WeightsCombiner( config=WeightsCombiner.Config( - hyperparams=config.xgboost_combiner_hyperparams, + hyperparams=config.combiner_xgboost_hyperparams, horizons=config.horizons, quantiles=config.quantiles, ) @@ -474,7 +482,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin case ("learned_weights", "logistic"): combiner = WeightsCombiner( config=WeightsCombiner.Config( - hyperparams=config.logistic_combiner_hyperparams, + hyperparams=config.combiner_logistic_hyperparams, horizons=config.horizons, quantiles=config.quantiles, ) @@ -482,7 +490,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin case ("stacking", "lgbm"): combiner = StackingCombiner( config=StackingCombiner.Config( - hyperparams=config.stacking_lgbm_combiner_hyperparams, + hyperparams=config.combiner_stacking_lgbm_hyperparams, horizons=config.horizons, quantiles=config.quantiles, ) @@ -490,7 +498,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin case ("stacking", "gblinear"): combiner = StackingCombiner( config=StackingCombiner.Config( - hyperparams=config.stacking_gblinear_combiner_hyperparams, + hyperparams=config.combiner_stacking_gblinear_hyperparams, horizons=config.horizons, quantiles=config.quantiles, ) diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index e22bb6ec8..eecd2cf63 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -117,7 +117,7 @@ def on_fit_end( if isinstance(context.workflow.model, EnsembleForecastingModel): raise NotImplementedError( "MLFlowStorageCallback does not yet support EnsembleForecastingWorkflow model storage." - ) + ) # TODO: Implement model selection and storage for EnsembleForecastingWorkflow, including handling of base forecasters and combiner model. # Create a new run run = self.storage.create_run( diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py index 4b021d2b3..1e1094092 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py @@ -200,9 +200,10 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = Tru Returns: pd.DataFrame containing the prediction contributions. """ + input_data = data.input_data(start=data.forecast_start) return pd.DataFrame( data=1.0, - index=data.index, + index=input_data.index, columns=["load_" + quantile.format() for quantile in self.config.quantiles], ) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py index e516472a2..2289398d5 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py @@ -153,8 +153,9 @@ def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = Tru Returns: pd.DataFrame containing the prediction contributions. """ + input_data = data.input_data(start=data.forecast_start) return pd.DataFrame( data=1.0, - index=data.index, + index=input_data.index, columns=["load_" + quantile.format() for quantile in self.config.quantiles], ) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py index e4ab21437..d684d1373 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py @@ -121,9 +121,9 @@ def feature_importances(self) -> pd.DataFrame: @override def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = True) -> pd.DataFrame: - forecast_index = data.create_forecast_range(horizon=self.config.max_horizon) + input_data = data.input_data(start=data.forecast_start) return pd.DataFrame( - data={quantile.format(): 0.0 for quantile in self.config.quantiles}, - index=forecast_index, + data={"load_" + quantile.format(): 0.0 for quantile in self.config.quantiles}, + index=input_data.index, ) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index 5868289d3..6ec87593d 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -233,13 +233,29 @@ def __init__(self, config: LGBMForecasterConfig) -> None: self._config = config lgbm_params = { + # Core parameters "linear_tree": False, "objective": "quantile", + "n_estimators": config.hyperparams.n_estimators, + "learning_rate": config.hyperparams.learning_rate, + "max_depth": config.hyperparams.max_depth, + "min_child_weight": config.hyperparams.min_child_weight, + # Data binning + "min_data_in_leaf": config.hyperparams.min_data_in_leaf, + "min_data_in_bin": config.hyperparams.min_data_in_bin, + # Regularization + "reg_alpha": config.hyperparams.reg_alpha, + "reg_lambda": config.hyperparams.reg_lambda, + # Tree structure control + "num_leaves": config.hyperparams.num_leaves, + "max_bin": config.hyperparams.max_bin, + # Subsampling + "colsample_bytree": config.hyperparams.colsample_bytree, + # General parameters "random_state": config.random_state, "early_stopping_rounds": config.early_stopping_rounds, "verbosity": config.verbosity, "n_jobs": config.n_jobs, - **config.hyperparams.model_dump(), } self._lgbm_model: MultiQuantileRegressor = MultiQuantileRegressor( diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index 391bcceca..3a4c9c12c 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -235,13 +235,29 @@ def __init__(self, config: LGBMLinearForecasterConfig) -> None: self._config = config lgbmlinear_params = { + # Core parameters "linear_tree": True, "objective": "quantile", + "n_estimators": config.hyperparams.n_estimators, + "learning_rate": config.hyperparams.learning_rate, + "max_depth": config.hyperparams.max_depth, + "min_child_weight": config.hyperparams.min_child_weight, + # Data binning + "min_data_in_leaf": config.hyperparams.min_data_in_leaf, + "min_data_in_bin": config.hyperparams.min_data_in_bin, + # Regularization + "reg_alpha": config.hyperparams.reg_alpha, + "reg_lambda": config.hyperparams.reg_lambda, + # Tree structure control + "num_leaves": config.hyperparams.num_leaves, + "max_bin": config.hyperparams.max_bin, + # Subsampling + "colsample_bytree": config.hyperparams.colsample_bytree, + # General parameters "random_state": config.random_state, "early_stopping_rounds": config.early_stopping_rounds, "verbosity": config.verbosity, "n_jobs": config.n_jobs, - **config.hyperparams.model_dump(), } self._lgbmlinear_model: MultiQuantileRegressor = MultiQuantileRegressor( diff --git a/packages/openstef-models/src/openstef_models/models/forecasting_model.py b/packages/openstef-models/src/openstef_models/models/forecasting_model.py index b140e62ba..fe1f2e173 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting_model.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting_model.py @@ -391,7 +391,7 @@ def restore_target[T: TimeSeriesDataset]( target_series = original_dataset.select_features([target_column]).select_version().data[target_column] def _transform_restore_target(df: pd.DataFrame) -> pd.DataFrame: - return df.assign(**{str(target_series.name): df.index.map(target_series)}) # type: ignore + return df.assign(**{str(target_series.name): df.index.map(target_series)}) # pyright: ignore[reportUnknownMemberType] return dataset.pipe_pandas(_transform_restore_target) From 2c102dc2aa4c5731d445549a2a922361338fb93b Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Wed, 18 Feb 2026 22:58:56 +0100 Subject: [PATCH 085/104] Remove openstef-meta dependency in openstef-models Signed-off-by: Marnix van Lieshout --- examples/benchmarks/liander_2024_ensemble.py | 9 +- .../benchmarking/baselines/openstef4.py | 8 +- .../src/openstef_core/datasets/__init__.py | 2 + .../datasets/validated_datasets.py | 211 ++++++++++ .../src/openstef_core/mixins/__init__.py | 3 + .../src/openstef_core/mixins/forecaster.py | 220 ++++++++++ .../openstef_meta/integrations/__init__.py | 4 + .../integrations/mlflow/__init__.py | 9 + .../ensemble_mlflow_storage_callback.py | 104 +++++ .../models/ensemble_forecasting_model.py | 109 +---- .../forecast_combiners/forecast_combiner.py | 2 +- .../learned_weights_combiner.py | 3 +- .../forecast_combiners/stacking_combiner.py | 4 +- .../models/forecasting/residual_forecaster.py | 6 +- .../presets/forecasting_workflow.py | 140 +++---- .../src/openstef_meta/utils/__init__.py | 4 +- .../src/openstef_meta/utils/datasets.py | 224 +--------- .../test_ensemble_forecasting_model.py | 11 +- .../tests/unit/integrations/__init__.py | 3 + .../test_ensemble_mlflow_storage_callback.py | 391 ++++++++++++++++++ .../models/forecast_combiners/conftest.py | 3 +- .../test_learned_weights_combiner.py | 2 +- .../test_stacking_combiner.py | 2 +- .../models/test_ensemble_forecasting_model.py | 5 +- .../tests/unit/utils/test_datasets.py | 3 +- .../openstef_models/explainability/mixins.py | 2 +- .../integrations/mlflow/mlflow_storage.py | 18 + .../mlflow/mlflow_storage_callback.py | 66 ++- .../src/openstef_models/models/__init__.py | 2 + .../models/base_forecasting_model.py | 156 +++++++ .../models/forecasting/__init__.py | 20 +- .../forecasting/base_case_forecaster.py | 2 +- .../forecasting/constant_median_forecaster.py | 2 +- .../forecasting/flatliner_forecaster.py | 2 +- .../models/forecasting/forecaster.py | 228 +--------- .../models/forecasting/gblinear_forecaster.py | 2 +- .../models/forecasting/lgbm_forecaster.py | 2 +- .../forecasting/lgbmlinear_forecaster.py | 2 +- .../models/forecasting/xgboost_forecaster.py | 2 +- .../models/forecasting_model.py | 103 +---- .../presets/forecasting_workflow.py | 9 +- .../transforms/general/__init__.py | 3 +- .../transforms/general/sample_weighter.py | 98 +++-- .../workflows/custom_forecasting_workflow.py | 30 +- .../mlflow/test_mlflow_storage_callback.py | 2 +- .../unit/models/test_forecasting_model.py | 2 +- .../general/test_sample_weighter.py | 27 +- 47 files changed, 1435 insertions(+), 827 deletions(-) create mode 100644 packages/openstef-core/src/openstef_core/mixins/forecaster.py create mode 100644 packages/openstef-meta/src/openstef_meta/integrations/__init__.py create mode 100644 packages/openstef-meta/src/openstef_meta/integrations/mlflow/__init__.py create mode 100644 packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py create mode 100644 packages/openstef-meta/tests/unit/integrations/__init__.py create mode 100644 packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py create mode 100644 packages/openstef-models/src/openstef_models/models/base_forecasting_model.py diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 29ecb9b25..769db2183 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -34,6 +34,7 @@ EnsembleWorkflowConfig, ) from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage +from openstef_models.transforms.general import SampleWeightConfig logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") @@ -89,8 +90,12 @@ temperature_column="temperature_2m", relative_humidity_column="relative_humidity_2m", energy_price_column="EPEX_NL", - forecast_combiner_sample_weight_exponent=0, - forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 0, "xgboost": 0, "lgbm_linear": 0}, + forecaster_sample_weights={ + "gblinear": SampleWeightConfig(method="exponential", weight_exponent=1.0), + "lgbm": SampleWeightConfig(weight_exponent=0.0), + "xgboost": SampleWeightConfig(weight_exponent=0.0), + "lgbm_linear": SampleWeightConfig(weight_exponent=0.0), + }, ) diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py index d7f99e0d1..d6f114a69 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py @@ -33,6 +33,7 @@ from openstef_core.types import Q from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_meta.presets import EnsembleWorkflowConfig, create_ensemble_workflow +from openstef_models.models.forecasting_model import ForecastingModel from openstef_models.presets import ForecastingWorkflowConfig from openstef_models.workflows.custom_forecasting_workflow import ( CustomForecastingWorkflow, @@ -94,7 +95,10 @@ def quantiles(self) -> list[Q]: if isinstance(self._workflow.model, EnsembleForecastingModel): name = self._workflow.model.forecaster_names[0] return self._workflow.model.forecasters[name].config.quantiles - return self._workflow.model.forecaster.config.quantiles + if isinstance(self._workflow.model, ForecastingModel): + return self._workflow.model.forecaster.config.quantiles + msg = f"Unsupported model type: {type(self._workflow.model)}" + raise TypeError(msg) @override def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: @@ -124,7 +128,7 @@ def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: self._workflow = workflow - if self.debug: + if self.debug and isinstance(self._workflow.model, ForecastingModel): id_str = data.horizon.strftime("%Y%m%d%H%M%S") self._workflow.model.prepare_input(training_data).to_parquet( # pyright: ignore[reportPrivateUsage] path=self.cache_dir / f"debug_{id_str}_prepared_training.parquet" diff --git a/packages/openstef-core/src/openstef_core/datasets/__init__.py b/packages/openstef-core/src/openstef_core/datasets/__init__.py index 153ed6611..d18191a6e 100644 --- a/packages/openstef-core/src/openstef_core/datasets/__init__.py +++ b/packages/openstef-core/src/openstef_core/datasets/__init__.py @@ -19,6 +19,7 @@ from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset, validate_horizons_present from openstef_core.datasets.validated_datasets import ( EnergyComponentDataset, + EnsembleForecastDataset, ForecastDataset, ForecastInputDataset, ) @@ -26,6 +27,7 @@ __all__ = [ "EnergyComponentDataset", + "EnsembleForecastDataset", "ForecastDataset", "ForecastInputDataset", "TimeSeriesDataset", diff --git a/packages/openstef-core/src/openstef_core/datasets/validated_datasets.py b/packages/openstef-core/src/openstef_core/datasets/validated_datasets.py index bb06368d6..57e83257a 100644 --- a/packages/openstef-core/src/openstef_core/datasets/validated_datasets.py +++ b/packages/openstef-core/src/openstef_core/datasets/validated_datasets.py @@ -12,6 +12,8 @@ from datetime import datetime, timedelta from typing import Self, override +import numpy as np +import numpy.typing as npt import pandas as pd from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset @@ -427,8 +429,217 @@ def __init__( ) +class EnsembleForecastDataset(TimeSeriesDataset): + """First stage output format for ensemble forecasters.""" + + forecast_start: datetime + quantiles: list[Quantile] + forecaster_names: list[str] + target_column: str + + @override + def __init__( + self, + data: pd.DataFrame, + sample_interval: timedelta = timedelta(minutes=15), + forecast_start: datetime | None = None, + target_column: str = "load", + *, + horizon_column: str = "horizon", + available_at_column: str = "available_at", + ) -> None: + if "forecast_start" in data.attrs: + self.forecast_start = datetime.fromisoformat(data.attrs["forecast_start"]) + else: + self.forecast_start = forecast_start if forecast_start is not None else data.index.min().to_pydatetime() + self.target_column = data.attrs.get("target_column", target_column) + + super().__init__( + data=data, + sample_interval=sample_interval, + horizon_column=horizon_column, + available_at_column=available_at_column, + ) + quantile_feature_names = [col for col in self.feature_names if col != target_column] + + self.forecaster_names, self.quantiles = self.get_learner_and_quantile(pd.Index(quantile_feature_names)) + n_cols = len(self.forecaster_names) * len(self.quantiles) + if len(data.columns) not in {n_cols + 1, n_cols}: + raise ValueError("Data columns do not match the expected number based on base forecasters and quantiles.") + + @property + def target_series(self) -> pd.Series | None: + """Return the target series if available.""" + if self.target_column in self.data.columns: + return self.data[self.target_column] + return None + + @staticmethod + def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[str], list[Quantile]]: + """Extract base forecaster names and quantiles from feature names. + + Args: + feature_names: Index of feature names in the dataset. + + Returns: + Tuple containing a list of base forecaster names and a list of quantiles. + + Raises: + ValueError: If an invalid base forecaster name is found in a feature name. + """ + forecasters: set[str] = set() + quantiles: set[Quantile] = set() + + for feature_name in feature_names: + quantile_part = "_".join(feature_name.split("_")[-2:]) + learner_part = feature_name[: -(len(quantile_part) + 1)] + if not Quantile.is_valid_quantile_string(quantile_part): + msg = f"Column has no valid quantile string: {feature_name}" + raise ValueError(msg) + + forecasters.add(learner_part) + quantiles.add(Quantile.parse(quantile_part)) + + return list(forecasters), list(quantiles) + + @staticmethod + def get_quantile_feature_name(feature_name: str) -> tuple[str, Quantile]: + """Generate the feature name for a given base forecaster and quantile. + + Args: + feature_name: Feature name string in the format "model_Quantile". + + Returns: + Tuple containing the base forecaster name and Quantile object. + """ + learner_part, quantile_part = feature_name.split("_", maxsplit=1) + return learner_part, Quantile.parse(quantile_part) + + @classmethod + def from_forecast_datasets( + cls, + datasets: dict[str, ForecastDataset], + target_series: pd.Series | None = None, + sample_weights: pd.Series | None = None, + ) -> Self: + """Create an EnsembleForecastDataset from multiple ForecastDatasets. + + Args: + datasets: Dict of ForecastDatasets to combine. + target_series: Optional target series to include in the dataset. + sample_weights: Optional sample weights series to include in the dataset. + + Returns: + EnsembleForecastDataset combining all input datasets. + """ + ds1 = next(iter(datasets.values())) + additional_columns: dict[str, pd.Series] = {} + if isinstance(ds1.target_series, pd.Series): + additional_columns[ds1.target_column] = ds1.target_series + elif target_series is not None: + additional_columns[ds1.target_column] = target_series + + sample_weight_column = "sample_weight" + if sample_weights is not None: + additional_columns[sample_weight_column] = sample_weights + + combined_data = pd.DataFrame({ + f"{learner}_{q.format()}": ds.data[q.format()] for learner, ds in datasets.items() for q in ds.quantiles + }).assign(**additional_columns) + + return cls( + data=combined_data, + sample_interval=ds1.sample_interval, + forecast_start=ds1.forecast_start, + target_column=ds1.target_column, + ) + + @staticmethod + def _prepare_classification(data: pd.DataFrame, target: pd.Series, quantile: Quantile) -> pd.Series: + """Prepare data for classification tasks by converting quantile columns to binary indicators. + + Args: + data: DataFrame containing quantile predictions. + target: Series containing true target values. + quantile: Quantile for which to prepare classification data. + + Returns: + Series with categorical indicators of best-performing base forecasters. + """ + y_true = np.asarray(target) + + def _column_losses(preds: pd.Series) -> npt.NDArray[np.floating]: + y_pred = np.asarray(preds) + errors = y_true - y_pred + return np.where(errors >= 0, quantile * errors, (quantile - 1) * errors) + + losses_per_forecaster = data.apply(_column_losses) + + return losses_per_forecaster.idxmin(axis=1) + + def get_best_forecaster_labels(self, quantile: Quantile) -> ForecastInputDataset: + """Get labels indicating the best-performing base forecaster for each sample at a specific quantile. + + Creates a dataset where each sample's target is labeled with the name of the base forecaster + that performed best, determined by pinball loss. Used as classification target for training + the final learner. + + Args: + quantile: Quantile to select. + + Returns: + ForecastInputDataset where the target column contains labels of the best-performing + base forecaster for each sample. + + Raises: + ValueError: If the target column is not found in the dataset. + """ + if self.target_column not in self.data.columns: + msg = f"Target column '{self.target_column}' not found in dataset." + raise ValueError(msg) + + selected_columns = [f"{learner}_{quantile.format()}" for learner in self.forecaster_names] + prediction_data = self.data[selected_columns].copy() + prediction_data.columns = self.forecaster_names + + target = self._prepare_classification( + data=prediction_data, + target=self.data[self.target_column], + quantile=quantile, + ) + prediction_data[self.target_column] = target + return ForecastInputDataset( + data=prediction_data, + sample_interval=self.sample_interval, + target_column=self.target_column, + forecast_start=self.forecast_start, + ) + + def get_base_predictions_for_quantile(self, quantile: Quantile) -> ForecastInputDataset: + """Get base forecaster predictions for a specific quantile. + + Args: + quantile: Quantile to select. + + Returns: + ForecastInputDataset containing predictions from all base forecasters at the specified quantile. + """ + selected_columns = [f"{learner}_{quantile.format()}" for learner in self.forecaster_names] + selected_columns.append(self.target_column) + prediction_data = self.data[selected_columns].copy() + prediction_data.columns = [*self.forecaster_names, self.target_column] + + return ForecastInputDataset( + data=prediction_data, + sample_interval=self.sample_interval, + target_column=self.target_column, + forecast_start=self.forecast_start, + ) + + __all__ = [ "EnergyComponentDataset", + "EnsembleForecastDataset", "ForecastDataset", "ForecastInputDataset", ] diff --git a/packages/openstef-core/src/openstef_core/mixins/__init__.py b/packages/openstef-core/src/openstef_core/mixins/__init__.py index 0da051876..49cfb87a4 100644 --- a/packages/openstef-core/src/openstef_core/mixins/__init__.py +++ b/packages/openstef-core/src/openstef_core/mixins/__init__.py @@ -9,6 +9,7 @@ and data transformation pipelines. """ +from .forecaster import Forecaster, ForecasterConfig from .predictor import BatchPredictor, BatchResult, HyperParams, Predictor from .stateful import Stateful from .transform import Transform, TransformPipeline @@ -16,6 +17,8 @@ __all__ = [ "BatchPredictor", "BatchResult", + "Forecaster", + "ForecasterConfig", "HyperParams", "Predictor", "Stateful", diff --git a/packages/openstef-core/src/openstef_core/mixins/forecaster.py b/packages/openstef-core/src/openstef_core/mixins/forecaster.py new file mode 100644 index 000000000..262736542 --- /dev/null +++ b/packages/openstef-core/src/openstef_core/mixins/forecaster.py @@ -0,0 +1,220 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Core forecasting model interfaces and configurations. + +Provides the fundamental building blocks for implementing forecasting models in OpenSTEF. +These mixins establish contracts that ensure consistent behavior across different model types +while supporting both single and multi-horizon forecasting scenarios. + +Key concepts: +- **Horizon**: The lead time for predictions, accounting for data availability and versioning cutoffs +- **Quantiles**: Probability levels for uncertainty estimation +- **State**: Serializable model parameters that enable saving/loading trained models +- **Batching**: Processing multiple prediction requests simultaneously for efficiency + +Multi-horizon forecasting considerations: +Some models (like linear models) cannot handle missing data or conditional features effectively, +making them suitable only for single-horizon approaches. Other models (like XGBoost) can +handle such data complexities and work well for multi-horizon scenarios. +""" + +from abc import abstractmethod +from typing import Self + +from pydantic import Field + +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.mixins.predictor import BatchPredictor, HyperParams +from openstef_core.types import LeadTime, Quantile + + +class ForecasterConfig(BaseConfig): + """Configuration for forecasting models with support for multiple quantiles and horizons. + + Fundamental configuration parameters that determine forecasting model behavior across + different prediction horizons and uncertainty levels. These are operational parameters + rather than hyperparameters that affect training. + + Example: + Basic configuration for daily energy forecasting: + + >>> from openstef_core.types import LeadTime, Quantile + >>> config = ForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime.from_string("PT1H"), LeadTime.from_string("PT6H"), LeadTime.from_string("PT24H")] + ... ) + >>> len(config.horizons) + 3 + >>> str(config.max_horizon) + 'P1D' + + See Also: + HorizonForecasterConfig: Single-horizon variant of this configuration. + BaseForecaster: Multi-horizon forecaster that uses this configuration. + ForecasterHyperParams: Hyperparameter configuration used alongside this. + """ + + quantiles: list[Quantile] = Field( + default=[Quantile(0.5)], + description=( + "Probability levels for uncertainty estimation. Each quantile represents a confidence level " + "(e.g., 0.1 = 10th percentile, 0.5 = median, 0.9 = 90th percentile). " + "Models must generate predictions for all specified quantiles." + ), + min_length=1, + ) + + horizons: list[LeadTime] = Field( + default=..., + description=( + "Lead times for predictions, accounting for data availability and versioning cutoffs. " + "Each horizon defines how far ahead the model should predict." + ), + min_length=1, + ) + + supports_batching: bool = Field( + default=False, + description=( + "Indicates if the model can handle batch predictions. " + "Batching allows multiple prediction requests to be processed simultaneously, " + "which is more efficient for models that support it, especially on GPUs." + ), + ) + + @property + def max_horizon(self) -> LeadTime: + """Returns the maximum lead time (horizon) from the configured horizons. + + Useful for determining the furthest prediction distance required by the model. + This is commonly used for data preparation and validation logic. + + Returns: + The maximum lead time. + """ + return max(self.horizons) + + def with_horizon(self, horizon: LeadTime) -> Self: + """Create a new configuration with a different horizon. + + Useful for creating multiple forecaster instances for different prediction + horizons from a single base configuration. + + Args: + horizon: The new lead time to use for predictions. + + Returns: + New configuration instance with the specified horizon. + """ + return self.model_copy(update={"horizons": [horizon]}) + + @classmethod + def forecaster_class(cls) -> type["Forecaster"]: + """Get the associated Forecaster class for this configuration. + + Returns: + The Forecaster class that uses this configuration. + """ + raise NotImplementedError("Subclasses must implement forecaster_class") + + +class Forecaster(BatchPredictor[ForecastInputDataset, ForecastDataset]): + """Base for forecasters that handle multiple horizons simultaneously. + + Designed for models that train and predict across multiple prediction horizons + in a unified manner. These models handle the complexity of different lead times + internally, providing a simpler interface for multi-horizon forecasting. + + Ideal for models that can share parameters or features across horizons, avoiding + the need to train separate models for each prediction distance. + + Invariants: + - Predictions must include all quantiles specified in the configuration + - predict_batch() only called when supports_batching returns True + + Example: + Implementation for a model that handles multiple horizons: + + >>> from typing import override + >>> class CustomForecaster(Forecaster): + ... def __init__(self, config: ForecasterConfig): + ... self._config = config + ... self._fitted = False + ... + ... @property + ... @override + ... def config(self): + ... return self._config + ... + ... @property + ... @override + ... def is_fitted(self): + ... return self._fitted + ... + ... @override + ... def get_state(self): + ... return {"config": self._config, "fitted": self._fitted} + ... + ... @override + ... def from_state(self, state): + ... instance = self.__class__(state["config"]) + ... instance._fitted = state["fitted"] + ... return instance + ... + ... @override + ... def fit(self, input_data, data_val): + ... # Train on data for all horizons + ... self._fitted = True + ... + ... @override + ... def predict(self, input_data): + ... # Generate predictions for all horizons + ... from openstef_core.datasets.validated_datasets import ForecastDataset + ... import pandas as pd + ... return ForecastDataset( + ... data=pd.DataFrame(), + ... sample_interval=pd.Timedelta("15min"), + ... forecast_start=pd.Timestamp.now() + ... ) + """ + + @abstractmethod + def __init__(self, config: ForecasterConfig) -> None: + """Initialize the forecaster with the given configuration. + + Args: + config: Configuration object specifying quantiles, horizons, and batching support. + """ + raise NotImplementedError("Subclasses must implement __init__") + + @property + @abstractmethod + def config(self) -> ForecasterConfig: + """Access the model's configuration parameters. + + Returns: + Configuration object containing fundamental model parameters. + """ + raise NotImplementedError("Subclasses must implement config") + + @property + def hyperparams(self) -> HyperParams: + """Access the model's hyperparameters for training and prediction. + + Hyperparameters control model behavior during training and inference. + Default implementation returns empty hyperparameters, which is suitable + for models without configurable parameters. + + Returns: + Hyperparameter configuration object. + """ + return HyperParams() + + +__all__ = [ + "Forecaster", + "ForecasterConfig", +] diff --git a/packages/openstef-meta/src/openstef_meta/integrations/__init__.py b/packages/openstef-meta/src/openstef_meta/integrations/__init__.py new file mode 100644 index 000000000..74b18f446 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/integrations/__init__.py @@ -0,0 +1,4 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Integration modules for external services.""" diff --git a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/__init__.py b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/__init__.py new file mode 100644 index 000000000..19ba099a5 --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/__init__.py @@ -0,0 +1,9 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""MLflow integration for ensemble forecasting models.""" + +from openstef_meta.integrations.mlflow.ensemble_mlflow_storage_callback import EnsembleMLFlowStorageCallback + +__all__ = ["EnsembleMLFlowStorageCallback"] diff --git a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py new file mode 100644 index 000000000..30a78511d --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py @@ -0,0 +1,104 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""MLflow storage callback for ensemble forecasting models. + +Extends the base MLFlowStorageCallback with ensemble-specific behavior: +- Logs hyperparameters for each base forecaster and the combiner +- Stores feature importance plots for each explainable forecaster component +""" + +import logging +from pathlib import Path +from typing import override + +from pydantic import PrivateAttr + +from openstef_core.mixins.predictor import HyperParams +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel +from openstef_models.explainability import ExplainableForecaster +from openstef_models.integrations.mlflow.mlflow_storage_callback import MLFlowStorageCallback +from openstef_models.models.base_forecasting_model import BaseForecastingModel + + +class EnsembleMLFlowStorageCallback(MLFlowStorageCallback): + """MLFlow callback with ensemble-specific logging for multi-model forecasting. + + Extends the base MLFlowStorageCallback to handle EnsembleForecastingModel + instances by: + - Logging combiner hyperparameters as the primary model hyperparams + - Logging per-forecaster hyperparameters with name-prefixed keys + - Storing feature importance plots for each explainable base forecaster + + For non-ensemble models, falls back to the base class behavior. + """ + + _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) + + @override + def _get_hyperparams(self, model: BaseForecastingModel) -> HyperParams | None: + """Extract hyperparameters from the ensemble combiner. + + For ensemble models, the combiner's hyperparams are treated as the + primary hyperparameters. Per-forecaster hyperparams are logged + separately via _log_additional_hyperparams. + + Falls back to base class behavior for non-ensemble models. + + Returns: + The combiner hyperparams for ensemble models, or base class result otherwise. + """ + if isinstance(model, EnsembleForecastingModel): + return model.combiner.config.hyperparams + return super()._get_hyperparams(model) + + @override + def _log_additional_hyperparams(self, model: BaseForecastingModel, run_id: str) -> None: + """Log per-forecaster hyperparameters to the MLflow run. + + Each base forecaster's hyperparameters are logged with a prefix + of its name (e.g., 'lgbm.n_estimators', 'xgboost.max_depth'). + + Args: + model: The ensemble forecasting model. + run_id: MLflow run ID to log parameters to. + """ + if not isinstance(model, EnsembleForecastingModel): + return + + for name, forecaster in model.forecasters.items(): + hyperparams = forecaster.hyperparams + prefixed_params = {f"{name}.{k}": str(v) for k, v in hyperparams.model_dump().items()} + self.storage.log_hyperparams(run_id=run_id, params=prefixed_params) + self._logger.debug("Logged hyperparams for forecaster '%s' in run %s", name, run_id) + + @staticmethod + @override + def _store_feature_importance( + model: BaseForecastingModel, + data_path: Path, + ) -> None: + """Store feature importance plots for each explainable forecaster in the ensemble. + + For ensemble models, generates separate feature importance HTML plots for + each base forecaster that implements ExplainableForecaster. Files are named + 'feature_importances_{forecaster_name}.html'. + + For non-ensemble models, falls back to the base class behavior. + + Args: + model: The forecasting model (ensemble or single). + data_path: Directory path where HTML plots will be saved. + """ + if not isinstance(model, EnsembleForecastingModel): + MLFlowStorageCallback._store_feature_importance(model=model, data_path=data_path) # noqa: SLF001 + return + + for name, forecaster in model.forecasters.items(): + if isinstance(forecaster, ExplainableForecaster): + fig = forecaster.plot_feature_importances() + fig.write_html(data_path / f"feature_importances_{name}.html") # pyright: ignore[reportUnknownMemberType] + + +__all__ = ["EnsembleMLFlowStorageCallback"] diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 4f5ac1966..c1377029e 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -10,15 +10,14 @@ """ import logging -from datetime import datetime, timedelta +from datetime import datetime from functools import partial from typing import cast, override import pandas as pd from pydantic import Field, PrivateAttr -from openstef_beam.evaluation import EvaluationConfig, EvaluationPipeline, SubsetMetric -from openstef_beam.evaluation.metric_providers import MetricProvider, ObservedProbabilityProvider, R2Provider +from openstef_beam.evaluation import SubsetMetric from openstef_core.base_model import BaseModel from openstef_core.datasets import ( ForecastDataset, @@ -26,19 +25,19 @@ TimeSeriesDataset, ) from openstef_core.datasets.timeseries_dataset import validate_horizons_present +from openstef_core.datasets.validated_datasets import EnsembleForecastDataset from openstef_core.exceptions import NotFittedError -from openstef_core.mixins import Predictor, TransformPipeline +from openstef_core.mixins import TransformPipeline +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner -from openstef_meta.utils.datasets import EnsembleForecastDataset -from openstef_models.models.forecasting import Forecaster -from openstef_models.models.forecasting.forecaster import ForecasterConfig +from openstef_models.models.base_forecasting_model import BaseForecastingModel from openstef_models.models.forecasting_model import ModelFitResult -from openstef_models.utils.data_split import DataSplitter logger = logging.getLogger(__name__) class EnsembleModelFitResult(BaseModel): + """Fit result for EnsembleForecastingModel containing details for both forecasters and combiner.""" forecaster_fit_results: dict[str, ModelFitResult] = Field(description="ModelFitResult for each base Forecaster") combiner_fit_result: ModelFitResult = Field(description="ModelFitResult for the ForecastCombiner") @@ -88,7 +87,7 @@ def metrics_full(self) -> SubsetMetric: return self.combiner_fit_result.metrics_full -class EnsembleForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): +class EnsembleForecastingModel(BaseForecastingModel): """Complete forecasting pipeline combining preprocessing, prediction, and postprocessing. Orchestrates the full forecasting workflow by managing feature engineering, @@ -115,6 +114,7 @@ class EnsembleForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastD ... ) >>> from openstef_meta.models.forecast_combiners.learned_weights_combiner import WeightsCombiner >>> from openstef_core.types import LeadTime + >>> from datetime import timedelta >>> >>> # Note: This is a conceptual example showing the API structure >>> # Real usage requires implemented forecaster classes @@ -170,37 +170,6 @@ class EnsembleForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastD exclude=True, ) - postprocessing: TransformPipeline[ForecastDataset] = Field( - default_factory=TransformPipeline[ForecastDataset], - description="Postprocessing pipeline for transforming model outputs into final forecasts.", - exclude=True, - ) - target_column: str = Field( - default="load", - description="Name of the target variable column in datasets.", - ) - data_splitter: DataSplitter = Field( - default_factory=DataSplitter, - description="Data splitting strategy for train/validation/test sets.", - ) - cutoff_history: timedelta = Field( - default=timedelta(days=0), - description="Amount of historical data to exclude from training and prediction due to incomplete features " - "from lag-based preprocessing. When using lag transforms (e.g., lag-14), the first N days contain NaN values. " - "Set this to match your maximum lag duration (e.g., timedelta(days=14)). " - "Default of 0 assumes no invalid rows are created by preprocessing.", - ) - # Evaluation - evaluation_metrics: list[MetricProvider] = Field( - default_factory=lambda: [R2Provider(), ObservedProbabilityProvider()], - description="List of metric providers for evaluating model score.", - ) - # Metadata - tags: dict[str, str] = Field( - default_factory=dict, - description="Optional metadata tags for the model.", - ) - _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) @property @@ -208,6 +177,11 @@ def config(self) -> list[ForecasterConfig]: """Returns the configuration of the underlying forecaster.""" return [x.config for x in self.forecasters.values()] + @property + @override + def scoring_config(self) -> ForecasterConfig: + return self.config[0] + @property @override def is_fitted(self) -> bool: @@ -661,59 +635,6 @@ def predict_contributions(self, data: TimeSeriesDataset, forecast_start: datetim original_data=data, ) - def score( - self, - data: TimeSeriesDataset, - ) -> SubsetMetric: - """Evaluate model performance on the provided dataset. - - Generates predictions for the dataset and calculates evaluation metrics - by comparing against ground truth values. Uses the configured evaluation - metrics to assess forecast quality at the maximum forecast horizon. - - Args: - data: Time series dataset containing both features and target values - for evaluation. - - Returns: - Evaluation metrics including configured providers (e.g., R2, observed - probability) computed at the maximum forecast horizon. - """ - prediction = self.predict(data=data) - - return self._calculate_score(prediction=prediction) - - def _calculate_score(self, prediction: ForecastDataset) -> SubsetMetric: - if prediction.target_series is None: - raise ValueError("Prediction dataset must contain target series for scoring.") - - # We need to make sure there are no NaNs in the target label for metric calculation - prediction = prediction.pipe_pandas(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownArgumentType, reportUnknownMemberType] - - pipeline = EvaluationPipeline( - # Needs only one horizon since we are using only a single prediction step - # If a more comprehensive test is needed, a backtest should be run. - config=EvaluationConfig(available_ats=[], lead_times=[self.config[0].max_horizon]), - quantiles=self.config[0].quantiles, - # Similarly windowed metrics are not relevant for single predictions. - window_metric_providers=[], - global_metric_providers=self.evaluation_metrics, - ) - - evaluation_result = pipeline.run_for_subset( - filtering=self.config[0].max_horizon, - predictions=prediction, - ) - global_metric = evaluation_result.get_global_metric() - if not global_metric: - return SubsetMetric( - window="global", - timestamp=prediction.forecast_start, - metrics={}, - ) - - return global_metric - def restore_target[T: TimeSeriesDataset]( dataset: T, @@ -741,4 +662,4 @@ def _transform_restore_target(df: pd.DataFrame) -> pd.DataFrame: return dataset.pipe_pandas(_transform_restore_target) -__all__ = ["EnsembleForecastingModel", "ModelFitResult", "restore_target"] +__all__ = ["EnsembleForecastingModel", "EnsembleModelFitResult", "ModelFitResult", "restore_target"] diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index 0d45d149e..beee00679 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -15,9 +15,9 @@ from openstef_core.base_model import BaseConfig from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.datasets.validated_datasets import EnsembleForecastDataset from openstef_core.mixins import HyperParams, Predictor from openstef_core.types import LeadTime, Quantile -from openstef_meta.utils.datasets import EnsembleForecastDataset class ForecastCombinerConfig(BaseConfig): diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index 8f8361bc1..e83344add 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -24,6 +24,7 @@ from xgboost import XGBClassifier from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.datasets.validated_datasets import EnsembleForecastDataset from openstef_core.exceptions import ( NotFittedError, ) @@ -33,7 +34,7 @@ ForecastCombiner, ForecastCombinerConfig, ) -from openstef_meta.utils.datasets import EnsembleForecastDataset, combine_forecast_input_datasets +from openstef_meta.utils.datasets import combine_forecast_input_datasets logger = logging.getLogger(__name__) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index 5a2ad7fd6..27c15c8ee 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -15,13 +15,13 @@ from pydantic import Field, field_validator from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.datasets.validated_datasets import EnsembleForecastDataset from openstef_core.exceptions import ( NotFittedError, ) from openstef_core.mixins import HyperParams from openstef_core.types import LeadTime, Quantile from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig -from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_models.explainability.mixins import ExplainableForecaster from openstef_models.models.forecasting.gblinear_forecaster import ( GBLinearForecaster, @@ -30,7 +30,7 @@ from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams if TYPE_CHECKING: - from openstef_models.models.forecasting.forecaster import Forecaster + from openstef_core.mixins.forecaster import Forecaster logger = logging.getLogger(__name__) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py index cebdf5069..f0083b331 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py @@ -18,11 +18,11 @@ NotFittedError, ) from openstef_core.mixins import HyperParams -from openstef_core.types import Quantile -from openstef_models.models.forecasting.forecaster import ( +from openstef_core.mixins.forecaster import ( Forecaster, ForecasterConfig, ) +from openstef_core.types import Quantile from openstef_models.models.forecasting.gblinear_forecaster import ( GBLinearForecaster, GBLinearHyperParams, @@ -86,7 +86,7 @@ class ResidualForecasterConfig(ForecasterConfig): ) -class ResidualForecaster(Forecaster): +class ResidualForecaster(Forecaster): # TODO: Move to a separate PR for now... """MetaForecaster that implements residual modeling. It takes in a primary forecaster and a residual forecaster. The primary forecaster makes initial predictions, diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 29874cb2c..98730d376 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -23,6 +23,7 @@ from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset from openstef_core.mixins.transform import Transform, TransformPipeline from openstef_core.types import LeadTime, Q, Quantile, QuantileOrGlobal +from openstef_meta.integrations.mlflow import EnsembleMLFlowStorageCallback from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_meta.models.forecast_combiners.learned_weights_combiner import ( LGBMCombinerHyperParams, @@ -43,7 +44,7 @@ from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster from openstef_models.presets.forecasting_workflow import LocationConfig from openstef_models.transforms.energy_domain import WindPowerFeatureAdder -from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, SampleWeighter, Scaler +from openstef_models.transforms.general import Clipper, EmptyFeatureRemover, SampleWeightConfig, SampleWeighter, Scaler from openstef_models.transforms.general.imputer import Imputer from openstef_models.transforms.general.nan_dropper import NaNDropper from openstef_models.transforms.general.selector import Selector @@ -67,7 +68,7 @@ from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow, ForecastingCallback if TYPE_CHECKING: - from openstef_models.models.forecasting.forecaster import Forecaster + from openstef_core.mixins.forecaster import Forecaster class EnsembleWorkflowConfig(BaseConfig): @@ -202,27 +203,19 @@ class EnsembleWorkflowConfig(BaseConfig): default=FeatureSelection(include=None, exclude=None), description="Feature selection for which features to clip.", ) - # TODO: Add sample weight method parameters - sample_weight_scale_percentile: int = Field( - default=95, - description="Percentile of target values used as scaling reference. " - "Values are normalized relative to this percentile before weighting.", + forecaster_sample_weights: dict[str, SampleWeightConfig] = Field( + default={ + "gblinear": SampleWeightConfig(method="exponential", weight_exponent=1.0), + "lgbm": SampleWeightConfig(weight_exponent=0.0), + "xgboost": SampleWeightConfig(weight_exponent=0.0), + "lgbm_linear": SampleWeightConfig(weight_exponent=0.0), + }, + description="Per-forecaster sample weighting configuration. Use weight_exponent=0 to produce uniform weights.", ) - forecaster_sample_weight_exponent: dict[str, float] = Field( - default={"gblinear": 1.0, "lgbm": 0, "xgboost": 0, "lgbm_linear": 0}, - description="Exponent applied to scale the sample weights. " - "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " - "Note: Defaults to 1.0 for gblinear congestion models.", - ) - sample_weight_floor: float = Field( - default=0.1, - description="Minimum weight value to ensure all samples contribute to training.", - ) - - forecast_combiner_sample_weight_exponent: float = Field( - default=0, - description="Exponent applied to scale the sample weights for the forecast combiner model. " - "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values.", + combiner_sample_weight: SampleWeightConfig = Field( + default_factory=lambda: SampleWeightConfig(weight_exponent=0.0), + description="Sample weighting configuration for the forecast combiner. " + "Defaults to weight_exponent=0 (uniform weights).", ) # Data splitting strategy @@ -366,20 +359,16 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin forecaster_preprocessing: dict[str, list[Transform[TimeSeriesDataset, TimeSeriesDataset]]] = {} forecasters: dict[str, Forecaster] = {} for model_type in config.base_models: + sample_weight_config = config.forecaster_sample_weights.get(model_type, SampleWeightConfig()) + sample_weighter = SampleWeighter(config=sample_weight_config, target_column=config.target_column) + if model_type == "lgbm": forecasters[model_type] = LGBMForecaster( config=LGBMForecaster.Config( hyperparams=config.lgbm_hyperparams, quantiles=config.quantiles, horizons=config.horizons ) ) - forecaster_preprocessing[model_type] = [ - SampleWeighter( - target_column=config.target_column, - weight_exponent=config.forecaster_sample_weight_exponent[model_type], - weight_floor=config.sample_weight_floor, - weight_scale_percentile=config.sample_weight_scale_percentile, - ), - ] + forecaster_preprocessing[model_type] = [sample_weighter] elif model_type == "gblinear": forecasters[model_type] = GBLinearForecaster( @@ -388,12 +377,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin ) ) forecaster_preprocessing[model_type] = [ - SampleWeighter( - target_column=config.target_column, - weight_exponent=config.forecaster_sample_weight_exponent[model_type], - weight_floor=config.sample_weight_floor, - weight_scale_percentile=config.sample_weight_scale_percentile, - ), + sample_weighter, # Remove lags Selector( selection=FeatureSelection( @@ -431,28 +415,14 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin hyperparams=config.xgboost_hyperparams, quantiles=config.quantiles, horizons=config.horizons ) ) - forecaster_preprocessing[model_type] = [ - SampleWeighter( - target_column=config.target_column, - weight_exponent=config.forecaster_sample_weight_exponent[model_type], - weight_floor=config.sample_weight_floor, - weight_scale_percentile=config.sample_weight_scale_percentile, - ), - ] + forecaster_preprocessing[model_type] = [sample_weighter] elif model_type == "lgbm_linear": forecasters[model_type] = LGBMLinearForecaster( config=LGBMLinearForecaster.Config( hyperparams=config.lgbmlinear_hyperparams, quantiles=config.quantiles, horizons=config.horizons ) ) - forecaster_preprocessing[model_type] = [ - SampleWeighter( - target_column=config.target_column, - weight_exponent=config.forecaster_sample_weight_exponent[model_type], - weight_floor=config.sample_weight_floor, - weight_scale_percentile=config.sample_weight_scale_percentile, - ), - ] + forecaster_preprocessing[model_type] = [sample_weighter] else: msg = f"Unsupported base model type: {model_type}" raise ValueError(msg) @@ -513,36 +483,52 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin name: TransformPipeline(transforms=transforms) for name, transforms in forecaster_preprocessing.items() } - if config.forecast_combiner_sample_weight_exponent != 0: - combiner_transforms = [ - SampleWeighter( - target_column=config.target_column, - weight_exponent=config.forecast_combiner_sample_weight_exponent, - weight_floor=config.sample_weight_floor, - weight_scale_percentile=config.sample_weight_scale_percentile, - ), - Selector(selection=Include("sample_weight", config.target_column)), - ] - else: - combiner_transforms = [] + combiner_transforms = [ + SampleWeighter(config=config.combiner_sample_weight, target_column=config.target_column), + Selector(selection=Include("sample_weight", config.target_column)), + ] combiner_preprocessing: TransformPipeline[TimeSeriesDataset] = TransformPipeline(transforms=combiner_transforms) - ensemble_model = EnsembleForecastingModel( - common_preprocessing=common_preprocessing, - model_specific_preprocessing=model_specific_preprocessing, - combiner_preprocessing=combiner_preprocessing, - postprocessing=TransformPipeline(transforms=postprocessing), - forecasters=forecasters, - combiner=combiner, - target_column=config.target_column, - data_splitter=config.data_splitter, - ) + tags = { + **config.location.tags, + "ensemble_type": config.ensemble_type, + "combiner_model": config.combiner_model, + **config.tags, + } callbacks: list[ForecastingCallback] = [] - # TODO(Egor): Implement MLFlow for OpenSTEF-meta # noqa: TD003 - - return CustomForecastingWorkflow(model=ensemble_model, model_id=config.model_id, callbacks=callbacks) + if config.mlflow_storage is not None: + callbacks.append( + EnsembleMLFlowStorageCallback( + storage=config.mlflow_storage, + model_reuse_enable=config.model_reuse_enable, + model_reuse_max_age=config.model_reuse_max_age, + model_selection_enable=config.model_selection_enable, + model_selection_metric=config.model_selection_metric, + model_selection_old_model_penalty=config.model_selection_old_model_penalty, + ) + ) + + return CustomForecastingWorkflow( + model=EnsembleForecastingModel( + common_preprocessing=common_preprocessing, + model_specific_preprocessing=model_specific_preprocessing, + combiner_preprocessing=combiner_preprocessing, + postprocessing=TransformPipeline(transforms=postprocessing), + forecasters=forecasters, + combiner=combiner, + target_column=config.target_column, + data_splitter=config.data_splitter, + cutoff_history=config.cutoff_history, + # Evaluation + evaluation_metrics=config.evaluation_metrics, + # Other + tags=tags, + ), + model_id=config.model_id, + callbacks=callbacks, + ) __all__ = ["EnsembleWorkflowConfig", "create_ensemble_workflow"] diff --git a/packages/openstef-meta/src/openstef_meta/utils/__init__.py b/packages/openstef-meta/src/openstef_meta/utils/__init__.py index 6aa87b01f..1fdd3fdd4 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/utils/__init__.py @@ -4,4 +4,6 @@ """Utility functions and classes for OpenSTEF Meta.""" -__all__: list[str] = [] +from openstef_meta.utils.datasets import combine_forecast_input_datasets + +__all__: list[str] = ["combine_forecast_input_datasets"] diff --git a/packages/openstef-meta/src/openstef_meta/utils/datasets.py b/packages/openstef-meta/src/openstef_meta/utils/datasets.py index 8f6ed8078..eb6769080 100644 --- a/packages/openstef-meta/src/openstef_meta/utils/datasets.py +++ b/packages/openstef-meta/src/openstef_meta/utils/datasets.py @@ -1,25 +1,11 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project # # SPDX-License-Identifier: MPL-2.0 -"""Ensemble Forecast Dataset. +"""Dataset utility functions for ensemble forecasting.""" -Validated dataset for ensemble forecasters first stage output. -Implements methods to select quantile-specific ForecastInputDatasets for final learners. -Also supports constructing classification targets based on pinball loss. -""" - -from datetime import datetime, timedelta -from typing import Self, override - -import numpy as np -import numpy.typing as npt import pandas as pd -from openstef_beam.metrics import pinball_losses -from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset, TimeSeriesDataset -from openstef_core.types import Quantile - -DEFAULT_TARGET_COLUMN = {Quantile(0.5): "load"} +from openstef_core.datasets import ForecastInputDataset def combine_forecast_input_datasets( @@ -59,206 +45,4 @@ def combine_forecast_input_datasets( ) -class EnsembleForecastDataset(TimeSeriesDataset): - """First stage output format for ensemble forecasters.""" - - forecast_start: datetime - quantiles: list[Quantile] - forecaster_names: list[str] - target_column: str - - @override - def __init__( - self, - data: pd.DataFrame, - sample_interval: timedelta = timedelta(minutes=15), - forecast_start: datetime | None = None, - target_column: str = "load", - *, - horizon_column: str = "horizon", - available_at_column: str = "available_at", - ) -> None: - if "forecast_start" in data.attrs: - self.forecast_start = datetime.fromisoformat(data.attrs["forecast_start"]) - else: - self.forecast_start = forecast_start if forecast_start is not None else data.index.min().to_pydatetime() - self.target_column = data.attrs.get("target_column", target_column) - - super().__init__( - data=data, - sample_interval=sample_interval, - horizon_column=horizon_column, - available_at_column=available_at_column, - ) - quantile_feature_names = [col for col in self.feature_names if col != target_column] - - self.forecaster_names, self.quantiles = self.get_learner_and_quantile(pd.Index(quantile_feature_names)) - n_cols = len(self.forecaster_names) * len(self.quantiles) - if len(data.columns) not in {n_cols + 1, n_cols}: - raise ValueError("Data columns do not match the expected number based on base forecasters and quantiles.") - - @property - def target_series(self) -> pd.Series | None: - """Return the target series if available.""" - if self.target_column in self.data.columns: - return self.data[self.target_column] - return None - - @staticmethod - def get_learner_and_quantile(feature_names: pd.Index) -> tuple[list[str], list[Quantile]]: - """Extract base forecaster names and quantiles from feature names. - - Args: - feature_names: Index of feature names in the dataset. - - Returns: - Tuple containing a list of base forecaster names and a list of quantiles. - - Raises: - ValueError: If an invalid base forecaster name is found in a feature name. - """ - forecasters: set[str] = set() - quantiles: set[Quantile] = set() - - for feature_name in feature_names: - quantile_part = "_".join(feature_name.split("_")[-2:]) - learner_part = feature_name[: -(len(quantile_part) + 1)] - if not Quantile.is_valid_quantile_string(quantile_part): - msg = f"Column has no valid quantile string: {feature_name}" - raise ValueError(msg) - - forecasters.add(learner_part) - quantiles.add(Quantile.parse(quantile_part)) - - return list(forecasters), list(quantiles) - - @staticmethod - def get_quantile_feature_name(feature_name: str) -> tuple[str, Quantile]: - """Generate the feature name for a given base forecaster and quantile. - - Args: - feature_name: Feature name string in the format "model_Quantile". - - Returns: - Tuple containing the base forecaster name and Quantile object. - """ - learner_part, quantile_part = feature_name.split("_", maxsplit=1) - return learner_part, Quantile.parse(quantile_part) - - @classmethod - def from_forecast_datasets( - cls, - datasets: dict[str, ForecastDataset], - target_series: pd.Series | None = None, - sample_weights: pd.Series | None = None, - ) -> Self: - """Create an EnsembleForecastDataset from multiple ForecastDatasets. - - Args: - datasets: Dict of ForecastDatasets to combine. - target_series: Optional target series to include in the dataset. - sample_weights: Optional sample weights series to include in the dataset. - - Returns: - EnsembleForecastDataset combining all input datasets. - """ - ds1 = next(iter(datasets.values())) - additional_columns: dict[str, pd.Series] = {} - if isinstance(ds1.target_series, pd.Series): - additional_columns[ds1.target_column] = ds1.target_series - elif target_series is not None: - additional_columns[ds1.target_column] = target_series - - sample_weight_column = "sample_weight" - if sample_weights is not None: - additional_columns[sample_weight_column] = sample_weights - - combined_data = pd.DataFrame({ - f"{learner}_{q.format()}": ds.data[q.format()] for learner, ds in datasets.items() for q in ds.quantiles - }).assign(**additional_columns) - - return cls( - data=combined_data, - sample_interval=ds1.sample_interval, - forecast_start=ds1.forecast_start, - target_column=ds1.target_column, - ) - - @staticmethod - def _prepare_classification(data: pd.DataFrame, target: pd.Series, quantile: Quantile) -> pd.Series: - """Prepare data for classification tasks by converting quantile columns to binary indicators. - - Args: - data: DataFrame containing quantile predictions. - target: Series containing true target values. - quantile: Quantile for which to prepare classification data. - - Returns: - Series with categorical indicators of best-performing base forecasters. - """ - # Calculate pinball loss for each base forecaster - def _column_losses(preds: pd.Series) -> npt.NDArray[np.floating]: - return pinball_losses(y_true=np.asarray(target), y_pred=np.asarray(preds), quantile=quantile) - - losses_per_forecaster = data.apply(_column_losses) - - return losses_per_forecaster.idxmin(axis=1) - - def get_best_forecaster_labels(self, quantile: Quantile) -> ForecastInputDataset: - """Get labels indicating the best-performing base forecaster for each sample at a specific quantile. - - Creates a dataset where each sample's target is labeled with the name of the base forecaster - that performed best, determined by pinball loss. Used as classification target for training - the final learner. - - Args: - quantile: Quantile to select. - - Returns: - ForecastInputDataset where the target column contains labels of the best-performing - base forecaster for each sample. - - Raises: - ValueError: If the target column is not found in the dataset. - """ - if self.target_column not in self.data.columns: - msg = f"Target column '{self.target_column}' not found in dataset." - raise ValueError(msg) - - selected_columns = [f"{learner}_{quantile.format()}" for learner in self.forecaster_names] - prediction_data = self.data[selected_columns].copy() - prediction_data.columns = self.forecaster_names - - target = self._prepare_classification( - data=prediction_data, - target=self.data[self.target_column], - quantile=quantile, - ) - prediction_data[self.target_column] = target - return ForecastInputDataset( - data=prediction_data, - sample_interval=self.sample_interval, - target_column=self.target_column, - forecast_start=self.forecast_start, - ) - - def get_base_predictions_for_quantile(self, quantile: Quantile) -> ForecastInputDataset: - """Get base forecaster predictions for a specific quantile. - - Args: - quantile: Quantile to select. - - Returns: - ForecastInputDataset containing predictions from all base forecasters at the specified quantile. - """ - selected_columns = [f"{learner}_{quantile.format()}" for learner in self.forecaster_names] - selected_columns.append(self.target_column) - prediction_data = self.data[selected_columns].copy() - prediction_data.columns = [*self.forecaster_names, self.target_column] - - return ForecastInputDataset( - data=prediction_data, - sample_interval=self.sample_interval, - target_column=self.target_column, - forecast_start=self.forecast_start, - ) +__all__ = ["combine_forecast_input_datasets"] diff --git a/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py index 2ddb8d542..3dda72012 100644 --- a/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py @@ -15,6 +15,7 @@ from openstef_meta.presets import EnsembleWorkflowConfig, create_ensemble_workflow from openstef_models.models.forecasting_model import ForecastingModel from openstef_models.presets import ForecastingWorkflowConfig, create_forecasting_workflow +from openstef_models.transforms.general import SampleWeightConfig @pytest.fixture @@ -44,7 +45,10 @@ def config() -> EnsembleWorkflowConfig: combiner_model="lgbm", quantiles=[Q(0.1), Q(0.5), Q(0.9)], horizons=[LeadTime.from_string("PT36H")], - forecaster_sample_weight_exponent={"gblinear": 1, "lgbm": 0}, + forecaster_sample_weights={ + "gblinear": SampleWeightConfig(method="exponential", weight_exponent=1.0), + "lgbm": SampleWeightConfig(method="exponential", weight_exponent=0.0), + }, ) @@ -57,12 +61,13 @@ def create_models( base_models: dict[str, ForecastingModel] = {} for forecaster_name in config.base_models: + sample_weight_config = config.forecaster_sample_weights.get(forecaster_name, SampleWeightConfig()) model_config = ForecastingWorkflowConfig( model_id=f"{forecaster_name}_model_", model=forecaster_name, # type: ignore quantiles=config.quantiles, horizons=config.horizons, - sample_weight_exponent=config.forecaster_sample_weight_exponent[forecaster_name], + sample_weight_exponent=sample_weight_config.weight_exponent, ) base_model = create_forecasting_workflow(config=model_config).model base_models[forecaster_name] = cast(ForecastingModel, base_model) @@ -70,7 +75,7 @@ def create_models( return ensemble_model, base_models -def test_preprocessing( # TODO: Move this to unit/models/test_ensemble_forecasting_model.py? +def test_preprocessing( # TODO: Move this to unit/models/test_ensemble_forecasting_model.py? sample_timeseries_dataset: TimeSeriesDataset, create_models: tuple[EnsembleForecastingModel, dict[str, ForecastingModel]], ) -> None: diff --git a/packages/openstef-meta/tests/unit/integrations/__init__.py b/packages/openstef-meta/tests/unit/integrations/__init__.py new file mode 100644 index 000000000..7b9e0469f --- /dev/null +++ b/packages/openstef-meta/tests/unit/integrations/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 diff --git a/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py b/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py new file mode 100644 index 000000000..ce8273876 --- /dev/null +++ b/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py @@ -0,0 +1,391 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Tests for EnsembleMLFlowStorageCallback.""" + +from __future__ import annotations + +from datetime import timedelta +from typing import TYPE_CHECKING, cast, override + +import pandas as pd +import pytest + +from openstef_core.datasets import TimeSeriesDataset +from openstef_core.datasets.validated_datasets import EnsembleForecastDataset, ForecastDataset, ForecastInputDataset +from openstef_core.exceptions import SkipFitting +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig +from openstef_core.mixins.predictor import HyperParams +from openstef_core.types import LeadTime, Q +from openstef_meta.integrations.mlflow import EnsembleMLFlowStorageCallback +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel +from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig +from openstef_models.integrations.mlflow import MLFlowStorage +from openstef_models.mixins.callbacks import WorkflowContext +from openstef_models.models.forecasting_model import ForecastingModel, ModelFitResult +from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow + +if TYPE_CHECKING: + from pathlib import Path + + +class SimpleForecasterHyperParams(HyperParams): + """Test hyperparameters for the simple forecaster.""" + + alpha: float = 0.5 + n_rounds: int = 100 + + +class SimpleTestForecaster(Forecaster): + """Simple forecaster for testing that stores and restores median value.""" + + def __init__(self, config: ForecasterConfig): + self._config = config + self._median_value: float = 0.0 + self._is_fitted = False + + @property + def config(self) -> ForecasterConfig: + return self._config + + @property + @override + def is_fitted(self) -> bool: + return self._is_fitted + + @property + @override + def hyperparams(self) -> SimpleForecasterHyperParams: + return SimpleForecasterHyperParams() + + @override + def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: + if self._is_fitted: + return + self._median_value = float(data.target_series.median()) + self._is_fitted = True + + @override + def predict(self, data: ForecastInputDataset) -> ForecastDataset: + if not self._is_fitted: + raise RuntimeError("Model not fitted") + forecast_data = pd.DataFrame( + {quantile.format(): [self._median_value] * len(data.index) for quantile in self.config.quantiles}, + index=data.index, + ) + return ForecastDataset(forecast_data, data.sample_interval, data.forecast_start) + + +class SimpleCombinerHyperParams(HyperParams): + """Test hyperparameters for the simple combiner.""" + + learning_rate: float = 0.01 + + +class SimpleTestCombiner(ForecastCombiner): + """Simple combiner for testing that averages base forecaster predictions.""" + + def __init__(self, config: ForecastCombinerConfig): + self.config = config + self._is_fitted = False + self.quantiles = config.quantiles + + @override + def fit( + self, + data: EnsembleForecastDataset, + data_val: EnsembleForecastDataset | None = None, + additional_features: ForecastInputDataset | None = None, + ) -> None: + self._is_fitted = True + + @override + def predict( + self, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, + ) -> ForecastDataset: + combined_data = pd.DataFrame(index=data.data.index) + for quantile in self.quantiles: + quantile_cols = [col for col in data.data.columns if col.endswith(quantile.format())] + combined_data[quantile.format()] = data.data[quantile_cols].mean(axis=1) + return ForecastDataset( + data=combined_data, sample_interval=data.sample_interval, forecast_start=data.forecast_start + ) + + @property + @override + def is_fitted(self) -> bool: + return self._is_fitted + + @override + def predict_contributions( + self, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, + ) -> pd.DataFrame: + return pd.DataFrame() + + +# --- Fixtures --- + + +@pytest.fixture +def storage(tmp_path: Path) -> MLFlowStorage: + """Create MLflow storage with temporary paths.""" + return MLFlowStorage( + tracking_uri=str(tmp_path / "mlflow"), + local_artifacts_path=tmp_path / "artifacts", + ) + + +@pytest.fixture +def callback(storage: MLFlowStorage) -> EnsembleMLFlowStorageCallback: + """Create ensemble callback with test storage.""" + return EnsembleMLFlowStorageCallback(storage=storage) + + +@pytest.fixture +def sample_dataset() -> TimeSeriesDataset: + return TimeSeriesDataset( + data=pd.DataFrame( + {"load": [100.0, 110.0, 120.0, 105.0, 95.0, 115.0, 125.0, 130.0], "value": 100.0}, + index=pd.date_range("2025-01-01", periods=8, freq="h"), + ), + sample_interval=timedelta(hours=1), + ) + + +def _create_ensemble_workflow() -> CustomForecastingWorkflow: + """Create an ensemble forecasting workflow for testing.""" + horizons = [LeadTime(timedelta(hours=1))] + quantiles = [Q(0.5)] + config = ForecasterConfig(horizons=horizons, quantiles=quantiles) + + forecasters: dict[str, Forecaster] = { + "model_a": SimpleTestForecaster(config=config), + "model_b": SimpleTestForecaster(config=config), + } + combiner_config = ForecastCombinerConfig( + quantiles=quantiles, + horizons=horizons, + hyperparams=SimpleCombinerHyperParams(), + ) + combiner = SimpleTestCombiner(config=combiner_config) + + ensemble_model = EnsembleForecastingModel( + forecasters=forecasters, + combiner=combiner, + ) + + return CustomForecastingWorkflow(model_id="test_ensemble", model=ensemble_model) + + +@pytest.fixture +def ensemble_workflow() -> CustomForecastingWorkflow: + return _create_ensemble_workflow() + + +@pytest.fixture +def ensemble_fit_result( + sample_dataset: TimeSeriesDataset, ensemble_workflow: CustomForecastingWorkflow +) -> ModelFitResult: + """Create a fit result from the ensemble model, downcast to combiner's ModelFitResult.""" + ensemble_result = ensemble_workflow.model.fit(sample_dataset) + return ensemble_result.combiner_fit_result + + +@pytest.fixture +def single_workflow() -> CustomForecastingWorkflow: + """Create a single-model forecasting workflow for testing fallback behavior.""" + horizons = [LeadTime(timedelta(hours=1))] + quantiles = [Q(0.5)] + model = ForecastingModel( + forecaster=SimpleTestForecaster(config=ForecasterConfig(horizons=horizons, quantiles=quantiles)), + ) + return CustomForecastingWorkflow(model_id="test_single", model=model) + + +@pytest.fixture +def single_fit_result(sample_dataset: TimeSeriesDataset, single_workflow: CustomForecastingWorkflow) -> ModelFitResult: + """Create a fit result from a single model.""" + return single_workflow.model.fit(sample_dataset) + + +# --- Tests --- + + +def test_on_fit_end__stores_ensemble_model( + callback: EnsembleMLFlowStorageCallback, + ensemble_workflow: CustomForecastingWorkflow, + ensemble_fit_result: ModelFitResult, +): + """Test that on_fit_end stores an EnsembleForecastingModel to MLflow.""" + context = WorkflowContext(workflow=ensemble_workflow) + + callback.on_fit_end(context=context, result=ensemble_fit_result) + + runs = callback.storage.search_latest_runs(model_id=ensemble_workflow.model_id, limit=1) + assert len(runs) == 1 + + run_id = cast(str, runs[0].info.run_id) + loaded_model = callback.storage.load_run_model(model_id=ensemble_workflow.model_id, run_id=run_id) + assert isinstance(loaded_model, EnsembleForecastingModel) + assert loaded_model.is_fitted + assert set(loaded_model.forecasters.keys()) == {"model_a", "model_b"} + + +def test_on_fit_end__logs_combiner_hyperparams_as_primary( + callback: EnsembleMLFlowStorageCallback, + ensemble_workflow: CustomForecastingWorkflow, + ensemble_fit_result: ModelFitResult, +): + """Test that combiner hyperparams are logged as the run's primary params.""" + context = WorkflowContext(workflow=ensemble_workflow) + + callback.on_fit_end(context=context, result=ensemble_fit_result) + + runs = callback.storage.search_latest_runs(model_id=ensemble_workflow.model_id, limit=1) + run = runs[0] + params = run.data.params # pyright: ignore[reportUnknownMemberType] + + # Combiner hyperparams should be logged as primary params + assert "learning_rate" in params + assert params["learning_rate"] == "0.01" + + +def test_on_fit_end__logs_per_forecaster_hyperparams( + callback: EnsembleMLFlowStorageCallback, + ensemble_workflow: CustomForecastingWorkflow, + ensemble_fit_result: ModelFitResult, +): + """Test that per-forecaster hyperparams are logged with name prefixes.""" + context = WorkflowContext(workflow=ensemble_workflow) + + callback.on_fit_end(context=context, result=ensemble_fit_result) + + runs = callback.storage.search_latest_runs(model_id=ensemble_workflow.model_id, limit=1) + run = runs[0] + params = run.data.params # pyright: ignore[reportUnknownMemberType] + + # Per-forecaster hyperparams should be prefixed + assert "model_a.alpha" in params + assert params["model_a.alpha"] == "0.5" + assert "model_a.n_rounds" in params + assert params["model_a.n_rounds"] == "100" + assert "model_b.alpha" in params + assert "model_b.n_rounds" in params + + +def test_on_fit_end__single_model_fallback( + callback: EnsembleMLFlowStorageCallback, + single_workflow: CustomForecastingWorkflow, + single_fit_result: ModelFitResult, +): + """Test that non-ensemble models fall back to base class behavior.""" + context = WorkflowContext(workflow=single_workflow) + + callback.on_fit_end(context=context, result=single_fit_result) + + runs = callback.storage.search_latest_runs(model_id=single_workflow.model_id, limit=1) + assert len(runs) == 1 + + run_id = cast(str, runs[0].info.run_id) + loaded_model = callback.storage.load_run_model(model_id=single_workflow.model_id, run_id=run_id) + assert isinstance(loaded_model, ForecastingModel) + assert loaded_model.is_fitted + + +def test_on_predict_start__loads_ensemble_model( + callback: EnsembleMLFlowStorageCallback, + ensemble_workflow: CustomForecastingWorkflow, + ensemble_fit_result: ModelFitResult, + sample_dataset: TimeSeriesDataset, +): + """Test that on_predict_start loads an ensemble model from MLflow.""" + # Store a fitted model first + context = WorkflowContext(workflow=ensemble_workflow) + callback.on_fit_end(context=context, result=ensemble_fit_result) + + # Create a new unfitted ensemble workflow + unfitted_workflow = _create_ensemble_workflow() + unfitted_context = WorkflowContext(workflow=unfitted_workflow) + + callback.on_predict_start(context=unfitted_context, data=sample_dataset) + + assert unfitted_context.workflow.model.is_fitted + + +def test_model_selection__keeps_better_ensemble_model( + storage: MLFlowStorage, + ensemble_workflow: CustomForecastingWorkflow, + ensemble_fit_result: ModelFitResult, + sample_dataset: TimeSeriesDataset, +): + """Test that model selection keeps the better performing ensemble model.""" + callback = EnsembleMLFlowStorageCallback( + storage=storage, + model_selection_metric=(Q(0.5), "R2", "higher_is_better"), + ) + + # Store an initial ensemble model + context = WorkflowContext(workflow=ensemble_workflow) + callback.on_fit_end(context=context, result=ensemble_fit_result) + + # Create a worse ensemble + horizons = [LeadTime(timedelta(hours=1))] + quantiles = [Q(0.5)] + config = ForecasterConfig(horizons=horizons, quantiles=quantiles) + + worse_a = SimpleTestForecaster(config=config) + worse_a._median_value = 50.0 + worse_a._is_fitted = True + worse_b = SimpleTestForecaster(config=config) + worse_b._median_value = 50.0 + worse_b._is_fitted = True + + worse_ensemble = EnsembleForecastingModel( + forecasters={"model_a": worse_a, "model_b": worse_b}, + combiner=SimpleTestCombiner( + config=ForecastCombinerConfig( + quantiles=quantiles, + horizons=horizons, + hyperparams=SimpleCombinerHyperParams(), + ) + ), + ) + worse_result = worse_ensemble.fit(sample_dataset) + worse_workflow = CustomForecastingWorkflow(model_id="test_ensemble", model=worse_ensemble) + worse_context = WorkflowContext(workflow=worse_workflow) + + with pytest.raises(SkipFitting, match="New model did not improve"): + callback.on_fit_end(context=worse_context, result=worse_result.combiner_fit_result) + + +def test_get_hyperparams__returns_combiner_hyperparams_for_ensemble( + callback: EnsembleMLFlowStorageCallback, + ensemble_workflow: CustomForecastingWorkflow, +): + """Test _get_hyperparams returns combiner hyperparams for ensemble models.""" + model = ensemble_workflow.model + assert isinstance(model, EnsembleForecastingModel) + + result = callback._get_hyperparams(model) + + assert isinstance(result, SimpleCombinerHyperParams) + assert result.learning_rate == 0.01 + + +def test_get_hyperparams__falls_back_for_single_model( + callback: EnsembleMLFlowStorageCallback, + single_workflow: CustomForecastingWorkflow, +): + """Test _get_hyperparams falls back to base for non-ensemble models.""" + model = single_workflow.model + assert isinstance(model, ForecastingModel) + + result = callback._get_hyperparams(model) + + # SimpleTestForecaster returns SimpleForecasterHyperParams via .hyperparams + assert isinstance(result, SimpleForecasterHyperParams) diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py index 5d251833b..99b782241 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/conftest.py @@ -9,8 +9,7 @@ import pandas as pd import pytest -from openstef_core.datasets.validated_datasets import ForecastDataset -from openstef_meta.utils.datasets import EnsembleForecastDataset +from openstef_core.datasets.validated_datasets import EnsembleForecastDataset, ForecastDataset @pytest.fixture diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py index 17fef9043..f9b9916fd 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_learned_weights_combiner.py @@ -6,6 +6,7 @@ import pytest +from openstef_core.datasets.validated_datasets import EnsembleForecastDataset from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q from openstef_meta.models.forecast_combiners.learned_weights_combiner import ( @@ -16,7 +17,6 @@ WeightsCombinerConfig, XGBCombinerHyperParams, ) -from openstef_meta.utils.datasets import EnsembleForecastDataset @pytest.fixture(params=["lgbm", "xgboost", "rf", "logistic"]) diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py index 213b0aeb6..d5b986d23 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_stacking_combiner.py @@ -7,13 +7,13 @@ import pandas as pd import pytest +from openstef_core.datasets.validated_datasets import EnsembleForecastDataset from openstef_core.exceptions import NotFittedError from openstef_core.types import LeadTime, Q from openstef_meta.models.forecast_combiners.stacking_combiner import ( StackingCombiner, StackingCombinerConfig, ) -from openstef_meta.utils.datasets import EnsembleForecastDataset from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster diff --git a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py index b44fee5c7..97121fb34 100644 --- a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py @@ -12,16 +12,15 @@ from openstef_core.datasets import ForecastInputDataset from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset -from openstef_core.datasets.validated_datasets import ForecastDataset +from openstef_core.datasets.validated_datasets import EnsembleForecastDataset, ForecastDataset from openstef_core.exceptions import NotFittedError +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.mixins.predictor import HyperParams from openstef_core.mixins.transform import TransformPipeline from openstef_core.testing import assert_timeseries_equal, create_synthetic_forecasting_dataset from openstef_core.types import LeadTime, Q from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig -from openstef_meta.utils.datasets import EnsembleForecastDataset -from openstef_models.models.forecasting import Forecaster, ForecasterConfig from openstef_models.transforms.postprocessing.quantile_sorter import QuantileSorter from openstef_models.transforms.time_domain.lags_adder import LagsAdder diff --git a/packages/openstef-meta/tests/unit/utils/test_datasets.py b/packages/openstef-meta/tests/unit/utils/test_datasets.py index 7e77d39e5..312d9fd80 100644 --- a/packages/openstef-meta/tests/unit/utils/test_datasets.py +++ b/packages/openstef-meta/tests/unit/utils/test_datasets.py @@ -9,9 +9,8 @@ import pandas as pd import pytest -from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset +from openstef_core.datasets.validated_datasets import EnsembleForecastDataset, ForecastDataset, ForecastInputDataset from openstef_core.types import Quantile -from openstef_meta.utils.datasets import EnsembleForecastDataset @pytest.fixture diff --git a/packages/openstef-models/src/openstef_models/explainability/mixins.py b/packages/openstef-models/src/openstef_models/explainability/mixins.py index 2e1fa81ca..4a29633b8 100644 --- a/packages/openstef-models/src/openstef_models/explainability/mixins.py +++ b/packages/openstef-models/src/openstef_models/explainability/mixins.py @@ -18,7 +18,7 @@ from openstef_models.explainability.plotters.feature_importance_plotter import FeatureImportancePlotter -class ExplainableForecaster(ABC): +class ExplainableForecaster(ABC): # TODO: Inherit from Forecaster once it is moved to openstef-core? """Mixin for forecasters that can explain feature importance. Provides a standardized interface for accessing and visualizing feature diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage.py index 3d17004d8..465f7f02b 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage.py @@ -13,6 +13,7 @@ import os from collections.abc import Sequence from datetime import UTC, datetime +from itertools import starmap from pathlib import Path from tempfile import TemporaryDirectory from typing import Any, cast, override @@ -114,6 +115,23 @@ def create_run( return run + def log_hyperparams(self, run_id: str, params: dict[str, str]) -> None: + """Log additional hyperparameters to an existing MLflow run. + + Useful for logging hyperparameters from multiple components (e.g., + ensemble base forecasters and combiner) with prefixed names. + + Args: + run_id: MLflow run ID to log parameters to. + params: Key-value pairs of hyperparameter names and string values. + """ + if not params: + return + self._client.log_batch( + run_id=run_id, + params=list(starmap(Param, params.items())), + ) + def finalize_run( self, model_id: ModelIdentifier, run_id: str, metrics: dict[str, float] | None = None, status: str = "FINISHED" ) -> None: diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index eecd2cf63..74340631a 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -11,6 +11,7 @@ import logging from datetime import UTC, datetime, timedelta +from pathlib import Path from typing import Any, cast, override from pydantic import Field, PrivateAttr @@ -27,12 +28,13 @@ ModelNotFoundError, SkipFitting, ) +from openstef_core.mixins.predictor import HyperParams from openstef_core.types import Q, QuantileOrGlobal -from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_models.explainability import ExplainableForecaster from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage from openstef_models.mixins.callbacks import WorkflowContext from openstef_models.models import ForecastingModel +from openstef_models.models.base_forecasting_model import BaseForecastingModel from openstef_models.models.forecasting_model import ModelFitResult from openstef_models.workflows.custom_forecasting_workflow import ( CustomForecastingWorkflow, @@ -114,22 +116,21 @@ def on_fit_end( if self.model_selection_enable: self._run_model_selection(workflow=context.workflow, result=result) - if isinstance(context.workflow.model, EnsembleForecastingModel): - raise NotImplementedError( - "MLFlowStorageCallback does not yet support EnsembleForecastingWorkflow model storage." - ) # TODO: Implement model selection and storage for EnsembleForecastingWorkflow, including handling of base forecasters and combiner model. - # Create a new run + model = context.workflow.model run = self.storage.create_run( model_id=context.workflow.model_id, - tags=context.workflow.model.tags, - hyperparams=context.workflow.model.forecaster.hyperparams, + tags=model.tags, + hyperparams=self._get_hyperparams(model), run_name=context.workflow.run_name, experiment_tags=context.workflow.experiment_tags, ) run_id: str = run.info.run_id self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) + # Hook for subclasses to log additional hyperparameters (e.g., per-component for ensembles) + self._log_additional_hyperparams(model=model, run_id=run_id) + # Store the model input run_path = self.storage.get_artifacts_path(model_id=context.workflow.model_id, run_id=run_id) data_path = run_path / self.storage.data_path @@ -137,10 +138,9 @@ def on_fit_end( result.input_dataset.to_parquet(path=data_path / "data.parquet") self._logger.info("Stored training data at %s for run %s", data_path, run_id) - # Store feature importance plot if enabled - if self.store_feature_importance_plot and isinstance(context.workflow.model.forecaster, ExplainableForecaster): - fig = context.workflow.model.forecaster.plot_feature_importances() - fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] + # Store feature importance plots if enabled + if self.store_feature_importance_plot: + self._store_feature_importance(model=model, data_path=data_path) # Store the trained model self.storage.save_run_model( @@ -162,6 +162,36 @@ def on_fit_end( self.storage.finalize_run(model_id=context.workflow.model_id, run_id=run_id, metrics=metrics) self._logger.info("Stored MLflow run %s for model %s", run_id, context.workflow.model_id) + def _get_hyperparams(self, model: BaseForecastingModel) -> HyperParams | None: + """Extract hyperparameters from the model for MLflow logging. + + Override in subclasses for models with different hyperparameter structures. + """ + if isinstance(model, ForecastingModel): + return model.forecaster.hyperparams + return None + + def _log_additional_hyperparams(self, model: BaseForecastingModel, run_id: str) -> None: + """Hook for logging additional hyperparameters. Override in subclasses.""" + + @staticmethod + def _store_feature_importance( + model: BaseForecastingModel, + data_path: Path, + ) -> None: + """Store feature importance plots for the model. + + For a ForecastingModel, stores a single feature importance plot if the + forecaster is explainable. + + Args: + model: The model to extract feature importances from. + data_path: Directory path where HTML plots will be saved. + """ + if isinstance(model, ForecastingModel) and isinstance(model.forecaster, ExplainableForecaster): + fig = model.forecaster.plot_feature_importances() + fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] + @override def on_predict_start( self, @@ -190,9 +220,9 @@ def on_predict_start( old_model = self.storage.load_run_model(run_id=run_id, model_id=context.workflow.model_id) - if not isinstance(old_model, ForecastingModel): + if not isinstance(old_model, BaseForecastingModel): self._logger.warning( - "Loaded model from run %s is not a ForecastingModel, cannot use for prediction", + "Loaded model from run %s is not a BaseForecastingModel, cannot use for prediction", cast(str, run.info.run_id), ) return @@ -255,7 +285,7 @@ def _try_load_model( self, run_id: str, workflow: CustomForecastingWorkflow, - ) -> ForecastingModel | None: + ) -> BaseForecastingModel | None: try: old_model = self.storage.load_run_model(run_id=run_id, model_id=workflow.model_id) except ModelNotFoundError: @@ -266,9 +296,9 @@ def _try_load_model( ) return None - if not isinstance(old_model, ForecastingModel): + if not isinstance(old_model, BaseForecastingModel): self._logger.warning( - "Loaded old model from run %s is not a ForecastingModel, skipping model selection", + "Loaded old model from run %s is not a BaseForecastingModel, skipping model selection", run_id, ) return None @@ -278,7 +308,7 @@ def _try_load_model( def _try_evaluate_model( self, run_id: str, - old_model: ForecastingModel, + old_model: BaseForecastingModel, input_data: TimeSeriesDataset, ) -> SubsetMetric | None: try: diff --git a/packages/openstef-models/src/openstef_models/models/__init__.py b/packages/openstef-models/src/openstef_models/models/__init__.py index 766194fe5..a623e5c1e 100644 --- a/packages/openstef-models/src/openstef_models/models/__init__.py +++ b/packages/openstef-models/src/openstef_models/models/__init__.py @@ -8,10 +8,12 @@ imports. """ +from .base_forecasting_model import BaseForecastingModel from .component_splitting_model import ComponentSplittingModel from .forecasting_model import ForecastingModel __all__ = [ + "BaseForecastingModel", "ComponentSplittingModel", "ForecastingModel", ] diff --git a/packages/openstef-models/src/openstef_models/models/base_forecasting_model.py b/packages/openstef-models/src/openstef_models/models/base_forecasting_model.py new file mode 100644 index 000000000..65cfa4ccb --- /dev/null +++ b/packages/openstef-models/src/openstef_models/models/base_forecasting_model.py @@ -0,0 +1,156 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Abstract base class for forecasting models. + +Provides shared fields and evaluation logic used by both single-model +(ForecastingModel) and ensemble (EnsembleForecastingModel) implementations. +""" + +import logging +from abc import abstractmethod +from datetime import datetime, timedelta +from typing import Any, override + +import pandas as pd +from pydantic import Field, PrivateAttr + +from openstef_beam.evaluation import EvaluationConfig, EvaluationPipeline, SubsetMetric +from openstef_beam.evaluation.metric_providers import MetricProvider, ObservedProbabilityProvider, R2Provider +from openstef_core.base_model import BaseModel +from openstef_core.datasets import ForecastDataset, TimeSeriesDataset +from openstef_core.mixins import Predictor, TransformPipeline +from openstef_core.mixins.forecaster import ForecasterConfig +from openstef_models.utils.data_split import DataSplitter + + +# TODO: Move to openstef-core? +class BaseForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): + """Abstract base for forecasting models.""" + + # Shared model components + postprocessing: TransformPipeline[ForecastDataset] = Field( + default_factory=TransformPipeline[ForecastDataset], + description="Postprocessing pipeline for transforming model outputs into final forecasts.", + exclude=True, + ) + target_column: str = Field( + default="load", + description="Name of the target variable column in datasets.", + ) + data_splitter: DataSplitter = Field( + default_factory=DataSplitter, + description="Data splitting strategy for train/validation/test sets.", + ) + cutoff_history: timedelta = Field( + default=timedelta(days=0), + description="Amount of historical data to exclude from training and prediction due to incomplete features " + "from lag-based preprocessing. When using lag transforms (e.g., lag-14), the first N days contain NaN values. " + "Set this to match your maximum lag duration (e.g., timedelta(days=14)). " + "Default of 0 assumes no invalid rows are created by preprocessing.", + ) + + # Evaluation + evaluation_metrics: list[MetricProvider] = Field( + default_factory=lambda: [R2Provider(), ObservedProbabilityProvider()], + description="List of metric providers for evaluating model score.", + ) + + # Metadata + tags: dict[str, str] = Field( + default_factory=dict, + description="Optional metadata tags for the model.", + ) + + _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) + + @property + @abstractmethod + def scoring_config(self) -> ForecasterConfig: + """Return the forecaster config used for evaluation metrics. + + For a single-model pipeline this is the forecaster's own config. + For an ensemble it is typically the first (or canonical) base-forecaster config. + """ + + @abstractmethod + @override + def fit( + self, + data: TimeSeriesDataset, + data_val: TimeSeriesDataset | None = None, + data_test: TimeSeriesDataset | None = None, + ) -> Any: + """Train the forecasting model on the provided dataset. + + Args: + data: Historical time series data with features and target values. + data_val: Optional validation data. + data_test: Optional test data. + + Returns: + Fit result containing training details and metrics. + """ + + @abstractmethod + @override + def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: + """Generate forecasts for the input data. + + Args: + data: Input dataset for generating forecasts. + forecast_start: Optional start time for forecasts. + + Returns: + Generated forecast dataset. + """ + + def score(self, data: TimeSeriesDataset) -> SubsetMetric: + """Evaluate model performance on the provided dataset. + + Generates predictions for the dataset and calculates evaluation metrics + by comparing against ground truth values. Uses the configured evaluation + metrics to assess forecast quality at the maximum forecast horizon. + + Args: + data: Time series dataset containing both features and target values + for evaluation. + + Returns: + Evaluation metrics including configured providers (e.g., R², observed + probability) computed at the maximum forecast horizon. + """ + prediction = self.predict(data=data) + return self._calculate_score(prediction=prediction) + + def _calculate_score(self, prediction: ForecastDataset) -> SubsetMetric: + if prediction.target_series is None: + raise ValueError("Prediction dataset must contain target series for scoring.") + + # Drop NaN targets for metric calculation + prediction = prediction.pipe_pandas(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownArgumentType, reportUnknownMemberType] + + pipeline = EvaluationPipeline( + config=EvaluationConfig(available_ats=[], lead_times=[self.scoring_config.max_horizon]), + quantiles=self.scoring_config.quantiles, + window_metric_providers=[], + global_metric_providers=self.evaluation_metrics, + ) + + evaluation_result = pipeline.run_for_subset( + filtering=self.scoring_config.max_horizon, + predictions=prediction, + ) + global_metric = evaluation_result.get_global_metric() + if not global_metric: + return SubsetMetric( + window="global", + timestamp=prediction.forecast_start, + metrics={}, + ) + + return global_metric + + +__all__ = ["BaseForecastingModel"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/__init__.py b/packages/openstef-models/src/openstef_models/models/forecasting/__init__.py index 1623e576e..83fb8e4e7 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/__init__.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/__init__.py @@ -2,25 +2,13 @@ # # SPDX-License-Identifier: MPL-2.0 -"""Forecasting interfaces and implementations for OpenSTEF models. +"""Forecasting implementations for OpenSTEF models. -This module provides the core forecasting abstractions and concrete implementations. -The base interfaces define the contract for all forecasters, while specific -implementations demonstrate different forecasting approaches. - -Interfaces: - - BaseForecaster: Core multi-horizon forecasting interface - - BaseHorizonForecaster: Single-horizon forecasting interface - - Configuration classes for forecaster setup and validation +Concrete forecaster implementations for different ML frameworks. +The base `Forecaster` and `ForecasterConfig` interfaces live in +``openstef_core.mixins.forecaster``. Implementations: - constant_median_forecaster: Simple baseline forecaster using historical medians - multi_horizon_adapter: Adapter pattern for converting single to multi-horizon forecasters """ - -from .forecaster import Forecaster, ForecasterConfig - -__all__ = [ - "Forecaster", - "ForecasterConfig", -] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py index 1e1094092..d03714705 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py @@ -21,10 +21,10 @@ from pydantic import Field from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.mixins.predictor import HyperParams from openstef_core.types import LeadTime, Quantile from openstef_models.explainability.mixins import ExplainableForecaster -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig class BaseCaseForecasterHyperParams(HyperParams): diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py index 2289398d5..cf254cf5c 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py @@ -16,10 +16,10 @@ from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import NotFittedError +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.mixins.predictor import HyperParams from openstef_core.types import Any, LeadTime, Quantile from openstef_models.explainability.mixins import ExplainableForecaster -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig class ConstantMedianForecasterHyperParams(HyperParams): diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py index d684d1373..79bf24534 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py @@ -14,8 +14,8 @@ from pydantic import Field from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_models.explainability.mixins import ExplainableForecaster -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig class FlatlinerForecasterConfig(ForecasterConfig): diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py index 54e96a723..ae75cee02 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py @@ -2,229 +2,19 @@ # # SPDX-License-Identifier: MPL-2.0 -"""Core forecasting model interfaces and configurations. +"""Re-exports of core forecasting interfaces from openstef-core. -Provides the fundamental building blocks for implementing forecasting models in OpenSTEF. -These mixins establish contracts that ensure consistent behavior across different model types -while supporting both single and multi-horizon forecasting scenarios. - -Key concepts: -- **Horizon**: The lead time for predictions, accounting for data availability and versioning cutoffs -- **Quantiles**: Probability levels for uncertainty estimation -- **State**: Serializable model parameters that enable saving/loading trained models -- **Batching**: Processing multiple prediction requests simultaneously for efficiency - -Multi-horizon forecasting considerations: -Some models (like linear models) cannot handle missing data or conditional features effectively, -making them suitable only for single-horizon approaches. Other models (like XGBoost) can -handle such data complexities and work well for multi-horizon scenarios. +The canonical definitions of Forecaster and ForecasterConfig +now live in ``openstef_core.mixins.forecaster``. This module re-exports them for +backwards compatibility. """ -from abc import abstractmethod -from typing import Self - -from pydantic import Field - -from openstef_core.base_model import BaseConfig -from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_core.mixins import BatchPredictor, HyperParams -from openstef_core.types import LeadTime, Quantile - - -class ForecasterConfig(BaseConfig): - """Configuration for forecasting models with support for multiple quantiles and horizons. - - Fundamental configuration parameters that determine forecasting model behavior across - different prediction horizons and uncertainty levels. These are operational parameters - rather than hyperparameters that affect training. - - Example: - Basic configuration for daily energy forecasting: - - >>> from openstef_core.types import LeadTime, Quantile - >>> config = ForecasterConfig( - ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], - ... horizons=[LeadTime.from_string("PT1H"), LeadTime.from_string("PT6H"), LeadTime.from_string("PT24H")] - ... ) - >>> len(config.horizons) - 3 - >>> str(config.max_horizon) - 'P1D' - - See Also: - HorizonForecasterConfig: Single-horizon variant of this configuration. - BaseForecaster: Multi-horizon forecaster that uses this configuration. - ForecasterHyperParams: Hyperparameter configuration used alongside this. - """ - - quantiles: list[Quantile] = Field( - default=[Quantile(0.5)], - description=( - "Probability levels for uncertainty estimation. Each quantile represents a confidence level " - "(e.g., 0.1 = 10th percentile, 0.5 = median, 0.9 = 90th percentile). " - "Models must generate predictions for all specified quantiles." - ), - min_length=1, - ) - - horizons: list[LeadTime] = Field( - default=..., - description=( - "Lead times for predictions, accounting for data availability and versioning cutoffs. " - "Each horizon defines how far ahead the model should predict." - ), - min_length=1, - ) - - supports_batching: bool = Field( - default=False, - description=( - "Indicates if the model can handle batch predictions. " - "Batching allows multiple prediction requests to be processed simultaneously, " - "which is more efficient for models that support it, especially on GPUs." - ), - ) - - @property - def max_horizon(self) -> LeadTime: - """Returns the maximum lead time (horizon) from the configured horizons. - - Useful for determining the furthest prediction distance required by the model. - This is commonly used for data preparation and validation logic. - - Returns: - The maximum lead time. - """ - return max(self.horizons) - - def with_horizon(self, horizon: LeadTime) -> Self: - """Create a new configuration with a different horizon. - - Useful for creating multiple forecaster instances for different prediction - horizons from a single base configuration. - - Args: - horizon: The new lead time to use for predictions. - - Returns: - New configuration instance with the specified horizon. - """ - return self.model_copy(update={"horizons": [horizon]}) - - @classmethod - def forecaster_class(cls) -> type["Forecaster"]: - """Get the associated Forecaster class for this configuration. - - Returns: - The Forecaster class that uses this configuration. - """ - raise NotImplementedError("Subclasses must implement forecaster_class") - - -class ConfigurableForecaster: - @property - @abstractmethod - def config(self) -> ForecasterConfig: - """Access the model's configuration parameters. - - Returns: - Configuration object containing fundamental model parameters. - """ - raise NotImplementedError("Subclasses must implement config") - - @property - def hyperparams(self) -> HyperParams: - """Access the model's hyperparameters for training and prediction. - - Hyperparameters control model behavior during training and inference. - Default implementation returns empty hyperparameters, which is suitable - for models without configurable parameters. - - Returns: - Hyperparameter configuration object. - """ - return HyperParams() - - -class Forecaster(BatchPredictor[ForecastInputDataset, ForecastDataset], ConfigurableForecaster): - """Base for forecasters that handle multiple horizons simultaneously. - - Designed for models that train and predict across multiple prediction horizons - in a unified manner. These models handle the complexity of different lead times - internally, providing a simpler interface for multi-horizon forecasting. - - Ideal for models that can share parameters or features across horizons, avoiding - the need to train separate models for each prediction distance. - - Invariants: - - Predictions must include all quantiles specified in the configuration - - predict_batch() only called when supports_batching returns True - - Example: - Implementation for a model that handles multiple horizons: - - >>> from typing import override - >>> class CustomForecaster(Forecaster): - ... def __init__(self, config: ForecasterConfig): - ... self._config = config - ... self._fitted = False - ... - ... @property - ... @override - ... def config(self): - ... return self._config - ... - ... @property - ... @override - ... def is_fitted(self): - ... return self._fitted - ... - ... @override - ... def get_state(self): - ... return {"config": self._config, "fitted": self._fitted} - ... - ... @override - ... def from_state(self, state): - ... instance = self.__class__(state["config"]) - ... instance._fitted = state["fitted"] - ... return instance - ... - ... @override - ... def fit(self, input_data, data_val): - ... # Train on data for all horizons - ... self._fitted = True - ... - ... @override - ... def predict(self, input_data): - ... # Generate predictions for all horizons - ... from openstef_core.datasets.validated_datasets import ForecastDataset - ... import pandas as pd - ... return ForecastDataset( - ... data=pd.DataFrame(), - ... sample_interval=pd.Timedelta("15min"), - ... forecast_start=pd.Timestamp.now() - ... ) - """ - - @abstractmethod - def __init__(self, config: ForecasterConfig) -> None: - """Initialize the forecaster with the given configuration. - - Args: - config: Configuration object specifying quantiles, horizons, and batching support. - """ - raise NotImplementedError("Subclasses must implement __init__") - - @property - @abstractmethod - def config(self) -> ForecasterConfig: - """Access the model's configuration parameters. - - Returns: - Configuration object containing fundamental model parameters. - """ - raise NotImplementedError("Subclasses must implement config") +# TODO: Remove... Backwards compat not needed +from openstef_core.mixins.forecaster import ( + Forecaster, + ForecasterConfig, +) __all__ = [ "Forecaster", diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index f08dc4269..43cc8761f 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -22,9 +22,9 @@ from openstef_core.datasets.mixins import LeadTime from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import InputValidationError, MissingExtraError, NotFittedError +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.mixins.predictor import HyperParams from openstef_models.explainability.mixins import ExplainableForecaster -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.utils.evaluation_functions import EvaluationFunctionType, get_evaluation_function from openstef_models.utils.loss_functions import ( ObjectiveFunctionType, diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index 6ec87593d..af9918391 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -21,8 +21,8 @@ NotFittedError, ) from openstef_core.mixins import HyperParams +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_models.explainability.mixins import ExplainableForecaster -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor if TYPE_CHECKING: diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index 3a4c9c12c..7118346eb 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -21,8 +21,8 @@ NotFittedError, ) from openstef_core.mixins import HyperParams +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_models.explainability.mixins import ExplainableForecaster -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor if TYPE_CHECKING: diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py index c5415e2d6..4c1e1452f 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py @@ -19,8 +19,8 @@ from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import MissingExtraError, NotFittedError from openstef_core.mixins import HyperParams +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_models.explainability.mixins import ExplainableForecaster -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.utils.evaluation_functions import EvaluationFunctionType, get_evaluation_function from openstef_models.utils.loss_functions import ( ObjectiveFunctionType, diff --git a/packages/openstef-models/src/openstef_models/models/forecasting_model.py b/packages/openstef-models/src/openstef_models/models/forecasting_model.py index fe1f2e173..b55cbd9fb 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting_model.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting_model.py @@ -10,15 +10,14 @@ """ import logging -from datetime import datetime, timedelta +from datetime import datetime from functools import partial from typing import cast, override import pandas as pd from pydantic import Field, PrivateAttr -from openstef_beam.evaluation import EvaluationConfig, EvaluationPipeline, SubsetMetric -from openstef_beam.evaluation.metric_providers import MetricProvider, ObservedProbabilityProvider, R2Provider +from openstef_beam.evaluation import SubsetMetric from openstef_core.base_model import BaseModel from openstef_core.datasets import ( ForecastDataset, @@ -27,10 +26,9 @@ ) from openstef_core.datasets.timeseries_dataset import validate_horizons_present from openstef_core.exceptions import InsufficientlyCompleteError, NotFittedError -from openstef_core.mixins import Predictor, TransformPipeline -from openstef_models.models.forecasting import Forecaster -from openstef_models.models.forecasting.forecaster import ForecasterConfig -from openstef_models.utils.data_split import DataSplitter +from openstef_core.mixins import TransformPipeline +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig +from openstef_models.models.base_forecasting_model import BaseForecastingModel class ModelFitResult(BaseModel): @@ -60,7 +58,7 @@ class ModelFitResult(BaseModel): metrics_full: SubsetMetric = Field(description="Evaluation metrics computed on the full original dataset.") -class ForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): +class ForecastingModel(BaseForecastingModel): """Complete forecasting pipeline combining preprocessing, prediction, and postprocessing. Orchestrates the full forecasting workflow by managing feature engineering, @@ -86,6 +84,7 @@ class ForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]) ... ConstantMedianForecaster, ConstantMedianForecasterConfig ... ) >>> from openstef_core.types import LeadTime + >>> from datetime import timedelta >>> >>> # Note: This is a conceptual example showing the API structure >>> # Real usage requires implemented forecaster classes @@ -114,36 +113,6 @@ class ForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]) description="Underlying forecasting algorithm, either single-horizon or multi-horizon.", exclude=True, ) - postprocessing: TransformPipeline[ForecastDataset] = Field( - default_factory=TransformPipeline[ForecastDataset], - description="Postprocessing pipeline for transforming model outputs into final forecasts.", - exclude=True, - ) - target_column: str = Field( - default="load", - description="Name of the target variable column in datasets.", - ) - data_splitter: DataSplitter = Field( - default_factory=DataSplitter, - description="Data splitting strategy for train/validation/test sets.", - ) - cutoff_history: timedelta = Field( - default=timedelta(days=0), - description="Amount of historical data to exclude from training and prediction due to incomplete features " - "from lag-based preprocessing. When using lag transforms (e.g., lag-14), the first N days contain NaN values. " - "Set this to match your maximum lag duration (e.g., timedelta(days=14)). " - "Default of 0 assumes no invalid rows are created by preprocessing.", - ) - # Evaluation - evaluation_metrics: list[MetricProvider] = Field( - default_factory=lambda: [R2Provider(), ObservedProbabilityProvider()], - description="List of metric providers for evaluating model score.", - ) - # Metadata - tags: dict[str, str] = Field( - default_factory=dict, - description="Optional metadata tags for the model.", - ) _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) @@ -152,6 +121,11 @@ def config(self) -> ForecasterConfig: """Returns the configuration of the underlying forecaster.""" return self.forecaster.config + @property + @override + def scoring_config(self) -> ForecasterConfig: + return self.forecaster.config + @property @override def is_fitted(self) -> bool: @@ -316,59 +290,6 @@ def _predict(self, input_data: ForecastInputDataset) -> ForecastDataset: prediction = self.forecaster.predict(data=input_data) return restore_target(dataset=prediction, original_dataset=input_data, target_column=self.target_column) - def score( - self, - data: TimeSeriesDataset, - ) -> SubsetMetric: - """Evaluate model performance on the provided dataset. - - Generates predictions for the dataset and calculates evaluation metrics - by comparing against ground truth values. Uses the configured evaluation - metrics to assess forecast quality at the maximum forecast horizon. - - Args: - data: Time series dataset containing both features and target values - for evaluation. - - Returns: - Evaluation metrics including configured providers (e.g., R2, observed - probability) computed at the maximum forecast horizon. - """ - prediction = self.predict(data=data) - - return self._calculate_score(prediction=prediction) - - def _calculate_score(self, prediction: ForecastDataset) -> SubsetMetric: - if prediction.target_series is None: - raise ValueError("Prediction dataset must contain target series for scoring.") - - # We need to make sure there are no NaNs in the target label for metric calculation - prediction = prediction.pipe_pandas(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownArgumentType, reportUnknownMemberType] - - pipeline = EvaluationPipeline( - # Needs only one horizon since we are using only a single prediction step - # If a more comprehensive test is needed, a backtest should be run. - config=EvaluationConfig(available_ats=[], lead_times=[self.forecaster.config.max_horizon]), - quantiles=self.forecaster.config.quantiles, - # Similarly windowed metrics are not relevant for single predictions. - window_metric_providers=[], - global_metric_providers=self.evaluation_metrics, - ) - - evaluation_result = pipeline.run_for_subset( - filtering=self.forecaster.config.max_horizon, - predictions=prediction, - ) - global_metric = evaluation_result.get_global_metric() - if not global_metric: - return SubsetMetric( - window="global", - timestamp=prediction.forecast_start, - metrics={}, - ) - - return global_metric - def restore_target[T: TimeSeriesDataset]( dataset: T, diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index f1ad1f7dc..b3f6c6e42 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -40,6 +40,7 @@ EmptyFeatureRemover, Imputer, NaNDropper, + SampleWeightConfig, SampleWeighter, Scaler, Selector, @@ -363,9 +364,11 @@ def create_forecasting_workflow( Scaler(selection=Exclude(config.target_column), method="standard"), SampleWeighter( target_column=config.target_column, - weight_exponent=config.sample_weight_exponent, - weight_floor=config.sample_weight_floor, - weight_scale_percentile=config.sample_weight_scale_percentile, + config=SampleWeightConfig( + weight_exponent=config.sample_weight_exponent, + weight_floor=config.sample_weight_floor, + weight_scale_percentile=config.sample_weight_scale_percentile, + ), ), EmptyFeatureRemover(), ] diff --git a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py index e601043c1..c4888e6c0 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/__init__.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/__init__.py @@ -16,7 +16,7 @@ from openstef_models.transforms.general.flagger import Flagger from openstef_models.transforms.general.imputer import Imputer from openstef_models.transforms.general.nan_dropper import NaNDropper -from openstef_models.transforms.general.sample_weighter import SampleWeighter +from openstef_models.transforms.general.sample_weighter import SampleWeightConfig, SampleWeighter from openstef_models.transforms.general.scaler import Scaler from openstef_models.transforms.general.selector import Selector @@ -27,6 +27,7 @@ "Flagger", "Imputer", "NaNDropper", + "SampleWeightConfig", "SampleWeighter", "Scaler", "Selector", diff --git a/packages/openstef-models/src/openstef_models/transforms/general/sample_weighter.py b/packages/openstef-models/src/openstef_models/transforms/general/sample_weighter.py index c9aa85a6d..9e7fbc6d9 100644 --- a/packages/openstef-models/src/openstef_models/transforms/general/sample_weighter.py +++ b/packages/openstef-models/src/openstef_models/transforms/general/sample_weighter.py @@ -21,6 +21,39 @@ from openstef_models.transforms.general.scaler import StandardScaler +class SampleWeightConfig(BaseConfig): + """Configuration for sample weighting parameters. + + Groups all parameters that control how training samples are weighted. + Supports two methods: exponential (magnitude-based) and inverse_frequency (rarity-based). + """ + + method: Literal["exponential", "inverse_frequency"] = Field( + default="exponential", + description="Weighting method: 'exponential' scales by magnitude, 'inverse_frequency' by rarity.", + ) + weight_scale_percentile: int = Field( + default=95, + description="[exponential method only] Percentile of target values used as scaling reference.", + ) + weight_exponent: float = Field( + default=1.0, + description="[exponential method only] Exponent for scaling: 0=uniform, 1=linear, >1=stronger emphasis.", + ) + n_bins: int = Field( + default=50, + description="[inverse_frequency method only] Number of equal-width histogram bins for frequency estimation.", + ) + dampening_exponent: float = Field( + default=0.5, + description="[inverse_frequency method only] Exponent in [0,1] applied to inverse frequency to compress range.", + ) + weight_floor: float = Field( + default=0.1, + description="Minimum weight value to ensure all samples contribute to training.", + ) + + class SampleWeighter(BaseConfig, TimeSeriesTransform): """Transform that adds sample weights based on target variable distribution. @@ -61,29 +94,9 @@ class SampleWeighter(BaseConfig, TimeSeriesTransform): 2025-01-01 04:00:00 150.0 0.789474 """ - method: Literal["exponential", "inverse_frequency"] = Field( - default="exponential", - description="Weighting method: 'exponential' scales by magnitude, 'inverse_frequency' by rarity.", - ) - weight_scale_percentile: int = Field( - default=95, - description="[exponential method only] Percentile of target values used as scaling reference.", - ) - weight_exponent: float = Field( - default=1.0, - description="[exponential method only] Exponent for scaling: 0=uniform, 1=linear, >1=stronger emphasis.", - ) - n_bins: int = Field( - default=50, - description="[inverse_frequency method only] Number of equal-width histogram bins for frequency estimation.", - ) - dampening_exponent: float = Field( - default=0.5, - description="[inverse_frequency method only] Exponent in [0,1] applied to inverse frequency to compress range.", - ) - weight_floor: float = Field( - default=0.1, - description="Minimum weight value to ensure all samples contribute to training.", + config: SampleWeightConfig = Field( + default_factory=SampleWeightConfig, + description="Sample weight configuration parameters.", ) target_column: str = Field( default="load", @@ -160,23 +173,23 @@ def _calculate_weights(self, target: np.ndarray) -> np.ndarray: Raises: ValueError: If an unknown weighting method is configured. """ - match self.method: + match self.config.method: case "exponential": return exponential_sample_weight( x=target, - scale_percentile=self.weight_scale_percentile, - exponent=self.weight_exponent, - floor=self.weight_floor, + scale_percentile=self.config.weight_scale_percentile, + exponent=self.config.weight_exponent, + floor=self.config.weight_floor, ) case "inverse_frequency": return inverse_frequency_sample_weight( x=target, - n_bins=self.n_bins, - dampening_exponent=self.dampening_exponent, - floor=self.weight_floor, + n_bins=self.config.n_bins, + dampening_exponent=self.config.dampening_exponent, + floor=self.config.weight_floor, ) case _: - msg = f"Unknown weighting method: {self.method}" + msg = f"Unknown weighting method: {self.config.method}" raise ValueError(msg) @override @@ -185,9 +198,26 @@ def features_added(self) -> list[str]: @override def __setstate__(self, state: dict[str, Any]) -> None: # TODO(#799): delete after stable release - if "method" not in state["__dict__"]: - state["__dict__"]["method"] = "exponential" - cast(set[str], state["__pydantic_fields_set__"]).add("method") + d = state["__dict__"] + fields_set = cast(set[str], state["__pydantic_fields_set__"]) + + # Migrate flat fields into nested SampleWeightConfig + if "config" not in d: + config_fields: dict[str, Any] = {} + for key in ( + "method", + "weight_scale_percentile", + "weight_exponent", + "n_bins", + "dampening_exponent", + "weight_floor", + ): + if key in d: + config_fields[key] = d.pop(key) + fields_set.discard(key) + d["config"] = SampleWeightConfig(**config_fields) + fields_set.add("config") + return super().__setstate__(state) diff --git a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py index 5fbeac8a0..7f4c81b9e 100644 --- a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py @@ -11,6 +11,7 @@ import logging from datetime import datetime +from typing import Any from pydantic import Field, PrivateAttr @@ -18,10 +19,10 @@ from openstef_core.datasets import TimeSeriesDataset, VersionedTimeSeriesDataset from openstef_core.datasets.validated_datasets import ForecastDataset from openstef_core.exceptions import NotFittedError, SkipFitting -from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel, EnsembleModelFitResult from openstef_models.mixins import ModelIdentifier, PredictorCallback from openstef_models.mixins.callbacks import WorkflowContext -from openstef_models.models.forecasting_model import ForecastingModel, ModelFitResult +from openstef_models.models.base_forecasting_model import BaseForecastingModel +from openstef_models.models.forecasting_model import ModelFitResult class ForecastingCallback( @@ -118,7 +119,7 @@ class CustomForecastingWorkflow(BaseModel): ... ) # doctest: +SKIP """ - model: ForecastingModel | EnsembleForecastingModel = Field(description="The forecasting model to use.") + model: BaseForecastingModel = Field(description="The forecasting model to use.") callbacks: list[ForecastingCallback] = Field( default_factory=list[ForecastingCallback], description="List of callbacks to execute during workflow events." ) @@ -136,7 +137,7 @@ def fit( data: TimeSeriesDataset, data_val: TimeSeriesDataset | None = None, data_test: TimeSeriesDataset | None = None, - ) -> ModelFitResult | EnsembleModelFitResult | None: + ) -> ModelFitResult | None: """Train the forecasting model with callback execution. Executes the complete training workflow including pre-fit callbacks, @@ -151,20 +152,29 @@ def fit( ModelFitResult containing training metrics and information, or None if fitting was skipped. """ + result: ModelFitResult | None = None context: WorkflowContext[CustomForecastingWorkflow] = WorkflowContext(workflow=self) try: for callback in self.callbacks: callback.on_fit_start(context=context, data=data) - result = self.model.fit(data=data, data_val=data_val, data_test=data_test) + fit_output: Any = self.model.fit(data=data, data_val=data_val, data_test=data_test) - if isinstance(result, EnsembleModelFitResult): - self._logger.debug("Discarding EnsembleModelFitResult for compatibility.") - result = result.combiner_fit_result + # Ensemble models return a composite result; extract the combiner result + # for callback compatibility (avoids importing EnsembleModelFitResult). + if isinstance(fit_output, ModelFitResult): + fit_result: ModelFitResult = fit_output + elif hasattr(fit_output, "combiner_fit_result"): + self._logger.debug("Extracting combiner_fit_result for callback compatibility.") + fit_result = fit_output.combiner_fit_result # pyright: ignore[reportUnknownMemberType] + else: + fit_result = fit_output # pyright: ignore[reportUnknownVariableType] for callback in self.callbacks: - callback.on_fit_end(context=context, result=result) + callback.on_fit_end(context=context, result=fit_result) + + result = fit_result except SkipFitting as e: self._logger.info("Skipping model fitting: %s", e) result = None @@ -203,4 +213,4 @@ def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = Non return forecasts -__all__ = ["CustomForecastingWorkflow"] +__all__ = ["CustomForecastingWorkflow", "ForecastingCallback"] diff --git a/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py b/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py index 9561f4d03..c9b9c0e47 100644 --- a/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py +++ b/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py @@ -14,10 +14,10 @@ from openstef_core.datasets import TimeSeriesDataset from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ModelNotFoundError, SkipFitting +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.types import LeadTime, Q from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback from openstef_models.mixins.callbacks import WorkflowContext -from openstef_models.models.forecasting import Forecaster, ForecasterConfig from openstef_models.models.forecasting_model import ForecastingModel, ModelFitResult from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow diff --git a/packages/openstef-models/tests/unit/models/test_forecasting_model.py b/packages/openstef-models/tests/unit/models/test_forecasting_model.py index 9f59a3a30..30dfa1352 100644 --- a/packages/openstef-models/tests/unit/models/test_forecasting_model.py +++ b/packages/openstef-models/tests/unit/models/test_forecasting_model.py @@ -13,13 +13,13 @@ from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import InsufficientlyCompleteError, NotFittedError from openstef_core.mixins import TransformPipeline +from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.testing import assert_timeseries_equal, create_synthetic_forecasting_dataset from openstef_core.types import LeadTime, Quantile, override from openstef_models.models.forecasting.constant_median_forecaster import ( ConstantMedianForecaster, ConstantMedianForecasterConfig, ) -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.models.forecasting_model import ForecastingModel from openstef_models.transforms.postprocessing.quantile_sorter import QuantileSorter from openstef_models.transforms.time_domain.lags_adder import LagsAdder diff --git a/packages/openstef-models/tests/unit/transforms/general/test_sample_weighter.py b/packages/openstef-models/tests/unit/transforms/general/test_sample_weighter.py index d266e5bd9..dcd9bf764 100644 --- a/packages/openstef-models/tests/unit/transforms/general/test_sample_weighter.py +++ b/packages/openstef-models/tests/unit/transforms/general/test_sample_weighter.py @@ -10,6 +10,7 @@ from openstef_core.testing import create_timeseries_dataset from openstef_models.transforms.general.sample_weighter import ( + SampleWeightConfig, SampleWeighter, exponential_sample_weight, inverse_frequency_sample_weight, @@ -78,9 +79,11 @@ def test_sample_weighter__fit_transform(): ) transform = SampleWeighter( - weight_scale_percentile=95, - weight_exponent=1.0, - weight_floor=0.1, + config=SampleWeightConfig( + weight_scale_percentile=95, + weight_exponent=1.0, + weight_floor=0.1, + ), target_column="load", normalize_target=True, ) @@ -144,9 +147,11 @@ def test_sample_weighter__transform_all_nan_target(): ) transform = SampleWeighter( - weight_scale_percentile=95, - weight_exponent=1.0, - weight_floor=0.1, + config=SampleWeightConfig( + weight_scale_percentile=95, + weight_exponent=1.0, + weight_floor=0.1, + ), target_column="load", normalize_target=True, ) @@ -246,11 +251,13 @@ def test_sample_weighter__inverse_frequency_weighting(): ) transform = SampleWeighter( - method="inverse_frequency", + config=SampleWeightConfig( + method="inverse_frequency", + weight_floor=0, + n_bins=3, + dampening_exponent=1.0, + ), target_column="load", - weight_floor=0, - n_bins=3, - dampening_exponent=1.0, ) # Act From 1b5b0274416f91dfd7dd0f76492054bfb3316621 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 09:49:23 +0100 Subject: [PATCH 086/104] Separating openstef-models and openstef-meta WIP Signed-off-by: Marnix van Lieshout --- .../benchmarking/baselines/openstef4.py | 20 +- .../ensemble_mlflow_storage_callback.py | 241 +++++++++--- .../models/ensemble_forecasting_model.py | 3 +- .../presets/forecasting_workflow.py | 10 +- .../src/openstef_meta/workflows/__init__.py | 11 + .../custom_ensemble_forecasting_workflow.py | 145 ++++++++ .../test_ensemble_mlflow_storage_callback.py | 108 ++---- .../test_forecast_combiner.py | 4 +- .../mlflow/mlflow_storage_callback.py | 346 +++++++++--------- .../workflows/custom_forecasting_workflow.py | 22 +- 10 files changed, 552 insertions(+), 358 deletions(-) create mode 100644 packages/openstef-meta/src/openstef_meta/workflows/__init__.py create mode 100644 packages/openstef-meta/src/openstef_meta/workflows/custom_ensemble_forecasting_workflow.py diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py index d6f114a69..b659a98c0 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py @@ -33,12 +33,15 @@ from openstef_core.types import Q from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_meta.presets import EnsembleWorkflowConfig, create_ensemble_workflow +from openstef_meta.workflows import CustomEnsembleForecastingWorkflow from openstef_models.models.forecasting_model import ForecastingModel from openstef_models.presets import ForecastingWorkflowConfig from openstef_models.workflows.custom_forecasting_workflow import ( CustomForecastingWorkflow, ) +ForecastingWorkflow = CustomForecastingWorkflow | CustomEnsembleForecastingWorkflow + class WorkflowCreationContext(BaseConfig): """Context information for workflow execution within backtesting.""" @@ -59,8 +62,8 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): config: BacktestForecasterConfig = Field( description="Configuration for the backtest forecaster interface", ) - workflow_factory: Callable[[WorkflowCreationContext], CustomForecastingWorkflow] = Field( - description="Factory function that creates a new CustomForecastingWorkflow instance", + workflow_factory: Callable[[WorkflowCreationContext], ForecastingWorkflow] = Field( + description="Factory function that creates a new forecasting workflow instance", ) cache_dir: Path = Field( description="Directory to use for caching model artifacts during backtesting", @@ -74,7 +77,7 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): description="When True, saves base forecaster prediction contributions for ensemble models", ) - _workflow: CustomForecastingWorkflow | None = PrivateAttr(default=None) + _workflow: ForecastingWorkflow | None = PrivateAttr(default=None) _is_flatliner_detected: bool = PrivateAttr(default=False) _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) @@ -90,15 +93,12 @@ def quantiles(self) -> list[Q]: # Create a workflow instance if needed to get quantiles if self._workflow is None: self._workflow = self.workflow_factory(WorkflowCreationContext()) - # Extract quantiles from the workflow's model + # Extract quantiles from the workflow's model if isinstance(self._workflow.model, EnsembleForecastingModel): name = self._workflow.model.forecaster_names[0] return self._workflow.model.forecasters[name].config.quantiles - if isinstance(self._workflow.model, ForecastingModel): - return self._workflow.model.forecaster.config.quantiles - msg = f"Unsupported model type: {type(self._workflow.model)}" - raise TypeError(msg) + return self._workflow.model.forecaster.config.quantiles @override def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: @@ -130,7 +130,7 @@ def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: if self.debug and isinstance(self._workflow.model, ForecastingModel): id_str = data.horizon.strftime("%Y%m%d%H%M%S") - self._workflow.model.prepare_input(training_data).to_parquet( # pyright: ignore[reportPrivateUsage] + self._workflow.model.prepare_input(training_data).to_parquet( path=self.cache_dir / f"debug_{id_str}_prepared_training.parquet" ) @@ -190,7 +190,7 @@ def _preset_target_forecaster_factory( # Factory function that creates a forecaster for a given target. prefix = context.run_name - def _create_workflow(context: WorkflowCreationContext) -> CustomForecastingWorkflow: + def _create_workflow(context: WorkflowCreationContext) -> ForecastingWorkflow: # Create a new workflow instance with fresh model. location = LocationConfig( name=target.name, diff --git a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py index 30a78511d..68d631e3f 100644 --- a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py +++ b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py @@ -4,101 +4,226 @@ """MLflow storage callback for ensemble forecasting models. -Extends the base MLFlowStorageCallback with ensemble-specific behavior: -- Logs hyperparameters for each base forecaster and the combiner +Provides MLflow storage and tracking for ensemble forecasting workflows using +composition with MLFlowStorageCallbackBase rather than inheriting from +MLFlowStorageCallback. This avoids conflicting generic type parameters and +keeps the callback fully type-safe. + +Ensemble-specific behavior: +- Logs combiner hyperparameters as the primary hyperparams +- Logs per-forecaster hyperparameters with name-prefixed keys - Stores feature importance plots for each explainable forecaster component """ import logging -from pathlib import Path -from typing import override +from datetime import UTC, datetime +from typing import cast, override from pydantic import PrivateAttr -from openstef_core.mixins.predictor import HyperParams -from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel +from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset +from openstef_core.datasets.versioned_timeseries_dataset import VersionedTimeSeriesDataset +from openstef_core.exceptions import ModelNotFoundError, SkipFitting +from openstef_meta.models.ensemble_forecasting_model import EnsembleModelFitResult +from openstef_meta.workflows import CustomEnsembleForecastingWorkflow, EnsembleForecastingCallback from openstef_models.explainability import ExplainableForecaster -from openstef_models.integrations.mlflow.mlflow_storage_callback import MLFlowStorageCallback +from openstef_models.integrations.mlflow.mlflow_storage_callback import MLFlowStorageCallbackBase, metrics_to_dict +from openstef_models.mixins.callbacks import WorkflowContext from openstef_models.models.base_forecasting_model import BaseForecastingModel -class EnsembleMLFlowStorageCallback(MLFlowStorageCallback): - """MLFlow callback with ensemble-specific logging for multi-model forecasting. +class EnsembleMLFlowStorageCallback(MLFlowStorageCallbackBase, EnsembleForecastingCallback): + """MLFlow callback for ensemble forecasting workflows. - Extends the base MLFlowStorageCallback to handle EnsembleForecastingModel - instances by: + Uses composition with MLFlowStorageCallbackBase for shared MLflow storage + configuration and utility methods, combined with EnsembleForecastingCallback + for properly-typed ensemble workflow hooks. + + Handles EnsembleForecastingModel instances by: - Logging combiner hyperparameters as the primary model hyperparams - Logging per-forecaster hyperparameters with name-prefixed keys - Storing feature importance plots for each explainable base forecaster - - For non-ensemble models, falls back to the base class behavior. """ _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) @override - def _get_hyperparams(self, model: BaseForecastingModel) -> HyperParams | None: - """Extract hyperparameters from the ensemble combiner. - - For ensemble models, the combiner's hyperparams are treated as the - primary hyperparameters. Per-forecaster hyperparams are logged - separately via _log_additional_hyperparams. - - Falls back to base class behavior for non-ensemble models. - - Returns: - The combiner hyperparams for ensemble models, or base class result otherwise. - """ - if isinstance(model, EnsembleForecastingModel): - return model.combiner.config.hyperparams - return super()._get_hyperparams(model) - - @override - def _log_additional_hyperparams(self, model: BaseForecastingModel, run_id: str) -> None: - """Log per-forecaster hyperparameters to the MLflow run. - - Each base forecaster's hyperparameters are logged with a prefix - of its name (e.g., 'lgbm.n_estimators', 'xgboost.max_depth'). + def on_fit_start( + self, + context: WorkflowContext[CustomEnsembleForecastingWorkflow], + data: VersionedTimeSeriesDataset | TimeSeriesDataset, + ) -> None: + """Check model reuse before fitting. - Args: - model: The ensemble forecasting model. - run_id: MLflow run ID to log parameters to. + Raises: + SkipFitting: If a recent model already exists. """ - if not isinstance(model, EnsembleForecastingModel): + if not self.model_reuse_enable: return + run = self._find_run(model_id=context.workflow.model_id, run_name=context.workflow.run_name) + + if run is not None: + now = datetime.now(tz=UTC) + end_time_millis = cast(float | None, run.info.end_time) + run_end_datetime = ( + datetime.fromtimestamp(end_time_millis / 1000, tz=UTC) if end_time_millis is not None else None + ) + self._logger.info( + "Found previous MLflow run %s for model %s ended at %s", + cast(str, run.info.run_id), + context.workflow.model_id, + run_end_datetime, + ) + if run_end_datetime is not None and (now - run_end_datetime) <= self.model_reuse_max_age: + raise SkipFitting("Model is recent enough, skipping re-fit.") + + @override + def on_fit_end( + self, + context: WorkflowContext[CustomEnsembleForecastingWorkflow], + result: EnsembleModelFitResult, + ) -> None: + """Store ensemble model, hyperparams, artifacts, and metrics to MLflow.""" + if self.model_selection_enable: + self._run_model_selection(workflow=context.workflow, result=result) + + model = context.workflow.model + + # Create a new run with combiner hyperparameters + run = self.storage.create_run( + model_id=context.workflow.model_id, + tags=model.tags, + hyperparams=model.combiner.config.hyperparams, + run_name=context.workflow.run_name, + experiment_tags=context.workflow.experiment_tags, + ) + run_id: str = run.info.run_id + self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) + + # Log per-forecaster hyperparameters for name, forecaster in model.forecasters.items(): hyperparams = forecaster.hyperparams prefixed_params = {f"{name}.{k}": str(v) for k, v in hyperparams.model_dump().items()} self.storage.log_hyperparams(run_id=run_id, params=prefixed_params) self._logger.debug("Logged hyperparams for forecaster '%s' in run %s", name, run_id) - @staticmethod + # Store the model input + run_path = self.storage.get_artifacts_path(model_id=context.workflow.model_id, run_id=run_id) + data_path = run_path / self.storage.data_path + data_path.mkdir(parents=True, exist_ok=True) + result.input_dataset.to_parquet(path=data_path / "data.parquet") + self._logger.info("Stored training data at %s for run %s", data_path, run_id) + + # Store feature importance plots for each explainable forecaster + if self.store_feature_importance_plot: + for name, forecaster in model.forecasters.items(): + if isinstance(forecaster, ExplainableForecaster): + fig = forecaster.plot_feature_importances() + fig.write_html(data_path / f"feature_importances_{name}.html") # pyright: ignore[reportUnknownMemberType] + + # Store the trained model + self.storage.save_run_model( + model_id=context.workflow.model_id, + run_id=run_id, + model=context.workflow.model, + ) + self._logger.info("Stored trained model for run %s", run_id) + + # Format the metrics for MLflow + metrics = metrics_to_dict(metrics=result.metrics_full, prefix="full_") + metrics.update(metrics_to_dict(metrics=result.metrics_train, prefix="train_")) + if result.metrics_val is not None: + metrics.update(metrics_to_dict(metrics=result.metrics_val, prefix="val_")) + if result.metrics_test is not None: + metrics.update(metrics_to_dict(metrics=result.metrics_test, prefix="test_")) + + # Mark the run as finished + self.storage.finalize_run(model_id=context.workflow.model_id, run_id=run_id, metrics=metrics) + self._logger.info("Stored MLflow run %s for model %s", run_id, context.workflow.model_id) + @override - def _store_feature_importance( - model: BaseForecastingModel, - data_path: Path, - ) -> None: - """Store feature importance plots for each explainable forecaster in the ensemble. + def on_predict_start( + self, + context: WorkflowContext[CustomEnsembleForecastingWorkflow], + data: VersionedTimeSeriesDataset | TimeSeriesDataset, + ): + """Load ensemble model from MLflow for prediction. + + Raises: + ModelNotFoundError: If no model run is found. + """ + if context.workflow.model.is_fitted: + return - For ensemble models, generates separate feature importance HTML plots for - each base forecaster that implements ExplainableForecaster. Files are named - 'feature_importances_{forecaster_name}.html'. + run = self._find_run(model_id=context.workflow.model_id, run_name=context.workflow.run_name) - For non-ensemble models, falls back to the base class behavior. + if run is None: + raise ModelNotFoundError(model_id=context.workflow.model_id) - Args: - model: The forecasting model (ensemble or single). - data_path: Directory path where HTML plots will be saved. + run_id: str = run.info.run_id + old_model = self.storage.load_run_model(run_id=run_id, model_id=context.workflow.model_id) + + if not isinstance(old_model, BaseForecastingModel): + self._logger.warning( + "Loaded model from run %s is not a BaseForecastingModel, cannot use for prediction", + cast(str, run.info.run_id), + ) + return + + context.workflow.model = old_model # pyright: ignore[reportAttributeAccessIssue] + self._logger.info( + "Loaded model from MLflow run %s for model %s", + run_id, + context.workflow.model_id, + ) + + def _run_model_selection(self, workflow: CustomEnsembleForecastingWorkflow, result: EnsembleModelFitResult) -> None: + """Compare new ensemble model against the previous best and keep the better one. + + Raises: + SkipFitting: If the new model does not improve on the monitored metric. """ - if not isinstance(model, EnsembleForecastingModel): - MLFlowStorageCallback._store_feature_importance(model=model, data_path=data_path) # noqa: SLF001 + run = self._find_run(model_id=workflow.model_id, run_name=None) + if run is None: return - for name, forecaster in model.forecasters.items(): - if isinstance(forecaster, ExplainableForecaster): - fig = forecaster.plot_feature_importances() - fig.write_html(data_path / f"feature_importances_{name}.html") # pyright: ignore[reportUnknownMemberType] + run_id = cast(str, run.info.run_id) + + if not self._check_tags_compatible( + run_tags=run.data.tags, # pyright: ignore[reportUnknownMemberType, reportUnknownArgumentType] + new_tags=workflow.model.tags, + run_id=run_id, + ): + return + + new_model = workflow.model + new_metrics = result.metrics_full + + old_model = self._try_load_model(run_id=run_id, model_id=workflow.model_id) + + if old_model is None: + return + + old_metrics = self._try_evaluate_model( + run_id=run_id, + old_model=old_model, + input_data=result.input_dataset, + ) + + if old_metrics is None: + return + + if self._check_is_new_model_better(old_metrics=old_metrics, new_metrics=new_metrics): + workflow.model = new_model # pyright: ignore[reportAttributeAccessIssue] + else: + workflow.model = old_model # pyright: ignore[reportAttributeAccessIssue] + self._logger.info( + "New model did not improve %s metric from previous run %s, reusing old model", + self.model_selection_metric, + run_id, + ) + raise SkipFitting("New model did not improve monitored metric, skipping re-fit.") __all__ = ["EnsembleMLFlowStorageCallback"] diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index c1377029e..51f06cd7b 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -38,7 +38,8 @@ class EnsembleModelFitResult(BaseModel): """Fit result for EnsembleForecastingModel containing details for both forecasters and combiner.""" - forecaster_fit_results: dict[str, ModelFitResult] = Field(description="ModelFitResult for each base Forecaster") + + forecaster_fit_results: dict[str, ModelFitResult] = Field(description="ModelFitResult for each base forecaster") combiner_fit_result: ModelFitResult = Field(description="ModelFitResult for the ForecastCombiner") diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 98730d376..359d05c5f 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -36,6 +36,7 @@ StackingCombiner, ) from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster +from openstef_meta.workflows import CustomEnsembleForecastingWorkflow, EnsembleForecastingCallback from openstef_models.integrations.mlflow import MLFlowStorage from openstef_models.mixins.model_serializer import ModelIdentifier from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster @@ -65,7 +66,6 @@ ) from openstef_models.utils.data_split import DataSplitter from openstef_models.utils.feature_selection import Exclude, FeatureSelection, Include -from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow, ForecastingCallback if TYPE_CHECKING: from openstef_core.mixins.forecaster import Forecaster @@ -332,14 +332,14 @@ def feature_standardizers(config: EnsembleWorkflowConfig) -> list[Transform[Time ) -def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: # noqa: C901, PLR0912 +def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomEnsembleForecastingWorkflow: # noqa: C901, PLR0912 """Create an ensemble forecasting workflow from configuration. Args: config (EnsembleWorkflowConfig): Configuration for the ensemble workflow. Returns: - CustomForecastingWorkflow: Configured ensemble forecasting workflow. + CustomEnsembleForecastingWorkflow: Configured ensemble forecasting workflow. Raises: ValueError: If an unsupported base model or combiner type is specified. @@ -497,7 +497,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin **config.tags, } - callbacks: list[ForecastingCallback] = [] + callbacks: list[EnsembleForecastingCallback] = [] if config.mlflow_storage is not None: callbacks.append( EnsembleMLFlowStorageCallback( @@ -510,7 +510,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin ) ) - return CustomForecastingWorkflow( + return CustomEnsembleForecastingWorkflow( model=EnsembleForecastingModel( common_preprocessing=common_preprocessing, model_specific_preprocessing=model_specific_preprocessing, diff --git a/packages/openstef-meta/src/openstef_meta/workflows/__init__.py b/packages/openstef-meta/src/openstef_meta/workflows/__init__.py new file mode 100644 index 000000000..e9dff0fee --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/workflows/__init__.py @@ -0,0 +1,11 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 +"""Workflow orchestration for ensemble forecasting models.""" + +from openstef_meta.workflows.custom_ensemble_forecasting_workflow import ( + CustomEnsembleForecastingWorkflow, + EnsembleForecastingCallback, +) + +__all__ = ["CustomEnsembleForecastingWorkflow", "EnsembleForecastingCallback"] diff --git a/packages/openstef-meta/src/openstef_meta/workflows/custom_ensemble_forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/workflows/custom_ensemble_forecasting_workflow.py new file mode 100644 index 000000000..741763d6b --- /dev/null +++ b/packages/openstef-meta/src/openstef_meta/workflows/custom_ensemble_forecasting_workflow.py @@ -0,0 +1,145 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""High-level workflow orchestration for ensemble forecasting operations. + +Provides a complete ensemble forecasting workflow that combines model management, +callback execution, and optional model persistence. Acts as the main entry point +for production ensemble forecasting systems. +""" + +import logging +from datetime import datetime + +from pydantic import Field, PrivateAttr + +from openstef_core.base_model import BaseModel +from openstef_core.datasets import TimeSeriesDataset, VersionedTimeSeriesDataset +from openstef_core.datasets.validated_datasets import ForecastDataset +from openstef_core.exceptions import NotFittedError, SkipFitting +from openstef_meta.models.ensemble_forecasting_model import ( + EnsembleForecastingModel, + EnsembleModelFitResult, +) +from openstef_models.mixins import ModelIdentifier, PredictorCallback +from openstef_models.mixins.callbacks import WorkflowContext + + +class EnsembleForecastingCallback( + PredictorCallback[ + "CustomEnsembleForecastingWorkflow", + VersionedTimeSeriesDataset | TimeSeriesDataset, + EnsembleModelFitResult, + ForecastDataset, + ] +): + """Callback interface for monitoring ensemble forecasting workflow lifecycle events. + + Similar to ForecastingCallback but parameterized with EnsembleModelFitResult + instead of ModelFitResult, giving callbacks access to the full ensemble fit + result including per-forecaster and combiner results. + + All methods have default no-op implementations, so subclasses only need + to override the specific events they care about. + """ + + +class CustomEnsembleForecastingWorkflow(BaseModel): + """Complete ensemble forecasting workflow with model management and lifecycle hooks. + + Orchestrates the full ensemble forecasting process by combining an + EnsembleForecastingModel with callback execution. Provides the main + interface for production ensemble forecasting systems. + + Invariants: + - Callbacks are executed at appropriate lifecycle stages + - Model fitting returns EnsembleModelFitResult with per-forecaster details + - Prediction delegates to the underlying EnsembleForecastingModel + """ + + model: EnsembleForecastingModel = Field(description="The ensemble forecasting model to use.") + callbacks: list[EnsembleForecastingCallback] = Field( + default_factory=list[EnsembleForecastingCallback], + description="List of callbacks to execute during workflow events.", + ) + model_id: ModelIdentifier = Field(...) + run_name: str | None = Field(default=None, description="Optional name for this workflow run.") + experiment_tags: dict[str, str] = Field( + default_factory=dict, + description="Optional metadata tags for experiment tracking.", + ) + + _logger: logging.Logger = PrivateAttr(default_factory=lambda: logging.getLogger(__name__)) + + def fit( + self, + data: TimeSeriesDataset, + data_val: TimeSeriesDataset | None = None, + data_test: TimeSeriesDataset | None = None, + ) -> EnsembleModelFitResult | None: + """Train the ensemble forecasting model with callback execution. + + Executes the complete training workflow including pre-fit callbacks, + model training, and post-fit callbacks. Returns the full ensemble + fit result with per-forecaster and combiner details. + + Args: + data: Training dataset for the forecasting model. + data_val: Optional validation dataset for model tuning. + data_test: Optional test dataset for final evaluation. + + Returns: + EnsembleModelFitResult containing training metrics for each + base forecaster and the combiner, or None if fitting was skipped. + """ + result: EnsembleModelFitResult | None = None + context: WorkflowContext[CustomEnsembleForecastingWorkflow] = WorkflowContext(workflow=self) + + try: + for callback in self.callbacks: + callback.on_fit_start(context=context, data=data) + + result = self.model.fit(data=data, data_val=data_val, data_test=data_test) + + for callback in self.callbacks: + callback.on_fit_end(context=context, result=result) + except SkipFitting as e: + self._logger.info("Skipping model fitting: %s", e) + result = None + + return result + + def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: + """Generate forecasts with callback execution. + + Executes the complete prediction workflow including pre-prediction callbacks, + model prediction, and post-prediction callbacks. + + Args: + data: Input dataset for generating forecasts. + forecast_start: Optional start time for forecasts. + + Returns: + Generated forecast dataset. + + Raises: + NotFittedError: If the underlying model hasn't been trained. + """ + context: WorkflowContext[CustomEnsembleForecastingWorkflow] = WorkflowContext(workflow=self) + + for callback in self.callbacks: + callback.on_predict_start(context=context, data=data) + + if not self.model.is_fitted: + raise NotFittedError(type(self.model).__name__) + + forecasts = self.model.predict(data=data, forecast_start=forecast_start) + + for callback in self.callbacks: + callback.on_predict_end(context=context, data=data, result=forecasts) + + return forecasts + + +__all__ = ["CustomEnsembleForecastingWorkflow", "EnsembleForecastingCallback"] diff --git a/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py b/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py index ce8273876..456e4bea9 100644 --- a/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py +++ b/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py @@ -19,12 +19,11 @@ from openstef_core.mixins.predictor import HyperParams from openstef_core.types import LeadTime, Q from openstef_meta.integrations.mlflow import EnsembleMLFlowStorageCallback -from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel +from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel, EnsembleModelFitResult from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig +from openstef_meta.workflows import CustomEnsembleForecastingWorkflow from openstef_models.integrations.mlflow import MLFlowStorage from openstef_models.mixins.callbacks import WorkflowContext -from openstef_models.models.forecasting_model import ForecastingModel, ModelFitResult -from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow if TYPE_CHECKING: from pathlib import Path @@ -157,7 +156,7 @@ def sample_dataset() -> TimeSeriesDataset: ) -def _create_ensemble_workflow() -> CustomForecastingWorkflow: +def _create_ensemble_workflow() -> CustomEnsembleForecastingWorkflow: """Create an ensemble forecasting workflow for testing.""" horizons = [LeadTime(timedelta(hours=1))] quantiles = [Q(0.5)] @@ -179,38 +178,20 @@ def _create_ensemble_workflow() -> CustomForecastingWorkflow: combiner=combiner, ) - return CustomForecastingWorkflow(model_id="test_ensemble", model=ensemble_model) + return CustomEnsembleForecastingWorkflow(model_id="test_ensemble", model=ensemble_model) @pytest.fixture -def ensemble_workflow() -> CustomForecastingWorkflow: +def ensemble_workflow() -> CustomEnsembleForecastingWorkflow: return _create_ensemble_workflow() @pytest.fixture def ensemble_fit_result( - sample_dataset: TimeSeriesDataset, ensemble_workflow: CustomForecastingWorkflow -) -> ModelFitResult: - """Create a fit result from the ensemble model, downcast to combiner's ModelFitResult.""" - ensemble_result = ensemble_workflow.model.fit(sample_dataset) - return ensemble_result.combiner_fit_result - - -@pytest.fixture -def single_workflow() -> CustomForecastingWorkflow: - """Create a single-model forecasting workflow for testing fallback behavior.""" - horizons = [LeadTime(timedelta(hours=1))] - quantiles = [Q(0.5)] - model = ForecastingModel( - forecaster=SimpleTestForecaster(config=ForecasterConfig(horizons=horizons, quantiles=quantiles)), - ) - return CustomForecastingWorkflow(model_id="test_single", model=model) - - -@pytest.fixture -def single_fit_result(sample_dataset: TimeSeriesDataset, single_workflow: CustomForecastingWorkflow) -> ModelFitResult: - """Create a fit result from a single model.""" - return single_workflow.model.fit(sample_dataset) + sample_dataset: TimeSeriesDataset, ensemble_workflow: CustomEnsembleForecastingWorkflow +) -> EnsembleModelFitResult: + """Create a fit result from the ensemble model.""" + return ensemble_workflow.model.fit(sample_dataset) # --- Tests --- @@ -218,8 +199,8 @@ def single_fit_result(sample_dataset: TimeSeriesDataset, single_workflow: Custom def test_on_fit_end__stores_ensemble_model( callback: EnsembleMLFlowStorageCallback, - ensemble_workflow: CustomForecastingWorkflow, - ensemble_fit_result: ModelFitResult, + ensemble_workflow: CustomEnsembleForecastingWorkflow, + ensemble_fit_result: EnsembleModelFitResult, ): """Test that on_fit_end stores an EnsembleForecastingModel to MLflow.""" context = WorkflowContext(workflow=ensemble_workflow) @@ -238,8 +219,8 @@ def test_on_fit_end__stores_ensemble_model( def test_on_fit_end__logs_combiner_hyperparams_as_primary( callback: EnsembleMLFlowStorageCallback, - ensemble_workflow: CustomForecastingWorkflow, - ensemble_fit_result: ModelFitResult, + ensemble_workflow: CustomEnsembleForecastingWorkflow, + ensemble_fit_result: EnsembleModelFitResult, ): """Test that combiner hyperparams are logged as the run's primary params.""" context = WorkflowContext(workflow=ensemble_workflow) @@ -257,8 +238,8 @@ def test_on_fit_end__logs_combiner_hyperparams_as_primary( def test_on_fit_end__logs_per_forecaster_hyperparams( callback: EnsembleMLFlowStorageCallback, - ensemble_workflow: CustomForecastingWorkflow, - ensemble_fit_result: ModelFitResult, + ensemble_workflow: CustomEnsembleForecastingWorkflow, + ensemble_fit_result: EnsembleModelFitResult, ): """Test that per-forecaster hyperparams are logged with name prefixes.""" context = WorkflowContext(workflow=ensemble_workflow) @@ -278,29 +259,10 @@ def test_on_fit_end__logs_per_forecaster_hyperparams( assert "model_b.n_rounds" in params -def test_on_fit_end__single_model_fallback( - callback: EnsembleMLFlowStorageCallback, - single_workflow: CustomForecastingWorkflow, - single_fit_result: ModelFitResult, -): - """Test that non-ensemble models fall back to base class behavior.""" - context = WorkflowContext(workflow=single_workflow) - - callback.on_fit_end(context=context, result=single_fit_result) - - runs = callback.storage.search_latest_runs(model_id=single_workflow.model_id, limit=1) - assert len(runs) == 1 - - run_id = cast(str, runs[0].info.run_id) - loaded_model = callback.storage.load_run_model(model_id=single_workflow.model_id, run_id=run_id) - assert isinstance(loaded_model, ForecastingModel) - assert loaded_model.is_fitted - - def test_on_predict_start__loads_ensemble_model( callback: EnsembleMLFlowStorageCallback, - ensemble_workflow: CustomForecastingWorkflow, - ensemble_fit_result: ModelFitResult, + ensemble_workflow: CustomEnsembleForecastingWorkflow, + ensemble_fit_result: EnsembleModelFitResult, sample_dataset: TimeSeriesDataset, ): """Test that on_predict_start loads an ensemble model from MLflow.""" @@ -319,8 +281,8 @@ def test_on_predict_start__loads_ensemble_model( def test_model_selection__keeps_better_ensemble_model( storage: MLFlowStorage, - ensemble_workflow: CustomForecastingWorkflow, - ensemble_fit_result: ModelFitResult, + ensemble_workflow: CustomEnsembleForecastingWorkflow, + ensemble_fit_result: EnsembleModelFitResult, sample_dataset: TimeSeriesDataset, ): """Test that model selection keeps the better performing ensemble model.""" @@ -356,36 +318,8 @@ def test_model_selection__keeps_better_ensemble_model( ), ) worse_result = worse_ensemble.fit(sample_dataset) - worse_workflow = CustomForecastingWorkflow(model_id="test_ensemble", model=worse_ensemble) + worse_workflow = CustomEnsembleForecastingWorkflow(model_id="test_ensemble", model=worse_ensemble) worse_context = WorkflowContext(workflow=worse_workflow) with pytest.raises(SkipFitting, match="New model did not improve"): - callback.on_fit_end(context=worse_context, result=worse_result.combiner_fit_result) - - -def test_get_hyperparams__returns_combiner_hyperparams_for_ensemble( - callback: EnsembleMLFlowStorageCallback, - ensemble_workflow: CustomForecastingWorkflow, -): - """Test _get_hyperparams returns combiner hyperparams for ensemble models.""" - model = ensemble_workflow.model - assert isinstance(model, EnsembleForecastingModel) - - result = callback._get_hyperparams(model) - - assert isinstance(result, SimpleCombinerHyperParams) - assert result.learning_rate == 0.01 - - -def test_get_hyperparams__falls_back_for_single_model( - callback: EnsembleMLFlowStorageCallback, - single_workflow: CustomForecastingWorkflow, -): - """Test _get_hyperparams falls back to base for non-ensemble models.""" - model = single_workflow.model - assert isinstance(model, ForecastingModel) - - result = callback._get_hyperparams(model) - - # SimpleTestForecaster returns SimpleForecasterHyperParams via .hyperparams - assert isinstance(result, SimpleForecasterHyperParams) + callback.on_fit_end(context=worse_context, result=worse_result) diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py index 923828c9a..f5cf4b279 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py @@ -63,7 +63,7 @@ def test_config_with_horizon_preserves_other_fields(config: ForecastCombinerConf def test_config_requires_at_least_one_quantile(): """Config validation rejects empty quantiles list.""" # Act & Assert - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="quantiles"): ForecastCombinerConfig( hyperparams=HyperParams(), quantiles=[], @@ -74,7 +74,7 @@ def test_config_requires_at_least_one_quantile(): def test_config_requires_at_least_one_horizon(): """Config validation rejects empty horizons list.""" # Act & Assert - with pytest.raises(ValueError): + with pytest.raises(ValueError, match="horizons"): ForecastCombinerConfig( hyperparams=HyperParams(), quantiles=[Q(0.5)], diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index 74340631a..7218bfb61 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -11,9 +11,9 @@ import logging from datetime import UTC, datetime, timedelta -from pathlib import Path from typing import Any, cast, override +from mlflow.entities import Run from pydantic import Field, PrivateAttr from openstef_beam.evaluation import SubsetMetric @@ -28,12 +28,10 @@ ModelNotFoundError, SkipFitting, ) -from openstef_core.mixins.predictor import HyperParams from openstef_core.types import Q, QuantileOrGlobal from openstef_models.explainability import ExplainableForecaster from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage from openstef_models.mixins.callbacks import WorkflowContext -from openstef_models.models import ForecastingModel from openstef_models.models.base_forecasting_model import BaseForecastingModel from openstef_models.models.forecasting_model import ModelFitResult from openstef_models.workflows.custom_forecasting_workflow import ( @@ -42,8 +40,13 @@ ) -class MLFlowStorageCallback(BaseConfig, ForecastingCallback): - """MLFlow callback for logging forecasting workflow events.""" +class MLFlowStorageCallbackBase(BaseConfig): + """Base configuration and shared utilities for MLflow storage callbacks. + + Provides common fields and helper methods used by both single-model and + ensemble-model MLflow callbacks. Not a callback itself — subclasses should + also inherit from the appropriate callback type. + """ storage: MLFlowStorage = Field(default_factory=MLFlowStorage) @@ -71,6 +74,149 @@ class MLFlowStorageCallback(BaseConfig, ForecastingCallback): def model_post_init(self, context: Any) -> None: pass + def _find_run(self, model_id: str, run_name: str | None) -> Run | None: + """Find an MLflow run by model_id and optional run_name. + + Args: + model_id: The model identifier. + run_name: Optional specific run name to search for. + + Returns: + The MLflow Run object, or None if not found. + """ + if run_name is not None: + return self.storage.search_run(model_id=model_id, run_name=run_name) + + runs = self.storage.search_latest_runs(model_id=model_id) + return next(iter(runs), None) + + def _try_load_model( + self, + run_id: str, + model_id: str, + ) -> BaseForecastingModel | None: + """Try to load a model from MLflow, returning None on failure. + + Args: + run_id: The MLflow run ID. + model_id: The model identifier. + + Returns: + The loaded model, or None if loading failed. + """ + try: + old_model = self.storage.load_run_model(run_id=run_id, model_id=model_id) + except ModelNotFoundError: + self._logger.warning( + "Could not load model from previous run %s for model %s, skipping model selection", + run_id, + model_id, + ) + return None + + if not isinstance(old_model, BaseForecastingModel): + self._logger.warning( + "Loaded old model from run %s is not a BaseForecastingModel, skipping model selection", + run_id, + ) + return None + + return old_model + + def _try_evaluate_model( + self, + run_id: str, + old_model: BaseForecastingModel, + input_data: TimeSeriesDataset, + ) -> SubsetMetric | None: + """Try to evaluate a model, returning None on failure. + + Args: + run_id: The MLflow run ID (for logging). + old_model: The model to evaluate. + input_data: The dataset to evaluate on. + + Returns: + The evaluation metrics, or None if evaluation failed. + """ + try: + return old_model.score(input_data) + except (MissingColumnsError, ValueError) as e: + self._logger.warning( + "Could not evaluate old model from run %s, skipping model selection: %s", + run_id, + e, + ) + return None + + def _check_tags_compatible(self, run_tags: dict[str, str], new_tags: dict[str, str], run_id: str) -> bool: + """Check if model tags are compatible, excluding mlflow.runName. + + Returns: + True if tags are compatible, False otherwise. + """ + old_tags = {k: v for k, v in run_tags.items() if k != "mlflow.runName"} + + if old_tags == new_tags: + return True + + differences = { + k: (old_tags.get(k), new_tags.get(k)) + for k in old_tags.keys() | new_tags.keys() + if old_tags.get(k) != new_tags.get(k) + } + + self._logger.info( + "Model tags changed since run %s, skipping model selection. Changes: %s", + run_id, + differences, + ) + return False + + def _check_is_new_model_better( + self, + old_metrics: SubsetMetric, + new_metrics: SubsetMetric, + ) -> bool: + """Compare old and new model metrics to determine if the new model is better. + + Returns: + True if the new model improves on the monitored metric. + """ + quantile, metric_name, direction = self.model_selection_metric + + old_metric = old_metrics.get_metric(quantile=quantile, metric_name=metric_name) + new_metric = new_metrics.get_metric(quantile=quantile, metric_name=metric_name) + + if old_metric is None or new_metric is None: + self._logger.warning( + "Could not find %s metric for quantile %s in old or new model metrics, assuming improvement", + metric_name, + quantile, + ) + return True + + self._logger.info( + "Comparing old model %s metric %.5f to new model %s metric %.5f for quantile %s", + metric_name, + old_metric, + metric_name, + new_metric, + quantile, + ) + + match direction: + case "higher_is_better" if new_metric >= old_metric / self.model_selection_old_model_penalty: + return True + case "lower_is_better" if new_metric <= old_metric / self.model_selection_old_model_penalty: + return True + case _: + return False + + +class MLFlowStorageCallback(MLFlowStorageCallbackBase, ForecastingCallback): + """MLFlow callback for logging forecasting workflow events.""" + @override def on_fit_start( self, @@ -80,16 +226,7 @@ def on_fit_start( if not self.model_reuse_enable: return - # If run_name is provided, load that specific run - if context.workflow.run_name is not None: - run = self.storage.search_run( - model_id=context.workflow.model_id, - run_name=context.workflow.run_name, - ) - else: - # Find the latest successful run for this model - runs = self.storage.search_latest_runs(model_id=context.workflow.model_id) - run = next(iter(runs), None) + run = self._find_run(model_id=context.workflow.model_id, run_name=context.workflow.run_name) if run is not None: # Check if the run is recent enough to skip re-fitting @@ -121,16 +258,13 @@ def on_fit_end( run = self.storage.create_run( model_id=context.workflow.model_id, tags=model.tags, - hyperparams=self._get_hyperparams(model), + hyperparams=context.workflow.model.forecaster.hyperparams, run_name=context.workflow.run_name, experiment_tags=context.workflow.experiment_tags, ) run_id: str = run.info.run_id self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) - # Hook for subclasses to log additional hyperparameters (e.g., per-component for ensembles) - self._log_additional_hyperparams(model=model, run_id=run_id) - # Store the model input run_path = self.storage.get_artifacts_path(model_id=context.workflow.model_id, run_id=run_id) data_path = run_path / self.storage.data_path @@ -139,8 +273,9 @@ def on_fit_end( self._logger.info("Stored training data at %s for run %s", data_path, run_id) # Store feature importance plots if enabled - if self.store_feature_importance_plot: - self._store_feature_importance(model=model, data_path=data_path) + if self.store_feature_importance_plot and isinstance(model.forecaster, ExplainableForecaster): + fig = model.forecaster.plot_feature_importances() + fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] # Store the trained model self.storage.save_run_model( @@ -151,47 +286,17 @@ def on_fit_end( self._logger.info("Stored trained model for run %s", run_id) # Format the metrics for MLflow - metrics = _metrics_to_dict(metrics=result.metrics_full, prefix="full_") - metrics.update(_metrics_to_dict(metrics=result.metrics_train, prefix="train_")) + metrics = metrics_to_dict(metrics=result.metrics_full, prefix="full_") + metrics.update(metrics_to_dict(metrics=result.metrics_train, prefix="train_")) if result.metrics_val is not None: - metrics.update(_metrics_to_dict(metrics=result.metrics_val, prefix="val_")) + metrics.update(metrics_to_dict(metrics=result.metrics_val, prefix="val_")) if result.metrics_test is not None: - metrics.update(_metrics_to_dict(metrics=result.metrics_test, prefix="test_")) + metrics.update(metrics_to_dict(metrics=result.metrics_test, prefix="test_")) # Mark the run as finished self.storage.finalize_run(model_id=context.workflow.model_id, run_id=run_id, metrics=metrics) self._logger.info("Stored MLflow run %s for model %s", run_id, context.workflow.model_id) - def _get_hyperparams(self, model: BaseForecastingModel) -> HyperParams | None: - """Extract hyperparameters from the model for MLflow logging. - - Override in subclasses for models with different hyperparameter structures. - """ - if isinstance(model, ForecastingModel): - return model.forecaster.hyperparams - return None - - def _log_additional_hyperparams(self, model: BaseForecastingModel, run_id: str) -> None: - """Hook for logging additional hyperparameters. Override in subclasses.""" - - @staticmethod - def _store_feature_importance( - model: BaseForecastingModel, - data_path: Path, - ) -> None: - """Store feature importance plots for the model. - - For a ForecastingModel, stores a single feature importance plot if the - forecaster is explainable. - - Args: - model: The model to extract feature importances from. - data_path: Directory path where HTML plots will be saved. - """ - if isinstance(model, ForecastingModel) and isinstance(model.forecaster, ExplainableForecaster): - fig = model.forecaster.plot_feature_importances() - fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] - @override def on_predict_start( self, @@ -201,21 +306,12 @@ def on_predict_start( if context.workflow.model.is_fitted: return - # If run_name is provided, load that specific run - if context.workflow.run_name is not None: - run = self.storage.search_run( - model_id=context.workflow.model_id, - run_name=context.workflow.run_name, - ) - else: - # Find the latest successful run for this model - runs = self.storage.search_latest_runs(model_id=context.workflow.model_id) - run = next(iter(runs), None) + run = self._find_run(model_id=context.workflow.model_id, run_name=context.workflow.run_name) if run is None: raise ModelNotFoundError(model_id=context.workflow.model_id) - # Load the model from the latest run + # Load the model from the run run_id: str = run.info.run_id old_model = self.storage.load_run_model(run_id=run_id, model_id=context.workflow.model_id) @@ -227,7 +323,7 @@ def on_predict_start( ) return - context.workflow.model = old_model + context.workflow.model = old_model # pyright: ignore[reportAttributeAccessIssue] self._logger.info( "Loaded model from MLflow run %s for model %s", run_id, @@ -235,9 +331,7 @@ def on_predict_start( ) def _run_model_selection(self, workflow: CustomForecastingWorkflow, result: ModelFitResult) -> None: - # Find the latest successful run for this model - runs = self.storage.search_latest_runs(model_id=workflow.model_id) - run = next(iter(runs), None) + run = self._find_run(model_id=workflow.model_id, run_name=None) if run is None: return @@ -253,10 +347,7 @@ def _run_model_selection(self, workflow: CustomForecastingWorkflow, result: Mode new_model = workflow.model new_metrics = result.metrics_full - old_model = self._try_load_model( - run_id=run_id, - workflow=workflow, - ) + old_model = self._try_load_model(run_id=run_id, model_id=workflow.model_id) if old_model is None: return @@ -271,9 +362,9 @@ def _run_model_selection(self, workflow: CustomForecastingWorkflow, result: Mode return if self._check_is_new_model_better(old_metrics=old_metrics, new_metrics=new_metrics): - workflow.model = new_model + workflow.model = new_model # pyright: ignore[reportAttributeAccessIssue] else: - workflow.model = old_model + workflow.model = old_model # pyright: ignore[reportAttributeAccessIssue] self._logger.info( "New model did not improve %s metric from previous run %s, reusing old model", self.model_selection_metric, @@ -281,107 +372,8 @@ def _run_model_selection(self, workflow: CustomForecastingWorkflow, result: Mode ) raise SkipFitting("New model did not improve monitored metric, skipping re-fit.") - def _try_load_model( - self, - run_id: str, - workflow: CustomForecastingWorkflow, - ) -> BaseForecastingModel | None: - try: - old_model = self.storage.load_run_model(run_id=run_id, model_id=workflow.model_id) - except ModelNotFoundError: - self._logger.warning( - "Could not load model from previous run %s for model %s, skipping model selection", - run_id, - workflow.model_id, - ) - return None - - if not isinstance(old_model, BaseForecastingModel): - self._logger.warning( - "Loaded old model from run %s is not a BaseForecastingModel, skipping model selection", - run_id, - ) - return None - - return old_model - - def _try_evaluate_model( - self, - run_id: str, - old_model: BaseForecastingModel, - input_data: TimeSeriesDataset, - ) -> SubsetMetric | None: - try: - return old_model.score(input_data) - except (MissingColumnsError, ValueError) as e: - self._logger.warning( - "Could not evaluate old model from run %s, skipping model selection: %s", - run_id, - e, - ) - return None - - def _check_tags_compatible(self, run_tags: dict[str, str], new_tags: dict[str, str], run_id: str) -> bool: - """Check if model tags are compatible, excluding mlflow.runName. - - Returns: - True if tags are compatible, False otherwise. - """ - old_tags = {k: v for k, v in run_tags.items() if k != "mlflow.runName"} - - if old_tags == new_tags: - return True - - differences = { - k: (old_tags.get(k), new_tags.get(k)) - for k in old_tags.keys() | new_tags.keys() - if old_tags.get(k) != new_tags.get(k) - } - - self._logger.info( - "Model tags changed since run %s, skipping model selection. Changes: %s", - run_id, - differences, - ) - return False - - def _check_is_new_model_better( - self, - old_metrics: SubsetMetric, - new_metrics: SubsetMetric, - ) -> bool: - quantile, metric_name, direction = self.model_selection_metric - - old_metric = old_metrics.get_metric(quantile=quantile, metric_name=metric_name) - new_metric = new_metrics.get_metric(quantile=quantile, metric_name=metric_name) - - if old_metric is None or new_metric is None: - self._logger.warning( - "Could not find %s metric for quantile %s in old or new model metrics, assuming improvement", - metric_name, - quantile, - ) - return True - - self._logger.info( - "Comparing old model %s metric %.5f to new model %s metric %.5f for quantile %s", - metric_name, - old_metric, - metric_name, - new_metric, - quantile, - ) - - match direction: - case "higher_is_better" if new_metric >= old_metric / self.model_selection_old_model_penalty: - return True - case "lower_is_better" if new_metric <= old_metric / self.model_selection_old_model_penalty: - return True - case _: - return False - -def _metrics_to_dict(metrics: SubsetMetric, prefix: str) -> dict[str, float]: +def metrics_to_dict(metrics: SubsetMetric, prefix: str) -> dict[str, float]: return { f"{prefix}{quantile}_{metric_name}": value for quantile, metrics_dict in metrics.metrics.items() @@ -389,4 +381,4 @@ def _metrics_to_dict(metrics: SubsetMetric, prefix: str) -> dict[str, float]: } -__all__ = ["MLFlowStorageCallback"] +__all__ = ["MLFlowStorageCallback", "MLFlowStorageCallbackBase"] diff --git a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py index 7f4c81b9e..afb514f99 100644 --- a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py @@ -11,7 +11,6 @@ import logging from datetime import datetime -from typing import Any from pydantic import Field, PrivateAttr @@ -21,8 +20,7 @@ from openstef_core.exceptions import NotFittedError, SkipFitting from openstef_models.mixins import ModelIdentifier, PredictorCallback from openstef_models.mixins.callbacks import WorkflowContext -from openstef_models.models.base_forecasting_model import BaseForecastingModel -from openstef_models.models.forecasting_model import ModelFitResult +from openstef_models.models.forecasting_model import ForecastingModel, ModelFitResult class ForecastingCallback( @@ -119,7 +117,7 @@ class CustomForecastingWorkflow(BaseModel): ... ) # doctest: +SKIP """ - model: BaseForecastingModel = Field(description="The forecasting model to use.") + model: ForecastingModel = Field(description="The forecasting model to use.") callbacks: list[ForecastingCallback] = Field( default_factory=list[ForecastingCallback], description="List of callbacks to execute during workflow events." ) @@ -159,22 +157,10 @@ def fit( for callback in self.callbacks: callback.on_fit_start(context=context, data=data) - fit_output: Any = self.model.fit(data=data, data_val=data_val, data_test=data_test) - - # Ensemble models return a composite result; extract the combiner result - # for callback compatibility (avoids importing EnsembleModelFitResult). - if isinstance(fit_output, ModelFitResult): - fit_result: ModelFitResult = fit_output - elif hasattr(fit_output, "combiner_fit_result"): - self._logger.debug("Extracting combiner_fit_result for callback compatibility.") - fit_result = fit_output.combiner_fit_result # pyright: ignore[reportUnknownMemberType] - else: - fit_result = fit_output # pyright: ignore[reportUnknownVariableType] + result = self.model.fit(data=data, data_val=data_val, data_test=data_test) for callback in self.callbacks: - callback.on_fit_end(context=context, result=fit_result) - - result = fit_result + callback.on_fit_end(context=context, result=result) except SkipFitting as e: self._logger.info("Skipping model fitting: %s", e) result = None From 5d8cf7d743dfc04a76d8f565e6288b519aa09f23 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 10:06:01 +0100 Subject: [PATCH 087/104] Move residual model to separate branch Signed-off-by: Marnix van Lieshout --- examples/benchmarks/liander_2024_residual.py | 121 ------- .../models/forecasting/__init__.py | 8 - .../models/forecasting/residual_forecaster.py | 326 ------------------ .../presets/forecasting_workflow.py | 6 - .../forecasting/test_residual_forecaster.py | 173 ---------- .../presets/forecasting_workflow.py | 36 +- 6 files changed, 4 insertions(+), 666 deletions(-) delete mode 100644 examples/benchmarks/liander_2024_residual.py delete mode 100644 packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py delete mode 100644 packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py diff --git a/examples/benchmarks/liander_2024_residual.py b/examples/benchmarks/liander_2024_residual.py deleted file mode 100644 index aecb3de9e..000000000 --- a/examples/benchmarks/liander_2024_residual.py +++ /dev/null @@ -1,121 +0,0 @@ -"""Liander 2024 Benchmark Example. - -==================================== - -This example demonstrates how to set up and run the Liander 2024 STEF benchmark using OpenSTEF BEAM. -The benchmark will evaluate XGBoost and GBLinear models on the dataset from HuggingFace. -""" - -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -import os -import time - -os.environ["OMP_NUM_THREADS"] = "1" # Set OMP_NUM_THREADS to 1 to avoid issues with parallel execution and xgboost -os.environ["OPENBLAS_NUM_THREADS"] = "1" -os.environ["MKL_NUM_THREADS"] = "1" - -import logging -import multiprocessing -from datetime import timedelta -from pathlib import Path - -from openstef_beam.backtesting.backtest_forecaster import BacktestForecasterConfig -from openstef_beam.benchmarking.baselines import ( - create_openstef4_preset_backtest_forecaster, -) -from openstef_beam.benchmarking.benchmarks.liander2024 import Liander2024Category, create_liander2024_benchmark_runner -from openstef_beam.benchmarking.callbacks.strict_execution_callback import StrictExecutionCallback -from openstef_beam.benchmarking.storage.local_storage import LocalBenchmarkStorage -from openstef_core.types import LeadTime, Q -from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage -from openstef_models.presets import ( - ForecastingWorkflowConfig, -) - -logging.basicConfig(level=logging.INFO, format="[%(asctime)s][%(levelname)s] %(message)s") - -logger = logging.getLogger(__name__) - -OUTPUT_PATH = Path("./benchmark_results") - -N_PROCESSES = multiprocessing.cpu_count() # Amount of parallel processes to use for the benchmark - -model = "residual" # Can be "stacking", "learned_weights" or "residual" - -# Model configuration -FORECAST_HORIZONS = [LeadTime.from_string("PT36H")] # Forecast horizon(s) -PREDICTION_QUANTILES = [ - Q(0.05), - Q(0.1), - Q(0.3), - Q(0.5), - Q(0.7), - Q(0.9), - Q(0.95), -] # Quantiles for probabilistic forecasts - -BENCHMARK_FILTER: list[Liander2024Category] | None = None - -USE_MLFLOW_STORAGE = False - -if USE_MLFLOW_STORAGE: - storage = MLFlowStorage( - tracking_uri=str(OUTPUT_PATH / "mlflow_artifacts"), - local_artifacts_path=OUTPUT_PATH / "mlflow_tracking_artifacts", - ) -else: - storage = None - -common_config = ForecastingWorkflowConfig( - model_id="common_model_", - model=model, - horizons=FORECAST_HORIZONS, - quantiles=PREDICTION_QUANTILES, - model_reuse_enable=False, - mlflow_storage=None, - radiation_column="shortwave_radiation", - rolling_aggregate_features=["mean", "median", "max", "min"], - wind_speed_column="wind_speed_80m", - pressure_column="surface_pressure", - temperature_column="temperature_2m", - relative_humidity_column="relative_humidity_2m", - energy_price_column="EPEX_NL", -) - - -# Create the backtest configuration -backtest_config = BacktestForecasterConfig( - requires_training=True, - predict_length=timedelta(days=7), - predict_min_length=timedelta(minutes=15), - predict_context_length=timedelta(days=14), # Context needed for lag features - predict_context_min_coverage=0.5, - training_context_length=timedelta(days=90), # Three months of training data - training_context_min_coverage=0.5, - predict_sample_interval=timedelta(minutes=15), -) - - -if __name__ == "__main__": - start_time = time.time() - - # Run for XGBoost model - create_liander2024_benchmark_runner( - storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH / model), - callbacks=[StrictExecutionCallback()], - ).run( - forecaster_factory=create_openstef4_preset_backtest_forecaster( - workflow_config=common_config, - cache_dir=OUTPUT_PATH / "cache", - ), - run_name=model, - n_processes=N_PROCESSES, - filter_args=BENCHMARK_FILTER, - ) - - end_time = time.time() - msg = f"Benchmark completed in {end_time - start_time:.2f} seconds." - logger.info(msg) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py index fce9bcb92..b02e265cd 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py @@ -2,11 +2,3 @@ # # SPDX-License-Identifier: MPL-2.0 """This module provides meta-forecasting models.""" - -from .residual_forecaster import ResidualForecaster, ResidualForecasterConfig, ResidualHyperParams - -__all__ = [ - "ResidualForecaster", - "ResidualForecasterConfig", - "ResidualHyperParams", -] diff --git a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py deleted file mode 100644 index f0083b331..000000000 --- a/packages/openstef-meta/src/openstef_meta/models/forecasting/residual_forecaster.py +++ /dev/null @@ -1,326 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Residual Forecaster. - -This module implements a residual forecasting model that combines two forecasters: -A primary model followed by a secondary model that learns to predict the residuals (errors) of the primary model. -""" - -import logging -from typing import override - -import pandas as pd -from pydantic import Field, model_validator - -from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_core.exceptions import ( - NotFittedError, -) -from openstef_core.mixins import HyperParams -from openstef_core.mixins.forecaster import ( - Forecaster, - ForecasterConfig, -) -from openstef_core.types import Quantile -from openstef_models.models.forecasting.gblinear_forecaster import ( - GBLinearForecaster, - GBLinearHyperParams, -) -from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster, LGBMHyperParams -from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearForecaster, LGBMLinearHyperParams -from openstef_models.models.forecasting.xgboost_forecaster import XGBoostForecaster, XGBoostHyperParams - -logger = logging.getLogger(__name__) - -ResidualBaseForecaster = LGBMForecaster | LGBMLinearForecaster | XGBoostForecaster | GBLinearForecaster -ResidualBaseForecasterHyperParams = LGBMHyperParams | LGBMLinearHyperParams | XGBoostHyperParams | GBLinearHyperParams - - -class ResidualHyperParams(HyperParams): - """Hyperparameters for Stacked LGBM GBLinear Regressor.""" - - primary_hyperparams: ResidualBaseForecasterHyperParams = Field( - default=GBLinearHyperParams(), - description="Primary model hyperparams. Defaults to GBLinearHyperParams.", - ) - - secondary_hyperparams: ResidualBaseForecasterHyperParams = Field( - default=LGBMHyperParams(), - description="Hyperparameters for the final learner. Defaults to LGBMHyperparams.", - ) - - primary_name: str = Field( - default="primary_model", - description="Name identifier for the primary model.", - ) - - secondary_name: str = Field( - default="secondary_model", - description="Name identifier for the secondary model.", - ) - - @model_validator(mode="after") - def validate_names(self) -> "ResidualHyperParams": - """Validate that primary and secondary names are not the same. - - Raises: - ValueError: If primary and secondary names are the same. - - Returns: - ResidualHyperParams: The validated hyperparameters. - """ - if self.primary_name == self.secondary_name: - raise ValueError("Primary and secondary model names must be different.") - return self - - -class ResidualForecasterConfig(ForecasterConfig): - """Configuration for Hybrid-based forecasting models.""" - - hyperparams: ResidualHyperParams = ResidualHyperParams() - - verbosity: bool = Field( - default=True, - description="Enable verbose output from the Hybrid model (True/False).", - ) - - -class ResidualForecaster(Forecaster): # TODO: Move to a separate PR for now... - """MetaForecaster that implements residual modeling. - - It takes in a primary forecaster and a residual forecaster. The primary forecaster makes initial predictions, - and the residual forecaster models the residuals (errors) of the primary forecaster to improve overall accuracy. - """ - - Config = ResidualForecasterConfig - HyperParams = ResidualHyperParams - - def __init__(self, config: ResidualForecasterConfig) -> None: - """Initialize the Hybrid forecaster.""" - self._config = config - - self._primary_model: ResidualBaseForecaster = self._init_base_learners( - config=config, base_hyperparams=[config.hyperparams.primary_hyperparams] - )[0] - - self._secondary_model: list[ResidualBaseForecaster] = self._init_secondary_model( - hyperparams=config.hyperparams.secondary_hyperparams - ) - self.primary_name = config.hyperparams.primary_name - self.secondary_name = config.hyperparams.secondary_name - self._is_fitted = False - - def _init_secondary_model(self, hyperparams: ResidualBaseForecasterHyperParams) -> list[ResidualBaseForecaster]: - """Initialize secondary model for residual forecasting. - - Returns: - list[Forecaster]: List containing the initialized secondary model forecaster. - """ - models: list[ResidualBaseForecaster] = [] - # Different datasets per quantile, so we need a model per quantile - for q in self.config.quantiles: - config = self._config.model_copy(update={"quantiles": [q]}) - secondary_model = self._init_base_learners(config=config, base_hyperparams=[hyperparams])[0] - models.append(secondary_model) - - return models - - @staticmethod - def _init_base_learners( - config: ForecasterConfig, base_hyperparams: list[ResidualBaseForecasterHyperParams] - ) -> list[ResidualBaseForecaster]: - """Initialize base Forecaster based on provided hyperparameters. - - Returns: - list[Forecaster]: List of initialized base Forecaster forecasters. - """ - base_learners: list[ResidualBaseForecaster] = [] - horizons = config.horizons - quantiles = config.quantiles - - for hyperparams in base_hyperparams: - forecaster_cls = hyperparams.forecaster_class() - config = forecaster_cls.Config(horizons=horizons, quantiles=quantiles) - if "hyperparams" in forecaster_cls.Config.model_fields: - config = config.model_copy(update={"hyperparams": hyperparams}) - - base_learners.append(config.forecaster_from_config()) - - return base_learners - - @override - def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None = None) -> None: - """Fit the Hybrid model to the training data. - - Args: - data: Training data in the expected ForecastInputDataset format. - data_val: Validation data for tuning the model (optional, not used in this implementation). - - """ - # Fit primary model - self._primary_model.fit(data=data, data_val=data_val) - - # Reset forecast start date to ensure we fit on the full training set - full_dataset = ForecastInputDataset( - data=data.data, - sample_interval=data.sample_interval, - target_column=data.target_column, - forecast_start=data.index[0], - ) - - secondary_input = self._prepare_secondary_input( - quantiles=self.config.quantiles, - base_predictions=self._primary_model.predict(data=full_dataset), - data=data, - ) - # Predict primary model on validation data if provided - if data_val is not None: - full_val_dataset = ForecastInputDataset( - data=data_val.data, - sample_interval=data_val.sample_interval, - target_column=data_val.target_column, - forecast_start=data_val.index[0], - ) - - secondary_val_input = self._prepare_secondary_input( - quantiles=self.config.quantiles, - base_predictions=self._primary_model.predict(data=full_val_dataset), - data=data_val, - ) - # Fit secondary model on residuals - [ - self._secondary_model[i].fit(data=secondary_input[q], data_val=secondary_val_input[q]) - for i, q in enumerate(secondary_input) - ] - - else: - # Fit secondary model on residuals - [ - self._secondary_model[i].fit(data=secondary_input[q], data_val=None) - for i, q in enumerate(secondary_input) - ] - - self._is_fitted = True - - @property - @override - def is_fitted(self) -> bool: - """Check the ResidualForecaster is fitted.""" - return self._is_fitted - - @staticmethod - def _prepare_secondary_input( - quantiles: list[Quantile], - base_predictions: ForecastDataset, - data: ForecastInputDataset, - ) -> dict[Quantile, ForecastInputDataset]: - """Adjust target series to be residuals for secondary model training. - - Args: - quantiles: List of quantiles to prepare data for. - base_predictions: Predictions from the primary model. - data: Original input data. - - Returns: - dict[Quantile, ForecastInputDataset]: Prepared datasets for each quantile. - """ - predictions_quantiles: dict[Quantile, ForecastInputDataset] = {} - sample_interval = data.sample_interval - for q in quantiles: - predictions = base_predictions.data[q.format()] - df = data.data.copy() - df[data.target_column] = data.target_series - predictions - predictions_quantiles[q] = ForecastInputDataset( - data=df, - sample_interval=sample_interval, - target_column=data.target_column, - forecast_start=df.index[0], - ) - - return predictions_quantiles - - def _predict_secondary_model(self, data: ForecastInputDataset) -> ForecastDataset: - predictions: dict[str, pd.Series] = {} - for model in self._secondary_model: - pred = model.predict(data=data) - q = model.config.quantiles[0].format() - predictions[q] = pred.data[q] - - return ForecastDataset( - data=pd.DataFrame(predictions), - sample_interval=data.sample_interval, - ) - - def predict(self, data: ForecastInputDataset) -> ForecastDataset: - """Generate predictions using the ResidualForecaster model. - - Args: - data: Input data for prediction. - - Returns: - ForecastDataset containing the predictions. - - Raises: - NotFittedError: If the ResidualForecaster instance is not fitted yet. - """ - if not self.is_fitted: - raise NotFittedError("The ResidualForecaster instance is not fitted yet. Call 'fit' first.") - - primary_predictions = self._primary_model.predict(data=data).data - - secondary_predictions = self._predict_secondary_model(data=data).data - - final_predictions = primary_predictions + secondary_predictions - - return ForecastDataset( - data=final_predictions, - sample_interval=data.sample_interval, - ) - - def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = True) -> pd.DataFrame: - """Generate prediction contributions using the ResidualForecaster model. - - Args: - data: Input data for prediction contributions. - scale: Whether to scale contributions to sum to 1. Defaults to True. - - Returns: - pd.DataFrame containing the prediction contributions. - """ - primary_predictions = self._primary_model.predict(data=data).data - - secondary_predictions = self._predict_secondary_model(data=data).data - - if not scale: - primary_contributions = primary_predictions - primary_name = self._primary_model.__class__.__name__ - primary_contributions.columns = [f"{primary_name}_{q}" for q in primary_contributions.columns] - - secondary_contributions = secondary_predictions - secondary_name = self._secondary_model[0].__class__.__name__ - secondary_contributions.columns = [f"{secondary_name}_{q}" for q in secondary_contributions.columns] - - return pd.concat([primary_contributions, secondary_contributions], axis=1) - - primary_contributions = primary_predictions.abs() / (primary_predictions.abs() + secondary_predictions.abs()) - primary_contributions.columns = [f"{self.primary_name}_{q}" for q in primary_contributions.columns] - - secondary_contributions = secondary_predictions.abs() / ( - primary_predictions.abs() + secondary_predictions.abs() - ) - secondary_contributions.columns = [f"{self.secondary_name}_{q}" for q in secondary_contributions.columns] - - return pd.concat([primary_contributions, secondary_contributions], axis=1) - - @property - def config(self) -> ResidualForecasterConfig: - """Get the configuration of the ResidualForecaster. - - Returns: - ResidualForecasterConfig: The configuration of the forecaster. - """ - return self._config - - -__all__ = ["ResidualForecaster", "ResidualForecasterConfig", "ResidualHyperParams"] diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 359d05c5f..6b1f23d54 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -35,7 +35,6 @@ from openstef_meta.models.forecast_combiners.stacking_combiner import ( StackingCombiner, ) -from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster from openstef_meta.workflows import CustomEnsembleForecastingWorkflow, EnsembleForecastingCallback from openstef_models.integrations.mlflow import MLFlowStorage from openstef_models.mixins.model_serializer import ModelIdentifier @@ -121,11 +120,6 @@ class EnsembleWorkflowConfig(BaseConfig): description="Hyperparameters for LightGBM forecaster.", ) - residual_hyperparams: ResidualForecaster.HyperParams = Field( - default=ResidualForecaster.HyperParams(), - description="Hyperparameters for Residual forecaster.", - ) - # Learned weights combiner hyperparameters combiner_lgbm_hyperparams: LGBMCombinerHyperParams = Field( default=LGBMCombinerHyperParams(), diff --git a/packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py b/packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py deleted file mode 100644 index cf2cfef0d..000000000 --- a/packages/openstef-meta/tests/unit/models/forecasting/test_residual_forecaster.py +++ /dev/null @@ -1,173 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -from datetime import timedelta - -import pytest - -from openstef_core.datasets import ForecastInputDataset -from openstef_core.exceptions import NotFittedError -from openstef_core.types import LeadTime, Q -from openstef_meta.models.forecasting.residual_forecaster import ( - ResidualBaseForecasterHyperParams, - ResidualForecaster, - ResidualForecasterConfig, - ResidualHyperParams, -) -from openstef_models.models.forecasting.gblinear_forecaster import GBLinearHyperParams -from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams -from openstef_models.models.forecasting.lgbmlinear_forecaster import LGBMLinearHyperParams -from openstef_models.models.forecasting.xgboost_forecaster import XGBoostHyperParams - - -@pytest.fixture(params=["gblinear", "lgbmlinear"]) -def primary_model(request: pytest.FixtureRequest) -> ResidualBaseForecasterHyperParams: - """Fixture to provide different primary models types.""" - learner_type = request.param - if learner_type == "gblinear": - return GBLinearHyperParams() - if learner_type == "lgbm": - return LGBMHyperParams() - if learner_type == "lgbmlinear": - return LGBMLinearHyperParams() - return XGBoostHyperParams() - - -@pytest.fixture(params=["gblinear", "lgbm", "lgbmlinear", "xgboost"]) -def secondary_model(request: pytest.FixtureRequest) -> ResidualBaseForecasterHyperParams: - """Fixture to provide different secondary models types.""" - learner_type = request.param - if learner_type == "gblinear": - return GBLinearHyperParams() - if learner_type == "lgbm": - return LGBMHyperParams() - if learner_type == "lgbmlinear": - return LGBMLinearHyperParams() - return XGBoostHyperParams() - - -@pytest.fixture -def base_config( - primary_model: ResidualBaseForecasterHyperParams, - secondary_model: ResidualBaseForecasterHyperParams, -) -> ResidualForecasterConfig: - """Base configuration for Residual forecaster tests.""" - - params = ResidualHyperParams( - primary_hyperparams=primary_model, - secondary_hyperparams=secondary_model, - ) - return ResidualForecasterConfig( - quantiles=[Q(0.1), Q(0.5), Q(0.9)], - horizons=[LeadTime(timedelta(days=1))], - hyperparams=params, - verbosity=False, - ) - - -def test_residual_forecaster_fit_predict( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: ResidualForecasterConfig, -): - """Test basic fit and predict workflow with comprehensive output validation.""" - # Arrange - expected_quantiles = base_config.quantiles - forecaster = ResidualForecaster(config=base_config) - - # Act - forecaster.fit(sample_forecast_input_dataset) - result = forecaster.predict(sample_forecast_input_dataset) - - # Assert - # Basic functionality - assert forecaster.is_fitted, "Model should be fitted after calling fit()" - - # Check that necessary quantiles are present - expected_columns = [q.format() for q in expected_quantiles] - assert list(result.data.columns) == expected_columns, ( - f"Expected columns {expected_columns}, got {list(result.data.columns)}" - ) - - # Forecast data quality - assert not result.data.isna().any().any(), "Forecast should not contain NaN or None values" - - -def test_residual_forecaster_predict_not_fitted_raises_error( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: ResidualForecasterConfig, -): - """Test that predict() raises NotFittedError when called before fit().""" - # Arrange - forecaster = ResidualForecaster(config=base_config) - - # Act & Assert - with pytest.raises(NotFittedError, match="ResidualForecaster"): - forecaster.predict(sample_forecast_input_dataset) - - -def test_residual_forecaster_with_sample_weights( - sample_dataset_with_weights: ForecastInputDataset, - base_config: ResidualForecasterConfig, -): - """Test that forecaster works with sample weights and produces different results.""" - # Arrange - forecaster_with_weights = ResidualForecaster(config=base_config) - - # Create dataset without weights for comparison - data_without_weights = ForecastInputDataset( - data=sample_dataset_with_weights.data.drop(columns=["sample_weight"]), - sample_interval=sample_dataset_with_weights.sample_interval, - target_column=sample_dataset_with_weights.target_column, - forecast_start=sample_dataset_with_weights.forecast_start, - ) - forecaster_without_weights = ResidualForecaster(config=base_config) - - # Act - forecaster_with_weights.fit(sample_dataset_with_weights) - forecaster_without_weights.fit(data_without_weights) - - # Predict using data without sample_weight column (since that's used for training, not prediction) - result_with_weights = forecaster_with_weights.predict(data_without_weights) - result_without_weights = forecaster_without_weights.predict(data_without_weights) - - # Assert - # Both should produce valid forecasts - assert not result_with_weights.data.isna().any().any(), "Weighted forecast should not contain NaN values" - assert not result_without_weights.data.isna().any().any(), "Unweighted forecast should not contain NaN values" - - # Sample weights should affect the model, so results should be different - # (This is a statistical test - with different weights, predictions should differ) - differences = (result_with_weights.data - result_without_weights.data).abs() - assert differences.sum().sum() > 0, "Sample weights should affect model predictions" - - -def test_residual_forecaster_predict_contributions( - sample_forecast_input_dataset: ForecastInputDataset, - base_config: ResidualForecasterConfig, -): - """Test basic fit and predict workflow with output validation.""" - # Arrange - expected_quantiles = base_config.quantiles - forecaster = ResidualForecaster(config=base_config) - - # Act - forecaster.fit(sample_forecast_input_dataset) - result = forecaster.predict_contributions(sample_forecast_input_dataset, scale=True) - - # Assert - # Basic functionality - assert forecaster.is_fitted, "Model should be fitted after calling fit()" - - # Check that necessary quantiles are present - base_models = [forecaster.primary_name, forecaster.secondary_name] - expected_columns = [f"{col}_{q.format()}" for col in base_models for q in expected_quantiles] - assert sorted(result.columns) == sorted(expected_columns), ( - f"Expected columns {expected_columns}, got {list(result.columns)}" - ) - - # Contributions should sum to 1.0 per quantile - for q in expected_quantiles: - quantile_cols = [col for col in result.columns if col.endswith(f"_{q.format()}")] - col_sums = result[quantile_cols].sum(axis=1) - assert all(abs(col_sums - 1.0) < 1e-6), f"Contributions for quantile {q.format()} should sum to 1.0" diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index b3f6c6e42..3ee03d577 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -25,7 +25,6 @@ from openstef_core.base_model import BaseConfig from openstef_core.mixins import TransformPipeline from openstef_core.types import LeadTime, Q, Quantile, QuantileOrGlobal -from openstef_meta.models.forecasting.residual_forecaster import ResidualForecaster from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback from openstef_models.mixins import ModelIdentifier from openstef_models.models import ForecastingModel @@ -112,7 +111,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob ) # Model configuration - model: Literal["xgboost", "gblinear", "flatliner", "residual", "lgbm", "lgbmlinear"] = Field( + model: Literal["xgboost", "gblinear", "flatliner", "lgbm", "lgbmlinear"] = Field( description="Type of forecasting model to use." ) # TODO(#652): Implement median forecaster quantiles: list[Quantile] = Field( @@ -148,11 +147,6 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob description="Hyperparameters for LightGBM forecaster.", ) - residual_hyperparams: ResidualForecaster.HyperParams = Field( - default=ResidualForecaster.HyperParams(), - description="Hyperparameters for Residual forecaster.", - ) - location: LocationConfig = Field( default=LocationConfig(), description="Location information for the forecasting workflow.", @@ -226,7 +220,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob ) sample_weight_exponent: float = Field( default_factory=lambda data: 1.0 - if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "learned_weights", "stacking", "residual", "xgboost"} + if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "learned_weights", "stacking", "xgboost"} else 0.0, description="Exponent applied to scale the sample weights. " "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " @@ -331,10 +325,10 @@ def create_forecasting_workflow( history_available=config.predict_history, horizons=config.horizons, add_trivial_lags=config.model - not in {"gblinear", "residual", "stacking", "learned_weights"}, # GBLinear uses only 7day lag. + not in {"gblinear", "stacking", "learned_weights"}, # GBLinear uses only 7day lag. target_column=config.target_column, custom_lags=[timedelta(days=7)] - if config.model in {"gblinear", "residual", "stacking", "learned_weights"} + if config.model in {"gblinear", "stacking", "learned_weights"} else [], ), WindPowerFeatureAdder( @@ -471,28 +465,6 @@ def create_forecasting_workflow( ConfidenceIntervalApplicator(quantiles=config.quantiles), ] - elif config.model == "residual": - preprocessing = [ - *checks, - *feature_adders, - *feature_standardizers, - Imputer( - selection=Exclude(config.target_column), - imputation_strategy="mean", - fill_future_values=Include(config.energy_price_column), - ), - NaNDropper( - selection=Exclude(config.target_column), - ), - ] - forecaster = ResidualForecaster( - config=ResidualForecaster.Config( - quantiles=config.quantiles, - horizons=config.horizons, - hyperparams=config.residual_hyperparams, - ) - ) - postprocessing = [QuantileSorter()] else: msg = f"Unsupported model type: {config.model}" raise ValueError(msg) From 2b16bd6f9d82c874d08ebe85c9b61a40f988a636 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 10:35:14 +0100 Subject: [PATCH 088/104] Rename regression tests to integration Signed-off-by: Marnix van Lieshout --- .../openstef-meta/tests/{regression => integration}/__init__.py | 0 .../test_ensemble_forecasting_model.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename packages/openstef-meta/tests/{regression => integration}/__init__.py (100%) rename packages/openstef-meta/tests/{regression => integration}/test_ensemble_forecasting_model.py (97%) diff --git a/packages/openstef-meta/tests/regression/__init__.py b/packages/openstef-meta/tests/integration/__init__.py similarity index 100% rename from packages/openstef-meta/tests/regression/__init__.py rename to packages/openstef-meta/tests/integration/__init__.py diff --git a/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py similarity index 97% rename from packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py rename to packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py index 3dda72012..24c203544 100644 --- a/packages/openstef-meta/tests/regression/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py @@ -75,7 +75,7 @@ def create_models( return ensemble_model, base_models -def test_preprocessing( # TODO: Move this to unit/models/test_ensemble_forecasting_model.py? +def test_preprocessing( sample_timeseries_dataset: TimeSeriesDataset, create_models: tuple[EnsembleForecastingModel, dict[str, ForecastingModel]], ) -> None: From e0f373beb3790d2d5280b558c2f2f6d6358eb05a Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 10:44:16 +0100 Subject: [PATCH 089/104] Move forecaster back for now Signed-off-by: Marnix van Lieshout --- .../src/openstef_core/mixins/__init__.py | 3 - .../src/openstef_core/mixins/forecaster.py | 220 ------------------ .../models/ensemble_forecasting_model.py | 2 +- .../forecast_combiners/stacking_combiner.py | 2 +- .../presets/forecasting_workflow.py | 2 +- .../test_ensemble_mlflow_storage_callback.py | 2 +- .../models/test_ensemble_forecasting_model.py | 2 +- .../models/base_forecasting_model.py | 3 +- .../forecasting/base_case_forecaster.py | 2 +- .../forecasting/constant_median_forecaster.py | 2 +- .../forecasting/flatliner_forecaster.py | 2 +- .../models/forecasting/forecaster.py | 216 ++++++++++++++++- .../models/forecasting/gblinear_forecaster.py | 2 +- .../models/forecasting/lgbm_forecaster.py | 2 +- .../forecasting/lgbmlinear_forecaster.py | 2 +- .../models/forecasting/xgboost_forecaster.py | 2 +- .../models/forecasting_model.py | 2 +- .../presets/forecasting_workflow.py | 6 +- .../mlflow/test_mlflow_storage_callback.py | 2 +- .../unit/models/test_forecasting_model.py | 2 +- 20 files changed, 225 insertions(+), 253 deletions(-) delete mode 100644 packages/openstef-core/src/openstef_core/mixins/forecaster.py diff --git a/packages/openstef-core/src/openstef_core/mixins/__init__.py b/packages/openstef-core/src/openstef_core/mixins/__init__.py index 49cfb87a4..0da051876 100644 --- a/packages/openstef-core/src/openstef_core/mixins/__init__.py +++ b/packages/openstef-core/src/openstef_core/mixins/__init__.py @@ -9,7 +9,6 @@ and data transformation pipelines. """ -from .forecaster import Forecaster, ForecasterConfig from .predictor import BatchPredictor, BatchResult, HyperParams, Predictor from .stateful import Stateful from .transform import Transform, TransformPipeline @@ -17,8 +16,6 @@ __all__ = [ "BatchPredictor", "BatchResult", - "Forecaster", - "ForecasterConfig", "HyperParams", "Predictor", "Stateful", diff --git a/packages/openstef-core/src/openstef_core/mixins/forecaster.py b/packages/openstef-core/src/openstef_core/mixins/forecaster.py deleted file mode 100644 index 262736542..000000000 --- a/packages/openstef-core/src/openstef_core/mixins/forecaster.py +++ /dev/null @@ -1,220 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""Core forecasting model interfaces and configurations. - -Provides the fundamental building blocks for implementing forecasting models in OpenSTEF. -These mixins establish contracts that ensure consistent behavior across different model types -while supporting both single and multi-horizon forecasting scenarios. - -Key concepts: -- **Horizon**: The lead time for predictions, accounting for data availability and versioning cutoffs -- **Quantiles**: Probability levels for uncertainty estimation -- **State**: Serializable model parameters that enable saving/loading trained models -- **Batching**: Processing multiple prediction requests simultaneously for efficiency - -Multi-horizon forecasting considerations: -Some models (like linear models) cannot handle missing data or conditional features effectively, -making them suitable only for single-horizon approaches. Other models (like XGBoost) can -handle such data complexities and work well for multi-horizon scenarios. -""" - -from abc import abstractmethod -from typing import Self - -from pydantic import Field - -from openstef_core.base_model import BaseConfig -from openstef_core.datasets import ForecastDataset, ForecastInputDataset -from openstef_core.mixins.predictor import BatchPredictor, HyperParams -from openstef_core.types import LeadTime, Quantile - - -class ForecasterConfig(BaseConfig): - """Configuration for forecasting models with support for multiple quantiles and horizons. - - Fundamental configuration parameters that determine forecasting model behavior across - different prediction horizons and uncertainty levels. These are operational parameters - rather than hyperparameters that affect training. - - Example: - Basic configuration for daily energy forecasting: - - >>> from openstef_core.types import LeadTime, Quantile - >>> config = ForecasterConfig( - ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], - ... horizons=[LeadTime.from_string("PT1H"), LeadTime.from_string("PT6H"), LeadTime.from_string("PT24H")] - ... ) - >>> len(config.horizons) - 3 - >>> str(config.max_horizon) - 'P1D' - - See Also: - HorizonForecasterConfig: Single-horizon variant of this configuration. - BaseForecaster: Multi-horizon forecaster that uses this configuration. - ForecasterHyperParams: Hyperparameter configuration used alongside this. - """ - - quantiles: list[Quantile] = Field( - default=[Quantile(0.5)], - description=( - "Probability levels for uncertainty estimation. Each quantile represents a confidence level " - "(e.g., 0.1 = 10th percentile, 0.5 = median, 0.9 = 90th percentile). " - "Models must generate predictions for all specified quantiles." - ), - min_length=1, - ) - - horizons: list[LeadTime] = Field( - default=..., - description=( - "Lead times for predictions, accounting for data availability and versioning cutoffs. " - "Each horizon defines how far ahead the model should predict." - ), - min_length=1, - ) - - supports_batching: bool = Field( - default=False, - description=( - "Indicates if the model can handle batch predictions. " - "Batching allows multiple prediction requests to be processed simultaneously, " - "which is more efficient for models that support it, especially on GPUs." - ), - ) - - @property - def max_horizon(self) -> LeadTime: - """Returns the maximum lead time (horizon) from the configured horizons. - - Useful for determining the furthest prediction distance required by the model. - This is commonly used for data preparation and validation logic. - - Returns: - The maximum lead time. - """ - return max(self.horizons) - - def with_horizon(self, horizon: LeadTime) -> Self: - """Create a new configuration with a different horizon. - - Useful for creating multiple forecaster instances for different prediction - horizons from a single base configuration. - - Args: - horizon: The new lead time to use for predictions. - - Returns: - New configuration instance with the specified horizon. - """ - return self.model_copy(update={"horizons": [horizon]}) - - @classmethod - def forecaster_class(cls) -> type["Forecaster"]: - """Get the associated Forecaster class for this configuration. - - Returns: - The Forecaster class that uses this configuration. - """ - raise NotImplementedError("Subclasses must implement forecaster_class") - - -class Forecaster(BatchPredictor[ForecastInputDataset, ForecastDataset]): - """Base for forecasters that handle multiple horizons simultaneously. - - Designed for models that train and predict across multiple prediction horizons - in a unified manner. These models handle the complexity of different lead times - internally, providing a simpler interface for multi-horizon forecasting. - - Ideal for models that can share parameters or features across horizons, avoiding - the need to train separate models for each prediction distance. - - Invariants: - - Predictions must include all quantiles specified in the configuration - - predict_batch() only called when supports_batching returns True - - Example: - Implementation for a model that handles multiple horizons: - - >>> from typing import override - >>> class CustomForecaster(Forecaster): - ... def __init__(self, config: ForecasterConfig): - ... self._config = config - ... self._fitted = False - ... - ... @property - ... @override - ... def config(self): - ... return self._config - ... - ... @property - ... @override - ... def is_fitted(self): - ... return self._fitted - ... - ... @override - ... def get_state(self): - ... return {"config": self._config, "fitted": self._fitted} - ... - ... @override - ... def from_state(self, state): - ... instance = self.__class__(state["config"]) - ... instance._fitted = state["fitted"] - ... return instance - ... - ... @override - ... def fit(self, input_data, data_val): - ... # Train on data for all horizons - ... self._fitted = True - ... - ... @override - ... def predict(self, input_data): - ... # Generate predictions for all horizons - ... from openstef_core.datasets.validated_datasets import ForecastDataset - ... import pandas as pd - ... return ForecastDataset( - ... data=pd.DataFrame(), - ... sample_interval=pd.Timedelta("15min"), - ... forecast_start=pd.Timestamp.now() - ... ) - """ - - @abstractmethod - def __init__(self, config: ForecasterConfig) -> None: - """Initialize the forecaster with the given configuration. - - Args: - config: Configuration object specifying quantiles, horizons, and batching support. - """ - raise NotImplementedError("Subclasses must implement __init__") - - @property - @abstractmethod - def config(self) -> ForecasterConfig: - """Access the model's configuration parameters. - - Returns: - Configuration object containing fundamental model parameters. - """ - raise NotImplementedError("Subclasses must implement config") - - @property - def hyperparams(self) -> HyperParams: - """Access the model's hyperparameters for training and prediction. - - Hyperparameters control model behavior during training and inference. - Default implementation returns empty hyperparameters, which is suitable - for models without configurable parameters. - - Returns: - Hyperparameter configuration object. - """ - return HyperParams() - - -__all__ = [ - "Forecaster", - "ForecasterConfig", -] diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index 51f06cd7b..d6854f9fb 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -28,9 +28,9 @@ from openstef_core.datasets.validated_datasets import EnsembleForecastDataset from openstef_core.exceptions import NotFittedError from openstef_core.mixins import TransformPipeline -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner from openstef_models.models.base_forecasting_model import BaseForecastingModel +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.models.forecasting_model import ModelFitResult logger = logging.getLogger(__name__) diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index 27c15c8ee..1ce440216 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -30,7 +30,7 @@ from openstef_models.models.forecasting.lgbm_forecaster import LGBMHyperParams if TYPE_CHECKING: - from openstef_core.mixins.forecaster import Forecaster + from openstef_models.models.forecasting.forecaster import Forecaster logger = logging.getLogger(__name__) diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 6b1f23d54..e762fa949 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -67,7 +67,7 @@ from openstef_models.utils.feature_selection import Exclude, FeatureSelection, Include if TYPE_CHECKING: - from openstef_core.mixins.forecaster import Forecaster + from openstef_models.models.forecasting.forecaster import Forecaster class EnsembleWorkflowConfig(BaseConfig): diff --git a/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py b/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py index 456e4bea9..b53fdf163 100644 --- a/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py +++ b/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py @@ -15,7 +15,6 @@ from openstef_core.datasets import TimeSeriesDataset from openstef_core.datasets.validated_datasets import EnsembleForecastDataset, ForecastDataset, ForecastInputDataset from openstef_core.exceptions import SkipFitting -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.mixins.predictor import HyperParams from openstef_core.types import LeadTime, Q from openstef_meta.integrations.mlflow import EnsembleMLFlowStorageCallback @@ -24,6 +23,7 @@ from openstef_meta.workflows import CustomEnsembleForecastingWorkflow from openstef_models.integrations.mlflow import MLFlowStorage from openstef_models.mixins.callbacks import WorkflowContext +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig if TYPE_CHECKING: from pathlib import Path diff --git a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py index 97121fb34..6422d9c2a 100644 --- a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py @@ -14,13 +14,13 @@ from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset from openstef_core.datasets.validated_datasets import EnsembleForecastDataset, ForecastDataset from openstef_core.exceptions import NotFittedError -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.mixins.predictor import HyperParams from openstef_core.mixins.transform import TransformPipeline from openstef_core.testing import assert_timeseries_equal, create_synthetic_forecasting_dataset from openstef_core.types import LeadTime, Q from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.transforms.postprocessing.quantile_sorter import QuantileSorter from openstef_models.transforms.time_domain.lags_adder import LagsAdder diff --git a/packages/openstef-models/src/openstef_models/models/base_forecasting_model.py b/packages/openstef-models/src/openstef_models/models/base_forecasting_model.py index 65cfa4ccb..eafc9a8f3 100644 --- a/packages/openstef-models/src/openstef_models/models/base_forecasting_model.py +++ b/packages/openstef-models/src/openstef_models/models/base_forecasting_model.py @@ -21,11 +21,10 @@ from openstef_core.base_model import BaseModel from openstef_core.datasets import ForecastDataset, TimeSeriesDataset from openstef_core.mixins import Predictor, TransformPipeline -from openstef_core.mixins.forecaster import ForecasterConfig +from openstef_models.models.forecasting.forecaster import ForecasterConfig from openstef_models.utils.data_split import DataSplitter -# TODO: Move to openstef-core? class BaseForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): """Abstract base for forecasting models.""" diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py index d03714705..1e1094092 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/base_case_forecaster.py @@ -21,10 +21,10 @@ from pydantic import Field from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.mixins.predictor import HyperParams from openstef_core.types import LeadTime, Quantile from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig class BaseCaseForecasterHyperParams(HyperParams): diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py index cf254cf5c..2289398d5 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/constant_median_forecaster.py @@ -16,10 +16,10 @@ from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import NotFittedError -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.mixins.predictor import HyperParams from openstef_core.types import Any, LeadTime, Quantile from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig class ConstantMedianForecasterHyperParams(HyperParams): diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py index 79bf24534..d684d1373 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/flatliner_forecaster.py @@ -14,8 +14,8 @@ from pydantic import Field from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig class FlatlinerForecasterConfig(ForecasterConfig): diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py index ae75cee02..262736542 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/forecaster.py @@ -2,19 +2,217 @@ # # SPDX-License-Identifier: MPL-2.0 -"""Re-exports of core forecasting interfaces from openstef-core. +"""Core forecasting model interfaces and configurations. -The canonical definitions of Forecaster and ForecasterConfig -now live in ``openstef_core.mixins.forecaster``. This module re-exports them for -backwards compatibility. +Provides the fundamental building blocks for implementing forecasting models in OpenSTEF. +These mixins establish contracts that ensure consistent behavior across different model types +while supporting both single and multi-horizon forecasting scenarios. + +Key concepts: +- **Horizon**: The lead time for predictions, accounting for data availability and versioning cutoffs +- **Quantiles**: Probability levels for uncertainty estimation +- **State**: Serializable model parameters that enable saving/loading trained models +- **Batching**: Processing multiple prediction requests simultaneously for efficiency + +Multi-horizon forecasting considerations: +Some models (like linear models) cannot handle missing data or conditional features effectively, +making them suitable only for single-horizon approaches. Other models (like XGBoost) can +handle such data complexities and work well for multi-horizon scenarios. """ -# TODO: Remove... Backwards compat not needed +from abc import abstractmethod +from typing import Self + +from pydantic import Field + +from openstef_core.base_model import BaseConfig +from openstef_core.datasets import ForecastDataset, ForecastInputDataset +from openstef_core.mixins.predictor import BatchPredictor, HyperParams +from openstef_core.types import LeadTime, Quantile + + +class ForecasterConfig(BaseConfig): + """Configuration for forecasting models with support for multiple quantiles and horizons. + + Fundamental configuration parameters that determine forecasting model behavior across + different prediction horizons and uncertainty levels. These are operational parameters + rather than hyperparameters that affect training. + + Example: + Basic configuration for daily energy forecasting: + + >>> from openstef_core.types import LeadTime, Quantile + >>> config = ForecasterConfig( + ... quantiles=[Quantile(0.1), Quantile(0.5), Quantile(0.9)], + ... horizons=[LeadTime.from_string("PT1H"), LeadTime.from_string("PT6H"), LeadTime.from_string("PT24H")] + ... ) + >>> len(config.horizons) + 3 + >>> str(config.max_horizon) + 'P1D' + + See Also: + HorizonForecasterConfig: Single-horizon variant of this configuration. + BaseForecaster: Multi-horizon forecaster that uses this configuration. + ForecasterHyperParams: Hyperparameter configuration used alongside this. + """ + + quantiles: list[Quantile] = Field( + default=[Quantile(0.5)], + description=( + "Probability levels for uncertainty estimation. Each quantile represents a confidence level " + "(e.g., 0.1 = 10th percentile, 0.5 = median, 0.9 = 90th percentile). " + "Models must generate predictions for all specified quantiles." + ), + min_length=1, + ) + + horizons: list[LeadTime] = Field( + default=..., + description=( + "Lead times for predictions, accounting for data availability and versioning cutoffs. " + "Each horizon defines how far ahead the model should predict." + ), + min_length=1, + ) + + supports_batching: bool = Field( + default=False, + description=( + "Indicates if the model can handle batch predictions. " + "Batching allows multiple prediction requests to be processed simultaneously, " + "which is more efficient for models that support it, especially on GPUs." + ), + ) + + @property + def max_horizon(self) -> LeadTime: + """Returns the maximum lead time (horizon) from the configured horizons. + + Useful for determining the furthest prediction distance required by the model. + This is commonly used for data preparation and validation logic. + + Returns: + The maximum lead time. + """ + return max(self.horizons) + + def with_horizon(self, horizon: LeadTime) -> Self: + """Create a new configuration with a different horizon. + + Useful for creating multiple forecaster instances for different prediction + horizons from a single base configuration. + + Args: + horizon: The new lead time to use for predictions. + + Returns: + New configuration instance with the specified horizon. + """ + return self.model_copy(update={"horizons": [horizon]}) + + @classmethod + def forecaster_class(cls) -> type["Forecaster"]: + """Get the associated Forecaster class for this configuration. + + Returns: + The Forecaster class that uses this configuration. + """ + raise NotImplementedError("Subclasses must implement forecaster_class") + + +class Forecaster(BatchPredictor[ForecastInputDataset, ForecastDataset]): + """Base for forecasters that handle multiple horizons simultaneously. + + Designed for models that train and predict across multiple prediction horizons + in a unified manner. These models handle the complexity of different lead times + internally, providing a simpler interface for multi-horizon forecasting. + + Ideal for models that can share parameters or features across horizons, avoiding + the need to train separate models for each prediction distance. + + Invariants: + - Predictions must include all quantiles specified in the configuration + - predict_batch() only called when supports_batching returns True + + Example: + Implementation for a model that handles multiple horizons: + + >>> from typing import override + >>> class CustomForecaster(Forecaster): + ... def __init__(self, config: ForecasterConfig): + ... self._config = config + ... self._fitted = False + ... + ... @property + ... @override + ... def config(self): + ... return self._config + ... + ... @property + ... @override + ... def is_fitted(self): + ... return self._fitted + ... + ... @override + ... def get_state(self): + ... return {"config": self._config, "fitted": self._fitted} + ... + ... @override + ... def from_state(self, state): + ... instance = self.__class__(state["config"]) + ... instance._fitted = state["fitted"] + ... return instance + ... + ... @override + ... def fit(self, input_data, data_val): + ... # Train on data for all horizons + ... self._fitted = True + ... + ... @override + ... def predict(self, input_data): + ... # Generate predictions for all horizons + ... from openstef_core.datasets.validated_datasets import ForecastDataset + ... import pandas as pd + ... return ForecastDataset( + ... data=pd.DataFrame(), + ... sample_interval=pd.Timedelta("15min"), + ... forecast_start=pd.Timestamp.now() + ... ) + """ + + @abstractmethod + def __init__(self, config: ForecasterConfig) -> None: + """Initialize the forecaster with the given configuration. + + Args: + config: Configuration object specifying quantiles, horizons, and batching support. + """ + raise NotImplementedError("Subclasses must implement __init__") + + @property + @abstractmethod + def config(self) -> ForecasterConfig: + """Access the model's configuration parameters. + + Returns: + Configuration object containing fundamental model parameters. + """ + raise NotImplementedError("Subclasses must implement config") + + @property + def hyperparams(self) -> HyperParams: + """Access the model's hyperparameters for training and prediction. + + Hyperparameters control model behavior during training and inference. + Default implementation returns empty hyperparameters, which is suitable + for models without configurable parameters. + + Returns: + Hyperparameter configuration object. + """ + return HyperParams() -from openstef_core.mixins.forecaster import ( - Forecaster, - ForecasterConfig, -) __all__ = [ "Forecaster", diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py index 43cc8761f..f08dc4269 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/gblinear_forecaster.py @@ -22,9 +22,9 @@ from openstef_core.datasets.mixins import LeadTime from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import InputValidationError, MissingExtraError, NotFittedError -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.mixins.predictor import HyperParams from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.utils.evaluation_functions import EvaluationFunctionType, get_evaluation_function from openstef_models.utils.loss_functions import ( ObjectiveFunctionType, diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py index af9918391..6ec87593d 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbm_forecaster.py @@ -21,8 +21,8 @@ NotFittedError, ) from openstef_core.mixins import HyperParams -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor if TYPE_CHECKING: diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py index 7118346eb..3a4c9c12c 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/lgbmlinear_forecaster.py @@ -21,8 +21,8 @@ NotFittedError, ) from openstef_core.mixins import HyperParams -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.utils.multi_quantile_regressor import MultiQuantileRegressor if TYPE_CHECKING: diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py index 4c1e1452f..c5415e2d6 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/xgboost_forecaster.py @@ -19,8 +19,8 @@ from openstef_core.datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import MissingExtraError, NotFittedError from openstef_core.mixins import HyperParams -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_models.explainability.mixins import ExplainableForecaster +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.utils.evaluation_functions import EvaluationFunctionType, get_evaluation_function from openstef_models.utils.loss_functions import ( ObjectiveFunctionType, diff --git a/packages/openstef-models/src/openstef_models/models/forecasting_model.py b/packages/openstef-models/src/openstef_models/models/forecasting_model.py index b55cbd9fb..3c2d1fe4c 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting_model.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting_model.py @@ -27,8 +27,8 @@ from openstef_core.datasets.timeseries_dataset import validate_horizons_present from openstef_core.exceptions import InsufficientlyCompleteError, NotFittedError from openstef_core.mixins import TransformPipeline -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_models.models.base_forecasting_model import BaseForecastingModel +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig class ModelFitResult(BaseModel): diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index 0d4a0f86b..d280884f8 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -213,7 +213,7 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob default=FeatureSelection(include=None, exclude=None), description="Feature selection for which features to clip.", ) - # TODO: Add sample weight method parameter + # TODO: Add sample weight method parameter like in EnsembleWorkflowConfig sample_weight_scale_percentile: int = Field( default=95, description="Percentile of target values used as scaling reference. " @@ -328,9 +328,7 @@ def create_forecasting_workflow( add_trivial_lags=config.model not in {"gblinear", "stacking", "learned_weights"}, # GBLinear uses only 7day lag. target_column=config.target_column, - custom_lags=[timedelta(days=7)] - if config.model in {"gblinear", "stacking", "learned_weights"} - else [], + custom_lags=[timedelta(days=7)] if config.model in {"gblinear", "stacking", "learned_weights"} else [], ), WindPowerFeatureAdder( windspeed_reference_column=config.wind_speed_column, diff --git a/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py b/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py index c9b9c0e47..f24c897fa 100644 --- a/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py +++ b/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py @@ -14,7 +14,7 @@ from openstef_core.datasets import TimeSeriesDataset from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ModelNotFoundError, SkipFitting -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_core.types import LeadTime, Q from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback from openstef_models.mixins.callbacks import WorkflowContext diff --git a/packages/openstef-models/tests/unit/models/test_forecasting_model.py b/packages/openstef-models/tests/unit/models/test_forecasting_model.py index 30dfa1352..9f59a3a30 100644 --- a/packages/openstef-models/tests/unit/models/test_forecasting_model.py +++ b/packages/openstef-models/tests/unit/models/test_forecasting_model.py @@ -13,13 +13,13 @@ from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import InsufficientlyCompleteError, NotFittedError from openstef_core.mixins import TransformPipeline -from openstef_core.mixins.forecaster import Forecaster, ForecasterConfig from openstef_core.testing import assert_timeseries_equal, create_synthetic_forecasting_dataset from openstef_core.types import LeadTime, Quantile, override from openstef_models.models.forecasting.constant_median_forecaster import ( ConstantMedianForecaster, ConstantMedianForecasterConfig, ) +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.models.forecasting_model import ForecastingModel from openstef_models.transforms.postprocessing.quantile_sorter import QuantileSorter from openstef_models.transforms.time_domain.lags_adder import LagsAdder From 0529bf81d47dbc6707e09f5beed53441f897e725 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 10:55:21 +0100 Subject: [PATCH 090/104] Use sample weight config in models Signed-off-by: Marnix van Lieshout --- .../test_ensemble_forecasting_model.py | 2 +- .../presets/forecasting_workflow.py | 29 +++++-------------- 2 files changed, 8 insertions(+), 23 deletions(-) diff --git a/packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py index 24c203544..f4883805a 100644 --- a/packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py @@ -67,7 +67,7 @@ def create_models( model=forecaster_name, # type: ignore quantiles=config.quantiles, horizons=config.horizons, - sample_weight_exponent=sample_weight_config.weight_exponent, + sample_weight_config=sample_weight_config, ) base_model = create_forecasting_workflow(config=model_config).model base_models[forecaster_name] = cast(ForecastingModel, base_model) diff --git a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py index d280884f8..03b5d377f 100644 --- a/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/presets/forecasting_workflow.py @@ -213,23 +213,12 @@ class ForecastingWorkflowConfig(BaseConfig): # PredictionJob default=FeatureSelection(include=None, exclude=None), description="Feature selection for which features to clip.", ) - # TODO: Add sample weight method parameter like in EnsembleWorkflowConfig - sample_weight_scale_percentile: int = Field( - default=95, - description="Percentile of target values used as scaling reference. " - "Values are normalized relative to this percentile before weighting.", - ) - sample_weight_exponent: float = Field( - default_factory=lambda data: 1.0 - if data.get("model") in {"gblinear", "lgbmlinear", "lgbm", "learned_weights", "stacking", "xgboost"} - else 0.0, - description="Exponent applied to scale the sample weights. " - "0=uniform weights, 1=linear scaling, >1=stronger emphasis on high values. " - "Note: Defaults to 1.0 for gblinear congestion models.", - ) - sample_weight_floor: float = Field( - default=0.1, - description="Minimum weight value to ensure all samples contribute to training.", + sample_weight_config: SampleWeightConfig = Field( + default_factory=lambda data: SampleWeightConfig(weight_exponent=1.0) + if data.get("model") == "gblinear" + else SampleWeightConfig(weight_exponent=0.0), + description="Sample weighting configuration. Controls how training samples are weighted. " + "Defaults to weight_exponent=1.0 for gblinear, 0.0 (uniform) for other models.", ) # Data splitting strategy @@ -357,11 +346,7 @@ def create_forecasting_workflow( Scaler(selection=Exclude(config.target_column), method="standard"), SampleWeighter( target_column=config.target_column, - config=SampleWeightConfig( - weight_exponent=config.sample_weight_exponent, - weight_floor=config.sample_weight_floor, - weight_scale_percentile=config.sample_weight_scale_percentile, - ), + config=config.sample_weight_config, ), EmptyFeatureRemover(), ] From d2ee03578901b10e4ab754d5f2a014a22ecedd49 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 11:02:05 +0100 Subject: [PATCH 091/104] Add importances to median forecaster Signed-off-by: Marnix van Lieshout --- .../test_ensemble_forecasting_model.py | 5 ++--- .../openstef_models/explainability/mixins.py | 2 +- .../models/forecasting/median_forecaster.py | 21 +++++++++++++++++++ 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py index f4883805a..2b183cbf2 100644 --- a/packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/integration/test_ensemble_forecasting_model.py @@ -3,7 +3,6 @@ # SPDX-License-Identifier: MPL-2.0 from datetime import timedelta -from typing import cast import numpy as np import pandas as pd @@ -57,7 +56,7 @@ def create_models( config: EnsembleWorkflowConfig, ) -> tuple[EnsembleForecastingModel, dict[str, ForecastingModel]]: - ensemble_model = cast(EnsembleForecastingModel, create_ensemble_workflow(config=config).model) + ensemble_model = create_ensemble_workflow(config=config).model base_models: dict[str, ForecastingModel] = {} for forecaster_name in config.base_models: @@ -70,7 +69,7 @@ def create_models( sample_weight_config=sample_weight_config, ) base_model = create_forecasting_workflow(config=model_config).model - base_models[forecaster_name] = cast(ForecastingModel, base_model) + base_models[forecaster_name] = base_model return ensemble_model, base_models diff --git a/packages/openstef-models/src/openstef_models/explainability/mixins.py b/packages/openstef-models/src/openstef_models/explainability/mixins.py index 4a29633b8..2e1fa81ca 100644 --- a/packages/openstef-models/src/openstef_models/explainability/mixins.py +++ b/packages/openstef-models/src/openstef_models/explainability/mixins.py @@ -18,7 +18,7 @@ from openstef_models.explainability.plotters.feature_importance_plotter import FeatureImportancePlotter -class ExplainableForecaster(ABC): # TODO: Inherit from Forecaster once it is moved to openstef-core? +class ExplainableForecaster(ABC): """Mixin for forecasters that can explain feature importance. Provides a standardized interface for accessing and visualizing feature diff --git a/packages/openstef-models/src/openstef_models/models/forecasting/median_forecaster.py b/packages/openstef-models/src/openstef_models/models/forecasting/median_forecaster.py index 35678185d..ef6ab0900 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting/median_forecaster.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting/median_forecaster.py @@ -339,3 +339,24 @@ def fit(self, data: ForecastInputDataset, data_val: ForecastInputDataset | None self.feature_importances_ = np.ones(len(self.feature_names_)) / (len(self.feature_names_) or 1.0) self.is_fitted_ = True + + @override + def predict_contributions(self, data: ForecastInputDataset, *, scale: bool = True) -> pd.DataFrame: + """Get feature contributions for each prediction. + + Since MedianForecaster equally weights all lag features via the median, + contributions are uniform across features. + + Args: + data: Input dataset for which to compute feature contributions. + scale: Whether to scale contributions to sum to the prediction value. + + Returns: + DataFrame with uniform contributions per lag feature. + """ + input_data = data.input_data(start=data.forecast_start) + return pd.DataFrame( + data=1.0, + index=input_data.index, + columns=[f"{data.target_column}_{quantile.format()}" for quantile in self.config.quantiles], + ) From bf18c16bc11564dbb6d4ff3802772e4d7b8060fa Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 11:06:41 +0100 Subject: [PATCH 092/104] Rename for consistency Signed-off-by: Marnix van Lieshout --- .../integrations/mlflow/ensemble_mlflow_storage_callback.py | 4 ++-- .../integrations/mlflow/mlflow_storage_callback.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py index 68d631e3f..64efb954f 100644 --- a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py +++ b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py @@ -27,12 +27,12 @@ from openstef_meta.models.ensemble_forecasting_model import EnsembleModelFitResult from openstef_meta.workflows import CustomEnsembleForecastingWorkflow, EnsembleForecastingCallback from openstef_models.explainability import ExplainableForecaster -from openstef_models.integrations.mlflow.mlflow_storage_callback import MLFlowStorageCallbackBase, metrics_to_dict +from openstef_models.integrations.mlflow.mlflow_storage_callback import BaseMLFlowStorageCallback, metrics_to_dict from openstef_models.mixins.callbacks import WorkflowContext from openstef_models.models.base_forecasting_model import BaseForecastingModel -class EnsembleMLFlowStorageCallback(MLFlowStorageCallbackBase, EnsembleForecastingCallback): +class EnsembleMLFlowStorageCallback(BaseMLFlowStorageCallback, EnsembleForecastingCallback): """MLFlow callback for ensemble forecasting workflows. Uses composition with MLFlowStorageCallbackBase for shared MLflow storage diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index 7218bfb61..1f3598afe 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -40,7 +40,7 @@ ) -class MLFlowStorageCallbackBase(BaseConfig): +class BaseMLFlowStorageCallback(BaseConfig): """Base configuration and shared utilities for MLflow storage callbacks. Provides common fields and helper methods used by both single-model and @@ -214,7 +214,7 @@ def _check_is_new_model_better( return False -class MLFlowStorageCallback(MLFlowStorageCallbackBase, ForecastingCallback): +class MLFlowStorageCallback(BaseMLFlowStorageCallback, ForecastingCallback): """MLFlow callback for logging forecasting workflow events.""" @override @@ -381,4 +381,4 @@ def metrics_to_dict(metrics: SubsetMetric, prefix: str) -> dict[str, float]: } -__all__ = ["MLFlowStorageCallback", "MLFlowStorageCallbackBase"] +__all__ = ["MLFlowStorageCallback", "BaseMLFlowStorageCallback"] From 704fc2ebadfa4e5ba4b9704f061505786a719118 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 11:30:40 +0100 Subject: [PATCH 093/104] Move base mlflow callback to separate file Signed-off-by: Marnix van Lieshout --- .gitignore | 4 +- .../ensemble_mlflow_storage_callback.py | 10 +- .../mlflow/base_mlflow_storage_callback.py | 222 ++++++++++++++++++ .../mlflow/mlflow_storage_callback.py | 211 +---------------- .../mlflow/test_mlflow_storage_callback.py | 2 +- 5 files changed, 241 insertions(+), 208 deletions(-) create mode 100644 packages/openstef-models/src/openstef_models/integrations/mlflow/base_mlflow_storage_callback.py diff --git a/.gitignore b/.gitignore index 47f061709..3712a6750 100644 --- a/.gitignore +++ b/.gitignore @@ -127,5 +127,5 @@ certificates/ benchmark_results*/ # Mlflow -mlflow -mlflow_artifacts_local \ No newline at end of file +/mlflow +/mlflow_artifacts_local \ No newline at end of file diff --git a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py index 64efb954f..c67332cd0 100644 --- a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py +++ b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py @@ -27,7 +27,7 @@ from openstef_meta.models.ensemble_forecasting_model import EnsembleModelFitResult from openstef_meta.workflows import CustomEnsembleForecastingWorkflow, EnsembleForecastingCallback from openstef_models.explainability import ExplainableForecaster -from openstef_models.integrations.mlflow.mlflow_storage_callback import BaseMLFlowStorageCallback, metrics_to_dict +from openstef_models.integrations.mlflow.base_mlflow_storage_callback import BaseMLFlowStorageCallback from openstef_models.mixins.callbacks import WorkflowContext from openstef_models.models.base_forecasting_model import BaseForecastingModel @@ -131,12 +131,12 @@ def on_fit_end( self._logger.info("Stored trained model for run %s", run_id) # Format the metrics for MLflow - metrics = metrics_to_dict(metrics=result.metrics_full, prefix="full_") - metrics.update(metrics_to_dict(metrics=result.metrics_train, prefix="train_")) + metrics = self.metrics_to_dict(metrics=result.metrics_full, prefix="full_") + metrics.update(self.metrics_to_dict(metrics=result.metrics_train, prefix="train_")) if result.metrics_val is not None: - metrics.update(metrics_to_dict(metrics=result.metrics_val, prefix="val_")) + metrics.update(self.metrics_to_dict(metrics=result.metrics_val, prefix="val_")) if result.metrics_test is not None: - metrics.update(metrics_to_dict(metrics=result.metrics_test, prefix="test_")) + metrics.update(self.metrics_to_dict(metrics=result.metrics_test, prefix="test_")) # Mark the run as finished self.storage.finalize_run(model_id=context.workflow.model_id, run_id=run_id, metrics=metrics) diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/base_mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/base_mlflow_storage_callback.py new file mode 100644 index 000000000..19510781d --- /dev/null +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/base_mlflow_storage_callback.py @@ -0,0 +1,222 @@ +# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project +# +# SPDX-License-Identifier: MPL-2.0 + +"""Base configuration and shared utilities for MLflow storage callbacks. + +Provides common fields, helper methods, and utility functions used by both +single-model and ensemble-model MLflow callbacks. +""" + +import logging +from datetime import timedelta +from typing import Any, override + +from mlflow.entities import Run +from pydantic import Field, PrivateAttr + +from openstef_beam.evaluation import SubsetMetric +from openstef_beam.evaluation.metric_providers import MetricDirection +from openstef_core.base_model import BaseConfig +from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset +from openstef_core.exceptions import ( + MissingColumnsError, + ModelNotFoundError, +) +from openstef_core.types import Q, QuantileOrGlobal +from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage +from openstef_models.models.base_forecasting_model import BaseForecastingModel + + +class BaseMLFlowStorageCallback(BaseConfig): + """Base configuration and shared utilities for MLflow storage callbacks. + + Provides common fields and helper methods used by both single-model and + ensemble-model MLflow callbacks. Not a callback itself — subclasses should + also inherit from the appropriate callback type. + """ + + storage: MLFlowStorage = Field(default_factory=MLFlowStorage) + + model_reuse_enable: bool = Field(default=True) + model_reuse_max_age: timedelta = Field(default=timedelta(days=7)) + + model_selection_enable: bool = Field(default=True) + model_selection_metric: tuple[QuantileOrGlobal, str, MetricDirection] = Field( + default=(Q(0.5), "R2", "higher_is_better"), + description="Metric to monitor for model performance when retraining.", + ) + model_selection_old_model_penalty: float = Field( + default=1.2, + description="Penalty to apply to the old model's metric to bias selection towards newer models.", + ) + + store_feature_importance_plot: bool = Field( + default=True, + description="Whether to store feature importance plots in MLflow artifacts if available.", + ) + + _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) + + @override + def model_post_init(self, context: Any) -> None: + pass + + def _find_run(self, model_id: str, run_name: str | None) -> Run | None: + """Find an MLflow run by model_id and optional run_name. + + Args: + model_id: The model identifier. + run_name: Optional specific run name to search for. + + Returns: + The MLflow Run object, or None if not found. + """ + if run_name is not None: + return self.storage.search_run(model_id=model_id, run_name=run_name) + + runs = self.storage.search_latest_runs(model_id=model_id) + return next(iter(runs), None) + + def _try_load_model( + self, + run_id: str, + model_id: str, + ) -> BaseForecastingModel | None: + """Try to load a model from MLflow, returning None on failure. + + Args: + run_id: The MLflow run ID. + model_id: The model identifier. + + Returns: + The loaded model, or None if loading failed. + """ + try: + old_model = self.storage.load_run_model(run_id=run_id, model_id=model_id) + except ModelNotFoundError: + self._logger.warning( + "Could not load model from previous run %s for model %s, skipping model selection", + run_id, + model_id, + ) + return None + + if not isinstance(old_model, BaseForecastingModel): + self._logger.warning( + "Loaded old model from run %s is not a BaseForecastingModel, skipping model selection", + run_id, + ) + return None + + return old_model + + def _try_evaluate_model( + self, + run_id: str, + old_model: BaseForecastingModel, + input_data: TimeSeriesDataset, + ) -> SubsetMetric | None: + """Try to evaluate a model, returning None on failure. + + Args: + run_id: The MLflow run ID (for logging). + old_model: The model to evaluate. + input_data: The dataset to evaluate on. + + Returns: + The evaluation metrics, or None if evaluation failed. + """ + try: + return old_model.score(input_data) + except (MissingColumnsError, ValueError) as e: + self._logger.warning( + "Could not evaluate old model from run %s, skipping model selection: %s", + run_id, + e, + ) + return None + + def _check_tags_compatible(self, run_tags: dict[str, str], new_tags: dict[str, str], run_id: str) -> bool: + """Check if model tags are compatible, excluding mlflow.runName. + + Returns: + True if tags are compatible, False otherwise. + """ + old_tags = {k: v for k, v in run_tags.items() if k != "mlflow.runName"} + + if old_tags == new_tags: + return True + + differences = { + k: (old_tags.get(k), new_tags.get(k)) + for k in old_tags.keys() | new_tags.keys() + if old_tags.get(k) != new_tags.get(k) + } + + self._logger.info( + "Model tags changed since run %s, skipping model selection. Changes: %s", + run_id, + differences, + ) + return False + + def _check_is_new_model_better( + self, + old_metrics: SubsetMetric, + new_metrics: SubsetMetric, + ) -> bool: + """Compare old and new model metrics to determine if the new model is better. + + Returns: + True if the new model improves on the monitored metric. + """ + quantile, metric_name, direction = self.model_selection_metric + + old_metric = old_metrics.get_metric(quantile=quantile, metric_name=metric_name) + new_metric = new_metrics.get_metric(quantile=quantile, metric_name=metric_name) + + if old_metric is None or new_metric is None: + self._logger.warning( + "Could not find %s metric for quantile %s in old or new model metrics, assuming improvement", + metric_name, + quantile, + ) + return True + + self._logger.info( + "Comparing old model %s metric %.5f to new model %s metric %.5f for quantile %s", + metric_name, + old_metric, + metric_name, + new_metric, + quantile, + ) + + match direction: + case "higher_is_better" if new_metric >= old_metric / self.model_selection_old_model_penalty: + return True + case "lower_is_better" if new_metric <= old_metric / self.model_selection_old_model_penalty: + return True + case _: + return False + + @staticmethod + def metrics_to_dict(metrics: SubsetMetric, prefix: str) -> dict[str, float]: + """Convert SubsetMetric to a flat dictionary for MLflow logging. + + Args: + metrics: The metrics to convert. + prefix: Prefix to add to each metric key (e.g. "full_", "train_"). + + Returns: + Flat dictionary mapping metric names to values. + """ + return { + f"{prefix}{quantile}_{metric_name}": value + for quantile, metrics_dict in metrics.metrics.items() + for metric_name, value in metrics_dict.items() + } + + +__all__ = ["BaseMLFlowStorageCallback"] diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index 1f3598afe..fd8c70eb9 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -4,33 +4,26 @@ """MLflow integration for tracking and storing forecasting workflows. -Provides storage and callback functionality to log model training runs, artifacts, +Provides callback functionality to log model training runs, artifacts, and metrics to MLflow. Automatically saves models, training data, and performance metrics for each forecasting workflow execution. """ -import logging -from datetime import UTC, datetime, timedelta -from typing import Any, cast, override +from datetime import UTC, datetime +from typing import cast, override -from mlflow.entities import Run -from pydantic import Field, PrivateAttr - -from openstef_beam.evaluation import SubsetMetric -from openstef_beam.evaluation.metric_providers import MetricDirection -from openstef_core.base_model import BaseConfig from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset from openstef_core.datasets.versioned_timeseries_dataset import ( VersionedTimeSeriesDataset, ) from openstef_core.exceptions import ( - MissingColumnsError, ModelNotFoundError, SkipFitting, ) -from openstef_core.types import Q, QuantileOrGlobal from openstef_models.explainability import ExplainableForecaster -from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage +from openstef_models.integrations.mlflow.base_mlflow_storage_callback import ( + BaseMLFlowStorageCallback, +) from openstef_models.mixins.callbacks import WorkflowContext from openstef_models.models.base_forecasting_model import BaseForecastingModel from openstef_models.models.forecasting_model import ModelFitResult @@ -40,180 +33,6 @@ ) -class BaseMLFlowStorageCallback(BaseConfig): - """Base configuration and shared utilities for MLflow storage callbacks. - - Provides common fields and helper methods used by both single-model and - ensemble-model MLflow callbacks. Not a callback itself — subclasses should - also inherit from the appropriate callback type. - """ - - storage: MLFlowStorage = Field(default_factory=MLFlowStorage) - - model_reuse_enable: bool = Field(default=True) - model_reuse_max_age: timedelta = Field(default=timedelta(days=7)) - - model_selection_enable: bool = Field(default=True) - model_selection_metric: tuple[QuantileOrGlobal, str, MetricDirection] = Field( - default=(Q(0.5), "R2", "higher_is_better"), - description="Metric to monitor for model performance when retraining.", - ) - model_selection_old_model_penalty: float = Field( - default=1.2, - description="Penalty to apply to the old model's metric to bias selection towards newer models.", - ) - - store_feature_importance_plot: bool = Field( - default=True, - description="Whether to store feature importance plots in MLflow artifacts if available.", - ) - - _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) - - @override - def model_post_init(self, context: Any) -> None: - pass - - def _find_run(self, model_id: str, run_name: str | None) -> Run | None: - """Find an MLflow run by model_id and optional run_name. - - Args: - model_id: The model identifier. - run_name: Optional specific run name to search for. - - Returns: - The MLflow Run object, or None if not found. - """ - if run_name is not None: - return self.storage.search_run(model_id=model_id, run_name=run_name) - - runs = self.storage.search_latest_runs(model_id=model_id) - return next(iter(runs), None) - - def _try_load_model( - self, - run_id: str, - model_id: str, - ) -> BaseForecastingModel | None: - """Try to load a model from MLflow, returning None on failure. - - Args: - run_id: The MLflow run ID. - model_id: The model identifier. - - Returns: - The loaded model, or None if loading failed. - """ - try: - old_model = self.storage.load_run_model(run_id=run_id, model_id=model_id) - except ModelNotFoundError: - self._logger.warning( - "Could not load model from previous run %s for model %s, skipping model selection", - run_id, - model_id, - ) - return None - - if not isinstance(old_model, BaseForecastingModel): - self._logger.warning( - "Loaded old model from run %s is not a BaseForecastingModel, skipping model selection", - run_id, - ) - return None - - return old_model - - def _try_evaluate_model( - self, - run_id: str, - old_model: BaseForecastingModel, - input_data: TimeSeriesDataset, - ) -> SubsetMetric | None: - """Try to evaluate a model, returning None on failure. - - Args: - run_id: The MLflow run ID (for logging). - old_model: The model to evaluate. - input_data: The dataset to evaluate on. - - Returns: - The evaluation metrics, or None if evaluation failed. - """ - try: - return old_model.score(input_data) - except (MissingColumnsError, ValueError) as e: - self._logger.warning( - "Could not evaluate old model from run %s, skipping model selection: %s", - run_id, - e, - ) - return None - - def _check_tags_compatible(self, run_tags: dict[str, str], new_tags: dict[str, str], run_id: str) -> bool: - """Check if model tags are compatible, excluding mlflow.runName. - - Returns: - True if tags are compatible, False otherwise. - """ - old_tags = {k: v for k, v in run_tags.items() if k != "mlflow.runName"} - - if old_tags == new_tags: - return True - - differences = { - k: (old_tags.get(k), new_tags.get(k)) - for k in old_tags.keys() | new_tags.keys() - if old_tags.get(k) != new_tags.get(k) - } - - self._logger.info( - "Model tags changed since run %s, skipping model selection. Changes: %s", - run_id, - differences, - ) - return False - - def _check_is_new_model_better( - self, - old_metrics: SubsetMetric, - new_metrics: SubsetMetric, - ) -> bool: - """Compare old and new model metrics to determine if the new model is better. - - Returns: - True if the new model improves on the monitored metric. - """ - quantile, metric_name, direction = self.model_selection_metric - - old_metric = old_metrics.get_metric(quantile=quantile, metric_name=metric_name) - new_metric = new_metrics.get_metric(quantile=quantile, metric_name=metric_name) - - if old_metric is None or new_metric is None: - self._logger.warning( - "Could not find %s metric for quantile %s in old or new model metrics, assuming improvement", - metric_name, - quantile, - ) - return True - - self._logger.info( - "Comparing old model %s metric %.5f to new model %s metric %.5f for quantile %s", - metric_name, - old_metric, - metric_name, - new_metric, - quantile, - ) - - match direction: - case "higher_is_better" if new_metric >= old_metric / self.model_selection_old_model_penalty: - return True - case "lower_is_better" if new_metric <= old_metric / self.model_selection_old_model_penalty: - return True - case _: - return False - - class MLFlowStorageCallback(BaseMLFlowStorageCallback, ForecastingCallback): """MLFlow callback for logging forecasting workflow events.""" @@ -286,12 +105,12 @@ def on_fit_end( self._logger.info("Stored trained model for run %s", run_id) # Format the metrics for MLflow - metrics = metrics_to_dict(metrics=result.metrics_full, prefix="full_") - metrics.update(metrics_to_dict(metrics=result.metrics_train, prefix="train_")) + metrics = self.metrics_to_dict(metrics=result.metrics_full, prefix="full_") + metrics.update(self.metrics_to_dict(metrics=result.metrics_train, prefix="train_")) if result.metrics_val is not None: - metrics.update(metrics_to_dict(metrics=result.metrics_val, prefix="val_")) + metrics.update(self.metrics_to_dict(metrics=result.metrics_val, prefix="val_")) if result.metrics_test is not None: - metrics.update(metrics_to_dict(metrics=result.metrics_test, prefix="test_")) + metrics.update(self.metrics_to_dict(metrics=result.metrics_test, prefix="test_")) # Mark the run as finished self.storage.finalize_run(model_id=context.workflow.model_id, run_id=run_id, metrics=metrics) @@ -373,12 +192,4 @@ def _run_model_selection(self, workflow: CustomForecastingWorkflow, result: Mode raise SkipFitting("New model did not improve monitored metric, skipping re-fit.") -def metrics_to_dict(metrics: SubsetMetric, prefix: str) -> dict[str, float]: - return { - f"{prefix}{quantile}_{metric_name}": value - for quantile, metrics_dict in metrics.metrics.items() - for metric_name, value in metrics_dict.items() - } - - -__all__ = ["MLFlowStorageCallback", "BaseMLFlowStorageCallback"] +__all__ = ["MLFlowStorageCallback"] diff --git a/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py b/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py index f24c897fa..50835b4cb 100644 --- a/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py +++ b/packages/openstef-models/tests/unit/integrations/mlflow/test_mlflow_storage_callback.py @@ -14,10 +14,10 @@ from openstef_core.datasets import TimeSeriesDataset from openstef_core.datasets.validated_datasets import ForecastDataset, ForecastInputDataset from openstef_core.exceptions import ModelNotFoundError, SkipFitting -from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_core.types import LeadTime, Q from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback from openstef_models.mixins.callbacks import WorkflowContext +from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig from openstef_models.models.forecasting_model import ForecastingModel, ModelFitResult from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow From f778c2aa607c87920fa6564ea7d768cd5ef471fd Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 11:34:13 +0100 Subject: [PATCH 094/104] Clean up docstring Signed-off-by: Marnix van Lieshout --- .../tests/unit/transforms/general/test_flagger.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/packages/openstef-models/tests/unit/transforms/general/test_flagger.py b/packages/openstef-models/tests/unit/transforms/general/test_flagger.py index b250099f4..0483494c4 100644 --- a/packages/openstef-models/tests/unit/transforms/general/test_flagger.py +++ b/packages/openstef-models/tests/unit/transforms/general/test_flagger.py @@ -40,7 +40,7 @@ def test_flagger__fit_transform( train_dataset: TimeSeriesDataset, test_dataset: TimeSeriesDataset, ): - """Test fit and transform flags correctly leaves other columns unchanged.""" + """Test fit and transform flags correctly.""" # Arrange flagger = Flagger(selection=FeatureSelection(include={"A", "B", "C"})) @@ -49,12 +49,11 @@ def test_flagger__fit_transform( transformed_dataset = flagger.transform(test_dataset) # Assert - # Column C should remain unchanged expected_df = pd.DataFrame( { "A": [1, 1], "B": [0, 1], - "C": [0, 0], # Unchanged + "C": [0, 0], }, index=test_dataset.index, ) From eefe21a1fb48eaa15dc370ea08378f054c7dcdeb Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 11:59:32 +0100 Subject: [PATCH 095/104] Add explainability to combiner models Signed-off-by: Marnix van Lieshout --- examples/benchmarks/liander_2024_ensemble.py | 4 +-- .../ensemble_mlflow_storage_callback.py | 7 +++++ .../forecast_combiners/forecast_combiner.py | 11 +++++++ .../learned_weights_combiner.py | 30 +++++++++++++++++++ .../forecast_combiners/stacking_combiner.py | 19 ++++++++++++ .../test_ensemble_mlflow_storage_callback.py | 5 ++++ .../models/test_ensemble_forecasting_model.py | 11 +++++++ 7 files changed, 85 insertions(+), 2 deletions(-) diff --git a/examples/benchmarks/liander_2024_ensemble.py b/examples/benchmarks/liander_2024_ensemble.py index 769db2183..5b89da479 100644 --- a/examples/benchmarks/liander_2024_ensemble.py +++ b/examples/benchmarks/liander_2024_ensemble.py @@ -64,7 +64,7 @@ BENCHMARK_FILTER: list[Liander2024Category] | None = None -USE_MLFLOW_STORAGE = False +USE_MLFLOW_STORAGE = True if USE_MLFLOW_STORAGE: storage = MLFlowStorage( @@ -116,7 +116,7 @@ start_time = time.time() create_liander2024_benchmark_runner( storage=LocalBenchmarkStorage(base_path=OUTPUT_PATH / model), - data_dir=Path("../data/liander2024-energy-forecasting-benchmark"), + data_dir=None, # Path("../data/liander2024-energy-forecasting-benchmark"), callbacks=[StrictExecutionCallback()], ).run( forecaster_factory=create_openstef4_preset_backtest_forecaster( diff --git a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py index c67332cd0..372cdf83c 100644 --- a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py +++ b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py @@ -27,6 +27,7 @@ from openstef_meta.models.ensemble_forecasting_model import EnsembleModelFitResult from openstef_meta.workflows import CustomEnsembleForecastingWorkflow, EnsembleForecastingCallback from openstef_models.explainability import ExplainableForecaster +from openstef_models.explainability.plotters.feature_importance_plotter import FeatureImportancePlotter from openstef_models.integrations.mlflow.base_mlflow_storage_callback import BaseMLFlowStorageCallback from openstef_models.mixins.callbacks import WorkflowContext from openstef_models.models.base_forecasting_model import BaseForecastingModel @@ -122,6 +123,12 @@ def on_fit_end( fig = forecaster.plot_feature_importances() fig.write_html(data_path / f"feature_importances_{name}.html") # pyright: ignore[reportUnknownMemberType] + # Store combiner feature importances (combiners have their own feature_importances property) + combiner_fi = model.combiner.feature_importances + if not combiner_fi.empty: + fig = FeatureImportancePlotter.plot(combiner_fi) + fig.write_html(data_path / "feature_importances_combiner.html") # pyright: ignore[reportUnknownMemberType] + # Store the trained model self.storage.save_run_model( model_id=context.workflow.model_id, diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index beee00679..04f5f28ab 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -120,6 +120,17 @@ def is_fitted(self) -> bool: """Indicates whether the final learner has been fitted.""" raise NotImplementedError("Subclasses must implement the is_fitted property.") + @property + @abstractmethod + def feature_importances(self) -> pd.DataFrame: + """Feature importances from the combiner's internal models. + + Returns a DataFrame with feature names as index and quantile columns, + with importances for each quantile the combiner was trained on. + Returns an empty DataFrame if no feature importances are available. + """ + raise NotImplementedError("Subclasses must implement the feature_importances property.") + @abstractmethod def predict_contributions( self, diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py index e83344add..c24ecc49a 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/learned_weights_combiner.py @@ -14,6 +14,7 @@ from abc import abstractmethod from typing import Literal, override +import numpy as np import pandas as pd from lightgbm import LGBMClassifier from pydantic import Field @@ -258,6 +259,7 @@ def fit( self._label_encoder.fit(data.forecaster_names) + feature_names: list[str] = [] for i, q in enumerate(self.quantiles): # Data preparation dataset = data.get_best_forecaster_labels(quantile=q) @@ -274,6 +276,9 @@ def fit( weights = compute_sample_weight("balanced", labels) * combined_data.sample_weight_series self.models[i].fit(X=input_data, y=labels, sample_weight=weights) # pyright: ignore[reportUnknownMemberType] + feature_names = list(input_data.columns) + + self.feature_names_ = feature_names self._is_fitted = True @staticmethod @@ -408,6 +413,31 @@ def _generate_contributions_quantile( weights.columns = [f"{col}_{Quantile(self.quantiles[model_index]).format()}" for col in weights.columns] return weights + @property + @override + def feature_importances(self) -> pd.DataFrame: + """Feature importances from the internal classifiers, per quantile. + + Returns a DataFrame with feature names as index and quantile columns, + with importances normalized to sum to 1.0 per quantile. + For classifiers without feature_importances_ (e.g. DummyClassifier), + uniform importances are used. + """ + importances: dict[str, np.ndarray] = {} + for i, q in enumerate(self.quantiles): + model = self.models[i] + if hasattr(model, "feature_importances_"): + raw = np.array(model.feature_importances_, dtype=float) # pyright: ignore[reportUnknownMemberType, reportUnknownArgumentType, reportAttributeAccessIssue] + elif hasattr(model, "coef_"): + raw = np.abs(np.array(model.coef_, dtype=float)).mean(axis=0) # pyright: ignore[reportUnknownMemberType, reportUnknownArgumentType, reportAttributeAccessIssue] + else: + raw = np.ones(len(self.feature_names_), dtype=float) + + total = raw.sum() + importances[Quantile(q).format()] = raw / total if total > 0 else raw + + return pd.DataFrame(importances, index=self.feature_names_) + @property @override def is_fitted(self) -> bool: diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index 1ce440216..9c53d03bd 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -236,6 +236,25 @@ def predict_contributions( return contributions + @property + @override + def feature_importances(self) -> pd.DataFrame: + """Feature importances from the internal regression models, per quantile. + + Delegates to each inner model's ``feature_importances`` property + (requires ``ExplainableForecaster``). Returns a DataFrame with feature + names as index and quantile columns. + """ + frames: list[pd.DataFrame] = [] + for model in self.models: + if isinstance(model, ExplainableForecaster): + frames.append(model.feature_importances) + + if not frames: + return pd.DataFrame() + + return pd.concat(frames, axis=1) + @property def is_fitted(self) -> bool: """Check the StackingForecastCombiner is fitted.""" diff --git a/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py b/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py index b53fdf163..eac268f8a 100644 --- a/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py +++ b/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py @@ -118,6 +118,11 @@ def predict( def is_fitted(self) -> bool: return self._is_fitted + @property + @override + def feature_importances(self) -> pd.DataFrame: + return pd.DataFrame() + @override def predict_contributions( self, diff --git a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py index 6422d9c2a..4be9e9950 100644 --- a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py @@ -101,6 +101,17 @@ def predict( def is_fitted(self) -> bool: return self._is_fitted + @property + def feature_importances(self) -> pd.DataFrame: + return pd.DataFrame() + + def predict_contributions( + self, + data: EnsembleForecastDataset, + additional_features: ForecastInputDataset | None = None, + ) -> pd.DataFrame: + return pd.DataFrame() + @pytest.fixture def sample_timeseries_dataset() -> TimeSeriesDataset: From 5d6a288ac1af363f6ad15f07ff4457f24b15b54a Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 13:06:45 +0100 Subject: [PATCH 096/104] Make combiners explainable Signed-off-by: Marnix van Lieshout --- .../mlflow/ensemble_mlflow_storage_callback.py | 8 ++++---- .../models/forecast_combiners/stacking_combiner.py | 7 +++---- .../tests/unit/models/test_ensemble_forecasting_model.py | 1 + 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py index 372cdf83c..b231cc228 100644 --- a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py +++ b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py @@ -123,10 +123,10 @@ def on_fit_end( fig = forecaster.plot_feature_importances() fig.write_html(data_path / f"feature_importances_{name}.html") # pyright: ignore[reportUnknownMemberType] - # Store combiner feature importances (combiners have their own feature_importances property) - combiner_fi = model.combiner.feature_importances - if not combiner_fi.empty: - fig = FeatureImportancePlotter.plot(combiner_fi) + # Store combiner feature importances + combiner_feature_importances = model.combiner.feature_importances + if not combiner_feature_importances.empty: + fig = FeatureImportancePlotter.plot(combiner_feature_importances) fig.write_html(data_path / "feature_importances_combiner.html") # pyright: ignore[reportUnknownMemberType] # Store the trained model diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py index 9c53d03bd..13fb5ed05 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/stacking_combiner.py @@ -245,10 +245,9 @@ def feature_importances(self) -> pd.DataFrame: (requires ``ExplainableForecaster``). Returns a DataFrame with feature names as index and quantile columns. """ - frames: list[pd.DataFrame] = [] - for model in self.models: - if isinstance(model, ExplainableForecaster): - frames.append(model.feature_importances) + frames: list[pd.DataFrame] = [ + model.feature_importances for model in self.models if isinstance(model, ExplainableForecaster) + ] if not frames: return pd.DataFrame() diff --git a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py index 4be9e9950..601561554 100644 --- a/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py +++ b/packages/openstef-meta/tests/unit/models/test_ensemble_forecasting_model.py @@ -105,6 +105,7 @@ def is_fitted(self) -> bool: def feature_importances(self) -> pd.DataFrame: return pd.DataFrame() + @override def predict_contributions( self, data: EnsembleForecastDataset, From fe633498485f37d72aa1f908b411fb8b5eab71dc Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 13:07:43 +0100 Subject: [PATCH 097/104] Delete empty folder Signed-off-by: Marnix van Lieshout --- .../src/openstef_meta/models/forecasting/__init__.py | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py diff --git a/packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py b/packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py deleted file mode 100644 index b02e265cd..000000000 --- a/packages/openstef-meta/src/openstef_meta/models/forecasting/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""This module provides meta-forecasting models.""" From 8c32d2fb0326d6025a6683db861ad7d5312dbb6e Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 15:02:16 +0100 Subject: [PATCH 098/104] Use protocols Signed-off-by: Marnix van Lieshout --- .../benchmarking/baselines/openstef4.py | 19 +- .../openstef_meta/integrations/__init__.py | 4 - .../integrations/mlflow/__init__.py | 9 - .../ensemble_mlflow_storage_callback.py | 236 --------------- .../models/ensemble_forecasting_model.py | 67 +---- .../forecast_combiners/forecast_combiner.py | 34 +-- .../presets/forecasting_workflow.py | 18 +- .../src/openstef_meta/workflows/__init__.py | 11 - .../custom_ensemble_forecasting_workflow.py | 145 --------- .../test_ensemble_mlflow_storage_callback.py | 47 ++- .../test_forecast_combiner.py | 21 +- .../integrations/mlflow/__init__.py | 13 +- .../mlflow/base_mlflow_storage_callback.py | 222 -------------- .../mlflow/mlflow_storage_callback.py | 279 ++++++++++++++++-- .../src/openstef_models/models/__init__.py | 2 - .../models/base_forecasting_model.py | 155 ---------- .../models/forecasting_model.py | 126 ++++++-- .../workflows/custom_forecasting_workflow.py | 9 +- 18 files changed, 449 insertions(+), 968 deletions(-) delete mode 100644 packages/openstef-meta/src/openstef_meta/integrations/__init__.py delete mode 100644 packages/openstef-meta/src/openstef_meta/integrations/mlflow/__init__.py delete mode 100644 packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py delete mode 100644 packages/openstef-meta/src/openstef_meta/workflows/__init__.py delete mode 100644 packages/openstef-meta/src/openstef_meta/workflows/custom_ensemble_forecasting_workflow.py delete mode 100644 packages/openstef-models/src/openstef_models/integrations/mlflow/base_mlflow_storage_callback.py delete mode 100644 packages/openstef-models/src/openstef_models/models/base_forecasting_model.py diff --git a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py index b659a98c0..1590f2893 100644 --- a/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py +++ b/packages/openstef-beam/src/openstef_beam/benchmarking/baselines/openstef4.py @@ -33,15 +33,11 @@ from openstef_core.types import Q from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_meta.presets import EnsembleWorkflowConfig, create_ensemble_workflow -from openstef_meta.workflows import CustomEnsembleForecastingWorkflow -from openstef_models.models.forecasting_model import ForecastingModel from openstef_models.presets import ForecastingWorkflowConfig from openstef_models.workflows.custom_forecasting_workflow import ( CustomForecastingWorkflow, ) -ForecastingWorkflow = CustomForecastingWorkflow | CustomEnsembleForecastingWorkflow - class WorkflowCreationContext(BaseConfig): """Context information for workflow execution within backtesting.""" @@ -62,8 +58,8 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): config: BacktestForecasterConfig = Field( description="Configuration for the backtest forecaster interface", ) - workflow_factory: Callable[[WorkflowCreationContext], ForecastingWorkflow] = Field( - description="Factory function that creates a new forecasting workflow instance", + workflow_factory: Callable[[WorkflowCreationContext], CustomForecastingWorkflow] = Field( + description="Factory function that creates a new CustomForecastingWorkflow instance", ) cache_dir: Path = Field( description="Directory to use for caching model artifacts during backtesting", @@ -77,7 +73,7 @@ class OpenSTEF4BacktestForecaster(BaseModel, BacktestForecasterMixin): description="When True, saves base forecaster prediction contributions for ensemble models", ) - _workflow: ForecastingWorkflow | None = PrivateAttr(default=None) + _workflow: CustomForecastingWorkflow | None = PrivateAttr(default=None) _is_flatliner_detected: bool = PrivateAttr(default=False) _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) @@ -98,7 +94,10 @@ def quantiles(self) -> list[Q]: if isinstance(self._workflow.model, EnsembleForecastingModel): name = self._workflow.model.forecaster_names[0] return self._workflow.model.forecasters[name].config.quantiles - return self._workflow.model.forecaster.config.quantiles + if self._workflow.model.forecaster is not None: + return self._workflow.model.forecaster.config.quantiles + msg = f"Cannot determine quantiles from model type {type(self._workflow.model)}" + raise TypeError(msg) @override def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: @@ -128,7 +127,7 @@ def fit(self, data: RestrictedHorizonVersionedTimeSeries) -> None: self._workflow = workflow - if self.debug and isinstance(self._workflow.model, ForecastingModel): + if self.debug: id_str = data.horizon.strftime("%Y%m%d%H%M%S") self._workflow.model.prepare_input(training_data).to_parquet( path=self.cache_dir / f"debug_{id_str}_prepared_training.parquet" @@ -190,7 +189,7 @@ def _preset_target_forecaster_factory( # Factory function that creates a forecaster for a given target. prefix = context.run_name - def _create_workflow(context: WorkflowCreationContext) -> ForecastingWorkflow: + def _create_workflow(context: WorkflowCreationContext) -> CustomForecastingWorkflow: # Create a new workflow instance with fresh model. location = LocationConfig( name=target.name, diff --git a/packages/openstef-meta/src/openstef_meta/integrations/__init__.py b/packages/openstef-meta/src/openstef_meta/integrations/__init__.py deleted file mode 100644 index 74b18f446..000000000 --- a/packages/openstef-meta/src/openstef_meta/integrations/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Integration modules for external services.""" diff --git a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/__init__.py b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/__init__.py deleted file mode 100644 index 19ba099a5..000000000 --- a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""MLflow integration for ensemble forecasting models.""" - -from openstef_meta.integrations.mlflow.ensemble_mlflow_storage_callback import EnsembleMLFlowStorageCallback - -__all__ = ["EnsembleMLFlowStorageCallback"] diff --git a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py b/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py deleted file mode 100644 index b231cc228..000000000 --- a/packages/openstef-meta/src/openstef_meta/integrations/mlflow/ensemble_mlflow_storage_callback.py +++ /dev/null @@ -1,236 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""MLflow storage callback for ensemble forecasting models. - -Provides MLflow storage and tracking for ensemble forecasting workflows using -composition with MLFlowStorageCallbackBase rather than inheriting from -MLFlowStorageCallback. This avoids conflicting generic type parameters and -keeps the callback fully type-safe. - -Ensemble-specific behavior: -- Logs combiner hyperparameters as the primary hyperparams -- Logs per-forecaster hyperparameters with name-prefixed keys -- Stores feature importance plots for each explainable forecaster component -""" - -import logging -from datetime import UTC, datetime -from typing import cast, override - -from pydantic import PrivateAttr - -from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset -from openstef_core.datasets.versioned_timeseries_dataset import VersionedTimeSeriesDataset -from openstef_core.exceptions import ModelNotFoundError, SkipFitting -from openstef_meta.models.ensemble_forecasting_model import EnsembleModelFitResult -from openstef_meta.workflows import CustomEnsembleForecastingWorkflow, EnsembleForecastingCallback -from openstef_models.explainability import ExplainableForecaster -from openstef_models.explainability.plotters.feature_importance_plotter import FeatureImportancePlotter -from openstef_models.integrations.mlflow.base_mlflow_storage_callback import BaseMLFlowStorageCallback -from openstef_models.mixins.callbacks import WorkflowContext -from openstef_models.models.base_forecasting_model import BaseForecastingModel - - -class EnsembleMLFlowStorageCallback(BaseMLFlowStorageCallback, EnsembleForecastingCallback): - """MLFlow callback for ensemble forecasting workflows. - - Uses composition with MLFlowStorageCallbackBase for shared MLflow storage - configuration and utility methods, combined with EnsembleForecastingCallback - for properly-typed ensemble workflow hooks. - - Handles EnsembleForecastingModel instances by: - - Logging combiner hyperparameters as the primary model hyperparams - - Logging per-forecaster hyperparameters with name-prefixed keys - - Storing feature importance plots for each explainable base forecaster - """ - - _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) - - @override - def on_fit_start( - self, - context: WorkflowContext[CustomEnsembleForecastingWorkflow], - data: VersionedTimeSeriesDataset | TimeSeriesDataset, - ) -> None: - """Check model reuse before fitting. - - Raises: - SkipFitting: If a recent model already exists. - """ - if not self.model_reuse_enable: - return - - run = self._find_run(model_id=context.workflow.model_id, run_name=context.workflow.run_name) - - if run is not None: - now = datetime.now(tz=UTC) - end_time_millis = cast(float | None, run.info.end_time) - run_end_datetime = ( - datetime.fromtimestamp(end_time_millis / 1000, tz=UTC) if end_time_millis is not None else None - ) - self._logger.info( - "Found previous MLflow run %s for model %s ended at %s", - cast(str, run.info.run_id), - context.workflow.model_id, - run_end_datetime, - ) - if run_end_datetime is not None and (now - run_end_datetime) <= self.model_reuse_max_age: - raise SkipFitting("Model is recent enough, skipping re-fit.") - - @override - def on_fit_end( - self, - context: WorkflowContext[CustomEnsembleForecastingWorkflow], - result: EnsembleModelFitResult, - ) -> None: - """Store ensemble model, hyperparams, artifacts, and metrics to MLflow.""" - if self.model_selection_enable: - self._run_model_selection(workflow=context.workflow, result=result) - - model = context.workflow.model - - # Create a new run with combiner hyperparameters - run = self.storage.create_run( - model_id=context.workflow.model_id, - tags=model.tags, - hyperparams=model.combiner.config.hyperparams, - run_name=context.workflow.run_name, - experiment_tags=context.workflow.experiment_tags, - ) - run_id: str = run.info.run_id - self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) - - # Log per-forecaster hyperparameters - for name, forecaster in model.forecasters.items(): - hyperparams = forecaster.hyperparams - prefixed_params = {f"{name}.{k}": str(v) for k, v in hyperparams.model_dump().items()} - self.storage.log_hyperparams(run_id=run_id, params=prefixed_params) - self._logger.debug("Logged hyperparams for forecaster '%s' in run %s", name, run_id) - - # Store the model input - run_path = self.storage.get_artifacts_path(model_id=context.workflow.model_id, run_id=run_id) - data_path = run_path / self.storage.data_path - data_path.mkdir(parents=True, exist_ok=True) - result.input_dataset.to_parquet(path=data_path / "data.parquet") - self._logger.info("Stored training data at %s for run %s", data_path, run_id) - - # Store feature importance plots for each explainable forecaster - if self.store_feature_importance_plot: - for name, forecaster in model.forecasters.items(): - if isinstance(forecaster, ExplainableForecaster): - fig = forecaster.plot_feature_importances() - fig.write_html(data_path / f"feature_importances_{name}.html") # pyright: ignore[reportUnknownMemberType] - - # Store combiner feature importances - combiner_feature_importances = model.combiner.feature_importances - if not combiner_feature_importances.empty: - fig = FeatureImportancePlotter.plot(combiner_feature_importances) - fig.write_html(data_path / "feature_importances_combiner.html") # pyright: ignore[reportUnknownMemberType] - - # Store the trained model - self.storage.save_run_model( - model_id=context.workflow.model_id, - run_id=run_id, - model=context.workflow.model, - ) - self._logger.info("Stored trained model for run %s", run_id) - - # Format the metrics for MLflow - metrics = self.metrics_to_dict(metrics=result.metrics_full, prefix="full_") - metrics.update(self.metrics_to_dict(metrics=result.metrics_train, prefix="train_")) - if result.metrics_val is not None: - metrics.update(self.metrics_to_dict(metrics=result.metrics_val, prefix="val_")) - if result.metrics_test is not None: - metrics.update(self.metrics_to_dict(metrics=result.metrics_test, prefix="test_")) - - # Mark the run as finished - self.storage.finalize_run(model_id=context.workflow.model_id, run_id=run_id, metrics=metrics) - self._logger.info("Stored MLflow run %s for model %s", run_id, context.workflow.model_id) - - @override - def on_predict_start( - self, - context: WorkflowContext[CustomEnsembleForecastingWorkflow], - data: VersionedTimeSeriesDataset | TimeSeriesDataset, - ): - """Load ensemble model from MLflow for prediction. - - Raises: - ModelNotFoundError: If no model run is found. - """ - if context.workflow.model.is_fitted: - return - - run = self._find_run(model_id=context.workflow.model_id, run_name=context.workflow.run_name) - - if run is None: - raise ModelNotFoundError(model_id=context.workflow.model_id) - - run_id: str = run.info.run_id - old_model = self.storage.load_run_model(run_id=run_id, model_id=context.workflow.model_id) - - if not isinstance(old_model, BaseForecastingModel): - self._logger.warning( - "Loaded model from run %s is not a BaseForecastingModel, cannot use for prediction", - cast(str, run.info.run_id), - ) - return - - context.workflow.model = old_model # pyright: ignore[reportAttributeAccessIssue] - self._logger.info( - "Loaded model from MLflow run %s for model %s", - run_id, - context.workflow.model_id, - ) - - def _run_model_selection(self, workflow: CustomEnsembleForecastingWorkflow, result: EnsembleModelFitResult) -> None: - """Compare new ensemble model against the previous best and keep the better one. - - Raises: - SkipFitting: If the new model does not improve on the monitored metric. - """ - run = self._find_run(model_id=workflow.model_id, run_name=None) - if run is None: - return - - run_id = cast(str, run.info.run_id) - - if not self._check_tags_compatible( - run_tags=run.data.tags, # pyright: ignore[reportUnknownMemberType, reportUnknownArgumentType] - new_tags=workflow.model.tags, - run_id=run_id, - ): - return - - new_model = workflow.model - new_metrics = result.metrics_full - - old_model = self._try_load_model(run_id=run_id, model_id=workflow.model_id) - - if old_model is None: - return - - old_metrics = self._try_evaluate_model( - run_id=run_id, - old_model=old_model, - input_data=result.input_dataset, - ) - - if old_metrics is None: - return - - if self._check_is_new_model_better(old_metrics=old_metrics, new_metrics=new_metrics): - workflow.model = new_model # pyright: ignore[reportAttributeAccessIssue] - else: - workflow.model = old_model # pyright: ignore[reportAttributeAccessIssue] - self._logger.info( - "New model did not improve %s metric from previous run %s, reusing old model", - self.model_selection_metric, - run_id, - ) - raise SkipFitting("New model did not improve monitored metric, skipping re-fit.") - - -__all__ = ["EnsembleMLFlowStorageCallback"] diff --git a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py index d6854f9fb..a321e9aeb 100644 --- a/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py +++ b/packages/openstef-meta/src/openstef_meta/models/ensemble_forecasting_model.py @@ -17,8 +17,6 @@ import pandas as pd from pydantic import Field, PrivateAttr -from openstef_beam.evaluation import SubsetMetric -from openstef_core.base_model import BaseModel from openstef_core.datasets import ( ForecastDataset, ForecastInputDataset, @@ -29,66 +27,23 @@ from openstef_core.exceptions import NotFittedError from openstef_core.mixins import TransformPipeline from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner -from openstef_models.models.base_forecasting_model import BaseForecastingModel from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig -from openstef_models.models.forecasting_model import ModelFitResult +from openstef_models.models.forecasting_model import ForecastingModel, ModelFitResult logger = logging.getLogger(__name__) -class EnsembleModelFitResult(BaseModel): - """Fit result for EnsembleForecastingModel containing details for both forecasters and combiner.""" +class EnsembleModelFitResult(ModelFitResult): + """Fit result for EnsembleForecastingModel. - forecaster_fit_results: dict[str, ModelFitResult] = Field(description="ModelFitResult for each base forecaster") - - combiner_fit_result: ModelFitResult = Field(description="ModelFitResult for the ForecastCombiner") - - # Make compatible with ModelFitResult interface - @property - def input_dataset(self) -> EnsembleForecastDataset: - """Returns the input dataset used for fitting the combiner.""" - return cast( - "EnsembleForecastDataset", - self.combiner_fit_result.input_dataset, - ) - - @property - def input_data_train(self) -> ForecastInputDataset: - """Returns the training input data used for fitting the combiner.""" - return self.combiner_fit_result.input_data_train - - @property - def input_data_val(self) -> ForecastInputDataset | None: - """Returns the validation input data used for fitting the combiner.""" - return self.combiner_fit_result.input_data_val - - @property - def input_data_test(self) -> ForecastInputDataset | None: - """Returns the test input data used for fitting the combiner.""" - return self.combiner_fit_result.input_data_test - - @property - def metrics_train(self) -> SubsetMetric: - """Returns the full metrics calculated during combiner fitting.""" - return self.combiner_fit_result.metrics_train - - @property - def metrics_val(self) -> SubsetMetric | None: - """Returns the full metrics calculated during combiner fitting.""" - return self.combiner_fit_result.metrics_val - - @property - def metrics_test(self) -> SubsetMetric | None: - """Returns the full metrics calculated during combiner fitting.""" - return self.combiner_fit_result.metrics_test + Extends ModelFitResult with per-forecaster details. The base class fields + (input_dataset, metrics_*, etc.) represent the combiner's fit results. + """ - @property - def metrics_full(self) -> SubsetMetric: - """Returns the full metrics calculated during combiner fitting.""" - return self.combiner_fit_result.metrics_full + forecaster_fit_results: dict[str, ModelFitResult] = Field(description="ModelFitResult for each base forecaster") -class EnsembleForecastingModel(BaseForecastingModel): +class EnsembleForecastingModel(ForecastingModel): """Complete forecasting pipeline combining preprocessing, prediction, and postprocessing. Orchestrates the full forecasting workflow by managing feature engineering, @@ -174,7 +129,7 @@ class EnsembleForecastingModel(BaseForecastingModel): _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) @property - def config(self) -> list[ForecasterConfig]: + def config(self) -> list[ForecasterConfig]: # pyright: ignore[reportIncompatibleMethodOverride] """Returns the configuration of the underlying forecaster.""" return [x.config for x in self.forecasters.values()] @@ -236,7 +191,7 @@ def fit( return EnsembleModelFitResult( forecaster_fit_results=forecaster_fit_results, - combiner_fit_result=combiner_fit_result, + **combiner_fit_result.model_dump(), ) @staticmethod @@ -464,7 +419,7 @@ def _predict_forecasters( return EnsembleForecastDataset.from_forecast_datasets(predictions, target_series=data.data[self.target_column]) - def prepare_input( + def prepare_input( # pyright: ignore[reportIncompatibleMethodOverride] self, data: TimeSeriesDataset, forecaster_name: str = "", diff --git a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py index 04f5f28ab..7ab11eefd 100644 --- a/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py +++ b/packages/openstef-meta/src/openstef_meta/models/forecast_combiners/forecast_combiner.py @@ -18,6 +18,7 @@ from openstef_core.datasets.validated_datasets import EnsembleForecastDataset from openstef_core.mixins import HyperParams, Predictor from openstef_core.types import LeadTime, Quantile +from openstef_models.explainability import ExplainableForecaster class ForecastCombinerConfig(BaseConfig): @@ -75,8 +76,14 @@ def with_horizon(self, horizon: LeadTime) -> Self: return self.model_copy(update={"horizons": [horizon]}) -class ForecastCombiner(Predictor[EnsembleForecastDataset, ForecastDataset]): - """Combines base Forecaster predictions for each quantile into final predictions.""" +class ForecastCombiner(Predictor[EnsembleForecastDataset, ForecastDataset], ExplainableForecaster): + """Combines base Forecaster predictions for each quantile into final predictions. + + Inherits from ExplainableForecaster to provide feature importance and + visualization capabilities. The ``predict_contributions`` method uses + combiner-specific signatures (EnsembleForecastDataset) rather than the + ExplainableForecaster signature (ForecastInputDataset). + """ config: ForecastCombinerConfig @@ -120,31 +127,22 @@ def is_fitted(self) -> bool: """Indicates whether the final learner has been fitted.""" raise NotImplementedError("Subclasses must implement the is_fitted property.") - @property - @abstractmethod - def feature_importances(self) -> pd.DataFrame: - """Feature importances from the combiner's internal models. - - Returns a DataFrame with feature names as index and quantile columns, - with importances for each quantile the combiner was trained on. - Returns an empty DataFrame if no feature importances are available. - """ - raise NotImplementedError("Subclasses must implement the feature_importances property.") - @abstractmethod - def predict_contributions( + def predict_contributions( # pyright: ignore[reportIncompatibleMethodOverride] self, data: EnsembleForecastDataset, additional_features: ForecastInputDataset | None = None, ) -> pd.DataFrame: - """Generate final predictions based on base Forecaster predictions. + """Generate contribution predictions based on base forecaster predictions. + + Note: This overrides ExplainableForecaster.predict_contributions with a + combiner-specific signature using EnsembleForecastDataset. Args: data: EnsembleForecastDataset containing base Forecaster predictions. - data_val: Optional EnsembleForecastDataset for validation during prediction. Will be ignored additional_features: Optional ForecastInputDataset containing additional features for the final learner. Returns: - ForecastDataset containing the final contributions. + DataFrame containing the feature contributions. """ - raise NotImplementedError("Subclasses must implement the predict method.") + raise NotImplementedError("Subclasses must implement the predict_contributions method.") diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index e762fa949..58df8371e 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -23,7 +23,6 @@ from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset from openstef_core.mixins.transform import Transform, TransformPipeline from openstef_core.types import LeadTime, Q, Quantile, QuantileOrGlobal -from openstef_meta.integrations.mlflow import EnsembleMLFlowStorageCallback from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel from openstef_meta.models.forecast_combiners.learned_weights_combiner import ( LGBMCombinerHyperParams, @@ -35,8 +34,7 @@ from openstef_meta.models.forecast_combiners.stacking_combiner import ( StackingCombiner, ) -from openstef_meta.workflows import CustomEnsembleForecastingWorkflow, EnsembleForecastingCallback -from openstef_models.integrations.mlflow import MLFlowStorage +from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback from openstef_models.mixins.model_serializer import ModelIdentifier from openstef_models.models.forecasting.gblinear_forecaster import GBLinearForecaster from openstef_models.models.forecasting.lgbm_forecaster import LGBMForecaster @@ -65,6 +63,10 @@ ) from openstef_models.utils.data_split import DataSplitter from openstef_models.utils.feature_selection import Exclude, FeatureSelection, Include +from openstef_models.workflows.custom_forecasting_workflow import ( + CustomForecastingWorkflow, + ForecastingCallback, +) if TYPE_CHECKING: from openstef_models.models.forecasting.forecaster import Forecaster @@ -326,14 +328,14 @@ def feature_standardizers(config: EnsembleWorkflowConfig) -> list[Transform[Time ) -def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomEnsembleForecastingWorkflow: # noqa: C901, PLR0912 +def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastingWorkflow: # noqa: C901, PLR0912 """Create an ensemble forecasting workflow from configuration. Args: config (EnsembleWorkflowConfig): Configuration for the ensemble workflow. Returns: - CustomEnsembleForecastingWorkflow: Configured ensemble forecasting workflow. + CustomForecastingWorkflow: Configured ensemble forecasting workflow. Raises: ValueError: If an unsupported base model or combiner type is specified. @@ -491,10 +493,10 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomEnsembleFo **config.tags, } - callbacks: list[EnsembleForecastingCallback] = [] + callbacks: list[ForecastingCallback] = [] if config.mlflow_storage is not None: callbacks.append( - EnsembleMLFlowStorageCallback( + MLFlowStorageCallback( storage=config.mlflow_storage, model_reuse_enable=config.model_reuse_enable, model_reuse_max_age=config.model_reuse_max_age, @@ -504,7 +506,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomEnsembleFo ) ) - return CustomEnsembleForecastingWorkflow( + return CustomForecastingWorkflow( model=EnsembleForecastingModel( common_preprocessing=common_preprocessing, model_specific_preprocessing=model_specific_preprocessing, diff --git a/packages/openstef-meta/src/openstef_meta/workflows/__init__.py b/packages/openstef-meta/src/openstef_meta/workflows/__init__.py deleted file mode 100644 index e9dff0fee..000000000 --- a/packages/openstef-meta/src/openstef_meta/workflows/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 -"""Workflow orchestration for ensemble forecasting models.""" - -from openstef_meta.workflows.custom_ensemble_forecasting_workflow import ( - CustomEnsembleForecastingWorkflow, - EnsembleForecastingCallback, -) - -__all__ = ["CustomEnsembleForecastingWorkflow", "EnsembleForecastingCallback"] diff --git a/packages/openstef-meta/src/openstef_meta/workflows/custom_ensemble_forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/workflows/custom_ensemble_forecasting_workflow.py deleted file mode 100644 index 741763d6b..000000000 --- a/packages/openstef-meta/src/openstef_meta/workflows/custom_ensemble_forecasting_workflow.py +++ /dev/null @@ -1,145 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""High-level workflow orchestration for ensemble forecasting operations. - -Provides a complete ensemble forecasting workflow that combines model management, -callback execution, and optional model persistence. Acts as the main entry point -for production ensemble forecasting systems. -""" - -import logging -from datetime import datetime - -from pydantic import Field, PrivateAttr - -from openstef_core.base_model import BaseModel -from openstef_core.datasets import TimeSeriesDataset, VersionedTimeSeriesDataset -from openstef_core.datasets.validated_datasets import ForecastDataset -from openstef_core.exceptions import NotFittedError, SkipFitting -from openstef_meta.models.ensemble_forecasting_model import ( - EnsembleForecastingModel, - EnsembleModelFitResult, -) -from openstef_models.mixins import ModelIdentifier, PredictorCallback -from openstef_models.mixins.callbacks import WorkflowContext - - -class EnsembleForecastingCallback( - PredictorCallback[ - "CustomEnsembleForecastingWorkflow", - VersionedTimeSeriesDataset | TimeSeriesDataset, - EnsembleModelFitResult, - ForecastDataset, - ] -): - """Callback interface for monitoring ensemble forecasting workflow lifecycle events. - - Similar to ForecastingCallback but parameterized with EnsembleModelFitResult - instead of ModelFitResult, giving callbacks access to the full ensemble fit - result including per-forecaster and combiner results. - - All methods have default no-op implementations, so subclasses only need - to override the specific events they care about. - """ - - -class CustomEnsembleForecastingWorkflow(BaseModel): - """Complete ensemble forecasting workflow with model management and lifecycle hooks. - - Orchestrates the full ensemble forecasting process by combining an - EnsembleForecastingModel with callback execution. Provides the main - interface for production ensemble forecasting systems. - - Invariants: - - Callbacks are executed at appropriate lifecycle stages - - Model fitting returns EnsembleModelFitResult with per-forecaster details - - Prediction delegates to the underlying EnsembleForecastingModel - """ - - model: EnsembleForecastingModel = Field(description="The ensemble forecasting model to use.") - callbacks: list[EnsembleForecastingCallback] = Field( - default_factory=list[EnsembleForecastingCallback], - description="List of callbacks to execute during workflow events.", - ) - model_id: ModelIdentifier = Field(...) - run_name: str | None = Field(default=None, description="Optional name for this workflow run.") - experiment_tags: dict[str, str] = Field( - default_factory=dict, - description="Optional metadata tags for experiment tracking.", - ) - - _logger: logging.Logger = PrivateAttr(default_factory=lambda: logging.getLogger(__name__)) - - def fit( - self, - data: TimeSeriesDataset, - data_val: TimeSeriesDataset | None = None, - data_test: TimeSeriesDataset | None = None, - ) -> EnsembleModelFitResult | None: - """Train the ensemble forecasting model with callback execution. - - Executes the complete training workflow including pre-fit callbacks, - model training, and post-fit callbacks. Returns the full ensemble - fit result with per-forecaster and combiner details. - - Args: - data: Training dataset for the forecasting model. - data_val: Optional validation dataset for model tuning. - data_test: Optional test dataset for final evaluation. - - Returns: - EnsembleModelFitResult containing training metrics for each - base forecaster and the combiner, or None if fitting was skipped. - """ - result: EnsembleModelFitResult | None = None - context: WorkflowContext[CustomEnsembleForecastingWorkflow] = WorkflowContext(workflow=self) - - try: - for callback in self.callbacks: - callback.on_fit_start(context=context, data=data) - - result = self.model.fit(data=data, data_val=data_val, data_test=data_test) - - for callback in self.callbacks: - callback.on_fit_end(context=context, result=result) - except SkipFitting as e: - self._logger.info("Skipping model fitting: %s", e) - result = None - - return result - - def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: - """Generate forecasts with callback execution. - - Executes the complete prediction workflow including pre-prediction callbacks, - model prediction, and post-prediction callbacks. - - Args: - data: Input dataset for generating forecasts. - forecast_start: Optional start time for forecasts. - - Returns: - Generated forecast dataset. - - Raises: - NotFittedError: If the underlying model hasn't been trained. - """ - context: WorkflowContext[CustomEnsembleForecastingWorkflow] = WorkflowContext(workflow=self) - - for callback in self.callbacks: - callback.on_predict_start(context=context, data=data) - - if not self.model.is_fitted: - raise NotFittedError(type(self.model).__name__) - - forecasts = self.model.predict(data=data, forecast_start=forecast_start) - - for callback in self.callbacks: - callback.on_predict_end(context=context, data=data, result=forecasts) - - return forecasts - - -__all__ = ["CustomEnsembleForecastingWorkflow", "EnsembleForecastingCallback"] diff --git a/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py b/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py index eac268f8a..d22ec509d 100644 --- a/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py +++ b/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: MPL-2.0 -"""Tests for EnsembleMLFlowStorageCallback.""" +"""Tests for MLFlowStorageCallback with ensemble models.""" from __future__ import annotations @@ -17,13 +17,12 @@ from openstef_core.exceptions import SkipFitting from openstef_core.mixins.predictor import HyperParams from openstef_core.types import LeadTime, Q -from openstef_meta.integrations.mlflow import EnsembleMLFlowStorageCallback from openstef_meta.models.ensemble_forecasting_model import EnsembleForecastingModel, EnsembleModelFitResult from openstef_meta.models.forecast_combiners.forecast_combiner import ForecastCombiner, ForecastCombinerConfig -from openstef_meta.workflows import CustomEnsembleForecastingWorkflow -from openstef_models.integrations.mlflow import MLFlowStorage +from openstef_models.integrations.mlflow import MLFlowStorage, MLFlowStorageCallback from openstef_models.mixins.callbacks import WorkflowContext from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig +from openstef_models.workflows.custom_forecasting_workflow import CustomForecastingWorkflow if TYPE_CHECKING: from pathlib import Path @@ -145,9 +144,9 @@ def storage(tmp_path: Path) -> MLFlowStorage: @pytest.fixture -def callback(storage: MLFlowStorage) -> EnsembleMLFlowStorageCallback: +def callback(storage: MLFlowStorage) -> MLFlowStorageCallback: """Create ensemble callback with test storage.""" - return EnsembleMLFlowStorageCallback(storage=storage) + return MLFlowStorageCallback(storage=storage) @pytest.fixture @@ -161,7 +160,7 @@ def sample_dataset() -> TimeSeriesDataset: ) -def _create_ensemble_workflow() -> CustomEnsembleForecastingWorkflow: +def _create_ensemble_workflow() -> CustomForecastingWorkflow: """Create an ensemble forecasting workflow for testing.""" horizons = [LeadTime(timedelta(hours=1))] quantiles = [Q(0.5)] @@ -183,28 +182,28 @@ def _create_ensemble_workflow() -> CustomEnsembleForecastingWorkflow: combiner=combiner, ) - return CustomEnsembleForecastingWorkflow(model_id="test_ensemble", model=ensemble_model) + return CustomForecastingWorkflow(model_id="test_ensemble", model=ensemble_model) @pytest.fixture -def ensemble_workflow() -> CustomEnsembleForecastingWorkflow: +def ensemble_workflow() -> CustomForecastingWorkflow: return _create_ensemble_workflow() @pytest.fixture def ensemble_fit_result( - sample_dataset: TimeSeriesDataset, ensemble_workflow: CustomEnsembleForecastingWorkflow + sample_dataset: TimeSeriesDataset, ensemble_workflow: CustomForecastingWorkflow ) -> EnsembleModelFitResult: """Create a fit result from the ensemble model.""" - return ensemble_workflow.model.fit(sample_dataset) + return cast(EnsembleModelFitResult, ensemble_workflow.model.fit(sample_dataset)) # --- Tests --- def test_on_fit_end__stores_ensemble_model( - callback: EnsembleMLFlowStorageCallback, - ensemble_workflow: CustomEnsembleForecastingWorkflow, + callback: MLFlowStorageCallback, + ensemble_workflow: CustomForecastingWorkflow, ensemble_fit_result: EnsembleModelFitResult, ): """Test that on_fit_end stores an EnsembleForecastingModel to MLflow.""" @@ -223,8 +222,8 @@ def test_on_fit_end__stores_ensemble_model( def test_on_fit_end__logs_combiner_hyperparams_as_primary( - callback: EnsembleMLFlowStorageCallback, - ensemble_workflow: CustomEnsembleForecastingWorkflow, + callback: MLFlowStorageCallback, + ensemble_workflow: CustomForecastingWorkflow, ensemble_fit_result: EnsembleModelFitResult, ): """Test that combiner hyperparams are logged as the run's primary params.""" @@ -234,7 +233,7 @@ def test_on_fit_end__logs_combiner_hyperparams_as_primary( runs = callback.storage.search_latest_runs(model_id=ensemble_workflow.model_id, limit=1) run = runs[0] - params = run.data.params # pyright: ignore[reportUnknownMemberType] + params = run.data.params # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType] # Combiner hyperparams should be logged as primary params assert "learning_rate" in params @@ -242,8 +241,8 @@ def test_on_fit_end__logs_combiner_hyperparams_as_primary( def test_on_fit_end__logs_per_forecaster_hyperparams( - callback: EnsembleMLFlowStorageCallback, - ensemble_workflow: CustomEnsembleForecastingWorkflow, + callback: MLFlowStorageCallback, + ensemble_workflow: CustomForecastingWorkflow, ensemble_fit_result: EnsembleModelFitResult, ): """Test that per-forecaster hyperparams are logged with name prefixes.""" @@ -253,7 +252,7 @@ def test_on_fit_end__logs_per_forecaster_hyperparams( runs = callback.storage.search_latest_runs(model_id=ensemble_workflow.model_id, limit=1) run = runs[0] - params = run.data.params # pyright: ignore[reportUnknownMemberType] + params = run.data.params # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType] # Per-forecaster hyperparams should be prefixed assert "model_a.alpha" in params @@ -265,8 +264,8 @@ def test_on_fit_end__logs_per_forecaster_hyperparams( def test_on_predict_start__loads_ensemble_model( - callback: EnsembleMLFlowStorageCallback, - ensemble_workflow: CustomEnsembleForecastingWorkflow, + callback: MLFlowStorageCallback, + ensemble_workflow: CustomForecastingWorkflow, ensemble_fit_result: EnsembleModelFitResult, sample_dataset: TimeSeriesDataset, ): @@ -286,12 +285,12 @@ def test_on_predict_start__loads_ensemble_model( def test_model_selection__keeps_better_ensemble_model( storage: MLFlowStorage, - ensemble_workflow: CustomEnsembleForecastingWorkflow, + ensemble_workflow: CustomForecastingWorkflow, ensemble_fit_result: EnsembleModelFitResult, sample_dataset: TimeSeriesDataset, ): """Test that model selection keeps the better performing ensemble model.""" - callback = EnsembleMLFlowStorageCallback( + callback = MLFlowStorageCallback( storage=storage, model_selection_metric=(Q(0.5), "R2", "higher_is_better"), ) @@ -323,7 +322,7 @@ def test_model_selection__keeps_better_ensemble_model( ), ) worse_result = worse_ensemble.fit(sample_dataset) - worse_workflow = CustomEnsembleForecastingWorkflow(model_id="test_ensemble", model=worse_ensemble) + worse_workflow = CustomForecastingWorkflow(model_id="test_ensemble", model=worse_ensemble) worse_context = WorkflowContext(workflow=worse_workflow) with pytest.raises(SkipFitting, match="New model did not improve"): diff --git a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py index f5cf4b279..f81e62976 100644 --- a/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py +++ b/packages/openstef-meta/tests/unit/models/forecast_combiners/test_forecast_combiner.py @@ -82,20 +82,7 @@ def test_config_requires_at_least_one_horizon(): ) -def test_forecast_combiner_methods_raise_not_implemented(): - """ForecastCombiner base methods raise NotImplementedError.""" - # Arrange - combiner = ForecastCombiner() # type: ignore[abstract] - - # Act & Assert - with pytest.raises(NotImplementedError): - combiner.fit(data=None) # type: ignore[arg-type] - - with pytest.raises(NotImplementedError): - combiner.predict(data=None) # type: ignore[arg-type] - - with pytest.raises(NotImplementedError): - _ = combiner.is_fitted - - with pytest.raises(NotImplementedError): - combiner.predict_contributions(data=None) # type: ignore[arg-type] +def test_forecast_combiner_is_abstract(): + """ForecastCombiner cannot be instantiated directly — it has abstract methods.""" + with pytest.raises(TypeError, match="abstract"): + ForecastCombiner() # type: ignore[abstract] diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py index a64f59c51..ec82b574f 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py @@ -16,6 +16,15 @@ """ from .mlflow_storage import MLFlowStorage -from .mlflow_storage_callback import MLFlowStorageCallback +from .mlflow_storage_callback import ( + HasExplainableCombiner, + HasForecasters, + MLFlowStorageCallback, +) -__all__ = ["MLFlowStorage", "MLFlowStorageCallback"] +__all__ = [ + "HasExplainableCombiner", + "HasForecasters", + "MLFlowStorage", + "MLFlowStorageCallback", +] diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/base_mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/base_mlflow_storage_callback.py deleted file mode 100644 index 19510781d..000000000 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/base_mlflow_storage_callback.py +++ /dev/null @@ -1,222 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""Base configuration and shared utilities for MLflow storage callbacks. - -Provides common fields, helper methods, and utility functions used by both -single-model and ensemble-model MLflow callbacks. -""" - -import logging -from datetime import timedelta -from typing import Any, override - -from mlflow.entities import Run -from pydantic import Field, PrivateAttr - -from openstef_beam.evaluation import SubsetMetric -from openstef_beam.evaluation.metric_providers import MetricDirection -from openstef_core.base_model import BaseConfig -from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset -from openstef_core.exceptions import ( - MissingColumnsError, - ModelNotFoundError, -) -from openstef_core.types import Q, QuantileOrGlobal -from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage -from openstef_models.models.base_forecasting_model import BaseForecastingModel - - -class BaseMLFlowStorageCallback(BaseConfig): - """Base configuration and shared utilities for MLflow storage callbacks. - - Provides common fields and helper methods used by both single-model and - ensemble-model MLflow callbacks. Not a callback itself — subclasses should - also inherit from the appropriate callback type. - """ - - storage: MLFlowStorage = Field(default_factory=MLFlowStorage) - - model_reuse_enable: bool = Field(default=True) - model_reuse_max_age: timedelta = Field(default=timedelta(days=7)) - - model_selection_enable: bool = Field(default=True) - model_selection_metric: tuple[QuantileOrGlobal, str, MetricDirection] = Field( - default=(Q(0.5), "R2", "higher_is_better"), - description="Metric to monitor for model performance when retraining.", - ) - model_selection_old_model_penalty: float = Field( - default=1.2, - description="Penalty to apply to the old model's metric to bias selection towards newer models.", - ) - - store_feature_importance_plot: bool = Field( - default=True, - description="Whether to store feature importance plots in MLflow artifacts if available.", - ) - - _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) - - @override - def model_post_init(self, context: Any) -> None: - pass - - def _find_run(self, model_id: str, run_name: str | None) -> Run | None: - """Find an MLflow run by model_id and optional run_name. - - Args: - model_id: The model identifier. - run_name: Optional specific run name to search for. - - Returns: - The MLflow Run object, or None if not found. - """ - if run_name is not None: - return self.storage.search_run(model_id=model_id, run_name=run_name) - - runs = self.storage.search_latest_runs(model_id=model_id) - return next(iter(runs), None) - - def _try_load_model( - self, - run_id: str, - model_id: str, - ) -> BaseForecastingModel | None: - """Try to load a model from MLflow, returning None on failure. - - Args: - run_id: The MLflow run ID. - model_id: The model identifier. - - Returns: - The loaded model, or None if loading failed. - """ - try: - old_model = self.storage.load_run_model(run_id=run_id, model_id=model_id) - except ModelNotFoundError: - self._logger.warning( - "Could not load model from previous run %s for model %s, skipping model selection", - run_id, - model_id, - ) - return None - - if not isinstance(old_model, BaseForecastingModel): - self._logger.warning( - "Loaded old model from run %s is not a BaseForecastingModel, skipping model selection", - run_id, - ) - return None - - return old_model - - def _try_evaluate_model( - self, - run_id: str, - old_model: BaseForecastingModel, - input_data: TimeSeriesDataset, - ) -> SubsetMetric | None: - """Try to evaluate a model, returning None on failure. - - Args: - run_id: The MLflow run ID (for logging). - old_model: The model to evaluate. - input_data: The dataset to evaluate on. - - Returns: - The evaluation metrics, or None if evaluation failed. - """ - try: - return old_model.score(input_data) - except (MissingColumnsError, ValueError) as e: - self._logger.warning( - "Could not evaluate old model from run %s, skipping model selection: %s", - run_id, - e, - ) - return None - - def _check_tags_compatible(self, run_tags: dict[str, str], new_tags: dict[str, str], run_id: str) -> bool: - """Check if model tags are compatible, excluding mlflow.runName. - - Returns: - True if tags are compatible, False otherwise. - """ - old_tags = {k: v for k, v in run_tags.items() if k != "mlflow.runName"} - - if old_tags == new_tags: - return True - - differences = { - k: (old_tags.get(k), new_tags.get(k)) - for k in old_tags.keys() | new_tags.keys() - if old_tags.get(k) != new_tags.get(k) - } - - self._logger.info( - "Model tags changed since run %s, skipping model selection. Changes: %s", - run_id, - differences, - ) - return False - - def _check_is_new_model_better( - self, - old_metrics: SubsetMetric, - new_metrics: SubsetMetric, - ) -> bool: - """Compare old and new model metrics to determine if the new model is better. - - Returns: - True if the new model improves on the monitored metric. - """ - quantile, metric_name, direction = self.model_selection_metric - - old_metric = old_metrics.get_metric(quantile=quantile, metric_name=metric_name) - new_metric = new_metrics.get_metric(quantile=quantile, metric_name=metric_name) - - if old_metric is None or new_metric is None: - self._logger.warning( - "Could not find %s metric for quantile %s in old or new model metrics, assuming improvement", - metric_name, - quantile, - ) - return True - - self._logger.info( - "Comparing old model %s metric %.5f to new model %s metric %.5f for quantile %s", - metric_name, - old_metric, - metric_name, - new_metric, - quantile, - ) - - match direction: - case "higher_is_better" if new_metric >= old_metric / self.model_selection_old_model_penalty: - return True - case "lower_is_better" if new_metric <= old_metric / self.model_selection_old_model_penalty: - return True - case _: - return False - - @staticmethod - def metrics_to_dict(metrics: SubsetMetric, prefix: str) -> dict[str, float]: - """Convert SubsetMetric to a flat dictionary for MLflow logging. - - Args: - metrics: The metrics to convert. - prefix: Prefix to add to each metric key (e.g. "full_", "train_"). - - Returns: - Flat dictionary mapping metric names to values. - """ - return { - f"{prefix}{quantile}_{metric_name}": value - for quantile, metrics_dict in metrics.metrics.items() - for metric_name, value in metrics_dict.items() - } - - -__all__ = ["BaseMLFlowStorageCallback"] diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index fd8c70eb9..90de14cdf 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -4,37 +4,103 @@ """MLflow integration for tracking and storing forecasting workflows. -Provides callback functionality to log model training runs, artifacts, -and metrics to MLflow. Automatically saves models, training data, and performance -metrics for each forecasting workflow execution. +Provides a single callback for logging model training runs, artifacts, +and metrics to MLflow. Supports both single-model (ForecastingModel) and +ensemble (EnsembleForecastingModel) workflows via protocol-based dispatch. + +Ensemble-specific behavior is enabled automatically when the model satisfies +the ``HasForecasters`` and ``HasExplainableCombiner`` protocols: + +- Logs combiner hyperparameters as the primary hyperparams +- Logs per-forecaster hyperparameters with name-prefixed keys +- Stores feature importance plots for each explainable forecaster component +- Stores combiner feature importance plots """ -from datetime import UTC, datetime -from typing import cast, override +import logging +from datetime import UTC, datetime, timedelta +from typing import Any, Protocol, cast, override, runtime_checkable +from mlflow.entities import Run +from pydantic import Field, PrivateAttr + +from openstef_beam.evaluation import SubsetMetric +from openstef_beam.evaluation.metric_providers import MetricDirection +from openstef_core.base_model import BaseConfig from openstef_core.datasets.timeseries_dataset import TimeSeriesDataset from openstef_core.datasets.versioned_timeseries_dataset import ( VersionedTimeSeriesDataset, ) from openstef_core.exceptions import ( + MissingColumnsError, ModelNotFoundError, SkipFitting, ) +from openstef_core.mixins import HyperParams +from openstef_core.types import Q, QuantileOrGlobal from openstef_models.explainability import ExplainableForecaster -from openstef_models.integrations.mlflow.base_mlflow_storage_callback import ( - BaseMLFlowStorageCallback, -) +from openstef_models.integrations.mlflow.mlflow_storage import MLFlowStorage from openstef_models.mixins.callbacks import WorkflowContext -from openstef_models.models.base_forecasting_model import BaseForecastingModel -from openstef_models.models.forecasting_model import ModelFitResult +from openstef_models.models.forecasting.forecaster import Forecaster +from openstef_models.models.forecasting_model import ForecastingModel, ModelFitResult from openstef_models.workflows.custom_forecasting_workflow import ( CustomForecastingWorkflow, ForecastingCallback, ) -class MLFlowStorageCallback(BaseMLFlowStorageCallback, ForecastingCallback): - """MLFlow callback for logging forecasting workflow events.""" +@runtime_checkable +class HasForecasters(Protocol): + """Protocol for ensemble models with multiple base forecasters.""" + + @property + def forecasters(self) -> dict[str, Forecaster]: + """Return a dictionary of forecasters keyed by name.""" + ... + + +@runtime_checkable +class HasExplainableCombiner(Protocol): + """Protocol for ensemble models with an explainable forecast combiner.""" + + @property + def combiner(self) -> ExplainableForecaster: + """Return the explainable forecast combiner.""" + ... + + +class MLFlowStorageCallback(BaseConfig, ForecastingCallback): + """MLFlow callback for logging forecasting workflow events. + + Handles both single-model and ensemble workflows via protocol-based + dispatch. + """ + + storage: MLFlowStorage = Field(default_factory=MLFlowStorage) + + model_reuse_enable: bool = Field(default=True) + model_reuse_max_age: timedelta = Field(default=timedelta(days=7)) + + model_selection_enable: bool = Field(default=True) + model_selection_metric: tuple[QuantileOrGlobal, str, MetricDirection] = Field( + default=(Q(0.5), "R2", "higher_is_better"), + description="Metric to monitor for model performance when retraining.", + ) + model_selection_old_model_penalty: float = Field( + default=1.2, + description="Penalty to apply to the old model's metric to bias selection towards newer models.", + ) + + store_feature_importance_plot: bool = Field( + default=True, + description="Whether to store feature importance plots in MLflow artifacts if available.", + ) + + _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) + + @override + def model_post_init(self, context: Any) -> None: + pass @override def on_fit_start( @@ -48,7 +114,6 @@ def on_fit_start( run = self._find_run(model_id=context.workflow.model_id, run_name=context.workflow.run_name) if run is not None: - # Check if the run is recent enough to skip re-fitting now = datetime.now(tz=UTC) end_time_millis = cast(float | None, run.info.end_time) run_end_datetime = ( @@ -72,18 +137,29 @@ def on_fit_end( if self.model_selection_enable: self._run_model_selection(workflow=context.workflow, result=result) - # Create a new run model = context.workflow.model + + # Determine primary hyperparams based on model structure + hyperparams = self._get_primary_hyperparams(model) + + # Create a new run run = self.storage.create_run( model_id=context.workflow.model_id, tags=model.tags, - hyperparams=context.workflow.model.forecaster.hyperparams, + hyperparams=hyperparams, run_name=context.workflow.run_name, experiment_tags=context.workflow.experiment_tags, ) run_id: str = run.info.run_id self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) + # Log per-forecaster hyperparams for ensemble models + if isinstance(model, HasForecasters): + for name, forecaster in model.forecasters.items(): + prefixed_params = {f"{name}.{k}": str(v) for k, v in forecaster.hyperparams.model_dump().items()} + self.storage.log_hyperparams(run_id=run_id, params=prefixed_params) + self._logger.debug("Logged hyperparams for forecaster '%s' in run %s", name, run_id) + # Store the model input run_path = self.storage.get_artifacts_path(model_id=context.workflow.model_id, run_id=run_id) data_path = run_path / self.storage.data_path @@ -91,10 +167,9 @@ def on_fit_end( result.input_dataset.to_parquet(path=data_path / "data.parquet") self._logger.info("Stored training data at %s for run %s", data_path, run_id) - # Store feature importance plots if enabled - if self.store_feature_importance_plot and isinstance(model.forecaster, ExplainableForecaster): - fig = model.forecaster.plot_feature_importances() - fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] + # Store feature importance plots + if self.store_feature_importance_plot: + self._store_feature_importances(model=model, data_path=data_path) # Store the trained model self.storage.save_run_model( @@ -130,14 +205,12 @@ def on_predict_start( if run is None: raise ModelNotFoundError(model_id=context.workflow.model_id) - # Load the model from the run run_id: str = run.info.run_id - old_model = self.storage.load_run_model(run_id=run_id, model_id=context.workflow.model_id) - if not isinstance(old_model, BaseForecastingModel): + if not isinstance(old_model, ForecastingModel): self._logger.warning( - "Loaded model from run %s is not a BaseForecastingModel, cannot use for prediction", + "Loaded model from run %s is not a ForecastingModel, cannot use for prediction", cast(str, run.info.run_id), ) return @@ -191,5 +264,163 @@ def _run_model_selection(self, workflow: CustomForecastingWorkflow, result: Mode ) raise SkipFitting("New model did not improve monitored metric, skipping re-fit.") + @staticmethod + def _get_primary_hyperparams(model: ForecastingModel) -> HyperParams: + """Determine primary hyperparameters from the model. + + For ensemble models: uses the combiner's hyperparameters. + For single models: uses the forecaster's hyperparameters. + """ + if isinstance(model, HasExplainableCombiner): + config = getattr(model.combiner, "config", None) + if config is not None: + return getattr(config, "hyperparams", HyperParams()) # pyright: ignore[reportUnknownMemberType, reportReturnType] + if model.forecaster is not None: + return model.forecaster.hyperparams + return HyperParams() + + def _store_feature_importances(self, model: ForecastingModel, data_path: Any) -> None: + """Store feature importance plots for all explainable components of the model.""" + if isinstance(model, HasForecasters): + # Ensemble model: store per-forecaster feature importances + for name, forecaster in model.forecasters.items(): + if isinstance(forecaster, ExplainableForecaster): + fig = forecaster.plot_feature_importances() + fig.write_html(data_path / f"feature_importances_{name}.html") # pyright: ignore[reportUnknownMemberType] + elif model.forecaster is not None and isinstance(model.forecaster, ExplainableForecaster): + # Single model: store feature importance + fig = model.forecaster.plot_feature_importances() + fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] + + # Store combiner feature importances (if model has an explainable combiner) + if isinstance(model, HasExplainableCombiner): + combiner_fi = model.combiner.feature_importances + if not combiner_fi.empty: + fig = model.combiner.plot_feature_importances() + fig.write_html(data_path / "feature_importances_combiner.html") # pyright: ignore[reportUnknownMemberType] + + def _find_run(self, model_id: str, run_name: str | None) -> Run | None: + """Find an MLflow run by model_id and optional run_name.""" + if run_name is not None: + return self.storage.search_run(model_id=model_id, run_name=run_name) + + runs = self.storage.search_latest_runs(model_id=model_id) + return next(iter(runs), None) + + def _try_load_model(self, run_id: str, model_id: str) -> ForecastingModel | None: + """Try to load a model from MLflow, returning None on failure.""" + try: + old_model = self.storage.load_run_model(run_id=run_id, model_id=model_id) + except ModelNotFoundError: + self._logger.warning( + "Could not load model from previous run %s for model %s, skipping model selection", + run_id, + model_id, + ) + return None + + if not isinstance(old_model, ForecastingModel): + self._logger.warning( + "Loaded old model from run %s is not a ForecastingModel, skipping model selection", + run_id, + ) + return None + + return old_model + + def _try_evaluate_model( + self, + run_id: str, + old_model: ForecastingModel, + input_data: TimeSeriesDataset, + ) -> SubsetMetric | None: + """Try to evaluate a model, returning None on failure.""" + try: + return old_model.score(input_data) + except (MissingColumnsError, ValueError) as e: + self._logger.warning( + "Could not evaluate old model from run %s, skipping model selection: %s", + run_id, + e, + ) + return None + + def _check_tags_compatible(self, run_tags: dict[str, str], new_tags: dict[str, str], run_id: str) -> bool: + """Check if model tags are compatible, excluding mlflow.runName.""" + old_tags = {k: v for k, v in run_tags.items() if k != "mlflow.runName"} + + if old_tags == new_tags: + return True + + differences = { + k: (old_tags.get(k), new_tags.get(k)) + for k in old_tags.keys() | new_tags.keys() + if old_tags.get(k) != new_tags.get(k) + } + + self._logger.info( + "Model tags changed since run %s, skipping model selection. Changes: %s", + run_id, + differences, + ) + return False + + def _check_is_new_model_better( + self, + old_metrics: SubsetMetric, + new_metrics: SubsetMetric, + ) -> bool: + """Compare old and new model metrics to determine if the new model is better.""" + quantile, metric_name, direction = self.model_selection_metric + + old_metric = old_metrics.get_metric(quantile=quantile, metric_name=metric_name) + new_metric = new_metrics.get_metric(quantile=quantile, metric_name=metric_name) + + if old_metric is None or new_metric is None: + self._logger.warning( + "Could not find %s metric for quantile %s in old or new model metrics, assuming improvement", + metric_name, + quantile, + ) + return True + + self._logger.info( + "Comparing old model %s metric %.5f to new model %s metric %.5f for quantile %s", + metric_name, + old_metric, + metric_name, + new_metric, + quantile, + ) -__all__ = ["MLFlowStorageCallback"] + match direction: + case "higher_is_better" if new_metric >= old_metric / self.model_selection_old_model_penalty: + return True + case "lower_is_better" if new_metric <= old_metric / self.model_selection_old_model_penalty: + return True + case _: + return False + + @staticmethod + def metrics_to_dict(metrics: SubsetMetric, prefix: str) -> dict[str, float]: + """Convert SubsetMetric to a flat dictionary for MLflow logging. + + Args: + metrics: The metrics to convert. + prefix: Prefix to add to each metric key (e.g. "full_", "train_"). + + Returns: + Flat dictionary mapping metric names to values. + """ + return { + f"{prefix}{quantile}_{metric_name}": value + for quantile, metrics_dict in metrics.metrics.items() + for metric_name, value in metrics_dict.items() + } + + +__all__ = [ + "HasExplainableCombiner", + "HasForecasters", + "MLFlowStorageCallback", +] diff --git a/packages/openstef-models/src/openstef_models/models/__init__.py b/packages/openstef-models/src/openstef_models/models/__init__.py index a623e5c1e..766194fe5 100644 --- a/packages/openstef-models/src/openstef_models/models/__init__.py +++ b/packages/openstef-models/src/openstef_models/models/__init__.py @@ -8,12 +8,10 @@ imports. """ -from .base_forecasting_model import BaseForecastingModel from .component_splitting_model import ComponentSplittingModel from .forecasting_model import ForecastingModel __all__ = [ - "BaseForecastingModel", "ComponentSplittingModel", "ForecastingModel", ] diff --git a/packages/openstef-models/src/openstef_models/models/base_forecasting_model.py b/packages/openstef-models/src/openstef_models/models/base_forecasting_model.py deleted file mode 100644 index eafc9a8f3..000000000 --- a/packages/openstef-models/src/openstef_models/models/base_forecasting_model.py +++ /dev/null @@ -1,155 +0,0 @@ -# SPDX-FileCopyrightText: 2025 Contributors to the OpenSTEF project -# -# SPDX-License-Identifier: MPL-2.0 - -"""Abstract base class for forecasting models. - -Provides shared fields and evaluation logic used by both single-model -(ForecastingModel) and ensemble (EnsembleForecastingModel) implementations. -""" - -import logging -from abc import abstractmethod -from datetime import datetime, timedelta -from typing import Any, override - -import pandas as pd -from pydantic import Field, PrivateAttr - -from openstef_beam.evaluation import EvaluationConfig, EvaluationPipeline, SubsetMetric -from openstef_beam.evaluation.metric_providers import MetricProvider, ObservedProbabilityProvider, R2Provider -from openstef_core.base_model import BaseModel -from openstef_core.datasets import ForecastDataset, TimeSeriesDataset -from openstef_core.mixins import Predictor, TransformPipeline -from openstef_models.models.forecasting.forecaster import ForecasterConfig -from openstef_models.utils.data_split import DataSplitter - - -class BaseForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): - """Abstract base for forecasting models.""" - - # Shared model components - postprocessing: TransformPipeline[ForecastDataset] = Field( - default_factory=TransformPipeline[ForecastDataset], - description="Postprocessing pipeline for transforming model outputs into final forecasts.", - exclude=True, - ) - target_column: str = Field( - default="load", - description="Name of the target variable column in datasets.", - ) - data_splitter: DataSplitter = Field( - default_factory=DataSplitter, - description="Data splitting strategy for train/validation/test sets.", - ) - cutoff_history: timedelta = Field( - default=timedelta(days=0), - description="Amount of historical data to exclude from training and prediction due to incomplete features " - "from lag-based preprocessing. When using lag transforms (e.g., lag-14), the first N days contain NaN values. " - "Set this to match your maximum lag duration (e.g., timedelta(days=14)). " - "Default of 0 assumes no invalid rows are created by preprocessing.", - ) - - # Evaluation - evaluation_metrics: list[MetricProvider] = Field( - default_factory=lambda: [R2Provider(), ObservedProbabilityProvider()], - description="List of metric providers for evaluating model score.", - ) - - # Metadata - tags: dict[str, str] = Field( - default_factory=dict, - description="Optional metadata tags for the model.", - ) - - _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) - - @property - @abstractmethod - def scoring_config(self) -> ForecasterConfig: - """Return the forecaster config used for evaluation metrics. - - For a single-model pipeline this is the forecaster's own config. - For an ensemble it is typically the first (or canonical) base-forecaster config. - """ - - @abstractmethod - @override - def fit( - self, - data: TimeSeriesDataset, - data_val: TimeSeriesDataset | None = None, - data_test: TimeSeriesDataset | None = None, - ) -> Any: - """Train the forecasting model on the provided dataset. - - Args: - data: Historical time series data with features and target values. - data_val: Optional validation data. - data_test: Optional test data. - - Returns: - Fit result containing training details and metrics. - """ - - @abstractmethod - @override - def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: - """Generate forecasts for the input data. - - Args: - data: Input dataset for generating forecasts. - forecast_start: Optional start time for forecasts. - - Returns: - Generated forecast dataset. - """ - - def score(self, data: TimeSeriesDataset) -> SubsetMetric: - """Evaluate model performance on the provided dataset. - - Generates predictions for the dataset and calculates evaluation metrics - by comparing against ground truth values. Uses the configured evaluation - metrics to assess forecast quality at the maximum forecast horizon. - - Args: - data: Time series dataset containing both features and target values - for evaluation. - - Returns: - Evaluation metrics including configured providers (e.g., R², observed - probability) computed at the maximum forecast horizon. - """ - prediction = self.predict(data=data) - return self._calculate_score(prediction=prediction) - - def _calculate_score(self, prediction: ForecastDataset) -> SubsetMetric: - if prediction.target_series is None: - raise ValueError("Prediction dataset must contain target series for scoring.") - - # Drop NaN targets for metric calculation - prediction = prediction.pipe_pandas(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownArgumentType, reportUnknownMemberType] - - pipeline = EvaluationPipeline( - config=EvaluationConfig(available_ats=[], lead_times=[self.scoring_config.max_horizon]), - quantiles=self.scoring_config.quantiles, - window_metric_providers=[], - global_metric_providers=self.evaluation_metrics, - ) - - evaluation_result = pipeline.run_for_subset( - filtering=self.scoring_config.max_horizon, - predictions=prediction, - ) - global_metric = evaluation_result.get_global_metric() - if not global_metric: - return SubsetMetric( - window="global", - timestamp=prediction.forecast_start, - metrics={}, - ) - - return global_metric - - -__all__ = ["BaseForecastingModel"] diff --git a/packages/openstef-models/src/openstef_models/models/forecasting_model.py b/packages/openstef-models/src/openstef_models/models/forecasting_model.py index 3c2d1fe4c..ae872ddc4 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting_model.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting_model.py @@ -10,14 +10,15 @@ """ import logging -from datetime import datetime +from datetime import datetime, timedelta from functools import partial -from typing import cast, override +from typing import cast import pandas as pd from pydantic import Field, PrivateAttr -from openstef_beam.evaluation import SubsetMetric +from openstef_beam.evaluation import EvaluationConfig, EvaluationPipeline, SubsetMetric +from openstef_beam.evaluation.metric_providers import MetricProvider, ObservedProbabilityProvider, R2Provider from openstef_core.base_model import BaseModel from openstef_core.datasets import ( ForecastDataset, @@ -26,9 +27,9 @@ ) from openstef_core.datasets.timeseries_dataset import validate_horizons_present from openstef_core.exceptions import InsufficientlyCompleteError, NotFittedError -from openstef_core.mixins import TransformPipeline -from openstef_models.models.base_forecasting_model import BaseForecastingModel +from openstef_core.mixins import Predictor, TransformPipeline from openstef_models.models.forecasting.forecaster import Forecaster, ForecasterConfig +from openstef_models.utils.data_split import DataSplitter class ModelFitResult(BaseModel): @@ -58,7 +59,7 @@ class ModelFitResult(BaseModel): metrics_full: SubsetMetric = Field(description="Evaluation metrics computed on the full original dataset.") -class ForecastingModel(BaseForecastingModel): +class ForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]): """Complete forecasting pipeline combining preprocessing, prediction, and postprocessing. Orchestrates the full forecasting workflow by managing feature engineering, @@ -102,36 +103,76 @@ class ForecastingModel(BaseForecastingModel): >>> forecasts = model.predict(new_data) # doctest: +SKIP """ - # Forecasting components + # Shared model components + postprocessing: TransformPipeline[ForecastDataset] = Field( + default_factory=TransformPipeline[ForecastDataset], + description="Postprocessing pipeline for transforming model outputs into final forecasts.", + exclude=True, + ) + target_column: str = Field( + default="load", + description="Name of the target variable column in datasets.", + ) + data_splitter: DataSplitter = Field( + default_factory=DataSplitter, + description="Data splitting strategy for train/validation/test sets.", + ) + cutoff_history: timedelta = Field( + default=timedelta(days=0), + description="Amount of historical data to exclude from training and prediction due to incomplete features " + "from lag-based preprocessing. When using lag transforms (e.g., lag-14), the first N days contain NaN values. " + "Set this to match your maximum lag duration (e.g., timedelta(days=14)). " + "Default of 0 assumes no invalid rows are created by preprocessing.", + ) + + # Evaluation + evaluation_metrics: list[MetricProvider] = Field( + default_factory=lambda: [R2Provider(), ObservedProbabilityProvider()], + description="List of metric providers for evaluating model score.", + ) + + # Metadata + tags: dict[str, str] = Field( + default_factory=dict, + description="Optional metadata tags for the model.", + ) + + # Forecasting components (single-model pipeline; overridden by subclasses like EnsembleForecastingModel) preprocessing: TransformPipeline[TimeSeriesDataset] = Field( default_factory=TransformPipeline[TimeSeriesDataset], description="Feature engineering pipeline for transforming raw input data into model-ready features.", exclude=True, ) - forecaster: Forecaster = Field( - default=..., - description="Underlying forecasting algorithm, either single-horizon or multi-horizon.", + forecaster: Forecaster | None = Field( + default=None, + description="Underlying forecasting algorithm. Required for single-model pipelines, " + "None for ensemble models that manage their own forecasters.", exclude=True, ) _logger: logging.Logger = PrivateAttr(default=logging.getLogger(__name__)) + @property + def _forecaster(self) -> Forecaster: + """Return the forecaster, raising if not set (ensemble models override methods instead).""" + if self.forecaster is None: + msg = "No forecaster configured. Single-model ForecastingModel requires a forecaster." + raise ValueError(msg) + return self.forecaster + @property def config(self) -> ForecasterConfig: """Returns the configuration of the underlying forecaster.""" - return self.forecaster.config + return self._forecaster.config @property - @override def scoring_config(self) -> ForecasterConfig: - return self.forecaster.config + return self._forecaster.config @property - @override def is_fitted(self) -> bool: - return self.forecaster.is_fitted + return self._forecaster.is_fitted - @override def fit( self, data: TimeSeriesDataset, @@ -159,7 +200,7 @@ def fit( Raises: InsufficientlyCompleteError: If no training data remains after dropping rows with NaN targets. """ - validate_horizons_present(data, self.forecaster.config.horizons) + validate_horizons_present(data, self._forecaster.config.horizons) target_dropna = partial(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownMemberType] if data.pipe_pandas(target_dropna).data.empty: @@ -188,7 +229,7 @@ def fit( ) # Fit the model - self.forecaster.fit(data=input_data_train, data_val=input_data_val) + self._forecaster.fit(data=input_data_train, data_val=input_data_val) prediction_raw = self._predict(input_data=input_data_train) # Fit the postprocessing transforms @@ -216,7 +257,6 @@ def _predict_and_score(input_data: ForecastInputDataset) -> SubsetMetric: metrics_full=metrics_full, ) - @override def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = None) -> ForecastDataset: """Generate forecasts using the trained model. @@ -234,7 +274,7 @@ def predict(self, data: TimeSeriesDataset, forecast_start: datetime | None = Non NotFittedError: If the model hasn't been trained yet. """ if not self.is_fitted: - raise NotFittedError(type(self.forecaster).__name__) + raise NotFittedError(type(self._forecaster).__name__) # Transform the input data to a valid forecast input input_data = self.prepare_input(data=data, forecast_start=forecast_start) @@ -287,9 +327,53 @@ def prepare_input( def _predict(self, input_data: ForecastInputDataset) -> ForecastDataset: # Predict and restore target column - prediction = self.forecaster.predict(data=input_data) + prediction = self._forecaster.predict(data=input_data) return restore_target(dataset=prediction, original_dataset=input_data, target_column=self.target_column) + def score(self, data: TimeSeriesDataset) -> SubsetMetric: + """Evaluate model performance on the provided dataset. + + Generates predictions for the dataset and calculates evaluation metrics + by comparing against ground truth values. + + Args: + data: Time series dataset containing both features and target values + for evaluation. + + Returns: + Evaluation metrics computed at the maximum forecast horizon. + """ + prediction = self.predict(data=data) + return self._calculate_score(prediction=prediction) + + def _calculate_score(self, prediction: ForecastDataset) -> SubsetMetric: + if prediction.target_series is None: + raise ValueError("Prediction dataset must contain target series for scoring.") + + # Drop NaN targets for metric calculation + prediction = prediction.pipe_pandas(pd.DataFrame.dropna, subset=[self.target_column]) # pyright: ignore[reportUnknownArgumentType, reportUnknownMemberType] + + pipeline = EvaluationPipeline( + config=EvaluationConfig(available_ats=[], lead_times=[self.scoring_config.max_horizon]), + quantiles=self.scoring_config.quantiles, + window_metric_providers=[], + global_metric_providers=self.evaluation_metrics, + ) + + evaluation_result = pipeline.run_for_subset( + filtering=self.scoring_config.max_horizon, + predictions=prediction, + ) + global_metric = evaluation_result.get_global_metric() + if not global_metric: + return SubsetMetric( + window="global", + timestamp=prediction.forecast_start, + metrics={}, + ) + + return global_metric + def restore_target[T: TimeSeriesDataset]( dataset: T, diff --git a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py index afb514f99..e1bd3a043 100644 --- a/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py +++ b/packages/openstef-models/src/openstef_models/workflows/custom_forecasting_workflow.py @@ -56,13 +56,14 @@ class CustomForecastingWorkflow(BaseModel): """Complete forecasting workflow with model management and lifecycle hooks. Orchestrates the full forecasting process by combining a ForecastingModel - with callback execution and optional model persistence. Provides the main - interface for production forecasting systems where models need to be - trained, saved, loaded, and used for prediction with monitoring. + (either ForecastingModel or EnsembleForecastingModel) with callback execution + and optional model persistence. Provides the main interface for production + forecasting systems where models need to be trained, saved, loaded, and used + for prediction with monitoring. Invariants: - Callbacks are executed at appropriate lifecycle stages - - Model fitting and prediction delegate to the underlying ForecastingModel + - Model fitting and prediction delegate to the underlying model - Storage operations (if configured) maintain model persistence Example: From ae1d2666ec6626125b4892589ebaa6c86149e534 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 15:10:55 +0100 Subject: [PATCH 099/104] Rename test file Signed-off-by: Marnix van Lieshout --- ...orage_callback.py => test_mlflow_storage_callback_ensemble.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename packages/openstef-meta/tests/unit/integrations/{test_ensemble_mlflow_storage_callback.py => test_mlflow_storage_callback_ensemble.py} (100%) diff --git a/packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py b/packages/openstef-meta/tests/unit/integrations/test_mlflow_storage_callback_ensemble.py similarity index 100% rename from packages/openstef-meta/tests/unit/integrations/test_ensemble_mlflow_storage_callback.py rename to packages/openstef-meta/tests/unit/integrations/test_mlflow_storage_callback_ensemble.py From 698d61d5422cbf317f845a7368dbaf35daad3dc3 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 15:13:37 +0100 Subject: [PATCH 100/104] Reset pinball_losses changes Signed-off-by: Marnix van Lieshout --- .../src/openstef_beam/metrics/__init__.py | 2 - .../metrics/metrics_deterministic.py | 57 +++------------ .../metrics/test_metrics_deterministic.py | 69 ------------------- 3 files changed, 10 insertions(+), 118 deletions(-) diff --git a/packages/openstef-beam/src/openstef_beam/metrics/__init__.py b/packages/openstef-beam/src/openstef_beam/metrics/__init__.py index 94e946aea..ea4ccf7ce 100644 --- a/packages/openstef-beam/src/openstef_beam/metrics/__init__.py +++ b/packages/openstef-beam/src/openstef_beam/metrics/__init__.py @@ -22,7 +22,6 @@ confusion_matrix, fbeta, mape, - pinball_losses, precision_recall, r2, relative_pinball_loss, @@ -45,7 +44,6 @@ "mape", "mean_absolute_calibration_error", "observed_probability", - "pinball_losses", "precision_recall", "r2", "rcrps", diff --git a/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py b/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py index 1a5a1b97d..f77f55579 100644 --- a/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py +++ b/packages/openstef-beam/src/openstef_beam/metrics/metrics_deterministic.py @@ -463,48 +463,6 @@ def r2( return float(r2_score(y_true, y_pred, sample_weight=sample_weights)) -def pinball_losses( - y_true: npt.ArrayLike, - y_pred: npt.ArrayLike, - *, - quantile: float, -) -> npt.NDArray[np.floating]: - """Calculate the per-sample Pinball Loss (also known as Quantile Loss). - - The pinball loss asymmetrically penalizes over- and under-predictions based on - the target quantile. For quantiles above 0.5, under-predictions are penalized more - heavily; for quantiles below 0.5, over-predictions receive higher penalties. - - Args: - y_true: Ground truth values with shape (num_samples,). - y_pred: Predicted quantile values with shape (num_samples,). - quantile: The quantile level being predicted (e.g., 0.1, 0.5, 0.9). - Must be in [0, 1]. - - Returns: - An array of per-sample pinball losses with shape (num_samples,). - - Example: - Basic usage for 90th percentile predictions: - - >>> import numpy as np - >>> y_true = np.array([100.0, 120.0, 110.0]) - >>> y_pred = np.array([95.0, 125.0, 110.0]) - >>> losses = pinball_losses(y_true, y_pred, quantile=0.9) - >>> losses - array([4.5, 0.5, 0. ]) - """ - y_true = np.asarray(y_true) - y_pred = np.asarray(y_pred) - - errors = y_true - y_pred - return np.where( - errors >= 0, - quantile * errors, # Under-prediction - (quantile - 1) * errors, # Over-prediction - ) - - def relative_pinball_loss( y_true: npt.NDArray[np.floating], y_pred: npt.NDArray[np.floating], @@ -548,16 +506,21 @@ def relative_pinball_loss( 0.0167 """ # Ensure inputs are numpy arrays - y_true = np.asarray(y_true) - y_pred = np.asarray(y_pred) + y_true = np.array(y_true) + y_pred = np.array(y_pred) if y_true.size == 0 or y_pred.size == 0: return float("NaN") - # Calculate per-sample pinball losses - losses = pinball_losses(y_true, y_pred, quantile=quantile) + # Calculate pinball loss for each sample + errors = y_true - y_pred + pinball_losses = np.where( + errors >= 0, + quantile * errors, # Under-prediction + (quantile - 1) * errors, # Over-prediction + ) # Calculate mean pinball loss (weighted if weights provided) - mean_pinball_loss = np.average(losses, weights=sample_weights) + mean_pinball_loss = np.average(pinball_losses, weights=sample_weights) # Calculate measurement range for normalization y_range = np.quantile(y_true, q=measurement_range_upper_q) - np.quantile(y_true, q=measurement_range_lower_q) diff --git a/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py b/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py index 00416422e..b3b9ac35d 100644 --- a/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py +++ b/packages/openstef-beam/tests/unit/metrics/test_metrics_deterministic.py @@ -13,7 +13,6 @@ confusion_matrix, fbeta, mape, - pinball_losses, precision_recall, relative_pinball_loss, riqd, @@ -414,74 +413,6 @@ def test_riqd_returns_nan_when_inputs_empty() -> None: assert np.isnan(result) -def test_pinball_losses_perfect_predictions_zero_loss() -> None: - """When predictions match actual values exactly, pinball loss is zero everywhere.""" - # Arrange - y = np.array([10.0, 20.0, 30.0, 40.0]) - - # Act - result = pinball_losses(y, y, quantile=0.5) - - # Assert - np.testing.assert_array_equal(result, np.zeros(4)) - - -def test_pinball_losses_under_prediction_penalized_by_quantile() -> None: - """Under-prediction (y_true > y_pred) is penalized by quantile * error.""" - # Arrange - y_true = np.array([10.0, 20.0, 30.0, 40.0]) - y_pred = np.array([5.0, 15.0, 25.0, 35.0]) # all under-predict by 5 - - # Act - result = pinball_losses(y_true, y_pred, quantile=0.9) - - # Assert — errors = 5, pinball = 0.9 * 5 = 4.5 - np.testing.assert_array_almost_equal(result, np.full(4, 4.5)) - - -def test_pinball_losses_over_prediction_penalized_by_complement() -> None: - """Over-prediction (y_true < y_pred) is penalized by (1 - quantile) * |error|.""" - # Arrange - y_true = np.array([10.0, 20.0, 30.0, 40.0]) - y_pred = np.array([15.0, 25.0, 35.0, 45.0]) # all over-predict by 5 - - # Act - result = pinball_losses(y_true, y_pred, quantile=0.9) - - # Assert — errors = -5, pinball = (0.9 - 1) * (-5) = 0.5 - np.testing.assert_array_almost_equal(result, np.full(4, 0.5)) - - -def test_pinball_losses_median_quantile_symmetric() -> None: - """At quantile 0.5, under- and over-prediction penalties are symmetric.""" - # Arrange - y_true = np.array([10.0, 20.0, 30.0, 40.0]) - y_under = np.array([5.0, 15.0, 25.0, 35.0]) - y_over = np.array([15.0, 25.0, 35.0, 45.0]) - - # Act - loss_under = pinball_losses(y_true, y_under, quantile=0.5) - loss_over = pinball_losses(y_true, y_over, quantile=0.5) - - # Assert - np.testing.assert_array_almost_equal(loss_under, loss_over) - - -def test_pinball_losses_is_non_negative() -> None: - """Pinball loss should always be >= 0 for any quantile.""" - # Arrange - rng = np.random.default_rng(42) - y_true = np.array([10.0, 20.0, 30.0, 40.0]) - y_pred = rng.normal(25, 15, size=len(y_true)) - - for q in [0.1, 0.25, 0.5, 0.75, 0.9]: - # Act - result = pinball_losses(y_true, y_pred, quantile=q) - - # Assert - assert (result >= 0).all(), f"Negative pinball loss found at quantile {q}" - - @pytest.mark.parametrize( ( "y_true", From a6fa901e316e6ed418f0677ab43e4a1c24b0cfd8 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 15:20:16 +0100 Subject: [PATCH 101/104] Fix linting issues Signed-off-by: Marnix van Lieshout --- .../mlflow/mlflow_storage_callback.py | 39 +++++++++++++++---- .../models/forecasting_model.py | 16 +++++++- 2 files changed, 47 insertions(+), 8 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index 90de14cdf..9225ba95a 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -19,6 +19,7 @@ import logging from datetime import UTC, datetime, timedelta +from pathlib import Path from typing import Any, Protocol, cast, override, runtime_checkable from mlflow.entities import Run @@ -169,7 +170,7 @@ def on_fit_end( # Store feature importance plots if self.store_feature_importance_plot: - self._store_feature_importances(model=model, data_path=data_path) + self._store_feature_importances(model, data_path) # Store the trained model self.storage.save_run_model( @@ -270,6 +271,9 @@ def _get_primary_hyperparams(model: ForecastingModel) -> HyperParams: For ensemble models: uses the combiner's hyperparameters. For single models: uses the forecaster's hyperparameters. + + Returns: + The primary hyperparameters extracted from the model. """ if isinstance(model, HasExplainableCombiner): config = getattr(model.combiner, "config", None) @@ -279,7 +283,8 @@ def _get_primary_hyperparams(model: ForecastingModel) -> HyperParams: return model.forecaster.hyperparams return HyperParams() - def _store_feature_importances(self, model: ForecastingModel, data_path: Any) -> None: + @staticmethod + def _store_feature_importances(model: ForecastingModel, data_path: Path) -> None: """Store feature importance plots for all explainable components of the model.""" if isinstance(model, HasForecasters): # Ensemble model: store per-forecaster feature importances @@ -300,7 +305,11 @@ def _store_feature_importances(self, model: ForecastingModel, data_path: Any) -> fig.write_html(data_path / "feature_importances_combiner.html") # pyright: ignore[reportUnknownMemberType] def _find_run(self, model_id: str, run_name: str | None) -> Run | None: - """Find an MLflow run by model_id and optional run_name.""" + """Find an MLflow run by model_id and optional run_name. + + Returns: + The matching Run, or None if no run was found. + """ if run_name is not None: return self.storage.search_run(model_id=model_id, run_name=run_name) @@ -308,7 +317,11 @@ def _find_run(self, model_id: str, run_name: str | None) -> Run | None: return next(iter(runs), None) def _try_load_model(self, run_id: str, model_id: str) -> ForecastingModel | None: - """Try to load a model from MLflow, returning None on failure.""" + """Try to load a model from MLflow, returning None on failure. + + Returns: + The loaded model, or None if loading failed. + """ try: old_model = self.storage.load_run_model(run_id=run_id, model_id=model_id) except ModelNotFoundError: @@ -334,7 +347,11 @@ def _try_evaluate_model( old_model: ForecastingModel, input_data: TimeSeriesDataset, ) -> SubsetMetric | None: - """Try to evaluate a model, returning None on failure.""" + """Try to evaluate a model, returning None on failure. + + Returns: + The evaluation metrics, or None if evaluation failed. + """ try: return old_model.score(input_data) except (MissingColumnsError, ValueError) as e: @@ -346,7 +363,11 @@ def _try_evaluate_model( return None def _check_tags_compatible(self, run_tags: dict[str, str], new_tags: dict[str, str], run_id: str) -> bool: - """Check if model tags are compatible, excluding mlflow.runName.""" + """Check if model tags are compatible, excluding mlflow.runName. + + Returns: + True if tags are compatible, False otherwise. + """ old_tags = {k: v for k, v in run_tags.items() if k != "mlflow.runName"} if old_tags == new_tags: @@ -370,7 +391,11 @@ def _check_is_new_model_better( old_metrics: SubsetMetric, new_metrics: SubsetMetric, ) -> bool: - """Compare old and new model metrics to determine if the new model is better.""" + """Compare old and new model metrics to determine if the new model is better. + + Returns: + True if the new model is better, False otherwise. + """ quantile, metric_name, direction = self.model_selection_metric old_metric = old_metrics.get_metric(quantile=quantile, metric_name=metric_name) diff --git a/packages/openstef-models/src/openstef_models/models/forecasting_model.py b/packages/openstef-models/src/openstef_models/models/forecasting_model.py index ae872ddc4..b833c9886 100644 --- a/packages/openstef-models/src/openstef_models/models/forecasting_model.py +++ b/packages/openstef-models/src/openstef_models/models/forecasting_model.py @@ -154,7 +154,11 @@ class ForecastingModel(BaseModel, Predictor[TimeSeriesDataset, ForecastDataset]) @property def _forecaster(self) -> Forecaster: - """Return the forecaster, raising if not set (ensemble models override methods instead).""" + """Return the forecaster, raising if not set (ensemble models override methods instead). + + Raises: + ValueError: If no forecaster is configured. + """ if self.forecaster is None: msg = "No forecaster configured. Single-model ForecastingModel requires a forecaster." raise ValueError(msg) @@ -167,10 +171,20 @@ def config(self) -> ForecasterConfig: @property def scoring_config(self) -> ForecasterConfig: + """Returns the configuration of the underlying forecaster for scoring. + + Returns: + The forecaster configuration used for model evaluation and scoring. + """ return self._forecaster.config @property def is_fitted(self) -> bool: + """Check if the underlying forecaster has been fitted. + + Returns: + True if the forecaster has been trained, False otherwise. + """ return self._forecaster.is_fitted def fit( From a5e3d31a638bcb0aa8a1012ff42a4334093bd0c5 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 15:37:05 +0100 Subject: [PATCH 102/104] Add base models to tags MLflow Signed-off-by: Marnix van Lieshout --- .../openstef_meta/presets/forecasting_workflow.py | 1 + .../integrations/mlflow/mlflow_storage_callback.py | 12 +++++------- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py index 58df8371e..e33e59a10 100644 --- a/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py +++ b/packages/openstef-meta/src/openstef_meta/presets/forecasting_workflow.py @@ -490,6 +490,7 @@ def create_ensemble_workflow(config: EnsembleWorkflowConfig) -> CustomForecastin **config.location.tags, "ensemble_type": config.ensemble_type, "combiner_model": config.combiner_model, + "base_models": ",".join(config.base_models), **config.tags, } diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index 9225ba95a..6b4d9ad49 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -138,15 +138,13 @@ def on_fit_end( if self.model_selection_enable: self._run_model_selection(workflow=context.workflow, result=result) - model = context.workflow.model - # Determine primary hyperparams based on model structure - hyperparams = self._get_primary_hyperparams(model) + hyperparams = self._get_primary_hyperparams(context.workflow.model) # Create a new run run = self.storage.create_run( model_id=context.workflow.model_id, - tags=model.tags, + tags=context.workflow.model.tags, hyperparams=hyperparams, run_name=context.workflow.run_name, experiment_tags=context.workflow.experiment_tags, @@ -155,8 +153,8 @@ def on_fit_end( self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) # Log per-forecaster hyperparams for ensemble models - if isinstance(model, HasForecasters): - for name, forecaster in model.forecasters.items(): + if isinstance(context.workflow.model, HasForecasters): + for name, forecaster in context.workflow.model.forecasters.items(): prefixed_params = {f"{name}.{k}": str(v) for k, v in forecaster.hyperparams.model_dump().items()} self.storage.log_hyperparams(run_id=run_id, params=prefixed_params) self._logger.debug("Logged hyperparams for forecaster '%s' in run %s", name, run_id) @@ -170,7 +168,7 @@ def on_fit_end( # Store feature importance plots if self.store_feature_importance_plot: - self._store_feature_importances(model, data_path) + self._store_feature_importances(context.workflow.model, data_path) # Store the trained model self.storage.save_run_model( From 2a92cfaaaa53f61d59b03e4a970121c7adfc9d4d Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 15:52:27 +0100 Subject: [PATCH 103/104] Improve what is stored in MLFlow Signed-off-by: Marnix van Lieshout --- .../integrations/mlflow/__init__.py | 10 +- .../mlflow/mlflow_storage_callback.py | 94 +++++++++++++++---- 2 files changed, 80 insertions(+), 24 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py index ec82b574f..b95ebd898 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/__init__.py @@ -17,14 +17,16 @@ from .mlflow_storage import MLFlowStorage from .mlflow_storage_callback import ( - HasExplainableCombiner, - HasForecasters, + EnsembleFitResult, + EnsembleModel, + ExplainableEnsembleModel, MLFlowStorageCallback, ) __all__ = [ - "HasExplainableCombiner", - "HasForecasters", + "EnsembleFitResult", + "EnsembleModel", + "ExplainableEnsembleModel", "MLFlowStorage", "MLFlowStorageCallback", ] diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index 6b4d9ad49..837ec80ad 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -9,10 +9,13 @@ ensemble (EnsembleForecastingModel) workflows via protocol-based dispatch. Ensemble-specific behavior is enabled automatically when the model satisfies -the ``HasForecasters`` and ``HasExplainableCombiner`` protocols: +the ``EnsembleModel`` and ``ExplainableEnsembleModel`` protocols, and when the +fit result satisfies ``EnsembleFitResult``: - Logs combiner hyperparameters as the primary hyperparams - Logs per-forecaster hyperparameters with name-prefixed keys +- Stores per-forecaster training data as separate artifacts +- Logs per-forecaster evaluation metrics with name-prefixed keys - Stores feature importance plots for each explainable forecaster component - Stores combiner feature importance plots """ @@ -51,7 +54,7 @@ @runtime_checkable -class HasForecasters(Protocol): +class EnsembleModel(Protocol): """Protocol for ensemble models with multiple base forecasters.""" @property @@ -61,7 +64,7 @@ def forecasters(self) -> dict[str, Forecaster]: @runtime_checkable -class HasExplainableCombiner(Protocol): +class ExplainableEnsembleModel(Protocol): """Protocol for ensemble models with an explainable forecast combiner.""" @property @@ -70,6 +73,16 @@ def combiner(self) -> ExplainableForecaster: ... +@runtime_checkable +class EnsembleFitResult(Protocol): + """Protocol for fit results that contain per-forecaster results.""" + + @property + def forecaster_fit_results(self) -> dict[str, ModelFitResult]: + """Return per-forecaster fit results.""" + ... + + class MLFlowStorageCallback(BaseConfig, ForecastingCallback): """MLFlow callback for logging forecasting workflow events. @@ -153,19 +166,19 @@ def on_fit_end( self._logger.info("Created MLflow run %s for model %s", run_id, context.workflow.model_id) # Log per-forecaster hyperparams for ensemble models - if isinstance(context.workflow.model, HasForecasters): - for name, forecaster in context.workflow.model.forecasters.items(): - prefixed_params = {f"{name}.{k}": str(v) for k, v in forecaster.hyperparams.model_dump().items()} - self.storage.log_hyperparams(run_id=run_id, params=prefixed_params) - self._logger.debug("Logged hyperparams for forecaster '%s' in run %s", name, run_id) + if isinstance(context.workflow.model, EnsembleModel): + self._log_forecaster_hyperparams(context.workflow.model, run_id) - # Store the model input + # Store the model input and per-forecaster data run_path = self.storage.get_artifacts_path(model_id=context.workflow.model_id, run_id=run_id) data_path = run_path / self.storage.data_path data_path.mkdir(parents=True, exist_ok=True) result.input_dataset.to_parquet(path=data_path / "data.parquet") self._logger.info("Stored training data at %s for run %s", data_path, run_id) + if isinstance(result, EnsembleFitResult): + self._store_forecaster_data(result.forecaster_fit_results, data_path) + # Store feature importance plots if self.store_feature_importance_plot: self._store_feature_importances(context.workflow.model, data_path) @@ -179,12 +192,7 @@ def on_fit_end( self._logger.info("Stored trained model for run %s", run_id) # Format the metrics for MLflow - metrics = self.metrics_to_dict(metrics=result.metrics_full, prefix="full_") - metrics.update(self.metrics_to_dict(metrics=result.metrics_train, prefix="train_")) - if result.metrics_val is not None: - metrics.update(self.metrics_to_dict(metrics=result.metrics_val, prefix="val_")) - if result.metrics_test is not None: - metrics.update(self.metrics_to_dict(metrics=result.metrics_test, prefix="test_")) + metrics = self._collect_metrics(result) # Mark the run as finished self.storage.finalize_run(model_id=context.workflow.model_id, run_id=run_id, metrics=metrics) @@ -263,6 +271,51 @@ def _run_model_selection(self, workflow: CustomForecastingWorkflow, result: Mode ) raise SkipFitting("New model did not improve monitored metric, skipping re-fit.") + def _log_forecaster_hyperparams(self, model: EnsembleModel, run_id: str) -> None: + """Log per-forecaster hyperparameters to the run.""" + for name, forecaster in model.forecasters.items(): + prefixed_params = {f"{name}.{k}": str(v) for k, v in forecaster.hyperparams.model_dump().items()} + self.storage.log_hyperparams(run_id=run_id, params=prefixed_params) + self._logger.debug("Logged hyperparams for forecaster '%s' in run %s", name, run_id) + + def _store_forecaster_data( + self, forecaster_fit_results: dict[str, ModelFitResult], data_path: Path + ) -> None: + """Store per-forecaster training data as separate parquet files.""" + for name, forecaster_result in forecaster_fit_results.items(): + forecaster_data_path = data_path / name + forecaster_data_path.mkdir(parents=True, exist_ok=True) + forecaster_result.input_dataset.to_parquet(path=forecaster_data_path / "data.parquet") + self._logger.debug("Stored training data for forecaster '%s' at %s", name, forecaster_data_path) + + def _collect_metrics(self, result: ModelFitResult) -> dict[str, float]: + """Collect all metrics from the fit result, including per-forecaster metrics for ensembles. + + Returns: + Flat dictionary mapping metric names to values, including per-forecaster prefixed metrics. + """ + metrics = self.metrics_to_dict(metrics=result.metrics_full, prefix="full_") + metrics.update(self.metrics_to_dict(metrics=result.metrics_train, prefix="train_")) + if result.metrics_val is not None: + metrics.update(self.metrics_to_dict(metrics=result.metrics_val, prefix="val_")) + if result.metrics_test is not None: + metrics.update(self.metrics_to_dict(metrics=result.metrics_test, prefix="test_")) + + if isinstance(result, EnsembleFitResult): + for name, forecaster_result in result.forecaster_fit_results.items(): + metrics.update(self.metrics_to_dict(metrics=forecaster_result.metrics_full, prefix=f"{name}_full_")) + metrics.update(self.metrics_to_dict(metrics=forecaster_result.metrics_train, prefix=f"{name}_train_")) + if forecaster_result.metrics_val is not None: + metrics.update( + self.metrics_to_dict(metrics=forecaster_result.metrics_val, prefix=f"{name}_val_") + ) + if forecaster_result.metrics_test is not None: + metrics.update( + self.metrics_to_dict(metrics=forecaster_result.metrics_test, prefix=f"{name}_test_") + ) + + return metrics + @staticmethod def _get_primary_hyperparams(model: ForecastingModel) -> HyperParams: """Determine primary hyperparameters from the model. @@ -273,7 +326,7 @@ def _get_primary_hyperparams(model: ForecastingModel) -> HyperParams: Returns: The primary hyperparameters extracted from the model. """ - if isinstance(model, HasExplainableCombiner): + if isinstance(model, ExplainableEnsembleModel): config = getattr(model.combiner, "config", None) if config is not None: return getattr(config, "hyperparams", HyperParams()) # pyright: ignore[reportUnknownMemberType, reportReturnType] @@ -284,7 +337,7 @@ def _get_primary_hyperparams(model: ForecastingModel) -> HyperParams: @staticmethod def _store_feature_importances(model: ForecastingModel, data_path: Path) -> None: """Store feature importance plots for all explainable components of the model.""" - if isinstance(model, HasForecasters): + if isinstance(model, EnsembleModel): # Ensemble model: store per-forecaster feature importances for name, forecaster in model.forecasters.items(): if isinstance(forecaster, ExplainableForecaster): @@ -296,7 +349,7 @@ def _store_feature_importances(model: ForecastingModel, data_path: Path) -> None fig.write_html(data_path / "feature_importances.html") # pyright: ignore[reportUnknownMemberType] # Store combiner feature importances (if model has an explainable combiner) - if isinstance(model, HasExplainableCombiner): + if isinstance(model, ExplainableEnsembleModel): combiner_fi = model.combiner.feature_importances if not combiner_fi.empty: fig = model.combiner.plot_feature_importances() @@ -443,7 +496,8 @@ def metrics_to_dict(metrics: SubsetMetric, prefix: str) -> dict[str, float]: __all__ = [ - "HasExplainableCombiner", - "HasForecasters", + "EnsembleFitResult", + "EnsembleModel", + "ExplainableEnsembleModel", "MLFlowStorageCallback", ] From 1d9ff5e008fba01606a7b82c660e076e0fab7fc0 Mon Sep 17 00:00:00 2001 From: Marnix van Lieshout Date: Thu, 19 Feb 2026 15:54:31 +0100 Subject: [PATCH 104/104] Formatting Signed-off-by: Marnix van Lieshout --- .../integrations/mlflow/mlflow_storage_callback.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py index 837ec80ad..715402ed0 100644 --- a/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py +++ b/packages/openstef-models/src/openstef_models/integrations/mlflow/mlflow_storage_callback.py @@ -278,9 +278,7 @@ def _log_forecaster_hyperparams(self, model: EnsembleModel, run_id: str) -> None self.storage.log_hyperparams(run_id=run_id, params=prefixed_params) self._logger.debug("Logged hyperparams for forecaster '%s' in run %s", name, run_id) - def _store_forecaster_data( - self, forecaster_fit_results: dict[str, ModelFitResult], data_path: Path - ) -> None: + def _store_forecaster_data(self, forecaster_fit_results: dict[str, ModelFitResult], data_path: Path) -> None: """Store per-forecaster training data as separate parquet files.""" for name, forecaster_result in forecaster_fit_results.items(): forecaster_data_path = data_path / name @@ -306,13 +304,9 @@ def _collect_metrics(self, result: ModelFitResult) -> dict[str, float]: metrics.update(self.metrics_to_dict(metrics=forecaster_result.metrics_full, prefix=f"{name}_full_")) metrics.update(self.metrics_to_dict(metrics=forecaster_result.metrics_train, prefix=f"{name}_train_")) if forecaster_result.metrics_val is not None: - metrics.update( - self.metrics_to_dict(metrics=forecaster_result.metrics_val, prefix=f"{name}_val_") - ) + metrics.update(self.metrics_to_dict(metrics=forecaster_result.metrics_val, prefix=f"{name}_val_")) if forecaster_result.metrics_test is not None: - metrics.update( - self.metrics_to_dict(metrics=forecaster_result.metrics_test, prefix=f"{name}_test_") - ) + metrics.update(self.metrics_to_dict(metrics=forecaster_result.metrics_test, prefix=f"{name}_test_")) return metrics