diff --git a/jupiter_examples/nm_sigma_estimation_comparison.ipynb b/jupiter_examples/nm_sigma_estimation_comparison.ipynb index f986d13..10f695f 100644 --- a/jupiter_examples/nm_sigma_estimation_comparison.ipynb +++ b/jupiter_examples/nm_sigma_estimation_comparison.ipynb @@ -250,7 +250,7 @@ " \"\"\"\n", " generator = NMGenerator()\n", " mixture = NormalMeanMixtures(\"canonical\", sigma=real_sigma, distribution=distribution)\n", - " return generator.canonical_generate(mixture, sample_len)\n", + " return generator.generate(mixture, sample_len)\n", "\n", "def estimate_sigma_eigenvalue_based(sample, real_sigma, search_area, a, b):\n", " sample_len = len(sample)\n", diff --git a/src/algorithms/semiparam_algorithms/nvm_semi_param_algorithms/g_estimation_given_mu_rqmc_based.py b/src/algorithms/semiparam_algorithms/nvm_semi_param_algorithms/g_estimation_given_mu_rqmc_based.py index 2f3da81..fc089cf 100644 --- a/src/algorithms/semiparam_algorithms/nvm_semi_param_algorithms/g_estimation_given_mu_rqmc_based.py +++ b/src/algorithms/semiparam_algorithms/nvm_semi_param_algorithms/g_estimation_given_mu_rqmc_based.py @@ -7,7 +7,8 @@ from scipy.integrate import quad_vec from scipy.special import gamma -from src.algorithms.support_algorithms.rqmc import RQMC +from src.algorithms.support_algorithms.integrator import Integrator +from src.algorithms.support_algorithms.rqmc import RQMCIntegrator from src.estimators.estimate_result import EstimateResult MU_DEFAULT_VALUE = 1.0 @@ -68,7 +69,7 @@ def __init__(self, sample: Optional[_typing.NDArray[np.float64]] = None, **kwarg self.x_data, self.grid_size, self.integration_tolerance, - self.integration_limit, + self.integration_limit ) = self._validate_kwargs(self.n, **kwargs) self.denominator: float = 2 * math.pi * self.n self.precompute_gamma_grid() @@ -162,15 +163,15 @@ def second_v_integrand(self, v: float, x: float) -> np.ndarray: x_power = self.x_powers[x][idx] return (self.second_u_integrals[idx] * x_power) / gamma_val - def 
compute_integrals_for_x(self, x: float) -> float: + def compute_integrals_for_x(self, x: float, integrator: Integrator = RQMCIntegrator()) -> float: """Compute integrals using RQMC for v-integration.""" - first_integral = RQMC(lambda t: np.sum(self.first_v_integrand(t * self.v_value, x)) * self.v_value).rqmc()[0] + first_integral = integrator.compute(func=lambda t: np.sum(self.first_v_integrand(t * self.v_value, x)) * self.v_value).value - second_integral = RQMC(lambda t: np.sum(self.second_v_integrand(-t * self.v_value, x)) * self.v_value).rqmc()[0] + second_integral = integrator.compute(func=lambda t: np.sum(self.second_v_integrand(-t * self.v_value, x)) * self.v_value).value total = (first_integral + second_integral) / self.denominator return max(0.0, total.real) def algorithm(self, sample: np._typing.NDArray) -> EstimateResult: - y_data = [self.compute_integrals_for_x(x) for x in self.x_data] + y_data = [self.compute_integrals_for_x(x, RQMCIntegrator()) for x in self.x_data] return EstimateResult(list_value=y_data, success=True) diff --git a/src/algorithms/support_algorithms/integrator.py b/src/algorithms/support_algorithms/integrator.py new file mode 100644 index 0000000..8ee0fb7 --- /dev/null +++ b/src/algorithms/support_algorithms/integrator.py @@ -0,0 +1,20 @@ +from dataclasses import dataclass +from typing import Any, Protocol, Callable, Optional + + +@dataclass +class IntegrationResult: + value: float + error: float + message: Optional[dict[str, Any]] | None = None + + +class Integrator(Protocol): + + """Base class for integral calculation""" + + def __init__(self) -> None: + ... + + def compute(self, func: Callable) -> IntegrationResult: + ... 
diff --git a/src/algorithms/support_algorithms/quad_integrator.py b/src/algorithms/support_algorithms/quad_integrator.py new file mode 100644 index 0000000..616eca2 --- /dev/null +++ b/src/algorithms/support_algorithms/quad_integrator.py @@ -0,0 +1,60 @@ +from typing import Callable, Any, Sequence + +from scipy.integrate import quad +from src.algorithms.support_algorithms.integrator import IntegrationResult + +class QuadIntegrator: + + def __init__( + self, + a: float = 0, + b: float = 1, + args: tuple[Any, ...] = (), + full_output: int = 0, + epsabs: float | int = 1.49e-08, + epsrel: float | int = 1.49e-08, + limit: float | int = 50, + points: Sequence[float | int] | None = None, + weight: float | int | None = None, + wvar: Any = None, + wopts: Any = None, + maxp1: float | int = 50, + limlst: int = 50, + complex_func: bool = False, + ): + self.params = { + 'a': a, + 'b': b, + 'args': args, + 'full_output': full_output, + 'epsabs': epsabs, + 'epsrel': epsrel, + 'limit': limit, + 'points': points, + 'weight': weight, + 'wvar': wvar, + 'wopts': wopts, + 'maxp1': maxp1, + 'limlst': limlst, + 'complex_func': complex_func + } + + def compute(self, func: Callable) -> IntegrationResult: + + """ + Compute integral via quad integrator + + Args: + func: integrated function + + Returns: moment approximation and error tolerance + """ + + verbose = self.params.pop('full_output', False) + result = quad(func, **self.params) + if verbose: + value, error, message = result + else: + value, error = result + message = None + return IntegrationResult(value, error, message) diff --git a/src/algorithms/support_algorithms/rqmc.py b/src/algorithms/support_algorithms/rqmc.py index 4b10c9d..543d2cb 100644 --- a/src/algorithms/support_algorithms/rqmc.py +++ b/src/algorithms/support_algorithms/rqmc.py @@ -5,6 +5,8 @@ import scipy from numba import njit +from src.algorithms.support_algorithms.integrator import IntegrationResult + BITS = 30 """Number of bits in XOR. 
Should be less than 64""" NUMBA_FAST_MATH = True @@ -126,7 +128,8 @@ def _update( Returns:Updated mean of all rows - """ + + """ values = [] sum_of_new: float = 0.0 for i in range(self.count): @@ -212,9 +215,9 @@ def _xor_float(a: float, b: float) -> float: Returns: XOR float value """ - a = int(a * (2**BITS)) - b = int(b * (2**BITS)) - return np.bitwise_xor(a, b) / 2**BITS + a = int(a * (2 ** BITS)) + b = int(b * (2 ** BITS)) + return np.bitwise_xor(a, b) / 2 ** BITS def __call__(self) -> tuple[float, float]: """Interface for users @@ -223,3 +226,51 @@ def __call__(self) -> tuple[float, float]: """ return self.rqmc() + + +class RQMCIntegrator: + """ + Randomize Quasi Monte Carlo Method + + Args: + error_tolerance: pre-specified error tolerance + count: number of rows of random values matrix + base_n: number of columns of random values matrix + i_max: allowed number of cycles + a: parameter for quantile of normal distribution + + """ + + def __init__( + self, + error_tolerance: float = 1e-6, + count: int = 25, + base_n: int = 2 ** 6, + i_max: int = 100, + a: float = 0.00047, + ): + self.error_tolerance = error_tolerance + self.count = count + self.base_n = base_n + self.i_max = i_max + self.a = a + + def compute(self, func: Callable) -> IntegrationResult: + """ + Compute integral via RQMC integrator + + Args: + func: integrated function + + Returns: moment approximation and error tolerance + """ + result = RQMC( + func, + error_tolerance=self.error_tolerance, + count=self.count, + base_n=self.base_n, + i_max=self.i_max, + a=self.a, + )() + return IntegrationResult(result[0], result[1]) + diff --git a/src/generators/nm_generator.py b/src/generators/nm_generator.py index 4c95b0f..5ecfe4a 100644 --- a/src/generators/nm_generator.py +++ b/src/generators/nm_generator.py @@ -9,7 +9,7 @@ class NMGenerator(AbstractGenerator): @staticmethod - def classical_generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: + def generate(mixture: AbstractMixtures, size: int) -> 
tpg.NDArray: """Generate a sample of given size. Classical form of NMM Args: @@ -27,25 +27,6 @@ def classical_generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: raise ValueError("Mixture must be NormalMeanMixtures") mixing_values = mixture.params.distribution.rvs(size=size) normal_values = scipy.stats.norm.rvs(size=size) + if mixture.mixture_form == "canonical": + return mixing_values + mixture.params.sigma * normal_values return mixture.params.alpha + mixture.params.beta * mixing_values + mixture.params.gamma * normal_values - - @staticmethod - def canonical_generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: - """Generate a sample of given size. Canonical form of NMM - - Args: - mixture: Normal Mean Mixture - size: length of sample - - Returns: sample of given size - - Raises: - ValueError: If mixture is not a Normal Mean Mixture - - """ - - if not isinstance(mixture, NormalMeanMixtures): - raise ValueError("Mixture must be NormalMeanMixtures") - mixing_values = mixture.params.distribution.rvs(size=size) - normal_values = scipy.stats.norm.rvs(size=size) - return mixing_values + mixture.params.sigma * normal_values diff --git a/src/generators/nmv_generator.py b/src/generators/nmv_generator.py index 5ff3221..8270a1a 100644 --- a/src/generators/nmv_generator.py +++ b/src/generators/nmv_generator.py @@ -9,7 +9,7 @@ class NMVGenerator(AbstractGenerator): @staticmethod - def classical_generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: + def generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: """Generate a sample of given size. 
Classical form of NMVM Args: @@ -27,29 +27,10 @@ def classical_generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: raise ValueError("Mixture must be NormalMeanMixtures") mixing_values = mixture.params.distribution.rvs(size=size) normal_values = scipy.stats.norm.rvs(size=size) + if mixture.mixture_form == "canonical": + return mixture.params.alpha + mixture.params.mu * mixing_values + (mixing_values ** 0.5) * normal_values return ( mixture.params.alpha + mixture.params.beta * mixing_values + mixture.params.gamma * (mixing_values**0.5) * normal_values - ) - - @staticmethod - def canonical_generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: - """Generate a sample of given size. Canonical form of NMVM - - Args: - mixture: Normal Mean Variance Mixtures - size: length of sample - - Returns: sample of given size - - Raises: - ValueError: If mixture type is not Normal Mean Variance Mixtures - - """ - - if not isinstance(mixture, NormalMeanVarianceMixtures): - raise ValueError("Mixture must be NormalMeanMixtures") - mixing_values = mixture.params.distribution.rvs(size=size) - normal_values = scipy.stats.norm.rvs(size=size) - return mixture.params.alpha + mixture.params.mu * mixing_values + (mixing_values**0.5) * normal_values + ) \ No newline at end of file diff --git a/src/generators/nv_generator.py b/src/generators/nv_generator.py index faa55f5..712e918 100644 --- a/src/generators/nv_generator.py +++ b/src/generators/nv_generator.py @@ -9,7 +9,7 @@ class NVGenerator(AbstractGenerator): @staticmethod - def classical_generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: + def generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: """Generate a sample of given size. 
Classical form of NVM Args: @@ -27,25 +27,6 @@ def classical_generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: raise ValueError("Mixture must be NormalMeanMixtures") mixing_values = mixture.params.distribution.rvs(size=size) normal_values = scipy.stats.norm.rvs(size=size) - return mixture.params.alpha + mixture.params.gamma * (mixing_values**0.5) * normal_values - - @staticmethod - def canonical_generate(mixture: AbstractMixtures, size: int) -> tpg.NDArray: - """Generate a sample of given size. Canonical form of NVM - - Args: - mixture: Normal Variance Mixtures - size: length of sample - - Returns: sample of given size - - Raises: - ValueError: If mixture type is not Normal Variance Mixtures - - """ - - if not isinstance(mixture, NormalVarianceMixtures): - raise ValueError("Mixture must be NormalMeanMixtures") - mixing_values = mixture.params.distribution.rvs(size=size) - normal_values = scipy.stats.norm.rvs(size=size) - return mixture.params.alpha + (mixing_values**0.5) * normal_values + if mixture.mixture_form == "canonical": + return mixture.params.alpha + (mixing_values ** 0.5) * normal_values + return mixture.params.alpha + mixture.params.gamma * (mixing_values**0.5) * normal_values \ No newline at end of file diff --git a/src/mixtures/abstract_mixture.py b/src/mixtures/abstract_mixture.py index 13c8e42..503e061 100644 --- a/src/mixtures/abstract_mixture.py +++ b/src/mixtures/abstract_mixture.py @@ -1,10 +1,14 @@ from abc import ABCMeta, abstractmethod from dataclasses import fields -from typing import Any +from typing import Any, List, Tuple, Union, Dict, Type +import numpy as np +from numpy.typing import NDArray from scipy.stats import rv_continuous from scipy.stats.distributions import rv_frozen +from src.algorithms.support_algorithms.integrator import Integrator +from src.algorithms.support_algorithms.rqmc import RQMCIntegrator # default integrator class AbstractMixtures(metaclass=ABCMeta): """Base class for Mixtures""" @@ -12,14 +16,24 @@ 
class AbstractMixtures(metaclass=ABCMeta): _classical_collector: Any _canonical_collector: Any - @abstractmethod - def __init__(self, mixture_form: str, **kwargs: Any) -> None: + def __init__( + self, + mixture_form: str, + integrator_cls: Type[Integrator] = RQMCIntegrator, + integrator_params: Dict[str, Any] = None, + **kwargs: Any + ) -> None: """ - Args: - mixture_form: Form of Mixture classical or Canonical - **kwargs: Parameters of Mixture + mixture_form: Form of Mixture classical or canonical + integrator_cls: Class implementing Integrator protocol (default: RQMCIntegrator) + integrator_params: Parameters for integrator constructor (default: {{}}) + **kwargs: Parameters of Mixture (alpha, gamma, etc.) """ + self.mixture_form = mixture_form + self.integrator_cls = integrator_cls + self.integrator_params = integrator_params or {} + if mixture_form == "classical": self.params = self._params_validation(self._classical_collector, kwargs) elif mixture_form == "canonical": @@ -28,40 +42,88 @@ def __init__(self, mixture_form: str, **kwargs: Any) -> None: raise AssertionError(f"Unknown mixture form: {mixture_form}") @abstractmethod - def compute_moment(self, n: int, params: dict) -> tuple[float, float]: ... + def _compute_moment(self, n: int) -> Tuple[float, float]: + ... + + def compute_moment( + self, + x: Union[List[int], int, NDArray[np.float64]] + ) -> Union[List[Tuple[float, float]], Tuple[float, float], NDArray[Any]]: + if isinstance(x, np.ndarray): + return np.array([self._compute_moment(xp) for xp in x], dtype=object) + elif isinstance(x, list): + return [self._compute_moment(xp) for xp in x] + elif isinstance(x, int): + return self._compute_moment(x) + else: + raise TypeError(f"Unsupported type for x: {type(x)}") @abstractmethod - def compute_cdf(self, x: float, params: dict) -> tuple[float, float]: ... + def _compute_pdf(self, x: float) -> Tuple[float, float]: + ... 
+ + def compute_pdf( + self, + x: Union[List[float], float, NDArray[np.float64]] + ) -> Union[List[Tuple[float, float]], Tuple[float, float], NDArray[Any]]: + if isinstance(x, np.ndarray): + return np.array([self._compute_pdf(xp) for xp in x], dtype=object) + elif isinstance(x, list): + return [self._compute_pdf(xp) for xp in x] + elif isinstance(x, float): + return self._compute_pdf(x) + else: + raise TypeError(f"Unsupported type for x: {type(x)}") @abstractmethod - def compute_pdf(self, x: float, params: dict) -> tuple[float, float]: ... + def _compute_logpdf(self, x: float) -> Tuple[float, float]: + ... + + def compute_logpdf( + self, + x: Union[List[float], float, NDArray[np.float64]] + ) -> Union[List[Tuple[float, float]], Tuple[float, float], NDArray[Any]]: + if isinstance(x, np.ndarray): + return np.array([self._compute_logpdf(xp) for xp in x], dtype=object) + elif isinstance(x, list): + return [self._compute_logpdf(xp) for xp in x] + elif isinstance(x, float): + return self._compute_logpdf(x) + else: + raise TypeError(f"Unsupported type for x: {type(x)}") @abstractmethod - def compute_logpdf(self, x: float, params: dict) -> tuple[float, float]: ... - - def _params_validation(self, data_collector: Any, params: dict[str, float | rv_continuous | rv_frozen]) -> Any: - """Mixture Parameters Validation - - Args: - data_collector: Dataclass that collect parameters of Mixture - params: Input parameters - - Returns: Instance of dataclass - - Raises: - ValueError: If given parameters is unexpected - ValueError: If parameter type is invalid - ValueError: If parameters age not given - - """ - + def _compute_cdf(self, x: float) -> Tuple[float, float]: + ... 
+ + def compute_cdf( + self, + x: Union[List[float], float, NDArray[np.float64]] + ) -> Union[List[Tuple[float, float]], Tuple[float, float], NDArray[Any]]: + if isinstance(x, np.ndarray): + return np.array([self._compute_cdf(xp) for xp in x], dtype=object) + elif isinstance(x, list): + return [self._compute_cdf(xp) for xp in x] + elif isinstance(x, float): + return self._compute_cdf(x) + else: + raise TypeError(f"Unsupported type for x: {type(x)}") + + def _params_validation( + self, + data_collector: Any, + params: dict[str, float | rv_continuous | rv_frozen] + ) -> Any: + """Mixture Parameters Validation""" dataclass_fields = fields(data_collector) if len(params) != len(dataclass_fields): raise ValueError(f"Expected {len(dataclass_fields)} arguments, got {len(params)}") - names_and_types = dict((field.name, field.type) for field in dataclass_fields) - for pair in params.items(): - if pair[0] not in names_and_types: - raise ValueError(f"Unexpected parameter {pair[0]}") - if not isinstance(pair[1], names_and_types[pair[0]]): - raise ValueError(f"Type missmatch: {pair[0]} should be {names_and_types[pair[0]]}, not {type(pair[1])}") + names_and_types = {field.name: field.type for field in dataclass_fields} + for name, value in params.items(): + if name not in names_and_types: + raise ValueError(f"Unexpected parameter {name}") + if not isinstance(value, names_and_types[name]): + raise ValueError( + f"Type mismatch: {name} should be {names_and_types[name]}, not {type(value)}" + ) return data_collector(**params) diff --git a/src/mixtures/nm_mixture.py b/src/mixtures/nm_mixture.py index 33c7580..a2eaf76 100644 --- a/src/mixtures/nm_mixture.py +++ b/src/mixtures/nm_mixture.py @@ -1,261 +1,104 @@ from dataclasses import dataclass -from typing import Any +from typing import Any, Type, Dict, Tuple import numpy as np -from scipy.integrate import quad from scipy.special import binom from scipy.stats import norm, rv_continuous from scipy.stats.distributions import rv_frozen 
-from src.algorithms.support_algorithms.log_rqmc import LogRQMC -from src.algorithms.support_algorithms.rqmc import RQMC +from src.algorithms.support_algorithms.integrator import Integrator +from src.algorithms.support_algorithms.rqmc import RQMCIntegrator from src.mixtures.abstract_mixture import AbstractMixtures - @dataclass class _NMMClassicDataCollector: - """TODO: Change typing from float | int | etc to Protocol with __addition__ __multiplication__ __subtraction__""" - - """Data Collector for parameters of classical NMM""" alpha: float | int | np.int64 - beta: float | int | np.int64 + beta: float | int | np.int64 gamma: float | int | np.int64 distribution: rv_frozen | rv_continuous - @dataclass class _NMMCanonicalDataCollector: - """TODO: Change typing from float | int | etc to Protocol with __addition__ __multiplication__ __subtraction__""" - - """Data Collector for parameters of canonical NMM""" sigma: float | int | np.int64 distribution: rv_frozen | rv_continuous - class NormalMeanMixtures(AbstractMixtures): _classical_collector = _NMMClassicDataCollector _canonical_collector = _NMMCanonicalDataCollector - def __init__(self, mixture_form: str, **kwargs: Any) -> None: - """ - Read Doc of Parent Method - """ - - super().__init__(mixture_form, **kwargs) + def __init__( + self, + mixture_form: str, + integrator_cls: Type[Integrator] = RQMCIntegrator, + integrator_params: Dict[str, Any] = None, + **kwargs: Any + ) -> None: + super().__init__(mixture_form, integrator_cls=integrator_cls, integrator_params=integrator_params, **kwargs) def _params_validation(self, data_collector: Any, params: dict[str, float | rv_continuous | rv_frozen]) -> Any: - """ - Read parent method doc - - Raises: - ValueError: If canonical Mixture has negative sigma parameter - - """ - data_class = super()._params_validation(data_collector, params) if hasattr(data_class, "sigma") and data_class.sigma <= 0: - raise ValueError("Sigma cant be zero or negative") + raise ValueError("Sigma can't 
be zero or negative") if hasattr(data_class, "gamma") and data_class.gamma == 0: - raise ValueError("Gamma cant be zero") + raise ValueError("Gamma can't be zero") return data_class - def _classical_moment(self, n: int, params: dict) -> tuple[float, float]: - """ - Compute n-th moment of classical NMM - - Args: - n (): Moment ordinal - params (): Parameters of integration algorithm - - Returns: moment approximation and error tolerance - - """ - mixture_moment = 0 - error_tolerance = 0 - for k in range(0, n + 1): - for l in range(0, k + 1): - coefficient = binom(n, n - k) * binom(k, k - l) * (self.params.beta ** (k - l)) * (self.params.gamma**l) - mixing_moment = quad(lambda u: self.params.distribution.ppf(u) ** (k - l), 0, 1, **params) - error_tolerance += (self.params.beta ** (k - l)) * mixing_moment[1] - mixture_moment += coefficient * (self.params.alpha ** (n - k)) * mixing_moment[0] * norm.moment(l) - return mixture_moment, error_tolerance - - def _canonical_moment(self, n: int, params: dict) -> tuple[float, float]: - """ - Compute n-th moment of canonical NMM - - Args: - n (): Moment ordinal - params (): Parameters of integration algorithm - - Returns: moment approximation and error tolerance - - """ - mixture_moment = 0 - error_tolerance = 0 - for k in range(0, n + 1): - coefficient = binom(n, n - k) * (self.params.sigma**k) - mixing_moment = quad(lambda u: self.params.distribution.ppf(u) ** (n - k), 0, 1, **params) - error_tolerance += mixing_moment[1] - mixture_moment += coefficient * mixing_moment[0] * norm.moment(k) - return mixture_moment, error_tolerance - - def compute_moment(self, n: int, params: dict) -> tuple[float, float]: - """ - Compute n-th moment of NMM - - Args: - n (): Moment ordinal - params (): Parameters of integration algorithm - - Returns: moment approximation and error tolerance - - """ - if isinstance(self.params, _NMMClassicDataCollector): - return self._classical_moment(n, params) - return self._canonical_moment(n, params) - - def 
_canonical_compute_cdf(self, x: float, params: dict) -> tuple[float, float]: - """ - Equation for canonical cdf - Args: - x (): point - params (): parameters of RQMC algorithm - - Returns: computed cdf and error tolerance - - """ - rqmc = RQMC(lambda u: norm.cdf((x - self.params.distribution.ppf(u)) / np.abs(self.params.sigma)), **params) - return rqmc() - - def _classical_compute_cdf(self, x: float, params: dict) -> tuple[float, float]: - """ - Equation for classic cdf - Args: - x (): point - params (): parameters of RQMC algorithm - - Returns: computed cdf and error tolerance - - """ - rqmc = RQMC( - lambda u: norm.cdf( - (x - self.params.alpha - self.params.beta * self.params.distribution.ppf(u)) / np.abs(self.params.gamma) - ), - **params - ) - return rqmc() - - def compute_cdf(self, x: float, params: dict) -> tuple[float, float]: - """ - Choose equation for cdf estimation depends on Mixture form - Args: - x (): point - params (): parameters of RQMC algorithm - - Returns: Computed pdf and error tolerance - - """ - if isinstance(self.params, _NMMCanonicalDataCollector): - return self._canonical_compute_cdf(x, params) - return self._classical_compute_cdf(x, params) - - def _canonical_compute_pdf(self, x: float, params: dict) -> tuple[float, float]: - """ - Equation for canonical pdf - Args: - x (): point - params (): parameters of RQMC algorithm - - Returns: computed pdf and error tolerance - - """ - rqmc = RQMC( - lambda u: (1 / np.abs(self.params.sigma)) - * norm.pdf((x - self.params.distribution.ppf(u)) / np.abs(self.params.sigma)), - **params - ) - return rqmc() - - def _classical_compute_pdf(self, x: float, params: dict) -> tuple[float, float]: - """ - Equation for classic pdf - Args: - x (): point - params (): parameters of RQMC algorithm - - Returns: computed pdf and error tolerance - - """ - rqmc = RQMC( - lambda u: (1 / np.abs(self.params.gamma)) - * norm.pdf( - (x - self.params.alpha - self.params.beta * self.params.distribution.ppf(u)) / 
np.abs(self.params.gamma) - ), - **params - ) - return rqmc() - - def compute_pdf(self, x: float, params: dict) -> tuple[float, float]: - """ - Choose equation for pdf estimation depends on Mixture form - Args: - x (): point - params (): parameters of RQMC algorithm - - Returns: Computed pdf and error tolerance - - """ - if isinstance(self.params, _NMMCanonicalDataCollector): - return self._canonical_compute_pdf(x, params) - return self._classical_compute_pdf(x, params) - - def _classical_compute_log_pdf(self, x: float, params: dict) -> tuple[float, float]: - """ - Equation for classic log pdf - Args: - x (): point - params (): parameters of LogRQMC algorithm - - Returns: computed log pdf and error tolerance - - """ - rqmc = LogRQMC( - lambda u: np.log(1 / np.abs(self.params.gamma)) - + norm.logpdf( - (x - self.params.alpha - self.params.beta * self.params.distribution.ppf(u)) / np.abs(self.params.gamma) - ), - **params - ) - return rqmc() - - def _canonical_compute_log_pdf(self, x: float, params: dict) -> tuple[float, float]: - """ - Equation for canonical log pdf - Args: - x (): point - params (): parameters of LogRQMC algorithm - - Returns: computed log pdf and error tolerance - - """ - rqmc = LogRQMC( - lambda u: np.log(1 / np.abs(self.params.sigma)) - + norm.logpdf((x - self.params.distribution.ppf(u)) / np.abs(self.params.sigma)), - **params - ) - return rqmc() - - def compute_logpdf(self, x: float, params: dict) -> tuple[float, float]: - """ - Choose equation for log pdf estimation depends on Mixture form - Args: - x (): point - params (): parameters of LogRQMC algorithm - - Returns: Computed log pdf and error tolerance - - """ - if isinstance(self.params, _NMMCanonicalDataCollector): - return self._canonical_compute_log_pdf(x, params) - return self._classical_compute_log_pdf(x, params) + def _compute_moment(self, n: int) -> Tuple[float, float]: + mixture_moment = 0.0 + error = 0.0 + if self.mixture_form == "classical": + for k in range(n + 1): + for l in 
range(k + 1): + coeff = binom(n, n - k) * binom(k, k - l) + def mix(u: float) -> float: + return ( + self.params.distribution.ppf(u) ** (k - l) + ) + res = self.integrator_cls(**(self.integrator_params or {})).compute(mix) + mixture_moment += coeff * (self.params.beta ** (k - l)) * (self.params.gamma ** l) * (self.params.alpha ** (n - k)) * res.value * norm.moment(l) + error += coeff * (self.params.beta ** (k - l)) * (self.params.gamma ** l) * (self.params.alpha ** (n - k)) * res.error * norm.moment(l) + else: + for k in range(n + 1): + coeff = binom(n, n - k) + def mix(u: float) -> float: + return self.params.distribution.ppf(u) ** (n - k) + res = self.integrator_cls(**(self.integrator_params or {})).compute(mix) + mixture_moment += coeff * (self.params.sigma ** k) * res.value * norm.moment(k) + error += coeff * (self.params.sigma ** k) * res.error * norm.moment(k) + return mixture_moment, error + + def _compute_cdf(self, x: float) -> Tuple[float, float]: + if self.mixture_form == "classical": + def fn(u: float) -> float: + p = self.params.distribution.ppf(u) + return norm.cdf((x - self.params.alpha - self.params.beta * p) / abs(self.params.gamma)) + else: + def fn(u: float) -> float: + p = self.params.distribution.ppf(u) + return norm.cdf((x - p) / abs(self.params.sigma)) + res = self.integrator_cls(**(self.integrator_params or {})).compute(fn) + return res.value, res.error + + def _compute_pdf(self, x: float) -> Tuple[float, float]: + if self.mixture_form == "classical": + def fn(u: float) -> float: + p = self.params.distribution.ppf(u) + return (1 / abs(self.params.gamma)) * norm.pdf((x - self.params.alpha - self.params.beta * p) / abs(self.params.gamma)) + else: + def fn(u: float) -> float: + p = self.params.distribution.ppf(u) + return (1 / abs(self.params.sigma)) * norm.pdf((x - p) / abs(self.params.sigma)) + res = self.integrator_cls(**(self.integrator_params or {})).compute(fn) + return res.value, res.error + + def _compute_logpdf(self, x: float) -> 
Tuple[float, float]: + if self.mixture_form == "classical": + def fn(u: float) -> float: + p = self.params.distribution.ppf(u) + return np.log(1 / abs(self.params.gamma)) + norm.logpdf((x - self.params.alpha - self.params.beta * p) / abs(self.params.gamma)) + else: + def fn(u: float) -> float: + p = self.params.distribution.ppf(u) + return np.log(1 / abs(self.params.sigma)) + norm.logpdf((x - p) / abs(self.params.sigma)) + res = self.integrator_cls(**(self.integrator_params or {})).compute(fn) + return res.value, res.error diff --git a/src/mixtures/nmv_mixture.py b/src/mixtures/nmv_mixture.py index c090ff3..bc0f575 100644 --- a/src/mixtures/nmv_mixture.py +++ b/src/mixtures/nmv_mixture.py @@ -1,191 +1,113 @@ from dataclasses import dataclass from functools import lru_cache -from typing import Any +from typing import Any, Type, Dict, Tuple import numpy as np from scipy.special import binom -from scipy.stats import geninvgauss, norm, rv_continuous +from scipy.stats import norm, rv_continuous from scipy.stats.distributions import rv_frozen +from src.algorithms.support_algorithms.integrator import Integrator +from src.algorithms.support_algorithms.rqmc import RQMCIntegrator from src.algorithms.support_algorithms.log_rqmc import LogRQMC -from src.algorithms.support_algorithms.rqmc import RQMC from src.mixtures.abstract_mixture import AbstractMixtures - @dataclass class _NMVMClassicDataCollector: - """TODO: Change typing from float | int | etc to Protocol with __addition__ __multiplication__ __subtraction__""" - - """Data Collector for parameters of classical NMVM""" alpha: float | int | np.int64 - beta: float | int | np.int64 + beta: float | int | np.int64 gamma: float | int | np.int64 distribution: rv_frozen | rv_continuous - @dataclass class _NMVMCanonicalDataCollector: - """TODO: Change typing from float | int | etc to Protocol with __addition__ __multiplication__ __subtraction__""" - - """Data Collector for parameters of canonical NMVM""" alpha: float | int | 
np.int64 - mu: float | int | np.int64 + mu: float | int | np.int64 distribution: rv_frozen | rv_continuous - class NormalMeanVarianceMixtures(AbstractMixtures): _classical_collector = _NMVMClassicDataCollector _canonical_collector = _NMVMCanonicalDataCollector - def __init__(self, mixture_form: str, **kwargs: Any) -> None: - super().__init__(mixture_form, **kwargs) - - def _classical_moment(self, n: int, params: dict) -> tuple[float, float]: - """ - Compute n-th moment of classical NMM - - Args: - n (): Moment ordinal - params (): Parameters of integration algorithm - - Returns: moment approximation and error tolerance - - """ - - def integral_func(u: float) -> float: - result = 0 - for k in range(0, n + 1): - for l in range(0, k + 1): - result += ( - binom(n, n - k) - * binom(k, k - l) - * (self.params.beta ** (k - l)) - * (self.params.gamma**l) - * self.params.distribution.ppf(u) ** (k - l / 2) - * (self.params.alpha ** (n - k)) - * norm.moment(l) - ) - return result - - rqmc = RQMC(lambda u: integral_func(u), **params) - return rqmc() - - def _canonical_moment(self, n: int, params: dict) -> tuple[float, float]: - """ - Compute n-th moment of classical NMM - - Args: - n (): Moment ordinal - params (): Parameters of integration algorithm - - Returns: moment approximation and error tolerance - - """ - - def integral_func(u: float) -> float: - result = 0 - for k in range(0, n + 1): - for l in range(0, k + 1): - result += ( - binom(n, n - k) - * binom(k, k - l) - * (self.params.nu ** (k - l)) - * self.params.distribution.ppf(u) ** (k - l / 2) - * (self.params.alpha ** (n - k)) - * norm.moment(l) - ) - return result - - rqmc = RQMC(lambda u: integral_func(u), **params) - return rqmc() - - def compute_moment(self, n: int, params: dict) -> tuple[float, float]: - if isinstance(self.params, _NMVMClassicDataCollector): - return self._classical_moment(n, params) - return self._canonical_moment(n, params) - - def _classical_cdf(self, x: float, params: dict) -> tuple[float, 
class NormalMeanVarianceMixtures(AbstractMixtures):
    """Normal mean-variance mixture (NMVM) in classical or canonical form.

    Classical parameters: alpha, beta, gamma and a mixing distribution;
    canonical parameters: alpha, mu and a mixing distribution.  Every
    quantity is expressed as an integral over u in (0, 1) of the mixing
    distribution's ppf and approximated with a pluggable integrator.
    """

    _classical_collector = _NMVMClassicDataCollector
    _canonical_collector = _NMVMCanonicalDataCollector

    def __init__(
        self,
        mixture_form: str,
        integrator_cls: Type[Integrator] = RQMCIntegrator,
        integrator_params: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        """Create a mixture.

        Args:
            mixture_form: "classical" or "canonical".
            integrator_cls: class implementing the Integrator protocol.
            integrator_params: keyword arguments for the integrator constructor.
            kwargs: mixture parameters forwarded to the parameter collector.
        """
        super().__init__(mixture_form, integrator_cls=integrator_cls, integrator_params=integrator_params, **kwargs)
        # Store the integrator configuration on the instance, mirroring
        # NormalVarianceMixtures: the _compute_* methods below read these
        # attributes and the base class is not visible here to confirm it
        # stores them itself — TODO confirm against AbstractMixtures.
        self.integrator_cls = integrator_cls
        self.integrator_params = integrator_params or {}

    def _integrate(self, integrand: Callable[[float], float]) -> Tuple[float, float]:
        """Run one fresh integrator over *integrand*; return (value, error)."""
        res = self.integrator_cls(**self.integrator_params).compute(integrand)
        return res.value, res.error

    def _compute_moment(self, n: int) -> Tuple[float, float]:
        """Approximate the n-th raw moment.

        Uses the binomial expansion of (alpha + drift*X + scale*sqrt(X)*Z)**n
        with Z ~ N(0, 1) and X the mixing variable.  Returns (value, error).
        """

        def integrand(u: float) -> float:
            # ppf is loop-invariant — hoist it out of the double loop.
            ppf = self.params.distribution.ppf(u)
            total = 0.0
            for k in range(n + 1):
                for l in range(k + 1):
                    if self.mixture_form == "classical":
                        coeff = (self.params.beta ** (k - l)) * (self.params.gamma ** l)
                    else:
                        coeff = self.params.mu ** (k - l)
                    total += (
                        binom(n, n - k)
                        * binom(k, k - l)
                        * coeff
                        # alpha ** (n - k) was present in the pre-refactor
                        # _classical_moment/_canonical_moment but dropped in
                        # the refactor; without it every moment of a shifted
                        # (alpha != 0) mixture is wrong.
                        * (self.params.alpha ** (n - k))
                        * (ppf ** (k - l / 2))
                        * norm.moment(l)
                    )
            return total

        return self._integrate(integrand)

    def _compute_cdf(self, x: float) -> Tuple[float, float]:
        """Approximate the CDF at x.  Returns (value, error)."""

        def integrand(u: float) -> float:
            ppf = self.params.distribution.ppf(u)
            if self.mixture_form == "classical":
                # The drift term beta/gamma * sqrt(ppf) was present in the
                # pre-refactor _classical_cdf but dropped in the refactor;
                # restore it so classical CDFs with beta != 0 are correct.
                point = (x - self.params.alpha) / (np.sqrt(ppf) * self.params.gamma) - (
                    self.params.beta / self.params.gamma
                ) * np.sqrt(ppf)
                return norm.cdf(point)
            return norm.cdf((x - self.params.alpha) / np.sqrt(ppf) - self.params.mu * np.sqrt(ppf))

        return self._integrate(integrand)

    def _compute_pdf(self, x: float) -> Tuple[float, float]:
        """Approximate the PDF at x.  Returns (value, error).

        The exponential drift factor is pulled outside the integral; only the
        even part is integrated numerically.
        """

        def integrand(u: float) -> float:
            ppf = self.params.distribution.ppf(u)
            if self.mixture_form == "classical":
                return (
                    1
                    / np.sqrt(2 * np.pi * ppf * self.params.gamma**2)
                    * np.exp(
                        -((x - self.params.alpha) ** 2 + self.params.beta**2 * ppf**2)
                        / (2 * ppf * self.params.gamma**2)
                    )
                )
            return (
                1
                / np.sqrt(2 * np.pi * ppf)
                * np.exp(-((x - self.params.alpha) ** 2 + self.params.mu**2 * ppf**2) / (2 * ppf))
            )

        value, error = self._integrate(integrand)
        if self.mixture_form == "classical":
            prefactor = np.exp(self.params.beta * (x - self.params.alpha) / self.params.gamma**2)
        else:
            prefactor = np.exp(self.params.mu * (x - self.params.alpha))
        # NOTE: the integration error is reported unscaled, as in the original.
        return prefactor * value, error

    def _compute_logpdf(self, x: float) -> Tuple[float, float]:
        """Approximate the log-PDF at x.  Returns (value, error).

        NOTE(review): the pre-refactor code integrated with LogRQMC here;
        the refactor routes this through the generic integrator — confirm the
        configured integrator is appropriate for log-domain integrands.
        """

        def integrand(u: float) -> float:
            ppf = self.params.distribution.ppf(u)
            if self.mixture_form == "classical":
                return -(
                    (x - self.params.alpha) ** 2
                    + ppf**2 * self.params.beta**2
                    + ppf * self.params.gamma**2 * np.log(2 * np.pi * ppf * self.params.gamma**2)
                ) / (2 * ppf * self.params.gamma**2)
            return -((x - self.params.alpha) ** 2 + ppf**2 * self.params.mu**2 + ppf * np.log(2 * np.pi * ppf)) / (
                2 * ppf
            )

        value, error = self._integrate(integrand)
        if self.mixture_form == "classical":
            drift = self.params.beta * (x - self.params.alpha) / self.params.gamma**2
        else:
            drift = self.params.mu * (x - self.params.alpha)
        return drift + value, error
class NormalVarianceMixtures(AbstractMixtures):
    """Normal variance mixture (NVM) in classical or canonical form.

    Classical parameters: alpha, gamma and a mixing distribution; canonical
    form fixes gamma = 1.  All quantities are integrals over u in (0, 1) of
    the mixing distribution's ppf, approximated by a pluggable integrator.
    """

    _classical_collector = _NVMClassicDataCollector
    _canonical_collector = _NVMCanonicalDataCollector

    def __init__(
        self,
        mixture_form: str,
        integrator_cls: Type[Integrator] = RQMCIntegrator,
        integrator_params: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        """Create a mixture.

        Args:
            mixture_form: "classical" or "canonical".
            integrator_cls: class implementing the Integrator protocol.
            integrator_params: keyword arguments for the integrator constructor.
            kwargs: mixture parameters forwarded to the parameter collector.
        """
        super().__init__(mixture_form, integrator_cls=integrator_cls, integrator_params=integrator_params, **kwargs)
        # Kept local so the _compute_* methods do not depend on the base
        # class storing the integrator configuration.
        self.integrator_cls = integrator_cls
        self.integrator_params = integrator_params or {}

    def _gamma(self) -> float:
        """Scale parameter: params.gamma classically, 1 in canonical form."""
        return getattr(self.params, "gamma", 1)

    def _integrate(self, integrand: Callable[[float], float]) -> tuple[float, float]:
        """Run one fresh integrator over *integrand*; return (value, error)."""
        res = self.integrator_cls(**self.integrator_params).compute(integrand)
        return res.value, res.error

    def _compute_moment(self, n: int) -> tuple[float, float]:
        """Approximate the n-th raw moment via the binomial expansion of
        (alpha + gamma * sqrt(X) * Z)**n.  Returns (value, error)."""
        gamma = self._gamma()

        def integrand(u: float) -> float:
            # ppf is loop-invariant — evaluate once per u.
            ppf = self.params.distribution.ppf(u)
            return sum(
                binom(n, k) * (gamma**k) * (self.params.alpha ** (n - k)) * (ppf ** (k / 2)) * norm.moment(k)
                for k in range(n + 1)
            )

        return self._integrate(integrand)

    def _compute_cdf(self, x: float) -> tuple[float, float]:
        """Approximate the CDF at x.  Returns (value, error)."""
        # Frozen N(0, gamma) hoisted out of the integrand.
        param_norm = norm(0, self._gamma())

        def integrand(u: float) -> float:
            return param_norm.cdf((x - self.params.alpha) / np.sqrt(self.params.distribution.ppf(u)))

        return self._integrate(integrand)

    def _compute_pdf(self, x: float) -> tuple[float, float]:
        """Approximate the PDF at x.  Returns (value, error)."""
        gamma = self._gamma()
        d = (x - self.params.alpha) ** 2 / gamma**2

        return self._integrate(lambda u: self._integrand_func(u, d, gamma))

    def _compute_logpdf(self, x: float) -> tuple[float, float]:
        """Approximate the log-PDF at x.  Returns (value, error).

        NOTE(review): the pre-refactor code integrated with LogRQMC here;
        confirm the configured integrator handles log-domain integrands.
        """
        gamma = self._gamma()
        d = (x - self.params.alpha) ** 2 / gamma**2

        return self._integrate(lambda u: self._log_integrand_func(u, d, gamma))

    # lru_cache deliberately NOT used here: caching an instance method keys
    # on self and keeps the instance alive for the cache's lifetime (B019),
    # and d varies per evaluation point so the hit rate is negligible.
    def _integrand_func(self, u: float, d: float, gamma: float) -> float:
        """Gaussian-kernel integrand of the PDF integral."""
        ppf = self.params.distribution.ppf(u)
        return (1 / np.sqrt(2 * np.pi * ppf * np.abs(gamma**2))) * np.exp(-d / (2 * ppf))

    def _log_integrand_func(self, u: float, d: float, gamma: float) -> float:
        """Log-domain integrand of the log-PDF integral."""
        ppf = self.params.distribution.ppf(u)
        return -(ppf * np.log(2 * np.pi * ppf * gamma**2) + d) / (2 * ppf)
test_classic_generate_variance_2(self, beta: float, gamma: float) -> None: def test_classic_generate_mean(self, beta: float, gamma: float) -> None: expected_mean = 0 mixture = NormalMeanMixtures("classical", alpha=0, beta=beta, gamma=gamma, distribution=norm) - sample = self.generator.classical_generate(mixture, self.test_mixture_size) + sample = self.generator.generate(mixture, self.test_mixture_size) actual_mean = np.mean(np.array(sample)) assert abs(actual_mean - expected_mean) < 1 @pytest.mark.parametrize("expected_size", np.random.randint(0, 100, size=50)) def test_classic_generate_size(self, expected_size: int) -> None: mixture = NormalMeanMixtures("classical", alpha=0, beta=1, gamma=1, distribution=norm) - sample = self.generator.classical_generate(mixture, expected_size) + sample = self.generator.generate(mixture, expected_size) actual_size = np.size(sample) assert actual_size == expected_size @@ -56,7 +56,7 @@ def test_classic_generate_size(self, expected_size: int) -> None: ) def test_canonical_generate_variance_0(self, mixing_variance: float, expected_variance: float) -> None: mixture = NormalMeanMixtures("canonical", sigma=1, distribution=norm(0, mixing_variance**0.5)) - sample = self.generator.canonical_generate(mixture, self.test_mixture_size) + sample = self.generator.generate(mixture, self.test_mixture_size) actual_variance = ndimage.variance(sample) assert actual_variance == pytest.approx(expected_variance, 0.1) @@ -64,7 +64,7 @@ def test_canonical_generate_variance_0(self, mixing_variance: float, expected_va def test_canonical_generate_variance_1(self, sigma: float) -> None: expected_variance = sigma**2 + 1 mixture = NormalMeanMixtures("canonical", sigma=sigma, distribution=norm) - sample = self.generator.canonical_generate(mixture, self.test_mixture_size) + sample = self.generator.generate(mixture, self.test_mixture_size) actual_variance = ndimage.variance(sample) assert actual_variance == pytest.approx(expected_variance, 0.1) @@ -72,7 +72,7 @@ 
def test_canonical_generate_variance_1(self, sigma: float) -> None: def test_canonical_generate_variance_2(self, mixing_variance: float, sigma: float) -> None: expected_variance = mixing_variance + sigma**2 mixture = NormalMeanMixtures("canonical", sigma=sigma, distribution=norm(0, mixing_variance**0.5)) - sample = self.generator.canonical_generate(mixture, self.test_mixture_size) + sample = self.generator.generate(mixture, self.test_mixture_size) actual_variance = ndimage.variance(sample) assert actual_variance == pytest.approx(expected_variance, 0.1) @@ -80,13 +80,13 @@ def test_canonical_generate_variance_2(self, mixing_variance: float, sigma: floa def test_canonical_generate_mean(self, sigma: float) -> None: expected_mean = 0 mixture = NormalMeanMixtures("canonical", sigma=sigma, distribution=norm) - sample = self.generator.canonical_generate(mixture, self.test_mixture_size) + sample = self.generator.generate(mixture, self.test_mixture_size) actual_mean = np.mean(np.array(sample)) assert abs(actual_mean - expected_mean) < 1 @pytest.mark.parametrize("expected_size", [*np.random.randint(0, 100, size=50), 0, 1, 1000000]) def test_canonical_generate_size(self, expected_size: int) -> None: mixture = NormalMeanMixtures("canonical", sigma=1, distribution=norm) - sample = self.generator.canonical_generate(mixture, expected_size) + sample = self.generator.generate(mixture, expected_size) actual_size = np.size(sample) assert actual_size == expected_size