Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
939d877
add copyright notice to license
archermarx Aug 7, 2025
ea37b80
Update LICENSE
archermarx Aug 7, 2025
5887dce
Merge branch 'eckelsjd:main' into main
archermarx Feb 20, 2026
0ae5bb4
fix depwarn for pydantic
archermarx Mar 3, 2026
aaca947
fix None type hints
archermarx Mar 3, 2026
f7d3fc1
some typing fixes in Variable
archermarx Mar 3, 2026
0a82c7d
fix callable type hint in component
archermarx Mar 3, 2026
7c88757
implement overridable timestamp prefix
archermarx Mar 3, 2026
ed3b76e
Fix linting
archermarx Mar 4, 2026
953cdaa
typing: log distribution float args
archermarx Mar 4, 2026
14d1400
typing: fix remaining pylance issues by swapping numpy functions to t…
archermarx Mar 4, 2026
55faaad
typing: deserialize methods return Self and use generic Serializable …
archermarx Mar 4, 2026
c047b4b
typing: variable name is always a string and transform.from_string ta…
archermarx Mar 4, 2026
8eaf878
typing: variable domain is always a tuple or list
archermarx Mar 4, 2026
050ba46
typing: variable distribution is always a distribution (or None)
archermarx Mar 4, 2026
a357de8
typing: variable compression is always a Compression (or None)
archermarx Mar 4, 2026
7fe96f7
typing: variable norm is always a list of transforms or None
archermarx Mar 4, 2026
b7665b9
tests: fix flaky matplotlib issues by changing background and fix imp…
archermarx Mar 4, 2026
0cd59b2
typing: add overloads to specify type for transform
archermarx Mar 4, 2026
2bb3b47
typing: Fix remaining type errors in variable.py
archermarx Mar 4, 2026
87e69d7
lint: fix lint errors
archermarx Mar 4, 2026
8dbd704
typing: fix remaining type errors in compression.py with judicious asserts
archermarx Mar 4, 2026
5d21559
typing: narrow types in Component
archermarx Mar 4, 2026
75dc37e
typing: narrow types in System and make component name a str
archermarx Mar 4, 2026
6e58d6f
fix: use null logger pattern to resolve Optional[Logger] type errors
archermarx Mar 4, 2026
7492c5f
typing: fix type errors in IndexSet, MiscTree, and Kwargs classes, as…
archermarx Mar 4, 2026
e745147
typing: narrow ModelKwargs
archermarx Mar 4, 2026
de778c0
fix: MultiIndex allows slicing
archermarx Mar 4, 2026
0167459
fix: update date on copyright notice
archermarx Mar 4, 2026
ad5d48a
fix package versioning for pydantic to support python 3.14
archermarx Mar 19, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions LICENSE
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
Copyright (C) 2023-2026 The Regents of the University of Michigan
Computational Autonomy Group (alexgorodetsky.com), Department of Aerospace Engineering

GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Expand Down
3,596 changes: 2,199 additions & 1,397 deletions pdm.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,9 @@ dependencies = [
"matplotlib>=3.9",
"networkx>=3.2",
"pyyaml>=6.0.2",
"pydantic>=2.9.1, !=2.12.*",
"dill>=0.3.9",
"scikit-learn>=1.6.1",
"pydantic>=2.11.10",
]
requires-python = ">=3.11"
readme = "README.md"
Expand Down
24 changes: 24 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
amisc @ file:///Users/archermarks/projects/pem/amisc
annotated-types==0.7.0
contourpy==1.3.3
cycler==0.12.1
dill==0.4.1
fonttools==4.62.1
joblib==1.5.3
kiwisolver==1.5.0
matplotlib==3.10.8
networkx==3.6.1
numpy==2.4.3
packaging==26.0
pillow==12.1.1
pydantic==2.12.5
pydantic_core==2.41.5
pyparsing==3.3.2
python-dateutil==2.9.0.post0
PyYAML==6.0.3
scikit-learn==1.8.0
scipy==1.17.1
six==1.17.0
threadpoolctl==3.6.0
typing-inspection==0.4.2
typing_extensions==4.15.0
193 changes: 112 additions & 81 deletions src/amisc/component.py

Large diffs are not rendered by default.

31 changes: 20 additions & 11 deletions src/amisc/compression.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ class Compression(PickleSerializable, ABC):
"""
fields: list[str] = field(default_factory=list)
method: str = 'svd'
coords: np.ndarray = None # (num_pts, dim)
coords: np.ndarray | None = None # (num_pts, dim)
interpolate_method: str = 'rbf'
interpolate_opts: dict = field(default_factory=dict)
_map_computed: bool = False
Expand Down Expand Up @@ -156,6 +156,7 @@ def _iterate_coords_and_states():
ret_dict = {}
loop_shape = state.shape[:-1]
coords_shape = n_coords.shape[:-1]
assert self.num_pts is not None
state = state.reshape((*loop_shape, self.num_pts, self.num_qoi))
n_coords = n_coords.reshape((-1, self.dim))
for i, qoi in enumerate(self.fields):
Expand All @@ -171,9 +172,11 @@ def _iterate_coords_and_states():

if coords_obj_array:
# Make an object array for each qoi, where each element is a unique `(*loop_shape, *coord_shape)` array
_first_dict = None
for _, _first_dict in np.ndenumerate(all_qois):
if _first_dict is not None:
break
assert _first_dict is not None
ret = {qoi: np.empty(all_qois.shape, dtype=object) for qoi in _first_dict}
for qoi in ret:
for index, qoi_dict in np.ndenumerate(all_qois):
Expand Down Expand Up @@ -232,6 +235,7 @@ def _iterate_coords_and_fields():

coords_shape = f_coords.shape[:-1]
loop_shape = next(iter(f_values.values())).shape[:-len(coords_shape)]
assert self.num_pts is not None
states = np.empty((*loop_shape, self.num_pts, self.num_qoi))
f_coords = f_coords.reshape(-1, self.dim)
for i, qoi in enumerate(self.fields):
Expand All @@ -243,7 +247,7 @@ def _iterate_coords_and_fields():
interp = self.interpolator()(f_coords, field_vals, **self.interpolate_opts)
yg = interp(grid_coords)
states[..., i] = yg.T.reshape(*loop_shape, self.num_pts)
all_states[j] = states.reshape((*loop_shape, self.dof))
all_states[j] = states.reshape((*loop_shape, self.num_pts * self.num_qoi))

# All fields now on the same dof grid, so stack them in same array
state_shape = ()
Expand All @@ -262,7 +266,7 @@ def _iterate_coords_and_fields():
return ret_states

@abstractmethod
def compute_map(self, **kwargs):
def compute_map(self, data_matrix, **kwargs):
"""Compute and store the compression map. Must set the value of `coords` and `_is_computed`. Should
use the same normalization as the parent `Variable` object.

Expand Down Expand Up @@ -297,7 +301,7 @@ def latent_size(self) -> int:
raise NotImplementedError

@abstractmethod
def estimate_latent_ranges(self) -> list[tuple[float, float]]:
def estimate_latent_ranges(self) -> list[tuple[float, float]] | None:
"""Estimate the range of the latent space coefficients."""
raise NotImplementedError

Expand All @@ -323,20 +327,20 @@ class SVD(Compression):
:ivar energy_tol: the energy tolerance of the SVD decomposition
:ivar reconstruction_tol: the reconstruction error tolerance of the SVD decomposition
"""
data_matrix: np.ndarray = None # (dof, num_samples)
projection_matrix: np.ndarray = None # (dof, rank)
rank: int = None
energy_tol: float = None
reconstruction_tol: float = None
data_matrix: np.ndarray | None = None # (dof, num_samples)
projection_matrix: np.ndarray | None = None # (dof, rank)
rank: int | None = None
energy_tol: float | None = None
reconstruction_tol: float | None = None

def __post_init__(self):
"""Compute the SVD if the data matrix is provided."""
if (data_matrix := self.data_matrix) is not None:
self.compute_map(data_matrix, rank=self.rank, energy_tol=self.energy_tol,
reconstruction_tol=self.reconstruction_tol)

def compute_map(self, data_matrix: np.ndarray | dict, rank: int = None, energy_tol: float = None,
reconstruction_tol: float = None):
def compute_map(self, data_matrix: np.ndarray | dict, rank: int | None = None, energy_tol: float | None = None,
reconstruction_tol: float | None = None, **kwargs):
"""Compute the SVD compression map from the data matrix. Recall that `dof` is the total number of degrees of
freedom, equal to the number of grid points `num_pts` times the number of quantities of interest `num_qoi`
at each grid point.
Expand Down Expand Up @@ -371,6 +375,7 @@ def compute_map(self, data_matrix: np.ndarray | dict, rank: int = None, energy_t
if relative_error(u[:, :r] @ u[:, :r].T @ data_matrix, data_matrix) <= reconstruction_tol:
rank = r
break
assert rank is not None
energy_tol = energy_frac[rank - 1]
else:
energy_tol = energy_tol or self.energy_tol or 0.95
Expand All @@ -386,16 +391,20 @@ def compute_map(self, data_matrix: np.ndarray | dict, rank: int = None, energy_t
self._map_computed = True

def compress(self, data):
assert self.projection_matrix is not None
return np.squeeze(self.projection_matrix.T @ data[..., np.newaxis], axis=-1)

def reconstruct(self, compressed):
assert self.projection_matrix is not None
return np.squeeze(self.projection_matrix @ compressed[..., np.newaxis], axis=-1)

def latent_size(self):
assert self.rank is not None
return self.rank

def estimate_latent_ranges(self):
if self.map_exists:
assert self.data_matrix is not None
latent_data = self.compress(self.data_matrix.T) # (rank, num_samples)
latent_min = np.min(latent_data, axis=0)
latent_max = np.max(latent_data, axis=0)
Expand Down
39 changes: 23 additions & 16 deletions src/amisc/distribution.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,20 +43,23 @@ def __str__(self):
def __repr__(self):
return self.__str__()

def domain(self, dist_args: tuple = None) -> tuple:
def domain(self, dist_args: tuple | None = None) -> tuple | None:
"""Return the domain of this distribution. Defaults to `dist_args`

:param dist_args: overrides `self.dist_args`
"""
return dist_args or self.dist_args

def nominal(self, dist_args: tuple = None) -> float:
def nominal(self, dist_args: tuple | None = None) -> float | None:
"""Return the nominal value of this distribution. Defaults to middle of domain.

:param dist_args: overrides `self.dist_args`
"""
lb, ub = self.domain(dist_args=dist_args)
return (lb + ub) / 2
if (domain := self.domain(dist_args=dist_args)) is not None:
lb, ub = domain
return (lb + ub) / 2
else:
return None

@classmethod
def from_string(cls, dist_string: str) -> Distribution | None:
Expand Down Expand Up @@ -124,7 +127,11 @@ def from_string(cls, dist_string: str) -> Distribution | None:
raise NotImplementedError(f'The distribution "{dist_string}" is not recognized.')

@abstractmethod
def sample(self, shape: int | tuple, nominal: float | np.ndarray = None, dist_args: tuple = None) -> np.ndarray:
def sample(self,
shape: int | tuple,
nominal: float | np.ndarray | None = None,
dist_args: tuple | None = None
) -> np.ndarray:
"""Sample from the distribution.

:param shape: shape of the samples to return
Expand All @@ -135,7 +142,7 @@ def sample(self, shape: int | tuple, nominal: float | np.ndarray = None, dist_ar
raise NotImplementedError

@abstractmethod
def pdf(self, x: np.ndarray, dist_args: tuple = None) -> np.ndarray:
def pdf(self, x: np.ndarray, dist_args: tuple | None = None) -> np.ndarray:
"""Evaluate the pdf of this distribution at the `x` locations.

:param x: the locations at which to evaluate the pdf
Expand All @@ -153,7 +160,7 @@ def __str__(self):

def sample(self, shape, nominal=None, dist_args=None):
lb, ub = dist_args or self.dist_args
return np.random.rand(*shape) * (ub - lb) + lb
return np.random.random_sample(shape) * (ub - lb) + lb

def pdf(self, x, dist_args=None):
x = np.atleast_1d(x)
Expand All @@ -173,8 +180,8 @@ class LogUniform(Distribution):
x = LogUniform((1e-3, 1e-1)) # log10(x) ~ U(-3, -1)
```
"""
def __init__(self, dist_args: tuple, base=10):
self.base = base
def __init__(self, dist_args: tuple, base: float | int = 10):
self.base = float(base)
super().__init__(dist_args)

def __str__(self):
Expand All @@ -183,7 +190,7 @@ def __str__(self):
def sample(self, shape, nominal=None, dist_args=None):
lb, ub = dist_args or self.dist_args
c = 1 / np.log(self.base)
return self.base ** (np.random.rand(*shape) * c * (np.log(ub) - np.log(lb)) + c * np.log(lb))
return self.base ** (np.random.random_sample(shape) * c * (np.log(ub) - np.log(lb)) + c * np.log(lb))

def pdf(self, x, dist_args=None):
x = np.atleast_1d(x)
Expand All @@ -209,7 +216,7 @@ def domain(self, dist_args=None):

def sample(self, shape, nominal=None, dist_args=None):
mu, std = dist_args or self.dist_args
return np.random.randn(*shape) * std + mu
return np.random.standard_normal(shape) * std + mu

def pdf(self, x, dist_args=None):
mu, std = dist_args or self.dist_args
Expand All @@ -225,8 +232,8 @@ class LogNormal(Distribution):
x = LogNormal((-2, 1)) # log10(x) ~ N(-2, 1)
```
"""
def __init__(self, dist_args: tuple, base=10):
self.base = base
def __init__(self, dist_args: tuple, base: float | int = 10):
self.base = float(base)
super().__init__(dist_args)

def __str__(self):
Expand All @@ -239,7 +246,7 @@ def domain(self, dist_args=None):

def sample(self, shape, nominal=None, dist_args=None):
mu, std = dist_args or self.dist_args
return self.base ** (np.random.randn(*shape) * std + mu)
return self.base ** (np.random.standard_normal(shape) * std + mu)

def pdf(self, x, dist_args=None):
mu, std = dist_args or self.dist_args
Expand All @@ -266,7 +273,7 @@ def sample(self, shape, nominal=None, dist_args=None):
raise ValueError('Cannot sample relative distribution when no nominal value is provided.')
dist_args = dist_args or self.dist_args
tol = abs((dist_args[0] / 100) * nominal)
return np.random.rand(*shape) * 2 * tol - tol + nominal
return np.random.random_sample(shape) * 2 * tol - tol + nominal

def pdf(self, x, dist_args=None):
return np.ones(x.shape)
Expand All @@ -291,7 +298,7 @@ def sample(self, shape, nominal=None, dist_args=None):
raise ValueError('Cannot sample tolerance distribution when no nominal value is provided.')
dist_args = dist_args or self.dist_args
tol = abs(dist_args[0])
return np.random.rand(*shape) * 2 * tol - tol + nominal
return np.random.random_sample(shape) * 2 * tol - tol + nominal

def pdf(self, x, dist_args=None):
return np.ones(np.atleast_1d(x).shape)
16 changes: 8 additions & 8 deletions src/amisc/interpolator.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ class LinearState(InterpolatorState, Base64Serializable):
"""
x_vars: list[str] = field(default_factory=list)
y_vars: list[str] = field(default_factory=list)
regressor: Pipeline = None
regressor: Pipeline | None = None

def __eq__(self, other):
if isinstance(other, LinearState):
Expand All @@ -92,7 +92,7 @@ class GPRState(InterpolatorState, Base64Serializable):
"""
x_vars: list[str] = field(default_factory=list)
y_vars: list[str] = field(default_factory=list)
regressor: Pipeline = None
regressor: Pipeline | None = None

def __eq__(self, other):
if isinstance(other, GPRState):
Expand All @@ -118,7 +118,7 @@ class Interpolator(Serializable, ABC):

@abstractmethod
def refine(self, beta: MultiIndex, training_data: tuple[Dataset, Dataset],
old_state: InterpolatorState, input_domains: dict[str, tuple]) -> InterpolatorState:
old_state: InterpolatorState | None, input_domains: dict[str, tuple]) -> InterpolatorState:
"""Refine the interpolator state with new training data.

:param beta: a multi-index specifying the fidelity "levels" of the new interpolator state (starts at (0,... 0))
Expand Down Expand Up @@ -234,7 +234,7 @@ def _extend_grids(x_grids: dict[str, np.ndarray], x_points: dict[str, np.ndarray
return extended_grids

def refine(self, beta: MultiIndex, training_data: tuple[Dataset, Dataset],
old_state: LagrangeState, input_domains: dict[str, tuple]) -> LagrangeState:
old_state: LagrangeState | None, input_domains: dict[str, tuple]) -> LagrangeState:
"""Refine the interpolator state with new training data.

:param beta: the refinement level indices for the interpolator (not used for `Lagrange`)
Expand Down Expand Up @@ -550,7 +550,7 @@ class Linear(Interpolator, StringSerializable):
:ivar polynomial_opts: options to pass to the `PolynomialFeatures` constructor (e.g. 'degree', 'include_bias').
"""
regressor: str = 'Ridge'
scaler: str = None
scaler: str | None = None
regressor_opts: dict = field(default_factory=dict)
scaler_opts: dict = field(default_factory=dict)
polynomial_opts: dict = field(default_factory=lambda: {'degree': 1, 'include_bias': False})
Expand All @@ -568,7 +568,7 @@ def __post_init__(self):
raise ImportError(f"Scaler '{self.scaler}' not found in sklearn.preprocessing")

def refine(self, beta: MultiIndex, training_data: tuple[Dataset, Dataset],
old_state: LinearState, input_domains: dict[str, tuple]) -> InterpolatorState:
old_state: LinearState | None, input_domains: dict[str, tuple]) -> InterpolatorState:
"""Train a new linear regression model.

:param beta: if not empty, then the first element is the number of degrees to add to the polynomial features.
Expand Down Expand Up @@ -654,7 +654,7 @@ class GPR(Interpolator, StringSerializable):
:ivar regressor_opts: options to pass to the `GaussianProcessRegressor` constructor
(see [scikit-learn](https://scikit-learn.org/stable/) documentation).
"""
scaler: str = None
scaler: str | None = None
kernel: str | list = 'RBF'
scaler_opts: dict = field(default_factory=dict)
kernel_opts: dict = field(default_factory=dict)
Expand Down Expand Up @@ -707,7 +707,7 @@ def __post_init__(self):
raise ImportError(f"Scaler '{self.scaler}' not found in sklearn.preprocessing")

def refine(self, beta: MultiIndex, training_data: tuple[Dataset, Dataset],
old_state: GPRState, input_domains: dict[str, tuple]) -> InterpolatorState:
old_state: GPRState | None, input_domains: dict[str, tuple]) -> InterpolatorState:
"""Train a new gaussian process regression model.

:param beta: refinement level indices (Not used for 'GPR')
Expand Down
Loading