From 1ac3d3f61dff09590866507c3fc9d0361c8e3402 Mon Sep 17 00:00:00 2001 From: Alexandre Adam Date: Thu, 1 Aug 2024 22:11:23 -0400 Subject: [PATCH 01/40] Simplifying base (#6) * Major refactoring of the class structure. Still need to finish the fit and Trainer methods, then implement the HessianTrace class and a method to fine-tune an SBM with LoRA weights * Finished refactoring Trainer, made new tests for save/load which are more extensive * Added and tested the HessianDiagonal class, including the second-order loss function needed to train it * Made sure the weights of the SBM were frozen in HessianDiagonal, moved the tests into their own file and made them faster. Also added a parameter to switch between the two losses. Defaults to the canonical one, though the Meng version is potentially better. * Fixed bug in backward compatibility methods * Fixed bug in backward compatibility methods v2 * Added and tested save/load methods of the LoRA SBM class * Refactored the conditional branch, though it still needs some cleanup. Added back the torch.no_grad() decorator around the sample method, which had been forgotten * Removed getargspec() since it's deprecated in later versions of Python * beta_primitive for the linear schedule needed a factor of 1/2 * Moved the factor of 1/2 into the mu function and made it explicit in the drift function, as in Song et al. * Exposed the stopping factor to the API for the Euler-Maruyama method * Forgot np * Added the Tweedie formula at the last step of sampling for denoising * Implemented the conditional branch in MLP and DDPM, added tests to cover training of every model under a variety of settings * Added tests, some error catchers, and centralized the conditional branch logic. * Added tests for training LoRA SBM, modified the cleanup function to handle the directory for this model * Added TC2 to flake8 to allow forward references * Needed both TC and TC2 for flake8 to work with forward references * Minor fixes to the tests * Added __all__ tag to layers, debugged DDPM for 1D and 3D, tests passing except LoRA models * Added posterior score model with likelihood score function saved with dill * Fixed import * Added flake8 type checking * .
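For reference, the Tweedie denoising step and the explicit 1/2 factor in the VP drift mentioned above amount to the following (a minimal sketch; `sigma`, `beta` and `score` are illustrative callables, not names from this package):

    x0_hat = x_t + sigma(t)**2 * score(t, x_t)  # Tweedie formula, VE-style parameterization
    drift = -0.5 * beta(t) * x                  # VP drift with the explicit 1/2 factor, as in Song et al.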
* fixed some imports * Added type hint blocks for forward referencing and flake8 * Removed type EnergyModel which is instance of ScoreModel now * removed lora posterior * Fixed an initialization problem for LoRA when using base SBM that has a different checkpoint * Improved test, put back the loading of optimizer checkpoints in the trainer, also made sure all parameters of a ScoreModel are tracked during training for the Posterior fine-tuning tasks * Removed a print left in the code and added weights_only to torch.load function to remove deprecation warning * Removed the vv from coverage --- .flake8 | 5 + .github/workflows/python-package.yml | 7 +- requirements.txt | 4 +- score_models/__init__.py | 8 +- score_models/architectures/__init__.py | 7 +- .../architectures/conditional_branch.py | 117 ++++ score_models/architectures/ddpm.py | 162 +++-- score_models/architectures/encoder.py | 190 ++++++ score_models/architectures/mlp.py | 153 +++-- score_models/architectures/ncsnpp.py | 330 +++++---- score_models/architectures/ncsnpp_level.py | 86 +++ score_models/base.py | 496 -------------- score_models/dsm.py | 12 - score_models/layers/__init__.py | 28 +- score_models/layers/attention_block.py | 2 + score_models/layers/combine.py | 2 + .../layers/conditional_batchnorm2d.py | 2 + .../layers/conditional_instancenorm2d.py | 4 +- .../layers/conditional_instancenorm2d_plus.py | 2 + score_models/layers/conv1dsame.py | 2 + score_models/layers/conv2dsame.py | 2 + score_models/layers/conv3dsame.py | 2 + score_models/layers/ddpm_resnet_block.py | 12 +- score_models/layers/downsample.py | 2 + score_models/layers/ncsn_resnet_block.py | 1 - score_models/layers/projection_embedding.py | 2 + score_models/layers/resnet_block_biggan.py | 51 +- score_models/layers/spectral_normalization.py | 2 + score_models/layers/squeeze_and_excitation.py | 2 + score_models/layers/style_gan_conv.py | 2 + score_models/layers/upsample.py | 2 + score_models/losses/__init__.py | 2 + score_models/losses/dsm.py | 93 +++ .../{ => losses}/sliced_score_matching.py | 2 + score_models/ode/__init__.py | 4 + score_models/ode/euler.py | 60 +- score_models/ode/heun.py | 66 +- score_models/ode/hutchinson_trick.py | 48 ++ score_models/ode/probability_flow_ode.py | 45 ++ score_models/save_load_utils.py | 400 +++++++++++ score_models/sbm/__init__.py | 7 + score_models/sbm/base.py | 188 ++++++ score_models/sbm/energy_model.py | 64 ++ score_models/sbm/hessian_model.py | 93 +++ score_models/{ => sbm}/kernel_slic.py | 8 +- score_models/sbm/lora.py | 123 ++++ score_models/sbm/score_model.py | 141 ++++ score_models/sbm/slic.py | 128 ++++ score_models/score_model.py | 47 -- score_models/sde/__init__.py | 1 + score_models/sde/euler_maruyama.py | 67 ++ score_models/sde/predictor_corrector.py | 0 score_models/sde/sde.py | 48 +- score_models/sde/tsvesde.py | 31 +- score_models/sde/vesde.py | 33 +- score_models/sde/vpsde.py | 90 ++- score_models/slic.py | 28 - score_models/trainer.py | 216 ++++++ score_models/utils.py | 80 +-- setup.py | 9 +- tests/test_architectures.py | 108 +-- tests/test_conditional_architecture.py | 236 +++---- tests/test_hessian_model.py | 36 + tests/test_lora_sbm.py | 41 ++ tests/test_save_load.py | 79 +++ tests/test_score_models.py | 58 +- tests/test_sdes.py | 32 +- tests/test_training.py | 624 ++++++++++-------- 68 files changed, 3456 insertions(+), 1579 deletions(-) create mode 100644 .flake8 create mode 100644 score_models/architectures/conditional_branch.py create mode 100644 score_models/architectures/encoder.py create mode 
100644 score_models/architectures/ncsnpp_level.py delete mode 100644 score_models/base.py delete mode 100644 score_models/dsm.py create mode 100644 score_models/losses/__init__.py create mode 100644 score_models/losses/dsm.py rename score_models/{ => losses}/sliced_score_matching.py (98%) create mode 100644 score_models/ode/hutchinson_trick.py create mode 100644 score_models/ode/probability_flow_ode.py create mode 100644 score_models/save_load_utils.py create mode 100644 score_models/sbm/__init__.py create mode 100644 score_models/sbm/base.py create mode 100644 score_models/sbm/energy_model.py create mode 100644 score_models/sbm/hessian_model.py rename score_models/{ => sbm}/kernel_slic.py (92%) create mode 100644 score_models/sbm/lora.py create mode 100644 score_models/sbm/score_model.py create mode 100644 score_models/sbm/slic.py delete mode 100644 score_models/score_model.py create mode 100644 score_models/sde/euler_maruyama.py create mode 100644 score_models/sde/predictor_corrector.py delete mode 100644 score_models/slic.py create mode 100644 score_models/trainer.py create mode 100644 tests/test_hessian_model.py create mode 100644 tests/test_lora_sbm.py create mode 100644 tests/test_save_load.py diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..fde9d53 --- /dev/null +++ b/.flake8 @@ -0,0 +1,5 @@ +[flake8] +max-line-length = 127 +max-complexity = 10 +select = E9,F63,F7,F82 +ignore = TC002,TC201 diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 7fd3b7f..f2a32ca 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -1,6 +1,5 @@ # This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python - name: Python package on: @@ -12,9 +11,9 @@ on: branches: - master - dev + jobs: build: - runs-on: ubuntu-latest strategy: fail-fast: false @@ -30,11 +29,11 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install flake8 pytest + python -m pip install flake8 pytest flake8-type-checking if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - name: Lint with flake8 run: | # stop the build if there are Python syntax errors or undefined names - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + flake8 . --count --select=E9,F63,F7,F82,TC,TC2 --show-source --statistics # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics diff --git a/requirements.txt b/requirements.txt index f0563d3..75453ba 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,8 @@ torch>=2.0 +torch_ema +peft>=0.11 +dill numpy h5py -torch_ema scipy tqdm diff --git a/score_models/__init__.py b/score_models/__init__.py index dd843a0..120c815 100644 --- a/score_models/__init__.py +++ b/score_models/__init__.py @@ -1,4 +1,4 @@ -from .score_model import ScoreModel, EnergyModel -from .slic import SLIC -from .architectures import MLP, NCSNpp, DDPM -from .sde import VESDE, VPSDE, SDE +from .sbm import * +from .architectures import * +from .sde import * +from .losses import * diff --git a/score_models/architectures/__init__.py b/score_models/architectures/__init__.py index b44751d..daf9a4a 100644 --- a/score_models/architectures/__init__.py +++ b/score_models/architectures/__init__.py @@ -1,3 +1,4 @@ -from .ncsnpp import NCSNpp -from .ddpm import DDPM -from .mlp import MLP +from .ncsnpp import * +from .ddpm import * +from .mlp import * +from .encoder import * diff --git a/score_models/architectures/conditional_branch.py b/score_models/architectures/conditional_branch.py new file mode 100644 index 0000000..4012e99 --- /dev/null +++ b/score_models/architectures/conditional_branch.py @@ -0,0 +1,117 @@ +from typing import Optional, Literal, Union + +import torch +import torch.nn as nn +from torch.nn import Module +from ..layers import GaussianFourierProjection, PositionalEncoding + + +def validate_conditional_arguments( + conditions: Optional[tuple[Literal["time_discrete", "time_continuous", "time_vector", "input_tensor"]]] = None, + condition_embeddings: Optional[tuple[int]] = None, + condition_channels: Optional[tuple[int]] = None + ): + discrete_index = 0 # Number of discrete conditional variables + tensor_index = 0 # Number of vector/tensor conditional variables + if conditions: + if not isinstance(conditions, (tuple, list)): + raise ValueError("Conditions should be a tuple of strings.") + for c in conditions: + if c.lower() not in ["time_discrete", "time_continuous", "time_vector", "input_tensor"]: + raise ValueError(f"Conditions must be one of ['time_discrete', 'time_continuous', 'time_vector', 'input_tensor'], received {c}.") + if c.lower() == "time_discrete": + if condition_embeddings is None: + raise ValueError("condition_embeddings must be provided for a 'time_discrete' condition type, " + "and must be a tuple of integers of length equal to the number of 'time_discrete' conditions.") + if len(condition_embeddings) <= discrete_index: + raise ValueError("condition_embeddings must be provided for a 'time_discrete' condition type, " + "and must be a tuple of integers of length equal to the number of 'time_discrete' conditions.") + if not isinstance(condition_embeddings, (tuple, list)) or not isinstance(condition_embeddings[discrete_index], int): + raise ValueError("condition_embeddings must be provided and be a tuple of integers for a 'time_discrete' condition type.") + discrete_index += 1 + elif c.lower() in ["input_tensor", "time_vector"]: + if condition_channels is None: + raise ValueError("condition_channels must be provided for 'input_tensor' and 'time_vector' condition types, " + "and must be a tuple of integers of length equal to the number of 'input_tensor' and 'time_vector' conditions.") + if len(condition_channels) <= tensor_index: + raise ValueError("condition_channels must be provided for 'input_tensor' and 'time_vector' condition types, " + "and must be a tuple of integers of length equal to the number of 'input_tensor' and 'time_vector' conditions.") + if not isinstance(condition_channels, (tuple, list)) or not isinstance(condition_channels[tensor_index], int): + raise ValueError("condition_channels must be provided and be a tuple of integers for 'input_tensor' and 'time_vector' condition types.") + tensor_index += 1
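+# A minimal usage sketch of the validator above (the tuples below are illustrative
+# values, not defaults used elsewhere in this package):
+#
+#     validate_conditional_arguments(
+#         conditions=("time_discrete", "time_continuous", "input_tensor"),
+#         condition_embeddings=(10,),   # one entry per 'time_discrete' condition
+#         condition_channels=(3,),      # one entry per 'time_vector'/'input_tensor' condition
+#     )
+#
+# A mismatch, e.g. conditions=("time_discrete",) with condition_embeddings=None, raises a ValueError.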
+ + +def conditional_branch( + model: Module, + time_branch_channels: int, + input_branch_channels: int, + condition_embeddings: Union[tuple[int], type(None)], + condition_channels: Union[tuple[int], type(None)], + fourier_scale: float = 30., + ): + total_time_channels = time_branch_channels + total_input_channels = input_branch_channels + conditional_branch = [] + for condition_type in model.condition_type: + if condition_type.lower() == "time_discrete": + conditional_branch.append( + nn.Embedding( + num_embeddings=condition_embeddings[0], + embedding_dim=time_branch_channels + ) + ) + condition_embeddings = condition_embeddings[1:] + total_time_channels += time_branch_channels + + elif condition_type.lower() == "time_continuous": + conditional_branch.append( + GaussianFourierProjection( + embed_dim=time_branch_channels, + scale=fourier_scale + ) + ) + total_time_channels += time_branch_channels + + elif condition_type.lower() == "time_vector": + conditional_branch.append( + PositionalEncoding( + channels=condition_channels[0], + embed_dim=time_branch_channels, + scale=fourier_scale + ) + ) + condition_channels = condition_channels[1:] + total_time_channels += time_branch_channels + + elif condition_type.lower() == "input_tensor": + total_input_channels += condition_channels[0] + condition_channels = condition_channels[1:] + + model.conditional_branch = nn.ModuleList(conditional_branch) + return total_time_channels, total_input_channels + + +def merge_conditional_time_branch(model, temb, *args): + B, *_ = temb.shape + c_idx = 0 + e_idx = 0 + if len(args) != len(model.condition_type): + raise ValueError(f"The network requires {len(model.condition_type)} additional arguments, but {len(args)} were provided.") + for condition, condition_type in zip(args, model.condition_type): + if "time" in condition_type.lower(): + if "discrete" in condition_type.lower(): + if torch.any((condition < 0) | (condition >= model.condition_embeddings[e_idx])): + raise ValueError(f"Additional argument {c_idx} must be a long tensor with values " + f"between 0 and {model.condition_embeddings[e_idx]-1} inclusive.") + e_idx += 1 + c_emb = model.conditional_branch[c_idx](condition).view(B, -1) + temb = torch.cat([temb, c_emb], dim=1) + c_idx += 1 + return temb + +def merge_conditional_input_branch(model, x, *args): + B, *D = x.shape + for condition, condition_type in zip(args, model.condition_type): + if "input" in condition_type.lower(): + x = torch.cat([x, condition], dim=1) + return x diff --git a/score_models/architectures/ddpm.py b/score_models/architectures/ddpm.py index cdef3a5..74f4c9d 100644 --- a/score_models/architectures/ddpm.py +++ b/score_models/architectures/ddpm.py @@ -1,47 +1,78 @@ -""" -Code ported from Yang Song's repo https://github.com/yang-song/score_sde_pytorch/blob/main/ with slight modifications to make it work on continuous time.
-""" +from typing import Optional, Literal + import torch from torch import nn -from score_models.utils import get_activation -from score_models.layers import DDPMResnetBlock, SelfAttentionBlock, GaussianFourierProjection, UpsampleLayer, DownsampleLayer -from score_models.layers.ddpm_resnet_block import conv3x3 -import functools +from functools import partial + +from ..utils import get_activation +from ..layers import ( + DDPMResnetBlock, + SelfAttentionBlock, + GaussianFourierProjection, + UpsampleLayer, + DownsampleLayer, + conv3x3 + ) +from .conditional_branch import ( + validate_conditional_arguments, + conditional_branch, + merge_conditional_time_branch, + merge_conditional_input_branch + ) +__all__ = ["DDPM"] class DDPM(nn.Module): def __init__( self, - channels:int=1, - dimensions:int=2, - nf:int=128, - activation_type:str="relu", - ch_mult:list[int,...]=(1, 1, 2, 2, 4, 4), - num_res_blocks:int =2, - resample_with_conv:bool=True, - dropout:float=0., - attention:bool=True, - conditioning:list[str,...]=["None"], - conditioning_channels:list[int,...]=None, + channels: int = 1, + dimensions: int = 2, + nf: int = 128, + activation_type: str = "relu", + ch_mult: tuple[int] = (2, 2), + num_res_blocks: int = 2, + resample_with_conv: bool = True, + dropout: float = 0., + attention: bool = True, + fourier_scale: float = 30., + conditions: Optional[tuple[str]] = None, + condition_embeddings: Optional[tuple[int]] = None, + condition_channels: Optional[int] = None, **kwargs - ): + ): + """ + Deep Diffusion Probabilistic Model (DDPM) implementation. + + Args: + channels (int): Number of input channels. Default is 1. + dimensions (int): Number of spatial dimensions. Default is 2. + nf (int): Number of filters in the network. Default is 128. + activation_type (str): Type of activation function to use. Default is "relu". + ch_mult (tuple[int]): Channel multiplier for each layer. Default is (2, 2). + num_res_blocks (int): Number of residual blocks in the network. Default is 2. + resample_with_conv (bool): Whether to use convolutional resampling. Default is True. + dropout (float): Dropout rate. Default is 0. + attention (bool): Whether to use attention mechanism. Default is True. + fourier_scale (float): Scale parameter for Fourier features. Default is 30. + conditions (Optional[tuple[str]]): Types of conditioning inputs. Default is None. + condition_embeddings (Optional[tuple[int]]): Embedding sizes for conditioning inputs. Default is None. + condition_channels (Optional[int]): Number of channels for conditioning inputs. Default is None. + **kwargs: Additional keyword arguments. 
+ + References: + - Original implementation in Yang Song's repository: https://github.com/yang-song/score_sde_pytorch/blob/main/ + """ super().__init__() if dimensions not in [1, 2, 3]: raise ValueError(f"Input must have 1, 2, or 3 spatial dimensions to use this architecture, received {dimensions}.") - self.conditioned = False - for c in conditioning: - if c.lower() not in ["none", "time", "input"]: - raise ValueError(f"Conditioning must be in ['None', 'Time', 'Input'], received {c}") - if c.lower() != "none": - self.conditioned = True - if conditioning_channels is not None: - raise ValueError("conditioning_channels must be provided when the network is conditioned") - elif c.lower() == "none" and self.conditioned: - raise ValueError(f"Cannot have a mix of 'None' and other type of conditioning, received the list {conditioning}") - + validate_conditional_arguments(conditions, condition_embeddings, condition_channels) + self.conditioned = conditions is not None + self.condition_type = conditions + self.condition_embeddings = condition_embeddings + self.condition_channels = condition_channels self.hyperparameters = { "channels": channels, + "dimensions": dimensions, "nf": nf, "activation_type": activation_type, "ch_mult": ch_mult, @@ -49,8 +80,10 @@ def __init__( "resample_with_conv": resample_with_conv, "dropout": dropout, "attention": attention, - "dimensions": dimensions, - "conditioning": conditioning + "fourier_scale": fourier_scale, + "conditions": conditions, + "condition_embeddings": condition_embeddings, + "condition_channels": condition_channels } self.dimensions = dimensions self.act = act = get_activation(activation_type=activation_type) @@ -60,18 +93,40 @@ def __init__( self.num_res_blocks = num_res_blocks self.num_resolutions = num_resolutions = len(ch_mult) - AttnBlock = SelfAttentionBlock - ResnetBlock = functools.partial(DDPMResnetBlock, act=act, temb_dim=4 * nf, dropout=dropout, dimensions=dimensions) + # Prepare layers + AttnBlock = partial(SelfAttentionBlock, dimensions=dimensions) + ResnetBlock = partial(DDPMResnetBlock, act=act, temb_dim=4 * nf, dropout=dropout, dimensions=dimensions) + Downsample = partial(DownsampleLayer, dimensions=dimensions) + Upsample = partial(UpsampleLayer, dimensions=dimensions) - # Condition on continuous time - modules = [GaussianFourierProjection(embed_dim=nf), nn.Linear(nf, nf * 4), nn.Linear(nf * 4, nf * 4)] + ########### Conditional branch ########### + if self.conditioned: + total_time_channels, total_input_channels = conditional_branch( + self, + time_branch_channels=nf, + input_branch_channels=channels, + condition_embeddings=condition_embeddings, + condition_channels=condition_channels, + fourier_scale=fourier_scale + ) # This method attach a Module list to self.conditional_branch + else: + total_time_channels = nf + total_input_channels = channels + ######################################### + + ########### Time branch ########### + modules = [ + GaussianFourierProjection(embed_dim=nf, scale=fourier_scale), + nn.Linear(total_time_channels, nf * 4), + nn.Linear(nf * 4, nf * 4) + ] with torch.no_grad(): modules[1].bias.zero_() modules[2].bias.zero_() + #################################### # Downsampling block - Downsample = functools.partial(DownsampleLayer, dimensions=dimensions) - modules.append(conv3x3(channels, nf)) + modules.append(conv3x3(total_input_channels, nf, dimensions=dimensions)) hs_c = [nf] in_ch = nf for i_level in range(num_resolutions): @@ -92,7 +147,6 @@ def __init__( modules.append(ResnetBlock(in_ch=in_ch)) # 
Upsampling block - Upsample = functools.partial(UpsampleLayer, dimensions=dimensions) for i_level in reversed(range(num_resolutions)): for i_block in range(num_res_blocks + 1): out_ch = nf * ch_mult[i_level] @@ -102,18 +156,31 @@ def __init__( modules.append(Upsample(in_ch=in_ch, with_conv=resample_with_conv)) assert not hs_c - modules.append(nn.GroupNorm(num_channels=in_ch, num_groups=min(in_ch // 4, 32), eps=1e-6)) - modules.append(conv3x3(in_ch, channels)) + modules.append(nn.GroupNorm(num_channels=in_ch, num_groups=max(min(in_ch // 4, 32), 1), eps=1e-6)) + modules.append(conv3x3(in_ch, channels, dimensions=dimensions)) self.all_modules = nn.ModuleList(modules) - def forward(self, t, x): + def forward(self, t, x, *args): + B, *D = x.shape modules = self.all_modules + + # Time branch m_idx = 0 - temb = t - for _ in range(3): - temb = modules[m_idx](temb) - m_idx += 1 + temb = modules[m_idx](t) + m_idx += 1 + if self.conditioned: + temb = merge_conditional_time_branch(self, temb, *args) + temb = modules[m_idx](temb) + m_idx += 1 + temb = modules[m_idx](self.act(temb)) + m_idx += 1 + # Input branch + if self.conditioned: + x = merge_conditional_input_branch(self, x, *args) + # if self.fourier_features: + # ffeatures = self.fourier_features(x) + # x = torch.concat([x, ffeatures], axis=1) # Downsampling block hs = [modules[m_idx](x)] m_idx += 1 @@ -152,4 +219,3 @@ def forward(self, t, x): m_idx += 1 assert m_idx == len(modules) return h - diff --git a/score_models/architectures/encoder.py b/score_models/architectures/encoder.py new file mode 100644 index 0000000..4f41062 --- /dev/null +++ b/score_models/architectures/encoder.py @@ -0,0 +1,190 @@ +from typing import Literal, Optional + +import torch +from torch import nn +from .conditional_branch import ( + validate_conditional_arguments, + conditional_branch, + merge_conditional_time_branch, + merge_conditional_input_branch + ) +from ..definitions import default_init +from ..layers import Conv2dSame, ResnetBlockBigGANpp, GaussianFourierProjection +from ..utils import get_activation + + +__all__ = ['Encoder'] + + +class Encoder(nn.Module): + """ + Network that outputs latent representations of a 1D, 2D or 3D random variable + (i.e. shape = [B, C, *D], where B is the batch size, + C is the number of channels and *D are the spatial dimensions), + conditioned on time and possibly other variables. + """ + def __init__( + self, + pixels: int, + channels: int, + latent_size: int, + input_kernel_size=7, # Kernel size of the first convolutional layer + nf: int = 64, # Base width of the convolutional layers + ch_mult: tuple[int] = (2, 2, 2, 2), # Channel multiplier for each level + num_res_blocks: int = 2, + activation : Literal["relu", "gelu", "leakyrelu", "sigmoid", "tanh", "silu"] = "silu", + output_kernel: int = 2, # Final layer is an average pooling layer with this kernel shape + hidden_layers: int = 1, + hidden_size: int = 256, + factor: int = 2, + fourier_scale: float = 16., + conditions : Optional[tuple[Literal["time_discrete", "time_continuous", "time_vector", "input_tensor"]]] = None, + condition_embeddings: Optional[tuple[int]] = None, + condition_channels: Optional[int] = None, + **kwargs + ): + """ + Network that outputs latent representations of a 1D, 2D or 3D random variable of shape + [B, C, *D], + where B is the batch size, C is the number of channels and *D are the spatial dimensions. + This network is conditioned on time and possibly other variables.
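+
+        Example (an illustrative sketch; the values are assumptions, not tested defaults):
+            >>> enc = Encoder(pixels=32, channels=1, latent_size=16)
+            >>> z = enc(t, x)  # t: (B,), x: (B, 1, 32, 32) -> z: (B, 16)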
+ + Parameters: + - pixels (int): The number of pixels in the input image. + - channels (int): The number of channels in the input image. + - latent_size (int): The size of the latent representation. + - input_kernel_size (int): Kernel size of the first convolutional layer (default is 7). + - nf (int): Base width of the convolutional layers (default is 64). + - ch_mult (tuple[int]): Channel multiplier for each level (default is (2, 2, 2, 2)). + - num_res_blocks (int): Number of residual blocks (default is 2). + - activation (str): Activation function to use (options: "relu", "gelu", "leakyrelu", "sigmoid", "tanh", "silu", default is "silu"). + - output_kernel (int): Kernel size of the final average pooling layer (default is 2). + - hidden_layers (int): Number of hidden layers (default is 1). + - hidden_size (int): Size of the hidden layers (default is 256). + - factor (int): Factor to scale the hidden size by (default is 2). + - conditions (tuple[str]): Types of conditions to consider (options: "time_discrete", "time_continuous", "time_vector", "input_tensor"). + - condition_embeddings (tuple[int]): Embedding sizes for the conditions. + - condition_channels (int): Number of channels for the conditions. + + """ + super().__init__() + validate_conditional_arguments(conditions, condition_embeddings, condition_channels) + self.conditioned = conditions is not None + self.hyperparameters = { + "pixels": pixels, + "channels": channels, + "latent_size": latent_size, + "nf": nf, + "input_kernel_size": input_kernel_size, + "ch_mult": ch_mult, + "num_res_blocks": num_res_blocks, + "activation": activation, + "hidden_layers": hidden_layers, + "hidden_size": hidden_size, + "output_kernel": output_kernel, + "factor": factor, + "fourier_scale": fourier_scale, + "conditions": conditions, + "condition_embeddings": condition_embeddings, + "condition_channels": condition_channels + } + assert (output_kernel % 2 == 0) or (output_kernel == 1), "output_kernel must be an even number or equal to 1 (no average pooling at the end)" + assert pixels % 2**len(ch_mult) == 0, "pixels must be divisible by 2**len(ch_mult)" + + self.act = get_activation(activation) + self.nf = nf + self.num_res_blocks = num_res_blocks + self.pixels = pixels + self.channels = channels + self.factor = factor + self._latent_pixels = pixels // factor**(len(ch_mult) + output_kernel//2) + self._latent_channels = int(nf * ch_mult[-1]) + assert self._latent_pixels > 0, "Network is too deep for the given input size and downsampling factor" + + ### Conditional branch ### + if self.conditioned: + total_time_channels, total_input_channels = conditional_branch( + self, + time_branch_channels=nf, + input_branch_channels=channels, + condition_embeddings=condition_embeddings, + condition_channels=condition_channels, + fourier_scale=fourier_scale + ) # This method attach a Module list to self.conditional_branch + else: + total_time_channels = nf + total_input_channels = channels + + ### Time branch ### + modules = [ + GaussianFourierProjection(embed_dim=nf, scale=fourier_scale), # Time embedding + nn.Linear(total_time_channels, nf * 4), # Combine time embedding with conditionals if any + nn.Linear(nf * 4, nf * 4) + ] + with torch.no_grad(): + modules[1].weight.data = default_init()(modules[1].weight.shape) + modules[1].bias.zero_() + modules[2].weight.data = default_init()(modules[2].weight.shape) + modules[2].bias.zero_() + self.time_branch = nn.ModuleList(modules) + + ### Input branch ### + out_ch = int(nf * ch_mult[0]) + self.input_layer = 
Conv2dSame(total_input_channels, out_ch, kernel_size=input_kernel_size) + layers = [] + for i in range(len(ch_mult)): + in_ch = out_ch = int(nf * ch_mult[i]) + for j in range(self.num_res_blocks): + if j < num_res_blocks - 1: + layers.append(ResnetBlockBigGANpp( + act=self.act, + in_ch=in_ch, + out_ch=out_ch, + temb_dim=4*nf + )) + else: + out_ch = int(nf * ch_mult[i+1]) if i+1 < len(ch_mult) else in_ch + layers.append(ResnetBlockBigGANpp( + act=self.act, + in_ch=in_ch, + out_ch=out_ch, + temb_dim=4*nf, + down=True, + factor=factor + )) + self.input_branch = nn.ModuleList(layers) + self.final_pooling_layer = nn.AvgPool2d(kernel_size=output_kernel) + + ### Latent encoder ### + self._image_latent_size = self._latent_pixels * self._latent_pixels * self._latent_channels + layers = [] + layers.append(nn.Linear(self._image_latent_size, hidden_size)) + for _ in range(hidden_layers): + layers.append(nn.Linear(hidden_size, hidden_size)) + self.latent_branch = nn.ModuleList(layers) + self.output_layer = nn.Linear(hidden_size, latent_size) + + + def forward(self, t, x, *args): + ############ Time branch ############ + temb = self.time_branch[0](t) # Gaussian Fourier Projection + if self.conditioned: + # Combine time embedding with conditionals if any + temb = merge_conditional_time_branch(self, temb, *args) + temb = self.time_branch[1](temb) + temb = self.time_branch[2](self.act(temb)) # pre activation convention + + ############ Input branch ############ + if self.conditioned: + # Combine input tensor with input tensors if any + x = merge_conditional_input_branch(self, x, *args) + h = self.input_layer(x) + for block in self.input_branch: + h = block(h, temb) + h = self.final_pooling_layer(h) + + ############ Latent encoder ############ + h = h.view(-1, self._image_latent_size) # flatten + for layer in self.latent_branch: + h = self.act(layer(h)) + return self.output_layer(h) diff --git a/score_models/architectures/mlp.py b/score_models/architectures/mlp.py index dea7acc..1927f30 100644 --- a/score_models/architectures/mlp.py +++ b/score_models/architectures/mlp.py @@ -1,63 +1,128 @@ +from typing import Optional, Literal + import torch import torch.nn as nn -from score_models.layers import GaussianFourierProjection, ScaledAttentionLayer -from score_models.utils import get_activation + +from ..layers import ( + GaussianFourierProjection, + ScaledAttentionLayer + ) +from .conditional_branch import ( + validate_conditional_arguments, + conditional_branch, + merge_conditional_time_branch, + merge_conditional_input_branch + ) +from ..utils import get_activation + +__all__ = ["MLP"] class MLP(nn.Module): def __init__( self, - dimensions:int, - units:int=100, - layers:int=2, - time_embedding_dimensions:int =32, - embedding_scale:int=30, - activation:int="swish", - time_branch_layers:int=1, - bottleneck:int=None, - attention:bool=False, - nn_is_energy:bool=False, - output_activation:str=None, - conditioning:list[str,...]=["none"], - conditioning_channels:list[int,...]=None, + channels: Optional[int] = None, + units: int = 100, + layers: int = 2, + time_branch_channels: int = 32, + time_branch_layers: int = 1, + fourier_scale: int = 16, + activation: int = "swish", + bottleneck: Optional[int] = None, + attention: bool = False, + nn_is_energy: bool = False, + output_activation: str = None, + conditions: Optional[Literal["discrete", "continuous", "vector", "tensor"]] = None, + condition_channels: Optional[tuple[int]] = None, + condition_embeddings: Optional[tuple[int]] = None, **kwargs ): + """ + Multi-Layer 
Perceptron (MLP) neural network. + + Parameters: + - channels (Optional[int]): Number of input channels. Default is None. + - units (int): Number of units in each hidden layer. Default is 100. + - layers (int): Number of hidden layers. Default is 2. + - time_branch_channels (int): Number of channels in the time branch. Default is 32. + - time_branch_layers (int): Number of layers in the time branch. Default is 1. + - fourier_scale (int): Scale factor for Fourier features. Default is 16. + - activation (str): Activation function to use. Default is "swish". + - bottleneck (Optional[int]): Number of units in the bottleneck layer. Default is None. + - attention (bool): Whether to use attention mechanism. Default is False. + - nn_is_energy (bool): Whether the neural network represents energy. Default is False. + - output_activation (str): Activation function for the output layer. Default is None. + - conditions (Optional[tuple[str]]): Types of conditioning inputs, among "time_discrete", "time_continuous", "time_vector" and "input_tensor". Default is None. + - condition_channels (Optional[tuple[int]]): Channels for conditioning. Default is None. + - condition_embeddings (Optional[tuple[int]]): Embeddings for conditioning. Default is None. + - **kwargs: Additional keyword arguments.
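+
+        Example (an illustrative sketch; the values and shapes are assumptions, not tested defaults):
+            >>> net = MLP(channels=2, units=64, layers=2)
+            >>> out = net(t, x)  # t: (B,), x: (B, 2) -> out: (B, 2)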
+ + """ super().__init__() - self.conditioned = False - for c in conditioning: - if c.lower() not in ["none", "input"]: - raise ValueError(f"Conditioning must be in ['None', 'Input'], received {c}") - if c.lower() != "none": - self.conditioned = True - if conditioning_channels is not None: - raise ValueError("conditioning_channels must be provided when the network is conditioned") - elif c.lower() == "none" and self.conditioned: - raise ValueError(f"Cannot have a mix of 'None' and other type of conditioning, received the list {conditioning}") + validate_conditional_arguments(conditions, condition_embeddings, condition_channels) + self.conditioned = conditions is not None + self.condition_type = conditions + self.condition_embeddings = condition_embeddings + self.condition_channels = condition_channels + # Some backward compatibility + if "embedding_scale" in kwargs: + fourier_scale = kwargs["embedding_scale"] + if "time_embedding_dimensions" in kwargs: + time_branch_channels = kwargs["time_embedding_dimensions"] + if channels is None: + if "dimensions" in kwargs: + channels = kwargs["dimensions"] + else: + raise ValueError("You must provide a 'channels' argument to initialize the MLP architecture.") self.hyperparameters = { - "dimensions": dimensions, + "channels": channels, "units": units, "layers": layers, - "time_embedding_dimensions": time_embedding_dimensions, - "embedding_scale": embedding_scale, + "time_branch_channels": time_branch_channels, + "fourier_scale": fourier_scale, "activation": activation, "time_branch_layers": time_branch_layers, + "bottleneck": bottleneck, + "attention": attention, "nn_is_energy": nn_is_energy, - "conditioning": conditioning + "output_activation": output_activation, + "conditions": conditions, + "condition_channels": condition_channels, + "condition_embeddings": condition_embeddings, } - if nn_is_energy: - self.hyperparameters.update({"output_activation": output_activation}) self.time_branch_layers = time_branch_layers self.layers = layers self.nn_is_energy = nn_is_energy - t_dim = time_embedding_dimensions if layers % 2 == 1: + print("Number of layers must be an even number for this architecture. Adding one more layer...") layers += 1 - # time embedding branch - modules = [GaussianFourierProjection(t_dim, scale=embedding_scale)] - for _ in range(time_branch_layers): + + ########### Conditional branch ########### + if self.conditioned: + total_time_channels, total_input_channels = conditional_branch( + self, + time_branch_channels=time_branch_channels, + input_branch_channels=channels, + condition_embeddings=condition_embeddings, + condition_channels=condition_channels, + fourier_scale=fourier_scale + ) # This method attaches a Module list to self.conditional_branch + else: + total_time_channels = time_branch_channels + total_input_channels = channels + ######################################### + + ########### Time branch ########### + t_dim = time_branch_channels + modules = [GaussianFourierProjection(t_dim, scale=fourier_scale), # Time embedding + nn.Linear(total_time_channels, t_dim) # Compress the signal from time index and the other conditionals if any + ] + for _ in range(time_branch_layers - 1): modules.append(nn.Linear(t_dim, t_dim)) - # main branch - modules.append(nn.Linear(dimensions+t_dim, units)) + ################################### + + ########### Input branch ########### + modules.append(nn.Linear(total_input_channels + t_dim, units)) if bottleneck is not None: assert isinstance(bottleneck, int) self.bottleneck = bottleneck @@ -75,19 +140,28 @@ def __init__( self.output_layer = nn.Linear(units, 1) self.output_act = get_activation(output_activation) else: - self.output_layer = nn.Linear(units, dimensions) + self.output_layer = nn.Linear(units, channels) self.act = get_activation(activation) self.all_modules = nn.ModuleList(modules) + ################################### - def forward(self, t, x): + def forward(self, t, x, *args): B, D = x.shape modules = self.all_modules + + # Time branch temb = modules[0](t) + if self.conditioned: + temb = merge_conditional_time_branch(self, temb, *args) i = 1 for _ in range(self.time_branch_layers): temb = self.act(modules[i](temb)) i += 1 + + # Input branch x = torch.cat([x, temb], dim=1) + if self.conditioned: + x = merge_conditional_input_branch(self, x, *args) x = modules[i](x) i += 1 for _ in range(self.layers//2): @@ -108,4 +182,3 @@ def forward(self, t, x): if self.nn_is_energy: out = self.output_act(out) return out - diff --git a/score_models/architectures/ncsnpp.py b/score_models/architectures/ncsnpp.py index 0ae88e2..cc931e1 100644 --- a/score_models/architectures/ncsnpp.py +++ b/score_models/architectures/ncsnpp.py @@ -1,102 +1,110 @@ -from score_models.layers import DDPMResnetBlock, GaussianFourierProjection, SelfAttentionBlock, \ - UpsampleLayer, DownsampleLayer, Combine, ResnetBlockBigGANpp, conv3x3, PositionalEncoding -from score_models.utils import get_activation -from score_models.definitions import default_init +from typing import Optional, Literal + import torch.nn as nn -import functools -import torch import numpy as np - +import torch +from functools import partial + +from ..layers import ( + DDPMResnetBlock, + ResnetBlockBigGANpp, + GaussianFourierProjection, + SelfAttentionBlock, + UpsampleLayer, + DownsampleLayer, + Combine, + conv3x3, + PositionalEncoding + ) +from ..utils import get_activation +from ..definitions import default_init +from .conditional_branch import ( + validate_conditional_arguments, + conditional_branch, + merge_conditional_time_branch, + merge_conditional_input_branch + ) + +__all__ = ["NCSNpp"] class NCSNpp(nn.Module): - """ - NCSN++ model - - Args: - channels (int): Number of input
channels. Default is 1. - dimensions (int): Number of dimensions of the input data. Default is 2. - nf (int): Number of filters in the first layer. Default is 128. - ch_mult (tuple): Channel multiplier for each layer. Default is (2, 2, 2, 2). - num_res_blocks (int): Number of residual blocks in each layer. Default is 2. - activation_type (str): Type of activation function. Default is "swish". - dropout (float): Dropout probability. Default is 0. - resample_with_conv (bool): Whether to use convolutional resampling. Default is True. - fir (bool): Whether to use finite impulse response filtering. Default is True. - fir_kernel (tuple): FIR filter kernel. Default is (1, 3, 3, 1). - skip_rescale (bool): Whether to rescale skip connections. Default is True. - progressive (str): Type of progressive training. Default is "output_skip". - progressive_input (str): Type of progressive - init_scale (float): The initial scale for the function. Default is 1e-2. - fourier_scale (float): The Fourier scale for the function. Default is 16. - resblock_type (str): The type of residual block to use. Default is "biggan". - combine_method (str): The method to use for combining the results. Default is "sum". - attention (bool): Whether or not to use attention. Default is True. - - """ def __init__( self, - channels=1, - dimensions=2, - nf=128, - ch_mult=(2, 2, 2, 2), - num_res_blocks=2, - activation_type="swish", - dropout=0., - resample_with_conv=True, - fir=True, - fir_kernel=(1, 3, 3, 1), - skip_rescale=True, - progressive="output_skip", - progressive_input="input_skip", - init_scale=1e-2, - fourier_scale=16., - resblock_type="biggan", - combine_method="sum", - attention=True, - condition:tuple[str,...]=["None"], # discrete_time, continuous_time, vector, input - condition_num_embedding:tuple[int,...]=None, - condition_input_channels:int=None, - condition_vector_channels:int=None, + channels: int = 1, + dimensions: Literal[1, 2, 3] = 2, + nf: int = 128, + ch_mult: tuple[int] = (2, 2, 2, 2), + num_res_blocks: int = 2, + activation_type: str = "swish", + dropout: float = 0., + resample_with_conv: bool = True, + fir: bool = True, + fir_kernel: tuple[int] = (1, 3, 3, 1), + skip_rescale: bool = True, + progressive: Literal["none", "output_skip", "residual"] = "output_skip", + progressive_input: Literal["none", "input_skip", "residual"] = "input_skip", + init_scale: float = 1e-2, + fourier_scale: float = 16., + resblock_type: Literal["biggan", "ddpm"] = "biggan", + combine_method: Literal["concat", "sum"] = "sum", + attention: bool = True, + conditions : Optional[tuple[Literal["time_discrete", "time_continuous", "time_vector", "input_tensor"]]] = None, + condition_embeddings: Optional[tuple[int]] = None, + condition_channels: Optional[int] = None, # fourier_features=False, # n_min=7, # n_max=8, **kwargs ): + """ + NCSN++ model + + Args: + channels (int): Number of input channels. Default is 1. + dimensions (Literal[1, 2, 3]): Number of dimensions for input data. Default is 2. + nf (int): Number of filters in the first layer. Default is 128. + ch_mult (tuple[int]): Channel multiplier for each layer. Default is (2, 2, 2, 2). + num_res_blocks (int): Number of residual blocks. Default is 2. + activation_type (str): Type of activation function to use. Default is "swish". + dropout (float): Dropout probability. Default is 0. + resample_with_conv (bool): Whether to resample with convolution. Default is True. + fir (bool): Whether to use finite impulse response filter. Default is True. 
+ fir_kernel (tuple[int]): Kernel size for FIR filter. Default is (1, 3, 3, 1). + skip_rescale (bool): Whether to rescale skip connections. Default is True. + progressive (Literal["none", "output_skip", "residual"]): Type of progressive training. Default is "output_skip". + progressive_input (Literal["none", "input_skip", "residual"]): Type of progressive input. Default is "input_skip". + init_scale (float): Initial scale for weights. Default is 1e-2. + fourier_scale (float): Scale for Fourier features. Default is 16. + resblock_type (Literal["biggan", "ddpm"]): Type of residual block. Default is "biggan". + combine_method (Literal["concat", "sum"]): Method for combining features. Default is "sum". + attention (bool): Whether to use attention mechanism. Default is True. + conditions (Optional[tuple[Literal["time_discrete", "time_continuous", "time_vector", "input_tensor"]]]): Conditions for input data. Default is None. + condition_embeddings (Optional[tuple[int]]): Embedding size for conditions. Default is None. + condition_channels (Optional[int]): Number of channels for conditions. Default is None. + """ super().__init__() if dimensions not in [1, 2, 3]: raise ValueError("Input must have 1, 2, or 3 spatial dimensions to use this architecture") - self.conditioned = False - discrete_index = 0 - if condition is not None: - if not isinstance(condition, (tuple, list)): - raise ValueError("Condition should be a list or a tuple of strings") - for c in condition: - if c.lower() not in ["none", "discrete_timelike", "continuous_timelike", "vector", "input"]: - raise ValueError(f"Condition must be in ['none', 'discrete_timelike', 'continuous_timelike', 'input'], received {c}") - if c.lower() != "none": - self.conditioned = True - elif c.lower() == "none" and self.conditioned: - raise ValueError(f"Cannot have a mix of 'None' and other type of conditions, received the tuple {condition}") - if c.lower() == "discrete_timelike": - if not isinstance(condition_num_embedding, (tuple, list)): - raise ValueError("condition_num_embedding must be provided and be a tuple or list of integer for discrete_timelike condition type") - elif not isinstance(condition_num_embedding[discrete_index], int): - raise ValueError("condition_num_embedding must be provided and be a tuple or list of integer for discrete_timelike condition type") - discrete_index += 1 - elif c.lower() == "input": - if not isinstance(condition_input_channels, int): - raise ValueError("condition_input_channels must be provided and be an integer for input condition type") - elif c.lower() == "vector": - if not isinstance(condition_vector_channels, int): - raise ValueError("condition_vector_channels must be provided and be an integer for vector condition type") - - self.condition_type = condition - self.condition_num_embedding = condition_num_embedding - self.condition_input_channels = 0 if condition_input_channels is None else condition_input_channels - self.condition_vector_channels = condition_vector_channels - self.dimensions = dimensions + validate_conditional_arguments(conditions, condition_embeddings, condition_channels) + self.conditioned = conditions is not None + self.condition_type = conditions + self.condition_embeddings = condition_embeddings + self.condition_channels = condition_channels self.channels = channels + self.act = act = get_activation(activation_type) + self.attention = attention + self.nf = nf + self.num_res_blocks = num_res_blocks + self.num_resolutions = num_resolutions = len(ch_mult) + self.skip_rescale = skip_rescale +
self.progressive = progressive.lower() + self.progressive_input = progressive_input.lower() + self.resblock_type = resblock_type + if progressive not in ['none', 'output_skip', 'residual']: + raise ValueError(f"progressive must be in ['none', 'output_skip', 'residual'], received {progressive}") + if progressive_input not in ['none', 'input_skip', 'residual']: + raise ValueError(f"progressive_input must be in ['none', 'input_skip', 'residual'], received {progressive_input}") self.hyperparameters = { "channels": channels, "nf": nf, @@ -116,97 +124,84 @@ def __init__( "combine_method": combine_method, "attention": attention, "dimensions": dimensions, - "condition": condition, - "condition_num_embedding": condition_num_embedding, - "condition_input_channels": condition_input_channels, - "condition_vector_channels": condition_vector_channels + "conditions": conditions, + "condition_embeddings": condition_embeddings, + "condition_channels": condition_channels, + # "fourier_features": fourier_features, + # "n_min": n_min, + # "n_max": n_max } - self.act = act = get_activation(activation_type) - self.attention = attention - - self.nf = nf - self.num_res_blocks = num_res_blocks - self.num_resolutions = num_resolutions = len(ch_mult) - - self.skip_rescale = skip_rescale - self.progressive = progressive.lower() - self.progressive_input = progressive_input.lower() - self.resblock_type = resblock_type - assert progressive in ['none', 'output_skip', 'residual'] - assert progressive_input in ['none', 'input_skip', 'residual'] - combiner = functools.partial(Combine, method=combine_method.lower(), dimensions=self.dimensions) - # Timelike condition branch, to be appended to the time embedding - time_input_nf = nf - discrete_index = 0 + ########### Conditional branch ########### if self.conditioned: - condition_embedding_layers = [] - for c_type in self.condition_type: - if c_type.lower() == "discrete_timelike": - time_input_nf += nf - condition_embedding_layers.append(nn.Embedding(num_embeddings=self.condition_num_embedding[discrete_index], - embedding_dim=nf)) - discrete_index += 1 - elif c_type.lower() == "continuous_timelike": - time_input_nf += nf - condition_embedding_layers.append(GaussianFourierProjection(embed_dim=nf, scale=fourier_scale)) - elif c_type.lower() == "vector": - time_input_nf += nf - condition_embedding_layers.append(PositionalEncoding(channels=self.condition_vector_channels, embed_dim=nf, scale=fourier_scale)) - self.condition_embedding_layers = nn.ModuleList(condition_embedding_layers) + total_time_channels, total_input_channels = conditional_branch( + self, + time_branch_channels=nf, + input_branch_channels=channels, + condition_embeddings=condition_embeddings, + condition_channels=condition_channels, + fourier_scale=fourier_scale + ) # This method attach a Module list to self.conditional_branch + else: + total_time_channels = nf + total_input_channels = channels + ######################################### - # Condition on continuous time (second layer receives a concatenation of all the embeddings) - modules = [GaussianFourierProjection(embed_dim=nf, scale=fourier_scale), nn.Linear(time_input_nf, nf * 4), nn.Linear(nf * 4, nf * 4)] + ########### Time branch ########### + modules = [ + GaussianFourierProjection(embed_dim=nf, scale=fourier_scale), # Time embedding + nn.Linear(total_time_channels, nf * 4), # Combine time embedding with conditionals if any + nn.Linear(nf * 4, nf * 4) + ] with torch.no_grad(): modules[1].weight.data = default_init()(modules[1].weight.shape) 
modules[1].bias.zero_() modules[2].weight.data = default_init()(modules[2].weight.shape) modules[2].bias.zero_() + #################################### - AttnBlock = functools.partial(SelfAttentionBlock, init_scale=init_scale, dimensions=dimensions) - Upsample = functools.partial(UpsampleLayer, with_conv=resample_with_conv, fir=fir, fir_kernel=fir_kernel, dimensions=self.dimensions) - + ########### Prepare layers ########### + combiner = partial(Combine, method=combine_method.lower(), dimensions=self.dimensions) + AttnBlock = partial(SelfAttentionBlock, init_scale=init_scale, dimensions=dimensions) + Upsample = partial(UpsampleLayer, with_conv=resample_with_conv, fir=fir, fir_kernel=fir_kernel, dimensions=self.dimensions) + Downsample = partial(DownsampleLayer, with_conv=resample_with_conv, fir=fir, fir_kernel=fir_kernel, dimensions=self.dimensions) if progressive == 'output_skip': self.pyramid_upsample = Upsample(fir=fir, fir_kernel=fir_kernel, with_conv=False) elif progressive == 'residual': - pyramid_upsample = functools.partial(UpsampleLayer, fir=fir, fir_kernel=fir_kernel, with_conv=True, dimensions=self.dimensions) - - Downsample = functools.partial(DownsampleLayer, with_conv=resample_with_conv, fir=fir, fir_kernel=fir_kernel, dimensions=self.dimensions) - + pyramid_upsample = partial(UpsampleLayer, fir=fir, fir_kernel=fir_kernel, with_conv=True, dimensions=self.dimensions) if progressive_input == 'input_skip': self.pyramid_downsample = Downsample(fir=fir, fir_kernel=fir_kernel, with_conv=False) elif progressive_input == 'residual': - pyramid_downsample = functools.partial(Downsample, fir=fir, fir_kernel=fir_kernel, with_conv=True) - - + pyramid_downsample = partial(Downsample, fir=fir, fir_kernel=fir_kernel, with_conv=True) if resblock_type == 'ddpm': - ResnetBlock = functools.partial(DDPMResnetBlock, - act=act, - dropout=dropout, - init_scale=init_scale, - skip_rescale=skip_rescale, - temb_dim=nf * 4, - dimensions=self.dimensions - ) + ResnetBlock = partial(DDPMResnetBlock, + act=act, + dropout=dropout, + init_scale=init_scale, + skip_rescale=skip_rescale, + temb_dim=nf * 4, + dimensions=self.dimensions + ) elif resblock_type == 'biggan': - ResnetBlock = functools.partial(ResnetBlockBigGANpp, - act=act, - dropout=dropout, - fir=fir, - fir_kernel=fir_kernel, - init_scale=init_scale, - skip_rescale=skip_rescale, - temb_dim=nf * 4, - dimensions=self.dimensions - ) + ResnetBlock = partial(ResnetBlockBigGANpp, + act=act, + dropout=dropout, + fir=fir, + fir_kernel=fir_kernel, + init_scale=init_scale, + skip_rescale=skip_rescale, + temb_dim=nf * 4, + dimensions=self.dimensions + ) else: raise ValueError(f'resblock type {resblock_type} unrecognized.') + ##################################### # Downsampling block - input_pyramid_ch = channels + self.condition_input_channels - modules.append(conv3x3(channels + self.condition_input_channels, nf, dimensions=dimensions)) + input_pyramid_ch = total_input_channels + modules.append(conv3x3(total_input_channels, nf, dimensions=dimensions)) hs_c = [nf] in_ch = nf #+ fourier_feature_channels for i_level in range(num_resolutions): @@ -250,12 +245,12 @@ def __init__( if progressive != 'none': if i_level == num_resolutions - 1: if progressive == 'output_skip': - modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), + modules.append(nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), num_channels=in_ch, eps=1e-6)) modules.append(conv3x3(in_ch, channels, init_scale=init_scale, dimensions=dimensions)) pyramid_ch = channels elif progressive == 
'residual': - modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), + modules.append(nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), num_channels=in_ch, eps=1e-6)) modules.append(conv3x3(in_ch, in_ch, bias=True, dimensions=dimensions)) pyramid_ch = in_ch @@ -263,7 +258,7 @@ def __init__( raise ValueError(f'{progressive} is not a valid name.') else: if progressive == 'output_skip': - modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), + modules.append(nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), num_channels=in_ch, eps=1e-6)) modules.append(conv3x3(in_ch, channels, bias=True, init_scale=init_scale, dimensions=dimensions)) pyramid_ch = channels @@ -282,7 +277,7 @@ def __init__( assert not hs_c if progressive != 'output_skip': - modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), + modules.append(nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), num_channels=in_ch, eps=1e-6)) modules.append(conv3x3(in_ch, channels, init_scale=1., dimensions=dimensions)) @@ -290,49 +285,32 @@ def __init__( def forward(self, t, x, *args): B, *D = x.shape - # timestep/noise_level embedding; only for continuous training modules = self.all_modules m_idx = 0 - # Gaussian Fourier features embeddings. + + # Time branch temb = modules[m_idx](t).view(B, -1) m_idx += 1 - - c_idx = 0 if self.conditioned: - if len(args) != len(self.condition_type): - raise ValueError(f"The network requires {len(self.condition_type)} additional arguments, but {len(args)} were provided.") - for j, condition in enumerate(args): - if "timelike" in self.condition_type[j].lower() or "vector" in self.condition_type[j].lower(): - # embedding and concatenation of the 'timelike' conditions - c_emb = self.condition_embedding_layers[c_idx](condition).view(B, -1) - temb = torch.cat([temb, c_emb], dim=1) - c_idx += 1 - + temb = merge_conditional_time_branch(self, temb, *args) temb = modules[m_idx](temb) m_idx += 1 temb = modules[m_idx](self.act(temb)) m_idx += 1 - + # Input branch if self.conditioned: - for j, condition in enumerate(args): - if self.condition_type[j].lower() == "input": - x = torch.cat([x, condition], dim=1) - - # Add Fourier features + x = merge_conditional_input_branch(self, x, *args) # if self.fourier_features: # ffeatures = self.fourier_features(x) # x = torch.concat([x, ffeatures], axis=1) - # Downsampling block input_pyramid = None if self.progressive_input != 'none': input_pyramid = x - hs = [modules[m_idx](x)] m_idx += 1 for i_level in range(self.num_resolutions): - # Residual blocks for this resolution for i_block in range(self.num_res_blocks): h = modules[m_idx](hs[-1], temb) torch.var(h) diff --git a/score_models/architectures/ncsnpp_level.py b/score_models/architectures/ncsnpp_level.py new file mode 100644 index 0000000..0ad4443 --- /dev/null +++ b/score_models/architectures/ncsnpp_level.py @@ -0,0 +1,86 @@ +# from typing import Optional + +# import torch +# from torch.nn import Module +# from torch import nn + +# from .ncsnpp import NCSNpp + +# class NCSNppLevel(Module): + # def __init__( + # self, + # base_net: NCSNpp, + # ): + # super().__init__() + + + + + + + +# def adjust_layer_index(name, shift): + # """ + # Adjusts the index in layer names to account for the extra levels added. 
+ # """ + # match = re.search(r'all_modules\.(\d+)', name) + # if match: + # original_index = int(match.group(1)) + # # Adjust index for layers beyond the initial Fourier and Linear layers + # if original_index > 3: # Assuming the first few layers are fixed as described + # return original_index + shift + # else: + # raise ValueError(f"Layer index not found in layer name: {name}") + + +# def initialize_from_pretrained(pretrained_score_model, extra_levels=1, ch_mult=1) -> ScoreModel: + # """ + # Initializes a new model with additional levels from a pre-trained bottleneck model. + + # Args: + # pretrained_model (NCSNpp): The pre-trained model from which to initialize the new model. + # extra_levels (int): The number of additional levels to add to the U-Net architecture. + # ch_mult (int): The channel multiplier for the additional levels. If set to 1, the first layer of the bottleneck + # is initialized with pre-trained weights; otherwise, it is initialized with random weights. + + # Returns: + # NCSNpp: A new model instance with the updated architecture and weights. + # """ + # score_hyperparameters = pretrained_score_model.hyperparameters.copy() + # hyperparameters = pretrained_score_model.model.hyperparameters.copy() + + # # Adjust ch_mult for the new model + # hyperparameters["ch_mult"] = [ch_mult] * extra_levels + hyperparameters["ch_mult"] + + # # Initialize the new model + # new_model = NCSNpp(**hyperparameters) + + # pretrained_dict = pretrained_score_model.model.state_dict() + # new_dict = new_model.state_dict() + + # # Layer index shift calculation + # num_res_blocks = hyperparameters["num_res_blocks"] + # input_skip_or_residual = hyperparameters["progressive_input"] in ["input_skip", "residual"] + # layer_shift = (num_res_blocks + (2 if input_skip_or_residual else 1)) * extra_levels + + # # Copy weights with adjustments + # for name, param in pretrained_dict.items(): + # # Adjust layer names based on the calculated shift + # if ch_mult != 1 and "all_modules.3.conv" in name: + # adjusted_index = adjust_layer_index(name, layer_shift) + # new_name = name.replace(f".{name.split('.')[1]}.", f".{adjusted_index}.") + + # if new_name in new_dict: + # new_dict[new_name].copy_(param) + # print(f"Copied weights for layer: {name} -> {new_name}") + # else: + # print(f"Layer {new_name} not found in new model. 
This layer might be part of the added level.") + + # # Handle special case for the first layer of the bottleneck if ch_mult != 1 + # if ch_mult != 1: + # # Initialize first layer of bottleneck with random weights or according to some strategy + # print("Initializing first layer of the bottleneck with random weights due to ch_mult != 1.") + + # new_score_model = ScoreModel(new_model, **score_hyperparameters) + # return new_score_model + diff --git a/score_models/base.py b/score_models/base.py deleted file mode 100644 index 1b90cac..0000000 --- a/score_models/base.py +++ /dev/null @@ -1,496 +0,0 @@ -from typing import Callable, Union - -import torch -from torch import Tensor -from torch.nn import Module -from torch.utils.data import DataLoader, Dataset -from torch.func import vjp -from torch_ema import ExponentialMovingAverage -from .utils import DEVICE -from typing import Union -from abc import ABC, abstractmethod -from tqdm import tqdm -import time -import os, glob, re, json -import numpy as np -from datetime import datetime -from contextlib import nullcontext - -from .sde import VESDE, VPSDE, TSVESDE, SDE -from .utils import load_architecture - - -class ScoreModelBase(Module, ABC): - def __init__( - self, - model: Union[str, Module]=None, - sde:Union[str, SDE]=None, - checkpoints_directory=None, - model_checkpoint:int=None, - device=DEVICE, - **hyperparameters - ): - super().__init__() - if model is None and checkpoints_directory is None: - raise ValueError("Must provide one of 'model' or 'checkpoints_directory'") - if model is None or isinstance(model, str): - model, hyperparams, self.loaded_checkpoint = load_architecture( - checkpoints_directory, - model=model, - device=device, - hyperparameters=hyperparameters, - model_checkpoint=model_checkpoint - ) - hyperparameters.update(hyperparams) - elif hasattr(model, "hyperparameters"): - hyperparameters.update(model.hyperparameters) - if sde is None: - # Some sane defaults for quick use - if "t_star" in hyperparameters.keys(): - print("Using the Truncated Scaled Variance Exploding SDE") - sde = "tsve" - elif "sigma_min" in hyperparameters.keys(): - print("Using the Variance Exploding SDE") - sde = "ve" - elif "beta_min" in hyperparameters.keys(): - print("Using the Variance Preserving SDE") - sde = "vp" - else: - raise KeyError("SDE parameters are missing, please specify which sde to use") - if isinstance(sde, str): - if sde.lower() not in ["ve", "vp", "tsve"]: - raise ValueError(f"The SDE {sde} provided is no supported") - hyperparameters["sde"] = sde.lower() - if "T" not in hyperparameters.keys(): - hyperparameters["T"] = 1. 
- if sde.lower() == "ve": - sde = VESDE(sigma_min=hyperparameters["sigma_min"], sigma_max=hyperparameters["sigma_max"], T=hyperparameters["T"]) - elif sde.lower() == "vp": - if "epsilon" not in hyperparameters.keys(): - hyperparameters["epsilon"] = 1e-5 - sde = VPSDE( - beta_min=hyperparameters["beta_min"], - beta_max=hyperparameters["beta_max"], - T=hyperparameters["T"], - epsilon=hyperparameters["epsilon"] - ) - elif sde.lower() == "tsve": - if "epsilon" not in hyperparameters.keys(): - hyperparameters["epsilon"] = 0 - sde = TSVESDE( - sigma_min=hyperparameters["sigma_min"], - sigma_max=hyperparameters["sigma_max"], - t_star=hyperparameters["t_star"], - beta=hyperparameters["beta"], - T=hyperparameters["T"], - epsilon=hyperparameters["epsilon"] - ) - - hyperparameters["model_architecture"] = model.__class__.__name__ - self.hyperparameters = hyperparameters - self.checkpoints_directory = checkpoints_directory - self.model = model - self.model.to(device) - self.sde = sde - self.device = device - - def forward(self, t, x, *args) -> Tensor: - return self.score(t, x, *args) - - @abstractmethod - def score(self, t, x, *args) -> Tensor: - ... - - @abstractmethod - def loss_fn(self, x, *args) -> Tensor: - ... - - def ode_drift(self, t, x, *args): - f = self.sde.drift(t, x) - g = self.sde.diffusion(t, x) - f_tilde = f - 0.5 * g**2 * self.score(t, x, *args) - return f_tilde - - def hessian(self, t, x, *args, **kwargs): - return self.divergence(self.drift_fn, t, x, *args, **kwargs) - - def divergence(self, drift_fn, t, x, *args, n_cotangent_vectors: int = 1, noise_type="rademacher") -> Tensor: - B, *D = x.shape - # duplicate noisy samples for for the Hutchinson trace estimator - samples = torch.tile(x, [n_cotangent_vectors, *[1]*len(D)]) - # TODO also duplicate args - t = torch.tile(t, [n_cotangent_vectors]) - # sample cotangent vectors - vectors = torch.randn_like(samples) - if noise_type == 'rademacher': - vectors = vectors.sign() - # Compute the trace of the Jacobian of the drift functions (Hessian if drift is just the score) - f = lambda x: drift_fn(t, x, *args) - _, vjp_func = vjp(f, samples) - divergence = (vectors * vjp_func(vectors)[0]).flatten(1).sum(dim=1) - return divergence - - @torch.no_grad() - def log_likelihood( - self, - x, - *args, - ode_steps: int, - n_cotangent_vectors: int = 1, - noise_type="rademacher", - verbose=0, - method="Euler", - t0:int=0, - t1:int=1 - ) -> Tensor: - """ - A basic implementation of Euler discretisation method of the ODE associated - with the marginals of the learned SDE. - - ode_steps: Number of steps to perform in the ODE - hutchinsons_samples: Number of samples to draw to compute the trace of the Jacobian (divergence) - - Note that this estimator only compute the likelihood for one trajectory. - For more precise log likelihood estimation, tile x along the batch dimension - and averge the results. You can also increase the number of ode steps and increase - the number of cotangent vector for the Hutchinson estimator. - - Using the instantaneous change of variable formula - (Chen et al. 2018,https://arxiv.org/abs/1806.07366) - See also Song et al. 2020, https://arxiv.org/abs/2011.13456) - """ - kwargs = {"n_cotangent_vectors": n_cotangent_vectors, "noise_type": noise_type} - disable = False if verbose else True - B, *D = x.shape - log_p = 0. 
- t = torch.ones([B]).to(self.device) * t0 - dt = (t1 - t0) / ode_steps - # Small wrappers to make the notation a bit more readable - f = lambda t, x: self.ode_drift(t, x, *args) - div = lambda t, x: self.divergence(self.ode_drift, t, x, *args, **kwargs) - for _ in tqdm(range(ode_steps), disable=disable): - if method == "Euler": - x = x + f(t, x) * dt - log_p += div(t, x) * dt - t = t + dt - elif method == "Heun": - previous_x = x.clone() - drift = f(t, x) - new_x = x + drift * dt - x = x + 0.5 * (drift + f(t+dt, new_x)) * dt - log_p += 0.5 * (div(t, previous_x) + div(t+dt, x)) * dt - t = t + dt - else: - raise NotImplementedError("Invalid method, please select either Euler or Heun") - log_p += self.sde.prior(D).log_prob(x) - return log_p - - # def score_at_zero_temperature( - # self, - # x, - # *args, - # ode_steps: int, - # n_cotangent_vectors: int = 1, - # noise_type="rademacher", - # verbose=0, - # return_ll=False - # ) -> Tensor: - # """ - # Takes a gradient through the log_likelihood solver to compute the score - # at zero temperature. - # """ - # kwargs = {"ode_steps": ode_steps, - # "n_cotangent_vectors": n_cotangent_vectors, - # "verbose": verbose, - # "noise_type": noise_type - # } - # wrapped_ll = lambda x: self.log_likelihood(x, *args, **kwargs) - # ll, vjp_func = vjp(wrapped_ll, x) - # score = vjp_func(torch.ones_like(ll)) - # if return_ll: - # return score, ll - # return score - - @torch.no_grad() - def sample( - self, - shape, # TODO change this so that specifying C, H, W is optional. Maybe save C, H, W in model hparams in the future - steps, - condition:list=[], - likelihood_score_fn:Callable=None, - guidance_factor=1. - ): - """ - An Euler-Maruyama integration of the model SDE - - shape: Shape of the tensor to sample (including batch size) - steps: Number of Euler-Maruyam steps to perform - likelihood_score_fn: Add an additional drift to the sampling for posterior sampling. Must have the signature f(t, x) - guidance_factor: Multiplicative factor for the likelihood drift - """ - if not isinstance(condition, (list, tuple)): - raise ValueError(f"condition must be a list or tuple or torch.Tensor, received {type(condition)}") - B, *D = shape - sampling_from = "prior" if likelihood_score_fn is None else "posterior" - if likelihood_score_fn is None: - likelihood_score_fn = lambda t, x: 0. - x = self.sde.prior(D).sample([B]).to(self.device) - dt = -(self.sde.T - self.sde.epsilon) / steps - t = torch.ones(B).to(self.device) * self.sde.T - for _ in (pbar := tqdm(range(steps))): - pbar.set_description(f"Sampling from the {sampling_from} | t = {t[0].item():.1f} | sigma = {self.sde.sigma(t)[0].item():.1e}" - f"| scale ~ {x.std().item():.1e}") - t += dt - if t[0] < self.sde.epsilon: # Accounts for numerical error in the way we discretize t. - break - g = self.sde.diffusion(t, x) - f = self.sde.drift(t, x) - g**2 * (self.score(t, x, *condition) + guidance_factor * likelihood_score_fn(t, x)) - dw = torch.randn_like(x) * (-dt)**(1/2) - x_mean = x + f * dt - x = x_mean + g * dw - if torch.any(torch.isnan(x)): - print("Diffusion is not stable, NaN were produced. 
Stopped sampling.") - break - return x_mean - - def fit( - self, - dataset: Dataset, - preprocessing_fn=None, - epochs=100, - learning_rate=1e-4, - ema_decay=0.9999, - batch_size=1, - shuffle=False, - patience=float('inf'), - tolerance=0, - max_time=float('inf'), - warmup=0, - clip=0., - checkpoints_directory=None, - model_checkpoint=None, - checkpoints=10, - models_to_keep=2, - seed=None, - logname=None, - logdir=None, - n_iterations_in_epoch=None, - logname_prefix="score_model", - verbose=0 - ): - """ - Train the model on the provided dataset. - - Parameters: - dataset (torch.utils.data.Dataset): The training dataset. - preprocessing_fn (function, optional): A function to preprocess the input data. Default is None. - learning_rate (float, optional): The learning rate for optimizer. Default is 1e-4. - ema_decay (float, optional): The decay rate for Exponential Moving Average. Default is 0.9999. - batch_size (int, optional): The batch size for training. Default is 1. - shuffle (bool, optional): Whether to shuffle the dataset during training. Default is False. - epochs (int, optional): The number of epochs for training. Default is 100. - patience (float, optional): The patience value for early stopping. Default is infinity. - tolerance (float, optional): The tolerance value for early stopping. Default is 0. - max_time (float, optional): The maximum training time in hours. Default is infinity. - warmup (int, optional): The number of warmup iterations for learning rate. Default is 0. - clip (float, optional): The gradient clipping value. Default is 0. - model_checkpoint (float, optional): If checkpoints_directory is provided, this can be used to restart training from checkpoint. - checkpoints_directory (str, optional): The directory to save model checkpoints. Default is None. - checkpoints (int, optional): The interval for saving model checkpoints. Default is 10 epochs. - models_to_keep (int, optional): The number of best models to keep. Default is 3. - seed (int, optional): The random seed for numpy and torch. Default is None. - logname (str, optional): The logname for saving checkpoints. Default is None. - logdir (str, optional): The path to the directory in which to create the new checkpoint_directory with logname. - logname_prefix (str, optional): The prefix for the logname. Default is "score_model". - - Returns: - list: List of loss values during training. 
- """ - optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate) - ema = ExponentialMovingAverage(self.model.parameters(), decay=ema_decay) - dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=False) - if n_iterations_in_epoch is None: - n_iterations_in_epoch = len(dataloader) - if checkpoints_directory is None: - checkpoints_directory = self.checkpoints_directory - - if preprocessing_fn is not None: - preprocessing_name = preprocessing_fn.__name__ - else: - preprocessing_name = None - preprocessing_fn = lambda x: x - - # ==== Take care of where to write checkpoints and stuff ================================================================= - if checkpoints_directory is not None: - if os.path.isdir(checkpoints_directory): - logname = os.path.split(checkpoints_directory)[-1] - elif logname is None: - logname = logname_prefix + "_" + datetime.now().strftime("%y%m%d%H%M%S") - - save_checkpoint = False - latest_checkpoint = 0 - if checkpoints_directory is not None or logdir is not None: - save_checkpoint = True - if checkpoints_directory is None: - checkpoints_directory = os.path.join(logdir, logname) - if not os.path.isdir(checkpoints_directory): - os.mkdir(checkpoints_directory) - - script_params_path = os.path.join(checkpoints_directory, "script_params.json") - if not os.path.isfile(script_params_path): - with open(script_params_path, "w") as f: - json.dump( - { - "preprocessing": preprocessing_name, - "learning_rate": learning_rate, - "ema_decay": ema_decay, - "batch_size": batch_size, - "shuffle": shuffle, - "epochs": epochs, - "patience": patience, - "tolerance": tolerance, - "max_time": max_time, - "warmup": warmup, - "clip": clip, - "checkpoint_directory": checkpoints_directory, - "checkpoints": checkpoints, - "models_to_keep": models_to_keep, - "seed": seed, - "logname": logname, - "logname_prefix": logname_prefix, - }, - f, - indent=4 - ) - - model_hparams_path = os.path.join(checkpoints_directory, "model_hparams.json") - if not os.path.isfile(model_hparams_path): - with open(model_hparams_path, "w") as f: - json.dump(self.hyperparameters, f, indent=4) - - # ======= Load model if model_id is provided =============================================================== - paths = glob.glob(os.path.join(checkpoints_directory, "checkpoint*.pt")) - opt_paths = glob.glob(os.path.join(checkpoints_directory, "optimizer*.pt")) - checkpoint_indices = [int(re.findall('[0-9]+', os.path.split(path)[-1])[-1]) for path in paths] - scores = [float(re.findall('([0-9]{1}.[0-9]+e[+-][0-9]{2})', os.path.split(path)[-1])[-1]) for path in paths] - if checkpoint_indices: - if model_checkpoint is not None: - checkpoint_path = paths[checkpoint_indices.index(model_checkpoint)] - self.model.load_state_dict(torch.load(checkpoint_path, map_location=self.model.device)) - optimizer.load_state_dict(torch.load(opt_paths[checkpoints == model_checkpoint], map_location=self.device)) - print(f"Loaded checkpoint {model_checkpoint} of {logname}") - latest_checkpoint = model_checkpoint - else: - max_checkpoint_index = np.argmax(checkpoint_indices) - checkpoint_path = paths[max_checkpoint_index] - opt_path = opt_paths[max_checkpoint_index] - self.model.load_state_dict(torch.load(checkpoint_path, map_location=self.device)) - optimizer.load_state_dict(torch.load(opt_path, map_location=self.device)) - print(f"Loaded checkpoint {checkpoint_indices[max_checkpoint_index]} of {logname}") - latest_checkpoint = checkpoint_indices[max_checkpoint_index] - - if seed is not None: - 
torch.manual_seed(seed) - best_loss = float('inf') - losses = [] - step = 0 - global_start = time.time() - estimated_time_for_epoch = 0 - out_of_time = False - - data_iter = iter(dataloader) - for epoch in (pbar := tqdm(range(epochs))): - if (time.time() - global_start) > max_time * 3600 - estimated_time_for_epoch: - break - epoch_start = time.time() - time_per_step_epoch_mean = 0 - cost = 0 - for _ in range(n_iterations_in_epoch): - start = time.time() - try: - X = next(data_iter) - except StopIteration: - data_iter = iter(dataloader) - X = next(data_iter) - if isinstance(X, (list, tuple)): - x, *args = X - else: - x = X - args = [] - if preprocessing_fn is not None: - x = preprocessing_fn(x) - optimizer.zero_grad() - loss = self.loss_fn(x, *args) - loss.backward() - - if step < warmup: - for g in optimizer.param_groups: - g['lr'] = learning_rate * np.minimum(step / warmup, 1.0) - - if clip > 0: - torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=clip) - - optimizer.step() - ema.update() - - _time = time.time() - start - time_per_step_epoch_mean += _time - cost += float(loss) - step += 1 - - time_per_step_epoch_mean /= len(dataloader) - cost /= len(dataloader) - pbar.set_description(f"Epoch {epoch + 1:d} | Cost: {cost:.1e} |") - losses.append(cost) - if verbose >= 2: - print(f"epoch {epoch} | cost {cost:.2e} | time per step {time_per_step_epoch_mean:.2e} s") - elif verbose == 1: - if (epoch + 1) % checkpoints == 0: - print(f"epoch {epoch} | cost {cost:.1e}") - - if np.isnan(cost): - print("Model exploded and returns NaN") - break - - if cost < (1 - tolerance) * best_loss: - best_loss = cost - patience = patience - else: - patience -= 1 - - if (time.time() - global_start) > max_time * 3600: - out_of_time = True - - if save_checkpoint: - if (epoch + 1) % checkpoints == 0 or patience == 0 or epoch == epochs - 1 or out_of_time: - latest_checkpoint += 1 - with open(os.path.join(checkpoints_directory, "score_sheet.txt"), mode="a") as f: - f.write(f"{latest_checkpoint} {cost}\n") - with ema.average_parameters(): - torch.save(self.model.state_dict(), os.path.join(checkpoints_directory, f"checkpoint_{cost:.4e}_{latest_checkpoint:03d}.pt")) - torch.save(optimizer.state_dict(), os.path.join(checkpoints_directory, f"optimizer_{cost:.4e}_{latest_checkpoint:03d}.pt")) - paths = glob.glob(os.path.join(checkpoints_directory, "*.pt")) - checkpoint_indices = [int(re.findall('[0-9]+', os.path.split(path)[-1])[-1]) for path in paths] - scores = [float(re.findall('([0-9]{1}.[0-9]+e[+-][0-9]{2})', os.path.split(path)[-1])[-1]) for path in paths] - if len(checkpoint_indices) > 2*models_to_keep: # has to be twice since we also save optimizer states - index_to_delete = np.argmin(checkpoint_indices) - os.remove(os.path.join(checkpoints_directory, f"checkpoint_{scores[index_to_delete]:.4e}_{checkpoint_indices[index_to_delete]:03d}.pt")) - os.remove(os.path.join(checkpoints_directory, f"optimizer_{scores[index_to_delete]:.4e}_{checkpoint_indices[index_to_delete]:03d}.pt")) - del scores[index_to_delete] - del checkpoint_indices[index_to_delete] - - if patience == 0: - print("Reached patience") - break - - if out_of_time: - print("Out of time") - break - - if epoch > 0: - estimated_time_for_epoch = time.time() - epoch_start - - print(f"Finished training after {(time.time() - global_start) / 3600:.3f} hours.") - # Save EMA weights in the model - ema.copy_to(self.parameters()) - return losses diff --git a/score_models/dsm.py b/score_models/dsm.py deleted file mode 100644 index 02d8c2b..0000000 --- 
a/score_models/dsm.py +++ /dev/null @@ -1,12 +0,0 @@ -from typing import Union -import torch -from torch import Tensor - -def denoising_score_matching(score_model: Union["ScoreModel", "EnergyModel"], samples: Tensor, *args: list[Tensor]): - B, *D = samples.shape - sde = score_model.sde - z = torch.randn_like(samples) - t = torch.rand(B).to(score_model.device) * (sde.T - sde.epsilon) + sde.epsilon - mean, sigma = sde.marginal_prob(t, samples) - return torch.sum((z + score_model.model(t, mean + sigma * z, *args)) ** 2) / B - diff --git a/score_models/layers/__init__.py b/score_models/layers/__init__.py index e32a6f1..ef733a0 100644 --- a/score_models/layers/__init__.py +++ b/score_models/layers/__init__.py @@ -1,15 +1,13 @@ -from .style_gan_conv import StyleGANConv -from .conv_layers import conv3x3, conv1x1 -from .resnet_block_biggan import ResnetBlockBigGANpp -from .spectral_normalization import SpectralNorm -from .ddpm_resnet_block import DDPMResnetBlock -from .ncsn_resnet_block import NCSNResidualBlock -from .attention_block import SelfAttentionBlock, ScaledAttentionLayer -from .projection_embedding import GaussianFourierProjection, PositionalEncoding -from .conv1dsame import Conv1dSame, ConvTransposed1dSame -from .conv2dsame import Conv2dSame, ConvTransposed2dSame -from .conv3dsame import Conv3dSame, ConvTransposed3dSame -from .combine import Combine -from .upsample import UpsampleLayer -from .downsample import DownsampleLayer - +from .attention_block import * +from .combine import * +from .conv1dsame import * +from .conv2dsame import * +from .conv3dsame import * +from .conv_layers import * +from .ddpm_resnet_block import * +from .downsample import * +from .projection_embedding import * +from .resnet_block_biggan import * +from .style_gan_conv import * +from .spectral_normalization import * +from .upsample import * diff --git a/score_models/layers/attention_block.py b/score_models/layers/attention_block.py index 8cb2921..fc6e93e 100644 --- a/score_models/layers/attention_block.py +++ b/score_models/layers/attention_block.py @@ -6,6 +6,8 @@ since the latter has some reported speed issues. 
""" +__all__ = ["SelfAttentionBlock", "ScaledAttentionLayer"] + class SelfAttentionBlock(nn.Module): """ diff --git a/score_models/layers/combine.py b/score_models/layers/combine.py index caf12d1..773aeba 100644 --- a/score_models/layers/combine.py +++ b/score_models/layers/combine.py @@ -1,6 +1,8 @@ import torch from .conv_layers import conv1x1 +__all__ = ['Combine'] + class Combine(torch.nn.Module): """Combine information from skip connections.""" diff --git a/score_models/layers/conditional_batchnorm2d.py b/score_models/layers/conditional_batchnorm2d.py index 34a1c1b..b519476 100644 --- a/score_models/layers/conditional_batchnorm2d.py +++ b/score_models/layers/conditional_batchnorm2d.py @@ -1,6 +1,8 @@ import torch.nn as nn import torch +__all__ = ['ConditionalBatchNorm2d'] + class ConditionalBatchNorm2d(nn.Module): """ diff --git a/score_models/layers/conditional_instancenorm2d.py b/score_models/layers/conditional_instancenorm2d.py index fab4032..dfe9a52 100644 --- a/score_models/layers/conditional_instancenorm2d.py +++ b/score_models/layers/conditional_instancenorm2d.py @@ -1,6 +1,8 @@ import torch.nn as nn import torch +__all__ = ['ConditionalInstanceNorm2d'] + class ConditionalInstanceNorm2d(nn.Module): def __init__(self, num_features, num_classes=None, bias=True): @@ -50,4 +52,4 @@ def forward(self, x, condition): some_network = ConditionalInstanceNorm2d(10, 3) some_input_image = torch.randn((10, 10, 32, 32)) # [B, C, H, W] time_index = torch.randint(size=(10,), high=3) - some_network.forward(some_input_image, time_index) \ No newline at end of file + some_network.forward(some_input_image, time_index) diff --git a/score_models/layers/conditional_instancenorm2d_plus.py b/score_models/layers/conditional_instancenorm2d_plus.py index fc0b3e3..ad16628 100644 --- a/score_models/layers/conditional_instancenorm2d_plus.py +++ b/score_models/layers/conditional_instancenorm2d_plus.py @@ -1,6 +1,8 @@ import torch.nn as nn import torch +__all__ = ['ConditionalInstanceNorm2dPlus'] + class ConditionalInstanceNorm2dPlus(nn.Module): """ diff --git a/score_models/layers/conv1dsame.py b/score_models/layers/conv1dsame.py index 560c501..f7d27c1 100644 --- a/score_models/layers/conv1dsame.py +++ b/score_models/layers/conv1dsame.py @@ -7,6 +7,8 @@ Implements same padding behavior as in Tensorflow """ +__all__ = ['Conv1dSame', 'ConvTransposed1dSame'] + class Conv1dSame(nn.Module): def __init__( diff --git a/score_models/layers/conv2dsame.py b/score_models/layers/conv2dsame.py index 25cb73b..f517da7 100644 --- a/score_models/layers/conv2dsame.py +++ b/score_models/layers/conv2dsame.py @@ -7,6 +7,8 @@ Implements same padding behavior as in Tensorflow """ +__all__ = ['Conv2dSame', 'ConvTransposed2dSame'] + class Conv2dSame(nn.Module): def __init__( diff --git a/score_models/layers/conv3dsame.py b/score_models/layers/conv3dsame.py index 836f5d4..48d743a 100644 --- a/score_models/layers/conv3dsame.py +++ b/score_models/layers/conv3dsame.py @@ -7,6 +7,8 @@ Implements same padding behavior as in Tensorflow """ +__all__ = ['Conv3dSame', 'ConvTransposed3dSame'] + class Conv3dSame(nn.Module): def __init__( diff --git a/score_models/layers/ddpm_resnet_block.py b/score_models/layers/ddpm_resnet_block.py index 7e188cd..5c7809d 100644 --- a/score_models/layers/ddpm_resnet_block.py +++ b/score_models/layers/ddpm_resnet_block.py @@ -10,6 +10,7 @@ CONVS = {1: Conv1dSame, 2: Conv2dSame, 3: Conv3dSame} +__all__ = ("DDPMResnetBlock",) def conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, dimensions=2): """3x3 
convolution with DDPM initialization.""" @@ -38,7 +39,7 @@ def forward(self, x): x = x.permute(0, *spatial_dims, 1) y = torch.einsum("ij, ...j -> ...i", self.W, x) + self.b spatial_dims = list(range(1, 1+len(D))) - return y.permute(0, 3, *spatial_dims) + return y.permute(0, -1, *spatial_dims) class DDPMResnetBlock(nn.Module): @@ -47,7 +48,7 @@ def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, super().__init__() if out_ch is None: out_ch = in_ch - self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6) + self.GroupNorm_0 = nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), num_channels=in_ch, eps=1e-6) self.act = act self.Conv_0 = conv3x3(in_ch, out_ch, dimensions=dimensions) if temb_dim is not None: @@ -55,7 +56,7 @@ def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape) nn.init.zeros_(self.Dense_0.bias) - self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6) + self.GroupNorm_1 = nn.GroupNorm(num_groups=max(min(out_ch // 4, 32), 1), num_channels=out_ch, eps=1e-6) self.Dropout_0 = nn.Dropout(dropout) self.Conv_1 = conv3x3(out_ch, out_ch, dimensions=dimensions) if in_ch != out_ch: @@ -68,14 +69,13 @@ def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, self.conv_shortcut = conv_shortcut def forward(self, x, temb=None): - B, C, H, W = x.shape - assert C == self.in_ch + B, C, *D = x.shape out_ch = self.out_ch if self.out_ch else self.in_ch h = self.act(self.GroupNorm_0(x)) h = self.Conv_0(h) # Add bias to each feature map conditioned on the time embedding if temb is not None: - h += self.Dense_0(self.act(temb))[:, :, None, None] + h += self.Dense_0(self.act(temb)).view(B, out_ch, *[1]*len(D)) h = self.act(self.GroupNorm_1(h)) h = self.Dropout_0(h) h = self.Conv_1(h) diff --git a/score_models/layers/downsample.py b/score_models/layers/downsample.py index 67b18f7..9174122 100644 --- a/score_models/layers/downsample.py +++ b/score_models/layers/downsample.py @@ -9,6 +9,8 @@ 2: F.avg_pool2d, 3: F.avg_pool3d} +__all__ = ['DownsampleLayer'] + class DownsampleLayer(torch.nn.Module): def __init__( self, diff --git a/score_models/layers/ncsn_resnet_block.py b/score_models/layers/ncsn_resnet_block.py index 45c3ce8..89eaa18 100644 --- a/score_models/layers/ncsn_resnet_block.py +++ b/score_models/layers/ncsn_resnet_block.py @@ -6,7 +6,6 @@ from functools import partial from .conv2dsame import Conv2dSame - def conv1x1(in_planes, out_planes, stride=1, bias=True, dilation=1): """1x1 convolution. 
Same as NCSNv1/v2.""" conv = Conv2dSame(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias, dilation=dilation) diff --git a/score_models/layers/projection_embedding.py b/score_models/layers/projection_embedding.py index c9ed104..e10f4da 100644 --- a/score_models/layers/projection_embedding.py +++ b/score_models/layers/projection_embedding.py @@ -5,6 +5,8 @@ left_matmul = vmap(torch.matmul, in_dims=(None, 0)) +__all__ = ['GaussianFourierProjection', 'PositionalEncoding'] + class GaussianFourierProjection(nn.Module): """Gaussian random features for encoding time steps.""" diff --git a/score_models/layers/resnet_block_biggan.py b/score_models/layers/resnet_block_biggan.py index 8ab1c5d..1171226 100644 --- a/score_models/layers/resnet_block_biggan.py +++ b/score_models/layers/resnet_block_biggan.py @@ -1,3 +1,5 @@ +from typing import Optional, Callable + import torch from torch import nn from score_models.definitions import default_init @@ -7,22 +9,25 @@ SQRT2 = 1.41421356237 +__all__ = ['ResnetBlockBigGANpp'] + class ResnetBlockBigGANpp(nn.Module): def __init__( self, - act, - in_ch, - out_ch=None, - temb_dim=None, - up=False, - down=False, - dropout=0.1, - fir=False, - fir_kernel=(1, 3, 3, 1), - skip_rescale=True, - init_scale=0., - dimensions=2 + act: Callable, + in_ch: int, + out_ch: Optional[int] = None, + temb_dim: Optional[int] = None, + up: bool = False, + down: bool = False, + dropout: float = 0., + fir: bool = False, + fir_kernel: tuple[int] = (1, 3, 3, 1), + skip_rescale: bool = True, + init_scale: float = 0., + factor: int = 2, + dimensions: int = 2 ): super().__init__() assert dimensions in [1, 2, 3] @@ -36,8 +41,9 @@ def __init__( self.act = act self.in_ch = in_ch self.out_ch = out_ch + self.factor = factor - self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6) + self.GroupNorm_0 = nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), num_channels=in_ch, eps=1e-6) self.Conv_0 = conv3x3(in_ch, out_ch, dimensions=dimensions) if temb_dim is not None: self.Dense_0 = nn.Linear(temb_dim, out_ch) @@ -45,32 +51,31 @@ def __init__( self.Dense_0.weight.data = default_init()(self.Dense_0.weight.shape) nn.init.zeros_(self.Dense_0.bias) - self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6) + self.GroupNorm_1 = nn.GroupNorm(num_groups=max(min(out_ch // 4, 32), 1), num_channels=out_ch, eps=1e-6) self.Dropout_0 = nn.Dropout(dropout) # suppress skip connection at initialization self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale, dimensions=dimensions) if in_ch != out_ch or up or down: self.Conv_2 = conv1x1(in_ch, out_ch, dimensions=dimensions) - def forward(self, x, temb=None): B, *_ = x.shape h = self.act(self.GroupNorm_0(x)) if self.up: if self.fir: - h = upsample(h, self.fir_kernel, factor=2, dimensions=self.dimensions) - x = upsample(x, self.fir_kernel, factor=2, dimensions=self.dimensions) + h = upsample(h, self.fir_kernel, factor=self.factor, dimensions=self.dimensions) + x = upsample(x, self.fir_kernel, factor=self.factor, dimensions=self.dimensions) else: - h = naive_upsample(h, factor=2, dimensions=self.dimensions) - x = naive_upsample(x, factor=2, dimensions=self.dimensions) + h = naive_upsample(h, factor=self.factor, dimensions=self.dimensions) + x = naive_upsample(x, factor=self.factor, dimensions=self.dimensions) elif self.down: if self.fir: - h = downsample(h, self.fir_kernel, factor=2, dimensions=self.dimensions) - x = downsample(x, self.fir_kernel, factor=2, 
dimensions=self.dimensions) + h = downsample(h, self.fir_kernel, factor=self.factor, dimensions=self.dimensions) + x = downsample(x, self.fir_kernel, factor=self.factor, dimensions=self.dimensions) else: - h = naive_downsample(h, factor=2, dimensions=self.dimensions) - x = naive_downsample(x, factor=2, dimensions=self.dimensions) + h = naive_downsample(h, factor=self.factor, dimensions=self.dimensions) + x = naive_downsample(x, factor=self.factor, dimensions=self.dimensions) h = self.Conv_0(h) # Add bias to each feature map conditioned on the time embedding diff --git a/score_models/layers/spectral_normalization.py b/score_models/layers/spectral_normalization.py index 784fc61..9a1bcbf 100644 --- a/score_models/layers/spectral_normalization.py +++ b/score_models/layers/spectral_normalization.py @@ -9,6 +9,8 @@ from torch import nn from torch.nn import Parameter +__all__ = ['SpectralNorm'] + def l2normalize(v, eps=1e-12): return v / (v.norm() + eps) diff --git a/score_models/layers/squeeze_and_excitation.py b/score_models/layers/squeeze_and_excitation.py index 44fc974..daa2720 100644 --- a/score_models/layers/squeeze_and_excitation.py +++ b/score_models/layers/squeeze_and_excitation.py @@ -2,6 +2,8 @@ from torch import nn from torch.nn import functional as F +__all__ = ['SqueezeAndExcite'] + class SqueezeAndExcite(nn.Module): """ diff --git a/score_models/layers/style_gan_conv.py b/score_models/layers/style_gan_conv.py index 1eb418f..6882d31 100644 --- a/score_models/layers/style_gan_conv.py +++ b/score_models/layers/style_gan_conv.py @@ -6,6 +6,8 @@ from .up_or_downsampling2d import upsample_conv_2d, conv_downsample_2d from .up_or_downsampling3d import upsample_conv_3d, conv_downsample_3d +__all__ = ['StyleGANConv'] + class StyleGANConv(nn.Module): """Conv2d layer with optimal upsampling and downsampling (StyleGAN2).""" diff --git a/score_models/layers/upsample.py b/score_models/layers/upsample.py index e02bbbc..33dce24 100644 --- a/score_models/layers/upsample.py +++ b/score_models/layers/upsample.py @@ -5,6 +5,8 @@ from .up_or_downsampling import upsample from ..definitions import default_init +__all__ = ['UpsampleLayer'] + class UpsampleLayer(Module): def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False, diff --git a/score_models/losses/__init__.py b/score_models/losses/__init__.py new file mode 100644 index 0000000..b15d8b7 --- /dev/null +++ b/score_models/losses/__init__.py @@ -0,0 +1,2 @@ +from .dsm import * +from .sliced_score_matching import * diff --git a/score_models/losses/dsm.py b/score_models/losses/dsm.py new file mode 100644 index 0000000..8a9b282 --- /dev/null +++ b/score_models/losses/dsm.py @@ -0,0 +1,93 @@ +from typing import Union, TYPE_CHECKING +if TYPE_CHECKING: + from score_models import ScoreModel, HessianDiagonal + +from torch import Tensor +import torch + +__all__ = ["dsm", "denoising_score_matching", "second_order_dsm", "second_order_dsm_meng_variation"] + + +def dsm(model: "ScoreModel", samples: Tensor, *args: list[Tensor]): + B, *D = samples.shape + sde = model.sde + + x0 = samples # x0 ~ p(x0) + t = torch.rand(B).to(model.device) * (sde.T - sde.epsilon) + sde.epsilon # t ~ U(epsilon, T) + z = torch.randn_like(samples) # z ~ N(0, 1) + + # Sample from the marginal at time t using the Gaussian perturbation kernel + mu = sde.mu(t).view(-1, *[1]*len(D)) + sigma = sde.sigma(t).view(-1, *[1]*len(D)) + xt = mu * samples + sigma * z # xt ~ p(xt | x0) + + # Compute the loss + epsilon_theta = model.reparametrized_score(t, xt, *args) # epsilon_theta(t, x) 
= sigma(t) * s(t, x)
+    return ((epsilon_theta + z)**2).sum() / (2 * B)
+
+
+def denoising_score_matching(model: "ScoreModel", samples: Tensor, *args: list[Tensor]):
+    # Used for backward compatibility
+    return dsm(model, samples, *args)
+
+
+def second_order_dsm(model: "HessianDiagonal", samples: Tensor, *args: list[Tensor]):
+    """
+    Loss used to train a model to approximate the diagonal of the Hessian of log p(x).
+    This loss is derived in the works of Meng et al. (2021), arxiv.org/pdf/2111.04726
+    and Lu et al. (2022), arxiv.org/pdf/2206.08265.
+
+    In particular, this loss corresponds to equation (13) of Lu et al. (2022). It can be viewed
+    as a continuous-time extension of equation (11) of Meng et al. (2021).
+
+    A more stable version of this loss is implemented below in the spirit of equation (17) of
+    Meng et al. (2021).
+    """
+    B, *D = samples.shape
+    sde = model.sde
+    epsilon_model = model.score_model.reparametrized_score # epsilon_1(t, x) = sigma(t) * s_1(t, x)
+
+    # Compute the first order DSM loss term
+    x0 = samples # x0 ~ p(x0)
+    t = torch.rand(B).to(model.device) * (sde.T - sde.epsilon) + sde.epsilon # t ~ U(epsilon, T)
+    z = torch.randn_like(samples) # z ~ N(0, 1)
+
+    # Sample from the marginal at time t using the Gaussian perturbation kernel
+    mu = sde.mu(t).view(-1, *[1]*len(D))
+    sigma = sde.sigma(t).view(-1, *[1]*len(D))
+    xt = mu * samples + sigma * z # xt ~ p(xt | x0)
+    with torch.no_grad():
+        ell_1 = epsilon_model(t, xt, *args) + z # ell_1 is the DSM loss term before contraction
+
+    # Compute the second order DSM loss
+    diag_theta = model.reparametrized_diagonal(t, xt, *args) # diag_theta(t, x) = sigma(t)**2 * diag(s_2(t, x)) + 1
+    return ((diag_theta - ell_1**2)**2).sum() / (2 * B)
+
+def second_order_dsm_meng_variation(model: "HessianDiagonal", samples: Tensor, *args: list[Tensor]):
+    """
+    Loss used to train a model to approximate the diagonal of the Hessian of log p(x).
+    This loss is derived in the works of Meng et al. (2021), arxiv.org/pdf/2111.04726
+    and Lu et al. (2022), arxiv.org/pdf/2206.08265.
+
+    This loss corresponds to equation (17) of Meng et al. (2021) extended to continuous time,
+    as a more stable version of the loss in second_order_dsm.
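+
+    Concretely, with z ~ N(0, I) and xt = mu(t) * x0 + sigma(t) * z, this loss reads
+
+        E[ || diag_theta(t, xt) + epsilon_1(t, xt)**2 - z**2 ||**2 ] / (2 B),
+
+    where epsilon_1 is evaluated under torch.no_grad, so only diag_theta receives gradients.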
+ """ + B, *D = samples.shape + sde = model.sde + epsilon_model = model.score_model.reparametrized_score # epsilon_1(t, x) = sigma(t) * s_1(t, x) + + # Compute the first order DSM loss + x0 = samples # x0 ~ p(x0) + t = torch.rand(B).to(model.device) * (sde.T - sde.epsilon) + sde.epsilon # t ~ U(epsilon, T) + z = torch.randn_like(samples) # z ~ N(0, 1) + + # Sample from the marginal at time t using the Gaussian perturbation kernel + mu = sde.mu(t).view(-1, *[1]*len(D)) + sigma = sde.sigma(t).view(-1, *[1]*len(D)) + xt = mu * samples + sigma * z # xt ~ p(xt | x0) + with torch.no_grad(): + epsilon_1 = epsilon_model(t, xt, *args) + + # Compute the second order DSM loss + diag_theta = model.reparametrized_diagonal(t, xt, *args) # diag_theta(t, x) = sigma(t)**2 * diag(s_2(t, x)) + 1 + return ((diag_theta + epsilon_1**2 - z**2)**2).sum() / (2 * B) diff --git a/score_models/sliced_score_matching.py b/score_models/losses/sliced_score_matching.py similarity index 98% rename from score_models/sliced_score_matching.py rename to score_models/losses/sliced_score_matching.py index d4eae16..9bcfb8c 100644 --- a/score_models/sliced_score_matching.py +++ b/score_models/losses/sliced_score_matching.py @@ -1,6 +1,8 @@ import torch from torch.func import vjp +__all__ = ["sliced_score_matching_loss"] + # Kept here for reference, but not currently used def time_weighted_sliced_score_matching_loss(model, samples, t, lambda_t, n_cotangent_vectors=1, noise_type="rademacher"): """ diff --git a/score_models/ode/__init__.py b/score_models/ode/__init__.py index e69de29..6a2a2bb 100644 --- a/score_models/ode/__init__.py +++ b/score_models/ode/__init__.py @@ -0,0 +1,4 @@ +from .euler import * +from .heun import * +from .hutchinson_trick import * +from .probability_flow_ode import * diff --git a/score_models/ode/euler.py b/score_models/ode/euler.py index 2f54b9b..569d6cf 100644 --- a/score_models/ode/euler.py +++ b/score_models/ode/euler.py @@ -1,28 +1,50 @@ -from typing import Callable +from typing import Callable, Optional, Tuple import torch from torch import Tensor from tqdm import tqdm -def euler(x:Tensor, drift_fn:Callable, N:int, t0=0., t1=1., verbose=0, **kwargs): - """ - A basic implementation of Euler discretisation method of the ODE associated - with the marginales of the learned SDE. - +__all__ = ["euler_method"] + + +@torch.no_grad() +def euler_method( + x: Tensor, + *args, + steps: int, + drift: Callable, + hessian_trace: Optional[Callable] = None, + t0: float = 0., + t1: float = 1., + verbose: int = 0) -> Tuple[Tensor, Tensor]: + """ + Euler discretisation method of an ODE implicitly defined by a drift function. + + Args: x: Initial state - drift_fn: Update the state x - N: Number of steps + *args: Additional arguments to pass to the drift function and divergence function. + steps: Number of steps of integration. + drift: Update function of the state x. + hessian_trace: Trace of the Hessian of the log probability. + If provided, the trace of the Hessian is integrated alongside the state. t0: Initial time of integration, defaults to 0. t1: Final time of integration, defaults to 1. - - Returns the final state - """ - disable = False if verbose else True - B, *D = x.shape - t = torch.ones([B]).to(x.device) * t0 - dt = (t1 - t0) / N - for _ in tqdm(range(N), disable=disable): - x = x + drift_fn(t, x, **kwargs) * dt - t += dt - return x + verbose: If True, display a progress bar. 
+ """ + B, *D = x.shape + N = steps + t = torch.ones([B]).to(x.device) * t0 + dt = (t1 - t0) / N + + f = lambda t, x: drift(t, x, *args) + if hessian_trace is None: + ht = lambda t, x: 0. + else: + ht = lambda t, x: hessian_trace(t, x, *args) + delta_log_p = 0 + for _ in tqdm(range(N), disable=(not verbose)): + delta_log_p += ht(t, x) * dt + x = x + f(t, x) * dt + t += dt + return x, delta_log_p diff --git a/score_models/ode/heun.py b/score_models/ode/heun.py index ae4659f..32efd35 100644 --- a/score_models/ode/heun.py +++ b/score_models/ode/heun.py @@ -1,29 +1,55 @@ -from typing import Callable +from typing import Callable, Optional, Tuple import torch from torch import Tensor from tqdm import tqdm -def heun(x:Tensor, drift_fn:Callable, N:int, t0=0., t1=1., verbose=0, **kwargs): - """ - A basic implementation of Heun second order discretisation method of the ODE associated - with the marginales of the learned SDE. - +__all__ = ["heun_method"] + + +@torch.no_grad() +def heun_method( + x: Tensor, + *args, + steps: int, + drift: Callable, + hessian_trace: Optional[Callable] = None, + t0: float = 0., + t1: float = 1., + verbose: int = 0 + ) -> Tuple[Tensor, Tensor]: + """ + Heun discretisation method of an ODE implicitly defined by a drift function. + + Args: x: Initial state - drift_fn: Update the state x - N: Number of steps + *args: Additional arguments to pass to the drift function and divergence function. + steps: Number of steps of integration. + drift: Update function of the state x. + hessian_trace: Trace of the Hessian of the log probability. + If provided, the trace of the Hessian is integrated alongside the state. t0: Initial time of integration, defaults to 0. t1: Final time of integration, defaults to 1. - - Returns the final state - """ - disable = False if verbose else True - B, *D = x.shape - t = torch.ones([B]).to(x.device) * t0 - dt = (t1 - t0) / N - for _ in tqdm(range(N), disable=disable): - new_x = x + drift_fn(t, x) * dt - x = x + 0.5 * (drift_fn(t, x) + drift_fn(t+dt, new_x)) * dt - t = t + dt - return x + verbose: If True, display a progress bar. + """ + B, *D = x.shape + N = steps + t = torch.ones([B]).to(x.device) * t0 + dt = (t1 - t0) / N + f = lambda t, x: drift(t, x, *args) + if hessian_trace is None: + ht = lambda t, x: 0. + else: + ht = lambda t, x: hessian_trace(t, x, *args) + + delta_log_p = 0 + x1 = x.clone() + for _ in tqdm(range(N), disable=(not verbose)): + f0 = f(t, x) + x1 = x + f0 * dt + f1 = f(t + dt, x1) + delta_log_p += 0.5 * (ht(t, x) + ht(t + dt, x1)) * dt + x = x + 0.5 * (f0 + f1) * dt + t = t + dt + return x, delta_log_p diff --git a/score_models/ode/hutchinson_trick.py b/score_models/ode/hutchinson_trick.py new file mode 100644 index 0000000..1e8b9f6 --- /dev/null +++ b/score_models/ode/hutchinson_trick.py @@ -0,0 +1,48 @@ +from typing import Callable, Literal, List + +import torch +from torch import Tensor +from torch.func import vjp + +__all__ = ['divergence_with_hutchinson_trick'] + + +def divergence_with_hutchinson_trick( + drift: Callable[[Tensor, Tensor, List[Tensor]], Tensor], + t: Tensor, + x: Tensor, + *args, + cotangent_vectors: int = 1, + noise_type: Literal['rademacher', 'gaussian'] = 'rademacher', + **kwargs + ) -> Tensor: + """ + Compute the divergence of the drift function using the Hutchinson trace estimator. + + Args: + drift: Drift function of the ODE. + t: Time of the ODE. + x: State of the ODE. + *args: Additional arguments to pass to the drift function. 
+        cotangent_vectors: Number of cotangent vectors to sample for the Hutchinson trace estimator.
+        noise_type: Type of noise to sample, either 'rademacher' or 'gaussian'.
+    """
+    B, *D = x.shape
+    # Duplicate samples for the Hutchinson trace estimator
+    samples = torch.tile(x, [cotangent_vectors, *[1]*len(D)])
+    t = torch.tile(t, [cotangent_vectors])
+    _args = []
+    for arg in args:
+        _, *DA = arg.shape
+        arg = torch.tile(arg, [cotangent_vectors, *[1]*len(DA)])
+        _args.append(arg)
+
+    # Sample cotangent vectors
+    vectors = torch.randn_like(samples)
+    if noise_type == 'rademacher':
+        vectors = vectors.sign()
+
+    f = lambda x: drift(t, x, *_args)
+    _, vjp_func = vjp(f, samples)
+    divergence = (vectors * vjp_func(vectors)[0]).flatten(1).sum(dim=1)
+    return divergence
diff --git a/score_models/ode/probability_flow_ode.py b/score_models/ode/probability_flow_ode.py
new file mode 100644
index 0000000..fedc729
--- /dev/null
+++ b/score_models/ode/probability_flow_ode.py
@@ -0,0 +1,45 @@
+from typing import Literal, Callable, Optional, Tuple
+
+import torch
+from torch import Tensor
+
+from .euler import euler_method
+from .heun import heun_method
+
+__all__ = ["probability_flow_ode"]
+
+
+def probability_flow_ode(
+    x,
+    *args,
+    steps: int,
+    drift: Callable,
+    hessian_trace: Optional[Callable] = None,
+    method: Literal["euler", "heun"] = "euler",
+    t0: float = 0.,
+    t1: float = 1.,
+    verbose=0) -> Tuple[Tensor, Tensor]:
+    """
+    The probability flow ODE method for score-based models.
+    This method also makes use of the instantaneous change of variables formula
+    developed by Chen et al. 2018 (arxiv.org/abs/1806.07366) to compute the log probability of the flow.
+    See Song et al. 2020 (arxiv.org/abs/2011.13456) for usage with the SDE formalism of SBM.
+
+    Args:
+        x: Initial state
+        *args: Additional arguments to pass to the drift function and hessian trace function.
+        steps: Number of steps of integration.
+        drift: Update function of the state x.
+        hessian_trace: Trace of the Hessian of the log probability.
+            If provided, the trace of the Hessian is integrated alongside the state.
+        method: Integration method. Either 'euler' or 'heun'.
+        t0: Initial time of integration, defaults to 0.
+        t1: Final time of integration, defaults to 1.
+        verbose: If True, display a progress bar.
+    """
+    if method == "euler":
+        return euler_method(x, *args, steps=steps, drift=drift, hessian_trace=hessian_trace, t0=t0, t1=t1, verbose=verbose)
+    elif method == "heun":
+        return heun_method(x, *args, steps=steps, drift=drift, hessian_trace=hessian_trace, t0=t0, t1=t1, verbose=verbose)
+    else:
+        raise NotImplementedError(f"ODE integration method {method} is not implemented. Use 'euler' or 'heun'.")
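+
+
+# A minimal usage sketch (the names `sbm`, `x0` and the `ode_drift` method below are
+# assumptions for illustration, not part of this module):
+#
+#   from score_models.ode import divergence_with_hutchinson_trick
+#   drift = lambda t, x: sbm.ode_drift(t, x)
+#   ht = lambda t, x: divergence_with_hutchinson_trick(drift, t, x)
+#   x1, delta_log_p = probability_flow_ode(x0, steps=100, drift=drift, hessian_trace=ht)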
diff --git a/score_models/save_load_utils.py b/score_models/save_load_utils.py
new file mode 100644
index 0000000..eb402a1
--- /dev/null
+++ b/score_models/save_load_utils.py
@@ -0,0 +1,400 @@
+from typing import Optional, Literal, Tuple, TYPE_CHECKING
+if TYPE_CHECKING:
+    from score_models import ScoreModel, LoRAScoreModel, SDE
+
+import torch
+import os, glob, re, json
+import numpy as np
+import warnings
+import copy
+import shutil
+import dill
+import gzip
+import h5py
+import hashlib
+from datetime import datetime
+from torch.nn import Module
+from peft import PeftModel
+
+from .utils import DEVICE
+
+
+def checkpoint_number(path: str) -> int:
+    return int(re.findall(r'[0-9]+', path)[-1])
+
+
+def maybe_raise_error(message: str, throw_error: bool = True, error_type=FileNotFoundError):
+    if throw_error:
+        raise error_type(message)
+    else:
+        warnings.warn(message)
+
+
+def last_checkpoint(path: str) -> int:
+    if os.path.isdir(path):
+        paths = sorted(glob.glob(os.path.join(path, "*checkpoint*")), key=checkpoint_number)
+        if len(paths) > 0:
+            return checkpoint_number(paths[-1])
+        else:
+            return 0
+    else:
+        return 0
+
+
+def next_checkpoint(path: str) -> int:
+    return last_checkpoint(path) + 1
+
+
+def save_checkpoint(
+    model: Module,
+    path: str,
+    create_path: bool = True,
+    key: Literal["checkpoint", "optimizer", "lora_checkpoint"] = "checkpoint"
+    ):
+    """
+    Utility function to save checkpoints of a model and its optimizer state.
+    This utility will save files in path with the following pattern
+    ```
+        Path
+        ├── checkpoint_001.pt
+        ├── checkpoint_002.pt
+        ├── ...
+        ├── optimizer_001.pt
+        ├── optimizer_002.pt
+        ├── ...
+    ```
+
+    Args:
+        model: Model instance to save.
+        path: Path to the directory in which to save the checkpoint files.
+        create_path: If True, create the directory if it does not exist.
+        key: Key to save the checkpoint with. Defaults to "checkpoint". Alternatives are "optimizer" and "lora_checkpoint".
+    """
+    if not os.path.isdir(path):
+        if create_path:
+            os.makedirs(path, exist_ok=True)
+        else:
+            raise FileNotFoundError(f"Directory {path} does not exist")
+
+    checkpoint = next_checkpoint(path)
+    if key == "lora_checkpoint":
+        model.save_pretrained(os.path.join(path, f"{key}_{checkpoint:03d}"))
+    else:
+        torch.save(model.state_dict(), os.path.join(path, f"{key}_{checkpoint:03d}.pt"))
+    print(f"Saved {key} {checkpoint} to {path}")
+
+
+def save_hyperparameters(hyperparameters: dict, path: str, key: str = "model_hparams"):
+    """
+    Utility function to save the hyperparameters of a model to a standard file.
+    """
+    file = os.path.join(path, f"{key}.json")
+    if not os.path.isfile(file):
+        with open(file, "w") as f:
+            json.dump(hyperparameters, f, indent=4)
+        print(f"Saved {key} to {path}")
+
+
+def load_hyperparameters(path: str, key: str = "model_hparams") -> dict:
+    """
+    Utility function to load the hyperparameters of a model from a standard file.
+    """
+    file = os.path.join(path, f"{key}.json")
+    if os.path.isfile(file):
+        with open(file, "r") as f:
+            hparams = json.load(f)
+        return hparams
+    else:
+        raise FileNotFoundError(f"Could not find hyperparameters in {path}.")
+
+
+def remove_oldest_checkpoint(path: str, models_to_keep: int = 5):
+    """
+    Utility function to clean up old checkpoints in a directory.
+    This utility will delete the oldest checkpoints and their optimizer states.
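+
+    For example, with models_to_keep=5 and checkpoints 001 through 006 on disk,
+    a single call removes checkpoint_001.pt (or the checkpoint directory, in the
+    LoRA case) along with optimizer_001.pt if it exists.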
+ """ + if models_to_keep: + paths = sorted(glob.glob(os.path.join(path, "*checkpoint*")), key=checkpoint_number) + checkpoints = [checkpoint_number(os.path.split(path)[-1]) for path in paths] + if len(paths) > models_to_keep: + # Clean up oldest models + path_to_remove = paths[0] + if os.path.isfile(path_to_remove): + os.remove(path_to_remove) + # Handle case (e.g. LoRA) where checkpoint is a directory + elif os.path.isdir(path_to_remove): + shutil.rmtree(path_to_remove) + # remove associated optimizer + opt_path = os.path.join(path, "optimizer_{:03d}.pt".format(checkpoints[0])) + if os.path.exists(opt_path): + os.remove(opt_path) + # # remove associated scalar net + # scalar_path = os.path.join(path, "scalar_net_{:03d}.pt".format(checkpoints[0])) + # if os.path.exists(scalar_path): + # os.remove(scalar_path) + +def load_sbm_state(sbm: "ScoreModel", path: str, device=DEVICE): + """ + Utility function to load the state dictionary of a model from a file. + We use a try except to catch an old error in the model saving process. + """ + try: + sbm.net.load_state_dict(torch.load(path, map_location=sbm.device, weights_only=True)) + except (KeyError, RuntimeError) as e: + # Maybe the ScoreModel instance was used when saving the weights... (mostly backward compatibility with old bugs) + try: + sbm.load_state_dict(torch.load(path, map_location=sbm.device, weights_only=True)) + except (KeyError, RuntimeError): + print(e) + raise KeyError(f"Could not load state of model from {path}. Make sure you are loading the correct model.") + +def load_optimizer_state(optimizer: torch.optim.Optimizer, path: str, raise_error: bool = True, device=DEVICE): + try: + optimizer.load_state_dict(torch.load(path, map_location=device, weights_only=True)) + except (KeyError, RuntimeError) as e: + if raise_error: + print(e) + maybe_raise_error(f"Could not load state of the optimizer from {path}.", raise_error, error_type=KeyError) + +def load_lora_state(lora_sbm: "LoRAScoreModel", path: str, device=DEVICE): + lora_sbm.lora_net = PeftModel.from_pretrained(copy.deepcopy(lora_sbm.net), path, is_trainable=True) + +# def load_scalar_net(posterior_sbm: "LoRAPosteriorScoreModel", path: str): + # posterior_sbm.scalar_net.load_state_dict(torch.load(path, map_location=posterior_sbm.device)) + +def load_checkpoint( + model: Module, + path: str, + checkpoint: Optional[int] = None, + raise_error: bool = True, + key: Literal["checkpoint", "optimizer", "lora_checkpoint"] = "checkpoint", + device=DEVICE + ): + """ + Utility function to load the checkpoint of a model and its optimizer state. + This utility assumes the directory contains files with the following pattern: + ``` + Path + ├── *checkpoint_*_001.pt + ├── *checkpoint_*_002.pt + ├── ... + ├── optimizer_*_001.pt + ├── optimizer_*_002.pt + ├── ... + ``` + + Args: + checkpoint: Checkpoint number to load. If None, the last checkpoint is loaded. + path: Path to load the checkpoint files from. Defaults to the path in the ScoreModelBase instance. + raise_error: If True, raise an error if no checkpoints are found in the directory. + """ + if not os.path.isdir(path): + if raise_error: + raise FileNotFoundError(f"Directory {path} does not exist.") + else: # If no directory is found, don't do anything. This is useful for initialization of Base. + return + name = os.path.split(path)[-1] + # Collect all checkpoint paths sorted by the checkpoint number (*_001.pt, *_002.pt, ...) 
+    paths = sorted(glob.glob(os.path.join(path, f"{key}*")), key=checkpoint_number)
+    checkpoints = [checkpoint_number(os.path.split(p)[-1]) for p in paths]
+    if checkpoint and checkpoint not in checkpoints:
+        # Make sure the requested checkpoint exists
+        maybe_raise_error(f"{key} {checkpoint} not found in directory {path}.", raise_error)
+        checkpoint = None # Overwrite to load the last checkpoint
+
+    # TODO: Refactor to use setattr for more generality, or just return the net.
+    if key == "checkpoint":
+        loading_mechanism = load_sbm_state
+    elif key == "lora_checkpoint":
+        loading_mechanism = load_lora_state
+    elif key == "optimizer":
+        loading_mechanism = load_optimizer_state
+    # elif key == "scalar_net":
+        # loading_mechanism = load_scalar_net
+    else:
+        raise ValueError(f"Key {key} not recognized.")
+
+    if checkpoints:
+        if checkpoint:
+            # Load the requested checkpoint
+            index = checkpoints.index(checkpoint)
+        else:
+            # Load the last checkpoint
+            index = np.argmax(checkpoints)
+            checkpoint = checkpoints[index]
+        loading_mechanism(model, paths[index], device=device)
+        print(f"Loaded {key} {checkpoint} from {name}.")
+        return checkpoint
+    else:
+        maybe_raise_error(f"No {key} found in {path}.")
+        return None
+
+def load_architecture(
+    path: Optional[str] = None,
+    net: Optional[str] = None,
+    device=DEVICE,
+    hparams_filename="model_hparams",
+    **hyperparameters
+    ) -> Tuple[Module, dict]:
+    """
+    Utility function to load a model architecture from a checkpoint directory or
+    a dictionary of hyperparameters.
+
+    Args:
+        path (str): Path to the checkpoint directory. If None, the model is loaded from the hyperparameters.
+        net (str): Model architecture to load. If provided, hyperparameters are used to instantiate the model.
+        device (torch.device): Device to load the model to.
+        hyperparameters: Hyperparameters used to instantiate the model.
+    """
+    if path:
+        if not os.path.isdir(path):
+            raise FileNotFoundError(f"Directory {path} does not exist. 
" + "Please make sure to provide a valid path to the checkpoint directory.") + hparams = load_hyperparameters(path, key=hparams_filename) + hyperparameters.update(hparams) + net = hyperparameters.get("model_architecture", "ncsnpp") + + if isinstance(net, str): + if net.lower() == "ncsnpp": + from score_models.architectures import NCSNpp + net = NCSNpp(**hyperparameters).to(device) + elif net.lower() == "ddpm": + from score_models.architectures import DDPM + net = DDPM(**hyperparameters).to(device) + elif net.lower() == "mlp": + from score_models import MLP + net = MLP(**hyperparameters).to(device) + elif net.lower() == "encoder": + from score_models import Encoder + net = Encoder(**hyperparameters).to(device) + else: + raise ValueError(f"Architecture {net} not recognized.") + else: + raise ValueError(f"A model architecture or a path to a checkpoint directory must be provided.") + + # Backward compatibility + if "model_architecture" not in hyperparameters.keys(): + hyperparameters["model_architecture"] = net.__class__.__name__ + + if path: + print(f"Loaded model architecture {net.__class__.__name__} from {os.path.split(path)[-1]}.") + else: + print(f"Loaded model architecture {net.__class__.__name__} from hyperparameters.") + return net, hyperparameters + + +def load_sde(sde: Optional[Literal["ve", "vp", "tsve"]] = None, **kwargs) -> Tuple["SDE", dict]: + if sde is None: + if "sde" not in kwargs.keys(): + # Some sane defaults for quick use of VE or VP + if "sigma_min" in kwargs.keys() or "sigma_max" in kwargs.keys(): + print("Using the Variance Exploding SDE") + sde = "ve" + elif "beta_max" in kwargs.keys() or "beta_min" in kwargs.keys(): + print("Using the Variance Preserving SDE") + sde = "vp" + else: + raise KeyError("SDE parameters are missing, please specify which sde to use by using e.g. sde='ve' or sde='vp'") + else: + # Backward compatibility + if kwargs["sde"] == "vpsde": + kwargs["sde"] = "vp" + elif kwargs["sde"] == "vesde": + kwargs["sde"] = "ve" + sde = kwargs["sde"] + else: + # Backward compatibility + if sde == "vpsde": + sde = "vp" + elif sde == "vesde": + sde = "ve" + + if sde.lower() not in ["ve", "vp", "tsve"]: + raise ValueError(f"The SDE {sde} provided is not recognized. 
Please use 've', 'vp', or 'tsve'.") + + # Load the SDE from the keyword + if sde.lower() == "ve": + if "sigma_min" not in kwargs.keys() or "sigma_max" not in kwargs.keys(): + raise KeyError("Variance Exploding SDE requires sigma_min and sigma_max to be specified.") + from score_models.sde import VESDE + sde_hyperparameters = { + "sigma_min": kwargs.get("sigma_min"), + "sigma_max": kwargs.get("sigma_max"), + "T": kwargs.get("T", VESDE.__init__.__defaults__[0]) + } + sde = VESDE(**sde_hyperparameters) + + elif sde.lower() == "vp": + from score_models.sde import VPSDE + sde_hyperparameters = { + "beta_min": kwargs.get("beta_min", VPSDE.__init__.__defaults__[0]), + "beta_max": kwargs.get("beta_max", VPSDE.__init__.__defaults__[1]), + "T": kwargs.get("T", VPSDE.__init__.__defaults__[2]), + "epsilon": kwargs.get("epsilon", VPSDE.__init__.__defaults__[3]), + "schedule": kwargs.get("schedule", VPSDE.__init__.__defaults__[4]) + } + sde = VPSDE(**sde_hyperparameters) + + elif sde.lower() == "tsve": + if "sigma_min" not in kwargs.keys() or "sigma_max" not in kwargs.keys(): + raise KeyError("Truncated Scaled Variance Exploding SDE requires sigma_min and sigma_max to be specified.") + from score_models.sde import TSVESDE + sde_hyperparameters = { + "sigma_min": kwargs.get("sigma_min"), + "sigma_max": kwargs.get("sigma_max"), + "t_star": kwargs.get("t_star"), + "beta": kwargs.get("beta"), + "T": kwargs.get("T", TSVESDE.__init__.__defaults__[0]), + "epsilon": kwargs.get("epsilon", TSVESDE.__init__.__defaults__[1]) + } + sde = TSVESDE(**sde_hyperparameters) + # Making sure the sde name is recorded + sde_hyperparameters["sde"] = sde.__class__.__name__.lower() + return sde, sde_hyperparameters + + +def serialize_object(obj, h5_path, object_name, date_str=None, metadata_path=None): + # Serialize and compress the object + serialized_obj = dill.dumps(obj) + compressed_obj = gzip.compress(serialized_obj) + + # Save compressed object to h5 + with h5py.File(h5_path, 'a') as hf: + hf.create_dataset(object_name, data=np.void(compressed_obj)) + + # Save metadata with checksum of the compressed object + if metadata_path is not None: + checksum = hashlib.sha256(compressed_obj).hexdigest() + metadata = { + "filename": os.path.basename(h5_path), + "object_name": object_name, + "checksum": checksum, + "creation_time": date_str if date_str is not None else datetime.now().strftime("%Y%m%d_%H%M%S"), + } + with open(metadata_path, 'w') as meta_file: + json.dump(metadata, meta_file) + + +def deserialize_object(h5_path, dataset_name, metadata_path=None, checksum=None, safe_mode=True): + # Load compressed serialized object from H5 file + with h5py.File(h5_path, 'r') as hf: + compressed_obj = bytes(hf[dataset_name][()]) + + # Decompress the object + decompressed_obj = gzip.decompress(compressed_obj) + + # Verify checksum in safe mode + if safe_mode: + expected_checksum = checksum + if metadata_path is not None: + with open(metadata_path, 'r') as meta_file: + metadata = json.load(meta_file) + expected_checksum = metadata['checksum'] + + loaded_checksum = hashlib.sha256(compressed_obj).hexdigest() + if expected_checksum is None or loaded_checksum != expected_checksum: + raise ValueError("Checksum does not match. 
Data may have been tampered with.") + + # Deserialize the object + return dill.loads(decompressed_obj) + diff --git a/score_models/sbm/__init__.py b/score_models/sbm/__init__.py new file mode 100644 index 0000000..985bcc5 --- /dev/null +++ b/score_models/sbm/__init__.py @@ -0,0 +1,7 @@ +from .score_model import * +from .energy_model import * +from .slic import * +from .hessian_model import * +from .lora import * +# from .lora_posterior import * +# from .kernel_slic import * diff --git a/score_models/sbm/base.py b/score_models/sbm/base.py new file mode 100644 index 0000000..8adffeb --- /dev/null +++ b/score_models/sbm/base.py @@ -0,0 +1,188 @@ +from typing import Union, Optional, Callable +from abc import ABC, abstractmethod + +import torch +from torch.nn import Module +from torch import Tensor +from torch_ema import ExponentialMovingAverage + +from ..save_load_utils import ( + save_checkpoint, + save_hyperparameters, + load_checkpoint, + load_architecture, + load_sde + ) +from ..utils import DEVICE +from ..sde import SDE +from ..trainer import Trainer + +class Base(Module, ABC): + def __init__( + self, + net: Optional[Union[str, Module]] = None, + sde: Optional[Union[str, SDE]] = None, + path: Optional[str] = None, + checkpoint: Optional[int] = None, + device=DEVICE, + **hyperparameters + ): + super().__init__() + # Backward compatibility + if "checkpoints_directory" in hyperparameters.keys() and path is None: + path = hyperparameters["checkpoints_directory"] + hyperparameters.pop("checkpoints_directory") + if "model" in hyperparameters.keys() and net is None: + net = hyperparameters["model"] + hyperparameters.pop("model") + if "model_checkpoint" in hyperparameters.keys() and checkpoint is None: + checkpoint = hyperparameters["model_checkpoint"] + hyperparameters.pop("model_checkpoint") + + if net is None and path is None: + raise ValueError("Must provide either 'net' or 'path' to instantiate the model.") + + self.path = path + if net is None or isinstance(net, str): + self.net, self.hyperparameters = load_architecture( + path, + net=net, + device=device, + checkpoint=checkpoint, + **hyperparameters + ) + else: + self.net = net + self.hyperparameters = hyperparameters + + # Important to set these attributes before any loading attempt (device is needed) + if isinstance(sde, SDE): + self.hyperparameters["sde"] = sde.__class__.__name__.lower() + self.sde = sde + sde_params = sde.hyperparameters + else: + if isinstance(sde, str): + self.hyperparameters["sde"] = sde + self.sde, sde_params = load_sde(**self.hyperparameters) + self.hyperparameters.update(sde_params) # Save the SDE hyperparameters, including the defaults + self.device = device + self.net.to(device) + self.to(device) + if self.path: + self.load(checkpoint, raise_error=False) # If no checkpoint is found, loaded_checkpoint will be None + else: + self.loaded_checkpoint = None + + if hasattr(self.net, "hyperparameters"): + self.hyperparameters.update(self.net.hyperparameters) + + # Backward compatibility + if "model_architecture" not in self.hyperparameters: + self.hyperparameters["model_architecture"] = self.net.__class__.__name__.lower() + self.model = self.net + + def forward(self, t, x, *args) -> Tensor: + return self.net(t, x, *args) + + @abstractmethod + def loss(self, x, *args) -> Tensor: + ... + + def save( + self, + path: Optional[str] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + create_path: bool = True + ): + """ + Save the model checkpoint to the provided path or the path provided during initialization. 
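+
+        A minimal usage sketch (assuming the model was instantiated with a valid path):
+
+            model.save()  # writes the latest network checkpoint and hyperparameters to self.path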
+ + Args: + path (str, optional): The path to save the checkpoint. Default is path provided during initialization. + optimizer (torch.optim.Optimizer, optional): Optimizer to save alongside the checkpoint. Default is None. + create_path (bool, optional): Whether to create the path if it does not exist. Default is True. + """ + path = path or self.path + if path: + if optimizer: # Save optimizer first since checkpoint number is inferred from number of checkpoint files + save_checkpoint(model=optimizer, path=path, key="optimizer", create_path=create_path) + save_checkpoint(model=self.net, path=path, key="checkpoint", create_path=create_path) + self.save_hyperparameters(path) # If already present in path, this does nothing + else: + raise ValueError("No path provided to save the model. Please provide a valid path or initialize the model with a path.") + + def save_hyperparameters(self, path: Optional[str] = None): + """ + Save the hyperparameters of the model to a json file in the checkpoint directory. + """ + path = path or self.path + if path: + save_hyperparameters(self.hyperparameters, path) + + def load( + self, + checkpoint: Optional[int] = None, + raise_error: bool = True + ): + """ + Load a specific checkpoint from the model. + + Args: + checkpoint (int): The checkpoint number to load. If not provided, load the latest checkpoint found. + raise_error (bool, optional): Whether to raise an error if checkpoint is not found. Default is True. + """ + if self.path is None: + raise ValueError("A checkpoint can only be loaded if the model is instantiated with a path, e.g. model = ScoreModel(path='path/to/checkpoint').") + self.loaded_checkpoint = load_checkpoint(model=self, checkpoint=checkpoint, path=self.path, key="checkpoint", raise_error=raise_error) + + def fit( + self, + dataset: torch.utils.data.Dataset, + preprocessing: Optional[Callable] = None, + batch_size: int = 1, + shuffle: bool = False, + epochs: int = 100, + iterations_per_epoch: Optional[int] = None, + max_time: float = float('inf'), + optimizer: Optional[torch.optim.Optimizer] = None, + learning_rate: float = 1e-3, + ema_decay: float = 0.999, + clip: float = 0., + warmup: int = 0, + checkpoint_every: int = 10, + models_to_keep: int = 3, + path: Optional[str] = None, + name_prefix: Optional[str] = None, + seed: Optional[int] = None, + **kwargs + ) -> list: + # Backward compatibility + if "checkpoints_directory" in kwargs and path is None: + path = kwargs["checkpoints_directory"] + if "preprocessing_fn" in kwargs and preprocessing is None: + preprocessing = kwargs["preprocessing_fn"] + if "checkpoints" in kwargs: # Legacy keyword; checkpoint_every has a non-None default + checkpoint_every = kwargs["checkpoints"] + trainer = Trainer( + model=self, + dataset=dataset, + preprocessing=preprocessing, + batch_size=batch_size, + shuffle=shuffle, + epochs=epochs, + iterations_per_epoch=iterations_per_epoch, + max_time=max_time, + optimizer=optimizer, + learning_rate=learning_rate, + ema_decay=ema_decay, + clip=clip, + warmup=warmup, + checkpoint_every=checkpoint_every, + models_to_keep=models_to_keep, + path=path, + name_prefix=name_prefix, + seed=seed + ) + losses = trainer.train() + return losses diff --git a/score_models/sbm/energy_model.py b/score_models/sbm/energy_model.py new file mode 100644 index 0000000..13c8f45 --- /dev/null +++ b/score_models/sbm/energy_model.py @@ -0,0 +1,64 @@ +from typing import Union, Optional + +from torch.func import grad +from torch import
vmap +from torch.nn import Module +import torch + +from .score_model import ScoreModel +from ..utils import DEVICE +from ..sde import SDE + +__all__ = ["EnergyModel"] + +class EnergyModel(ScoreModel): + def __init__( + self, + net: Optional[Union[str, Module]] = None, + sde: Optional[Union[str, SDE]] = None, + path: Optional[str] = None, + checkpoint: Optional[int] = None, + device=DEVICE, + **hyperparameters + ): + super().__init__(net, sde, path, checkpoint=checkpoint, device=device, **hyperparameters) + nn_is_energy = self.net.hyperparameters.get("nn_is_energy", False) + self.nn_is_energy = nn_is_energy + if nn_is_energy: + self._energy = self._nn_energy + else: + self._energy = self._unet_energy + + def forward(self, t, x, *args): + """ + Overwrite the forward method to return the energy function instead of the model output. + """ + return self.energy(t, x, *args) + + def reparametrized_score(self, t, x, *args): + """ + Numerically stable reparametrization of the score function for the DSM loss. + The score function uses this method, so self.score(t, x, *args) will also work as expected. + """ + def energy(t, x, *args): + # wrapper to feed energy in vmap + t = t.unsqueeze(0) + x = x.unsqueeze(0) + args = [a.unsqueeze(0) for a in args] + return self.unnormalized_energy(t, x, *args).squeeze(0) + return - vmap(grad(energy, argnums=1))(t, x, *args) # Don't forget the minus sign! + + def unnormalized_energy(self, t, x, *args): + return self._energy(t, x, *args) + + def energy(self, t, x, *args): + sigma_t = self.sde.sigma(t) + energy = self.unnormalized_energy(t, x, *args) + return energy / sigma_t + + def _unet_energy(self, t, x, *args): + _, *D = x.shape + return 0.5 * torch.sum((x - self.net(t, x, *args)).flatten(1)**2, dim=1) + + def _nn_energy(self, t, x, *args): + return self.net(t, x, *args).squeeze(1) diff --git a/score_models/sbm/hessian_model.py b/score_models/sbm/hessian_model.py new file mode 100644 index 0000000..b4cf954 --- /dev/null +++ b/score_models/sbm/hessian_model.py @@ -0,0 +1,93 @@ +from typing import Optional, Literal, Union + +from torch import Tensor +import torch +import os + +from .base import Base +from .score_model import ScoreModel +from ..sde import SDE +from ..utils import DEVICE +from ..losses import second_order_dsm, second_order_dsm_meng_variation + +__all__ = ["HessianDiagonal"] + +class HessianDiagonal(Base): + def __init__( + self, + score_model: Optional[ScoreModel] = None, + net: Optional[torch.nn.Module] = None, + sde: Optional[Union[str, SDE]] = None, + path: Optional[str] = None, + checkpoint: Optional[int] = None, + device: torch.device = DEVICE, + loss: Literal["canonical", "meng"] = "canonical", + **hyperparameters + ): + if isinstance(score_model, ScoreModel): + sde = score_model.sde + super().__init__(net, sde, path, checkpoint=checkpoint, device=device, **hyperparameters) + # Check if SBM has been loaded, otherwise use the user provided SBM + if not hasattr(self, "score_model"): + if not isinstance(score_model, ScoreModel): + raise ValueError("Must provide a ScoreModel instance to instantiate the HessianDiagonal model.") + self.score_model = score_model + if loss == "canonical": + self._loss = second_order_dsm + elif loss == "meng": + self._loss = second_order_dsm_meng_variation + else: + raise ValueError(f"Loss function {loss} is not recognized.
Choose 'canonical' or 'meng'.") + + # Make sure ScoreModel weights are frozen (this class does not allow joint optimization for now) + for p in self.score_model.net.parameters(): + p.requires_grad = False + print("Score model weights are now frozen. This class does not currently support joint optimization.") + + def forward(self, t: Tensor, x: Tensor, *args): + return self.diagonal(t, x, *args) + + def loss(self, x: Tensor, *args): + return self._loss(self, x, *args) + + def reparametrized_diagonal(self, t: Tensor, x: Tensor, *args): + return self.net(t, x, *args) + + def diagonal(self, t: Tensor, x: Tensor, *args): + B, *D = x.shape + sigma_t = self.sde.sigma(t).view(B, *[1]*len(D)) + return (self.net(t, x, *args) - 1) / sigma_t**2 + + def trace(self, t: Tensor, x: Tensor, *args): + return self.diagonal(t, x, *args).flatten(1).sum(1) + + def save( + self, + path: Optional[str] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + create_path: bool = True, + ): + """ + We use the super method to save checkpoints of the hessian diagonal network. + We need to save a copy of the score model net and hyperparameters + in order to reload it correctly. This method adds special routines for that. + """ + super().save(path, optimizer, create_path) # Save Hessian net + # Create a sub-directory for the SBM + path = path or self.path + sbm_path = os.path.join(path, "score_model") + if not os.path.exists(sbm_path): + self.score_model.save(sbm_path, create_path=True) + + def load( + self, + checkpoint: Optional[int] = None, + raise_error: bool = True + ): + """ + Super method reloads the HessianDiagonal net. + Then we load the base score model from the score_model sub-directory. + """ + super().load(checkpoint, raise_error) + sbm_path = os.path.join(self.path, "score_model") + self.score_model = ScoreModel(path=sbm_path) diff --git a/score_models/kernel_slic.py b/score_models/sbm/kernel_slic.py similarity index 92% rename from score_models/kernel_slic.py rename to score_models/sbm/kernel_slic.py index 4a40525..e78e170 100644 --- a/score_models/kernel_slic.py +++ b/score_models/sbm/kernel_slic.py @@ -3,10 +3,12 @@ from torch import Tensor from torch.nn import Module from torch.func import vjp, jacrev -from .sde import SDE -from .score_model import SLIC -from .utils import DEVICE +from .slic import SLIC +from ..sde import SDE +from ..utils import DEVICE + +# TODO finish this class with Echoes in the Noise work class KernelSLIC(SLIC): def __init__( self, diff --git a/score_models/sbm/lora.py b/score_models/sbm/lora.py new file mode 100644 index 0000000..fcb4383 --- /dev/null +++ b/score_models/sbm/lora.py @@ -0,0 +1,123 @@ +from typing import Union, Optional + +import torch +import copy +import glob +import os +from torch import Tensor +from peft import LoraConfig, get_peft_model + +from .score_model import ScoreModel +from ..architectures import NCSNpp, MLP +from ..sde import SDE +from ..utils import DEVICE +from ..save_load_utils import save_checkpoint, load_checkpoint + +__all__ = ["LoRAScoreModel"] + + +def get_specific_layer_names(model): + layer_names = [] + for name, module in model.named_modules(): + if isinstance(module, (torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv1d, torch.nn.Conv3d)): + layer_names.append(name) + return layer_names + + +class LoRAScoreModel(ScoreModel): + """ + Class designed to fine-tune an existing SBM with LoRA weights.
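+
+    A minimal sketch of the intended workflow (the paths and dataset below are hypothetical):
+
+        base = ScoreModel(path="path/to/base_sbm")             # trained base SBM
+        lora = LoRAScoreModel(base, lora_rank=4)               # base net is frozen, LoRA adapters are trainable
+        lora.fit(dataset, epochs=10, path="path/to/lora_run")  # only the LoRA weights are optimized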
+ """ + def __init__( + self, + base_sbm: Optional[ScoreModel] = None, + path: Optional[str] = None, + checkpoint: Optional[int] = None, + lora_rank: Optional[int] = None, + target_modules: Optional[str] = None, + device: torch.device = DEVICE, + **hyperparameters + ): + if base_sbm: + # Initialize from scratch + net = base_sbm.net + base_hyperparameters = base_sbm.hyperparameters + super().__init__(net, device=device, **base_hyperparameters) + + # Freeze the base net + for param in self.net.parameters(): + param.requires_grad = False + + # Construct the LoRA model around the base net + if target_modules is None: + if isinstance(self.net, NCSNpp): + target_modules = ["Dense_0", "conv"] + else: + target_modules = list(set(get_specific_layer_names(self.net))) + print(f"Automatically detecting target modules {' '.join(target_modules)}") + if lora_rank is None: + raise ValueError("LoRA rank must be provided when initializing from a base SBM.") + lora_config = LoraConfig( + r=lora_rank, + lora_alpha=lora_rank, + init_lora_weights="gaussian", + target_modules=target_modules + ) + self.lora_net = get_peft_model(copy.deepcopy(self.net), lora_config) + self.lora_net.print_trainable_parameters() + self.hyperparameters["lora_rank"] = lora_rank + self.hyperparameters["target_modules"] = target_modules + print(f"Initialized LoRA weights with rank {lora_rank}") + else: + # Base model and LoRA initialized with the self.load method + super().__init__(path=path, checkpoint=checkpoint, device=device, **hyperparameters) + + def reparametrized_score(self, t, x, *args) -> Tensor: + """ + Modify forward method to return the LoRA score function instead of the base SBM score function. + This method is also used in the DSM loss function, such that the LoRA weights are used in the loss computation + for backpropagation. + """ + return self.lora_net(t, x, *args) + + def save( + self, + path: Optional[str] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + create_path: bool = True + ): + """ + Update the save method to save only one copy of the base SBM alongside the LoRA checkpoints. + """ + path = path or self.path + if path: + # Save the base ScoreModel only once + base_sbm_path = os.path.join(path, "base_sbm") + if not os.path.exists(base_sbm_path): + super().save(base_sbm_path, create_path=True) + + # Save the LoRA weights and the optimizer associated with them + if optimizer: # Save optimizer first since checkpoint number is inferred from number of checkpoint files + save_checkpoint(model=optimizer, path=path, key="optimizer", create_path=create_path) + save_checkpoint(model=self.lora_net, path=path, key="lora_checkpoint", create_path=create_path) + self.save_hyperparameters(path) + else: + raise ValueError("No path provided to save the model. Please provide a valid path or initialize the model with a path.") + + def load( + self, + checkpoint: Optional[int] = None, + raise_error: bool = True + ): + if self.path is None: + raise ValueError("A checkpoint can only be loaded if the model is instantiated with a path, e.g. 
model = ScoreModel(path='path/to/checkpoint').") + # Load base SBM (and freeze it) + base_path = os.path.join(self.path, "base_sbm") + self.net = ScoreModel(path=base_path).net + for param in self.net.parameters(): + param.requires_grad = False + + # Load LoRA weights + self.loaded_checkpoint = load_checkpoint(model=self, checkpoint=checkpoint, path=self.path, key="lora_checkpoint", raise_error=raise_error) + print(f"Loaded LoRA weights with rank {self.hyperparameters['lora_rank']}") + self.lora_net.print_trainable_parameters() diff --git a/score_models/sbm/score_model.py b/score_models/sbm/score_model.py new file mode 100644 index 0000000..52b3cbc --- /dev/null +++ b/score_models/sbm/score_model.py @@ -0,0 +1,141 @@ +from typing import Union, Optional, Callable +from abc import abstractmethod + +from torch.func import grad +from torch import vmap, Tensor +from torch.nn import Module +import numpy as np +import torch + +from .base import Base +from ..sde import SDE +from ..losses import dsm +from ..ode import probability_flow_ode, divergence_with_hutchinson_trick +from ..sde import euler_maruyama_method +from ..utils import DEVICE + + +__all__ = ["ScoreModel"] + + +class ScoreModel(Base): + def __init__( + self, + net: Optional[Union[str, Module]] = None, + sde: Optional[Union[str, SDE]] = None, + path: Optional[str] = None, + checkpoint: Optional[int] = None, + hessian_trace_model: Optional[Union[str, Module]] = None, + device=DEVICE, + **hyperparameters + ): + super().__init__(net, sde, path, checkpoint=checkpoint, device=device, **hyperparameters) + if hessian_trace_model is not None: + self.hessian_trace_model = hessian_trace_model + else: + self.hessian_trace_model = self.divergence + + def loss(self, x, *args) -> Tensor: + return dsm(self, x, *args) + + def reparametrized_score(self, t, x, *args) -> Tensor: + """ + Numerically stable reparametrization of the score function for the DSM loss. + """ + return self.net(t, x, *args) + + def forward(self, t, x, *args): + """ + Overwrite the forward method to return the score function instead of the model output. + This also affects the __call__ method of the class, meaning that + ScoreModel(t, x, *args) is equivalent to ScoreModel.forward(t, x, *args). + """ + return self.score(t, x, *args) + + def score(self, t, x, *args) -> Tensor: + _, *D = x.shape + sigma_t = self.sde.sigma(t).view(-1, *[1]*len(D)) + epsilon_theta = self.reparametrized_score(t, x, *args) + return epsilon_theta / sigma_t + + def ode_drift(self, t, x, *args) -> Tensor: + """ + Compute the drift of the ODE defined by the score function. + """ + f = self.sde.drift(t, x) + g = self.sde.diffusion(t, x) + f_tilde = f - 0.5 * g**2 * self.score(t, x, *args) + return f_tilde + + def divergence(self, t, x, *args, **kwargs) -> Tensor: + """ + Compute the divergence of the drift of the ODE defined by the score function. + """ + return divergence_with_hutchinson_trick(self.ode_drift, t, x, *args, **kwargs) + + def hessian_trace(self, t, x, *args, **kwargs) -> Tensor: + """ + Compute the trace of the Hessian of the score function. + """ + return self.hessian_trace_model(t, x, *args, **kwargs) + + def log_likelihood(self, x, *args, steps, t=0., method="euler", **kwargs) -> Tensor: + """ + Compute the log likelihood of point x using the probability flow ODE, + which makes use of the instantaneous change of variable formula + developed by Chen et al. 2018 (arxiv.org/abs/1806.07366). + See Song et al. 2020 (arxiv.org/abs/2011.13456) for usage with SDE formalism of SBM. 
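+
+        A minimal sketch (assuming `model` is a trained ScoreModel and `x` is a batch of data):
+
+            log_p = model.log_likelihood(x, steps=100)  # one log-likelihood value per example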
+ """ + drift = self.ode_drift + hessian_trace = lambda t, x, *args: self.hessian_trace(t, x, *args, **kwargs) + # Solve the probability flow ODE up in temperature to time t=1. + xT, delta_log_p = probability_flow_ode( + x, + *args, + steps=steps, + drift=drift, + hessian_trace=hessian_trace, + t0=t, + t1=1., + method=method) + # Add the log likelihood of the prior at time t=1. + log_p = self.sde.prior(x.shape).log_prob(xT) + delta_log_p + return log_p + + def tweedie(self, t: Tensor, x: Tensor, *args) -> Tensor: + """ + Compute the Tweedie formula for the expectation E[x0 | xt] + """ + B, *D = x.shape + mu = self.sde.mu(t).view(-1, *[1]*len(D)) + sigma = self.sde.sigma(t).view(-1, *[1]*len(D)) + return (x + sigma**2 * self.score(t, x, *args)) / mu + + @torch.no_grad() + def sample( + self, + shape: tuple, # TODO grab dimensions from model hyperparams if available + steps: int, + *args, + likelihood_score: Optional[Callable] = None, + guidance_factor: float = 1., + stopping_factor: float = np.inf, + denoise_last_step: bool = True + ) -> Tensor: + """ + Sample from the score model by solving the reverse-time SDE using the Euler-Maruyama method. + """ + batch_size, *D = shape + likelihood_score = likelihood_score or (lambda t, x: torch.zeros_like(x)) + score = lambda t, x: self.score(t, x, *args) + guidance_factor * likelihood_score(t, x) + t, x = euler_maruyama_method( + batch_size=batch_size, + dimensions=D, + steps=steps, + sde=self.sde, + score=score, + stopping_factor=stopping_factor + ) + if denoise_last_step: + x = self.tweedie(t, x, *args) + return x diff --git a/score_models/sbm/slic.py b/score_models/sbm/slic.py new file mode 100644 index 0000000..f71480b --- /dev/null +++ b/score_models/sbm/slic.py @@ -0,0 +1,128 @@ +from typing import Callable, Union, Optional + +import torch +from torch.nn import Module +from torch.func import vjp +from inspect import signature, Parameter + +from ..sde import SDE +from .score_model import ScoreModel +from ..utils import DEVICE + +__all__ = ["SLIC"] + +class SLIC(ScoreModel): + def __init__( + self, + forward_model: Callable, + net: Optional[Union[str, Module]] = None, + sde: Optional[SDE]=None, + path: Optional[str] = None, + checkpoint: Optional[int] = None, + anneal_residuals: bool = False, # Add noise to residuals + device=DEVICE, + **hyperparameters + ): + """ + Args: + forward_model: Callable + Function that takes the inputs of the forward model and returns the model output. + net: Optional[Union[str, Module]] + Neural network architecture or path to the model + sde: Optional[SDE] + Stochastic differential equation + anneal_residuals: bool + Add noise to residuals according to the SLIC model SDE following Yang Song's hijacking trick + (arxiv.org/pdf/2111.08005) + """ + super().__init__(net, sde, path, checkpoint=checkpoint, device=device, **hyperparameters) + self.forward_model = forward_model + self.anneal_residuals = anneal_residuals + if not self._valid_forward_model_signature(forward_model): + raise ValueError("The forward model must have the signature: forward(t, x), with extra argument being optional.") + + def reparametrized_score(self, t, eta, *args): + """ + Note: SLIC models should be trained as SBM models. SLIC is a wrapper class used only + for inference when a forward model is provided. + + Though with this reparametrization, the network can technically be trained with this class. 
+ + Args: + t: torch.Tensor + Time index of the SDE + eta: torch.Tensor + Residuals of the model in the observation space + args: list + Additional arguments to the score model + """ + return self.net(t, eta, *args) + + def residual_score(self, t, eta, *args): + """ + Args: + t: torch.Tensor + Time index of the SDE + eta: torch.Tensor + Residuals of the model in the observation space + args: list + Additional arguments to the score model + """ + B, *D = eta.shape + sigma = self.sde.sigma(t).view(-1, *[1]*len(D)) + return self.net(t, eta, *args) / sigma + + def forward(self, t, y, x, *args): + return self.score(t, y, x, *args) + + def score(self, t, y, x, *args): + """ + See Legin et al. (2023), https://iopscience.iop.org/article/10.3847/2041-8213/acd645 + + Args: + t: torch.Tensor + Time index of the SDE + y: torch.Tensor + Observed output tensor + x: torch.Tensor + Input tensor of the forward model + """ + B, *D = y.shape + y_hat, vjp_func = vjp(lambda x: self.forward_model(t, x), x) + if self.anneal_residuals: + mu = self.sde.mu(t).view(-1, *[1]*len(D)) + sigma = self.sde.sigma(t).view(-1, *[1]*len(D)) + z = torch.randn_like(y) + eta = mu * (y - y_hat) + sigma * z + else: + eta = y - y_hat + score = self.residual_score(t, eta, *args) + return - vjp_func(score)[0] + + @staticmethod + def _valid_forward_model_signature(f: Callable): + sig = signature(f) + args = list(sig.parameters.values()) + arg_names = list(sig.parameters.keys()) + if len(args) < 2: + return False + else: + # Check if the first two arguments are positional + check = all(_is_positional(a) for a in args[:2]) + check = check and arg_names[0] == "t" and arg_names[1] == "x" + if len(args) > 2: + # Check if the rest are optional + check = check and all(_is_optional(a) for a in args[2:]) + return check + +def _is_positional(param: Parameter) -> bool: + return param.kind in [ + Parameter.POSITIONAL_OR_KEYWORD, + Parameter.POSITIONAL_ONLY, + Parameter.VAR_POSITIONAL] + +def _is_optional(param: Parameter) -> bool: + return (param.kind in [ + Parameter.VAR_POSITIONAL, + Parameter.VAR_KEYWORD] or + param.default is not Parameter.empty) diff --git a/score_models/score_model.py b/score_models/score_model.py deleted file mode 100644 index c17ead5..0000000 --- a/score_models/score_model.py +++ /dev/null @@ -1,47 +0,0 @@ -from .base import ScoreModelBase, Union, Module -from .dsm import denoising_score_matching -from .sde import SDE -from torch.func import grad -from torch import vmap -import torch - -class ScoreModel(ScoreModelBase): - def __init__(self, model: Union[str, Module] = None, sde: SDE=None, checkpoints_directory=None, **hyperparameters): - super().__init__(model, sde=sde, checkpoints_directory=checkpoints_directory, **hyperparameters) - - def loss_fn(self, x, *args): - return denoising_score_matching(self, x, *args) - - def score(self, t, x, *args): - _, *D = x.shape - return self.model(t, x, *args) / self.sde.sigma(t).view(-1, *[1]*len(D)) - - -class EnergyModel(ScoreModelBase): - def __init__(self, model: Union[str, Module] = None, sde: SDE=None, checkpoints_directory=None, **hyperparameters): - super().__init__(model, sde=sde, checkpoints_directory=checkpoints_directory, **hyperparameters) - nn_is_energy = self.model.hyperparameters.get("nn_is_energy", False) - self.nn_is_energy = nn_is_energy - - def loss_fn(self, x, *args): - return denoising_score_matching(self, x, *args) - - def energy(self, t, x, *args): - if self.nn_is_energy: - return self._nn_energy(t, x, *args) - else: - return self._unet_energy(t, x, *args) - - 
def _unet_energy(self, t, x, *args): - _, *D = x.shape - return 0.5 / self.sde.sigma(t) * torch.sum((x - self.model(t, x, *args))**2, dim=list(range(1, 1+len(D)))) - - def _nn_energy(self, t, x, *args): - return self.model(t, x, *args).squeeze(1) / self.sde.sigma(t) - - def score(self, t, x, *args): - _, *D = x.shape - # small wrapper to account for input without batch dim from vmap - energy = lambda t, x: self.energy(t.unsqueeze(0), x.unsqueeze(0), *args).squeeze(0) - return -vmap(grad(energy, argnums=1))(t, x, *args) # Don't forget the minus sign! - diff --git a/score_models/sde/__init__.py b/score_models/sde/__init__.py index d912508..a8a8a3e 100644 --- a/score_models/sde/__init__.py +++ b/score_models/sde/__init__.py @@ -2,3 +2,4 @@ from .sde import SDE from .vpsde import VPSDE from .tsvesde import TSVESDE +from .euler_maruyama import euler_maruyama_method diff --git a/score_models/sde/euler_maruyama.py b/score_models/sde/euler_maruyama.py new file mode 100644 index 0000000..f183139 --- /dev/null +++ b/score_models/sde/euler_maruyama.py @@ -0,0 +1,67 @@ +from typing import Union, Optional, Callable + +import torch +import numpy as np +from torch import Tensor +from tqdm import tqdm + +from .sde import SDE +from ..utils import DEVICE + +__all__ = ["euler_maruyama_method"] + +def euler_maruyama_method( + batch_size: int, + dimensions: tuple[int], + steps: int, + sde: SDE, + score: Optional[Callable[[Tensor, Tensor], Tensor]] = None, + T: Optional[Union[Tensor, float]] = None, + epsilon: Optional[float] = None, + guidance_factor: float = 1., + stopping_factor: float = 1e2, + denoise_last_step: bool = True, + device = DEVICE + ) -> Tensor: + """ + An Euler-Maruyama integration of an SDE specified by the score function. + + Args: + batch_size: Number of samples to draw + dimensions: Shape of the tensor to sample + steps: Number of Euler-Maruyama steps to perform + score: Score function of the reverse-time SDE + T: Time horizon of the integration. Defaults to sde.T + epsilon: Stopping time of the integration near t=0. Defaults to sde.epsilon + stopping_factor: When magnitude of the score is larger than stopping_factor * sqrt(D), stop the sampling + """ + B = batch_size + D = dimensions + T = T or sde.T + epsilon = epsilon or sde.epsilon + score = score or (lambda t, x: torch.zeros_like(x)) + + x = sde.prior(D).sample([B]).to(device) + dt = -(T - epsilon) / steps + t = torch.ones(B).to(device) * T + for _ in (pbar := tqdm(range(steps))): + pbar.set_description(f"Euler-Maruyama | t = {t[0].item():.1f} | sigma(t) = {sde.sigma(t)[0].item():.1e}" + f" | x.std() ~ {x.std().item():.1e}") + g = sde.diffusion(t, x) + f = sde.drift(t, x) + s = score(t, x) + dw = torch.randn_like(x) * abs(dt)**(1/2) + x = x + (f - g**2 * s) * dt + g * dw + t += dt + # Check for NaNs + if torch.any(torch.isnan(x)): + print("Diffusion is not stable: NaNs were produced. Stopped sampling.") + break + # Check magnitude of the score + m = torch.sum(s.flatten(1)**2, dim=1).sqrt() + if torch.any(m > stopping_factor * np.prod(D)**(1/2)): + print(f"Diffusion is not stable: magnitude of the score is larger than {stopping_factor} x sqrt(D).
Stopped sampling.") + break + # Check if t is too small + if t[0] < epsilon: + break + return t, x diff --git a/score_models/sde/predictor_corrector.py b/score_models/sde/predictor_corrector.py new file mode 100644 index 0000000..e69de29 diff --git a/score_models/sde/sde.py b/score_models/sde/sde.py index b221493..570bffb 100644 --- a/score_models/sde/sde.py +++ b/score_models/sde/sde.py @@ -17,47 +17,57 @@ def __init__(self, T=1.0, epsilon=0., **kwargs): super().__init__() self.T = T self.epsilon = epsilon + self.hyperparameters = { + "T": T, + "epsilon": epsilon + } + + @abstractmethod + def mu(self, t) -> Tensor: + ... @abstractmethod def sigma(self, t) -> Tensor: ... - + @abstractmethod def prior(self, shape) -> Distribution: """ - High temperature distribution + High temperature prior distribution. Typically a Gaussian distribution. """ ... @abstractmethod def diffusion(self, t:Tensor, x: Tensor) -> Tensor: + """ + Diffusion coefficient of the SDE. + """ ... @abstractmethod def drift(self, t, x) -> Tensor: - ... - - @abstractmethod - def marginal_prob_scalars(self, t) -> Tuple[Tensor, Tensor]: """ - Returns scaling functions for the mean and the standard deviation of the marginals + Drift coefficient of the SDE. """ ... - def sample_marginal(self, t: Tensor, x0: Tensor) -> Tensor: + def perturbation_scalars(self, t) -> Tuple[Tensor, Tensor]: + return self.mu(t), self.sigma(t) + + def perturbation_kernel(self, t: Tensor, x0: Tensor) -> Tensor: """ - Sample from the marginal at time t given some initial condition x0 + Sample from the marginal at time t using the Gaussian perturbation kernel + and the reparametrization trick. """ _, *D = x0.shape + mu_t = self.mu(t).view(-1, *[1]*len(D)) + sigma_t = self.sigma(t).view(-1, *[1]*len(D)) z = torch.randn_like(x0) - mu_t, sigma_t = self.marginal_prob_scalars(t) - return mu_t.view(-1, *[1]*len(D)) * x0 + sigma_t.view(-1, *[1]*len(D)) * z - - def marginal_prob(self, t, x): - _, *D = x.shape - m_t, sigma_t = self.marginal_prob_scalars(t) - mean = m_t.view(-1, *[1]*len(D)) * x - std = sigma_t.view(-1, *[1]*len(D)) - return mean, std - + return mu_t * x0 + sigma_t * z + + # Backward compatibility + def sample_time_marginal(self, t: Tensor, x0: Tensor) -> Tensor: + return self.perturbation_kernel(t, x0) + def marginal_prob_scalars(self, t) -> Tuple[Tensor, Tensor]: + return self.perturbation_scalars(t) diff --git a/score_models/sde/tsvesde.py b/score_models/sde/tsvesde.py index f495c1a..95b6f9f 100644 --- a/score_models/sde/tsvesde.py +++ b/score_models/sde/tsvesde.py @@ -38,6 +38,12 @@ def __init__( self.sigma_max = sigma_max self.beta = beta self.t_star = t_star + self.hyperparameters.update({ + "sigma_min": sigma_min, + "sigma_max": sigma_max, + "t_star": t_star, + "beta": beta + }) if beta_fn == "relu": self.beta_fn = lambda t: - self.beta * F.relu(t/self.T - self.t_star) @@ -47,14 +53,6 @@ def __init__( self.beta_fn = lambda t: - self.beta * F.hardswish(alpha*(t/self.T - self.t_star))/alpha self.beta_fn_dot = vmap(grad(self.beta_fn)) - def scale(self, t): - """ - Piecewise continuous scale function that takes a VE at t < t_star and - attach it to a VP-like diffusion at t>t_star. Note that the variance isnan - still exploding but with a logarihmic slope reduced by the beta hyperparameter. 
- """ - return torch.exp(self.beta_fn(t)) - def sigma(self, t: Tensor) -> Tensor: """ Numerically stable formula for sigma @@ -64,17 +62,24 @@ def sigma(self, t: Tensor) -> Tensor: log_coeff = self.beta_fn(t) + (smax - smin) * t/self.T + smin return torch.exp(log_coeff) + def mu(self, t: Tensor) -> Tensor: + """ + Piecewise continuous scale function that takes a VE at t < t_star and + attach it to a VP-like diffusion at t>t_star. Note that the variance isnan + still exploding but with a logarihmic slope reduced by the beta hyperparameter. + """ + return torch.exp(self.beta_fn(t)) + def prior(self, shape, device=DEVICE): mu = torch.zeros(shape).to(device) sigma_max = np.exp(-self.beta * (1. - self.t_star) + np.log(self.sigma_max)) return Independent(Normal(loc=mu, scale=sigma_max, validate_args=False), len(shape)) - def marginal_prob_scalars(self, t) -> tuple[Tensor, Tensor]: - return self.scale(t), self.sigma(t) - def diffusion(self, t: Tensor, x: Tensor) -> Tensor: - _, *D = x.shape - return self.sigma(t).view(-1, *[1]*len(D)) * np.sqrt(2*(np.log(self.sigma_max) - np.log(self.sigma_min))) + _, *D = x.shape # broadcast diffusion coefficient to x shape + # Analytical derivative of the sigma**2 function, square rooted at the end + prefactor = np.sqrt(2 * (np.log(self.sigma_max) - np.log(self.sigma_min))) + return prefactor * self.sigma(t).view(-1, *[1]*len(D)) def drift(self, t: Tensor, x: Tensor) -> Tensor: _, *D = x.shape diff --git a/score_models/sde/vesde.py b/score_models/sde/vesde.py index d6843f2..b888631 100644 --- a/score_models/sde/vesde.py +++ b/score_models/sde/vesde.py @@ -1,9 +1,9 @@ import torch from .sde import SDE from torch import Tensor -import numpy as np from torch.distributions import Normal, Independent from score_models.utils import DEVICE +import numpy as np class VESDE(SDE): @@ -27,30 +27,27 @@ def __init__( super().__init__(T, epsilon) self.sigma_min = sigma_min self.sigma_max = sigma_max + self.hyperparameters.update({ + "sigma_min": sigma_min, + "sigma_max": sigma_max + }) + + def mu(self, t: Tensor) -> Tensor: + return torch.ones_like(t) def sigma(self, t: Tensor) -> Tensor: return self.sigma_min * (self.sigma_max / self.sigma_min) ** (t/self.T) - def prior(self, shape, mu=None, device=DEVICE): - """ - Technically, VESDE does not change the mean of the 0 temperature distribution, - so I give the option to provide for more accuracy. 
In practice, - sigma_max is chosen large enough to make this choice irrelevant - """ - if mu is None: - mu = torch.zeros(shape).to(device) - else: - assert mu.shape == shape - return Independent(Normal(loc=mu, scale=self.sigma_max, validate_args=False), len(shape)) - - def marginal_prob_scalars(self, t) -> tuple[Tensor, Tensor]: - return torch.ones_like(t), self.sigma(t) - def diffusion(self, t: Tensor, x: Tensor) -> Tensor: _, *D = x.shape # broadcast diffusion coefficient to x shape - return self.sigma(t).view(-1, *[1]*len(D)) * np.sqrt(2 * (np.log(self.sigma_max) - np.log(self.sigma_min))) + # Analytical derivative of the sigma**2 function, square rooted at the end + prefactor = np.sqrt(2 * (np.log(self.sigma_max) - np.log(self.sigma_min))) + return prefactor * self.sigma(t).view(-1, *[1]*len(D)) def drift(self, t: Tensor, x: Tensor) -> Tensor: return torch.zeros_like(x) - + def prior(self, shape, mean=None, device=DEVICE): + if mean is None: + mean = torch.zeros(shape).to(device) + return Independent(Normal(loc=mean, scale=self.sigma_max, validate_args=False), len(shape)) diff --git a/score_models/sde/vpsde.py b/score_models/sde/vpsde.py index e993c93..9261c88 100644 --- a/score_models/sde/vpsde.py +++ b/score_models/sde/vpsde.py @@ -1,8 +1,15 @@ +from typing import Literal + import torch from torch import Tensor -from .sde import SDE from torch.distributions import Independent, Normal -from score_models.utils import DEVICE +from torch.func import vmap, grad +import numpy as np + +from .sde import SDE +from ..utils import DEVICE + +PI_OVER_2 = np.pi / 2 class VPSDE(SDE): @@ -11,18 +18,76 @@ def __init__( self, beta_min: float = 0.1, beta_max: float = 20, T: float = 1.0, - epsilon: float = 1e-5, + epsilon: float = 1e-3, + schedule: Literal["cosine", "linear"] = "linear", **kwargs ): + """ + Args: + beta_min (float): Coefficient of the linear VP noise schedule, controls the minimum amount of noise. + beta_max (float): Coefficient of the linear VP noise schedule, controls the rescaling of the data space. + T (float, optional): The time horizon for the VPSDE. Defaults to 1.0. + epsilon (float, optional): The initial time for the VPSDE. Defaults to 1e-3. + schedule (str, optional): The VP noise schedule. Defaults to "linear". + + Notes: + - The "cosine" schedule is the one defined in Nichol & Dhariwal 2021. (https://arxiv.org/abs/2102.09672) + but reformulated in continuous time. beta_max controls the clipping of the gradient to avoid + numerical instability as t -> T. + - The "linear" schedule is the original noise schedule from Ho et al. 2020 and Song et al. 2021. + See equation (33) in Song et al 2020. (https://arxiv.org/abs/2011.13456). + - Consider making beta_max much larger for the cosine schedule to avoid sharp deviations in the mu function. + After all, I am not clipping beta manually; rather, I patch the cosine schedule together with a linear one. + """ super().__init__(T, epsilon) self.beta_min = beta_min self.beta_max = beta_max + self.hyperparameters.update({ + "beta_min": beta_min, + "beta_max": beta_max, + "schedule": schedule + }) + + if schedule == "cosine": + def beta_primitive(t: Tensor, beta_max, *args) -> Tensor: + """ + See equation (17) in Nichol & Dhariwal 2021. (https://arxiv.org/abs/2102.09672). + The primitive of the beta function is the negative log of \bar{\alpha} in their notation. + + To implement the clipping discussed in their paper, + we instead use beta_max to control the maximum drift value in the diffusion.
The derivative of this primitive is + beta(t) = pi * tan(pi*t/2), which we can invert to get the time index + t = 2/pi * arctan(beta_max/pi) at which the drift reaches beta_max. + """ + return torch.where( + t < 2/np.pi * np.arctan(beta_max / np.pi), # analytical inversion of the beta schedule + - 2 * torch.log(torch.cos(PI_OVER_2 * t)), # Cosine schedule for the primitive of beta + beta_max * t, # Linear schedule for regime where cosine is clipped + ) + + elif schedule == "linear": + def beta_primitive(t: Tensor, beta_max, beta_min) -> Tensor: + """ + See equation (33) in Song et al 2020. (https://arxiv.org/abs/2011.13456) + """ + return 0.5 * (beta_max - beta_min) * t**2 + beta_min * t + + else: + raise ValueError(f"Unknown noise schedule {schedule}") + self._beta_primitive = beta_primitive + + def beta_primitive(self, t: Tensor) -> Tensor: + return self._beta_primitive(t/self.T, self.beta_max, self.beta_min) def beta(self, t: Tensor): - return self.beta_min + (self.beta_max - self.beta_min) * t + return vmap(grad(self.beta_primitive))(t) + + def mu(self, t: Tensor) -> Tensor: + return torch.exp(-0.5 * self.beta_primitive(t)) def sigma(self, t: Tensor) -> Tensor: - return self.marginal_prob_scalars(t)[1] + return (1 - self.mu(t)**2).sqrt() def prior(self, shape, device=DEVICE): mu = torch.zeros(shape).to(device) @@ -30,17 +95,10 @@ def prior(self, shape, device=DEVICE): def diffusion(self, t: Tensor, x: Tensor) -> Tensor: _, *D = x.shape - return torch.sqrt(self.beta(t)).view(-1, *[1]*len(D)) + beta = self.beta(t).view(-1, *[1]*len(D)) + return beta.sqrt() def drift(self, t: Tensor, x: Tensor) -> Tensor: _, *D = x.shape - return -0.5 * self.beta(t).view(-1, *[1]*len(D)) * x - - def marginal_prob_scalars(self, t: Tensor) -> tuple[Tensor, Tensor]: - """ - See equation (33) in Song et al 2020. (https://arxiv.org/abs/2011.13456) - """ - log_coeff = 0.5 * (self.beta_max - self.beta_min) * t**2 + self.beta_min * t # integral of b(t) - std = torch.sqrt(1. - torch.exp(- log_coeff)) - return torch.exp(-0.5*log_coeff), std - + beta = self.beta(t).view(-1, *[1]*len(D)) + return - 0.5 * beta * x diff --git a/score_models/slic.py b/score_models/slic.py deleted file mode 100644 index 581925a..0000000 --- a/score_models/slic.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing import Callable, Union - -from torch.nn import Module -from torch.func import vjp -from .sde import SDE -from .score_model import ScoreModel - -class SLIC(ScoreModel): - """ - Original implementation of SLIC - """ - def __init__( - self, - model: Union[str, Module] = None, - forward_model: Callable = None, # need to be differentiable - sde: SDE=None, - checkpoints_directory=None, - **hyperparameters - ): - super().__init__(model, sde=sde, checkpoints_directory=checkpoints_directory, **hyperparameters) - self.forward_model = forward_model - - def slic_score(self, t, x, y): - """ - See Legin et al.
(2023), https://iopscience.iop.org/article/10.3847/2041-8213/acd645 - """ - y_hat, vjp_func = vjp(self.forward_model, x) - return - vjp_func(self.score(t, y - y_hat))[0] diff --git a/score_models/trainer.py b/score_models/trainer.py new file mode 100644 index 0000000..476e272 --- /dev/null +++ b/score_models/trainer.py @@ -0,0 +1,216 @@ +from typing import Optional, Callable, TYPE_CHECKING +if TYPE_CHECKING: + from score_models import ScoreModel + +import torch +import json, os +import time +import numpy as np +from torch.utils.data import DataLoader, Dataset +from torch_ema import ExponentialMovingAverage +from datetime import datetime +from tqdm import tqdm + +from .utils import DEVICE +from .save_load_utils import ( + remove_oldest_checkpoint, + last_checkpoint, + load_checkpoint + ) + + +class Trainer: + def __init__( + self, + model: "ScoreModel", + dataset: Dataset, + preprocessing: Optional[Callable] = None, + batch_size: int = 1, + shuffle: bool = False, + epochs: int = 100, + iterations_per_epoch: Optional[int] = None, + max_time: float = float('inf'), + optimizer: Optional[torch.optim.Optimizer] = None, + learning_rate: float = 1e-3, + ema_decay: float = 0.999, + clip: float = 0., + warmup: int = 0, + checkpoint_every: int = 10, + models_to_keep: int = 3, + path: Optional[str] = None, + name_prefix: Optional[str] = None, + seed: Optional[int] = None, + ): + self.model = model + self.net = model.net # Neural network to train + self.dataset = dataset + self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle) + self.data_iter = iter(self.dataloader) + self.preprocessing = preprocessing or (lambda x: x) + self.optimizer = optimizer or torch.optim.Adam(self.model.parameters(), lr=learning_rate) + self.ema = ExponentialMovingAverage(self.model.parameters(), decay=ema_decay) + self.lr = learning_rate + self.clip = clip + self.warmup = warmup + self.global_step = 0 + if seed: + torch.manual_seed(seed) + self.epochs = epochs + self.checkpoint_every = checkpoint_every + self.models_to_keep = models_to_keep + self.iterations_per_epoch = iterations_per_epoch or len(self.dataloader) + self.max_time = max_time + + # Provided model already has a path to load a checkpoint from + if path and self.model.path: + print(f"Loading a checkpoint from the model path {self.model.path} and saving in new path {path}...") + if self.model.path: + if not os.path.isdir(self.model.path): # Double check the path is valid + print(f"Provided path {self.model.path} is not a valid directory.
Can't load checkpoint.") + else: + checkpoint = load_checkpoint( + model=self.optimizer, + checkpoint=self.model.loaded_checkpoint, + path=self.model.path, + key="optimizer", + device=self.model.device + ) + print(f"Resumed training from checkpoint {checkpoint}.") + + # Create a new checkpoint directory and save checkpoints there + if path: + self.path = path + if name_prefix: # Instantiate a new model, stamped with the current time + model_name = name_prefix + "_" + datetime.now().strftime("%y%m%d%H%M%S") + self.path = os.path.join(self.path, model_name) + else: + model_name = os.path.split(self.path)[-1] + if not os.path.isdir(self.path): + os.makedirs(self.path, exist_ok=True) + + # Save Training parameters + file = os.path.join(self.path, "script_params.json") + if not os.path.isfile(file): + with open(file, "w") as f: + json.dump( + { + "dataset": dataset.__class__.__name__, + "preprocessing": preprocessing.__name__ if preprocessing is not None else None, + "optimizer": self.optimizer.__class__.__name__, + "learning_rate": self.optimizer.param_groups[0]["lr"], + "ema_decay": ema_decay, + "batch_size": batch_size, + "shuffle": shuffle, + "epochs": epochs, + "max_time": max_time, + "warmup": warmup, + "clip": clip, + "checkpoint_every": checkpoint_every, + "models_to_keep": models_to_keep, + "seed": seed, + "path": str(path), + "model_name": model_name, + "name_prefix": name_prefix, + "iterations_per_epoch": iterations_per_epoch + }, + f, + indent=4 + ) + # Save model hyperparameters to reconstruct the model later + self.model.save_hyperparameters(self.path) + elif self.model.path: + # Continue saving checkpoints in the model path + self.path = self.model.path + else: + self.path = None + print("No path provided. Training checkpoints will not be saved.") + + def save_checkpoint(self, loss: float): + """ + Save model and optimizer if a path is provided. Then save loss and remove oldest checkpoints + when the number of checkpoints exceeds models_to_keep.
+ """ + if self.path: + with self.ema.average_parameters(): + self.model.save(self.path, optimizer=self.optimizer) + + checkpoint = last_checkpoint(self.path) + with open(os.path.join(self.path, "score_sheet.txt"), "a") as f: + f.write(f"{checkpoint} {loss}\n") + + if self.models_to_keep: + remove_oldest_checkpoint(self.path, self.models_to_keep) + + def train_epoch(self): + time_per_step_avg = 0 + cost = 0 + for _ in range(self.iterations_per_epoch): + start = time.time() + # Load data + try: + X = next(self.data_iter) + except StopIteration: + self.data_iter = iter(self.dataloader) # Reset the iterator + X = next(self.data_iter) + if isinstance(X, (list, tuple)): # Handle conditional arguments + x, *args = X + else: + x = X + args = [] + # Preprocessing + x = self.preprocessing(x) + # Training step + self.optimizer.zero_grad() + loss = self.model.loss(x, *args) + loss.backward() + if self.global_step < self.warmup: + for g in self.optimizer.param_groups: + g['lr'] = self.lr * np.minimum(self.global_step / self.warmup, 1.0) + if self.clip > 0: + torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.clip) + self.optimizer.step() + self.ema.update() + # Logging + time_per_step_avg += time.time() - start + cost += loss.item() + self.global_step += 1 + cost /= self.iterations_per_epoch + time_per_step_avg /= self.iterations_per_epoch + return cost, time_per_step_avg + + def train(self, verbose=0) -> list: + losses = [] + global_start = time.time() + estimated_time_for_epoch = 0 + out_of_time = False + for epoch in (pbar := tqdm(range(self.epochs))): + if (time.time() - global_start) > self.max_time * 3600 - estimated_time_for_epoch: + break + # Train + epoch_start = time.time() + cost, time_per_step_avg = self.train_epoch() + # Logging + losses.append(cost) + pbar.set_description(f"Epoch {epoch + 1:d} | Cost: {cost:.1e} |") + if verbose >= 2: + print(f"epoch {epoch} | cost {cost:.2e} | time per step {time_per_step_avg:.2e} s", flush=True) + elif verbose == 1: + if (epoch + 1) % self.checkpoints == 0: + print(f"epoch {epoch} | cost {cost:.1e}", flush=True) + if np.isnan(cost): + print("Model exploded and returns NaN") + break + if (time.time() - global_start) > self.max_time * 3600: + out_of_time = True + if (epoch + 1) % self.checkpoint_every == 0 or epoch == self.epochs - 1 or out_of_time: + self.save_checkpoint(cost) + if out_of_time: + print("Out of time") + break + if epoch > 0: + estimated_time_for_epoch = time.time() - epoch_start + + print(f"Finished training after {(time.time() - global_start) / 3600:.3f} hours.") + # Save EMA weights in the model for dynamic use (e.g. 
Jupyter notebooks) + self.ema.copy_to(self.model.parameters()) + return losses diff --git a/score_models/utils.py b/score_models/utils.py index 50eee6f..4b48e78 100644 --- a/score_models/utils.py +++ b/score_models/utils.py @@ -1,12 +1,6 @@ from functools import partial -import warnings import torch import torch.nn as nn -import os, json, re -from glob import glob -import numpy as np -from torch.nn import Module -from typing import Union DTYPE = torch.float32 DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else "cpu") @@ -46,6 +40,12 @@ def get_activation(activation_type="elu"): return nn.Identity() elif activation_type == "relu": activation = nn.ReLU() + elif activation_type == "leakyrelu": + activation = nn.LeakyReLU(0.2) + elif activation_type == "gelu": + activation = nn.GELU() + elif activation_type == "sigmoid": + activation = nn.Sigmoid() elif activation_type == "elu": activation = nn.ELU() elif activation_type == "tanh": @@ -55,71 +55,3 @@ def get_activation(activation_type="elu"): else: raise NotImplementedError('activation layer [%s] is not found' % activation_type) return activation - - -def load_architecture( - checkpoints_directory, - model: Union[str, Module] = None, - dimensions=2, - hyperparameters=None, - device=DEVICE, - model_checkpoint:int=None, - ) -> list[Module, dict]: - if hyperparameters is None: - hyperparameters = {} - if model is None: - with open(os.path.join(checkpoints_directory, "model_hparams.json"), "r") as f: - hparams = json.load(f) - hyperparameters.update(hparams) - model = hparams.get("model_architecture", "ncsnpp") - if "dimensions" not in hyperparameters.keys(): - hyperparameters["dimensions"] = dimensions - if isinstance(model, str): - if model.lower() == "ncsnpp": - from score_models.architectures import NCSNpp - model = NCSNpp(**hyperparameters).to(device) - elif model.lower() == "ddpm": - from score_models.architectures import DDPM - model = DDPM(**hyperparameters).to(device) - elif model.lower() == "mlp": - from score_models import MLP - model = MLP(**hyperparameters).to(device) - else: - raise ValueError(f"{model} not supported") - # Backward compatibility with old stuff - if "sde" in hyperparameters.keys(): - if hyperparameters["sde"] == "vpsde": - hyperparameters["sde"] = "vp" - elif hyperparameters["sde"] == "vesde": - hyperparameters["sde"] = "ve" - if checkpoints_directory is not None: - paths = glob(os.path.join(checkpoints_directory, "checkpoint*.pt")) - checkpoints = [int(re.findall('[0-9]+', os.path.split(path)[-1])[-1]) for path in paths] - if not paths: - warnings.warn(f"Directory {checkpoints_directory} might not have checkpoint files. Cannot load architecture.") - return model, hyperparameters, None - if model_checkpoint is None: - checkpoint = np.argmax(checkpoints) - path = paths[checkpoint] - elif model_checkpoint not in checkpoints: - warnings.warn(f"Directory {checkpoints_directory} does not have the checkpoint requested. 
Methods defaults to loading latest checkpoint.") - checkpoint = np.argmax(checkpoints) - path = paths[checkpoint] - else: - checkpoint = [i for i, c in enumerate(checkpoints) if c == model_checkpoint][0] - path = paths[checkpoint] - try: - model.load_state_dict(torch.load(path, map_location=device)) - model_dir = os.path.split(checkpoints_directory)[-1] - print(f"Loaded checkpoint {checkpoints[checkpoint]} of {model_dir}") - except (KeyError, RuntimeError): - # Maybe the ScoreModel instance was used when saving the weights, in which case we hack the loading process - from score_models import ScoreModel - model = ScoreModel(model, **hyperparameters) - model.load_state_dict(torch.load(path, map_location=device)) - model = model.model # Remove the ScoreModel wrapping to extract the nn - model_dir = os.path.split(checkpoints_directory)[-1] - print(f"Loaded checkpoint {checkpoints[checkpoint]} of {model_dir}") - return model, hyperparameters, checkpoints[checkpoint] - return model, hyperparameters, None - diff --git a/setup.py b/setup.py index 33f1eb3..e93776a 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name="score_models", - version="0.5.11", + version="0.6.0", description="A simple pytorch interface for score model and basic diffusion.", long_description=long_description, author="Alexandre Adam", @@ -19,14 +19,15 @@ "torch_ema", "h5py", "numpy", - "tqdm" + "tqdm", + "peft>=0.11" ], - python_requires=">=3.8", + python_requires=">=3.9", classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", - "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", ], ) diff --git a/tests/test_architectures.py b/tests/test_architectures.py index 2497eeb..5148fc5 100644 --- a/tests/test_architectures.py +++ b/tests/test_architectures.py @@ -1,47 +1,63 @@ -from score_models.architectures import NCSNpp, DDPM, MLP +from score_models.architectures import NCSNpp, DDPM, MLP, Encoder import torch - - -def test_ddpm(): - x = torch.randn(size=[1, 1, 32, 32]) * 230 - t = torch.randn([1]) - model = DDPM(1, nf=64, ch_mult=(1, 1, 2, 2)) - model(x=x, t=t) - -def test_ddpm_smallnf(): - x = torch.randn(size=[1, 1, 32, 32]) * 230 - t = torch.randn([1]) - model = DDPM(1, nf=8, ch_mult=(1, 1, 2, 2)) - model(x=x, t=t) - -def test_ncsnpp(): - x = torch.randn(size=[1, 1, 32, 32]) * 500 - t = torch.randn([1]) - model = NCSNpp(1, dimensions=2, nf=8, ch_mult=(2, 2, 2, 2), num_res_blocks=3) - model(x=x, t=t) - -def test_ncsnpp1d(): - x = torch.randn(size=[1, 1, 256]) * 500 - t = torch.randn([1]) - model = NCSNpp(1, dimensions=1, nf=8, ch_mult=(1, 1, 2, 2), attention=True) - model(x=x, t=t) - - -def test_ncsnpp3d(): - x = torch.randn(size=[1, 1, 32, 32, 32]) * 500 - t = torch.randn([1]) - model = NCSNpp(1, dimensions=3, nf=8, ch_mult=(1, 1, 2, 2), attention=True) - model(x=x, t=t) - - -def test_mlp(): - x = torch.randn(size=[10, 10]) * 100 - t = torch.randn([10]) - model = MLP(dimensions=10, units=10, layers=3, time_embedding_dimensions=16, time_branch_layers=2, bottleneck=10, attention=True) - model(x=x, t=t) - - x = torch.randn(size=[1, 10]) * 100 - t = torch.randn([1]) - model = MLP(dimensions=10, units=10, layers=2, time_embedding_dimensions=16, time_branch_layers=1) - model(x=x, t=t) - +import pytest + +@pytest.mark.parametrize("D", [1, 2, 3]) +@pytest.mark.parametrize("C", [1, 3]) +@pytest.mark.parametrize("ch_mult", [(1, 1), (1, 2), (1, 1, 1)]) +@pytest.mark.parametrize("nf", [2, 4]) # Number of filters needs to be 
power of 2 when small, or at least divisible by 4, need to debug this +@pytest.mark.parametrize("num_res_blocks", [1, 3]) +@pytest.mark.parametrize("attention", [True, False]) +@pytest.mark.parametrize("Net", [NCSNpp, DDPM]) +def test_unets(D, C, ch_mult, nf, num_res_blocks, attention, Net): + B = 2 + P = 8 + x = torch.randn(B, C, *[P]*D) * 500 + t = torch.rand([B]) + model = Net(C, dimensions=D, nf=nf, ch_mult=ch_mult, num_res_blocks=num_res_blocks, attention=attention) + out = model(t, x) + assert out.shape == torch.Size([B, C, *[P]*D]) + assert torch.isfinite(out).all() + +@pytest.mark.parametrize("layers", [1, 3]) +@pytest.mark.parametrize("time_branch_channels", [16, 32]) +@pytest.mark.parametrize("time_branch_layers", [1, 3]) +@pytest.mark.parametrize("bottleneck", [10, 20]) +@pytest.mark.parametrize("attention", [True, False]) +def test_mlp(layers, time_branch_channels, time_branch_layers, bottleneck, attention): + B = 2 + C = 10 + x = torch.randn(B, C) + t = torch.randn([B]) + model = MLP( + C, + layers=layers, + time_branch_channels=time_branch_channels, + time_branch_layers=time_branch_layers, + bottleneck=bottleneck, + attention=attention) + out = model(t, x) + assert out.shape == torch.Size([B, C]) + assert torch.isfinite(out).all() + + +# @pytest.mark.parametrize("D", [1, 2, 3]) +@pytest.mark.parametrize("D", [2]) +@pytest.mark.parametrize("C", [1, 3]) +@pytest.mark.parametrize("ch_mult", [(1, 1), (1, 2), (1, 1, 1)]) +@pytest.mark.parametrize("latent_size", (1, 10, 100)) +def test_encoder(D, C, ch_mult, latent_size): + B = 2 + P = 16 + x = torch.randn(B, C, *[P]*D) + t = torch.randn([B]) + model = Encoder( + pixels=P, + channels=C, + dimensions=D, + ch_mult=ch_mult, + latent_size=latent_size + ) + out = model(t, x) + assert out.shape == torch.Size([B, latent_size]) + assert torch.isfinite(out).all() diff --git a/tests/test_conditional_architecture.py b/tests/test_conditional_architecture.py index e23ddc7..38930cc 100644 --- a/tests/test_conditional_architecture.py +++ b/tests/test_conditional_architecture.py @@ -1,129 +1,133 @@ from score_models import NCSNpp, DDPM, MLP +from functools import partial import torch import pytest -def test_discrete_timelike_conditional(): - nf = 32 - net = NCSNpp( - nf=nf, - ch_mult=(1, 1), - condition=["discrete_timelike"], - condition_num_embedding=[10], - ) - - B = 10 - c = torch.randint(10, (B,)) - x = torch.randn(B, 1, 8, 8) - t = torch.rand(B) +@pytest.mark.parametrize("Net", [NCSNpp, MLP, DDPM]) +@pytest.mark.parametrize("conditions", [ + (("time_continuous",), None, None), + (("input_tensor",), None, (10,)), + (("time_discrete",), (15,), None), + (("time_vector",), None, (12,)), + (("input_tensor", "time_continuous"), None, (15,)), + (("time_continuous", "time_discrete"), (32,), None), + (("time_continuous", "time_discrete", "time_discrete"), (32, 12), None), + (("input_tensor", "time_continuous", "time_vector", "time_discrete"), (15,), (3, 12)), + ]) +def test_conditional_branch(Net, conditions): + condition_type, condition_embeddings, condition_channels = conditions + hp = { + "ch_mult": (1, 1), + "nf": 8, + "conditions": condition_type, + "condition_channels": condition_channels, + "condition_embeddings": condition_embeddings + } + B = 5 + C = 10 + D = [] if Net == MLP else [8, 8] + net = Net(C, **hp) + assert net.conditioned + assert hasattr(net, "conditional_branch") - out = net(t, x, c) assert out.shape == x.shape - assert net.condition_embedding_layers[0](c).shape == torch.Size([B, nf]) - -def
test_continuous_timelike_conditional(): - nf = 32 - net = NCSNpp( - nf=nf, - ch_mult=(1, 1), - condition=["continuous_timelike"] - ) - - B = 10 - c = torch.randn(B) - x = torch.randn(B, 1, 8, 8) - t = torch.rand(B) - - out = net(t, x, c) - assert out.shape == x.shape - assert net.condition_embedding_layers[0](c).shape == torch.Size([B, nf]) - -def test_continuous_input_conditional(): - nf = 32 - C_cond = 3 - net = NCSNpp( - nf=nf, - ch_mult=(1, 1), - condition=["input"], - condition_input_channels=3 - ) - - B = 10 - c = torch.randn(B, C_cond, 8, 8) - x = torch.randn(B, 1, 8, 8) - t = torch.rand(B) - - out = net(t, x, c) - assert out.shape == x.shape - -def test_vector_condition(): - nf = 32 - C_cond = 3 - net = NCSNpp( - nf=nf, - ch_mult=(1, 1), - condition=["vector"], - condition_vector_channels=3 - ) - - B = 10 - c = torch.randn(B, C_cond) - x = torch.randn(B, 1, 8, 8) - t = torch.rand(B) + x = torch.randn(B, C, *D) + t = torch.randn(B) + c = [] + c_idx = 0 + for condition in condition_type: + if condition == "time_continuous": + c.append(torch.randn(B)) + elif condition == "time_discrete": + c.append(torch.randint(10, (B,))) + elif condition == "time_vector": + c.append(torch.randn(B, condition_channels[c_idx])) + c_idx += 1 + elif condition == "input_tensor": + c.append(torch.randn(B, condition_channels[c_idx], *D)) + c_idx += 1 - out = net(t, x, c) + print([_c.shape for _c in c]) + out = net(t, x, *c) assert out.shape == x.shape -def test_mix_condition_type(): - nf = 32 - C_cond = 3 - net = NCSNpp( - nf=nf, - ch_mult=(1, 1), - condition=["input", "discrete_timelike", "continuous_timelike", "continuous_timelike"], - condition_input_channels=3, - condition_num_embedding=(15,), - ) - - B = 10 - c_input = torch.randn(B, C_cond, 8, 8) - c_discrete = torch.randint(10, (B,)) - c_cont1 = torch.randn(B) - c_cont2 = torch.randn(B) - x = torch.randn(B, 1, 8, 8) - t = torch.rand(B) - - out = net(t, x, c_input, c_discrete, c_cont1, c_cont2) - assert out.shape == x.shape - - -def test_conditional_architecture_raising_errors(): - nf = 32 +@pytest.mark.parametrize("Net", [NCSNpp, MLP, DDPM]) +@pytest.mark.parametrize("conditions", [ + (("input_tensor",), None, None), # Channel not provided + (("time_discrete",), None, None), # Embedding not provided + (("time_vector",), None, None), + (("input_tensor", "time_vector"), None, (15,)), # Not enough channels + (("time_discrete", "time_discrete"), (32,), None), # Not enough embeddings + ]) +def test_validate_conditional_branch_errors(Net, conditions): + condition_type, condition_embeddings, condition_channels = conditions + hp = { + "ch_mult": (1, 1), + "nf": 8, + "conditions": condition_type, + "condition_channels": condition_channels, + "condition_embeddings": condition_embeddings + } + B = 5 + C = 10 + D = [] if Net == MLP else [8, 8] with pytest.raises(ValueError): - net = NCSNpp( - nf=nf, - ch_mult=(1, 1), - condition=["discrete_timelike"], - ) + net = Net(C, **hp) - with pytest.raises(ValueError): - net = NCSNpp( - nf=nf, - ch_mult=(1, 1), - condition=["discrete_timelike"], - condition_num_embedding=15 - ) - with pytest.raises(ValueError): - net = NCSNpp( - nf=nf, - ch_mult=(1, 1), - condition=["input"], - ) +@pytest.mark.parametrize("Net", [NCSNpp, MLP, DDPM]) +def test_merging_errors_len_args(Net): + condition_type = ("input_tensor", "time_vector") + condition_embeddings = None + condition_channels = (15, 15) + hp = { + "ch_mult": (1, 1), + "nf": 8, + "conditions": condition_type, + "condition_channels": condition_channels, + 
"condition_embeddings": condition_embeddings + } + B = 5 + C = 10 + D = [] if Net == MLP else [8, 8] + net = Net(C, **hp) + t = torch.randn(B) + x = torch.randn(B, C, *D) + c = [torch.randn(B, 15),] # Not enough arguments provided + with pytest.raises(ValueError) as exc_info: + net(t, x, *c) + assert "The network requires 2 additional arguments, but 1 were provided." in str(exc_info.value) + - with pytest.raises(ValueError): - net = NCSNpp( - nf=nf, - ch_mult=(1, 1), - condition="input", - ) +@pytest.mark.parametrize("Net", [NCSNpp, MLP, DDPM]) +@pytest.mark.parametrize("conditions", [ + (("time_discrete",), (3,), None), + (("time_discrete", "time_discrete"), (3, 10), None), + ]) +def test_merging_errors_embedding_arg(conditions, Net): + condition_type, condition_embeddings, condition_channels = conditions + hp = { + "ch_mult": (1, 1), + "nf": 8, + "conditions": condition_type, + "condition_channels": condition_channels, + "condition_embeddings": condition_embeddings + } + B = 5 + C = 10 + D = [] if Net == MLP else [8, 8] + net = Net(C, **hp) + t = torch.randn(B) + x = torch.randn(B, C, *D) + if len(condition_type) == 1: + c = [torch.ones(B, 1).long() * 4,] + with pytest.raises(ValueError) as exc_info: + net(t, x, *c) + max_int = condition_embeddings[0] - 1 + assert f"Additional argument 0 must be a long tensor with values between 0 and {max_int} inclusively." in str(exc_info.value) + elif len(condition_type) == 2: + c = [torch.ones(B, 1).long() * 2, torch.ones(B, 1).long() * 15] + with pytest.raises(ValueError) as exc_info: + net(t, x, *c) + max_int = condition_embeddings[1] - 1 + assert f"Additional argument 1 must be a long tensor with values between 0 and {max_int} inclusively." in str(exc_info.value) diff --git a/tests/test_hessian_model.py b/tests/test_hessian_model.py new file mode 100644 index 0000000..47a07b1 --- /dev/null +++ b/tests/test_hessian_model.py @@ -0,0 +1,36 @@ +from score_models import HessianDiagonal, ScoreModel, MLP +import torch +import pytest +import os + + +@pytest.mark.parametrize("loss", ["canonical", "meng"]) +def test_save_load_hessian_diagonal(loss, tmp_path): + path = os.path.join(tmp_path, "test") + net = MLP(10) + hessian_net = MLP(10) + score_model = ScoreModel(net, sde="vp") + model = HessianDiagonal(score_model, hessian_net, loss=loss) + + for i in range(3): + model.save(path) + + # Check that we can reload the whole setup just from path + new_model = HessianDiagonal(path=path) + + # Check that the architecture is reloaded correctly + B = 10 + D = 10 + x = torch.randn(B, D) + t = torch.randn(B) + with torch.no_grad(): + assert torch.allclose(model(t, x), new_model(t, x), atol=1e-3) + # Check that sbm is loaded correctly for the loss function + assert torch.allclose(model.score_model(t, x), new_model.score_model(t, x), atol=1e-3) + torch.manual_seed(42) + loss1 = model.loss(x) + + torch.manual_seed(42) + loss2 = new_model.loss(x) + # Give it a loose tolerance, not sure why they are differen just yet + assert torch.allclose(loss1, loss2, atol=4) diff --git a/tests/test_lora_sbm.py b/tests/test_lora_sbm.py new file mode 100644 index 0000000..8005cf8 --- /dev/null +++ b/tests/test_lora_sbm.py @@ -0,0 +1,41 @@ +from score_models import LoRAScoreModel, NCSNpp, MLP, ScoreModel +import os +import torch +import pytest + +@pytest.mark.parametrize("lora_rank", [1, 10, 30]) +@pytest.mark.parametrize("sde", [{"sde": "vp"}, {"sde": "ve", "sigma_min": 0.1, "sigma_max": 100.0}]) +@pytest.mark.parametrize("net", [MLP(10), NCSNpp(1, ch_mult=[1, 1], nf=8)]) +def 
test_lora_sbm(net, sde, lora_rank, tmp_path): + base_sbm = ScoreModel(net, **sde) + sbm = LoRAScoreModel(base_sbm, lora_rank=lora_rank) + + # Check that checkpoints are being saved correctly + path = os.path.join(tmp_path, "test") + for i in range(3): + sbm.save(path) + + print(os.listdir(path)) + assert os.path.exists(path) + assert os.path.exists(os.path.join(path, "model_hparams.json")) + assert os.path.exists(os.path.join(path, "base_sbm")) + assert os.path.isdir(os.path.join(path, "base_sbm")) + for i in range(1, 4): + assert os.path.exists(os.path.join(path, f"lora_checkpoint_{i:03d}")) + assert os.path.isdir(os.path.join(path, f"lora_checkpoint_{i:03d}")) + + # Check that we can reload the whole setup just from path + new_sbm = LoRAScoreModel(path=path) + + # Check that models are consistent with each other + B = 10 + D = [10] if isinstance(net, MLP) else [1, 8, 8] + t = torch.rand(B) + x = torch.randn(B, *D) + with torch.no_grad(): + print(sbm(t, x) - new_sbm(t, x)) + assert torch.allclose(sbm(t, x), new_sbm(t, x)) + # Sanity check that we are using the LoRA model + assert torch.allclose(sbm.lora_net(t, x), new_sbm.lora_net(t, x)) + assert torch.allclose(sbm.lora_net(t, x), sbm.reparametrized_score(t, x)) + assert torch.allclose(new_sbm.lora_net(t, x), new_sbm.reparametrized_score(t, x)) diff --git a/tests/test_save_load.py b/tests/test_save_load.py new file mode 100644 index 0000000..d8cb992 --- /dev/null +++ b/tests/test_save_load.py @@ -0,0 +1,79 @@ +from score_models import MLP, NCSNpp, ScoreModel, EnergyModel, SLIC +import torch +import pytest +import numpy as np +import os + +@pytest.mark.parametrize("net", [MLP(10), NCSNpp(10, nf=8, ch_mult=[1, 1])]) +@pytest.mark.parametrize("sde", [{"sde": "vp"}, {"sde": "ve", "sigma_min": 1e-2, "sigma_max": 1e2}]) +@pytest.mark.parametrize("Model", [ScoreModel, EnergyModel, SLIC]) +def test_save(net, sde, Model, tmp_path): + if Model == SLIC: + forward_model = lambda t, x: x + model = Model(forward_model, net, **sde) + else: + model = Model(net, **sde) + + path = os.path.join(tmp_path, "test") + model.save(path) + + assert os.path.exists(path) + assert os.path.exists(os.path.join(path, "checkpoint_001.pt")) + assert os.path.exists(os.path.join(path, "model_hparams.json")) + + # Save again + model.save(path) + assert os.path.exists(os.path.join(path, "checkpoint_002.pt")) + + # Check if we can load and continue saving checkpoints + if Model == SLIC: + model = Model(forward_model, path=path, **sde) + else: + model = Model(path=path, **sde) + model.save() + assert os.path.exists(os.path.join(path, "checkpoint_003.pt")) + + +@pytest.mark.parametrize("net", [MLP(10), NCSNpp(1, nf=8, ch_mult=[1, 1],)]) +@pytest.mark.parametrize("sde", [{"sde": "vp"}, {"sde": "ve", "sigma_min": 1e-2, "sigma_max": 1e2}]) +@pytest.mark.parametrize("Model", [ScoreModel, EnergyModel, SLIC]) +def test_load(net, sde, Model, tmp_path): + path = os.path.join(tmp_path, "test") + if Model == SLIC: + forward_model = lambda t, x: x + model = Model(forward_model, net, path=path, **sde) + else: + model = Model(net, path=path, **sde) + + for i in range(10): + model.save() + + # Load checkpoint + model.load(10) + assert model.loaded_checkpoint == 10 + + # reload last checkpoint to compare with load + model.load() + + # Check that the architecture is reloaded correctly + if Model == SLIC: # Currently we do not save the forward model, though we could serialize it in a custom save and load function. 
+ new_model = Model(forward_model, path=path) + else: + new_model = Model(path=path) + B = 10 + if isinstance(net, MLP): + D = net.hyperparameters["channels"] + x = torch.randn(B, D) + else: + C = net.channels + D = net.dimensions + x = torch.randn(B, C, *[32]*D) + + t = torch.rand(B) + if Model == SLIC: + y = forward_model(t, x) + print(model(t, y, x) - new_model(t, y, x)) + assert torch.allclose(model(t, y, x), new_model(t, y, x), atol=1e-3) + else: + print(model(t, x) - new_model(t, x)) + assert torch.allclose(model(t, x), new_model(t, x), atol=1e-3) diff --git a/tests/test_score_models.py b/tests/test_score_models.py index 31553cd..007f144 100644 --- a/tests/test_score_models.py +++ b/tests/test_score_models.py @@ -1,5 +1,5 @@ import torch -from score_models.utils import load_architecture +from score_models.save_load_utils import load_architecture from score_models import ScoreModel, EnergyModel, SLIC from score_models.architectures import MLP, NCSNpp, DDPM from score_models.sde import VESDE, VPSDE, TSVESDE @@ -83,19 +83,14 @@ def test_log_likelihood(): score = ScoreModel(net, beta_min=1e-2, beta_max=10) print(score.sde) x = torch.randn(3, 2) - ll = score.log_likelihood(x, ode_steps=10, verbose=1) + ll = score.log_likelihood(x, steps=10, verbose=1, method="euler") + print(ll) + assert ll.shape == torch.Size([3]) + + ll = score.log_likelihood(x, steps=10, verbose=1, method="heun") print(ll) assert ll.shape == torch.Size([3]) -# def test_score_at_zero_t(): - # net = MLP(dimensions=2) - # score = ScoreModel(net, beta_min=1e-2, beta_max=10) - # print(score.sde) - # x = torch.randn(3, 2) - # t = torch.rand(3) - # ll, vjp_func = torch.func.vjp(lambda x: score.log_likelihood(t, x, ode_steps=10), x) - # grad = vjp_func(torch.ones_like(ll)) - # print(grad) def test_sample_fn(): net = NCSNpp(1, nf=8, ch_mult=(2, 2)) @@ -106,31 +101,24 @@ def test_sample_fn(): score = ScoreModel(net, beta_min=1e-2, beta_max=10) score.sample(shape=[5, 1, 16, 16], steps=10) -def test_slic_score(): - def forward_model(x): - return torch.sum(x, dim=1, keepdim=True) # Function R^C to R - C = 100 - net = MLP(dimensions=C) - # Check that we can get the score without a forward_model - score = SLIC(net, beta_min=1e-2, beta_max=10) - print(score.sde) - x = torch.randn(3, C) - t = torch.rand(3) - s = score(t, x) - print(s) - assert s.shape == torch.Size([3, C]) - - # Now check slic score - net = MLP(dimensions=1) # Define SLIC in output space of forward model - score = SLIC(net, forward_model, beta_min=1e-2, beta_max=10) - y = forward_model(x) - print(score.sde) - x = torch.randn(3, C) - t = torch.rand(3) - s = score.slic_score(t, x, y) +@pytest.mark.parametrize("anneal_residuals", [True, False]) +def test_slic_score(anneal_residuals): + B = 3 + m = 10 + D = 100 + def forward_model(t, x): + return x[:, :m] # Function R^C to R^m + x = torch.randn(B, D) + t = torch.rand(B) + net = MLP(m) # Define SLIC in output space of forward model (m) + model = SLIC(forward_model, net, beta_min=1e-2, beta_max=10, anneal_residuals=anneal_residuals) + y = forward_model(None, x) + x = torch.randn(B, D) + t = torch.rand(B) + s = model(t, y=y, x=x) print(s) print(s.shape) - assert s.shape == torch.Size([3, C]) + assert s.shape == torch.Size([B, D]) def test_loading_different_sdes(): @@ -149,7 +137,7 @@ def test_loading_different_sdes(): assert score.sde.epsilon == 0 assert score.sde.T == 1 - score = ScoreModel(net, sigma_min=1e-3, sigma_max=1e2, t_star=0.5, beta=10) + score = ScoreModel(net, sde="tsve", sigma_min=1e-3, sigma_max=1e2, 
t_star=0.5, beta=10) assert isinstance(score.sde, TSVESDE) assert score.sde.sigma_min == 1e-3 assert score.sde.sigma_max == 1e2 diff --git a/tests/test_sdes.py b/tests/test_sdes.py index fc34ee1..59e924f 100644 --- a/tests/test_sdes.py +++ b/tests/test_sdes.py @@ -2,8 +2,10 @@ import numpy as np import torch -def get_trajectories(sde, B=10, N=100, x0=5): - dt = 1/N +# TODO: make better tests checking for the expected marginals of the trajectories backward and forward + +def get_trajectories(sde, B=10, N=100, x0=5, T=1): + dt = T/N t = torch.zeros(B) + sde.epsilon x0 = torch.ones(B) * x0 x = torch.clone(x0) @@ -16,7 +18,7 @@ def get_trajectories(sde, B=10, N=100, x0=5): dw = torch.randn_like(x) * dt**(1/2) x = x + f * dt + g * dw trajectories.append(x) - marginal_samples.append(sde.sample_marginal(t, x0)) + marginal_samples.append(sde.perturbation_kernel(t, x0)) trajectories = np.stack(trajectories) marginal_samples = np.stack(marginal_samples) return trajectories, marginal_samples @@ -28,16 +30,18 @@ def get_trajectories(sde, B=10, N=100, x0=5): import matplotlib.pyplot as plt B = 100 - N = 1000 - x0 = 1e2 - sde1 = VESDE(sigma_min=1e-1, sigma_max=100) - sde2 = VPSDE(beta_min=1e-2, beta_max=20) - sde3 = TSVESDE(sigma_min=1e-6, sigma_max=1e9, t_star=0.4, beta=30, beta_fn="relu") - sde4 = TSVESDE(sigma_min=1e-4, sigma_max=1e6, t_star=0.4, beta=20, beta_fn="silu") - sde5 = TSVESDE(sigma_min=1e-4, sigma_max=1e6, t_star=0.4, beta=20, beta_fn="hardswish") + N = 100 + x0 = 1e3 + T = 1 + sde1 = VESDE(sigma_min=1e-1, sigma_max=100., T=T) + sde2 = VPSDE(beta_min=1e-2, beta_max=100, schedule="linear", T=T) + sde22 = VPSDE(beta_max=100, schedule="cosine", T=T) + sde3 = TSVESDE(sigma_min=1e-6, sigma_max=1e9, t_star=0.4, beta=30, beta_fn="relu", T=T) + sde4 = TSVESDE(sigma_min=1e-4, sigma_max=1e6, t_star=0.4, beta=20, beta_fn="silu", T=T) + sde5 = TSVESDE(sigma_min=1e-4, sigma_max=1e6, t_star=0.4, beta=20, beta_fn="hardswish", T=T) - text = ["", "", "relu", "silu", "hardswish"] - for i, sde in enumerate([sde1, sde2, sde3, sde4, sde5]): + text = ["", "linear", "cosine", "relu", "silu", "hardswish"] + for i, sde in enumerate([sde1, sde2, sde22, sde3, sde4, sde5]): trajectories, marginal_samples = get_trajectories(sde, B, N, x0=x0) fig, axs = plt.subplots(2, 2, figsize=(8, 4), sharex=True) @@ -49,14 +53,14 @@ def get_trajectories(sde, B=10, N=100, x0=5): axs[1, 1].set_xlabel("t") axs[0, 0].set_ylabel("x") axs[1, 0].set_ylabel("x") - t = np.linspace(0, 1, N+1) + t = np.linspace(0, T, N+1) for b in range(B): axs[0, 0].plot(t, trajectories[:, b]) axs[1, 0].plot(t, trajectories.std(axis=1), "k-", alpha=0.5, label=r"Empirical $\sigma(t)$") axs[1, 0].plot(t, trajectories.mean(axis=1), "r-", alpha=0.5, label=r"Empirical $\mu(t)$") - mu, sigma = sde.marginal_prob_scalars(torch.tensor(t)) + mu, sigma = sde.perturbation_scalars(torch.tensor(t)) axs[1, 0].plot(t, sigma, "k--", label=r"Expected $\sigma(t)$") axs[1, 0].plot(t, mu * x0, "r-", label=r"Expected $\mu(t)$") # axs[1, 0].legend() diff --git a/tests/test_training.py b/tests/test_training.py index 6b7ef1b..8e482fb 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -1,301 +1,371 @@ -import torch from torch.utils.data import TensorDataset -from score_models import ScoreModel, EnergyModel, MLP, NCSNpp +from score_models import ScoreModel, EnergyModel, SLIC, HessianDiagonal, LoRAScoreModel, MLP, NCSNpp, DDPM +from functools import partial +import pytest +import torch import shutil, os import numpy as np class Dataset(torch.utils.data.Dataset): - def 
__init__(self, size, channels, dimensions:list, conditioning="None", test_input_list=False): + def __init__( + self, + size, + channels, + dimensions, + time_branch_channels: int = 4, + conditions=None, + condition_channels=None, + condition_embeddings=None, + **kwargs + ): self.size = size - self.channels = channels - self.dimensions = dimensions - self.conditioning = conditioning - self.test_input_list = test_input_list + self.C = channels + self.D = dimensions + self.conditions = conditions + self.condition_channels = condition_channels + self.condition_embeddings = condition_embeddings def __len__(self): return self.size def __getitem__(self, index): - if self.test_input_list: - return torch.randn(self.channels, *self.dimensions), - if self.conditioning.lower() == "none": - return torch.randn(self.channels, *self.dimensions) - elif self.conditioning.lower() == "time": - return torch.randn(self.channels, *self.dimensions), torch.randn(1) - elif self.conditioning.lower() == "input": - return torch.randn(self.channels, *self.dimensions), torch.randn(self.channels, *self.dimensions) - elif self.conditioning.lower() == "input_and_time": - return torch.randn(self.channels, *self.dimensions), torch.randn(self.channels, *self.dimensions), torch.randn(1) - elif self.conditioning.lower() == "time_and_discrete": - return torch.randn(self.channels, *self.dimensions), torch.randn(1), torch.randint(10, (1,)) - elif self.conditioning.lower() == "discrete_time": - return torch.randn(self.channels, *self.dimensions), torch.tensor(np.random.choice(range(10))) - -def test_multiple_channels_ncsnpp(): - C = 3 - D = 16 - dim = 2 - B = 5 - size = 2*B - dataset = Dataset(size, C, [D]*dim) - net = NCSNpp(nf=8, channels=C, ch_mul=(1, 1)) - model = ScoreModel(model=net, sigma_min=1e-2, sigma_max=10) - model.fit(dataset, batch_size=B, epochs=2) - - -def test_training_conditioned_input_ncsnpp(): - C = 1 - D = 16 - dim = 2 - B = 5 - size = 2*B - dataset = Dataset(size, C, [D]*dim, conditioning="input") - net = NCSNpp(nf=8, ch_mul=(1, 1), condition=["input"], condition_input_channels=1) - model = ScoreModel(model=net, sigma_min=1e-2, sigma_max=10) - model.fit(dataset, batch_size=B, epochs=2) - -def test_training_conditioned_continuous_timelike_ncsnpp(): - C = 1 - D = 16 - dim = 2 - B = 5 - size = 2*B - dataset = Dataset(size, C, [D]*dim, conditioning="time") - net = NCSNpp(nf=8, ch_mul=(1, 1), condition=["continuous_timelike"]) - model = ScoreModel(model=net, sigma_min=1e-2, sigma_max=10) - model.fit(dataset, batch_size=B, epochs=2) - -def test_training_conditioned_discrete_timelike_ncsnpp(): - C = 1 - D = 16 - dim = 2 - B = 5 - size = 2*B - dataset = Dataset(size, C, [D]*dim, conditioning="discrete_time") - net = NCSNpp(nf=8, ch_mul=(1, 1), condition=["discrete_timelike"], condition_num_embedding=(10,)) - model = ScoreModel(model=net, sigma_min=1e-2, sigma_max=10) - model.fit(dataset, batch_size=B, epochs=2) - - -def test_training_conditioned_discrete_and_timelike_ncsnpp(): - C = 1 - D = 16 - dim = 2 - B = 5 - size = 2*B - dataset = Dataset(size, C, [D]*dim, conditioning="time_and_discrete") - net = NCSNpp(nf=8, ch_mul=(1, 1), condition=["continuous_timelike", "discrete_timelike"], condition_num_embedding=(10,)) - model = ScoreModel(model=net, sigma_min=1e-2, sigma_max=10) - model.fit(dataset, batch_size=B, epochs=2) - - -def test_training_score_mlp(): - C = 10 - B = 5 - size = 2*B - dataset = Dataset(size, C, []) - hyperparameters = { - "dimensions": C, - "units": 2*C, - "layers": 2, - "time_embedding_dimensions": 
32, - "embedding_scale": 32, - "activation": "swish", - "time_branch_layers": 1 - } - net = MLP(**hyperparameters) - # Create an instance of ScoreModel - model = ScoreModel(model=net, sigma_min=1e-2, sigma_max=10) - - # Define any preprocessing function if needed - def preprocessing_fn(x): + x = [torch.randn(self.C, *self.D),] + if self.conditions: + c_idx = 0 + e_idx = 0 + for condition in self.conditions: + if condition == "time_continuous": + c = torch.randn(1) + x.append(c) + elif condition == "time_discrete": + tokens = self.condition_embeddings[e_idx] + c = torch.randint(tokens, (1,)) + x.append(c) + e_idx += 1 + elif condition == "time_vector": + c = torch.randn(self.condition_channels[c_idx]) + x.append(c) + c_idx += 1 + elif condition == "input_tensor": + c = torch.randn(self.condition_channels[c_idx], *self.D) + x.append(c) + c_idx += 1 return x - # Set the hyperparameters and other options for training - learning_rate = 1e-3 - ema_decay = 0.9999 - batch_size = 1 - epochs = 10 - warmup = 0 # learning rate warmup - clip = 0. # gradient clipping - checkpoints_directory = os.path.dirname(os.path.abspath(__file__)) + "/checkpoints" - seed = 42 +@pytest.mark.parametrize("models_to_keep", [1, 2]) +@pytest.mark.parametrize("conditions", [ + (None, None, None), + (("input_tensor", "time_continuous", "time_vector", "time_discrete"), (15,), (15, 3)), + ]) +@pytest.mark.parametrize("sde", [ + {"sde": "vp"}, + {"sde": "ve", "sigma_min": 1e-2, "sigma_max": 1e2}, + {"sde": "vp", "schedule": "cosine", "beta_max": 100} + ]) +@pytest.mark.parametrize("Net", [MLP, NCSNpp, DDPM]) +def test_training_score_model(conditions, sde, Net, models_to_keep, tmp_path, capsys): + condition_type, embeddings, channels = conditions + hp = { + "ch_mult": (1, 1), + "nf": 2, + "conditions": condition_type, + "condition_channels": channels, + "condition_embeddings": embeddings, + } + E = 3 # epochs + B = 2 + C = 3 + N = 4 + D = [] if Net == MLP else [4, 4] + dataset = Dataset(N, C, dimensions=D, **hp) + net = Net(C, **hp) + model = ScoreModel(net, **sde) + + path = tmp_path / "test" + losses = model.fit(dataset, batch_size=B, epochs=E, path=path, checkpoint_every=1, models_to_keep=models_to_keep) - # Fit the model to the dataset - losses = model.fit( - dataset, - preprocessing_fn=preprocessing_fn, - learning_rate=learning_rate, - ema_decay=ema_decay, - batch_size=batch_size, - epochs=epochs, - warmup=warmup, - clip=clip, - checkpoints_directory=checkpoints_directory, - seed=seed - ) print(losses) - # leave the checpoints there until next test - -def test_training_score_mlp_input_list(): - C = 10 - B = 5 - size = 2*B - dataset = Dataset(size, C, [], test_input_list=True) - checkpoints_directory = os.path.dirname(os.path.abspath(__file__)) + "/checkpoints" - model = ScoreModel(checkpoints_directory=checkpoints_directory) - losses = model.fit( - dataset, - checkpoints_directory=checkpoints_directory, - epochs=10, - checkpoints=1, - models_to_keep=12, # keep all checkpoints for next test - batch_size=1 - ) - - -def test_load_checkpoint_at_scoremodel_init(): - checkpoints_directory = os.path.dirname(os.path.abspath(__file__)) + "/checkpoints" - model1 = ScoreModel(checkpoints_directory=checkpoints_directory, model_checkpoint=1) - assert model1.loaded_checkpoint == 1, f"Expected checkpoint 1, got {model1.loaded_checkpoint}" - - model2 = ScoreModel(checkpoints_directory=checkpoints_directory, model_checkpoint=4) - assert model2.loaded_checkpoint == 4, f"Expected checkpoint 4, got {model2.loaded_checkpoint}" - - model3 = 
ScoreModel(checkpoints_directory=checkpoints_directory) - # Additional assertion based on the expected behavior when model_checkpoint is not provided - expected_checkpoint = 11 # Based on previous test, training 10 epochs and saving each one, we should have 11 checkpoints (also saving the last one) - assert model3.loaded_checkpoint == expected_checkpoint, f"Expected checkpoint {expected_checkpoint}, got {model3.loaded_checkpoint}" - - -def test_training_load_checkpoint(): - C = 10 - B = 5 - size = 2*B - dataset = Dataset(size, C, []) - checkpoints_directory = os.path.dirname(os.path.abspath(__file__)) + "/checkpoints" - model = ScoreModel(checkpoints_directory=checkpoints_directory) - losses = model.fit( - dataset, - checkpoints_directory=checkpoints_directory, - epochs=10, - batch_size=1 - ) - # Finally remove the checkpoint directory to keep the logic of the test above sound - shutil.rmtree(checkpoints_directory) - + assert len(losses) == E, f"Expected {E} losses, got {len(losses)}" + # Check that some improvement happens + assert os.path.isfile(os.path.join(path, "model_hparams.json")), "model_hparams.json not found" + assert os.path.isfile(os.path.join(path, "script_params.json")), "script_params.json not found" + for i in range(E+1-models_to_keep, E+1): + assert os.path.isfile(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt not found" + assert os.path.isfile(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt not found" + for i in range(0, E+1-models_to_keep): # Check that files are cleaned up + assert not os.path.exists(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt found, should not be there" + assert not os.path.exists(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt found, should not be there" -def test_training_score_ncsnpp(): - C = 1 - D = 140 - B = 5 - size = 2*B - dataset = Dataset(size, C, [D]) - hyperparameters = { - 'channels': C, - 'nf': 8, - 'activation_type': 'swish', - 'ch_mult': (2, 2), - 'num_res_blocks': 2, - 'resample_with_conv': True, - 'dropout': 0.0, - 'fir': True, - 'fir_kernel': (1, 3, 3, 1), - 'skip_rescale': True, - 'progressive': 'output_skip', - 'progressive_input': 'input_skip', - 'init_scale': 0.01, - 'fourier_scale': 16.0, - 'resblock_type': 'biggan', - 'combine_method': 'sum', - 'attention': True, - 'dimensions': 1, - 'sde': 'vesde', - 'sigma_min': 0.001, - 'sigma_max': 200, - 'T': 1.0} - net = NCSNpp(**hyperparameters) - # Create an instance of ScoreModel - model = ScoreModel(model=net, sigma_min=1e-2, sigma_max=10) - - # Define any preprocessing function if needed - def preprocessing_fn(x): - return x - - # Set the hyperparameters and other options for training - learning_rate = 1e-3 - ema_decay = 0.9999 - batch_size = 1 - epochs = 2 - warmup = 0 # learning rate warmup - clip = 0. # gradient clipping - checkpoints_directory = None - seed = 42 + # Test resume from checkpoint + new_model = ScoreModel(path=path) + assert new_model.loaded_checkpoint == E, f"Expected loaded_checkpoint to be {E}, got {new_model.loaded_checkpoint}" + losses = new_model.fit( + dataset, + batch_size=B, + epochs=E, + checkpoint_every=1, + models_to_keep=models_to_keep + ) + # Check stdout for the print statement declaring we resumed from a previous checkpoint for the optimizer + captured = capsys.readouterr() + print(captured.out) + assert f"Resumed training from checkpoint {E}." 
in captured.out + + # Check that the new checkpoints are updated correctly + for i in range(2*E+1-models_to_keep, 2*E+1): + assert os.path.isfile(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt not found" + assert os.path.isfile(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt not found" + for i in range(E, 2*E+1-models_to_keep): # Check that files are cleaned up + assert not os.path.exists(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt found, should not be there" + assert not os.path.exists(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt found, should not be there" + + +@pytest.mark.parametrize("Net", [MLP, NCSNpp]) +@pytest.mark.parametrize("sde", [ + {"sde": "vp"}, + {"sde": "ve", "sigma_min": 1e-2, "sigma_max": 1e2}, + {"sde": "vp", "schedule": "cosine", "beta_max": 100} + ]) +def test_training_energy_model(sde, Net, tmp_path, capsys): + hp = { + "ch_mult": (1, 1), + "nf": 2, + } + E = 2 # epochs + B = 2 + C = 3 + N = 4 + models_to_keep = 1 + D = [] if Net == MLP else [4, 4] + dataset = Dataset(N, C, dimensions=D, **hp) + net = Net(C, **hp) + model = EnergyModel(net, **sde) + + path = tmp_path / "test" + losses = model.fit(dataset, batch_size=B, epochs=E, path=path, checkpoint_every=1, models_to_keep=models_to_keep) - # Fit the model to the dataset - losses = model.fit( - dataset, - preprocessing_fn=preprocessing_fn, - learning_rate=learning_rate, - ema_decay=ema_decay, - batch_size=batch_size, - epochs=epochs, - warmup=warmup, - clip=clip, - checkpoints_directory=checkpoints_directory, - seed=seed - ) print(losses) + assert len(losses) == E, f"Expected {E} losses, got {len(losses)}" + assert os.path.isfile(os.path.join(path, "model_hparams.json")), "model_hparams.json not found" + assert os.path.isfile(os.path.join(path, "script_params.json")), "script_params.json not found" + for i in range(E+1-models_to_keep, E+1): + assert os.path.isfile(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt not found" + assert os.path.isfile(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt not found" + for i in range(0, E+1-models_to_keep): # Check that files are cleaned up + assert not os.path.exists(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt found, should not be there" + assert not os.path.exists(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt found, should not be there" + + # Test resume from checkpoint + new_model = EnergyModel(path=path) + assert new_model.loaded_checkpoint == E, f"Expected loaded_checkpoint to be {E}, got {new_model.loaded_checkpoint}" + losses = new_model.fit( + dataset, + batch_size=B, + epochs=E, + checkpoint_every=1, + models_to_keep=models_to_keep + ) + # Check stdout for the print statement declaring we resumed from a previous checkpoint for the optimizer + captured = capsys.readouterr() + print(captured.out) + assert f"Resumed training from checkpoint {E}." 
in captured.out + + # Check that the new checkpoints are updated correctly + for i in range(2*E+1-models_to_keep, 2*E+1): + assert os.path.isfile(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt not found" + assert os.path.isfile(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt not found" + for i in range(E, 2*E+1-models_to_keep): # Check that files are cleaned up + assert not os.path.exists(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt found, should not be there" + assert not os.path.exists(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt found, should not be there" + + +@pytest.mark.parametrize("conditions", [ + (None, None, None), # conditions, embeddings, channels + (("input_tensor", "time_continuous", "time_vector", "time_discrete"), (15,), (15, 3)), + ]) +@pytest.mark.parametrize("loss", ["canonical", "meng"]) +@pytest.mark.parametrize("sde", [ + {"sde": "vp"}, + {"sde": "ve", "sigma_min": 1e-2, "sigma_max": 1e2}, + {"sde": "vp", "schedule": "cosine", "beta_max": 100} + ]) +@pytest.mark.parametrize("Net", [MLP, NCSNpp]) +def test_training_hessian_diagonal_model(conditions, loss, sde, Net, tmp_path, capsys): + condition_type, embeddings, channels = conditions + hp = { + "ch_mult": (1, 1), + "nf": 2, + "conditions": condition_type, + "condition_channels": channels, + "condition_embeddings": embeddings, + } + E = 3 # epochs + B = 2 + C = 3 + N = 4 + D = [] if Net == MLP else [4, 4] + models_to_keep = 1 + dataset = Dataset(N, C, dimensions=D, **hp) + net = Net(C, **hp) + base_model = ScoreModel(net, **sde) + derivative_net = Net(C, **hp) + derivative_model = HessianDiagonal(base_model, net=derivative_net, loss=loss) + + path = tmp_path / "test" + losses = derivative_model.fit(dataset, batch_size=B, epochs=E, path=path, checkpoint_every=1, models_to_keep=models_to_keep) -def test_training_energy(): - # Create a dummy dataset - X = torch.randn(10, 10) - - # Convert the data into a TensorDataset - dataset = TensorDataset(X) - - hyperparameters = { - "dimensions": 10, - "units": 10, - "layers": 2, - "time_embedding_dimensions": 32, - "embedding_scale": 32, - "activation": "swish", - "time_branch_layers": 1, - # "nn_is_energy": True - } - net = MLP(**hyperparameters) - # Create an instance of ScoreModel - model = EnergyModel(model=net, sigma_min=1e-2, sigma_max=10) - - # Define any preprocessing function if needed - def preprocessing_fn(x): - return x - - # Set the hyperparameters and other options for training - learning_rate = 1e-3 - ema_decay = 0.9999 - batch_size = 1 - epochs = 10 - warmup = 0 # learning rate warmup - clip = 0. 
# gradient clipping - checkpoints_directory = None - seed = 42 + print(losses) + assert len(losses) == E, f"Expected {E} losses, got {len(losses)}" + # Check that some improvement happens + assert os.path.isdir(os.path.join(path, "score_model")), "score_model directory not found, the base SBM has not been saved" + assert os.path.isfile(os.path.join(path, "model_hparams.json")), "model_hparams.json not found" + assert os.path.isfile(os.path.join(path, "script_params.json")), "script_params.json not found" + for i in range(E+1-models_to_keep, E+1): + assert os.path.isfile(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt not found" + assert os.path.isfile(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt not found" + for i in range(0, E+1-models_to_keep): # Check that files are cleaned up + assert not os.path.exists(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt found, should not be there" + assert not os.path.exists(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt found, should not be there" + + # Test resume from checkpoint + new_model = HessianDiagonal(path=path) + assert new_model.loaded_checkpoint == E, f"Expected loaded_checkpoint to be {E}, got {new_model.loaded_checkpoint}" + losses = new_model.fit( + dataset, + batch_size=B, + epochs=E, + checkpoint_every=1, + models_to_keep=models_to_keep + ) + # Check stdout for the print statement declaring we resumed from a previous checkpoint for the optimizer + captured = capsys.readouterr() + print(captured.out) + assert f"Resumed training from checkpoint {E}." in captured.out + + # Check that the new checkpoints are updated correctly + for i in range(2*E+1-models_to_keep, 2*E+1): + assert os.path.isfile(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt not found" + assert os.path.isfile(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt not found" + for i in range(E, 2*E+1-models_to_keep): # Check that files are cleaned up + assert not os.path.exists(os.path.join(path, f"checkpoint_{i:03}.pt")), f"checkpoint_{i:03}.pt found, should not be there" + assert not os.path.exists(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt found, should not be there" + + +@pytest.mark.parametrize("conditions", [ + (None, None, None), # conditions, embeddings, channels + (("input_tensor", "time_continuous", "time_vector", "time_discrete"), (15,), (15, 3)), + ]) +@pytest.mark.parametrize("lora_rank", [1, 2]) +@pytest.mark.parametrize("sde", [ + {"sde": "vp"}, + {"sde": "ve", "sigma_min": 1e-2, "sigma_max": 1e2}, + {"sde": "vp", "schedule": "cosine", "beta_max": 100} + ]) +@pytest.mark.parametrize("Net", [MLP, NCSNpp]) +def test_training_lora_model(conditions, lora_rank, sde, Net, tmp_path, capsys): + condition_type, embeddings, channels = conditions + hp = { + "ch_mult": (1, 1), + "nf": 2, + "conditions": condition_type, + "condition_channels": channels, + "condition_embeddings": embeddings, + } + E = 3 # epochs + B = 2 + C = 3 + N = 4 + D = [] if Net == MLP else [4, 4] + models_to_keep = 1 + dataset = Dataset(N, C, dimensions=D, **hp) + net = Net(C, **hp) + base_model = ScoreModel(net, **sde) + lora_model = LoRAScoreModel(base_model, lora_rank=lora_rank) + + path = tmp_path / "test" + losses = lora_model.fit(dataset, batch_size=B, epochs=E, path=path, checkpoint_every=1, models_to_keep=models_to_keep) - # Fit the model to the dataset - losses = model.fit( - dataset, - preprocessing_fn=preprocessing_fn, - learning_rate=learning_rate, - 
ema_decay=ema_decay,
-        batch_size=batch_size,
-        epochs=epochs,
-        warmup=warmup,
-        clip=clip,
-        checkpoints_directory=checkpoints_directory,
-        seed=seed
-    )
     print(losses)
+    assert len(losses) == E, f"Expected {E} losses, got {len(losses)}"
+    # Check that some improvement happens
+    assert os.path.isdir(os.path.join(path, "base_sbm")), "base_sbm directory not found, the base SBM has not been saved"
+    print(os.listdir(os.path.join(path, "base_sbm")))
+    assert os.path.isfile(os.path.join(path, "base_sbm", "model_hparams.json")), "model_hparams.json not found in base_sbm directory"
+    assert os.path.isfile(os.path.join(path, "base_sbm", "checkpoint_001.pt")), "checkpoint_001.pt not found in base_sbm directory"
+    assert os.path.isfile(os.path.join(path, "model_hparams.json")), "model_hparams.json not found"
+    assert os.path.isfile(os.path.join(path, "script_params.json")), "script_params.json not found"
+    print(os.listdir(path))
+    for i in range(E+1-models_to_keep, E+1):
+        assert os.path.isdir(os.path.join(path, f"lora_checkpoint_{i:03}")), f"lora_checkpoint_{i:03} not found"
+        assert os.path.isfile(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt not found"
+    for i in range(0, E+1-models_to_keep): # Check that files are cleaned up
+        assert not os.path.exists(os.path.join(path, f"lora_checkpoint_{i:03}")), f"lora_checkpoint_{i:03} found, should not be there"
+        assert not os.path.exists(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt found, should not be there"
+
+    # Check the network is reloaded correctly
+    new_model = LoRAScoreModel(path=path)
+    assert new_model.loaded_checkpoint == E, f"Expected loaded_checkpoint to be {E}, got {new_model.loaded_checkpoint}"
+    losses = new_model.fit(
+        dataset,
+        batch_size=B,
+        epochs=E,
+        checkpoint_every=1,
+        models_to_keep=models_to_keep
+    )
+    # Check stdout for the print statement declaring we resumed from a previous checkpoint for the optimizer
+    captured = capsys.readouterr()
+    print(captured.out)
+    assert f"Resumed training from checkpoint {E}."
in captured.out
+
+    # Check that the new checkpoints are updated correctly
+    print(os.listdir(path))
+    for i in range(2*E+1-models_to_keep, 2*E+1):
+        assert os.path.isdir(os.path.join(path, f"lora_checkpoint_{i:03}")), f"lora_checkpoint_{i:03} not found"
+        assert os.path.isfile(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt not found"
+    for i in range(E, 2*E+1-models_to_keep): # Check that files are cleaned up
+        assert not os.path.exists(os.path.join(path, f"lora_checkpoint_{i:03}")), f"lora_checkpoint_{i:03} found, should not be there"
+        assert not os.path.exists(os.path.join(path, f"optimizer_{i:03}.pt")), f"optimizer_{i:03}.pt found, should not be there"
+
+
+def test_backward_compatibility_optimizer_state(tmp_path, capsys):
+    # First, train a model with a custom optimizer targeting the network
+    E = 3 # epochs
+    B = 2
+    C = 3
+    N = 4
+    D = []
+    dataset = Dataset(N, C, dimensions=D)
+    net = MLP(C)
+    model = ScoreModel(net, "vp")
+
+    # Simulate the case where the optimizer targets the network
+    optim = torch.optim.Adam(net.parameters(), lr=1e-3)
+    path = tmp_path / "test"
+    losses = model.fit(
+        dataset,
+        batch_size=B,
+        epochs=E,
+        path=path,
+        checkpoint_every=1,
+        optimizer=optim,
+        models_to_keep=1)
+
+    # Now we resume training, and check that we managed to load the checkpoint
+    new_model = ScoreModel(path=path)
+    assert new_model.loaded_checkpoint == E, f"Expected loaded_checkpoint to be {E}, got {new_model.loaded_checkpoint}"
+    # Don't provide the optimizer here to simulate the backward compatibility component of loading the optimizer
+    losses = new_model.fit(
+        dataset,
+        batch_size=B,
+        epochs=E,
+        checkpoint_every=1,
+        models_to_keep=1
+    )
+    # Check stdout for the print statement declaring we resumed from a previous checkpoint for the optimizer
+    captured = capsys.readouterr()
+    print(captured.out)
+    assert f"Resumed training from checkpoint {E}." in captured.out
+    assert f"Loaded optimizer {E} from test." in captured.out

From cdd9eede3d8781eac7a45d82818cb766ed98a371 Mon Sep 17 00:00:00 2001
From: AlexandreAdam
Date: Wed, 28 Aug 2024 18:36:06 -0400
Subject: [PATCH 02/40] Made cat the default for NCSNpp. Made the batch size
 and dataloading wrapping optional in the fit method, so we can use datasets
 that output a full batch. Updated the tests for the layers and added tests
 to catch new behaviors.
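Usage sketch (illustrative, not part of the diff below): with the optional
dataloading wrapping, a dataset that already yields full batches can be passed
to fit with batch_size=None, in which case the trainer iterates the dataset
directly instead of wrapping it in a DataLoader:

    import torch
    from score_models import ScoreModel, MLP

    class PreBatchedDataset(torch.utils.data.Dataset):
        # Each item is already a full batch of 8 ten-dimensional samples,
        # so the trainer can consume it without a DataLoader.
        def __len__(self):
            return 4

        def __getitem__(self, index):
            return torch.randn(8, 10),

    model = ScoreModel(MLP(10), sde="vp")
    losses = model.fit(PreBatchedDataset(), batch_size=None, epochs=1)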
--- score_models/architectures/ncsnpp.py | 3 +- score_models/layers/attention_block.py | 16 +- score_models/layers/combine.py | 7 +- score_models/sbm/base.py | 16 +- score_models/trainer.py | 20 +- tests/test_layers.py | 248 +++++++++++-------------- tests/test_training.py | 28 ++- 7 files changed, 160 insertions(+), 178 deletions(-) diff --git a/score_models/architectures/ncsnpp.py b/score_models/architectures/ncsnpp.py index cc931e1..5570a9b 100644 --- a/score_models/architectures/ncsnpp.py +++ b/score_models/architectures/ncsnpp.py @@ -46,7 +46,7 @@ def __init__( init_scale: float = 1e-2, fourier_scale: float = 16., resblock_type: Literal["biggan", "ddpm"] = "biggan", - combine_method: Literal["concat", "sum"] = "sum", + combine_method: Literal["cat", "sum"] = "cat", attention: bool = True, conditions : Optional[tuple[Literal["time_discrete", "time_continuous", "time_vector", "input_tensor"]]] = None, condition_embeddings: Optional[tuple[int]] = None, @@ -406,4 +406,3 @@ def forward(self, t, x, *args): assert m_idx == len(modules) return h - diff --git a/score_models/layers/attention_block.py b/score_models/layers/attention_block.py index fc6e93e..cd30dc5 100644 --- a/score_models/layers/attention_block.py +++ b/score_models/layers/attention_block.py @@ -58,22 +58,22 @@ def __call__(self, x): class ScaledAttentionLayer(nn.Module): """ - Simple self attention mechanism, with MLP and no skip connections for MLP network + Simple self attention mechanism with MLP and no skip connections """ - def __init__(self, dimensions): + def __init__(self, channels): super().__init__() - self.query = nn.Linear(in_features=dimensions, out_features=dimensions) - self.key = nn.Linear(in_features=dimensions, out_features=dimensions) - self.value = nn.Linear(in_features=dimensions, out_features=dimensions) - self.to_out = nn.Linear(in_features=dimensions, out_features=dimensions) + c = channels + self.query = nn.Linear(in_features=c, out_features=c) + self.key = nn.Linear(in_features=c, out_features=c) + self.value = nn.Linear(in_features=c, out_features=c) + self.to_out = nn.Linear(in_features=c, out_features=c) # Initialization with torch.no_grad(): - bound = 1 / dimensions ** (1 / 2) + bound = 1 / c**(1/2) for layer in (self.query, self.key, self.value): layer.weight.uniform_(-bound, bound) layer.bias.zero_() - bound = 1 / dimensions ** (1 / 2) self.to_out.weight.uniform_(-bound, bound) self.to_out.bias.zero_() diff --git a/score_models/layers/combine.py b/score_models/layers/combine.py index 773aeba..5fab4ff 100644 --- a/score_models/layers/combine.py +++ b/score_models/layers/combine.py @@ -6,11 +6,14 @@ class Combine(torch.nn.Module): """Combine information from skip connections.""" - def __init__(self, in_ch, out_ch, method='cat', dimensions:int = 2): super().__init__() self.Conv_0 = conv1x1(in_ch, out_ch, dimensions=dimensions) - assert method in ["cat", "sum"], f'Method {method} not recognized.' 
+        if method not in ["cat", "sum"]:
+            raise ValueError(f'Method {method} not recognized for the Combine layer.')
+        if method == 'sum':
+            if in_ch != out_ch:
+                raise ValueError('Method sum requires in_ch == out_ch')
         self.method = method
 
     def forward(self, x, y):
diff --git a/score_models/sbm/base.py b/score_models/sbm/base.py
index 8adffeb..fedbcb1 100644
--- a/score_models/sbm/base.py
+++ b/score_models/sbm/base.py
@@ -139,19 +139,19 @@ def load(
     def fit(
         self,
         dataset: torch.utils.data.Dataset,
+        epochs: int = 1,
+        learning_rate: float = 1e-4,
+        ema_decay: float = 0.999,
+        clip: float = 0.,
+        warmup: int = 0,
+        optimizer: Optional[torch.optim.Optimizer] = None,
         preprocessing: Optional[Callable] = None,
-        batch_size: int = 1,
+        batch_size: Optional[int] = None,
         shuffle: bool = False,
-        epochs: int = 100,
         iterations_per_epoch: Optional[int] = None,
         max_time: float = float('inf'),
-        optimizer: Optional[torch.optim.Optimizer] = None,
-        learning_rate: float = 1e-3,
-        ema_decay: float = 0.999,
-        clip: float = 0.,
-        warmup: int = 0,
         checkpoint_every: int = 10,
-        models_to_keep: int = 3,
+        models_to_keep: int = 1,
         path: Optional[str] = None,
         name_prefix: Optional[str] = None,
         seed: Optional[int] = None,
diff --git a/score_models/trainer.py b/score_models/trainer.py
index 476e272..058cfa0 100644
--- a/score_models/trainer.py
+++ b/score_models/trainer.py
@@ -24,17 +24,17 @@ def __init__(
         self,
         model: "ScoreModel",
         dataset: Dataset,
-        preprocessing: Optional[Callable] = None,
-        batch_size: int = 1,
-        shuffle: bool = False,
         epochs: int = 100,
-        iterations_per_epoch: Optional[int] = None,
-        max_time: float = float('inf'),
-        optimizer: Optional[torch.optim.Optimizer] = None,
+        batch_size: Optional[int] = None,
         learning_rate: float = 1e-3,
         ema_decay: float = 0.999,
         clip: float = 0.,
         warmup: int = 0,
+        optimizer: Optional[torch.optim.Optimizer] = None,
+        preprocessing: Optional[Callable] = None,
+        shuffle: bool = False,
+        iterations_per_epoch: Optional[int] = None,
+        max_time: float = float('inf'),
         checkpoint_every: int = 10,
         models_to_keep: int = 3,
         path: Optional[str] = None,
@@ -43,9 +43,11 @@
     ):
         self.model = model
         self.net = model.net # Neural network to train
-        self.dataset = dataset
-        self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
-        self.data_iter = iter(self.dataloader)
+        if batch_size is not None:
+            self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
+        else:
+            self.dataloader = dataset
+        self.data_iter = iter(self.dataloader)
         self.preprocessing = preprocessing or (lambda x: x)
         self.optimizer = optimizer or torch.optim.Adam(self.model.parameters(), lr=learning_rate)
         self.ema = ExponentialMovingAverage(self.model.parameters(), decay=ema_decay)
diff --git a/tests/test_layers.py b/tests/test_layers.py
index 01148ac..a8b3b90 100644
--- a/tests/test_layers.py
+++ b/tests/test_layers.py
@@ -4,164 +4,134 @@
 from score_models.definitions import default_init
 from score_models.utils import get_activation
 import numpy as np
+import pytest
 
 def init_test_fn(shape, dtype=torch.float32, device="cpu"):
     return torch.ones(shape, dtype=dtype, device=device)
 
-def test_attention():
-    x = torch.randn([10, 4, 8, 8])
-    print(x[0, 0, 0, 0], x[0, 0, 0, 1])
-    att = SelfAttentionBlock(4)
+@pytest.mark.parametrize("D", [1, 2, 3])
+@pytest.mark.parametrize("P", [8])
+@pytest.mark.parametrize("C", [4])
+@pytest.mark.parametrize("B", [10])
+def test_attention(B, D, C, P):
+    x = torch.randn([B, C, *[P]*D])
+    att = SelfAttentionBlock(C, dimensions=D)
     y = att(x)
-
print(y[0, 0, 0, 0], y[0, 0, 0, 1]) - x = torch.randn([10, 4, 8, 8, 8]) - SelfAttentionBlock(4, dimensions=3)(x) - x = torch.randn([10, 4, 8]) - SelfAttentionBlock(4, dimensions=1)(x) + assert y.shape == x.shape - x = torch.randn(10, 5) * 100 - B, D = x.shape - temb = torch.randn(B, D) + +@pytest.mark.parametrize("C", [4]) +@pytest.mark.parametrize("B", [10]) +def test_scaled_attention_layer(C, B): + x = torch.randn(B, C) * 100 + temb = torch.randn(B, C) context = torch.stack([x, temb], dim=1) + att = ScaledAttentionLayer(channels=C) + out = att(x.view(B, 1, C), context) + assert out.squeeze().shape == x.shape + print("context shape", context.shape) - att = ScaledAttentionLayer(dimensions=5) - out = att(x.view(B, 1, D), context) print("shape",out.shape) print("out", out) -def test_resnet_biggan(): - # out channels has to be at least 4 - act = get_activation("relu") - layer = ResnetBlockBigGANpp(act=act, in_ch=8, out_ch=4, temb_dim=None, up=False, down=False, fir=False, skip_rescale=True, dimensions=2) - x = torch.randn(1, 8, 8, 8) - out = layer(x) - assert list(out.shape) == [1, 4, 8, 8] - - layer = ResnetBlockBigGANpp(act=act, in_ch=8, out_ch=4, temb_dim=10, up=False, down=False, fir=False, skip_rescale=True, dimensions=3) - x = torch.randn(1, 8, 8, 8, 8) - out = layer(x) - assert list(out.shape) == [1, 4, 8, 8, 8] - - layer = ResnetBlockBigGANpp(act=act, in_ch=8, out_ch=4, temb_dim=10, up=True, down=False, fir=False, skip_rescale=True, dimensions=3) - x = torch.randn(1, 8, 8, 8, 8) - out = layer(x) - assert list(out.shape) == [1, 4, 16, 16, 16] - - layer = ResnetBlockBigGANpp(act=act, in_ch=8, out_ch=4, temb_dim=10, up=False, down=True, fir=True, skip_rescale=True, dimensions=3) - x = torch.randn(1, 8, 8, 8, 8) - out = layer(x) - assert list(out.shape) == [1, 4, 4, 4, 4] - - layer = ResnetBlockBigGANpp(act=act, in_ch=8, out_ch=4, temb_dim=10, up=False, down=True, fir=True, skip_rescale=False, dimensions=1) - x = torch.randn(1, 8, 8) +@pytest.mark.parametrize("D", [1, 2, 3]) +@pytest.mark.parametrize("P", [4, 8]) +@pytest.mark.parametrize("Cin", [4]) +@pytest.mark.parametrize("Cout", [2, 4]) +@pytest.mark.parametrize("temb_dim", [None, 10]) +@pytest.mark.parametrize("up_down", [(False, False), (True, False), (False, True)]) +@pytest.mark.parametrize("fir", [True, False]) +@pytest.mark.parametrize("skip_rescale", [True, False]) +def test_resnet_biggan(D, P, Cin, Cout, temb_dim, up_down, fir, skip_rescale): + up = up_down[0] + down = up_down[1] + layer = ResnetBlockBigGANpp( + act=get_activation("relu"), + in_ch=Cin, + out_ch=Cout, + temb_dim=temb_dim, + up=up, + down=down, + fir=fir, + skip_rescale=skip_rescale, + dimensions=D) + + x = torch.randn(1, Cin, *[P]*D) out = layer(x) - assert list(out.shape) == [1, 4, 4] - -def test_combine(): - x = torch.randn(1, 1, 8, 8) - y = torch.randn(1, 1, 8, 8) - layer = Combine(in_ch=1, out_ch=4, method="cat", dimensions=2) + Pout = P*2 if up else P//2 if down else P + assert list(out.shape) == [1, Cout, *[Pout]*D] + +@pytest.mark.parametrize("D", [1, 2, 3]) +@pytest.mark.parametrize("P", [4, 8]) +@pytest.mark.parametrize("Cin", [4]) +@pytest.mark.parametrize("Cout", [2, 4]) +@pytest.mark.parametrize("method", ["cat", "sum"]) +def test_combine(D, P, Cin, Cout, method): + if method == "sum": + Cout = Cin # Sum requires the same number of channels + layer = Combine(in_ch=Cin, out_ch=Cout, method=method, dimensions=D) + x = torch.randn(1, Cin, *[P]*D) + y = torch.randn(1, Cin, *[P]*D) out = layer(x, y) - assert list(out.shape) == [1, 5, 8, 8] - - x = 
torch.randn(1, 1, 8, 8, 8) - y = torch.randn(1, 1, 8, 8, 8) - layer = Combine(in_ch=1, out_ch=4, method="cat", dimensions=3) - out = layer(x, y) - assert list(out.shape) == [1, 5, 8, 8, 8] - - x = torch.randn(1, 1, 8, 8, 8) - y = torch.randn(1, 4, 8, 8, 8) - layer = Combine(in_ch=1, out_ch=4, method="sum", dimensions=3) - out = layer(x, y) - assert list(out.shape) == [1, 4, 8, 8, 8] - - -def test_upsample_layer(): - x = torch.randn(1, 1, 8, 8) - layer = UpsampleLayer(1, 3, with_conv=True, fir=True, dimensions=2) - out = layer(x) - assert list(out.shape) == [1, 3, 16, 16] - - x = torch.randn(1, 1, 8) - layer = UpsampleLayer(1, 3, with_conv=True, fir=True, dimensions=1) - out = layer(x) - assert list(out.shape) == [1, 3, 16] - - x = torch.randn(1, 1, 8) - layer = UpsampleLayer(1, 1, with_conv=False, fir=False, dimensions=1) - out = layer(x) - assert list(out.shape) == [1, 1, 16] - - x = torch.randn(1, 1, 8, 8, 8) - layer = UpsampleLayer(1, 1, with_conv=False, fir=False, dimensions=3) - out = layer(x) - assert list(out.shape) == [1, 1, 16, 16, 16] + if method == "cat": # Concatenation will append the channels + assert list(out.shape) == [1, Cin+Cout, *[P]*D] + else: + assert list(out.shape) == [1, Cout, *[P]*D] + +def test_combine_errors(): + with pytest.raises(ValueError) as e: + layer = Combine(in_ch=4, out_ch=6, method="sum", dimensions=2) + assert "Method sum requires in_ch == out" in str(e) - -def test_downsample_layer(): - x = torch.randn(1, 1, 8, 8) - layer = DownsampleLayer(1, 3, with_conv=True, fir=True, dimensions=2) - out = layer(x) - assert list(out.shape) == [1, 3, 4, 4] - - x = torch.randn(1, 1, 8) - layer = DownsampleLayer(1, 3, with_conv=True, fir=True, dimensions=1) + with pytest.raises(ValueError) as e: + layer = Combine(in_ch=4, out_ch=6, method="not_a_method", dimensions=2) + assert "Method not_a_method not recognized for the Combine layer." 
in str(e) + +@pytest.mark.parametrize("D", [1, 2, 3]) +@pytest.mark.parametrize("P", [4, 8]) +@pytest.mark.parametrize("Cin", [1, 3]) +@pytest.mark.parametrize("Cout", [3, 4]) +@pytest.mark.parametrize("fir", [True, False]) +@pytest.mark.parametrize("with_conv", [True, False]) +def test_up_down_sampling_layer(D, P, Cin, Cout, fir, with_conv): + x = torch.randn(1, Cin, *[P]*D) + # Upsample layer + if Cin != Cout: # If the number of channels is different, we need to use a convolutional layer + with_conv = True + layer = UpsampleLayer(Cin, Cout, with_conv=with_conv, fir=fir, dimensions=D) out = layer(x) - assert list(out.shape) == [1, 3, 4] - - x = torch.randn(1, 1, 8) - layer = DownsampleLayer(1, 1, with_conv=False, fir=False, dimensions=1) - out = layer(x) - assert list(out.shape) == [1, 1, 4] - - x = torch.randn(1, 1, 8, 8, 8) - layer = DownsampleLayer(1, 1, with_conv=False, fir=False, dimensions=3) + assert list(out.shape) == [1, Cout, *[2*P]*D] + # Downsample + layer = DownsampleLayer(Cin, Cout, with_conv=with_conv, fir=fir, dimensions=D) out = layer(x) - assert list(out.shape) == [1, 1, 4, 4, 4] - - -def test_stylegan_conv_shape(): - x = torch.randn(1, 1, 8, 8) - conv = StyleGANConv(in_ch=1, out_ch=3, kernel=3, up=False, down=False, use_bias=True, kernel_init=default_init(), dimensions=2) + assert list(out.shape) == [1, Cout, *[P//2]*D] + + +@pytest.mark.parametrize("D", [1, 2, 3]) +@pytest.mark.parametrize("P", [4, 8]) +@pytest.mark.parametrize("Cin", [1, 2]) +@pytest.mark.parametrize("Cout", [3, 4]) +@pytest.mark.parametrize("up_down", [(False, False), (True, False), (False, True)]) +def test_stylegan_conv_shape(D, P, Cin, Cout, up_down): + up = up_down[0] + down = up_down[1] + conv = StyleGANConv( + in_ch=Cin, + out_ch=Cout, + kernel=3, + up=up, + down=down, + use_bias=True, + kernel_init=default_init(), + dimensions=D + ) + + x = torch.randn(1, Cin, *[P]*D) out = conv(x) - assert list(out.shape) == [1, 3, 8, 8] + Pout = P*2 if up else P//2 if down else P + assert list(out.shape) == [1, Cout, *[Pout]*D] - conv = StyleGANConv(in_ch=1, out_ch=3, kernel=3, up=True, down=False, use_bias=True, kernel_init=default_init(), dimensions=2) - out = conv(x) - assert list(out.shape) == [1, 3, 16, 16] - - conv = StyleGANConv(in_ch=1, out_ch=3, kernel=3, up=False, down=True, use_bias=True, kernel_init=default_init(), dimensions=2) - out = conv(x) - assert list(out.shape) == [1, 3, 4, 4] - - x = torch.randn(1, 1, 8) - conv = StyleGANConv(in_ch=1, out_ch=3, kernel=3, up=False, down=False, use_bias=True, kernel_init=default_init(), dimensions=1) - out = conv(x) - assert list(out.shape) == [1, 3, 8] - - conv = StyleGANConv(in_ch=1, out_ch=3, kernel=3, up=True, down=False, use_bias=True, kernel_init=default_init(), dimensions=1) - out = conv(x) - assert list(out.shape) == [1, 3, 16] - - conv = StyleGANConv(in_ch=1, out_ch=3, kernel=3, up=False, down=True, use_bias=True, kernel_init=default_init(), dimensions=1) - out = conv(x) - assert list(out.shape) == [1, 3, 4] - - x = torch.randn(1, 1, 8, 8, 8) - conv = StyleGANConv(in_ch=1, out_ch=3, kernel=3, up=False, down=False, use_bias=True, kernel_init=default_init(), dimensions=3) - out = conv(x) - assert list(out.shape) == [1, 3, 8, 8, 8] - - conv = StyleGANConv(in_ch=1, out_ch=3, kernel=3, up=True, down=False, use_bias=True, kernel_init=default_init(), dimensions=3) - out = conv(x) - assert list(out.shape) == [1, 3, 16, 16, 16] - - conv = StyleGANConv(in_ch=1, out_ch=3, kernel=3, up=False, down=True, use_bias=True, kernel_init=default_init(), dimensions=3) 
- out = conv(x) - assert list(out.shape) == [1, 3, 4, 4, 4] -  def test_stylegan_conv_resample_kernel(): x = torch.ones(1, 1, 8, 8) diff --git a/tests/test_training.py b/tests/test_training.py index 8e482fb..0e46704 100644 --- a/tests/test_training.py +++ b/tests/test_training.py @@ -16,6 +16,7 @@ def __init__( conditions=None, condition_channels=None, condition_embeddings=None, + batch_size=None, **kwargs ): self.size = size @@ -24,34 +25,35 @@ def __init__( self.conditions = conditions self.condition_channels = condition_channels self.condition_embeddings = condition_embeddings + self.B = batch_size or 1 def __len__(self): return self.size def __getitem__(self, index): - - x = [torch.randn(self.C, *self.D),] + # Simulate a batch dimension for the batch_size=None case, where no dataloader is used + x = [torch.randn(self.B, self.C, *self.D),] if self.conditions: c_idx = 0 e_idx = 0 for condition in self.conditions: if condition == "time_continuous": - c = torch.randn(1) + c = torch.randn(self.B, 1) x.append(c) elif condition == "time_discrete": tokens = self.condition_embeddings[e_idx] - c = torch.randint(tokens, (1,)) + c = torch.randint(tokens, (self.B, 1,)) x.append(c) e_idx += 1 elif condition == "time_vector": - c = torch.randn(self.condition_channels[c_idx]) + c = torch.randn(self.B, self.condition_channels[c_idx]) x.append(c) c_idx += 1 elif condition == "input_tensor": - c = torch.randn(self.condition_channels[c_idx], *self.D) + c = torch.randn(self.B, self.condition_channels[c_idx], *self.D) x.append(c) c_idx += 1 - return x + return [x_.squeeze(0) for x_ in x] # Remove the batch dimension for dataloader @pytest.mark.parametrize("models_to_keep", [1, 2]) @pytest.mark.parametrize("conditions", [ @@ -64,17 +66,18 @@ def __getitem__(self, index): {"sde": "vp", "schedule": "cosine", "beta_max": 100} ]) @pytest.mark.parametrize("Net", [MLP, NCSNpp, DDPM]) -def test_training_score_model(conditions, sde, Net, models_to_keep, tmp_path, capsys): +@pytest.mark.parametrize("B", [None, 2]) # Make sure we don't create a dataloader if batch_size is None +def test_training_score_model(B, conditions, sde, Net, models_to_keep, tmp_path, capsys): condition_type, embeddings, channels = conditions - hp = { + hp = { # Hyperparameters for the dataset "ch_mult": (1, 1), "nf": 2, "conditions": condition_type, "condition_channels": channels, "condition_embeddings": embeddings, + "batch_size": B # If B is None, we use the Dataloader to handle the batch size (which we set to a default below) } E = 3 # epochs - B = 2 C = 3 N = 4 D = [] if Net == MLP else [4, 4] @@ -83,6 +86,11 @@ def test_training_score_model(conditions, sde, Net, models_to_keep, tmp_path, ca model = ScoreModel(net, **sde) path = tmp_path / "test" + # The fit method's batch_size argument is the inverse of the Dataset's: when the dataset already yields batches, pass batch_size=None so no dataloader is created + if B is None: + B = 2 + elif isinstance(B, int): + B = None losses = model.fit(dataset, batch_size=B, epochs=E, path=path, checkpoint_every=1, models_to_keep=models_to_keep) print(losses) From 143d6e70d20358424fcdfbe1f4e1aeff762c125f Mon Sep 17 00:00:00 2001 From: AlexandreAdam Date: Wed, 28 Aug 2024 23:58:34 -0400 Subject: [PATCH 03/40] Updated Euler-Maruyama sampler and added a denoise method to the SBM class
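This commit makes sample() draw its own initial condition from the high-temperature prior at t=T, while the new denoise() method integrates the reverse-time SDE from a user-supplied (t, xt) and applies the Tweedie formula at the end. A minimal usage sketch (shapes, step counts and SDE settings are illustrative, mirroring the tests added in this patch):

    import torch
    from score_models import ScoreModel, NCSNpp

    net = NCSNpp(1, nf=8, ch_mult=(2, 2))
    model = ScoreModel(net, sigma_min=1e-2, sigma_max=10)

    # Fresh samples: the initial condition is drawn internally from the prior at t=T
    x = model.sample(shape=(5, 1, 16, 16), steps=100)

    # Denoising: start the reverse-time SDE from an existing noisy batch at time t
    t = 0.5 * torch.ones(5)
    xt = torch.randn(5, 1, 16, 16)
    x0 = model.denoise(t, xt, steps=100)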
--- score_models/sbm/score_model.py | 52 +++++++++++++++++++++++++++--- score_models/sde/euler_maruyama.py | 26 +++++++-------- tests/test_score_models.py | 12 ++++++- 3 files changed, 72 insertions(+), 18 deletions(-) diff --git a/score_models/sbm/score_model.py b/score_models/sbm/score_model.py index 52b3cbc..9653f3f 100644 --- a/score_models/sbm/score_model.py +++ b/score_models/sbm/score_model.py @@ -114,8 +114,8 @@ def tweedie(self, t: Tensor, x: Tensor, *args) -> Tensor: @torch.no_grad() def sample( self, - shape: tuple, # TODO grab dimensions from model hyperparams if available - steps: int, + shape: tuple, + steps: int, *args, likelihood_score: Optional[Callable] = None, guidance_factor: float = 1., @@ -124,13 +124,21 @@ ) -> Tensor: """ Sample from the score model by solving the reverse-time SDE using the Euler-Maruyama method. + + The initial condition is sampled from the high temperature prior at time t=T. + To denoise a sample from some time t, use the denoise or tweedie method instead. + """ batch_size, *D = shape likelihood_score = likelihood_score or (lambda t, x: torch.zeros_like(x)) score = lambda t, x: self.score(t, x, *args) + guidance_factor * likelihood_score(t, x) + + # Sample from high temperature boundary condition + xT = self.sde.prior(D).sample([B]) + # Solve the reverse-time SDE t, x = euler_maruyama_method( - batch_size=batch_size, - dimensions=D, + t=self.sde.T, + xt=xT, steps=steps, sde=self.sde, score=score, @@ -139,3 +147,39 @@ if denoise_last_step: x = self.tweedie(t, x, *args) return x + + @torch.no_grad() + def denoise( + self, + t: Tensor, + xt: Tensor, + steps: int, + *args, + epsilon: Optional[float] = None, + likelihood_score: Optional[Callable] = None, + guidance_factor: float = 1., + stopping_factor: float = np.inf + ): + """ + Denoise a given sample xt at time t using the score model. + + The Tweedie formula is applied after the Euler-Maruyama solver has integrated the reverse-time SDE. + """ + likelihood_score = likelihood_score or (lambda t, x: torch.zeros_like(x)) + score = lambda t, x: self.score(t, x, *args) + guidance_factor * likelihood_score(t, x) + + # Solve the reverse-time SDE + t, x = euler_maruyama_method( + t=t, + xt=xt, + steps=steps, + sde=self.sde, + score=score, + epsilon=epsilon, + stopping_factor=stopping_factor + ) + # Apply the Tweedie formula + x = self.tweedie(t, x, *args) + return x diff --git a/score_models/sde/euler_maruyama.py b/score_models/sde/euler_maruyama.py index f183139..52cd344 100644 --- a/score_models/sde/euler_maruyama.py +++ b/score_models/sde/euler_maruyama.py @@ -11,13 +11,12 @@ __all__ = ["euler_maruyama_method"] def euler_maruyama_method( - batch_size: int, - dimensions: tuple[int], + t: Union[Tensor, float], + xt: Tensor, steps: int, sde: SDE, score: Optional[Callable[Tensor, Tensor]] = None, - T: Optional[Union[Tensor, float]] = None, - epsilon: Optional[float] = None , + epsilon: Optional[float] = None, guidance_factor: float = 1., stopping_factor: float = 1e2, denoise_last_step: bool = True, @@ -27,22 +26,23 @@ An Euler-Maruyama integration of an SDE specified by the score function. Args: - batch_size: Number of samples to draw - dimensions: Shape of the tensor to sample steps: Number of Euler-Maruyama steps to perform score: Score function of the reverse-time SDE likelihood_score_fn: Add an additional drift to the sampling for posterior sampling.
Must have the signature f(t, x) stopping_factor: When magnitude of the score is larger than stopping_factor * sqrt(D), stop the sampling """ - B = batch_size - D = dimensions - T = T or sde.T + B, *D = xt.shape + if isinstance(t, float): + t = torch.ones(B).to(device) * t + if t.shape[0] == 1: + t = t.repeat(B).to(device) + elif not all([t[i].item() == t[0].item() for i in range(B)]): + raise ValueError("All times must be the same for each batch element, the more general case is not implemented yet.") + T = t[0].cpu().item() epsilon = epsilon or sde.epsilon - score = score or (lambda t, x: torch.zeros_like(x)) - - x = sde.prior(D).sample([B]).to(device) + score = score or (lambda t, x: torch.zeros_like(xt)) + x = xt.clone() dt = -(T - epsilon) / steps - t = torch.ones(B).to(device) * T for _ in (pbar := tqdm(range(steps))): pbar.set_description(f"Euler-Maruyama | t = {t[0].item():.1f} | sigma(t) = {sde.sigma(t)[0].item():.1e}" f"| x.std() ~ {x.std().item():.1e}") diff --git a/tests/test_score_models.py b/tests/test_score_models.py index 007f144..4ec9cc8 100644 --- a/tests/test_score_models.py +++ b/tests/test_score_models.py @@ -92,7 +92,7 @@ def test_log_likelihood(): assert ll.shape == torch.Size([3]) -def test_sample_fn(): +def test_sample_method(): net = NCSNpp(1, nf=8, ch_mult=(2, 2)) score = ScoreModel(net, sigma_min=1e-2, sigma_max=10) score.sample(shape=[5, 1, 16, 16], steps=10) @@ -101,6 +101,16 @@ def test_sample_fn(): score = ScoreModel(net, beta_min=1e-2, beta_max=10) score.sample(shape=[5, 1, 16, 16], steps=10) + +@pytest.mark.parametrize("epsilon", [None, 1e-3, 0.1]) +def test_denoise_method(epsilon): + net = NCSNpp(1, nf=8, ch_mult=(2, 2)) + score = ScoreModel(net, sigma_min=1e-2, sigma_max=10) + B = 5 + t = torch.rand(1) * torch.ones(B) + x = torch.randn(B, 1, 16, 16) + score.denoise(t, x, steps=10, epsilon=epsilon) + @pytest.mark.parametrize("anneal_residuals", [True, False]) def test_slic_score(anneal_residuals): B = 3 From 14c0830852030d09c3b443b397a65297e9e7f4c7 Mon Sep 17 00:00:00 2001 From: AlexandreAdam Date: Thu, 29 Aug 2024 00:02:39 -0400 Subject: [PATCH 04/40] Fixed a bug introduced in the sample method --- score_models/sbm/score_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/score_models/sbm/score_model.py b/score_models/sbm/score_model.py index 9653f3f..c0fa8b6 100644 --- a/score_models/sbm/score_model.py +++ b/score_models/sbm/score_model.py @@ -129,7 +129,7 @@ def sample( To denoise a sample from some time t, use the denoise or tweedie method instead. 
""" - batch_size, *D = shape + B, *D = shape likelihood_score = likelihood_score or (lambda t, x: torch.zeros_like(x)) score = lambda t, x: self.score(t, x, *args) + guidance_factor * likelihood_score(t, x) From 1905ccc09cfbdcc55ec173128b26e4773f94bd1e Mon Sep 17 00:00:00 2001 From: AlexandreAdam Date: Mon, 2 Sep 2024 01:06:03 -0400 Subject: [PATCH 05/40] Updated and tested new feature so that custom loss can implement schedules based on the global step of the training --- score_models/sbm/hessian_model.py | 2 +- score_models/sbm/score_model.py | 2 +- score_models/trainer.py | 2 +- tests/test_hessian_model.py | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/score_models/sbm/hessian_model.py b/score_models/sbm/hessian_model.py index b4cf954..cab86b2 100644 --- a/score_models/sbm/hessian_model.py +++ b/score_models/sbm/hessian_model.py @@ -47,7 +47,7 @@ def __init__( def forward(self, t: Tensor, x: Tensor, *args): return self.diagonal(t, x, *args) - def loss(self, x: Tensor, *args): + def loss(self, x: Tensor, *args, step: int) -> Tensor: return self._loss(self, x, *args) def reparametrized_diagonal(self, t: Tensor, x: Tensor, *args): diff --git a/score_models/sbm/score_model.py b/score_models/sbm/score_model.py index c0fa8b6..30134cc 100644 --- a/score_models/sbm/score_model.py +++ b/score_models/sbm/score_model.py @@ -35,7 +35,7 @@ def __init__( else: self.hessian_trace_model = self.divergence - def loss(self, x, *args) -> Tensor: + def loss(self, x, *args, step: int) -> Tensor: return dsm(self, x, *args) def reparametrized_score(self, t, x, *args) -> Tensor: diff --git a/score_models/trainer.py b/score_models/trainer.py index 058cfa0..ebd67aa 100644 --- a/score_models/trainer.py +++ b/score_models/trainer.py @@ -163,7 +163,7 @@ def train_epoch(self): x = self.preprocessing(x) # Training step self.optimizer.zero_grad() - loss = self.model.loss(x, *args) + loss = self.model.loss(x, *args, step=self.global_step) loss.backward() if self.global_step < self.warmup: for g in self.optimizer.param_groups: diff --git a/tests/test_hessian_model.py b/tests/test_hessian_model.py index 47a07b1..6a9c390 100644 --- a/tests/test_hessian_model.py +++ b/tests/test_hessian_model.py @@ -28,9 +28,9 @@ def test_save_load_hessian_diagonal(loss, tmp_path): # Check that sbm is loaded correctly for the loss function assert torch.allclose(model.score_model(t, x), new_model.score_model(t, x), atol=1e-3) torch.manual_seed(42) - loss1 = model.loss(x) + loss1 = model.loss(x, step=1) torch.manual_seed(42) - loss2 = new_model.loss(x) + loss2 = new_model.loss(x, step=1) # Give it a loose tolerance, not sure why they are differen just yet assert torch.allclose(loss1, loss2, atol=4) From e07f21c673b778d225473dda7f0ac7e7608fc0a9 Mon Sep 17 00:00:00 2001 From: AlexandreAdam Date: Tue, 3 Sep 2024 13:47:16 -0400 Subject: [PATCH 06/40] Updated a comment in LoRA --- score_models/sbm/lora.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/score_models/sbm/lora.py b/score_models/sbm/lora.py index fcb4383..2774f57 100644 --- a/score_models/sbm/lora.py +++ b/score_models/sbm/lora.py @@ -96,9 +96,10 @@ def save( if not os.path.exists(base_sbm_path): super().save(base_sbm_path, create_path=True) - # Save the LoRA weights and the optimizer associated with them + # Save the optimizer associated with them if optimizer: # Save optimizer first since checkpoint number is inferred from number of checkpoint files save_checkpoint(model=optimizer, path=path, key="optimizer", create_path=create_path) + # 
From e07f21c673b778d225473dda7f0ac7e7608fc0a9 Mon Sep 17 00:00:00 2001 From: AlexandreAdam Date: Tue, 3 Sep 2024 13:47:16 -0400 Subject: [PATCH 06/40] Updated a comment in LoRA --- score_models/sbm/lora.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/score_models/sbm/lora.py b/score_models/sbm/lora.py index fcb4383..2774f57 100644 --- a/score_models/sbm/lora.py +++ b/score_models/sbm/lora.py @@ -96,9 +96,10 @@ def save( if not os.path.exists(base_sbm_path): super().save(base_sbm_path, create_path=True) - # Save the LoRA weights and the optimizer associated with them + # Save the optimizer associated with them if optimizer: # Save optimizer first since checkpoint number is inferred from number of checkpoint files save_checkpoint(model=optimizer, path=path, key="optimizer", create_path=create_path) + # Save the LoRA adapters only (takes less space than saving the whole merged model) save_checkpoint(model=self.lora_net, path=path, key="lora_checkpoint", create_path=create_path) self.save_hyperparameters(path) else: From 73de468672c0f9618ee761d795a5e4cc7ce882a1 Mon Sep 17 00:00:00 2001 From: AlexandreAdam Date: Tue, 3 Sep 2024 21:17:48 -0400 Subject: [PATCH 07/40] Cleanup LoRA class --- score_models/sbm/lora.py | 43 ++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/score_models/sbm/lora.py b/score_models/sbm/lora.py index 2774f57..95f4c97 100644 --- a/score_models/sbm/lora.py +++ b/score_models/sbm/lora.py @@ -13,6 +13,7 @@ from ..utils import DEVICE from ..save_load_utils import save_checkpoint, load_checkpoint + __all__ = ["LoRAScoreModel"] @@ -48,29 +49,33 @@ def __init__( for param in self.net.parameters(): param.requires_grad = False - # Construct the LoRA model around the base net - if target_modules is None: - if isinstance(self.net, NCSNpp): - target_modules = ["Dense_0", "conv"] - else: - target_modules = list(set(get_specific_layer_names(self.net))) - print(f"Automatically detecting target modules {' '.join(target_modules)}") - if lora_rank is None: - raise ValueError("LoRA rank must be provided when initializing from a base SBM.") - lora_config = LoraConfig( - r=lora_rank, - lora_alpha=lora_rank, - init_lora_weights="gaussian", - target_modules=target_modules - ) - self.lora_net = get_peft_model(copy.deepcopy(self.net), lora_config) + # Construct the LoRA model around the base net + self.lora_net = self._make_lora_adapter(lora_rank, target_modules) + print(f"Initialized LoRA weights with rank {lora_rank}") self.lora_net.print_trainable_parameters() self.hyperparameters["lora_rank"] = lora_rank self.hyperparameters["target_modules"] = target_modules - print(f"Initialized LoRA weights with rank {lora_rank}") else: # Base model and LoRA initialized with the self.load method super().__init__(path=path, checkpoint=checkpoint, device=device, **hyperparameters) + + def _make_lora_adapter(self, lora_rank: int, target_modules: Optional[str] = None): + """ + Create a LoRA adapter to be injected into the model. + """ + if target_modules is None: + if isinstance(self.net, NCSNpp): + target_modules = ["Dense_0", "conv"] + else: + target_modules = list(set(get_specific_layer_names(self.net))) + print(f"Automatically detecting target modules {' '.join(target_modules)}") + lora_config = LoraConfig( + r=lora_rank, + lora_alpha=lora_rank, + init_lora_weights="gaussian", + target_modules=target_modules + ) + return get_peft_model(copy.deepcopy(self.net), lora_config) def reparametrized_score(self, t, x, *args) -> Tensor: """ @@ -106,8 +111,8 @@ def save( raise ValueError("No path provided to save the model.
Please provide a valid path or initialize the model with a path.") def load( - self, - checkpoint: Optional[int] = None, + self, + checkpoint: Optional[int] = None, raise_error: bool = True ): if self.path is None: From 65a7170461b459359a20360923e80b9562de6b05 Mon Sep 17 00:00:00 2001 From: AlexandreAdam Date: Wed, 4 Sep 2024 10:04:00 -0400 Subject: [PATCH 08/40] Added merge and unload method --- score_models/sbm/lora.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/score_models/sbm/lora.py b/score_models/sbm/lora.py index 95f4c97..8259cd6 100644 --- a/score_models/sbm/lora.py +++ b/score_models/sbm/lora.py @@ -127,3 +127,9 @@ def load( self.loaded_checkpoint = load_checkpoint(model=self, checkpoint=checkpoint, path=self.path, key="lora_checkpoint", raise_error=raise_error) print(f"Loaded LoRA weights with rank {self.hyperparameters['lora_rank']}") self.lora_net.print_trainable_parameters() + + def merge_and_unload(self): + """ + Merge the LoRA weights with the base SBM and unload the LoRA weights. + """ + return ScoreModel(net=self.lora_net.merge_and_unload(), **self.hyperparameters) From 27789c886ffa30179a48c99be2e2f55db2f654f9 Mon Sep 17 00:00:00 2001 From: AlexandreAdam Date: Wed, 4 Sep 2024 10:06:49 -0400 Subject: [PATCH 09/40] Added test for merge and unload method --- tests/test_lora_sbm.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tests/test_lora_sbm.py b/tests/test_lora_sbm.py index 8005cf8..4fbcb00 100644 --- a/tests/test_lora_sbm.py +++ b/tests/test_lora_sbm.py @@ -39,3 +39,26 @@ def test_lora_sbm(net, sde, lora_rank, tmp_path): assert torch.allclose(sbm.lora_net(t, x), new_sbm.lora_net(t, x)) assert torch.allclose(sbm.lora_net(t, x), sbm.reparametrized_score(t, x)) assert torch.allclose(new_sbm.lora_net(t, x), new_sbm.reparametrized_score(t, x)) + + +def test_merge_and_unload(tmp_path): + # Base SBM + model = ScoreModel(MLP(10), sde="vp") + + # LoRA SBM + lora_rank = 10 + lora_model = LoRAScoreModel(model, lora_rank=lora_rank) + + # Check that merged model is a ScoreModel + base_model = lora_model.merge_and_unload() + assert isinstance(base_model, ScoreModel) + + # Should also be equivalent to the original model per LoRA initialization strategy + t = torch.rand(2) + x = torch.randn(2, 10) + assert torch.allclose(base_model(t, x), model(t, x)) + + # Check that the LoRA weights are no longer present + for name, p in base_model.named_parameters(): + assert "lora" not in name + From d0f521a36d8fb6701efe8460ee51d6aad0b55302 Mon Sep 17 00:00:00 2001 From: AlexandreAdam Date: Wed, 4 Sep 2024 13:57:43 -0400 Subject: [PATCH 10/40] Fixed a problem introduced in the Combine layer. 
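The in_ch == out_ch restriction on method="sum" was spurious: Combine first passes x through its 1x1 convolution (Conv_0, mapping in_ch to out_ch), so the sum is taken between two out_ch tensors whatever in_ch is, as the shape assertions in test_combine already exercise. The test asserting the old error is removed below. A small sketch of the now-valid case (shape values illustrative):

    import torch
    from score_models.layers import Combine

    layer = Combine(in_ch=4, out_ch=6, method="sum", dimensions=2)
    x = torch.randn(1, 4, 8, 8)  # projected to 6 channels by Conv_0
    y = torch.randn(1, 6, 8, 8)
    out = layer(x, y)            # shape [1, 6, 8, 8]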
--- score_models/layers/combine.py | 3 --- tests/test_layers.py | 4 ---- tests/test_lora_sbm.py | 1 - 3 files changed, 8 deletions(-) diff --git a/score_models/layers/combine.py b/score_models/layers/combine.py index 5fab4ff..fccda27 100644 --- a/score_models/layers/combine.py +++ b/score_models/layers/combine.py @@ -11,9 +11,6 @@ def __init__(self, in_ch, out_ch, method='cat', dimensions:int = 2): self.Conv_0 = conv1x1(in_ch, out_ch, dimensions=dimensions) if method not in ["cat", "sum"]: raise ValueError(f'Method {method} not recognized for the Combine layer.') - if method == 'sum': - if in_ch != out_ch: - raise ValueError('Method sum requires in_ch == out') self.method = method def forward(self, x, y): diff --git a/tests/test_layers.py b/tests/test_layers.py index a8b3b90..a864649 100644 --- a/tests/test_layers.py +++ b/tests/test_layers.py @@ -80,10 +80,6 @@ def test_combine(D, P, Cin, Cout, method): assert list(out.shape) == [1, Cout, *[P]*D] def test_combine_errors(): - with pytest.raises(ValueError) as e: - layer = Combine(in_ch=4, out_ch=6, method="sum", dimensions=2) - assert "Method sum requires in_ch == out" in str(e) - with pytest.raises(ValueError) as e: layer = Combine(in_ch=4, out_ch=6, method="not_a_method", dimensions=2) assert "Method not_a_method not recognized for the Combine layer." in str(e) diff --git a/tests/test_lora_sbm.py b/tests/test_lora_sbm.py index 4fbcb00..d01626a 100644 --- a/tests/test_lora_sbm.py +++ b/tests/test_lora_sbm.py @@ -61,4 +61,3 @@ def test_merge_and_unload(tmp_path): # Check that the LoRA weights are no longer present for name, p in base_model.named_parameters(): assert "lora" not in name - From abb2f4119935646ca17c278b33fac091ad9a97a5 Mon Sep 17 00:00:00 2001 From: "Connor Stone, PhD" Date: Thu, 12 Sep 2024 17:04:45 -0700 Subject: [PATCH 11/40] feat: adding simple models and solvers (#8) * Adding solvers and simple score models * Adding simple model descriptions * Integrate solvers into score_model sample * Fix init for simple models * Add unit tests for simple models * Solver now handles log_p, I think * sde step now doesn't include x * Address review comments * same update for sde solver * fix import * kwargs now pass through score models * lower case names * ode now can get P(xt) * rename ode solver to avoid conflict * avoid snake case * housekeeping simple_models to analytic_models and clean sde ode solvers * clean up solver step size * refactor solver to propagate args * clean up solver creation in ScoreModel * adding solver docstrings * update analytic model docstrings * more on passing args. getting tests to run * solver class can now construct its subclasses * new unit tests for solvers * remove comment * add hook function to solvers * update docstring * propagate kwargs in sde * Housekeeping, add type hints * merge analytic_models into sbm module * conv likelihood A may be tensor or callable * return tweedie to score_model * analytic models handle mu_t * rename t_scale to sigma_t for consistency * rename methods * return denoise method to score_model. solver handle tensor t input * add check that t input is uniform * minor updates from discussion, tweedie, joint, and delta logp * clean up conv likelihood inputs * remove unnecessary part of conv likelihood test after update * MVG may now also be diagonal * Fix MVG bug * add MVG score model which computes score analytically instead of using autograd * allow users to set time_steps for solvers * update docstring * nice progress bar * remove unused import
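One design note from this change set: the Solver base class can now construct its subclasses, so an integrator can be selected by name at the call site. The actual implementation lives in score_models/solver/solver.py and is not reproduced here; the snippet below is only a minimal sketch of that dispatch pattern, with hypothetical class names:

    class Solver:
        def __new__(cls, name: str = None, *args, **kwargs):
            # When called on the base class with a name, hand off to the matching subclass
            if cls is Solver and name is not None:
                subclasses = {sub.__name__.lower(): sub for sub in cls.__subclasses__()}
                cls = subclasses[name.lower()]
            return super().__new__(cls)

    class EulerODE(Solver):  # hypothetical subclass
        def __init__(self, name=None, steps=100):
            self.steps = steps

    solver = Solver("eulerode", steps=10)  # instance of EulerODE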
--- score_models/__init__.py | 1 + score_models/architectures/__init__.py | 1 + score_models/architectures/ddpm.py | 101 ++++---- score_models/architectures/encoder.py | 151 ++++++------ score_models/architectures/mlp.py | 118 ++++----- score_models/architectures/ncsnpp.py | 320 ++++++++++++++----------- score_models/architectures/null_net.py | 12 + score_models/sbm/__init__.py | 8 + score_models/sbm/base.py | 147 ++++++------ score_models/sbm/conv_likelihood.py | 123 ++++++++++ score_models/sbm/energy_model.py | 57 +++-- score_models/sbm/grf.py | 60 +++++ score_models/sbm/interpolated.py | 75 ++++++ score_models/sbm/joint.py | 110 +++++++++ score_models/sbm/mvg.py | 239 ++++++++++++++++++ score_models/sbm/sample.py | 64 +++++ score_models/sbm/score_model.py | 247 +++++++++---------- score_models/sbm/tweedie.py | 62 +++++ score_models/sde/sde.py | 56 +++-- score_models/sde/tsvesde.py | 78 +++--- score_models/sde/vesde.py | 33 ++- score_models/sde/vpsde.py | 69 ++++-- score_models/solver/__init__.py | 3 + score_models/solver/ode.py | 205 ++++++++++++++++ score_models/solver/sde.py | 169 +++++++++++++ score_models/solver/solver.py | 142 +++++++++++ tests/test_analytic_models.py | 314 ++++++++++++++++++++++++ tests/test_score_models.py | 18 +- tests/test_solvers.py | 121 ++++++++++ 29 files changed, 2460 insertions(+), 644 deletions(-) create mode 100644 score_models/architectures/null_net.py create mode 100644 score_models/sbm/conv_likelihood.py create mode 100644 score_models/sbm/grf.py create mode 100644 score_models/sbm/interpolated.py create mode 100644 score_models/sbm/joint.py create mode 100644 score_models/sbm/mvg.py create mode 100644 score_models/sbm/sample.py create mode 100644 score_models/sbm/tweedie.py create mode 100644 score_models/solver/__init__.py create mode 100644 score_models/solver/ode.py create mode 100644 score_models/solver/sde.py create mode 100644 score_models/solver/solver.py create mode 100644 tests/test_analytic_models.py create mode 100644 tests/test_solvers.py diff --git a/score_models/__init__.py b/score_models/__init__.py index 120c815..4fbde7b 100644 --- a/score_models/__init__.py +++ b/score_models/__init__.py @@ -2,3 +2,4 @@ from .architectures import * from .sde import * from .losses import * +from .solver import * diff --git a/score_models/architectures/__init__.py b/score_models/architectures/__init__.py index daf9a4a..fa4b5a2 100644 --- a/score_models/architectures/__init__.py +++ b/score_models/architectures/__init__.py @@ -2,3 +2,4 @@ from .ddpm import * from .mlp import * from .encoder import * +from .null_net import * diff --git a/score_models/architectures/ddpm.py b/score_models/architectures/ddpm.py index 74f4c9d..cf0bcf8 100644 --- a/score_models/architectures/ddpm.py +++ b/score_models/architectures/ddpm.py @@ -6,40 +6,41 @@ from ..utils import get_activation from ..layers import ( - DDPMResnetBlock, - SelfAttentionBlock, - GaussianFourierProjection, - UpsampleLayer, - DownsampleLayer, - conv3x3 - ) + DDPMResnetBlock, + SelfAttentionBlock, + GaussianFourierProjection, + UpsampleLayer, +
DownsampleLayer, + conv3x3, +) from .conditional_branch import ( - validate_conditional_arguments, - conditional_branch, - merge_conditional_time_branch, - merge_conditional_input_branch - ) + validate_conditional_arguments, + conditional_branch, + merge_conditional_time_branch, + merge_conditional_input_branch, +) __all__ = ["DDPM"] + class DDPM(nn.Module): def __init__( - self, - channels: int = 1, - dimensions: int = 2, - nf: int = 128, - activation_type: str = "relu", - ch_mult: tuple[int] = (2, 2), - num_res_blocks: int = 2, - resample_with_conv: bool = True, - dropout: float = 0., - attention: bool = True, - fourier_scale: float = 30., - conditions: Optional[tuple[str]] = None, - condition_embeddings: Optional[tuple[int]] = None, - condition_channels: Optional[int] = None, - **kwargs - ): + self, + channels: int = 1, + dimensions: int = 2, + nf: int = 128, + activation_type: str = "relu", + ch_mult: tuple[int] = (2, 2), + num_res_blocks: int = 2, + resample_with_conv: bool = True, + dropout: float = 0.0, + attention: bool = True, + fourier_scale: float = 30.0, + conditions: Optional[tuple[str]] = None, + condition_embeddings: Optional[tuple[int]] = None, + condition_channels: Optional[int] = None, + **kwargs, + ): """ Deep Diffusion Probabilistic Model (DDPM) implementation. @@ -64,7 +65,9 @@ def __init__( """ super().__init__() if dimensions not in [1, 2, 3]: - raise ValueError(f"Input must have 1, 2, or 3 spatial dimensions to use this architecture, received {dimensions}.") + raise ValueError( + f"Input must have 1, 2, or 3 spatial dimensions to use this architecture, received {dimensions}." + ) validate_conditional_arguments(conditions, condition_embeddings, condition_channels) self.conditioned = conditions is not None self.condition_type = conditions @@ -83,7 +86,7 @@ def __init__( "fourier_scale": fourier_scale, "conditions": conditions, "condition_embeddings": condition_embeddings, - "condition_channels": condition_channels + "condition_channels": condition_channels, } self.dimensions = dimensions self.act = act = get_activation(activation_type=activation_type) @@ -95,20 +98,22 @@ def __init__( # Prepare layers AttnBlock = partial(SelfAttentionBlock, dimensions=dimensions) - ResnetBlock = partial(DDPMResnetBlock, act=act, temb_dim=4 * nf, dropout=dropout, dimensions=dimensions) + ResnetBlock = partial( + DDPMResnetBlock, act=act, temb_dim=4 * nf, dropout=dropout, dimensions=dimensions + ) Downsample = partial(DownsampleLayer, dimensions=dimensions) Upsample = partial(UpsampleLayer, dimensions=dimensions) ########### Conditional branch ########### if self.conditioned: total_time_channels, total_input_channels = conditional_branch( - self, - time_branch_channels=nf, - input_branch_channels=channels, - condition_embeddings=condition_embeddings, - condition_channels=condition_channels, - fourier_scale=fourier_scale - ) # This method attach a Module list to self.conditional_branch + self, + time_branch_channels=nf, + input_branch_channels=channels, + condition_embeddings=condition_embeddings, + condition_channels=condition_channels, + fourier_scale=fourier_scale, + ) # This method attach a Module list to self.conditional_branch else: total_time_channels = nf total_input_channels = channels @@ -116,10 +121,10 @@ def __init__( ########### Time branch ########### modules = [ - GaussianFourierProjection(embed_dim=nf, scale=fourier_scale), - nn.Linear(total_time_channels, nf * 4), - nn.Linear(nf * 4, nf * 4) - ] + GaussianFourierProjection(embed_dim=nf, scale=fourier_scale), + 
nn.Linear(total_time_channels, nf * 4), + nn.Linear(nf * 4, nf * 4), + ] with torch.no_grad(): modules[1].bias.zero_() modules[2].bias.zero_() @@ -156,14 +161,16 @@ def __init__( modules.append(Upsample(in_ch=in_ch, with_conv=resample_with_conv)) assert not hs_c - modules.append(nn.GroupNorm(num_channels=in_ch, num_groups=max(min(in_ch // 4, 32), 1), eps=1e-6)) + modules.append( + nn.GroupNorm(num_channels=in_ch, num_groups=max(min(in_ch // 4, 32), 1), eps=1e-6) + ) modules.append(conv3x3(in_ch, channels, dimensions=dimensions)) self.all_modules = nn.ModuleList(modules) - def forward(self, t, x, *args): + def forward(self, t, x, *args, **kwargs): B, *D = x.shape modules = self.all_modules - + # Time branch m_idx = 0 temb = modules[m_idx](t) @@ -179,8 +186,8 @@ if self.conditioned: x = merge_conditional_input_branch(self, x, *args) # if self.fourier_features: - # ffeatures = self.fourier_features(x) - # x = torch.concat([x, ffeatures], axis=1) + # ffeatures = self.fourier_features(x) + # x = torch.concat([x, ffeatures], axis=1) # Downsampling block hs = [modules[m_idx](x)] m_idx += 1 diff --git a/score_models/architectures/encoder.py b/score_models/architectures/encoder.py index 4f41062..fb0bfb6 100644 --- a/score_models/architectures/encoder.py +++ b/score_models/architectures/encoder.py @@ -3,50 +3,53 @@ import torch from torch import nn from .conditional_branch import ( - validate_conditional_arguments, - conditional_branch, - merge_conditional_time_branch, - merge_conditional_input_branch - ) + validate_conditional_arguments, + conditional_branch, + merge_conditional_time_branch, + merge_conditional_input_branch, +) from ..definitions import default_init from ..layers import Conv2dSame, ResnetBlockBigGANpp, GaussianFourierProjection from ..utils import get_activation -__all__ = ['Encoder'] +__all__ = ["Encoder"] class Encoder(nn.Module): """ - Function that ouputs latent representations of an 1D, 2D or 3D random variable - (i.e. shape = [D, C, *D] where D is the number of dimensions, - C is the number of channels and *D are the spatial dimensions) + Function that outputs latent representations of a 1D, 2D or 3D random variable + (i.e. shape = [D, C, *D] where D is the number of dimensions, + C is the number of channels and *D are the spatial dimensions) conditioned on time and possibly other variables. """
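For orientation, a quick construction sketch of the Encoder module (argument values are illustrative; the signature is the one shown in this diff):

    import torch
    from score_models import Encoder

    enc = Encoder(pixels=32, channels=1, latent_size=16, nf=8, ch_mult=(2, 2))
    t = torch.rand(4)
    x = torch.randn(4, 1, 32, 32)
    z = enc(t, x)  # latent codes of shape [4, 16]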
""" + def __init__( - self, - pixels: int, - channels: int, - latent_size: int, - input_kernel_size=7, # Kernel size of the first convolutional layer - nf: int = 64, # Base width of the convolutional layers - ch_mult: tuple[int] = (2, 2, 2, 2), # Channel multiplier for each level - num_res_blocks: int = 2, - activation : Literal["relu", "gelu", "leakyrelu", "sigmoid", "tanh", "silu"] = "silu", - output_kernel: int = 2, # Final layer is an average pooling layer with this kernel shape - hidden_layers: int = 1, - hidden_size: int = 256, - factor: int = 2, - fourier_scale: float = 16., - conditions : Optional[tuple[Literal["time_discrete", "time_continuous", "time_vector", "input_tensor"]]] = None, - condition_embeddings: Optional[tuple[int]] = None, - condition_channels: Optional[int] = None, - **kwargs - ): + self, + pixels: int, + channels: int, + latent_size: int, + input_kernel_size=7, # Kernel size of the first convolutional layer + nf: int = 64, # Base width of the convolutional layers + ch_mult: tuple[int] = (2, 2, 2, 2), # Channel multiplier for each level + num_res_blocks: int = 2, + activation: Literal["relu", "gelu", "leakyrelu", "sigmoid", "tanh", "silu"] = "silu", + output_kernel: int = 2, # Final layer is an average pooling layer with this kernel shape + hidden_layers: int = 1, + hidden_size: int = 256, + factor: int = 2, + fourier_scale: float = 16.0, + conditions: Optional[ + tuple[Literal["time_discrete", "time_continuous", "time_vector", "input_tensor"]] + ] = None, + condition_embeddings: Optional[tuple[int]] = None, + condition_channels: Optional[int] = None, + **kwargs + ): """ Function that ouputs latent representations of an 1D, 2D or 3D random variable of shape - shape = [D, C, *D] - where D is the number of dimensions, C is the number of channels and *D are the spatial dimensions). + shape = [D, C, *D] + where D is the number of dimensions, C is the number of channels and *D are the spatial dimensions). This network is conditioned on time and possitbly other variables. 
Parameters: @@ -86,41 +89,47 @@ def __init__( "fourier_scale": fourier_scale, "conditions": conditions, "condition_embeddings": condition_embeddings, - "condition_channels": condition_channels + "condition_channels": condition_channels, } - assert (output_kernel % 2 == 0) or (output_kernel == 1), "output_kernel must be an even number or equal to 1 (no average pooling at the end)" - assert pixels % 2**len(ch_mult) == 0, "pixels must be divisible by 2**len(ch_mult)" - + assert (output_kernel % 2 == 0) or ( + output_kernel == 1 + ), "output_kernel must be an even number or equal to 1 (no average pooling at the end)" + assert pixels % 2 ** len(ch_mult) == 0, "pixels must be divisible by 2**len(ch_mult)" + self.act = get_activation(activation) self.nf = nf self.num_res_blocks = num_res_blocks self.pixels = pixels self.channels = channels self.factor = factor - self._latent_pixels = pixels // factor**(len(ch_mult) + output_kernel//2) + self._latent_pixels = pixels // factor ** (len(ch_mult) + output_kernel // 2) self._latent_channels = int(nf * ch_mult[-1]) - assert self._latent_pixels > 0, "Network is too deep for the given input size and downsampling factor" + assert ( + self._latent_pixels > 0 + ), "Network is too deep for the given input size and downsampling factor" ### Conditional branch ### if self.conditioned: total_time_channels, total_input_channels = conditional_branch( - self, - time_branch_channels=nf, - input_branch_channels=channels, - condition_embeddings=condition_embeddings, - condition_channels=condition_channels, - fourier_scale=fourier_scale - ) # This method attach a Module list to self.conditional_branch + self, + time_branch_channels=nf, + input_branch_channels=channels, + condition_embeddings=condition_embeddings, + condition_channels=condition_channels, + fourier_scale=fourier_scale, + ) # This method attach a Module list to self.conditional_branch else: total_time_channels = nf total_input_channels = channels - + ### Time branch ### modules = [ - GaussianFourierProjection(embed_dim=nf, scale=fourier_scale), # Time embedding - nn.Linear(total_time_channels, nf * 4), # Combine time embedding with conditionals if any - nn.Linear(nf * 4, nf * 4) - ] + GaussianFourierProjection(embed_dim=nf, scale=fourier_scale), # Time embedding + nn.Linear( + total_time_channels, nf * 4 + ), # Combine time embedding with conditionals if any + nn.Linear(nf * 4, nf * 4), + ] with torch.no_grad(): modules[1].weight.data = default_init()(modules[1].weight.shape) modules[1].bias.zero_() @@ -136,25 +145,26 @@ def __init__( in_ch = out_ch = int(nf * ch_mult[i]) for j in range(self.num_res_blocks): if j < num_res_blocks - 1: - layers.append(ResnetBlockBigGANpp( - act=self.act, - in_ch=in_ch, - out_ch=out_ch, - temb_dim=4*nf - )) + layers.append( + ResnetBlockBigGANpp( + act=self.act, in_ch=in_ch, out_ch=out_ch, temb_dim=4 * nf + ) + ) else: - out_ch = int(nf * ch_mult[i+1]) if i+1 < len(ch_mult) else in_ch - layers.append(ResnetBlockBigGANpp( - act=self.act, - in_ch=in_ch, - out_ch=out_ch, - temb_dim=4*nf, - down=True, - factor=factor - )) + out_ch = int(nf * ch_mult[i + 1]) if i + 1 < len(ch_mult) else in_ch + layers.append( + ResnetBlockBigGANpp( + act=self.act, + in_ch=in_ch, + out_ch=out_ch, + temb_dim=4 * nf, + down=True, + factor=factor, + ) + ) self.input_branch = nn.ModuleList(layers) self.final_pooling_layer = nn.AvgPool2d(kernel_size=output_kernel) - + ### Latent encoder ### self._image_latent_size = self._latent_pixels * self._latent_pixels * self._latent_channels layers = [] @@ 
-164,16 +174,15 @@ def __init__( self.latent_branch = nn.ModuleList(layers) self.output_layer = nn.Linear(hidden_size, latent_size) - - def forward(self, t, x, *args): + def forward(self, t, x, *args, **kwargs): ############ Time branch ############ - temb = self.time_branch[0](t) # Gaussian Fourier Projection + temb = self.time_branch[0](t) # Gaussian Fourier Projection if self.conditioned: # Combine time embedding with conditionals if any temb = merge_conditional_time_branch(self, temb, *args) temb = self.time_branch[1](temb) - temb = self.time_branch[2](self.act(temb)) # pre activation convention - + temb = self.time_branch[2](self.act(temb)) # pre activation convention + ############ Input branch ############ if self.conditioned: # Combine input tensor with input tensors if any @@ -182,9 +191,9 @@ def forward(self, t, x, *args): for block in self.input_branch: h = block(h, temb) h = self.final_pooling_layer(h) - + ############ Latent encoder ############ - h = h.view(-1, self._image_latent_size) # flatten + h = h.view(-1, self._image_latent_size) # flatten for layer in self.latent_branch: h = self.act(layer(h)) return self.output_layer(h) diff --git a/score_models/architectures/mlp.py b/score_models/architectures/mlp.py index 1927f30..23121e4 100644 --- a/score_models/architectures/mlp.py +++ b/score_models/architectures/mlp.py @@ -3,16 +3,13 @@ import torch import torch.nn as nn -from ..layers import ( - GaussianFourierProjection, - ScaledAttentionLayer - ) +from ..layers import GaussianFourierProjection, ScaledAttentionLayer from .conditional_branch import ( validate_conditional_arguments, conditional_branch, merge_conditional_time_branch, - merge_conditional_input_branch - ) + merge_conditional_input_branch, +) from ..utils import get_activation __all__ = ["MLP"] @@ -20,23 +17,23 @@ class MLP(nn.Module): def __init__( - self, - channels: Optional[int] = None, - units: int = 100, - layers: int = 2, - time_branch_channels: int = 32, - time_branch_layers: int = 1, - fourier_scale: int = 16, - activation: int = "swish", - bottleneck: Optional[int] = None, - attention: bool = False, - nn_is_energy: bool = False, - output_activation: str = None, - conditions: Optional[Literal["discrete", "continuous", "vector", "tensor"]] = None, - condition_channels: Optional[tuple[int]] = None, - condition_embeddings: Optional[tuple[int]] = None, - **kwargs - ): + self, + channels: Optional[int] = None, + units: int = 100, + layers: int = 2, + time_branch_channels: int = 32, + time_branch_layers: int = 1, + fourier_scale: int = 16, + activation: int = "swish", + bottleneck: Optional[int] = None, + attention: bool = False, + nn_is_energy: bool = False, + output_activation: str = None, + conditions: Optional[Literal["discrete", "continuous", "vector", "tensor"]] = None, + condition_channels: Optional[tuple[int]] = None, + condition_embeddings: Optional[tuple[int]] = None, + **kwargs, + ): """ Multi-Layer Perceptron (MLP) neural network. @@ -73,54 +70,61 @@ def __init__( if "dimensions" in kwargs: channels = kwargs["dimensions"] else: - raise ValueError("You must provide a 'channels' argument to initialize the MLP architecture.") + raise ValueError( + "You must provide a 'channels' argument to initialize the MLP architecture." 
+ ) self.hyperparameters = { - "channels": channels, - "units": units, - "layers": layers, - "time_branch_channels": time_branch_channels, - "fourier_scale": fourier_scale, - "activation": activation, - "time_branch_layers": time_branch_layers, - "botleneck": bottleneck, - "attention": attention, - "nn_is_energy": nn_is_energy, - "output_activation": output_activation, - "conditions": conditions, - "condition_channels": condition_channels, - "condition_embeddings": condition_embeddings, - } + "channels": channels, + "units": units, + "layers": layers, + "time_branch_channels": time_branch_channels, + "fourier_scale": fourier_scale, + "activation": activation, + "time_branch_layers": time_branch_layers, + "botleneck": bottleneck, + "attention": attention, + "nn_is_energy": nn_is_energy, + "output_activation": output_activation, + "conditions": conditions, + "condition_channels": condition_channels, + "condition_embeddings": condition_embeddings, + } self.time_branch_layers = time_branch_layers self.layers = layers self.nn_is_energy = nn_is_energy if layers % 2 == 1: - print(f"Number of layers must be an even number for this architecture. Adding one more layer...") + print( + f"Number of layers must be an even number for this architecture. Adding one more layer..." + ) layers += 1 ########### Conditional branch ########### if self.conditioned: total_time_channels, total_input_channels = conditional_branch( - self, - time_branch_channels=time_branch_channels, - input_branch_channels=channels, - condition_embeddings=condition_embeddings, - condition_channels=condition_channels, - fourier_scale=fourier_scale - ) # This method attach a Module list to self.conditional_branch + self, + time_branch_channels=time_branch_channels, + input_branch_channels=channels, + condition_embeddings=condition_embeddings, + condition_channels=condition_channels, + fourier_scale=fourier_scale, + ) # This method attach a Module list to self.conditional_branch else: total_time_channels = time_branch_channels total_input_channels = channels ######################################### - + ########### Time branch ########### t_dim = time_branch_channels - modules = [GaussianFourierProjection(t_dim, scale=fourier_scale), # Time embedding - nn.Linear(total_time_channels, t_dim) # Compress the signal from time index and the other conditionals if any - ] + modules = [ + GaussianFourierProjection(t_dim, scale=fourier_scale), # Time embedding + nn.Linear( + total_time_channels, t_dim + ), # Compress the signal from time index and the other conditionals if any + ] for _ in range(time_branch_layers - 1): modules.append(nn.Linear(t_dim, t_dim)) ################################### - + ########### Input branch ########### modules.append(nn.Linear(total_input_channels + t_dim, units)) if bottleneck is not None: @@ -144,11 +148,11 @@ def __init__( self.act = get_activation(activation) self.all_modules = nn.ModuleList(modules) ################################### - - def forward(self, t, x, *args): + + def forward(self, t, x, *args, **kwargs): B, D = x.shape modules = self.all_modules - + # Time branch temb = modules[0](t) if self.conditioned: @@ -164,7 +168,7 @@ def forward(self, t, x, *args): x = merge_conditional_input_branch(self, x, *args) x = modules[i](x) i += 1 - for _ in range(self.layers//2): + for _ in range(self.layers // 2): x = self.act(modules[i](x)) i += 1 if self.bottleneck: @@ -175,7 +179,7 @@ def forward(self, t, x, *args): x = self.attention_layer(x.view(B, 1, -1), context).view(B, -1) if self.bottleneck: x = 
self.act(self.bottleneck_out(x)) - for _ in range(self.layers//2): + for _ in range(self.layers // 2): x = self.act(modules[i](x)) i += 1 out = self.output_layer(x) diff --git a/score_models/architectures/ncsnpp.py b/score_models/architectures/ncsnpp.py index 5570a9b..f9a0c6f 100644 --- a/score_models/architectures/ncsnpp.py +++ b/score_models/architectures/ncsnpp.py @@ -6,56 +6,59 @@ from functools import partial from ..layers import ( - DDPMResnetBlock, - ResnetBlockBigGANpp, - GaussianFourierProjection, - SelfAttentionBlock, - UpsampleLayer, - DownsampleLayer, - Combine, - conv3x3, - PositionalEncoding - ) + DDPMResnetBlock, + ResnetBlockBigGANpp, + GaussianFourierProjection, + SelfAttentionBlock, + UpsampleLayer, + DownsampleLayer, + Combine, + conv3x3, + PositionalEncoding, +) from ..utils import get_activation from ..definitions import default_init from .conditional_branch import ( - validate_conditional_arguments, - conditional_branch, - merge_conditional_time_branch, - merge_conditional_input_branch - ) + validate_conditional_arguments, + conditional_branch, + merge_conditional_time_branch, + merge_conditional_input_branch, +) __all__ = ["NCSNpp"] + class NCSNpp(nn.Module): def __init__( - self, - channels: int = 1, - dimensions: Literal[1, 2, 3] = 2, - nf: int = 128, - ch_mult: tuple[int] = (2, 2, 2, 2), - num_res_blocks: int = 2, - activation_type: str = "swish", - dropout: float = 0., - resample_with_conv: bool = True, - fir: bool = True, - fir_kernel: tuple[int] = (1, 3, 3, 1), - skip_rescale: bool = True, - progressive: Literal["none", "output_skip", "residual"] = "output_skip", - progressive_input: Literal["none", "input_skip", "residual"] = "input_skip", - init_scale: float = 1e-2, - fourier_scale: float = 16., - resblock_type: Literal["biggan", "ddpm"] = "biggan", - combine_method: Literal["cat", "sum"] = "cat", - attention: bool = True, - conditions : Optional[tuple[Literal["time_discrete", "time_continuous", "time_vector", "input_tensor"]]] = None, - condition_embeddings: Optional[tuple[int]] = None, - condition_channels: Optional[int] = None, - # fourier_features=False, - # n_min=7, - # n_max=8, - **kwargs - ): + self, + channels: int = 1, + dimensions: Literal[1, 2, 3] = 2, + nf: int = 128, + ch_mult: tuple[int] = (2, 2, 2, 2), + num_res_blocks: int = 2, + activation_type: str = "swish", + dropout: float = 0.0, + resample_with_conv: bool = True, + fir: bool = True, + fir_kernel: tuple[int] = (1, 3, 3, 1), + skip_rescale: bool = True, + progressive: Literal["none", "output_skip", "residual"] = "output_skip", + progressive_input: Literal["none", "input_skip", "residual"] = "input_skip", + init_scale: float = 1e-2, + fourier_scale: float = 16.0, + resblock_type: Literal["biggan", "ddpm"] = "biggan", + combine_method: Literal["cat", "sum"] = "cat", + attention: bool = True, + conditions: Optional[ + tuple[Literal["time_discrete", "time_continuous", "time_vector", "input_tensor"]] + ] = None, + condition_embeddings: Optional[tuple[int]] = None, + condition_channels: Optional[int] = None, + # fourier_features=False, + # n_min=7, + # n_max=8, + **kwargs, + ): """ NCSN++ model @@ -84,7 +87,9 @@ def __init__( """ super().__init__() if dimensions not in [1, 2, 3]: - raise ValueError("Input must have 1, 2, or 3 spatial dimensions to use this architecture") + raise ValueError( + "Input must have 1, 2, or 3 spatial dimensions to use this architecture" + ) self.dimensions = dimensions validate_conditional_arguments(conditions, condition_embeddings, condition_channels) 
self.conditioned = conditions is not None @@ -101,10 +106,14 @@ def __init__( self.progressive = progressive.lower() self.progressive_input = progressive_input.lower() self.resblock_type = resblock_type - if progressive not in ['none', 'output_skip', 'residual']: - raise ValueError(f"progressive must be in ['none', 'output_skip', 'residual'], received {progressive}") - if progressive_input not in ['none', 'input_skip', 'residual']: - raise ValueError(f"progressive_input must be in ['none', 'input_skip', 'residual'], received {progressive_input}") + if progressive not in ["none", "output_skip", "residual"]: + raise ValueError( + f"progressive must be in ['none', 'output_skip', 'residual'], received {progressive}" + ) + if progressive_input not in ["none", "input_skip", "residual"]: + raise ValueError( + f"progressive_input must be in ['none', 'input_skip', 'residual'], received {progressive_input}" + ) self.hyperparameters = { "channels": channels, "nf": nf, @@ -131,28 +140,30 @@ def __init__( # "n_min": n_min, # "n_max": n_max } - + ########### Conditional branch ########### if self.conditioned: total_time_channels, total_input_channels = conditional_branch( - self, - time_branch_channels=nf, - input_branch_channels=channels, - condition_embeddings=condition_embeddings, - condition_channels=condition_channels, - fourier_scale=fourier_scale - ) # This method attach a Module list to self.conditional_branch + self, + time_branch_channels=nf, + input_branch_channels=channels, + condition_embeddings=condition_embeddings, + condition_channels=condition_channels, + fourier_scale=fourier_scale, + ) # This method attach a Module list to self.conditional_branch else: total_time_channels = nf total_input_channels = channels ######################################### - + ########### Time branch ########### modules = [ - GaussianFourierProjection(embed_dim=nf, scale=fourier_scale), # Time embedding - nn.Linear(total_time_channels, nf * 4), # Combine time embedding with conditionals if any - nn.Linear(nf * 4, nf * 4) - ] + GaussianFourierProjection(embed_dim=nf, scale=fourier_scale), # Time embedding + nn.Linear( + total_time_channels, nf * 4 + ), # Combine time embedding with conditionals if any + nn.Linear(nf * 4, nf * 4), + ] with torch.no_grad(): modules[1].weight.data = default_init()(modules[1].weight.shape) modules[1].bias.zero_() @@ -163,47 +174,67 @@ def __init__( ########### Prepare layers ########### combiner = partial(Combine, method=combine_method.lower(), dimensions=self.dimensions) AttnBlock = partial(SelfAttentionBlock, init_scale=init_scale, dimensions=dimensions) - Upsample = partial(UpsampleLayer, with_conv=resample_with_conv, fir=fir, fir_kernel=fir_kernel, dimensions=self.dimensions) - Downsample = partial(DownsampleLayer, with_conv=resample_with_conv, fir=fir, fir_kernel=fir_kernel, dimensions=self.dimensions) - if progressive == 'output_skip': + Upsample = partial( + UpsampleLayer, + with_conv=resample_with_conv, + fir=fir, + fir_kernel=fir_kernel, + dimensions=self.dimensions, + ) + Downsample = partial( + DownsampleLayer, + with_conv=resample_with_conv, + fir=fir, + fir_kernel=fir_kernel, + dimensions=self.dimensions, + ) + if progressive == "output_skip": self.pyramid_upsample = Upsample(fir=fir, fir_kernel=fir_kernel, with_conv=False) - elif progressive == 'residual': - pyramid_upsample = partial(UpsampleLayer, fir=fir, fir_kernel=fir_kernel, with_conv=True, dimensions=self.dimensions) - if progressive_input == 'input_skip': + elif progressive == "residual": + pyramid_upsample 
= partial( + UpsampleLayer, + fir=fir, + fir_kernel=fir_kernel, + with_conv=True, + dimensions=self.dimensions, + ) + if progressive_input == "input_skip": self.pyramid_downsample = Downsample(fir=fir, fir_kernel=fir_kernel, with_conv=False) - elif progressive_input == 'residual': + elif progressive_input == "residual": pyramid_downsample = partial(Downsample, fir=fir, fir_kernel=fir_kernel, with_conv=True) - if resblock_type == 'ddpm': - ResnetBlock = partial(DDPMResnetBlock, - act=act, - dropout=dropout, - init_scale=init_scale, - skip_rescale=skip_rescale, - temb_dim=nf * 4, - dimensions=self.dimensions - ) + if resblock_type == "ddpm": + ResnetBlock = partial( + DDPMResnetBlock, + act=act, + dropout=dropout, + init_scale=init_scale, + skip_rescale=skip_rescale, + temb_dim=nf * 4, + dimensions=self.dimensions, + ) - elif resblock_type == 'biggan': - ResnetBlock = partial(ResnetBlockBigGANpp, - act=act, - dropout=dropout, - fir=fir, - fir_kernel=fir_kernel, - init_scale=init_scale, - skip_rescale=skip_rescale, - temb_dim=nf * 4, - dimensions=self.dimensions - ) + elif resblock_type == "biggan": + ResnetBlock = partial( + ResnetBlockBigGANpp, + act=act, + dropout=dropout, + fir=fir, + fir_kernel=fir_kernel, + init_scale=init_scale, + skip_rescale=skip_rescale, + temb_dim=nf * 4, + dimensions=self.dimensions, + ) else: - raise ValueError(f'resblock type {resblock_type} unrecognized.') + raise ValueError(f"resblock type {resblock_type} unrecognized.") ##################################### # Downsampling block input_pyramid_ch = total_input_channels modules.append(conv3x3(total_input_channels, nf, dimensions=dimensions)) hs_c = [nf] - in_ch = nf #+ fourier_feature_channels + in_ch = nf # + fourier_feature_channels for i_level in range(num_resolutions): # Residual blocks for this resolution for i_block in range(num_res_blocks): @@ -212,17 +243,17 @@ def __init__( in_ch = out_ch hs_c.append(in_ch) if i_level != num_resolutions - 1: - if resblock_type == 'ddpm': + if resblock_type == "ddpm": modules.append(Downsample(in_ch=in_ch)) else: modules.append(ResnetBlock(down=True, in_ch=in_ch)) - if progressive_input == 'input_skip': + if progressive_input == "input_skip": modules.append(combiner(in_ch=input_pyramid_ch, out_ch=in_ch)) - if combine_method == 'cat': + if combine_method == "cat": in_ch *= 2 - elif progressive_input == 'residual': + elif progressive_input == "residual": modules.append(pyramid_downsample(in_ch=input_pyramid_ch, out_ch=in_ch)) input_pyramid_ch = in_ch hs_c.append(in_ch) @@ -238,56 +269,75 @@ def __init__( for i_level in reversed(range(num_resolutions)): for i_block in range(num_res_blocks + 1): out_ch = nf * ch_mult[i_level] - modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(), - out_ch=out_ch)) + modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(), out_ch=out_ch)) in_ch = out_ch - if progressive != 'none': + if progressive != "none": if i_level == num_resolutions - 1: - if progressive == 'output_skip': - modules.append(nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), - num_channels=in_ch, eps=1e-6)) - modules.append(conv3x3(in_ch, channels, init_scale=init_scale, dimensions=dimensions)) + if progressive == "output_skip": + modules.append( + nn.GroupNorm( + num_groups=max(min(in_ch // 4, 32), 1), num_channels=in_ch, eps=1e-6 + ) + ) + modules.append( + conv3x3(in_ch, channels, init_scale=init_scale, dimensions=dimensions) + ) pyramid_ch = channels - elif progressive == 'residual': - modules.append(nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), - 
num_channels=in_ch, eps=1e-6)) + elif progressive == "residual": + modules.append( + nn.GroupNorm( + num_groups=max(min(in_ch // 4, 32), 1), num_channels=in_ch, eps=1e-6 + ) + ) modules.append(conv3x3(in_ch, in_ch, bias=True, dimensions=dimensions)) pyramid_ch = in_ch else: - raise ValueError(f'{progressive} is not a valid name.') + raise ValueError(f"{progressive} is not a valid name.") else: - if progressive == 'output_skip': - modules.append(nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), - num_channels=in_ch, eps=1e-6)) - modules.append(conv3x3(in_ch, channels, bias=True, init_scale=init_scale, dimensions=dimensions)) + if progressive == "output_skip": + modules.append( + nn.GroupNorm( + num_groups=max(min(in_ch // 4, 32), 1), num_channels=in_ch, eps=1e-6 + ) + ) + modules.append( + conv3x3( + in_ch, + channels, + bias=True, + init_scale=init_scale, + dimensions=dimensions, + ) + ) pyramid_ch = channels - elif progressive == 'residual': + elif progressive == "residual": modules.append(pyramid_upsample(in_ch=pyramid_ch, out_ch=in_ch)) pyramid_ch = in_ch else: - raise ValueError(f'{progressive} is not a valid name') + raise ValueError(f"{progressive} is not a valid name") if i_level != 0: - if resblock_type == 'ddpm': + if resblock_type == "ddpm": modules.append(Upsample(in_ch=in_ch)) else: modules.append(ResnetBlock(in_ch=in_ch, up=True)) assert not hs_c - if progressive != 'output_skip': - modules.append(nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), - num_channels=in_ch, eps=1e-6)) - modules.append(conv3x3(in_ch, channels, init_scale=1., dimensions=dimensions)) + if progressive != "output_skip": + modules.append( + nn.GroupNorm(num_groups=max(min(in_ch // 4, 32), 1), num_channels=in_ch, eps=1e-6) + ) + modules.append(conv3x3(in_ch, channels, init_scale=1.0, dimensions=dimensions)) self.all_modules = nn.ModuleList(modules) - def forward(self, t, x, *args): + def forward(self, t, x, *args, **kwargs): B, *D = x.shape modules = self.all_modules m_idx = 0 - + # Time branch temb = modules[m_idx](t).view(B, -1) m_idx += 1 @@ -297,16 +347,16 @@ def forward(self, t, x, *args): m_idx += 1 temb = modules[m_idx](self.act(temb)) m_idx += 1 - + # Input branch if self.conditioned: x = merge_conditional_input_branch(self, x, *args) # if self.fourier_features: - # ffeatures = self.fourier_features(x) - # x = torch.concat([x, ffeatures], axis=1) + # ffeatures = self.fourier_features(x) + # x = torch.concat([x, ffeatures], axis=1) # Downsampling block input_pyramid = None - if self.progressive_input != 'none': + if self.progressive_input != "none": input_pyramid = x hs = [modules[m_idx](x)] m_idx += 1 @@ -318,21 +368,21 @@ def forward(self, t, x, *args): hs.append(h) if i_level != self.num_resolutions - 1: - if self.resblock_type == 'ddpm': + if self.resblock_type == "ddpm": h = modules[m_idx](hs[-1]) m_idx += 1 else: h = modules[m_idx](hs[-1], temb) m_idx += 1 - if self.progressive_input == 'input_skip': + if self.progressive_input == "input_skip": input_pyramid = self.pyramid_downsample(input_pyramid) h = modules[m_idx](input_pyramid, h) m_idx += 1 - elif self.progressive_input == 'residual': + elif self.progressive_input == "residual": input_pyramid = modules[m_idx](input_pyramid) m_idx += 1 if self.skip_rescale: - input_pyramid = (input_pyramid + h) / np.sqrt(2.) 
+ input_pyramid = (input_pyramid + h) / np.sqrt(2.0) else: input_pyramid = input_pyramid + h h = input_pyramid @@ -355,40 +405,40 @@ def forward(self, t, x, *args): h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb) m_idx += 1 - if self.progressive != 'none': + if self.progressive != "none": if i_level == self.num_resolutions - 1: - if self.progressive == 'output_skip': + if self.progressive == "output_skip": pyramid = self.act(modules[m_idx](h)) m_idx += 1 pyramid = modules[m_idx](pyramid) m_idx += 1 - elif self.progressive == 'residual': + elif self.progressive == "residual": pyramid = self.act(modules[m_idx](h)) m_idx += 1 pyramid = modules[m_idx](pyramid) m_idx += 1 else: - raise ValueError(f'{self.progressive} is not a valid name.') + raise ValueError(f"{self.progressive} is not a valid name.") else: - if self.progressive == 'output_skip': + if self.progressive == "output_skip": pyramid = self.pyramid_upsample(pyramid) pyramid_h = self.act(modules[m_idx](h)) m_idx += 1 pyramid_h = modules[m_idx](pyramid_h) m_idx += 1 pyramid = pyramid + pyramid_h - elif self.progressive == 'residual': + elif self.progressive == "residual": pyramid = modules[m_idx](pyramid) m_idx += 1 if self.skip_rescale: - pyramid = (pyramid + h) / np.sqrt(2.) + pyramid = (pyramid + h) / np.sqrt(2.0) else: pyramid = pyramid + h h = pyramid else: - raise ValueError(f'{self.progressive} is not a valid name') + raise ValueError(f"{self.progressive} is not a valid name") if i_level != 0: - if self.resblock_type == 'ddpm': + if self.resblock_type == "ddpm": h = modules[m_idx](h) m_idx += 1 else: @@ -396,7 +446,7 @@ def forward(self, t, x, *args): m_idx += 1 assert not hs - if self.progressive == 'output_skip': + if self.progressive == "output_skip": h = pyramid else: h = self.act(modules[m_idx](h)) diff --git a/score_models/architectures/null_net.py b/score_models/architectures/null_net.py new file mode 100644 index 0000000..ee7dd8f --- /dev/null +++ b/score_models/architectures/null_net.py @@ -0,0 +1,12 @@ +from torch import nn + + +class NullNet(nn.Module): + + def __init__(self, isenergy=False, *args, **kwargs): + super().__init__() + + self.hyperparameters = {"nn_is_energy": isenergy} + + def forward(self, t, x, *args, **kwargs): + raise RuntimeError("This is a null model and should not be called.") diff --git a/score_models/sbm/__init__.py b/score_models/sbm/__init__.py index 985bcc5..6cc4899 100644 --- a/score_models/sbm/__init__.py +++ b/score_models/sbm/__init__.py @@ -3,5 +3,13 @@ from .slic import * from .hessian_model import * from .lora import * +from .grf import * +from .mvg import * +from .interpolated import * +from .conv_likelihood import * +from .joint import * +from .sample import * +from .tweedie import * + # from .lora_posterior import * # from .kernel_slic import * diff --git a/score_models/sbm/base.py b/score_models/sbm/base.py index fedbcb1..2a97385 100644 --- a/score_models/sbm/base.py +++ b/score_models/sbm/base.py @@ -4,29 +4,29 @@ import torch from torch.nn import Module from torch import Tensor -from torch_ema import ExponentialMovingAverage from ..save_load_utils import ( save_checkpoint, save_hyperparameters, load_checkpoint, load_architecture, - load_sde - ) + load_sde, +) from ..utils import DEVICE from ..sde import SDE from ..trainer import Trainer + class Base(Module, ABC): def __init__( - self, - net: Optional[Union[str, Module]] = None, - sde: Optional[Union[str, SDE]] = None, - path: Optional[str] = None, - checkpoint: Optional[int] = None, - device=DEVICE, - **hyperparameters - 
): + self, + net: Optional[Union[str, Module]] = None, + sde: Optional[Union[str, SDE]] = None, + path: Optional[str] = None, + checkpoint: Optional[int] = None, + device=DEVICE, + **hyperparameters + ): super().__init__() # Backward compatibility if "checkpoints_directory" in hyperparameters.keys() and path is None: @@ -38,19 +38,15 @@ def __init__( if "model_checkpoint" in hyperparameters.keys() and checkpoint is None: checkpoint = hyperparameters["model_checkpoint"] hyperparameters.pop("model_checkpoint") - + if net is None and path is None: raise ValueError("Must provide either 'net' or 'path' to instantiate the model.") self.path = path if net is None or isinstance(net, str): self.net, self.hyperparameters = load_architecture( - path, - net=net, - device=device, - checkpoint=checkpoint, - **hyperparameters - ) + path, net=net, device=device, checkpoint=checkpoint, **hyperparameters + ) else: self.net = net self.hyperparameters = hyperparameters @@ -64,39 +60,42 @@ def __init__( if isinstance(sde, str): self.hyperparameters["sde"] = sde self.sde, sde_params = load_sde(**self.hyperparameters) - self.hyperparameters.update(sde_params) # Save the SDE hyperparameters, including the defaults + self.hyperparameters.update( + sde_params + ) # Save the SDE hyperparameters, including the defaults self.device = device self.net.to(device) self.to(device) if self.path: - self.load(checkpoint, raise_error=False) # If no checkpoint is found, loaded_checkpoint will be None + self.load( + checkpoint, raise_error=False + ) # If no checkpoint is found, loaded_checkpoint will be None else: self.loaded_checkpoint = None if hasattr(self.net, "hyperparameters"): self.hyperparameters.update(self.net.hyperparameters) - + # Backward compatibility if "model_architecture" not in self.hyperparameters: self.hyperparameters["model_architecture"] = self.net.__class__.__name__.lower() self.model = self.net - def forward(self, t, x, *args) -> Tensor: - return self.net(t, x, *args) - + def forward(self, t, x, *args, **kwargs) -> Tensor: + return self.net(t, x, *args, **kwargs) + @abstractmethod - def loss(self, x, *args) -> Tensor: - ... - + def loss(self, x, *args, **kwargs) -> Tensor: ... + def save( - self, - path: Optional[str] = None, - optimizer: Optional[torch.optim.Optimizer] = None, - create_path: bool = True - ): + self, + path: Optional[str] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + create_path: bool = True, + ): """ Save the model checkpoint to the provided path or the path provided during initialization. - + Args: path (str, optional): The path to save the checkpoint. Default is path provided during initialization. optimizer (torch.optim.Optimizer, optional): Optimizer to save alongside the checkpoint. Default is None. 
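As an aside, a minimal usage sketch of the save/load workflow refactored above; the architecture string, SDE settings, and checkpoint path are illustrative assumptions, not values taken from this patch.

from score_models import ScoreModel

# Hypothetical settings; any architecture/SDE string accepted by
# load_architecture/load_sde should work the same way.
model = ScoreModel(net="mlp", sde="vesde", sigma_min=1e-2, sigma_max=50.0)
model.save(path="checkpoints/example_run")             # writes the checkpoint and hyperparameters file
restored = ScoreModel(path="checkpoints/example_run")  # reloads the latest checkpoint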
@@ -104,13 +103,19 @@ def save( """ path = path or self.path if path: - if optimizer: # Save optimizer first since checkpoint number is inferred from number of checkpoint files - save_checkpoint(model=optimizer, path=path, key="optimizer", create_path=create_path) + if ( + optimizer + ): # Save optimizer first since checkpoint number is inferred from number of checkpoint files + save_checkpoint( + model=optimizer, path=path, key="optimizer", create_path=create_path + ) save_checkpoint(model=self.net, path=path, key="checkpoint", create_path=create_path) - self.save_hyperparameters(path) # If already present in path, this does nothing + self.save_hyperparameters(path) # If already present in path, this does nothing else: - raise ValueError("No path provided to save the model. Please provide a valid path or initialize the model with a path.") - + raise ValueError( + "No path provided to save the model. Please provide a valid path or initialize the model with a path." + ) + def save_hyperparameters(self, path: Optional[str] = None): """ Save the hyperparameters of the model to a json file in the checkpoint directory. @@ -118,12 +123,8 @@ def save_hyperparameters(self, path: Optional[str] = None): path = path or self.path if path: save_hyperparameters(self.hyperparameters, path) - - def load( - self, - checkpoint: Optional[int] = None, - raise_error: bool = True - ): + + def load(self, checkpoint: Optional[int] = None, raise_error: bool = True): """ Load a specific checkpoint from the model. @@ -133,30 +134,38 @@ def load( raise_error (bool, optional): Whether to raise an error if checkpoint is not found. Default is True. """ if self.path is None: - raise ValueError("A checkpoint can only be loaded if the model is instantiated with a path, e.g. model = ScoreModel(path='path/to/checkpoint').") - self.loaded_checkpoint = load_checkpoint(model=self, checkpoint=checkpoint, path=self.path, key="checkpoint", raise_error=raise_error) - + raise ValueError( + "A checkpoint can only be loaded if the model is instantiated with a path, e.g. model = ScoreModel(path='path/to/checkpoint')." 
+ ) + self.loaded_checkpoint = load_checkpoint( + model=self, + checkpoint=checkpoint, + path=self.path, + key="checkpoint", + raise_error=raise_error, + ) + def fit( - self, - dataset: torch.utils.data.Dataset, - epochs: int = 1, - learning_rate: float = 1e-4, - ema_decay: float = 0.999, - clip: float = 0., - warmup: int = 0, - optimizer: Optional[torch.optim.Optimizer] = None, - preprocessing: Optional[Callable] = None, - batch_size: Optional[int] = None, - shuffle: bool = False, - iterations_per_epoch: Optional[int] = None, - max_time: float = float('inf'), - checkpoint_every: int = 10, - models_to_keep: int = 1, - path: Optional[str] = None, - name_prefix: Optional[str] = None, - seed: Optional[int] = None, - **kwargs - ) -> list: + self, + dataset: torch.utils.data.Dataset, + epochs: int = 1, + learning_rate: float = 1e-4, + ema_decay: float = 0.999, + clip: float = 0.0, + warmup: int = 0, + optimizer: Optional[torch.optim.Optimizer] = None, + preprocessing: Optional[Callable] = None, + batch_size: Optional[int] = None, + shuffle: bool = False, + iterations_per_epoch: Optional[int] = None, + max_time: float = float("inf"), + checkpoint_every: int = 10, + models_to_keep: int = 1, + path: Optional[str] = None, + name_prefix: Optional[str] = None, + seed: Optional[int] = None, + **kwargs + ) -> list: # Backward compatibility if "checkpoints_directory" in kwargs and path is None: path = kwargs["checkpoints_directory"] @@ -182,7 +191,7 @@ def fit( models_to_keep=models_to_keep, path=path, name_prefix=name_prefix, - seed=seed - ) + seed=seed, + ) losses = trainer.train() return losses diff --git a/score_models/sbm/conv_likelihood.py b/score_models/sbm/conv_likelihood.py new file mode 100644 index 0000000..e347d55 --- /dev/null +++ b/score_models/sbm/conv_likelihood.py @@ -0,0 +1,123 @@ +from typing import Callable, Union, Tuple, Optional +import torch +from torch import Tensor +from torch import vmap +import numpy as np + +from ..sde import SDE +from .energy_model import EnergyModel +from ..architectures import NullNet + + +class ConvolvedLikelihood(EnergyModel): + """ + Convolved likelihood approximation for the likelihood component of a + posterior score model. + + Applies the convolved likelihood approximation as described in Adam et al. + 2022 Appendix A. Essentially this assumes that the posterior convolution may + be approximately factored into the convolution of the likelihood and the + prior separately. For a linear and gaussian likelihood model, this is exact, + coming out to: + + .. math:: + + p_t(y|x_t) = N(y|Ax_t, \\Sigma_y + \\sigma_t^2 AA^T) + + We implement this as an energy model, where the energy is the negative log + likelihood in the observation space. Autodiff then propagates the score to + the model space. + + Args: + sde: The SDE that the score model is associated with. + y: The observation. + Sigma_y: The observation covariance matrix. If ``Sigma_y.shape == y.shape`` this is assumed to be the diagonal of the covariance matrix. + A: The linear operator relating the model space to the observation space. May be a tensor or a function. + AAT: The covariance of the linear operator. With A as a matrix then this is A @ A.T and should have the same shape as Sigma_y. + x_shape: The shape of the model space. This must be provided if A is a function and AAT is not provided. 
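To make the covariance in the formula above concrete, here is a small numeric sketch for a linear operator A given as a matrix; all shapes and values are illustrative.

import torch

A = torch.randn(3, 5)          # maps a 5-dim model space to a 3-dim observation space
Sigma_y = 0.1 * torch.eye(3)   # observation covariance
sigma_t = 0.5                  # SDE perturbation scale at time t
AAT = A @ A.T
cov_t = Sigma_y + sigma_t**2 * AAT   # covariance of p_t(y|x_t) = N(y | A x_t, Sigma_y + sigma_t^2 A A^T)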
+ """ + + @torch.no_grad() + def __init__( + self, + sde: SDE, + y: Tensor, + Sigma_y: Tensor, + A: Union[Tensor, Callable], + AAT: Optional[Tensor] = None, + x_shape: Optional[Tuple[int]] = None, + **kwargs, + ): + super().__init__(net=NullNet(isenergy=True), sde=sde, path=None, checkpoint=None, **kwargs) + self.sde = sde + self.y = y + self.Sigma_y = Sigma_y + self.y_shape = y.shape + self.A = A + self.x_shape = x_shape + self.diag = self.y.shape == self.Sigma_y.shape + + if AAT is None: + if callable(A): + assert ( + x_shape is not None + ), "x_shape must be provided if A is a function and AAT is not provided." + Amatrix = torch.func.jacrev(A)( + torch.zeros(x_shape, dtype=y.dtype, device=y.device) + ).reshape(np.prod(self.y_shape), np.prod(x_shape)) + if self.diag: + self.AAT = torch.sum(Amatrix**2, dim=1).reshape(*self.Sigma_y.shape) + else: + self.AAT = Amatrix @ Amatrix.T + else: + if self.diag: + self.AAT = torch.sum(self.A**2, dim=1).reshape(*self.Sigma_y.shape) + else: + self.AAT = self.A @ self.A.T + else: + self.AAT = AAT + + assert self.AAT.shape == self.Sigma_y.shape, "AAT must have the same shape as Sigma_y" + + @property + def diag(self): + return self._diag + + @diag.setter + def diag(self, value): + self._diag = value + self.energy = self.diag_energy if value else self.full_energy + + def diag_energy(self, t, xt, *args, sigma, **kwargs): + if callable(self.A): + r = self.y - self.A(xt) + else: + r = self.y - (self.A @ xt.reshape(-1, 1)).reshape(*self.y_shape) + + nll = 0.5 * torch.sum(r**2 * sigma) + return nll + + def _full_forward(self, t, xt, sigma): + if callable(self.A): + r = self.y.reshape(-1, 1) - self.A(xt).reshape(-1, 1) + else: + r = self.y.reshape(-1, 1) - self.A @ xt.reshape(-1, 1) + nll = 0.5 * (r.T @ sigma @ r) + return nll.squeeze() + + def full_energy(self, t, xt, *args, sigma, **kwargs): + + return vmap(self._full_forward, in_dims=(0, 0, None))(t, xt, sigma) + + def score(self, t, x, *args, **kwargs): + # Compute sigma once per time step + sigma = self.Sigma_y * self.sde.mu(t[0]) ** 2 + self.sde.sigma(t[0]) ** 2 * self.AAT + sigma = 1 / sigma if self.diag else torch.linalg.inv(sigma) + + return super().score(t, x, *args, sigma=sigma, **kwargs) + + def unnormalized_energy(self, t: Tensor, x: Tensor, *args, **kwargs): + raise RuntimeError("Unnormalized energy should not be called for analytic models.") + + def reparametrized_score(self, t, x, *args, **kwargs): + raise RuntimeError("Reparametrized score should not be called for analytic models.") diff --git a/score_models/sbm/energy_model.py b/score_models/sbm/energy_model.py index 13c8f45..7cd55c3 100644 --- a/score_models/sbm/energy_model.py +++ b/score_models/sbm/energy_model.py @@ -11,16 +11,17 @@ __all__ = ["EnergyModel"] + class EnergyModel(ScoreModel): def __init__( - self, - net: Optional[Union[str, Module]] = None, - sde: Optional[Union[str, SDE]] = None, - path: Optional[str] = None, - checkpoint: Optional[int] = None, - device=DEVICE, - **hyperparameters - ): + self, + net: Optional[Union[str, Module]] = None, + sde: Optional[Union[str, SDE]] = None, + path: Optional[str] = None, + checkpoint: Optional[int] = None, + device=DEVICE, + **hyperparameters + ): super().__init__(net, sde, path, checkpoint=checkpoint, device=device, **hyperparameters) nn_is_energy = self.net.hyperparameters.get("nn_is_energy", False) self.nn_is_energy = nn_is_energy @@ -29,36 +30,46 @@ def __init__( else: self._energy = self._unet_energy - def forward(self, t, x, *args): + def score(self, t, x, *args, **kwargs): """ 
Overwrite the base method to return the score, computed as the negative gradient of the energy, instead of the model output. """ - return self.energy(t, x, *args) - def reparametrized_score(self, t, x, *args): + # wrapper to feed energy in vmap + def energy_wrapper(t, x, *args): + t = t.unsqueeze(0) + x = x.unsqueeze(0) + args = [a.unsqueeze(0) for a in args] + return self.energy(t, x, *args, **kwargs).squeeze(0) + + return -vmap(grad(energy_wrapper, argnums=1))(t, x, *args) # Don't forget the minus sign! + + def reparametrized_score(self, t, x, *args, **kwargs): """ Numerically stable reparametrization of the score function for the DSM loss. Score function uses this method so self.score(t, x, *args) will also work as expected. """ + def energy(t, x, *args): - # wrapper to feed energy im vmap + # wrapper to feed energy in vmap t = t.unsqueeze(0) x = x.unsqueeze(0) args = [a.unsqueeze(0) for a in args] - return self.unnormalized_energy(t, x, *args).squeeze(0) - return - vmap(grad(energy, argnums=1))(t, x, *args) # Don't forget the minus sign! + return self.unnormalized_energy(t, x, *args, **kwargs).squeeze(0) - def unnormalized_energy(self, t, x, *args): - return self._energy(t, x, *args) + return -vmap(grad(energy, argnums=1))(t, x, *args) # Don't forget the minus sign! - def energy(self, t, x, *args): + def unnormalized_energy(self, t, x, *args, **kwargs): + return self._energy(t, x, *args, **kwargs) + + def energy(self, t, x, *args, **kwargs): sigma_t = self.sde.sigma(t) - energy = self.unnormalized_energy(t, x, *args) + energy = self.unnormalized_energy(t, x, *args, **kwargs) return energy / sigma_t - def _unet_energy(self, t, x, *args): + def _unet_energy(self, t, x, *args, **kwargs): _, *D = x.shape - return 0.5 * torch.sum((x - self.net(t, x, *args)).flatten(1)**2, dim=1) - - def _nn_energy(self, t, x, *args): - return self.net(t, x, *args).squeeze(1) + return 0.5 * torch.sum((x - self.net(t, x, *args, **kwargs)).flatten(1) ** 2, dim=1) + + def _nn_energy(self, t, x, *args, **kwargs): + return self.net(t, x, *args, **kwargs).squeeze(1)
diff --git a/score_models/sbm/grf.py b/score_models/sbm/grf.py new file mode 100644 index 0000000..60d7adf --- /dev/null +++ b/score_models/sbm/grf.py @@ -0,0 +1,60 @@ +import torch +from torch import Tensor + +from ..sde import SDE +from .energy_model import EnergyModel +from ..architectures import NullNet + + +class GRFEnergyModel(EnergyModel): + """ + Gaussian random field score model. + + Computes the energy for a gaussian random field based on a provided power spectrum. + + Args: + sde: The SDE that the score model is associated with. + power_spectrum: The power spectrum of the Gaussian random field.
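As a quick numeric illustration of the GRF energy defined below, in the t -> 0 limit where mu_t is about 1 and sigma_t is about 0; the flat power spectrum is a placeholder, not a recommended choice.

import torch

x = torch.randn(1, 16, 16)            # a batch with one 16x16 field
power_spectrum = torch.ones(16, 16)   # placeholder white-noise spectrum
x_ft = torch.fft.fft2(x, s=x.shape[-2:], norm="ortho")
nll = 0.5 * torch.sum(torch.abs(x_ft) ** 2 / power_spectrum, dim=(-2, -1))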
+ """ + + def __init__(self, sde: SDE, power_spectrum: Tensor, **kwargs): + super().__init__(net=NullNet(isenergy=True), sde=sde, path=None, checkpoint=None, **kwargs) + self.sde = sde + # Store the power spectrum + self.power_spectrum = power_spectrum + self.dims = power_spectrum.dim() + if self.dims == 1: + self.fft = torch.fft.fft + elif self.dims == 2: + self.fft = torch.fft.fft2 + else: + raise ValueError("Only 1D and 2D power spectra are supported") + + def energy(self, t: Tensor, x: Tensor, *args, **kwargs): + """GRF energy""" + sigma_t = self.sde.sigma(t) + mu_t = self.sde.mu(t) + + # Fourier Transform of the image + fftkwargs = {"norm": "ortho"} + if self.dims == 2: + fftkwargs["s"] = x.shape[-2:] + elif self.dims == 1: + fftkwargs["n"] = x.shape[-1] + image_ft = self.fft(x, **fftkwargs) + + # Compute squared magnitude of the Fourier coefficients + magnitude_squared = torch.abs(image_ft) ** 2 + + # Calculate negative log likelihood + nll = 0.5 * torch.sum( + (magnitude_squared / (mu_t**2 * self.power_spectrum + sigma_t**2)).real, + dim=tuple(range(-self.dims, 0)), + ) + return nll + + def unnormalized_energy(self, t: Tensor, x: Tensor, *args, **kwargs): + raise RuntimeError("Unnormalized energy should not be called for GRF models.") + + def reparametrized_score(self, t, x, *args, **kwargs): + raise RuntimeError("Reparametrized score should not be called for GRF models.") diff --git a/score_models/sbm/interpolated.py b/score_models/sbm/interpolated.py new file mode 100644 index 0000000..e098667 --- /dev/null +++ b/score_models/sbm/interpolated.py @@ -0,0 +1,75 @@ +from typing import Union, Callable + +import torch +from torch import Tensor + +from ..sde import SDE +from . import ScoreModel +from ..architectures import NullNet + + +class InterpolatedScoreModel(ScoreModel): + """ + Smoothly transitions between two score models as a function of t. + + This score model class allows for the interpolation of the scores between + two models. Can be useful when one model is better at capturing the score in + the early stages of the SDE and another model is better at capturing the + score in the later stages of the SDE. + + Args: + sde: The SDE that the score model is associated with. + hight_model: The high temperature model. + lowt_model: The low temperature model. + beta_scheme: The scheme for the beta parameter. Can be "linear", "square", + "sqrt", "linear:", "sqrt:", or "sin:". For the "" models + the ``i`` parameter can be used to scale the ``t`` input to beta making + the transition happen later. 
+ + """ + + def __init__( + self, + sde: SDE, + hight_model: ScoreModel, + lowt_model: ScoreModel, + beta_scheme: Union[Callable, str] = "linear", + epsilon: float = 0.01, + **kwargs, + ): + super().__init__(net=NullNet(isenergy=False), sde=sde, path=None, checkpoint=None, **kwargs) + self.sde = sde + self.hight_model = hight_model + self.lowt_model = lowt_model + self.beta_scheme = beta_scheme + self.epsilon = epsilon + + def beta(self, t: Tensor) -> Tensor: + T = (t - self.sde.t_min) / (self.sde.t_max - self.sde.t_min) + if callable(self.beta_scheme): + return self.beta_scheme(T) + elif self.beta_scheme == "linear": + return T + elif self.beta_scheme == "square": + return T**2 + elif self.beta_scheme == "sqrt": + return torch.sqrt(T) + elif "linear:" in self.beta_scheme: + return int(self.beta_scheme[self.beta_scheme.find(":") + 1 :]) * T + elif "sqrt:" in self.beta_scheme: + return torch.sqrt(int(self.beta_scheme[self.beta_scheme.find(":") + 1 :]) * T) + elif "sin:" in self.beta_scheme: + i = int(self.beta_scheme[self.beta_scheme.find(":") + 1 :]) + return torch.where(T > 1 / i, torch.ones_like(T), torch.sin(i * T * torch.pi / 2.0)) + else: + raise NotImplementedError(f"Unknown beta_scheme {self.beta_scheme}") + + def score(self, t: Tensor, x: Tensor, *args, **kwargs): + # Compute the weighted score for each model + beta = torch.clamp(self.beta(kwargs.get("t_a", t)[0]), 0.0, 1.0) + score = torch.zeros_like(x) + if beta.item() > self.epsilon: + score += self.hight_model(t, x, *args, **kwargs) * beta + if beta.item() < (1 - self.epsilon): + score += self.lowt_model(t, x, *args, **kwargs) * (1.0 - beta) + return score
diff --git a/score_models/sbm/joint.py b/score_models/sbm/joint.py new file mode 100644 index 0000000..1c499b0 --- /dev/null +++ b/score_models/sbm/joint.py @@ -0,0 +1,110 @@ +from typing import Tuple, Union + +import torch +from torch import Tensor +import numpy as np + +from . import ScoreModel +from ..architectures import NullNet + + +class JointScoreModel(ScoreModel): + """ + A score model which combines the scores from multiple models. + + This score model class allows for multiple score models to combine their + scores arbitrarily. They may share all, some, or none of the model space + with this class handling the bookkeeping. The scores from each model (where + they use the same model dimensions) are simply summed. The class may also + handle multiple inputs; internally they are combined into a single + concatenated ``x`` vector. When passed to the models, the ``x`` vector is + split into the appropriate segments ``x_0, x_1, ..., x_n`` and each one is + converted into the expected shape (defined by the ``x_shapes`` argument). + + Usage: a list of M models is passed to the constructor; these models will be + used to compute the score. The x vector is split into N segments defined by + the x_shapes argument (N does not need to equal M). The model_uses argument + identifies which segments of x (defined by x_shapes) each model uses. For + example, imagine three models [M1, M2, M3], and x_shapes = [(2, 3), (3, 4), + (4, 5)], and model_uses = [[0, 1], [0, 2], None]. This means that M1 uses + the first two segments of x (M1(x1, x2)), M2 uses the first and third + segments of x (M2(x1, x3)), and M3 uses the full x vector (as a flat tensor + M3(x)). The score will be broken up into similar segments and summed, then + returned as a flat tensor like x. + + Args: + sde: The SDE that the score model is associated with. models: A list of + score models.
x_shapes: A list of shapes for the x vectors that the + models expect. + These are the shapes that the flat-concatenated ``x`` vector will be + split into. + model_uses: A list of lists of integers, where each list is the indices + of the x vectors corresponding to ``x_shapes`` that each model uses. + If None, the model will be passed the full ``x`` vector. + """ + + def __init__( + self, + models: Tuple[ScoreModel], + x_shapes: Tuple[Tuple[int]], + model_uses: Tuple[Union[None, Tuple[int]]], + **kwargs + ): + assert all( + isinstance(m.sde, models[0].sde.__class__) for m in models + ), "All models must share the same SDE." + super().__init__( + net=NullNet(isenergy=False), sde=models[0].sde, path=None, checkpoint=None, **kwargs + ) + self.models = models + self.x_shapes = x_shapes + self.model_uses = model_uses + + def split_x(self, x: Tensor): + B, D = x.shape + + # Split x into segments + sub_x = [] + place = 0 + for shapex in self.x_shapes: + sub_x.append(x[..., place : place + np.prod(shapex)].reshape(B, *shapex)) + place += np.prod(shapex) + assert place == D + return sub_x + + def join_x(self, sub_x: Tuple[Tensor]): + B = sub_x[0].shape[0] + return torch.cat(tuple(S.reshape(B, -1) for S in sub_x), dim=-1) + + @property + def xsize(self): + return sum(np.prod(shapex) for shapex in self.x_shapes) + + def score(self, t: Tensor, x: Tensor, *args, **kwargs): + # Split x into segments + sub_x = self.split_x(x) + + # Compute / store the score for each model + scores = list(torch.zeros_like(sx) for sx in sub_x) + for i, model in enumerate(self.models): + # Select the segments from x that this model uses + if self.model_uses[i] is None: + model_x = x + model_score = model(t, model_x, *args, **kwargs) + else: + model_x = tuple(sub_x[j] for j in self.model_uses[i]) + # Compute the score for this model + model_score = model(t, *model_x, *args, **kwargs) + + # Ensure the score is a tuple + if not isinstance(model_score, tuple): + model_score = (model_score,) + # Add the score to the appropriate segments of x (now stored in scores) + if self.model_uses[i] is None: + for j, score in enumerate(self.split_x(model_score[0])): + scores[j] += score + else: + for j, score in zip(self.model_uses[i], model_score): + scores[j] += score + + return self.join_x(scores)
diff --git a/score_models/sbm/mvg.py b/score_models/sbm/mvg.py new file mode 100644 index 0000000..7d1654f --- /dev/null +++ b/score_models/sbm/mvg.py @@ -0,0 +1,239 @@ +from typing import Optional + +import torch +from torch import Tensor +import numpy as np + +from ..sde import SDE +from .energy_model import EnergyModel +from .score_model import ScoreModel +from ..architectures import NullNet + + +class MVGEnergyModel(EnergyModel): + """ + A multivariate gaussian score model. + + A multivariate gaussian energy model, which can be used as a single + multivariate gaussian or a mixture of them. If the ``mean`` is 1D, then it + is a single gaussian; if it is 2D, then it is a mixture of gaussians. + + Args: + + sde: The SDE that the score model is associated with. + + mean: The mean of the gaussian(s). + + cov: The covariance of the gaussian(s). If cov.shape == mean.shape, this + is a diagonal covariance. Otherwise, it is a full covariance matrix + where if mean has shape (M, *D) (or just (*D,) for single MVG) then + the covariance matrix should have shape (M, prod(D), prod(D)) (or + just (prod(D), prod(D)) for single MVG). + + w: The weights of the mixture of gaussians (if a mixture). Default is + equal weight.
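For reference, the time-perturbed Gaussian underlying both MVG models is N(mu_t * mean, mu_t^2 * cov + sigma_t^2 * I), whose score has a closed form; a minimal sketch with illustrative numbers:

import torch

mean, cov = torch.zeros(4), torch.eye(4)   # a single full-covariance Gaussian
mu_t, sigma_t = 0.9, 0.5                   # SDE scalars at some time t
cov_t = mu_t**2 * cov + sigma_t**2 * torch.eye(4)
x = torch.randn(4)
score = torch.linalg.inv(cov_t) @ (mu_t * mean - x)   # exact Gaussian score at time t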
+ """ + + def __init__(self, sde: SDE, mean: Tensor, cov: Tensor, w: Optional[Tensor] = None, **kwargs): + super().__init__(net=NullNet(isenergy=True), sde=sde, path=None, checkpoint=None, **kwargs) + self.sde = sde + self.mean = mean + self.cov = cov + self.diag = mean.shape == cov.shape + if mean.dim() == 1: + self.mixture = False + self.w = torch.tensor(1.0, dtype=self.mean.dtype, device=self.mean.device) + elif mean.dim() == 2: + self.mixture = True + if w is None: + self.w = ( + torch.ones(self.mean.shape[0], dtype=self.mean.dtype, device=self.mean.device) + / self.mean.shape[0] + ) + else: + self.w = w + else: + raise ValueError("mean must be 1D (single Gaussian) or 2D (mixture of Gaussians)") + + @property + def diag(self): + return self._diag + + @diag.setter + def diag(self, value): + self._diag = value + self.ll = self.ll_diag if value else self.ll_full + + @property + def mixture(self): + return self._mixture + + @mixture.setter + def mixture(self, value): + self._mixture = value + self.energy = self.energy_mixture if value else self.energy_single + + def ll_diag(self, t: Tensor, x: Tensor, mu: Tensor, cov: Tensor, w: Tensor): + r = (x.squeeze() - self.sde.mu(t) * mu).flatten() + cov_t = self.sde.mu(t) ** 2 * cov + self.sde.sigma(t) ** 2 + icov = 1 / cov_t + logdet = torch.sum(torch.log(2 * torch.pi * cov_t)) + ll = -0.5 * torch.sum(r**2 * icov) - 0.5 * logdet + torch.log(w) + return ll + + def ll_full(self, t: Tensor, x: Tensor, mu: Tensor, cov: Tensor, w: Tensor): + r = (x.squeeze() - self.sde.mu(t) * mu).flatten() + cov_t = self.sde.mu(t) ** 2 * cov + self.sde.sigma(t) ** 2 * torch.eye( + cov.shape[-1], dtype=cov.dtype, device=cov.device + ) + icov = torch.linalg.inv(cov_t) + logdet = torch.logdet(2 * torch.pi * cov_t) + ll = -0.5 * (r @ icov @ r.reshape(1, -1).T) - 0.5 * logdet + torch.log(w) + return ll + + def energy_single(self, t: Tensor, x: Tensor, *args, **kwargs): + """MVG energy for a single gaussian""" + return -self.ll(t, x, self.mean, self.cov, self.w) + + def energy_mixture(self, t: Tensor, x: Tensor, *args, **kwargs): + """MVG energy for a mixture of gaussians""" + ll = torch.vmap(self.ll, in_dims=(None, None, 0, 0, 0))(t, x, self.mean, self.cov, self.w) + return -torch.logsumexp(ll, dim=0) + + def unnormalized_energy(self, t: Tensor, x: Tensor, *args, **kwargs): + raise RuntimeError("Unnormalized energy should not be called for MVG models.") + + def reparametrized_score(self, t, x, *args, **kwargs): + raise RuntimeError("Reparametrized score should not be called for MVG models.") + + +class MVGScoreModel(ScoreModel): + """ + A multivariate gaussian score model. + + A multivariate gaussian score model, which can be used as a single + multivariate gaussian or a mixture of them. Make sure to set mixture=True if + using a mixture model. The gaussians may also be diagonal or have full + covariance matrices, this will be automatically determined if the shape of + ``cov`` is equal to the shape of ``mean``. + + Args: + + sde: The SDE that the score model is associated with. + + mean: The mean of the gaussian(s). + + cov: The covariance of the gaussian(s). If cov.shape == mean.shape, this + is a diagonal covariance. Otherwise, it is a full covariance matrix + where if mean has shape (M, *D) (or just (*D,) for single MVG) then + the covariance matrix should have shape (M, prod(D), prod(D)) (or + just (prod(D), prod(D)) for single MVG). + + mixture: Whether the model is a mixture of gaussians. Default is False. 
+ + w: The weights of the mixture of gaussians (if a mixture). Default is + equal weight. + """ + + def __init__( + self, + sde: SDE, + mean: Tensor, + cov: Tensor, + mixture: bool = False, + w: Optional[Tensor] = None, + **kwargs + ): + super().__init__(net=NullNet(isenergy=False), sde=sde, path=None, checkpoint=None, **kwargs) + self.sde = sde + self.mean = mean + self.cov = cov + self.diag = mean.shape == cov.shape + self.mixture = mixture + if mixture: + assert mean.dim() > 1, "mean must be at least 2D for a mixture of Gaussians" + if w is None: + self.w = torch.ones(mean.shape[0], dtype=mean.dtype, device=mean.device) + else: + self.w = w + self.w = self.w / self.w.sum() + + @property + def score_fn(self): + if self.diag and self.mixture: + return self.score_diag_mixture + elif self.diag and not self.mixture: + return self.score_diag_single + elif not self.diag and self.mixture: + return self.score_full_mixture + return self.score_full_single + + def score_diag_single(self, t: Tensor, x: Tensor, *args, cov: Tensor, icov: Tensor, **kwargs): + mu_t = self.sde.mu(t[0]) + r = mu_t * self.mean - x + score = icov * r + return score + + def _gamma_diag(self, r, cov, icov): + B, K, *D = r.shape + logdet = torch.sum( + torch.log(2 * torch.pi * cov), dim=tuple(range(1, len(cov.shape))) + ).reshape(1, K) + logw = torch.log(self.w).reshape(1, K) + logd = torch.sum(r**2 * icov.unsqueeze(0), dim=tuple(range(2, len(r.shape)))) + ll = -0.5 * logd - 0.5 * logdet + logw + gamma = torch.exp(ll - torch.logsumexp(ll, dim=1, keepdim=True)) + return gamma + + def score_diag_mixture(self, t: Tensor, x: Tensor, *args, cov: Tensor, icov: Tensor, **kwargs): + B, *D = x.shape + mu_t = self.sde.mu(t[0]) + r = mu_t * self.mean.unsqueeze(0) - x.unsqueeze(1) + gamma = self._gamma_diag(r, cov, icov).reshape(B, -1, *[1] * len(D)) + score = torch.sum(gamma * icov.unsqueeze(0) * r, dim=1) + return score + + def score_full_single(self, t: Tensor, x: Tensor, *args, cov: Tensor, icov: Tensor, **kwargs): + mu_t = self.sde.mu(t[0]) + r = mu_t * self.mean - x + score = torch.vmap(lambda r_i: icov @ r_i.reshape(-1, 1))(r) + return score.reshape(*x.shape) + + def _gamma_full(self, r, cov, icov): + B, K, *D = r.shape + logdet = torch.logdet(2 * torch.pi * cov).reshape(1, K) + logw = torch.log(self.w).reshape(1, K) + sub_logd = torch.vmap( + lambda r_i_k, icov_k: r_i_k.reshape(1, -1) @ icov_k @ r_i_k.reshape(-1, 1) + ) + logd = torch.vmap(sub_logd, in_dims=(0, None))(r, icov).reshape(B, K) + ll = -0.5 * logd - 0.5 * logdet + logw + gamma = torch.exp(ll - torch.logsumexp(ll, dim=1, keepdim=True)) + return gamma + + def score_full_mixture(self, t: Tensor, x: Tensor, *args, cov: Tensor, icov: Tensor, **kwargs): + B, *D = x.shape + mu_t = self.sde.mu(t[0]) + r = mu_t * self.mean.unsqueeze(0) - x.unsqueeze(1) + gamma = self._gamma_full(r, cov, icov).reshape(B, -1, *[1] * len(D)) + sub_score = torch.vmap( + lambda r_i_k, icov_k, gamma_k: gamma_k * icov_k @ r_i_k.reshape(-1, 1) + ) + score = torch.vmap(sub_score, in_dims=(0, None, 0))(r, icov, gamma) + return score.sum(dim=1).reshape(*x.shape) + + def score(self, t, x, *args, **kwargs): + mu_t = self.sde.mu(t[0]) + sigma_t = self.sde.sigma(t[0]) + if self.diag: + cov = mu_t**2 * self.cov + sigma_t**2 + icov = 1 / cov + else: + cov = mu_t**2 * self.cov + sigma_t**2 * torch.eye( + self.cov.shape[-1], dtype=self.cov.dtype, device=self.cov.device + ) + icov = torch.linalg.inv(cov) + return self.score_fn(t, x, *args, cov=cov, icov=icov, **kwargs) + + def reparametrized_score(self, t, x, 
*args, **kwargs): + raise RuntimeError("Reparametrized score should not be called for MVG models.")
diff --git a/score_models/sbm/sample.py b/score_models/sbm/sample.py new file mode 100644 index 0000000..21a9a2a --- /dev/null +++ b/score_models/sbm/sample.py @@ -0,0 +1,64 @@ +import torch +from torch import Tensor + +from ..sde import SDE +from .score_model import ScoreModel +from ..architectures import NullNet + + +class SampleScoreModel(ScoreModel): + """ + A score model based on individual samples. + + This score model class is based on individual samples. The score at a given + point is the weighted average of the scores of the individual samples. The scores + are calculated as the difference between the (time-scaled) sample and the point, weighted + by the inverse of the variance of the noise at that point: + + .. math:: + + W_i = \\exp\\left(-\\frac{(x - \\mu_t x_i)^2}{2(\\sigma(t)^2 + \\sigma_{\\min}^2)}\\right) \\ + \\nabla_x \\log p(x) = \\frac{1}{\\sum_i W_i} \\sum_i W_i \\frac{\\mu_t x_i - x}{\\sigma(t)^2 + \\sigma_{\\min}^2} + + Args: + sde (SDE): The stochastic differential equation for the score model. + samples (Tensor): The samples to use for the score model. + sigma_min (float, optional): The minimum value of the standard deviation of the noise term. Defaults to 0.0. + + """ + + def __init__( + self, + sde: SDE, + samples: Tensor, + sigma_min: Tensor = 0.0, + **kwargs, + ): + super().__init__(net=NullNet(isenergy=False), sde=sde, path=None, checkpoint=None, **kwargs) + self.sde = sde + self.samples = samples + self.sigma_min = sigma_min + + @torch.no_grad() + def score(self, t: Tensor, x: Tensor, *args, **kwargs): + B, *D = x.shape + K, *D = self.samples.shape + sigma_t = self.sde.sigma(t[0]) + mu_t = self.sde.mu(t[0]) + W = torch.sum( + -0.5 + * (self.samples.unsqueeze(0) * mu_t - x.unsqueeze(1)) ** 2 # B, K, *D + / (sigma_t**2 + self.sigma_min**2), + dim=tuple(range(2, 2 + len(D))), + keepdim=True, + ) # B, K, *[1]*len(D) + W = torch.exp(W - torch.max(W, dim=1, keepdim=True).values) + W = torch.nan_to_num(W) + W = W / torch.sum(W, dim=1, keepdim=True) + scores = torch.sum( + W + * (self.samples.unsqueeze(0) * mu_t - x.unsqueeze(1)) + / (sigma_t**2 + self.sigma_min**2), + dim=1, + ) # B, *D + return scores
diff --git a/score_models/sbm/score_model.py b/score_models/sbm/score_model.py index 30134cc..987b6cd 100644 --- a/score_models/sbm/score_model.py +++ b/score_models/sbm/score_model.py @@ -1,17 +1,13 @@ -from typing import Union, Optional, Callable -from abc import abstractmethod +from typing import Union, Optional, Literal -from torch.func import grad -from torch import vmap, Tensor +from torch import Tensor from torch.nn import Module -import numpy as np import torch from .base import Base from ..sde import SDE from ..losses import dsm -from ..ode import probability_flow_ode, divergence_with_hutchinson_trick -from ..sde import euler_maruyama_method +from ..solver import Solver, ODESolver from ..utils import DEVICE @@ -20,166 +16,143 @@ class ScoreModel(Base): def __init__( - self, - net: Optional[Union[str, Module]] = None, - sde: Optional[Union[str, SDE]] = None, - path: Optional[str] = None, - checkpoint: Optional[int] = None, - hessian_trace_model: Optional[Union[str, Module]] = None, - device=DEVICE, - **hyperparameters - ): + self, + net: Optional[Union[str, Module]] = None, + sde: Optional[Union[str, SDE]] = None, + path: Optional[str] = None, + checkpoint: Optional[int] = None, + hessian_trace_model: Optional[Union[str, Module]] = None, + device=DEVICE, + **hyperparameters + ): super().__init__(net,
sde, path, checkpoint=checkpoint, device=device, **hyperparameters) - if hessian_trace_model is not None: - self.hessian_trace_model = hessian_trace_model - else: - self.hessian_trace_model = self.divergence - + self.hessian_trace_model = hessian_trace_model + def loss(self, x, *args, step: int) -> Tensor: return dsm(self, x, *args) - def reparametrized_score(self, t, x, *args) -> Tensor: + def reparametrized_score(self, t, x, *args, **kwargs) -> Tensor: """ - Numerically stable reparametrization of the score function for the DSM loss. + Numerically stable reparametrization of the score function for the DSM loss. """ - return self.net(t, x, *args) + return self.net(t, x, *args, **kwargs) - def forward(self, t, x, *args): + def forward(self, t, x, *args, **kwargs): """ Overwrite the forward method to return the score function instead of the model output. - This also affects the __call__ method of the class, meaning that + This also affects the __call__ method of the class, meaning that ScoreModel(t, x, *args) is equivalent to ScoreModel.forward(t, x, *args). """ - return self.score(t, x, *args) - - def score(self, t, x, *args) -> Tensor: + return self.score(t, x, *args, **kwargs) + + def score(self, t, x, *args, **kwargs) -> Tensor: _, *D = x.shape - sigma_t = self.sde.sigma(t).view(-1, *[1]*len(D)) - epsilon_theta = self.reparametrized_score(t, x, *args) + sigma_t = self.sde.sigma(t).view(-1, *[1] * len(D)) + epsilon_theta = self.reparametrized_score(t, x, *args, **kwargs) return epsilon_theta / sigma_t - - def ode_drift(self, t, x, *args) -> Tensor: - """ - Compute the drift of the ODE defined by the score function. - """ - f = self.sde.drift(t, x) - g = self.sde.diffusion(t, x) - f_tilde = f - 0.5 * g**2 * self.score(t, x, *args) - return f_tilde - - def divergence(self, t, x, *args, **kwargs) -> Tensor: - """ - Compute the divergence of the drift of the ODE defined by the score function. - """ - return divergence_with_hutchinson_trick(self.ode_drift, t, x, *args, **kwargs) - - def hessian_trace(self, t, x, *args, **kwargs) -> Tensor: - """ - Compute the trace of the Hessian of the score function. - """ - return self.hessian_trace_model(t, x, *args, **kwargs) - - def log_likelihood(self, x, *args, steps, t=0., method="euler", **kwargs) -> Tensor: - """ - Compute the log likelihood of point x using the probability flow ODE, + + def log_likelihood( + self, + x, + *args, + steps, + t=0.0, + solver: Literal["euler_ode", "rk2_ode", "rk4_ode"] = "euler_ode", + **kwargs + ) -> Tensor: + """ + Compute the log likelihood of point x using the probability flow ODE, which makes use of the instantaneous change of variable formula developed by Chen et al. 2018 (arxiv.org/abs/1806.07366). See Song et al. 2020 (arxiv.org/abs/2011.13456) for usage with SDE formalism of SBM. """ - drift = self.ode_drift - hessian_trace = lambda t, x, *args: self.hessian_trace(t, x, *args, **kwargs) + B, *D = x.shape + + solver = ODESolver(self, solver=solver, **kwargs) # Solve the probability flow ODE up in temperature to time t=1. - xT, delta_log_p = probability_flow_ode( - x, - *args, - steps=steps, - drift=drift, - hessian_trace=hessian_trace, - t0=t, - t1=1., - method=method) - # Add the log likelihood of the prior at time t=1. 
- log_p = self.sde.prior(x.shape).log_prob(xT) + delta_log_p + xT, dlog_p = solver( + x, *args, steps=steps, forward=True, t_min=t, **kwargs, get_delta_logp=True + ) + + # add boundary condition PDF probability + log_p = self.sde.prior(D).log_prob(xT) + dlog_p + return log_p - - def tweedie(self, t: Tensor, x: Tensor, *args) -> Tensor: - """ - Compute the Tweedie formula for the expectation E[x0 | xt] - """ - B, *D = x.shape - mu = self.sde.mu(t).view(-1, *[1]*len(D)) - sigma = self.sde.sigma(t).view(-1, *[1]*len(D)) - return (x + sigma**2 * self.score(t, x, *args)) / mu - + @torch.no_grad() def sample( - self, - shape: tuple, - steps: int, - *args, - likelihood_score: Optional[Callable] = None, - guidance_factor: float = 1., - stopping_factor: float = np.inf, - denoise_last_step: bool = True - ) -> Tensor: + self, + shape: tuple, # TODO grab dimensions from model hyperparams if available + steps: int, + *args, + solver: Literal[ + "em_sde", "rk2_sde", "rk4_sde", "euler_ode", "rk2_ode", "rk4_ode" + ] = "em_sde", + progress_bar: bool = True, + denoise_last_step: bool = True, + **kwargs + ) -> Tensor: """ Sample from the score model by solving the reverse-time SDE using the Euler-Maruyama method. - - The initial condition is sample from the high temperature prior at time t=T. + + The initial condition is sampled from the high temperature prior at time t=T. To denoise a sample from some time t, use the denoise or tweedie method instead. - + """ B, *D = shape - likelihood_score = likelihood_score or (lambda t, x: torch.zeros_like(x)) - score = lambda t, x: self.score(t, x, *args) + guidance_factor * likelihood_score(t, x) - - # Sample from high temperature boundary condition + + solver = Solver(self, solver=solver, **kwargs) xT = self.sde.prior(D).sample([B]) - # Solve the reverse-time SDE - t, x = euler_maruyama_method( - t=self.sde.T, - xt=xT, - steps=steps, - sde=self.sde, - score=score, - stopping_factor=stopping_factor - ) - if denoise_last_step: - x = self.tweedie(t, x, *args) - return x - + x0 = solver( + xT, + *args, + steps=steps, + forward=False, + progress_bar=progress_bar, + denoise_last_step=denoise_last_step, + **kwargs + ) + + return x0 + @torch.no_grad() def denoise( - self, - t: Tensor, - xt: Tensor, - steps: int, + self, + t: Tensor, + xt: Tensor, + steps: int, + *args, + solver: Literal[ + "em_sde", "rk2_sde", "rk4_sde", "euler_ode", "rk2_ode", "rk4_ode" + ] = "em_sde", + progress_bar: bool = True, + denoise_last_step: bool = True, + **kwargs + ) -> Tensor: + """ + Denoise a given sample xt from time t by solving the reverse-time SDE with the requested solver. + + The initial condition is provided as xt at time t. + + """ + x0 = Solver(self, solver=solver, **kwargs)( + xt, *args, - epsilon: Optional[float] = None, - likelihood_score: Optional[Callable] = None, - guidance_factor: float = 1., - stopping_factor: float = np.inf - ): + t_max=t, + steps=steps, + forward=False, + progress_bar=progress_bar, + denoise_last_step=denoise_last_step, + **kwargs + ) + return x0 + + def tweedie(self, t: Tensor, x: Tensor, *args, **kwargs) -> Tensor: """ - Denoise a given sample xt at time t using the score model. - - Tweedie formula is applied after the Euler-Maruyama solver - is used to solve the reverse-time SDE.
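Illustrative calls against the refactored sampling API above; the checkpoint path and shapes are assumptions, and the solver strings follow the Literal options in the signatures.

from score_models import ScoreModel

model = ScoreModel(path="checkpoints/example_run")  # assumed existing checkpoint
samples = model.sample((4, 1, 32, 32), steps=500, solver="em_sde")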
+ Compute the Tweedie formula for the expectation E[x0 | xt] """ - likelihood_score = likelihood_score or (lambda t, x: torch.zeros_like(x)) - score = lambda t, x: self.score(t, x, *args) + guidance_factor * likelihood_score(t, x) - - # Solve the reverse-time SDE - t, x = euler_maruyama_method( - t=t, - xt=xt, - steps=steps, - sde=self.sde, - score=score, - epsilon=epsilon, - stopping_factor=stopping_factor - ) - # Apply the Tweedie formula - x = self.tweedie(t, x, *args) - return x + B, *D = x.shape + mu = self.sde.mu(t).view(-1, *[1] * len(D)) + sigma = self.sde.sigma(t).view(-1, *[1] * len(D)) + return (x + sigma**2 * self.score(t, x, *args, **kwargs)) / mu diff --git a/score_models/sbm/tweedie.py b/score_models/sbm/tweedie.py new file mode 100644 index 0000000..dea1ffa --- /dev/null +++ b/score_models/sbm/tweedie.py @@ -0,0 +1,62 @@ +from typing import Callable, Optional + +import torch +from torch.func import grad +from torch import vmap, Tensor + +from ..sde import SDE +from . import ScoreModel +from ..architectures import NullNet + + +class TweedieScoreModel(ScoreModel): + """ + Convolved likelihood score model using Tweedie's Formula. + + Based on Chung et al. 2022 (doi: 10.48550/arXiv.2209.14687) though we use + the jacobian to properly propagate the score. Uses the score of the expected + value as an approximation of the expectation of the score. + + Args: + sde: The SDE that the score model is associated with. + prior_model: The model to use for the log likelihood score. + log_likelihood: The log likelihood function to use. Should + accept signature ll(sigma_t, x, *args, **kwargs) where the args and + kwargs will be passed from the forward method. + """ + + def __init__( + self, + sde: SDE, + prior_model: ScoreModel, + log_likelihood: Optional[Callable] = None, + log_likelihood_score0: Optional[Callable] = None, + **kwargs, + ): + assert (log_likelihood is None) != ( + log_likelihood_score0 is None + ), "Either log_likelihood or log_likelihood_score0 must be provided, not both." + super().__init__(net=NullNet(isenergy=False), sde=sde, path=None, checkpoint=None, **kwargs) + self.sde = sde + self.prior_model = prior_model + if log_likelihood is not None: + self.log_likelihood = log_likelihood + else: + self.log_likelihood_score0 = log_likelihood_score0 + + def tweedie(self, t: Tensor, xt: Tensor, *args, **kwargs): + sigma_t = self.sde.sigma(t) + mu_t = self.sde.mu(t) + x0 = ( + xt + sigma_t.unsqueeze(-1) ** 2 * self.prior_model.score(t, xt, *args, **kwargs) + ) / mu_t.unsqueeze(-1) + return x0 + + def log_likelihood_score0(self, t: Tensor, x0: Tensor, *args, **kwargs): + sigma_t = self.sde.sigma(t[0]) + return vmap(grad(lambda x: self.log_likelihood(sigma_t, x, *args, **kwargs).squeeze()))(x0) + + def score(self, t: Tensor, x: Tensor, *args, **kwargs): + x0, vjp_func = torch.func.vjp(lambda xt: self.tweedie(t, xt, *args, **kwargs), x) + score0 = self.log_likelihood_score0(t, x0, *args, **kwargs) + return vjp_func(score0)[0] diff --git a/score_models/sde/sde.py b/score_models/sde/sde.py index 570bffb..1e250b7 100644 --- a/score_models/sde/sde.py +++ b/score_models/sde/sde.py @@ -6,39 +6,55 @@ from torch.distributions import Distribution from torch import Tensor + class SDE(ABC): """ Abstract class for some SDE info important for the score models """ - def __init__(self, T=1.0, epsilon=0., **kwargs): + + def __init__(self, T=1.0, epsilon=0.0, **kwargs): """ - The time index in the diffusion is defined in the range [epsilon, T]. 
+ The time index in the diffusion is defined in the range [epsilon, T]. """ super().__init__() - self.T = T - self.epsilon = epsilon - self.hyperparameters = { - "T": T, - "epsilon": epsilon - } + self.T = kwargs.get("t_max", T) + self.epsilon = kwargs.get("t_min", epsilon) + self.hyperparameters = {"T": T, "epsilon": epsilon} + + @property + def t_min(self): + return self.epsilon + + @t_min.setter + def t_min(self, value): + self.epsilon = value + + @property + def t_max(self): + return self.T + + @t_max.setter + def t_max(self, value): + self.T = value @abstractmethod - def mu(self, t) -> Tensor: - ... - + def mu(self, t) -> Tensor: ... + @abstractmethod - def sigma(self, t) -> Tensor: - ... - + def sigma(self, t) -> Tensor: ... + + @abstractmethod + def t_sigma(self, sigma) -> Tensor: ... + @abstractmethod def prior(self, shape) -> Distribution: """ High temperature prior distribution. Typically a Gaussian distribution. """ ... - + @abstractmethod - def diffusion(self, t:Tensor, x: Tensor) -> Tensor: + def diffusion(self, t: Tensor, x: Tensor) -> Tensor: """ Diffusion coefficient of the SDE. """ @@ -53,18 +69,18 @@ def drift(self, t, x) -> Tensor: def perturbation_scalars(self, t) -> Tuple[Tensor, Tensor]: return self.mu(t), self.sigma(t) - + def perturbation_kernel(self, t: Tensor, x0: Tensor) -> Tensor: """ Sample from the marginal at time t using the Gaussian perturbation kernel and the reparametrization trick. """ _, *D = x0.shape - mu_t = self.mu(t).view(-1, *[1]*len(D)) - sigma_t = self.sigma(t).view(-1, *[1]*len(D)) + mu_t = self.mu(t).view(-1, *[1] * len(D)) + sigma_t = self.sigma(t).view(-1, *[1] * len(D)) z = torch.randn_like(x0) return mu_t * x0 + sigma_t * z - + # Backward compatibility def sample_time_marginal(self, t: Tensor, x0: Tensor) -> Tensor: return self.perturbation_kernel(t, x0) diff --git a/score_models/sde/tsvesde.py b/score_models/sde/tsvesde.py index 95b6f9f..95cf138 100644 --- a/score_models/sde/tsvesde.py +++ b/score_models/sde/tsvesde.py @@ -11,57 +11,64 @@ class TSVESDE(SDE): def __init__( - self, - sigma_min: float, - sigma_max: float, - t_star: float, - beta: float, - T:float=1.0, - epsilon:float=0.0, - beta_fn="relu", - alpha=30, # silu and hardswish recaling of t - **kwargs + self, + sigma_min: float, + sigma_max: float, + t_star: float, + beta: float, + T: float = 1.0, + epsilon: float = 0.0, + beta_fn="relu", + alpha=30, # silu and hardswish recaling of t + **kwargs ): """ - Truncated Scaled Variance Exploding stochastic differential equation - + Truncated Scaled Variance Exploding stochastic differential equation + Args: sigma_min (float): The minimum value of the standard deviation of the noise term. sigma_max (float): The maximum value of the standard deviation of the noise term. t_star (float): Time at which to truncate the VE SDE and start the scaled VE. - beta (float): Slope of the scale SDE, and also its drift (akin to the VPSDE). + beta (float): Slope of the scale SDE, and also its drift (akin to the VPSDE). T (float, optional): The time horizon for the VESDE. Defaults to 1.0. device (str, optional): The device to use for computation. Defaults to DEVICE. 
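A numerical sketch of the truncated scaled VE sigma schedule implemented below, using the "relu" beta_fn and T = 1; all constants are illustrative.

import numpy as np
import torch
import torch.nn.functional as F

sigma_min, sigma_max, t_star, beta = 1e-2, 100.0, 0.5, 20.0
t = torch.linspace(0.0, 1.0, 5)
beta_fn = -beta * F.relu(t - t_star)   # truncation kicks in for t > t_star
sigma = torch.exp(beta_fn + (np.log(sigma_max) - np.log(sigma_min)) * t + np.log(sigma_min))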
""" - super().__init__(T, epsilon) + super().__init__(T, epsilon, **kwargs) self.sigma_min = sigma_min self.sigma_max = sigma_max self.beta = beta self.t_star = t_star - self.hyperparameters.update({ - "sigma_min": sigma_min, - "sigma_max": sigma_max, - "t_star": t_star, - "beta": beta - }) - + self.hyperparameters.update( + {"sigma_min": sigma_min, "sigma_max": sigma_max, "t_star": t_star, "beta": beta} + ) + if beta_fn == "relu": - self.beta_fn = lambda t: - self.beta * F.relu(t/self.T - self.t_star) + self.beta_fn = lambda t: -self.beta * F.relu(t / self.T - self.t_star) elif beta_fn == "swish" or beta_fn == "silu": - self.beta_fn = lambda t: - self.beta * F.silu(alpha*(t/self.T - self.t_star))/alpha + self.beta_fn = lambda t: -self.beta * F.silu(alpha * (t / self.T - self.t_star)) / alpha elif beta_fn == "hardswish": - self.beta_fn = lambda t: - self.beta * F.hardswish(alpha*(t/self.T - self.t_star))/alpha + self.beta_fn = ( + lambda t: -self.beta * F.hardswish(alpha * (t / self.T - self.t_star)) / alpha + ) self.beta_fn_dot = vmap(grad(self.beta_fn)) - + def sigma(self, t: Tensor) -> Tensor: """ Numerically stable formula for sigma """ smin = np.log(self.sigma_min) smax = np.log(self.sigma_max) - log_coeff = self.beta_fn(t) + (smax - smin) * t/self.T + smin + log_coeff = self.beta_fn(t) + (smax - smin) * t / self.T + smin return torch.exp(log_coeff) - + + def t_sigma(self, sigma: Tensor) -> Tensor: + """ + Inverse of the sigma function. Should give the time at which the kernel has standard deviation sigma. + """ + raise NotImplementedError( + "Inverse of the sigma function is not implemented for the TSVESDE." + ) + def mu(self, t: Tensor) -> Tensor: """ Piecewise continuous scale function that takes a VE at t < t_star and @@ -69,19 +76,18 @@ def mu(self, t: Tensor) -> Tensor: still exploding but with a logarihmic slope reduced by the beta hyperparameter. """ return torch.exp(self.beta_fn(t)) - + def prior(self, shape, device=DEVICE): - mu = torch.zeros(shape).to(device) - sigma_max = np.exp(-self.beta * (1. 
- self.t_star) + np.log(self.sigma_max)) + mu = torch.zeros(shape).to(device) + sigma_max = np.exp(-self.beta * (1.0 - self.t_star) + np.log(self.sigma_max)) return Independent(Normal(loc=mu, scale=sigma_max, validate_args=False), len(shape)) - + def diffusion(self, t: Tensor, x: Tensor) -> Tensor: - _, *D = x.shape # broadcast diffusion coefficient to x shape - # Analytical derivative of the sigma**2 function, square rooted at the end + _, *D = x.shape # broadcast diffusion coefficient to x shape + # Analytical derivative of the sigma**2 function, square rooted at the end prefactor = np.sqrt(2 * (np.log(self.sigma_max) - np.log(self.sigma_min))) - return prefactor * self.sigma(t).view(-1, *[1]*len(D)) + return prefactor * self.sigma(t).view(-1, *[1] * len(D)) def drift(self, t: Tensor, x: Tensor) -> Tensor: _, *D = x.shape - return self.beta_fn_dot(t).view(-1, *[1]*len(D)) * x - + return self.beta_fn_dot(t).view(-1, *[1] * len(D)) * x diff --git a/score_models/sde/vesde.py b/score_models/sde/vesde.py index b888631..c31b555 100644 --- a/score_models/sde/vesde.py +++ b/score_models/sde/vesde.py @@ -8,41 +8,38 @@ class VESDE(SDE): def __init__( - self, - sigma_min: float, - sigma_max: float, - T:float=1.0, - epsilon:float=0.0, - **kwargs + self, sigma_min: float, sigma_max: float, T: float = 1.0, epsilon: float = 0.0, **kwargs ): """ - Variance Exploding stochastic differential equation - + Variance Exploding stochastic differential equation + Args: sigma_min (float): The minimum value of the standard deviation of the noise term. sigma_max (float): The maximum value of the standard deviation of the noise term. T (float, optional): The time horizon for the VESDE. Defaults to 1.0. device (str, optional): The device to use for computation. Defaults to DEVICE. """ - super().__init__(T, epsilon) + super().__init__(T, epsilon, **kwargs) self.sigma_min = sigma_min self.sigma_max = sigma_max - self.hyperparameters.update({ - "sigma_min": sigma_min, - "sigma_max": sigma_max - }) + self.hyperparameters.update({"sigma_min": sigma_min, "sigma_max": sigma_max}) def mu(self, t: Tensor) -> Tensor: return torch.ones_like(t) def sigma(self, t: Tensor) -> Tensor: - return self.sigma_min * (self.sigma_max / self.sigma_min) ** (t/self.T) - + return self.sigma_min * (self.sigma_max / self.sigma_min) ** (t / self.T) + + def t_sigma(self, sigma: Tensor) -> Tensor: + return torch.log(torch.as_tensor(sigma / self.sigma_min, device=DEVICE)) / torch.log( + torch.as_tensor(self.sigma_max / self.sigma_min, device=DEVICE) + ) + def diffusion(self, t: Tensor, x: Tensor) -> Tensor: - _, *D = x.shape # broadcast diffusion coefficient to x shape - # Analytical derivative of the sigma**2 function, square rooted at the end + _, *D = x.shape # broadcast diffusion coefficient to x shape + # Analytical derivative of the sigma**2 function, square rooted at the end prefactor = np.sqrt(2 * (np.log(self.sigma_max) - np.log(self.sigma_min))) - return prefactor * self.sigma(t).view(-1, *[1]*len(D)) + return prefactor * self.sigma(t).view(-1, *[1] * len(D)) def drift(self, t: Tensor, x: Tensor) -> Tensor: return torch.zeros_like(x) diff --git a/score_models/sde/vpsde.py b/score_models/sde/vpsde.py index 9261c88..3924405 100644 --- a/score_models/sde/vpsde.py +++ b/score_models/sde/vpsde.py @@ -20,7 +20,7 @@ def __init__( T: float = 1.0, epsilon: float = 1e-3, schedule: Literal["cosine", "linear"] = "linear", - **kwargs + **kwargs, ): """ Args: @@ -29,76 +29,97 @@ def __init__( T (float, optional): The time horizon for the VPSDE. 
diff --git a/score_models/sde/vpsde.py b/score_models/sde/vpsde.py index 9261c88..3924405 100644 --- a/score_models/sde/vpsde.py +++ b/score_models/sde/vpsde.py @@ -20,7 +20,7 @@ def __init__( T: float = 1.0, epsilon: float = 1e-3, schedule: Literal["cosine", "linear"] = "linear", - **kwargs + **kwargs, ): """ Args: @@ -29,76 +29,97 @@ def __init__( T (float, optional): The time horizon for the VPSDE. Defaults to 1.0. epsilon (float, optional): The initial time for the VPSDE. Defaults to 1e-3. schedule (str, optional): The VP noise schedule. Defaults to "cosine". - + Notes: - The "cosine" schedule is the one defined in Nichol & Dhariwal 2021. (https://arxiv.org/abs/2102.09672) - but reformulated in continuous time. beta_max controls the clipping of the gradient to avoid + but reformulated in continuous time. beta_max controls the clipping of the gradient to avoid numerical instability as t -> T. - - The "linear" schedule is the original noise schedule from Ho et al. 2020 and Song et al. 2021. + - The "linear" schedule is the original noise schedule from Ho et al. 2020 and Song et al. 2021. See equation (33) in Song et al 2020. (https://arxiv.org/abs/2011.13456). - Suggest making beta_max much larger for the cosine schedule to avoid sharp deviations in the mu function. After all, I am not using a manual clipping of beta, rather I make a patchwork between cosine and a linear schedule. """ - super().__init__(T, epsilon) + super().__init__(T, epsilon, **kwargs) self.beta_min = beta_min self.beta_max = beta_max - self.hyperparameters.update({ - "beta_min": beta_min, - "beta_max": beta_max, - "schedule": schedule - }) - + self.hyperparameters.update( + {"beta_min": beta_min, "beta_max": beta_max, "schedule": schedule} + ) + if schedule == "cosine": + def beta_primitive(t: Tensor, beta_max, *args) -> Tensor: """ See equation (17) in Nichol & Dhariwal 2021. (https://arxiv.org/abs/2102.09672). The primitive of the beta function is -log(\bar{\alpha}) in their notation. - + To implement the clipping discussed in their paper, we instead use beta_max to control the maximum drift value in the diffusion. The derivative of -log(\bar{\alpha}) is beta(t) = pi * tan(pi*t/2), which we can invert to get the time t = 2/pi * arctan(beta_max/pi) at which the drift reaches beta_max. """ + # fmt: off return torch.where( t < 2/np.pi * np.arctan(beta_max / np.pi), # analytical inversion of the beta schedule - 2 * torch.log(torch.cos(PI_OVER_2 * t)), # Cosine schedule for the primitive of beta beta_max * t, # Linear schedule for regime where cosine is clipped - ) + ) + # fmt: on + + def inv_beta_primitive(beta: Tensor, beta_max, *args) -> Tensor: + """ + The inverse of the beta primitive function. + """ + return torch.where( + beta < beta_max * 2 / np.pi * np.arctan(beta_max / np.pi), + 2 / np.pi * torch.arccos(torch.exp(-0.5 * beta)), + beta / beta_max, + ) elif schedule == "linear": + def beta_primitive(t: Tensor, beta_max, beta_min) -> Tensor: """ See equation (33) in Song et al 2020.
(https://arxiv.org/abs/2011.13456) """ return 0.5 * (beta_max - beta_min) * t**2 + beta_min * t - + + def inv_beta_primitive(beta: Tensor, beta_max, beta_min) -> Tensor: + return (torch.sqrt(beta_min**2 + 2 * (beta_max - beta_min) * beta) - beta_min) / ( + beta_max - beta_min + ) + else: raise ValueError(f"Unknown noise schedule {schedule}") self._beta_primitive = beta_primitive - + def beta_primitive(self, t: Tensor) -> Tensor: - return self._beta_primitive(t/self.T, self.beta_max, self.beta_min) - + return self._beta_primitive(t / self.T, self.beta_max, self.beta_min) + def beta(self, t: Tensor): return vmap(grad(self.beta_primitive))(t) def mu(self, t: Tensor) -> Tensor: - return torch.exp( - 0.5 * self.beta_primitive(t)) + return torch.exp(-0.5 * self.beta_primitive(t)) def sigma(self, t: Tensor) -> Tensor: - return (1 - self.mu(t)**2).sqrt() - + return (1 - self.mu(t) ** 2).sqrt() + + def t_sigma(self, sigma: Tensor) -> Tensor: + beta = -2 * torch.log(torch.sqrt(1 - sigma**2)) + return self._inv_beta_primitive(beta, self.beta_max, self.beta_min) * self.T + def prior(self, shape, device=DEVICE): mu = torch.zeros(shape).to(device) - return Independent(Normal(loc=mu, scale=1., validate_args=False), len(shape)) + return Independent(Normal(loc=mu, scale=1.0, validate_args=False), len(shape)) def diffusion(self, t: Tensor, x: Tensor) -> Tensor: _, *D = x.shape - beta = self.beta(t).view(-1, *[1]*len(D)) + beta = self.beta(t).view(-1, *[1] * len(D)) return beta.sqrt() def drift(self, t: Tensor, x: Tensor) -> Tensor: _, *D = x.shape - beta = self.beta(t).view(-1, *[1]*len(D)) - return - 0.5 * beta * x + beta = self.beta(t).view(-1, *[1] * len(D)) + return -0.5 * beta * x diff --git a/score_models/solver/__init__.py b/score_models/solver/__init__.py new file mode 100644 index 0000000..10b47ce --- /dev/null +++ b/score_models/solver/__init__.py @@ -0,0 +1,3 @@ +from .ode import * +from .sde import * +from .solver import * diff --git a/score_models/solver/ode.py b/score_models/solver/ode.py new file mode 100644 index 0000000..42d3eaa --- /dev/null +++ b/score_models/solver/ode.py @@ -0,0 +1,205 @@ +from typing import Callable, Literal, Optional + +import torch +from torch import Tensor +from torch.func import vjp +from tqdm import tqdm +from .solver import Solver + + +class ODESolver(Solver): + + @torch.no_grad() + def solve( + self, + x: Tensor, + steps: int, + forward: bool, + *args: tuple, + progress_bar: bool = True, + trace: bool = False, + kill_on_nan: bool = False, + denoise_last_step: bool = False, + time_steps: Optional[Tensor] = None, + get_delta_logp: bool = False, + hook: Optional[Callable] = None, + **kwargs, + ): + """ + Integrate the diffusion ODE forward or backward in time. + + Discretizes the ODE using the given method and integrates the ODE with + + .. math:: + x_{i+1} = x_i + \\frac{dx}{dt}(t_i, x_i) dt + + where the :math:`\\frac{dx}{dt}` is the diffusion drift of + + .. math:: + \\frac{dx}{dt} = f(t, x) - \\frac{1}{2} g(t, x)^2 s(t, x) + + where :math:`f(t, x)` is the sde drift, :math:`g(t, x)` is the sde diffusion, + and :math:`s(t, x)` is the score. + + Args: + x: Initial condition. + steps: integration discretization. + forward: Direction of integration. + *args: Additional arguments to pass to the score model. + progress_bar: Whether to display a progress bar. + trace: Whether to return the full path or just the last point. + kill_on_nan: Whether to raise an error if NaNs are encountered. + denoise_last_step: Whether to project to the boundary at the last step. 
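Stepping back to the VPSDE hunk above for a moment: the linear-schedule primitive and its inverse admit a quick numerical check. The sketch below re-implements the two closed forms standalone (`B` and `B_inv` are local stand-ins for `beta_primitive` and `inv_beta_primitive`, with throwaway values of `beta_min` and `beta_max`), and recovers `beta(t)` with the same `vmap(grad(...))` trick the class uses:

```python
import torch
from torch.func import grad, vmap

beta_min, beta_max = 0.1, 20.0

def B(t):
    # primitive of beta(t) for the linear schedule, eq. (33) in Song et al. 2020
    return 0.5 * (beta_max - beta_min) * t**2 + beta_min * t

def B_inv(b):
    # positive root of the quadratic B(t) = b
    return (torch.sqrt(beta_min**2 + 2 * (beta_max - beta_min) * b) - beta_min) / (beta_max - beta_min)

t = torch.rand(8)
assert torch.allclose(B_inv(B(t)), t, atol=1e-4)
assert torch.allclose(vmap(grad(B))(t), beta_min + (beta_max - beta_min) * t, atol=1e-4)
```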
+ time_steps: Optional time steps to use for integration. Should be a 1D tensor containing the bin edges of the + time steps. For example, if one wanted 50 steps from 0 to 1, the time steps would be ``torch.linspace(0, 1, 51)``. + get_delta_logp: Whether to return the log probability of the input x (should be used with forward=True). + hook: Optional hook function to call after each step. Will be called with the signature ``hook(t, x, sde, score, solver)``. + """ + B, *D = x.shape + + # Step + T, dT = self.time_steps(steps, B, D, time_steps=time_steps, forward=forward, **kwargs) + + # log P(xt) if requested + dlogp = torch.zeros(B, device=x.device, dtype=x.dtype) + if self.score.hessian_trace_model is None: + ht = self.divergence_hutchinson_trick + else: + ht = ( + lambda t, x, args, dt, **kwargs: self.score.hessian_trace_model( + t, x, *args, **kwargs + ) + * dt + ) + dp = ht if get_delta_logp else lambda *args, **kwargs: 0.0 + + # Trace ODE path if requested + if trace: + path = [x] + + # Progress bar + pbar = tqdm(tuple(zip(T, dT))) if progress_bar else zip(T, dT) + for t, dt in pbar: + if progress_bar: + pbar.set_description( + f"t={t[0].item():.1g} | sigma={self.sde.sigma(t)[0].item():.1g} | " + f"x={x.mean().item():.1g}\u00B1{x.std().item():.1g}" + ) + + # Check for NaNs + if kill_on_nan and torch.any(torch.isnan(x)): + raise ValueError("NaN encountered in ODE solver") + + # Update x + step = self.step(t, x, args, dt, self.dx, dp, **kwargs) + x = x + step[0] + dlogp = dlogp + step[1] + + if trace: + path.append(x) + + # Call hook + if hook is not None: + hook(t, x, self.sde, self.score, self) + + # Project to boundary if denoising + if denoise_last_step and not forward: + x = self.tweedie(t, x, *args, **kwargs) + if trace: + path[-1] = x + + # Return path or final x + if trace: + if get_delta_logp: + return torch.stack(path), dlogp + return torch.stack(path) + if get_delta_logp: + return x, dlogp + return x + + def dx(self, t: Tensor, x: Tensor, args: tuple, dt: Tensor, **kwargs): + """Discretization of the ODE, this is the update for x""" + f = self.sde.drift(t, x) + g = self.sde.diffusion(t, x) + s = self.score(t, x, *args, **kwargs) + return (f - 0.5 * g**2 * s) * dt + + def divergence_hutchinson_trick( + self, + t: Tensor, + x: Tensor, + args: tuple, + dt: Tensor, + n_cot_vec: int = 1, + noise_type: Literal["rademacher", "gaussian"] = "rademacher", + **kwargs, + ): + """ + Compute the divergence of the drift function using the Hutchinson trace estimator. + + Args: + t: Time of the ODE. + x: State of the ODE. + args: Additional arguments to pass to the drift function. + dt: Time step of the ODE. + n_cot_vec: Number of cotangent vectors to sample for the Hutchinson trace estimator. + noise_type: Type of noise to sample, either 'rademacher' or 'gaussian'. 
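Stripped of the class machinery, the update that `ODESolver.solve` performs through `dx` and the `dp` divergence callback reduces to a short Euler loop. A minimal sketch with stand-in callables (`f`, `g`, `score`, and `div` are placeholders for the SDE drift, diffusion, score, and drift divergence; none of these names are package API):

```python
import torch

def probability_flow_euler(x, t_grid, f, g, score, div):
    # integrate dx/dt = f(t, x) - 0.5 * g(t, x)**2 * score(t, x) between the
    # bin edges in t_grid, accumulating the drift divergence for delta log p
    dlogp = torch.zeros(x.shape[0])
    for t, t_next in zip(t_grid[:-1], t_grid[1:]):
        dt = t_next - t
        x = x + (f(t, x) - 0.5 * g(t, x) ** 2 * score(t, x)) * dt
        dlogp = dlogp + div(t, x) * dt
    return x, dlogp
```

Under the instantaneous change-of-variables formula, the accumulated divergence is what relates the log-densities at the two endpoints, which is how `get_delta_logp` is meant to be combined with `forward=True`.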
+ """ + _, *D = x.shape + # duplicate samples for for the Hutchinson trace estimator + samples = torch.tile(x, [n_cot_vec, *[1] * len(D)]) + t = torch.tile(t, [n_cot_vec]) + _args = [] + for arg in args: + _, *DA = arg.shape + arg = torch.tile(arg, [n_cot_vec, *[1] * len(DA)]) + _args.append(arg) + + # sample cotangent vectors + vectors = torch.randn_like(samples) + if noise_type == "rademacher": + vectors = vectors.sign() + + f = lambda x: self.dx(t, x, _args, dt, **kwargs) + _, vjp_func = vjp(f, samples) + divergence = (vectors * vjp_func(vectors)[0]).flatten(1).sum(dim=1) + return divergence + + +class Euler_ODE(ODESolver): + """ + Euler method for solving an ODE + """ + + def step(self, t, x, args, dt, dx, dp, **kwargs): + return dx(t, x, args, dt, **kwargs), dp(t, x, args, dt, **kwargs) + + +class RK2_ODE(ODESolver): + """ + Runge Kutta 2nd order ODE solver + """ + + def step(self, t, x, args, dt, dx, dp, **kwargs): + k1 = dx(t, x, args, dt, **kwargs) + l1 = dp(t, x, args, dt, **kwargs) + k2 = dx(t + dt.squeeze(), x + k1, args, dt, **kwargs) + l2 = dp(t + dt.squeeze(), x + k1, args, dt, **kwargs) + return (k1 + k2) / 2, (l1 + l2) / 2 + + +class RK4_ODE(ODESolver): + """ + Runge Kutta 4th order ODE solver + """ + + def step(self, t, x, args, dt, dx, dp, **kwargs): + k1 = dx(t, x, args, dt, **kwargs) + l1 = dp(t, x, args, dt, **kwargs) + k2 = dx(t + dt.squeeze() / 2, x + k1 / 2, args, dt, **kwargs) + l2 = dp(t + dt.squeeze() / 2, x + k1 / 2, args, dt, **kwargs) + k3 = dx(t + dt.squeeze() / 2, x + k2 / 2, args, dt, **kwargs) + l3 = dp(t + dt.squeeze() / 2, x + k2 / 2, args, dt, **kwargs) + k4 = dx(t + dt.squeeze(), x + k3, args, dt, **kwargs) + l4 = dp(t + dt.squeeze(), x + k3, args, dt, **kwargs) + return (k1 + 2 * k2 + 2 * k3 + k4) / 6, (l1 + 2 * l2 + 2 * l3 + l4) / 6 diff --git a/score_models/solver/sde.py b/score_models/solver/sde.py new file mode 100644 index 0000000..c0428bb --- /dev/null +++ b/score_models/solver/sde.py @@ -0,0 +1,169 @@ +from typing import Callable, Optional + +import torch +from torch import Tensor +import numpy as np +from tqdm import tqdm +from .solver import Solver + + +class SDESolver(Solver): + + @torch.no_grad() + def solve( + self, + x: Tensor, + steps: int, + forward: bool, + *args: tuple, + progress_bar: bool = True, + trace: bool = False, + kill_on_nan: bool = False, + denoise_last_step: bool = False, + time_steps: Optional[Tensor] = None, + corrector_steps: int = 0, + corrector_snr: float = 0.1, + hook: Optional[Callable] = None, + sk: float = 0, # Set to -1 for Ito SDE, TODO: make sure this is right + **kwargs, + ): + """ + Integrate the diffusion SDE forward or backward in time. + + Discretizes the SDE using the given method and integrates with + + .. math:: + x_{i+1} = x_i + \\frac{dx}{dt}(t_i, x_i) * dt + g(t_i, x_i) * dw + + where the :math:`\\frac{dx}{dt}` is the diffusion drift of + + .. math:: + \\frac{dx}{dt} = f(t, x) - \\frac{1}{2} g(t, x)^2 s(t, x) + + where :math:`f(t, x)` is the sde drift, :math:`g(t, x)` is the sde diffusion, + and :math:`s(t, x)` is the score. + + Args: + x: Initial condition. + steps: integration discretization. + forward: Direction of integration. + *args: Additional arguments to pass to the score model. + progress_bar: Whether to display a progress bar. + trace: Whether to return the full path or just the last point. + kill_on_nan: Whether to raise an error if NaNs are encountered. + denoise_last_step: Whether to project to the boundary at the last step. + time_steps: Optional time steps to use for integration. 
Should be a 1D tensor containing the bin edges of the + time steps. For example, if one wanted 50 steps from 0 to 1, the time steps would be ``torch.linspace(0, 1, 51)``. + corrector_steps: Number of corrector steps to add after each SDE step (0 for no corrector steps). + corrector_snr: Signal-to-noise ratio for the corrector steps. + hook: Optional hook function to call after each step. Will be called with the signature ``hook(t, x, sde, score, solver)``. + sk: Stratonovich correction term (set to -1 for Ito SDE). + """ + B, *D = x.shape + + # Step + T, dT = self.time_steps(steps, B, D, time_steps=time_steps, forward=forward, **kwargs) + + # Trace if requested + if trace: + path = [x] + + # Progress bar + pbar = tqdm(tuple(zip(T, dT))) if progress_bar else zip(T, dT) + for t, dt in pbar: + if progress_bar: + pbar.set_description( + f"t={t[0].item():.1g} | sigma={self.sde.sigma(t)[0].item():.1g} | " + f"x={x.mean().item():.1g}\u00B1{x.std().item():.1g}" + ) + + # Check for NaNs + if kill_on_nan and torch.any(torch.isnan(x)): + raise ValueError("NaN encountered in SDE solver") + + # Update x + x = x + self.step(t, x, args, dt, forward, sk=sk, **kwargs) + + # Add requested corrector steps + for _ in range(corrector_steps): + x = self.corrector_step(t, x, args, corrector_snr, **kwargs) + + if trace: + path.append(x) + + # Call hook + if hook is not None: + hook(t, x, self.sde, self.score, self) + + # Project to boundary if denoising + if denoise_last_step and not forward: + x = self.tweedie(t, x, *args, **kwargs) + if trace: + path[-1] = x + + if trace: + return torch.stack(path) + return x + + def corrector_step(self, t, x, args, snr, **kwargs): + """Basic Langevin corrector step for the SDE.""" + _, *D = x.shape + z = torch.randn_like(x) + epsilon = (snr * self.sde.sigma(t).view(-1, *[1] * len(D))) ** 2 + return x + epsilon * self.score(t, x, *args, **kwargs) + z * torch.sqrt(2 * epsilon) + + def drift(self, t: Tensor, x: Tensor, args: tuple, forward: bool, **kwargs): + """SDE drift term""" + f = self.sde.drift(t, x) + if forward: + return f + g = self.sde.diffusion(t, x) + s = self.score(t, x, *args, **kwargs) + return f - g**2 * s + + def dx(self, t, x, args, dt, forward, dw=None, **kwargs): + """SDE differential element dx""" + if dw is None: + dw = torch.randn_like(x) * torch.sqrt(dt.abs()) + return self.drift(t, x, args, forward, **kwargs) * dt + self.sde.diffusion(t, x) * dw + + +class EM_SDE(SDESolver): + """ + Base solver for a stochastic differential equation (SDE) using the Euler-Maruyama method. + """ + + def step(self, t, x, args, dt, forward, sk=None, **kwargs): + """base SDE solver""" + dw = torch.randn_like(x) * torch.sqrt(dt.abs()) + return self.dx(t, x, args, dt, forward, dw, **kwargs) + + +class RK2_SDE(SDESolver): + def step(self, t, x, args, dt, forward, sk, **kwargs): + """Base SDE solver using a 2nd order Runge-Kutta method. For more + details see Equation 2.5 in chapter 7.2 of the book "Introduction to + Stochastic Differential Equations" by Thomas C. Gard. 
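The `corrector_step` above is one step of Langevin dynamics whose step size is tied to the current noise level through the SNR. In isolation, with a known score, repeated application drifts samples toward the target density; a sketch using the analytic score of a standard normal rather than a trained model:

```python
import torch

def langevin_step(x, score, eps):
    # x <- x + eps * score(x) + sqrt(2 * eps) * z leaves the target approximately invariant
    z = torch.randn_like(x)
    return x + eps * score(x) + z * torch.sqrt(2 * eps)

score = lambda x: -x                   # grad log N(0, I)
x = 5.0 + torch.randn(10000, 2)        # start far from the target
eps = torch.tensor(0.05)
for _ in range(500):
    x = langevin_step(x, score, eps)
print(x.mean(dim=0), x.std(dim=0))     # ~0 and ~1, up to discretization bias
```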
The equations have + been adapted by including a term ``skdt`` which allows for solving the + Ito SDE or the Stratonovich SDE.""" + dw = torch.randn_like(x) * torch.sqrt(dt.abs()) + skdt = sk * np.random.choice([-1, 1]) * torch.sqrt(dt.abs()) + k1 = self.dx(t, x, args, dt, forward, dw - skdt, **kwargs) + k2 = self.dx(t + dt.squeeze(), x + k1, args, dt, forward, dw + skdt, **kwargs) + return (k1 + k2) / 2 + + +class RK4_SDE(SDESolver): + def step(self, t, x, args, dt, forward, sk, **kwargs): + """Base SDE solver using a 4th order Runge-Kutta method. For more + details see Equation 3.6 in chapter 7.3 of the book "Introduction to + Stochastic Differential Equations" by Thomas C. Gard. The equations have + been adapted by including a term ``skdt`` which allows for solving the + Ito SDE or the Stratonovich SDE.""" + dw = torch.randn_like(x) * torch.sqrt(dt.abs()) + skdt = sk * np.random.choice([-1, 1]) * torch.sqrt(dt.abs()) + k1 = self.dx(t, x, args, dt, forward, dw - skdt, **kwargs) + k2 = self.dx(t + dt.squeeze() / 2, x + k1 / 2, args, dt, forward, dw + skdt, **kwargs) + k3 = self.dx(t + dt.squeeze() / 2, x + k2 / 2, args, dt, forward, dw - skdt, **kwargs) + k4 = self.dx(t + dt.squeeze(), x + k3, args, dt, forward, dw + skdt, **kwargs) + return (k1 + 2 * k2 + 2 * k3 + k4) / 6 diff --git a/score_models/solver/solver.py b/score_models/solver/solver.py new file mode 100644 index 0000000..d91e8bf --- /dev/null +++ b/score_models/solver/solver.py @@ -0,0 +1,142 @@ +from abc import ABC, abstractmethod +from typing import Optional + +import torch +from torch import Tensor + +from ..utils import DEVICE + +__all__ = ["Solver"] + + +def all_subclasses(cls): + subclasses = {} + for subcls in cls.__subclasses__(): + subclasses[subcls.__name__.lower()] = subcls + subclasses.update(all_subclasses(subcls)) + return subclasses + + +class Solver(ABC): + """ + Base class for a solver of a stochastic/ordinary differential equation + (SDE/ODE). + + Defines the signatures for methods related to integrating a differential + equation (stochastic or ordinary) in the context of diffusion models. + + The only requirement on init is a ScoreModel object, which is used to define + the DE by providing the SDE object and the score. + """ + + def __new__(cls, *args, solver=None, **kwargs): + """Create the correct Solver subclass given the solver name.""" + if solver is not None: + SOLVERS = all_subclasses(cls) + try: + return super(Solver, cls).__new__(SOLVERS[solver.lower()]) + except KeyError: + raise ValueError( + f'Unknown solver type: "{solver}". Must be one of {list(filter(lambda s: "_" in s, SOLVERS.keys()))}' + ) + + return super(Solver, cls).__new__(cls) + + def __init__(self, score, *args, **kwargs): + self.score = score + + @abstractmethod + def solve( + self, x, steps, forward, *args, progress_bar=True, trace=False, kill_on_nan=False, **kwargs + ): ... + + @abstractmethod + def dx(self, t, x, args, dt, **kwargs): ... + + @abstractmethod + def step(self, t, x, args, dt, dx, **kwargs): ... 
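The `Solver.__new__` override above is a small registry pattern: concrete subclasses are indexed by their lowercased class names, so `Solver(score, solver="rk2_sde")` constructs an `RK2_SDE` directly. The same idea in miniature (one level of subclassing only; the patch's `all_subclasses` helper also recurses into deeper subclasses):

```python
class Base:
    def __new__(cls, *args, name=None, **kwargs):
        if name is not None:
            # index the subclass tree by lowercased class name
            registry = {sub.__name__.lower(): sub for sub in cls.__subclasses__()}
            try:
                return super().__new__(registry[name.lower()])
            except KeyError:
                raise ValueError(f"Unknown subclass: {name!r}")
        return super().__new__(cls)

class Fast(Base): ...
class Accurate(Base): ...

assert isinstance(Base(name="fast"), Fast)
```

The benefit is a stable constructor signature: a plain string selects the integration scheme, which is how `sample(solver=...)` is exercised in the tests later in this patch.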
+ + def __call__( + self, + x, + steps, + *args, + forward=False, + progress_bar=True, + trace=False, + kill_on_nan=False, + **kwargs, + ): + """Calls the solve method with the given arguments.""" + return self.solve( + x, + steps, + forward, + *args, + progress_bar=progress_bar, + trace=trace, + kill_on_nan=kill_on_nan, + **kwargs, + ) + + @property + def sde(self): + return self.score.sde + + def time_steps( + self, + steps: int, + B: int, + D: tuple, + time_steps: Optional[Tensor] = None, + forward: bool = True, + device=DEVICE, + **kwargs, + ): + """ + Generate a tensor of time steps for integration. Note that the last + entry is removed because it is the endpoint and not a step. For example + if going from 0 to 1 with 10 steps, the steps are [0, 0.1, 0.2, ..., + 0.9], thus the returned tensor has the time value for the beginning of + each block of time. + """ + if time_steps is None: + t_min = torch.as_tensor(kwargs.get("t_min", self.sde.t_min), device=device) + t_max = torch.as_tensor(kwargs.get("t_max", self.sde.t_max), device=device) + delta_t = t_max - t_min + assert torch.allclose( + delta_t, delta_t.reshape(-1)[0] + ), "All time steps must be the same" + delta_t = delta_t.reshape(-1)[0] # Get the scalar value + t_min = t_min.reshape(-1)[0] + if forward: + T = torch.linspace(0, 1, steps + 1, device=device) + else: + T = torch.linspace(1, 0, steps + 1, device=device) + T = delta_t * T + t_min + else: + T = time_steps + dT = T[1:] - T[:-1] + T = T[:-1] + T = T.reshape(-1, 1).repeat(1, B) + dT = dT.reshape(T.shape[0], 1, *[1] * len(D)).repeat(1, B, *[1] * len(D)) + return T, dT + + def step_size(self, steps: int, forward: bool, device=DEVICE, **kwargs): + """Returns the step size for the integration. This is simply the time + window divided by the number of steps. 
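The `tweedie` helper defined just after this, E[x0 | xt] = (xt + sigma_t^2 * s(t, xt)) / mu_t, can be verified exactly on a Gaussian toy problem. A sketch with mu = 1 (a VE-style kernel) and a standard-normal data distribution, where both the score and the posterior mean are known in closed form:

```python
import torch

sigma_t = 0.8                                  # kernel standard deviation at time t (mu = 1)
score = lambda x: -x / (1 + sigma_t**2)        # exact score of N(0, 1 + sigma_t^2)

xt = torch.randn(8) * (1 + sigma_t**2) ** 0.5  # samples from the noised marginal
x0_hat = xt + sigma_t**2 * score(xt)           # Tweedie denoiser with mu = 1

# the Gaussian posterior mean is xt / (1 + sigma_t^2): Tweedie recovers it exactly
assert torch.allclose(x0_hat, xt / (1 + sigma_t**2))
```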
However, it is negative if going + backwards in time.""" + + h = 1 if forward else -1 + t_min = kwargs.get("t_min", self.sde.t_min) + t_max = kwargs.get("t_max", self.sde.t_max) + return torch.as_tensor(h * (t_max - t_min) / steps, device=device) + + def tweedie(self, t: Tensor, x: Tensor, *args, **kwargs) -> Tensor: + """ + Compute the Tweedie formula for the expectation E[x0 | xt] + """ + B, *D = x.shape + mu = self.sde.mu(t).view(-1, *[1] * len(D)) + sigma = self.sde.sigma(t).view(-1, *[1] * len(D)) + return (x + sigma**2 * self.score(t, x, *args, **kwargs)) / mu diff --git a/tests/test_analytic_models.py b/tests/test_analytic_models.py new file mode 100644 index 0000000..05430c3 --- /dev/null +++ b/tests/test_analytic_models.py @@ -0,0 +1,314 @@ +import torch +import numpy as np +from score_models.sde import VESDE +from score_models import ( + GRFEnergyModel, + MVGEnergyModel, + MVGScoreModel, + JointScoreModel, + SampleScoreModel, + InterpolatedScoreModel, + ConvolvedLikelihood, + TweedieScoreModel, +) +import pytest + + +@pytest.mark.parametrize("psd_shape", [(32, 32), (32, 16), (25,), (64,)]) +def test_grf(psd_shape): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + + if len(psd_shape) == 1: + f = np.fft.fftfreq(psd_shape[0]) + f[0] = 1 + power_spectrum = 1.0 / f**2 + else: + # Frequency indices + u = np.fft.fftfreq(psd_shape[0]) + v = np.fft.fftfreq(psd_shape[1]) + + # Create a grid of frequencies + U, V = np.meshgrid(u, v, indexing="ij") + + # Compute the squared frequency magnitude + freq_magnitude_squared = U**2 + V**2 + + # Avoid division by zero for the zero frequency + freq_magnitude_squared[0, 0] = 1 + + # Inverse square of the frequency magnitude + power_spectrum = 1.0 / freq_magnitude_squared + + psd = torch.tensor(power_spectrum, dtype=torch.float32) + + model = GRFEnergyModel(sde, power_spectrum=psd) + + samples = model.sample(shape=(2, *psd_shape), steps=25) + + assert torch.all(torch.isfinite(samples)) + + +@pytest.mark.parametrize( + "mean,cov", + ( + ([0.0], [[1.0]]), # 1D Gaussian + ([0.0, 0.0], [[1.0, 0.1], [0.1, 1.0]]), # 2D Gaussian + ([1.0, 2.0], [2.0, 2.0]), # 2D Gaussian with diagonal covariance + (np.random.randn(5, 3), np.stack([np.eye(3)] * 5)), # mixture of 5 3D Gaussians + ( + np.random.randn(5, 3), + np.ones((5, 3)), + ), # mixture of 5 3D Gaussians with diagonal covariance + ), +) +def test_mvg_energy(mean, cov): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + mean = torch.tensor(mean, dtype=torch.float32) + cov = torch.tensor(cov, dtype=torch.float32) + model = MVGEnergyModel( + sde, + mean=mean, + cov=cov, + ) + + samples = model.sample(shape=(100, mean.shape[-1]), steps=50) + + assert torch.all(torch.isfinite(samples)) + if model.mixture: + assert torch.allclose( + samples.mean(dim=0), mean.mean(dim=0), atol=1 + ), "mean for MVG samples not close" + return + assert torch.allclose(samples.mean(dim=0), mean, atol=1), "mean for MVG samples not close" + if model.diag: + assert torch.allclose( + samples.std(dim=0), cov.sqrt(), atol=1 + ), "std for MVG samples not close" + else: + assert torch.allclose( + samples.std(dim=0), torch.diag(cov).sqrt(), atol=1 + ), "std for MVG samples not close" + + +@pytest.mark.parametrize( + "mean,cov", + ( + ([0.0], [[1.0]]), # 1D Gaussian + ([0.0, 0.0], [[1.0, 0.1], [0.1, 1.0]]), # 2D Gaussian + ([1.0, 2.0], [2.0, 2.0]), # 2D Gaussian with diagonal covariance + (np.random.randn(5, 3), np.stack([np.eye(3)] * 5)), # mixture of 5 3D Gaussians + ( + np.random.randn(5, 3), + np.ones((5, 3)), + ), # mixture of 5 3D Gaussians with 
diagonal covariance + ), +) +def test_mvg_score(mean, cov): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + mean = torch.tensor(mean, dtype=torch.float32) + cov = torch.tensor(cov, dtype=torch.float32) + model = MVGScoreModel( + sde, + mean=mean, + cov=cov, + mixture=mean.ndim >= 2, + ) + + samples = model.sample(shape=(100, mean.shape[-1]), steps=50) + + assert torch.all(torch.isfinite(samples)) + if model.mixture: + assert torch.allclose( + samples.mean(dim=0), mean.mean(dim=0), atol=1 + ), "mean for MVG samples not close" + return + print(samples.mean(dim=0), mean) + assert torch.allclose(samples.mean(dim=0), mean, atol=1), "mean for MVG samples not close" + if model.diag: + assert torch.allclose( + samples.std(dim=0), cov.sqrt(), atol=1 + ), "std for MVG samples not close" + else: + assert torch.allclose( + samples.std(dim=0), torch.diag(cov).sqrt(), atol=1 + ), "std for MVG samples not close" + + +@pytest.mark.parametrize("Nsamp,Ndim", ((10, 1), (1, 2), (5, 100))) +def test_joint_shared(Nsamp, Ndim): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + model1 = SampleScoreModel( + sde, + samples=torch.randn(Nsamp, Ndim), + ) + + model2 = SampleScoreModel( + sde, + samples=torch.randn(Nsamp, Ndim), + ) + + model = JointScoreModel( + models=(model1, model2), + x_shapes=[(Ndim,)], + model_uses=[None, None], + ) + + samples = model.sample(shape=(2, Ndim), steps=25) + + assert torch.all(torch.isfinite(samples)) + + +@pytest.mark.parametrize("Nsamp,Ndim1,Ndim2", ((10, 1, 3), (1, 2, 5), (5, 100, 1), (3, 1, 1))) +def test_joint_paired(Nsamp, Ndim1, Ndim2): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + model1 = SampleScoreModel( + sde, + samples=torch.randn(Nsamp, Ndim1), + ) + + model2 = SampleScoreModel( + sde, + samples=torch.randn(Nsamp, Ndim2), + ) + + model = JointScoreModel( + models=(model1, model2), + x_shapes=[(Ndim1,), (Ndim2,)], + model_uses=[(0,), (1,)], + ) + + samples = model.sample(shape=(2, Ndim1 + Ndim2), steps=25) + + assert torch.all(torch.isfinite(samples)) + + +@pytest.mark.parametrize("Nsamp,Ndim", ((10, 1), (1, 2), (5, 100))) +def test_sample_score(Nsamp, Ndim): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + model = SampleScoreModel( + sde, + samples=torch.randn(Nsamp, Ndim), + ) + + samples = model.sample(shape=(2, Ndim), steps=25) + + assert torch.all(torch.isfinite(samples)) + + +@pytest.mark.parametrize( + "beta", ("linear", "square", "sqrt", "linear:2", "sqrt:2", "sin:2", lambda t: t**2) +) +@pytest.mark.parametrize("Nsamp,Ndim", ((10, 1), (1, 2))) +def test_interpolated(Nsamp, Ndim, beta): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + model1 = SampleScoreModel( + sde, + samples=torch.randn(Nsamp, Ndim), + ) + + model2 = SampleScoreModel( + sde, + samples=torch.randn(Nsamp, Ndim), + ) + + model = InterpolatedScoreModel( + sde, + hight_model=model1, + lowt_model=model2, + beta_scheme=beta, + ) + + samples = model.sample(shape=(2, Ndim), steps=25) + + assert torch.all(torch.isfinite(samples)) + + +@pytest.mark.parametrize("diag", (True, False)) +@pytest.mark.parametrize("Amatrix", (True, False)) +def test_convolved_likelihood(diag, Amatrix): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + torch.manual_seed(42) + x_true = torch.randn(3) + + def fwd(x): + return torch.arange(1, 4) * x + + y = fwd(x_true) + torch.randn(3) * 0.1 + Sigma_y = torch.eye(3) * 0.1**2 + A = torch.func.jacrev(fwd)(x_true) if Amatrix else fwd + + priormodel = MVGEnergyModel( + sde, + mean=torch.zeros(3), + cov=torch.eye(3), + ) + + likelihoodmodel = ConvolvedLikelihood( + sde, + y=y, + 
Sigma_y=torch.diag(Sigma_y) if diag else Sigma_y, + A=A, + x_shape=None if Amatrix else (3,), + ) + + model = JointScoreModel( + models=(priormodel, likelihoodmodel), + x_shapes=[ + (3,), + ], + model_uses=[None, None], + ) + + samples = model.sample(shape=(2, *x_true.shape), steps=25) + + assert torch.all(torch.isfinite(samples)) + assert torch.allclose(samples, x_true, atol=1.0) + + +def test_tweedie(): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + x_true = torch.randn(3) + + def fwd(x): + return torch.arange(1, 4) * x + 2 + + y = fwd(x_true) + torch.randn(3) * 0.1 + Sigma_y = torch.eye(3) * 0.1**2 + A = torch.func.jacrev(fwd)(x_true) + + priormodel = MVGEnergyModel( + sde, + mean=torch.zeros(3), + cov=torch.eye(3), + ) + + def log_likelihood(sigma_t, x): + r = y - A @ x + ret = ( + r.reshape(1, -1) + @ torch.linalg.inv( + Sigma_y + + sigma_t**2 + * torch.eye(Sigma_y.shape[0], dtype=Sigma_y.dtype, device=Sigma_y.device) + ) + @ r.reshape(-1, 1) + ) + return ret.squeeze() + + likelihoodmodel = TweedieScoreModel( + sde, + prior_model=priormodel, + log_likelihood=log_likelihood, + ) + + model = JointScoreModel( + models=(priormodel, likelihoodmodel), + x_shapes=[ + (3,), + ], + model_uses=[None, None], + ) + + samples = model.sample(shape=(2, 3), steps=25) + + assert torch.all(torch.isfinite(samples)) diff --git a/tests/test_score_models.py b/tests/test_score_models.py index 4ec9cc8..7b4d711 100644 --- a/tests/test_score_models.py +++ b/tests/test_score_models.py @@ -10,7 +10,7 @@ def local_test_loading_model_and_score_fn(): # local test only path = "/home/alexandre/Desktop/Projects/data/score_models/ncsnpp_ct_g_220912024942" model, hparams = load_architecture(path) - + score = ScoreModel(checkpoints_directory=path) print(score.sde) x = torch.randn(1, 1, 256, 256) @@ -73,21 +73,23 @@ def test_loading_with_nn(): t = torch.ones(1) score(t, x) + def test_init_score(): net = MLP(10) with pytest.raises(KeyError): score = ScoreModel(net) + def test_log_likelihood(): net = MLP(dimensions=2) score = ScoreModel(net, beta_min=1e-2, beta_max=10) print(score.sde) x = torch.randn(3, 2) - ll = score.log_likelihood(x, steps=10, verbose=1, method="euler") + ll = score.log_likelihood(x, steps=10, verbose=1, method="euler_ode") print(ll) assert ll.shape == torch.Size([3]) - ll = score.log_likelihood(x, steps=10, verbose=1, method="heun") + ll = score.log_likelihood(x, steps=10, verbose=1, method="rk2_ode") print(ll) assert ll.shape == torch.Size([3]) @@ -111,16 +113,19 @@ def test_denoise_method(epsilon): x = torch.randn(B, 1, 16, 16) score.denoise(t, x, steps=10, epsilon=epsilon) + @pytest.mark.parametrize("anneal_residuals", [True, False]) def test_slic_score(anneal_residuals): B = 3 m = 10 D = 100 + def forward_model(t, x): - return x[:, :m] # Function R^C to R^m + return x[:, :m] # Function R^C to R^m + x = torch.randn(B, D) t = torch.rand(B) - net = MLP(m) # Define SLIC in output space of forward model (m) + net = MLP(m) # Define SLIC in output space of forward model (m) model = SLIC(forward_model, net, beta_min=1e-2, beta_max=10, anneal_residuals=anneal_residuals) y = forward_model(None, x) x = torch.randn(B, D) @@ -129,7 +134,7 @@ def forward_model(t, x): print(s) print(s.shape) assert s.shape == torch.Size([B, D]) - + def test_loading_different_sdes(): net = DDPM(1, nf=32, ch_mult=(2, 2)) @@ -157,6 +162,5 @@ def test_loading_different_sdes(): assert score.sde.beta == 10 - if __name__ == "__main__": local_test_loading_model_and_score_fn() diff --git a/tests/test_solvers.py b/tests/test_solvers.py new 
file mode 100644 index 0000000..852c2d8 --- /dev/null +++ b/tests/test_solvers.py @@ -0,0 +1,121 @@ +import torch +import numpy as np +from score_models.sde import VESDE +from score_models import ( + MVGEnergyModel, + Solver, + ODESolver, + EM_SDE, + RK2_ODE, + MVGScoreModel, +) +import pytest + + +def test_solver_constructor(): + + with pytest.raises(TypeError): # abstract class can't be created + Solver(None) + + assert isinstance(Solver(None, solver="EM_SDE"), EM_SDE), "EM_SDE not created" + + assert isinstance(ODESolver(None, solver="RK2_ODE"), RK2_ODE), "RK2_ODE not created" + + assert isinstance(EM_SDE(None), Solver), "EM_SDE not created" + + with pytest.raises(ValueError): # unknown solver + Solver(None, solver="random_solver") + + with pytest.raises(ValueError): # unknown ode solver + ODESolver(None, solver="EM_SDE") + + +@pytest.mark.parametrize( + "mean,cov", + ( + ([0.0], [[1.0]]), # 1D Gaussian + ([0.0, 0.0], [[1.0, 0.1], [0.1, 1.0]]), # 2D Gaussian + ), +) +@pytest.mark.parametrize( + "solver", ["em_sde", "rk2_sde", "rk4_sde", "euler_ode", "rk2_ode", "rk4_ode"] +) +def test_solver_sample(solver, mean, cov): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + mean = torch.tensor(mean, dtype=torch.float32) + cov = torch.tensor(cov, dtype=torch.float32) + model = MVGEnergyModel( + sde, + mean=mean, + cov=cov, + ) + + samples = model.sample( + shape=(100, mean.shape[-1]), + steps=50, + solver=solver, + denoise_last_step=True, + kill_on_nan=True, + ) + + assert torch.all(torch.isfinite(samples)) + + assert torch.allclose(samples.mean(dim=0), mean, atol=1), "mean not close" + + assert torch.allclose(samples.std(dim=0), torch.tensor(1.0), atol=1), "std not close" + + +@pytest.mark.parametrize( + "mean,cov", + ( + ([0.0], [[1.0]]), # 1D Gaussian + ([0.0, 0.0], [[1.0, 0.1], [0.1, 1.0]]), # 2D Gaussian + ), +) +@pytest.mark.parametrize( + "solver", ["em_sde", "rk2_sde", "rk4_sde", "euler_ode", "rk2_ode", "rk4_ode"] +) +def test_solver_forward(solver, mean, cov): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + mean = torch.tensor(mean, dtype=torch.float32) + cov = torch.tensor(cov, dtype=torch.float32) + model = MVGEnergyModel( + sde, + mean=mean, + cov=cov, + ) + slvr = Solver(model, solver=solver) + + x0 = torch.tensor(np.random.multivariate_normal(mean, cov, 100), dtype=torch.float32) + xT = slvr(x0, steps=50, forward=True, get_delta_logp="ode" in solver, progress_bar=False) + + if "ode" in solver: # check delta_logp calculation for ODE solvers + xT, dlogp = xT + assert torch.all(torch.isfinite(dlogp)) + + assert torch.all(torch.isfinite(xT)) + + +@pytest.mark.parametrize( + "steps,time_steps", + ( + (50, None), # 50 steps normally + (None, torch.linspace(1, 0, 50)), # 50 steps set by user + (None, torch.cat((torch.logspace(0, -2, 49), torch.zeros(1)))), # 50 steps with log spacing + ), +) +def test_solver_step(steps, time_steps): + sde = VESDE(sigma_min=1e-2, sigma_max=10) + mean = torch.zeros(2, dtype=torch.float32) + cov = torch.ones(2, dtype=torch.float32) + model = MVGScoreModel( + sde, + mean=mean, + cov=cov, + ) + + samples = model.sample(shape=(100, mean.shape[-1]), steps=steps, time_steps=time_steps) + + assert torch.all(torch.isfinite(samples)) + assert torch.allclose(samples.mean(dim=0), mean, atol=1), "mean for MVG samples not close" + assert torch.allclose(samples.std(dim=0), cov.sqrt(), atol=1), "std for MVG samples not close" From c05e9c0e128100020cca3249c0083f6545f9cbd2 Mon Sep 17 00:00:00 2001 From: Alexandre Adam Date: Mon, 21 Oct 2024 15:18:25 +0200 Subject: [PATCH 12/40]
feature: Docs (#11) * Removed dollar signs in README * Updated actions * Update CI action to run on any branch push * Fixed bug in CI action * Moved package to src to avoid conflict * Reduced memory reqs on lora sbm test for macOS backend * Added restriction for sending the coverage * Removed macOS backend test and updated flag for coverage * Fixed CI action bug * Removed flag nonsense in CI action * Removed pip show in CI * Adding docs folder, currently empty * Started docs, drafted a style and created intro page * Modified docs action and added utils for plotting and distributions * Updated book * Improved introduction * Improved intro * Renamed intro to the score * Added readthedocs conf file * Updated actions * Updated permission of github action * updated readthedocs conf * Reset readthedocs file * Added documentation badge * Added documentation section * Drafted some structure for the docs * Started the score matching section * Added part on annealing * Finished annealing score matching section * Edited score matching part * . * Almost finished score matching section * Added visualization of score learned with DSM * . * Finished the score matching section * Working on overview * Revision of some of the symbols, added some stuff in getting started * Found a neat logo * Added logo in README * Cleaned up 02 SM notebook a little bit * Added logo to book * Updated front page * Worked on the getting started page * Added outline for diffusion section --- .bumpversion.cfg | 12 + .github/labeler.yaml | 6 + .github/workflows/coverage.yaml | 66 +- .github/workflows/deploy_package.yaml | 82 ++ .github/workflows/docs.yaml | 53 + .github/workflows/label_pr.yaml | 26 + .gitignore | 6 - .readthedocs.yaml | 15 + README.md | 15 +- assets/sbm_logo.png | Bin 0 -> 125914 bytes docs/_config.yml | 49 + docs/_static/css/custom.css | 58 ++ docs/_toc.yml | 15 + .../contributing.md | 0 docs/dummy.md | 1 + docs/front_page.md | 69 ++ docs/getting_started.md | 88 ++ docs/notebooks | 1 + docs/references.bib | 380 +++++++ docs_requirements.txt | 1 + long_description.rst | 2 +- notebooks/00-Overview.ipynb | 32 + notebooks/01-the_score.ipynb | 350 +++++++ notebooks/02-score_matching.ipynb | 937 ++++++++++++++++++ notebooks/03-diffusion.ipynb | 139 +++ notebooks/04-conditional_sbm.ipynb | 23 + notebooks/05-log_probabilities.ipynb | 18 + notebooks/06-fine_tuning.ipynb | 23 + notebooks/07-SDEs.ipynb | 23 + notebooks/logo.ipynb | 89 ++ pyproject.toml | 33 + setup.py | 35 +- .../score_models}/__init__.py | 0 .../score_models}/architectures/__init__.py | 0 .../architectures/conditional_branch.py | 0 .../score_models}/architectures/ddpm.py | 0 .../score_models}/architectures/encoder.py | 0 .../score_models}/architectures/mlp.py | 0 .../score_models}/architectures/ncsnpp.py | 0 .../architectures/ncsnpp_level.py | 0 .../score_models}/architectures/null_net.py | 0 .../score_models}/definitions.py | 0 .../score_models}/layers/__init__.py | 0 .../score_models}/layers/attention_block.py | 0 .../score_models}/layers/combine.py | 0 .../layers/conditional_batchnorm2d.py | 0 .../layers/conditional_instancenorm2d.py | 0 .../layers/conditional_instancenorm2d_plus.py | 0 .../score_models}/layers/conv1dsame.py | 0 .../score_models}/layers/conv2dsame.py | 0 .../score_models}/layers/conv3dsame.py | 0 .../score_models}/layers/conv_layers.py | 0 .../score_models}/layers/ddpm_resnet_block.py | 0 .../score_models}/layers/downsample.py | 0 .../score_models}/layers/ncsn_resnet_block.py | 0 .../layers/projection_embedding.py | 0
.../layers/resnet_block_biggan.py | 0 .../layers/spectral_normalization.py | 0 .../layers/squeeze_and_excitation.py | 0 .../score_models}/layers/style_gan_conv.py | 0 .../layers/up_or_downsampling.py | 0 .../layers/up_or_downsampling1d.py | 0 .../layers/up_or_downsampling2d.py | 0 .../layers/up_or_downsampling3d.py | 0 .../score_models}/layers/upfirdn1d.py | 0 .../score_models}/layers/upfirdn2d.py | 0 .../score_models}/layers/upfirdn3d.py | 0 .../score_models}/layers/upsample.py | 0 .../score_models}/losses/__init__.py | 0 .../score_models}/losses/dsm.py | 0 .../losses/sliced_score_matching.py | 0 .../score_models}/ode/__init__.py | 0 .../score_models}/ode/euler.py | 0 .../score_models}/ode/heun.py | 0 .../score_models}/ode/hutchinson_trick.py | 0 .../score_models}/ode/probability_flow_ode.py | 0 src/score_models/plot_utils.py | 186 ++++ .../score_models}/save_load_utils.py | 0 .../score_models}/sbm/__init__.py | 0 .../score_models}/sbm/base.py | 0 .../score_models}/sbm/conv_likelihood.py | 0 .../score_models}/sbm/energy_model.py | 0 {score_models => src/score_models}/sbm/grf.py | 0 .../score_models}/sbm/hessian_model.py | 0 .../score_models}/sbm/interpolated.py | 0 .../score_models}/sbm/joint.py | 0 .../score_models}/sbm/kernel_slic.py | 0 .../score_models}/sbm/lora.py | 0 {score_models => src/score_models}/sbm/mvg.py | 0 .../score_models}/sbm/sample.py | 0 .../score_models}/sbm/score_model.py | 0 .../score_models}/sbm/slic.py | 0 .../score_models}/sbm/tweedie.py | 0 .../score_models}/sde/__init__.py | 0 .../score_models}/sde/euler_maruyama.py | 0 src/score_models/sde/predictor_corrector.py | 0 {score_models => src/score_models}/sde/sde.py | 0 .../score_models}/sde/tsvesde.py | 0 .../score_models}/sde/vesde.py | 0 .../score_models}/sde/vpsde.py | 0 .../score_models}/solver/__init__.py | 0 .../score_models}/solver/ode.py | 0 .../score_models}/solver/sde.py | 0 .../score_models}/solver/solver.py | 0 src/score_models/toy_distributions.py | 118 +++ {score_models => src/score_models}/trainer.py | 0 {score_models => src/score_models}/utils.py | 0 tests/test_lora_sbm.py | 2 +- 108 files changed, 2877 insertions(+), 76 deletions(-) create mode 100644 .bumpversion.cfg create mode 100644 .github/labeler.yaml create mode 100644 .github/workflows/deploy_package.yaml create mode 100644 .github/workflows/docs.yaml create mode 100644 .github/workflows/label_pr.yaml create mode 100644 .readthedocs.yaml create mode 100644 assets/sbm_logo.png create mode 100644 docs/_config.yml create mode 100644 docs/_static/css/custom.css create mode 100644 docs/_toc.yml rename score_models/sde/predictor_corrector.py => docs/contributing.md (100%) create mode 100644 docs/dummy.md create mode 100644 docs/front_page.md create mode 100644 docs/getting_started.md create mode 120000 docs/notebooks create mode 100644 docs/references.bib create mode 100644 docs_requirements.txt create mode 100644 notebooks/00-Overview.ipynb create mode 100644 notebooks/01-the_score.ipynb create mode 100644 notebooks/02-score_matching.ipynb create mode 100644 notebooks/03-diffusion.ipynb create mode 100644 notebooks/04-conditional_sbm.ipynb create mode 100644 notebooks/05-log_probabilities.ipynb create mode 100644 notebooks/06-fine_tuning.ipynb create mode 100644 notebooks/07-SDEs.ipynb create mode 100644 notebooks/logo.ipynb rename {score_models => src/score_models}/__init__.py (100%) rename {score_models => src/score_models}/architectures/__init__.py (100%) rename {score_models => src/score_models}/architectures/conditional_branch.py (100%) 
rename {score_models => src/score_models}/architectures/ddpm.py (100%) rename {score_models => src/score_models}/architectures/encoder.py (100%) rename {score_models => src/score_models}/architectures/mlp.py (100%) rename {score_models => src/score_models}/architectures/ncsnpp.py (100%) rename {score_models => src/score_models}/architectures/ncsnpp_level.py (100%) rename {score_models => src/score_models}/architectures/null_net.py (100%) rename {score_models => src/score_models}/definitions.py (100%) rename {score_models => src/score_models}/layers/__init__.py (100%) rename {score_models => src/score_models}/layers/attention_block.py (100%) rename {score_models => src/score_models}/layers/combine.py (100%) rename {score_models => src/score_models}/layers/conditional_batchnorm2d.py (100%) rename {score_models => src/score_models}/layers/conditional_instancenorm2d.py (100%) rename {score_models => src/score_models}/layers/conditional_instancenorm2d_plus.py (100%) rename {score_models => src/score_models}/layers/conv1dsame.py (100%) rename {score_models => src/score_models}/layers/conv2dsame.py (100%) rename {score_models => src/score_models}/layers/conv3dsame.py (100%) rename {score_models => src/score_models}/layers/conv_layers.py (100%) rename {score_models => src/score_models}/layers/ddpm_resnet_block.py (100%) rename {score_models => src/score_models}/layers/downsample.py (100%) rename {score_models => src/score_models}/layers/ncsn_resnet_block.py (100%) rename {score_models => src/score_models}/layers/projection_embedding.py (100%) rename {score_models => src/score_models}/layers/resnet_block_biggan.py (100%) rename {score_models => src/score_models}/layers/spectral_normalization.py (100%) rename {score_models => src/score_models}/layers/squeeze_and_excitation.py (100%) rename {score_models => src/score_models}/layers/style_gan_conv.py (100%) rename {score_models => src/score_models}/layers/up_or_downsampling.py (100%) rename {score_models => src/score_models}/layers/up_or_downsampling1d.py (100%) rename {score_models => src/score_models}/layers/up_or_downsampling2d.py (100%) rename {score_models => src/score_models}/layers/up_or_downsampling3d.py (100%) rename {score_models => src/score_models}/layers/upfirdn1d.py (100%) rename {score_models => src/score_models}/layers/upfirdn2d.py (100%) rename {score_models => src/score_models}/layers/upfirdn3d.py (100%) rename {score_models => src/score_models}/layers/upsample.py (100%) rename {score_models => src/score_models}/losses/__init__.py (100%) rename {score_models => src/score_models}/losses/dsm.py (100%) rename {score_models => src/score_models}/losses/sliced_score_matching.py (100%) rename {score_models => src/score_models}/ode/__init__.py (100%) rename {score_models => src/score_models}/ode/euler.py (100%) rename {score_models => src/score_models}/ode/heun.py (100%) rename {score_models => src/score_models}/ode/hutchinson_trick.py (100%) rename {score_models => src/score_models}/ode/probability_flow_ode.py (100%) create mode 100644 src/score_models/plot_utils.py rename {score_models => src/score_models}/save_load_utils.py (100%) rename {score_models => src/score_models}/sbm/__init__.py (100%) rename {score_models => src/score_models}/sbm/base.py (100%) rename {score_models => src/score_models}/sbm/conv_likelihood.py (100%) rename {score_models => src/score_models}/sbm/energy_model.py (100%) rename {score_models => src/score_models}/sbm/grf.py (100%) rename {score_models => src/score_models}/sbm/hessian_model.py (100%) rename 
{score_models => src/score_models}/sbm/interpolated.py (100%) rename {score_models => src/score_models}/sbm/joint.py (100%) rename {score_models => src/score_models}/sbm/kernel_slic.py (100%) rename {score_models => src/score_models}/sbm/lora.py (100%) rename {score_models => src/score_models}/sbm/mvg.py (100%) rename {score_models => src/score_models}/sbm/sample.py (100%) rename {score_models => src/score_models}/sbm/score_model.py (100%) rename {score_models => src/score_models}/sbm/slic.py (100%) rename {score_models => src/score_models}/sbm/tweedie.py (100%) rename {score_models => src/score_models}/sde/__init__.py (100%) rename {score_models => src/score_models}/sde/euler_maruyama.py (100%) create mode 100644 src/score_models/sde/predictor_corrector.py rename {score_models => src/score_models}/sde/sde.py (100%) rename {score_models => src/score_models}/sde/tsvesde.py (100%) rename {score_models => src/score_models}/sde/vesde.py (100%) rename {score_models => src/score_models}/sde/vpsde.py (100%) rename {score_models => src/score_models}/solver/__init__.py (100%) rename {score_models => src/score_models}/solver/ode.py (100%) rename {score_models => src/score_models}/solver/sde.py (100%) rename {score_models => src/score_models}/solver/solver.py (100%) create mode 100644 src/score_models/toy_distributions.py rename {score_models => src/score_models}/trainer.py (100%) rename {score_models => src/score_models}/utils.py (100%) diff --git a/.bumpversion.cfg b/.bumpversion.cfg new file mode 100644 index 0000000..ec1581c --- /dev/null +++ b/.bumpversion.cfg @@ -0,0 +1,12 @@ +[bumpversion] +current_version = 0.6.0 +commit = True +tag = True + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:score_models/__init__.py] +search = __version__ = "{current_version}" +replace = __version__ = "{new_version}" diff --git a/.github/labeler.yaml b/.github/labeler.yaml new file mode 100644 index 0000000..14a4815 --- /dev/null +++ b/.github/labeler.yaml @@ -0,0 +1,6 @@ +bugfix: + - "*fix*" +feature: + - "*feature*" +major: + - "*major*" diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 5753272..494d57f 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -1,27 +1,33 @@ -name: Code Coverage +name: CI on: push: branches: - - master - - dev + - '**' # Trigger on all branches pull_request: branches: - master - dev + jobs: - coverage: - runs-on: ubuntu-latest + build: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11"] + os: [ubuntu-latest] steps: - - name: Checkout score_models - uses: actions/checkout@v3 - - - name: Set up Python - uses: actions/setup-python@v2 + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 with: - python-version: 3.9 - + python-version: ${{ matrix.python-version }} + allow-prereleases: true + - name: Record State run: | pwd @@ -34,29 +40,25 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install pytest pytest-cov torch - # Install deps + pip install pytest pytest-cov cd $GITHUB_WORKSPACE pip install -r requirements.txt - shell: bash - - - name: Install score_models + + - name: Install package run: | - cd $GITHUB_WORKSPACE pip install -e . 
- pip show score_models - shell: bash - - - name: Run tests with coverage + + - name: Test run: | - cd $GITHUB_WORKSPACE - pwd - pytest --cov-report=xml --cov=score_models tests/ - shell: bash - - - name: Upload coverage report to Codecov - uses: codecov/codecov-action@v3 + pytest --cov=${{ env.PROJECT_NAME }} --cov-report=xml --cov-report=term tests/ + # Generate coverage report + continue-on-error: true # Allow tests to fail without breaking the workflow + + - name: Upload coverage reports to Codecov + if: matrix.python-version == '3.10' # Change this to upload coverage only for a specific version + uses: codecov/codecov-action@v4 with: - files: ${{ github.workspace }}/coverage.xml - fail_ci_if_error: true - + token: ${{ secrets.CODECOV_TOKEN }} + files: ./coverage.xml + flags: unittests + name: codecov-umbrella diff --git a/.github/workflows/deploy_package.yaml b/.github/workflows/deploy_package.yaml new file mode 100644 index 0000000..e536229 --- /dev/null +++ b/.github/workflows/deploy_package.yaml @@ -0,0 +1,82 @@ +name: Version Bump and Deploy + +on: + push: + branches: + - master + +jobs: + bump-version: + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: '3.x' + + - name: Install dependencies + run: pip install bump2version + + - name: Get Labels of Last Merged PR + id: get-labels + run: | + last_commit=$(git log -1 --pretty=format:"%H") + labels=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + "https://api.github.com/repos/${{ github.repository }}/commits/$last_commit/pulls" | \ + jq -r '.[0].labels[].name') + echo "Labels found: $labels" + echo "labels=$labels" >> $GITHUB_ENV + + - name: Determine Version Bump + id: bump + run: | + if [[ "${{ env.labels }}" == *"bugfix"* ]]; then + echo "bump_type=patch" >> $GITHUB_ENV + elif [[ "${{ env.labels }}" == *"feature"* ]]; then + echo "bump_type=minor" >> $GITHUB_ENV + elif [[ "${{ env.labels }}" == *"major"* ]]; then + echo "bump_type=major" >> $GITHUB_ENV + else + echo "No valid label found for version bump." 
+ exit 1 + fi + + - name: Bump Version + if: env.bump_type != '' + run: bump2version ${{ env.bump_type }} + + - name: Push Changes + if: env.bump_type != '' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: git push --follow-tags + + deploy: + needs: bump-version + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: '3.x' + + - name: Install dependencies for deployment + run: | + pip install build twine + + - name: Build package + run: python -m build + + - name: Publish to PyPI + env: + TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} + TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} + run: | + twine upload dist/* diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml new file mode 100644 index 0000000..1e34523 --- /dev/null +++ b/.github/workflows/docs.yaml @@ -0,0 +1,53 @@ +name: Build and Deploy Docs +on: + push: + branches: + - master + - dev + - docs + +permissions: + contents: write # This allows the action to push to the repository + +jobs: + build-book: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.11 + uses: actions/setup-python@v4 + with: + python-version: 3.11 + + - name: Install requirements + run: pip install -r docs_requirements.txt + + - name: Build Jupyter Book + run: jupyter-book build docs + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: jupyter-book-html + path: docs/_build/html + + deploy-pages: + runs-on: ubuntu-latest + needs: build-book + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Download artifact + uses: actions/download-artifact@v4 + with: + name: jupyter-book-html + path: docs/_build/html + + - name: Deploy to GitHub Pages + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: docs/_build/html diff --git a/.github/workflows/label_pr.yaml b/.github/workflows/label_pr.yaml new file mode 100644 index 0000000..43674f3 --- /dev/null +++ b/.github/workflows/label_pr.yaml @@ -0,0 +1,26 @@ +name: Auto Label PR + +on: + pull_request: + types: [opened, edited] + +jobs: + label: + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Run Labeler + uses: actions/labeler@v3 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + + - name: Apply Labels Based on Title + id: label + run: | + labels=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + "https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/labels" | \ + jq -r '.[].name') + echo "labels=$labels" >> $GITHUB_ENV diff --git a/.gitignore index dd1037c..0c70b81 100644 --- a/.gitignore +++ b/.gitignore @@ -153,17 +153,11 @@ dmypy.json cython_debug/ # PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/ models/ data/ __pycache__/* .vim -/docs/ -/doc/ /*.html /*.pdf Makefile diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000..54fcf24 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,15 @@ +version: 2 + +build: + os: "ubuntu-20.04" + tools: + python: "3.9" + jobs: + pre_build: + - "jupyter-book config sphinx docs/" + +python: + install: + - requirements: docs_requirements.txt + - method: pip + path: . diff --git a/README.md b/README.md index fb0786b..07b2036 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,10 @@ -# Score Models for Pytorch +# Score Models [![PyPI version](https://badge.fury.io/py/score_models.svg)](https://badge.fury.io/py/score_models) -[![codecov](https://codecov.io/gh/AlexandreAdam/torch_score_models/branch/master/graph/badge.svg)](https://codecov.io/gh/AlexandreAdam/torch_score_models) +[![codecov](https://codecov.io/gh/AlexandreAdam/torch_score_models/branch/master/graph/badge.svg)](https://codecov.io/gh/AlexandreAdam/score_models) +[![Documentation Status](https://readthedocs.org/projects/score-models/badge/?version=latest)](https://score-models.readthedocs.io/en/latest/?badge=latest) + +![](./assets/sbm_logo.png) A storage for score-based models. The `ScoreModel` interface gives access to the following utilities - Simple initialisation of MLP, NCSN++ and DDPM neural network architectures @@ -20,6 +23,10 @@ To install the package, you can use pip: pip install score_models ``` +## Documentation + +The documentation is available at [https://score-models.readthedocs.io/en/latest/](https://score-models.readthedocs.io/en/latest/). + ## Usage @@ -28,11 +35,11 @@ pip install score_models The `ScoreModel` class is the main interface for training and using score models, defined as ```math -\mathbf{s}_\theta(t, \mathbf{x}) \equiv \nabla_\mathbf{x} \log p_t(\mathbf{x}) = \frac{1}{\sigma(t)} f_\theta (t, \mathbf{x})$$ +\mathbf{s}_\theta(t, \mathbf{x}) \equiv \nabla_\mathbf{x} \log p_t(\mathbf{x}) = \frac{1}{\sigma(t)} f_\theta (t, \mathbf{x}) ``` where $\sigma(t)$ is the standard deviation of the perturbation kernel $`p_t(\mathbf{x} \mid \mathbf{x}_0)`$ -of an SDE and $f_\theta : [0, 1] \times\mathbb{R}^d \to \mathbb{R}^d$ is a neural network for $\mathbf{x} \in \mathbb{R}^d$. +of an SDE and $f_\theta : [0, 1] \times\mathbb{R}^d \to \mathbb{R}^d$ is a neural network for $`\mathbf{x} \in \mathbb{R}^d`$. The `ScoreModel` class extends the `torch.nn.Module` class. 
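Pulling the pieces together, the solver interface added in this patch series is exercised end to end in `tests/test_solvers.py`; a representative sketch of the same usage (the analytic `MVGScoreModel` stands in for a trained network, and the `solver` string selects the integrator by class name, as in the tests):

```python
import torch
from score_models.sde import VESDE
from score_models import MVGScoreModel

sde = VESDE(sigma_min=1e-2, sigma_max=10)
model = MVGScoreModel(sde, mean=torch.zeros(2), cov=torch.ones(2))

# reverse diffusion with the 2nd-order Runge-Kutta SDE solver; the last
# step is denoised with the Tweedie formula
samples = model.sample(shape=(100, 2), steps=50, solver="rk2_sde", denoise_last_step=True)
assert torch.all(torch.isfinite(samples))
```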
Example usage: diff --git a/assets/sbm_logo.png b/assets/sbm_logo.png new file mode 100644 index 0000000000000000000000000000000000000000..c95cfd4c4fa47591852ba0e499d4b53bcefcd599 GIT binary patch literal 125914 [base85-encoded binary data for assets/sbm_logo.png omitted; the blob is truncated in this excerpt]
zzc(9vxecCN4xdkjD+dbDG9^J{U~7%L<`@i4h9R}#qg}A*2)s5NdYK!1{UmtebLsO` z#|1amg?A1}YA&%VDiwyWBPut}@xt)881}t~R-6EQurA*oqx4HIPzGyluFlYb)kQ$Lecze=g+LHOxdP;OYGw)Y?OFhQ zrsth)^Bxk~32g{t}_A)8A4pqVm3xn1T>^v!l*4E=vco2$+w!2E4tk|M#;`01jgmkshmJ|G#>PY3nkUUz^-B9ovnxYT zH$jZ1_KC12+nRTCE;Mw3Rw;(Nl`ZNEOA8sT0@}pe%iNR$P23_=)9f*9$sYOkR`IaT zLZ(6@oLZR_lWeju)DPqVWm0ih%L8?y%x7f~^im)MeLD$2dr*Xdl%51Pe+YLDgp0aM zLBw_+oSO!>%&tDvgdOtF1%28n9qy?(96M{AckNxDy{@~Da2{ust>8<5P2(NgH-SqL%jsJ;|w@h0VTz! z-s;k*wnivVF{P(mP59nq_;J5nS$Ahg$+$GF2S;{?dE+wpWh-Rl!bj5tOul?SjB5oW zV$3|WNP!!>NrdhG#V~84+)A@zH@t8G?A>9|rm=9{95WZW{vW9&i_*-bXY}gWd*IqzXhuFN16g%&<`jng=L@42%7CcLDUO zBRsv*Pc=#+DYp_len<|({vvC>jXBUWU1WBzEx{Hm$}R`5ACkiMy%)UJr7Q z8r(HX6m*ctp8ZU++M%(`F1-$-NSHAhAEt4jua{XsU7X{*8t>oh;^0xm zS8L(Yx8ap(Qc3phUy2H!!sXJqa-7DCJtgY%Z&F|NB)i{rV&85dvLl3;z7{;f-{ac|IFwZo=8b(+s*CfCd9$1hC+s%|sw3`Q7 z>hN+r{9FX}b0jU=$fAPk(L^|!Ey~r)!V+ukg3%!eS3*;td|NbXsKao-C|J{S7-He^ zCTnnp=^3{#fx#Ahy)`*Fk7vTe31%49fm-VDOdR}`3t73)+~Oa#a=`^U1PdW0Aegdf zbXOwyOTb?Y4_ZVbyc#qQL7*6hhoGOu_q2Ayi2~T41DEP>R+MKpPlD`XXjlZVpMV*0 za7#Qmo!ft9L7*HcwJwgRqJ@WmLWv+}+j!|~vjxXONenU%i_8-RkZA+h-Ea~r%S@RL zDZi@s%qf&Z*NQghLU|cb9D~fmKoR5@!9kmXoK;ys0r-loH~J&R!lS~_@enkPGD#K?GH#gscAa=z6aiJCtcE=C{BmR+rZdbFmD^U&4{(zC#jK| zG&K#*b3tAej;^FSEmM=~>(KeId>aMU{RIyeLuC<=4=Zf&kjKj)y8tMRQI|?Y_pBwV2$z?_4m3V3NF zywDmhuhAamlV$3k%I_wr(k~shjjR2 zymSnRh1#aU8^d5yYk2c-*l-+XO@eOC1fSNeAw2q_fv&*~ZQ#Z3@bqedrPMW3yy-6Z z@{ekZ{ESqX+Zx{33~^QKvkdk|V9zw*KXqZlPBZ;p9KD$aKke+poSg_EFw^~Z$g z&kTFG#KOZsIT#i2%Mok7K-k306~J;EzJFv^fU1DS7MT(-oyHPav=?e&u5Qas2LG#1gU8}E&eDzyJ%UW{Cx2+F1 z_ko9&N$=OshQo#J;EE^03~B=jaT){K!R#yH;y!TEyW*_8I126@0Q%s|0_;DgSogV? z(Sziao%?B!OK+{@#fRc}@}4*h11`HIlj2f?_MK9B{lyk|JR09FDQ4pJTe)|-&QmwL zNJ@m1bPc~>X7ZH>yriWFE^W+%2B9!4dtT>6j^gZFLX5gGNak^aF~fDP8zmp~)jGI* zF1#`oDvA{My=|~$gTmchqkl7)KUQ+fGrpCpVICg_?=FI6+Z9&~fxC7}KKh$}QXP2J zuW(B@*s~Wl9)`2bG|NsOyqW|5X$rq(!TJK|9D>YTY4K5$UaNo$lVE=-^az8$0P04W z;B{d*06bnFyg+;wq*$Ezxf*nc16Q_nmDQmzbn-x;09yHAhlO{XZ9o?tf<@3d0Glnm z)Di{B6%Z&CBWAmWyj(x19EqCwJYbQvJcTxdL63)F36P#GU9+Rn(Xe33f_YFc00%AJ z;b<7*%RnoY_l`yt3X%d4$`QcvCJR~N5Tu`g=5FYl1`DFRTY?Vta-q2wx+TC(N$^~y zbHPPpFJ$L}3cx{|s6?VJn;c;Q**3AN%0T)ioyZZHduyC@e!zZArjm%^D!k3>V#UjxtF)Tbr z0hATN&M5O)R1PS4zn}KPsATxGDB@Y>!BnMQdY(jXRun--43##;A$aKsJk%QA*al4l z0`dnr2x`)&g)l5h-sSiSd1$oDsrQATs93&@FvMFJIcaXl%@UrY1=jP~Bn}Ro5WdwJ zqBB;sH%I0&>!j$Q6*Y;2ede?O%8<2V^^WUIz8%M)u7yLT2ZX0A3$|PAg+cT0D{~sj%u5__s0Q9Es=t=pdX&=0rzczBc)P>tm_&F zPbyyi8U{9kOXdk&;D_^Ic?LYa#(I2e(((*=Y%P2=8rr17RbN4eTJZKlIGn5SxHX#9 zlBo9$o8Z#s&@~yJUkkO%VC@l+Q}e2}Pjdw>(>MSpPQqqW=PDzL0DKCTRe(Pmf(3v9 zn#I0+ASiryasv%f)0NREn zy7pp6_%7SbW>MXGYFmb4*G`GH&Rz#E_LI!RwZB8ZR^at$WEU!~dqZ(mZ@B#{*qi}3 z_JLdb!Q`(&FD-{jw*`6U>oAMo@^JGs`NFS$3^DPl3huhz%NtL}la!>hX`5(7{e}sA z`DQ}^mi(=aH zbW(_~KMyhcCY^C6`0|T2QUyBe5_sfX_MB;rq$+pm8cZGX{5?uF5>n_S~`6P4?;t8zGh|V{+TuQmaMX=K%1P!p%CyQ-!DOl`f!WRJF3D^__ zJBYVNsOsoZ;*ci!?G&@6#rO_$y!(m7- zhVqKj+b3-D!9k#n2O5^iyG65ebPdAIz@I1I*ucUVx}X8nRx(!iHL{-1o+;4NE%W(# zCz)R>dask{aU%o9+u)PBCW{U$AI$GyEz@tn3r*xZ#-z)=uhFPSyo`sZ7&^6#&J(Xe z*bhl1a7Hay8f8zYmn65dH21;0{qSH5_%a9f*Dz#yG?i$y?}Qwd&-@K<_lJ#zFxQNV zj_m<^jw=?fgon=x}FO{-)EQGs|k6}*`ZcQl8E$DmQUT+vo5%J}{nf&q2GTWF54 z#XFwe2HYhwSCPwOO1RM`eVp!wOVdQ=i_NU}5P)af zNg6)B5^im7&38>5`3`p&W>`c*>*z8bzFY`3gkw=2?A1l*npViIM9J~+WznfTEz+U5 z6lxd4(qqs-!QgawU|%)Qptf)5I_w}scwhHDo|+Ww!Sg9m1XxayH0uiWG2?MK~s+^}Rtn6suwro%brp%*sflHdBN8h4L{Ra@cN?-ipLz?kOn z!DzU!D>N_cqhh zQ7WHil|wkgtkhKSw?#kU0PMiX5rDS3CmrbV14IV9Pqtg6l-VDy8TBo;vuLY ztPNQ6d4NM9;rrDuQNU;v3Ic|d(l58cyhPHEA=AiGf@ia$?{@R0bbCm_)*>-bR303*NOOO4 zQoc>)ReSQ$=(hryM^$9%ip{qy%?CkypPQt*7Q41wYG 
z;JS74J57@&vs<@GH-ciHqyZlvEa%6hAD}b_M=tW2moJr~r#T~}l|_jUX7qu-)}I<3 zeQ<~L5S=$tzGp}yIJjT1Of~79lM*eQ(HE}RLHe)Di?N@S$9JCM`jtg%I)=YU6IK1 z=rG>`D}1m7$d?G*-m>VIzbCjr&k@6IY*Y9qA4((mylyBw3SDc%;D#_~Ak01rha%3S zUAtgP7dT!bS2w;t6kgm8tFlG!b4y_58pT~B1&jFbkI=ILe0e@R^Cx_=P2}iTpQHWL z9P`tK2E(TZIB%qjrSE#FlLmip2s8Y`a?T#^V#Sww| z!Ll`qb8icA>r|cFrs>>srHdu24Z2S9(J~!=yh{`(B@xPfkeVdTZXdl^)b;Tt@bohH z?n=3f>xaz}G0QJgq$Fs(KUECN?Rjw40+`qudL_ev*|6<|DD?FMpuGoLd0;~k+_()M zXa#rw3O^qb@AZ7M3QYqBxnM;Z%qxTemGH7zQPe-eyKJP1L3GjwKOT_Wef=mKvI}UT z!KVReSR%RSXzeX@V7dpgLju^XYq5(q(cmt(0KrFLy)lM?``u#Xjs+T6L>z*^Bu$K^ z;lNM}4=E1WAWQu)-enEuuSss8s}A{LsiLxS(EI3czFRi%4p^rgFLFU#Sd8oYY|=CB zm9(trroq`3;qN*cpl)CVkr0{_sVO1Z8?6~j*s2mMcZ z5`y1SXdme~=7Y>qQ>M*k2eNUg0%?cfvyqwQ7d)COa+djXt{lcYi^L$YqLTqQToRki zMW)Q%#rDmJ|11)|Lxs{>B^qV=fI_+H;D=+;(}9`AK)JLKaUF++gYfPd>k%VACm&Xs71d3>@O^~SmyBt&W|$6SIUJ92Flxv*h5ho4nHC3ZaR?3+ zm`0P0p`PcGx7|derTLO>q!9MlU|^Q# z%Dk_zak@a9IaLZ3<*?cYvkLHsxrf;~ya2!Cfl(%{LjEeU=38GZI(MuP3M>L9Cn|v= zxxaS7k?4`fo!Nj-j5%*6%*&CA^P2R;rphTdnj@RcG!B8{GSWpQQ z>qAC4Jib{VH67yN&JPq*dJ9i-oZR@bXexZS2OeJ~e?Ora=+EDv^T%u#t;~|^wc8Z^ zrus=uf_0yJ>C#qX+b)BxEBp)@=%!(PoyYHt(f~y&MW!< z`>)bzTpy0+!HB1!b$wWPmjIb>e8b@9b)vENP0{FXhI^l022cJ9-(CS@+R1e>eI9{# zmnzQf4{r??zRkIC_V+NQ9bDZWvW~)|OJPP|xUW9^++3>HKFNT$j=;ygph+pbv`)aE z0Nq{ibux@}!vk4R*9Dg;_-P*uh$wx2(F;!}2&QaeNLp4jjABHbrNcXMaATaLG6MP0 zTSFMY*}4Er%POJ3;!r%K#5*es!if+RMQ5WD*l$2_P@;yrZQST>VJHpCvsYR?x6lwT zQ`caE%f33bihApCn@27h4p+iSi^F!K0c~~hQVT+0=2QG|d)ZA_X(q?I0ygInFOJP)>*(fMfC zxAoBB9yf%{!@7mV8Xe}5F#_mR8J!12C;UY+pN$H=UglBwu>ck(o<8qQ%`u<113HBzy7qDkyqHqW z)4QQu%-!K^52a)aO+CAvkP4HnSkxHbsMPNgdka5~lQhfuB)&0mXCP1MXjp}Bx=d^W6H7ZE| zvUWBVS>s$%yi!&SlWj1oU0oszz*PcGZPwj6ZYi89FNR4r`q|ZtfF|d`P>VgJiK$QO z1dLa3Nfg~oH2?0egRr3h`Wi4VVqLdQm2>j*-OwitPahOxdPxHqZhHIu8q3Wje;JaM=o(^KUxC(=FiKZcrx~_GH6|SER?@z5+>w0zB3Wz8VR7_vv9uD->HkbTQQg z{vXeVp;rdjuv4-6GcS`y>U_HaiVJ0B-8|h(*N!@?cfp9e49@DK@y%m8uYYDR`uY&N z_8E9Q8o#~hLe~V`JK||*Sr2}?8xCf{BkvoGeIQI(F|52#`=|fPGt1%erSQ!aFncoG zG8nG=45mB}S9FG#y9wX=lQ4M^+}K_EqLmfGxDVmA0AvK=r{>VWEuF*DJjetSdQvg8ww6oAqp;cu+qp%C~&P^MsY2x5V; zZ7{^3Jz_q8olV8!=8zbo+ibpjn*k+Zi7u|P`KC>N8S88V4q5R0ZGJ$OA@Y8VB5y&K zq}V{PYimMM)R5?4t?EF73fXi9*%%^a3Z{9*aO~}uTOsVIu_<$?&Bjy-OfhA4_lZ7P zQ5a}aA^pvUdu`f!02g?r*HPz6Yg`~mDzdpLbEMY>!v;)@6TS|WG8R_U4hWS)T7?+7 z(I_t{PC=*u>LtU@DDxTTg0fQRSq4cuOiF=O77E`e9!_S#u@V@j;Ey02aF`!}0tGLZ z!jma-@Rx;QOjz30w2i>9%@4wu6lq1#qfFk-8|84?Ye;j}P=S17jzxqZL4!J#GFBaI z-m%DlZa$gMi@l;VRy5QrI@h?|K6RRJKrg@O-88RtQBt0Q$%ee+Py#*_BWx zeekS?K2!|N%3xv=lzL!o#Jsmifx{=^LJhW-!N_=V4xX#=flj=*#41Q)$@s9??R+io*JHRy+Icz!>8*;fjl-rXrt z+_#1bUizyoFsX~I&+D?_sx+983^#9v;dP;#U*I?W8&%7k=NE_T&NSFr3FCs0nGcO3 zjEQn!yABs5L+v7oU|Kb;ZWAx#AFimrB_xuLhv8yf=5vYx$87fH4lXGeODcnOn>{qc zfbk)bIo&0Dv=y~=NvocOvgoaRs;FK_z9Y^DjVz4HynyIjxJYb+O;Mhk?3Vj~hxjF` zd~+>$KjO^lTpPA#!8w{Z72jpUo3-KRO8DK3j*n?8y*6!~P=Pw66J0H%z%i!rx3_f{S=fekek`th!5~kc8;@9`%iTA+PgD`ZaL9e#(^Rqe?Ws1k%3~~1}LE5y|SaiF1 zG#U9~v^B2-i|-Y~ZT&uIc$LASql(`zh8CvLHtP>qegLNSf?LlJVBgmVVcY_^wk71Kf0W1nbeh4ZRBw5H#1O70e%&<>44$>f7 zL7|d1BUY3Klq)f8t>h(XVq9zvicuX6Cez(q*iTl#Yvw|$#CmS3==UJ_%cSylsROoU ziC+pnPWWN3&1SbU0F~uZY_i0mTJe{F$jtFaH(aXdkAMKO1NriAY~M2g{*#dJhpjdd zzQ2RAPXzLuzkR7+n!y(NV55!9wL$P57sJ|0X1)Od6PvWDBDzBnGN8b8yi#*)LPfb1W>CM+BPol@XX z3ACyux8`ix3oA`eb9E)WkOy-b!84uZI~v5po`a{hcc}ngu7n9Xw2zmw@Whem{qR1h z0I#{;f*I9kUnI>flkxB!hr<>;K45h~=4y4ozJh*90D{HhDA?&;v4U+uk@H?BbBf{%7e>7P;M3tw(>|e?3;)&K3)dLkBV{E zFb-z7hPEEKy9P{Qbq4G|4DD;loOVuw`aM-VmG|^7i`@J1Dn9uHj)M{^~0k* ztBtkA$Ia-oQt;Az#o!ymoY75V)B7%Zx0i=K^>&ykcL#awMmIma;iXF(7;;V-?))Gd zUe{@y4*N4<$n6G=>%j7tbY8w*XU>m`UQM+x~XKG8=e{~e_oXj=ll&i0EU!c 
zU*Z&;sYB}+a-#JRtXO?rsxhs+s*W05q{}z=>NQBMKu&hvU!1U;Tv)Pz*rD9Nsg86vKoo<=gCJR=wwDn{+J5-L>Hg z*cFs_jz(pMR0fxX;A;!xD%%IiA;HUim<1=p61|E>+m6V8kCsW>ktH?;P$UI_yNjVu z8N6av8Fo*Cby2WbANawSC-0OJHgPmj=xg)aj0#yx(deJCudr_812Xm-%{f})FrUi; zqH}wL_J^^K=&+MaH)8mOE8q_sfu&*j)`|cuw$S%H9}tv-Hc))}!3mOeX`dkb*M|qB zn()O4%+{}mVO(SR^Md0LXBMwqZyNW8k}JHfH8VLm6>zc~77P|A;=I4?v7%OV<2J!n zf8QUr<;Xz~aCN6s@s7{s!NC9|Wy5^aDH&BCmPcfLKJ!DC4}Lf*-`mmyTcd3JE5eXn z3V9)^>aDPFmX-&l2*ap=Tnioz+2g{MGOsonQPu^)=M$W(UDQkm{xoGS_uK2|hM?SU zoEL~*)mcS*gVG3iU%hgrVRhyyrh%-m0wXs`nT6LH`nX zxKM%}RkUa?T+~wjyg6S|;ZL-LtMh(75MfjU6Xo(DTr#J}kZtk~{*cJZd(&z2z(^SN4VAydMS7x#D7(lnXwFL9fG# zgK%jtsSE}9GattN4VN{8**&3e5?r$c#{U50T0o0<$=w%(;oJAakZmJl^CmoMR_bmqW8W2$x%odq@dS zMWr}h&7)X=ZXnqOUz~(ahCL3lo-X)Le+Il?XBLRVihINrsJ(H)r0coO zEBIoh<{r`6DI4T!qx+h{kS}3X2DDF?N>C6?qybFNg1+&v@gV$mR6y;vi(05xQvC$9aJZ+)`Ah%-}#YYNrW~ODPzvqOZFude9?dRS||; zY?Q8>F6&b%DVVG8AhS3GkyO_$(aXP1Kw=2y z2Torzy%jUOM@XkJhLvaAxWe8%AF=M5geT4(XgI*sZ;hh#;tSibVk zD_pc~tugv(ShxvRJ_244uy`$4XpSaA{bt`7t2!kKTujq~8gGhw2cGF@E&WB-DwP2sg3@Kh&Qd<2Gl zBR2@lPZmWr6#P{I-Q%H!SHQ|Mw#d~WZ8Z2XUapdw;T1*JHE0upS0cRQN8D1$88A1A zR0DpD;xRVYAg_k;<>7RkMHO|@;HV*oTr>lsijEl)X|OYzG{nI11N)SH1S3O9R3OW+ zK46Q?gCb8s3!Ez@K^A#yB`I8hlQ!>b_Eom^lJb%CZl!{qcH?9IE}*Od)hCSV=am>R zCdvRUkMP(JC`n(n)+L=~MH|CV%YaZ=@EBI)L*}O<1dUX+!5)n^7}AF<7_!%m01^eP z2ubg(bv8!HdIQo8ahS4fWQLF^WMv2%D`%N?4M|sJJIE|jBC~;Ey(U2Bs5~foXEpAX zNHiuGhFd++)7I%Ocq?BTPLK1z2p4>80e-IUhH#d=o1tJ?yi|reQ8MszBAn-zb~=5$ z(7O~qI1Ifa!q+#t;fn-;Q*;PORd1~*hrUMKOGc!&MfP{cgR5%nU=ChZ_p6aYx6*CFpRt z6j~cF*ag4EOSHZwbxsug@kpOU=ndd$FFanuq8!x_X0C%X1MqrtxIa_U>gRf3O)Z)C zDIMUlpQT^kw*#SlGK^Uz4aHk|VP$)GxDheYxMbMa8ipr|@iK9(V5)!WFTK02`%Q}H zG~mlbxIYe#1mXT1c&a`ODT7D0!pItdhKVltEDk!D`ZJ^w7G^=)h>bc9_#zH2_CUiB zWR^#dUp6!qWZUZvWh2KqEwEvSz=J>@z?&q?z;% z8<~fd6xOu>4q5P(D(O~181C>wpD63Sfg9H3Nij}9!K?8wC&D`!kOs5%!jN(pl>ncV zNfY~JNzxGkU}AH){!i#t2|ss*WrgtcQ8`!Vr^B0RFuglW{}IYh!m~Y3kF3pX4m$>c z-fN~JEeUqb*0^gD|Frn{Y^kE>RbkSTHP*cC;-&8me*HN}c$=SnI}P4_+=I&v*S`yQ zeF`7k09OuyHCq)=y&gu(Q~dF`Mv_NL?1#^m66uvU!%Guj$3f_Fmz3%LF+r@<$`DNa zL0Sd;Jry1q2({v1?sj>}%)rMS?4kHKY z-mpZxYFo(ef`GM)40%^8I$M*pX_rPQbmGa7c{oH+g@XICtJA6p zNQ5gI{hbRgGetGc3%d=8ygN}DFfR!Ad!c2GFivI;Y>B`xtO!H8f*kXk4&}=`M$!?a zA{+$oXSMcrD!BoqC?Ew?R#b@SMsXmvc`pr$p6s*v{$gazW0ASRoX=f`^|ur%`Bp>0 zpBCxeys-4~1UP6qNBbk*@y`;u;h>5d0mEG~M^*IRL5MRe#^ZIERvXTagEy1l#Y#8{ z+`STlWzZ=V2GoXSIdW!nPk`S$zxJv$;ktAQEC7_}!-C9enZKDh>UydgPIDq3_D2k`ANV^g_(6cI(&#-4 z0e&%~wM9OA-Ct|Y=NgB-Ze>``+KR9}Zy4AV7MXv=u!mI1Ty2*UHRL<|O7POrC`5H1 z)DiNomqHy< zteGwV%iJ;;`Ihuco7zk6Q#)D;U0wrU39L918pcDNcqgV#bG&#f?Dw$xb$W%8C&d{KqCXpyBgY5`n;FK#da;&HX zc~%7yial)2N+xm`w$QAjU@pW^tFH-uZFCILhF+R%5dC$j7PTt%PcfsLKK0KvZBgln zlJK*Eq+6}1!ZbdUHRz{D#{<^UV7+N{=V6bDt0F^5)n&G_5B;}5DhdNi(%V)PkF*_8 zz^*Vfv1X+OQVbI`jX^8&B8OKjGMghFYREL)tUk$$fQ}6%GMhTc+-u6rG3>ESM@^Z# z4SSg(CEv3*d}>-Dl6(p>!m=Spv%|{GIjI7NmmAgvG{?<7WMdE#g7CIYQA8xYV^)+l z3Bo{jq+Sa4_ghD2}6qr8uS7yTo3VQ&2Q((6__J_ju z>E1$9=k^<-gI3f8(G7*@wADW6MWVS1xw@{N1z*_Y$v5CHi*G44WE~#}OUm|(&_5TG z{6l#`80Zn*xT#p2zPEy>8lNY9aGBY*>8k>`{UE#+51;tq?W&6Na(LM6!2TcJRw7>@2Ft@fxZeZ!XH;LMc44X% zj#N<@@O%J9XmD?tIHNt>Qt%Z?KZL<+NTewRYg}j*^hP0_4x&*VV6&N-*%P*I&<>Ja zDpqjZkTw@qR1b-kDWqtsmJM$LqNAA#8rtwxof}6C`|MEI5PjMnjviGk3_~kj^m181 z0xaK0C|zy<#w3W|JXk2VoFr&)Uk&5=L?zs44#I{~xZ?mkn*^ObFrfer_#v+Xo-=J7 zzX>yG6c0c5f(i9t)JFLFWR0SO4}OJ?Hz~FrhQD9Xn0uYpt zr}0ge&hBII-4%-4M@n^1Wk8w~%7wWa;|D>{Hzc~Yemab70cW(5;sb!Ljo`Y0lG7i( z5{iPb@=VYSIDa9$w_OHwW^ZU@p8hZ9!1FoqWlOmVW=jE#S_MHyoW zV1=n50VT#sG%7)i#WLlLBim^jiF>fdy#Qn(d>KmaJ&i`?REMVbDp(#vv|*d7Zr6|s z{AA%Rtu?vv-&@H0Ib7W|RY$(biLQ6c;gy7SH7$to 
z*HFnJahVP7#|PZ(7O$@lQV3y1Z8Yf<sgToj5lRo?Xqt>Gfo#3p(F^D;&##b6P(1N@Y7D{K2It@A01c?9DVjAyqXK&w1iiiz^(P*>rCjj zOaQ$)q69e#ZmNK8E~x;#Hy*M=@MLcFg42Qanxr^E7JN?~K8e`Xihy5@YCK6hEqeOv zfP#0;35e4q?P*0n7-F!vH2V!;g_4Eh6oKDi^6hXiMzT%5-);E($U&H^-~%JJE1H79 zYlcJ^-I{ef3_^P6GUmMnUs2WHU&pZXshV9dQRrFg}Haowv zH}E!^#%HAxFV&7V84`Xk2KG3}^eKrPI?3E<2p*#Z`@)n>s=A0HR+(kk;|DjJGRtF> zS!VKXx8dDyIv!pPmfB(6!hj}Dl%39(n;U$64eqcwu~Ia_vy5Hmyts)&UCf0=o zE7Zn-D+-`n#Iq8CyUf2!MSA^3tEAqcOaQbdQ8p|3H7sLPtWHl2)}-C0Zj>r#Lv(|w zA3i1eVnusQnI%T_{#7+;hcX*+W35YmgvUVEQnb5%Yim-yoTACk#qQG`gmx~txkMtr zgIwa2HjWtM-ia_|4|J)7kCJ4JZ#7$lxG`_QsRKQJg4gL;AFFGkeoaiv-Tv^OhAtv;~OqpBlw;*`MN>i8Y@Q^vcar3!5mAyl; z!4P?Vhy5l)2}CAAgP(1leJCvFZ#yk|abQy&=K93B@@P`H6#-49yB8+sK*uoL?1g2a z)3as+Qefa_XjB64romt@Oe~P-9!LSp;QG+(d)Rse?r#Qv^@9TyFl>X|d;{`5`qBTu zO`}h}K>7D=(ET>WJ4@h`J2d9sr7<8u(J`!;d|Q~AuZOv5vc}4z(DNDT;=Ao(XjcbX z#esh%{J0G6I}dan?pp!V7r_f7;H@z*treW{8tmKvE6#-!m*k}<{3-XCtr`g*50JJG z=luZ7_Q2|NsUQM)H3v3T!i8xtv!0|nLrRJf?(;~so(A-GRU297d!S1Vg>DSY^@!44 zpiBfXTn>Y zVpc+`>J1_qrI>u0vc5hFL`Jp&7ih6=s_dhi1<1ElC~Hu46|Fbtd#|$ZH&l!i z8~huY`;@)mUy5vGvB=zD%G~cDvm6=I3WrU(=6fD+*pv^L@=h3%zPF;PCaf8Ty&kOy z%2-!Yy+KCOXO-1XNi7X-a7B-xM^YT{wnz4os&Put1JbsmnFp>K-&j@b(IEy{xJNQ zC0A^udf?Tb@^8P-l)gXTra%uDH1}42NU96&u7O_vDGX!X@UREw1z=DKyi_S0_JcJN z+Sv&9u;;c0oqNo@1f*JjMNdjwV{}RL=bCh(uDUy2cyl4q@|jh5sxK? zb@+XdCcGMA4B5SBV@=XNR(Lp&NSsee8gZ%?9h;R5d>oeX?yN~4wn$H@Ch6w)lq!i} z*fs9$r-?^esiIdJRZ$#*{V_}rv6Id7`kq3z;7^*tmXhpqE2f?qTXPuQ; z4}xC`o8Q#cMrJQlWQFZpN z4U3Z!a*$a?eguU5fCt_T$jvE11>>WHF*1NNbeIzq2ds^SH&=q_`#uLlw2v8GGn9Ri zQD;Oq!b)_-rb`Sxwdz8jzBXgu37K<4iB4NNh#KO=f*inOT0cFqc#FB={t|$*N}vJoS{(csk|>e}yw*&jFGV3qcjbj(f)`e# zi~RYqRAM1U)Kc^8x-Jg3_~7Fr>2>DT;Q0pC-~4q*paVTL7+DSrO~BcV2;kvCCF$j+ z!2M={&fqBe6vEu^4blI47IT_Nwc{JGaVDEmZNMZddxw9BCQf3J61=t*HHUu&AW7Nd z;?B@yT{_8Zilk1<47mlw>g-C^1i#iqi|#yuRD9+scq}02e>CTPvRn4HxBPII4<2%h zjZm|C`w{cN*%^e}ilisz7m3iz1)rF`Dd%>UBFeCW1C`J|5gNMSyN2*$U3j(t^tr!7 zw+ChMZGRSS7&I%VVb#{hYh)orK;I&pLQ_2P!w|0 zM7X-0{QbbjQicw2PFon#9)@g}m;0+Lj7x*n`C_yI45riba*6G?d?^OpHmHs)ixgGG9?>0$V6pW@Ra~N5o0sg!TUI*#1M1xW{MHMEG0z^ z(a1-2#bF`vlR7ZPjct+@t&TD412pu$xA2FKkQqooJPZF^Q z76+89w=gitCX}6sfQA#Xj_S74#8}uD7DMH&aBMIDfcL|4d&ct~*&m+pOC#>o2)=y^ zRw{|!9Sd7K43l+vb`oMdSy4Tc$B(^(mWSxRADLGx&^ZXGJc#I?ozCYX^XWqzMXTp4 zB=X=>a`4$1!IemKvYQ5jqa4OAWUh4uV|2lAMHWyr(Bu$)F95?_aJ>s&37rZ*=;x9H z`kFF09E6cBY5nm+jbf8aYeU;OcqkKY-3QN~fcxU%y;OL!5-uzj$W1I103N7>+biJy z1b8hG-YtMfkHNz^(r;~yd2p;S;En)X;Wja4A-KK*I=ZT_MFbhg=Rz<_hlx6T5flS1 zGU!(e*&n;<;8NBOReeP74D9veB;xq_l&l%MbsUcvP4U=A>9Y|hx>Cv97ufK65rZ)v z`}3$oiEb)jh;GeWY3|(>O86qT5ic^PJ44b(@}PoWA`bPYnn0=U@yUMmidXjbM}pPg zJv_j`zUp3l$4iD~ih%wOeSc^}(D8u<9D<+5o=V2%}$v7nVrW z?VfS)^dWd`6TC497M=xP?0~-S!_7-zO(x751?kCf!9I9B8@}xT=Ox3v8{wM2q_@oT z-676YraQ`EK>)r=g|j_yrU8TY!Oh3v@*1aQO{48@58UJtBjMr7Q}5*Tn3I*NiE(+k zOC~q6oxnj=9a>G-#1lOhMFEy5!McHL9#$Jv82CcT0b0NBJK+a+C^2%fl>KIR024!|V28tdR>?e}>ny0IV-15?X2mDHmUUXDSbpeNg+@Tz8Up|5~Cs$Ob`=1S{Mh4Q1IQj)6yqLJ zi5O#cGV`duS9yue_pAw5V-mMS39zIB4+g~1sinzSc8eI}R1GFoz(8I41J>2x`QWKe zj8}ucxzaM{MlXDoDAD{&D6%2_t{*ACJC+M?Er#=-Q`EUsG5B7C^(SE57m!~8n{R@b$4Xm{lR41l5g70S z^lS!i?-MXxojBNjzIiWJ0~q=#44)5!o5-L&bsR3<2X{1rpE|<*_2IJBFl9CLGL10> z+*1Kx`e9xQTp1@h{?7{G<%p0)L(}8;YEn^EmBOs)LHL1B!-54cl%xq;MN9~Yl6+{u zoUj}?qisC;UzD^DuxniaqNtxKc^|tpMG;c@S>zzE0$F&+W8gckoFj3`z`Pg?#hR1{ zd~9NmTHElQ09F%&!KUmn2p|UA79|^r6*Wb^`#r^3gAti=A#)>vqd0gqS6`?)ljog|U3 zg%SOY`sme0+~scRhQBFdLe?79sosW~d_$z+ZXix{6<-|I#^w)3b)k_RRq345UUvjp zbBC3EHUnbUr3_^K9CJ9A@(_8uly&wBs`YH15&b;OL*O$mO&pom!qN?)ChaoB0UKd@ z4?#|^+Fs_DNZn)cBFlxO%B$9QP{HTf64CpjA=GlgIY}@k6J9DhbuH>^?t%+@R{zD3 zTp0B{Jn$8~atSQGPcCdW04?L>3WxOtusa`S$I0NO 
zYf||M^6`(Fap0=(;{;ti>0iTeuV3&EE)8yrh@YnbH<|#z&%@(FZh>qLb_Xqp@|R=ZJE|NpK<)5b8nSV9)L@ck-4CiJ zB_M}QzB)akYQgulYm5J>N-3-4sk|*4qNwwYN^SgxWjE%-8+~OjD;Cl``=zKh4C|k`D$oTCuB#SX#i!Sdxm%P{9AUqn9d!745 zY+(0##2{K9hMWA-`lGi7&$`8V)_}KC1wRx}a(`w+bH18sFee@+L@T$9LQOUJG+rWp zAO_KQ0dWkP>hNNXkGR$a=jf7}z0xPe0`c1ES35NVF4f_kFuWI(xjie2KIupg%?;-G zx3GE7bR=qRhgJ3<4s6)m7wxDva!zI_d#Fg2%uo#HL6yv74l=z+#^b1AU-*}%RTnMQ zvEb>ii>`kj1{9nbz$3okpLf83mcpPE%w7iFACv~R+n<9o`@!_HVaFA4 zcFR*=_~(7_)>`nIiq*Y0Y#a|)*68G^YtSG;3JU;k?E=kPz}fMzraqh%Cx_4aGFV}n z&sWxmiRoft8o-wc&|8P|OX0zC0SKSvg<&-mIta{ki_x~%fSY~NC#kh2|F((@#4>)s z1vk0G%6%Xt>5a6A38{-L{I2FR^y)`fp=^N%mU6F8cQ}Z3HJeyQgW+$8%Xe(rl><)zj z1lZgc>kTOb8J`}=hTC7Wzf%#UZ&V{B^D;lWD*bZ}$=|)m(NyyyW zQB@DJGaO{<2tKPyPwc3lsc%Cx`$&t9jC(w?HteXkCj5gn`;?L!(aT<%eSftAWMdwx z!9pcPfZTTOhd3dT^z=;CixUR=#u+9ZGWQ`c+=nX7xu>ef4h^1hLdPy?jEXo4fY71LDZG(yYPv zAoD&^6FJdvhBc`fa=y0H?3>{RfwN2<8Ra0e2{JBiH2W&(5uY@Zrq>*)d9X^kPGj-9W zM{7ep7d(~^eU8B;nJ}*u&ho(WR59#-3xa<1I93RM-vRTdLkkn>d?*JdzXq8F zP%B9zwTZ?-4=kDl^Tt74(+yP$zS#-aEP&6qL2d?bx*Z&3eK7%R0 z!~BDAWe=DM{7?yB)q&Y*u)Gl7I|4noz^oJSOmj%|z-@k6VE>7On>})$=agc(R_eBd zYD@YT16~V>(RQx~E^*5>Lz8@`Zo=hAMz9yz zN+Npgu{JagBB{ApB3GLckH#)N0)k%(Dtnwr6$OD!ID6bWG8UU-khu-PL4=h3!B7JH zi738Z1O)`-nu+}ZuOW+fGjP;~Z?GowvRg?(gi~Z-vl8Jal|6Mk~D=?+WLr+G@nZo+)}jfIi>{e&_xrEIz!o4z*Uh#Qg}-Y81IsTq&mQT5g3*# zW7(yNvCvzGH^ZlI{yyYe4D-9~`lf9*BW9)88+Lv~H%f_Z-pdi)E41MYB03z4%o8e`I9he&y=SbHi8W_3HlmSnK#8_*r$$WfioZ9zqmx*uAQE+ zgi=^#ZU#RaaBENuk~`gSn_K4MrogH9{}v-nbK7ZhHucjaMI8A`2~&Z+CRK*7C1IfKTyq*!E)0Z)gk4de}BxU>@1g`_CqE|0Wu`NM#zep%=5 zmB7$Uc&!NT_DUu54=M0|68s&6{>AX3ud3*v0516m2LA_Iq{E(v;kk*B8V_}n;qx`H z(Y)z(_&~U2J#?!JyQjmYX07(oBKUYST=Eq(d>;C~4NqjiklL_zFmz28V1I2le0>-$ zTM5m+g!~{pR05-N<*J7D_2H3JxIP~4JqrD{$;H1433K1*hl@OLl?R@Plg4&el~t!z zK`z$bs$^pvuS2D&!}UIPJdI^hLCuCB5Jlh>pbIOADB#&?a9B83q4k-kh|9nLAlY@8Q+3W8tT zZo^X-+2B7lV3CRqmv;i^roB~k5J|&0jp-r8_;%Wus$`bOV1WNynH9>uzZr;rg#Lw0 zpK``g9ijRTUpw+6c#&P$7rRvH&Qc|KKPx(>WV7`vYrNavN+K9ln|CGl1ssv@QS+ks z87^@|YDO~*q{wKYA>-OXlZ`)`)36_bBY=Hmq7o?{DR*!vK*l_*?60f?(Y11AUvN-G z2TYkxSlAQD-fh<`8N{#%DEL}MH>!h3#6Lxo6yRG%^=&a#RECV%WYejxR00;S=@{lC zk<2IEa%LoGa$ZznD-{xH^#boCKpz*J>6Sj7XO+WZCr{6UHW_fC+3&NLOXU71PK z##o-YkGRZtd)Q3V7DbFjKGkW-ak}hpk$P|lc+-%5EY5UTtft{7C3}Ni3K7ivva!CM zr;17uXZHxs=f^=)rjy>4BLR^kI6K_s=DXVMpHf9);2E;MJeun?IoGZHn9G!O-q7X)(OA0p2?c7M~A88pB(os%7oW zf#=sj-@jmIDXbX`Pql~nJ;iVXiMnkwchF5iI5S7O54Ui`RY`DLvK$6SYq+abJ+E|4 z8sXmTF+06S40xuwp|~`;iYFSaSMYFHZYRi95@oYe*pmnjYF^mBN0EU{{x)Tw0;oiQ z_p#7+Vrw&C2fVLI+hXL~ihWy$XjFn6I7JS8dvWF|z}X0@z&Qd~fgA+c*w0BMZBnrv z+y`SgxIpGSi`WjtDw$66?njKJSY;lLL1w^|dEj5jbP6~Gk*IPk@0pJ@F1Dj|O)99@ zILNAt>;s<~_J(RVO(I+0hJ`m4Ck5nSdN~Yt2c<1TI&eXhO++CZ{+ovEO@nOqkS)M( zhW)i*AlM|kYU3(8h|H&5`;V%3JdAUcrAnq#1Qx_ODp0cC?I;QO(P6#y(IgT##{wh{ zaIx7@;5!2z2*WqV>EC^qTiRq~D)=lUb6zt->qf4I8e-xmP8xEU#u!wyq$+&eAyd!@ zx1>pHPIDSW+f{J7! 
z83m`CcR==!Saj(qVtmHB*T&*VmI8k`@HN$}jkPKorHR2iD=fDIMRSg>&;?)jw;{O6 zC&H&Yn-6)Qr%P;?l_rk0rcvhw-c5q><^dH}Qt5hH)$W`LsGf~baO#iOjk;?js_+HW zfk#p54b-<6qrCd5eC^Uuzh8t3%|xj?Q4jP(J=X};%yTN=?JZH^aq{zA2l=~UF4QP@ zHBTEC>V;aU%?(f`O;AORQ4``(I;fl7s79LbwboE~dr^NRqP8cZ`qp@-E*h$r$=6du zJ?uvP;zhOBPJJI8)DR6dR72GU%__r=j=hHy-0M#_g?;?Ma zL8ZpPr^Uzzs6_MKocNN=?~R48Rt$U|P~AXzV|>pfQ0Xzsjf;UV1ysEl_*#M*AEQjn zcWfWyxOX7BPSpHygX(I2SFAFf=E)7JON?Wm0IK7ES!O(_PXA?@-H`R+#FGZ94)!|V zS3`}|P`$BUM>?qIU8p`9>S7Jm9a(pF&#wonIj8G!I;bi7>H3%g>O2iKRuerLp;^Co zQ**4kIlMy>@~%$f={4oWGDjT{-E)%ZL3G(kX4SKubiR|xmuB;BXP9GlxhDH;Bu@i# zj1!SP{W@Lt_hvPC2I{EmT-E2naPz-iG}Ps~%==AV)YEaONp4g#?bL5f2X&Jh)l;|s zo|pii_J6eOij)t4b(ZN{MH)kS{JHwjdLju)EEtQvo2>+ z0;qE}(Xq(qw*oaulYcWP%Gp*^X3H3K&P8l{n9Gz`2j{g^wJ$p9k`Af`sJb!8>~89S z9ZxS%osm5;+TI-C4y89wgUvU?f=i%hBmSX42wwRO`o9cy zOf=*9Lm}%XxTbA&lx%+yd{@I2e|9KLXa`$5z&-V)t%h5Jwuur^1Gu*}jIArzgS?vn zPsU5w`d}FDD~4Vtp;tECUkpE3%0@`%s#(@c%0187$p}-b(Dqj);N^GOae?dI0XdZY|1i;)EdwiOX+;q^~ zhNp^>5m0C>)Y;*=Rn(H$Fgo>semG;zs=m8-3^G9gsqJFmOGkjFPSnbjX9tvS3=Bs= ztIcANSqB)60C$~e08$)b2mS`}!aAG0PShVMl5mO$rUP9}UMK2jKHF(NYnd{8#vrpV zQW)b@RheSSj73M90b{VQD%_6D`y@>Yc&uozlIY1WT|ocoQFv-2n6jXfNZUoae7oIz zrXkUs3$#;pxe&ok)r=NsNL0R?CIvhVslFpPl3D>{k>}f``4f;aiba=uAY-5Az~?o2 z`^6x$JEF_A9r)r+-T@AL$B_btQADqL4I$Xm?g&0@jCOjF(MhCvwgFsc7Ku&JPAvdg zV+a;*pOOpAZ*almE_l)<5lt5`Cjbxo;roz8lV0^o@zLELSQIZT1YPQWuL-2boHHq#hBvls)hpCHH ziMG~6pRDj)E@Taj(3~?W?U8wpWnb=%eQ{e&nVoQMg$1&H2E-sU9XJd7{d+U=EhEh{ zJCb~eO;87^4sEFk7P{scdldNGkad5ZF28@uCA?L7yfiH7&ou^&)Zs~w=-uaDnCO!6 zf2I;{t$_Fapp;-IyXyk`TAc*X#!2QwKX)GVe+%4tbi=KF3cUL_{B;O?L8#LKW{rp4 z=fP#IBsKZbPMEd`-rWwzOQB6qxvJ&Xy0D==j7Wl8cfp`P;MSe6wg^7z0gE)4S^;Od z;m0KD5I)j0z7B@r?Q*!R0M>@!<^Vhwgh_6A*9+Hs1n9feRQjqA)8`=uLscc7q2xfeD-5Y>#S_LEfbBsJ=rSd@$c)#lp%)pZ zykyPZsj3>ISZ+u2!a0pqij0vHWS{{u?pv{5b8$!oEy%Y=!!hYd zHDA@G?*XD?Rxj7(j?s8csy=TIN!qKX%zD5Fanc2UVVv|wdn^d&`vtHb3msQ*V*qXm zh;cj50F^*$zbgmv9PjCs#6KEvXGr#4U6XrIr$$kfRI1O8x*AN=qdV$lNLtcRa>iLX ze_`of2$p0)*SgTBKK!%~9%=yB*MaYgLRCOv@W*_iE#hzHmV0oL1D z_!<(?aRPV86WfOSKSi;;pBw0ib4ULF7pVcVIqrAhOGTokv5Y}~#0YjWtWyv}!!F8F zQ|2a|fw)OX!OYeeWcI_{#2xq&5f6Dw3^My5-{Vw3)6e8{(!JK^&oUdH1Ym{+zbSi+ zrw6#-jI@6hgHhiP@uojjvEj%-HvewOKC(sGHj*7W;h=Mk&7Tbp-!T}OPbX@Fj8`m$Td~S)jW`xghTs5X z53zIl2ATg|i#0xRq$X=(qZyEhChuZhu1m{S(mrLMk|0r)bLax@aKl(r=R!&%r%wi} zzatf?93E8zMEHFW?v99-oMNz5QQs!;=$SB~BmB5qdZ$$cVa5WvH?)fC#=(7E;HD1Z zIqJa0tKi3o)zl*aUT*>e%)I60N8#(DQ+eFL2Z=D;eBs9`;iD+0MJG+to4s`LRNTNl zL0BI#;t5DUq2anTTzo7PJyMl~{C>O!A2|S%L5xeq>DKfA(*HLp5$8w`h`u>h>-`(P z|9_Sl>zFq}qJ?&Au_a<0J5?KZLN@2mlyfl*Nct$p!3c~bFV)6sOJqN?Gh)(kewY8R z=w~YS^Qa^9omu1mBM;eHh~rdMRqsL!keYMf!Axx(QBpx%8AVn%4W9H$HR=)5pgU|x zW95HagjJK05ok>-T_Pu~HA#a$6+9J4U!^B5B=y)zlLNlyccde_A3)B)4pg_ju%bpd zN2gM7j+Vzd?{SF37fUemzb!Kf=S)H!?42zSqEA)DU&)+qu-8ZKhTf;DBe{(^wK}zr zCPsRelAjzz=c*iR7jTJ3#=h9xe@c`zFW+lKIJnnzL)A@)4$AA(81>WTDD|}bthf^8 zb)C-lXm^xxx%~JqMg6BODyas*v9^o`G84w_H&cOud zh_k&;>-9S9w>BG{b{*GiuX7FZ)5eoO{muo)cU%hhXP{`fT2x;N)h4{S3iEORk)32>!W&;D%;`r&bsU!k#tQJv2l?hc&RVqB@px>qUFn~h`(JB92!BmoDackEy**(aB^z(Cy&Uu zQNOGGbU3#29f*_Rcc&4s!%vIjH5*28%`_r?_$fHH!}hg{=pl<60oo^;a2xmOa{PYd z*q`;!*nT|6evcFX)V@Q|yi5`&%tzyGE+PY`efJqi$lxL%!L~co5X^CC+lKpib`crv z>W}LjYIk^_KyVo8>v0x|6JBrPg$y8}$$3V?vSaNYQM;B+F$721eX1LRbA5uD2?YMy z`*^iaFwM3<+0$NE-6j;`EHoN_ukv(XN&R`2A$Y6N{*d(jEf5^(*#EA3!k+MP-Csko zMOwR=5S(wccf_@6g5a$l!J9pT<6_=l27*_31m}7Ly%3z{4HuMv=JZQ_g2U{X>oo+Y z_yq6D5}3!{MliV44@`7*R@NQVr0ZTgJ zkWM(T4c0#h|NSU@dxvuS7+^sjj&FwJo8aI^_{B;%tE9xXU$(B{gFJcb&dtC<8FJRx2U4MUoYwDYH^x@^E_GMI}k4+$|c(jyvOMNZwf}{^a+s#PDB#mOM_%r zYzuNd-=5MAWZh>-cNaGx3*6-t-Hc8ZF^@9rniD 
z6o;(6hoqC2L8s6m++6PkL~MSN^djhs>^0L))m@11l2%_2;2`7yNPC~1C?uPdV=TlH z6t+!!pB?Cw?cxTi5sVEeCPmuhsCZhRyHObV9 z(S*Whp|v~BtCvOV;%%hwT{96EHf^7oh}OEa@p&xbJ6;jQCyQQ)>C@qLb6{>BuDu67{wQ3#0rDO+`mn)DX1w6*pH?~e$q?Y4t2E{pe23xi_uhoH&!hsal2K_}X9TzEo} z@#x(M85B-RyT>+U3@jh-8yX{YpbhLnk`0n(w3x(AY_PU3li7Vcw^v6gh7vyoV7V<0|I9HIVXqtZ;E z&HE=oI*)BC;v{vU&#ghck=ugC=Q@%@xf%36T@FdE?{lL4UKjC_>oup4o>+MXdY&hU zH>eLp2IX2bhL5Yr5NI1BsRePl}j zzbL_tR$)3?Og0iPyvY|Y4U|mxb3?-q2FDqZ(?dL1hHsXKuTux0`qn+juzM~L|GGA$ z9EOo6p*i&s(R*BoP^Fpnu3X=HCo_7%v+JScpI%p?_h-ag<>q#`8qF(Ev zsIU9cA?Dd&c$FX@c#)C%vl_oBD_0(9e|)nKCsw`J%>n#dv66szKLa1k!O>N1?klN2 z={;pRs{(3lhJ{`5_Xoin7Qu5n;H!7Tx9@}Rp8>DG6#6XzZe9~yFdJUe2RChmudRT8 znGI*$2CJ(`at**63-JCX4bn~zez61Iy?LY+f1(fnk&_I1TcC;t_tebXLk#@Cj0{46 zPYfw}W;Gh@!-osxR3oGDi>2rRF$s}2X8{){SlK=mkBiI3yt^A?V~IWWS$)zsrakr9 z_GGyBc>%2>X@*_6$)_1rXB%ydNu|r0f%h5lO0Ek;(0)D%RU6Spmh_x^%lLG;$0$x( z?KK)m-hh2XBoEcAYZ>X)YeVTgeDtojS?|=dRH`xTZ{SUtieZJl|E~`XPp>gr9PX*O zyN|pzK1vM#-BQ)YjhqNA$XU^f#@g@7BMl!9t8_$tb2rKT+(5eGKkYs2DR3Ec8Irw; zK3Zea4)x(X8nU0kn(Iz8GBmUHle1p@hsrXJec(_}X=U216R91&i_Z-WAM=sV)!qPa z-CxGrght5u@K`^b{VV+pF{cqeG8@kClY(^lb~x=ec)Z5^o#eqaoiM>#kC*O*w`>|d zc#ibopL22@gCy|p92j0ssvQmbBR#2OhERs_+EZvXxfkTV?_SwmqmDZEEVvjx zS#&CYGNs+AHe_`}kiPlVVoxxx_BQm8o_Zv7YPeRn7*8}(+4z%C->Y7D7-U4HjguMZ zppbMxoM$97i8B}t5~b8ubuhx5q( z1f4d?%QFM;o;#GrBgA|kj?KZsj6Mr8aK{k5cay@M)fQ|EN|L+SQ^LWo4M|xMDqQ9w zzD^?wdrmWo0fz&;fRYMsVq_BBS~MdgFwF%FZi+2%UzB(ElpY~!jo%j%#(vUHd_LN6 zUHdai?Gpu8<&uX^YcBoNVB5KP9QKrJW^prKT(9ZkO&KjlzupgrmhKK#5C9n$MV@BVe3xNCPO zT=!g~@o{~S%nfWod!4;lU z3y7Gy4HafiT1*z8km>a#PY%zvT}Jxi)PrK|yZec|9dRSlo@Ww@@w+VGjc5#{oe+Kd zNir}DnjisgixP;BI7mhQABSG_Olg})16pvBi0>h~kl~-U;N(z0^*da%6TPFf``p#n z>_GF!r9WG6OQGPxj{?o#66r~(4?*w!Dw4;>Nk()1s=!Tb(hj@?g~GO30(hM8-~hBf zJw)=D>OynKC0AlA=|Y0=KDU!@)cdxri_y}K4A7*tA&Yp$D~NyZJ`&i>GvXOu7SSe; z^5ZR~QjO=A<&k~efW&DhRfRn_02fqEOaq+iNpvuTQhV(0={r7GjLz*@i#{Zn1Bn`D z8ui0>N^pJ6-@nH0T_GM0#I3I7oHhe*F$%2>*|7YrL9|!yAYhKsweBN+zsaI->=Pu9 zCl8IgG_NIS4i`y3^crL=yIivw?L}!EwgxIm)#K0uEVk{gi1@vSjISj6 zAWrMS1{$izQUIT^qLTSWf}^EIBF7Lz)|UBAS?}^C8Ko+#MbWm~5`byS5S%d;UNRHT zo(B00T=g{kW;y(JIULXl^V;F755Qqf@VahzMW-I%xBc+r^{{`VZsfCTbb}KOoS%c& zWtHIe7lW!$zb=4(-#I-0$Qiy)9W6#Tv>o`s;7BWW0#eZ&W*_LDGJL5fM@bHqiV2CL zI|CKIjUpP)MF|!Q2L4h`P8QgL7V&08R8MkOXV8n@RbHuG1=rJY9d(QqTs^QLrPt#m z8x4)XnaFdKl;C#~;z0o{3$%!*8Q_h`Be&UpTyvf6E6AetBk2I!LuJD*BTjPlyEn+_ z>oV%^0{VGVFkErG*vO03Rke9uUsiJ3@OpC$yf*`jsy5I|OWG^$EvGA;tI>dX(kFOu zauuokt}=YBI9xRH1|vRGh#@P0xw0yJ){2q{474kYPUARDS+BkP#hTV-oVr?q^`GC!_a$#mYF!qp`Nceh%~`O8Lm( z@U>!Cq#*0TiG4EO&YLa|+r#~^pacH23f^)ZtlTOi?94WJT^F3yZo?uTT(Jq>`~hZ6AIj&j26Dz&SQ)?#eR!}v=Ay#;CE<+RjVA&2eBVC11B@=#-dBPPIwC&8>%{l85E@Q*jc_pAKNhc>}A^C4$X z1#1T34Nt&zTPoN2Xdd2NRmkj#61;f_JYnx754<)b>vNWYK2P%5e;ibNw-#M!;7>uP zjw^!rlUX-9kvtLed(eb!p?>rXA$m^Cw z(V2-tz;Pkl9P+r`RWoo+!6(=eV+zi0g}2R@0<^(XLR;U!Wh>$LmJD)W0cPf*#e=pC zwE58C!PnQpV>bSfK?~hPPsUiEQ32GS4a!Igkh^gZg+vR;aBDYkW4UsTiRkYhP9c7s zU_|(HNhG?Nu~i4TlAFf^qmg8f$9=k9b3CNuqq(Ll!F6wuxbMx_mGf;hWt=ip>SeAZ zIW${9%( zzk8jpM3py{RW$W{Uu4evgOS(#m@l>H90NZuOC1x1qY>$J3rc7i=QZYLX#w5mScZU`kekAc)SHn&A&H<3+JsL=#k5_L26qDIpabvr zK*=}bl9(<)^XUW@ixhct6|b+K4u-NUe7O6%zO# z%EO7`(~)6xh#8)1?YRK{v&xd1h$#6UGL~)#;Qp#MEhusBfkt8W5TyVvu${fYC~@wrs^3@RXC?jZ zJF*&EiO+TFM1nWXf|pEz{z15KIehO4SY}s>#cgo^CYX|eceKKrTV>pQYZH8XGi)!z z@ddcV$~Fv2%W;gati1<$YM(=cFRLTd)=Gh;w{V}ViL(c~DVW zddPs8X2e;XYG8tay8`Ve+w6C|(T6j=;i|rCN+Oc43`Es&@mhJv5Z%wfslJN8hq%8C zKPp$87~R18s`fY#ukN=s9PDOPDk-2MJc#2{wo_5s@1QOWi$a>G+(X9ubYufW*%Jk% zUS5F21wSc|JdjO7-nB!FqKqFbRt_>n^qvm1_Rw)YEGxq&t%og~8-JZA?OAx$ba>ND z)du~`dicSU@Zp8<$p_$DHa|wAf!DUcd)i<|7Jj=GUe*l%`W$>Q!pv_%32|SUt^B{I 
z%J829aI@VtTY*nyly@z}<7N0vu`((5Gg8L18%@eq?pI;rjH}dG6E@$}rZv`e9`$5r|-#*WiULJUZ zrww{Du*vR8{V2>@Mtjyow0T#5w&RvQrHS&$;vsD-WY z##wOw4472V|NGIC@UGind#Un=r!~Uox?z?z#=f^1{&C%KE8auuPlF`NVHW2h;P#SE z2O0YSS!8f!js6_~PsFUC9-2_#mIG>UM-KIA-JqZ|tv2!@cdXHk!T<-Lg2_qk!npIjxMj%S-9rzqn?sl@&NcITJMs?fi!Sm!GH`dY zZ<2BA7#obnaBD?LI(MLPx0QI#WymotQCFkJ!9`6O3J>l~X*UfeH@zakQtCiq)O-F0 zYwB>LJD$(20=x0T>WMR6-P zpr7Wh;|Ol=hijgLYu3Y>s<>chRu9pUgSWK6(Tz%2`#&pTrR|LcK0IozzzIIg%&0*7 zr2}wTNwNOZeTffh!v`lBC6W6>Ng1WDDM#1x^MN;c6;#y+JJk;=Fue0Z6rNgGmI|d{B$&+TnyD|q}NRZx#-t1wv)aG4ZyJ!9T?m)-UA$N zzyC3kPqWielEk$9U5#jrxK>&iYc!*=l=j{WXpySToF0YAh-;Nb6Bx}kU8`YuR1E5| z!f3A9L3)aZ22>T{KHQS1b^+(ve?N+hh&W^H05tCJCwc7kp^YYO9m%0NmnND5&AAf0 z#>?ZPzHCOhGkxIZgm&{#V)PtvX%JoEB992;M;g8F^}si3CQ&2s5>J`xZw+*Ba|~Qu zf}0&WsSWtgjCg8oh~MZ3x+H5*VRgI3^AF>Ts_L(zK!2|Q{!tJ6?Y z1IDyeuQ{8mO8_0~JnVIRNv{=>#Ws!3o{a zor8zA%5b~(IoMHx3-*Ke-Vfe3cvT)w%fS>M?k~a>14`#{L>B&G3*1{&N4+MaFQ4P7 zc)N!d!3&0zB|5}nqm1E;Jyj(D_+n{f4Xy^@6-ELEfE59JryR{6IRMq`0vHTb-sPqU zS$76?aF{P8$#}`wdl<{p?w;cM9*+~+Xs+qnPot@{;F3gjH0@y5o@7Vk>2bZ!(e&qd zTys3;hs!my$oNiDI~S0Lql7j~7iF|TsTdx^%@}kk@irRcYEP&{l2#klM+b>KdhV!b zLL*A+Gy_p<04^zyYcH!R%KJOez-y{_+&cqRIJh|&-X}irB4o6zMuvEt(cX&Ihs`9f5x)KX zuG*7a*Dw#=<5=Ej3+achE@Ze}NBn`J8#vC$re1}eqz{o?QrKi`8F`<3tdQ5k+;_4YEri>*7c2g#n- z1@O)C$i=dOit3_{I_jvSj(rmb8Q_ND2Ji$FrpltdCHy=bRO-SWX4Iz5$jj1(_PPHm zx*6!Tm;$`Z*XFzhRXEs!Qdm7`H^T);-8zU4ii6hC@C@~&?t2nk>YX;Cv5;hVMjf(Z z51qb&E02KVyDI;0-2i-e8T??C{^)=V{I(5Rt4;+E7v*ic-G0-ne1$|Cjc&5h$m{mW zLAjtaD4}3aRUEj-z}+@&NA1boAh&1_DjRZ7uxEIx#tR0yWLNV1h|v2gfjW;C?6i}W zYm(hP1+Lzk(LDFw)p)($0sH%HD9>U)6`IHa22S%v{_VwOSRUwm&d52@g#`58R*7W+r;D#}e9v*jvhbbfAqeZ9$$3%twNZ zCvh{tCZJQ_U5@Sd7WgRf=5*jHl0C6g&^U@x^6ZIdMDN{&ySM;xt%qt%qVodaMv}v)ZRp_YvL=T2fa87a4e-z$UQ6tl_$*J*411oT|{nzLduBSVN$n2jh?Nxb52;5Z~Ac$8>^tOwDt8m&;^ zG2rT8xP3DX)j991;+lQ6CJ);|2L3(+y|zt%06#3jZB;q;UgX1va?ooh>bf%AQkMAO zwvwn|-nZw%KTL*0TPiVsfGtH>wH@x*1k1L<{2_Ror^Ii0+olK{lc}WX2=m5#AOq+4 zN)Fcud@QR6uf;(&W`Br*&-!q#byG*-Py?^`#5>5xv;n<@vr%j~4YQEZnTAPdAstKf zI9ziZA~rXtw0jKo`!t7Zf==ZJp^Y;w z#C?KvW2DpZnjk*p6o+;pr(`VAiQ70XkUV#)5hf{3Ls;R$1$~a~z-|75#>iOD%?{GX z9ca^ZX?F-B!Mh-f4ixK2=eqUcPW@Pb#=XmYcX=jI-;X6Ss85s7j1H#2PKeQj`AYeoY@`g;$NNBc(_c2E(c#bG@Xqs%3KOOkY^M>+8%LE_67?SStLDh1jv z?Bsq|7A|Xqs|#ShIhWw@7FgIMk-;5X;pWZyoZAK6S@`ECV80@~tpN`9W!2wNf*Xe5 zB{^7Ih6{GUBV{e#|CE(s*K3q(q#GG@-z`cJ5kBz222L?j-4txkt_$F&Whrt(WP$gX zO4)|Z0sJnAKGh$JT#!S7su#r7TFW4#1*G~&^2oI^_Uf5t+>&m(dJ8*=mL|u+Ri!!_ z^0#YSS2DlBt#EVBUei?+Qb2t=mLdVyP-BDr?y)=>yVjoE8!;Z@ghte#N#m{uZ3s^i zzcv`)2(-R!4dC~<8Cc^XavII}9KxLdK9=4(gVx0)nnEiws53;14l>9{>9apOfnPE* zCsrqLuBWm=A?_^0&#DGf2cqBg>@uz{}9zu0f%!u1m2{6? zH!5d#Wf0$w1{wVUDj62%OtJ;D2^j9%!mjz0h;2O!6nn z@VTK%J{uqSu&)r+HuSgKk*xW0tB&aajyDx+IsAD~Fmlqipw0+CkCDz(+E;m8b<`1u zskqfn? 
zYwW%>9T{ugMte#N+9&?KG`t?RD~>esv~8!d&#evM7iD>I0FFYc)onqg%{F9=RUg{J zXvV&nU(FRWNDp5xLGvrld5{H8H4=ubAllI|0eG3G1poJ2!>g9ld!z@i@!{z}2G~=9 zsy&6}?|k4rIe1A{xz^T|^>cLq-yR(K?jwia)CRcg5NNZ;*2W=tVkdm)3An*ZR^HJ7 z?`_b)+7amUco{yvLkZywDxTY5A7B7Ul?Q-Z%UZx(y(o2zMBUJ=W4v%Ga4YHRv|bd- zxqcMaZb0MKHD$`ah5}04@G$bO#32V9i@2C|j6WKXA^m(jpQA|;RL9?rlaPli=^(iT z)hNF-h^HUS2hR6p%->LkWsKZ&7aDlKr|cE|0Xz`E6*Xx+4)fqYG7?GxJX(f-8>$%V z0B2|5)A>rY@!}!)=+5EeE!JS0n1dz{HWXn~aeNJfcG6FYlTfGD)o2xTBQC>sY$jyH z&2l+;HKBfQM{)dmO~jGdR}JDE@Cx8=(mgx}BM!*VN$;!BYt)6ifj1r@&)pgAh=bc| z;G!UTT|5+d@qYUP+}ECpkmHyZ>YGX>g+?6mRCwB>MaU0J+P^wcG|8n}&Az%ex#Cph z-TpQ4`(YE}nhn|zb)0zT)t~WqF2S#BEN)~vw2XFS|c80`F5HN- zX@t$uqJeaR=Oo0jJCb-3@OpH*?nZpTS-2T%dEhK`I^RaJvDS%*xoJet4DO^=R~X#h zPo899Mkq{J0-T0Bsk$9_F>n|ft4Uh4Gig9Vzgj@fBHkl@rq_sbxT^_wB*Rj-%-c?%r zJnmuwmo;`Y2svzNXhUK6r0YyUhuPPqT(cVq-`?S?c4OT2jyLeJj0~!yjMUC9@DT@Y zBJc@c8++V4=muWr!8?pZ6>;yu1I|SJ$TYHkD{g(7!(B`;*6$tkPLt4z#1jifam@_c zN8?aJVaJ@QguJWKfcA<@%8DL>jInBCEfjp+7{ISe@~~ylyY8@UW*Ys!`vZ9YkkXoc zyr^<$Kg_|aeK<5DJL91;oVN{rJ_JW^g9~=TOb;$;f?qX4=FQ!(Fb_EoRu95`JK&xj zaPJQI>=d|T8+>#PEGfbV8lc@M#(H-N9uMH1Ik2^2iWUNshm%hTF~YQ{Lj zvjG`j;bu4*gQUoR%ZDYQ6>Y*k@#$gapu_7#v>E3jI=vZ@{FfT#90l0lXydKM z0;7;v=z#d&tVD0$vvnZT(1}VCg@S{(`fy?Z7nMfV3+=OQCKzQyI4Mw-gIgI{>7a>9 z&DkvR>v*I6q$P44&AS|rbv|tzZ$@L-C1U}=4d|JZgf?dxB}@Ku5S_Yd2zak2gYAz2 zObmvLAwl!xa~ZgTR~YXFt!dZ;&%bo4j;i3pIrpO*x;?6IbV+#A6}K3jh6maWJL8LkF&%Xv3)| zxP4(^sFePx<^lOArBnwpe432)Y63sE$=aKdp>}MICG+DFT>3mt2e0&0k|;zmP(H6; zmn%laAt-{i&K_#kAr5Kn)bi~8jdkXNeGM-$GLpYjj^8^^H}GOlVw5Y&(TB*BJ@|)= z?2;?W@cYtmZW^dm=O^uHWFUY)mf%xE!@u`c4e*sJcj!mA!IyUqfB&)j_NzPKo6}+S z5Ud)6r-xwWAbfK@tQgdw|KB`Jx1O{Ofqt$i!%v1Z2q&TpzJvBA2hiv7K%xWC2_&o` zH)MZ~b;^Z0>ZoIUaGnuy_lxDRO+!&f9lHZUZ&qm7dB7v1Je(L5`3aMt?_lpp?7pYt zNCW41N=WGD zFoDZEYobDO^-(yrPGYTNH1L4;8L687P)-lYhvk+!(AZl~I>CBh^#s@P!oov5rZiQw zYyQewxa9K8qBXegtQZ^kB=_YE;O}gF_lW?$Uyi1MyxxOjJVgS zzL?ebJr%$wi7QW0j}>F$k*y_`43cVLRMA0B_GLOghzr_Y~lf z68!r%xTy% zt2c~CAGwGq3^J0Ep1lDPdrw3Hfi#4b9R>q&EYh$D&Ebn447kq^mu&Yw8pmn(IUjhpktip& zje&piV2Rz^k_2+q@X&RSGVnf6^D7R`z+3H_oQ7$*^_8o@%60F8vFEUX8yB;M;PO|s&v{fvlSshDX5S- z@3HaSTLKvfr`q^%HP%`Z;LX8skJ@`oB|pit0o)eI(5N*~W}#A78!ZXHHkMqC`G~BU zmeRu2yO%@HpT;E%Uu4p~fhPBC;A-Lt;2BgcVm_*ccelfR0-S;th|5SOW_F`g3{R3Q z&S*jXG?sNYefvoY9~d-=17LMbn?_(UB8mIJA8>o06M)lO4g3OpF)a=IjNtGC(;oMZof9ZJCx zm+W{y)K^cCJoek|?_H`QB#CgI4>D+sB&{J?Br0eid9CY08+W@A|7b--n?5wZr`dg_ zKY%aQEN}~L*sQh`)%Qx;r_#`a)TPx}Ru+-HR&;QNk?;;=KwK3>?-32adB}jcndIoy zWZV8JlGmKP{e2o=*N|R~Vzltk`c&H=)#yWpZw|O7;`do9DTZ2XA6{i`gd`kgJ!Fpu z$&`*BWYD!3xUc5DzbylA$-vWP_fRmjv1eh~Hi-=m z@+BykTV-nBQG!nm$S5;t;`bW(_l!zPZ3$$hJ{iDY%5b-R=sD=*u@<=oS0Pv7N-7=b z0WY$mgN=a(X!xlegso`sg?za^J#U+9!o|f%h3AT9)Hi7h?%tH|CnOss?Y_u0*;Aw!p1N!Lx(#t*78iD-@ahULWk2gLkcg-){wzfgtKeY84hy zii-}SYjA}|SS#?)8Mv}6_iwE$GlN2+FY?3(xHIA8a*+N02O{pH8etOA-Q8oCHi5$= zf$d0KP}|l=oTRD9)jkEdEKsepG&BRRG?iHYV*y+dM7x?h(8)4Dr^DwQuD`pY5rzAf zCkVL{HIdrZ2|78GXNj(Cf;F!uNJjELAp;2UK(gKauW15&d0unF;t zYI$k~g|tFq_q`;~=bXLXSn{dH8ZH~Xk2tH3I9}67REX6mf{}FnG{xMW23NzWoAgA@ zdEgi$-dI%12GHC&)U3{|L2E+NbBChC z8Ngjc?^j)jo4C-57{5{;PXF*wQvXAZ46q@j`UJg~q<518KA)Ab^{&DAeIo<>cOK5p z!r$+JtBUZcdGJrO^!sNA;Kg^q9h=}I-SFXV_+CGlXJ-;TF$8xH=yTTq+&>6!YK0yj z{U@T;sA&kbn8aSC!$iQe}ZY#Ym~> zS;Jv#AVI%4?=XrHZx577_+FwpAyZHZqKA)DleAeI6mIZ{@6SeL@K3b@g3EA|=Jz3$t})7Lo>UBde|yd4BroLM z=#Vp(n`a*)Cnt#xW+6lCMxqy-HdF>JEIs!`Fp}K603EK{YY5n1FQr$^0F93q8@;o8 z&^(Xhbsdi0+f5|D|0Z<0eUjv{>7m7ZEK>vJ(BhvajF^TFNpU5)Jj7i(02vQg;>KZX zM|DSi%Mrefc+tUp^!$&I+y`AKYLaF!WNh17ZrI-fmxQq4d~YY2v+JWZat;#DL}k8LdA>;M$1aXMv??4uvAe8?2oW zq6di_=hm!WLjhc1U&dhu-t3EiAB77`N_L!tCg9()@THQWcCS+hk3BJ$|zu622dT@{@;Xzp8 
z0K{MW>LC2gCW4z~je*HVr8qphQ7#YU&H0^O;9HQy{{&B@yJu0Yu2pCLTpBxangyq8>REuANZxCx(qat^F(`5s^OuH zb9XNe@6w;GXmd|9UWjj)cPNh<1=ji}`SoJ`Qjp~f{RBhgy0{C%RszMjx zK^A$7-|P*q=JRv_KP~Hk7vdzNv>`(_+OYyD|7#DIIVyE_2hbY6yU&v}xO$*liH_=p zXhL4I>F9tR`=tgB@${bm$1ZSN7~j3{^lmtz3l49G*^O}a z?K0j%jOeFk1N%3?mwI4fRbjU^MfkVp;3xKO=X>yz{P00|UkUzmPz6juoifG9NLvvT z?p%NtvVEn5)Of+54Y|$-tmB2mMC9G8&)quqRD^411KNyBHNR^^j({#BkJ()@={Djp z1*Hz@4`hgMB-+D2)@f4eNJk45M^GH5BMa`U$^|I_lUrk;XrES>s%c?a>7rDph81Q* z_U7D*xm(I5&?C&}bh#tgQ?Z@U(w5fpBGIAlCjNVb0 zPjO$VQ`(B2)x~4!qwvle>WAexxNeeD(Hu|WVs@gC{;z}4tp{1)Tmyd!l0)4EL~)7Z zAy#8=6Op*WMcmawMJyI&n;7)I!?5U2%Olsi29(5jsi!^oKO#E2*TC^U9OuI$W%$1c z7u=F8oY@Fx7T_Zr;l@#@Ap_vw9)%S<;p>ZFW&zA!dI`1$&|j9gpua3*?6?LvEC=@t z!bRI*=OCQsS8}fP2XIyf+6>%Mf?pTmYMa^jvl-RG8bA+Rv?R+b%gPcQ4)(*0;`Uue zZSJyU%P4~AD~%@1!$?gUH}EGQ7v}w>6U)b1X;VNGDJjO?iU!LT;z{yz?57v?=Pw!V zj%`3h*+r=TL;N{_s~uO8cbdxI-W$O6F$VKAlnr`^C2%e$ezDpgaYfPyuC3VmYbotq zD;-R-*NWpsHKNe_WVCqx0k_a`FL1hnot7K;0P*8)uD#EdBrl}x=-rRyV%vijrnrsC zM{}YVk(A>q9UOqhX2J5zZmemu0L|xFh$st66#4Cl1uqKIfLEIeDfsC?hlD6)-U&!h zvo3%;iB~7LptVvq=el{BWPmwG-Xv3{Id062hO@TRDxENR+=-K1?GsI*h-nxnS{%l4@*nFev9>0rN z+}hAOaJ11Fdm6a5to0xbjlf$x8P-0PH9Bf<+8^<`9|_bhZvV;v$J_gOoa9hlJL=D| z;$0aukH(V6<)O7mq)iMelkxvL^tptXW%n(7Rq>Dj5>WfyVG>t3(H< zdg8E!bgvy%e8o%3(Cewf(}F4<|8s%%34jIKg?10xJpJtOV5SGtZ54w9bNW0v09R~-r%DGiU{Uuk`02@XxybezpM9TG}uJ) zyD6cKdJFN2%T35IS_e#vG2R~nHkaXPqjA?~WGK2CL2I#Ia%-q1c@iE&#O^e2!xrE^ z(#K|zvG>xINLQT(XwE0KE1^DIh2D?r_!vO_`HcPBq?9{rsd&?q;5t5wj89q#m+y|_ zfK_8Q;*RY=n|v)iq$)h3YGZD#Y152^3CC8oX#?&^h#1wvAf@0eyWpQ>;0SB9{W%!E z7IoO~m}_Lf-yCS&idxp!p|!D#fqY~c&;qL!BS5)%vvgy3!V&MZ5bXfSG;Sz9^6oZTdM|d z#}N2AdDc$IL!&3dtUuyq+?RGb0xn+oTv9RYtC)foQI~W8pi|Pm5@xDn z^l&-Iw4!`PcL(l9hD%#~5p*DWkI}GP0g{gRv8(TDd-mzt(&MlRda9W)aLA zIKBa%Ey3M?qjF(yqI}g+M;&$4QAZth?3IWV9r(Z(+TordxO^u(9bq}nH1e=rUxHi8 z(QeX{jFP|J9w@a(l=tk#*2DH-kepv-GHU1GJ=`l0o&XzBxTN;EePsQ2qu6}6ft5jW z65J3u;2a}sJNukbWbh4ZV z{MOOKHWTrHc2F62JA#qiK!d0c9u$f#IO^vjrl+|SP<1268`8_|X^mgGs7`l{`SU$LHe_2+!x5Y)Fr0sM^N z23sCw#Xf`z`5RP8;(Et>eV3_ZbKV{(LE{~aG^*#IO|}#$q2~j{ZzeO*=CPh+Bc}!R zd)lVt*-z573^FPW@r!5DveuxY8=j}bOM|~eRrZVW@;gams6~4hypW}GkWE56T1y`w`Mu9ZWb}uT=q(Dwg>M zbVw4bwId^}1?^L{@TrufnhrD>xFi@ZOlhG~l4?J+ca{QCI(};3`%J`LTWVmAwO!T) z@UZ>;EoA^SLCd}s0toqQlWd#2N?OlAA<*f0*q()x^3ZJC{Cc~@l>qHcFtuBs?lxU> z%?`LWCBa`NiUbET!axQ?f1treP?@0Br@)=qd~9g*-x^qsO@Qv?pz?MLMNNY|#tLr74tf1rU}J1KXf5>(d# zhXzu%G%~s}F^>Y>)+2Xq5I+G|qk;Gr(odg5=)I&(`qA`#no$>ez>-89B`Rr2h8(V0 zd+qRV(}?{=+(Z&%%|R%qTr;3DC*s--QJGuCn07m<%+F#Vp^%F(B*Z4PD7RSB_1y{g zvmC8Cw~^fM0TnWRS|5zpoKHg&+EC))A%m*T{yRxPQ~TZFnwTai2;WoM+#JpL+*Rp) zT%KbBQr$M9{*N-)T9BbO+wN1-A|AXKaj_4q+RGLhm2PuEj!I43HUnoEILRK6udDI6 zb)wDwFkpTlb!*h2K8FrpEy&1>Gww$-=0~$8jrIB!j^iW@k?eg8QvmI>Kbsi&>_f<- zzXKRFm3dZeJa0ob%2s6iWbGOk=Ahk4MR2nj9b&QHxjCp@udM{zec3$Cw$0{p#jYw2 zLd%fE32kL)@MS#qr+5+~z3)Eo@iw@95U$#3jkOxgH1J{{t}nwqF{NpaGa_r=UXD)H z0dS^~q47|VEGpNabOB)i{v=|vl zPX#Ii=mLZ2C!l@q*&x2&au3!xVBewsj2Bwb`VkjFGsqLSz{pz~Rnjq!a>*_<@S}3H zu{g=d)8+#gMNCA4qLpVB;F_J$dDt3#IC%n`HUTcUFC`?(zPBIMQAZth)KNzrb?i}y zj3oKMdz#^Z1mFC0126S;Du~0OMkkbpgf>ST#hy(<8>kT9_cf&T3+N@zK}2mAc)+o! 
zaP(y4Er~-?D5s9S7cTF;7v)5B9V2<*2$Wke4M`#HIs})sp$>N>(Nsb-p;Qo~!6GU- zOOsH4pJNA~==^)tXh!zQq=Yup5O*=o6v_ZccyM;YBG~D{s~h3agpGDy3w&U%>WC&G z^RA}oU%ava|J^2q=;b@%fgLbdthfbz1MkVezvbXTD>%5m1j_q2F&s z2e+3iVY?~70R~>|DgL|6ejZ_@`|&jN0+!p)TB2QpZHS<_mSpu*7EO?`bUlY|^)wQ) zi#VYb8DZVPl|(;l*nf*q;@k_%Zh6?sD3mp)YK7bBcSA5S@lHlszJbPiX@ zdwG`l@MR}@-%}BVp_bh1qjx;dNPN&?6ymuza1-w|0LK`3k%6^=LZc4_8ZWi?zSQ2& zO8bc;w`Y)`;yKdADLK*;$Bq^*Aqs<@J&C9tmZ6)3Narr|BZt<-XVLh4hWNSCj@G3# zF+&bFE~d*fRkt1MFpldrd(qfP3fXs}@wN$Zb)zr=CD2`B)t>7EH~L8iIK;rq(As%Z z5HHY~jEuoMNEcMNR_SqFYeyqGNXu{be9%nt8sbsm?L8jTra(n>Fcr9jkrh}bp)lw| zQz=JwDN5oGdS8pJqv;S&!n~D1Wj;IJ&cYc5IJ`mj z(#QH?sr6oY2Ih3Y{=G1#2c|W{lUpmIgg9hg6Jz>M_A5VMR)ELmKm9d7xdz% zph2~Q_{}jin#OYT%-L%u4dzxfpf(Y|c5WrmwwhYi? zpuvv0M~Pn~yU=r`rJpEJNs2jEXhP4ERGMlM@`i7S`R+D!K$wQc-TH`eG#ecd<{HgY z&;sgOs-}gCx4qXuJ`l+|2u4;k$fHUMVc$PQ^!sfF^hfy5r&`=8xu4{wg2Ae?!)UlotM2A?2G8woSY)$ChX04cDDLSa!7>pDY zOhVl7>4A)ryNDJ`Zrc<*m|B3LEG(?@s-4$ZnFDR@(9;VYoshF_I&$!a83~<0%t6Hie$oj~l;E-* zaMR$(K{3<7zh>dj*1Hy^nmNOhqUQFpCQa04_c|jZ;6Xb`;|9z;M38q$AF#Ke0S&U% z#8-frga+vhBYCn*eLK*EZ$=)qqzUb#MQ2yY_s0XHNvqLdKYPowyP^#}cbv>Vto=97 zDD}pLW%qOduQA%F9tx5TtX{M(?8!O*o)5x^h|7V(M+RP8v|(RI=%I(``5qd)Eod#w zGO~g}8}hDAFqNMH@D0Z?--J@xY(ox)b%FiK$gw>I8AIF9n!KHTC-EV-K7L;wdC>7t6k(|m{p-I=Bj;o@@~|E6%R~3&2rG7mFE87f1^E1CxW4AZ=ovVy z6VB;_Bf8+c8(?c)bWlefb<|Nu9e+DQBe1@4*6~6kR&;QDLoykCQVjg4J=_GI)hI%BPEyl$k9zZtz$C^b>= zQIs%gZ*2jVq^P83XJBr^MSKItbUTO+md3~*E@*+popAL4T(%SL*a_LHQCcwYrJM+O z*W0k@?KW3Q81G+X;QS0cT2^_gJIceenzsVSdGKb$4|>pw4x-}rCnJ~Sa^${Ds$h^M zowVsnXdew+PhMei%rrht1B%-}nbK~5;CKVy4cv1w%`|Yl9b_AAZlI^F@SvKPG8H%+ zy_2md{PJkbHTzH>Y#}*}=ri`NepjyB1jG??dG7_pgLxYDPtxDGiV4OeznJSa)55_X z>Yr&Sl<-JIn*!pn%|r8iBk*m<@DRYOjNbnvD0_HZ<%Rk7epjH-M%MVR7xpCMfPb3DR0uHT(OV>9jhXWO4~gmw_O_!v+9 z{-d%RAJPD)7|olT0(c^!Z9C#}rLFhizJ|K?(`ZWejmP_R$v2uq#&1$FfDSB02vta_ zrFDeDsfCuy)&YFCrcz5YD)_$C)6Zv7X)1u{EO&9bsZ={S$b+92hrbUWXv)G_1%*WC zX5n?~ptZ`dYs$cpy>Q3`IAJ1e9E8{ZCAn|S=E-h-|HL*35=Lk_P; z>|VF&!C78Q_;hFAjYub9PuJ=&k^M?>Lh_j!7}0})>8CbPm(<7W*}9YOG03uQX zYBKI1uoxLb%Sm2HyHLf5u{2mGA)_X4llM{9_bEsr8)e+Iq472aFa{nE;M$rt3xLx+ z2{Eei^^#kep4q^gYUcZ@04|RpV4ng^Ga>~)iMWoJ~axb_eluU>-UEK1#CapcBoFGy^z;ct9?y^d>4* zM#c&gsbr*&LpKr#JU~46c?RP1b{aV!ep&N-A5}Rx+Ehv?0{p5BcRH-K9l(35#{8N< zhTDUT9IJ;SL*hC6E^8HzA+l(#eTZb?g@^hnE@^!=y3jjHE8S+$SWa_@j0PTB8{!-R zpnZKF>a%MiejoP9A|Pkq({(kR?jDq)V;0b z6GP6IzCOGg|`PW#aa{Hj}Cw99tL!}f4-cqpUWz(oykm-Vjw zxl~zXLa*L&243aM^LBdx4+O&vC=YnOr_EwnAP-xd%IQ$FkSr%X1N&I3@Wum4r<6Xl zkc20mTA9B=o9m(WiVbV=8XppJ9JKP4Y&+H}Q!K zGBWyc_pr@FedH2R*c?ngjmDZjfdB$so^K=w-XC1mCKfvnA6)=`m6O#~}l>d$2yi zrfSQ=U>SBKET*v(x3mOqp{1DwmqmPc_k`^!2n&?EB24nTJA1_H*!?k@`8z~ia8Spd zkFB_SE9-{Z?hO~-p353HGzRukn#~0<23raEL8s!cm+w?a^wApgvH<)#4|kN5i|vNe zNVoh_126L>Hn_uvJZr7gmwU2cL%~5DC*ly~{(Xw{3I-+i9z&#!^l(Qb${>G3;Kt1g z|K^`OCE9%ucqEAK*H*;OT268%bKw5Kvii zAa0x-WAEiwl9hE$Xr8PfU38G9FdHpKBRXh7_ZWpxbT9|?%~gS$=%5Wa2Z;_g2Fi?| zBs$2UxtU}Hcc8hEcJK;If+ekCA-e1}(~R%Ypvzt_E$Md}$1j66?!~CjFOO-L15P#C zzy|{Ojpbmbp$T}q5g+`?0R9xj?>URn9D4v=JMJ75&gUmcUPlbtYm$TsA$rj}ci}>| zFt*9WJ@h<}CA680_-2M=(ZK>#Jq5()ez2-d4{){>9aM`V2Lt$*D$&7feVCdcI_UP{ zRT-7$i$W+mXvkFr5Vd&TqJyJz(V~O@Y*q31YiwGNyLJvQfSLzBpA%;)tRs4BS)0KE zdy@<7!#^2Rga=Pl8Dj0g%RDVqk5Tcm^+(*#sc3L6M~hP2=G2Icp|n97+KDf8S`QCB z_fFy$n&J2Puw{_<<{30blROUv^p4W*VKn1&tW!&j1}pBcf~-pw|T>-|<{3hSNqg4x*mVN5*=Qbi>KD?|t0MhmFVk zbg568M{^{}3))R3t;e4kd7#K3C&VHvI%olYP##_mt__u@TI$K0x(4{39q4PV!9B;w zvv#~MPutIma7AhO_cUhV%tm?GW@g}x>*3j|HmzAWVgfAbh2ti`x&e6WZSl`$OoHg( zga){AAbt^;WVD$t3*uk$enuX%q;gX8kRh-;hthUucxb~ap!eqLLFlr7=OX#8qmC$K zkhe7Lq#t4;S~GT5eFp5$y@kS1$KHjo%#lZ>cC3d&(90eEz8M)A167|oukD3_%VEC{ 
zIYZ(c5m{v1-5){c3egG7^5m3?!~CqGgAd1CBFL5Uhz< zjprMf?7?GYg*T!W?0H7{fz}dV3zL}N)iJ)Pb6V=CqmDZ2sAJE;U-#`M1ARVB^7ZqB zZL$gh)|cSkLAY=m{BFAnKF{;uzjCTpKNu+N`B)jgKO_Qs28BCc?MwZz79|aQGJxNd z6`IMQq=I20zRf6!>r)I*oZEnMj+K#NShU=iD1pMR410_RTn(#s(o?8Z<3Ix^8oiS> z=;vxi^6z?)XXLN%hxq>86O6z-I#Ib6f6wA3-`|_ijQT0=)EV*y=NqjBKN&@(gD~M| zTA%Lj0+z8v28D|QG%nK;!iMmX$8s!rxcf2+r1R|hfW<~RFI~j}hoU_#P0+Bn?OnSH zSyZ8C1Mz!eJC)+^mzPKKpA8CoE;XtO^qGj_?*-rpD>_&gz*i$&r+q%0UQoq@FK>qH zA{MoSJ0vlm4EwiaVe05`amyJ|q(inN?*Q&Gt2hLmeg0?xE#<)<9)^+=g*R2&j_gH$BJMDeN&2t|;dk^C8#fc8)pzu$pQIhLgLtsC& zS=>=2y_958;z6e1641G>xV`5xo#)pGADc8ECEZ z4O~{khwnxP*utuPuM|j#@O8ToEirJkCk|V?ZL=wWkNNudHDzeG+_l3ziG%=_m*EI& zLp6FZtpEc#SW;Dxz00fO@3%p3FLZZ7gDvR2VF3PkTtX)RnXhe8g51?5eb$z=h<>I+ zp0)39g|%g!rtkJ-kgYDOBfj4tqTR2H!xzF|l(c{rfe-oc+cI1kK}>!$kTG&Na7`f6 zeAGfwMv~|T+a_t^KZ~y6QYt6OTl9S3>Rs|lH$vth&z2#%?&vh&Y$Kz0F-n=>f?ESE z4qK>{rHV3c*CRvvL1e5Y89K|*0-m-o@5HUITu88i{mz(@xPYE(m2EqO`#Ucp4(8ML z?~@3=8}0X`UE78DJzDHY`Q8$mj{)(E_a?Lf1&C`AH9nUEUZDG*ho}>yQU8TXP0vVi zw^*kjcuH&Y95OJH@GNi{urP4vp%zj9hoXZd z#b#*Gr@gBI+yp_RNk6fZvFB|SaXT0nNuC>>R75LL*UO;8SeL0#crv&@d$@*$%D#33 zQvwMVYS*?BGStI)l4L*)+51TASC_S>#OPF?W1ZrtO!`4;Jr#v&6oydsh%n#Tlwjn1 ze-3fc!Zu;~sfVjx#Df9&Whi--l4YCGehFHu?kdW$RrKXyD;RjB3=f8#U4p&<28(ip z9BK`+KkbC=woPUy%o&1(LolZYCN{#RA^1&|)oQ9lE|5^^fTUkoWbE&gCWA|c;3=ph z(-x9?X8w_wg}h(=Xh5$IbUJfca9dH|FC+Oq`)DkZqfRI`({caliObJN#=09M%xsr~&vy4qlOgJ`>RZm}}tez61mLnD05-lHVtjT()MK z{mwcncp;3nFt1G;drw&**XM|x9Qy#$LUp{fkPaIr4_yV6-2-L3?l1JwV%h4D4-fGRFdY-LRq}m z2jW2XJKSdhi8!7`qJyM)Hyia)Qk4U!a6Hq-y}_+2!D!$jI>;eD9z=&-u=jKT>RS+> z*M)o(9b;mwD98*ykT}P4t>0ooiS?<2ei59n+=@ z?Nf{FH7|5jiDpqnpB$o}EQ?t?LF-r(q6nmgPscL466W#jA4U7I4)1o5EMO_1HMRwD zsiV+~jOpp982fK)#5-9eoLXqtur?#nK?Cr#9Y~$PA+{vd{zjsLH9_T&f2ZZyhb5^N z+Y{4`C75Cx`M`_@*pXEzbgDJNzO@C;wkP4n9L(;9**!3=3kn%nv27%8t+uK`W^%?7 zGXtwu3Z%r`S~aU59)y)ckS{4?bJ4a92h5HD9xucFB@yqA^EH`5VrZxcTotI~O}Md! zX%j+hMT^f<5rn}2Jpf|N^2L~d$-yS2hX+z)9h@2L2YkpWQXFX%Hjtr5!enx%&SR!0!laNRE80Yy9 z=Rpe<&sy#HY(%1ub~AiFwpabT(`cRQF%<=9mP$pT)J}z-)3}5xiE0cp;Sj5b<|Nu9d+z_ zjPPpvz|<_9)da7%4B>S}cytJ^-437MAZv06_`f_%_mo)l>9TUP{hchL}!z=(GvF=Om6$)?O>FP%`LnaR}IaxHGI|&~D@+zGw(; zE5nODJ;Yp3N#zR2D7&o;f2cCnW~0(J2ZV$K;I06!Di7!66oD5RXt8ZJ25@B%z3KKL zqhTj!hh0gp!st-Qncx7tr!Kl(_xF#{dquViFJq!zS(Ei@7! 
zWRL@6ZPfsojzXb_7==Su0z>wJpRyeMrCvpNFxgY_fNFJjvj+`1g+foxL#qex-vG0# z$m@O0uyg_}>VYMluzb7p0o52GI(SPvtQwLB?ecB#*bqE62#Xrv10C=`8{v=J;FTG8 zQ3f6@!%ah~8+v+1HATNN2!FAo!l2R)s{;5<2~P7gu)kMQS&nLS8@>3QfxK|PC~IS_ zMG<-3ngS`;ZcdpjJ&4bhH2FY_W1MoU8fi+f=YwWMyiZ$5W|3Y_2DIomATdGQ1P$Z) zXQ9n_MauV|X|y0;k#M@GzV;Jn(@5iXti&xH!A9INa6d_f^Av}tfj7#Iss`IQ+| z4r^UOrSwo5dXZr=-GhSy_)DO77llP=j@(P~0V8kQ?TUJu4af*iT0=IWciu+4!S)C; zrVeIwn_&Z$l0B2q+8I|8ZX(*~my`UiLgjqgK`nzk*Dl`u43+X_V`a3tszfIs%1)dS zltW`}I$FD;_KaSXdMYeu6xj9PafkbCMhf7AjP}1 zh^5WV7Br5Rr?d;N*~r+Jc$-B0S^#pyOh@xQ%3#Z)^=m&WC8b(};Rag|aI%2~M%*%x zfaG)|0Up3%2F~JHVy|GxF?}a7C6C3%AU00&O{S0 z?d0iFfniU9hbB{+qOt+0L?0zR%s30>zI!0h<{gI|@OC?pt}V*|PQz$68<&&3%cAUZ z%CD|{=W>z@kw=@-vnltmHz6eKdc+$TdE`M~fELGZm)-iO8?6W1fbCZB66d*Wt@`&C z_MQHphWmlSN1Oa=c7IRg?hYTV6LCh6K}P2!WVqcCxQPzhfu(3Zg(}_Ki1y0n(0klh z#`^PtON62w9V(w9n)>6}-!HVr+Yiel4~31WuIN%v>*2qZMmjQwC0*0O$0)@RD|&vW1u3b)f;w(O5LtH#t$vH*XuhJ4fwlK+D!sp>cOAO(V?-w6-NO# zIov~;WDiR#%I8;`Gprh2z-dPL<6Jzdb+{uuWY*_+9d*>PCnC)VbXntEN@u1~j0{Q{ zGL-N|!>B|PGH_BWoZJG(Hp{h_16B;e`&YsxRjwW%cySiat%}JG1n|{C_}ZXuz76Jn1$lY~h`-n8(81z-PsM%z9E`5maUJof8)|rW#$i7I5VogGky|}e5gE;+U} z>PHu{ay#O&Z6m$P!1F;fdq*xsLwbb;gZNP`C^Yj3Bl|u;+_UpcB{}B}<#Z1?X#F|> z9m3MW_VpA?>8Peo#32jpPcogP7IV-Vl19a7K;x+%5{-t3_JjU}HhJU>n2s_PM1`$; zsf0tP0beZ-U*9w6cZb-J=nml1cJgeupH|>#8xB3%hl`8wv#JRe04=^M9h^~sBOBnQ zYlpvE-@qYVaNI;Vb&_nLQ?G-LgxYjw?STY$Zihc@hO0Ki-8*#ACJ$yez`gdk_r4sw zF{{#2cb4F;lEec4k%vzXz^&Hu$pRPW;K=~)F3UrAye~uTrkd62ECa`R+Po(KR|O*V z)o$hn_<*U9p>ML{gQx{)7BUDPMrB5l46;Lz<-CgINiu=4CCrWnt*8&vNYx&?PnVEi z9~yN1DeWesMI>$G?nDOD9i*oV3Hbvdp*#sQQJ=3N-oWd!*DDcURwl%0M#R&rf|2T> zC`_F^da(hlIUO-TAXeqdgEwAO4~47_Y(nJ zlk&XCm$#+mkxCLvF4(k5?!gU~xj z3ft#Uila^>@=4>Og8Ke!wD51W<1(2~&pDyc7nC@o<>0zU> zu8Pot-eX)Q0E5=K+4g&ry#A9>nW;v^xpq;SHv%t1hWc$J|IHlx9gmRQuRYjkRmyi8 zG@e|zb{?=85&x5}YtWvQc7PoXJR}x)IH64+NSRT7oVg-jbNhsgow?S?OcxgqOYhbd5 zV93D|iHg6rpwn{gpI1-1nMSp^q85PA!*#Ni`X`ZX4*|Li+z=$E;qcI==pu{Xl~VJu zMo7|j3H2Rm9!(&?gBqknxfSt|ESpdT8TnBzX~sJ!cNPa9lFzkn*VG zHGMSqR#W$|{VfP9d*p~7lO9S3wAlW4895Eo#wO@sdR*1ttwI8>sCSh|V`O*t)Q<;T z=J9AeY%`F792F%BJ#34N4t!rN4?pPmRH#~^hwU?V@@zx)M+@?-olxar`+0GA`MbO? 
zvB6o^!}gN3@Ili(Y>Rk6}y0N)va&slex z2mEIi4)t_$7((vH8_MwQqC6d0lx8A~4R5s`w5?R?XoiyLFl7f-KljDc$lnSVV+|By zOiL=9M`6JG{)YScTM&9J&a^3J(n7Lzj4d7x=nTw?2Tc6;;U~ zL>mgH9Z+?n{Q>;4EHxkKdT%wAgm@rv!ta8S$@bJN}^Jt*tmvMZZJaWO;OQ6MLIqAH;y%87fsJre4Iii3 z=3zU#Di2!{%(i0)JOlS^g_}3SEt~ao#Q^-vG`M|>%2Ite5AVyvlO?#R2sak>^9zk| z@gV%l-fSMYtU(LYon@uxcvlAgP=aq1hZ|4l8F;A=+X85^3ed{~m9MH^2(!RHTdqtw z5NY|U7?s~*;Bf10xte6&$O-6v9wgZ~36oRq?#5jw@-U?>Iyv-yT%M-|x3nRX5VL}}Y$U@5ji(jBZ2>H+Y1fOgU=IX(|4Ee& z=HXW9po6imbkKtgg0xBp6Oj5a&4ZIc2cDgz^ZoLOsGbxu2+?TU#u4MI(S{qbxe?vt zSoh~4I1#;{KPI$W44iGiGYaoN=y0Dcz`4lKc$VZ!2Xk<%bkK&| zd(WV;>{96Zg z%3wGjvaB;XIV#6Sm|JAP#KfIFT(^|>0ox%A!}1msSeZNL0=R2 zIq0r(->s=yARF7EZL%sIc=kFwimfPdCbcRf|qENjiUO zFS%>RdDn5*iSDsKrQKX$icx}RmuJcW$D>VlWyUS-+dn zUKVHE%t19l3yA!EZQv#jX#`HS-@gs5k8$F0SFBqWAJBjK@1FkpA3l zXiZM5B{~xc53XROQQeG;v4uwS%?B zw^VgXlLwis1P7-SDz!v6R=u>oCe;#M&~b0=H)1Q}gu8ZAI8x zf^8*OwG)26M*n}Y4~J#-@3Os606bWPJM6C1jW*s!WYy;F&pS&MeU%OvhX6Cur5YXeS4()lEU`k}y4p6b!w5V#kscg1ux_V$K+VB>`|P_C)8 z)A46#eRU5Qhm)>9ipW6;!+Fm(%o#n&FsuIC%3*gg3aH)MbA9$n9jnZi1t`Sk;$@nNTTZHd2{|+;>c&?cb<{Cd$N+Tb z;P_4%X(x2S!e+%%@3ry5+xp?X55j$$;OHEj)(ofRV45$h5#aVA_=hd zvA4j5cwV5wBAl^C9fNX^YmM0w*@#z5$_LR2hYo(Fle}gJM9p{VZ_T7k$qS3$!c?l1Gu??<| z*r@sn@S;g@LNC1YcG!~SVY};Sf&*G%a}oZ$0j_xtuH69lY=)ycU_mo{XC=J70RP&g z!FR(DTrmJw4#MdmYH|4H_fijD>%*gEJ=m52 zK3f{OiDrO*@L*CE3Gnhj$qQ>2*CoKwo)sqq@g+oZhEZna zh~IK&%wivY=P4*;@>~EvbG+A6j8Ywe*2HjpKF08OTtLd`E$ALr60b&_Y=5pN`Eb)@ zf1h?p@z7z{MHrGrg!+vM_ZMEXk+Fa8X~|Y|Xmie!eE7(sZ2R+p%W->lAMp&IWMoEz zTC}3^nZ^rjpi+A~2_bLQMWJfYxZg;6|GV(bMuX76N}|4W+BVI=?GeAvP$@|@9Vi&M zBpB`_Xh+F%7Z~j=8KZP%_Xcp0U6l4SN|L9<6fyrIaX|vuxvjc9bF*0h`%FH)b!tI;juFbmn z5KS4_uLD|g@X`HYb^yyf_|s1K+@msx0KU6(_#(0m_<9lkQpHibrCfQKYTQ~@XFV3c zaYhDM)F3P(F4GyPWX|t`c+2?-V6NppC7n(Okuc#I(g*%F+&9?0EsE>P)0jYdnI2G` z${wV~O6sfm$fKGG;uUiSc#&0no(%lhvGcrtmbCtI zC3KGluGeeG~?6t9hK4j^{2EOkN56!%@XRL zq!d`aNch%)926}vYep^d_HQR3L*h2#-&3ASLg}P?4*B9~7;SXGwF=BwJ0t?q$K?+D ziA)@eo^uP8c~D^FeQpCb+PSpdRH_423lG-^;;u z+KnOV0rnJC=iUStva~@b?X=Vv4M+oz84 zMK6_V7jc*Z>~DEK%c*Z#b-ZxsK%H*v74X9&;N7>wYI~e$F>rhVPHog@dbV=0Uu}hdS`W{ZWSt)EOL)**<=eZY z2p=D)gfu;54If}C6%np4!52%zy=40vc!N>$Tu|Mvua$?Nay(S9yw@n~)`d=~m&SPV za)|%ajzSytG!MH6Tv9%)LTNvq59Xn8hAZc-j(r2JUa%d&*~mltAkl`~YT$KN^}Ys$ zS?>>$lahv*ZGSE&`5-bHo?#tz>`TblPZFi074g|tCeWv50!uu&q#PZMX+RwP4gYV@)ozjS(D&vk0px^G8Z4CvIx)agdZ)3*W3h;Rt0rmc7wv4=QhE+ z+u(@2GWuS(1HRA?S8a!%OoWeafJ?W_mH(LrIMG-5M)jUD{?*f20`%qq*jIhLBjEoZGsEA@#q60`=UvyC6`6R(r z0cp@!PUAR^MnGZ^k87-Jsk+X&Jn7F9R=Btm_k^_bk@1~&?a|C*7ehTyC8L51+L0qA z?cI+yQMn7!)_Nc1bZsG?588})f-{iF<`RY*V#faeg;sbl#flCtr$WyP!=Q&4CCBZu z23mIdO$9cz>39^p&i@W-Okl7ZgM)y<`t1dPvsNp7dPEaU z%D}V$KDk{Fu>&b7uG%3|UZ;WmJbBYLl;C$I)fSy#)ZuqWjKpmy^mS99&ir0XJQ*ZP zn1>d)q@l8bD(wm9rQW@Y_B)e`5 zAf*i3UlB&|TMf()#MdlG5W_QQ!uo-X(IMi+E*;1yNZK?(&tFVwGg^eIv9yKO$9=I4 z^N6M0r$?nSKpL{RXJ~hYPbG1864Exyu+4pFe5TWOFqjI21GKlPc;#4`Kk@S9T!(I6qUUMuBHV+vtNd}ui zeb`2P3MhjTL?4LW%j&=_O;G`r9h+yb{bWMhPWwG+p8qglm1eXJ5>L{vMOelP()p;a z^W7C5>iZ;vts9j;Y$n=Z3-dwFK$I!pK)$Afx`4%&lQyv`>bJnCas|kkJKU4O)@IwZ zBCa%uFTErqW9=|cq6C2XUZomT!Goy<7|6krsvNYZT>3nV zPC^@rPbuP|8jU@+-8!_Pxfnu6p;J+q$|G(9G@!+4EbHdgvG1XW(dBtu0!%ZCIwT2W zd@5-h>Y>r`MHU$g8PxCFNlpM>{dZx>sesXC9H`^@A&0UNJV(6o*@29+g$6Dyj}#qv zC>_sI12YYLx-`-mQ9vHH<9(@0zY;M~`+S8%&nm!|Hb>`SJGcYR>4PJ?;N>@sQgmQ| zV<*Dv=D^EmLWi~f?phD8z8W62qcm&aCGGHz9yrEIGH%}nuY3ZYuDbCS;FAS7&vM-! 
zD8U=HOFmou;Gf9CVO2?XFD}BDij|x5k>xzax-zE%lMVc_93S>H$a3#OC!6|2*?SOD zC-+5fW*v3ZQAZv79{%bvUDyui&w92Iw!s<}$qNDY*;*+6}xl2XD+NU(r)#sYBnkeWb_YU{CEs{GbTmuCX!$;3N-D zv1-$D0N*LAFn9G>|9@PLHrDd!^xKPme>d?b)h6T`P0G7e#|wwLp;pJ< ziZG|)SaZ4RsH2YIn1uF{xa7haY!RHBhnYoqY!E)a27a;uwwB?50^Doc zPs+drjc{rpe%L2>!mozlrCIpR&~PrnAs)OlqY&sNB_)Wwy$l!C#I^Fkc^({VxkhEA z5WTg=x+D34x&P;8Q^ex?WalRlZ8C4wD~lecjVHa2F)Ya zv`XWVr_XgAacwslaRpt&&>gtvG#pDbAS1)|eY%Q*#zUR#%71PnIg3CS;<3#@BA?po z2^plq4uuD;!1v3;4YpRqVO#3KfksZ6@7O`PE>Ng-j!{Xf<9wCj{AqD`pZGv?7S3#h zGx9Ji3vXNxomEl9)+`(`0S@kk<0inm0eH)8$$e{XdojVKE8xeE!y}u9|MKX`@Y@$d zR{=^ZVdn<;z$5U@rz&lZYKLInD6Jk=ovPBiMU@0a7tU3kD*_S%0S*<;g#CS6)K zcY_9uODT{E=$ZFL?ny5i^hpbI4p@Z9-ASH?4#Z_!L3GKI5VL^ujNZ|Ofm`a3NvKRv zInboNgZS*`i_nI-oaE=~B06jR--34JwNATc1M(ERq?YJL&+1Ypr<7)nqaI8t@95h zv?(AV)MWH7qLQruB|+N0)}jHGKwOLZAgNz_&;cNBjTtRk>`xaCu7?teuONASF!s7G z{5@BARLT&>p%0B=fNLUtp9PkpOcS9H;My8dP!Djnr|bAt^J^%83#?Upm{GaRskMxM zi=URD%ZFEIl*%It84o&|pdlxLN)-Gy$W8;lDf;(uh&7fMb;E}bgICYj4XoS@r~eX` z)vU~O8sX+6q0_1%zO)=Zux$86kIuqpTVakb?prZ{cW;JY)Eo)hP=ejrR!KU^h{(6B z>SDE?i~`z(s*R+>jLKbw1J9tvs&=xpqD|ISo?XWahh}7eByBD|Xp(M6o8~g22lG5s z!YZ7kmz7mPAq@rO9eJAi7};0QhQ?6ZhIAxaR~{$XXgMCK(so)ezKf@Ae-ur41PKq~ zgfTuch@K{WQyvemh%0urXLT%KkeICjJ=(_;_*TDYqY+B+@8T_&3YZgwLhG2}W+lhlBX|xrf3Zvr%f0Bx5b)U2P!U zU~5Fya^0{S8@PDeJQSXA4NDY|hwf?ij!iU=o+l}JXNc+OaN@#?n@oE0X%Nq(ljIO$ zi1Db3@IoV8D?Ie$kNLkukWK|u1}M`j?_bf0$wir02plS!F z=Ha}xu(4($%xZ$O`rt)czXu^w-~(uo^2#v zmKL&8(SS!+i=}se982)dg`vky^}0o6K57 z0FOcrDT2e&A%6_w+g2kiD-)*mNO}jmGOfqHe1abj&yh=iHX&m3Q^XVgb4bA)dKIrH zdXsHK##*R4yRw|Dcn*;P4x^IRBWe8YDQP`+CDNXK0y5YFWZ1YIqH}1R_{d;;q0@Ve z=6&w&X+3hts7;D;buv7yM{QdlWefiZ&#)D*ns%+C)g?n zOFZ~l5iTzc|DHltf`c;)uwNElzYf+^wQ0`6;k}A(o-hG648ogkjqlsg7T7BpJ~OZ? z4^M{Jf0Y#atc8zqtsHOQ>HzLZ=#4x&UENOdLS@i`UMIKj9@rnf=kT;ye=q59%^haY zM&=r3-c!85X>eIM+o>o{ht^?NElr!}N-F4J4{+NIa#W>?Tg9M4of_Q_n z4JFRKE5S0JlhGa&mpIoqa8wuE)j!J8RC@!?^x!pKB^Lah5?o!=nGJ{+cZ^Yy*M|f6 zZcK-JDAaj`fm>|oAr5(zpXpZ8i|B@RFzZQn>)73J0tx}ArHkl7Uh111uIZrz&s~4h zo$X!N*904N)KSOthqtS5V64%A4583i`%{Fm28x*8*Ah`sKGjxicX(YAp~^2vP(88nCb9NJDseyG1LzJkFgf?BsS@UB0 zg!=t5-1ufKz?n8R%qqltizC9%w%1%ka=5$|@p;CQbCxE~@R4EFgYto;jgM9oo^vq- z0yKA;NhTuu=v}&SnL>|tT4fQ?Gi_pQy;MH(Xs0bV1=QzhA>LjTHc28?gGk@=4O||$ zk-GE1sc0;%LV}k#@y87GT&qbI9kifvoYtQ{dZr|TdI8P1EXfrFsxi^tUz*^=^&O1{ zX$nM82zx&?HeL8=K02r@C;6RvXe_%Zs2kBcPg=v8P$|B(xN+?~#O+#-+j={eoWyWH zOcL5mMAW7x#7(P37O6gG+IF6uN7e7B2Z;(67?@xew^AS>!q=>waIk@6Jmu~TMFpD! z_@pf;!1V?1ygWHlrdGE-=bPry? zGkE|UVBphPc*hVtnKDCvRhBw*2k>wyeq-DKe7Y<{u(l>?Ev~aF!L$W;9q>zJjTg|O z7xn#R#DnuAepibvNMYmxS3+x%JtM^GB1p-jg(+z-gOdBaUF2fl)%rIv0fV!^Qi&&9U8R;9>}b zWcsw{o`lL7ZA1ot+`K!~Xfpwr>}aqR(epop)S*c{!hYL6t)Tn>Zv9y%-MHC-#$CbT zdLh=~Hb2JNSab0ZHzI>Atq-eVZ2NYStLl_-KR~;vqPHQ(fomU?QQy0ayRi=8mq|C! 
[GIT binary patch payload omitted: base85-encoded literal data, not human-readable]