From ac570e85aa0db7d0be03a5b721a955c9c7ef81e6 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 09:28:51 +0100 Subject: [PATCH 001/149] fixed test --- test_autofit/serialise/test_samples.py | 32 +++++++++++++++++++------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/test_autofit/serialise/test_samples.py b/test_autofit/serialise/test_samples.py index 918f05e2a..9c333d1ce 100644 --- a/test_autofit/serialise/test_samples.py +++ b/test_autofit/serialise/test_samples.py @@ -52,17 +52,21 @@ def make_summary_dict(): "type": "instance", "class_path": "autofit.non_linear.samples.summary.SamplesSummary", "arguments": { - "errors_at_sigma_1": { + "values_at_sigma_3": { "type": "list", - "values": [(2.0, 0.0), (3.0, 0.0), (4.0, 0.0)], + "values": [ + {"type": "tuple", "values": [0.0, 2.0]}, + {"type": "tuple", "values": [1.0, 4.0]}, + {"type": "tuple", "values": [2.0, 6.0]}, + ], }, "errors_at_sigma_3": { "type": "list", - "values": [(2.0, 0.0), (3.0, 0.0), (4.0, 0.0)], - }, - "values_at_sigma_3": { - "type": "list", - "values": [(0.0, 2.0), (1.0, 4.0), (2.0, 6.0)], + "values": [ + {"type": "tuple", "values": [2.0, 0.0]}, + {"type": "tuple", "values": [3.0, 0.0]}, + {"type": "tuple", "values": [4.0, 0.0]}, + ], }, "max_log_likelihood_sample": { "type": "instance", @@ -83,7 +87,19 @@ def make_summary_dict(): }, "values_at_sigma_1": { "type": "list", - "values": [(0.0, 2.0), (1.0, 4.0), (2.0, 6.0)], + "values": [ + {"type": "tuple", "values": [0.0, 2.0]}, + {"type": "tuple", "values": [1.0, 4.0]}, + {"type": "tuple", "values": [2.0, 6.0]}, + ], + }, + "errors_at_sigma_1": { + "type": "list", + "values": [ + {"type": "tuple", "values": [2.0, 0.0]}, + {"type": "tuple", "values": [3.0, 0.0]}, + {"type": "tuple", "values": [4.0, 0.0]}, + ], }, "log_evidence": None, "median_pdf_sample": { From 10ad4f9b0cde10ee66a5db6e3cccdf5bf3d0416a Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 10:03:06 +0100 Subject: [PATCH 002/149] creating an Array 
model --- autofit/__init__.py | 2 +- autofit/mapper/prior_model/array.py | 25 +++++++++++++++++++ test_autofit/mapper/functionality/__init__.py | 0 .../{ => functionality}/test_by_path.py | 0 .../test_explicit_width_modifier.py | 0 .../test_from_data_names.py | 0 .../mapper/{ => functionality}/test_has.py | 0 .../test_take_attributes.py | 0 test_autofit/mapper/test_array.py | 9 +++++++ 9 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 autofit/mapper/prior_model/array.py create mode 100644 test_autofit/mapper/functionality/__init__.py rename test_autofit/mapper/{ => functionality}/test_by_path.py (100%) rename test_autofit/mapper/{ => functionality}/test_explicit_width_modifier.py (100%) rename test_autofit/mapper/{ => functionality}/test_from_data_names.py (100%) rename test_autofit/mapper/{ => functionality}/test_has.py (100%) rename test_autofit/mapper/{ => functionality}/test_take_attributes.py (100%) create mode 100644 test_autofit/mapper/test_array.py diff --git a/autofit/__init__.py b/autofit/__init__.py index a02199d53..4852f150a 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -57,7 +57,7 @@ from .mapper.prior_model.annotation import AnnotationPriorModel from .mapper.prior_model.collection import Collection from .mapper.prior_model.prior_model import Model -from .mapper.prior_model.prior_model import Model +from .mapper.prior_model.array import Array from .non_linear.search.abstract_search import NonLinearSearch from .non_linear.analysis.visualize import Visualizer from .non_linear.analysis.analysis import Analysis diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py new file mode 100644 index 000000000..cfbcc7a80 --- /dev/null +++ b/autofit/mapper/prior_model/array.py @@ -0,0 +1,25 @@ +from typing import Tuple + +from .abstract import AbstractPriorModel +from autofit.mapper.prior.abstract import Prior +import numpy as np + + +class Array(AbstractPriorModel): + def __init__(self, shape: 
Tuple[int, ...], prior: Prior): + """ + An array of priors. + + Parameters + ---------- + shape : (int, int) + The shape of the array. + prior : Prior + The prior of every entry in the array. + """ + super().__init__() + self.shape = shape + + for key in np.ndindex(*shape): + suffix = "_".join(map(str, key)) + setattr(self, f"prior_{suffix}", prior.new()) diff --git a/test_autofit/mapper/functionality/__init__.py b/test_autofit/mapper/functionality/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test_autofit/mapper/test_by_path.py b/test_autofit/mapper/functionality/test_by_path.py similarity index 100% rename from test_autofit/mapper/test_by_path.py rename to test_autofit/mapper/functionality/test_by_path.py diff --git a/test_autofit/mapper/test_explicit_width_modifier.py b/test_autofit/mapper/functionality/test_explicit_width_modifier.py similarity index 100% rename from test_autofit/mapper/test_explicit_width_modifier.py rename to test_autofit/mapper/functionality/test_explicit_width_modifier.py diff --git a/test_autofit/mapper/test_from_data_names.py b/test_autofit/mapper/functionality/test_from_data_names.py similarity index 100% rename from test_autofit/mapper/test_from_data_names.py rename to test_autofit/mapper/functionality/test_from_data_names.py diff --git a/test_autofit/mapper/test_has.py b/test_autofit/mapper/functionality/test_has.py similarity index 100% rename from test_autofit/mapper/test_has.py rename to test_autofit/mapper/functionality/test_has.py diff --git a/test_autofit/mapper/test_take_attributes.py b/test_autofit/mapper/functionality/test_take_attributes.py similarity index 100% rename from test_autofit/mapper/test_take_attributes.py rename to test_autofit/mapper/functionality/test_take_attributes.py diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py new file mode 100644 index 000000000..269de1e9a --- /dev/null +++ b/test_autofit/mapper/test_array.py @@ -0,0 +1,9 @@ +import autofit as 
af + + +def test_instantiate(): + array = af.Array( + shape=(2, 2), + prior=af.GaussianPrior(mean=0.0, sigma=1.0), + ) + assert array.prior_count == 4 From 258885450ea96da5b5f82ffb3a3e7f45a973c00a Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 10:07:57 +0100 Subject: [PATCH 003/149] instance from prior medians for special case of 2d array --- autofit/mapper/prior_model/array.py | 17 ++++++++++++++++- test_autofit/mapper/test_array.py | 15 +++++++++++++-- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py index cfbcc7a80..84103f908 100644 --- a/autofit/mapper/prior_model/array.py +++ b/autofit/mapper/prior_model/array.py @@ -1,4 +1,4 @@ -from typing import Tuple +from typing import Tuple, Dict from .abstract import AbstractPriorModel from autofit.mapper.prior.abstract import Prior @@ -23,3 +23,18 @@ def __init__(self, shape: Tuple[int, ...], prior: Prior): for key in np.ndindex(*shape): suffix = "_".join(map(str, key)) setattr(self, f"prior_{suffix}", prior.new()) + + def _instance_for_arguments( + self, + arguments: Dict[Prior, float], + ignore_assertions: bool = False, + ): + return np.array( + [ + [ + arguments[getattr(self, f"prior_{i}_{j}")] + for j in range(self.shape[1]) + ] + for i in range(self.shape[0]) + ] + ) diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index 269de1e9a..d16358a32 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -1,9 +1,20 @@ +import pytest + import autofit as af -def test_instantiate(): - array = af.Array( +@pytest.fixture +def array(): + return af.Array( shape=(2, 2), prior=af.GaussianPrior(mean=0.0, sigma=1.0), ) + + +def test_instantiate(array): assert array.prior_count == 4 + + +def test_instance(array): + instance = array.instance_from_prior_medians() + assert (instance == [[0.0, 0.0], [0.0, 0.0]]).all() From f9e4820c3e2c60beea7f8163754f6339994edc9b Mon Sep 
17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 10:27:28 +0100 Subject: [PATCH 004/149] generalised instance for method --- autofit/mapper/prior_model/array.py | 31 ++++++++++++++++++----------- test_autofit/mapper/test_array.py | 25 ++++++++++++++++++++++- 2 files changed, 43 insertions(+), 13 deletions(-) diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py index 84103f908..7282596c0 100644 --- a/autofit/mapper/prior_model/array.py +++ b/autofit/mapper/prior_model/array.py @@ -19,22 +19,29 @@ def __init__(self, shape: Tuple[int, ...], prior: Prior): """ super().__init__() self.shape = shape + self.indices = np.ndindex(*shape) - for key in np.ndindex(*shape): - suffix = "_".join(map(str, key)) - setattr(self, f"prior_{suffix}", prior.new()) + for index in self.indices: + setattr( + self, + self._make_key(index), + prior.new(), + ) + + @staticmethod + def _make_key(index): + suffix = "_".join(map(str, index)) + return f"prior_{suffix}" def _instance_for_arguments( self, arguments: Dict[Prior, float], ignore_assertions: bool = False, ): - return np.array( - [ - [ - arguments[getattr(self, f"prior_{i}_{j}")] - for j in range(self.shape[1]) - ] - for i in range(self.shape[0]) - ] - ) + array = np.zeros(self.shape) + for index in self.indices: + key = self._make_key(index) + array[index] = getattr(self, key).instance_for_arguments( + arguments, ignore_assertions + ) + return array diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index d16358a32..e67fe8a24 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -11,10 +11,33 @@ def array(): ) -def test_instantiate(array): +@pytest.fixture +def array_3d(): + return af.Array( + shape=(2, 2, 2), + prior=af.GaussianPrior(mean=0.0, sigma=1.0), + ) + + +def test_prior_count(array): assert array.prior_count == 4 +def test_prior_count_3d(array_3d): + assert array_3d.prior_count == 8 + + def test_instance(array): instance = 
array.instance_from_prior_medians() assert (instance == [[0.0, 0.0], [0.0, 0.0]]).all() + + +def test_instance_3d(array_3d): + instance = array_3d.instance_from_prior_medians() + assert ( + instance + == [ + [[0.0, 0.0], [0.0, 0.0]], + [[0.0, 0.0], [0.0, 0.0]], + ] + ).all() From 66d055e86d982a9ff2f9c55c38f572f4defcffc4 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 10:42:21 +0100 Subject: [PATCH 005/149] use set and get item to simplify implementation --- autofit/mapper/prior_model/array.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py index 7282596c0..21aa57fd8 100644 --- a/autofit/mapper/prior_model/array.py +++ b/autofit/mapper/prior_model/array.py @@ -22,11 +22,7 @@ def __init__(self, shape: Tuple[int, ...], prior: Prior): self.indices = np.ndindex(*shape) for index in self.indices: - setattr( - self, - self._make_key(index), - prior.new(), - ) + self[index] = prior.new() @staticmethod def _make_key(index): @@ -40,8 +36,13 @@ def _instance_for_arguments( ): array = np.zeros(self.shape) for index in self.indices: - key = self._make_key(index) - array[index] = getattr(self, key).instance_for_arguments( + array[index] = self[index].instance_for_arguments( arguments, ignore_assertions ) return array + + def __setitem__(self, key, value): + setattr(self, self._make_key(key), value) + + def __getitem__(self, key): + return getattr(self, self._make_key(key)) From 08f8a73e2bf389ff7773ec423e484d18d253327a Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 10:43:48 +0100 Subject: [PATCH 006/149] test modification --- test_autofit/mapper/test_array.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index e67fe8a24..01407830a 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -41,3 +41,8 @@ def test_instance_3d(array_3d): 
[[0.0, 0.0], [0.0, 0.0]], ] ).all() + + +def test_modify_prior(array): + array[0, 0] = 1.0 + assert array.prior_count == 3 From a0f0b87ab548e49bea1d112a7986a943dc3bcf3d Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 10:57:14 +0100 Subject: [PATCH 007/149] modifying values + fix --- autofit/mapper/prior_model/array.py | 15 +++++++++++---- test_autofit/mapper/test_array.py | 7 +++++++ 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py index 21aa57fd8..8923c6126 100644 --- a/autofit/mapper/prior_model/array.py +++ b/autofit/mapper/prior_model/array.py @@ -19,7 +19,7 @@ def __init__(self, shape: Tuple[int, ...], prior: Prior): """ super().__init__() self.shape = shape - self.indices = np.ndindex(*shape) + self.indices = list(np.ndindex(*shape)) for index in self.indices: self[index] = prior.new() @@ -36,9 +36,16 @@ def _instance_for_arguments( ): array = np.zeros(self.shape) for index in self.indices: - array[index] = self[index].instance_for_arguments( - arguments, ignore_assertions - ) + value = self[index] + try: + value = value.instance_for_arguments( + arguments, + ignore_assertions, + ) + except AttributeError: + pass + + array[index] = value return array def __setitem__(self, key, value): diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index 01407830a..ef3a7d518 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -46,3 +46,10 @@ def test_instance_3d(array_3d): def test_modify_prior(array): array[0, 0] = 1.0 assert array.prior_count == 3 + assert ( + array.instance_from_prior_medians() + == [ + [1.0, 0.0], + [0.0, 0.0], + ] + ).all() From dcc1195367a4bcaf84af392eaaa45f09b99d1615 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 10:58:15 +0100 Subject: [PATCH 008/149] more testing --- test_autofit/mapper/test_array.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git 
a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index ef3a7d518..2002d6b96 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -53,3 +53,13 @@ def test_modify_prior(array): [0.0, 0.0], ] ).all() + + +def test_correlation(array): + array[0, 0] = array[1, 1] + array[0, 1] = array[1, 0] + + instance = array.random_instance() + + assert instance[0, 0] == instance[1, 1] + assert instance[0, 1] == instance[1, 0] From f5116ab76317205d17e9ad3900e3097464f1b481 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 11:11:21 +0100 Subject: [PATCH 009/149] array from dict --- autofit/mapper/model_object.py | 12 +++++++++++- test_autofit/mapper/test_array.py | 18 ++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index aea48e872..10e79962e 100644 --- a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -234,7 +234,17 @@ def get_class_path(): f"Could not find type for class path {class_path}. Defaulting to Instance placeholder." 
) instance = ModelInstance() - + elif type_ == "array": + from autofit.mapper.prior_model.array import Array + + return Array( + shape=d["shape"], + prior=from_dict( + d["prior"], + reference=dereference(reference, "prior"), + loaded_ids=loaded_ids, + ), + ) else: try: return Prior.from_dict(d, loaded_ids=loaded_ids) diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index 2002d6b96..2ecefd0fd 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -63,3 +63,21 @@ def test_correlation(array): assert instance[0, 0] == instance[1, 1] assert instance[0, 1] == instance[1, 0] + + +def test_from_dict(): + array = af.AbstractPriorModel.from_dict( + { + "type": "array", + "shape": (2, 2), + "prior": {"type": "Gaussian", "mean": 0.0, "sigma": 1.0}, + } + ) + assert array.prior_count == 4 + assert ( + array.instance_from_prior_medians() + == [ + [0.0, 0.0], + [0.0, 0.0], + ] + ).all() From fc30f866d7b1eb16f8f430bb61ece3a3cfa8c059 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 11:47:52 +0100 Subject: [PATCH 010/149] testing complex dict --- autofit/mapper/model_object.py | 3 ++ test_autofit/mapper/test_array.py | 56 +++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index 10e79962e..fb24e7d6c 100644 --- a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -286,6 +286,7 @@ def dict(self) -> dict: from autofit.mapper.prior_model.collection import Collection from autofit.mapper.prior_model.prior_model import Model from autofit.mapper.prior.tuple_prior import TuplePrior + from autofit.mapper.prior_model.array import Array if isinstance(self, Collection): type_ = "collection" @@ -295,6 +296,8 @@ def dict(self) -> dict: type_ = "model" elif isinstance(self, TuplePrior): type_ = "tuple_prior" + elif isinstance(self, Array): + type_ = "array" else: raise AssertionError( 
f"{self.__class__.__name__} cannot be serialised to dict" diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index 2ecefd0fd..c636d029c 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -1,6 +1,7 @@ import pytest import autofit as af +from autoconf.dictable import to_dict @pytest.fixture @@ -81,3 +82,58 @@ def test_from_dict(): [0.0, 0.0], ] ).all() + + +@pytest.fixture +def array_dict(): + return { + "arguments": { + "indices": { + "type": "list", + "values": [ + {"type": "tuple", "values": [0, 0]}, + {"type": "tuple", "values": [0, 1]}, + {"type": "tuple", "values": [1, 0]}, + {"type": "tuple", "values": [1, 1]}, + ], + }, + "prior_0_0": { + "id": 1, + "lower_limit": float("-inf"), + "mean": 0.0, + "sigma": 1.0, + "type": "Gaussian", + "upper_limit": float("inf"), + }, + "prior_0_1": { + "id": 2, + "lower_limit": float("-inf"), + "mean": 0.0, + "sigma": 1.0, + "type": "Gaussian", + "upper_limit": float("inf"), + }, + "prior_1_0": { + "id": 3, + "lower_limit": float("-inf"), + "mean": 0.0, + "sigma": 1.0, + "type": "Gaussian", + "upper_limit": float("inf"), + }, + "prior_1_1": { + "id": 4, + "lower_limit": float("-inf"), + "mean": 0.0, + "sigma": 1.0, + "type": "Gaussian", + "upper_limit": float("inf"), + }, + "shape": {"type": "tuple", "values": [2, 2]}, + }, + "type": "array", + } + + +def test_to_dict(array, array_dict): + assert to_dict(array) == array_dict From 7a50b5a46560300c128d85a6f901cdd3b41452d0 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 11:58:47 +0100 Subject: [PATCH 011/149] properly handling from dict --- autofit/mapper/model_object.py | 10 ++------ autofit/mapper/prior_model/array.py | 30 ++++++++++++++++++++--- test_autofit/mapper/test_array.py | 38 +++++++++++------------------ 3 files changed, 42 insertions(+), 36 deletions(-) diff --git a/autofit/mapper/model_object.py b/autofit/mapper/model_object.py index fb24e7d6c..05be46ef7 100644 --- 
a/autofit/mapper/model_object.py +++ b/autofit/mapper/model_object.py @@ -150,6 +150,7 @@ def from_dict( from autofit.mapper.prior_model.collection import Collection from autofit.mapper.prior_model.prior_model import Model from autofit.mapper.prior.abstract import Prior + from autofit.mapper.prior.gaussian import GaussianPrior from autofit.mapper.prior.tuple_prior import TuplePrior from autofit.mapper.prior.arithmetic.compound import Compound from autofit.mapper.prior.arithmetic.compound import ModifiedPrior @@ -237,14 +238,7 @@ def get_class_path(): elif type_ == "array": from autofit.mapper.prior_model.array import Array - return Array( - shape=d["shape"], - prior=from_dict( - d["prior"], - reference=dereference(reference, "prior"), - loaded_ids=loaded_ids, - ), - ) + return Array.from_dict(d) else: try: return Prior.from_dict(d, loaded_ids=loaded_ids) diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py index 8923c6126..6dbf6ab32 100644 --- a/autofit/mapper/prior_model/array.py +++ b/autofit/mapper/prior_model/array.py @@ -1,12 +1,17 @@ -from typing import Tuple, Dict +from typing import Tuple, Dict, Optional +from autoconf.dictable import from_dict from .abstract import AbstractPriorModel from autofit.mapper.prior.abstract import Prior import numpy as np class Array(AbstractPriorModel): - def __init__(self, shape: Tuple[int, ...], prior: Prior): + def __init__( + self, + shape: Tuple[int, ...], + prior: Optional[Prior] = None, + ): """ An array of priors. 
@@ -21,8 +26,9 @@ def __init__(self, shape: Tuple[int, ...], prior: Prior): self.shape = shape self.indices = list(np.ndindex(*shape)) - for index in self.indices: - self[index] = prior.new() + if prior is not None: + for index in self.indices: + self[index] = prior.new() @staticmethod def _make_key(index): @@ -53,3 +59,19 @@ def __setitem__(self, key, value): def __getitem__(self, key): return getattr(self, self._make_key(key)) + + @classmethod + def from_dict( + cls, + d, + reference: Optional[Dict[str, str]] = None, + loaded_ids: Optional[dict] = None, + ): + arguments = d["arguments"] + shape = from_dict(arguments["shape"]) + array = cls(shape) + for key, value in arguments.items(): + if key.startswith("prior"): + setattr(array, key, from_dict(value)) + + return array diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index c636d029c..322d69203 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -66,24 +66,6 @@ def test_correlation(array): assert instance[0, 1] == instance[1, 0] -def test_from_dict(): - array = af.AbstractPriorModel.from_dict( - { - "type": "array", - "shape": (2, 2), - "prior": {"type": "Gaussian", "mean": 0.0, "sigma": 1.0}, - } - ) - assert array.prior_count == 4 - assert ( - array.instance_from_prior_medians() - == [ - [0.0, 0.0], - [0.0, 0.0], - ] - ).all() - - @pytest.fixture def array_dict(): return { @@ -98,7 +80,6 @@ def array_dict(): ], }, "prior_0_0": { - "id": 1, "lower_limit": float("-inf"), "mean": 0.0, "sigma": 1.0, @@ -106,7 +87,6 @@ def array_dict(): "upper_limit": float("inf"), }, "prior_0_1": { - "id": 2, "lower_limit": float("-inf"), "mean": 0.0, "sigma": 1.0, @@ -114,7 +94,6 @@ def array_dict(): "upper_limit": float("inf"), }, "prior_1_0": { - "id": 3, "lower_limit": float("-inf"), "mean": 0.0, "sigma": 1.0, @@ -122,7 +101,6 @@ def array_dict(): "upper_limit": float("inf"), }, "prior_1_1": { - "id": 4, "lower_limit": float("-inf"), "mean": 0.0, "sigma": 
1.0, @@ -135,5 +113,17 @@ def array_dict(): } -def test_to_dict(array, array_dict): - assert to_dict(array) == array_dict +def test_to_dict(array, array_dict, remove_ids): + assert remove_ids(to_dict(array)) == array_dict + + +def test_from_dict(array_dict): + array = af.AbstractPriorModel.from_dict(array_dict) + assert array.prior_count == 4 + assert ( + array.instance_from_prior_medians() + == [ + [0.0, 0.0], + [0.0, 0.0], + ] + ).all() From 718fed9f0f6b85292d2d65f36d051e8167631514 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 14:15:35 +0100 Subject: [PATCH 012/149] docs and typws --- autofit/mapper/prior_model/array.py | 103 +++++++++++++++++++++++++--- 1 file changed, 94 insertions(+), 9 deletions(-) diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py index 6dbf6ab32..39866d71b 100644 --- a/autofit/mapper/prior_model/array.py +++ b/autofit/mapper/prior_model/array.py @@ -1,4 +1,4 @@ -from typing import Tuple, Dict, Optional +from typing import Tuple, Dict, Optional, Union from autoconf.dictable import from_dict from .abstract import AbstractPriorModel @@ -31,15 +31,48 @@ def __init__( self[index] = prior.new() @staticmethod - def _make_key(index): - suffix = "_".join(map(str, index)) + def _make_key(index: Tuple[int, ...]) -> str: + """ + Make a key for the prior. + + This is so an index (e.g. (1, 2)) can be used to access a + prior (e.g. prior_1_2). + + Parameters + ---------- + index + The index of an element in an array. + + Returns + ------- + The attribute name for the prior. + """ + if isinstance(index, int): + suffix = f"_{index}" + else: + suffix = "_".join(map(str, index)) return f"prior_{suffix}" def _instance_for_arguments( self, arguments: Dict[Prior, float], ignore_assertions: bool = False, - ): + ) -> np.ndarray: + """ + Create an array where the prior at each index is replaced with the + a concrete value. + + Parameters + ---------- + arguments + The arguments to replace the priors with. 
+ ignore_assertions + Whether to ignore assertions in the priors. + + Returns + ------- + The array with the priors replaced. + """ array = np.zeros(self.shape) for index in self.indices: value = self[index] @@ -54,11 +87,47 @@ def _instance_for_arguments( array[index] = value return array - def __setitem__(self, key, value): - setattr(self, self._make_key(key), value) + def __setitem__( + self, + index: Union[int, Tuple[int, ...]], + value: Union[float, Prior], + ): + """ + Set the value at an index. - def __getitem__(self, key): - return getattr(self, self._make_key(key)) + Parameters + ---------- + index + The index of the prior. + value + The new value. + """ + setattr( + self, + self._make_key(index), + value, + ) + + def __getitem__( + self, + index: Union[int, Tuple[int, ...]], + ) -> Union[float, Prior]: + """ + Get the value at an index. + + Parameters + ---------- + index + The index of the value. + + Returns + ------- + The value at the index. + """ + return getattr( + self, + self._make_key(index), + ) @classmethod def from_dict( @@ -66,7 +135,23 @@ def from_dict( d, reference: Optional[Dict[str, str]] = None, loaded_ids: Optional[dict] = None, - ): + ) -> "Array": + """ + Create an array from a dictionary. + + Parameters + ---------- + d + The dictionary. + reference + A dictionary of references. + loaded_ids + A dictionary of loaded ids. + + Returns + ------- + The array. 
+ """ arguments = d["arguments"] shape = from_dict(arguments["shape"]) array = cls(shape) From 00ba187bdadabb431a6a214754f3e71aa327ee20 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 14:16:57 +0100 Subject: [PATCH 013/149] test 1d array --- test_autofit/mapper/test_array.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index 322d69203..6cf1ebfea 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -127,3 +127,16 @@ def test_from_dict(array_dict): [0.0, 0.0], ] ).all() + + +@pytest.fixture +def array_1d(): + return af.Array( + shape=(2,), + prior=af.GaussianPrior(mean=0.0, sigma=1.0), + ) + + +def test_1d_array(array_1d): + assert array_1d.prior_count == 2 + assert (array_1d.instance_from_prior_medians() == [0.0, 0.0]).all() From df708b72bab7c985a3241f6170f466b4d383d447 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 1 Jul 2024 14:18:18 +0100 Subject: [PATCH 014/149] modifying values on 1d arrays --- autofit/mapper/prior_model/array.py | 2 +- test_autofit/mapper/test_array.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py index 39866d71b..62a9204cb 100644 --- a/autofit/mapper/prior_model/array.py +++ b/autofit/mapper/prior_model/array.py @@ -48,7 +48,7 @@ def _make_key(index: Tuple[int, ...]) -> str: The attribute name for the prior. 
""" if isinstance(index, int): - suffix = f"_{index}" + suffix = str(index) else: suffix = "_".join(map(str, index)) return f"prior_{suffix}" diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index 6cf1ebfea..099993f81 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -140,3 +140,9 @@ def array_1d(): def test_1d_array(array_1d): assert array_1d.prior_count == 2 assert (array_1d.instance_from_prior_medians() == [0.0, 0.0]).all() + + +def test_1d_array_modify_prior(array_1d): + array_1d[0] = 1.0 + assert array_1d.prior_count == 1 + assert (array_1d.instance_from_prior_medians() == [1.0, 0.0]).all() From 5353631de0579bd436ed2749b586af40c066cc7a Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 8 Jul 2024 09:34:07 +0100 Subject: [PATCH 015/149] tree flatten and unflatten for pytrees (jax) --- autofit/mapper/prior/abstract.py | 2 -- autofit/mapper/prior_model/array.py | 22 ++++++++++++++++++++++ test_autofit/mapper/test_array.py | 16 ++++++++++++++++ 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/autofit/mapper/prior/abstract.py b/autofit/mapper/prior/abstract.py index fd8669fc3..a1e3c30db 100644 --- a/autofit/mapper/prior/abstract.py +++ b/autofit/mapper/prior/abstract.py @@ -1,5 +1,4 @@ import itertools -import os import random from abc import ABC, abstractmethod from copy import copy @@ -115,7 +114,6 @@ def factor(self): return self.message.factor def assert_within_limits(self, value): - if jax_wrapper.use_jax: return diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py index 62a9204cb..ac7d50a33 100644 --- a/autofit/mapper/prior_model/array.py +++ b/autofit/mapper/prior_model/array.py @@ -5,7 +5,10 @@ from autofit.mapper.prior.abstract import Prior import numpy as np +from autofit.jax_wrapper import register_pytree_node_class + +@register_pytree_node_class class Array(AbstractPriorModel): def __init__( self, @@ -160,3 +163,22 @@ def from_dict( 
setattr(array, key, from_dict(value)) return array + + def tree_flatten(self): + """ + Flatten this array model as a PyTree. + """ + members = [self[index] for index in self.indices] + return members, (self.shape,) + + @classmethod + def tree_unflatten(cls, aux_data, children): + """ + Unflatten a PyTree into an array model. + """ + (shape,) = aux_data + instance = cls(shape) + for index, child in zip(instance.indices, children): + instance[index] = child + + return instance diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index 099993f81..282fed3af 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -146,3 +146,19 @@ def test_1d_array_modify_prior(array_1d): array_1d[0] = 1.0 assert array_1d.prior_count == 1 assert (array_1d.instance_from_prior_medians() == [1.0, 0.0]).all() + + +def test_tree_flatten(array): + children, aux = array.tree_flatten() + assert len(children) == 4 + assert aux == ((2, 2),) + + new_array = af.Array.tree_unflatten(aux, children) + assert new_array.prior_count == 4 + assert ( + new_array.instance_from_prior_medians() + == [ + [0.0, 0.0], + [0.0, 0.0], + ] + ).all() From fd44f538170ab61d5e8bf302f742c6bed353c266 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 8 Jul 2024 10:42:03 +0100 Subject: [PATCH 016/149] array prior passing --- autofit/mapper/prior_model/array.py | 18 ++++++++++++++ test_autofit/mapper/test_array.py | 37 +++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py index ac7d50a33..aa4482204 100644 --- a/autofit/mapper/prior_model/array.py +++ b/autofit/mapper/prior_model/array.py @@ -182,3 +182,21 @@ def tree_unflatten(cls, aux_data, children): instance[index] = child return instance + + @property + def prior_class_dict(self): + return { + **{ + prior: cls + for prior_model in self.direct_prior_model_tuples + for prior, cls in 
prior_model[1].prior_class_dict.items() + }, + **{prior: np.ndarray for _, prior in self.direct_prior_tuples}, + } + + def gaussian_prior_model_for_arguments(self, arguments): + new_array = Array(self.shape) + for index in self.indices: + new_array[index] = self[index].gaussian_prior_model_for_arguments(arguments) + + return new_array diff --git a/test_autofit/mapper/test_array.py b/test_autofit/mapper/test_array.py index 282fed3af..836eb91f9 100644 --- a/test_autofit/mapper/test_array.py +++ b/test_autofit/mapper/test_array.py @@ -1,4 +1,5 @@ import pytest +import numpy as np import autofit as af from autoconf.dictable import to_dict @@ -162,3 +163,39 @@ def test_tree_flatten(array): [0.0, 0.0], ] ).all() + + +class Analysis(af.Analysis): + def log_likelihood_function(self, instance): + return -float( + np.mean( + ( + np.array( + [ + [0.1, 0.2], + [0.3, 0.4], + ] + ) + - instance + ) + ** 2 + ) + ) + + +def test_optimisation(): + array = af.Array( + shape=(2, 2), + prior=af.UniformPrior( + lower_limit=0.0, + upper_limit=1.0, + ), + ) + result = af.DynestyStatic().fit(model=array, analysis=Analysis()) + + posterior = result.model + array[0, 0] = posterior[0, 0] + array[0, 1] = posterior[0, 1] + + result = af.DynestyStatic().fit(model=array, analysis=Analysis()) + assert isinstance(result.instance, np.ndarray) From ed5c58271ed126c810ebe53aa4093ada4c02ba1f Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 8 Jul 2024 10:45:53 +0100 Subject: [PATCH 017/149] docs --- autofit/mapper/prior_model/array.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/autofit/mapper/prior_model/array.py b/autofit/mapper/prior_model/array.py index aa4482204..c37c786b2 100644 --- a/autofit/mapper/prior_model/array.py +++ b/autofit/mapper/prior_model/array.py @@ -194,7 +194,20 @@ def prior_class_dict(self): **{prior: np.ndarray for _, prior in self.direct_prior_tuples}, } - def gaussian_prior_model_for_arguments(self, arguments): + def 
gaussian_prior_model_for_arguments(self, arguments: Dict[Prior, Prior]): + """ + Returns a new instance of model mapper with a set of Gaussian priors based on + tuples provided by a previous nonlinear search. + + Parameters + ---------- + arguments + Tuples providing the mean and sigma of gaussians + + Returns + ------- + A new model mapper populated with Gaussian priors + """ new_array = Array(self.shape) for index in self.indices: new_array[index] = self[index].gaussian_prior_model_for_arguments(arguments) From f04c56a8142292056224bbf974502095b1462a12 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 22 Jul 2024 14:27:39 +0100 Subject: [PATCH 018/149] fix test --- test_autofit/serialise/test_samples.py | 46 +++++++++++++++++--------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/test_autofit/serialise/test_samples.py b/test_autofit/serialise/test_samples.py index 918f05e2a..12e039ef4 100644 --- a/test_autofit/serialise/test_samples.py +++ b/test_autofit/serialise/test_samples.py @@ -52,18 +52,6 @@ def make_summary_dict(): "type": "instance", "class_path": "autofit.non_linear.samples.summary.SamplesSummary", "arguments": { - "errors_at_sigma_1": { - "type": "list", - "values": [(2.0, 0.0), (3.0, 0.0), (4.0, 0.0)], - }, - "errors_at_sigma_3": { - "type": "list", - "values": [(2.0, 0.0), (3.0, 0.0), (4.0, 0.0)], - }, - "values_at_sigma_3": { - "type": "list", - "values": [(0.0, 2.0), (1.0, 4.0), (2.0, 6.0)], - }, "max_log_likelihood_sample": { "type": "instance", "class_path": "autofit.non_linear.samples.sample.Sample", @@ -81,11 +69,14 @@ def make_summary_dict(): }, }, }, - "values_at_sigma_1": { + "values_at_sigma_3": { "type": "list", - "values": [(0.0, 2.0), (1.0, 4.0), (2.0, 6.0)], + "values": [ + {"type": "tuple", "values": [0.0, 2.0]}, + {"type": "tuple", "values": [1.0, 4.0]}, + {"type": "tuple", "values": [2.0, 6.0]}, + ], }, - "log_evidence": None, "median_pdf_sample": { "type": "instance", "class_path": 
"autofit.non_linear.samples.sample.Sample", @@ -103,6 +94,31 @@ def make_summary_dict(): }, }, }, + "errors_at_sigma_3": { + "type": "list", + "values": [ + {"type": "tuple", "values": [2.0, 0.0]}, + {"type": "tuple", "values": [3.0, 0.0]}, + {"type": "tuple", "values": [4.0, 0.0]}, + ], + }, + "values_at_sigma_1": { + "type": "list", + "values": [ + {"type": "tuple", "values": [0.0, 2.0]}, + {"type": "tuple", "values": [1.0, 4.0]}, + {"type": "tuple", "values": [2.0, 6.0]}, + ], + }, + "errors_at_sigma_1": { + "type": "list", + "values": [ + {"type": "tuple", "values": [2.0, 0.0]}, + {"type": "tuple", "values": [3.0, 0.0]}, + {"type": "tuple", "values": [4.0, 0.0]}, + ], + }, + "log_evidence": None, }, } From 23759667f04114efb8cdd92a645ac4d426740b8b Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 22 Jul 2024 14:30:58 +0100 Subject: [PATCH 019/149] readd model to samples summary when aggregating in database --- autofit/database/model/fit.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index e1cf0e78f..bde40c5b0 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -332,7 +332,10 @@ def __getitem__(self, item: str): """ for p in self.jsons + self.arrays + self.hdus + self.pickles: if p.name == item: - return p.value + value = p.value + if item == "samples_summary": + value.model = self.model + return value return getattr(self, item) From 8a9b6a774cc022102fbae3b382690f6888664aa3 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 22 Jul 2024 14:33:23 +0100 Subject: [PATCH 020/149] test --- test_autofit/database/test_regression.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test_autofit/database/test_regression.py b/test_autofit/database/test_regression.py index bae58d94e..19d90ac31 100644 --- a/test_autofit/database/test_regression.py +++ b/test_autofit/database/test_regression.py @@ -39,3 +39,12 @@ def 
test_model_with_parameterless_component(): def test_instance_in_collection(): collection = af.Collection(gaussian=af.Gaussian()) assert list(collection.items()) == [("gaussian", af.Gaussian())] + + +def test_samples_summary_model(): + fit = af.db.Fit() + model = af.Model(af.Gaussian) + fit["samples_summary"] = af.Samples(model=model, sample_list=[]) + fit.model = model + + assert fit["samples_summary"].model.cls == af.Gaussian From 34e91f1b57a12f3ec3c3553e4020f769425fb63f Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 22 Jul 2024 15:53:21 +0100 Subject: [PATCH 021/149] retain model in summary JSON so aggregator features work --- autofit/non_linear/samples/summary.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/autofit/non_linear/samples/summary.py b/autofit/non_linear/samples/summary.py index 3c9f59c74..920868549 100644 --- a/autofit/non_linear/samples/summary.py +++ b/autofit/non_linear/samples/summary.py @@ -12,8 +12,6 @@ class SamplesSummary(SamplesInterface): - __exclude_fields__ = ["model"] - def __init__( self, max_log_likelihood_sample: Sample, From b3ad3eb613963e686696e63b1dc17316ec601a14 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 23 Jul 2024 11:17:24 +0100 Subject: [PATCH 022/149] model cookbook doc --- docs/cookbooks/model.rst | 334 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 333 insertions(+), 1 deletion(-) diff --git a/docs/cookbooks/model.rst b/docs/cookbooks/model.rst index 1f9081d75..84590ccca 100644 --- a/docs/cookbooks/model.rst +++ b/docs/cookbooks/model.rst @@ -10,6 +10,8 @@ This cookbook provides an overview of basic model composition tools. **Contents:** +**Models:** + If first describes how to use the ``af.Model`` object to define models with a single model component from single Python classes, with the following sections: @@ -21,6 +23,8 @@ Python classes, with the following sections: - **Tuple Parameters (Model)**: Defining model components with parameters that are tuples. 
- **Json Output (Model)**: Output a model in human readable text via a .json file and loading it back again. +**Collections:** + It then describes how to use the ``af.Collection`` object to define models with many model components from multiple Python classes, with the following sections: @@ -31,6 +35,19 @@ Python classes, with the following sections: - **Json Output (Collection)**: Output a collection in human readable text via a .json file and loading it back again. - **Extensible Models (Collection)**: Using collections to extend models with new model components, including the use of Python dictionaries and lists. +**Arrays:** + +The cookbook next describes using NumPy arrays via tbe `af.Array` object to compose models, where each entry of the +array is a free parameters, therefore offering maximum flexibility with the number of free parameter. This has +the following sections: + + - **Model Composition (af.Array)**: Composing models using NumPy arrays and `af.Array`(). + - **Prior Customization (af.Array)**: How to customize the priors of a numpy array model. + - **Instances (af.Array)**: Create an instance of a numpy array model via input parameters. + - **Model Customization (af.Array):** Customize a numpy array model (e.g. fixing parameters or linking them to one another). + - **Json Output (af.Array)**: Output a numpy array model in human readable text via a .json file and loading it back again. + - **Extensible Models (af.Array)**: Using numpy arrays to compose models with a flexible number of parameters. + Python Class Template --------------------- @@ -718,7 +735,7 @@ Python dictionaries can easily be saved to hard disk as a ``.json`` file. This means we can save any **PyAutoFit** model to hard-disk. -Checkout the file ``autofit_workspace/*/model/jsons/model.json`` to see the model written as a .json. +Checkout the file ``autofit_workspace/*/model/jsons/collection.json`` to see the model written as a .json. .. 
code-block:: python @@ -910,6 +927,321 @@ This gives the following output: normalization (Gaussian) = 5.0 sigma (Gaussian) = 6.0 +Model Composition (af.Array) +---------------------------- + +Models can be composed using NumPy arrays, where each element of the array is a free parameter. + +This offers a lot more flexibility than using ``Model`` and ``Collection`` objects, as the number of parameters in the +model is chosen on initialization via the input of the ``shape`` attribute. + +For many use cases, this flexibility is key to ensuring model composition is as easy as possible, for example when +a part of the model being fitted is a matrix of parameters which may change shape depending on the dataset being +fitted. + +To compose models using NumPy arrays, we use the ``af.Array`` object. + +.. code-block:: python + + model = af.Array( + shape=(2, 2), + prior=af.GaussianPrior(mean=0.0, sigma=1.0), + ) + +Each element of the array is a free parameter, which for ``shape=(2,2)`` means the model has 4 free parameters. + +.. code-block:: python + + print(f"Model Total Free Parameters = {model.total_free_parameters}") + +The ``info`` attribute of the model gives information on all of the parameters and their priors. + +.. code-block:: python + + print(model.info) + +This gives the following output: + +.. code-block:: bash + + Total Free Parameters = 4 + + model Array (N=4) + indices list (N=0) + + shape (2, 2) + indices + 0 (0, 0) + 1 (0, 1) + 2 (1, 0) + 3 (1, 1) + prior_0_0 GaussianPrior [124], mean = 0.0, sigma = 1.0 + prior_0_1 GaussianPrior [125], mean = 0.0, sigma = 1.0 + prior_1_0 GaussianPrior [126], mean = 0.0, sigma = 1.0 + prior_1_1 GaussianPrior [127], mean = 0.0, sigma = 1.0 + +Prior Customization (af.Array) +------------------------------ + +The prior of every parameter in the array is set via the ``prior`` input above. + +NumPy array models do not currently support default priors via config files, so all priors must be manually specified. 
+ +The prior of every parameter in the array can be customized by normal NumPy array indexing: + +.. code-block:: python + + model = af.Array(shape=(2, 2), prior=af.GaussianPrior(mean=0.0, sigma=1.0)) + + model.array[0, 0] = af.UniformPrior(lower_limit=0.0, upper_limit=1.0) + model.array[0, 1] = af.LogUniformPrior(lower_limit=1e-4, upper_limit=1e4) + model.array[1, 0] = af.GaussianPrior(mean=0.0, sigma=2.0) + +The ``info`` attribute shows the customized priors. + +.. code-block:: python + + print(model.info) + +The output is as follows: + +.. code-block:: bash + + Total Free Parameters = 4 + + model Array (N=4) + indices list (N=0) + + shape (2, 2) + indices + 0 (0, 0) + 1 (0, 1) + 2 (1, 0) + 3 (1, 1) + prior_0_0 UniformPrior [133], lower_limit = 0.0, upper_limit = 1.0 + prior_0_1 LogUniformPrior [134], lower_limit = 0.0001, upper_limit = 10000.0 + prior_1_0 GaussianPrior [135], mean = 0.0, sigma = 2.0 + prior_1_1 GaussianPrior [132], mean = 0.0, sigma = 1.0 + +Instances (af.Array) +-------------------- + +Instances of numpy array model components can be created, where an input ``vector`` of parameters is mapped to create +an instance of the Python class of the model. + +If the priors of the numpy array are not customized, ordering of parameters goes from element [0,0] to [0,1] to [1,0], +as shown by the ``paths`` attribute. + +.. code-block:: python + + model = af.Array( + shape=(2, 2), + prior=af.GaussianPrior(mean=0.0, sigma=1.0), + ) + + print(model.paths) + +The output is as follows: + +.. code-block:: bash + + ['prior_0_0', 'prior_0_1', 'prior_1_0', 'prior_1_1'] + +An instance can then be created by passing a vector of parameters to the model via the ``instance_from_vector`` method. + +The ``instance`` created is a NumPy array, where each element is the value passed in the vector. + +.. code-block:: python + + instance = model.instance_from_vector(vector=[0.0, 1.0, 2.0, 3.0]) + + print("\nModel Instance:") + print(instance) + +The output is as follows: + +.. 
code-block:: bash + + Model Instance: + [[0. 1.] + [2. 3.]] + +Prior customization changes the order of the parameters, therefore if you customize the priors of the numpy +array you must check the ordering of the parameters in the ``paths`` attribute before passing a vector to +the ``instance_from_vector`` + + +.. code-block:: python + + model[0, 0] = af.UniformPrior(lower_limit=0.0, upper_limit=1.0) + model[0, 1] = af.LogUniformPrior(lower_limit=1e-4, upper_limit=1e4) + model[1, 0] = af.GaussianPrior(mean=0.0, sigma=2.0) + + print(model.paths) + +The output is as follows: + +.. code-block:: bash + + [('prior_1_1',), ('prior_0_0',), ('prior_0_1',), ('prior_1_0',)] + +If we create a vector and print its values from this customized model: + +.. code-block:: python + + instance = model.instance_from_vector(vector=[0.0, 1.0, 2.0, 3.0]) + + print("\nModel Instance:") + print(instance) + +The output is as follows: + +.. code-block:: bash + + Model Instance: + [[1. 2.] + [3. 0.]] + +Model Customization (af.Array) +------------------------------ + +The model customization API for numpy array models is the same as for ``af.Model`` and ``af.Collection`` objects. + +.. code-block:: python + + model = af.Array( + shape=(2, 2), + prior=af.GaussianPrior(mean=0.0, sigma=1.0), + ) + + model[0,0] = 50.0 + model[0,1] = model[1,0] + model.add_assertion(model[1,1] > 0.0) + + print(model.info) + +The output is as follows: + +.. code-block:: bash + Total Free Parameters = 2 + + model Array (N=2) + indices list (N=0) + + shape (2, 2) + indices + 0 (0, 0) + 1 (0, 1) + 2 (1, 0) + 3 (1, 1) + prior_0_0 50.0 + prior_0_1 - prior_1_0 GaussianPrior [147], mean = 0.0, sigma = 1.0 + prior_1_1 GaussianPrior [148], mean = 0.0, sigma = 1.0 + + +JSon Outputs (af.Array) +------------------------ + +An ``Array`` has a ``dict`` attribute, which express all information about the model as a Python dictionary. + +By printing this dictionary we can therefore get a concise summary of the model. + +.. 
code-block:: python + + model = af.Array( + shape=(2, 2), + prior=af.GaussianPrior(mean=0.0, sigma=1.0), + ) + + print(model.dict()) + +Python dictionaries can easily be saved to hard disk as a ``.json`` file. + +This means we can save any **PyAutoFit** model to hard-disk. + +Checkout the file ``autofit_workspace/*/model/jsons/array.json`` to see the model written as a .json. + +.. code-block:: python + + model_path = path.join("scripts", "model", "jsons") + + os.makedirs(model_path, exist_ok=True) + + model_file = path.join(model_path, "array.json") + + with open(model_file, "w+") as f: + json.dump(model.dict(), f, indent=4) + +We can load the model from its ``.json`` file, meaning that one can easily save a model to hard disk and load it +elsewhere. + +.. code-block:: python + + model = af.Array.from_json(file=model_file) + + print(f"\n Model via Json Prior Count = {model.prior_count}") + +Extensible Models (af.Array) +---------------------------- + +For ``Model`` objects, the number of parameters is fixed to those listed in the input Python class when the model is +created. + +For ``Collection`` objects, the use of dictionaries and lists allows for the number of parameters to be extended, but it +was still tied to the input Python classes when the model was created. + +For ``Array`` objects, the number of parameters is fully customizable, you choose the shape of the array and therefore +the number of parameters in the model when you create it. + +This makes ``Array`` objects the most extensible and flexible way to compose models. + +You can also combine ``Array`` objects with ``Collection`` objects to create models with a mix of fixed and extensible +parameters. + +.. code-block:: python + + model = af.Collection( + gaussian=Gaussian, + array=af.Array(shape=(3, 2), prior=af.GaussianPrior(mean=0.0, sigma=1.0)) + ) + + model.gaussian.sigma = 2.0 + model.array[0, 0] = 1.0 + + print(model.info) + +The output is as follows: + +.. 
code-block:: python + + Total Free Parameters = 7 + + model Collection (N=7) + gaussian Gaussian (N=2) + array Array (N=5) + indices list (N=0) + + gaussian + centre UniformPrior [165], lower_limit = 0.0, upper_limit = 100.0 + normalization LogUniformPrior [166], lower_limit = 1e-06, upper_limit = 1000000.0 + sigma 2.0 + array + shape (3, 2) + indices + 0 (0, 0) + 1 (0, 1) + 2 (1, 0) + 3 (1, 1) + 4 (2, 0) + 5 (2, 1) + prior_0_0 1.0 + prior_0_1 GaussianPrior [160], mean = 0.0, sigma = 1.0 + prior_1_0 GaussianPrior [161], mean = 0.0, sigma = 1.0 + prior_1_1 GaussianPrior [162], mean = 0.0, sigma = 1.0 + prior_2_0 GaussianPrior [163], mean = 0.0, sigma = 1.0 + prior_2_1 GaussianPrior [164], mean = 0.0, sigma = 1.0 + + Wrap Up ------- From 526f4c8cc7ebc3c0b35451a79625bde4ba17b0d9 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 23 Jul 2024 11:28:15 +0100 Subject: [PATCH 023/149] docs --- docs/overview/scientific_workflow.rst | 2 +- docs/overview/the_basics.rst | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/overview/scientific_workflow.rst b/docs/overview/scientific_workflow.rst index f6f1ed9be..ff670803a 100644 --- a/docs/overview/scientific_workflow.rst +++ b/docs/overview/scientific_workflow.rst @@ -525,7 +525,7 @@ For simpler scenarios, adjustments might include: In more intricate cases, models might involve numerous parameters and complex compositions of multiple model components. **PyAutoFit** offers a sophisticated model composition API designed to handle these complexities. It provides -tools for constructing elaborate models using lists of Python classes and hierarchical structures of Python classes. +tools for constructing elaborate models using lists of Python classes, NumPy arrays and hierarchical structures of Python classes. 
For a detailed exploration of these capabilities, you can refer to the `model cookbook `_, which provides comprehensive diff --git a/docs/overview/the_basics.rst b/docs/overview/the_basics.rst index a67e9302c..da3d213d5 100644 --- a/docs/overview/the_basics.rst +++ b/docs/overview/the_basics.rst @@ -298,13 +298,13 @@ Analysis We now tell **PyAutoFit** how to fit the model to the data. -We define an `Analysis` class, which includes: +We define an ``Analysis`` class, which includes: -- An `__init__` constructor that takes `data` and `noise_map` as inputs (this can be extended with additional elements necessary for fitting the model to the data). +- An ``__init__`` constructor that takes ``data`` and ``noise_map`` as inputs (this can be extended with additional elements necessary for fitting the model to the data). -- A `log_likelihood_function` that defines how to fit an `instance` of the model to the data and return a log likelihood value. +- A ``log_likelihood_function`` that defines how to fit an ``instance`` of the model to the data and return a log likelihood value. -Read the comments and docstrings of the `Analysis` class in detail for a full description of how the analysis works. +Read the comments and docstrings of the ``Analysis`` class in detail for a full description of how the analysis works. .. code-block:: python @@ -623,7 +623,7 @@ examples demonstrating more complex model-fitting tasks. This includes cookbooks, which provide a concise reference guide to the **PyAutoFit** API for advanced model-fitting: -- [Model Cookbook](https://pyautofit.readthedocs.io/en/latest/cookbooks/model.html): Learn how to compose complex models using multiple Python classes, lists, dictionaries, and customize their parameterization. +- [Model Cookbook](https://pyautofit.readthedocs.io/en/latest/cookbooks/model.html): Learn how to compose complex models using multiple Python classes, lists, dictionaries, NumPy arrays and customize their parameterization. 
- [Analysis Cookbook](https://pyautofit.readthedocs.io/en/latest/cookbooks/search.html): Customize the analysis with model-specific output and visualization to gain deeper insights into your model fits. From b7b94729e9c14202cb2e55bb2815d6be8014b776 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 23 Jul 2024 16:51:19 +0100 Subject: [PATCH 024/149] physical values added to SensitivityResult --- .../non_linear/grid/sensitivity/__init__.py | 1 + autofit/non_linear/grid/sensitivity/result.py | 6 +++++- .../pickles/settings.pickle | Bin 1443 -> 1444 bytes 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 90c6107b1..3efd447c5 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -147,6 +147,7 @@ def run(self) -> SensitivityResult: perturb_samples=[ result.perturb_result.samples_summary for result in results ], + physical_values=physical_values, shape=self.shape, ) diff --git a/autofit/non_linear/grid/sensitivity/result.py b/autofit/non_linear/grid/sensitivity/result.py index 2b1a78093..58ebd728e 100644 --- a/autofit/non_linear/grid/sensitivity/result.py +++ b/autofit/non_linear/grid/sensitivity/result.py @@ -1,4 +1,4 @@ -from typing import List, Tuple +from typing import List, Tuple, Union from autofit.non_linear.grid.grid_list import GridList, as_grid_list from autofit.non_linear.samples.interface import SamplesInterface @@ -9,6 +9,7 @@ def __init__( self, samples: List[SamplesInterface], perturb_samples: List[SamplesInterface], + physical_values : Union[List, GridList], shape : Tuple[int, ...] ): """ @@ -18,11 +19,14 @@ def __init__( ---------- results The results of each sensitivity job. + physical_values + A list of lists of values representing the physical values of the sensitivity grid values. shape The shape of the sensitivity mapping grid. 
""" self.samples = GridList(samples, shape) self.perturb_samples = GridList(perturb_samples, shape) + self.physical_values = GridList(physical_values, shape) self.shape = shape def __getitem__(self, item): diff --git a/test_autofit/database/mass_sie__source_sersic/phase_mass[sie]_source[bulge]/settings__imaging[grid_sub_2]__lens[pos_off]/dynesty_static[nlive_50__bound_multi_vol_dec_0.5_vol_check_2.0__enlarge_1.0__sample_rwalk_walks_5_facc_0.2]/pickles/settings.pickle b/test_autofit/database/mass_sie__source_sersic/phase_mass[sie]_source[bulge]/settings__imaging[grid_sub_2]__lens[pos_off]/dynesty_static[nlive_50__bound_multi_vol_dec_0.5_vol_check_2.0__enlarge_1.0__sample_rwalk_walks_5_facc_0.2]/pickles/settings.pickle index 370367a0d9d7934cd49acbdd5be99888fd284036..b24dd9470be8de3a07ef351ce79df164f9beecba 100644 GIT binary patch delta 15 WcmZ3?y@Y#%JtI@G-ew2JWEKD>vII;3 delta 14 VcmZ3&y_kE0JtJfMW=F Date: Wed, 24 Jul 2024 20:08:06 +0100 Subject: [PATCH 025/149] x0 input supported --- .../search/optimize/lbfgs/search.py | 34 ++++++++++++------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/autofit/non_linear/search/optimize/lbfgs/search.py b/autofit/non_linear/search/optimize/lbfgs/search.py index 963160d0e..3c2e49fd7 100644 --- a/autofit/non_linear/search/optimize/lbfgs/search.py +++ b/autofit/non_linear/search/optimize/lbfgs/search.py @@ -27,6 +27,7 @@ def __init__( initializer: Optional[AbstractInitializer] = None, iterations_per_update: int = None, session: Optional[sa.orm.Session] = None, + x0: Optional[np.ndarray] = None, **kwargs ): """ @@ -66,6 +67,8 @@ def __init__( **kwargs ) + self.x0 = x0 + self.logger.debug("Creating LBFGS Search") @cached_property @@ -122,19 +125,26 @@ def _fit( ) except (FileNotFoundError, TypeError): - ( - unit_parameter_lists, - parameter_lists, - log_posterior_list, - ) = self.initializer.samples_from_model( - total_points=1, - model=model, - fitness=fitness, - paths=self.paths, - n_cores=self.number_of_cores, - ) - 
x0 = np.asarray(parameter_lists[0]) + if self.x0 is None: + + ( + unit_parameter_lists, + parameter_lists, + log_posterior_list, + ) = self.initializer.samples_from_model( + total_points=1, + model=model, + fitness=fitness, + paths=self.paths, + n_cores=self.number_of_cores, + ) + + x0 = np.asarray(parameter_lists[0]) + + else: + + x0 = self.x0 total_iterations = 0 From 9052e9609075c90c664a8f7f52284624987404d6 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 26 Jul 2024 16:30:51 +0100 Subject: [PATCH 026/149] added history storage to fitness, enabling MLE plot --- autofit/config/visualize/plots_search.yaml | 2 +- autofit/non_linear/fitness.py | 17 ++++++ autofit/non_linear/plot/optimize_plotters.py | 20 ++++++- autofit/non_linear/plot/samples_plotters.py | 1 + .../search/optimize/abstract_optimize.py | 8 +-- .../search/optimize/lbfgs/search.py | 53 ++++++++++++++----- 6 files changed, 83 insertions(+), 18 deletions(-) diff --git a/autofit/config/visualize/plots_search.yaml b/autofit/config/visualize/plots_search.yaml index b6e7331bb..b34172f27 100644 --- a/autofit/config/visualize/plots_search.yaml +++ b/autofit/config/visualize/plots_search.yaml @@ -3,4 +3,4 @@ nest: mcmc: corner_cornerpy: true # Output corner figure (using corner.py) during a non-linear search fit? optimize: - corner_cornerpy: true # Output corner figure (using corner.py) during a non-linear search fit? \ No newline at end of file + log_likelihood_vs_iteration: true # Output a plot of the log likelihood versus iteration number? 
\ No newline at end of file diff --git a/autofit/non_linear/fitness.py b/autofit/non_linear/fitness.py index df954d53e..6209b9553 100644 --- a/autofit/non_linear/fitness.py +++ b/autofit/non_linear/fitness.py @@ -35,6 +35,7 @@ def __init__( fom_is_log_likelihood: bool = True, resample_figure_of_merit: float = -np.inf, convert_to_chi_squared: bool = False, + store_history: bool = False, ): """ Interfaces with any non-linear search to fit the model to the data and return a log likelihood via @@ -67,6 +68,11 @@ def __init__( instead. The appropriate value depends on the search, but is typically either `None`, `-np.inf` or `1.0e99`. All values indicate to the non-linear search that the model-fit should be resampled or ignored. + Many searches do not store the history of the parameters and log likelihood values, often to save + memory on large model-fits. However, this can be useful, for example to plot the results of a model-fit + versus iteration number. If the `store_history` bool is `True`, the parameters and log likelihoods are stored + in the `parameters_history_list` and `figure_of_merit_history_list` attribute of the fitness object. + Parameters ---------- analysis @@ -86,6 +92,8 @@ def __init__( convert_to_chi_squared If `True`, the figure of merit returned is the log likelihood multiplied by -2.0, such that it is a chi-squared value that is minimized. + store_history + If `True`, the parameters and log likelihood values of every model-fit are stored in lists. 
""" self.analysis = analysis @@ -99,6 +107,10 @@ def __init__( if self.paths is not None: self.check_log_likelihood(fitness=self) + self.store_history = store_history + self.parameters_history_list = [] + self.log_likelihood_history_list = [] + def __getstate__(self): state = self.__dict__.copy() del state["_log_likelihood_function"] @@ -154,6 +166,11 @@ def __call__(self, parameters, *kwargs): log_prior_list = self.model.log_prior_list_from_vector(vector=parameters) figure_of_merit = log_likelihood + sum(log_prior_list) + if self.store_history: + + self.parameters_history_list.append(parameters) + self.log_likelihood_history_list.append(log_likelihood) + if self.convert_to_chi_squared: figure_of_merit *= -2.0 diff --git a/autofit/non_linear/plot/optimize_plotters.py b/autofit/non_linear/plot/optimize_plotters.py index a4a6b8a31..fe0399aed 100644 --- a/autofit/non_linear/plot/optimize_plotters.py +++ b/autofit/non_linear/plot/optimize_plotters.py @@ -4,9 +4,27 @@ from autofit.non_linear.plot.samples_plotters import SamplesPlotter +from autofit.non_linear.plot.samples_plotters import skip_plot_in_test_mode + logger = logging.getLogger(__name__) class OptimizePlotter(SamplesPlotter): - pass + @skip_plot_in_test_mode + def log_likelihood_vs_iteration(self, **kwargs): + """ + Plot the log likelihood of a model fit to a dataset over the course of an optimization. 
+ """ + + log_likelihood_list = self.samples.log_likelihood_list + iteration_list = range(len(log_likelihood_list)) + plt.figure() + plt.plot(iteration_list, log_likelihood_list, c="k") + plt.xlabel("Iteration") + plt.ylabel("Log Likelihood") + plt.title("Log Likelihood vs Iteration") + self.output.to_figure( + auto_filename="log_likelihood_vs_iteration", + ) + plt.close() diff --git a/autofit/non_linear/plot/samples_plotters.py b/autofit/non_linear/plot/samples_plotters.py index 37949ec14..885f01ef1 100644 --- a/autofit/non_linear/plot/samples_plotters.py +++ b/autofit/non_linear/plot/samples_plotters.py @@ -71,6 +71,7 @@ def log_posterior_list(self): def close(self): if plt.fignum_exists(num=1): + plt.clf() plt.close() def log_plot_exception(self, plot_name : str): diff --git a/autofit/non_linear/search/optimize/abstract_optimize.py b/autofit/non_linear/search/optimize/abstract_optimize.py index ad8a7db30..93c58b065 100644 --- a/autofit/non_linear/search/optimize/abstract_optimize.py +++ b/autofit/non_linear/search/optimize/abstract_optimize.py @@ -25,7 +25,9 @@ def plot_results(self, samples): def should_plot(name): return conf.instance["visualize"]["plots_search"]["optimize"][name] - plotter = OptimizePlotter( + plotter = self.plotter_cls( samples=samples, - output=Output(path=self.paths.image_path / "search", format="png") - ) \ No newline at end of file + output=Output(path=self.paths.image_path / "search", format="png"), + ) + if should_plot("log_likelihood_vs_iteration"): + plotter.log_likelihood_vs_iteration() \ No newline at end of file diff --git a/autofit/non_linear/search/optimize/lbfgs/search.py b/autofit/non_linear/search/optimize/lbfgs/search.py index 3c2e49fd7..3f557203c 100644 --- a/autofit/non_linear/search/optimize/lbfgs/search.py +++ b/autofit/non_linear/search/optimize/lbfgs/search.py @@ -27,7 +27,7 @@ def __init__( initializer: Optional[AbstractInitializer] = None, iterations_per_update: int = None, session: Optional[sa.orm.Session] = None, - 
x0: Optional[np.ndarray] = None, + visualize : bool = False, **kwargs ): """ @@ -40,6 +40,15 @@ def __init__( If you use `LBFGS` as part of a published work, please cite the package via scipy following the instructions under the *Attribution* section of the GitHub page. + By default, the L-BFGS method scipy implementation does not store the history of parameter values and + log likelihood values during the non-linear search. This is because storing these values can require a large + amount of memory, in contradiction to the L-BFGS method's primary advantage of being memory efficient. + This means that it is difficult to visualize the L-BFGS method results (e.g. log likelihood vs iteration). + + **PyAutoFit** extends the class with the option of using visualize mode, which stores the history of parameter + values and log likelihood values during the non-linear search. This allows the results of the L-BFGS method to be + visualized after the search has completed, and it is enabled by setting the `visualize` flag to `True`. + Parameters ---------- name @@ -55,6 +64,9 @@ def __init__( The number of cores sampling is performed using a Python multiprocessing Pool instance. session An SQLalchemy session instance so the results of the model-fit are written to an SQLite database. + visualize + If True, visualization of the search is enabled, which requires storing the history of parameter values and + log likelihood values during the non-linear search. 
""" super().__init__( @@ -67,7 +79,7 @@ def __init__( **kwargs ) - self.x0 = x0 + self.visualize = visualize self.logger.debug("Creating LBFGS Search") @@ -112,6 +124,7 @@ def _fit( fom_is_log_likelihood=False, resample_figure_of_merit=-np.inf, convert_to_chi_squared=True, + store_history=self.visualize, ) try: @@ -126,7 +139,11 @@ def _fit( except (FileNotFoundError, TypeError): - if self.x0 is None: + if "x0" in self.kwargs: + + x0 = self.kwargs["x0"] + + else: ( unit_parameter_lists, @@ -142,10 +159,6 @@ def _fit( x0 = np.asarray(parameter_lists[0]) - else: - - x0 = self.x0 - total_iterations = 0 self.logger.info( @@ -177,6 +190,11 @@ def _fit( parameters=search_internal.x ) + if self.visualize: + + search_internal.parameters_history_list = fitness.parameters_history_list + search_internal.log_likelihood_history_list = fitness.log_likelihood_history_list + self.paths.save_search_internal( obj=search_internal, ) @@ -220,14 +238,23 @@ def samples_via_internal_from( x0 = search_internal.x total_iterations = search_internal.nit - log_posterior_list = np.array([search_internal.log_posterior_list]) - parameter_lists = [list(x0)] - log_prior_list = model.log_prior_list_from(parameter_lists=parameter_lists) - log_likelihood_list = [ - lp - prior for lp, prior in zip(log_posterior_list, log_prior_list) - ] + if self.visualize: + + parameter_lists = search_internal.parameters_history_list + log_prior_list = model.log_prior_list_from(parameter_lists=parameter_lists) + log_likelihood_list = search_internal.log_likelihood_history_list + + else: + + parameter_lists = [list(x0)] + log_prior_list = model.log_prior_list_from(parameter_lists=parameter_lists) + log_posterior_list = np.array([search_internal.log_posterior_list]) + log_likelihood_list = [ + lp - prior for lp, prior in zip(log_posterior_list, log_prior_list) + ] + weight_list = len(log_likelihood_list) * [1.0] sample_list = Sample.from_lists( From 246ce9b05421a580d26658b58396513fcd97cf30 Mon Sep 17 00:00:00 2001 
From: James Nightingale Date: Fri, 26 Jul 2024 16:49:23 +0100 Subject: [PATCH 027/149] imrpvoed visuyalization --- autofit/non_linear/plot/optimize_plotters.py | 58 ++++++++++++++++--- .../search/optimize/abstract_optimize.py | 6 +- 2 files changed, 55 insertions(+), 9 deletions(-) diff --git a/autofit/non_linear/plot/optimize_plotters.py b/autofit/non_linear/plot/optimize_plotters.py index fe0399aed..1769096d5 100644 --- a/autofit/non_linear/plot/optimize_plotters.py +++ b/autofit/non_linear/plot/optimize_plotters.py @@ -11,20 +11,62 @@ class OptimizePlotter(SamplesPlotter): @skip_plot_in_test_mode - def log_likelihood_vs_iteration(self, **kwargs): + def log_likelihood_vs_iteration(self, use_log_y : bool = False, use_last_50_percent : bool = False, **kwargs): """ - Plot the log likelihood of a model fit to a dataset over the course of an optimization. + Plot the log likelihood of a model fit as a function of iteration number. + + For a maximum likelihood estimate, the log likelihood should increase with iteration number. + + This often produces a large dynamic range in the y-axis, such that plotting the y-axis on a log-scale can be + useful to see the full range of values. + + Parameters + ---------- + use_log_y + If True, the y-axis is plotted on a log-scale. 
""" log_likelihood_list = self.samples.log_likelihood_list iteration_list = range(len(log_likelihood_list)) - plt.figure() - plt.plot(iteration_list, log_likelihood_list, c="k") - plt.xlabel("Iteration") - plt.ylabel("Log Likelihood") - plt.title("Log Likelihood vs Iteration") + if use_last_50_percent: + + iteration_list = iteration_list[int(len(iteration_list) / 2) :] + log_likelihood_list = log_likelihood_list[int(len(log_likelihood_list) / 2) :] + + plt.figure(figsize=(12, 12)) + + if use_log_y: + plt.semilogy(iteration_list, log_likelihood_list, c="k") + else: + plt.plot(iteration_list, log_likelihood_list, c="k") + + plt.xlabel("Iteration", fontsize=16) + plt.ylabel("Log Likelihood", fontsize=16) + plt.xticks(fontsize=16) + plt.yticks(fontsize=16) + + title = "Log Likelihood vs Iteration" + + if use_log_y: + + title += " (Log Scale)" + + if use_last_50_percent: + + title += " (Last 50 Percent)" + + plt.title("Log Likelihood vs Iteration", fontsize=24) + + filename = "log_likelihood_vs_iteration" + + if use_log_y: + filename += "_log_y" + + if use_last_50_percent: + filename += "_last_50_percent" + self.output.to_figure( - auto_filename="log_likelihood_vs_iteration", + auto_filename=filename, ) plt.close() diff --git a/autofit/non_linear/search/optimize/abstract_optimize.py b/autofit/non_linear/search/optimize/abstract_optimize.py index 93c58b065..3f61a9af9 100644 --- a/autofit/non_linear/search/optimize/abstract_optimize.py +++ b/autofit/non_linear/search/optimize/abstract_optimize.py @@ -29,5 +29,9 @@ def should_plot(name): samples=samples, output=Output(path=self.paths.image_path / "search", format="png"), ) + if should_plot("log_likelihood_vs_iteration"): - plotter.log_likelihood_vs_iteration() \ No newline at end of file + + plotter.log_likelihood_vs_iteration() + plotter.log_likelihood_vs_iteration(use_log_y=True) + plotter.log_likelihood_vs_iteration(use_last_50_percent=True) \ No newline at end of file From 968bdd10e9e32b9b9b5705c19f3d15b382bf6983 Mon 
Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 26 Jul 2024 17:10:25 +0100 Subject: [PATCH 028/149] MLE plotting now works --- autofit/config/visualize/plots_search.yaml | 1 + autofit/non_linear/plot/optimize_plotters.py | 66 ++++++++++++++++++- .../search/optimize/abstract_optimize.py | 6 ++ 3 files changed, 71 insertions(+), 2 deletions(-) diff --git a/autofit/config/visualize/plots_search.yaml b/autofit/config/visualize/plots_search.yaml index b34172f27..d6f9197db 100644 --- a/autofit/config/visualize/plots_search.yaml +++ b/autofit/config/visualize/plots_search.yaml @@ -3,4 +3,5 @@ nest: mcmc: corner_cornerpy: true # Output corner figure (using corner.py) during a non-linear search fit? optimize: + subplot_parameters: true # Output a subplot of the best-fit parameters of the model? log_likelihood_vs_iteration: true # Output a plot of the log likelihood versus iteration number? \ No newline at end of file diff --git a/autofit/non_linear/plot/optimize_plotters.py b/autofit/non_linear/plot/optimize_plotters.py index 1769096d5..36597e92d 100644 --- a/autofit/non_linear/plot/optimize_plotters.py +++ b/autofit/non_linear/plot/optimize_plotters.py @@ -10,6 +10,68 @@ class OptimizePlotter(SamplesPlotter): + def subplot_parameters(self, use_log_y : bool = False, use_last_50_percent : bool = False, **kwargs): + """ + Plots a subplot of every parameter against iteration number. + + The subplot extends over all free parameters in the model-fit, with the number of parameters per subplot + given by the total number of free parameters in the model-fit. + + This often produces a large dynamic range in the y-axis. Plotting the y-axis on a log-scale or only + plotting the last 50% of samples can make the plot easier to inspect. + + Parameters + ---------- + use_log_y + If True, the y-axis is plotted on a log-scale. + use_last_50_percent + If True, only the last 50% of samples are plotted. 
+ kwargs + Additional key word arguments can be passed to the `plt.subplots` method. + + Returns + ------- + + """ + + parameter_lists = self.samples.parameters_extract + + plt.subplots(self.model.total_free_parameters, 1, figsize=(12, 3 * len(parameter_lists))) + + for i, parameters in enumerate(parameter_lists): + + iteration_list = range(len(parameter_lists[0])) + + plt.subplot(self.model.total_free_parameters, 1, i + 1) + + if use_last_50_percent: + + iteration_list = iteration_list[int(len(iteration_list) / 2) :] + parameters = parameters[int(len(parameters) / 2) :] + + if use_log_y: + plt.semilogy(iteration_list, parameters, c="k") + else: + plt.plot(iteration_list, parameters, c="k") + + plt.xlabel("Iteration", fontsize=16) + plt.ylabel(self.model.parameter_labels_with_superscripts_latex[i], fontsize=16) + plt.xticks(fontsize=16) + plt.yticks(fontsize=16) + + filename = "subplot_parameters" + + if use_log_y: + filename += "_log_y" + + if use_last_50_percent: + filename += "_last_50_percent" + + self.output.subplot_to_figure( + auto_filename=filename + ) + plt.close() + @skip_plot_in_test_mode def log_likelihood_vs_iteration(self, use_log_y : bool = False, use_last_50_percent : bool = False, **kwargs): """ @@ -17,8 +79,8 @@ def log_likelihood_vs_iteration(self, use_log_y : bool = False, use_last_50_perc For a maximum likelihood estimate, the log likelihood should increase with iteration number. - This often produces a large dynamic range in the y-axis, such that plotting the y-axis on a log-scale can be - useful to see the full range of values. + This often produces a large dynamic range in the y-axis. Plotting the y-axis on a log-scale or only + plotting the last 50% of samples can make the plot easier to inspect. 
Parameters ---------- diff --git a/autofit/non_linear/search/optimize/abstract_optimize.py b/autofit/non_linear/search/optimize/abstract_optimize.py index 3f61a9af9..de069692f 100644 --- a/autofit/non_linear/search/optimize/abstract_optimize.py +++ b/autofit/non_linear/search/optimize/abstract_optimize.py @@ -30,6 +30,12 @@ def should_plot(name): output=Output(path=self.paths.image_path / "search", format="png"), ) + if should_plot("subplot_parameters"): + + plotter.subplot_parameters() + plotter.subplot_parameters(use_log_y=True) + plotter.subplot_parameters(use_last_50_percent=True) + if should_plot("log_likelihood_vs_iteration"): plotter.log_likelihood_vs_iteration() From 5672d726386ac4579b2ba8bb7d761cc5bd2a7dc4 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 26 Jul 2024 17:17:57 +0100 Subject: [PATCH 029/149] split LBFGS into LBFGS and BFGS --- autofit/__init__.py | 3 +- .../optimize/{lbfgs => bfgs}/__init__.py | 0 .../search/optimize/{lbfgs => bfgs}/search.py | 139 +++++++++++++----- 3 files changed, 101 insertions(+), 41 deletions(-) rename autofit/non_linear/search/optimize/{lbfgs => bfgs}/__init__.py (100%) rename autofit/non_linear/search/optimize/{lbfgs => bfgs}/search.py (56%) diff --git a/autofit/__init__.py b/autofit/__init__.py index 4852f150a..4ee1c37b9 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -75,7 +75,8 @@ from .non_linear.search.nest.dynesty.search.static import DynestyStatic from .non_linear.search.nest.ultranest.search import UltraNest from .non_linear.search.optimize.drawer.search import Drawer -from .non_linear.search.optimize.lbfgs.search import LBFGS +from .non_linear.search.optimize.bfgs.search import BFGS +from .non_linear.search.optimize.bfgs.search import LBFGS from .non_linear.search.optimize.pyswarms.search.globe import PySwarmsGlobal from .non_linear.search.optimize.pyswarms.search.local import PySwarmsLocal from .non_linear.paths.abstract import AbstractPaths diff --git 
a/autofit/non_linear/search/optimize/lbfgs/__init__.py b/autofit/non_linear/search/optimize/bfgs/__init__.py similarity index 100% rename from autofit/non_linear/search/optimize/lbfgs/__init__.py rename to autofit/non_linear/search/optimize/bfgs/__init__.py diff --git a/autofit/non_linear/search/optimize/lbfgs/search.py b/autofit/non_linear/search/optimize/bfgs/search.py similarity index 56% rename from autofit/non_linear/search/optimize/lbfgs/search.py rename to autofit/non_linear/search/optimize/bfgs/search.py index 3f557203c..0f6e4aa5a 100644 --- a/autofit/non_linear/search/optimize/lbfgs/search.py +++ b/autofit/non_linear/search/optimize/bfgs/search.py @@ -16,9 +16,11 @@ import numpy as np -class LBFGS(AbstractOptimizer): +class AbstractBFGS(AbstractOptimizer): __identifier_fields__ = () + method = None + def __init__( self, name: Optional[str] = None, @@ -31,42 +33,9 @@ def __init__( **kwargs ): """ - A L-BFGS scipy non-linear search. - - For a full description of the scipy L-BFGS method, checkout its documentation: - - https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html - - If you use `LBFGS` as part of a published work, please cite the package via scipy following the instructions - under the *Attribution* section of the GitHub page. - - By default, the L-BFGS method scipy implementation does not store the history of parameter values and - log likelihood values during the non-linear search. This is because storing these values can require a large - amount of memory, in contradiction to the L-BFGS method's primary advantage of being memory efficient. - This means that it is difficult to visualize the L-BFGS method results (e.g. log likelihood vs iteration). + Abstract wrapper for the BFGS and L-BFGS scipy non-linear searches. - **PyAutoFit** extends the class with the option of using visualize mode, which stores the history of parameter - values and log likelihood values during the non-linear search. 
This allows the results of the L-BFGS method to be - visualized after the search has completed, and it is enabled by setting the `visualize` flag to `True`. - - Parameters - ---------- - name - The name of the search, controlling the last folder results are output. - path_prefix - The path of folders prefixing the name folder where results are output. - unique_tag - The name of a unique tag for this model-fit, which will be given a unique entry in the sqlite database - and also acts as the folder after the path prefix and before the search name. - initializer - Generates the initialize samples of non-linear parameter space (see autofit.non_linear.initializer). - number_of_cores: int - The number of cores sampling is performed using a Python multiprocessing Pool instance. - session - An SQLalchemy session instance so the results of the model-fit are written to an SQLite database. - visualize - If True, visualization of the search is enabled, which requires storing the history of parameter values and - log likelihood values during the non-linear search. + See the docstrings of the `BFGS` and `LBFGS` classes for a description of the arguments of this class. """ super().__init__( @@ -81,7 +50,7 @@ def __init__( self.visualize = visualize - self.logger.debug("Creating LBFGS Search") + self.logger.debug(f"Creating {self.method} Search") @cached_property def config_dict_options(self): @@ -162,7 +131,7 @@ def _fit( total_iterations = 0 self.logger.info( - "Starting new LBFGS non-linear search (no previous samples found)." + f"Starting new {self.method} non-linear search (no previous samples found)." 
) maxiter = self.config_dict_options.get("maxiter", 1e8) @@ -179,7 +148,7 @@ def _fit( search_internal = optimize.minimize( fun=fitness.__call__, x0=x0, - method="L-BFGS-B", + method=self.method, options=config_dict_options, **self.config_dict_search ) @@ -211,7 +180,7 @@ def _fit( search_internal=search_internal, ) - self.logger.info("L-BFGS sampling complete.") + self.logger.info(f"{self.method} sampling complete.") return search_internal @@ -275,3 +244,93 @@ def samples_via_internal_from( sample_list=sample_list, samples_info=samples_info, ) + + +class BFGS(AbstractBFGS): + """ + The BFGS non-linear search, which wraps the scipy Broyden-Fletcher-Goldfarb-Shanno (BFGS) algorithm. + + See the docstrings of the `BFGS` and `LBFGS` classes for a description of the arguments of this class. + + For a full description of the scipy BFGS method, checkout its documentation: + + https://docs.scipy.org/doc/scipy/reference/optimize.minimize-bfgs.html#optimize-minimize-bfgs + + If you use `BFGS` as part of a published work, please cite the package via scipy following the instructions + under the *Attribution* section of the GitHub page. + + By default, the BFGS method scipy implementation does not store the history of parameter values and + log likelihood values during the non-linear search. This is because storing these values can require a large + amount of memory, in contradiction to the BFGS method's primary advantage of being memory efficient. + This means that it is difficult to visualize the BFGS method results (e.g. log likelihood vs iteration). + + **PyAutoFit** extends the class with the option of using visualize mode, which stores the history of parameter + values and log likelihood values during the non-linear search. This allows the results of the BFGS method to be + visualized after the search has completed, and it is enabled by setting the `visualize` flag to `True`. 
+ + Parameters + ---------- + name + The name of the search, controlling the last folder results are output. + path_prefix + The path of folders prefixing the name folder where results are output. + unique_tag + The name of a unique tag for this model-fit, which will be given a unique entry in the sqlite database + and also acts as the folder after the path prefix and before the search name. + initializer + Generates the initialize samples of non-linear parameter space (see autofit.non_linear.initializer). + number_of_cores: int + The number of cores sampling is performed using a Python multiprocessing Pool instance. + session + An SQLalchemy session instance so the results of the model-fit are written to an SQLite database. + visualize + If True, visualization of the search is enabled, which requires storing the history of parameter values and + log likelihood values during the non-linear search. + """ + + method = "BFGS" + +class LBFGS(AbstractBFGS): + """ + The L-BFGS non-linear search, which wraps the scipy Limited-memory Broyden-Fletcher-Goldfarb-Shanno (L-BFGS) + algorithm. + + See the docstrings of the `BFGS` and `LBFGS` classes for a description of the arguments of this class. + + For a full description of the scipy L-BFGS method, checkout its documentation: + + https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html + + If you use `LBFGS` as part of a published work, please cite the package via scipy following the instructions + under the *Attribution* section of the GitHub page. + + By default, the L-BFGS method scipy implementation does not store the history of parameter values and + log likelihood values during the non-linear search. This is because storing these values can require a large + amount of memory, in contradiction to the L-BFGS method's primary advantage of being memory efficient. + This means that it is difficult to visualize the L-BFGS method results (e.g. log likelihood vs iteration). 
+ + **PyAutoFit** extends the class with the option of using visualize mode, which stores the history of parameter + values and log likelihood values during the non-linear search. This allows the results of the L-BFGS method to be + visualized after the search has completed, and it is enabled by setting the `visualize` flag to `True`. + + Parameters + ---------- + name + The name of the search, controlling the last folder results are output. + path_prefix + The path of folders prefixing the name folder where results are output. + unique_tag + The name of a unique tag for this model-fit, which will be given a unique entry in the sqlite database + and also acts as the folder after the path prefix and before the search name. + initializer + Generates the initialize samples of non-linear parameter space (see autofit.non_linear.initializer). + number_of_cores: int + The number of cores sampling is performed using a Python multiprocessing Pool instance. + session + An SQLalchemy session instance so the results of the model-fit are written to an SQLite database. + visualize + If True, visualization of the search is enabled, which requires storing the history of parameter values and + log likelihood values during the non-linear search. 
+ """ + + method = "L-BFGS-B" \ No newline at end of file From 52808e5746764ce054f5a8ac5ddf289beb595333 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 26 Jul 2024 17:18:24 +0100 Subject: [PATCH 030/149] added BFGS to config --- autofit/config/non_linear/optimize.yaml | 38 +++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/autofit/config/non_linear/optimize.yaml b/autofit/config/non_linear/optimize.yaml index 81a1640f5..387c2d47f 100644 --- a/autofit/config/non_linear/optimize.yaml +++ b/autofit/config/non_linear/optimize.yaml @@ -49,6 +49,44 @@ PySwarmsLocal: updates: iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). +BFGS: + search: + tol: null + options: + disp: false + eps: 1.0e-08 + ftol: 2.220446049250313e-09 + gtol: 1.0e-05 + iprint: -1.0 + maxcor: 10 + maxfun: 15000 + maxiter: 15000 + maxls: 20 + initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}. + method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution. + ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. 
+ printing: + silence: false # If True, the default print output of the non-linear search is silcened and not printed by the Python interpreter. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). +Drawer: + search: + total_draws: 50 + initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}. + method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution. + ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. + parallel: + number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. + printing: + silence: false # If True, the default print output of the non-linear search is silcened and not printed by the Python interpreter. + updates: + iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. + remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). 
LBFGS: search: tol: null From ee45a3e9add7dd0746f07ad84759898d76ea19d9 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 17:18:18 +0100 Subject: [PATCH 031/149] SpecificRangeInitializer -> InitializerParamBounds --- autofit/__init__.py | 2 +- autofit/mapper/prior_model/abstract.py | 8 ++++++++ autofit/non_linear/fitness.py | 2 ++ autofit/non_linear/initializer.py | 4 ++-- autofit/non_linear/search/nest/abstract_nest.py | 6 +++--- docs/cookbooks/search.rst | 2 +- test_autofit/non_linear/test_initializer.py | 6 +++--- 7 files changed, 20 insertions(+), 10 deletions(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index 4ee1c37b9..48f7cee11 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -66,7 +66,7 @@ from .non_linear.grid.sensitivity import Sensitivity from .non_linear.initializer import InitializerBall from .non_linear.initializer import InitializerPrior -from .non_linear.initializer import SpecificRangeInitializer +from .non_linear.initializer import InitializerParamBounds from .non_linear.search.mcmc.auto_correlations import AutoCorrelationsSettings from .non_linear.search.mcmc.emcee.search import Emcee from .non_linear.search.mcmc.zeus.search import Zeus diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index 04695dc68..b862c5a37 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -1465,6 +1465,14 @@ def paths(self) -> List[Path]: """ return [path for path, _ in self.path_priors_tuples] + @property + def paths_formatted(self) -> List[Path]: + """ + A list of paths to all the priors in the model, ordered by their + ids + """ + return [path for path, _ in self.path_priors_tuples] + @property def composition(self): return [".".join(path) for path in self.paths] diff --git a/autofit/non_linear/fitness.py b/autofit/non_linear/fitness.py index 6209b9553..be24faecd 100644 --- a/autofit/non_linear/fitness.py +++ 
b/autofit/non_linear/fitness.py @@ -154,6 +154,8 @@ def __call__(self, parameters, *kwargs): instance = self.model.instance_from_vector(vector=parameters) log_likelihood = self.log_likelihood_function(instance=instance) + print(log_likelihood) + if np.isnan(log_likelihood): return self.resample_figure_of_merit diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 0de568cf6..b9eead4ef 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -175,7 +175,7 @@ def samples_in_test_mode(self, total_points: int, model: AbstractPriorModel): return unit_parameter_lists, parameter_lists, figure_of_merit_list -class SpecificRangeInitializer(AbstractInitializer): +class InitializerParamBounds(AbstractInitializer): def __init__( self, parameter_dict: Dict[Prior, Tuple[float, float]], @@ -226,7 +226,7 @@ def _generate_unit_parameter_list(self, model: AbstractPriorModel) -> List[float key = ".".join(model.path_for_prior(prior)) if key not in self._generated_warnings: logger.warning( - f"Range for {key} not set in the SpecificRangeInitializer. " + f"Range for {key} not set in the InitializerParamBounds. " f"Using defaults." ) self._generated_warnings.add(key) diff --git a/autofit/non_linear/search/nest/abstract_nest.py b/autofit/non_linear/search/nest/abstract_nest.py index 307f06837..5e356891d 100644 --- a/autofit/non_linear/search/nest/abstract_nest.py +++ b/autofit/non_linear/search/nest/abstract_nest.py @@ -8,7 +8,7 @@ from autofit.non_linear.initializer import ( InitializerPrior, AbstractInitializer, - SpecificRangeInitializer, + InitializerParamBounds, ) from autofit.non_linear.samples import SamplesNest from autofit.non_linear.plot.nest_plotters import NestPlotter @@ -44,9 +44,9 @@ def __init__( session An SQLAlchemy session instance so the results of the model-fit are written to an SQLite database. 
""" - if isinstance(initializer, SpecificRangeInitializer): + if isinstance(initializer, InitializerParamBounds): raise ValueError( - "SpecificRangeInitializer cannot be used for nested sampling" + "InitializerParamBounds cannot be used for nested sampling" ) super().__init__( diff --git a/docs/cookbooks/search.rst b/docs/cookbooks/search.rst index 8bb92caaf..32bada27f 100644 --- a/docs/cookbooks/search.rst +++ b/docs/cookbooks/search.rst @@ -241,7 +241,7 @@ We now define the start point of certain parameters in the model as follows. .. code-block:: python - initializer = af.SpecificRangeInitializer( + initializer = af.InitializerParamBounds( { model.centre: (49.0, 51.0), model.normalization: (4.0, 6.0), diff --git a/test_autofit/non_linear/test_initializer.py b/test_autofit/non_linear/test_initializer.py index c63397306..45a83a34e 100644 --- a/test_autofit/non_linear/test_initializer.py +++ b/test_autofit/non_linear/test_initializer.py @@ -235,7 +235,7 @@ def make_model(): def test_starting_point_initializer(model): - initializer = af.SpecificRangeInitializer( + initializer = af.InitializerParamBounds( { model.centre: (1.0, 2.0), model.normalization: (2.0, 3.0), @@ -250,7 +250,7 @@ def test_starting_point_initializer(model): def test_offset(model): - initializer = af.SpecificRangeInitializer( + initializer = af.InitializerParamBounds( { model.centre: (1.5, 2.0), model.normalization: (2.5, 3.0), @@ -265,7 +265,7 @@ def test_offset(model): def test_missing_parameter(model): - initializer = af.SpecificRangeInitializer( + initializer = af.InitializerParamBounds( { model.centre: (1.5, 2.0), model.normalization: (2.5, 3.0), From 75491833fee995c812e51c55207e2e1a13de8be7 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 17:19:55 +0100 Subject: [PATCH 032/149] docstring --- autofit/non_linear/initializer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py 
index b9eead4ef..ca0c9df39 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -183,13 +183,13 @@ def __init__( upper_limit=1.0, ): """ - Initializer that allows the range of possible starting points for each prior - to be specified explicitly. + Initializer which uses the bounds on input parameters as the starting point for the search (e.g. where + an MLE optimization starts or MCMC walkers are initialized). Parameters ---------- parameter_dict - A dictionary mapping priors to inclusive ranges of physical values that + A dictionary mapping each parameter path to bounded ranges of physical values that the initial values for that dimension in the search may take lower_limit A default, unit lower limit used when a prior is not specified From 46a893cbfe64a1ec84f9818e7fa2e7f7817bf4b3 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 17:41:58 +0100 Subject: [PATCH 033/149] initializer tested --- autofit/__init__.py | 1 + autofit/non_linear/initializer.py | 30 ++++++++++- autofit/non_linear/paths/directory.py | 5 +- .../non_linear/search/optimize/bfgs/search.py | 3 ++ test_autofit/non_linear/test_initializer.py | 54 ++++++++++++++++++- 5 files changed, 90 insertions(+), 3 deletions(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index 48f7cee11..58b230af8 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -67,6 +67,7 @@ from .non_linear.initializer import InitializerBall from .non_linear.initializer import InitializerPrior from .non_linear.initializer import InitializerParamBounds +from .non_linear.initializer import InitializerParamStartPoints from .non_linear.search.mcmc.auto_correlations import AutoCorrelationsSettings from .non_linear.search.mcmc.emcee.search import Emcee from .non_linear.search.mcmc.zeus.search import Zeus diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index ca0c9df39..3b36c37de 100644 --- a/autofit/non_linear/initializer.py +++ 
b/autofit/non_linear/initializer.py @@ -190,7 +190,7 @@ def __init__( ---------- parameter_dict A dictionary mapping each parameter path to bounded ranges of physical values that - the initial values for that dimension in the search may take + are where the search begins. lower_limit A default, unit lower limit used when a prior is not specified upper_limit @@ -241,6 +241,34 @@ def _generate_unit_parameter_list(self, model: AbstractPriorModel) -> List[float return unit_parameter_list +class InitializerParamStartPoints(InitializerParamBounds): + def __init__( + self, + parameter_dict: Dict[Prior, float], + ): + """ + Initializer which input values of the parameters as the starting point for the search (e.g. where + an MLE optimization starts or MCMC walkers are initialized). + + Parameters + ---------- + parameter_dict + A dictionary mapping each parameter path to the starting point physical values that + are where the search begins. + lower_limit + A default, unit lower limit used when a prior is not specified + upper_limit + A default, unit upper limit used when a prior is not specified + """ + parameter_dict_new = {} + + for key, value in parameter_dict.items(): + parameter_dict_new[key] = (value - 1.0e-8, value + 1.0e-8) + + super().__init__(parameter_dict=parameter_dict_new) + + + class Initializer(AbstractInitializer): def __init__(self, lower_limit: float, upper_limit: float): """ diff --git a/autofit/non_linear/paths/directory.py b/autofit/non_linear/paths/directory.py index 69452603e..88e70726a 100644 --- a/autofit/non_linear/paths/directory.py +++ b/autofit/non_linear/paths/directory.py @@ -350,7 +350,10 @@ def save_all(self, search_config_dict=None, info=None): ).save(str(self.output_path / "model_graph.html")) if info: self.save_json("info", info) - self.save_json("search", to_dict(self.search)) + try: + self.save_json("search", to_dict(self.search)) + except TypeError: + pass self.save_json("model", to_dict(self.model)) 
self._save_metadata(search_name=type(self.search).__name__.lower()) diff --git a/autofit/non_linear/search/optimize/bfgs/search.py b/autofit/non_linear/search/optimize/bfgs/search.py index 0f6e4aa5a..49a126157 100644 --- a/autofit/non_linear/search/optimize/bfgs/search.py +++ b/autofit/non_linear/search/optimize/bfgs/search.py @@ -126,6 +126,9 @@ def _fit( n_cores=self.number_of_cores, ) + print(parameter_lists) + fdsdffsd + x0 = np.asarray(parameter_lists[0]) total_iterations = 0 diff --git a/test_autofit/non_linear/test_initializer.py b/test_autofit/non_linear/test_initializer.py index 45a83a34e..589d5d66f 100644 --- a/test_autofit/non_linear/test_initializer.py +++ b/test_autofit/non_linear/test_initializer.py @@ -17,6 +17,43 @@ def __call__(self, parameters): return self.figure_of_merit +def test__priors__samples_from_model(): + model = af.Model(af.m.MockClassx4) + model.one = af.UniformPrior(lower_limit=0.099, upper_limit=0.101) + model.two = af.UniformPrior(lower_limit=0.199, upper_limit=0.201) + model.three = af.UniformPrior(lower_limit=0.299, upper_limit=0.301) + model.four = af.UniformPrior(lower_limit=0.399, upper_limit=0.401) + + initializer = af.InitializerPrior() + + ( + unit_parameter_lists, + parameter_lists, + figure_of_merit_list, + ) = initializer.samples_from_model( + total_points=2, + model=model, + fitness=MockFitness(), + paths=af.DirectoryPaths(), + ) + + assert 0.0 < unit_parameter_lists[0][0] < 1.0 + assert 0.0 < unit_parameter_lists[1][0] < 1.0 + assert 0.0 < unit_parameter_lists[0][1] < 1.0 + assert 0.0 < unit_parameter_lists[1][1] < 1.0 + assert 0.0 < unit_parameter_lists[0][2] < 1.0 + assert 0.0 < unit_parameter_lists[1][2] < 1.0 + assert 0.0 < unit_parameter_lists[0][3] < 1.0 + assert 0.0 < unit_parameter_lists[1][3] < 1.0 + + assert 0.099 < parameter_lists[0][0] < 0.101 + assert 0.099 < parameter_lists[1][0] < 0.101 + assert 0.199 < parameter_lists[0][1] < 0.201 + assert 0.199 < parameter_lists[1][1] < 0.201 + assert 0.299 < 
parameter_lists[0][2] < 0.301 + assert 0.299 < parameter_lists[1][2] < 0.301 + assert 0.399 < parameter_lists[0][3] < 0.401 + assert 0.399 < parameter_lists[1][3] < 0.401 def test__priors__samples_from_model(): model = af.Model(af.m.MockClassx4) model.one = af.UniformPrior(lower_limit=0.099, upper_limit=0.101) @@ -234,7 +271,7 @@ def make_model(): ) -def test_starting_point_initializer(model): +def test_initializer_bounds(model): initializer = af.InitializerParamBounds( { model.centre: (1.0, 2.0), @@ -249,6 +286,21 @@ def test_starting_point_initializer(model): assert 0.0 <= parameter <= 1.0 +def test__initializer_start_point(model): + initializer = af.InitializerParamStartPoints( + { + model.centre: 1.5, + model.normalization: 2.5, + model.sigma: -1.5, + } + ) + + parameter_list = initializer._generate_unit_parameter_list(model) + assert len(parameter_list) == 3 + for parameter in parameter_list: + assert 0.0 <= parameter <= 1.0 + + def test_offset(model): initializer = af.InitializerParamBounds( { From dab40f073d0742621b0b0ec274a09cdb2cad8c19 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 17:42:58 +0100 Subject: [PATCH 034/149] remove manual input of x0 for LBFGS --- .../non_linear/search/optimize/bfgs/search.py | 33 +++++++------------ 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/autofit/non_linear/search/optimize/bfgs/search.py b/autofit/non_linear/search/optimize/bfgs/search.py index 49a126157..467139b60 100644 --- a/autofit/non_linear/search/optimize/bfgs/search.py +++ b/autofit/non_linear/search/optimize/bfgs/search.py @@ -108,28 +108,19 @@ def _fit( except (FileNotFoundError, TypeError): - if "x0" in self.kwargs: - - x0 = self.kwargs["x0"] - - else: - - ( - unit_parameter_lists, - parameter_lists, - log_posterior_list, - ) = self.initializer.samples_from_model( - total_points=1, - model=model, - fitness=fitness, - paths=self.paths, - n_cores=self.number_of_cores, - ) - - print(parameter_lists) - fdsdffsd + ( + 
unit_parameter_lists, + parameter_lists, + log_posterior_list, + ) = self.initializer.samples_from_model( + total_points=1, + model=model, + fitness=fitness, + paths=self.paths, + n_cores=self.number_of_cores, + ) - x0 = np.asarray(parameter_lists[0]) + x0 = np.asarray(parameter_lists[0]) total_iterations = 0 From 505c84d20bd841873247a3a90e7bd4a626ca64d9 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 17:54:03 +0100 Subject: [PATCH 035/149] added info from model to param bounds --- autofit/non_linear/initializer.py | 15 +++++++++++++++ test_autofit/non_linear/test_initializer.py | 16 ++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 3b36c37de..88f8df7d2 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -240,6 +240,21 @@ def _generate_unit_parameter_list(self, model: AbstractPriorModel) -> List[float return unit_parameter_list + def info_from_model(self, model : AbstractPriorModel) -> str: + """ + Returns a string showing the bounds of the parameters in the initializer. 
+ """ + info = "Initializer Parameters:\n" + for prior in model.priors_ordered_by_id: + + key = ".".join(model.path_for_prior(prior)) + + try: + info += f"{key} : {self.parameter_dict[prior]}\n" + except KeyError: + info += f"{key} : {prior})\n" + + return info class InitializerParamStartPoints(InitializerParamBounds): def __init__( diff --git a/test_autofit/non_linear/test_initializer.py b/test_autofit/non_linear/test_initializer.py index 589d5d66f..e743947d5 100644 --- a/test_autofit/non_linear/test_initializer.py +++ b/test_autofit/non_linear/test_initializer.py @@ -286,6 +286,22 @@ def test_initializer_bounds(model): assert 0.0 <= parameter <= 1.0 +def test__initializer_bounds__info_from_model(model): + + initializer = af.InitializerParamBounds( + { + model.centre: (1.0, 2.0), + model.normalization: (2.0, 3.0), + model.sigma: (-2.0, -1.0), + } + ) + + info = initializer.info_from_model(model) + + assert "Initializer Parameters" in info + assert "centre : (1.0, 2.0)" in info + + def test__initializer_start_point(model): initializer = af.InitializerParamStartPoints( { From cc63811b0f70b1e7247837b227e58871721223b2 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 17:59:23 +0100 Subject: [PATCH 036/149] info display improved --- autofit/non_linear/initializer.py | 36 ++++++++++++++++++++- test_autofit/non_linear/test_initializer.py | 14 ++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index 88f8df7d2..d2d931898 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -250,12 +250,33 @@ def info_from_model(self, model : AbstractPriorModel) -> str: key = ".".join(model.path_for_prior(prior)) try: - info += f"{key} : {self.parameter_dict[prior]}\n" + + value = self.info_value_from(self.parameter_dict[prior]) + + info += f"{key} : {value}\n" + except KeyError: + info += f"{key} : {prior})\n" return info + def 
info_value_from(self, value : Tuple[float, float]) -> Tuple[float, float]: + """ + Returns the value that is used to display the bounds of the parameters in the initializer. + + This function simply returns the input value, but it can be overridden in subclasses for diffferent + initializers. + + Parameters + ---------- + value + The value to be displayed in the initializer info which is a tuple of the lower and upper bounds of the + parameter. + """ + return value + + class InitializerParamStartPoints(InitializerParamBounds): def __init__( self, @@ -282,7 +303,20 @@ def __init__( super().__init__(parameter_dict=parameter_dict_new) + def info_value_from(self, value : Tuple[float, float]) -> float: + """ + Returns the value that is used to display the starting point of the parameters in the initializer. + + This function returns the mean of the input value, as the starting point is a single value in the center of the + bounds. + Parameters + ---------- + value + The value to be displayed in the initializer info which is a tuple of the lower and upper bounds of the + parameter. 
+ """ + return (value[1] + value[0]) / 2.0 class Initializer(AbstractInitializer): def __init__(self, lower_limit: float, upper_limit: float): diff --git a/test_autofit/non_linear/test_initializer.py b/test_autofit/non_linear/test_initializer.py index e743947d5..651867b45 100644 --- a/test_autofit/non_linear/test_initializer.py +++ b/test_autofit/non_linear/test_initializer.py @@ -317,6 +317,20 @@ def test__initializer_start_point(model): assert 0.0 <= parameter <= 1.0 +def test__initializer_start_point__info_from_model(model): + initializer = af.InitializerParamStartPoints( + { + model.centre: 1.5, + model.normalization: 2.5, + model.sigma: -1.5, + } + ) + info = initializer.info_from_model(model) + + assert "Initializer Parameters" in info + assert "centre : 1.5" in info + + def test_offset(model): initializer = af.InitializerParamBounds( { From 6f93f57bae4cd54cf57c35f331e49dccce4d0375 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 19:12:57 +0100 Subject: [PATCH 037/149] dediciate starting point visualization --- autofit/non_linear/fitness.py | 2 - autofit/non_linear/paths/abstract.py | 19 +++++-- autofit/non_linear/search/abstract_search.py | 54 ++++++++++++++----- .../non_linear/search/optimize/bfgs/search.py | 19 +++++++ 4 files changed, 76 insertions(+), 18 deletions(-) diff --git a/autofit/non_linear/fitness.py b/autofit/non_linear/fitness.py index be24faecd..6209b9553 100644 --- a/autofit/non_linear/fitness.py +++ b/autofit/non_linear/fitness.py @@ -154,8 +154,6 @@ def __call__(self, parameters, *kwargs): instance = self.model.instance_from_vector(vector=parameters) log_likelihood = self.log_likelihood_function(instance=instance) - print(log_likelihood) - if np.isnan(log_likelihood): return self.resample_figure_of_merit diff --git a/autofit/non_linear/paths/abstract.py b/autofit/non_linear/paths/abstract.py index 4ae9f4c37..1129be8f4 100644 --- a/autofit/non_linear/paths/abstract.py +++ b/autofit/non_linear/paths/abstract.py @@ 
-32,6 +32,7 @@ def __init__( parent: Optional["AbstractPaths"] = None, unique_tag: Optional[str] = None, identifier: str = None, + image_path_suffix : str = "", ): """ Manages the path structure for `NonLinearSearch` output, for analyses both not using and using the search @@ -63,6 +64,16 @@ def __init__( is_identifier_in_paths If True output path and symlink path terminate with an identifier generated from the search and model + parent + The parent paths object of this paths object. + unique_tag + A unique tag for the search, used to differentiate between searches with the same name. + identifier + A custom identifier for the search, if this is not None it will be used instead of the automatically + generated identifier + image_path_suffix + A suffix which is appended to the image path. This is used to differentiate between different + image outputs, for example the image of the starting point of an MLE. """ self.name = name or "" @@ -87,6 +98,8 @@ def __init__( except NoSectionError as e: logger.exception(e) + self.image_path_suffix = image_path_suffix + @property @abstractmethod def samples(self): @@ -211,10 +224,10 @@ def image_path(self) -> Path: The path to the image folder. 
""" - if not os.path.exists(self.output_path / "image"): - os.makedirs(self.output_path / "image") + if not os.path.exists(self.output_path / f"image{self.image_path_suffix}"): + os.makedirs(self.output_path / f"image{self.image_path_suffix}") - return self.output_path / "image" + return self.output_path / f"image{self.image_path_suffix}" @property def profile_path(self) -> Path: diff --git a/autofit/non_linear/search/abstract_search.py b/autofit/non_linear/search/abstract_search.py index 2ac13edec..6154bbc62 100644 --- a/autofit/non_linear/search/abstract_search.py +++ b/autofit/non_linear/search/abstract_search.py @@ -29,6 +29,7 @@ from autofit.graphical.utils import Status from autofit.mapper.prior_model.abstract import AbstractPriorModel from autofit.mapper.prior_model.collection import Collection +from autofit.mapper.model import ModelInstance from autofit.non_linear.initializer import Initializer from autofit.non_linear.fitness import Fitness from autofit.non_linear.parallel import SneakyPool, SneakierPool @@ -1050,9 +1051,11 @@ def perform_update( def perform_visualization( self, model: AbstractPriorModel, - analysis: AbstractPriorModel, - samples_summary: SamplesSummary, + analysis: Analysis, during_analysis: bool, + samples_summary: Optional[SamplesSummary] = None, + instance : Optional[ModelInstance] = None, + paths_override : Optional[AbstractPaths] = None, search_internal=None, ): """ @@ -1075,32 +1078,57 @@ def perform_visualization( analysis Contains the data and the log likelihood function which fits an instance of the model to the data, returning the log likelihood the `NonLinearSearch` maximizes. + samples_summary + The summary of the samples of the non-linear search, which are used for visualization. during_analysis If the update is during a non-linear search, in which case tasks are only performed after a certain number of updates and only a subset of visualization may be performed. 
+ instance + The instance of the model that is used for visualization. If not input, the maximum log likelihood + instance from the samples is used. """ self.logger.debug("Visualizing") - if analysis.should_visualize(paths=self.paths, during_analysis=during_analysis): + if paths_override is None: + paths = self.paths + else: + paths = paths_override + + if instance is None and samples_summary is None: + raise AssertionError( + """The search's perform_visualization method has been called without an input instance or + samples_summary. + + This should not occur, please ensure one of these inputs is provided. + """ + ) + + if instance is None: + instance = samples_summary.instance + + if analysis.should_visualize(paths=paths, during_analysis=during_analysis): analysis.visualize( - paths=self.paths, - instance=samples_summary.instance, + paths=paths, + instance=instance, during_analysis=during_analysis, ) analysis.visualize_combined( - paths=self.paths, - instance=samples_summary.instance, + paths=paths, + instance=instance, during_analysis=during_analysis, ) - if analysis.should_visualize(paths=self.paths, during_analysis=during_analysis): - if not isinstance(self.paths, NullPaths): - samples = self.samples_from( - model=model, search_internal=search_internal - ) + if analysis.should_visualize(paths=paths, during_analysis=during_analysis): + if not isinstance(paths, NullPaths): + try: + samples = self.samples_from( + model=model, search_internal=search_internal + ) - self.plot_results(samples=samples) + self.plot_results(samples=samples) + except FileNotFoundError: + pass @property def samples_cls(self): diff --git a/autofit/non_linear/search/optimize/bfgs/search.py b/autofit/non_linear/search/optimize/bfgs/search.py index 467139b60..292cdd50c 100644 --- a/autofit/non_linear/search/optimize/bfgs/search.py +++ b/autofit/non_linear/search/optimize/bfgs/search.py @@ -128,6 +128,25 @@ def _fit( f"Starting new {self.method} non-linear search (no previous samples found)." 
) + + if self.visualize: + + self.logger.info( + f"Visualizing Starting Point Model in image_start folder." + ) + + instance = model.instance_from_vector(vector=x0) + paths = copy.copy(self.paths) + paths.image_path_suffix = "_start" + + self.perform_visualization( + model=model, + analysis=analysis, + instance=instance, + during_analysis=False, + paths_override=paths, + ) + maxiter = self.config_dict_options.get("maxiter", 1e8) while total_iterations < maxiter: From eca4f2067ad34bcd1d9c48b9f962f549351291d0 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 19:16:20 +0100 Subject: [PATCH 038/149] move plotting of start point to AbstractOptimizer --- .../search/optimize/abstract_optimize.py | 43 +++++++++++++++++++ .../non_linear/search/optimize/bfgs/search.py | 15 +------ 2 files changed, 44 insertions(+), 14 deletions(-) diff --git a/autofit/non_linear/search/optimize/abstract_optimize.py b/autofit/non_linear/search/optimize/abstract_optimize.py index de069692f..c13945697 100644 --- a/autofit/non_linear/search/optimize/abstract_optimize.py +++ b/autofit/non_linear/search/optimize/abstract_optimize.py @@ -1,6 +1,9 @@ from abc import ABC +import copy from autoconf import conf +from autofit.mapper.prior_model.abstract import AbstractPriorModel +from autofit.non_linear.analysis import Analysis from autofit.non_linear.search.abstract_search import NonLinearSearch from autofit.non_linear.samples import Samples from autofit.non_linear.plot.optimize_plotters import OptimizePlotter @@ -20,6 +23,46 @@ def samples_cls(self): def plotter_cls(self): return OptimizePlotter + def plot_start_point( + self, + model: AbstractPriorModel, + analysis: Analysis, + ): + """ + Visualize the starting point of the non-linear search, using an instance of the model at the starting point + of the maximum likelihood estimator. 
+ + Plots are output to a folder named `image_start` in the output path, so that the starting point model + can be compared to the final model inferred by the non-linear search. + + Parameters + ---------- + model + The model used by the non-linear search + analysis + The analysis which contains the visualization methods which plot the starting point model. + + Returns + ------- + + """ + + self.logger.info( + f"Visualizing Starting Point Model in image_start folder." + ) + + instance = model.instance_from_vector(vector=x0) + paths = copy.copy(self.paths) + paths.image_path_suffix = "_start" + + self.perform_visualization( + model=model, + analysis=analysis, + instance=instance, + during_analysis=False, + paths_override=paths, + ) + def plot_results(self, samples): def should_plot(name): diff --git a/autofit/non_linear/search/optimize/bfgs/search.py b/autofit/non_linear/search/optimize/bfgs/search.py index 292cdd50c..585e1a779 100644 --- a/autofit/non_linear/search/optimize/bfgs/search.py +++ b/autofit/non_linear/search/optimize/bfgs/search.py @@ -128,24 +128,11 @@ def _fit( f"Starting new {self.method} non-linear search (no previous samples found)." ) - if self.visualize: - self.logger.info( - f"Visualizing Starting Point Model in image_start folder." 
- ) + self.plot_start_point() - instance = model.instance_from_vector(vector=x0) - paths = copy.copy(self.paths) - paths.image_path_suffix = "_start" - self.perform_visualization( - model=model, - analysis=analysis, - instance=instance, - during_analysis=False, - paths_override=paths, - ) maxiter = self.config_dict_options.get("maxiter", 1e8) From 1ee3e2aa94a63da56b8aec0fc2995931c3f90dc0 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 19:38:48 +0100 Subject: [PATCH 039/149] model start info updated, output and tested --- autofit/non_linear/initializer.py | 8 ++++++-- autofit/non_linear/paths/directory.py | 16 +++++++++++++++- .../search/optimize/abstract_optimize.py | 4 +++- .../non_linear/search/optimize/bfgs/search.py | 8 +++++--- test_autofit/non_linear/test_initializer.py | 8 ++++---- 5 files changed, 33 insertions(+), 11 deletions(-) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index d2d931898..c9415c0c9 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -25,6 +25,9 @@ class AbstractInitializer(ABC): def _generate_unit_parameter_list(self, model): pass + def info_from_model(self, model : AbstractPriorModel) -> str: + raise NotImplementedError + @staticmethod def figure_of_metric(args) -> Optional[float]: fitness, parameter_list = args @@ -244,7 +247,8 @@ def info_from_model(self, model : AbstractPriorModel) -> str: """ Returns a string showing the bounds of the parameters in the initializer. 
""" - info = "Initializer Parameters:\n" + info = "Total Free Parameters = " + str(model.prior_count) + "\n" + info += "Total Starting Points = " + str(len(self.parameter_dict)) + "\n\n" for prior in model.priors_ordered_by_id: key = ".".join(model.path_for_prior(prior)) @@ -253,7 +257,7 @@ def info_from_model(self, model : AbstractPriorModel) -> str: value = self.info_value_from(self.parameter_dict[prior]) - info += f"{key} : {value}\n" + info += f"{key}: Start[{value}]\n" except KeyError: diff --git a/autofit/non_linear/paths/directory.py b/autofit/non_linear/paths/directory.py index 88e70726a..a16ae025e 100644 --- a/autofit/non_linear/paths/directory.py +++ b/autofit/non_linear/paths/directory.py @@ -2,6 +2,7 @@ import dill import json +import numpy as np import os from os import path from pathlib import Path @@ -21,7 +22,6 @@ from ..samples import load_from_table from autofit.non_linear.samples.pdf import SamplesPDF from autofit.non_linear.samples.summary import SamplesSummary -import numpy as np from ...visualise import VisualiseGraph @@ -354,6 +354,13 @@ def save_all(self, search_config_dict=None, info=None): self.save_json("search", to_dict(self.search)) except TypeError: pass + + try: + info_start = self.search.initializer.info_from_model(model=self.model) + self._save_model_start_point(info=info_start) + except NotImplementedError: + pass + self.save_json("model", to_dict(self.model)) self._save_metadata(search_name=type(self.search).__name__.lower()) @@ -461,6 +468,13 @@ def _save_model_info(self, model): with open_(self.output_path / "model.info", "w+") as f: f.write(model.info) + def _save_model_start_point(self, info): + """ + Save the model.start file, which summarizes the start point of every parameter. 
+ """ + with open_(self.output_path / "model.start", "w+") as f: + f.write(info) + def _save_parameter_names_file(self, model): """ Create the param_names file listing every parameter's label and Latex tag, which is used for corner.py diff --git a/autofit/non_linear/search/optimize/abstract_optimize.py b/autofit/non_linear/search/optimize/abstract_optimize.py index c13945697..1f2a18a58 100644 --- a/autofit/non_linear/search/optimize/abstract_optimize.py +++ b/autofit/non_linear/search/optimize/abstract_optimize.py @@ -1,5 +1,6 @@ from abc import ABC import copy +from typing import List from autoconf import conf from autofit.mapper.prior_model.abstract import AbstractPriorModel @@ -25,6 +26,7 @@ def plotter_cls(self): def plot_start_point( self, + parameter_vector : List[float], model: AbstractPriorModel, analysis: Analysis, ): @@ -51,7 +53,7 @@ def plot_start_point( f"Visualizing Starting Point Model in image_start folder." ) - instance = model.instance_from_vector(vector=x0) + instance = model.instance_from_vector(vector=parameter_vector) paths = copy.copy(self.paths) paths.image_path_suffix = "_start" diff --git a/autofit/non_linear/search/optimize/bfgs/search.py b/autofit/non_linear/search/optimize/bfgs/search.py index 585e1a779..6f3e87393 100644 --- a/autofit/non_linear/search/optimize/bfgs/search.py +++ b/autofit/non_linear/search/optimize/bfgs/search.py @@ -130,9 +130,11 @@ def _fit( if self.visualize: - self.plot_start_point() - - + self.plot_start_point( + parameter_vector=x0, + model=model, + analysis=analysis, + ) maxiter = self.config_dict_options.get("maxiter", 1e8) diff --git a/test_autofit/non_linear/test_initializer.py b/test_autofit/non_linear/test_initializer.py index 651867b45..ec37f1c96 100644 --- a/test_autofit/non_linear/test_initializer.py +++ b/test_autofit/non_linear/test_initializer.py @@ -298,8 +298,8 @@ def test__initializer_bounds__info_from_model(model): info = initializer.info_from_model(model) - assert "Initializer Parameters" in info 
- assert "centre : (1.0, 2.0)" in info + assert "Total Free Parameters = 3" in info + assert "centre: Start[(1.0, 2.0)]" in info def test__initializer_start_point(model): @@ -327,8 +327,8 @@ def test__initializer_start_point__info_from_model(model): ) info = initializer.info_from_model(model) - assert "Initializer Parameters" in info - assert "centre : 1.5" in info + assert "Total Free Parameters = 3" in info + assert "centre: Start[1.5]" in info def test_offset(model): From 3098150e91fb2728e2e235b0bb37a9db76c19ae6 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 20:02:52 +0100 Subject: [PATCH 040/149] MLE conversion complete --- .gitignore | 14 +++++++------- autofit/__init__.py | 10 +++++----- autofit/config/non_linear/README.rst | 2 +- .../non_linear/{optimize.yaml => mle.yaml} | 4 ++-- autofit/config/visualize/plots_search.yaml | 2 +- autofit/interpolator/covariance.py | 4 ++-- autofit/mock.py | 2 +- autofit/non_linear/mock/mock_search.py | 4 ++-- autofit/non_linear/paths/directory.py | 2 +- .../{optimize_plotters.py => mle_plotters.py} | 2 +- .../search/{optimize => mle}/__init__.py | 0 .../abstract_mle.py} | 10 +++++----- .../search/{optimize => mle}/bfgs/__init__.py | 0 .../search/{optimize => mle}/bfgs/search.py | 4 ++-- .../{optimize => mle}/drawer/__init__.py | 0 .../search/{optimize => mle}/drawer/search.py | 4 ++-- .../{optimize => mle}/pyswarms/__init__.py | 0 .../pyswarms/search/__init__.py | 0 .../pyswarms/search/abstract.py | 6 +++--- .../pyswarms/search/globe.py | 4 ++-- .../pyswarms/search/local.py | 4 ++-- autofit/plot/__init__.py | 2 +- docs/api/plot.rst | 2 +- docs/api/searches.rst | 10 ++++++---- test_autofit/analysis/test_free_parameter.py | 6 +++--- test_autofit/config/non_linear/README.rst | 2 +- .../non_linear/{optimize.yaml => mle.yaml} | 19 ------------------- test_autofit/config/non_linear/mock.yaml | 2 +- test_autofit/database/test_search.py | 16 ++++++++-------- .../graphical/gaussian/test_optimizer.py | 4 ++-- 
.../graphical/hierarchical/test_optimise.py | 4 ++-- test_autofit/non_linear/grid/conftest.py | 2 +- .../grid/test_optimizer_grid_search.py | 4 ++-- .../grid/test_paths/test_database_run.py | 2 +- .../grid/test_paths/test_indicators.py | 2 +- test_autofit/non_linear/test_regression.py | 14 +++++++------- 36 files changed, 76 insertions(+), 93 deletions(-) rename autofit/config/non_linear/{optimize.yaml => mle.yaml} (97%) rename autofit/non_linear/plot/{optimize_plotters.py => mle_plotters.py} (96%) rename autofit/non_linear/search/{optimize => mle}/__init__.py (100%) rename autofit/non_linear/search/{optimize/abstract_optimize.py => mle/abstract_mle.py} (86%) rename autofit/non_linear/search/{optimize => mle}/bfgs/__init__.py (100%) rename autofit/non_linear/search/{optimize => mle}/bfgs/search.py (96%) rename autofit/non_linear/search/{optimize => mle}/drawer/__init__.py (100%) rename autofit/non_linear/search/{optimize => mle}/drawer/search.py (95%) rename autofit/non_linear/search/{optimize => mle}/pyswarms/__init__.py (100%) rename autofit/non_linear/search/{optimize => mle}/pyswarms/search/__init__.py (100%) rename autofit/non_linear/search/{optimize => mle}/pyswarms/search/abstract.py (95%) rename autofit/non_linear/search/{optimize => mle}/pyswarms/search/globe.py (92%) rename autofit/non_linear/search/{optimize => mle}/pyswarms/search/local.py (92%) rename test_autofit/config/non_linear/{optimize.yaml => mle.yaml} (78%) diff --git a/.gitignore b/.gitignore index 804d51ed2..c7e026207 100644 --- a/.gitignore +++ b/.gitignore @@ -125,13 +125,13 @@ venv.bak/ .idea workspace/output/ output -test/optimize/test_fit +test/mle/test_fit test/test_files/text/ test/ -test_autofit/optimize/test_fit/ +test_autofit/mle/test_fit/ test_autofit/test_files/text/psycopg2-binary==2.8.1 test_autofit/test_files/text/ -fit/test_autofit/optimize/test_fit +fit/test_autofit/mle/test_fit *.DS_Store test_autofit/config/priors/old @@ -157,7 +157,7 @@ test_autofit/samples.csv __MACOSX 
*.swp test/autofit/test_fit -# Byte-compiled / optimized / DLL files +# Byte-compiled / mled / DLL files __pycache__/ *.py[cod] *$py.class @@ -264,13 +264,13 @@ venv.bak/ .idea workspace/output/ output -test/optimize/test_fit +test/mle/test_fit test/test_files/text/ test/ -test_autofit/optimize/test_fit/ +test_autofit/mle/test_fit/ test_autofit/test_files/text/psycopg2-binary==2.8.1 test_autofit/test_files/text/ -fit/test_autofit/optimize/test_fit +fit/test_autofit/mle/test_fit *.DS_Store test_autofit/config/priors/old diff --git a/autofit/__init__.py b/autofit/__init__.py index 58b230af8..ab48e882e 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -75,11 +75,11 @@ from .non_linear.search.nest.dynesty.search.dynamic import DynestyDynamic from .non_linear.search.nest.dynesty.search.static import DynestyStatic from .non_linear.search.nest.ultranest.search import UltraNest -from .non_linear.search.optimize.drawer.search import Drawer -from .non_linear.search.optimize.bfgs.search import BFGS -from .non_linear.search.optimize.bfgs.search import LBFGS -from .non_linear.search.optimize.pyswarms.search.globe import PySwarmsGlobal -from .non_linear.search.optimize.pyswarms.search.local import PySwarmsLocal +from .non_linear.search.mle.drawer.search import Drawer +from .non_linear.search.mle.bfgs.search import BFGS +from .non_linear.search.mle.bfgs.search import LBFGS +from .non_linear.search.mle.pyswarms.search.globe import PySwarmsGlobal +from .non_linear.search.mle.pyswarms.search.local import PySwarmsLocal from .non_linear.paths.abstract import AbstractPaths from .non_linear.paths import DirectoryPaths from .non_linear.paths import DatabasePaths diff --git a/autofit/config/non_linear/README.rst b/autofit/config/non_linear/README.rst index 11774c406..9432dd852 100644 --- a/autofit/config/non_linear/README.rst +++ b/autofit/config/non_linear/README.rst @@ -6,4 +6,4 @@ Files - ``mcmc.yaml``: Settings default behaviour of MCMC non-linear searches (e.g. Emcee). 
- ``nest.yaml``: Settings default behaviour of nested sampler non-linear searches (e.g. Dynesty). -- ``optimizer.yaml``: Settings default behaviour of optimizer non-linear searches (e.g. PySwarms). \ No newline at end of file +- ``mle.yaml``: Settings default behaviour of maximum likelihood estimator (mle) searches (e.g. PySwarms). \ No newline at end of file diff --git a/autofit/config/non_linear/optimize.yaml b/autofit/config/non_linear/mle.yaml similarity index 97% rename from autofit/config/non_linear/optimize.yaml rename to autofit/config/non_linear/mle.yaml index 387c2d47f..3946b950d 100644 --- a/autofit/config/non_linear/optimize.yaml +++ b/autofit/config/non_linear/mle.yaml @@ -1,11 +1,11 @@ # Configuration files that customize the default behaviour of non-linear searches. -# **PyAutoFit** supports the following optimizer algorithms: +# **PyAutoFit** supports the following maximum likelihood estimator (MLE) algorithms: # - PySwarms: https://github.com/ljvmiranda921/pyswarms / https://pyswarms.readthedocs.io/en/latest/index.html # Settings in the [search], [run] and [options] entries are specific to each nested algorithm and should be -# determined by consulting that optimizers method's own readthedocs. +# determined by consulting that method's own readthedocs. PySwarmsGlobal: run: diff --git a/autofit/config/visualize/plots_search.yaml b/autofit/config/visualize/plots_search.yaml index d6f9197db..7552865d3 100644 --- a/autofit/config/visualize/plots_search.yaml +++ b/autofit/config/visualize/plots_search.yaml @@ -2,6 +2,6 @@ nest: corner_anesthetic: true # Output corner figure (using anestetic) during a non-linear search fit? mcmc: corner_cornerpy: true # Output corner figure (using corner.py) during a non-linear search fit? -optimize: +mle: subplot_parameters: true # Output a subplot of the best-fit parameters of the model? log_likelihood_vs_iteration: true # Output a plot of the log likelihood versus iteration number? 
\ No newline at end of file diff --git a/autofit/interpolator/covariance.py b/autofit/interpolator/covariance.py index fd090d287..4e7d0dcbd 100644 --- a/autofit/interpolator/covariance.py +++ b/autofit/interpolator/covariance.py @@ -195,8 +195,8 @@ def _relationships_for_value( """ analysis = self._analysis_for_value(value) model = self.model(path_relationship_map=path_relationship_map or {}) - optimizer = DynestyStatic() - result = optimizer.fit(model=model, analysis=analysis) + search = DynestyStatic() + result = search.fit(model=model, analysis=analysis) return result.instance def __getitem__(self, value: Equality) -> float: diff --git a/autofit/mock.py b/autofit/mock.py index d29ae6aa2..98ddd262a 100644 --- a/autofit/mock.py +++ b/autofit/mock.py @@ -2,7 +2,7 @@ from autofit.non_linear.mock.mock_result import MockResult from autofit.non_linear.mock.mock_result import MockResultGrid from autofit.non_linear.mock.mock_search import MockSearch -from autofit.non_linear.mock.mock_search import MockOptimizer +from autofit.non_linear.mock.mock_search import MockMLE from autofit.non_linear.mock.mock_samples_summary import MockSamplesSummary from autofit.non_linear.mock.mock_samples import MockSamples from autofit.non_linear.mock.mock_samples import MockSamplesNest diff --git a/autofit/non_linear/mock/mock_search.py b/autofit/non_linear/mock/mock_search.py index 8f26e4c88..4dbd53c12 100644 --- a/autofit/non_linear/mock/mock_search.py +++ b/autofit/non_linear/mock/mock_search.py @@ -157,13 +157,13 @@ def perform_update(self, model, analysis, during_analysis, search_internal=None) ) -class MockOptimizer(MockSearch): +class MockMLE(MockSearch): def __init__(self, **kwargs): super().__init__(fit_fast=False, **kwargs) @property def samples_cls(self): - return MockOptimizer + return MockMLE def project( self, factor_approx: FactorApproximation, status: Status = Status() diff --git a/autofit/non_linear/paths/directory.py b/autofit/non_linear/paths/directory.py index 
a16ae025e..90599ebc2 100644 --- a/autofit/non_linear/paths/directory.py +++ b/autofit/non_linear/paths/directory.py @@ -358,7 +358,7 @@ def save_all(self, search_config_dict=None, info=None): try: info_start = self.search.initializer.info_from_model(model=self.model) self._save_model_start_point(info=info_start) - except NotImplementedError: + except (NotImplementedError, AttributeError): pass self.save_json("model", to_dict(self.model)) diff --git a/autofit/non_linear/plot/optimize_plotters.py b/autofit/non_linear/plot/mle_plotters.py similarity index 96% rename from autofit/non_linear/plot/optimize_plotters.py rename to autofit/non_linear/plot/mle_plotters.py index 36597e92d..4e31e8a5d 100644 --- a/autofit/non_linear/plot/optimize_plotters.py +++ b/autofit/non_linear/plot/mle_plotters.py @@ -8,7 +8,7 @@ logger = logging.getLogger(__name__) -class OptimizePlotter(SamplesPlotter): +class MLEPlotter(SamplesPlotter): def subplot_parameters(self, use_log_y : bool = False, use_last_50_percent : bool = False, **kwargs): """ diff --git a/autofit/non_linear/search/optimize/__init__.py b/autofit/non_linear/search/mle/__init__.py similarity index 100% rename from autofit/non_linear/search/optimize/__init__.py rename to autofit/non_linear/search/mle/__init__.py diff --git a/autofit/non_linear/search/optimize/abstract_optimize.py b/autofit/non_linear/search/mle/abstract_mle.py similarity index 86% rename from autofit/non_linear/search/optimize/abstract_optimize.py rename to autofit/non_linear/search/mle/abstract_mle.py index 1f2a18a58..530fb51e8 100644 --- a/autofit/non_linear/search/optimize/abstract_optimize.py +++ b/autofit/non_linear/search/mle/abstract_mle.py @@ -7,14 +7,14 @@ from autofit.non_linear.analysis import Analysis from autofit.non_linear.search.abstract_search import NonLinearSearch from autofit.non_linear.samples import Samples -from autofit.non_linear.plot.optimize_plotters import OptimizePlotter +from autofit.non_linear.plot.mle_plotters import MLEPlotter 
from autofit.non_linear.plot.output import Output -class AbstractOptimizer(NonLinearSearch, ABC): +class AbstractMLE(NonLinearSearch, ABC): @property def config_type(self): - return conf.instance["non_linear"]["optimize"] + return conf.instance["non_linear"]["mle"] @property def samples_cls(self): @@ -22,7 +22,7 @@ def samples_cls(self): @property def plotter_cls(self): - return OptimizePlotter + return MLEPlotter def plot_start_point( self, @@ -68,7 +68,7 @@ def plot_start_point( def plot_results(self, samples): def should_plot(name): - return conf.instance["visualize"]["plots_search"]["optimize"][name] + return conf.instance["visualize"]["plots_search"]["mle"][name] plotter = self.plotter_cls( samples=samples, diff --git a/autofit/non_linear/search/optimize/bfgs/__init__.py b/autofit/non_linear/search/mle/bfgs/__init__.py similarity index 100% rename from autofit/non_linear/search/optimize/bfgs/__init__.py rename to autofit/non_linear/search/mle/bfgs/__init__.py diff --git a/autofit/non_linear/search/optimize/bfgs/search.py b/autofit/non_linear/search/mle/bfgs/search.py similarity index 96% rename from autofit/non_linear/search/optimize/bfgs/search.py rename to autofit/non_linear/search/mle/bfgs/search.py index 6f3e87393..8e5ac7d3d 100644 --- a/autofit/non_linear/search/optimize/bfgs/search.py +++ b/autofit/non_linear/search/mle/bfgs/search.py @@ -4,7 +4,7 @@ from autofit.database.sqlalchemy_ import sa from autofit.mapper.prior_model.abstract import AbstractPriorModel -from autofit.non_linear.search.optimize.abstract_optimize import AbstractOptimizer +from autofit.non_linear.search.mle.abstract_mle import AbstractMLE from autofit.non_linear.analysis import Analysis from autofit.non_linear.fitness import Fitness from autofit.non_linear.initializer import AbstractInitializer @@ -16,7 +16,7 @@ import numpy as np -class AbstractBFGS(AbstractOptimizer): +class AbstractBFGS(AbstractMLE): __identifier_fields__ = () method = None diff --git 
a/autofit/non_linear/search/optimize/drawer/__init__.py b/autofit/non_linear/search/mle/drawer/__init__.py similarity index 100% rename from autofit/non_linear/search/optimize/drawer/__init__.py rename to autofit/non_linear/search/mle/drawer/__init__.py diff --git a/autofit/non_linear/search/optimize/drawer/search.py b/autofit/non_linear/search/mle/drawer/search.py similarity index 95% rename from autofit/non_linear/search/optimize/drawer/search.py rename to autofit/non_linear/search/mle/drawer/search.py index 43c481740..aa6567ee0 100644 --- a/autofit/non_linear/search/optimize/drawer/search.py +++ b/autofit/non_linear/search/mle/drawer/search.py @@ -5,12 +5,12 @@ from autofit.mapper.prior_model.abstract import AbstractPriorModel from autofit.non_linear.fitness import Fitness -from autofit.non_linear.search.optimize.abstract_optimize import AbstractOptimizer +from autofit.non_linear.search.mle.abstract_mle import AbstractMLE from autofit.non_linear.initializer import AbstractInitializer from autofit.non_linear.samples import Samples, Sample -class Drawer(AbstractOptimizer): +class Drawer(AbstractMLE): __identifier_fields__ = ("total_draws",) def __init__( diff --git a/autofit/non_linear/search/optimize/pyswarms/__init__.py b/autofit/non_linear/search/mle/pyswarms/__init__.py similarity index 100% rename from autofit/non_linear/search/optimize/pyswarms/__init__.py rename to autofit/non_linear/search/mle/pyswarms/__init__.py diff --git a/autofit/non_linear/search/optimize/pyswarms/search/__init__.py b/autofit/non_linear/search/mle/pyswarms/search/__init__.py similarity index 100% rename from autofit/non_linear/search/optimize/pyswarms/search/__init__.py rename to autofit/non_linear/search/mle/pyswarms/search/__init__.py diff --git a/autofit/non_linear/search/optimize/pyswarms/search/abstract.py b/autofit/non_linear/search/mle/pyswarms/search/abstract.py similarity index 95% rename from autofit/non_linear/search/optimize/pyswarms/search/abstract.py rename to 
autofit/non_linear/search/mle/pyswarms/search/abstract.py index 7c0acb7cd..2307aa2c1 100644 --- a/autofit/non_linear/search/optimize/pyswarms/search/abstract.py +++ b/autofit/non_linear/search/mle/pyswarms/search/abstract.py @@ -7,7 +7,7 @@ from autofit.mapper.prior_model.abstract import AbstractPriorModel from autofit.non_linear.fitness import Fitness from autofit.non_linear.initializer import AbstractInitializer -from autofit.non_linear.search.optimize.abstract_optimize import AbstractOptimizer +from autofit.non_linear.search.mle.abstract_mle import AbstractMLE from autofit.non_linear.samples.sample import Sample from autofit.non_linear.samples.samples import Samples @@ -66,7 +66,7 @@ def __call__(self, parameters, *kwargs): return np.asarray(figure_of_merit_list) -class AbstractPySwarms(AbstractOptimizer): +class AbstractPySwarms(AbstractMLE): def __init__( self, name: Optional[str] = None, @@ -79,7 +79,7 @@ def __init__( **kwargs ): """ - A PySwarms Particle Swarm Optimizer global non-linear search. + A PySwarms Particle Swarm MLE global non-linear search. 
For a full description of PySwarms, checkout its Github and readthedocs webpages: diff --git a/autofit/non_linear/search/optimize/pyswarms/search/globe.py b/autofit/non_linear/search/mle/pyswarms/search/globe.py similarity index 92% rename from autofit/non_linear/search/optimize/pyswarms/search/globe.py rename to autofit/non_linear/search/mle/pyswarms/search/globe.py index ab16dadb5..ebf620cc1 100644 --- a/autofit/non_linear/search/optimize/pyswarms/search/globe.py +++ b/autofit/non_linear/search/mle/pyswarms/search/globe.py @@ -2,7 +2,7 @@ from autofit.database.sqlalchemy_ import sa from autofit.non_linear.initializer import AbstractInitializer -from autofit.non_linear.search.optimize.pyswarms.search.abstract import AbstractPySwarms +from autofit.non_linear.search.mle.pyswarms.search.abstract import AbstractPySwarms class PySwarmsGlobal(AbstractPySwarms): @@ -25,7 +25,7 @@ def __init__( **kwargs ): """ - A PySwarms Particle Swarm Optimizer global non-linear search. + A PySwarms Particle Swarm MLE global non-linear search. For a full description of PySwarms, checkout its Github and readthedocs webpages: diff --git a/autofit/non_linear/search/optimize/pyswarms/search/local.py b/autofit/non_linear/search/mle/pyswarms/search/local.py similarity index 92% rename from autofit/non_linear/search/optimize/pyswarms/search/local.py rename to autofit/non_linear/search/mle/pyswarms/search/local.py index ff1102a26..4432eae8a 100644 --- a/autofit/non_linear/search/optimize/pyswarms/search/local.py +++ b/autofit/non_linear/search/mle/pyswarms/search/local.py @@ -1,7 +1,7 @@ from typing import Optional from autofit.database.sqlalchemy_ import sa -from autofit.non_linear.search.optimize.pyswarms.search.abstract import AbstractPySwarms +from autofit.non_linear.search.mle.pyswarms.search.abstract import AbstractPySwarms class PySwarmsLocal(AbstractPySwarms): @@ -25,7 +25,7 @@ def __init__( **kwargs ): """ - A PySwarms Particle Swarm Optimizer global non-linear search. 
+ A PySwarms Particle Swarm MLE global non-linear search. For a full description of PySwarms, checkout its Github and readthedocs webpages: diff --git a/autofit/plot/__init__.py b/autofit/plot/__init__.py index ab35eb913..525a81891 100644 --- a/autofit/plot/__init__.py +++ b/autofit/plot/__init__.py @@ -1,5 +1,5 @@ from autofit.non_linear.plot.samples_plotters import SamplesPlotter from autofit.non_linear.plot.mcmc_plotters import MCMCPlotter -from autofit.non_linear.plot.optimize_plotters import OptimizePlotter +from autofit.non_linear.plot.mle_plotters import MLEPlotter from autofit.non_linear.plot.nest_plotters import NestPlotter from autofit.non_linear.plot.output import Output \ No newline at end of file diff --git a/docs/api/plot.rst b/docs/api/plot.rst index 84055e406..ac381ca1e 100644 --- a/docs/api/plot.rst +++ b/docs/api/plot.rst @@ -24,4 +24,4 @@ Plotters NestPlotter MCMCPlotter - OptimizePlotter \ No newline at end of file + MLEPlotter \ No newline at end of file diff --git a/docs/api/searches.rst b/docs/api/searches.rst index 6c08e98db..6d070bf86 100644 --- a/docs/api/searches.rst +++ b/docs/api/searches.rst @@ -4,8 +4,8 @@ Non-Linear Searches A non-linear search is an algorithm which fits a model to data. -**PyAutoFit** currently supports three types of non-linear search algorithms: nested samplers, -Markov Chain Monte Carlo (MCMC) and optimizers. +**PyAutoFit** currently supports three types of non-linear search algorithms: nested samplers (nest), +Markov Chain Monte Carlo (MCMC) and Maximum Likelihood Estimators (MLE). **Examples / Tutorials:** @@ -41,8 +41,8 @@ MCMC Emcee Zeus -Optimizers ----------- +Maximum Likelihood Estimators +----------------------------- .. 
currentmodule:: autofit @@ -51,6 +51,8 @@ Optimizers :template: custom-class-template.rst :recursive: + BFGS + LBFGS PySwarmsLocal PySwarmsGlobal diff --git a/test_autofit/analysis/test_free_parameter.py b/test_autofit/analysis/test_free_parameter.py index 3a8eb790c..5acb0fbb9 100644 --- a/test_autofit/analysis/test_free_parameter.py +++ b/test_autofit/analysis/test_free_parameter.py @@ -2,7 +2,7 @@ import autofit as af from autofit.non_linear.analysis import FreeParameterAnalysis -from autofit.non_linear.mock.mock_search import MockOptimizer +from autofit.non_linear.mock.mock_search import MockMLE def test_copy(): @@ -77,8 +77,8 @@ def make_result( combined_analysis, model, ): - optimizer = MockOptimizer() - return optimizer.fit(model, combined_analysis) + search = MockMLE() + return search.fit(model, combined_analysis) @pytest.fixture(autouse=True) diff --git a/test_autofit/config/non_linear/README.rst b/test_autofit/config/non_linear/README.rst index 11774c406..9432dd852 100644 --- a/test_autofit/config/non_linear/README.rst +++ b/test_autofit/config/non_linear/README.rst @@ -6,4 +6,4 @@ Files - ``mcmc.yaml``: Settings default behaviour of MCMC non-linear searches (e.g. Emcee). - ``nest.yaml``: Settings default behaviour of nested sampler non-linear searches (e.g. Dynesty). -- ``optimizer.yaml``: Settings default behaviour of optimizer non-linear searches (e.g. PySwarms). \ No newline at end of file +- ``mle.yaml``: Settings default behaviour of maximum likelihood estimator (mle) searches (e.g. PySwarms). 
\ No newline at end of file diff --git a/test_autofit/config/non_linear/optimize.yaml b/test_autofit/config/non_linear/mle.yaml similarity index 78% rename from test_autofit/config/non_linear/optimize.yaml rename to test_autofit/config/non_linear/mle.yaml index ccb50e959..b6587c4c4 100644 --- a/test_autofit/config/non_linear/optimize.yaml +++ b/test_autofit/config/non_linear/mle.yaml @@ -1,22 +1,3 @@ -DownhillSimplex: - initialize: - method: prior - printing: - silence: false - search: - disp: 1 - ftol: 0.0001 - full_output: 0 - maxfun: null - maxiter: null - retall: 0 - xtol: 0.0001 - updates: - iterations_per_update: 11 - log_every_update: 1 - model_results_every_update: 1 - remove_state_files_at_end: true - visualize_every_update: 1 Drawer: initialize: ball_lower_limit: 0.49 diff --git a/test_autofit/config/non_linear/mock.yaml b/test_autofit/config/non_linear/mock.yaml index 87ba40e65..7b78ee760 100644 --- a/test_autofit/config/non_linear/mock.yaml +++ b/test_autofit/config/non_linear/mock.yaml @@ -1,4 +1,4 @@ -MockOptimizer: +MockMLE: initialize: method: prior printing: diff --git a/test_autofit/database/test_search.py b/test_autofit/database/test_search.py index 5f14e3dbb..d1873f9d0 100644 --- a/test_autofit/database/test_search.py +++ b/test_autofit/database/test_search.py @@ -4,17 +4,17 @@ def test_is_database_paths(session): - optimizer = af.m.MockOptimizer(session=session) - assert isinstance(optimizer.paths, af.DatabasePaths) + mle = af.m.MockMLE(session=session) + assert isinstance(mle.paths, af.DatabasePaths) # noinspection PyUnresolvedReferences - assert optimizer.paths.save_all_samples is False + assert mle.paths.save_all_samples is False @pytest.mark.parametrize("save_all_samples", [True, False]) def test_save_all_samples_boolean(session, save_all_samples): - optimizer = af.m.MockOptimizer(session=session, save_all_samples=save_all_samples) + mle = af.m.MockMLE(session=session, save_all_samples=save_all_samples) # noinspection PyUnresolvedReferences 
- assert optimizer.paths.save_all_samples is save_all_samples + assert mle.paths.save_all_samples is save_all_samples def test_unique_tag(session): @@ -23,11 +23,11 @@ def test_unique_tag(session): unique_tag = "unique" - optimizer = af.m.MockOptimizer(session=session, unique_tag=unique_tag) + mle = af.m.MockMLE(session=session, unique_tag=unique_tag) - assert optimizer.paths.unique_tag == unique_tag + assert mle.paths.unique_tag == unique_tag - optimizer.fit(model, analysis) + mle.fit(model, analysis) fit = session.query(af.db.Fit).one() diff --git a/test_autofit/graphical/gaussian/test_optimizer.py b/test_autofit/graphical/gaussian/test_optimizer.py index ea78e3d02..b7214c014 100644 --- a/test_autofit/graphical/gaussian/test_optimizer.py +++ b/test_autofit/graphical/gaussian/test_optimizer.py @@ -56,8 +56,8 @@ def test_optimisation(self, factor_model, laplace, dynesty): @pytest.mark.filterwarnings('ignore::RuntimeWarning') def test_null_paths(self, factor_model): - optimizer = af.DynestyStatic(maxcall=10) - result, status = optimizer.optimise( + search = af.DynestyStatic(maxcall=10) + result, status = search.optimise( factor_model.mean_field_approximation().factor_approximation(factor_model) ) diff --git a/test_autofit/graphical/hierarchical/test_optimise.py b/test_autofit/graphical/hierarchical/test_optimise.py index 06370f831..f500029ae 100644 --- a/test_autofit/graphical/hierarchical/test_optimise.py +++ b/test_autofit/graphical/hierarchical/test_optimise.py @@ -9,9 +9,9 @@ def make_factor(hierarchical_factor): def test_optimise(factor): - optimizer = af.DynestyStatic(maxcall=100, dynamic_delta=False, delta=0.1,) + search = af.DynestyStatic(maxcall=100, dynamic_delta=False, delta=0.1,) - _, status = optimizer.optimise( + _, status = search.optimise( factor.mean_field_approximation().factor_approximation(factor) ) assert status diff --git a/test_autofit/non_linear/grid/conftest.py b/test_autofit/non_linear/grid/conftest.py index a7f68379c..93e05fbb8 100644 --- 
a/test_autofit/non_linear/grid/conftest.py +++ b/test_autofit/non_linear/grid/conftest.py @@ -26,7 +26,7 @@ def make_sample_name_paths(): @pytest.fixture(name="grid_search_10_result") def make_grid_search_10_result(mapper, sample_name_paths): grid_search = af.SearchGridSearch( - search=af.m.MockOptimizer( + search=af.m.MockMLE( samples_summary=MockSamplesSummary( model=mapper, median_pdf_sample=Sample( diff --git a/test_autofit/non_linear/grid/test_optimizer_grid_search.py b/test_autofit/non_linear/grid/test_optimizer_grid_search.py index c407c6422..129177d2a 100644 --- a/test_autofit/non_linear/grid/test_optimizer_grid_search.py +++ b/test_autofit/non_linear/grid/test_optimizer_grid_search.py @@ -142,14 +142,14 @@ def test_raises_exception_for_bad_limits(self, grid_search, mapper): @pytest.fixture(name="grid_search_05") def make_grid_search_05(): - search = af.SearchGridSearch(search=af.m.MockOptimizer(), number_of_steps=2) + search = af.SearchGridSearch(search=af.m.MockMLE(), number_of_steps=2) search.search.paths = af.DirectoryPaths(name="sample_name") return search @pytest.fixture(autouse=True) def empty_args(): - af.m.MockOptimizer.init_args = list() + af.m.MockMLE.init_args = list() def test_csv_headers(grid_search_10_result, sample_name_paths): diff --git a/test_autofit/non_linear/grid/test_paths/test_database_run.py b/test_autofit/non_linear/grid/test_paths/test_database_run.py index b98434f4d..602b5d7e3 100644 --- a/test_autofit/non_linear/grid/test_paths/test_database_run.py +++ b/test_autofit/non_linear/grid/test_paths/test_database_run.py @@ -8,7 +8,7 @@ ) def make_search(session): return af.SearchGridSearch( - search=af.m.MockOptimizer( + search=af.m.MockMLE( session=session ), number_of_steps=2 diff --git a/test_autofit/non_linear/grid/test_paths/test_indicators.py b/test_autofit/non_linear/grid/test_paths/test_indicators.py index a8d5f112b..c9b9696c8 100644 --- a/test_autofit/non_linear/grid/test_paths/test_indicators.py +++ 
b/test_autofit/non_linear/grid/test_paths/test_indicators.py @@ -24,7 +24,7 @@ def make_database_parent_search(session): def _make_grid_search(mapper, session=None): search = af.SearchGridSearch( - search=af.m.MockOptimizer(session=session), number_of_steps=2 + search=af.m.MockMLE(session=session), number_of_steps=2 ) search.fit( model=mapper, diff --git a/test_autofit/non_linear/test_regression.py b/test_autofit/non_linear/test_regression.py index 82591b64d..64876ce63 100644 --- a/test_autofit/non_linear/test_regression.py +++ b/test_autofit/non_linear/test_regression.py @@ -14,18 +14,18 @@ def test_no_priors(): search.fit(model, af.Analysis()) -@pytest.fixture(name="optimizer") -def make_optimizer(): +@pytest.fixture(name="search") +def make_search(): return af.DynestyStatic("name") -def test_serialize_optimiser(optimizer): - optimizer = pickle.loads(pickle.dumps(optimizer)) - assert optimizer.name == "name" +def test_serialize_optimiser(search): + search = pickle.loads(pickle.dumps(search)) + assert search.name == "name" -def test_serialize_grid_search(optimizer): - grid_search = af.SearchGridSearch(optimizer) +def test_serialize_grid_search(search): + grid_search = af.SearchGridSearch(search) assert grid_search.logger.name == "GridSearch (name)" assert "logger" not in grid_search.__getstate__() From e13aa37e58144472d0ab77c29984376982093cfe Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 20:16:42 +0100 Subject: [PATCH 041/149] undo hack --- autofit/non_linear/paths/directory.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/autofit/non_linear/paths/directory.py b/autofit/non_linear/paths/directory.py index 90599ebc2..3dd344e4f 100644 --- a/autofit/non_linear/paths/directory.py +++ b/autofit/non_linear/paths/directory.py @@ -348,13 +348,16 @@ def save_all(self, search_config_dict=None, info=None): VisualiseGraph( model=self.model, ).save(str(self.output_path / "model_graph.html")) + if info: 
self.save_json("info", info) - try: - self.save_json("search", to_dict(self.search)) - except TypeError: - pass + + # try: + # self.save_json("search", to_dict(self.search)) + # except TypeError: + # pass + self.save_json("search", to_dict(self.search)) try: info_start = self.search.initializer.info_from_model(model=self.model) self._save_model_start_point(info=info_start) From 50809c1e8c21212e6a6ada587cf5cfd69819443f Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 31 Jul 2024 20:20:56 +0100 Subject: [PATCH 042/149] uncommented code --- autofit/non_linear/paths/directory.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/autofit/non_linear/paths/directory.py b/autofit/non_linear/paths/directory.py index 3dd344e4f..6f7d429b2 100644 --- a/autofit/non_linear/paths/directory.py +++ b/autofit/non_linear/paths/directory.py @@ -351,11 +351,6 @@ def save_all(self, search_config_dict=None, info=None): if info: self.save_json("info", info) - - # try: - # self.save_json("search", to_dict(self.search)) - # except TypeError: - # pass self.save_json("search", to_dict(self.search)) try: From fab9d4541a93789be4a4e4d2b62210376b06c89d Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 2 Aug 2024 13:11:46 +0100 Subject: [PATCH 043/149] perturbation -> pertrub --- autofit/non_linear/grid/sensitivity/job.py | 2 +- test_autofit/non_linear/grid/test_sensitivity/conftest.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index c8f1b2ca3..c5089eb0f 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -114,7 +114,7 @@ def perform(self) -> JobResult: ) perturb_model = copy(self.model) - perturb_model.perturbation = self.perturb_model + perturb_model.perturb = self.perturb_model perturb_result = self.perturb_fit_cls( model=perturb_model, diff --git 
a/test_autofit/non_linear/grid/test_sensitivity/conftest.py b/test_autofit/non_linear/grid/test_sensitivity/conftest.py index e807b2a16..f808ffc39 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/conftest.py +++ b/test_autofit/non_linear/grid/test_sensitivity/conftest.py @@ -16,8 +16,8 @@ def __init__(self): def __call__(self, instance: af.ModelInstance, simulate_path: Optional[str]): image = instance.gaussian(x) - if hasattr(instance, "perturbation"): - image += instance.perturbation(x) + if hasattr(instance, "perturb"): + image += instance.perturb(x) return image From 21993b2b98e928794b33a3c67e6b915b072ac084 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 2 Aug 2024 14:04:41 +0100 Subject: [PATCH 044/149] fix unit test --- autofit/non_linear/grid/sensitivity/__init__.py | 1 + test_autofit/non_linear/grid/test_sensitivity/test_results.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 3efd447c5..5849fd200 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -127,6 +127,7 @@ def run(self) -> SensitivityResult: results = sorted(results) os.makedirs(self.paths.output_path, exist_ok=True) + with open(self.results_path, "w+") as f: writer = csv.writer(f) writer.writerow(headers) diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_results.py b/test_autofit/non_linear/grid/test_sensitivity/test_results.py index 343adbfc8..94f4f4e1a 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_results.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_results.py @@ -39,7 +39,8 @@ def test_result(job_result): result = SensitivityResult( samples=[job_result.result.samples.summary()], perturb_samples=[job_result.perturb_result.samples.summary()], - shape=(1,) + shape=(1,), + physical_values=[[0.0]], ) assert result.log_likelihoods_base == [1.0] 
From 2f4dae5e99069675c8ef3a77981e32ca1705b8fd Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 5 Aug 2024 09:54:35 +0100 Subject: [PATCH 045/149] naive implementation of physical_centres_from --- autofit/non_linear/grid/grid_search/result.py | 22 +++++++- .../grid/test_quantities_for_path.py | 51 +++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 test_autofit/non_linear/grid/test_quantities_for_path.py diff --git a/autofit/non_linear/grid/grid_search/result.py b/autofit/non_linear/grid/grid_search/result.py index 3b1bef042..95d2270c1 100644 --- a/autofit/non_linear/grid/grid_search/result.py +++ b/autofit/non_linear/grid/grid_search/result.py @@ -41,6 +41,25 @@ def __init__( self.parent = parent + @as_grid_list + def physical_centres_lists_from(self, path: str): + """ + Get the physical centres of the grid search from a path to an attribute of the instance in the samples. + + Parameters + ---------- + path + The path to the attribute to get from the instance + + Returns + ------- + A list of lists of physical values + """ + return [ + samples.model.object_for_path(path.split(".")).mean + for samples in self.samples + ] + @property @as_grid_list def physical_lower_limits_lists(self) -> GridList: @@ -209,7 +228,8 @@ def attribute_grid(self, attribute_path: Union[str, Iterable[str]]) -> GridList: @as_grid_list def log_likelihoods( - self, relative_to_value: float = 0.0, + self, + relative_to_value: float = 0.0, ) -> GridList: """ The maximum log likelihood of every grid search on a NumPy array whose shape is the native dimensions of the diff --git a/test_autofit/non_linear/grid/test_quantities_for_path.py b/test_autofit/non_linear/grid/test_quantities_for_path.py new file mode 100644 index 000000000..bdc512ff9 --- /dev/null +++ b/test_autofit/non_linear/grid/test_quantities_for_path.py @@ -0,0 +1,51 @@ +import autofit as af + + +def test_physical_centres_from(): + model = af.Model( + af.Gaussian, + ) + grid_priors = [model.centre] 
+ lower_limits_lists = [[0.0], [2.0]] + + sample = af.Sample( + 1.0, + 1.0, + 1.0, + { + "centre": 1.0, + "normalization": 2.0, + "sigma": 3.0, + }, + ) + + samples = [ + af.Samples( + model=af.Model( + af.Gaussian, + centre=af.UniformPrior( + lower_limit=0.0, + upper_limit=2.0, + ), + ), + sample_list=[sample], + ), + af.Samples( + model=af.Model( + af.Gaussian, + centre=af.UniformPrior( + lower_limit=2.0, + upper_limit=4.0, + ), + ), + sample_list=[sample], + ), + ] + + result = af.GridSearchResult( + samples=samples, + lower_limits_lists=lower_limits_lists, + grid_priors=grid_priors, + ) + + assert result.physical_centres_lists_from("centre") == [[1.0], [3.0]] From a71bc25cfaf3edc8333077648ea95c628b554d0d Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 5 Aug 2024 09:56:32 +0100 Subject: [PATCH 046/149] revieq --- autofit/non_linear/initializer.py | 2 +- autofit/non_linear/search/abstract_search.py | 5 +---- autofit/non_linear/search/mle/bfgs/search.py | 1 - 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/autofit/non_linear/initializer.py b/autofit/non_linear/initializer.py index c9415c0c9..a0966e531 100644 --- a/autofit/non_linear/initializer.py +++ b/autofit/non_linear/initializer.py @@ -261,7 +261,7 @@ def info_from_model(self, model : AbstractPriorModel) -> str: except KeyError: - info += f"{key} : {prior})\n" + info += f"{key}: {prior})\n" return info diff --git a/autofit/non_linear/search/abstract_search.py b/autofit/non_linear/search/abstract_search.py index 6154bbc62..f9e63a104 100644 --- a/autofit/non_linear/search/abstract_search.py +++ b/autofit/non_linear/search/abstract_search.py @@ -1090,10 +1090,7 @@ def perform_visualization( self.logger.debug("Visualizing") - if paths_override is None: - paths = self.paths - else: - paths = paths_override + paths = paths_override or self.paths if instance is None and samples_summary is None: raise AssertionError( diff --git a/autofit/non_linear/search/mle/bfgs/search.py 
b/autofit/non_linear/search/mle/bfgs/search.py index 8e5ac7d3d..e620fec36 100644 --- a/autofit/non_linear/search/mle/bfgs/search.py +++ b/autofit/non_linear/search/mle/bfgs/search.py @@ -17,7 +17,6 @@ class AbstractBFGS(AbstractMLE): - __identifier_fields__ = () method = None From 558c4405b4ea4167aeb202f11a4a713b19eba1ae Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 5 Aug 2024 09:58:53 +0100 Subject: [PATCH 047/149] review complete --- test_autofit/non_linear/test_initializer.py | 92 +++++++-------------- 1 file changed, 32 insertions(+), 60 deletions(-) diff --git a/test_autofit/non_linear/test_initializer.py b/test_autofit/non_linear/test_initializer.py index ec37f1c96..4d117ebe2 100644 --- a/test_autofit/non_linear/test_initializer.py +++ b/test_autofit/non_linear/test_initializer.py @@ -17,7 +17,8 @@ def __call__(self, parameters): return self.figure_of_merit -def test__priors__samples_from_model(): +@pytest.fixture +def model_and_samples(): model = af.Model(af.m.MockClassx4) model.one = af.UniformPrior(lower_limit=0.099, upper_limit=0.101) model.two = af.UniformPrior(lower_limit=0.199, upper_limit=0.201) @@ -26,71 +27,42 @@ def test__priors__samples_from_model(): initializer = af.InitializerPrior() - ( - unit_parameter_lists, - parameter_lists, - figure_of_merit_list, - ) = initializer.samples_from_model( - total_points=2, - model=model, - fitness=MockFitness(), - paths=af.DirectoryPaths(), - ) - - assert 0.0 < unit_parameter_lists[0][0] < 1.0 - assert 0.0 < unit_parameter_lists[1][0] < 1.0 - assert 0.0 < unit_parameter_lists[0][1] < 1.0 - assert 0.0 < unit_parameter_lists[1][1] < 1.0 - assert 0.0 < unit_parameter_lists[0][2] < 1.0 - assert 0.0 < unit_parameter_lists[1][2] < 1.0 - assert 0.0 < unit_parameter_lists[0][3] < 1.0 - assert 0.0 < unit_parameter_lists[1][3] < 1.0 - - assert 0.099 < parameter_lists[0][0] < 0.101 - assert 0.099 < parameter_lists[1][0] < 0.101 - assert 0.199 < parameter_lists[0][1] < 0.201 - assert 0.199 < 
parameter_lists[1][1] < 0.201 - assert 0.299 < parameter_lists[0][2] < 0.301 - assert 0.299 < parameter_lists[1][2] < 0.301 - assert 0.399 < parameter_lists[0][3] < 0.401 - assert 0.399 < parameter_lists[1][3] < 0.401 -def test__priors__samples_from_model(): - model = af.Model(af.m.MockClassx4) - model.one = af.UniformPrior(lower_limit=0.099, upper_limit=0.101) - model.two = af.UniformPrior(lower_limit=0.199, upper_limit=0.201) - model.three = af.UniformPrior(lower_limit=0.299, upper_limit=0.301) - model.four = af.UniformPrior(lower_limit=0.399, upper_limit=0.401) - - initializer = af.InitializerPrior() - - ( - unit_parameter_lists, - parameter_lists, - figure_of_merit_list, - ) = initializer.samples_from_model( + unit_parameter_lists, parameter_lists, _ = initializer.samples_from_model( total_points=2, model=model, fitness=MockFitness(), paths=af.DirectoryPaths(), ) - assert 0.0 < unit_parameter_lists[0][0] < 1.0 - assert 0.0 < unit_parameter_lists[1][0] < 1.0 - assert 0.0 < unit_parameter_lists[0][1] < 1.0 - assert 0.0 < unit_parameter_lists[1][1] < 1.0 - assert 0.0 < unit_parameter_lists[0][2] < 1.0 - assert 0.0 < unit_parameter_lists[1][2] < 1.0 - assert 0.0 < unit_parameter_lists[0][3] < 1.0 - assert 0.0 < unit_parameter_lists[1][3] < 1.0 - - assert 0.099 < parameter_lists[0][0] < 0.101 - assert 0.099 < parameter_lists[1][0] < 0.101 - assert 0.199 < parameter_lists[0][1] < 0.201 - assert 0.199 < parameter_lists[1][1] < 0.201 - assert 0.299 < parameter_lists[0][2] < 0.301 - assert 0.299 < parameter_lists[1][2] < 0.301 - assert 0.399 < parameter_lists[0][3] < 0.401 - assert 0.399 < parameter_lists[1][3] < 0.401 + return unit_parameter_lists, parameter_lists + +@pytest.mark.parametrize("index, param_index, lower, upper", [ + (0, 0, 0.0, 1.0), + (1, 0, 0.0, 1.0), + (0, 1, 0.0, 1.0), + (1, 1, 0.0, 1.0), + (0, 2, 0.0, 1.0), + (1, 2, 0.0, 1.0), + (0, 3, 0.0, 1.0), + (1, 3, 0.0, 1.0), +]) +def test_unit_parameter_lists(model_and_samples, index, param_index, lower, 
upper): + unit_parameter_lists, _ = model_and_samples + assert lower < unit_parameter_lists[index][param_index] < upper + +@pytest.mark.parametrize("index, param_index, lower, upper", [ + (0, 0, 0.099, 0.101), + (1, 0, 0.099, 0.101), + (0, 1, 0.199, 0.201), + (1, 1, 0.199, 0.201), + (0, 2, 0.299, 0.301), + (1, 2, 0.299, 0.301), + (0, 3, 0.399, 0.401), + (1, 3, 0.399, 0.401), +]) +def test_parameter_lists(model_and_samples, index, param_index, lower, upper): + _, parameter_lists = model_and_samples + assert lower < parameter_lists[index][param_index] < upper def test__priors__samples_from_model__raise_exception_if_all_likelihoods_identical(): From 6e3a762e65488c569624a266cfbbbe8e8bc006ba Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 5 Aug 2024 10:40:35 +0100 Subject: [PATCH 048/149] illustrating two separate physical_centres --- .../grid/test_quantities_for_path.py | 54 ++++++++++++------- 1 file changed, 35 insertions(+), 19 deletions(-) diff --git a/test_autofit/non_linear/grid/test_quantities_for_path.py b/test_autofit/non_linear/grid/test_quantities_for_path.py index bdc512ff9..2cb1a57e2 100644 --- a/test_autofit/non_linear/grid/test_quantities_for_path.py +++ b/test_autofit/non_linear/grid/test_quantities_for_path.py @@ -1,12 +1,20 @@ +import pytest + import autofit as af -def test_physical_centres_from(): +@pytest.fixture(name="result") +def make_result(): model = af.Model( af.Gaussian, ) - grid_priors = [model.centre] - lower_limits_lists = [[0.0], [2.0]] + grid_priors = [model.centre, model.normalization] + lower_limits_lists = [ + [0.0, 1.0], + [0.0, 2.0], + [2.0, 1.0], + [2.0, 2.0], + ] sample = af.Sample( 1.0, @@ -19,33 +27,41 @@ def test_physical_centres_from(): }, ) - samples = [ - af.Samples( + def make_samples(centre, normalization): + return af.Samples( model=af.Model( af.Gaussian, centre=af.UniformPrior( - lower_limit=0.0, - upper_limit=2.0, + lower_limit=centre, + upper_limit=centre + 2.0, ), - ), - sample_list=[sample], - ), - af.Samples( - 
model=af.Model( - af.Gaussian, - centre=af.UniformPrior( - lower_limit=2.0, - upper_limit=4.0, + normalization=af.UniformPrior( + lower_limit=normalization, + upper_limit=normalization + 1.0, ), ), sample_list=[sample], - ), + ) + + samples = [ + make_samples(centre, normalisation) + for centre, normalisation in lower_limits_lists ] - result = af.GridSearchResult( + return af.GridSearchResult( samples=samples, lower_limits_lists=lower_limits_lists, grid_priors=grid_priors, ) - assert result.physical_centres_lists_from("centre") == [[1.0], [3.0]] + +@pytest.mark.parametrize( + "name, expected", + [ + ("centre", [1.0, 1.0, 3.0, 3.0]), + ("normalization", [1.5, 2.5, 1.5, 2.5]), + ], +) +def test_physical_centres_from(result, name, expected): + assert result.physical_centres_lists_from(name) == expected + assert result.shape == (2, 2) From 24e1b152b3d2a619493106e2a6f2901134eb893a Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 5 Aug 2024 11:12:47 +0100 Subject: [PATCH 049/149] also support tuples of results --- autofit/non_linear/grid/grid_search/result.py | 24 ++++++++++++++----- .../grid/test_quantities_for_path.py | 9 +++++++ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/autofit/non_linear/grid/grid_search/result.py b/autofit/non_linear/grid/grid_search/result.py index 95d2270c1..0d7d56dbc 100644 --- a/autofit/non_linear/grid/grid_search/result.py +++ b/autofit/non_linear/grid/grid_search/result.py @@ -1,4 +1,4 @@ -from typing import List, Optional, Union, Iterable +from typing import List, Optional, Union, Iterable, Tuple import numpy as np @@ -42,7 +42,10 @@ def __init__( self.parent = parent @as_grid_list - def physical_centres_lists_from(self, path: str): + def physical_centres_lists_from( + self, + path: Union[str, Tuple[str, ...]], + ) -> GridList: """ Get the physical centres of the grid search from a path to an attribute of the instance in the samples. 
@@ -55,10 +58,19 @@ def physical_centres_lists_from(self, path: str): ------- A list of lists of physical values """ - return [ - samples.model.object_for_path(path.split(".")).mean - for samples in self.samples - ] + if isinstance(path, str): + path = path.split(".") + + def value_for_samples(samples): + return samples.model.object_for_path(path).mean + + else: + paths = [p.split(".") for p in path] + + def value_for_samples(samples): + return tuple(samples.model.object_for_path(p).mean for p in paths) + + return [value_for_samples(samples) for samples in self.samples] @property @as_grid_list diff --git a/test_autofit/non_linear/grid/test_quantities_for_path.py b/test_autofit/non_linear/grid/test_quantities_for_path.py index 2cb1a57e2..539b35b10 100644 --- a/test_autofit/non_linear/grid/test_quantities_for_path.py +++ b/test_autofit/non_linear/grid/test_quantities_for_path.py @@ -65,3 +65,12 @@ def make_samples(centre, normalization): def test_physical_centres_from(result, name, expected): assert result.physical_centres_lists_from(name) == expected assert result.shape == (2, 2) + + +def test_two_physical_centres(result): + assert result.physical_centres_lists_from(("centre", "normalization")) == [ + (1.0, 1.5), + (1.0, 2.5), + (3.0, 1.5), + (3.0, 2.5), + ] From 40842a06f0b80a200e88218d75f5717b67f7e80b Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 5 Aug 2024 11:18:50 +0100 Subject: [PATCH 050/149] promoted physical_centres_lists_from to abstract class used by both grid and sensitivity results --- autofit/non_linear/grid/grid_search/result.py | 65 ++++++++++--------- autofit/non_linear/grid/sensitivity/result.py | 34 +++++----- 2 files changed, 54 insertions(+), 45 deletions(-) diff --git a/autofit/non_linear/grid/grid_search/result.py b/autofit/non_linear/grid/grid_search/result.py index 0d7d56dbc..785b5990b 100644 --- a/autofit/non_linear/grid/grid_search/result.py +++ b/autofit/non_linear/grid/grid_search/result.py @@ -11,36 +11,11 @@ from 
autofit.non_linear.samples.interface import SamplesInterface -# noinspection PyTypeChecker -class GridSearchResult: - def __init__( - self, - samples: List[SamplesInterface], - lower_limits_lists: Union[List, GridList], - grid_priors: List[Prior], - parent: Optional[NonLinearSearch] = None, - ): - """ - The sample of a grid search. - - Parameters - ---------- - samples - The samples of the non linear optimizations performed at each grid step - lower_limits_lists - A list of lists of values representing the lower bounds of the grid searched values at each step - """ - self.no_dimensions = len(lower_limits_lists[0]) - self.no_steps = len(lower_limits_lists) - - self.lower_limits_lists = GridList(lower_limits_lists, self.shape) - self.samples = GridList(samples, self.shape) if samples is not None else None - self.side_length = int(self.no_steps ** (1 / self.no_dimensions)) - self.step_size = 1 / self.side_length - self.grid_priors = grid_priors - - self.parent = parent +class AbstractGridSearchResult: + def __init__(self, samples: GridList): + self.samples = samples + # noinspection PyTypeChecker @as_grid_list def physical_centres_lists_from( self, @@ -72,6 +47,38 @@ def value_for_samples(samples): return [value_for_samples(samples) for samples in self.samples] + +# noinspection PyTypeChecker +class GridSearchResult(AbstractGridSearchResult): + def __init__( + self, + samples: List[SamplesInterface], + lower_limits_lists: Union[List, GridList], + grid_priors: List[Prior], + parent: Optional[NonLinearSearch] = None, + ): + """ + The sample of a grid search. 
+ + Parameters + ---------- + samples + The samples of the non linear optimizations performed at each grid step + lower_limits_lists + A list of lists of values representing the lower bounds of the grid searched values at each step + """ + self.no_dimensions = len(lower_limits_lists[0]) + self.no_steps = len(lower_limits_lists) + + self.lower_limits_lists = GridList(lower_limits_lists, self.shape) + self.side_length = int(self.no_steps ** (1 / self.no_dimensions)) + self.step_size = 1 / self.side_length + self.grid_priors = grid_priors + + self.parent = parent + + super().__init__(GridList(samples, self.shape) if samples is not None else None) + @property @as_grid_list def physical_lower_limits_lists(self) -> GridList: diff --git a/autofit/non_linear/grid/sensitivity/result.py b/autofit/non_linear/grid/sensitivity/result.py index 2b1a78093..eda2f452b 100644 --- a/autofit/non_linear/grid/sensitivity/result.py +++ b/autofit/non_linear/grid/sensitivity/result.py @@ -1,15 +1,17 @@ from typing import List, Tuple from autofit.non_linear.grid.grid_list import GridList, as_grid_list +from autofit.non_linear.grid.grid_search.result import AbstractGridSearchResult from autofit.non_linear.samples.interface import SamplesInterface + # noinspection PyTypeChecker -class SensitivityResult: +class SensitivityResult(AbstractGridSearchResult): def __init__( - self, - samples: List[SamplesInterface], - perturb_samples: List[SamplesInterface], - shape : Tuple[int, ...] + self, + samples: List[SamplesInterface], + perturb_samples: List[SamplesInterface], + shape: Tuple[int, ...], ): """ The result of a sensitivity mapping @@ -21,7 +23,7 @@ def __init__( shape The shape of the sensitivity mapping grid. 
""" - self.samples = GridList(samples, shape) + super().__init__(GridList(samples, shape)) self.perturb_samples = GridList(perturb_samples, shape) self.shape = shape @@ -57,9 +59,10 @@ def log_evidence_differences(self) -> GridList: The log evidence differences between the base and perturbed models """ return [ - log_evidence_perturbed - log_evidence_base for - log_evidence_perturbed, log_evidence_base in - zip(self.log_evidences_perturbed, self.log_evidences_base) + log_evidence_perturbed - log_evidence_base + for log_evidence_perturbed, log_evidence_base in zip( + self.log_evidences_perturbed, self.log_evidences_base + ) ] @property @@ -85,13 +88,15 @@ def log_likelihood_differences(self) -> GridList: The log likelihood differences between the base and perturbed models """ return [ - log_likelihood_perturbed - log_likelihood_base for - log_likelihood_perturbed, log_likelihood_base in - zip(self.log_likelihoods_perturbed, self.log_likelihoods_base) + log_likelihood_perturbed - log_likelihood_base + for log_likelihood_perturbed, log_likelihood_base in zip( + self.log_likelihoods_perturbed, self.log_likelihoods_base + ) ] def figure_of_merits( - self, use_log_evidences: bool, + self, + use_log_evidences: bool, ) -> GridList: """ Convenience method to get either the log likelihoods difference or log evidence difference of the grid search. 
@@ -105,6 +110,3 @@ def figure_of_merits( if use_log_evidences: return self.log_evidence_differences return self.log_likelihood_differences - - - From 07989f3f080d1bcd178d41736c24cdeb0f12c1db Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 5 Aug 2024 15:53:20 +0100 Subject: [PATCH 051/149] perturbed centres --- autofit/non_linear/grid/grid_search/result.py | 23 +++++++++++++- autofit/non_linear/grid/sensitivity/result.py | 18 ++++++++++- .../grid/test_sensitivity/test_results.py | 31 ++++++++++++++----- 3 files changed, 62 insertions(+), 10 deletions(-) diff --git a/autofit/non_linear/grid/grid_search/result.py b/autofit/non_linear/grid/grid_search/result.py index 785b5990b..d85dcab27 100644 --- a/autofit/non_linear/grid/grid_search/result.py +++ b/autofit/non_linear/grid/grid_search/result.py @@ -24,6 +24,27 @@ def physical_centres_lists_from( """ Get the physical centres of the grid search from a path to an attribute of the instance in the samples. + Parameters + ---------- + path + The path to the attribute to get from the instance + + Returns + ------- + A list of lists of physical values + """ + return self._physical_centres_lists_from(self.samples, path) + + # noinspection PyTypeChecker + @as_grid_list + def _physical_centres_lists_from( + self, + samples: GridList, + path: Union[str, Tuple[str, ...]], + ) -> GridList: + """ + Get the physical centres of the grid search from a path to an attribute of the instance in the samples. 
+ Parameters ---------- path @@ -45,7 +66,7 @@ def value_for_samples(samples): def value_for_samples(samples): return tuple(samples.model.object_for_path(p).mean for p in paths) - return [value_for_samples(samples) for samples in self.samples] + return [value_for_samples(samples) for samples in samples] # noinspection PyTypeChecker diff --git a/autofit/non_linear/grid/sensitivity/result.py b/autofit/non_linear/grid/sensitivity/result.py index eda2f452b..4d9a27083 100644 --- a/autofit/non_linear/grid/sensitivity/result.py +++ b/autofit/non_linear/grid/sensitivity/result.py @@ -1,4 +1,4 @@ -from typing import List, Tuple +from typing import List, Tuple, Union from autofit.non_linear.grid.grid_list import GridList, as_grid_list from autofit.non_linear.grid.grid_search.result import AbstractGridSearchResult @@ -27,6 +27,22 @@ def __init__( self.perturb_samples = GridList(perturb_samples, shape) self.shape = shape + def perturbed_physical_centres_list_from( + self, path: Union[str, Tuple[str, ...]] + ) -> List: + """ + Returns the physical centres of the perturbed model for each sensitivity fit + + Parameters + ---------- + path + The path to the physical centres in the samples + """ + return self._physical_centres_lists_from( + self.perturb_samples, + path, + ) + def __getitem__(self, item): return self.samples[item] diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_results.py b/test_autofit/non_linear/grid/test_sensitivity/test_results.py index 343adbfc8..b531d2aa2 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_results.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_results.py @@ -2,18 +2,25 @@ from autofit.non_linear.grid.sensitivity.job import JobResult from autofit.non_linear.grid.sensitivity.result import SensitivityResult +import autofit as af class Samples: - def __init__(self, log_likelihood): self.log_likelihood = log_likelihood + self.model = af.Model( + af.Gaussian, + centre=af.UniformPrior( + 0.0, + 1.0, + ), + ) 
def summary(self): return self -class Result: +class Result: def __init__(self, samples): self.samples = samples @@ -35,13 +42,21 @@ def test_job_result(job_result): assert job_result.log_likelihood_increase == 1.0 -def test_result(job_result): - result = SensitivityResult( +@pytest.fixture(name="sensitivity_result") +def make_sensitivity_result(job_result): + return SensitivityResult( samples=[job_result.result.samples.summary()], perturb_samples=[job_result.perturb_result.samples.summary()], - shape=(1,) + shape=(1,), ) - assert result.log_likelihoods_base == [1.0] - assert result.log_likelihoods_perturbed == [2.0] - assert result.log_likelihood_differences == [1.0] + +def test_result(sensitivity_result): + assert sensitivity_result.log_likelihoods_base == [1.0] + assert sensitivity_result.log_likelihoods_perturbed == [2.0] + assert sensitivity_result.log_likelihood_differences == [1.0] + + +def test_physical_centres(sensitivity_result): + assert sensitivity_result.physical_centres_lists_from("centre") == [0.5] + assert sensitivity_result.perturbed_physical_centres_list_from("centre") == [0.5] From 4a4a272902006b71fa058f81b59186d45fcd3979 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Mon, 5 Aug 2024 16:41:01 +0100 Subject: [PATCH 052/149] update typing --- autofit/non_linear/grid/sensitivity/result.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/non_linear/grid/sensitivity/result.py b/autofit/non_linear/grid/sensitivity/result.py index 4d9a27083..52b5d4498 100644 --- a/autofit/non_linear/grid/sensitivity/result.py +++ b/autofit/non_linear/grid/sensitivity/result.py @@ -29,7 +29,7 @@ def __init__( def perturbed_physical_centres_list_from( self, path: Union[str, Tuple[str, ...]] - ) -> List: + ) -> GridList: """ Returns the physical centres of the perturbed model for each sensitivity fit From 4930b87b277056783c1afbbae6a3e3d593443498 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 7 Aug 2024 09:22:40 +0100 Subject: [PATCH 
053/149] cast to list before iterating f_locals --- autofit/mapper/prior/arithmetic/compound.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/mapper/prior/arithmetic/compound.py b/autofit/mapper/prior/arithmetic/compound.py index 98952fc74..0355c36fb 100644 --- a/autofit/mapper/prior/arithmetic/compound.py +++ b/autofit/mapper/prior/arithmetic/compound.py @@ -17,7 +17,7 @@ def retrieve_name(var): first_name = None frame = inspect.currentframe() while frame is not None: - for name, value in frame.f_locals.items(): + for name, value in list(frame.f_locals.items()): if var is value: first_name = name frame = frame.f_back From 75d8a50a6493f54ff46fa033ceed93698f0771b8 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 7 Aug 2024 09:25:22 +0100 Subject: [PATCH 054/149] fix test --- test_autofit/serialise/test_samples.py | 43 +++++++++++++++++++------- 1 file changed, 32 insertions(+), 11 deletions(-) diff --git a/test_autofit/serialise/test_samples.py b/test_autofit/serialise/test_samples.py index ccb8fa2b5..6f7331d03 100644 --- a/test_autofit/serialise/test_samples.py +++ b/test_autofit/serialise/test_samples.py @@ -45,13 +45,14 @@ def test_summary(summary, model, sample): assert summary.model is model assert summary.max_log_likelihood_sample == sample + @pytest.fixture(name="summary_dict") def make_summary_dict(): return { "type": "instance", "class_path": "autofit.non_linear.samples.summary.SamplesSummary", "arguments": { - "max_log_likelihood_sample": { + "median_pdf_sample": { "type": "instance", "class_path": "autofit.non_linear.samples.sample.Sample", "arguments": { @@ -76,7 +77,28 @@ def make_summary_dict(): {"type": "tuple", "values": [2.0, 6.0]}, ], }, - "median_pdf_sample": { + "model": { + "class_path": "autofit.example.model.Gaussian", + "type": "model", + "arguments": { + "centre": { + "lower_limit": 0.0, + "upper_limit": 1.0, + "type": "Uniform", + }, + "normalization": { + "lower_limit": 0.0, + "upper_limit": 1.0, + "type": 
"Uniform", + }, + "sigma": { + "lower_limit": 0.0, + "upper_limit": 1.0, + "type": "Uniform", + }, + }, + }, + "max_log_likelihood_sample": { "type": "instance", "class_path": "autofit.non_linear.samples.sample.Sample", "arguments": { @@ -93,14 +115,6 @@ def make_summary_dict(): }, }, }, - "errors_at_sigma_3": { - "type": "list", - "values": [ - {"type": "tuple", "values": [2.0, 0.0]}, - {"type": "tuple", "values": [3.0, 0.0]}, - {"type": "tuple", "values": [4.0, 0.0]}, - ], - }, "values_at_sigma_1": { "type": "list", "values": [ @@ -117,12 +131,19 @@ def make_summary_dict(): {"type": "tuple", "values": [4.0, 0.0]}, ], }, + "errors_at_sigma_3": { + "type": "list", + "values": [ + {"type": "tuple", "values": [2.0, 0.0]}, + {"type": "tuple", "values": [3.0, 0.0]}, + {"type": "tuple", "values": [4.0, 0.0]}, + ], + }, "log_evidence": None, }, } - def test_dict(summary, summary_dict, remove_ids): assert remove_ids(to_dict(summary)) == summary_dict From 839d7bb805ff4f1e984bf42335069e93146ef1f8 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 7 Aug 2024 09:56:25 +0100 Subject: [PATCH 055/149] fix --- autofit/non_linear/fitness.py | 3 ++- test_autofit/interpolator/test_covariance.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/fitness.py b/autofit/non_linear/fitness.py index 6209b9553..654d5c15c 100644 --- a/autofit/non_linear/fitness.py +++ b/autofit/non_linear/fitness.py @@ -102,12 +102,13 @@ def __init__( self.fom_is_log_likelihood = fom_is_log_likelihood self.resample_figure_of_merit = resample_figure_of_merit self.convert_to_chi_squared = convert_to_chi_squared + self.store_history = store_history + self._log_likelihood_function = None if self.paths is not None: self.check_log_likelihood(fitness=self) - self.store_history = store_history self.parameters_history_list = [] self.log_likelihood_history_list = [] diff --git a/test_autofit/interpolator/test_covariance.py b/test_autofit/interpolator/test_covariance.py 
index a6d264842..3f86d8fef 100644 --- a/test_autofit/interpolator/test_covariance.py +++ b/test_autofit/interpolator/test_covariance.py @@ -71,7 +71,7 @@ def test_interpolate_other_field(interpolator): def test_linear_analysis_for_value(interpolator): analysis = interpolator._analysis_for_value(interpolator.t == 0.5) assert (analysis.x == np.array([0, 1, 2])).all() - assert (analysis.y == np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])).all() + assert (analysis.model_curve == np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])).all() def test_model(interpolator): From 05b711fe69d3918f982e408db8c1e1d87ad98f62 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Aug 2024 09:05:23 +0100 Subject: [PATCH 056/149] handle astropy string annotations --- autofit/mapper/prior_model/prior_model.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 43f2dcc8c..007ef10b5 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -116,7 +116,10 @@ def __init__( annotations = inspect.getfullargspec(cls).annotations for key, value in annotations.items(): if isinstance(value, str): - annotations[key] = getattr(builtins, value) + try: + annotations[key] = getattr(builtins, value) + except AttributeError: + pass except TypeError: annotations = dict() @@ -170,6 +173,9 @@ def __init__( else: annotation = annotations[arg] + if isinstance(annotation, str): + continue + if ( hasattr(annotation, "__origin__") and issubclass( From ba2f1c9bf340ec1ca2998f8e5c6ce5f2a33ab282 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Aug 2024 09:05:46 +0100 Subject: [PATCH 057/149] handle list annotation --- autofit/mapper/prior_model/prior_model.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 007ef10b5..560106b9e 100644 --- 
a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -177,11 +177,15 @@ def __init__( continue if ( - hasattr(annotation, "__origin__") - and issubclass( - annotation.__origin__, collections.abc.Collection + ( + hasattr(annotation, "__origin__") + and issubclass( + annotation.__origin__, collections.abc.Collection + ) ) - ) or isinstance(annotation, collections.abc.Collection): + or isinstance(annotation, collections.abc.Collection) + or issubclass(annotation, collections.abc.Collection) + ): from autofit.mapper.prior_model.collection import Collection value = Collection() From 16594de8203128226cf9d2ea9dd4086ae25f3395 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Aug 2024 09:06:01 +0100 Subject: [PATCH 058/149] astropy when self not seen in constructor (ffs) --- autofit/mapper/prior_model/prior_model.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 560106b9e..174d8ea63 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -289,7 +289,11 @@ def constructor_argument_names(self) -> List[str]: """ if self.cls not in class_args_dict: try: - class_args_dict[self.cls] = inspect.getfullargspec(self.cls).args[1:] + class_args_dict[self.cls] = [ + arg + for arg in inspect.getfullargspec(self.cls).args + if arg != "self" + ] except TypeError: class_args_dict[self.cls] = [] return class_args_dict[self.cls] From 3376b5e10a67bb7c28af0243b3824fdde9d9e2cb Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Aug 2024 09:18:58 +0100 Subject: [PATCH 059/149] use get_type_hints to ensure string type hints are resolved correctly --- autofit/mapper/mock/mock_model.py | 29 +++++++++++++------- autofit/mapper/prior_model/prior_model.py | 11 +------- test_autofit/config/priors/mock_model.yaml | 11 ++++++++ test_autofit/mapper/model/test_regression.py | 7 ++++- 4 files 
changed, 37 insertions(+), 21 deletions(-) diff --git a/autofit/mapper/mock/mock_model.py b/autofit/mapper/mock/mock_model.py index fe8e299ba..f5811973b 100644 --- a/autofit/mapper/mock/mock_model.py +++ b/autofit/mapper/mock/mock_model.py @@ -41,7 +41,7 @@ def __init__(self, one=1, two=2, three=3): class MockClassx2Tuple: def __init__(self, one_tuple=(0.0, 0.0)): """Abstract MockParent, describing an object with y, x cartesian - coordinates """ + coordinates""" self.one_tuple = one_tuple def __eq__(self, other): @@ -94,7 +94,6 @@ def __init__(self, tup=(0.0, 0.0)): class MockOverload: - def __init__(self, one=1.0): self.one = one @@ -112,11 +111,11 @@ def two(self, two): class MockComponents: def __init__( - self, - components_0: list = None, - components_1: list = None, - parameter=None, - **kwargs + self, + components_0: list = None, + components_1: list = None, + parameter=None, + **kwargs ): self.parameter = parameter self.group_0 = components_0 @@ -134,7 +133,7 @@ def __eq__(self, other): class MockChildTuple(MockParent): def __init__(self, tup=(0.0, 0.0)): - """ Generic circular profiles class to contain functions shared by light and + """Generic circular profiles class to contain functions shared by light and mass profiles. Parameters @@ -147,7 +146,7 @@ def __init__(self, tup=(0.0, 0.0)): class MockChildTuplex2(MockChildTuple): def __init__(self, tup=(0.0, 0.0), one=1.0, two=0.0): - """ Generic elliptical profiles class to contain functions shared by light + """Generic elliptical profiles class to contain functions shared by light and mass profiles. Parameters @@ -166,7 +165,7 @@ def __init__(self, tup=(0.0, 0.0), one=1.0, two=0.0): class MockChildTuplex3(MockChildTuple): def __init__(self, tup=(0.0, 0.0), one=1.0, two=0.0, three=0.0): - """ Generic elliptical profiles class to contain functions shared by light + """Generic elliptical profiles class to contain functions shared by light and mass profiles. 
Parameters @@ -182,3 +181,13 @@ def __init__(self, tup=(0.0, 0.0), one=1.0, two=0.0, three=0.0): self.one = one self.two = two self.three = three + + +class Parameter: + def __init__(self, value: float = 0.5): + self.value = value + + +class WithString: + def __init__(self, arg: "Parameter"): + self.arg = arg diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 174d8ea63..ed8619117 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -112,16 +112,7 @@ def __init__( self.cls = cls - try: - annotations = inspect.getfullargspec(cls).annotations - for key, value in annotations.items(): - if isinstance(value, str): - try: - annotations[key] = getattr(builtins, value) - except AttributeError: - pass - except TypeError: - annotations = dict() + annotations = typing.get_type_hints(cls.__init__) try: arg_spec = inspect.getfullargspec(cls) diff --git a/test_autofit/config/priors/mock_model.yaml b/test_autofit/config/priors/mock_model.yaml index 74cbc268a..3c21fcfbf 100644 --- a/test_autofit/config/priors/mock_model.yaml +++ b/test_autofit/config/priors/mock_model.yaml @@ -1,3 +1,14 @@ +Parameter: + value: + gaussian_limits: + lower: 0.0 + upper: 1.0 + lower_limit: 0.0 + type: Uniform + upper_limit: 1.0 + width_modifier: + type: Absolute + value: 0.2 MockChildTuple: tup_0: gaussian_limits: diff --git a/test_autofit/mapper/model/test_regression.py b/test_autofit/mapper/model/test_regression.py index 1955cc30b..fa381fb6a 100644 --- a/test_autofit/mapper/model/test_regression.py +++ b/test_autofit/mapper/model/test_regression.py @@ -5,6 +5,7 @@ import autofit as af from autoconf.exc import ConfigException from autofit.example.model import PhysicalNFW +from autofit.mapper.mock.mock_model import WithString from autofit.mapper.model_object import Identifier @@ -158,7 +159,6 @@ def test_random_instance(model_with_assertion): class TestModel: - __test__ = False def __init__(self, 
items: List[float]): @@ -173,3 +173,8 @@ def test_typing_annotations(): def test_no_default_tuple_priors(): model = af.Model(PhysicalNFW) assert model.prior_count == 6 + + +def test_string_annotation(): + model = af.Model(WithString) + assert model.instance_from_prior_medians().arg.value == 0.5 From a5194076178610d91c69f8e5d62c1fd5b564d534 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Aug 2024 09:31:37 +0100 Subject: [PATCH 060/149] manually computing namespace --- autofit/mapper/prior_model/prior_model.py | 8 +++++--- autofit/mapper/prior_model/util.py | 24 +++++++++++++++++++++++ 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index ed8619117..9b7ac1986 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -1,10 +1,9 @@ -import builtins import collections.abc import copy import inspect import logging -from typing import List import typing +from typing import * from autofit.jax_wrapper import register_pytree_node_class, register_pytree_node @@ -16,6 +15,7 @@ from autofit.mapper.prior.deferred import DeferredInstance from autofit.mapper.prior.tuple_prior import TuplePrior from autofit.mapper.prior_model.abstract import AbstractPriorModel +from autofit.mapper.prior_model.util import gather_namespaces from autofit.tools.namer import namer logger = logging.getLogger(__name__) @@ -112,7 +112,9 @@ def __init__( self.cls = cls - annotations = typing.get_type_hints(cls.__init__) + namespaces = gather_namespaces(cls) + + annotations = typing.get_type_hints(cls.__init__, namespaces, namespaces) try: arg_spec = inspect.getfullargspec(cls) diff --git a/autofit/mapper/prior_model/util.py b/autofit/mapper/prior_model/util.py index b364ce9bf..a28a5f4e4 100644 --- a/autofit/mapper/prior_model/util.py +++ b/autofit/mapper/prior_model/util.py @@ -1,3 +1,7 @@ +import inspect +from typing import Type, Dict +import typing + 
from autofit.mapper.prior_model.attribute_pair import AttributeNameValue @@ -5,3 +9,23 @@ class PriorModelNameValue(AttributeNameValue): @property def prior_model(self): return self.value + + +def gather_namespaces(cls: Type) -> Dict[str, Dict]: + """ + Recursively gather the globals and locals for a given class and its parent classes. + """ + namespaces = {} + + for base in inspect.getmro(cls): + if base is object: + continue # Skip the base object class + + # Fetch the global and local namespaces where the class was defined + module = inspect.getmodule(base) + if module: + namespaces.update(vars(module)) + + namespaces.update(vars(typing)) + + return namespaces From 3cd3a7c88951948b4ce0363df1e7ca02e7f8be13 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Aug 2024 09:34:28 +0100 Subject: [PATCH 061/149] handle function case --- autofit/mapper/prior_model/prior_model.py | 6 +++++- autofit/mapper/prior_model/util.py | 18 ++++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 9b7ac1986..3608e10b7 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -114,7 +114,11 @@ def __init__( namespaces = gather_namespaces(cls) - annotations = typing.get_type_hints(cls.__init__, namespaces, namespaces) + annotations = typing.get_type_hints( + cls.__init__, + namespaces, + namespaces, + ) try: arg_spec = inspect.getfullargspec(cls) diff --git a/autofit/mapper/prior_model/util.py b/autofit/mapper/prior_model/util.py index a28a5f4e4..822ce5035 100644 --- a/autofit/mapper/prior_model/util.py +++ b/autofit/mapper/prior_model/util.py @@ -17,14 +17,16 @@ def gather_namespaces(cls: Type) -> Dict[str, Dict]: """ namespaces = {} - for base in inspect.getmro(cls): - if base is object: - continue # Skip the base object class - - # Fetch the global and local namespaces where the class was defined - module = 
inspect.getmodule(base) - if module: - namespaces.update(vars(module)) + try: + for base in inspect.getmro(cls): + if base is object: + continue + + module = inspect.getmodule(base) + if module: + namespaces.update(vars(module)) + except AttributeError: + pass namespaces.update(vars(typing)) From 84a7c5debaacad18ef93f198decc634f87640d69 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Aug 2024 10:08:37 +0100 Subject: [PATCH 062/149] check if default has default and use default - fixing astropy interaction --- autofit/mapper/prior_model/prior_model.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 3608e10b7..7f43c145f 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -125,6 +125,10 @@ def __init__( defaults = dict( zip(arg_spec.args[-len(arg_spec.defaults) :], arg_spec.defaults) ) + defaults = { + key: value.default if hasattr(value, "default") else value + for key, value in defaults.items() + } except TypeError: defaults = {} @@ -173,7 +177,9 @@ def __init__( if isinstance(annotation, str): continue - if ( + if arg in defaults: + value = self._convert_value(defaults[arg]) + elif ( ( hasattr(annotation, "__origin__") and issubclass( From 6c7fe09a082ea7bea61cbf0770117058e455fb6f Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Aug 2024 15:44:47 +0100 Subject: [PATCH 063/149] fix setting attribute rather than calling in constructor. could cause issues later --- autofit/mapper/prior_model/prior_model.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 43f2dcc8c..ed0850a6a 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -250,7 +250,10 @@ def instance_unflatten(self, aux_data, children): ------- An instance of this model. 
""" - return self.cls(**dict(zip(self.direct_argument_names, children))) + instance = object.__new__(self.cls) + for name, child in zip(self.direct_argument_names, children): + setattr(instance, name, child) + return instance def tree_flatten(self): """ From 9282b5fd470cd4465166ee432cde104f3b6a93b2 Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Aug 2024 16:20:53 +0100 Subject: [PATCH 064/149] actually use constructor argument names --- autofit/mapper/prior_model/prior_model.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index ed0850a6a..5937bb530 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -233,7 +233,7 @@ def instance_flatten(self, instance): Flatten an instance of this model as a PyTree. """ return ( - [getattr(instance, name) for name in self.direct_argument_names], + [getattr(instance, name) for name in self.constructor_argument_names], None, ) @@ -250,10 +250,7 @@ def instance_unflatten(self, aux_data, children): ------- An instance of this model. 
""" - instance = object.__new__(self.cls) - for name, child in zip(self.direct_argument_names, children): - setattr(instance, name, child) - return instance + return self.cls(*children) def tree_flatten(self): """ From 718ce6faf9ac48695900bd20263e1f5fdc338a6f Mon Sep 17 00:00:00 2001 From: Richard Date: Mon, 12 Aug 2024 16:25:48 +0100 Subject: [PATCH 065/149] improving handling of pytress --- autofit/mapper/prior_model/prior_model.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 5937bb530..06fbe1ca1 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -232,9 +232,17 @@ def instance_flatten(self, instance): """ Flatten an instance of this model as a PyTree. """ + attribute_names = [ + name + for name in self.direct_argument_names + if hasattr(instance, name) and name not in self.constructor_argument_names + ] return ( - [getattr(instance, name) for name in self.constructor_argument_names], - None, + ( + [getattr(instance, name) for name in self.constructor_argument_names], + [getattr(instance, name) for name in attribute_names], + ), + (attribute_names,), ) def instance_unflatten(self, aux_data, children): @@ -250,7 +258,12 @@ def instance_unflatten(self, aux_data, children): ------- An instance of this model. 
""" - return self.cls(*children) + constructor_arguments, other_arguments = children + attribute_names = aux_data[0] + instance = self.cls(*constructor_arguments) + for name, value in zip(attribute_names, other_arguments): + setattr(instance, name, value) + return instance def tree_flatten(self): """ From 3454565fbaccfa53fcb56342844b0d28f7744319 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 20 Aug 2024 16:39:32 +0100 Subject: [PATCH 066/149] updated scipy --- requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index f50f3246a..162ac2e2f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -anesthetic==2.8.1 +anesthetic==2.8.14 corner==2.2.1 decorator>=4.2.1 dill>=0.3.1.1 @@ -10,9 +10,9 @@ matplotlib numpydoc>=1.0.0 pyprojroot==0.2.0 pyswarms==1.3.0 -h5py>=2.10.0 +h5py>=3.11.0 SQLAlchemy==1.3.20 -scipy<=1.11.3 +scipy<=1.14.0 astunparse==1.6.3 threadpoolctl>=3.1.0,<=3.2.0 timeout-decorator==0.5.0 From 03c6981876e1e18110af61bef4b23dc78c10e75c Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 20 Aug 2024 17:26:01 +0100 Subject: [PATCH 067/149] update emcee and corner --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 162ac2e2f..9446e5994 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,10 @@ anesthetic==2.8.14 -corner==2.2.1 +corner==2.2.2 decorator>=4.2.1 dill>=0.3.1.1 dynesty==2.1.2 typing-inspect>=0.4.0 -emcee>=3.1.3 +emcee>=3.1.6 gprof2dot==2021.2.21 matplotlib numpydoc>=1.0.0 From 1e8e87ee545357fb14100c2a1d3a8439eb813653 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 20 Aug 2024 17:27:04 +0100 Subject: [PATCH 068/149] dynesty --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 9446e5994..3d26801d4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ 
anesthetic==2.8.14 corner==2.2.2 decorator>=4.2.1 dill>=0.3.1.1 -dynesty==2.1.2 +dynesty==2.1.4 typing-inspect>=0.4.0 emcee>=3.1.6 gprof2dot==2021.2.21 From e73f40672f25bfe06a5822ca14abc505c30aa48c Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 20 Aug 2024 17:32:04 +0100 Subject: [PATCH 069/149] update ultranest --- optional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optional_requirements.txt b/optional_requirements.txt index 2ef2fbada..d636e7e28 100644 --- a/optional_requirements.txt +++ b/optional_requirements.txt @@ -3,5 +3,5 @@ getdist==1.4 #jax>=0.4.13 #jaxlib>=0.4.13 nautilus-sampler==1.0.2 -ultranest==3.6.2 +ultranest==34.3.2 zeus-mcmc==2.5.4 From 1b81858bef80246754e33cf23d545d590ecaeb3c Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 20 Aug 2024 17:32:16 +0100 Subject: [PATCH 070/149] fix typo --- optional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optional_requirements.txt b/optional_requirements.txt index d636e7e28..c9be577e9 100644 --- a/optional_requirements.txt +++ b/optional_requirements.txt @@ -3,5 +3,5 @@ getdist==1.4 #jax>=0.4.13 #jaxlib>=0.4.13 nautilus-sampler==1.0.2 -ultranest==34.3.2 +ultranest==4.3.2 zeus-mcmc==2.5.4 From 725e125ffbeba96717089ead5848b140d5f0b16e Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 20 Aug 2024 17:32:35 +0100 Subject: [PATCH 071/149] nautilus --- optional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optional_requirements.txt b/optional_requirements.txt index c9be577e9..cb20d0857 100644 --- a/optional_requirements.txt +++ b/optional_requirements.txt @@ -2,6 +2,6 @@ astropy>=5.0 getdist==1.4 #jax>=0.4.13 #jaxlib>=0.4.13 -nautilus-sampler==1.0.2 +nautilus-sampler==1.0.4 ultranest==4.3.2 zeus-mcmc==2.5.4 From 5dec5c672470d1982ac0604d55e9b601b210528a Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 23 Aug 2024 10:31:49 +0100 Subject: [PATCH 072/149] use hidden module 
to continue use of scalar_search_wolfe1 --- autofit/graphical/laplace/line_search.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/autofit/graphical/laplace/line_search.py b/autofit/graphical/laplace/line_search.py index 4f3e3e0f4..dc56d5952 100644 --- a/autofit/graphical/laplace/line_search.py +++ b/autofit/graphical/laplace/line_search.py @@ -11,7 +11,7 @@ from typing import Optional, Dict, Tuple import numpy as np -from scipy.optimize import linesearch +from scipy.optimize import _linesearch as linesearch from autoconf import cached_property from autofit.graphical.factor_graphs.abstract import ( @@ -188,7 +188,6 @@ def _next_state(self, stepsize): return next_state def step(self, stepsize): - if not stepsize: return self From 7835b870086b99ef68f4a8d348a727537d962744 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 23 Aug 2024 10:42:38 +0100 Subject: [PATCH 073/149] use simpson instead of deprecated trapz --- test_autofit/graphical/functionality/test_projection.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test_autofit/graphical/functionality/test_projection.py b/test_autofit/graphical/functionality/test_projection.py index bdddea557..cd343923f 100644 --- a/test_autofit/graphical/functionality/test_projection.py +++ b/test_autofit/graphical/functionality/test_projection.py @@ -12,16 +12,16 @@ def make_q_cavity(): def test_integration(q_cavity, probit_factor): - x = np.linspace(-3, 3, 2 ** 10) + x = np.linspace(-3, 3, 2**10) probit = stats.norm(loc=0.0, scale=1.0).cdf(x) q = stats.norm(loc=q_cavity.mean, scale=q_cavity.sigma).pdf(x) tilted_distribution = probit * q - assert tilted_distribution.shape == (2 ** 10,) + assert tilted_distribution.shape == (2**10,) ni_0, ni_1, ni_2 = ( - integrate.trapz(x ** i * tilted_distribution, x) for i in range(3) + integrate.simpson(x**i * tilted_distribution, x=x) for i in range(3) ) q_numerical = autofit.messages.normal.NormalMessage.from_sufficient_statistics( From 
da6298830453e60914e6fde1106664e76f91684e Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 23 Aug 2024 10:48:20 +0100 Subject: [PATCH 074/149] fix test --- test_autofit/interpolator/test_covariance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_autofit/interpolator/test_covariance.py b/test_autofit/interpolator/test_covariance.py index 3f86d8fef..a6d264842 100644 --- a/test_autofit/interpolator/test_covariance.py +++ b/test_autofit/interpolator/test_covariance.py @@ -71,7 +71,7 @@ def test_interpolate_other_field(interpolator): def test_linear_analysis_for_value(interpolator): analysis = interpolator._analysis_for_value(interpolator.t == 0.5) assert (analysis.x == np.array([0, 1, 2])).all() - assert (analysis.model_curve == np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])).all() + assert (analysis.y == np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])).all() def test_model(interpolator): From abfc0275984004bce132e73ea4f6998c9fb4e9f7 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 23 Aug 2024 10:53:51 +0100 Subject: [PATCH 075/149] Mapped type annotations --- autofit/database/model/fit.py | 15 ++++++++------- autofit/database/model/model.py | 3 ++- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index bde40c5b0..2ec84df43 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -1,6 +1,7 @@ import json import pickle from functools import wraps +from sqlalchemy.orm import Mapped from typing import List import numpy as np @@ -181,7 +182,7 @@ class Fit(Base): ) is_complete = sa.Column(sa.Boolean) - _named_instances: List[NamedInstance] = sa.orm.relationship("NamedInstance") + _named_instances: Mapped[List[NamedInstance]] = sa.orm.relationship("NamedInstance") @property @try_none @@ -203,7 +204,7 @@ def named_instances(self): def total_parameters(self): return self.model.prior_count if self.model else 0 - _info: List[Info] = sa.orm.relationship("Info") + _info: 
Mapped[List[Info]] = sa.orm.relationship("Info") def __init__(self, **kwargs): try: @@ -216,7 +217,7 @@ def __init__(self, **kwargs): parent_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) - children: List["Fit"] = sa.orm.relationship( + children: Mapped[List["Fit"]] = sa.orm.relationship( "Fit", backref=sa.orm.backref("parent", remote_side=[id]) ) @@ -301,14 +302,14 @@ def model(self) -> AbstractPriorModel: def model(self, model: AbstractPriorModel): self.__model = Object.from_object(model) - pickles: List[Pickle] = sa.orm.relationship("Pickle", lazy="joined") - jsons: List[JSON] = sa.orm.relationship("JSON", lazy="joined") - arrays: List[Array] = sa.orm.relationship( + pickles: Mapped[List[Pickle]] = sa.orm.relationship("Pickle", lazy="joined") + jsons: Mapped[List[JSON]] = sa.orm.relationship("JSON", lazy="joined") + arrays: Mapped[List[Array]] = sa.orm.relationship( "Array", lazy="joined", foreign_keys=[Array.fit_id], ) - hdus: List[HDU] = sa.orm.relationship( + hdus: Mapped[List[HDU]] = sa.orm.relationship( "HDU", lazy="joined", foreign_keys=[HDU.fit_id], diff --git a/autofit/database/model/model.py b/autofit/database/model/model.py index 4214a25cd..3a227c457 100644 --- a/autofit/database/model/model.py +++ b/autofit/database/model/model.py @@ -1,5 +1,6 @@ import abc import inspect +from sqlalchemy.orm import Mapped from typing import List, Tuple, Any, Iterable, Union, ItemsView, Type import numpy as np @@ -42,7 +43,7 @@ class Object(Base): foreign_keys=[latent_samples_for_id], ) - children: List["Object"] = sa.orm.relationship( + children: Mapped[List["Object"]] = sa.orm.relationship( "Object", uselist=True, ) From 89bd95bd92d23358fa0b273a1b5b055fac75543d Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 23 Aug 2024 11:02:34 +0100 Subject: [PATCH 076/149] wrap query in query() --- autofit/database/aggregator/aggregator.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/autofit/database/aggregator/aggregator.py 
b/autofit/database/aggregator/aggregator.py index 4bfbebc9c..4c27f0c3d 100644 --- a/autofit/database/aggregator/aggregator.py +++ b/autofit/database/aggregator/aggregator.py @@ -1,5 +1,6 @@ import logging from abc import ABC, abstractmethod +from sqlalchemy import text from typing import Optional, List, Union, cast from ..sqlalchemy_ import sa @@ -370,7 +371,7 @@ def _fits_for_query(self, query: str) -> List[m.Fit]: query """ logger.debug(f"Executing query: {query}") - fit_ids = {row[0] for row in self.session.execute(query)} + fit_ids = {row[0] for row in self.session.execute(text(query))} logger.info(f"{len(fit_ids)} fit(s) found matching query") query = self.session.query(m.Fit).filter(m.Fit.id.in_(fit_ids)) From 18bb9660dc22ee98b1206a9f2d49b82c8a9169bd Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 23 Aug 2024 11:05:07 +0100 Subject: [PATCH 077/149] wrap query in text --- autofit/database/migration/migration.py | 85 ++++--------------- autofit/database/migration/session_wrapper.py | 29 ++----- .../database/migration/test_integration.py | 83 +++++------------- 3 files changed, 49 insertions(+), 148 deletions(-) diff --git a/autofit/database/migration/migration.py b/autofit/database/migration/migration.py index f540c43e1..c77c96515 100644 --- a/autofit/database/migration/migration.py +++ b/autofit/database/migration/migration.py @@ -1,14 +1,13 @@ import logging from abc import ABC, abstractmethod from hashlib import md5 +from sqlalchemy import text from typing import Union, Generator, Iterable, Optional from .session_wrapper import SessionWrapper from ..sqlalchemy_ import sa -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) class Identifiable(ABC): @@ -19,22 +18,13 @@ def id(self) -> str: A unique identifier generated by hashing a string """ - def __eq__( - self, - other: Union["Identifiable", str] - ) -> bool: + def __eq__(self, other: Union["Identifiable", str]) -> bool: """ Compares ids """ - if isinstance( - other, - 
Identifiable - ): + if isinstance(other, Identifiable): return self.id == other.id - if isinstance( - other, - str - ): + if isinstance(other, str): return self.id == other return False @@ -57,13 +47,7 @@ def id(self) -> str: """ Hash generated from underlying SQL statements """ - return md5( - ":".join( - self.strings - ).encode( - "utf-8" - ) - ).hexdigest() + return md5(":".join(self.strings).encode("utf-8")).hexdigest() def __str__(self): return "\n".join(self.strings) @@ -72,10 +56,7 @@ def __str__(self): class Revision(Identifiable): - def __init__( - self, - steps: Iterable[Step] - ): + def __init__(self, steps: Iterable[Step]): """ A specific revision of the database. This comprises a set of sequential steps and is uniquely identified @@ -95,12 +76,7 @@ def id(self) -> str: A unique identifier created by joining and hashing the identifiers of comprised steps. """ - return md5( - ":".join( - step.id for step - in self.steps - ).encode("utf-8") - ).hexdigest() + return md5(":".join(step.id for step in self.steps).encode("utf-8")).hexdigest() def __sub__(self, other: "Revision") -> "Revision": """ @@ -121,17 +97,11 @@ def __sub__(self, other: "Revision") -> "Revision": An object comprising steps required to move from the other revision to this revision. """ - return Revision(tuple( - step for step in self.steps - if step not in other.steps - )) + return Revision(tuple(step for step in self.steps if step not in other.steps)) class Migrator: - def __init__( - self, - *steps: Step - ): + def __init__(self, *steps: Step): """ Manages migration of an old database. 
@@ -153,14 +123,9 @@ def revisions(self) -> Generator[Revision, None, None]: starting on the first step and terminating on any step """ for i in range(1, len(self._steps) + 1): - yield Revision( - self._steps[:i] - ) + yield Revision(self._steps[:i]) - def get_steps( - self, - revision_id: Optional[str] = None - ) -> Iterable[Step]: + def get_steps(self, revision_id: Optional[str] = None) -> Iterable[Step]: """ Retrieve steps required to go from the specified revision to the latest revision. @@ -188,9 +153,7 @@ def latest_revision(self) -> Revision: The latest revision according to the steps passed to the Migrator """ - return Revision( - self._steps - ) + return Revision(self._steps) def migrate(self, session: sa.orm.Session): """ @@ -207,19 +170,11 @@ def migrate(self, session: sa.orm.Session): session A session pointing at some database. """ - wrapper = SessionWrapper( - session - ) + wrapper = SessionWrapper(session) revision_id = wrapper.revision_id - steps = list( - self.get_steps( - revision_id - ) - ) + steps = list(self.get_steps(revision_id)) if len(steps) == 0: - logger.info( - "Database already at latest revision" - ) + logger.info("Database already at latest revision") return latest_revision_id = self.latest_revision.id @@ -230,14 +185,10 @@ def migrate(self, session: sa.orm.Session): for step in steps: for string in step.strings: try: - session.execute( - string - ) + session.execute(text(string)) except sa.exc.OperationalError as e: logger.debug(e) wrapper.revision_id = self.latest_revision.id - logger.info( - f"revision_id updated to {wrapper.revision_id}" - ) + logger.info(f"revision_id updated to {wrapper.revision_id}") diff --git a/autofit/database/migration/session_wrapper.py b/autofit/database/migration/session_wrapper.py index 3f61071a8..3f2d52cda 100644 --- a/autofit/database/migration/session_wrapper.py +++ b/autofit/database/migration/session_wrapper.py @@ -1,16 +1,13 @@ import logging from functools import wraps +from sqlalchemy import 
text from typing import Optional from ..sqlalchemy_ import sa -logger = logging.getLogger( - __name__ -) +logger = logging.getLogger(__name__) -def needs_revision_table( - func -): +def needs_revision_table(func): """ Applies to functions that depend on the existence of the revision table. If the table does not exist @@ -59,11 +56,9 @@ def _init_revision_table(self): Creates the revision table with a single null entry """ self.session.execute( - "CREATE TABLE revision (revision_id VARCHAR PRIMARY KEY)" - ) - self.session.execute( - "INSERT INTO revision (revision_id) VALUES (null)" + text("CREATE TABLE revision (revision_id VARCHAR PRIMARY KEY)") ) + self.session.execute(text("INSERT INTO revision (revision_id) VALUES (null)")) @property def is_table(self) -> bool: @@ -71,9 +66,7 @@ def is_table(self) -> bool: Does the revision table exist? """ try: - self.session.execute( - "SELECT 1 FROM revision" - ) + self.session.execute(text("SELECT 1 FROM revision")) return True except sa.exc.OperationalError: return False @@ -85,9 +78,7 @@ def revision_id(self) -> Optional[str]: Describes the current revision of the database. None if no revisions have been made. 
""" - for row in self.session.execute( - "SELECT revision_id FROM revision" - ): + for row in self.session.execute(text("SELECT revision_id FROM revision")): return row[0] return None @@ -95,8 +86,6 @@ def revision_id(self) -> Optional[str]: @needs_revision_table def revision_id(self, revision_id: str): self.session.execute( - sa.text( - f"UPDATE revision SET revision_id = :revision_id" - ), - {"revision_id": revision_id} + text(f"UPDATE revision SET revision_id = :revision_id"), + {"revision_id": revision_id}, ) diff --git a/test_autofit/database/migration/test_integration.py b/test_autofit/database/migration/test_integration.py index b1c84d733..5336c6bc4 100644 --- a/test_autofit/database/migration/test_integration.py +++ b/test_autofit/database/migration/test_integration.py @@ -1,74 +1,35 @@ import pytest +from sqlalchemy import text from autofit.database.migration import SessionWrapper -@pytest.fixture( - autouse=True -) -def create_table( - session -): - session.execute( - "CREATE TABLE test (id INTEGER PRIMARY KEY)" - ) - - -def test_run_migration( - migrator, - session, - revision_2 -): - migrator.migrate( - session - ) - assert len(list( - session.execute( - "SELECT * FROM test" - ) - )) == 2 - - assert SessionWrapper( - session - ).revision_id == revision_2 - - -def test_apply_twice( - migrator, - session -): +@pytest.fixture(autouse=True) +def create_table(session): + session.execute(text("CREATE TABLE test (id INTEGER PRIMARY KEY)")) + + +def test_run_migration(migrator, session, revision_2): + migrator.migrate(session) + assert len(list(session.execute(text("SELECT * FROM test")))) == 2 + + assert SessionWrapper(session).revision_id == revision_2 + + +def test_apply_twice(migrator, session): for _ in range(2): - migrator.migrate( - session - ) - assert len(list( - session.execute( - "SELECT * FROM test" - ) - )) == 2 - - -def test_run_partial_migration( - migrator, - session, - revision_1, - revision_2 -): - wrapper = SessionWrapper( - session - ) + 
migrator.migrate(session) + assert len(list(session.execute(text("SELECT * FROM test")))) == 2 + + +def test_run_partial_migration(migrator, session, revision_1, revision_2): + wrapper = SessionWrapper(session) wrapper.revision_id = revision_1.id assert len(migrator.get_steps(revision_1.id)) == 1 - migrator.migrate( - session - ) - assert len(list( - session.execute( - "SELECT * FROM test" - ) - )) == 1 + migrator.migrate(session) + assert len(list(session.execute(text("SELECT * FROM test")))) == 1 assert wrapper.revision_id == revision_2 From 1a25aa581f351aa516daf5bf7a50dcef08932816 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 23 Aug 2024 11:05:46 +0100 Subject: [PATCH 078/149] latest version of sqlalchemy --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3d26801d4..9e79b89f0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,7 @@ numpydoc>=1.0.0 pyprojroot==0.2.0 pyswarms==1.3.0 h5py>=3.11.0 -SQLAlchemy==1.3.20 +SQLAlchemy==2.0.32 scipy<=1.14.0 astunparse==1.6.3 threadpoolctl>=3.1.0,<=3.2.0 From ab087f1e2a36f170bd618c415a2782233e01fb22 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 23 Aug 2024 11:21:35 +0100 Subject: [PATCH 079/149] remove support for 3.8 --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 7ef718bba..d3d85b17d 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.8, 3.9, '3.10', '3.11'] + python-version: [3.9, '3.10', '3.11'] steps: - name: Checkout PyAutoConf uses: actions/checkout@v2 From 2865fb87fe861875a5bdbf6d7b6226726f704600 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 27 Aug 2024 17:28:36 +0100 Subject: [PATCH 080/149] change declarative base import --- autofit/database/model/model.py | 5 +++-- 1 file changed, 3 
insertions(+), 2 deletions(-) diff --git a/autofit/database/model/model.py b/autofit/database/model/model.py index 3a227c457..7fd984111 100644 --- a/autofit/database/model/model.py +++ b/autofit/database/model/model.py @@ -6,9 +6,10 @@ import numpy as np from autoconf.class_path import get_class, get_class_path -from ..sqlalchemy_ import sa, declarative +from ..sqlalchemy_ import sa +from sqlalchemy.orm import declarative_base -Base = declarative.declarative_base() +Base = declarative_base() _schema_version = 1 From b6a2c278f423a532a397cf900dc9524acbb85991 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 27 Aug 2024 17:31:50 +0100 Subject: [PATCH 081/149] silence float warning --- test_autofit/test_correspondence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_autofit/test_correspondence.py b/test_autofit/test_correspondence.py index 94b44155f..b2dfb9805 100644 --- a/test_autofit/test_correspondence.py +++ b/test_autofit/test_correspondence.py @@ -109,4 +109,4 @@ def test_regression(): assert log_likelihood == numerical_log_likelihood assert float(gradient) == pytest.approx(numerical_gradient, rel=0.001) - assert float(approx_gradient) == pytest.approx(numerical_gradient, rel=0.001) + assert float(approx_gradient[0]) == pytest.approx(numerical_gradient, rel=0.001) From 0563a2382243a689643cbc4aacc0cc464f940709 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 27 Aug 2024 17:46:27 +0100 Subject: [PATCH 082/149] change n_effective to maxcall --- test_autofit/interpolator/test_covariance.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/test_autofit/interpolator/test_covariance.py b/test_autofit/interpolator/test_covariance.py index a6d264842..0ab3e22ae 100644 --- a/test_autofit/interpolator/test_covariance.py +++ b/test_autofit/interpolator/test_covariance.py @@ -44,23 +44,23 @@ def test_covariance_matrix(interpolator): # ) -def n_effective(func): +def maxcall(func): return with_config( 
"non_linear", "nest", "DynestyStatic", "run", - "n_effective", - value=0, + "maxcall", + value=1, )(func) -@n_effective +@maxcall def test_interpolate(interpolator): assert isinstance(interpolator[interpolator.t == 0.5].gaussian.centre, float) -@n_effective +@maxcall def test_interpolate_other_field(interpolator): assert isinstance( interpolator[interpolator.gaussian.centre == 0.5].gaussian.centre, @@ -79,7 +79,7 @@ def test_model(interpolator): assert model.prior_count == 6 -@n_effective +@maxcall def test_single_variable(): samples_list = [ af.SamplesPDF( @@ -106,7 +106,7 @@ def test_single_variable(): assert interpolator[interpolator.t == 50.0].v == pytest.approx(50.0, abs=1.0) -@n_effective +@maxcall def test_variable_and_constant(): samples_list = [ af.SamplesPDF( From b1e74b0bde11a298d44b611f4eb5987d52934f31 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 27 Aug 2024 17:52:33 +0100 Subject: [PATCH 083/149] scipy import! --- autofit/interpolator/linear.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autofit/interpolator/linear.py b/autofit/interpolator/linear.py index 131381981..a923da9f6 100644 --- a/autofit/interpolator/linear.py +++ b/autofit/interpolator/linear.py @@ -1,4 +1,4 @@ -from scipy.stats import stats +from scipy.stats import linregress from .abstract import AbstractInterpolator @@ -9,5 +9,5 @@ class LinearInterpolator(AbstractInterpolator): @staticmethod def _interpolate(x, y, value): - slope, intercept, r, p, std_err = stats.linregress(x, y) + slope, intercept, r, p, std_err = linregress(x, y) return slope * value + intercept From a1df4f12b8b136c93922ca8190d9f42ec8be95ee Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 27 Aug 2024 18:02:31 +0100 Subject: [PATCH 084/149] drop 3.8 doc --- docs/installation/overview.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation/overview.rst b/docs/installation/overview.rst index c0c2c47d8..1517c1f7c 100644 --- 
a/docs/installation/overview.rst +++ b/docs/installation/overview.rst @@ -3,7 +3,7 @@ Overview ======== -**PyAutoFit** requires Python 3.8 - 3.11 and support the Linux, MacOS and Windows operating systems. +**PyAutoFit** requires Python 3.9 - 3.11 and support the Linux, MacOS and Windows operating systems. **PyAutoFit** can be installed via the Python distribution `Anaconda `_ or using `Pypi `_ to ``pip install`` **PyAutoFit** into your Python distribution. From 542ff8cfb88018576294dc15ea24c1700c10fe1f Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 27 Aug 2024 18:49:10 +0100 Subject: [PATCH 085/149] include python 3.12 --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index d3d85b17d..7c526b4bf 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.9, '3.10', '3.11'] + python-version: [3.9, '3.10', '3.11', '3.12'], '3.10', '3.11'] steps: - name: Checkout PyAutoConf uses: actions/checkout@v2 From 2cd099697a7e876fd56a0399401ad6017e0145ef Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Tue, 27 Aug 2024 18:51:07 +0100 Subject: [PATCH 086/149] fix --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 7c526b4bf..4fb654d4f 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.9, '3.10', '3.11', '3.12'], '3.10', '3.11'] + python-version: [3.9, '3.10', '3.11', '3.12'] steps: - name: Checkout PyAutoConf uses: actions/checkout@v2 From 0992a117055488989425b06204fa503fdca78daa Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Sat, 31 Aug 2024 13:52:02 +0100 Subject: [PATCH 087/149] minor --- autofit/non_linear/analysis/analysis.py | 2 -- 
autofit/non_linear/search/mcmc/emcee/search.py | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/analysis/analysis.py b/autofit/non_linear/analysis/analysis.py index 7d5b24a88..9981437e2 100644 --- a/autofit/non_linear/analysis/analysis.py +++ b/autofit/non_linear/analysis/analysis.py @@ -9,8 +9,6 @@ from autofit.non_linear.result import Result from autofit.non_linear.samples.samples import Samples from autofit.non_linear.samples.sample import Sample -from autofit.mapper.prior_model.collection import Collection -from autofit.mapper.prior.gaussian import GaussianPrior from .visualize import Visualizer from ..samples.util import simple_model_for_kwargs diff --git a/autofit/non_linear/search/mcmc/emcee/search.py b/autofit/non_linear/search/mcmc/emcee/search.py index e8c2d81a1..c0d9d07d7 100644 --- a/autofit/non_linear/search/mcmc/emcee/search.py +++ b/autofit/non_linear/search/mcmc/emcee/search.py @@ -4,6 +4,8 @@ import emcee import numpy as np +from autoconf import conf + from autofit.database.sqlalchemy_ import sa from autofit.mapper.model_mapper import ModelMapper from autofit.mapper.prior_model.abstract import AbstractPriorModel From d6bbffc556d88ea034304f60309da7be3e17e2e7 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Sat, 31 Aug 2024 13:52:58 +0100 Subject: [PATCH 088/149] emcee force search internal True --- autofit/non_linear/search/mcmc/emcee/search.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/autofit/non_linear/search/mcmc/emcee/search.py b/autofit/non_linear/search/mcmc/emcee/search.py index c0d9d07d7..59a511e81 100644 --- a/autofit/non_linear/search/mcmc/emcee/search.py +++ b/autofit/non_linear/search/mcmc/emcee/search.py @@ -84,6 +84,11 @@ def __init__( self.logger.debug("Creating Emcee Search") + # TODO : Emcee visualization tools rely on the .hdf file and thus require that the search internal is + # TODO : On hard-disk, which this forces to occur. 
+ + conf.instance["output"]["search_internal"] = True + def _fit(self, model: AbstractPriorModel, analysis): """ Fit a model using Emcee and the Analysis class which contains the data and returns the log likelihood from From afb4d1ffd8da28f66d99c80dac2a4b9dcf4566bd Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Sat, 31 Aug 2024 14:07:33 +0100 Subject: [PATCH 089/149] Fix Zeus --- autofit/non_linear/search/mcmc/zeus/search.py | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/autofit/non_linear/search/mcmc/zeus/search.py b/autofit/non_linear/search/mcmc/zeus/search.py index ea1d3b11f..d8332fe87 100644 --- a/autofit/non_linear/search/mcmc/zeus/search.py +++ b/autofit/non_linear/search/mcmc/zeus/search.py @@ -1,6 +1,7 @@ from typing import Dict, Optional import numpy as np +import os from autofit.database.sqlalchemy_ import sa from autofit.mapper.model_mapper import ModelMapper @@ -116,7 +117,7 @@ def _fit(self, model: AbstractPriorModel, analysis): "You are attempting to perform a model-fit using Zeus. 
\n\n" "However, the optional library Zeus (https://zeus-mcmc.readthedocs.io/en/latest/) is " "not installed.\n\n" - "Install it via the command `pip install zeus==3.5.5`.\n\n" + "Install it via the command `pip install zeus-mcmc==2.5.4`.\n\n" "----------------------" ) @@ -274,13 +275,23 @@ def samples_via_internal_from(self, model, search_internal=None): search_internal = search_internal or self.paths.load_search_internal() - auto_correlations = self.auto_correlations_from(search_internal=search_internal) + if os.environ.get("PYAUTOFIT_TEST_MODE") == "1": + + samples_after_burn_in = search_internal.get_chain( + discard=5, thin=5, flat=True + ) + + else: + auto_correlations = self.auto_correlations_from( + search_internal=search_internal + ) + + discard = int(3.0 * np.max(auto_correlations.times)) + thin = int(np.max(auto_correlations.times) / 2.0) + samples_after_burn_in = search_internal.get_chain( + discard=discard, thin=thin, flat=True + ) - discard = int(3.0 * np.max(auto_correlations.times)) - thin = int(np.max(auto_correlations.times) / 2.0) - samples_after_burn_in = search_internal.get_chain( - discard=discard, thin=thin, flat=True - ) parameter_lists = samples_after_burn_in.tolist() log_posterior_list = search_internal.get_log_prob(flat=True).tolist() @@ -306,7 +317,9 @@ def samples_via_internal_from(self, model, search_internal=None): sample_list=sample_list, samples_info=self.samples_info_from(search_internal=search_internal), auto_correlation_settings=self.auto_correlation_settings, - auto_correlations=auto_correlations, + auto_correlations=self.auto_correlations_from( + search_internal=search_internal + ), ) def auto_correlations_from(self, search_internal=None): From 69d2b25747b090a512192ae8442cc8a819d6fcb5 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Sat, 31 Aug 2024 17:25:04 +0100 Subject: [PATCH 090/149] exception for pyswamrs in notebook --- .../search/mle/pyswarms/search/abstract.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 
deletions(-) diff --git a/autofit/non_linear/search/mle/pyswarms/search/abstract.py b/autofit/non_linear/search/mle/pyswarms/search/abstract.py index 2307aa2c1..996df63cf 100644 --- a/autofit/non_linear/search/mle/pyswarms/search/abstract.py +++ b/autofit/non_linear/search/mle/pyswarms/search/abstract.py @@ -215,9 +215,15 @@ def _fit(self, model: AbstractPriorModel, analysis): total_iterations += iterations - self.paths.save_search_internal( - obj=search_internal, - ) + # TODO : Running PySwarms in NoteBook raises + # TODO: TypeError: cannot pickle '_hashlib.HMAC' object + + try: + self.paths.save_search_internal( + obj=search_internal, + ) + except TypeError: + pass self.perform_update( model=model, From 5fda6dc46c85a61e6713357d10c30aa6f6899a5d Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Sun, 1 Sep 2024 09:47:38 +0100 Subject: [PATCH 091/149] pyswarms output --- .../search/mle/pyswarms/search/abstract.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/autofit/non_linear/search/mle/pyswarms/search/abstract.py b/autofit/non_linear/search/mle/pyswarms/search/abstract.py index 996df63cf..0e8c18490 100644 --- a/autofit/non_linear/search/mle/pyswarms/search/abstract.py +++ b/autofit/non_linear/search/mle/pyswarms/search/abstract.py @@ -218,12 +218,7 @@ def _fit(self, model: AbstractPriorModel, analysis): # TODO : Running PySwarms in NoteBook raises # TODO: TypeError: cannot pickle '_hashlib.HMAC' object - try: - self.paths.save_search_internal( - obj=search_internal, - ) - except TypeError: - pass + self.output_search_internal(search_internal=search_internal) self.perform_update( model=model, @@ -236,6 +231,14 @@ def _fit(self, model: AbstractPriorModel, analysis): return search_internal + def output_search_internal(self, search_internal): + try: + self.paths.save_search_internal( + obj=search_internal, + ) + except TypeError: + pass + def samples_via_internal_from(self, model, search_internal=None): """ Returns a `Samples` 
object from the pyswarms internal results. From e6dfaa3da9783ea0da273606c4a6d7ef457d5c8f Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Sun, 1 Sep 2024 10:00:17 +0100 Subject: [PATCH 092/149] pyswamrs notebook bug fixed --- autofit/non_linear/result.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/non_linear/result.py b/autofit/non_linear/result.py index d146d0334..343343f1e 100644 --- a/autofit/non_linear/result.py +++ b/autofit/non_linear/result.py @@ -311,7 +311,7 @@ def search_internal(self): try: return self.paths.load_search_internal() - except FileNotFoundError: + except (FileNotFoundError, RecursionError): pass @property From e8132b08deb0cb9a2b563b9f1d3075e349840d8d Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Sun, 1 Sep 2024 16:02:47 +0100 Subject: [PATCH 093/149] notebook fix --- test_autofit/config/priors/model.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test_autofit/config/priors/model.yaml b/test_autofit/config/priors/model.yaml index 0fda20896..16e704f99 100644 --- a/test_autofit/config/priors/model.yaml +++ b/test_autofit/config/priors/model.yaml @@ -66,8 +66,8 @@ PhysicalNFW: type: Absolute value: 0.2 gaussian_limits: - lower: '-1.0' - upper: '1.0' + lower: -1.0 + upper: 1.0 log10m: type: Uniform lower_limit: 6.0 From 16cb216ffd438403894e6b24471cc5d46f657476 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 6 Sep 2024 09:47:35 +0100 Subject: [PATCH 094/149] check tests --- autofit/example/analysis.py | 2 +- files/citations.bib | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/autofit/example/analysis.py b/autofit/example/analysis.py index 836027d8c..83db61645 100644 --- a/autofit/example/analysis.py +++ b/autofit/example/analysis.py @@ -116,7 +116,7 @@ def model_data_1d_from(self, instance: af.ModelInstance) -> np.ndarray: def save_attributes(self, paths: af.DirectoryPaths): """ Before the model-fit via the non-linear search 
begins, this routine saves attributes of the `Analysis` object - to the `pickles` folder such that they can be loaded after the analysis using PyAutoFit's database and + to the `files` folder such that they can be loaded after the analysis using PyAutoFit's database and aggregator tools. For this analysis the following are output: diff --git a/files/citations.bib b/files/citations.bib index cee63f8ce..84e01c822 100644 --- a/files/citations.bib +++ b/files/citations.bib @@ -76,6 +76,23 @@ @article{multinest volume = {398}, year = {2009} } +@ARTICLE{nautilus, + author = {{Lange}, Johannes U.}, + title = "{NAUTILUS: boosting Bayesian importance nested sampling with deep learning}", + journal = {\mnras}, + keywords = {methods: data analysis, methods: statistical, software: data analysis, Astrophysics - Instrumentation and Methods for Astrophysics, Astrophysics - Cosmology and Nongalactic Astrophysics, Astrophysics - Earth and Planetary Astrophysics, Astrophysics - Astrophysics of Galaxies, Computer Science - Machine Learning}, + year = 2023, + month = oct, + volume = {525}, + number = {2}, + pages = {3181-3194}, + doi = {10.1093/mnras/stad2441}, +archivePrefix = {arXiv}, + eprint = {2306.16923}, + primaryClass = {astro-ph.IM}, + adsurl = {https://ui.adsabs.harvard.edu/abs/2023MNRAS.525.3181L}, + adsnote = {Provided by the SAO/NASA Astrophysics Data System} +} @ARTICLE{numpy, author={S. {van der Walt} and S. C. {Colbert} and G. 
{Varoquaux}}, doi={10.1109/MCSE.2011.37}, From 3e44f5eb4bb6481c5cc9da9cff0aae9201deae9c Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 6 Sep 2024 10:33:10 +0100 Subject: [PATCH 095/149] remove Recusi --- autofit/non_linear/result.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/non_linear/result.py b/autofit/non_linear/result.py index 343343f1e..d146d0334 100644 --- a/autofit/non_linear/result.py +++ b/autofit/non_linear/result.py @@ -311,7 +311,7 @@ def search_internal(self): try: return self.paths.load_search_internal() - except (FileNotFoundError, RecursionError): + except FileNotFoundError: pass @property From ecd54c229d0f7a2cd21924b7f71b5aec436eb6ff Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 6 Sep 2024 11:07:01 +0100 Subject: [PATCH 096/149] skip calling combined analysis visualise. May need to come up with general solution for forwarding combined analysis visualisation calls --- autofit/non_linear/analysis/analysis.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/analysis/analysis.py b/autofit/non_linear/analysis/analysis.py index 7d5b24a88..e3fa92eca 100644 --- a/autofit/non_linear/analysis/analysis.py +++ b/autofit/non_linear/analysis/analysis.py @@ -1,3 +1,4 @@ +import inspect import logging from abc import ABC from typing import Optional, Dict @@ -9,8 +10,6 @@ from autofit.non_linear.result import Result from autofit.non_linear.samples.samples import Samples from autofit.non_linear.samples.sample import Sample -from autofit.mapper.prior_model.collection import Collection -from autofit.mapper.prior.gaussian import GaussianPrior from .visualize import Visualizer from ..samples.util import simple_model_for_kwargs @@ -42,6 +41,10 @@ def __getattr__(self, item: str): raise AttributeError(f"Analysis has no attribute {item}") def method(*args, **kwargs): + parameters = inspect.signature(_method).parameters + if "analyses" in parameters: + logger.debug(f"Skipping {item} 
as this is not a combined analysis") + return return _method(self, *args, **kwargs) return method From a4bf4840d1212a95840f738f872d36862eccaae5 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 6 Sep 2024 11:30:50 +0100 Subject: [PATCH 097/149] fixing sql warnings...; --- autofit/database/model/fit.py | 10 ++++++++-- autofit/database/model/model.py | 1 + 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index 2ec84df43..31e56a9f0 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -111,7 +111,10 @@ class NamedInstance(Base): instance_id = sa.Column(sa.Integer, sa.ForeignKey("object.id")) __instance = sa.orm.relationship( - "Object", uselist=False, backref="named_instance", foreign_keys=[instance_id] + "Object", + uselist=False, + backref="named_instance", + foreign_keys=[instance_id], ) @property @@ -182,7 +185,10 @@ class Fit(Base): ) is_complete = sa.Column(sa.Boolean) - _named_instances: Mapped[List[NamedInstance]] = sa.orm.relationship("NamedInstance") + _named_instances: Mapped[List[NamedInstance]] = sa.orm.relationship( + "NamedInstance", + back_populates="fit", + ) @property @try_none diff --git a/autofit/database/model/model.py b/autofit/database/model/model.py index 3a227c457..40e9cb540 100644 --- a/autofit/database/model/model.py +++ b/autofit/database/model/model.py @@ -46,6 +46,7 @@ class Object(Base): children: Mapped[List["Object"]] = sa.orm.relationship( "Object", uselist=True, + back_populates="parent", ) def __len__(self): From 90ac4c630090976387baa0800d76c751bacc5cbe Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 6 Sep 2024 11:31:41 +0100 Subject: [PATCH 098/149] more back populated --- autofit/database/model/fit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index 31e56a9f0..6f3a857fa 100644 --- a/autofit/database/model/fit.py +++ 
b/autofit/database/model/fit.py @@ -210,7 +210,7 @@ def named_instances(self): def total_parameters(self): return self.model.prior_count if self.model else 0 - _info: Mapped[List[Info]] = sa.orm.relationship("Info") + _info: Mapped[List[Info]] = sa.orm.relationship("Info", back_populates="fit") def __init__(self, **kwargs): try: From 7e68cc5e1b5b837e2cfe9a9be50352646d827f42 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 6 Sep 2024 11:33:58 +0100 Subject: [PATCH 099/149] samples/latent samples --- autofit/database/model/model.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/autofit/database/model/model.py b/autofit/database/model/model.py index 40e9cb540..3aef1152e 100644 --- a/autofit/database/model/model.py +++ b/autofit/database/model/model.py @@ -33,7 +33,10 @@ class Object(Base): samples_for_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) samples_for = sa.orm.relationship( - "Fit", uselist=False, foreign_keys=[samples_for_id] + "Fit", + uselist=False, + foreign_keys=[samples_for_id], + back_populates="_samples", ) latent_samples_for_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) @@ -41,6 +44,7 @@ class Object(Base): "Fit", uselist=False, foreign_keys=[latent_samples_for_id], + back_populates="_latent_samples", ) children: Mapped[List["Object"]] = sa.orm.relationship( From 6c8585476138c41e762a7a4322eb5a16020428ac Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 6 Sep 2024 11:36:52 +0100 Subject: [PATCH 100/149] pickles and jsons --- autofit/database/model/fit.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index 6f3a857fa..8b1f10bea 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -30,7 +30,11 @@ def __init__(self, **kwargs): name = sa.Column(sa.String) string = sa.Column(sa.String) fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) - fit = sa.orm.relationship("Fit", uselist=False) + fit = 
sa.orm.relationship( + "Fit", + uselist=False, + back_populates="pickles", + ) @property def value(self): @@ -64,7 +68,11 @@ def __init__(self, **kwargs): name = sa.Column(sa.String) string = sa.Column(sa.String) fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) - fit = sa.orm.relationship("Fit", uselist=False) + fit = sa.orm.relationship( + "Fit", + uselist=False, + back_populates="jsons", + ) @property def dict(self): @@ -308,7 +316,10 @@ def model(self) -> AbstractPriorModel: def model(self, model: AbstractPriorModel): self.__model = Object.from_object(model) - pickles: Mapped[List[Pickle]] = sa.orm.relationship("Pickle", lazy="joined") + pickles: Mapped[List[Pickle]] = sa.orm.relationship( + "Pickle", + lazy="joined", + ) jsons: Mapped[List[JSON]] = sa.orm.relationship("JSON", lazy="joined") arrays: Mapped[List[Array]] = sa.orm.relationship( "Array", From b014ccb56d69e9611fae49d981c24bf845519864 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 6 Sep 2024 11:39:42 +0100 Subject: [PATCH 101/149] hdus and arrays --- autofit/database/model/array.py | 14 +++++++++++++- autofit/database/model/fit.py | 2 ++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/autofit/database/model/array.py b/autofit/database/model/array.py index f2c032a08..fc665c383 100644 --- a/autofit/database/model/array.py +++ b/autofit/database/model/array.py @@ -29,7 +29,12 @@ def __init__(self, **kwargs): _shape = sa.Column(sa.String) fit_id = sa.Column(sa.String, sa.ForeignKey("fit.id")) - fit = sa.orm.relationship("Fit", uselist=False, foreign_keys=[fit_id]) + fit = sa.orm.relationship( + "Fit", + uselist=False, + foreign_keys=[fit_id], + back_populates="arrays", + ) @property def shape(self): @@ -84,6 +89,13 @@ class HDU(Array): "polymorphic_identity": "hdu", } + fit = sa.orm.relationship( + "Fit", + uselist=False, + foreign_keys=[Array.fit_id], + back_populates="hdus", + ) + @property def header(self): """ diff --git a/autofit/database/model/fit.py 
b/autofit/database/model/fit.py index 8b1f10bea..e97c47abd 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -325,11 +325,13 @@ def model(self, model: AbstractPriorModel): "Array", lazy="joined", foreign_keys=[Array.fit_id], + viewonly=True, ) hdus: Mapped[List[HDU]] = sa.orm.relationship( "HDU", lazy="joined", foreign_keys=[HDU.fit_id], + viewonly=True, ) def __getitem__(self, item: str): From a240b085818d819fda456602e40faa0d658c45ef Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 6 Sep 2024 12:12:21 +0100 Subject: [PATCH 102/149] handling dictionaries with arbitrary keys in database --- autofit/database/model/common.py | 22 ++++++++++++++++------ test_autofit/database/test_regression.py | 5 +++++ 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/autofit/database/model/common.py b/autofit/database/model/common.py index 75b71b741..2a4f9e0d4 100644 --- a/autofit/database/model/common.py +++ b/autofit/database/model/common.py @@ -1,11 +1,7 @@ -from typing import Union +from typing import Union, ItemsView, Any, Iterable, Tuple from ..sqlalchemy_ import sa -from autofit.mapper.prior import abstract -from autofit.mapper.prior_model import prior_model -from autofit.mapper.prior_model import collection - from .model import Object @@ -26,7 +22,7 @@ class Dict(Object): __mapper_args__ = {"polymorphic_identity": "dict"} def __call__(self): - return {child.name: child() for child in self.children} + return dict(child() for child in self.children) @classmethod def _from_object(cls, source: dict): @@ -34,3 +30,17 @@ def _from_object(cls, source: dict): instance._add_children(source.items()) instance.cls = dict return instance + + def _add_children( + self, items: Union[ItemsView[str, Any], Iterable[Tuple[str, Any]]] + ): + """ + Add database representations of child attributes + + Parameters + ---------- + items + Attributes such as floats or priors that are associated + with the real object + """ + self.children = 
[Object.from_object(item) for item in items] diff --git a/test_autofit/database/test_regression.py b/test_autofit/database/test_regression.py index 19d90ac31..c05ef92ba 100644 --- a/test_autofit/database/test_regression.py +++ b/test_autofit/database/test_regression.py @@ -48,3 +48,8 @@ def test_samples_summary_model(): fit.model = model assert fit["samples_summary"].model.cls == af.Gaussian + + +def test_dict_with_tuple_keys(): + d = {("a", "b"): 1} + assert db.Object.from_object(d)() == d From 77338f0c323633f28f1759c47bd553c21fdacb77 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 6 Sep 2024 13:54:35 +0100 Subject: [PATCH 103/149] fix regression --- autofit/database/model/common.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/autofit/database/model/common.py b/autofit/database/model/common.py index 2a4f9e0d4..da7f2dc15 100644 --- a/autofit/database/model/common.py +++ b/autofit/database/model/common.py @@ -22,7 +22,15 @@ class Dict(Object): __mapper_args__ = {"polymorphic_identity": "dict"} def __call__(self): - return dict(child() for child in self.children) + d = {} + for child in self.children: + instance = child() + if child.name != "": + d[child.name] = instance + else: + d[instance[0]] = instance[1] + + return d @classmethod def _from_object(cls, source: dict): @@ -43,4 +51,9 @@ def _add_children( Attributes such as floats or priors that are associated with the real object """ - self.children = [Object.from_object(item) for item in items] + self.children = [ + Object.from_object(value, name=key) + if isinstance(key, str) + else Object.from_object((key, value)) + for key, value in items + ] From effa065dff09613365851ce94dc5fcb674753447 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 6 Sep 2024 14:57:11 +0100 Subject: [PATCH 104/149] ignore assertions when computing latent variables --- autofit/mapper/prior_model/abstract.py | 22 ++++++++++++++++++---- autofit/non_linear/analysis/analysis.py | 5 ++++- 
autofit/non_linear/samples/sample.py | 23 +++++++++++++++++++---- 3 files changed, 41 insertions(+), 9 deletions(-) diff --git a/autofit/mapper/prior_model/abstract.py b/autofit/mapper/prior_model/abstract.py index b862c5a37..70acc8548 100644 --- a/autofit/mapper/prior_model/abstract.py +++ b/autofit/mapper/prior_model/abstract.py @@ -1334,7 +1334,9 @@ def _explode_path(path_): raise AssertionError(f"No path was found matching {name}") def instance_from_prior_name_arguments( - self, prior_name_arguments: Dict[str, float] + self, + prior_name_arguments: Dict[str, float], + ignore_assertions: bool = False, ): """ Instantiate the model from the names of priors and @@ -1346,6 +1348,8 @@ def instance_from_prior_name_arguments( The names of priors where names of models and the name of the prior have been joined by underscores, mapped to corresponding values. + ignore_assertions + If True, assertions will not be checked Returns ------- @@ -1355,10 +1359,15 @@ def instance_from_prior_name_arguments( { self.path_for_name(name): value for name, value in prior_name_arguments.items() - } + }, + ignore_assertions=ignore_assertions, ) - def instance_from_path_arguments(self, path_arguments: Dict[Tuple[str], float]): + def instance_from_path_arguments( + self, + path_arguments: Dict[Tuple[str], float], + ignore_assertions: bool = False, + ): """ Create an instance from a dictionary mapping paths to tuples to corresponding values. @@ -1370,6 +1379,8 @@ def instance_from_path_arguments(self, path_arguments: Dict[Tuple[str], float]): Note that, for linked priors, each path only needs to be specified once. If multiple paths for the same prior are specified then the value for the last path will be used. 
+ ignore_assertions + If True, assertions will not be checked Returns ------- @@ -1378,7 +1389,10 @@ def instance_from_path_arguments(self, path_arguments: Dict[Tuple[str], float]): arguments = { self.object_for_path(path): value for path, value in path_arguments.items() } - return self._instance_for_arguments(arguments) + return self._instance_for_arguments( + arguments, + ignore_assertions=ignore_assertions, + ) @property def prior_count(self) -> int: diff --git a/autofit/non_linear/analysis/analysis.py b/autofit/non_linear/analysis/analysis.py index e3fa92eca..5e464bf56 100644 --- a/autofit/non_linear/analysis/analysis.py +++ b/autofit/non_linear/analysis/analysis.py @@ -72,7 +72,10 @@ def compute_latent_samples(self, samples: Samples) -> Optional[Samples]: log_prior=sample.log_prior, weight=sample.weight, kwargs=self.compute_latent_variables( - sample.instance_for_model(model) + sample.instance_for_model( + model, + ignore_assertions=True, + ) ), ) ) diff --git a/autofit/non_linear/samples/sample.py b/autofit/non_linear/samples/sample.py index 45bf67092..60051a826 100644 --- a/autofit/non_linear/samples/sample.py +++ b/autofit/non_linear/samples/sample.py @@ -175,7 +175,11 @@ def from_lists( ) return samples - def instance_for_model(self, model: AbstractPriorModel): + def instance_for_model( + self, + model: AbstractPriorModel, + ignore_assertions: bool = False, + ): """ Create an instance from this sample for a model @@ -183,6 +187,8 @@ def instance_for_model(self, model: AbstractPriorModel): ---------- model The model the this sample was taken from + ignore_assertions + If True, do not check that the instance is valid Returns ------- @@ -190,13 +196,22 @@ def instance_for_model(self, model: AbstractPriorModel): """ try: if self.is_path_kwargs: - return model.instance_from_path_arguments(self.kwargs) + return model.instance_from_path_arguments( + self.kwargs, + ignore_assertions=ignore_assertions, + ) else: - return 
model.instance_from_prior_name_arguments(self.kwargs) + return model.instance_from_prior_name_arguments( + self.kwargs, + ignore_assertions=ignore_assertions, + ) except KeyError: # TODO: Does this get used? If so, why? - return model.instance_from_vector(self.parameter_lists_for_model(model)) + return model.instance_from_vector( + self.parameter_lists_for_model(model), + ignore_prior_limits=ignore_assertions, + ) @split_paths def with_paths(self, paths: List[Tuple[str, ...]]) -> "Sample": From f5c77bbb25b87f247b585cd2852c4c06b8e9a5e2 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 6 Sep 2024 16:59:03 +0100 Subject: [PATCH 105/149] return config exception rather than throwing other error --- autofit/mapper/prior_model/prior_model.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/autofit/mapper/prior_model/prior_model.py b/autofit/mapper/prior_model/prior_model.py index 1ed80ba55..ab2f99745 100644 --- a/autofit/mapper/prior_model/prior_model.py +++ b/autofit/mapper/prior_model/prior_model.py @@ -350,6 +350,8 @@ def make_prior(self, attribute_name): If no configuration can be found """ cls = self.cls + if isinstance(cls, ConfigException): + return cls if not inspect.isclass(cls): # noinspection PyProtectedMember cls = inspect._findclass(cls) From ef2c29f012f7aa41aa1b7ac0a43e068acde12a9a Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 13 Sep 2024 14:56:26 +0100 Subject: [PATCH 106/149] test illustrating issue --- test_autofit/database/test_regression.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/test_autofit/database/test_regression.py b/test_autofit/database/test_regression.py index c05ef92ba..119731d8a 100644 --- a/test_autofit/database/test_regression.py +++ b/test_autofit/database/test_regression.py @@ -1,7 +1,8 @@ import pytest +import numpy as np import autofit as af -from autofit import database as db +from autofit import database as db, Fit @pytest.fixture(name="model") @@ -53,3 +54,18 @@ def 
test_samples_summary_model(): def test_dict_with_tuple_keys(): d = {("a", "b"): 1} assert db.Object.from_object(d)() == d + + +def test_persist_values(session): + fit = Fit(id=1) + + fit.set_pickle("pickle", "test") + fit.set_array("array", np.array([1, 2, 3])) + + session.add(fit) + session.commit() + + fit = session.query(Fit).first() + + assert fit["pickle"] == "test" + assert fit["array"].tolist() == [1, 2, 3] From ebc6c964ddf029027904112c9942487c60524aee Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 13 Sep 2024 15:04:33 +0100 Subject: [PATCH 107/149] fix attribute relationships --- autofit/database/model/fit.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/autofit/database/model/fit.py b/autofit/database/model/fit.py index e97c47abd..30b493a4c 100644 --- a/autofit/database/model/fit.py +++ b/autofit/database/model/fit.py @@ -319,19 +319,22 @@ def model(self, model: AbstractPriorModel): pickles: Mapped[List[Pickle]] = sa.orm.relationship( "Pickle", lazy="joined", + foreign_keys=[Pickle.fit_id], + ) + jsons: Mapped[List[JSON]] = sa.orm.relationship( + "JSON", + lazy="joined", + foreign_keys=[JSON.fit_id], ) - jsons: Mapped[List[JSON]] = sa.orm.relationship("JSON", lazy="joined") arrays: Mapped[List[Array]] = sa.orm.relationship( "Array", lazy="joined", foreign_keys=[Array.fit_id], - viewonly=True, ) hdus: Mapped[List[HDU]] = sa.orm.relationship( "HDU", lazy="joined", foreign_keys=[HDU.fit_id], - viewonly=True, ) def __getitem__(self, item: str): From 54a159b3c2ec8a659a0e71cb36845962ebdfc30e Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 20 Sep 2024 08:34:14 +0100 Subject: [PATCH 108/149] do not throw away model when minimising samples --- autofit/non_linear/samples/samples.py | 1 - 1 file changed, 1 deletion(-) diff --git a/autofit/non_linear/samples/samples.py b/autofit/non_linear/samples/samples.py index 00165df5f..742270b38 100644 --- a/autofit/non_linear/samples/samples.py +++ b/autofit/non_linear/samples/samples.py @@ 
-407,7 +407,6 @@ def minimise(self) -> "Samples": A copy of this object with only important samples retained """ samples = copy(self) - samples.model = None samples.sample_list = list( {self.max_log_likelihood_sample, self.max_log_posterior_sample} ) From f12eed1cbd5ec8ccbb810adf095281bfdf0b907f Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 20 Sep 2024 10:24:16 +0100 Subject: [PATCH 109/149] updates to fix build --- README.rst | 4 ++-- docs/conf.py | 4 ++-- docs/index.rst | 4 ++-- docs/requirements.txt | 2 +- docs/science_examples/astronomy.rst | 2 +- paper/paper.json | 2 +- readthedocs.yml | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/README.rst b/README.rst index c2bf20e40..79379c1d7 100644 --- a/README.rst +++ b/README.rst @@ -45,7 +45,7 @@ The following links are useful for new starters: - `The introduction Jupyter Notebook on Binder `_, where you can try **PyAutoFit** in a web browser (without installation). -- `The autofit_workspace GitHub repository `_, which includes example scripts and the `HowToFit Jupyter notebook lectures `_ which give new users a step-by-step introduction to **PyAutoFit**. +- `The autofit_workspace GitHub repository `_, which includes example scripts and the `HowToFit Jupyter notebook lectures `_ which give new users a step-by-step introduction to **PyAutoFit**. Support ------- @@ -72,7 +72,7 @@ API Overview To illustrate the **PyAutoFit** API, we use an illustrative toy model of fitting a one-dimensional Gaussian to noisy 1D data. Here's the ``data`` (black) and the model (red) we'll fit: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/files/toy_model_fit.png +.. 
image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/files/toy_model_fit.png :width: 400 We define our model, a 1D Gaussian by writing a Python class using the format below: diff --git a/docs/conf.py b/docs/conf.py index a77132aaf..fcb18313a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -3,7 +3,7 @@ # # This file only contains a selection of the most common options. For a full # list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html +# https://www.sphinx-doc.org/en/main/usage/configuration.html # -- Path setup -------------------------------------------------------------- @@ -73,7 +73,7 @@ intersphinx_mapping = { "python": ("https://docs.python.org/3", None), - "sphinx": ("https://www.sphinx-doc.org/en/master", None), + "sphinx": ("https://www.sphinx-doc.org/en/main", None), } # -- Options for TODOs ------------------------------------------------------- diff --git a/docs/index.rst b/docs/index.rst index a1ac9d908..58e29d7b9 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -22,7 +22,7 @@ The following links are useful for new starters: - `The introduction Jupyter Notebook on Binder `_, where you can try **PyAutoFit** in a web browser (without installation). -- `The autofit_workspace GitHub repository `_, which includes example scripts and the `HowToFit Jupyter notebook lectures `_ which give new users a step-by-step introduction to **PyAutoFit**. +- `The autofit_workspace GitHub repository `_, which includes example scripts and the `HowToFit Jupyter notebook lectures `_ which give new users a step-by-step introduction to **PyAutoFit**. Support ------- @@ -49,7 +49,7 @@ API Overview To illustrate the **PyAutoFit** API, we use an illustrative toy model of fitting a one-dimensional Gaussian to noisy 1D data. Here's the ``data`` (black) and the model (red) we'll fit: -.. image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/master/files/toy_model_fit.png +.. 
image:: https://raw.githubusercontent.com/rhayes777/PyAutoFit/main/files/toy_model_fit.png :width: 400 We define our model, a 1D Gaussian by writing a Python class using the format below: diff --git a/docs/requirements.txt b/docs/requirements.txt index 701716a5c..10099ec48 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -18,7 +18,7 @@ astunparse==1.6.3 threadpoolctl>=3.1.0,<=3.2.0 autoconf timeout-decorator==0.5.0 -sphinx==5.2.3 +sphinx xxhash<=3.4.1 pyvis==0.3.2 furo diff --git a/docs/science_examples/astronomy.rst b/docs/science_examples/astronomy.rst index 9cf977585..faaa2642e 100644 --- a/docs/science_examples/astronomy.rst +++ b/docs/science_examples/astronomy.rst @@ -321,7 +321,7 @@ An example project on the **autofit_workspace** shows how to use **PyAutoFit** t lensing data, using **multi-level model composition**. If you'd like to perform the fit shown in this script, checkout the -`simple examples `_ on the +`simple examples `_ on the ``autofit_workspace``. We detail how **PyAutoFit** works in the first 3 tutorials of the `HowToFit lecture series `_. 
diff --git a/paper/paper.json b/paper/paper.json index ff2e0a5c8..c4bc2faa4 100644 --- a/paper/paper.json +++ b/paper/paper.json @@ -1,5 +1,5 @@ { - "@context": "https://raw.githubusercontent.com/codemeta/codemeta/master/codemeta.jsonld", + "@context": "https://raw.githubusercontent.com/codemeta/codemeta/main/codemeta.jsonld", "@type": "Code", "author": [ { diff --git a/readthedocs.yml b/readthedocs.yml index aa4ec0851..ca3049c2a 100644 --- a/readthedocs.yml +++ b/readthedocs.yml @@ -3,7 +3,7 @@ version: 2 build: os: ubuntu-20.04 tools: - python: "3.9" + python: "3.11" python: install: From 7c7320627b45e929128166faea9a5411ecd10a02 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Sat, 21 Sep 2024 11:11:38 +0100 Subject: [PATCH 110/149] version --- autofit/__init__.py | 2 +- docs/installation/overview.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index ab48e882e..22ea05751 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -134,4 +134,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2024.5.16.0" +__version__ = "2024.9.21.1" diff --git a/docs/installation/overview.rst b/docs/installation/overview.rst index 1517c1f7c..ad3853de2 100644 --- a/docs/installation/overview.rst +++ b/docs/installation/overview.rst @@ -3,7 +3,7 @@ Overview ======== -**PyAutoFit** requires Python 3.9 - 3.11 and support the Linux, MacOS and Windows operating systems. +**PyAutoFit** requires Python 3.9 - 3.12 and support the Linux, MacOS and Windows operating systems. **PyAutoFit** can be installed via the Python distribution `Anaconda `_ or using `Pypi `_ to ``pip install`` **PyAutoFit** into your Python distribution. 
From 4deb3c6dd1cdd6887bf8e25f593e85f2e853237c Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Sat, 21 Sep 2024 11:38:28 +0100 Subject: [PATCH 111/149] version --- autofit/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/__init__.py b/autofit/__init__.py index 22ea05751..c243f6edb 100644 --- a/autofit/__init__.py +++ b/autofit/__init__.py @@ -134,4 +134,4 @@ def save_abc(pickler, obj): conf.instance.register(__file__) -__version__ = "2024.9.21.1" +__version__ = "2024.9.21.2" From 275b6f094acf107ef051e3075bbca8a47a278e5b Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 27 Sep 2024 12:18:59 +0100 Subject: [PATCH 112/149] use write_table to output sensitivity csv with whitespace formatting --- .../non_linear/grid/sensitivity/__init__.py | 36 +++++++++---------- autofit/text/formatter.py | 6 ++-- 2 files changed, 22 insertions(+), 20 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 6b78ff6ff..4b8e672e5 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -13,6 +13,7 @@ from autofit.non_linear.grid.sensitivity.job import JobResult from autofit.non_linear.grid.sensitivity.result import SensitivityResult from autofit.non_linear.parallel import Process +from autofit.text.formatter import write_table from autofit.text.text_util import padding @@ -128,22 +129,21 @@ def run(self) -> SensitivityResult: os.makedirs(self.paths.output_path, exist_ok=True) - with open(self.results_path, "w+") as f: - writer = csv.writer(f) - writer.writerow(headers) - for result_ in results: - values = physical_values[result_.number] - writer.writerow( - padding(item) - for item in [ - result_.number, - *values, - result_.log_evidence_increase, - result_.log_likelihood_increase, - ] - ) - - result = SensitivityResult( + write_table( + headers=headers, + rows=[ + [ + result.number, + 
*physical_values[result.number], + result.log_evidence_increase, + result.log_likelihood_increase, + ] + for result in results + ], + filename=self.results_path, + ) + + sensitivity_result = SensitivityResult( samples=[result.result.samples_summary for result in results], perturb_samples=[ result.perturb_result.samples_summary for result in results @@ -151,9 +151,9 @@ def run(self) -> SensitivityResult: shape=self.shape, ) - self.paths.save_json("result", to_dict(result)) + self.paths.save_json("result", to_dict(sensitivity_result)) - return result + return sensitivity_result @property def shape(self) -> Tuple[int, ...]: diff --git a/autofit/text/formatter.py b/autofit/text/formatter.py index 7508f3974..751ebff23 100644 --- a/autofit/text/formatter.py +++ b/autofit/text/formatter.py @@ -1,6 +1,8 @@ import csv import logging -from typing import Tuple +from typing import Tuple, Union + +from pathlib import Path from autoconf import conf from autofit.tools.util import open_ @@ -221,7 +223,7 @@ def output_list_of_strings_to_file(file, list_of_strings): f.write("".join(list_of_strings)) -def write_table(headers, rows, filename: str): +def write_table(headers, rows, filename: Union[str, Path]): """ Write a table of parameters, posteriors, priors and likelihoods. 
From 8a52762446c3c3a32ea4791179c56c537affc141 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 27 Sep 2024 14:04:26 +0100 Subject: [PATCH 113/149] optionally include unique tag in the output path --- autofit/non_linear/paths/abstract.py | 5 +++-- test_autofit/non_linear/paths/test_paths.py | 9 ++++++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/autofit/non_linear/paths/abstract.py b/autofit/non_linear/paths/abstract.py index 1129be8f4..8e6492d9f 100644 --- a/autofit/non_linear/paths/abstract.py +++ b/autofit/non_linear/paths/abstract.py @@ -32,7 +32,7 @@ def __init__( parent: Optional["AbstractPaths"] = None, unique_tag: Optional[str] = None, identifier: str = None, - image_path_suffix : str = "", + image_path_suffix: str = "", ): """ Manages the path structure for `NonLinearSearch` output, for analyses both not using and using the search @@ -244,11 +244,12 @@ def output_path(self) -> Path: strings = list( filter( - len, + None, [ str(conf.instance.output_path), str(self.path_prefix), str(self.name), + self.unique_tag, ], ) ) diff --git a/test_autofit/non_linear/paths/test_paths.py b/test_autofit/non_linear/paths/test_paths.py index ffd784988..e123e897e 100644 --- a/test_autofit/non_linear/paths/test_paths.py +++ b/test_autofit/non_linear/paths/test_paths.py @@ -39,7 +39,9 @@ def test_paths_argument(self): self.assert_paths_as_expected(search.paths) def test_combination_argument(self): - search = af.m.MockSearch("other",) + search = af.m.MockSearch( + "other", + ) search.paths = af.DirectoryPaths(name="name") self.assert_paths_as_expected(search.paths) @@ -69,3 +71,8 @@ def test_serialize(model): pickled_paths = pickle.loads(pickle.dumps(paths)) assert pickled_paths.model is not None + + +def test_unique_tag(): + paths = af.DirectoryPaths(unique_tag="unique_tag") + assert "unique_tag" in paths.output_path.parts From 0819fbbad806cf12aa14902412ee7d39f45743ad Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 27 Sep 2024 14:21:02 +0100 Subject: 
[PATCH 114/149] ensure sample objects get passed to CombinedResult --- autofit/non_linear/analysis/combined.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/analysis/combined.py b/autofit/non_linear/analysis/combined.py index af2f6fb2b..417b1f905 100644 --- a/autofit/non_linear/analysis/combined.py +++ b/autofit/non_linear/analysis/combined.py @@ -362,11 +362,15 @@ def make_result( paths=paths, samples=samples, search_internal=search_internal, - analysis=analysis + analysis=analysis, ) for analysis in self.analyses ] - return CombinedResult(child_results) + return CombinedResult( + child_results, + samples, + samples_summary, + ) def __len__(self): return len(self.analyses) From 3078f2db22eda83a7bbfd30bd1e4ca516d52d102 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 27 Sep 2024 15:24:40 +0100 Subject: [PATCH 115/149] do not put unique tag in path prefix --- autofit/non_linear/search/abstract_search.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/autofit/non_linear/search/abstract_search.py b/autofit/non_linear/search/abstract_search.py index f9e63a104..3fdb70d25 100644 --- a/autofit/non_linear/search/abstract_search.py +++ b/autofit/non_linear/search/abstract_search.py @@ -182,9 +182,6 @@ def __init__( self._logger = None - if unique_tag is not None and path_prefix is not None: - path_prefix = path_prefix / unique_tag - self.unique_tag = unique_tag if paths: @@ -653,10 +650,11 @@ def pre_fit_output( if not self.disable_output: self.logger.info(f"The output path of this fit is {self.paths.output_path}") else: - self.logger.info("Output to hard-disk disabled, input a search name to enable.") + self.logger.info( + "Output to hard-disk disabled, input a search name to enable." + ) if not self.paths.is_complete or self.force_pickle_overwrite: - if not self.disable_output: self.logger.info( f"Outputting pre-fit files (e.g. model.info, visualization)." 
@@ -959,7 +957,6 @@ def perform_update( self.iterations += self.iterations_per_update if not self.disable_output: - if during_analysis: self.logger.info( f"""Fit Running: Updating results after {self.iterations} iterations (see output folder).""" @@ -1054,8 +1051,8 @@ def perform_visualization( analysis: Analysis, during_analysis: bool, samples_summary: Optional[SamplesSummary] = None, - instance : Optional[ModelInstance] = None, - paths_override : Optional[AbstractPaths] = None, + instance: Optional[ModelInstance] = None, + paths_override: Optional[AbstractPaths] = None, search_internal=None, ): """ From 410a25aacd2eaa4a70d3e718df8502aca40718a6 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 27 Sep 2024 15:24:52 +0100 Subject: [PATCH 116/149] unique tag after path prefix and before name --- autofit/non_linear/paths/abstract.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/non_linear/paths/abstract.py b/autofit/non_linear/paths/abstract.py index 8e6492d9f..d686a5d43 100644 --- a/autofit/non_linear/paths/abstract.py +++ b/autofit/non_linear/paths/abstract.py @@ -248,8 +248,8 @@ def output_path(self) -> Path: [ str(conf.instance.output_path), str(self.path_prefix), - str(self.name), self.unique_tag, + str(self.name), ], ) ) From 5d266352e252dda06facf4e4ec459adfe4529e56 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Thu, 3 Oct 2024 16:37:15 +0100 Subject: [PATCH 117/149] fix implemented and tested --- autofit/non_linear/search/nest/nautilus/search.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/autofit/non_linear/search/nest/nautilus/search.py b/autofit/non_linear/search/nest/nautilus/search.py index 603236dc2..ea46122f9 100644 --- a/autofit/non_linear/search/nest/nautilus/search.py +++ b/autofit/non_linear/search/nest/nautilus/search.py @@ -290,6 +290,21 @@ def call_search(self, search_internal, model, analysis): finished = False + if self.iterations_per_update < self.config_dict_search["n_live"] * 3.0: + + 
self.iterations_per_update = int(self.config_dict_search["n_live"] * 3) + + logger.info( + """ + The number of iterations_per_update is less than 3 times the number of live points, which can cause + issues where Nautilus loses sampling infomation due to stopping to output results. The number of + iterations per update has been increased to 3 times the number of live points. + + To remove this warning, increase the number of iterations_per_update to three or more times the + number of live points. + """ + ) + while not finished: iterations, total_iterations = self.iterations_from( From 61f2b3a436f6bef000ed89d5ff098242b3b7bc80 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 9 Oct 2024 16:36:49 +0100 Subject: [PATCH 118/149] added mask input to sensitivity map and test its shape --- autofit/example/analysis.py | 2 +- autofit/example/visualize.py | 8 ++++---- autofit/non_linear/analysis/visualize.py | 2 +- .../non_linear/grid/sensitivity/__init__.py | 19 +++++++++++++++++-- autofit/non_linear/plot/nest_plotters.py | 4 ++-- docs/cookbooks/analysis.rst | 4 ++-- docs/cookbooks/result.rst | 4 ++-- 7 files changed, 29 insertions(+), 14 deletions(-) diff --git a/autofit/example/analysis.py b/autofit/example/analysis.py index 83db61645..581415228 100644 --- a/autofit/example/analysis.py +++ b/autofit/example/analysis.py @@ -132,7 +132,7 @@ def save_attributes(self, paths: af.DirectoryPaths): Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. 
""" paths.save_json(name="data", object_dict=self.data.tolist(), prefix="dataset") diff --git a/autofit/example/visualize.py b/autofit/example/visualize.py index a66f49824..7493021d6 100644 --- a/autofit/example/visualize.py +++ b/autofit/example/visualize.py @@ -33,7 +33,7 @@ def visualize_before_fit( analysis The analysis class used to perform the model-fit whose quantities are being visualized. paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. model The model which is fitted to the data, which may be used to customize the visualization. @@ -84,7 +84,7 @@ def visualize( analysis The analysis class used to perform the model-fit whose quantities are being visualized. paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. instance An instance of the model that is being fitted to the data by this analysis (whose parameters have been set @@ -163,7 +163,7 @@ def visualize_before_fit_combined( analyses A list of the analysis classes used to perform the model-fit whose quantities are being visualized. paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. model The model which is fitted to the data, which may be used to customize the visualization. 
@@ -202,7 +202,7 @@ def visualize_combined( analyses A list of the analysis classes used to perform the model-fit whose quantities are being visualized. paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. model The model which is fitted to the data, which may be used to customize the visualization. diff --git a/autofit/non_linear/analysis/visualize.py b/autofit/non_linear/analysis/visualize.py index 7b0e74313..d0000c28a 100644 --- a/autofit/non_linear/analysis/visualize.py +++ b/autofit/non_linear/analysis/visualize.py @@ -32,7 +32,7 @@ def should_visualize( Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization and the pickled objects used by the aggregator output by this function. 
Returns diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 4b8e672e5..f79a55611 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -1,7 +1,7 @@ -import csv import logging import os from copy import copy +import numpy as np from pathlib import Path from typing import List, Generator, Callable, ClassVar, Optional, Union, Tuple @@ -14,7 +14,6 @@ from autofit.non_linear.grid.sensitivity.result import SensitivityResult from autofit.non_linear.parallel import Process from autofit.text.formatter import write_table -from autofit.text.text_util import padding class Sensitivity: @@ -30,6 +29,7 @@ def __init__( job_cls: ClassVar = Job, perturb_model_prior_func: Optional[Callable] = None, number_of_steps: Union[Tuple[int, ...], int] = 4, + mask: Optional[List[bool]] = None, number_of_cores: int = 2, limit_scale: int = 1, ): @@ -94,6 +94,21 @@ def __init__( self.job_cls = job_cls self.number_of_steps = number_of_steps + self.mask = mask + + if mask is not None: + if self.shape != np.asarray(mask).shape: + raise ValueError( + f""" + The mask of the Sensitivity object must have the same shape as the sensitivity grid. 
+ + For your inputs, the shape of each are as follows: + + Sensitivity Grid: {self.shape} + Mask: {np.asarray(mask).shape} + """ + ) + self.number_of_cores = number_of_cores self.limit_scale = limit_scale diff --git a/autofit/non_linear/plot/nest_plotters.py b/autofit/non_linear/plot/nest_plotters.py index 64d4cd43a..2d4c470cb 100644 --- a/autofit/non_linear/plot/nest_plotters.py +++ b/autofit/non_linear/plot/nest_plotters.py @@ -1,5 +1,3 @@ -from anesthetic.samples import NestedSamples -from anesthetic import make_2d_axes from functools import wraps import numpy as np import warnings @@ -59,6 +57,8 @@ def corner_anesthetic(self, **kwargs): config_dict = conf.instance["visualize"]["plots_settings"]["corner_anesthetic"] + from anesthetic.samples import NestedSamples + from anesthetic import make_2d_axes import matplotlib.pylab as pylab params = {'font.size' : int(config_dict["fontsize"])} diff --git a/docs/cookbooks/analysis.rst b/docs/cookbooks/analysis.rst index 8b766c3d6..de6ce6d0d 100644 --- a/docs/cookbooks/analysis.rst +++ b/docs/cookbooks/analysis.rst @@ -641,7 +641,7 @@ These files can then also be loaded via the database, as described in the databa Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. """ @@ -674,7 +674,7 @@ These files can then also be loaded via the database, as described in the databa Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where the + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization and the pickled objects used by the aggregator output by this function. 
result diff --git a/docs/cookbooks/result.rst b/docs/cookbooks/result.rst index ccbb3f56e..7102e759f 100644 --- a/docs/cookbooks/result.rst +++ b/docs/cookbooks/result.rst @@ -553,7 +553,7 @@ as 1D numpy arrays, are converted to a suitable dictionary output format. This u Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization, and the pickled objects used by the aggregator output by this function. """ from autoconf.dictable import to_dict @@ -575,7 +575,7 @@ as 1D numpy arrays, are converted to a suitable dictionary output format. This u Parameters ---------- paths - The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored, + The paths object which manages all paths, e.g. where the non-linear search outputs are stored, visualization and the pickled objects used by the aggregator output by this function. result The result of a model fit, including the non-linear search, samples and maximum likelihood model. From 0996f8e699284cb5431cba244371b6047edf04d2 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 9 Oct 2024 16:37:50 +0100 Subject: [PATCH 119/149] docstring --- autofit/non_linear/grid/sensitivity/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index f79a55611..6b4aeb35d 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -65,6 +65,10 @@ def __init__( The number of steps for each dimension of the sensitivity grid. If input as a float the dimensions are all that value. If input as a tuple of length the number of dimensions, each tuple value is the number of steps in that dimension. 
+ mask + A mask to apply to the sensitivity grid, such that all `True` values are not included in the sensitivity + mapping. This is useful for removing regions of the sensitivity grid that are expected to have no + sensitivity, for example because they have no signal. number_of_cores How many cores does this computer have? limit_scale From 5b4afb5a88365fb3f177498498b542a46c5e09e5 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 9 Oct 2024 17:27:00 +0100 Subject: [PATCH 120/149] jobs skipped --- .../non_linear/grid/sensitivity/__init__.py | 67 +++++++++++++------ 1 file changed, 47 insertions(+), 20 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 6b4aeb35d..1a3ef8add 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -140,6 +140,7 @@ def run(self) -> SensitivityResult: for result in process_class.run_jobs( self._make_jobs(), number_of_cores=self.number_of_cores ): + if isinstance(result, Exception): raise result @@ -168,6 +169,7 @@ def run(self) -> SensitivityResult: result.perturb_result.samples_summary for result in results ], shape=self.shape, + mask=self.mask, ) self.paths.save_json("result", to_dict(sensitivity_result)) @@ -196,6 +198,21 @@ def shape(self) -> Tuple[int, ...]: self.number_of_steps for _ in range(self.perturb_model.prior_count) ) + def shape_index_from_number(self, number: int) -> Tuple[int, ...]: + """ + Returns the index of the sensitivity grid from a number. + + Parameters + ---------- + number + The number of the sensitivity grid. + + Returns + ------- + The index of the sensitivity grid. 
+ """ + return np.unravel_index(number, self.shape) + @property def step_size(self) -> Union[float, Tuple]: """ @@ -309,26 +326,36 @@ def _make_jobs(self) -> Generator[Job, None, None]: for number, (perturb_instance, perturb_model, label) in enumerate( zip(self._perturb_instances, self._perturb_models, self._labels) ): - if self.perturb_model_prior_func is not None: - perturb_model = self.perturb_model_prior_func( - perturb_instance=perturb_instance, perturb_model=perturb_model - ) - simulate_instance = copy(self.instance) - simulate_instance.perturb = perturb_instance + shape_index = self.shape_index_from_number(number=number) - paths = self.paths.for_sub_analysis( - label, - ) + should_bypass = False - yield self.job_cls( - simulate_instance=simulate_instance, - model=self.model, - perturb_model=perturb_model, - base_instance=self.instance, - simulate_cls=self.simulate_cls, - base_fit_cls=self.base_fit_cls, - perturb_fit_cls=self.perturb_fit_cls, - paths=paths, - number=number, - ) + if self.mask is not None: + should_bypass = np.asarray(self.mask)[shape_index] + + if not should_bypass: + + if self.perturb_model_prior_func is not None: + perturb_model = self.perturb_model_prior_func( + perturb_instance=perturb_instance, perturb_model=perturb_model + ) + + simulate_instance = copy(self.instance) + simulate_instance.perturb = perturb_instance + + paths = self.paths.for_sub_analysis( + label, + ) + + yield self.job_cls( + simulate_instance=simulate_instance, + model=self.model, + perturb_model=perturb_model, + base_instance=self.instance, + simulate_cls=self.simulate_cls, + base_fit_cls=self.base_fit_cls, + perturb_fit_cls=self.perturb_fit_cls, + paths=paths, + number=number, + ) From 119fb8c538712abd51de6c0223ad39991dccdc53 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Wed, 9 Oct 2024 20:53:58 +0100 Subject: [PATCH 121/149] added instance to fit __call__ --- autofit/non_linear/grid/sensitivity/__init__.py | 1 - autofit/non_linear/grid/sensitivity/job.py | 
2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 1a3ef8add..f5587eb4e 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -169,7 +169,6 @@ def run(self) -> SensitivityResult: result.perturb_result.samples_summary for result in results ], shape=self.shape, - mask=self.mask, ) self.paths.save_json("result", to_dict(sensitivity_result)) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index c5089eb0f..96ce686ed 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -111,6 +111,7 @@ def perform(self) -> JobResult: model=self.model, dataset=dataset, paths=self.paths.for_sub_analysis("[base]"), + instance=self.simulate_instance, ) perturb_model = copy(self.model) @@ -120,6 +121,7 @@ def perform(self) -> JobResult: model=perturb_model, dataset=dataset, paths=self.paths.for_sub_analysis("[perturb]"), + instance=self.simulate_instance, ) return JobResult( From 915983c0ae25de26a3416f5d87c96ec3ab3d2e27 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 08:41:22 +0100 Subject: [PATCH 122/149] fix test --- test_autofit/non_linear/grid/test_sensitivity/conftest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test_autofit/non_linear/grid/test_sensitivity/conftest.py b/test_autofit/non_linear/grid/test_sensitivity/conftest.py index f808ffc39..13c28c82e 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/conftest.py +++ b/test_autofit/non_linear/grid/test_sensitivity/conftest.py @@ -38,7 +38,7 @@ class BaseFit: def __init__(self, analysis_cls): self.analysis_cls = analysis_cls - def __call__(self, dataset, model, paths): + def __call__(self, dataset, model, paths, instance): search = af.m.MockSearch( return_sensitivity_results=True, 
samples_summary=MockSamplesSummary(model=model), @@ -53,7 +53,7 @@ class PerturbFit: def __init__(self, analysis_cls): self.analysis_cls = analysis_cls - def __call__(self, dataset, model, paths): + def __call__(self, dataset, model, paths, instance): search = af.m.MockSearch( return_sensitivity_results=True, samples_summary=MockSamplesSummary(model=model), From c1ed1eeceeb6ed07cd315754f7167297e6357337 Mon Sep 17 00:00:00 2001 From: James Nightingale Date: Fri, 11 Oct 2024 10:02:10 +0100 Subject: [PATCH 123/149] review --- autofit/non_linear/search/nest/nautilus/search.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/autofit/non_linear/search/nest/nautilus/search.py b/autofit/non_linear/search/nest/nautilus/search.py index ea46122f9..289f1a010 100644 --- a/autofit/non_linear/search/nest/nautilus/search.py +++ b/autofit/non_linear/search/nest/nautilus/search.py @@ -290,15 +290,17 @@ def call_search(self, search_internal, model, analysis): finished = False - if self.iterations_per_update < self.config_dict_search["n_live"] * 3.0: + minimum_iterations_per_updates = 3 * self.config_dict_search["n_live"] + if self.iterations_per_update < minimum_iterations_per_updates: - self.iterations_per_update = int(self.config_dict_search["n_live"] * 3) + self.iterations_per_update = minimum_iterations_per_updates logger.info( - """ + f""" The number of iterations_per_update is less than 3 times the number of live points, which can cause - issues where Nautilus loses sampling infomation due to stopping to output results. The number of - iterations per update has been increased to 3 times the number of live points. + issues where Nautilus loses sampling information due to stopping to output results. The number of + iterations per update has been increased to 3 times the number of live points, therefore a value + of {minimum_iterations_per_updates}. 
 
             To remove this warning, increase the number of iterations_per_update to three or more times the
             number of live points.
             """
         )

From c3fa28756ed301df8cde4f1933e09b5607401ec0 Mon Sep 17 00:00:00 2001
From: James Nightingale
Date: Fri, 11 Oct 2024 10:26:20 +0100
Subject: [PATCH 124/149] fix added

---
 autofit/non_linear/search/mcmc/zeus/search.py | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/autofit/non_linear/search/mcmc/zeus/search.py b/autofit/non_linear/search/mcmc/zeus/search.py
index d8332fe87..32e63242e 100644
--- a/autofit/non_linear/search/mcmc/zeus/search.py
+++ b/autofit/non_linear/search/mcmc/zeus/search.py
@@ -1,3 +1,4 @@
+import logging
 from typing import Dict, Optional
 
 import numpy as np
@@ -292,6 +293,22 @@ def samples_via_internal_from(self, model, search_internal=None):
             discard=discard, thin=thin, flat=True
         )
 
+        if len(samples_after_burn_in) == 0:
+
+            logging.info(
+                """
+                After thinning the Zeus samples in order to remove burn-in, no samples were left.
+
+                To create a samples object containing samples, so that the code can continue and results
+                can be inspected, the full list of samples before removing burn-in has been used. This may
+                indicate that the sampler has not converged and therefore your results may not be reliable.
+
+                To fix this, run Zeus with more steps to ensure convergence is achieved or change the auto
+                correlation settings to be less aggressive in thinning samples. 
+ """ + ) + + samples_after_burn_in = search_internal.get_chain(flat=True) parameter_lists = samples_after_burn_in.tolist() log_posterior_list = search_internal.get_log_prob(flat=True).tolist() From 3a51aa796f2bf7c62e889171c16a5183a95e0af0 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 11:18:00 +0100 Subject: [PATCH 125/149] illustrating issue --- .../grid/test_sensitivity/conftest.py | 31 +++++++++++++++++++ .../test_masked_sensitivity.py | 7 +++++ 2 files changed, 38 insertions(+) create mode 100644 test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py diff --git a/test_autofit/non_linear/grid/test_sensitivity/conftest.py b/test_autofit/non_linear/grid/test_sensitivity/conftest.py index 13c28c82e..715e9211f 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/conftest.py +++ b/test_autofit/non_linear/grid/test_sensitivity/conftest.py @@ -88,6 +88,37 @@ def make_sensitivity( ) +@pytest.fixture(name="masked_sensitivity") +def make_masked_sensitivity( + perturb_model, +): + # noinspection PyTypeChecker + instance = af.ModelInstance() + instance.gaussian = af.Gaussian() + return s.Sensitivity( + simulation_instance=instance, + base_model=af.Collection(gaussian=af.Model(af.Gaussian)), + perturb_model=perturb_model, + simulate_cls=Simulate(), + base_fit_cls=BaseFit(Analysis), + perturb_fit_cls=PerturbFit(Analysis), + paths=af.DirectoryPaths(), + number_of_steps=2, + mask=np.array( + [ + [ + [True, True], + [True, True], + ], + [ + [True, True], + [True, True], + ], + ] + ), + ) + + @pytest.fixture(name="job") def make_job( perturb_model, diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py new file mode 100644 index 000000000..4ea67f2ca --- /dev/null +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -0,0 +1,7 @@ +from math import prod + + +def test_run(masked_sensitivity): + result = 
masked_sensitivity.run() + number_elements = prod(masked_sensitivity.shape) + assert len(result.samples) == number_elements From 7608c869304e2e437a78591634504af2166077f1 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 11:18:07 +0100 Subject: [PATCH 126/149] masked job result --- autofit/non_linear/grid/sensitivity/job.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index 96ce686ed..a28c5881e 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -35,7 +35,10 @@ def log_evidence_increase(self) -> Optional[float]: if hasattr(self.result.samples, "log_evidence"): if self.result.samples.log_evidence is not None: - return float(self.perturb_result.samples.log_evidence - self.result.samples.log_evidence) + return float( + self.perturb_result.samples.log_evidence + - self.result.samples.log_evidence + ) @property def log_likelihood_increase(self) -> Optional[float]: @@ -48,6 +51,15 @@ def log_likelihood_increase(self) -> Optional[float]: return float(self.perturb_result.log_likelihood - self.result.log_likelihood) +class MaskedJobResult(AbstractJobResult): + @property + def result(self): + return self + + def __getattr__(self, item): + return None + + class Job(AbstractJob): _number = count() From d68e0ff7ddb551e793cd048cd24001b65abe199b Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 11:21:34 +0100 Subject: [PATCH 127/149] extracted bypass function --- .../non_linear/grid/sensitivity/__init__.py | 24 +++++++------------ 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index f5587eb4e..3406d9fda 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -98,10 +98,10 @@ def __init__( self.job_cls = 
job_cls self.number_of_steps = number_of_steps - self.mask = mask + self.mask = np.array(mask) - if mask is not None: - if self.shape != np.asarray(mask).shape: + if self.mask is not None: + if self.shape != self.mask.shape: raise ValueError( f""" The mask of the Sensitivity object must have the same shape as the sensitivity grid. @@ -109,7 +109,7 @@ def __init__( For your inputs, the shape of each are as follows: Sensitivity Grid: {self.shape} - Mask: {np.asarray(mask).shape} + Mask: {self.mask.shape} """ ) @@ -140,7 +140,6 @@ def run(self) -> SensitivityResult: for result in process_class.run_jobs( self._make_jobs(), number_of_cores=self.number_of_cores ): - if isinstance(result, Exception): raise result @@ -315,6 +314,10 @@ def _perturb_models(self) -> Generator[AbstractPriorModel, None, None]: ] yield self.perturb_model.with_limits(limits) + def _should_bypass(self, number: int) -> bool: + shape_index = self.shape_index_from_number(number=number) + return self.mask is not None and np.asarray(self.mask)[shape_index] + def _make_jobs(self) -> Generator[Job, None, None]: """ Create a list of jobs to be run on separate processes. 
@@ -325,16 +328,7 @@ def _make_jobs(self) -> Generator[Job, None, None]: for number, (perturb_instance, perturb_model, label) in enumerate( zip(self._perturb_instances, self._perturb_models, self._labels) ): - - shape_index = self.shape_index_from_number(number=number) - - should_bypass = False - - if self.mask is not None: - should_bypass = np.asarray(self.mask)[shape_index] - - if not should_bypass: - + if not self._should_bypass(number=number): if self.perturb_model_prior_func is not None: perturb_model = self.perturb_model_prior_func( perturb_instance=perturb_instance, perturb_model=perturb_model From 71b8bfdb872720bac7e1c3db8af7355805baeb64 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 11:28:26 +0100 Subject: [PATCH 128/149] make a job for a number --- .../non_linear/grid/sensitivity/__init__.py | 75 +++++++++++-------- 1 file changed, 44 insertions(+), 31 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 3406d9fda..eeeb5d42f 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -262,31 +262,36 @@ def _headers(self) -> Generator[str, None, None]: yield path @property - def _labels(self) -> Generator[str, None, None]: + def _labels(self) -> List[str]: """ One label for each perturbation, used to distinguish fits for each perturbation by placing them in separate directories. """ + labels = [] for list_ in self._lists: strings = list() for value, prior_tuple in zip(list_, self.perturb_model.prior_tuples): path, prior = prior_tuple value = prior.value_for(value) strings.append(f"{path}_{value}") - yield "_".join(strings) + labels.append("_".join(strings)) + + return labels @property - def _perturb_instances(self) -> Generator[ModelInstance, None, None]: + def _perturb_instances(self) -> List[ModelInstance]: """ A list of instances each of which defines a perturbation to be applied to the image. 
""" - for list_ in self._lists: - yield self.perturb_model.instance_from_unit_vector(list_) + + return [ + self.perturb_model.instance_from_unit_vector(list_) for list_ in self._lists + ] @property - def _perturb_models(self) -> Generator[AbstractPriorModel, None, None]: + def _perturb_models(self) -> List[AbstractPriorModel]: """ A list of models representing a perturbation at each grid square. @@ -300,6 +305,8 @@ def _perturb_models(self) -> Generator[AbstractPriorModel, None, None]: step_sizes = (self.step_size,) * self.perturb_model.prior_count half_steps = [self.limit_scale * step_size / 2 for step_size in step_sizes] + + perturb_models = [] for list_ in self._lists: limits = [ ( @@ -312,43 +319,49 @@ def _perturb_models(self) -> Generator[AbstractPriorModel, None, None]: half_steps, ) ] - yield self.perturb_model.with_limits(limits) + perturb_models.append(self.perturb_model.with_limits(limits)) + return perturb_models def _should_bypass(self, number: int) -> bool: shape_index = self.shape_index_from_number(number=number) return self.mask is not None and np.asarray(self.mask)[shape_index] def _make_jobs(self) -> Generator[Job, None, None]: + for number, _ in enumerate(self._perturb_instances): + yield self._make_job(number) + + def _make_job(self, number) -> Generator[Job, None, None]: """ Create a list of jobs to be run on separate processes. Each job fits a perturb image with the original model and a model which includes a perturbation. 
""" - for number, (perturb_instance, perturb_model, label) in enumerate( - zip(self._perturb_instances, self._perturb_models, self._labels) - ): - if not self._should_bypass(number=number): - if self.perturb_model_prior_func is not None: - perturb_model = self.perturb_model_prior_func( - perturb_instance=perturb_instance, perturb_model=perturb_model - ) + perturb_instance = self._perturb_instances[number] + perturb_model = self._perturb_models[number] + label = self._labels[number] + + if not self._should_bypass(number=number): + if self.perturb_model_prior_func is not None: + perturb_model = self.perturb_model_prior_func( + perturb_instance=perturb_instance, perturb_model=perturb_model + ) - simulate_instance = copy(self.instance) - simulate_instance.perturb = perturb_instance + simulate_instance = copy(self.instance) + simulate_instance.perturb = perturb_instance - paths = self.paths.for_sub_analysis( - label, - ) + paths = self.paths.for_sub_analysis( + label, + ) - yield self.job_cls( - simulate_instance=simulate_instance, - model=self.model, - perturb_model=perturb_model, - base_instance=self.instance, - simulate_cls=self.simulate_cls, - base_fit_cls=self.base_fit_cls, - perturb_fit_cls=self.perturb_fit_cls, - paths=paths, - number=number, - ) + yield self.job_cls( + simulate_instance=simulate_instance, + model=self.model, + perturb_model=perturb_model, + base_instance=self.instance, + simulate_cls=self.simulate_cls, + base_fit_cls=self.base_fit_cls, + perturb_fit_cls=self.perturb_fit_cls, + paths=paths, + number=number, + ) From 360095513f60cd75edc0c5e519b6669d9c6b8eb9 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 11:48:19 +0100 Subject: [PATCH 129/149] implemented dummy result mechanism --- autofit/non_linear/grid/sensitivity/__init__.py | 14 +++++++++++--- autofit/non_linear/grid/sensitivity/job.py | 4 ++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py 
b/autofit/non_linear/grid/sensitivity/__init__.py index eeeb5d42f..91aa0df22 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -9,7 +9,7 @@ from autofit.mapper.model import ModelInstance from autofit.mapper.prior_model.abstract import AbstractPriorModel from autofit.non_linear.grid.grid_search import make_lists, Sequential -from autofit.non_linear.grid.sensitivity.job import Job +from autofit.non_linear.grid.sensitivity.job import Job, MaskedJobResult from autofit.non_linear.grid.sensitivity.job import JobResult from autofit.non_linear.grid.sensitivity.result import SensitivityResult from autofit.non_linear.parallel import Process @@ -136,9 +136,17 @@ def run(self) -> SensitivityResult: process_class = Process if self.number_of_cores > 1 else Sequential - results = list() + results = [] + jobs = [] + + for number in range(len(self._perturb_instances)): + if self._should_bypass(number=number): + results.append(MaskedJobResult(number=number)) + else: + jobs.append(self._make_job(number)) + for result in process_class.run_jobs( - self._make_jobs(), number_of_cores=self.number_of_cores + jobs, number_of_cores=self.number_of_cores ): if isinstance(result, Exception): raise result diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index a28c5881e..4691bd138 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -56,6 +56,10 @@ class MaskedJobResult(AbstractJobResult): def result(self): return self + @property + def perturb_result(self): + return self + def __getattr__(self, item): return None From 8d8e3da22b9ef8ddc0db31ddf257d2b73f9960cb Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 11:49:32 +0100 Subject: [PATCH 130/149] fix --- autofit/non_linear/grid/sensitivity/__init__.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 91aa0df22..87740d1a5 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -98,9 +98,10 @@ def __init__( self.job_cls = job_cls self.number_of_steps = number_of_steps - self.mask = np.array(mask) + self.mask = None - if self.mask is not None: + if mask is not None: + self.mask = np.asarray(mask) if self.shape != self.mask.shape: raise ValueError( f""" From 7666a653c0d51b51262b069a740490aa46db0c5e Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 11:49:55 +0100 Subject: [PATCH 131/149] fix --- autofit/non_linear/grid/sensitivity/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 87740d1a5..8e053ae4e 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -363,7 +363,7 @@ def _make_job(self, number) -> Generator[Job, None, None]: label, ) - yield self.job_cls( + return self.job_cls( simulate_instance=simulate_instance, model=self.model, perturb_model=perturb_model, From 875e96e51606fb0ee6a0695e1110d60f719918f0 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 11:50:16 +0100 Subject: [PATCH 132/149] remove bypass check from job creation --- .../non_linear/grid/sensitivity/__init__.py | 41 +++++++++---------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 8e053ae4e..889da6e55 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -350,27 +350,26 @@ def _make_job(self, number) -> Generator[Job, None, None]: perturb_model = self._perturb_models[number] label = self._labels[number] - if not 
self._should_bypass(number=number): - if self.perturb_model_prior_func is not None: - perturb_model = self.perturb_model_prior_func( - perturb_instance=perturb_instance, perturb_model=perturb_model - ) + if self.perturb_model_prior_func is not None: + perturb_model = self.perturb_model_prior_func( + perturb_instance=perturb_instance, perturb_model=perturb_model + ) - simulate_instance = copy(self.instance) - simulate_instance.perturb = perturb_instance + simulate_instance = copy(self.instance) + simulate_instance.perturb = perturb_instance - paths = self.paths.for_sub_analysis( - label, - ) + paths = self.paths.for_sub_analysis( + label, + ) - return self.job_cls( - simulate_instance=simulate_instance, - model=self.model, - perturb_model=perturb_model, - base_instance=self.instance, - simulate_cls=self.simulate_cls, - base_fit_cls=self.base_fit_cls, - perturb_fit_cls=self.perturb_fit_cls, - paths=paths, - number=number, - ) + return self.job_cls( + simulate_instance=simulate_instance, + model=self.model, + perturb_model=perturb_model, + base_instance=self.instance, + simulate_cls=self.simulate_cls, + base_fit_cls=self.base_fit_cls, + perturb_fit_cls=self.perturb_fit_cls, + paths=paths, + number=number, + ) From bd936666e41c7eb5b65082e5c2e8cac6eab90106 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 11:52:16 +0100 Subject: [PATCH 133/149] docs --- autofit/non_linear/grid/sensitivity/job.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index 4691bd138..5d969bfbc 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -52,6 +52,10 @@ def log_likelihood_increase(self) -> Optional[float]: class MaskedJobResult(AbstractJobResult): + """ + A placeholder result for a job that has been masked out. 
+ """ + @property def result(self): return self From 7d5171d84f1249cf4ec8e4cf6db066206c4640ec Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 12:24:06 +0100 Subject: [PATCH 134/149] check if a job is complete --- autofit/non_linear/grid/sensitivity/job.py | 22 +++++++++++++++++-- autofit/non_linear/mock/mock_search.py | 1 + .../grid/test_sensitivity/conftest.py | 2 ++ .../test_sensitivity/test_functionality.py | 4 ++++ 4 files changed, 27 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index 5d969bfbc..6986fa4c1 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -112,6 +112,24 @@ def __init__( self.perturb_fit_cls = perturb_fit_cls self.paths = paths + @property + def base_paths(self): + return self.paths.for_sub_analysis("[base]") + + @property + def perturb_paths(self): + return self.paths.for_sub_analysis("[perturb]") + + @property + def is_complete(self) -> bool: + """ + Returns True if the job has been completed, False otherwise. 
+ """ + return (self.base_paths.is_complete and self.perturb_paths.is_complete) or ( + (self.paths.output_path / "[base].zip").exists() + and (self.paths.output_path / "[perturb].zip").exists() + ) + def perform(self) -> JobResult: """ - Create one model with a perturbation and another without @@ -130,7 +148,7 @@ def perform(self) -> JobResult: result = self.base_fit_cls( model=self.model, dataset=dataset, - paths=self.paths.for_sub_analysis("[base]"), + paths=self.base_paths, instance=self.simulate_instance, ) @@ -140,7 +158,7 @@ def perform(self) -> JobResult: perturb_result = self.perturb_fit_cls( model=perturb_model, dataset=dataset, - paths=self.paths.for_sub_analysis("[perturb]"), + paths=self.perturb_paths, instance=self.simulate_instance, ) diff --git a/autofit/non_linear/mock/mock_search.py b/autofit/non_linear/mock/mock_search.py index 4dbd53c12..c68b5d9da 100644 --- a/autofit/non_linear/mock/mock_search.py +++ b/autofit/non_linear/mock/mock_search.py @@ -136,6 +136,7 @@ def _fit(self, model, analysis): ) self.paths.save_samples_summary(self.samples_summary) + self.paths.completed() return analysis.make_result( samples_summary=samples_summary, diff --git a/test_autofit/non_linear/grid/test_sensitivity/conftest.py b/test_autofit/non_linear/grid/test_sensitivity/conftest.py index 715e9211f..e2a71d2eb 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/conftest.py +++ b/test_autofit/non_linear/grid/test_sensitivity/conftest.py @@ -42,6 +42,7 @@ def __call__(self, dataset, model, paths, instance): search = af.m.MockSearch( return_sensitivity_results=True, samples_summary=MockSamplesSummary(model=model), + paths=paths, ) analysis = self.analysis_cls(dataset=dataset) @@ -57,6 +58,7 @@ def __call__(self, dataset, model, paths, instance): search = af.m.MockSearch( return_sensitivity_results=True, samples_summary=MockSamplesSummary(model=model), + paths=paths, ) analysis = self.analysis_cls(dataset=dataset) diff --git 
a/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py b/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py index 4d1330d1c..a22540fea 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py @@ -29,11 +29,15 @@ def test_labels(sensitivity): def test_perform_job(job): + assert not job.is_complete + result = job.perform() assert isinstance(result, s.JobResult) assert isinstance(result.perturb_result, af.Result) assert isinstance(result.result, af.Result) + assert job.is_complete + class TestPerturbationModels: @pytest.mark.parametrize( From 41095a0853884fb9d70a5476272a5dabb7f544b0 Mon Sep 17 00:00:00 2001 From: Richard Date: Fri, 11 Oct 2024 12:39:18 +0100 Subject: [PATCH 135/149] skip simulation if job is complete --- autofit/non_linear/grid/sensitivity/job.py | 15 ++++++++++----- .../grid/test_sensitivity/test_functionality.py | 12 ++++++++++++ 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index 6986fa4c1..03c2b2db6 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -140,10 +140,13 @@ def perform(self) -> JobResult: An object comprising the results of the two fits """ - dataset = self.simulate_cls( - instance=self.simulate_instance, - simulate_path=self.paths.image_path.with_name("simulate"), - ) + if self.is_complete: + dataset = None + else: + dataset = self.simulate_cls( + instance=self.simulate_instance, + simulate_path=self.paths.image_path.with_name("simulate"), + ) result = self.base_fit_cls( model=self.model, @@ -163,5 +166,7 @@ def perform(self) -> JobResult: ) return JobResult( - number=self.number, result=result, perturb_result=perturb_result + number=self.number, + result=result, + perturb_result=perturb_result, ) diff --git 
a/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py b/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py index a22540fea..c6d348067 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py @@ -39,6 +39,18 @@ def test_perform_job(job): assert job.is_complete +def test_perform_twice(job): + job.perform() + assert job.is_complete + + result = job.perform() + assert isinstance(result, s.JobResult) + assert isinstance(result.perturb_result, af.Result) + assert isinstance(result.result, af.Result) + + assert job.is_complete + + class TestPerturbationModels: @pytest.mark.parametrize( "limit_scale, fl, fu, sl, su", From bb8e4cc8e88b4160aadaff5a4d24a9918fa83e8a Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 08:35:32 +0100 Subject: [PATCH 136/149] fixture --- .../grid/test_sensitivity/test_masked_sensitivity.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py index 4ea67f2ca..77fa5bbe2 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -1,7 +1,13 @@ from math import prod +import pytest -def test_run(masked_sensitivity): - result = masked_sensitivity.run() + +@pytest.fixture(name="masked_result") +def make_masked_result(masked_sensitivity): + return masked_sensitivity.run() + + +def test_run(masked_sensitivity, masked_result): number_elements = prod(masked_sensitivity.shape) - assert len(result.samples) == number_elements + assert len(masked_result.samples) == number_elements From f025d3d04e2ff3d290769179eab9b575695c47c2 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 08:44:23 +0100 Subject: [PATCH 137/149] model for masked 
result --- autofit/non_linear/grid/sensitivity/__init__.py | 7 ++++++- autofit/non_linear/grid/sensitivity/job.py | 8 ++++++++ .../grid/test_sensitivity/test_masked_sensitivity.py | 7 ++++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 889da6e55..8e6e6a893 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -142,7 +142,12 @@ def run(self) -> SensitivityResult: for number in range(len(self._perturb_instances)): if self._should_bypass(number=number): - results.append(MaskedJobResult(number=number)) + results.append( + MaskedJobResult( + number=number, + model=self.model, + ) + ) else: jobs.append(self._make_job(number)) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index 5d969bfbc..22bb8c86e 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -56,6 +56,10 @@ class MaskedJobResult(AbstractJobResult): A placeholder result for a job that has been masked out. 
""" + def __init__(self, number, model): + super().__init__(number) + self.model = model + @property def result(self): return self @@ -67,6 +71,10 @@ def perturb_result(self): def __getattr__(self, item): return None + @property + def samples_summary(self): + return self + class Job(AbstractJob): _number = count() diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py index 77fa5bbe2..f7518f610 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -8,6 +8,11 @@ def make_masked_result(masked_sensitivity): return masked_sensitivity.run() -def test_run(masked_sensitivity, masked_result): +def test_result_size(masked_sensitivity, masked_result): number_elements = prod(masked_sensitivity.shape) assert len(masked_result.samples) == number_elements + + +def test_sample(masked_result): + sample = masked_result.samples[0] + assert sample.model is not None From e14ea07efe09149a99865903c18ffc6183e5d5b2 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 08:45:35 +0100 Subject: [PATCH 138/149] default likelihood and evidence --- autofit/non_linear/grid/sensitivity/job.py | 8 ++++++++ .../grid/test_sensitivity/test_masked_sensitivity.py | 2 ++ 2 files changed, 10 insertions(+) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index 22bb8c86e..2be43c154 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -75,6 +75,14 @@ def __getattr__(self, item): def samples_summary(self): return self + @property + def log_evidence(self): + return 0.0 + + @property + def log_likelihood(self): + return 0.0 + class Job(AbstractJob): _number = count() diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py 
b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py index f7518f610..de13c224a 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -16,3 +16,5 @@ def test_result_size(masked_sensitivity, masked_result): def test_sample(masked_result): sample = masked_result.samples[0] assert sample.model is not None + assert sample.log_evidence == 0.0 + assert sample.log_likelihood == 0.0 From 6cea6f77685eaca7e5905cc1ad2e45f3fc64bc55 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 09:29:42 +0100 Subject: [PATCH 139/149] give model perturbation --- autofit/non_linear/grid/sensitivity/__init__.py | 4 +++- .../grid/test_sensitivity/test_masked_sensitivity.py | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 8e6e6a893..8e233bc08 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -142,10 +142,12 @@ def run(self) -> SensitivityResult: for number in range(len(self._perturb_instances)): if self._should_bypass(number=number): + model = self.model.copy() + model.perturb = self._perturb_models[number] results.append( MaskedJobResult( number=number, - model=self.model, + model=model, ) ) else: diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py index de13c224a..c039b1b38 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -16,5 +16,6 @@ def test_result_size(masked_sensitivity, masked_result): def test_sample(masked_result): sample = masked_result.samples[0] assert sample.model is not None + assert sample.model.perturb is not None assert 
sample.log_evidence == 0.0 assert sample.log_likelihood == 0.0 From 1e6652adb4dce82d07965cdb222326735bf2409a Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 15:34:40 +0100 Subject: [PATCH 140/149] with_limits implementation for uniform prior --- autofit/mapper/prior/uniform.py | 16 ++++++++-- .../test_masked_sensitivity.py | 29 +++++++++++++++++++ 2 files changed, 42 insertions(+), 3 deletions(-) diff --git a/autofit/mapper/prior/uniform.py b/autofit/mapper/prior/uniform.py index 3aa799396..3cea04a90 100644 --- a/autofit/mapper/prior/uniform.py +++ b/autofit/mapper/prior/uniform.py @@ -64,6 +64,16 @@ def __init__( def tree_flatten(self): return (self.lower_limit, self.upper_limit), (self.id,) + def with_limits( + self, + lower_limit: float, + upper_limit: float, + ) -> "Prior": + return UniformPrior( + lower_limit=lower_limit, + upper_limit=upper_limit, + ) + def logpdf(self, x): # TODO: handle x as a numpy array if x == self.lower_limit: @@ -97,9 +107,9 @@ def value_for(self, unit: float, ignore_prior_limits: bool = False) -> float: physical_value = prior.value_for(unit=0.2) """ - return float(round( - super().value_for(unit, ignore_prior_limits=ignore_prior_limits), 14 - )) + return float( + round(super().value_for(unit, ignore_prior_limits=ignore_prior_limits), 14) + ) def log_prior_from_value(self, value): """ diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py index c039b1b38..57da0109c 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -2,6 +2,8 @@ import pytest +import autofit as af + @pytest.fixture(name="masked_result") def make_masked_result(masked_sensitivity): @@ -19,3 +21,30 @@ def test_sample(masked_result): assert sample.model.perturb is not None assert sample.log_evidence == 0.0 assert sample.log_likelihood == 0.0 
+ + +@pytest.mark.parametrize( + "lower, upper, mean", + [ + (0.0, 1.0, 0.5), + (-1.0, 1.0, 0.0), + (-1.0, 0.0, -0.5), + (0.5, 1.0, 0.75), + ], +) +def test_mean_uniform_prior( + lower, + upper, + mean, +): + prior = af.UniformPrior( + lower_limit=0.0, + upper_limit=1.0, + ) + assert ( + prior.with_limits( + lower, + upper, + ).mean + == mean + ) From e517a07095f6f27b3ef930f80e08677fc02f9381 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 15:39:03 +0100 Subject: [PATCH 141/149] illustrate x,y,etc params available from perturb model --- .../test_sensitivity/test_masked_sensitivity.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py index 57da0109c..fa357c426 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -48,3 +48,16 @@ def test_mean_uniform_prior( ).mean == mean ) + + +def test_physical_centres_lists(masked_result, masked_sensitivity): + assert masked_result.perturbed_physical_centres_list_from("perturb.centre") == [ + 0.25, + 0.25, + 0.25, + 0.25, + 0.75, + 0.75, + 0.75, + 0.75, + ] From fd01def38e6eb5fd0ad84f2eaeb33907a1ac54dd Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 15:40:25 +0100 Subject: [PATCH 142/149] with limits for log gaussian prior --- autofit/mapper/prior/log_gaussian.py | 31 ++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/autofit/mapper/prior/log_gaussian.py b/autofit/mapper/prior/log_gaussian.py index d13f9d203..a02d77e7b 100644 --- a/autofit/mapper/prior/log_gaussian.py +++ b/autofit/mapper/prior/log_gaussian.py @@ -71,6 +71,37 @@ def __init__( id_=id_, ) + @classmethod + def with_limits(cls, lower_limit: float, upper_limit: float) -> "LogGaussianPrior": + """ + Create a new gaussian prior centred between two 
limits + with sigma distance between this limits. + + Note that these limits are not strict so exceptions will not + be raised for values outside of the limits. + + This function is typically used in prior passing, where the + result of a model-fit are used to create new Gaussian priors + centred on the previously estimated median PDF model. + + Parameters + ---------- + lower_limit + The lower limit of the new Gaussian prior. + upper_limit + The upper limit of the new Gaussian Prior. + + Returns + ------- + A new GaussianPrior + """ + return cls( + mean=(lower_limit + upper_limit) / 2, + sigma=upper_limit - lower_limit, + lower_limit=lower_limit, + upper_limit=upper_limit, + ) + def _new_for_base_message(self, message): """ Create a new instance of this wrapper but change the parameters used From 653e94878ec00e2e8b88fdd9d26064ee6a0df881 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 15:40:47 +0100 Subject: [PATCH 143/149] abstract with_limits --- autofit/mapper/prior/abstract.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/autofit/mapper/prior/abstract.py b/autofit/mapper/prior/abstract.py index a1e3c30db..174bb4851 100644 --- a/autofit/mapper/prior/abstract.py +++ b/autofit/mapper/prior/abstract.py @@ -95,16 +95,11 @@ def new(self): new.id = next(self._ids) return new + @abstractmethod def with_limits(self, lower_limit: float, upper_limit: float) -> "Prior": """ Create a new instance of the same prior class with the passed limits. 
""" - new = self.__class__( - lower_limit=max(lower_limit, self.lower_limit), - upper_limit=min(upper_limit, self.upper_limit), - ) - new.message = self.message - return new @property def factor(self): From 8045f8e26b62b56c5cff05c4f918ea5c3015c250 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 15:42:28 +0100 Subject: [PATCH 144/149] remove test --- .../non_linear/grid/test_sensitivity/test_functionality.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py b/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py index 4d1330d1c..511bada9a 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_functionality.py @@ -122,11 +122,6 @@ def test_prior_with_limits(self): assert prior.lower_limit == 3 assert prior.upper_limit == 5 - def test_existing_limits(self): - prior = af.UniformPrior(2, 4).with_limits(3, 5) - assert prior.lower_limit == 3 - assert prior.upper_limit == 4 - @pytest.fixture(name="tuple_sensitivity") def make_tuple_sensitivity(sensitivity): From 2aaf47a7cb29958f0926a3fdc10126f4c5b24ee6 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 16:43:43 +0100 Subject: [PATCH 145/149] path value dicts --- .../non_linear/grid/sensitivity/__init__.py | 18 ++++++++++++++++++ .../test_masked_sensitivity.py | 13 +++++++++++++ 2 files changed, 31 insertions(+) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 8e233bc08..5388723d0 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -254,6 +254,24 @@ def _lists(self) -> List[List[float]]: """ return make_lists(self.perturb_model.prior_count, step_size=self.step_size) + @property + def path_value_dicts(self): + paths = [ + self.perturb_model.path_for_prior(prior) + for prior in 
self.perturb_model.priors_ordered_by_id + ] + + return [ + { + path: value + for path, value in zip( + paths, + list_, + ) + } + for list_ in self._lists + ] + @property def _physical_values(self) -> List[List[float]]: """ diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py index fa357c426..857159afc 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -61,3 +61,16 @@ def test_physical_centres_lists(masked_result, masked_sensitivity): 0.75, 0.75, ] + + +def test_path_value_dicts(masked_sensitivity): + assert masked_sensitivity.path_value_dicts == [ + {("centre",): 0.25, ("normalization",): 0.25, ("sigma",): 0.25}, + {("centre",): 0.25, ("normalization",): 0.25, ("sigma",): 0.75}, + {("centre",): 0.25, ("normalization",): 0.75, ("sigma",): 0.25}, + {("centre",): 0.25, ("normalization",): 0.75, ("sigma",): 0.75}, + {("centre",): 0.75, ("normalization",): 0.25, ("sigma",): 0.25}, + {("centre",): 0.75, ("normalization",): 0.25, ("sigma",): 0.75}, + {("centre",): 0.75, ("normalization",): 0.75, ("sigma",): 0.25}, + {("centre",): 0.75, ("normalization",): 0.75, ("sigma",): 0.75}, + ] From 1fefff295d4199480d98f6ff0adf9fc4faead517 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 16:45:53 +0100 Subject: [PATCH 146/149] pass a path value dict to results --- .../non_linear/grid/sensitivity/__init__.py | 3 +++ autofit/non_linear/grid/sensitivity/job.py | 22 ++++++++++++++++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 5388723d0..cd7325906 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -148,6 +148,7 @@ def run(self) -> SensitivityResult: 
MaskedJobResult( number=number, model=model, + path_value_dict=self.path_value_dicts[number], ) ) else: @@ -374,6 +375,7 @@ def _make_job(self, number) -> Generator[Job, None, None]: perturb_instance = self._perturb_instances[number] perturb_model = self._perturb_models[number] label = self._labels[number] + path_value_dict = self.path_value_dicts[number] if self.perturb_model_prior_func is not None: perturb_model = self.perturb_model_prior_func( @@ -397,4 +399,5 @@ def _make_job(self, number) -> Generator[Job, None, None]: perturb_fit_cls=self.perturb_fit_cls, paths=paths, number=number, + path_value_dict=path_value_dict, ) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index 2be43c154..f59442dd9 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -10,7 +10,13 @@ class JobResult(AbstractJobResult): - def __init__(self, number: int, result: Result, perturb_result: Result): + def __init__( + self, + number: int, + result: Result, + perturb_result: Result, + path_value_dicts, + ): """ The result of a single sensitivity comparison @@ -22,6 +28,7 @@ def __init__(self, number: int, result: Result, perturb_result: Result): super().__init__(number) self.result = result self.perturb_result = perturb_result + self.path_value_dicts = path_value_dicts @property def log_evidence_increase(self) -> Optional[float]: @@ -56,9 +63,10 @@ class MaskedJobResult(AbstractJobResult): A placeholder result for a job that has been masked out. """ - def __init__(self, number, model): + def __init__(self, number, model, path_value_dict): super().__init__(number) self.model = model + self.path_value_dict = path_value_dict @property def result(self): @@ -98,6 +106,7 @@ def __init__( perturb_fit_cls: Callable, paths: AbstractPaths, number: int, + path_value_dicts=None, ): """ Job to run non-linear searches comparing how well a model and a model with a perturbation fit the image. 
@@ -116,6 +125,9 @@ def __init__( sensitivity map. paths The paths defining the output directory structure of the sensitivity mapping. + path_value_dicts + A list of dictionaries containing the values of the perturbation parameters for each sensitivity mapping + dataset. """ super().__init__(number=number) @@ -127,6 +139,7 @@ def __init__( self.base_fit_cls = base_fit_cls self.perturb_fit_cls = perturb_fit_cls self.paths = paths + self.path_value_dicts = path_value_dicts def perform(self) -> JobResult: """ @@ -161,5 +174,8 @@ def perform(self) -> JobResult: ) return JobResult( - number=self.number, result=result, perturb_result=perturb_result + number=self.number, + result=result, + perturb_result=perturb_result, + path_value_dicts=self.path_value_dicts, ) From 49e6b5dcb9786296acef4b9520d9173da66ed072 Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 16:46:48 +0100 Subject: [PATCH 147/149] Revert "pass a path value dict to results" This reverts commit 1fefff295d4199480d98f6ff0adf9fc4faead517. 
--- .../non_linear/grid/sensitivity/__init__.py | 3 --- autofit/non_linear/grid/sensitivity/job.py | 22 +++---------------- 2 files changed, 3 insertions(+), 22 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index cd7325906..5388723d0 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -148,7 +148,6 @@ def run(self) -> SensitivityResult: MaskedJobResult( number=number, model=model, - path_value_dict=self.path_value_dicts[number], ) ) else: @@ -375,7 +374,6 @@ def _make_job(self, number) -> Generator[Job, None, None]: perturb_instance = self._perturb_instances[number] perturb_model = self._perturb_models[number] label = self._labels[number] - path_value_dict = self.path_value_dicts[number] if self.perturb_model_prior_func is not None: perturb_model = self.perturb_model_prior_func( @@ -399,5 +397,4 @@ def _make_job(self, number) -> Generator[Job, None, None]: perturb_fit_cls=self.perturb_fit_cls, paths=paths, number=number, - path_value_dict=path_value_dict, ) diff --git a/autofit/non_linear/grid/sensitivity/job.py b/autofit/non_linear/grid/sensitivity/job.py index f59442dd9..2be43c154 100644 --- a/autofit/non_linear/grid/sensitivity/job.py +++ b/autofit/non_linear/grid/sensitivity/job.py @@ -10,13 +10,7 @@ class JobResult(AbstractJobResult): - def __init__( - self, - number: int, - result: Result, - perturb_result: Result, - path_value_dicts, - ): + def __init__(self, number: int, result: Result, perturb_result: Result): """ The result of a single sensitivity comparison @@ -28,7 +22,6 @@ def __init__( super().__init__(number) self.result = result self.perturb_result = perturb_result - self.path_value_dicts = path_value_dicts @property def log_evidence_increase(self) -> Optional[float]: @@ -63,10 +56,9 @@ class MaskedJobResult(AbstractJobResult): A placeholder result for a job that has been masked out. 
""" - def __init__(self, number, model, path_value_dict): + def __init__(self, number, model): super().__init__(number) self.model = model - self.path_value_dict = path_value_dict @property def result(self): @@ -106,7 +98,6 @@ def __init__( perturb_fit_cls: Callable, paths: AbstractPaths, number: int, - path_value_dicts=None, ): """ Job to run non-linear searches comparing how well a model and a model with a perturbation fit the image. @@ -125,9 +116,6 @@ def __init__( sensitivity map. paths The paths defining the output directory structure of the sensitivity mapping. - path_value_dicts - A list of dictionaries containing the values of the perturbation parameters for each sensitivity mapping - dataset. """ super().__init__(number=number) @@ -139,7 +127,6 @@ def __init__( self.base_fit_cls = base_fit_cls self.perturb_fit_cls = perturb_fit_cls self.paths = paths - self.path_value_dicts = path_value_dicts def perform(self) -> JobResult: """ @@ -174,8 +161,5 @@ def perform(self) -> JobResult: ) return JobResult( - number=self.number, - result=result, - perturb_result=perturb_result, - path_value_dicts=self.path_value_dicts, + number=self.number, result=result, perturb_result=perturb_result ) From 2c6a8be935db8d43a4e8a522342971d0c8c4965e Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 16:53:55 +0100 Subject: [PATCH 148/149] test_perturbed_physical_centres_list_from --- .../non_linear/grid/sensitivity/__init__.py | 16 ++++-------- autofit/non_linear/grid/sensitivity/result.py | 17 ++++++------ .../test_masked_sensitivity.py | 26 ++++++++++++------- 3 files changed, 30 insertions(+), 29 deletions(-) diff --git a/autofit/non_linear/grid/sensitivity/__init__.py b/autofit/non_linear/grid/sensitivity/__init__.py index 5388723d0..ad83f2065 100644 --- a/autofit/non_linear/grid/sensitivity/__init__.py +++ b/autofit/non_linear/grid/sensitivity/__init__.py @@ -184,6 +184,7 @@ def run(self) -> SensitivityResult: result.perturb_result.samples_summary for result in 
results ], shape=self.shape, + path_values=self.path_values, ) self.paths.save_json("result", to_dict(sensitivity_result)) @@ -255,22 +256,15 @@ def _lists(self) -> List[List[float]]: return make_lists(self.perturb_model.prior_count, step_size=self.step_size) @property - def path_value_dicts(self): + def path_values(self): paths = [ self.perturb_model.path_for_prior(prior) for prior in self.perturb_model.priors_ordered_by_id ] - return [ - { - path: value - for path, value in zip( - paths, - list_, - ) - } - for list_ in self._lists - ] + return { + path: list(values) for path, *values in zip(paths, *self._physical_values) + } @property def _physical_values(self) -> List[List[float]]: diff --git a/autofit/non_linear/grid/sensitivity/result.py b/autofit/non_linear/grid/sensitivity/result.py index 346909bbd..8e5fd4839 100644 --- a/autofit/non_linear/grid/sensitivity/result.py +++ b/autofit/non_linear/grid/sensitivity/result.py @@ -1,4 +1,4 @@ -from typing import List, Tuple, Union +from typing import List, Tuple, Union, Dict from autofit.non_linear.grid.grid_list import GridList, as_grid_list from autofit.non_linear.grid.grid_search.result import AbstractGridSearchResult @@ -12,22 +12,22 @@ def __init__( samples: List[SamplesInterface], perturb_samples: List[SamplesInterface], shape: Tuple[int, ...], + path_values: Dict[Tuple[str, ...], List[float]], ): """ The result of a sensitivity mapping Parameters ---------- - results - The results of each sensitivity job. - physical_values - A list of lists of values representing the physical values of the sensitivity grid values. shape The shape of the sensitivity mapping grid. + path_values + A list of tuples of the path to the grid priors and the physical values themselves. 
""" super().__init__(GridList(samples, shape)) self.perturb_samples = GridList(perturb_samples, shape) self.shape = shape + self.path_values = path_values def perturbed_physical_centres_list_from( self, path: Union[str, Tuple[str, ...]] @@ -40,10 +40,9 @@ def perturbed_physical_centres_list_from( path The path to the physical centres in the samples """ - return self._physical_centres_lists_from( - self.perturb_samples, - path, - ) + if isinstance(path, str): + path = tuple(path.split(".")) + return self.path_values[path] def __getitem__(self, item): return self.samples[item] diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py index 857159afc..5d2e5102f 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -64,13 +64,21 @@ def test_physical_centres_lists(masked_result, masked_sensitivity): def test_path_value_dicts(masked_sensitivity): - assert masked_sensitivity.path_value_dicts == [ - {("centre",): 0.25, ("normalization",): 0.25, ("sigma",): 0.25}, - {("centre",): 0.25, ("normalization",): 0.25, ("sigma",): 0.75}, - {("centre",): 0.25, ("normalization",): 0.75, ("sigma",): 0.25}, - {("centre",): 0.25, ("normalization",): 0.75, ("sigma",): 0.75}, - {("centre",): 0.75, ("normalization",): 0.25, ("sigma",): 0.25}, - {("centre",): 0.75, ("normalization",): 0.25, ("sigma",): 0.75}, - {("centre",): 0.75, ("normalization",): 0.75, ("sigma",): 0.25}, - {("centre",): 0.75, ("normalization",): 0.75, ("sigma",): 0.75}, + assert masked_sensitivity.path_values == { + ("centre",): [0.25, 0.25, 0.25, 0.25, 0.75, 0.75, 0.75, 0.75], + ("normalization",): [0.25, 0.25, 0.75, 0.75, 0.25, 0.25, 0.75, 0.75], + ("sigma",): [0.25, 0.75, 0.25, 0.75, 0.25, 0.75, 0.25, 0.75], + } + + +def test_perturbed_physical_centres_list_from(masked_result): + assert 
masked_result.perturbed_physical_centres_list_from("centre") == [ + 0.25, + 0.25, + 0.25, + 0.25, + 0.75, + 0.75, + 0.75, + 0.75, ] From 1e324b619561f07e6bc9fdf31afe798adfa189ef Mon Sep 17 00:00:00 2001 From: Richard Date: Wed, 16 Oct 2024 17:13:38 +0100 Subject: [PATCH 149/149] fix tests --- .../test_sensitivity/test_masked_sensitivity.py | 13 ------------- .../grid/test_sensitivity/test_results.py | 3 +++ 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py index 5d2e5102f..bf9c33314 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_masked_sensitivity.py @@ -50,19 +50,6 @@ def test_mean_uniform_prior( ) -def test_physical_centres_lists(masked_result, masked_sensitivity): - assert masked_result.perturbed_physical_centres_list_from("perturb.centre") == [ - 0.25, - 0.25, - 0.25, - 0.25, - 0.75, - 0.75, - 0.75, - 0.75, - ] - - def test_path_value_dicts(masked_sensitivity): assert masked_sensitivity.path_values == { ("centre",): [0.25, 0.25, 0.25, 0.25, 0.75, 0.75, 0.75, 0.75], diff --git a/test_autofit/non_linear/grid/test_sensitivity/test_results.py b/test_autofit/non_linear/grid/test_sensitivity/test_results.py index b531d2aa2..655794eba 100644 --- a/test_autofit/non_linear/grid/test_sensitivity/test_results.py +++ b/test_autofit/non_linear/grid/test_sensitivity/test_results.py @@ -48,6 +48,9 @@ def make_sensitivity_result(job_result): samples=[job_result.result.samples.summary()], perturb_samples=[job_result.perturb_result.samples.summary()], shape=(1,), + path_values={ + ("centre",): [0.5], + }, )