Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 5 additions & 3 deletions autofit/config/general.yaml
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
jax:
use_jax: false # If True, PyAutoFit uses JAX internally, whereas False uses normal Numpy.
analysis:
n_cores: 1 # The number of cores a parallelized sum of Analysis classes uses by default.
updates:
iterations_per_quick_update: 1e99 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
iterations_per_full_update: 1e99 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
hpc:
hpc_mode: false # If True, use HPC mode, which disables GUI visualization, logging to screen and other settings which are not suited to running on a super computer.
iterations_per_update: 5000 # The number of iterations between every update (visualization, results output, etc) in HPC mode.
iterations_per_quick_update: 1e99 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
iterations_per_full_update: 1e99 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
inversion:
check_reconstruction: true # If True, the inversion's reconstruction is checked to ensure the solution of a mesh's mapper is not an invalid solution where the values are all the same.
reconstruction_vmax_factor: 0.5 # Plots of an Inversion's reconstruction use the reconstructed data's bright value multiplied by this factor.
Expand Down
7 changes: 2 additions & 5 deletions autofit/config/non_linear/mcmc.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,6 @@ Emcee:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
Zeus:
run:
check_walkers: true
Expand Down Expand Up @@ -58,6 +55,6 @@ Zeus:
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.

updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
iterations_per_full_update: 500 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
iterations_per_quick_update: 500 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
20 changes: 10 additions & 10 deletions autofit/config/non_linear/mle.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@ PySwarmsGlobal:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
iterations_per_full_update: 500 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
iterations_per_quick_update: 500 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
PySwarmsLocal:
run:
Expand All @@ -46,8 +46,8 @@ PySwarmsLocal:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
iterations_per_full_update: 500 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
iterations_per_quick_update: 500 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
BFGS:
search:
Expand All @@ -70,8 +70,8 @@ BFGS:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
iterations_per_full_update: 500 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
iterations_per_quick_update: 500 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
LBFGS:
search:
Expand All @@ -94,8 +94,8 @@ LBFGS:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
iterations_per_full_update: 500 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
iterations_per_quick_update: 500 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
Drawer:
search:
Expand All @@ -108,6 +108,6 @@ Drawer:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
iterations_per_full_update: 500 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
iterations_per_quick_update: 500 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
12 changes: 0 additions & 12 deletions autofit/config/non_linear/nest.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -33,9 +33,6 @@ DynestyStatic:
force_x1_cpu: false # Force Dynesty to not use Python multiprocessing Pool, which can fix issues on certain operating systems.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
DynestyDynamic:
search:
bootstrap: null
Expand Down Expand Up @@ -64,9 +61,6 @@ DynestyDynamic:
force_x1_cpu: false # Force Dynesty to not use Python multiprocessing Pool, which can fix issues on certain operating systems.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
Nautilus:
search:
n_live: 3000 # Number of so-called live points. New bounds are constructed so that they encompass the live points.
Expand All @@ -93,9 +87,6 @@ Nautilus:
force_x1_cpu: false # Force Dynesty to not use Python multiprocessing Pool, which can fix issues on certain operating systems.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
UltraNest:
search:
draw_multiple: true
Expand Down Expand Up @@ -140,6 +131,3 @@ UltraNest:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
4 changes: 2 additions & 2 deletions autofit/non_linear/analysis/analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -300,5 +300,5 @@ def profile_log_likelihood_function(self, paths: AbstractPaths, instance):
"""
pass

def latent_lh_dict_from(self, **kwargs):
return None
def perform_quick_update(self, paths, instance):
    """
    Perform a lightweight ("quick") update visualizing the current maximum
    log likelihood model fit during a non-linear search.

    The base implementation raises NotImplementedError; callers (e.g. the
    fitness quick-update machinery) catch NotImplementedError and skip the
    step, so subclasses only override this when they have quick visuals to
    output.

    Parameters
    ----------
    paths
        The paths object governing where output is written.
    instance
        The maximum log likelihood model instance to visualize.
    """
    raise NotImplementedError
113 changes: 112 additions & 1 deletion autofit/non_linear/fitness.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@
from autofit.jax_wrapper import numpy as xp
from autofit import exc

from autofit.text import text_util


from autofit.mapper.prior_model.abstract import AbstractPriorModel
from autofit.non_linear.paths.abstract import AbstractPaths
Expand All @@ -40,7 +42,8 @@ def __init__(
resample_figure_of_merit: float = -xp.inf,
convert_to_chi_squared: bool = False,
store_history: bool = False,
use_jax_vmap : bool = False
use_jax_vmap : bool = False,
iterations_per_quick_update: Optional[int] = None,
):
"""
Interfaces with any non-linear search to fit the model to the data and return a log likelihood via
Expand Down Expand Up @@ -120,6 +123,11 @@ def __init__(
if self.use_jax_vmap:
self._call = self._vmap

self.iterations_per_quick_update = iterations_per_quick_update
self.quick_update_max_lh_parameters = None
self.quick_update_max_lh = -xp.inf
self.quick_update_count = 0

if self.paths is not None:
self.check_log_likelihood(fitness=self)

Expand Down Expand Up @@ -189,25 +197,128 @@ def call_wrap(self, parameters):
the log-likelihood itself or another objective function value,
depending on configuration.
"""

if self.use_jax_vmap:
if len(np.array(parameters).shape) == 1:
parameters = np.array(parameters)[None, :]

figure_of_merit = self._call(parameters)

if self.convert_to_chi_squared:
figure_of_merit *= -0.5

if self.fom_is_log_likelihood:
log_likelihood = figure_of_merit
else:
log_prior_list = xp.array(self.model.log_prior_list_from_vector(vector=parameters))
log_likelihood = figure_of_merit - xp.sum(log_prior_list)

self.manage_quick_update(parameters=parameters, log_likelihood=log_likelihood)

if self.convert_to_chi_squared:
log_likelihood *= -2.0

if self.store_history:

self.parameters_history_list.append(parameters)
self.log_likelihood_history_list.append(log_likelihood)

return figure_of_merit

def manage_quick_update(self, parameters, log_likelihood):
    """
    Manage quick updates during the non-linear search.

    A "quick update" is a lightweight visualization of the current best-fit
    (maximum likelihood) model parameters. This provides fast feedback on the
    progress of the fit without waiting for the full analysis to complete,
    and does not require leaving the active non-linear search.

    Workflow:
    ----------
    1. Normalize the inputs so scalar evaluations (one float, one parameter
       vector) and batched evaluations (arrays) take the same code path.
    2. Identify the maximum log-likelihood of the current batch; if it beats
       the best seen so far, store it and its parameter vector in
       `self.quick_update_max_lh` / `self.quick_update_max_lh_parameters`.
    3. Once the number of evaluations since the last quick update reaches
       `self.iterations_per_quick_update`, visualize the current
       max-likelihood model via `self.analysis.perform_quick_update()` and
       write the model.results text via `self.paths.output_model_results()`.

    Parameters
    ----------
    parameters : array-like
        The parameter vector(s) evaluated. Shape (n_param,) for a single
        evaluation or (n_batch, n_param) for a batch.
    log_likelihood : float or array-like
        The corresponding log-likelihood(s). If batched, shape (n_batch,).

    Notes
    -----
    - Quick updates are disabled when `self.iterations_per_quick_update`
      is None.
    - If the `analysis` class does not implement `perform_quick_update`,
      the visualization step is silently skipped.
    - This mechanism is intended for fast, coarse visualization only,
      not detailed science-quality outputs.
    """
    if self.iterations_per_quick_update is None:
        return

    # Normalize to arrays so scalar floats, 0-d arrays, plain lists and
    # batched arrays are all handled identically. The previous
    # try/except-AttributeError dispatch missed scalar floats (which raise
    # TypeError when indexed, not AttributeError) and misrouted plain lists
    # (which lack .shape) into the scalar branch.
    log_likelihood_batch = np.atleast_1d(np.asarray(log_likelihood))
    parameters_batch = np.atleast_2d(np.asarray(parameters))

    best_idx = int(np.argmax(log_likelihood_batch))
    best_log_likelihood = float(log_likelihood_batch[best_idx])
    best_parameters = parameters_batch[best_idx]
    total_updates = log_likelihood_batch.shape[0]

    if best_log_likelihood > self.quick_update_max_lh:
        self.quick_update_max_lh = best_log_likelihood
        self.quick_update_max_lh_parameters = best_parameters

    self.quick_update_count += total_updates

    if self.quick_update_count < self.iterations_per_quick_update:
        return

    start_time = time.time()

    logger.info("Performing quick update of maximum log likelihood fit image and model.results")

    instance = self.model.instance_from_vector(vector=self.quick_update_max_lh_parameters)

    try:
        self.analysis.perform_quick_update(self.paths, instance)
    except NotImplementedError:
        pass

    # np.asarray guards .tolist() against the stored parameters not being
    # an ndarray (e.g. if a caller assigned a plain list).
    result_info = text_util.result_max_lh_info_from(
        max_log_likelihood_sample=np.asarray(self.quick_update_max_lh_parameters).tolist(),
        max_log_likelihood=self.quick_update_max_lh,
        model=self.model,
    )
    result_info = "\n".join(result_info)

    logger.info(result_info)
    self.paths.output_model_results(result_info=result_info)

    self.quick_update_count = 0

    logger.info(f"Quick update complete in {time.time() - start_time} seconds.")

@timeout(timeout_seconds)
def __call__(self, parameters, *kwargs):
"""
Expand Down
11 changes: 8 additions & 3 deletions autofit/non_linear/paths/abstract.py
Original file line number Diff line number Diff line change
Expand Up @@ -438,9 +438,7 @@ def save_summary(
result_info = text_util.result_info_from(
samples=samples,
)
filename = self.output_path / "model.results"
with open_(filename, "w") as f:
f.write(result_info)
self.output_model_results(result_info=result_info)

if latent_samples:
result_info = text_util.result_info_from(
Expand Down Expand Up @@ -473,3 +471,10 @@ def _covariance_file(self) -> Path:
@property
def _info_file(self) -> Path:
    """The path to the `samples_info.json` file within the files directory."""
    return self._files_path / "samples_info.json"

def output_model_results(self, result_info):
    """
    Write the model results text to the `model.results` file in the
    search's output path.

    Parameters
    ----------
    result_info
        The formatted results text to write.
    """
    results_path = self.output_path / "model.results"

    with open_(results_path, "w") as results_file:
        results_file.write(result_info)
Loading
Loading