Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 0 additions & 14 deletions autofit/config/non_linear/mle.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -73,20 +73,6 @@ BFGS:
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
Drawer:
search:
total_draws: 50
initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}.
method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution.
ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method.
ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method.
parallel:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
updates:
iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
LBFGS:
search:
tol: null
Expand Down
5 changes: 4 additions & 1 deletion autofit/example/analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,4 +233,7 @@ def compute_latent_variables(self, instance) -> Dict[str, float]:
try:
return {"fwhm": instance.fwhm}
except AttributeError:
return {"gaussian.fwhm": instance[0].fwhm}
try:
return {"gaussian.fwhm": instance[0].fwhm}
except AttributeError:
return {"gaussian.fwhm": instance[0].gaussian.fwhm}
9 changes: 8 additions & 1 deletion autofit/example/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,14 @@ def __init__(
self.sigma = sigma

@property
def fwhm(self):
def fwhm(self) -> float:
"""
The full-width half-maximum of the Gaussian profile.

This is used to illustrate latent variables in **PyAutoFit**, which are values that can be inferred from
the free parameters of the model, which we are interested in and may want to store the full samples
information on (e.g. to create posteriors).
"""
return 2 * np.sqrt(2 * np.log(2)) * self.sigma

def _tree_flatten(self):
Expand Down
1 change: 0 additions & 1 deletion autofit/non_linear/grid/grid_search/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -273,7 +273,6 @@ def write_results():

def save_metadata(self):
self.paths.save_unique_tag(is_grid_search=True)
self.paths.zip_remove_nuclear()

def make_jobs(self, model, analysis, grid_priors, info: Optional[Dict] = None):
grid_priors = model.sort_priors_alphabetically(set(grid_priors))
Expand Down
1 change: 0 additions & 1 deletion autofit/non_linear/grid/grid_search/job.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,6 @@ def perform(self):
model=self.model,
analysis=self.analysis,
info=self.info,
bypass_nuclear_if_on=True,
)
result_list_row = [
self.index,
Expand Down
52 changes: 0 additions & 52 deletions autofit/non_linear/paths/abstract.py
Original file line number Diff line number Diff line change
Expand Up @@ -291,58 +291,6 @@ def _zip(self):
except FileNotFoundError:
pass

def zip_remove_nuclear(self):
"""
When multiple model-fits are performed using the same `path_prefix` and `name`,
the results are populated in the same folder with different unique identifiers.

By accident, one may perform runs where additional results are placed
in these folders which are not wanted for the subsequent analysis.

Removing these results from the directory can be cumbersome, as determining
the unwanted results based on their unique identifier requires visually inspecting
them.

These unwanted results can also make manipulating the results via the database
problematic, as one may need to again filter based on unique identifier.

When a run is performed in nuclear mode, all results in every folder are
deleted except the results corresponding to the unique identifier of that run.

Therefore, provided the user is 100% certain that the run corresponds to the
results they want to keep, nuclear mode can be used to remove all unwanted results.

For example, suppose a folder has 5 results, 4 of which are unwanted and 1 which is
wanted. If nuclear mode runs, and the model-fit is set up correctly such that the
identifier created corresponds to the wanted result, all 4 unwanted results
will be deleted.

To enable nuclear mode, set the environment variable ``PYAUTOFIT_NUCLEAR_MODE=1``.

Nuclear mode is dangerous, and must be used with CAUTION AND CARE!
"""

if os.environ.get("PYAUTOFIT_NUCLEAR_MODE") == "1":
file_path = Path(os.path.split(self.output_path)[0])

file_list = os.listdir(file_path)
file_list = [file for file in file_list if self.identifier not in file]

for file in file_list:
file_to_remove = file_path / file

try:
os.remove(file_to_remove)
logger.info(f"NUCLEAR MODE -- Removed {file_to_remove}")
except (IsADirectoryError, FileNotFoundError):
pass

try:
shutil.rmtree(file_to_remove)
logger.info(f"NUCLEAR MODE -- Removed {file_to_remove}")
except (NotADirectoryError, FileNotFoundError):
pass

def restore(self):
"""
Copy files from the ``.zip`` file to the samples folder.
Expand Down
2 changes: 1 addition & 1 deletion autofit/non_linear/paths/null.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def save_array(self, name, array):
def load_array(self, name):
pass

def save_fits(self, name: str, hdu, prefix: str = ""):
def save_fits(self, name: str, fits, prefix: str = ""):
pass

def load_fits(self, name: str, prefix: str = ""):
Expand Down
14 changes: 3 additions & 11 deletions autofit/non_linear/search/abstract_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -534,7 +534,6 @@ def fit(
model: AbstractPriorModel,
analysis: Analysis,
info: Optional[Dict] = None,
bypass_nuclear_if_on: bool = False,
) -> Union[Result, List[Result]]:
"""
Fit a model, M with some function f that takes instances of the
Expand All @@ -557,9 +556,6 @@ class represented by model M and gives a score for their fitness.
info
Optional dictionary containing information about the fit that can be saved in the `files` folder
(e.g. as `files/info.json`) and can be loaded via the database.
bypass_nuclear_if_on
If nuclear mode is on (environment variable "PYAUTOFIT_NUCLEAR_MODE=1") passing this as True will
bypass it.

Returns
-------
Expand Down Expand Up @@ -612,7 +608,6 @@ class represented by model M and gives a score for their fitness.
)

self.post_fit_output(
bypass_nuclear_if_on=bypass_nuclear_if_on,
search_internal=result.search_internal,
)

Expand Down Expand Up @@ -847,7 +842,7 @@ def result_via_completed_fit(

return result

def post_fit_output(self, search_internal, bypass_nuclear_if_on: bool):
def post_fit_output(self, search_internal):
"""
Cleans up the output folders after a completed non-linear search.

Expand All @@ -860,8 +855,8 @@ def post_fit_output(self, search_internal, bypass_nuclear_if_on: bool):

Parameters
----------
bypass_nuclear_if_on
Whether to use nuclear mode to delete a lot of files (see nuclear mode description).
search_internal
The internal search.
"""
if not conf.instance["output"]["search_internal"]:
self.logger.info("Removing search internal folder.")
Expand All @@ -874,9 +869,6 @@ def post_fit_output(self, search_internal, bypass_nuclear_if_on: bool):

self.paths.zip_remove()

if not bypass_nuclear_if_on:
self.paths.zip_remove_nuclear()

@abstractmethod
def _fit(self, model: AbstractPriorModel, analysis: Analysis):
pass
Expand Down
16 changes: 11 additions & 5 deletions autofit/non_linear/search/nest/dynesty/search/abstract.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,11 +201,6 @@ def _fit(
during_analysis=True,
)

try:
os.remove(self.checkpoint_file)
except TypeError:
pass

return search_internal

def samples_info_from(self, search_internal=None):
Expand Down Expand Up @@ -485,6 +480,17 @@ def search_internal_from(
):
raise NotImplementedError()

def output_search_internal(self, search_internal):

self.paths.save_search_internal(
obj=search_internal,
)

try:
os.remove(self.checkpoint_file)
except (TypeError, FileNotFoundError):
pass

def check_pool(self, uses_pool: bool, pool):
if (uses_pool and pool is None) or (not uses_pool and pool is not None):
raise exc.SearchException(
Expand Down
9 changes: 5 additions & 4 deletions autofit/non_linear/search/nest/nautilus/search.py
Original file line number Diff line number Diff line change
Expand Up @@ -159,10 +159,6 @@ def _fit(self, model: AbstractPriorModel, analysis):
checkpoint_exists=checkpoint_exists,
)

if self.checkpoint_file is not None:

os.remove(self.checkpoint_file)

return search_internal

@property
Expand Down Expand Up @@ -468,6 +464,11 @@ def output_search_internal(self, search_internal):
search_internal.pool_l = pool_l
search_internal.pool_s = pool_s

try:
os.remove(self.checkpoint_file)
except (TypeError, FileNotFoundError):
pass

def samples_info_from(self, search_internal=None):
return {
"log_evidence": search_internal.evidence(),
Expand Down
Loading