diff --git a/autofit/config/non_linear/mle.yaml b/autofit/config/non_linear/mle.yaml index 3946b950d..5b951c703 100644 --- a/autofit/config/non_linear/mle.yaml +++ b/autofit/config/non_linear/mle.yaml @@ -73,20 +73,6 @@ BFGS: updates: iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). -Drawer: - search: - total_draws: 50 - initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}. - method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution. - ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. - ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method. - parallel: - number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing. - printing: - silence: false # If True, the default print output of the non-linear search is silcened and not printed by the Python interpreter. - updates: - iterations_per_update: 500 # The number of iterations of the non-linear search performed between every 'update', where an update performs tasks like outputting model.results. - remove_state_files_at_end: true # Whether to remove the savestate of the seach (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable). 
LBFGS: search: tol: null diff --git a/autofit/example/analysis.py b/autofit/example/analysis.py index 581415228..cdc086f64 100644 --- a/autofit/example/analysis.py +++ b/autofit/example/analysis.py @@ -233,4 +233,7 @@ def compute_latent_variables(self, instance) -> Dict[str, float]: try: return {"fwhm": instance.fwhm} except AttributeError: - return {"gaussian.fwhm": instance[0].fwhm} + try: + return {"gaussian.fwhm": instance[0].fwhm} + except AttributeError: + return {"gaussian.fwhm": instance[0].gaussian.fwhm} \ No newline at end of file diff --git a/autofit/example/model.py b/autofit/example/model.py index b5772f41d..5297f6254 100644 --- a/autofit/example/model.py +++ b/autofit/example/model.py @@ -38,7 +38,14 @@ def __init__( self.sigma = sigma @property - def fwhm(self): + def fwhm(self) -> float: + """ + The full-width half-maximum of the Gaussian profile. + + This is used to illustrate latent variables in **PyAutoFit**, which are values that can be inferred from + the free parameters of the model in which we are interested and may want to store the full samples information + on (e.g. to create posteriors). 
+ """ return 2 * np.sqrt(2 * np.log(2)) * self.sigma def _tree_flatten(self): diff --git a/autofit/non_linear/grid/grid_search/__init__.py b/autofit/non_linear/grid/grid_search/__init__.py index dc57d4474..0a33d0af0 100644 --- a/autofit/non_linear/grid/grid_search/__init__.py +++ b/autofit/non_linear/grid/grid_search/__init__.py @@ -273,7 +273,6 @@ def write_results(): def save_metadata(self): self.paths.save_unique_tag(is_grid_search=True) - self.paths.zip_remove_nuclear() def make_jobs(self, model, analysis, grid_priors, info: Optional[Dict] = None): grid_priors = model.sort_priors_alphabetically(set(grid_priors)) diff --git a/autofit/non_linear/grid/grid_search/job.py b/autofit/non_linear/grid/grid_search/job.py index de70edb88..892d8ad46 100644 --- a/autofit/non_linear/grid/grid_search/job.py +++ b/autofit/non_linear/grid/grid_search/job.py @@ -55,7 +55,6 @@ def perform(self): model=self.model, analysis=self.analysis, info=self.info, - bypass_nuclear_if_on=True, ) result_list_row = [ self.index, diff --git a/autofit/non_linear/paths/abstract.py b/autofit/non_linear/paths/abstract.py index 9016a8dd1..1fea99cbf 100644 --- a/autofit/non_linear/paths/abstract.py +++ b/autofit/non_linear/paths/abstract.py @@ -291,58 +291,6 @@ def _zip(self): except FileNotFoundError: pass - def zip_remove_nuclear(self): - """ - When multiple model-fits are performed using the same `path_prefix` and `name`, - the results are populated in the same folder with different unique identifiers. - - By accident, one may perform runs where additional results are placed - in these folders which are not wanted for the subsequent analysis. - - Removing these results from the directory can be cumbersome, as determining - the unwanted results based on their unique identifier requires visually inspecting - them. - - These unwanted results can also make manipulating the results via the database - problematic, as one may need to again filter based on unique identifier. 
- - When a run is performed in nuclear mode, all results in every folder are - deleted except the results corresponding to the unique identifier of that run. - - Therefore, provided the user is 100% certain that the run corresponds to the - results they want to keep, nuclear mode can be used to remove all unwanted results. - - For example, suppose a folder has 5 results, 4 of which are unwanted and 1 which is - wanted. If nuclear mode runs, and the model-fit is set up correctly such that the - identifier created corresponds to the wanted result, all 4 unwanted results - will be deleted. - - To enable nuclear mode, set the environment variable ``PYAUTOFIT_NUCLEAR_MODE=1``. - - Nuclear mode is dangerous, and must be used with CAUTION AND CARE! - """ - - if os.environ.get("PYAUTOFIT_NUCLEAR_MODE") == "1": - file_path = Path(os.path.split(self.output_path)[0]) - - file_list = os.listdir(file_path) - file_list = [file for file in file_list if self.identifier not in file] - - for file in file_list: - file_to_remove = file_path / file - - try: - os.remove(file_to_remove) - logger.info(f"NUCLEAR MODE -- Removed {file_to_remove}") - except (IsADirectoryError, FileNotFoundError): - pass - - try: - shutil.rmtree(file_to_remove) - logger.info(f"NUCLEAR MODE -- Removed {file_to_remove}") - except (NotADirectoryError, FileNotFoundError): - pass - def restore(self): """ Copy files from the ``.zip`` file to the samples folder. 
diff --git a/autofit/non_linear/paths/null.py b/autofit/non_linear/paths/null.py index e4049d710..dfca8aee7 100644 --- a/autofit/non_linear/paths/null.py +++ b/autofit/non_linear/paths/null.py @@ -28,7 +28,7 @@ def save_array(self, name, array): def load_array(self, name): pass - def save_fits(self, name: str, hdu, prefix: str = ""): + def save_fits(self, name: str, fits, prefix: str = ""): pass def load_fits(self, name: str, prefix: str = ""): diff --git a/autofit/non_linear/search/abstract_search.py b/autofit/non_linear/search/abstract_search.py index 5ad770ece..44a42e4fa 100644 --- a/autofit/non_linear/search/abstract_search.py +++ b/autofit/non_linear/search/abstract_search.py @@ -534,7 +534,6 @@ def fit( model: AbstractPriorModel, analysis: Analysis, info: Optional[Dict] = None, - bypass_nuclear_if_on: bool = False, ) -> Union[Result, List[Result]]: """ Fit a model, M with some function f that takes instances of the @@ -557,9 +556,6 @@ class represented by model M and gives a score for their fitness. info Optional dictionary containing information about the fit that can be saved in the `files` folder (e.g. as `files/info.json`) and can be loaded via the database. - bypass_nuclear_if_on - If nuclear mode is on (environment variable "PYAUTOFIT_NUCLEAR_MODE=1") passing this as True will - bypass it. Returns ------- @@ -612,7 +608,6 @@ class represented by model M and gives a score for their fitness. ) self.post_fit_output( - bypass_nuclear_if_on=bypass_nuclear_if_on, search_internal=result.search_internal, ) @@ -847,7 +842,7 @@ def result_via_completed_fit( return result - def post_fit_output(self, search_internal, bypass_nuclear_if_on: bool): + def post_fit_output(self, search_internal): """ Cleans up the output folderds after a completed non-linear search. 
@@ -860,8 +855,8 @@ def post_fit_output(self, search_internal, bypass_nuclear_if_on: bool): Parameters ---------- - bypass_nuclear_if_on - Whether to use nuclear mode to delete a lot of files (see nuclear mode description). + search_internal + The internal search. """ if not conf.instance["output"]["search_internal"]: self.logger.info("Removing search internal folder.") @@ -874,9 +869,6 @@ def post_fit_output(self, search_internal, bypass_nuclear_if_on: bool): self.paths.zip_remove() - if not bypass_nuclear_if_on: - self.paths.zip_remove_nuclear() - @abstractmethod def _fit(self, model: AbstractPriorModel, analysis: Analysis): pass diff --git a/autofit/non_linear/search/nest/dynesty/search/abstract.py b/autofit/non_linear/search/nest/dynesty/search/abstract.py index da91f33e1..2d7e9ccc5 100644 --- a/autofit/non_linear/search/nest/dynesty/search/abstract.py +++ b/autofit/non_linear/search/nest/dynesty/search/abstract.py @@ -201,11 +201,6 @@ def _fit( during_analysis=True, ) - try: - os.remove(self.checkpoint_file) - except TypeError: - pass - return search_internal def samples_info_from(self, search_internal=None): @@ -485,6 +480,17 @@ def search_internal_from( ): raise NotImplementedError() + def output_search_internal(self, search_internal): + + self.paths.save_search_internal( + obj=search_internal, + ) + + try: + os.remove(self.checkpoint_file) + except (TypeError, FileNotFoundError): + pass + def check_pool(self, uses_pool: bool, pool): if (uses_pool and pool is None) or (not uses_pool and pool is not None): raise exc.SearchException( diff --git a/autofit/non_linear/search/nest/nautilus/search.py b/autofit/non_linear/search/nest/nautilus/search.py index 289f1a010..9cce8d34e 100644 --- a/autofit/non_linear/search/nest/nautilus/search.py +++ b/autofit/non_linear/search/nest/nautilus/search.py @@ -159,10 +159,6 @@ def _fit(self, model: AbstractPriorModel, analysis): checkpoint_exists=checkpoint_exists, ) - if self.checkpoint_file is not None: - - 
os.remove(self.checkpoint_file) - return search_internal @property @@ -468,6 +464,11 @@ def output_search_internal(self, search_internal): search_internal.pool_l = pool_l search_internal.pool_s = pool_s + try: + os.remove(self.checkpoint_file) + except (TypeError, FileNotFoundError): + pass + def samples_info_from(self, search_internal=None): return { "log_evidence": search_internal.evidence(),