Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 0 additions & 43 deletions config/non_linear/mle.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,53 +2,10 @@

# **PyAutoFit** supports the following maximum likelihood estimator (MLE) algorithms:

# - PySwarms: https://github.com/ljvmiranda921/pyswarms / https://pyswarms.readthedocs.io/en/latest/index.html

# Settings in the [search], [run] and [options] entries are specific to each nested algorithm and should be
# determined by consulting that method's own readthedocs.

PySwarmsGlobal:
run:
iters: 2000
search:
cognitive: 0.5
ftol: -.inf
inertia: 0.9
n_particles: 50
social: 0.3
initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}.
method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution.
ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method.
ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method.
parallel:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
iterations_per_full_update: 500 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
iterations_per_quick_update: 500 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
PySwarmsLocal:
run:
iters: 2000
search:
cognitive: 0.5
ftol: -.inf
inertia: 0.9
minkowski_p_norm: 2
n_particles: 50
number_of_k_neighbors: 3
social: 0.3
initialize: # The method used to generate where walkers are initialized in parameter space {prior | ball}.
method: ball # priors: samples are initialized by randomly drawing from each parameter's prior. ball: samples are initialized by randomly drawing unit values from a narrow uniform distribution.
ball_lower_limit: 0.49 # The lower limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method.
ball_upper_limit: 0.51 # The upper limit of the uniform distribution unit values are drawn from when initializing walkers using the ball method.
parallel:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.
iterations_per_full_update: 500 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
iterations_per_quick_update: 500 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
LBFGS:
search:
tol: null
Expand Down
49 changes: 0 additions & 49 deletions config/non_linear/nest.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

# - Nautilus https://github.com/johannesulf/nautilus / https://nautilus-sampler.readthedocs.io/en/stable/index.html
# - Dynesty: https://github.com/joshspeagle/dynesty / https://dynesty.readthedocs.io/en/latest/index.html
# - UltraNest: https://github.com/JohannesBuchner/UltraNest / https://johannesbuchner.github.io/UltraNest/readme.html
# Settings in the [search] and [run] entries are specific to each nested algorithm and should be determined by
# consulting that MCMC method's own readthedocs.
Nautilus:
Expand Down Expand Up @@ -90,51 +89,3 @@ DynestyDynamic:
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.

UltraNest:
search:
draw_multiple: true
ndraw_max: 65536
ndraw_min: 128
num_bootstraps: 30
num_test_samples: 2
resume: true
run_num: null
storage_backend: hdf5
vectorized: false
warmstart_max_tau: -1.0
run:
cluster_num_live_points: 40
dkl: 0.5
dlogz: 0.5
frac_remain: 0.01
insertion_test_window: 10
insertion_test_zscore_threshold: 2
lepsilon: 0.001
log_interval: null
max_iters: null
max_ncalls: null
max_num_improvement_loops: -1.0
min_ess: 400
min_num_live_points: 400
show_status: true
update_interval_ncall: null
update_interval_volume_fraction: 0.8
viz_callback: auto
stepsampler:
adaptive_nsteps: false
log: false
max_nsteps: 1000
nsteps: 25
region_filter: false
scale: 1.0
stepsampler_cls: null
initialize: # The method used to generate where walkers are initialized in parameter space {prior}.
method: prior # priors: samples are initialized by randomly drawing from each parameter's prior.
parallel:
number_of_cores: 1 # The number of cores the search is parallelized over by default, using Python multiprocessing.
printing:
silence: false # If True, the default print output of the non-linear search is silenced and not printed by the Python interpreter.

iterations_per_full_update: 1000 # Non-linear search iterations between every full update, which outputs all visuals and result fits (e.g. model.result, search.summary), this exits the search and can be slow.
iterations_per_quick_update: 500 # Non-linear search iterations between every quick update, which just displays the maximum likelihood model fit.
remove_state_files_at_end: true # Whether to remove the savestate of the search (e.g. the Emcee hdf5 file) at the end to save hard-disk space (results are still stored as PyAutoFit pickles and loadable).
9 changes: 0 additions & 9 deletions config/visualize/plots_search.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,6 @@ emcee:
trajectories: true # Output Emcee trajectories figure during a non-linear search fit?
nautilus:
cornerplot: true # Output Nautilus cornerplot figure during a non-linear search fit?
pyswarms:
contour: true # Output PySwarms contour figure during a non-linear search fit?
cost_history: true # Output PySwarms cost_history figure during a non-linear search fit?
time_series: true # Output PySwarms time_series figure during a non-linear search fit?
trajectories: true # Output PySwarms trajectories figure during a non-linear search fit?
ultranest:
cornerplot: true # Output Ultranest cornerplot figure during a non-linear search fit?
runplot: true # Output Ultranest runplot figure during a non-linear search fit?
traceplot: true # Output Ultranest traceplot figure during a non-linear search fit?
zeus:
corner: true # Output Zeus corner figure during a non-linear search fit?
likelihood_series: true # Output Zeus likelihood series figure during a non-linear search fit?
Expand Down