Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 37 additions & 0 deletions scripts/aggregator/config/general.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
analysis:
n_cores: 1
psf:
use_fft_default: false # If True, PSFs are convolved using FFTs by default, which is faster and uses less memory in all cases except for very small PSFs, False uses direct convolution.
hpc:
hpc_mode: false
iterations_per_full_update: 5000
adapt:
adapt_minimum_percent: 0.01
adapt_noise_limit: 100000000.0
inversion:
check_reconstruction: false # If True, the inversion's reconstruction is checked to ensure the solution of a mesh's mapper is not an invalid solution where the values are all the same.
use_positive_only_solver: false # If True, inversions use a positive-only linear algebra solver by default, which is slower but prevents unphysical negative values in the reconstructed solution.
no_regularization_add_to_curvature_diag_value: 1.0e-8 # The default value added to the curvature matrix's diagonal when regularization is not applied to a linear object, which prevents inversions failing due to the matrix being singular.
positive_only_uses_p_initial: false # If True, the positive-only solver of an inversion uses the reconstructed data's values as an initial guess of which values should be positive, speeding up the solver.
use_border_relocator: false # If True, by default a pixelization's border is used to relocate all pixels outside its border to the border.
numba:
cache: true
nopython: true
parallel: false
use_numba: true
output:
force_pickle_overwrite: false
info_whitespace_length: 80
log_file: output.log
log_level: INFO
log_to_file: false
model_results_decimal_places: 3
remove_files: false
samples_to_csv: false
profiling:
perform: true
repeats: 1
test:
check_likelihood_function: false # If True, when a search is resumed the likelihood of a previous sample is recalculated to ensure it is consistent with the previous run.
exception_override: false
disable_positions_lh_inversion_check: false
75 changes: 75 additions & 0 deletions scripts/aggregator/config/non_linear.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
mock:
MockOptimizer:
initialize:
method: prior
printing:
silence: false

MockSearch:
initialize:
method: prior
printing:
silence: false
search: {}

nest:
DynestyDynamic:
general:
acceptance_ratio_threshold: 0.1
bootstrap: null
bound: multi
enlarge: null
first_update: null
fmove: 0.9
max_move: 100
sample: auto
sampling_efficiency: 0.5
slices: 5
terminate_at_acceptance_ratio: false
update_interval: null
walks: 25
initialize:
method: prior
parallel:
force_x1_cpu: false
number_of_cores: 1
printing:
silence: false

DynestyStatic:
parallel:
number_of_cores: 1
initialize:
method: prior
inversion:
acceptance_ratio_threshold: 0.05
const_efficiency_mode: true
evidence_tolerance: 100.0
multimodal: false
n_live_points: 50
sampling_efficiency: 0.3
terminate_at_acceptance_ratio: false
printing:
silence: false
search:
const_efficiency_mode: false
evidence_tolerance: 0.5
importance_nested_sampling: false
max_iter: 0
max_modes: 100
mode_tolerance: -1.0e+90
multimodal: false
n_live_points: 50
sampling_efficiency: 0.5
settings:
context: 0
init_MPI: false
log_zero: -1.0e+100
n_iter_before_update: 5
null_log_evidence: -1.0e+90
resume: true
seed: -1.0
stagger_resampling_likelihood: true
verbose: false
write_output: true

83 changes: 83 additions & 0 deletions scripts/aggregator/config/notation.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@
label:
label:
alpha: \alpha
angle_binary: \theta
beta: \beta
break_radius: \theta_{\rm B}
centre_0: y
centre_1: x
coefficient: \lambda
contribution_factor: \omega_{\rm 0}
core_radius: C_{\rm r}
core_radius_0: C_{\rm r0}
core_radius_1: C_{\rm r1}
effective_radius: R_{\rm eff}
einstein_radius: \theta_{\rm Ein}
ell_comps_0: \epsilon_{\rm 1}
ell_comps_1: \epsilon_{\rm 2}
multipole_comps_0: M_{\rm 1}
multipole_comps_1: M_{\rm 2}
flux: F
gamma: \gamma
gamma_1: \gamma
gamma_2: \gamma
inner_coefficient: \lambda_{\rm 1}
inner_slope: t_{\rm 1}
intensity: I_{\rm b}
kappa: \kappa
kappa_s: \kappa_{\rm s}
log10m_vir: log_{\rm 10}(m_{\rm vir})
m: m
mass: M
mass_at_200: M_{\rm 200}
mass_ratio: M_{\rm ratio}
mass_to_light_gradient: \Gamma
mass_to_light_ratio: \Psi
mass_to_light_ratio_base: \Psi_{\rm base}
mass_to_light_radius: R_{\rm ref}
noise_factor: \omega_{\rm 1}
noise_power: \omega_{\rm 2}
noise_scale: \sigma_{\rm 1}
normalization_scale: n
outer_coefficient: \lambda_{\rm 2}
outer_slope: t_{\rm 2}
overdens: \Delta_{\rm vir}
pixels: N_{\rm pix}
radius_break: R_{\rm b}
redshift: z
redshift_object: z_{\rm obj}
redshift_source: z_{\rm src}
scale_radius: R_{\rm s}
scatter: \sigma
separation: s
sersic_index: n
shape_0: y_{\rm pix}
shape_1: x_{\rm pix}
sigma: \sigma
signal_scale: V
sky_scale: \sigma_{\rm 0}
slope: \gamma
truncation_radius: R_{\rm t}
weight_floor: W_{\rm f}
weight_power: W_{\rm p}
superscript:
ExternalShear: ext
Pixelization: pix
Point: point
Redshift: ''
Regularization: reg
label_format:
format:
angular_diameter_distance_to_earth: '{:.2f}'
concentration: '{:.2f}'
einstein_mass: '{:.4e}'
einstein_radius: '{:.2f}'
kpc_per_arcsec: '{:.2f}'
luminosity: '{:.4e}'
m: '{:.1f}'
mass: '{:.4e}'
mass_at_truncation_radius: '{:.4e}'
radius: '{:.2f}'
redshift: '{:.2f}'
rho: '{:.2f}'
sersic_luminosity: '{:.4e}'
63 changes: 63 additions & 0 deletions scripts/aggregator/config/output.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
# Determines whether files saved by the search are output to the hard-disk. This is true both when saving to the
# directory structure and when saving to database.

# Files can be listed name: bool where the name is the name of the file without a suffix (e.g. model not model.json)
# and bool is true or false.

# If a given file is not listed then the default value is used.

default: true # If true then files which are not explicitly listed here are output anyway. If false then they are not.

### Samples ###

# The `samples.csv` file contains every sampled value of every free parameter with its log likelihood and weight.

# This file is often large, therefore disabling it can significantly reduce hard-disk space use.

# `samples.csv` is used to perform marginalization, infer model parameter errors and do other analysis of the search
# chains. Even if output of `samples.csv` is disabled, these tasks are still performed by the fit and output to
# the `samples_summary.json` file. However, without a `samples.csv` file these types of tasks cannot be performed
# after the fit is complete, for example via the database.

samples: true

# The `samples.csv` file contains every accepted sampled value of every free parameter with its log likelihood and
# weight. For certain searches, the majority of samples have a very low weight, which has no numerical impact on the
# results of the model-fit. However, these samples are still output to the `samples.csv` file, taking up hard-disk space
# and slowing down analysis of the samples (e.g. via the database).

# The `samples_weight_threshold` below specifies the threshold value of the weight such that samples with a weight
# below this value are not output to the `samples.csv` file. This can be used to reduce the size of the `samples.csv`
# file and speed up analysis of the samples.

# Note that for many searches (e.g. MCMC) all samples have equal weight, and thus this threshold has no impact and
# there is no simple way to save hard-disk space. However, for nested sampling, the majority of samples have a very
# low weight and this threshold can be used to save hard-disk space.

# Set value to empty (e.g. delete 1.0e-10 below) to disable this feature.

samples_weight_threshold: 1.0e-10

### Search Internal ###

# The search internal folder which contains a saved state of the non-linear search, as a .pickle or .dill file.

# If the entry below is false, the folder is still output during the model-fit, as it is required to resume the fit
# from where it left off. Therefore, setting `false` below does not impact model-fitting checkpointing and resumption.
# Instead, the search internal folder is deleted once the fit is completed.

# The search internal folder file is often large, therefore deleting it after a fit is complete can significantly
# reduce hard-disk space use.

# The search internal representation (e.g. what you can load from the output .pickle file) may have additional
# quantities specific to the non-linear search that you are interested in inspecting. Deleting the folder means this
# information is lost.

search_internal: false

# Other Files:

covariance: false # `covariance.csv`: The [free parameters x free parameters] covariance matrix.
data: true # `data.json`: The value of every data point in the data.
noise_map: true # `noise_map.json`: The value of every RMS noise map value.

Loading