Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
105 changes: 44 additions & 61 deletions searches/pyswarms/abstract.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
from typing import Dict, Optional
from typing import Optional

import numpy as np

from autofit import exc
from autofit.database.sqlalchemy_ import sa
from autofit.mapper.prior_model.abstract import AbstractPriorModel
from autofit.non_linear.fitness import Fitness
from autofit.non_linear.initializer import AbstractInitializer
from autofit.non_linear.search.mle.abstract_mle import AbstractMLE
from autofit.non_linear.samples.sample import Sample
from autofit.non_linear.samples.samples import Samples
from autofit.non_linear.test_mode import is_test_mode


class FitnessPySwarms(Fitness):
Expand All @@ -18,13 +18,9 @@ def __call__(self, parameters, *kwargs):
Interfaces with any non-linear search in order to fit a model to the data and return a log likelihood via
an `Analysis` class.

The interface is described in full in the `__init__` docstring.

`PySwarms` have a unique interface in that lists of parameters corresponding to multiple particles are
passed to the fitness function. A bespoke `__call__` method is therefore required to handle this.

The figure of merit is the log posterior multiplied by -2.0, which is the chi-squared value that is minimized
by the `PySwarms` non-linear search.
passed to the fitness function. A bespoke `__call__` method is therefore required to handle this,
delegating per-particle evaluation to ``call_wrap``.

Parameters
----------
Expand All @@ -42,26 +38,10 @@ def __call__(self, parameters, *kwargs):
if isinstance(parameters[0], float):
parameters = [parameters]

figure_of_merit_list = []

for params_of_particle in parameters:
try:
instance = self.model.instance_from_vector(vector=params_of_particle)
log_likelihood = self.analysis.log_likelihood_function(
instance=instance
)
log_prior = self.model.log_prior_list_from_vector(
vector=params_of_particle
)
log_posterior = log_likelihood + sum(log_prior)
figure_of_merit = -2.0 * log_posterior
except exc.FitException:
figure_of_merit = np.nan

if np.isnan(figure_of_merit):
figure_of_merit = -2.0 * self.resample_figure_of_merit

figure_of_merit_list.append(figure_of_merit)
figure_of_merit_list = [
self.call_wrap(params_of_particle)
for params_of_particle in parameters
]

return np.asarray(figure_of_merit_list)

Expand All @@ -72,10 +52,16 @@ def __init__(
name: Optional[str] = None,
path_prefix: Optional[str] = None,
unique_tag: Optional[str] = None,
n_particles: int = 50,
cognitive: float = 0.5,
social: float = 0.3,
inertia: float = 0.9,
iters: int = 2000,
initializer: Optional[AbstractInitializer] = None,
iterations_per_quick_update: int = None,
iterations_per_full_update: int = None,
number_of_cores: int = None,
number_of_cores: int = 1,
silence: bool = False,
session: Optional[sa.orm.Session] = None,
**kwargs
):
Expand All @@ -96,20 +82,26 @@ def __init__(
unique_tag
The name of a unique tag for this model-fit, which will be given a unique entry in the sqlite database
and also acts as the folder after the path prefix and before the search name.
n_particles
The number of particles in the swarm.
cognitive
The cognitive parameter controlling how much a particle is influenced by its own best position.
social
The social parameter controlling how much a particle is influenced by the swarm's best position.
inertia
The inertia weight controlling the momentum of the particles.
iters
The total number of iterations the swarm performs.
initializer
Generates the initial samples of non-linear parameter space (see autofit.non_linear.initializer).
number_of_cores
The number of cores sampling is performed using a Python multiprocessing Pool instance.
silence
If True, the default print output of the non-linear search is silenced.
session
An SQLalchemy session instance so the results of the model-fit are written to an SQLite database.
"""

number_of_cores = (
self._config("parallel", "number_of_cores")
if number_of_cores is None
else number_of_cores
)

super().__init__(
name=name,
path_prefix=path_prefix,
Expand All @@ -118,10 +110,20 @@ def __init__(
iterations_per_quick_update=iterations_per_quick_update,
iterations_per_full_update=iterations_per_full_update,
number_of_cores=number_of_cores,
silence=silence,
session=session,
**kwargs
)

self.n_particles = n_particles
self.cognitive = cognitive
self.social = social
self.inertia = inertia
self.iters = iters

if is_test_mode():
self.apply_test_mode()

self.logger.debug("Creating PySwarms Search")

def _fit(self, model: AbstractPriorModel, analysis):
Expand All @@ -146,6 +148,7 @@ def _fit(self, model: AbstractPriorModel, analysis):
fitness = FitnessPySwarms(
model=model,
analysis=analysis,
paths=self.paths,
fom_is_log_likelihood=False,
resample_figure_of_merit=-np.inf,
convert_to_chi_squared=True,
Expand All @@ -167,15 +170,15 @@ def _fit(self, model: AbstractPriorModel, analysis):
parameter_lists,
log_posterior_list,
) = self.initializer.samples_from_model(
total_points=self.config_dict_search["n_particles"],
total_points=self.n_particles,
model=model,
fitness=fitness,
paths=self.paths,
n_cores=self.number_of_cores,
)

init_pos = np.zeros(
shape=(self.config_dict_search["n_particles"], model.prior_count)
shape=(self.n_particles, model.prior_count)
)

for index, parameters in enumerate(parameter_lists):
Expand All @@ -201,12 +204,12 @@ def _fit(self, model: AbstractPriorModel, analysis):

bounds = (np.asarray(lower_bounds), np.asarray(upper_bounds))

while total_iterations < self.config_dict_run["iters"]:
while total_iterations < self.iters:
search_internal = self.search_internal_from(
model=model, fitness=fitness, bounds=bounds, init_pos=init_pos
)

iterations_remaining = self.config_dict_run["iters"] - total_iterations
iterations_remaining = self.iters - total_iterations

iterations = min(self.iterations_per_full_update, iterations_remaining)

Expand Down Expand Up @@ -296,28 +299,8 @@ def samples_via_internal_from(self, model, search_internal=None):
samples_info=search_internal_dict,
)

def config_dict_test_mode_from(self, config_dict: Dict) -> Dict:
"""
Returns a configuration dictionary for test mode meaning that the sampler terminates as quickly as possible.

Entries which set the total number of samples of the sampler (e.g. maximum calls, maximum likelihood
evaluations) are reduced to low values meaning it terminates nearly immediately.

Parameters
----------
config_dict
The original configuration dictionary for this sampler which includes entries controlling how fast the
sampler terminates.

Returns
-------
A configuration dictionary where settings which control the sampler's number of samples are reduced so it
terminates as quickly as possible.
"""
return {
**config_dict,
"iters": 1,
}
def apply_test_mode(self):
    """
    Reduce the search's workload so it terminates almost immediately.

    This hook is invoked from ``__init__`` when ``is_test_mode()`` is True;
    it overrides the total number of particle swarm iterations (`iters`)
    to 1 so a model-fit completes as quickly as possible.
    """
    self.iters = 1

def search_internal_from(self, model, fitness, bounds, init_pos):
    """
    Return the internal PySwarms optimizer which performs the non-linear search.

    Abstract on this class; concrete subclasses (e.g. the global-best and
    local-best searches) override this to construct the specific PySwarms
    sampler (``GlobalBestPSO`` / ``LocalBestPSO``).

    Parameters
    ----------
    model
        The model mapped from the non-linear search's parameter space.
    fitness
        The fitness function evaluated by the particles of the swarm.
    bounds
        The (lower, upper) physical bounds of every model parameter.
    init_pos
        The initial positions of every particle in the swarm.

    Raises
    ------
    NotImplementedError
        Always, on this abstract base class.
    """
    raise NotImplementedError()
22 changes: 11 additions & 11 deletions searches/pyswarms/globe.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,12 +21,13 @@ def __init__(
initializer: Optional[AbstractInitializer] = None,
iterations_per_full_update: int = None,
iterations_per_quick_update: int = None,
number_of_cores: int = None,
number_of_cores: int = 1,
silence: bool = False,
session: Optional[sa.orm.Session] = None,
**kwargs
):
"""
A PySwarms Particle Swarm MLE global non-linear search.
A PySwarms Particle Swarm MLE global-best non-linear search.

For a full description of PySwarms, check out its GitHub and readthedocs webpages:

Expand All @@ -47,6 +48,8 @@ def __init__(
Generates the initial samples of non-linear parameter space (see autofit.non_linear.initializer).
number_of_cores
The number of cores sampling is performed using a Python multiprocessing Pool instance.
silence
If True, the default print output of the non-linear search is silenced.
"""

super().__init__(
Expand All @@ -57,31 +60,28 @@ def __init__(
iterations_per_quick_update=iterations_per_quick_update,
iterations_per_full_update=iterations_per_full_update,
number_of_cores=number_of_cores,
silence=silence,
session=session,
**kwargs
)

self.logger.debug("Creating PySwarms Search")

def search_internal_from(self, model, fitness, bounds, init_pos):
    """
    Get the PySwarms GlobalBestPSO sampler which performs the non-linear search.

    NOTE(review): this span of the paste interleaved pre-merge lines (the
    ``config_dict_search`` lookups and leftover ``filter_list`` /
    ``**config_dict`` plumbing) with the post-merge lines; the residue is
    removed here so only the merged implementation, driven by the explicit
    ``cognitive`` / ``social`` / ``inertia`` attributes, remains.

    Parameters
    ----------
    model
        The model mapped from the non-linear search's parameter space,
        supplying the number of dimensions of the swarm.
    fitness
        The fitness function evaluated by the particles of the swarm.
    bounds
        The (lower, upper) physical bounds of every model parameter.
    init_pos
        The initial positions of every particle in the swarm.
    """
    # Imported locally so the optional pyswarms dependency is only required
    # when this search is actually run.
    import pyswarms

    # PySwarms option keys: c1 = cognitive, c2 = social, w = inertia.
    options = {
        "c1": self.cognitive,
        "c2": self.social,
        "w": self.inertia,
    }

    return pyswarms.global_best.GlobalBestPSO(
        n_particles=self.n_particles,
        dimensions=model.prior_count,
        bounds=bounds,
        init_pos=init_pos,
        options=options,
    )
37 changes: 22 additions & 15 deletions searches/pyswarms/local.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,17 @@ def __init__(
name: Optional[str] = None,
path_prefix: Optional[str] = None,
unique_tag: Optional[str] = None,
number_of_k_neighbors: int = 3,
minkowski_p_norm: int = 2,
iterations_per_quick_update: int = None,
iterations_per_full_update: int = None,
number_of_cores: int = None,
number_of_cores: int = 1,
silence: bool = False,
session: Optional[sa.orm.Session] = None,
**kwargs
):
"""
A PySwarms Particle Swarm MLE global non-linear search.
A PySwarms Particle Swarm MLE local-best non-linear search.

For a full description of PySwarms, check out its GitHub and readthedocs webpages:

Expand All @@ -43,10 +46,14 @@ def __init__(
unique_tag
The name of a unique tag for this model-fit, which will be given a unique entry in the sqlite database
and also acts as the folder after the path prefix and before the search name.
initializer
Generates the initialize samples of non-linear parameter space (see autofit.non_linear.initializer).
number_of_k_neighbors
The number of neighbors each particle considers in the local-best topology.
minkowski_p_norm
The Minkowski p-norm used to compute distances between particles.
number_of_cores
The number of cores sampling is performed using a Python multiprocessing Pool instance.
silence
If True, the default print output of the non-linear search is silenced.
"""

super().__init__(
Expand All @@ -56,35 +63,35 @@ def __init__(
iterations_per_quick_update=iterations_per_quick_update,
iterations_per_full_update=iterations_per_full_update,
number_of_cores=number_of_cores,
silence=silence,
session=session,
**kwargs
)

self.number_of_k_neighbors = number_of_k_neighbors
self.minkowski_p_norm = minkowski_p_norm

self.logger.debug("Creating PySwarms Search")

def search_internal_from(self, model, fitness, bounds, init_pos):
    """
    Get the PySwarms LocalBestPSO sampler which performs the non-linear search.

    NOTE(review): this span of the paste interleaved pre-merge lines (the
    ``config_dict_search`` lookups and leftover ``filter_list`` /
    ``**config_dict`` plumbing) with the post-merge lines; the residue is
    removed here so only the merged implementation, driven by the explicit
    attributes set in ``__init__``, remains.

    Parameters
    ----------
    model
        The model mapped from the non-linear search's parameter space,
        supplying the number of dimensions of the swarm.
    fitness
        The fitness function evaluated by the particles of the swarm.
    bounds
        The (lower, upper) physical bounds of every model parameter.
    init_pos
        The initial positions of every particle in the swarm.
    """
    # Imported locally so the optional pyswarms dependency is only required
    # when this search is actually run.
    import pyswarms

    # PySwarms option keys: c1 = cognitive, c2 = social, w = inertia,
    # k = number of neighbours, p = Minkowski p-norm (local-best topology).
    options = {
        "c1": self.cognitive,
        "c2": self.social,
        "w": self.inertia,
        "k": self.number_of_k_neighbors,
        "p": self.minkowski_p_norm,
    }

    return pyswarms.local_best.LocalBestPSO(
        n_particles=self.n_particles,
        dimensions=model.prior_count,
        bounds=bounds,
        init_pos=init_pos,
        options=options,
    )
Loading