From c412eb2a1645fdea21c08a5917e5eecc31bfa37f Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 22 Aug 2025 20:09:43 +0000 Subject: [PATCH 01/40] Add global-best PSO --- .tools/envs/testenv-linux.yml | 1 + .tools/envs/testenv-nevergrad.yml | 3 +- .tools/envs/testenv-numpy.yml | 1 + .tools/envs/testenv-others.yml | 1 + .tools/envs/testenv-pandas.yml | 1 + .tools/envs/testenv-plotly.yml | 3 +- environment.yml | 1 + src/optimagic/algorithms.py | 33 ++ src/optimagic/config.py | 1 + .../optimizers/pyswarms_optimizers.py | 359 ++++++++++++++++++ 10 files changed, 402 insertions(+), 2 deletions(-) create mode 100644 src/optimagic/optimizers/pyswarms_optimizers.py diff --git a/.tools/envs/testenv-linux.yml b/.tools/envs/testenv-linux.yml index 398c56cce..9619008f5 100644 --- a/.tools/envs/testenv-linux.yml +++ b/.tools/envs/testenv-linux.yml @@ -36,6 +36,7 @@ dependencies: - fides==0.7.4 # dev, tests - kaleido>=1.0 # dev, tests - bayes_optim # dev, tests + - pyswarms # dev, tests - pandas-stubs # dev, tests - types-cffi # dev, tests - types-openpyxl # dev, tests diff --git a/.tools/envs/testenv-nevergrad.yml b/.tools/envs/testenv-nevergrad.yml index 874b9fa5e..f87644f68 100644 --- a/.tools/envs/testenv-nevergrad.yml +++ b/.tools/envs/testenv-nevergrad.yml @@ -33,12 +33,13 @@ dependencies: - fides==0.7.4 # dev, tests - kaleido>=1.0 # dev, tests - bayes_optim # dev, tests + - pyswarms # dev, tests - pandas-stubs # dev, tests - types-cffi # dev, tests - types-openpyxl # dev, tests - types-jinja2 # dev, tests - sqlalchemy-stubs # dev, tests - sphinxcontrib-mermaid # dev, tests, docs - - -e ../../ - bayesian_optimization==1.4.0 - nevergrad + - -e ../../ diff --git a/.tools/envs/testenv-numpy.yml b/.tools/envs/testenv-numpy.yml index c54dc010f..f686904bc 100644 --- a/.tools/envs/testenv-numpy.yml +++ b/.tools/envs/testenv-numpy.yml @@ -34,6 +34,7 @@ dependencies: - fides==0.7.4 # dev, tests - kaleido>=1.0 # dev, tests - bayes_optim # dev, tests + - pyswarms # dev, tests - types-cffi # dev, tests - types-openpyxl # dev, tests - types-jinja2 # dev, tests diff --git a/.tools/envs/testenv-others.yml b/.tools/envs/testenv-others.yml index 308d142aa..47aca3876 100644 --- a/.tools/envs/testenv-others.yml +++ b/.tools/envs/testenv-others.yml @@ -34,6 +34,7 @@ dependencies: - fides==0.7.4 # dev, tests - kaleido>=1.0 # dev, tests - bayes_optim # dev, tests + - pyswarms # dev, tests - pandas-stubs # dev, tests - types-cffi # dev, tests - types-openpyxl # dev, tests diff --git a/.tools/envs/testenv-pandas.yml b/.tools/envs/testenv-pandas.yml index bccee25c6..26a323250 100644 --- a/.tools/envs/testenv-pandas.yml +++ b/.tools/envs/testenv-pandas.yml @@ -34,6 +34,7 @@ dependencies: - fides==0.7.4 # dev, tests - kaleido>=1.0 # dev, tests - bayes_optim # dev, tests + - pyswarms # dev, tests - types-cffi # dev, tests - types-openpyxl # dev, tests - types-jinja2 # dev, tests diff --git a/.tools/envs/testenv-plotly.yml b/.tools/envs/testenv-plotly.yml index eccdf512d..4470f10af 100644 --- a/.tools/envs/testenv-plotly.yml +++ b/.tools/envs/testenv-plotly.yml @@ -33,11 +33,12 @@ dependencies: - Py-BOBYQA # dev, tests - fides==0.7.4 # dev, tests - bayes_optim # dev, tests + - pyswarms # dev, tests - pandas-stubs # dev, tests - types-cffi # dev, tests - types-openpyxl # dev, tests - types-jinja2 # dev, tests - sqlalchemy-stubs # dev, tests - sphinxcontrib-mermaid # dev, tests, docs - - -e ../../ - kaleido<0.3 + - -e ../../ diff --git a/environment.yml b/environment.yml index 
6bb4f01db..d428654e9 100644 --- a/environment.yml +++ b/environment.yml @@ -48,6 +48,7 @@ dependencies: - kaleido>=1.0 # dev, tests - pre-commit>=4 # dev - bayes_optim # dev, tests + - pyswarms # dev, tests - -e . # dev # type stubs - pandas-stubs # dev, tests diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py index f86792478..e542c5f1f 100644 --- a/src/optimagic/algorithms.py +++ b/src/optimagic/algorithms.py @@ -71,6 +71,7 @@ PygmoSimulatedAnnealing, PygmoXnes, ) +from optimagic.optimizers.pyswarms_optimizers import PySwarmsGlobalBestPSO from optimagic.optimizers.scipy_optimizers import ( ScipyBasinhopping, ScipyBFGS, @@ -202,6 +203,7 @@ class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -427,6 +429,7 @@ class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -461,6 +464,7 @@ class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -529,6 +533,7 @@ class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -689,6 +694,7 @@ class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -796,6 +802,7 @@ class BoundedGlobalParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1154,6 +1161,7 @@ class BoundedGlobalGradientFreeAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + 
pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1231,6 +1239,7 @@ class GlobalGradientFreeScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1269,6 +1278,7 @@ class GlobalGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1471,6 +1481,7 @@ class BoundedGradientFreeScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1533,6 +1544,7 @@ class BoundedGradientFreeParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1626,6 +1638,7 @@ class GradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1726,6 +1739,7 @@ class BoundedGlobalScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -1769,6 +1783,7 @@ class BoundedGlobalParallelAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1850,6 +1865,7 @@ class GlobalParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + 
pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -2097,6 +2113,7 @@ class BoundedParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -2387,6 +2404,7 @@ class GlobalGradientFreeAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -2494,6 +2512,7 @@ class BoundedGradientFreeAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -2604,6 +2623,7 @@ class GradientFreeScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -2674,6 +2694,7 @@ class GradientFreeParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -2744,6 +2765,7 @@ class BoundedGlobalAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -2839,6 +2861,7 @@ class GlobalScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -2886,6 +2909,7 @@ class GlobalParallelAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: 
Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -3193,6 +3217,7 @@ class BoundedScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -3278,6 +3303,7 @@ class BoundedParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -3391,6 +3417,7 @@ class ParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -3537,6 +3564,7 @@ class GradientFreeAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -3614,6 +3642,7 @@ class GlobalAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -3773,6 +3802,7 @@ class BoundedAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -3923,6 +3953,7 @@ class ScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -4035,6 +4066,7 @@ class ParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: 
Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -4127,6 +4159,7 @@ class Algorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute diff --git a/src/optimagic/config.py b/src/optimagic/config.py index ce6cd4d60..124bbdfa3 100644 --- a/src/optimagic/config.py +++ b/src/optimagic/config.py @@ -39,6 +39,7 @@ def _is_installed(module_name: str) -> bool: IS_IMINUIT_INSTALLED = _is_installed("iminuit") IS_NEVERGRAD_INSTALLED = _is_installed("nevergrad") IS_BAYESOPT_INSTALLED = _is_installed("bayes_opt") +IS_PYSWARMS_INSTALLED = _is_installed("pyswarms") # ====================================================================================== diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py new file mode 100644 index 000000000..d98a80a60 --- /dev/null +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -0,0 +1,359 @@ +"""Implement PySwarms particle swarm optimization algorithms. + +This module provides optimagic-compatible wrappers for PySwarms particle swarm +optimization algorithms including global best, local best, and general PSO variants with +support for different topologies. + +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Callable, Literal + +import numpy as np +from numpy.typing import NDArray + +from optimagic import mark +from optimagic.config import IS_PYSWARMS_INSTALLED +from optimagic.exceptions import NotInstalledError +from optimagic.optimization.algo_options import STOPPING_MAXFUN_GLOBAL, STOPPING_MAXITER +from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult +from optimagic.optimization.internal_optimization_problem import ( + InternalBounds, + InternalOptimizationProblem, +) +from optimagic.typing import ( + AggregationLevel, + NonNegativeFloat, + PositiveFloat, + PositiveInt, +) + +PYSWARMS_NOT_INSTALLED_ERROR = ( + "This optimizer requires the 'pyswarms' package to be installed. " + "You can install it with `pip install pyswarms`. " + "Visit https://pyswarms.readthedocs.io/en/latest/installation.html " + "for more detailed installation instructions." 
+) + + +@dataclass(frozen=True) +class BasePSOOptions: + """Base PSO parameters shared across all variants.""" + + cognitive_parameter: PositiveFloat + """Cognitive parameter (c1) - attraction to personal best.""" + + social_parameter: PositiveFloat + """Social parameter (c2) - attraction to neighborhood/global best.""" + + inertia_weight: PositiveFloat + """Inertia weight (w) - momentum control.""" + + +@dataclass(frozen=True) +class LocalBestPSOOptions(BasePSOOptions): + """Local Best PSO specific parameters.""" + + k_neighbors: PositiveInt + """Number of neighbors in local neighborhood.""" + + p_norm: Literal[1, 2] + """Distance metric for neighbor selection (1=Manhattan, 2=Euclidean).""" + + +@dataclass(frozen=True) +class GeneralPSOOptions(BasePSOOptions): + """General PSO parameters with topology support.""" + + k_neighbors: PositiveInt | None = None + """Number of neighbors for topologies requiring neighborhoods.""" + + p_norm: Literal[1, 2] | None = None + """Distance metric for neighbor selection (1=Manhattan, 2=Euclidean).""" + + vonneumann_range: PositiveInt | None = None + """Range parameter for Von Neumann topology.""" + + +@mark.minimizer( + name="pyswarms_global_best", + solver_type=AggregationLevel.SCALAR, + is_available=IS_PYSWARMS_INSTALLED, + is_global=True, + needs_jac=False, + needs_hess=False, + needs_bounds=False, + supports_parallelism=True, + supports_bounds=True, + supports_infinite_bounds=False, + supports_linear_constraints=False, + supports_nonlinear_constraints=False, + disable_history=True, +) +@dataclass(frozen=True) +class PySwarmsGlobalBestPSO(Algorithm): + r"""Minimize a scalar function using Global Best Particle Swarm Optimization. + + This algorithm uses a swarm of particles that move through the search space, + where each particle is attracted to both its personal best position and the + global best position found by the entire swarm. It uses a star topology where + all particles are connected to the global best particle. + + The position update follows: + + .. math:: + + x_i(t+1) = x_i(t) + v_i(t+1) + + where the velocity update is: + + .. math:: + + v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] + + c_2 r_{2j}(t)[\hat{y}_j(t) - x_{ij}(t)] + + Here :math:`c_1` and :math:`c_2` control the balance between personal experience + and swarm knowledge, :math:`w` controls momentum, and :math:`r_{1j}`, :math:`r_{2j}` + are random numbers from [0,1]. + + """ + + n_particles: PositiveInt = 50 + """Number of particles in the swarm. + + Typical values: 20-100. + + """ + + cognitive_parameter: PositiveFloat = 0.5 + r"""Cognitive parameter :math:`c_1` controlling attraction to personal best.""" + + social_parameter: PositiveFloat = 0.3 + r"""Social parameter :math:`c_2` controlling attraction to global best.""" + + inertia_weight: PositiveFloat = 0.9 + r"""Inertia weight :math:`w` controlling momentum. + + Higher values promote exploration, lower values promote exploitation. Typical range: + 0.1-0.9. + + """ + + convergence_ftol_rel: float = -np.inf + """Relative tolerance for convergence based on function value changes. + + Set to -np.inf to disable. 
+ + """ + + convergence_ftol_iter: PositiveInt = 1 + """Number of iterations to check for convergence.""" + + stopping_maxiter: PositiveInt = STOPPING_MAXITER + """Maximum number of iterations.""" + + stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL + """Maximum number of function evaluations.""" + + boundary_strategy: Literal[ + "periodic", "reflective", "shrink", "random", "intermediate" + ] = "periodic" + """Strategy for handling out-of-bounds particles: 'periodic' (wrap around), + 'reflective' (bounce), 'shrink' (move to boundary), 'random' (reposition), + 'intermediate' (place between current and boundary).""" + + velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" + """Strategy for out-of-bounds velocities: 'unmodified' (keep), 'adjust' (scale), + 'invert' (reverse), 'zero' (set to zero).""" + + velocity_clamp_min: float | None = None + """Minimum velocity value for clamping. + + None to disable. + + """ + + velocity_clamp_max: float | None = None + """Maximum velocity value for clamping. + + None to disable. + + """ + + n_processes: PositiveInt | None = None + """Number of processes for parallel evaluation. + + None to disable parallelization. + + """ + + center_init: PositiveFloat = 1.0 + """Scaling factor for initial particle positions around search space center.""" + + def _solve_internal_problem( + self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] + ) -> InternalOptimizeResult: + if not IS_PYSWARMS_INSTALLED: + raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) + + import pyswarms as ps + + # Build structured options using dataclass + pso_options = BasePSOOptions( + cognitive_parameter=self.cognitive_parameter, + social_parameter=self.social_parameter, + inertia_weight=self.inertia_weight, + ) + options = _build_pso_options_dict(pso_options) + + velocity_clamp = _build_velocity_clamp( + self.velocity_clamp_min, self.velocity_clamp_max + ) + + bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) + + init_pos = _create_initial_population( + x0=x0, n_particles=self.n_particles, bounds=bounds + ) + + optimizer = ps.single.GlobalBestPSO( + n_particles=self.n_particles, + dimensions=len(x0), + options=options, + bounds=bounds, + bh_strategy=self.boundary_strategy, + velocity_clamp=velocity_clamp, + vh_strategy=self.velocity_strategy, + center=self.center_init, + ftol=self.convergence_ftol_rel, + ftol_iter=self.convergence_ftol_iter, + init_pos=init_pos, + ) + + objective_wrapper = _create_objective_wrapper(problem) + + result = optimizer.optimize( + objective_func=objective_wrapper, + iters=self.stopping_maxiter, + n_processes=self.n_processes, + verbose=False, + ) + + return _process_pyswarms_result( + result=result, + n_particles=self.n_particles, + n_iterations_run=self.stopping_maxiter, + ) + +def _build_pso_options_dict(options: BasePSOOptions) -> dict[str, float | int]: + """Convert structured PSO options to PySwarms format.""" + base_options = { + "c1": options.cognitive_parameter, + "c2": options.social_parameter, + "w": options.inertia_weight, + } + + # Add topology-specific options if present + if isinstance(options, LocalBestPSOOptions): + base_options.update( + { + "k": options.k_neighbors, + "p": options.p_norm, + } + ) + elif isinstance(options, GeneralPSOOptions): + if options.k_neighbors is not None: + base_options["k"] = options.k_neighbors + if options.p_norm is not None: + base_options["p"] = options.p_norm + if options.vonneumann_range is not None: + base_options["r"] = options.vonneumann_range + + 
return base_options + + +def _build_velocity_clamp( + velocity_clamp_min: float | None, velocity_clamp_max: float | None +) -> tuple[float, float] | None: + """Build velocity clamp tuple.""" + clamp = None + if velocity_clamp_min is not None and velocity_clamp_max is not None: + clamp = (velocity_clamp_min, velocity_clamp_max) + return clamp + + +def _process_pyswarms_result( + result: tuple[float, NDArray[np.float64]], n_particles: int, n_iterations_run: int +) -> InternalOptimizeResult: + """Convert PySwarms result to optimagic format.""" + best_cost, best_position = result + + return InternalOptimizeResult( + x=best_position, + fun=best_cost, + success=True, + message="PySwarms optimization completed", + n_fun_evals=n_particles * n_iterations_run, + n_jac_evals=0, + n_hess_evals=0, + n_iterations=n_iterations_run, + ) + + +def _create_objective_wrapper( + problem: InternalOptimizationProblem, +) -> Callable[[NDArray[np.float64]], NDArray[np.float64]]: + """Create objective function wrapper for PySwarms 2D input format.""" + + def objective_wrapper(x: NDArray[np.float64]) -> NDArray[np.float64]: + """Objective wrapper for PySwarms format. + + Args: + x: 2D array of shape (n_particles, n_dimensions) with particle positions. + + Returns: + 1D array of shape (n_particles,) with objective values. + + """ + return np.array([problem.fun(xi) for xi in x]) + + return objective_wrapper + + +def _convert_bounds_to_pyswarms( + bounds: InternalBounds, n_dimensions: int +) -> tuple[NDArray[np.float64], NDArray[np.float64]]: + """Convert optimagic bounds to PySwarms format.""" + lower_bounds_arr = ( + bounds.lower if bounds.lower is not None else np.zeros(n_dimensions) + ) + upper_bounds_arr = ( + bounds.upper if bounds.upper is not None else np.ones(n_dimensions) + ) + if not np.all(np.isfinite(lower_bounds_arr)) or not np.all( + np.isfinite(upper_bounds_arr) + ): + raise ValueError("PySwarms does not support infinite bounds.") + + return (lower_bounds_arr, upper_bounds_arr) + + +def _create_initial_population( + x0: NDArray[np.float64], + n_particles: int, + bounds: tuple[NDArray[np.float64], NDArray[np.float64]], +) -> NDArray[np.float64]: + """Create initial population with x0 as first particle.""" + n_dimensions = len(x0) + lower_bounds, upper_bounds = bounds + + # Generate random initial positions within the bounds + init_pos = np.random.uniform( + low=lower_bounds, high=upper_bounds, size=(n_particles, n_dimensions) + ) + + init_pos[0] = np.clip(x0, lower_bounds, upper_bounds) + + return init_pos From b8a3f4a8e942b3800a25a1afd90753b76a9c4cc3 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 22 Aug 2025 22:53:34 +0000 Subject: [PATCH 02/40] fix parameters --- .../optimizers/pyswarms_optimizers.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index d98a80a60..6a11d4038 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -9,7 +9,7 @@ from __future__ import annotations from dataclasses import dataclass -from typing import Any, Callable, Literal +from typing import Callable, Literal import numpy as np from numpy.typing import NDArray @@ -17,7 +17,10 @@ from optimagic import mark from optimagic.config import IS_PYSWARMS_INSTALLED from optimagic.exceptions import NotInstalledError -from optimagic.optimization.algo_options import STOPPING_MAXFUN_GLOBAL,
STOPPING_MAXITER +from optimagic.optimization.algo_options import ( + STOPPING_MAXFUN_GLOBAL, + STOPPING_MAXITER, +) from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalBounds, @@ -141,12 +144,8 @@ class PySwarmsGlobalBestPSO(Algorithm): """ - convergence_ftol_rel: float = -np.inf - """Relative tolerance for convergence based on function value changes. - - Set to -np.inf to disable. - - """ + convergence_ftol_rel: NonNegativeFloat = -np.inf + """Relative tolerance for convergence based on function value changes.""" convergence_ftol_iter: PositiveInt = 1 """Number of iterations to check for convergence.""" @@ -154,9 +153,6 @@ class PySwarmsGlobalBestPSO(Algorithm): stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" - stopping_maxfun: PositiveInt = STOPPING_MAXFUN_GLOBAL - """Maximum number of function evaluations.""" - boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" From eead60bf4894723ddf34de9461d86a964c6bec4d Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 22 Aug 2025 22:55:21 +0000 Subject: [PATCH 03/40] expose verbose --- src/optimagic/optimizers/pyswarms_optimizers.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 6a11d4038..622e05a45 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -188,6 +188,9 @@ class PySwarmsGlobalBestPSO(Algorithm): center_init: PositiveFloat = 1.0 """Scaling factor for initial particle positions around search space center.""" + verbose: bool = False + """Print verbose output.""" + def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: @@ -234,7 +237,7 @@ def _solve_internal_problem( objective_func=objective_wrapper, iters=self.stopping_maxiter, n_processes=self.n_processes, - verbose=False, + verbose=self.verbose, ) return _process_pyswarms_result( From 3fe000b290ca91e9bbf9b44a82dd25806583c8e7 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 22 Aug 2025 23:10:42 +0000 Subject: [PATCH 04/40] use CONVERGENCE_FTOL_REL --- src/optimagic/optimizers/pyswarms_optimizers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 622e05a45..b554ae037 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -18,7 +18,7 @@ from optimagic.config import IS_PYSWARMS_INSTALLED from optimagic.exceptions import NotInstalledError from optimagic.optimization.algo_options import ( - STOPPING_MAXFUN_GLOBAL, + CONVERGENCE_FTOL_REL, STOPPING_MAXITER, ) from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult @@ -144,7 +144,7 @@ class PySwarmsGlobalBestPSO(Algorithm): """ - convergence_ftol_rel: NonNegativeFloat = -np.inf + convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL """Relative tolerance for convergence based on function value changes.""" convergence_ftol_iter: PositiveInt = 1 From e5e4ae558d5a7f3ce1104a31e7544fab2349e7bc Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 
22 Aug 2025 23:41:37 +0000 Subject: [PATCH 05/40] Add local-best PSO --- src/optimagic/algorithms.py | 37 +++- .../optimizers/pyswarms_optimizers.py | 167 ++++++++++++++++++ 2 files changed, 203 insertions(+), 1 deletion(-) diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py index e542c5f1f..76887f7a4 100644 --- a/src/optimagic/algorithms.py +++ b/src/optimagic/algorithms.py @@ -71,7 +71,10 @@ PygmoSimulatedAnnealing, PygmoXnes, ) -from optimagic.optimizers.pyswarms_optimizers import PySwarmsGlobalBestPSO +from optimagic.optimizers.pyswarms_optimizers import ( + PySwarmsGlobalBestPSO, + PySwarmsLocalBestPSO, +) from optimagic.optimizers.scipy_optimizers import ( ScipyBasinhopping, ScipyBFGS, @@ -204,6 +207,7 @@ class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -430,6 +434,7 @@ class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -465,6 +470,7 @@ class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -534,6 +540,7 @@ class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -695,6 +702,7 @@ class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -803,6 +811,7 @@ class BoundedGlobalParallelScalarAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1162,6 +1171,7 @@ class 
BoundedGlobalGradientFreeAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1240,6 +1250,7 @@ class GlobalGradientFreeScalarAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1279,6 +1290,7 @@ class GlobalGradientFreeParallelAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1482,6 +1494,7 @@ class BoundedGradientFreeScalarAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1545,6 +1558,7 @@ class BoundedGradientFreeParallelAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1639,6 +1653,7 @@ class GradientFreeParallelScalarAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1740,6 +1755,7 @@ class BoundedGlobalScalarAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -1784,6 +1800,7 @@ class BoundedGlobalParallelAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + 
pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -1866,6 +1883,7 @@ class GlobalParallelScalarAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -2114,6 +2132,7 @@ class BoundedParallelScalarAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -2405,6 +2424,7 @@ class GlobalGradientFreeAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -2513,6 +2533,7 @@ class BoundedGradientFreeAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -2624,6 +2645,7 @@ class GradientFreeScalarAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -2695,6 +2717,7 @@ class GradientFreeParallelAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -2766,6 +2789,7 @@ class BoundedGlobalAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -2862,6 +2886,7 @@ class 
GlobalScalarAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -2910,6 +2935,7 @@ class GlobalParallelAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -3218,6 +3244,7 @@ class BoundedScalarAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -3304,6 +3331,7 @@ class BoundedParallelAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -3418,6 +3446,7 @@ class ParallelScalarAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -3565,6 +3594,7 @@ class GradientFreeAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_cobyla: Type[ScipyCOBYLA] = ScipyCOBYLA scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -3643,6 +3673,7 @@ class GlobalAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -3803,6 +3834,7 @@ class BoundedAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = 
PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( @@ -3954,6 +3986,7 @@ class ScalarAlgorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -4067,6 +4100,7 @@ class ParallelAlgorithms(AlgoSelection): pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute scipy_differential_evolution: Type[ScipyDifferentialEvolution] = ( ScipyDifferentialEvolution @@ -4160,6 +4194,7 @@ class Algorithms(AlgoSelection): pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO + pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping scipy_brute: Type[ScipyBrute] = ScipyBrute diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index b554ae037..ae99c349b 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -246,6 +246,173 @@ def _solve_internal_problem( n_iterations_run=self.stopping_maxiter, ) +@mark.minimizer( + name="pyswarms_local_best", + solver_type=AggregationLevel.SCALAR, + is_available=IS_PYSWARMS_INSTALLED, + is_global=True, + needs_jac=False, + needs_hess=False, + needs_bounds=False, + supports_parallelism=True, + supports_bounds=True, + supports_infinite_bounds=False, + supports_linear_constraints=False, + supports_nonlinear_constraints=False, + disable_history=True, +) +@dataclass(frozen=True) +class PySwarmsLocalBestPSO(Algorithm): + r"""Minimize a scalar function using Local Best Particle Swarm Optimization. + + This algorithm uses local neighborhoods instead of a global best position. + Each particle is influenced only by the best position found within its local + neighborhood, promoting diversity and helping avoid premature convergence + on multimodal problems. + + The velocity update is: + + .. math:: + + v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] + + c_2 r_{2j}(t)[\hat{y}_{lj}(t) - x_{ij}(t)] + + where :math:`\hat{y}_{lj}(t)` is the best position within the local neighborhood + of particle :math:`i`, defined by the :math:`k` nearest neighbors using + Minkowski p-norm distance. 
+ + """ + + n_particles: PositiveInt = 50 + """Number of particles in the swarm.""" + + cognitive_parameter: PositiveFloat = 0.5 + r"""Cognitive parameter :math:`c_1` controlling attraction to personal best.""" + + social_parameter: PositiveFloat = 0.3 + r"""Social parameter :math:`c_2` controlling attraction to local best.""" + + inertia_weight: PositiveFloat = 0.9 + r"""Inertia weight :math:`w` controlling momentum.""" + + k_neighbors: PositiveInt = 3 + r"""Number of neighbors :math:`k` defining local neighborhood. + + Larger values increase information sharing but may reduce diversity. + + """ + + p_norm: Literal[1, 2] = 2 + """Distance metric: 1 (Manhattan), 2 (Euclidean). """ + + convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL + """Relative tolerance for convergence based on function value changes.""" + + convergence_ftol_iter: PositiveInt = 1 + """Number of iterations to check for convergence.""" + + stopping_maxiter: PositiveInt = STOPPING_MAXITER + """Maximum number of iterations.""" + + boundary_strategy: Literal[ + "periodic", "reflective", "shrink", "random", "intermediate" + ] = "periodic" + """Strategy for out-of-bounds particles: 'periodic', 'reflective', 'shrink', + 'random', 'intermediate'.""" + + velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" + """Strategy for out-of-bounds velocities: + 'unmodified', 'adjust', 'invert', 'zero'.""" + + velocity_clamp_min: float | None = None + """Minimum velocity value for clamping. + + None to disable. + + """ + + velocity_clamp_max: float | None = None + """Maximum velocity value for clamping. + + None to disable. + + """ + + n_processes: PositiveInt | None = None + """Number of processes for parallel evaluation. + + None to disable. + + """ + + center_init: PositiveFloat = 1.0 + """Scaling factor for initial particle positions.""" + + static_topology: bool = False + """Whether to use static topology.""" + + verbose: bool = False + """Print verbose output.""" + + def _solve_internal_problem( + self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] + ) -> InternalOptimizeResult: + if not IS_PYSWARMS_INSTALLED: + raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) + + import pyswarms as ps + + # Build structured options using dataclass + pso_options = LocalBestPSOOptions( + cognitive_parameter=self.cognitive_parameter, + social_parameter=self.social_parameter, + inertia_weight=self.inertia_weight, + k_neighbors=self.k_neighbors, + p_norm=self.p_norm, + ) + options = _build_pso_options_dict(pso_options) + + velocity_clamp = _build_velocity_clamp( + self.velocity_clamp_min, self.velocity_clamp_max + ) + + bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) + + init_pos = _create_initial_population( + x0=x0, n_particles=self.n_particles, bounds=bounds + ) + + optimizer = ps.single.LocalBestPSO( + n_particles=self.n_particles, + dimensions=len(x0), + options=options, + bounds=bounds, + bh_strategy=self.boundary_strategy, + velocity_clamp=velocity_clamp, + vh_strategy=self.velocity_strategy, + center=self.center_init, + ftol=self.convergence_ftol_rel, + ftol_iter=self.convergence_ftol_iter, + init_pos=init_pos, + static=self.static_topology, + ) + + objective_wrapper = _create_objective_wrapper(problem) + + result = optimizer.optimize( + objective_func=objective_wrapper, + iters=self.stopping_maxiter, + n_processes=self.n_processes, + verbose=self.verbose, + ) + + return _process_pyswarms_result( + result=result, + n_particles=self.n_particles, + 
n_iterations_run=self.stopping_maxiter, + ) + + def _build_pso_options_dict(options: BasePSOOptions) -> dict[str, float | int]: """Convert structured PSO options to PySwarms format.""" base_options = { From a451fd4e54557f019c73755174ec0dd4e5102bf4 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 22 Aug 2025 23:46:41 +0000 Subject: [PATCH 06/40] refactor: simplify return --- src/optimagic/optimizers/pyswarms_optimizers.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index ae99c349b..e09b4056c 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -240,12 +240,14 @@ def _solve_internal_problem( verbose=self.verbose, ) - return _process_pyswarms_result( + res = _process_pyswarms_result( result=result, n_particles=self.n_particles, n_iterations_run=self.stopping_maxiter, ) + return res + @mark.minimizer( name="pyswarms_local_best", solver_type=AggregationLevel.SCALAR, @@ -406,12 +408,14 @@ def _solve_internal_problem( verbose=self.verbose, ) - return _process_pyswarms_result( + res = _process_pyswarms_result( result=result, n_particles=self.n_particles, n_iterations_run=self.stopping_maxiter, ) + return res + def _build_pso_options_dict(options: BasePSOOptions) -> dict[str, float | int]: """Convert structured PSO options to PySwarms format.""" From b21f93e7070c57b112915529b19701b42d588ec3 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Sun, 24 Aug 2025 01:02:41 +0000 Subject: [PATCH 07/40] Add general PSO --- src/optimagic/algorithms.py | 33 +++ .../optimizers/pyswarms_optimizers.py | 226 +++++++++++++++++- 2 files changed, 258 insertions(+), 1 deletion(-) diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py index 76887f7a4..479a1db67 100644 --- a/src/optimagic/algorithms.py +++ b/src/optimagic/algorithms.py @@ -72,6 +72,7 @@ PygmoXnes, ) from optimagic.optimizers.pyswarms_optimizers import ( + PySwarmsGeneralPSO, PySwarmsGlobalBestPSO, PySwarmsLocalBestPSO, ) @@ -206,6 +207,7 @@ class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -433,6 +435,7 @@ class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -469,6 +472,7 @@ class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO 
pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -539,6 +543,7 @@ class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -701,6 +706,7 @@ class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -810,6 +816,7 @@ class BoundedGlobalParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1170,6 +1177,7 @@ class BoundedGlobalGradientFreeAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1249,6 +1257,7 @@ class GlobalGradientFreeScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1289,6 +1298,7 @@ class GlobalGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1493,6 +1503,7 @@ class BoundedGradientFreeScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1557,6 +1568,7 @@ class 
BoundedGradientFreeParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1652,6 +1664,7 @@ class GradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1754,6 +1767,7 @@ class BoundedGlobalScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping @@ -1799,6 +1813,7 @@ class BoundedGlobalParallelAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1882,6 +1897,7 @@ class GlobalParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2131,6 +2147,7 @@ class BoundedParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2423,6 +2440,7 @@ class GlobalGradientFreeAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2532,6 +2550,7 @@ class BoundedGradientFreeAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + 
pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2644,6 +2663,7 @@ class GradientFreeScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2716,6 +2736,7 @@ class GradientFreeParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2788,6 +2809,7 @@ class BoundedGlobalAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping @@ -2885,6 +2907,7 @@ class GlobalScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping @@ -2934,6 +2957,7 @@ class GlobalParallelAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -3243,6 +3267,7 @@ class BoundedScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping @@ -3330,6 +3355,7 @@ class BoundedParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = 
PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -3445,6 +3471,7 @@ class ParallelScalarAlgorithms(AlgoSelection): nevergrad_tbpsa: Type[NevergradTBPSA] = NevergradTBPSA pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -3593,6 +3620,7 @@ class GradientFreeAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -3672,6 +3700,7 @@ class GlobalAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping @@ -3833,6 +3862,7 @@ class BoundedAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_basinhopping: Type[ScipyBasinhopping] = ScipyBasinhopping @@ -3985,6 +4015,7 @@ class ScalarAlgorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS @@ -4099,6 +4130,7 @@ class ParallelAlgorithms(AlgoSelection): pounders: Type[Pounders] = Pounders pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -4193,6 +4225,7 @@ class Algorithms(AlgoSelection): pygmo_sga: Type[PygmoSga] = PygmoSga pygmo_simulated_annealing: Type[PygmoSimulatedAnnealing] = PygmoSimulatedAnnealing pygmo_xnes: Type[PygmoXnes] = PygmoXnes + pyswarms_general: Type[PySwarmsGeneralPSO] = PySwarmsGeneralPSO pyswarms_global_best: Type[PySwarmsGlobalBestPSO] = PySwarmsGlobalBestPSO pyswarms_local_best: Type[PySwarmsLocalBestPSO] = PySwarmsLocalBestPSO scipy_bfgs: Type[ScipyBFGS] = ScipyBFGS diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index e09b4056c..fba5f499d 100644 --- 
a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -9,7 +9,7 @@ from __future__ import annotations from dataclasses import dataclass -from typing import Callable, Literal +from typing import Any, Callable, Literal import numpy as np from numpy.typing import NDArray @@ -417,6 +417,209 @@ def _solve_internal_problem( return res +@mark.minimizer( + name="pyswarms_general", + solver_type=AggregationLevel.SCALAR, + is_available=IS_PYSWARMS_INSTALLED, + is_global=True, + needs_jac=False, + needs_hess=False, + needs_bounds=False, + supports_parallelism=True, + supports_bounds=True, + supports_infinite_bounds=False, + supports_linear_constraints=False, + supports_nonlinear_constraints=False, + disable_history=True, +) +@dataclass(frozen=True) +class PySwarmsGeneralPSO(Algorithm): + r"""Minimize a scalar function using General Particle Swarm Optimization with custom + topologies. + + This algorithm provides flexible PSO with different neighborhood topologies: + Star, Ring, Von Neumann, and Random. The topology choice affects the balance + between exploration and exploitation during optimization. + + The velocity update follows: + + .. math:: + + v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] + + c_2 r_{2j}(t)[\hat{y}_{nj}(t) - x_{ij}(t)] + + where :math:`\hat{y}_{nj}(t)` is the best position in the neighborhood defined + by the selected topology. + + **Topology Options:** + + - **Star**: All particles connected to global best (fast convergence) + - **Ring**: Ring arrangement with k neighbors (balanced exploration/exploitation) + - **Von Neumann**: 2D grid topology (good diversity maintenance) + - **Random**: Dynamic random connections (enhanced exploration) + + """ + + n_particles: PositiveInt = 50 + """Number of particles in the swarm.""" + + cognitive_parameter: PositiveFloat = 0.5 + r"""Cognitive parameter :math:`c_1` controlling attraction to personal best.""" + + social_parameter: PositiveFloat = 0.3 + r"""Social parameter :math:`c_2` controlling attraction to neighborhood best.""" + + inertia_weight: PositiveFloat = 0.9 + r"""Inertia weight :math:`w` controlling momentum.""" + + topology_type: Literal["star", "ring", "vonneumann", "random", "pyramid"] = "star" + """Topology: 'star' (fast convergence), 'ring' (balanced), 'vonneumann' + (diversity), 'random' (exploration).""" + + k_neighbors: PositiveInt = 3 + """Number of neighbors for ring, vonneumann, and random topologies.""" + + p_norm: Literal[1, 2] = 2 + """Distance metric for neighbor selection: 1 (Manhattan), 2 (Euclidean).""" + + vonneumann_range: PositiveInt = 1 + r"""Range parameter :math:`r` for Von Neumann topology. + + Higher values create larger neighborhoods. Only used with 'vonneumann' topology. + + """ + + convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL + """Relative tolerance for convergence. + + Set to -np.inf to disable. 
+ + """ + + convergence_ftol_iter: PositiveInt = 1 + """Number of iterations to check for convergence.""" + + stopping_maxiter: PositiveInt = STOPPING_MAXITER + """Maximum number of iterations.""" + + boundary_strategy: Literal[ + "periodic", "reflective", "shrink", "random", "intermediate" + ] = "periodic" + """Strategy for out-of-bounds particles: 'periodic', 'reflective', 'shrink', + 'random', 'intermediate'.""" + + velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" + """Strategy for out-of-bounds velocities: + 'unmodified', 'adjust', 'invert', 'zero'.""" + + velocity_clamp_min: float | None = None + """Minimum velocity value for clamping. + + None to disable. + + """ + + velocity_clamp_max: float | None = None + """Maximum velocity value for clamping. + + None to disable. + + """ + + n_processes: PositiveInt | None = None + """Number of processes for parallel evaluation. + + None to disable. + + """ + + center_init: PositiveFloat = 1.0 + """Scaling factor for initial particle positions.""" + + verbose: bool = False + """Print verbose output.""" + + static_topology: bool = False + """Whether to use static topology.""" + + def _solve_internal_problem( + self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] + ) -> InternalOptimizeResult: + if not IS_PYSWARMS_INSTALLED: + raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) + + import pyswarms as ps + + # Build structured options using dataclass + k_neighbors = ( + self.k_neighbors + if self.topology_type in ["ring", "vonneumann", "random"] + else None + ) + p_norm = ( + self.p_norm + if self.topology_type in ["ring", "vonneumann"] + else None + ) + vonneumann_range = ( + self.vonneumann_range if self.topology_type == "vonneumann" else None + ) + + pso_options = GeneralPSOOptions( + cognitive_parameter=self.cognitive_parameter, + social_parameter=self.social_parameter, + inertia_weight=self.inertia_weight, + k_neighbors=k_neighbors, + p_norm=p_norm, + vonneumann_range=vonneumann_range, + ) + options = _build_pso_options_dict(pso_options) + + topology = _create_topology_instance(self.topology_type, self.static_topology) + + velocity_clamp = _build_velocity_clamp( + self.velocity_clamp_min, self.velocity_clamp_max + ) + + bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) + + init_pos = _create_initial_population( + x0=x0, n_particles=self.n_particles, bounds=bounds + ) + + optimizer = ps.single.GeneralOptimizerPSO( + n_particles=self.n_particles, + dimensions=len(x0), + options=options, + topology=topology, + bounds=bounds, + bh_strategy=self.boundary_strategy, + velocity_clamp=velocity_clamp, + vh_strategy=self.velocity_strategy, + center=self.center_init, + ftol=self.convergence_ftol_rel, + ftol_iter=self.convergence_ftol_iter, + init_pos=init_pos, + ) + + objective_wrapper = _create_objective_wrapper(problem) + + result = optimizer.optimize( + objective_func=objective_wrapper, + iters=self.stopping_maxiter, + n_processes=self.n_processes, + verbose=self.verbose, + ) + + res = _process_pyswarms_result( + result=result, + n_particles=self.n_particles, + n_iterations_run=self.stopping_maxiter, + ) + + return res + + def _build_pso_options_dict(options: BasePSOOptions) -> dict[str, float | int]: """Convert structured PSO options to PySwarms format.""" base_options = { @@ -454,6 +657,27 @@ def _build_velocity_clamp( return clamp +def _create_topology_instance(topology_type: str, static: bool) -> Any: + """Create PySwarms topology instance from string identifier.""" + if not 
IS_PYSWARMS_INSTALLED: + raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) + + from pyswarms.backend.topology import Pyramid, Random, Ring, Star, VonNeumann + + topology_map = { + "star": Star(), + "ring": Ring(static=static), + "vonneumann": VonNeumann(), + "random": Random(static=static), + "pyramid": Pyramid(static=static), + } + + if topology_type not in topology_map: + raise ValueError(f"Unknown topology type: {topology_type}") + + return topology_map[topology_type] + + def _process_pyswarms_result( result: tuple[float, NDArray[np.float64]], n_particles: int, n_iterations_run: int ) -> InternalOptimizeResult: From 9fe6876329b0b71975939b07a6468b8eda51d370 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Sun, 24 Aug 2025 02:41:25 +0000 Subject: [PATCH 08/40] docs: improve docstrings of global_best --- .../optimizers/pyswarms_optimizers.py | 73 ++++++++----------- 1 file changed, 29 insertions(+), 44 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index fba5f499d..62efbb7d1 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -43,7 +43,7 @@ @dataclass(frozen=True) class BasePSOOptions: - """Base PSO parameters shared across all variants.""" + """Common PSO parameters used by all PSO variants.""" cognitive_parameter: PositiveFloat """Cognitive parameter (c1) - attraction to personal best.""" @@ -99,36 +99,41 @@ class GeneralPSOOptions(BasePSOOptions): class PySwarmsGlobalBestPSO(Algorithm): r"""Minimize a scalar function using Global Best Particle Swarm Optimization. - This algorithm uses a swarm of particles that move through the search space, - where each particle is attracted to both its personal best position and the - global best position found by the entire swarm. It uses a star topology where - all particles are connected to the global best particle. + A population-based stochastic, global optimization optimization algorithm that + simulates the social behavior of bird flocking or fish schooling. Particles + (candidate solutions) move through the search space, adjusting their positions + based on their own experience (cognitive component) and the experience of their + neighbors or the entire swarm (social component). + + This implementation uses a star topology where all particles are connected to + each other, making each particle aware of the global best solution found by the + entire swarm. The position update follows: .. math:: - x_i(t+1) = x_i(t) + v_i(t+1) + x_{i}(t+1) = x_{i}(t) + v_{i}(t+1) - where the velocity update is: + The velocity update follows: .. math:: v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] + c_2 r_{2j}(t)[\hat{y}_j(t) - x_{ij}(t)] - Here :math:`c_1` and :math:`c_2` control the balance between personal experience - and swarm knowledge, :math:`w` controls momentum, and :math:`r_{1j}`, :math:`r_{2j}` - are random numbers from [0,1]. + Where: + - :math:`w`: inertia weight controlling momentum + - :math:`c_1`: cognitive parameter for attraction to personal best + - :math:`c_2`: social parameter for attraction to global best + - :math:`r_{1j}, r_{2j}`: random numbers in [0,1] + - :math:`y_{ij}(t)`: personal best position of particle i + - :math:`\hat{y}_j(t)`: global best position """ n_particles: PositiveInt = 50 - """Number of particles in the swarm. - - Typical values: 20-100. 
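To make the global-best update rule in the docstring above concrete, here is a minimal NumPy sketch of a single iteration. This is only an illustration of the equations, not the PySwarms implementation; the swarm size, coefficients, and random draws are made up, and the "global best" is simply taken to be the first particle for the example.

```python
import numpy as np

rng = np.random.default_rng(0)
n_particles, n_dim = 4, 2
w, c1, c2 = 0.9, 0.5, 0.3  # inertia, cognitive, and social parameters (defaults above)

x = rng.uniform(-1, 1, size=(n_particles, n_dim))  # current positions
v = np.zeros((n_particles, n_dim))                 # current velocities
pbest = x.copy()                                   # personal best positions
gbest = x[0]                                       # global best position (illustrative choice)

# One velocity and position update for the whole swarm.
r1 = rng.uniform(size=(n_particles, n_dim))
r2 = rng.uniform(size=(n_particles, n_dim))
v = w * v + c1 * r1 * (pbest - x) + c2 * r2 * (gbest - x)
x = x + v
```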
- - """ + """Number of particles in the swarm.""" cognitive_parameter: PositiveFloat = 0.5 r"""Cognitive parameter :math:`c_1` controlling attraction to personal best.""" @@ -137,15 +142,10 @@ class PySwarmsGlobalBestPSO(Algorithm): r"""Social parameter :math:`c_2` controlling attraction to global best.""" inertia_weight: PositiveFloat = 0.9 - r"""Inertia weight :math:`w` controlling momentum. - - Higher values promote exploration, lower values promote exploitation. Typical range: - 0.1-0.9. - - """ + r"""Inertia weight :math:`w` controlling momentum.""" convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL - """Relative tolerance for convergence based on function value changes.""" + """Stop when relative change in objective function is less than this value.""" convergence_ftol_iter: PositiveInt = 1 """Number of iterations to check for convergence.""" @@ -156,40 +156,25 @@ class PySwarmsGlobalBestPSO(Algorithm): boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" - """Strategy for handling out-of-bounds particles: 'periodic' (wrap around), - 'reflective' (bounce), 'shrink' (move to boundary), 'random' (reposition), - 'intermediate' (place between current and boundary).""" + """Strategy for handling out-of-bounds particles.""" velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for out-of-bounds velocities: 'unmodified' (keep), 'adjust' (scale), - 'invert' (reverse), 'zero' (set to zero).""" + """Strategy for handling out-of-bounds velocities.""" velocity_clamp_min: float | None = None - """Minimum velocity value for clamping. - - None to disable. - - """ + """Minimum velocity limit for particles.""" velocity_clamp_max: float | None = None - """Maximum velocity value for clamping. - - None to disable. - - """ + """Maximum velocity limit for particles.""" n_processes: PositiveInt | None = None - """Number of processes for parallel evaluation. - - None to disable parallelization. - - """ + """Number of processes for parallel evaluation.""" center_init: PositiveFloat = 1.0 - """Scaling factor for initial particle positions around search space center.""" + """Scaling factor for initial particle positions.""" verbose: bool = False - """Print verbose output.""" + """Enable or disable the logs and progress bar.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] From 7f078c69cf7c13da80bc874906b82324227631ae Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Sun, 24 Aug 2025 22:39:54 +0000 Subject: [PATCH 09/40] improve local-best pso docstrings --- .../optimizers/pyswarms_optimizers.py | 63 +++++++++---------- 1 file changed, 29 insertions(+), 34 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 62efbb7d1..d23744c53 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -252,21 +252,34 @@ def _solve_internal_problem( class PySwarmsLocalBestPSO(Algorithm): r"""Minimize a scalar function using Local Best Particle Swarm Optimization. - This algorithm uses local neighborhoods instead of a global best position. + A variant of PSO that uses local neighborhoods instead of a single global best. Each particle is influenced only by the best position found within its local - neighborhood, promoting diversity and helping avoid premature convergence - on multimodal problems. 
+ neighborhood, which is determined by the k-nearest neighbors using distance metrics. - The velocity update is: + This approach uses a ring topology where particles are connected to their local + neighbors, making each particle aware of only the best solution found within its + neighborhood. + + The position update follows: + + .. math:: + + x_{i}(t+1) = x_{i}(t) + v_{i}(t+1) + + The velocity update follows: .. math:: v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] + c_2 r_{2j}(t)[\hat{y}_{lj}(t) - x_{ij}(t)] - where :math:`\hat{y}_{lj}(t)` is the best position within the local neighborhood - of particle :math:`i`, defined by the :math:`k` nearest neighbors using - Minkowski p-norm distance. + Where: + - :math:`w`: inertia weight controlling momentum + - :math:`c_1`: cognitive parameter for attraction to personal best + - :math:`c_2`: social parameter for attraction to local best + - :math:`r_{1j}, r_{2j}`: random numbers in [0,1] + - :math:`y_{ij}(t)`: personal best position of particle i + - :math:`\hat{y}_{lj}(t)`: local best position in particle i's neighborhood """ @@ -283,17 +296,13 @@ class PySwarmsLocalBestPSO(Algorithm): r"""Inertia weight :math:`w` controlling momentum.""" k_neighbors: PositiveInt = 3 - r"""Number of neighbors :math:`k` defining local neighborhood. - - Larger values increase information sharing but may reduce diversity. - - """ + r"""Number of neighbors :math:`k` defining local neighborhood.""" p_norm: Literal[1, 2] = 2 """Distance metric: 1 (Manhattan), 2 (Euclidean). """ convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL - """Relative tolerance for convergence based on function value changes.""" + """Stop when relative change in objective function is less than this value.""" convergence_ftol_iter: PositiveInt = 1 """Number of iterations to check for convergence.""" @@ -304,42 +313,28 @@ class PySwarmsLocalBestPSO(Algorithm): boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" - """Strategy for out-of-bounds particles: 'periodic', 'reflective', 'shrink', - 'random', 'intermediate'.""" + """Strategy for handling out-of-bounds particles.""" velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for out-of-bounds velocities: - 'unmodified', 'adjust', 'invert', 'zero'.""" + """Strategy for handling out-of-bounds velocities.""" velocity_clamp_min: float | None = None - """Minimum velocity value for clamping. - - None to disable. - - """ + """Minimum velocity limit for particles.""" velocity_clamp_max: float | None = None - """Maximum velocity value for clamping. - - None to disable. - - """ + """Maximum velocity limit for particles.""" n_processes: PositiveInt | None = None - """Number of processes for parallel evaluation. - - None to disable. 
- - """ + """Number of processes for parallel evaluation.""" center_init: PositiveFloat = 1.0 """Scaling factor for initial particle positions.""" static_topology: bool = False - """Whether to use static topology.""" + """Whether to use static or dynamic ring topology.""" verbose: bool = False - """Print verbose output.""" + """Enable or disable the logs and progress bar.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] From 9848e97aa8475d3f72675c6bef99ca0afefd95a5 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Sun, 24 Aug 2025 23:03:07 +0000 Subject: [PATCH 10/40] improve general pso docstrings --- .../optimizers/pyswarms_optimizers.py | 82 ++++++++----------- 1 file changed, 36 insertions(+), 46 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index d23744c53..853064469 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -417,9 +417,16 @@ class PySwarmsGeneralPSO(Algorithm): r"""Minimize a scalar function using General Particle Swarm Optimization with custom topologies. - This algorithm provides flexible PSO with different neighborhood topologies: - Star, Ring, Von Neumann, and Random. The topology choice affects the balance - between exploration and exploitation during optimization. + A flexible PSO implementation that allows selection of different neighborhood + topologies, providing control over the balance between exploration and exploitation. + The topology determines how particles communicate and share information, directly + affecting the algorithm's search behavior. + + The position update follows: + + .. math:: + + x_{i}(t+1) = x_{i}(t) + v_{i}(t+1) The velocity update follows: @@ -428,15 +435,21 @@ class PySwarmsGeneralPSO(Algorithm): v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] + c_2 r_{2j}(t)[\hat{y}_{nj}(t) - x_{ij}(t)] - where :math:`\hat{y}_{nj}(t)` is the best position in the neighborhood defined - by the selected topology. 
+ Where: + - :math:`w`: inertia weight controlling momentum + - :math:`c_1`: cognitive parameter for attraction to personal best + - :math:`c_2`: social parameter for attraction to neighborhood best + - :math:`r_{1j}, r_{2j}`: random numbers in [0,1] + - :math:`y_{ij}(t)`: personal best position of particle i + - :math:`\hat{y}_{nj}(t)`: neighborhood best position **Topology Options:** - - **Star**: All particles connected to global best (fast convergence) - - **Ring**: Ring arrangement with k neighbors (balanced exploration/exploitation) - - **Von Neumann**: 2D grid topology (good diversity maintenance) - - **Random**: Dynamic random connections (enhanced exploration) + - **Star**: All particles connected to global best + - **Ring**: Ring arrangement with k neighbors + - **Von Neumann**: 2D grid topology + - **Random**: Dynamic random connections + - **Pyramid**: Hierarchical pyramid-like network of connected particles """ @@ -453,28 +466,19 @@ class PySwarmsGeneralPSO(Algorithm): r"""Inertia weight :math:`w` controlling momentum.""" topology_type: Literal["star", "ring", "vonneumann", "random", "pyramid"] = "star" - """Topology: 'star' (fast convergence), 'ring' (balanced), 'vonneumann' - (diversity), 'random' (exploration).""" + """Topology structure for particle communication.""" k_neighbors: PositiveInt = 3 - """Number of neighbors for ring, vonneumann, and random topologies.""" + """Number of neighbors for ring and random topologies.""" p_norm: Literal[1, 2] = 2 """Distance metric for neighbor selection: 1 (Manhattan), 2 (Euclidean).""" vonneumann_range: PositiveInt = 1 - r"""Range parameter :math:`r` for Von Neumann topology. - - Higher values create larger neighborhoods. Only used with 'vonneumann' topology. - - """ + r"""Range parameter :math:`r` for Von Neumann topology.""" convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL - """Relative tolerance for convergence. - - Set to -np.inf to disable. - - """ + """Stop when relative change in objective function is less than this value.""" convergence_ftol_iter: PositiveInt = 1 """Number of iterations to check for convergence.""" @@ -485,42 +489,28 @@ class PySwarmsGeneralPSO(Algorithm): boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" - """Strategy for out-of-bounds particles: 'periodic', 'reflective', 'shrink', - 'random', 'intermediate'.""" + """Strategy for handling out-of-bounds particles.""" velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for out-of-bounds velocities: - 'unmodified', 'adjust', 'invert', 'zero'.""" + """Strategy for handling out-of-bounds velocities.""" velocity_clamp_min: float | None = None - """Minimum velocity value for clamping. - - None to disable. - - """ + """Minimum velocity limit for particles.""" velocity_clamp_max: float | None = None - """Maximum velocity value for clamping. - - None to disable. - - """ + """Maximum velocity limit for particles.""" n_processes: PositiveInt | None = None - """Number of processes for parallel evaluation. - - None to disable. 
- - """ + """Number of processes for parallel evaluation.""" center_init: PositiveFloat = 1.0 - """Scaling factor for initial particle positions.""" - - verbose: bool = False - """Print verbose output.""" + """Scaling factor for initial particle positions.""" static_topology: bool = False - """Whether to use static topology.""" + """Whether to use static or dynamic topology.""" + + verbose: bool = False + """Enable or disable the logs and progress bar.""" def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] From 0d35e8df93fa85392e81e6d3f63d9c1061ee2390 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Mon, 25 Aug 2025 11:44:58 +0000 Subject: [PATCH 11/40] fix: particle center scaling --- .../optimizers/pyswarms_optimizers.py | 43 +++++++++++-------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 853064469..5c2cc9ae2 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -199,7 +199,7 @@ def _solve_internal_problem( bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) init_pos = _create_initial_population( - x0=x0, n_particles=self.n_particles, bounds=bounds + x0=x0, n_particles=self.n_particles, bounds=bounds, center=self.center_init ) optimizer = ps.single.GlobalBestPSO( @@ -210,7 +210,6 @@ def _solve_internal_problem( bh_strategy=self.boundary_strategy, velocity_clamp=velocity_clamp, vh_strategy=self.velocity_strategy, - center=self.center_init, ftol=self.convergence_ftol_rel, ftol_iter=self.convergence_ftol_iter, init_pos=init_pos, @@ -233,6 +232,7 @@ def _solve_internal_problem( return res + @mark.minimizer( name="pyswarms_local_best", solver_type=AggregationLevel.SCALAR, @@ -361,7 +361,7 @@ def _solve_internal_problem( bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) init_pos = _create_initial_population( - x0=x0, n_particles=self.n_particles, bounds=bounds + x0=x0, n_particles=self.n_particles, bounds=bounds, center=self.center_init ) optimizer = ps.single.LocalBestPSO( @@ -372,7 +372,6 @@ def _solve_internal_problem( bh_strategy=self.boundary_strategy, velocity_clamp=velocity_clamp, vh_strategy=self.velocity_strategy, - center=self.center_init, ftol=self.convergence_ftol_rel, ftol_iter=self.convergence_ftol_iter, init_pos=init_pos, @@ -417,9 +416,9 @@ class PySwarmsGeneralPSO(Algorithm): r"""Minimize a scalar function using General Particle Swarm Optimization with custom topologies. - A flexible PSO implementation that allows selection of different neighborhood - topologies, providing control over the balance between exploration and exploitation. - The topology determines how particles communicate and share information, directly + A flexible PSO implementation that allows selection of different neighborhood + topologies, providing control over the balance between exploration and exploitation. + The topology determines how particles communicate and share information, directly affecting the algorithm's search behavior. 
The position update follows: @@ -504,7 +503,7 @@ class PySwarmsGeneralPSO(Algorithm): """Number of processes for parallel evaluation.""" center_init: PositiveFloat = 1.0 - """Scaling factor for initial particle positions.""" + """Scaling factor for initial particle positions.""" static_topology: bool = False """Whether to use static or dynamic topology.""" @@ -526,11 +525,7 @@ def _solve_internal_problem( if self.topology_type in ["ring", "vonneumann", "random"] else None ) - p_norm = ( - self.p_norm - if self.topology_type in ["ring", "vonneumann"] - else None - ) + p_norm = self.p_norm if self.topology_type in ["ring", "vonneumann"] else None vonneumann_range = ( self.vonneumann_range if self.topology_type == "vonneumann" else None ) @@ -554,7 +549,7 @@ def _solve_internal_problem( bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) init_pos = _create_initial_population( - x0=x0, n_particles=self.n_particles, bounds=bounds + x0=x0, n_particles=self.n_particles, bounds=bounds, center=self.center_init ) optimizer = ps.single.GeneralOptimizerPSO( @@ -566,7 +561,6 @@ def _solve_internal_problem( bh_strategy=self.boundary_strategy, velocity_clamp=velocity_clamp, vh_strategy=self.velocity_strategy, - center=self.center_init, ftol=self.convergence_ftol_rel, ftol_iter=self.convergence_ftol_iter, init_pos=init_pos, @@ -708,16 +702,29 @@ def _create_initial_population( x0: NDArray[np.float64], n_particles: int, bounds: tuple[NDArray[np.float64], NDArray[np.float64]], + center: float = 1.0, ) -> NDArray[np.float64]: - """Create initial population with x0 as first particle.""" + """Create initial population with x0 as first particle. + + Args: + x0: Initial parameter vector + n_particles: Number of particles in the swarm + bounds: Tuple of (lower_bounds, upper_bounds) arrays + center: Scaling factor for initial particle positions around bounds + + Returns: + Initial population array of shape (n_particles, n_dimensions) + + """ n_dimensions = len(x0) lower_bounds, upper_bounds = bounds - # Generate random initial positions within the bounds - init_pos = np.random.uniform( + # Generate random initial positions within the bounds, scaled by center + init_pos = center * np.random.uniform( low=lower_bounds, high=upper_bounds, size=(n_particles, n_dimensions) ) + init_pos = np.clip(init_pos, lower_bounds, upper_bounds) init_pos[0] = np.clip(x0, lower_bounds, upper_bounds) return init_pos From b647fd25f54aeb48d2713583eac301160a065bd3 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Mon, 25 Aug 2025 12:31:56 +0000 Subject: [PATCH 12/40] feat: enable parallel eval and history in PySwarms --- .../optimizers/pyswarms_optimizers.py | 43 ++++++++++--------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 5c2cc9ae2..57047eafe 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -93,7 +93,7 @@ class GeneralPSOOptions(BasePSOOptions): supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, - disable_history=True, + disable_history=False, ) @dataclass(frozen=True) class PySwarmsGlobalBestPSO(Algorithm): @@ -167,8 +167,8 @@ class PySwarmsGlobalBestPSO(Algorithm): velocity_clamp_max: float | None = None """Maximum velocity limit for particles.""" - n_processes: PositiveInt | None = None - """Number of processes for parallel 
evaluation.""" + n_cores: PositiveInt = 1 + """Number of cores for parallel evaluation.""" center_init: PositiveFloat = 1.0 """Scaling factor for initial particle positions.""" @@ -215,12 +215,11 @@ def _solve_internal_problem( init_pos=init_pos, ) - objective_wrapper = _create_objective_wrapper(problem) + objective_wrapper = _create_batch_objective(problem, self.n_cores) result = optimizer.optimize( objective_func=objective_wrapper, iters=self.stopping_maxiter, - n_processes=self.n_processes, verbose=self.verbose, ) @@ -246,7 +245,7 @@ def _solve_internal_problem( supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, - disable_history=True, + disable_history=False, ) @dataclass(frozen=True) class PySwarmsLocalBestPSO(Algorithm): @@ -324,8 +323,8 @@ class PySwarmsLocalBestPSO(Algorithm): velocity_clamp_max: float | None = None """Maximum velocity limit for particles.""" - n_processes: PositiveInt | None = None - """Number of processes for parallel evaluation.""" + n_cores: PositiveInt = 1 + """Number of cores for parallel evaluation.""" center_init: PositiveFloat = 1.0 """Scaling factor for initial particle positions.""" @@ -378,12 +377,11 @@ def _solve_internal_problem( static=self.static_topology, ) - objective_wrapper = _create_objective_wrapper(problem) + objective_wrapper = _create_batch_objective(problem, self.n_cores) result = optimizer.optimize( objective_func=objective_wrapper, iters=self.stopping_maxiter, - n_processes=self.n_processes, verbose=self.verbose, ) @@ -409,7 +407,7 @@ def _solve_internal_problem( supports_infinite_bounds=False, supports_linear_constraints=False, supports_nonlinear_constraints=False, - disable_history=True, + disable_history=False, ) @dataclass(frozen=True) class PySwarmsGeneralPSO(Algorithm): @@ -499,8 +497,8 @@ class PySwarmsGeneralPSO(Algorithm): velocity_clamp_max: float | None = None """Maximum velocity limit for particles.""" - n_processes: PositiveInt | None = None - """Number of processes for parallel evaluation.""" + n_cores: PositiveInt = 1 + """Number of cores for parallel evaluation.""" center_init: PositiveFloat = 1.0 """Scaling factor for initial particle positions.""" @@ -566,12 +564,11 @@ def _solve_internal_problem( init_pos=init_pos, ) - objective_wrapper = _create_objective_wrapper(problem) + objective_wrapper = _create_batch_objective(problem, self.n_cores) result = optimizer.optimize( objective_func=objective_wrapper, iters=self.stopping_maxiter, - n_processes=self.n_processes, verbose=self.verbose, ) @@ -660,13 +657,14 @@ def _process_pyswarms_result( ) -def _create_objective_wrapper( +def _create_batch_objective( problem: InternalOptimizationProblem, + n_cores: int, ) -> Callable[[NDArray[np.float64]], NDArray[np.float64]]: - """Create objective function wrapper for PySwarms 2D input format.""" + """Return an batch objective function.""" - def objective_wrapper(x: NDArray[np.float64]) -> NDArray[np.float64]: - """Objective wrapper for PySwarms format. + def batch_objective(x: NDArray[np.float64]) -> NDArray[np.float64]: + """Compute objective values for all particles in x. Args: x: 2D array of shape (n_particles, n_dimensions) with particle positions. @@ -675,9 +673,12 @@ def objective_wrapper(x: NDArray[np.float64]) -> NDArray[np.float64]: 1D array of shape (n_particles,) with objective values. 
""" - return np.array([problem.fun(xi) for xi in x]) + arguments = [xi for xi in x] + results = problem.batch_fun(arguments, n_cores=n_cores) - return objective_wrapper + return np.array(results) + + return batch_objective def _convert_bounds_to_pyswarms( From 5a8d0b52cd29cf6fd35e2cc7a36079b30c7c54f7 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Mon, 25 Aug 2025 13:35:01 +0000 Subject: [PATCH 13/40] docs: add PySwarms optimizers to algorithms.md --- docs/source/algorithms.md | 95 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md index bd8837b9a..01ea0c32f 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -4701,6 +4701,101 @@ package. To use it, you need to have - **seed**: Seed for the random number generator for reproducibility. ``` +## PySwarms Optimizers + +optimagic supports the following continuous algorithms from the +[PySwarms](https://pyswarms.readthedocs.io/en/latest/) library: (GlobalBestPSO, +LocalBestPSO, GeneralOptimizerPSO). To use these optimizers, you need to have +[the pyswarms package](https://github.com/ljvmiranda921/pyswarms) installed. +(`pip install pyswarms`). + +```{eval-rst} +.. dropdown:: pyswarms_global_best + + **How to use this algorithm:** + + .. code-block:: + + import optimagic as om + om.minimize( + ..., + algorithm=om.algos.pyswarms_global_best(n_particles=50, ...) + ) + + or + + .. code-block:: + + om.minimize( + ..., + algorithm="pyswarms_global_best", + algo_options={"n_particles": 50, ...} + ) + + **Description and available options:** + + .. autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsGlobalBestPSO + +``` + +```{eval-rst} +.. dropdown:: pyswarms_local_best + + **How to use this algorithm:** + + .. code-block:: + + import optimagic as om + om.minimize( + ..., + algorithm=om.algos.pyswarms_local_best(n_particles=50, k_neighbors=3, ...) + ) + + or + + .. code-block:: + + om.minimize( + ..., + algorithm="pyswarms_local_best", + algo_options={"n_particles": 50, "k_neighbors": 3, ...} + ) + + **Description and available options:** + + .. autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsLocalBestPSO + +``` + +```{eval-rst} +.. dropdown:: pyswarms_general + + **How to use this algorithm:** + + .. code-block:: + + import optimagic as om + om.minimize( + ..., + algorithm=om.algos.pyswarms_general(n_particles=50, topology_type="star", ...) + ) + + or + + .. code-block:: + + om.minimize( + ..., + algorithm="pyswarms_general", + algo_options={"n_particles": 50, "topology_type": "star", ...} + ) + + **Description and available options:** + + .. 
autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsGeneralPSO + +``` + ## References ```{eval-rst} From 1ca8dcdc88fd874a99e6d830ef12b10c80b05bee Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Mon, 25 Aug 2025 20:23:53 +0000 Subject: [PATCH 14/40] =?UTF-8?q?refactor:=20topology=20=E2=80=93=20swap?= =?UTF-8?q?=20string=20flags=20for=20dataclass=20configs?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../optimizers/pyswarms_optimizers.py | 213 +++++++++++++----- 1 file changed, 155 insertions(+), 58 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 57047eafe..b93bfd11a 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -9,7 +9,7 @@ from __future__ import annotations from dataclasses import dataclass -from typing import Any, Callable, Literal +from typing import Any, Callable, Literal, Union import numpy as np from numpy.typing import NDArray @@ -41,6 +41,106 @@ ) +# ====================================================================================== +# 1. Topology Dataclasses +# ====================================================================================== + + +@dataclass(frozen=True) +class BaseTopology: + """Base class for all topology configurations.""" + + +@dataclass(frozen=True) +class StarTopology(BaseTopology): + """Star topology configuration. + + All particles are connected to the global best. + + """ + + +@dataclass(frozen=True) +class RingTopology(BaseTopology): + """Ring topology configuration. + + Particles are connected in a ring structure. + + """ + + k_neighbors: PositiveInt = 3 + """Number of neighbors for each particle.""" + + p_norm: Literal[1, 2] = 2 + """Distance metric for neighbor selection: 1 (Manhattan), 2 (Euclidean).""" + + static: bool = False + """Whether to use a static or dynamic ring topology. + + When True, the neighborhood structure is fixed throughout optimization. When False, + neighbors are recomputed at each iteration based on current particle positions. + + """ + + +@dataclass(frozen=True) +class VonNeumannTopology(BaseTopology): + """Von Neumann topology configuration. + + Particles are arranged on a 2D grid. + + """ + + p_norm: Literal[1, 2] = 2 + """Distance metric for neighbor selection: 1 (Manhattan), 2 (Euclidean).""" + + range: PositiveInt = 1 + r"""Range parameter :math:`r` for neighborhood size.""" + + +@dataclass(frozen=True) +class PyramidTopology(BaseTopology): + """Pyramid topology configuration.""" + + static: bool = False + """Whether to use a static or dynamic pyramid topology. + + When True, the neighborhood structure is fixed throughout optimization. When False, + neighbors are recomputed at each iteration based on current particle positions. + + """ + + +@dataclass(frozen=True) +class RandomTopology(BaseTopology): + """Random topology configuration. + + Particles are connected to random neighbors. + + """ + + k_neighbors: PositiveInt = 3 + """Number of neighbors for each particle.""" + + static: bool = False + """Whether to use a static or dynamic random topology. + + When True, the neighborhood structure is fixed throughout optimization. When False, + neighbors are recomputed at each iteration based on current particle positions. 
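A minimal usage sketch of the topology configuration introduced in this patch, assuming `RingTopology` can be imported from `optimagic.optimizers.pyswarms_optimizers` as defined above and is passed via the `topology` option that appears further down in this patch. The sphere criterion, start values, and bounds are made up for illustration.

```python
import numpy as np
import optimagic as om
from optimagic.optimizers.pyswarms_optimizers import RingTopology

def sphere(x):
    return x @ x

bounds = om.Bounds(lower=np.full(3, -5.0), upper=np.full(3, 5.0))

# String form: selects a topology with its default settings.
res_star = om.minimize(
    fun=sphere,
    params=np.array([1.0, 2.0, 3.0]),
    bounds=bounds,
    algorithm=om.algos.pyswarms_general(topology="star"),
)

# Dataclass form: exposes topology-specific settings such as the neighborhood size.
res_ring = om.minimize(
    fun=sphere,
    params=np.array([1.0, 2.0, 3.0]),
    bounds=bounds,
    algorithm=om.algos.pyswarms_general(
        topology=RingTopology(k_neighbors=5, p_norm=1, static=True)
    ),
)
```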
+ + """ + + +TopologyConfig = Union[ + Literal["star", "ring", "vonneumann", "random", "pyramid"], + BaseTopology, +] + +# ====================================================================================== +# 2. PSO Options Classes +# ====================================================================================== + + @dataclass(frozen=True) class BasePSOOptions: """Common PSO parameters used by all PSO variants.""" @@ -462,17 +562,12 @@ class PySwarmsGeneralPSO(Algorithm): inertia_weight: PositiveFloat = 0.9 r"""Inertia weight :math:`w` controlling momentum.""" - topology_type: Literal["star", "ring", "vonneumann", "random", "pyramid"] = "star" - """Topology structure for particle communication.""" + topology: TopologyConfig = "star" + """Topology structure for particle communication. - k_neighbors: PositiveInt = 3 - """Number of neighbors for ring and random topologies.""" + Can be a string name or a topology dataclass instance. - p_norm: Literal[1, 2] = 2 - """Distance metric for neighbor selection: 1 (Manhattan), 2 (Euclidean).""" - - vonneumann_range: PositiveInt = 1 - r"""Range parameter :math:`r` for Von Neumann topology.""" + """ convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL """Stop when relative change in objective function is less than this value.""" @@ -503,9 +598,6 @@ class PySwarmsGeneralPSO(Algorithm): center_init: PositiveFloat = 1.0 """Scaling factor for initial particle positions.""" - static_topology: bool = False - """Whether to use static or dynamic topology.""" - verbose: bool = False """Enable or disable the logs and progress bar.""" @@ -517,35 +609,20 @@ def _solve_internal_problem( import pyswarms as ps - # Build structured options using dataclass - k_neighbors = ( - self.k_neighbors - if self.topology_type in ["ring", "vonneumann", "random"] - else None - ) - p_norm = self.p_norm if self.topology_type in ["ring", "vonneumann"] else None - vonneumann_range = ( - self.vonneumann_range if self.topology_type == "vonneumann" else None - ) - - pso_options = GeneralPSOOptions( - cognitive_parameter=self.cognitive_parameter, - social_parameter=self.social_parameter, - inertia_weight=self.inertia_weight, - k_neighbors=k_neighbors, - p_norm=p_norm, - vonneumann_range=vonneumann_range, - ) - options = _build_pso_options_dict(pso_options) + # Resolve topology config to PySwarms topology instance and options + pyswarms_topology, topology_options = _resolve_topology_config(self.topology) - topology = _create_topology_instance(self.topology_type, self.static_topology) + base_options = { + "c1": self.cognitive_parameter, + "c2": self.social_parameter, + "w": self.inertia_weight, + } + options = {**base_options, **topology_options} velocity_clamp = _build_velocity_clamp( self.velocity_clamp_min, self.velocity_clamp_max ) - bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) - init_pos = _create_initial_population( x0=x0, n_particles=self.n_particles, bounds=bounds, center=self.center_init ) @@ -554,7 +631,7 @@ def _solve_internal_problem( n_particles=self.n_particles, dimensions=len(x0), options=options, - topology=topology, + topology=pyswarms_topology, bounds=bounds, bh_strategy=self.boundary_strategy, velocity_clamp=velocity_clamp, @@ -581,6 +658,47 @@ def _solve_internal_problem( return res +def _resolve_topology_config( + config: TopologyConfig, +) -> tuple[Any, dict[str, float | int]]: + """Resolves the topology config into a pyswarms topology instance and options + dict.""" + from pyswarms.backend.topology import Pyramid, Random, Ring, 
Star, VonNeumann + + if isinstance(config, str): + default_topologies = { + "star": StarTopology(), + "ring": RingTopology(), + "vonneumann": VonNeumannTopology(), + "random": RandomTopology(), + "pyramid": PyramidTopology(), + } + if config not in default_topologies: + raise ValueError(f"Unknown topology string: '{config}'") + config = default_topologies[config] + + topology_instance: Any + options: dict[str, float | int] = {} + + if isinstance(config, StarTopology): + topology_instance = Star() + elif isinstance(config, RingTopology): + topology_instance = Ring(static=config.static) + options = {"k": config.k_neighbors, "p": config.p_norm} + elif isinstance(config, VonNeumannTopology): + topology_instance = VonNeumann() + options = {"p": config.p_norm, "r": config.range} + elif isinstance(config, RandomTopology): + topology_instance = Random(static=config.static) + options = {"k": config.k_neighbors} + elif isinstance(config, PyramidTopology): + topology_instance = Pyramid(static=config.static) + else: + raise TypeError(f"Unsupported topology configuration type: {type(config)}") + + return topology_instance, options + + def _build_pso_options_dict(options: BasePSOOptions) -> dict[str, float | int]: """Convert structured PSO options to PySwarms format.""" base_options = { @@ -618,27 +736,6 @@ def _build_velocity_clamp( return clamp -def _create_topology_instance(topology_type: str, static: bool) -> Any: - """Create PySwarms topology instance from string identifier.""" - if not IS_PYSWARMS_INSTALLED: - raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) - - from pyswarms.backend.topology import Pyramid, Random, Ring, Star, VonNeumann - - topology_map = { - "star": Star(), - "ring": Ring(static=static), - "vonneumann": VonNeumann(), - "random": Random(static=static), - "pyramid": Pyramid(static=static), - } - - if topology_type not in topology_map: - raise ValueError(f"Unknown topology type: {topology_type}") - - return topology_map[topology_type] - - def _process_pyswarms_result( result: tuple[float, NDArray[np.float64]], n_particles: int, n_iterations_run: int ) -> InternalOptimizeResult: From 212b4e88b566350748a83aad6ee21b171b00c397 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Mon, 25 Aug 2025 20:48:45 +0000 Subject: [PATCH 15/40] docs: clarify boundary & velocity strategy choices in docstrings --- .../optimizers/pyswarms_optimizers.py | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index b93bfd11a..7af4bfd86 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -256,10 +256,12 @@ class PySwarmsGlobalBestPSO(Algorithm): boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" - """Strategy for handling out-of-bounds particles.""" + """Strategy for handling out-of-bounds particles. Available options: periodic (default), + reflective, shrink, random, intermediate.""" velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for handling out-of-bounds velocities.""" + """Strategy for handling out-of-bounds velocities. 
Available options: unmodified (default), + adjust, invert, zero.""" velocity_clamp_min: float | None = None """Minimum velocity limit for particles.""" @@ -412,10 +414,12 @@ class PySwarmsLocalBestPSO(Algorithm): boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" - """Strategy for handling out-of-bounds particles.""" + """Strategy for handling out-of-bounds particles. Available options: periodic (default), + reflective, shrink, random, intermediate.""" velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for handling out-of-bounds velocities.""" + """Strategy for handling out-of-bounds velocities. Available options: unmodified (default), + adjust, invert, zero.""" velocity_clamp_min: float | None = None """Minimum velocity limit for particles.""" @@ -430,7 +434,9 @@ class PySwarmsLocalBestPSO(Algorithm): """Scaling factor for initial particle positions.""" static_topology: bool = False - """Whether to use static or dynamic ring topology.""" + """Whether to use static or dynamic ring topology. When True, the neighborhood + structure is fixed throughout optimization. When False, neighbors are recomputed + at each iteration based on current particle positions.""" verbose: bool = False """Enable or disable the logs and progress bar.""" @@ -581,10 +587,12 @@ class PySwarmsGeneralPSO(Algorithm): boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" - """Strategy for handling out-of-bounds particles.""" + """Strategy for handling out-of-bounds particles. Available options: periodic (default), + reflective, shrink, random, intermediate.""" velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for handling out-of-bounds velocities.""" + """Strategy for handling out-of-bounds velocities. Available options: unmodified (default), + adjust, invert, zero.""" velocity_clamp_min: float | None = None """Minimum velocity limit for particles.""" From dc41d467cbaf731d39a6ed701465710beb59b5b6 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Tue, 26 Aug 2025 07:20:03 +0000 Subject: [PATCH 16/40] docs: add PSO citations to algorithm docstrings --- docs/source/refs.bib | 36 +++++++++++++++++++ .../optimizers/pyswarms_optimizers.py | 8 +++++ 2 files changed, 44 insertions(+) diff --git a/docs/source/refs.bib b/docs/source/refs.bib index f8005d2e9..f83a50a6d 100644 --- a/docs/source/refs.bib +++ b/docs/source/refs.bib @@ -1068,4 +1068,40 @@ @inproceedings{gardner2014bayesian year={2014} } +@INPROCEEDINGS{EberhartKennedy1995, + author = {Eberhart, R. and Kennedy, J.}, + booktitle = {MHS'95. 
Proceedings of the Sixth International Symposium on Micro Machine and Human Science}, + title = {A new optimizer using particle swarm theory}, + year = {1995}, + pages = {39-43}, + keywords = {Particle swarm optimization;Genetic algorithms;Testing;Acceleration;Particle tracking;Optimization methods;Artificial neural networks;Evolutionary computation;Performance evaluation;Statistics}, + doi = {10.1109/MHS.1995.494215} +} + +@INPROCEEDINGS{Lane2008SpatialPSO, + author={Lane, James and Engelbrecht, Andries and Gain, James}, + booktitle={2008 IEEE Swarm Intelligence Symposium}, + title={Particle swarm optimization with spatially meaningful neighbours}, + year={2008}, + volume={}, + number={}, + pages={1-8}, + keywords={Particle swarm optimization;Topology;Birds;Convergence;Computer science;USA Councils;Cities and towns;Africa;Cultural differences;Data structures;Delaunay Triangulation;Neighbour Topology;Particle Swarm Optimization;Heuristics}, + doi={10.1109/SIS.2008.4668281} +} + +@article{Ni2013, +author = {Ni, Qingjian and Deng, Jianming}, +title = {A New Logistic Dynamic Particle Swarm Optimization Algorithm Based on Random Topology}, +journal = {The Scientific World Journal}, +volume = {2013}, +number = {1}, +pages = {409167}, +doi = {https://doi.org/10.1155/2013/409167}, +url = {https://onlinelibrary.wiley.com/doi/abs/10.1155/2013/409167}, +eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1155/2013/409167}, +abstract = {Population topology of particle swarm optimization (PSO) will directly affect the dissemination of optimal information during the evolutionary process and will have a significant impact on the performance of PSO. Classic static population topologies are usually used in PSO, such as fully connected topology, ring topology, star topology, and square topology. In this paper, the performance of PSO with the proposed random topologies is analyzed, and the relationship between population topology and the performance of PSO is also explored from the perspective of graph theory characteristics in population topologies. Further, in a relatively new PSO variant which named logistic dynamic particle optimization, an extensive simulation study is presented to discuss the effectiveness of the random topology and the design strategies of population topology. Finally, the experimental data are analyzed and discussed. And about the design and use of population topology on PSO, some useful conclusions are proposed which can provide a basis for further discussion and research.}, +year = {2013} +} + @Comment{jabref-meta: databaseType:bibtex;} diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 7af4bfd86..35dd2be8a 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -230,6 +230,8 @@ class PySwarmsGlobalBestPSO(Algorithm): - :math:`y_{ij}(t)`: personal best position of particle i - :math:`\hat{y}_j(t)`: global best position + This algorithm is an adaptation of the original Particle Swarm Optimization method + by :cite:`Kennedy1995` """ n_particles: PositiveInt = 50 @@ -382,6 +384,8 @@ class PySwarmsLocalBestPSO(Algorithm): - :math:`y_{ij}(t)`: personal best position of particle i - :math:`\hat{y}_{lj}(t)`: local best position in particle i's neighborhood + The algorithm is based on the original Particle Swarm Optimization method by + :cite:`Kennedy1995` and the local best concept introduced in :cite:`EberhartKennedy1995`. 
""" n_particles: PositiveInt = 50 @@ -554,6 +558,10 @@ class PySwarmsGeneralPSO(Algorithm): - **Random**: Dynamic random connections - **Pyramid**: Hierarchical pyramid-like network of connected particles + This algorithm is based on the original Particle Swarm Optimization method by + :cite:`Kennedy1995` with configurable topology structures. For topology references, + see :cite:`Lane2008SpatialPSO, Ni2013`. + """ n_particles: PositiveInt = 50 From 5e41346409ca65cd6ff1052f601418203bba9197 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Tue, 26 Aug 2025 07:31:55 +0000 Subject: [PATCH 17/40] refactor: drop unused GeneralPSOOptions dataclass --- .../optimizers/pyswarms_optimizers.py | 29 ++----------------- 1 file changed, 2 insertions(+), 27 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 35dd2be8a..24b7d593a 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -166,20 +166,6 @@ class LocalBestPSOOptions(BasePSOOptions): """Distance metric for neighbor selection (1=Manhattan, 2=Euclidean).""" -@dataclass(frozen=True) -class GeneralPSOOptions(BasePSOOptions): - """General PSO parameters with topology support.""" - - k_neighbors: PositiveInt | None = None - """Number of neighbors for topologies requiring neighborhoods.""" - - p_norm: Literal[1, 2] | None = None - """Distance metric for neighbor selection (1=Manhattan, 2=Euclidean).""" - - vonneumann_range: PositiveInt | None = None - """Range parameter for Von Neumann topology.""" - - @mark.minimizer( name="pyswarms_global_best", solver_type=AggregationLevel.SCALAR, @@ -725,19 +711,8 @@ def _build_pso_options_dict(options: BasePSOOptions) -> dict[str, float | int]: # Add topology-specific options if present if isinstance(options, LocalBestPSOOptions): - base_options.update( - { - "k": options.k_neighbors, - "p": options.p_norm, - } - ) - elif isinstance(options, GeneralPSOOptions): - if options.k_neighbors is not None: - base_options["k"] = options.k_neighbors - if options.p_norm is not None: - base_options["p"] = options.p_norm - if options.vonneumann_range is not None: - base_options["r"] = options.vonneumann_range + base_options["k"] = options.k_neighbors + base_options["p"] = options.p_norm return base_options From a7fe49f4e5d16a510bdb0a67acf2e059dc11e8ee Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Tue, 26 Aug 2025 07:51:37 +0000 Subject: [PATCH 18/40] docs: clarify topology parameter usage --- .../optimizers/pyswarms_optimizers.py | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 24b7d593a..f03c47c02 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -274,7 +274,6 @@ def _solve_internal_problem( import pyswarms as ps - # Build structured options using dataclass pso_options = BasePSOOptions( cognitive_parameter=self.cognitive_parameter, social_parameter=self.social_parameter, @@ -439,7 +438,6 @@ def _solve_internal_problem( import pyswarms as ps - # Build structured options using dataclass pso_options = LocalBestPSOOptions( cognitive_parameter=self.cognitive_parameter, social_parameter=self.social_parameter, @@ -536,14 +534,6 @@ class PySwarmsGeneralPSO(Algorithm): - :math:`y_{ij}(t)`: personal 
best position of particle i - :math:`\hat{y}_{nj}(t)`: neighborhood best position - **Topology Options:** - - - **Star**: All particles connected to global best - - **Ring**: Ring arrangement with k neighbors - - **Von Neumann**: 2D grid topology - - **Random**: Dynamic random connections - - **Pyramid**: Hierarchical pyramid-like network of connected particles - This algorithm is based on the original Particle Swarm Optimization method by :cite:`Kennedy1995` with configurable topology structures. For topology references, see :cite:`Lane2008SpatialPSO, Ni2013`. @@ -565,8 +555,15 @@ class PySwarmsGeneralPSO(Algorithm): topology: TopologyConfig = "star" """Topology structure for particle communication. - Can be a string name or a topology dataclass instance. + The `topology` can be specified in two ways: + + 1. **By name (string):** e.g., ``"star"``, ``"ring"``. This uses the default + parameters for that topology. + 2. **By dataclass instance:** e.g., ``RingTopology(k_neighbors=5, p_norm=1)``. + This allows for detailed configuration of topology-specific parameters. + Available topologies: ``StarTopology``, ``RingTopology``, ``VonNeumannTopology``, + ``RandomTopology``, ``PyramidTopology``. """ convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL @@ -709,7 +706,7 @@ def _build_pso_options_dict(options: BasePSOOptions) -> dict[str, float | int]: "w": options.inertia_weight, } - # Add topology-specific options if present + # Add topology-specific options for local-best PSO if isinstance(options, LocalBestPSOOptions): base_options["k"] = options.k_neighbors base_options["p"] = options.p_norm From 2c2f5eaa84e57ca5e66700094b8ef71b6877512b Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Tue, 26 Aug 2025 09:00:27 +0000 Subject: [PATCH 19/40] format docstrings --- .../optimizers/pyswarms_optimizers.py | 63 ++++++++++++++----- 1 file changed, 47 insertions(+), 16 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index f03c47c02..7b4ce33b0 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -218,6 +218,7 @@ class PySwarmsGlobalBestPSO(Algorithm): This algorithm is an adaptation of the original Particle Swarm Optimization method by :cite:`Kennedy1995` + """ n_particles: PositiveInt = 50 @@ -244,12 +245,20 @@ class PySwarmsGlobalBestPSO(Algorithm): boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" - """Strategy for handling out-of-bounds particles. Available options: periodic (default), - reflective, shrink, random, intermediate.""" + """Strategy for handling out-of-bounds particles. + + Available options: periodic (default), + reflective, shrink, random, intermediate. + + """ velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for handling out-of-bounds velocities. Available options: unmodified (default), - adjust, invert, zero.""" + """Strategy for handling out-of-bounds velocities. + + Available options: unmodified (default), + adjust, invert, zero. 
+ + """ velocity_clamp_min: float | None = None """Minimum velocity limit for particles.""" @@ -370,7 +379,9 @@ class PySwarmsLocalBestPSO(Algorithm): - :math:`\hat{y}_{lj}(t)`: local best position in particle i's neighborhood The algorithm is based on the original Particle Swarm Optimization method by - :cite:`Kennedy1995` and the local best concept introduced in :cite:`EberhartKennedy1995`. + :cite:`Kennedy1995` and the local best concept introduced in + :cite:`EberhartKennedy1995`. + """ n_particles: PositiveInt = 50 @@ -403,12 +414,20 @@ class PySwarmsLocalBestPSO(Algorithm): boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" - """Strategy for handling out-of-bounds particles. Available options: periodic (default), - reflective, shrink, random, intermediate.""" + """Strategy for handling out-of-bounds particles. + + Available options: periodic (default), + reflective, shrink, random, intermediate. + + """ velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for handling out-of-bounds velocities. Available options: unmodified (default), - adjust, invert, zero.""" + """Strategy for handling out-of-bounds velocities. + + Available options: unmodified (default), + adjust, invert, zero. + + """ velocity_clamp_min: float | None = None """Minimum velocity limit for particles.""" @@ -423,9 +442,12 @@ class PySwarmsLocalBestPSO(Algorithm): """Scaling factor for initial particle positions.""" static_topology: bool = False - """Whether to use static or dynamic ring topology. When True, the neighborhood - structure is fixed throughout optimization. When False, neighbors are recomputed - at each iteration based on current particle positions.""" + """Whether to use static or dynamic ring topology. + + When True, the neighborhood structure is fixed throughout optimization. When False, + neighbors are recomputed at each iteration based on current particle positions. + + """ verbose: bool = False """Enable or disable the logs and progress bar.""" @@ -564,6 +586,7 @@ class PySwarmsGeneralPSO(Algorithm): Available topologies: ``StarTopology``, ``RingTopology``, ``VonNeumannTopology``, ``RandomTopology``, ``PyramidTopology``. + """ convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL @@ -578,12 +601,20 @@ class PySwarmsGeneralPSO(Algorithm): boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" - """Strategy for handling out-of-bounds particles. Available options: periodic (default), - reflective, shrink, random, intermediate.""" + """Strategy for handling out-of-bounds particles. + + Available options: periodic (default), + reflective, shrink, random, intermediate. + + """ velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for handling out-of-bounds velocities. Available options: unmodified (default), - adjust, invert, zero.""" + """Strategy for handling out-of-bounds velocities. + + Available options: unmodified (default), + adjust, invert, zero. 
+ + """ velocity_clamp_min: float | None = None """Minimum velocity limit for particles.""" From 19c42eb3c6e0b72173275e21b86de9de97d40dff Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 27 Aug 2025 06:13:07 +0000 Subject: [PATCH 20/40] feat: expose initial_population and oh_strategy in PySwarms optimizers --- .../optimizers/pyswarms_optimizers.py | 85 ++++++++++++++++--- 1 file changed, 72 insertions(+), 13 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 7b4ce33b0..6d1a80eb8 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -242,6 +242,17 @@ class PySwarmsGlobalBestPSO(Algorithm): stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" + initial_population: NDArray[np.float64] | None = None + """Option to set the initial particle positions. + + If None, the population is generated randomly within the given bounds , or within + [0, 1] if bounds are not specified. + + """ + + oh_strategy: dict[str, str] | None = None + """Dictionary of strategies for time-varying options.""" + boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" @@ -296,15 +307,22 @@ def _solve_internal_problem( bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) - init_pos = _create_initial_population( - x0=x0, n_particles=self.n_particles, bounds=bounds, center=self.center_init - ) + if self.initial_population is not None: + init_pos = self.initial_population + else: + init_pos = _create_initial_population( + x0=x0, + n_particles=self.n_particles, + bounds=bounds, + center=self.center_init, + ) optimizer = ps.single.GlobalBestPSO( n_particles=self.n_particles, dimensions=len(x0), options=options, bounds=bounds, + oh_strategy=self.oh_strategy, bh_strategy=self.boundary_strategy, velocity_clamp=velocity_clamp, vh_strategy=self.velocity_strategy, @@ -411,6 +429,17 @@ class PySwarmsLocalBestPSO(Algorithm): stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" + initial_population: NDArray[np.float64] | None = None + """Option to set the initial particle positions. + + If None, the population is generated randomly within the given bounds , or within + [0, 1] if bounds are not specified. 
+ + """ + + oh_strategy: dict[str, str] | None = None + """Dictionary of strategies for time-varying options.""" + boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" @@ -475,15 +504,22 @@ def _solve_internal_problem( bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) - init_pos = _create_initial_population( - x0=x0, n_particles=self.n_particles, bounds=bounds, center=self.center_init - ) + if self.initial_population is not None: + init_pos = self.initial_population + else: + init_pos = _create_initial_population( + x0=x0, + n_particles=self.n_particles, + bounds=bounds, + center=self.center_init, + ) optimizer = ps.single.LocalBestPSO( n_particles=self.n_particles, dimensions=len(x0), options=options, bounds=bounds, + oh_strategy=self.oh_strategy, bh_strategy=self.boundary_strategy, velocity_clamp=velocity_clamp, vh_strategy=self.velocity_strategy, @@ -598,6 +634,17 @@ class PySwarmsGeneralPSO(Algorithm): stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" + initial_population: NDArray[np.float64] | None = None + """Option to set the initial particle positions. + + If None, the population is generated randomly within the given bounds , or within + [0, 1] if bounds are not specified. + + """ + + oh_strategy: dict[str, str] | None = None + """Dictionary of strategies for time-varying options.""" + boundary_strategy: Literal[ "periodic", "reflective", "shrink", "random", "intermediate" ] = "periodic" @@ -653,9 +700,16 @@ def _solve_internal_problem( self.velocity_clamp_min, self.velocity_clamp_max ) bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) - init_pos = _create_initial_population( - x0=x0, n_particles=self.n_particles, bounds=bounds, center=self.center_init - ) + + if self.initial_population is not None: + init_pos = self.initial_population + else: + init_pos = _create_initial_population( + x0=x0, + n_particles=self.n_particles, + bounds=bounds, + center=self.center_init, + ) optimizer = ps.single.GeneralOptimizerPSO( n_particles=self.n_particles, @@ -663,6 +717,7 @@ def _solve_internal_problem( options=options, topology=pyswarms_topology, bounds=bounds, + oh_strategy=self.oh_strategy, bh_strategy=self.boundary_strategy, velocity_clamp=velocity_clamp, vh_strategy=self.velocity_strategy, @@ -818,15 +873,15 @@ def _convert_bounds_to_pyswarms( def _create_initial_population( x0: NDArray[np.float64], n_particles: int, - bounds: tuple[NDArray[np.float64], NDArray[np.float64]], + bounds: tuple[NDArray[np.float64], NDArray[np.float64]] | None, center: float = 1.0, ) -> NDArray[np.float64]: - """Create initial population with x0 as first particle. + """Create an initial swarm population. Args: x0: Initial parameter vector n_particles: Number of particles in the swarm - bounds: Tuple of (lower_bounds, upper_bounds) arrays + bounds: Tuple of (lower_bounds, upper_bounds) arrays or None. 
center: Scaling factor for initial particle positions around bounds Returns: @@ -834,7 +889,11 @@ def _create_initial_population( """ n_dimensions = len(x0) - lower_bounds, upper_bounds = bounds + if bounds is None: + lower_bounds: NDArray[np.float64] = np.zeros(n_dimensions, dtype=np.float64) + upper_bounds: NDArray[np.float64] = np.ones(n_dimensions, dtype=np.float64) + else: + lower_bounds, upper_bounds = bounds # Generate random initial positions within the bounds, scaled by center init_pos = center * np.random.uniform( From 47f13428515e8c4cf48a52871e9d22f3eabf2ff6 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 27 Aug 2025 06:50:57 +0000 Subject: [PATCH 21/40] refactor: simplify bounds handling --- .../optimizers/pyswarms_optimizers.py | 33 +++++++++---------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 6d1a80eb8..3891f3ef2 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -305,7 +305,7 @@ def _solve_internal_problem( self.velocity_clamp_min, self.velocity_clamp_max ) - bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) + bounds = _get_pyswarms_bounds(problem.bounds) if self.initial_population is not None: init_pos = self.initial_population @@ -502,7 +502,7 @@ def _solve_internal_problem( self.velocity_clamp_min, self.velocity_clamp_max ) - bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) + bounds = _get_pyswarms_bounds(problem.bounds) if self.initial_population is not None: init_pos = self.initial_population @@ -699,7 +699,7 @@ def _solve_internal_problem( velocity_clamp = _build_velocity_clamp( self.velocity_clamp_min, self.velocity_clamp_max ) - bounds = _convert_bounds_to_pyswarms(problem.bounds, len(x0)) + bounds = _get_pyswarms_bounds(problem.bounds) if self.initial_population is not None: init_pos = self.initial_population @@ -852,22 +852,21 @@ def batch_objective(x: NDArray[np.float64]) -> NDArray[np.float64]: return batch_objective -def _convert_bounds_to_pyswarms( - bounds: InternalBounds, n_dimensions: int -) -> tuple[NDArray[np.float64], NDArray[np.float64]]: +def _get_pyswarms_bounds( + bounds: InternalBounds, +) -> tuple[NDArray[np.float64], NDArray[np.float64]] | None: """Convert optimagic bounds to PySwarms format.""" - lower_bounds_arr = ( - bounds.lower if bounds.lower is not None else np.zeros(n_dimensions) - ) - upper_bounds_arr = ( - bounds.upper if bounds.upper is not None else np.ones(n_dimensions) - ) - if not np.all(np.isfinite(lower_bounds_arr)) or not np.all( - np.isfinite(upper_bounds_arr) - ): - raise ValueError("PySwarms does not support infinite bounds.") + pyswarms_bounds = None + + if bounds.lower is not None and bounds.upper is not None: + if not np.all(np.isfinite(bounds.lower)) or not np.all( + np.isfinite(bounds.upper) + ): + raise ValueError("PySwarms does not support infinite bounds.") + + pyswarms_bounds = (bounds.lower, bounds.upper) - return (lower_bounds_arr, upper_bounds_arr) + return pyswarms_bounds def _create_initial_population( From 76fc623aec3016f42878bae7f1f189f4d5e9e08a Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 27 Aug 2025 07:05:45 +0000 Subject: [PATCH 22/40] refactor: infer actual iterations & evals --- .../optimizers/pyswarms_optimizers.py | 26 ++++++------------- 1 file changed, 8 insertions(+), 18 deletions(-) 
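For orientation, here is a minimal usage sketch of the wrapper added in this series, assuming optimagic's public minimize interface with the Bounds container and the algo_options argument; it is illustrative only and not part of the patch. Finite lower and upper bounds are supplied explicitly because the wrapper rejects infinite bounds.

import numpy as np
import optimagic as om


def sphere(x):
    return np.sum(x**2)


res = om.minimize(
    fun=sphere,
    params=np.full(3, 2.0),
    algorithm="pyswarms_global_best",
    bounds=om.Bounds(lower=np.full(3, -5.0), upper=np.full(3, 5.0)),
    algo_options={"n_particles": 30, "stopping_maxiter": 200},
)
print(res.params)  # best position found by the swarm
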
diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 3891f3ef2..13cda6688 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -339,11 +339,7 @@ def _solve_internal_problem( verbose=self.verbose, ) - res = _process_pyswarms_result( - result=result, - n_particles=self.n_particles, - n_iterations_run=self.stopping_maxiter, - ) + res = _process_pyswarms_result(result=result, optimizer=optimizer) return res @@ -537,11 +533,7 @@ def _solve_internal_problem( verbose=self.verbose, ) - res = _process_pyswarms_result( - result=result, - n_particles=self.n_particles, - n_iterations_run=self.stopping_maxiter, - ) + res = _process_pyswarms_result(result=result, optimizer=optimizer) return res @@ -734,11 +726,7 @@ def _solve_internal_problem( verbose=self.verbose, ) - res = _process_pyswarms_result( - result=result, - n_particles=self.n_particles, - n_iterations_run=self.stopping_maxiter, - ) + res = _process_pyswarms_result(result=result, optimizer=optimizer) return res @@ -811,20 +799,22 @@ def _build_velocity_clamp( def _process_pyswarms_result( - result: tuple[float, NDArray[np.float64]], n_particles: int, n_iterations_run: int + result: tuple[float, NDArray[np.float64]], optimizer: Any ) -> InternalOptimizeResult: """Convert PySwarms result to optimagic format.""" best_cost, best_position = result + n_iterations = len(optimizer.cost_history) + n_particles = optimizer.n_particles return InternalOptimizeResult( x=best_position, fun=best_cost, success=True, message="PySwarms optimization completed", - n_fun_evals=n_particles * n_iterations_run, + n_fun_evals=n_particles * n_iterations, n_jac_evals=0, n_hess_evals=0, - n_iterations=n_iterations_run, + n_iterations=n_iterations, ) From 543b55ce5154867fcc7b7ff7cf2507c6456c6ab5 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 27 Aug 2025 08:07:33 +0000 Subject: [PATCH 23/40] add pyswarms to mypy ignore list --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index c74752252..58936ca98 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -380,6 +380,7 @@ module = [ "pdbp", "iminuit", "nevergrad", + "pyswarms" "yaml", ] ignore_missing_imports = true From 5d6132fbd6c3e4637ec96ae8b794fb2fd710af3b Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 27 Aug 2025 15:11:44 +0000 Subject: [PATCH 24/40] fix: mark PySwarms optimizers as stochastic via seed parameter --- .../optimizers/pyswarms_optimizers.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 13cda6688..9147a83df 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -286,12 +286,18 @@ class PySwarmsGlobalBestPSO(Algorithm): verbose: bool = False """Enable or disable the logs and progress bar.""" + seed: int | None = None + """Random seed for reproducibility.""" + def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_PYSWARMS_INSTALLED: raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) + if self.seed is not None: + np.random.seed(self.seed) + import pyswarms as ps pso_options = BasePSOOptions( @@ -477,12 +483,18 @@ class PySwarmsLocalBestPSO(Algorithm): verbose: 
bool = False """Enable or disable the logs and progress bar.""" + seed: int | None = None + """Random seed for reproducibility.""" + def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_PYSWARMS_INSTALLED: raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) + if self.seed is not None: + np.random.seed(self.seed) + import pyswarms as ps pso_options = LocalBestPSOOptions( @@ -670,12 +682,18 @@ class PySwarmsGeneralPSO(Algorithm): verbose: bool = False """Enable or disable the logs and progress bar.""" + seed: int | None = None + """Random seed for reproducibility.""" + def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: if not IS_PYSWARMS_INSTALLED: raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) + if self.seed is not None: + np.random.seed(self.seed) + import pyswarms as ps # Resolve topology config to PySwarms topology instance and options From 9fbabd5e93483d6b0ece14f9424647ac35f6d057 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 27 Aug 2025 15:14:18 +0000 Subject: [PATCH 25/40] fix: add missing comma to mypy ignore list --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 58936ca98..c42cfae69 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -380,7 +380,7 @@ module = [ "pdbp", "iminuit", "nevergrad", - "pyswarms" + "pyswarms", "yaml", ] ignore_missing_imports = true From 2dbd9bfb0989c365acf351a7ef922af144803ed3 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 27 Aug 2025 15:35:53 +0000 Subject: [PATCH 26/40] extend mypy ignores to pyswarms.backend.topology --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index c42cfae69..339856f9b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -381,6 +381,7 @@ module = [ "iminuit", "nevergrad", "pyswarms", + "pyswarms.backend.topology", "yaml", ] ignore_missing_imports = true From 429fe28e35b11aad4fb1b2dda98bff21c90f1424 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Thu, 28 Aug 2025 07:33:53 +0000 Subject: [PATCH 27/40] refactor: use dataclasses for PSO hyper-params --- .../optimizers/pyswarms_optimizers.py | 106 ++++++------------ 1 file changed, 33 insertions(+), 73 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 9147a83df..61d287fd7 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -47,12 +47,12 @@ @dataclass(frozen=True) -class BaseTopology: +class Topology: """Base class for all topology configurations.""" @dataclass(frozen=True) -class StarTopology(BaseTopology): +class StarTopology(Topology): """Star topology configuration. All particles are connected to the global best. @@ -61,7 +61,7 @@ class StarTopology(BaseTopology): @dataclass(frozen=True) -class RingTopology(BaseTopology): +class RingTopology(Topology): """Ring topology configuration. Particles are connected in a ring structure. @@ -84,7 +84,7 @@ class RingTopology(BaseTopology): @dataclass(frozen=True) -class VonNeumannTopology(BaseTopology): +class VonNeumannTopology(Topology): """Von Neumann topology configuration. Particles are arranged on a 2D grid. 
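As a quick illustration of how these frozen topology dataclasses are consumed, the following sketch shows the mapping performed by _resolve_topology_config defined earlier in this module; the names come from the patched file, and the exact option dict is an expectation rather than a tested result.

from optimagic.optimizers.pyswarms_optimizers import (
    RingTopology,
    _resolve_topology_config,
)

# A dynamic ring with 4 neighbors under the Manhattan norm resolves to a
# pyswarms Ring(static=False) instance plus the extra option keys it needs.
topology, extra_options = _resolve_topology_config(
    RingTopology(k_neighbors=4, p_norm=1, static=False)
)
# extra_options == {"k": 4, "p": 1}; it is merged into the c1/c2/w options
# dict before being handed to ps.single.GeneralOptimizerPSO.
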
@@ -99,7 +99,7 @@ class VonNeumannTopology(BaseTopology): @dataclass(frozen=True) -class PyramidTopology(BaseTopology): +class PyramidTopology(Topology): """Pyramid topology configuration.""" static: bool = False @@ -112,7 +112,7 @@ class PyramidTopology(BaseTopology): @dataclass(frozen=True) -class RandomTopology(BaseTopology): +class RandomTopology(Topology): """Random topology configuration. Particles are connected to random neighbors. @@ -133,7 +133,7 @@ class RandomTopology(BaseTopology): TopologyConfig = Union[ Literal["star", "ring", "vonneumann", "random", "pyramid"], - BaseTopology, + Topology, ] # ====================================================================================== @@ -142,27 +142,27 @@ class RandomTopology(BaseTopology): @dataclass(frozen=True) -class BasePSOOptions: +class PSOOptions: """Common PSO parameters used by all PSO variants.""" - cognitive_parameter: PositiveFloat + cognitive_parameter: PositiveFloat = 0.5 """Cognitive parameter (c1) - attraction to personal best.""" - social_parameter: PositiveFloat + social_parameter: PositiveFloat = 0.3 """Social parameter (c2) - attraction to neighborhood/global best.""" - inertia_weight: PositiveFloat + inertia_weight: PositiveFloat = 0.9 """Inertia weight (w) - momentum control.""" @dataclass(frozen=True) -class LocalBestPSOOptions(BasePSOOptions): +class LocalBestPSOOptions(PSOOptions): """Local Best PSO specific parameters.""" - k_neighbors: PositiveInt + k_neighbors: PositiveInt = 3 """Number of neighbors in local neighborhood.""" - p_norm: Literal[1, 2] + p_norm: Literal[1, 2] = 2 """Distance metric for neighbor selection (1=Manhattan, 2=Euclidean).""" @@ -224,14 +224,8 @@ class PySwarmsGlobalBestPSO(Algorithm): n_particles: PositiveInt = 50 """Number of particles in the swarm.""" - cognitive_parameter: PositiveFloat = 0.5 - r"""Cognitive parameter :math:`c_1` controlling attraction to personal best.""" - - social_parameter: PositiveFloat = 0.3 - r"""Social parameter :math:`c_2` controlling attraction to global best.""" - - inertia_weight: PositiveFloat = 0.9 - r"""Inertia weight :math:`w` controlling momentum.""" + options: PSOOptions = PSOOptions() + """PSO hyperparameters controlling particle behavior.""" convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL """Stop when relative change in objective function is less than this value.""" @@ -300,12 +294,7 @@ def _solve_internal_problem( import pyswarms as ps - pso_options = BasePSOOptions( - cognitive_parameter=self.cognitive_parameter, - social_parameter=self.social_parameter, - inertia_weight=self.inertia_weight, - ) - options = _build_pso_options_dict(pso_options) + pso_options_dict = _pso_options_to_dict(self.options) velocity_clamp = _build_velocity_clamp( self.velocity_clamp_min, self.velocity_clamp_max @@ -326,7 +315,7 @@ def _solve_internal_problem( optimizer = ps.single.GlobalBestPSO( n_particles=self.n_particles, dimensions=len(x0), - options=options, + options=pso_options_dict, bounds=bounds, oh_strategy=self.oh_strategy, bh_strategy=self.boundary_strategy, @@ -407,20 +396,8 @@ class PySwarmsLocalBestPSO(Algorithm): n_particles: PositiveInt = 50 """Number of particles in the swarm.""" - cognitive_parameter: PositiveFloat = 0.5 - r"""Cognitive parameter :math:`c_1` controlling attraction to personal best.""" - - social_parameter: PositiveFloat = 0.3 - r"""Social parameter :math:`c_2` controlling attraction to local best.""" - - inertia_weight: PositiveFloat = 0.9 - r"""Inertia weight :math:`w` controlling momentum.""" - - k_neighbors: 
PositiveInt = 3 - r"""Number of neighbors :math:`k` defining local neighborhood.""" - - p_norm: Literal[1, 2] = 2 - """Distance metric: 1 (Manhattan), 2 (Euclidean). """ + options: LocalBestPSOOptions = LocalBestPSOOptions() + """"PSO hyperparameters controlling particle behavior.""" convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL """Stop when relative change in objective function is less than this value.""" @@ -497,14 +474,7 @@ def _solve_internal_problem( import pyswarms as ps - pso_options = LocalBestPSOOptions( - cognitive_parameter=self.cognitive_parameter, - social_parameter=self.social_parameter, - inertia_weight=self.inertia_weight, - k_neighbors=self.k_neighbors, - p_norm=self.p_norm, - ) - options = _build_pso_options_dict(pso_options) + pso_options_dict = _pso_options_to_dict(self.options) velocity_clamp = _build_velocity_clamp( self.velocity_clamp_min, self.velocity_clamp_max @@ -525,7 +495,7 @@ def _solve_internal_problem( optimizer = ps.single.LocalBestPSO( n_particles=self.n_particles, dimensions=len(x0), - options=options, + options=pso_options_dict, bounds=bounds, oh_strategy=self.oh_strategy, bh_strategy=self.boundary_strategy, @@ -605,14 +575,8 @@ class PySwarmsGeneralPSO(Algorithm): n_particles: PositiveInt = 50 """Number of particles in the swarm.""" - cognitive_parameter: PositiveFloat = 0.5 - r"""Cognitive parameter :math:`c_1` controlling attraction to personal best.""" - - social_parameter: PositiveFloat = 0.3 - r"""Social parameter :math:`c_2` controlling attraction to neighborhood best.""" - - inertia_weight: PositiveFloat = 0.9 - r"""Inertia weight :math:`w` controlling momentum.""" + options: PSOOptions = PSOOptions() + """PSO hyperparameters controlling particle behavior.""" topology: TopologyConfig = "star" """Topology structure for particle communication. 
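Since the hyperparameters and the topology are now plain dataclasses, a configured algorithm can be built directly from them. The short sketch below uses only names defined in this file; passing the resulting instance as the algorithm argument of om.minimize is an assumption and not shown here.

from optimagic.optimizers.pyswarms_optimizers import (
    PSOOptions,
    PySwarmsGeneralPSO,
    RingTopology,
)

# General PSO with a ring topology and custom cognitive/social/inertia weights.
algo = PySwarmsGeneralPSO(
    n_particles=40,
    options=PSOOptions(
        cognitive_parameter=2.05,
        social_parameter=2.05,
        inertia_weight=0.7,
    ),
    topology=RingTopology(k_neighbors=5, p_norm=1),
)
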
@@ -699,12 +663,8 @@ def _solve_internal_problem( # Resolve topology config to PySwarms topology instance and options pyswarms_topology, topology_options = _resolve_topology_config(self.topology) - base_options = { - "c1": self.cognitive_parameter, - "c2": self.social_parameter, - "w": self.inertia_weight, - } - options = {**base_options, **topology_options} + base_options = _pso_options_to_dict(self.options) + pso_options_dict = {**base_options, **topology_options} velocity_clamp = _build_velocity_clamp( self.velocity_clamp_min, self.velocity_clamp_max @@ -724,7 +684,7 @@ def _solve_internal_problem( optimizer = ps.single.GeneralOptimizerPSO( n_particles=self.n_particles, dimensions=len(x0), - options=options, + options=pso_options_dict, topology=pyswarms_topology, bounds=bounds, oh_strategy=self.oh_strategy, @@ -790,9 +750,9 @@ def _resolve_topology_config( return topology_instance, options -def _build_pso_options_dict(options: BasePSOOptions) -> dict[str, float | int]: - """Convert structured PSO options to PySwarms format.""" - base_options = { +def _pso_options_to_dict(options: PSOOptions) -> dict[str, float | int]: + """Convert option parameters to PySwarms format.""" + pso_options = { "c1": options.cognitive_parameter, "c2": options.social_parameter, "w": options.inertia_weight, @@ -800,10 +760,10 @@ def _build_pso_options_dict(options: BasePSOOptions) -> dict[str, float | int]: # Add topology-specific options for local-best PSO if isinstance(options, LocalBestPSOOptions): - base_options["k"] = options.k_neighbors - base_options["p"] = options.p_norm + pso_options["k"] = options.k_neighbors + pso_options["p"] = options.p_norm - return base_options + return pso_options def _build_velocity_clamp( From ef7bace0f0077124231566cff103dd17b84b7139 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 29 Aug 2025 06:04:47 +0000 Subject: [PATCH 28/40] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .tools/envs/testenv-nevergrad.yml | 2 +- .tools/envs/testenv-plotly.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.tools/envs/testenv-nevergrad.yml b/.tools/envs/testenv-nevergrad.yml index 3c97d7f6e..a87b37eb6 100644 --- a/.tools/envs/testenv-nevergrad.yml +++ b/.tools/envs/testenv-nevergrad.yml @@ -41,7 +41,7 @@ dependencies: - types-openpyxl # dev, tests - types-jinja2 # dev, tests - sqlalchemy-stubs # dev, tests - - sphinxcontrib-mermaid # dev, tests, docs - bayesian_optimization==1.4.0 - nevergrad + - sphinxcontrib-mermaid # dev, tests, docs - -e ../../ diff --git a/.tools/envs/testenv-plotly.yml b/.tools/envs/testenv-plotly.yml index 3100e3cc2..bb1c80629 100644 --- a/.tools/envs/testenv-plotly.yml +++ b/.tools/envs/testenv-plotly.yml @@ -41,6 +41,6 @@ dependencies: - types-openpyxl # dev, tests - types-jinja2 # dev, tests - sqlalchemy-stubs # dev, tests - - sphinxcontrib-mermaid # dev, tests, docs - kaleido<0.3 + - sphinxcontrib-mermaid # dev, tests, docs - -e ../../ From 5ed744ffb48e7709f8e3c79ea519f535927478b8 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 29 Aug 2025 07:40:32 +0000 Subject: [PATCH 29/40] refactor: rename initial_population to initial_positions and add PyTree support --- .../optimizers/pyswarms_optimizers.py | 68 ++++++++++++------- 1 file changed, 43 insertions(+), 25 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py 
b/src/optimagic/optimizers/pyswarms_optimizers.py index 61d287fd7..9d5e00d69 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -31,6 +31,7 @@ NonNegativeFloat, PositiveFloat, PositiveInt, + PyTree, ) PYSWARMS_NOT_INSTALLED_ERROR = ( @@ -236,11 +237,11 @@ class PySwarmsGlobalBestPSO(Algorithm): stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" - initial_population: NDArray[np.float64] | None = None + initial_positions: list[PyTree] | None = None """Option to set the initial particle positions. - If None, the population is generated randomly within the given bounds , or within - [0, 1] if bounds are not specified. + If None, positions are generated randomly within the given bounds, or within [0, 1] + if bounds are not specified. """ @@ -302,10 +303,15 @@ def _solve_internal_problem( bounds = _get_pyswarms_bounds(problem.bounds) - if self.initial_population is not None: - init_pos = self.initial_population + if self.initial_positions is not None: + init_pos = np.array( + [ + problem.converter.params_to_internal(position) + for position in self.initial_positions + ] + ) else: - init_pos = _create_initial_population( + init_pos = _create_initial_positions( x0=x0, n_particles=self.n_particles, bounds=bounds, @@ -408,11 +414,11 @@ class PySwarmsLocalBestPSO(Algorithm): stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" - initial_population: NDArray[np.float64] | None = None + initial_positions: list[PyTree] | None = None """Option to set the initial particle positions. - If None, the population is generated randomly within the given bounds , or within - [0, 1] if bounds are not specified. + If None, positions are generated randomly within the given bounds, or within [0, 1] + if bounds are not specified. """ @@ -482,10 +488,15 @@ def _solve_internal_problem( bounds = _get_pyswarms_bounds(problem.bounds) - if self.initial_population is not None: - init_pos = self.initial_population + if self.initial_positions is not None: + init_pos = np.array( + [ + problem.converter.params_to_internal(position) + for position in self.initial_positions + ] + ) else: - init_pos = _create_initial_population( + init_pos = _create_initial_positions( x0=x0, n_particles=self.n_particles, bounds=bounds, @@ -602,11 +613,11 @@ class PySwarmsGeneralPSO(Algorithm): stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" - initial_population: NDArray[np.float64] | None = None + initial_positions: list[PyTree] | None = None """Option to set the initial particle positions. - If None, the population is generated randomly within the given bounds , or within - [0, 1] if bounds are not specified. + If None, positions are generated randomly within the given bounds, or within [0, 1] + if bounds are not specified. 
""" @@ -671,10 +682,15 @@ def _solve_internal_problem( ) bounds = _get_pyswarms_bounds(problem.bounds) - if self.initial_population is not None: - init_pos = self.initial_population + if self.initial_positions is not None: + init_pos = np.array( + [ + problem.converter.params_to_internal(position) + for position in self.initial_positions + ] + ) else: - init_pos = _create_initial_population( + init_pos = _create_initial_positions( x0=x0, n_particles=self.n_particles, bounds=bounds, @@ -802,17 +818,18 @@ def _create_batch_objective( ) -> Callable[[NDArray[np.float64]], NDArray[np.float64]]: """Return an batch objective function.""" - def batch_objective(x: NDArray[np.float64]) -> NDArray[np.float64]: - """Compute objective values for all particles in x. + def batch_objective(positions: NDArray[np.float64]) -> NDArray[np.float64]: + """Compute objective values for all particles in positions. Args: - x: 2D array of shape (n_particles, n_dimensions) with particle positions. + positions: 2D array of shape (n_particles, n_dimensions) with + particle positions. Returns: 1D array of shape (n_particles,) with objective values. """ - arguments = [xi for xi in x] + arguments = [position for position in positions] results = problem.batch_fun(arguments, n_cores=n_cores) return np.array(results) @@ -837,13 +854,13 @@ def _get_pyswarms_bounds( return pyswarms_bounds -def _create_initial_population( +def _create_initial_positions( x0: NDArray[np.float64], n_particles: int, bounds: tuple[NDArray[np.float64], NDArray[np.float64]] | None, center: float = 1.0, ) -> NDArray[np.float64]: - """Create an initial swarm population. + """Create an initial swarm positions. Args: x0: Initial parameter vector @@ -852,7 +869,8 @@ def _create_initial_population( center: Scaling factor for initial particle positions around bounds Returns: - Initial population array of shape (n_particles, n_dimensions) + Initial positions array of shape (n_particles, n_dimensions) + where each row represents one particle's starting position. 
""" n_dimensions = len(x0) From 1ad442461ebb36cfe88334d8d996e6164010f566 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 29 Aug 2025 12:34:11 +0000 Subject: [PATCH 30/40] refactor: use common internal function for all PySwarms optimizers --- .../optimizers/pyswarms_optimizers.py | 280 ++++++++++-------- 1 file changed, 150 insertions(+), 130 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 9d5e00d69..03d23020d 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -290,58 +290,32 @@ def _solve_internal_problem( if not IS_PYSWARMS_INSTALLED: raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) - if self.seed is not None: - np.random.seed(self.seed) - import pyswarms as ps pso_options_dict = _pso_options_to_dict(self.options) + optimizer_kwargs = {"options": pso_options_dict} - velocity_clamp = _build_velocity_clamp( - self.velocity_clamp_min, self.velocity_clamp_max - ) - - bounds = _get_pyswarms_bounds(problem.bounds) - - if self.initial_positions is not None: - init_pos = np.array( - [ - problem.converter.params_to_internal(position) - for position in self.initial_positions - ] - ) - else: - init_pos = _create_initial_positions( - x0=x0, - n_particles=self.n_particles, - bounds=bounds, - center=self.center_init, - ) - - optimizer = ps.single.GlobalBestPSO( + res = _pyswarms_internal( + problem=problem, + x0=x0, + optimizer_class=ps.single.GlobalBestPSO, + optimizer_kwargs=optimizer_kwargs, n_particles=self.n_particles, - dimensions=len(x0), - options=pso_options_dict, - bounds=bounds, + convergence_ftol_rel=self.convergence_ftol_rel, + convergence_ftol_iter=self.convergence_ftol_iter, + stopping_maxiter=self.stopping_maxiter, + initial_positions=self.initial_positions, oh_strategy=self.oh_strategy, - bh_strategy=self.boundary_strategy, - velocity_clamp=velocity_clamp, - vh_strategy=self.velocity_strategy, - ftol=self.convergence_ftol_rel, - ftol_iter=self.convergence_ftol_iter, - init_pos=init_pos, - ) - - objective_wrapper = _create_batch_objective(problem, self.n_cores) - - result = optimizer.optimize( - objective_func=objective_wrapper, - iters=self.stopping_maxiter, + boundary_strategy=self.boundary_strategy, + velocity_strategy=self.velocity_strategy, + velocity_clamp_min=self.velocity_clamp_min, + velocity_clamp_max=self.velocity_clamp_max, + n_cores=self.n_cores, + center_init=self.center_init, verbose=self.verbose, + seed=self.seed, ) - res = _process_pyswarms_result(result=result, optimizer=optimizer) - return res @@ -475,59 +449,35 @@ def _solve_internal_problem( if not IS_PYSWARMS_INSTALLED: raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) - if self.seed is not None: - np.random.seed(self.seed) - import pyswarms as ps pso_options_dict = _pso_options_to_dict(self.options) + optimizer_kwargs = { + "options": pso_options_dict, + "static": self.static_topology, + } - velocity_clamp = _build_velocity_clamp( - self.velocity_clamp_min, self.velocity_clamp_max - ) - - bounds = _get_pyswarms_bounds(problem.bounds) - - if self.initial_positions is not None: - init_pos = np.array( - [ - problem.converter.params_to_internal(position) - for position in self.initial_positions - ] - ) - else: - init_pos = _create_initial_positions( - x0=x0, - n_particles=self.n_particles, - bounds=bounds, - center=self.center_init, - ) - - optimizer = ps.single.LocalBestPSO( + res = _pyswarms_internal( + 
problem=problem, + x0=x0, + optimizer_class=ps.single.LocalBestPSO, + optimizer_kwargs=optimizer_kwargs, n_particles=self.n_particles, - dimensions=len(x0), - options=pso_options_dict, - bounds=bounds, + convergence_ftol_rel=self.convergence_ftol_rel, + convergence_ftol_iter=self.convergence_ftol_iter, + stopping_maxiter=self.stopping_maxiter, + initial_positions=self.initial_positions, oh_strategy=self.oh_strategy, - bh_strategy=self.boundary_strategy, - velocity_clamp=velocity_clamp, - vh_strategy=self.velocity_strategy, - ftol=self.convergence_ftol_rel, - ftol_iter=self.convergence_ftol_iter, - init_pos=init_pos, - static=self.static_topology, - ) - - objective_wrapper = _create_batch_objective(problem, self.n_cores) - - result = optimizer.optimize( - objective_func=objective_wrapper, - iters=self.stopping_maxiter, + boundary_strategy=self.boundary_strategy, + velocity_strategy=self.velocity_strategy, + velocity_clamp_min=self.velocity_clamp_min, + velocity_clamp_max=self.velocity_clamp_max, + n_cores=self.n_cores, + center_init=self.center_init, verbose=self.verbose, + seed=self.seed, ) - res = _process_pyswarms_result(result=result, optimizer=optimizer) - return res @@ -666,63 +616,133 @@ def _solve_internal_problem( if not IS_PYSWARMS_INSTALLED: raise NotInstalledError(PYSWARMS_NOT_INSTALLED_ERROR) - if self.seed is not None: - np.random.seed(self.seed) - import pyswarms as ps - # Resolve topology config to PySwarms topology instance and options pyswarms_topology, topology_options = _resolve_topology_config(self.topology) - base_options = _pso_options_to_dict(self.options) pso_options_dict = {**base_options, **topology_options} - velocity_clamp = _build_velocity_clamp( - self.velocity_clamp_min, self.velocity_clamp_max - ) - bounds = _get_pyswarms_bounds(problem.bounds) - - if self.initial_positions is not None: - init_pos = np.array( - [ - problem.converter.params_to_internal(position) - for position in self.initial_positions - ] - ) - else: - init_pos = _create_initial_positions( - x0=x0, - n_particles=self.n_particles, - bounds=bounds, - center=self.center_init, - ) - - optimizer = ps.single.GeneralOptimizerPSO( + optimizer_kwargs = { + "options": pso_options_dict, + "topology": pyswarms_topology, + } + + res = _pyswarms_internal( + problem=problem, + x0=x0, + optimizer_class=ps.single.GeneralOptimizerPSO, + optimizer_kwargs=optimizer_kwargs, n_particles=self.n_particles, - dimensions=len(x0), - options=pso_options_dict, - topology=pyswarms_topology, - bounds=bounds, + convergence_ftol_rel=self.convergence_ftol_rel, + convergence_ftol_iter=self.convergence_ftol_iter, + stopping_maxiter=self.stopping_maxiter, + initial_positions=self.initial_positions, oh_strategy=self.oh_strategy, - bh_strategy=self.boundary_strategy, - velocity_clamp=velocity_clamp, - vh_strategy=self.velocity_strategy, - ftol=self.convergence_ftol_rel, - ftol_iter=self.convergence_ftol_iter, - init_pos=init_pos, + boundary_strategy=self.boundary_strategy, + velocity_strategy=self.velocity_strategy, + velocity_clamp_min=self.velocity_clamp_min, + velocity_clamp_max=self.velocity_clamp_max, + n_cores=self.n_cores, + center_init=self.center_init, + verbose=self.verbose, + seed=self.seed, ) - objective_wrapper = _create_batch_objective(problem, self.n_cores) + return res - result = optimizer.optimize( - objective_func=objective_wrapper, - iters=self.stopping_maxiter, - verbose=self.verbose, + +def _pyswarms_internal( + problem: InternalOptimizationProblem, + x0: NDArray[np.float64], + optimizer_class: Any, + 
optimizer_kwargs: dict[str, Any], + n_particles: int, + convergence_ftol_rel: float, + convergence_ftol_iter: int, + stopping_maxiter: int, + initial_positions: list[PyTree] | None, + oh_strategy: dict[str, str] | None, + boundary_strategy: str, + velocity_strategy: str, + velocity_clamp_min: float | None, + velocity_clamp_max: float | None, + n_cores: int, + center_init: float, + verbose: bool, + seed: int | None, +) -> InternalOptimizeResult: + """Internal function for PySwarms optimization. + + Args: + problem: Internal optimization problem + x0: Initial parameter vector + optimizer_class: PySwarms optimizer class to use + optimizer_kwargs: Arguments for optimizer class + n_particles: Number of particles in the swarm + convergence_ftol_rel: Relative tolerance for convergence detection + convergence_ftol_iter: Number of iterations for convergence check + stopping_maxiter: Maximum number of iterations before stopping + initial_positions: User-provided initial positions for particles + oh_strategy: Options handling strategy + boundary_strategy: Strategy for handling boundary constraints + velocity_strategy: Strategy for velocity updates + velocity_clamp_min: Minimum velocity bound + velocity_clamp_max: Maximum velocity bound + n_cores: Number of cores for parallel evaluation + center_init: Scaling factor for initial particle positions + verbose: Enable verbose output during optimization + seed: Random seed for reproducibility + + Returns: + InternalOptimizeResult: Internal optimization result + + """ + if seed is not None: + np.random.seed(seed) + + velocity_clamp = _build_velocity_clamp(velocity_clamp_min, velocity_clamp_max) + bounds = _get_pyswarms_bounds(problem.bounds) + + if initial_positions is not None: + init_pos = np.array( + [ + problem.converter.params_to_internal(position) + for position in initial_positions + ] + ) + else: + init_pos = _create_initial_positions( + x0=x0, + n_particles=n_particles, + bounds=bounds, + center=center_init, ) - res = _process_pyswarms_result(result=result, optimizer=optimizer) + optimizer = optimizer_class( + n_particles=n_particles, + dimensions=len(x0), + bounds=bounds, + velocity_clamp=velocity_clamp, + init_pos=init_pos, + ftol=convergence_ftol_rel, + ftol_iter=convergence_ftol_iter, + bh_strategy=boundary_strategy, + vh_strategy=velocity_strategy, + oh_strategy=oh_strategy, + **optimizer_kwargs, + ) - return res + objective_wrapper = _create_batch_objective(problem, n_cores) + + result = optimizer.optimize( + objective_func=objective_wrapper, + iters=stopping_maxiter, + verbose=verbose, + ) + + res = _process_pyswarms_result(result=result, optimizer=optimizer) + + return res def _resolve_topology_config( From ea54d692b9a73c7e16c18f5c3b0c7377aa92c5af Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 29 Aug 2025 13:02:47 +0000 Subject: [PATCH 31/40] refactor: move ring topology params into RingTopology dataclass --- .../optimizers/pyswarms_optimizers.py | 45 ++++++++----------- 1 file changed, 18 insertions(+), 27 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 03d23020d..99e0d6be6 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -156,17 +156,6 @@ class PSOOptions: """Inertia weight (w) - momentum control.""" -@dataclass(frozen=True) -class LocalBestPSOOptions(PSOOptions): - """Local Best PSO specific parameters.""" - - k_neighbors: PositiveInt = 3 
- """Number of neighbors in local neighborhood.""" - - p_norm: Literal[1, 2] = 2 - """Distance metric for neighbor selection (1=Manhattan, 2=Euclidean).""" - - @mark.minimizer( name="pyswarms_global_best", solver_type=AggregationLevel.SCALAR, @@ -376,9 +365,18 @@ class PySwarmsLocalBestPSO(Algorithm): n_particles: PositiveInt = 50 """Number of particles in the swarm.""" - options: LocalBestPSOOptions = LocalBestPSOOptions() + options: PSOOptions = PSOOptions() """"PSO hyperparameters controlling particle behavior.""" + topology: RingTopology = RingTopology() + """Configuration for the Ring topology. + + This algorithm uses a fixed ring topology where particles are connected to their + local neighbors. This parameter allows customization of the number of neighbors, + distance metric, and whether the topology remains static throughout optimization. + + """ + convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL """Stop when relative change in objective function is less than this value.""" @@ -429,14 +427,6 @@ class PySwarmsLocalBestPSO(Algorithm): center_init: PositiveFloat = 1.0 """Scaling factor for initial particle positions.""" - static_topology: bool = False - """Whether to use static or dynamic ring topology. - - When True, the neighborhood structure is fixed throughout optimization. When False, - neighbors are recomputed at each iteration based on current particle positions. - - """ - verbose: bool = False """Enable or disable the logs and progress bar.""" @@ -451,10 +441,16 @@ def _solve_internal_problem( import pyswarms as ps - pso_options_dict = _pso_options_to_dict(self.options) + base_options = _pso_options_to_dict(self.options) + topology_options = { + "k": self.topology.k_neighbors, + "p": self.topology.p_norm, + } + pso_options_dict = {**base_options, **topology_options} + optimizer_kwargs = { "options": pso_options_dict, - "static": self.static_topology, + "static": self.topology.static, } res = _pyswarms_internal( @@ -794,11 +790,6 @@ def _pso_options_to_dict(options: PSOOptions) -> dict[str, float | int]: "w": options.inertia_weight, } - # Add topology-specific options for local-best PSO - if isinstance(options, LocalBestPSOOptions): - pso_options["k"] = options.k_neighbors - pso_options["p"] = options.p_norm - return pso_options From d825e16e2bb65c2ab63620f82a91fd7604bf9067 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 29 Aug 2025 14:15:52 +0000 Subject: [PATCH 32/40] refactor: use local random-state and fix arrangement --- .../optimizers/pyswarms_optimizers.py | 164 +++++++++--------- 1 file changed, 83 insertions(+), 81 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 99e0d6be6..4a2ae9db0 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -217,12 +217,6 @@ class PySwarmsGlobalBestPSO(Algorithm): options: PSOOptions = PSOOptions() """PSO hyperparameters controlling particle behavior.""" - convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL - """Stop when relative change in objective function is less than this value.""" - - convergence_ftol_iter: PositiveInt = 1 - """Number of iterations to check for convergence.""" - stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" @@ -261,6 +255,12 @@ class PySwarmsGlobalBestPSO(Algorithm): velocity_clamp_max: float | None = None """Maximum velocity limit for particles.""" + 
convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL + """Stop when relative change in objective function is less than this value.""" + + convergence_ftol_iter: PositiveInt = 1 + """Number of iterations to check for convergence.""" + n_cores: PositiveInt = 1 """Number of cores for parallel evaluation.""" @@ -290,8 +290,6 @@ def _solve_internal_problem( optimizer_class=ps.single.GlobalBestPSO, optimizer_kwargs=optimizer_kwargs, n_particles=self.n_particles, - convergence_ftol_rel=self.convergence_ftol_rel, - convergence_ftol_iter=self.convergence_ftol_iter, stopping_maxiter=self.stopping_maxiter, initial_positions=self.initial_positions, oh_strategy=self.oh_strategy, @@ -299,6 +297,8 @@ def _solve_internal_problem( velocity_strategy=self.velocity_strategy, velocity_clamp_min=self.velocity_clamp_min, velocity_clamp_max=self.velocity_clamp_max, + convergence_ftol_rel=self.convergence_ftol_rel, + convergence_ftol_iter=self.convergence_ftol_iter, n_cores=self.n_cores, center_init=self.center_init, verbose=self.verbose, @@ -366,7 +366,7 @@ class PySwarmsLocalBestPSO(Algorithm): """Number of particles in the swarm.""" options: PSOOptions = PSOOptions() - """"PSO hyperparameters controlling particle behavior.""" + """PSO hyperparameters controlling particle behavior.""" topology: RingTopology = RingTopology() """Configuration for the Ring topology. @@ -377,12 +377,6 @@ class PySwarmsLocalBestPSO(Algorithm): """ - convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL - """Stop when relative change in objective function is less than this value.""" - - convergence_ftol_iter: PositiveInt = 1 - """Number of iterations to check for convergence.""" - stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" @@ -421,6 +415,12 @@ class PySwarmsLocalBestPSO(Algorithm): velocity_clamp_max: float | None = None """Maximum velocity limit for particles.""" + convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL + """Stop when relative change in objective function is less than this value.""" + + convergence_ftol_iter: PositiveInt = 1 + """Number of iterations to check for convergence.""" + n_cores: PositiveInt = 1 """Number of cores for parallel evaluation.""" @@ -459,8 +459,6 @@ def _solve_internal_problem( optimizer_class=ps.single.LocalBestPSO, optimizer_kwargs=optimizer_kwargs, n_particles=self.n_particles, - convergence_ftol_rel=self.convergence_ftol_rel, - convergence_ftol_iter=self.convergence_ftol_iter, stopping_maxiter=self.stopping_maxiter, initial_positions=self.initial_positions, oh_strategy=self.oh_strategy, @@ -468,6 +466,8 @@ def _solve_internal_problem( velocity_strategy=self.velocity_strategy, velocity_clamp_min=self.velocity_clamp_min, velocity_clamp_max=self.velocity_clamp_max, + convergence_ftol_rel=self.convergence_ftol_rel, + convergence_ftol_iter=self.convergence_ftol_iter, n_cores=self.n_cores, center_init=self.center_init, verbose=self.verbose, @@ -550,12 +550,6 @@ class PySwarmsGeneralPSO(Algorithm): """ - convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL - """Stop when relative change in objective function is less than this value.""" - - convergence_ftol_iter: PositiveInt = 1 - """Number of iterations to check for convergence.""" - stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" @@ -594,6 +588,12 @@ class PySwarmsGeneralPSO(Algorithm): velocity_clamp_max: float | None = None """Maximum velocity limit for particles.""" + convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL + 
"""Stop when relative change in objective function is less than this value.""" + + convergence_ftol_iter: PositiveInt = 1 + """Number of iterations to check for convergence.""" + n_cores: PositiveInt = 1 """Number of cores for parallel evaluation.""" @@ -629,8 +629,6 @@ def _solve_internal_problem( optimizer_class=ps.single.GeneralOptimizerPSO, optimizer_kwargs=optimizer_kwargs, n_particles=self.n_particles, - convergence_ftol_rel=self.convergence_ftol_rel, - convergence_ftol_iter=self.convergence_ftol_iter, stopping_maxiter=self.stopping_maxiter, initial_positions=self.initial_positions, oh_strategy=self.oh_strategy, @@ -638,6 +636,8 @@ def _solve_internal_problem( velocity_strategy=self.velocity_strategy, velocity_clamp_min=self.velocity_clamp_min, velocity_clamp_max=self.velocity_clamp_max, + convergence_ftol_rel=self.convergence_ftol_rel, + convergence_ftol_iter=self.convergence_ftol_iter, n_cores=self.n_cores, center_init=self.center_init, verbose=self.verbose, @@ -653,8 +653,6 @@ def _pyswarms_internal( optimizer_class: Any, optimizer_kwargs: dict[str, Any], n_particles: int, - convergence_ftol_rel: float, - convergence_ftol_iter: int, stopping_maxiter: int, initial_positions: list[PyTree] | None, oh_strategy: dict[str, str] | None, @@ -662,6 +660,8 @@ def _pyswarms_internal( velocity_strategy: str, velocity_clamp_min: float | None, velocity_clamp_max: float | None, + convergence_ftol_rel: float, + convergence_ftol_iter: int, n_cores: int, center_init: float, verbose: bool, @@ -675,8 +675,6 @@ def _pyswarms_internal( optimizer_class: PySwarms optimizer class to use optimizer_kwargs: Arguments for optimizer class n_particles: Number of particles in the swarm - convergence_ftol_rel: Relative tolerance for convergence detection - convergence_ftol_iter: Number of iterations for convergence check stopping_maxiter: Maximum number of iterations before stopping initial_positions: User-provided initial positions for particles oh_strategy: Options handling strategy @@ -684,6 +682,8 @@ def _pyswarms_internal( velocity_strategy: Strategy for velocity updates velocity_clamp_min: Minimum velocity bound velocity_clamp_max: Maximum velocity bound + convergence_ftol_rel: Relative tolerance for convergence detection + convergence_ftol_iter: Number of iterations for convergence check n_cores: Number of cores for parallel evaluation center_init: Scaling factor for initial particle positions verbose: Enable verbose output during optimization @@ -693,8 +693,7 @@ def _pyswarms_internal( InternalOptimizeResult: Internal optimization result """ - if seed is not None: - np.random.seed(seed) + rng = np.random.default_rng(seed) velocity_clamp = _build_velocity_clamp(velocity_clamp_min, velocity_clamp_max) bounds = _get_pyswarms_bounds(problem.bounds) @@ -712,6 +711,7 @@ def _pyswarms_internal( n_particles=n_particles, bounds=bounds, center=center_init, + rng=rng, ) optimizer = optimizer_class( @@ -803,51 +803,6 @@ def _build_velocity_clamp( return clamp -def _process_pyswarms_result( - result: tuple[float, NDArray[np.float64]], optimizer: Any -) -> InternalOptimizeResult: - """Convert PySwarms result to optimagic format.""" - best_cost, best_position = result - n_iterations = len(optimizer.cost_history) - n_particles = optimizer.n_particles - - return InternalOptimizeResult( - x=best_position, - fun=best_cost, - success=True, - message="PySwarms optimization completed", - n_fun_evals=n_particles * n_iterations, - n_jac_evals=0, - n_hess_evals=0, - n_iterations=n_iterations, - ) - - -def _create_batch_objective( 
- problem: InternalOptimizationProblem, - n_cores: int, -) -> Callable[[NDArray[np.float64]], NDArray[np.float64]]: - """Return an batch objective function.""" - - def batch_objective(positions: NDArray[np.float64]) -> NDArray[np.float64]: - """Compute objective values for all particles in positions. - - Args: - positions: 2D array of shape (n_particles, n_dimensions) with - particle positions. - - Returns: - 1D array of shape (n_particles,) with objective values. - - """ - arguments = [position for position in positions] - results = problem.batch_fun(arguments, n_cores=n_cores) - - return np.array(results) - - return batch_objective - - def _get_pyswarms_bounds( bounds: InternalBounds, ) -> tuple[NDArray[np.float64], NDArray[np.float64]] | None: @@ -869,15 +824,17 @@ def _create_initial_positions( x0: NDArray[np.float64], n_particles: int, bounds: tuple[NDArray[np.float64], NDArray[np.float64]] | None, - center: float = 1.0, + center: float, + rng: np.random.Generator, ) -> NDArray[np.float64]: """Create an initial swarm positions. Args: - x0: Initial parameter vector - n_particles: Number of particles in the swarm + x0: Initial parameter vector. + n_particles: Number of particles in the swarm. bounds: Tuple of (lower_bounds, upper_bounds) arrays or None. - center: Scaling factor for initial particle positions around bounds + center: Scaling factor for initial particle positions around bounds. + rng: NumPy random number generator instance. Returns: Initial positions array of shape (n_particles, n_dimensions) @@ -892,7 +849,7 @@ def _create_initial_positions( lower_bounds, upper_bounds = bounds # Generate random initial positions within the bounds, scaled by center - init_pos = center * np.random.uniform( + init_pos = center * rng.uniform( low=lower_bounds, high=upper_bounds, size=(n_particles, n_dimensions) ) @@ -900,3 +857,48 @@ def _create_initial_positions( init_pos[0] = np.clip(x0, lower_bounds, upper_bounds) return init_pos + + +def _create_batch_objective( + problem: InternalOptimizationProblem, + n_cores: int, +) -> Callable[[NDArray[np.float64]], NDArray[np.float64]]: + """Return an batch objective function.""" + + def batch_objective(positions: NDArray[np.float64]) -> NDArray[np.float64]: + """Compute objective values for all particles in positions. + + Args: + positions: 2D array of shape (n_particles, n_dimensions) with + particle positions. + + Returns: + 1D array of shape (n_particles,) with objective values. 
+ + """ + arguments = [position for position in positions] + results = problem.batch_fun(arguments, n_cores=n_cores) + + return np.array(results) + + return batch_objective + + +def _process_pyswarms_result( + result: tuple[float, NDArray[np.float64]], optimizer: Any +) -> InternalOptimizeResult: + """Convert PySwarms result to optimagic format.""" + best_cost, best_position = result + n_iterations = len(optimizer.cost_history) + n_particles = optimizer.n_particles + + return InternalOptimizeResult( + x=best_position, + fun=best_cost, + success=True, + message="PySwarms optimization completed", + n_fun_evals=n_particles * n_iterations, + n_jac_evals=0, + n_hess_evals=0, + n_iterations=n_iterations, + ) From f10124096e1b390afd3c19fa7dc2143f8c0410fa Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 29 Aug 2025 16:18:09 +0000 Subject: [PATCH 33/40] fix: rename VonNeumannTopology.range to range_param --- src/optimagic/optimizers/pyswarms_optimizers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 4a2ae9db0..68a93e41d 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -95,7 +95,7 @@ class VonNeumannTopology(Topology): p_norm: Literal[1, 2] = 2 """Distance metric for neighbor selection: 1 (Manhattan), 2 (Euclidean).""" - range: PositiveInt = 1 + range_param: PositiveInt = 1 r"""Range parameter :math:`r` for neighborhood size.""" @@ -770,7 +770,7 @@ def _resolve_topology_config( options = {"k": config.k_neighbors, "p": config.p_norm} elif isinstance(config, VonNeumannTopology): topology_instance = VonNeumann() - options = {"p": config.p_norm, "r": config.range} + options = {"p": config.p_norm, "r": config.range_param} elif isinstance(config, RandomTopology): topology_instance = Random(static=config.static) options = {"k": config.k_neighbors} From ad53c4d435400556d4b7801fe33917dfa0513295 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Fri, 29 Aug 2025 16:25:31 +0000 Subject: [PATCH 34/40] test: add tests for PySwarms helper functions --- .../optimizers/test_pyswarms_optimizers.py | 224 ++++++++++++++++++ 1 file changed, 224 insertions(+) create mode 100644 tests/optimagic/optimizers/test_pyswarms_optimizers.py diff --git a/tests/optimagic/optimizers/test_pyswarms_optimizers.py b/tests/optimagic/optimizers/test_pyswarms_optimizers.py new file mode 100644 index 000000000..7eda24a8b --- /dev/null +++ b/tests/optimagic/optimizers/test_pyswarms_optimizers.py @@ -0,0 +1,224 @@ +"""Test helper functions in PySwarms optimizers.""" + +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from optimagic.config import IS_PYSWARMS_INSTALLED +from optimagic.optimization.internal_optimization_problem import InternalBounds +from optimagic.optimizers.pyswarms_optimizers import ( + PSOOptions, + PyramidTopology, + RandomTopology, + RingTopology, + StarTopology, + VonNeumannTopology, + _build_velocity_clamp, + _create_initial_positions, + _get_pyswarms_bounds, + _pso_options_to_dict, + _resolve_topology_config, +) + +RNG = np.random.default_rng(12345) + + +# Test _pso_options_to_dict +def test_pso_options_to_dict_default(): + """Test PSO options conversion with default values.""" + options = PSOOptions() + result = _pso_options_to_dict(options) + + expected = { + "c1": 0.5, + "c2": 0.3, + "w": 0.9, + } 
+ assert result == expected + + +def test_pso_options_to_dict_custom(): + """Test PSO options conversion with custom values.""" + options = PSOOptions( + cognitive_parameter=1.5, + social_parameter=2.0, + inertia_weight=0.7, + ) + result = _pso_options_to_dict(options) + + expected = { + "c1": 1.5, + "c2": 2.0, + "w": 0.7, + } + assert result == expected + + +# Test _build_velocity_clamp +def test_build_velocity_clamp_both_values(): + """Test velocity clamp with both min and max values.""" + result = _build_velocity_clamp(-1.0, 1.0) + assert result == (-1.0, 1.0) + + +def test_build_velocity_clamp_partial_values(): + """Test velocity clamp with only one value provided.""" + result = _build_velocity_clamp(-1.0, None) + assert result is None + + result = _build_velocity_clamp(None, 1.0) + assert result is None + + +def test_build_velocity_clamp_none_values(): + """Test velocity clamp with None values.""" + result = _build_velocity_clamp(None, None) + assert result is None + + +# Test _get_pyswarms_bounds +def test_get_pyswarms_bounds_with_both(): + """Test bounds conversion when both lower and upper bounds are provided.""" + bounds = InternalBounds(lower=np.array([-2.0, -3.0]), upper=np.array([5.0, 4.0])) + + result = _get_pyswarms_bounds(bounds) + + assert result is not None + lower, upper = result + assert_array_equal(lower, np.array([-2.0, -3.0])) + assert_array_equal(upper, np.array([5.0, 4.0])) + + +def test_get_pyswarms_bounds_with_none(): + """Test bounds conversion when no bounds are provided.""" + bounds = InternalBounds(lower=None, upper=None) + + result = _get_pyswarms_bounds(bounds) + assert result is None + + +def test_get_pyswarms_bounds_partial_bounds(): + """Test bounds conversion with only one bound provided.""" + # Only lower bounds + bounds = InternalBounds(lower=np.array([1.0, 2.0]), upper=None) + result = _get_pyswarms_bounds(bounds) + assert result is None + + # Only upper bounds + bounds = InternalBounds(lower=None, upper=np.array([3.0, 4.0])) + result = _get_pyswarms_bounds(bounds) + assert result is None + + +def test_get_pyswarms_bounds_with_infinite(): + """Test that infinite bounds raise ValueError.""" + bounds = InternalBounds( + lower=np.array([-np.inf, -1.0]), upper=np.array([1.0, np.inf]) + ) + + with pytest.raises(ValueError, match="PySwarms does not support infinite bounds"): + _get_pyswarms_bounds(bounds) + + +# Test _create_initial_positions +@pytest.mark.parametrize("center", [0.5, 1.0, 2.0]) +def test_create_initial_positions_basic(center): + """Test basic initial positions creation.""" + x0 = np.array([1.0, 2.0]) + n_particles = 5 + bounds = (np.array([-5.0, -5.0]), np.array([5.0, 5.0])) + + init_pos = _create_initial_positions( + x0=x0, n_particles=n_particles, bounds=bounds, center=center, rng=RNG + ) + + assert init_pos.shape == (5, 2) + + assert_array_equal(init_pos[0], x0) + + # Check all particles are within bounds + assert np.all(init_pos >= bounds[0]) + assert np.all(init_pos <= bounds[1]) + + +def test_create_initial_positions_no_bounds(): + """Test initial positions creation with no bounds.""" + x0 = np.array([0.5, 1.5]) + n_particles = 3 + bounds = None + + init_pos = _create_initial_positions( + x0=x0, n_particles=n_particles, bounds=bounds, center=1.0, rng=RNG + ) + + assert init_pos.shape == (3, 2) + + expected_x0 = np.array([0.5, 1.0]) + assert_array_equal(init_pos[0], expected_x0) + + assert np.all(init_pos >= 0.0) + assert np.all(init_pos <= 1.0) + + +@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason="PySwarms not installed") 
+@pytest.mark.parametrize( + ("topology_string", "expected_class_name", "expected_options"), + [ + ("star", "Star", {}), + ("ring", "Ring", {"k": 3, "p": 2}), + ("vonneumann", "VonNeumann", {"p": 2, "r": 1}), + ("random", "Random", {"k": 3}), + ("pyramid", "Pyramid", {}), + ], +) +def test_resolve_topology_config_by_string( + topology_string, expected_class_name, expected_options +): + """Test topology resolution with string names.""" + topology, options = _resolve_topology_config(topology_string) + + assert topology.__class__.__name__ == expected_class_name + assert options == expected_options + + +@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason="PySwarms not installed") +@pytest.mark.parametrize( + ("config_instance", "expected_class_name", "expected_options"), + [ + (StarTopology(), "Star", {}), + (RingTopology(k_neighbors=5, p_norm=1, static=True), "Ring", {"k": 5, "p": 1}), + ( + VonNeumannTopology(p_norm=1, range_param=2), + "VonNeumann", + {"p": 1, "r": 2}, + ), + (RandomTopology(k_neighbors=4, static=False), "Random", {"k": 4}), + (PyramidTopology(static=True), "Pyramid", {}), + ], +) +def test_resolve_topology_config_by_instance( + config_instance, expected_class_name, expected_options +): + """Test topology resolution with instances.""" + topology, options = _resolve_topology_config(config_instance) + + # Check the class name and options + assert topology.__class__.__name__ == expected_class_name + assert options == expected_options + + if hasattr(config_instance, "static"): + assert topology.static == config_instance.static + + +@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason="PySwarms not installed") +def test_resolve_topology_config_invalid_string(): + """Test topology resolution with invalid string.""" + with pytest.raises(ValueError, match="Unknown topology string: 'invalid'"): + _resolve_topology_config("invalid") + + +@pytest.mark.skipif(not IS_PYSWARMS_INSTALLED, reason="PySwarms not installed") +def test_resolve_topology_config_invalid_type(): + """Test topology resolution with invalid type.""" + with pytest.raises(TypeError, match="Unsupported topology configuration type"): + _resolve_topology_config(123) From d472b2b07c4745b874b2ddab08f51b94daa5fc38 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 3 Sep 2025 15:44:42 +0000 Subject: [PATCH 35/40] refactor: use common interface --- .../optimizers/pyswarms_optimizers.py | 424 +++++------------- 1 file changed, 117 insertions(+), 307 deletions(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 68a93e41d..0e998d961 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -9,7 +9,7 @@ from __future__ import annotations from dataclasses import dataclass -from typing import Any, Callable, Literal, Union +from typing import Any, Callable, Literal import numpy as np from numpy.typing import NDArray @@ -132,19 +132,17 @@ class RandomTopology(Topology): """ -TopologyConfig = Union[ - Literal["star", "ring", "vonneumann", "random", "pyramid"], - Topology, -] - # ====================================================================================== -# 2. 
PSO Options Classes +# Common PSO Options # ====================================================================================== @dataclass(frozen=True) -class PSOOptions: - """Common PSO parameters used by all PSO variants.""" +class PSOCommonOptions: + """Common options for PySwarms optimizers.""" + + n_particles: PositiveInt = 50 + """Number of particles in the swarm.""" cognitive_parameter: PositiveFloat = 0.5 """Cognitive parameter (c1) - attraction to personal best.""" @@ -155,68 +153,6 @@ class PSOOptions: inertia_weight: PositiveFloat = 0.9 """Inertia weight (w) - momentum control.""" - -@mark.minimizer( - name="pyswarms_global_best", - solver_type=AggregationLevel.SCALAR, - is_available=IS_PYSWARMS_INSTALLED, - is_global=True, - needs_jac=False, - needs_hess=False, - needs_bounds=False, - supports_parallelism=True, - supports_bounds=True, - supports_infinite_bounds=False, - supports_linear_constraints=False, - supports_nonlinear_constraints=False, - disable_history=False, -) -@dataclass(frozen=True) -class PySwarmsGlobalBestPSO(Algorithm): - r"""Minimize a scalar function using Global Best Particle Swarm Optimization. - - A population-based stochastic, global optimization optimization algorithm that - simulates the social behavior of bird flocking or fish schooling. Particles - (candidate solutions) move through the search space, adjusting their positions - based on their own experience (cognitive component) and the experience of their - neighbors or the entire swarm (social component). - - This implementation uses a star topology where all particles are connected to - each other, making each particle aware of the global best solution found by the - entire swarm. - - The position update follows: - - .. math:: - - x_{i}(t+1) = x_{i}(t) + v_{i}(t+1) - - The velocity update follows: - - .. 
math:: - - v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] - + c_2 r_{2j}(t)[\hat{y}_j(t) - x_{ij}(t)] - - Where: - - :math:`w`: inertia weight controlling momentum - - :math:`c_1`: cognitive parameter for attraction to personal best - - :math:`c_2`: social parameter for attraction to global best - - :math:`r_{1j}, r_{2j}`: random numbers in [0,1] - - :math:`y_{ij}(t)`: personal best position of particle i - - :math:`\hat{y}_j(t)`: global best position - - This algorithm is an adaptation of the original Particle Swarm Optimization method - by :cite:`Kennedy1995` - - """ - - n_particles: PositiveInt = 50 - """Number of particles in the swarm.""" - - options: PSOOptions = PSOOptions() - """PSO hyperparameters controlling particle behavior.""" - stopping_maxiter: PositiveInt = STOPPING_MAXITER """Maximum number of iterations.""" @@ -273,6 +209,67 @@ class PySwarmsGlobalBestPSO(Algorithm): seed: int | None = None """Random seed for reproducibility.""" + +# ====================================================================================== +# Algorithm Classes +# ====================================================================================== + + +@mark.minimizer( + name="pyswarms_global_best", + solver_type=AggregationLevel.SCALAR, + is_available=IS_PYSWARMS_INSTALLED, + is_global=True, + needs_jac=False, + needs_hess=False, + needs_bounds=False, + supports_parallelism=True, + supports_bounds=True, + supports_infinite_bounds=False, + supports_linear_constraints=False, + supports_nonlinear_constraints=False, + disable_history=False, +) +@dataclass(frozen=True) +class PySwarmsGlobalBestPSO(Algorithm, PSOCommonOptions): + r"""Minimize a scalar function using Global Best Particle Swarm Optimization. + + A population-based stochastic, global optimization optimization algorithm that + simulates the social behavior of bird flocking or fish schooling. Particles + (candidate solutions) move through the search space, adjusting their positions + based on their own experience (cognitive component) and the experience of their + neighbors or the entire swarm (social component). + + This implementation uses a star topology where all particles are connected to + each other, making each particle aware of the global best solution found by the + entire swarm. + + The position update follows: + + .. math:: + + x_{i}(t+1) = x_{i}(t) + v_{i}(t+1) + + The velocity update follows: + + .. 
math:: + + v_{ij}(t+1) = w \cdot v_{ij}(t) + c_1 r_{1j}(t)[y_{ij}(t) - x_{ij}(t)] + + c_2 r_{2j}(t)[\hat{y}_j(t) - x_{ij}(t)] + + Where: + - :math:`w`: inertia weight controlling momentum + - :math:`c_1`: cognitive parameter for attraction to personal best + - :math:`c_2`: social parameter for attraction to global best + - :math:`r_{1j}, r_{2j}`: random numbers in [0,1] + - :math:`y_{ij}(t)`: personal best position of particle i + - :math:`\hat{y}_j(t)`: global best position + + This algorithm is an adaptation of the original Particle Swarm Optimization method + by :cite:`Kennedy1995` + + """ + def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: @@ -281,7 +278,11 @@ def _solve_internal_problem( import pyswarms as ps - pso_options_dict = _pso_options_to_dict(self.options) + pso_options_dict = { + "c1": self.cognitive_parameter, + "c2": self.social_parameter, + "w": self.inertia_weight, + } optimizer_kwargs = {"options": pso_options_dict} res = _pyswarms_internal( @@ -289,20 +290,7 @@ def _solve_internal_problem( x0=x0, optimizer_class=ps.single.GlobalBestPSO, optimizer_kwargs=optimizer_kwargs, - n_particles=self.n_particles, - stopping_maxiter=self.stopping_maxiter, - initial_positions=self.initial_positions, - oh_strategy=self.oh_strategy, - boundary_strategy=self.boundary_strategy, - velocity_strategy=self.velocity_strategy, - velocity_clamp_min=self.velocity_clamp_min, - velocity_clamp_max=self.velocity_clamp_max, - convergence_ftol_rel=self.convergence_ftol_rel, - convergence_ftol_iter=self.convergence_ftol_iter, - n_cores=self.n_cores, - center_init=self.center_init, - verbose=self.verbose, - seed=self.seed, + algo_options=self, ) return res @@ -324,7 +312,7 @@ def _solve_internal_problem( disable_history=False, ) @dataclass(frozen=True) -class PySwarmsLocalBestPSO(Algorithm): +class PySwarmsLocalBestPSO(Algorithm, PSOCommonOptions): r"""Minimize a scalar function using Local Best Particle Swarm Optimization. A variant of PSO that uses local neighborhoods instead of a single global best. @@ -362,12 +350,6 @@ class PySwarmsLocalBestPSO(Algorithm): """ - n_particles: PositiveInt = 50 - """Number of particles in the swarm.""" - - options: PSOOptions = PSOOptions() - """PSO hyperparameters controlling particle behavior.""" - topology: RingTopology = RingTopology() """Configuration for the Ring topology. @@ -377,62 +359,6 @@ class PySwarmsLocalBestPSO(Algorithm): """ - stopping_maxiter: PositiveInt = STOPPING_MAXITER - """Maximum number of iterations.""" - - initial_positions: list[PyTree] | None = None - """Option to set the initial particle positions. - - If None, positions are generated randomly within the given bounds, or within [0, 1] - if bounds are not specified. - - """ - - oh_strategy: dict[str, str] | None = None - """Dictionary of strategies for time-varying options.""" - - boundary_strategy: Literal[ - "periodic", "reflective", "shrink", "random", "intermediate" - ] = "periodic" - """Strategy for handling out-of-bounds particles. - - Available options: periodic (default), - reflective, shrink, random, intermediate. - - """ - - velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for handling out-of-bounds velocities. - - Available options: unmodified (default), - adjust, invert, zero. 
- - """ - - velocity_clamp_min: float | None = None - """Minimum velocity limit for particles.""" - - velocity_clamp_max: float | None = None - """Maximum velocity limit for particles.""" - - convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL - """Stop when relative change in objective function is less than this value.""" - - convergence_ftol_iter: PositiveInt = 1 - """Number of iterations to check for convergence.""" - - n_cores: PositiveInt = 1 - """Number of cores for parallel evaluation.""" - - center_init: PositiveFloat = 1.0 - """Scaling factor for initial particle positions.""" - - verbose: bool = False - """Enable or disable the logs and progress bar.""" - - seed: int | None = None - """Random seed for reproducibility.""" - def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: @@ -441,12 +367,13 @@ def _solve_internal_problem( import pyswarms as ps - base_options = _pso_options_to_dict(self.options) - topology_options = { + pso_options_dict = { + "c1": self.cognitive_parameter, + "c2": self.social_parameter, + "w": self.inertia_weight, "k": self.topology.k_neighbors, "p": self.topology.p_norm, } - pso_options_dict = {**base_options, **topology_options} optimizer_kwargs = { "options": pso_options_dict, @@ -458,20 +385,7 @@ def _solve_internal_problem( x0=x0, optimizer_class=ps.single.LocalBestPSO, optimizer_kwargs=optimizer_kwargs, - n_particles=self.n_particles, - stopping_maxiter=self.stopping_maxiter, - initial_positions=self.initial_positions, - oh_strategy=self.oh_strategy, - boundary_strategy=self.boundary_strategy, - velocity_strategy=self.velocity_strategy, - velocity_clamp_min=self.velocity_clamp_min, - velocity_clamp_max=self.velocity_clamp_max, - convergence_ftol_rel=self.convergence_ftol_rel, - convergence_ftol_iter=self.convergence_ftol_iter, - n_cores=self.n_cores, - center_init=self.center_init, - verbose=self.verbose, - seed=self.seed, + algo_options=self, ) return res @@ -493,7 +407,7 @@ def _solve_internal_problem( disable_history=False, ) @dataclass(frozen=True) -class PySwarmsGeneralPSO(Algorithm): +class PySwarmsGeneralPSO(Algorithm, PSOCommonOptions): r"""Minimize a scalar function using General Particle Swarm Optimization with custom topologies. @@ -529,19 +443,15 @@ class PySwarmsGeneralPSO(Algorithm): """ - n_particles: PositiveInt = 50 - """Number of particles in the swarm.""" - - options: PSOOptions = PSOOptions() - """PSO hyperparameters controlling particle behavior.""" - - topology: TopologyConfig = "star" + topology: Literal["star", "ring", "vonneumann", "random", "pyramid"] | Topology = ( + "star" + ) """Topology structure for particle communication. The `topology` can be specified in two ways: 1. **By name (string):** e.g., ``"star"``, ``"ring"``. This uses the default - parameters for that topology. + parameter values for that topology. 2. **By dataclass instance:** e.g., ``RingTopology(k_neighbors=5, p_norm=1)``. This allows for detailed configuration of topology-specific parameters. @@ -550,62 +460,6 @@ class PySwarmsGeneralPSO(Algorithm): """ - stopping_maxiter: PositiveInt = STOPPING_MAXITER - """Maximum number of iterations.""" - - initial_positions: list[PyTree] | None = None - """Option to set the initial particle positions. - - If None, positions are generated randomly within the given bounds, or within [0, 1] - if bounds are not specified. 
- - """ - - oh_strategy: dict[str, str] | None = None - """Dictionary of strategies for time-varying options.""" - - boundary_strategy: Literal[ - "periodic", "reflective", "shrink", "random", "intermediate" - ] = "periodic" - """Strategy for handling out-of-bounds particles. - - Available options: periodic (default), - reflective, shrink, random, intermediate. - - """ - - velocity_strategy: Literal["unmodified", "adjust", "invert", "zero"] = "unmodified" - """Strategy for handling out-of-bounds velocities. - - Available options: unmodified (default), - adjust, invert, zero. - - """ - - velocity_clamp_min: float | None = None - """Minimum velocity limit for particles.""" - - velocity_clamp_max: float | None = None - """Maximum velocity limit for particles.""" - - convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL - """Stop when relative change in objective function is less than this value.""" - - convergence_ftol_iter: PositiveInt = 1 - """Number of iterations to check for convergence.""" - - n_cores: PositiveInt = 1 - """Number of cores for parallel evaluation.""" - - center_init: PositiveFloat = 1.0 - """Scaling factor for initial particle positions.""" - - verbose: bool = False - """Enable or disable the logs and progress bar.""" - - seed: int | None = None - """Random seed for reproducibility.""" - def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: @@ -615,7 +469,11 @@ def _solve_internal_problem( import pyswarms as ps pyswarms_topology, topology_options = _resolve_topology_config(self.topology) - base_options = _pso_options_to_dict(self.options) + base_options = { + "c1": self.cognitive_parameter, + "c2": self.social_parameter, + "w": self.inertia_weight, + } pso_options_dict = {**base_options, **topology_options} optimizer_kwargs = { @@ -628,20 +486,7 @@ def _solve_internal_problem( x0=x0, optimizer_class=ps.single.GeneralOptimizerPSO, optimizer_kwargs=optimizer_kwargs, - n_particles=self.n_particles, - stopping_maxiter=self.stopping_maxiter, - initial_positions=self.initial_positions, - oh_strategy=self.oh_strategy, - boundary_strategy=self.boundary_strategy, - velocity_strategy=self.velocity_strategy, - velocity_clamp_min=self.velocity_clamp_min, - velocity_clamp_max=self.velocity_clamp_max, - convergence_ftol_rel=self.convergence_ftol_rel, - convergence_ftol_iter=self.convergence_ftol_iter, - n_cores=self.n_cores, - center_init=self.center_init, - verbose=self.verbose, - seed=self.seed, + algo_options=self, ) return res @@ -652,88 +497,64 @@ def _pyswarms_internal( x0: NDArray[np.float64], optimizer_class: Any, optimizer_kwargs: dict[str, Any], - n_particles: int, - stopping_maxiter: int, - initial_positions: list[PyTree] | None, - oh_strategy: dict[str, str] | None, - boundary_strategy: str, - velocity_strategy: str, - velocity_clamp_min: float | None, - velocity_clamp_max: float | None, - convergence_ftol_rel: float, - convergence_ftol_iter: int, - n_cores: int, - center_init: float, - verbose: bool, - seed: int | None, + algo_options: PSOCommonOptions, ) -> InternalOptimizeResult: """Internal function for PySwarms optimization. 
Args: - problem: Internal optimization problem - x0: Initial parameter vector - optimizer_class: PySwarms optimizer class to use - optimizer_kwargs: Arguments for optimizer class - n_particles: Number of particles in the swarm - stopping_maxiter: Maximum number of iterations before stopping - initial_positions: User-provided initial positions for particles - oh_strategy: Options handling strategy - boundary_strategy: Strategy for handling boundary constraints - velocity_strategy: Strategy for velocity updates - velocity_clamp_min: Minimum velocity bound - velocity_clamp_max: Maximum velocity bound - convergence_ftol_rel: Relative tolerance for convergence detection - convergence_ftol_iter: Number of iterations for convergence check - n_cores: Number of cores for parallel evaluation - center_init: Scaling factor for initial particle positions - verbose: Enable verbose output during optimization - seed: Random seed for reproducibility + problem: Internal optimization problem. + x0: Initial parameter vector. + optimizer_class: PySwarms optimizer class to use. + optimizer_kwargs: Arguments for optimizer class. + algo_options: The PySwarms common options. Returns: - InternalOptimizeResult: Internal optimization result + InternalOptimizeResult: Internal optimization result. """ - rng = np.random.default_rng(seed) + rng = np.random.default_rng(algo_options.seed) - velocity_clamp = _build_velocity_clamp(velocity_clamp_min, velocity_clamp_max) + velocity_clamp = _build_velocity_clamp( + algo_options.velocity_clamp_min, algo_options.velocity_clamp_max + ) bounds = _get_pyswarms_bounds(problem.bounds) - if initial_positions is not None: + if algo_options.initial_positions is not None: init_pos = np.array( [ problem.converter.params_to_internal(position) - for position in initial_positions + for position in algo_options.initial_positions ] ) else: init_pos = _create_initial_positions( x0=x0, - n_particles=n_particles, + n_particles=algo_options.n_particles, bounds=bounds, - center=center_init, + center=algo_options.center_init, rng=rng, ) optimizer = optimizer_class( - n_particles=n_particles, + n_particles=algo_options.n_particles, dimensions=len(x0), bounds=bounds, - velocity_clamp=velocity_clamp, init_pos=init_pos, - ftol=convergence_ftol_rel, - ftol_iter=convergence_ftol_iter, - bh_strategy=boundary_strategy, - vh_strategy=velocity_strategy, - oh_strategy=oh_strategy, + velocity_clamp=velocity_clamp, + oh_strategy=algo_options.oh_strategy, + bh_strategy=algo_options.boundary_strategy, + vh_strategy=algo_options.velocity_strategy, + ftol=algo_options.convergence_ftol_rel, + ftol_iter=algo_options.convergence_ftol_iter, **optimizer_kwargs, ) - objective_wrapper = _create_batch_objective(problem, n_cores) + objective_wrapper = _create_batch_objective(problem, algo_options.n_cores) result = optimizer.optimize( objective_func=objective_wrapper, - iters=stopping_maxiter, - verbose=verbose, + iters=algo_options.stopping_maxiter, + verbose=algo_options.verbose, ) res = _process_pyswarms_result(result=result, optimizer=optimizer) @@ -742,7 +563,7 @@ def _pyswarms_internal( def _resolve_topology_config( - config: TopologyConfig, + config: Literal["star", "ring", "vonneumann", "random", "pyramid"] | Topology, ) -> tuple[Any, dict[str, float | int]]: """Resolves the topology config into a pyswarms topology instance and options dict.""" @@ -782,17 +603,6 @@ def _resolve_topology_config( return topology_instance, options -def _pso_options_to_dict(options: PSOOptions) -> dict[str, float | int]: - """Convert 
option parameters to PySwarms format.""" - pso_options = { - "c1": options.cognitive_parameter, - "c2": options.social_parameter, - "w": options.inertia_weight, - } - - return pso_options - - def _build_velocity_clamp( velocity_clamp_min: float | None, velocity_clamp_max: float | None ) -> tuple[float, float] | None: From 76095060be800730dd47d8f678b5552fda898eec Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 3 Sep 2025 15:59:46 +0000 Subject: [PATCH 36/40] docs: display inherited params in pyswarms optimizers --- docs/source/algorithms.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md index 1d2d4108e..1a3409f8f 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -4919,6 +4919,8 @@ LocalBestPSO, GeneralOptimizerPSO). To use these optimizers, you need to have **Description and available options:** .. autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsGlobalBestPSO + :members: + :inherited-members: Algorithm, object ``` @@ -4948,6 +4950,8 @@ LocalBestPSO, GeneralOptimizerPSO). To use these optimizers, you need to have **Description and available options:** .. autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsLocalBestPSO + :members: + :inherited-members: Algorithm, object ``` @@ -4977,6 +4981,8 @@ LocalBestPSO, GeneralOptimizerPSO). To use these optimizers, you need to have **Description and available options:** .. autoclass:: optimagic.optimizers.pyswarms_optimizers.PySwarmsGeneralPSO + :members: + :inherited-members: Algorithm, object ``` From 3c963005c79ef032c0b4088a3a4faf5e983fc750 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 3 Sep 2025 16:14:53 +0000 Subject: [PATCH 37/40] fix: update tests --- .../optimizers/test_pyswarms_optimizers.py | 33 ------------------- 1 file changed, 33 deletions(-) diff --git a/tests/optimagic/optimizers/test_pyswarms_optimizers.py b/tests/optimagic/optimizers/test_pyswarms_optimizers.py index 7eda24a8b..02ce85a8c 100644 --- a/tests/optimagic/optimizers/test_pyswarms_optimizers.py +++ b/tests/optimagic/optimizers/test_pyswarms_optimizers.py @@ -7,7 +7,6 @@ from optimagic.config import IS_PYSWARMS_INSTALLED from optimagic.optimization.internal_optimization_problem import InternalBounds from optimagic.optimizers.pyswarms_optimizers import ( - PSOOptions, PyramidTopology, RandomTopology, RingTopology, @@ -16,44 +15,12 @@ _build_velocity_clamp, _create_initial_positions, _get_pyswarms_bounds, - _pso_options_to_dict, _resolve_topology_config, ) RNG = np.random.default_rng(12345) -# Test _pso_options_to_dict -def test_pso_options_to_dict_default(): - """Test PSO options conversion with default values.""" - options = PSOOptions() - result = _pso_options_to_dict(options) - - expected = { - "c1": 0.5, - "c2": 0.3, - "w": 0.9, - } - assert result == expected - - -def test_pso_options_to_dict_custom(): - """Test PSO options conversion with custom values.""" - options = PSOOptions( - cognitive_parameter=1.5, - social_parameter=2.0, - inertia_weight=0.7, - ) - result = _pso_options_to_dict(options) - - expected = { - "c1": 1.5, - "c2": 2.0, - "w": 0.7, - } - assert result == expected - - # Test _build_velocity_clamp def test_build_velocity_clamp_both_values(): """Test velocity clamp with both min and max values.""" From bfe026865c9fbf0f4d8bdd8eedeb76dedb7b58fe Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 3 Sep 
2025 17:05:58 +0000 Subject: [PATCH 38/40] update optimizer param default values --- src/optimagic/optimization/algo_options.py | 10 ++++++++++ src/optimagic/optimizers/pyswarms_optimizers.py | 11 ++++------- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/optimagic/optimization/algo_options.py b/src/optimagic/optimization/algo_options.py index 41d4e46b0..ccf0389c1 100644 --- a/src/optimagic/optimization/algo_options.py +++ b/src/optimagic/optimization/algo_options.py @@ -95,6 +95,14 @@ """ +STOPPING_MAXITER_GLOBAL = 1000 +"""int: + The maximum number of iterations for global optimizers. This is set to a lower + default for global optimizers than for local ones, as they typically run + until this limit is reached. + +""" + CONVERGENCE_SECOND_BEST_FTOL_ABS = 1e-08 """float: absolute criterion tolerance optimagic requires if no other stopping criterion apart from max iterations etc. is available @@ -117,6 +125,7 @@ Used in population-based algorithms like genetic algorithms. To disable, set to None. + """ CONVERGENCE_GENERATIONS_NOIMPROVE = None @@ -125,6 +134,7 @@ Used in population-based algorithms like genetic algorithms. To disable, set to None. + """ diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 0e998d961..60bf833c2 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -17,10 +17,7 @@ from optimagic import mark from optimagic.config import IS_PYSWARMS_INSTALLED from optimagic.exceptions import NotInstalledError -from optimagic.optimization.algo_options import ( - CONVERGENCE_FTOL_REL, - STOPPING_MAXITER, -) +from optimagic.optimization.algo_options import STOPPING_MAXITER_GLOBAL from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalBounds, @@ -141,7 +138,7 @@ class RandomTopology(Topology): class PSOCommonOptions: """Common options for PySwarms optimizers.""" - n_particles: PositiveInt = 50 + n_particles: PositiveInt = 10 """Number of particles in the swarm.""" cognitive_parameter: PositiveFloat = 0.5 @@ -153,7 +150,7 @@ class PSOCommonOptions: inertia_weight: PositiveFloat = 0.9 """Inertia weight (w) - momentum control.""" - stopping_maxiter: PositiveInt = STOPPING_MAXITER + stopping_maxiter: PositiveInt = STOPPING_MAXITER_GLOBAL """Maximum number of iterations.""" initial_positions: list[PyTree] | None = None @@ -191,7 +188,7 @@ class PSOCommonOptions: velocity_clamp_max: float | None = None """Maximum velocity limit for particles.""" - convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL + convergence_ftol_rel: NonNegativeFloat = 0 """Stop when relative change in objective function is less than this value.""" convergence_ftol_iter: PositiveInt = 1 From 9fb982a28da5f5f68a185603d3b80586bf432473 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Wed, 3 Sep 2025 21:23:52 +0000 Subject: [PATCH 39/40] fix: remove STOPPING_MAXITER_GLOBAL, set maxiter to 1000 --- src/optimagic/optimization/algo_options.py | 7 ------- src/optimagic/optimizers/pyswarms_optimizers.py | 3 +-- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/src/optimagic/optimization/algo_options.py b/src/optimagic/optimization/algo_options.py index ccf0389c1..1d225aa26 100644 --- a/src/optimagic/optimization/algo_options.py +++ b/src/optimagic/optimization/algo_options.py @@ -95,13 +95,6 @@ """ 
-STOPPING_MAXITER_GLOBAL = 1000 -"""int: - The maximum number of iterations for global optimizers. This is set to a lower - default for global optimizers than for local ones, as they typically run - until this limit is reached. - -""" CONVERGENCE_SECOND_BEST_FTOL_ABS = 1e-08 """float: absolute criterion tolerance optimagic requires if no other stopping diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 60bf833c2..0f923afc8 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -17,7 +17,6 @@ from optimagic import mark from optimagic.config import IS_PYSWARMS_INSTALLED from optimagic.exceptions import NotInstalledError -from optimagic.optimization.algo_options import STOPPING_MAXITER_GLOBAL from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult from optimagic.optimization.internal_optimization_problem import ( InternalBounds, @@ -150,7 +149,7 @@ class PSOCommonOptions: inertia_weight: PositiveFloat = 0.9 """Inertia weight (w) - momentum control.""" - stopping_maxiter: PositiveInt = STOPPING_MAXITER_GLOBAL + stopping_maxiter: PositiveInt = 1000 """Maximum number of iterations.""" initial_positions: list[PyTree] | None = None From 669cd02af9d512be23b47ec51be0fe87ffcf3d52 Mon Sep 17 00:00:00 2001 From: spline2hg <181270613+spline2hg@users.noreply.github.com> Date: Sun, 7 Sep 2025 13:05:30 +0000 Subject: [PATCH 40/40] warn: add warning for seed --- src/optimagic/optimizers/pyswarms_optimizers.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/optimagic/optimizers/pyswarms_optimizers.py b/src/optimagic/optimizers/pyswarms_optimizers.py index 0f923afc8..38fdd49f9 100644 --- a/src/optimagic/optimizers/pyswarms_optimizers.py +++ b/src/optimagic/optimizers/pyswarms_optimizers.py @@ -8,6 +8,7 @@ from __future__ import annotations +import warnings from dataclasses import dataclass from typing import Any, Callable, Literal @@ -203,7 +204,11 @@ class PSOCommonOptions: """Enable or disable the logs and progress bar.""" seed: int | None = None - """Random seed for reproducibility.""" + """Random seed for initial positions. + + For full reproducibility, set a global seed with `np.random.seed()`. + + """ # ====================================================================================== @@ -508,6 +513,15 @@ def _pyswarms_internal( InternalOptimizeResult: Internal optimization result. """ + if algo_options.seed is not None: + warnings.warn( + "The 'seed' parameter only makes initial particle positions reproducible. " + "For fully deterministic results, set a global seed with " + "'np.random.seed()' before running the optimizer, as other stochastic " + "parts of PySwarms rely on the global numpy random state.", + UserWarning, + ) + rng = np.random.default_rng(algo_options.seed) velocity_clamp = _build_velocity_clamp(
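
A minimal usage sketch of the optimizers introduced in this patch series. The algorithm name "pyswarms_global_best", the PySwarmsGeneralPSO class, the RingTopology dataclass, and the algorithm options (n_particles, stopping_maxiter, seed) come from the code above; the `om.minimize`, `om.Bounds`, and `algo_options` calls assume optimagic's standard public interface, and the example further assumes that `pyswarms` is installed. It is an illustration under those assumptions, not part of the patches themselves.

import numpy as np
import optimagic as om

from optimagic.optimizers.pyswarms_optimizers import (
    PySwarmsGeneralPSO,
    RingTopology,
)


def sphere(x):
    # Simple scalar objective for demonstration.
    return np.sum(x**2)


# PySwarms requires finite bounds, so they are passed explicitly.
bounds = om.Bounds(lower=np.full(3, -5.0), upper=np.full(3, 5.0))

# The `seed` option only makes the initial particle positions reproducible.
# For fully deterministic runs, also set the global NumPy seed, as the warning
# added in the last patch explains.
np.random.seed(0)

# Global-best PSO selected by its registered name, with options passed as a dict.
res_global = om.minimize(
    fun=sphere,
    params=np.array([2.0, -1.5, 0.5]),
    algorithm="pyswarms_global_best",
    bounds=bounds,
    algo_options={"n_particles": 20, "stopping_maxiter": 200, "seed": 0},
)

# The general variant accepts the topology either as a string ("star", "ring", ...)
# or as a configured dataclass instance; here a ring neighborhood with five
# neighbors is used via a fully configured algorithm instance.
res_general = om.minimize(
    fun=sphere,
    params=np.array([2.0, -1.5, 0.5]),
    algorithm=PySwarmsGeneralPSO(
        n_particles=20,
        topology=RingTopology(k_neighbors=5),
        seed=0,
    ),
    bounds=bounds,
)

print(res_global.params, res_general.params)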