diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md index bdc9c27e5..5f222c87a 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -4652,6 +4652,227 @@ Optimizers from the package are available in optimagic. To use it, you need to have [gradient_free_optimizers](https://pypi.org/project/gradient_free_optimizers) installed. +```{eval-rst} +.. dropdown:: gfo_hillclimbing + + **How to use this algorithm.** + + .. code-block:: python + + import optimagic as om + import numpy as np + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm=om.algos.gfo_hillclimbing(stopping_maxiter=1_000, ...), + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + or using the string interface: + + .. code-block:: python + + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm="gfo_hillclimbing", + algo_options={"stopping_maxiter": 1_000, ...}, + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + **Description and available options:** + + .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOHillClimbing + :members: + :inherited-members: Algorithm, object + +``` + +```{eval-rst} +.. dropdown:: gfo_stochastichillclimbing + + **How to use this algorithm.** + + .. code-block:: python + + import optimagic as om + import numpy as np + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm=om.algos.gfo_stochastichillclimbing(stopping_maxiter=1_000, ...), + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + or using the string interface: + + .. code-block:: python + + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm="gfo_stochastichillclimbing", + algo_options={"stopping_maxiter": 1_000, ...}, + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + **Description and available options:** + + .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOStochasticHillClimbing + :members: + :inherited-members: Algorithm, object + :member-order: bysource + +``` + +```{eval-rst} +.. dropdown:: gfo_repulsinghillclimbing + + **How to use this algorithm.** + + .. code-block:: python + + import optimagic as om + import numpy as np + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm=om.algos.gfo_repulsinghillclimbing(stopping_maxiter=1_000, ...), + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + or using the string interface: + + .. code-block:: python + + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm="gfo_repulsinghillclimbing", + algo_options={"stopping_maxiter": 1_000, ...}, + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + **Description and available options:** + + .. autoclass:: optimagic.optimizers.gfo_optimizers.GFORepulsingHillClimbing + :members: + :inherited-members: Algorithm, object + :member-order: bysource + +``` + +```{eval-rst} +.. dropdown:: gfo_simulatedannealing + + **How to use this algorithm.** + + .. code-block:: python + + import optimagic as om + import numpy as np + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm=om.algos.gfo_simulatedannealing(stopping_maxiter=1_000, ...), + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + or using the string interface: + + .. 
code-block:: python + + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm="gfo_simulatedannealing", + algo_options={"stopping_maxiter": 1_000, ...}, + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + **Description and available options:** + + .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOSimulatedAnnealing + :members: + :inherited-members: Algorithm, object + :member-order: bysource + +``` + +```{eval-rst} +.. dropdown:: gfo_downhillsimplex + + **How to use this algorithm.** + + .. code-block:: python + + import optimagic as om + import numpy as np + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm=om.algos.gfo_downhillsimplex(stopping_maxiter=1_000, ...), + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + or using the string interface: + + .. code-block:: python + + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm="gfo_downhillsimplex", + algo_options={"stopping_maxiter": 1_000, ...}, + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + **Description and available options:** + + .. autoclass:: optimagic.optimizers.gfo_optimizers.GFODownhillSimplex + :members: + :inherited-members: Algorithm, object + :member-order: bysource + +``` + +```{eval-rst} +.. dropdown:: gfo_powells_method + + **How to use this algorithm.** + + .. code-block:: python + + import optimagic as om + import numpy as np + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm=om.algos.gfo_powells_method(stopping_maxiter=1_000, ...), + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + or using the string interface: + + .. code-block:: python + + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm="gfo_powells_method", + algo_options={"stopping_maxiter": 1_000, ...}, + bounds = om.Bounds(lower = np.array([1,1,1]), upper=np.array([5,5,5])) + ) + + **Description and available options:** + + .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOPowellsMethod + :members: + :inherited-members: Algorithm, object + :member-order: bysource + +``` + ```{eval-rst} .. dropdown:: gfo_pso @@ -4660,6 +4881,7 @@ package are available in optimagic. To use it, you need to have .. code-block:: python import optimagic as om + import numpy as np om.minimize( fun=lambda x: x @ x, params=[1.0, 2.0, 3.0], @@ -4682,6 +4904,9 @@ package are available in optimagic. To use it, you need to have **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOParticleSwarmOptimization + :members: + :inherited-members: Algorithm, object + :member-order: bysource ``` @@ -4717,6 +4942,9 @@ package are available in optimagic. To use it, you need to have **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOParallelTempering + :members: + :inherited-members: Algorithm, object + :member-order: bysource ``` ```{eval-rst} @@ -4750,6 +4978,9 @@ package are available in optimagic. To use it, you need to have **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOSpiralOptimization + :members: + :inherited-members: Algorithm, object + :member-order: bysource ``` ```{eval-rst} @@ -4783,6 +5014,9 @@ package are available in optimagic. To use it, you need to have **Description and available options:** .. 
autoclass:: optimagic.optimizers.gfo_optimizers.GFOGeneticAlgorithm + :members: + :inherited-members: Algorithm, object + :member-order: bysource ``` ```{eval-rst} @@ -4816,6 +5050,9 @@ package are available in optimagic. To use it, you need to have **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFOEvolutionStrategy + :members: + :inherited-members: Algorithm, object + :member-order: bysource ``` ```{eval-rst} @@ -4849,6 +5086,9 @@ package are available in optimagic. To use it, you need to have **Description and available options:** .. autoclass:: optimagic.optimizers.gfo_optimizers.GFODifferentialEvolution + :members: + :inherited-members: Algorithm, object + :member-order: bysource ``` diff --git a/docs/source/how_to/how_to_bounds.ipynb b/docs/source/how_to/how_to_bounds.ipynb index 9e587c06b..77223de52 100644 --- a/docs/source/how_to/how_to_bounds.ipynb +++ b/docs/source/how_to/how_to_bounds.ipynb @@ -319,7 +319,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.16" + "version": "3.12.11" } }, "nbformat": 4, diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py index c7fa34cb2..07e759fde 100644 --- a/src/optimagic/algorithms.py +++ b/src/optimagic/algorithms.py @@ -17,11 +17,17 @@ from optimagic.optimizers.fides import Fides from optimagic.optimizers.gfo_optimizers import ( GFODifferentialEvolution, + GFODownhillSimplex, GFOEvolutionStrategy, GFOGeneticAlgorithm, + GFOHillClimbing, GFOParallelTempering, GFOParticleSwarmOptimization, + GFOPowellsMethod, + GFORepulsingHillClimbing, + GFOSimulatedAnnealing, GFOSpiralOptimization, + GFOStochasticHillClimbing, ) from optimagic.optimizers.iminuit_migrad import IminuitMigrad from optimagic.optimizers.ipopt import Ipopt @@ -407,11 +413,19 @@ class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES @@ -1147,11 +1161,19 @@ class BoundedGlobalGradientFreeAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = 
GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES @@ -1233,11 +1255,19 @@ class GlobalGradientFreeScalarAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES @@ -1477,11 +1507,19 @@ class BoundedGradientFreeScalarAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA @@ -1749,11 +1787,19 @@ class BoundedGlobalScalarAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = 
GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES @@ -2422,11 +2468,19 @@ class GlobalGradientFreeAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES @@ -2530,11 +2584,19 @@ class BoundedGradientFreeAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim @@ -2649,11 +2711,19 @@ class GradientFreeScalarAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = 
GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim @@ -2807,11 +2877,19 @@ class BoundedGlobalAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES @@ -2911,11 +2989,19 @@ class GlobalScalarAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES @@ -3261,11 +3347,19 @@ class BoundedScalarAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = 
GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA @@ -3620,11 +3714,19 @@ class GradientFreeAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nag_dfols: Type[NagDFOLS] = NagDFOLS nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel @@ -3716,11 +3818,19 @@ class GlobalAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) nevergrad_bo: Type[NevergradBayesOptim] = NevergradBayesOptim nevergrad_cga: Type[NevergradCGA] = NevergradCGA nevergrad_cmaes: Type[NevergradCMAES] = NevergradCMAES @@ -3868,11 +3978,19 @@ class BoundedAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + 
GFOStochasticHillClimbing + ) iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_dfols: Type[NagDFOLS] = NagDFOLS @@ -4027,11 +4145,19 @@ class ScalarAlgorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_pybobyqa: Type[NagPyBOBYQA] = NagPyBOBYQA @@ -4239,11 +4365,19 @@ class Algorithms(AlgoSelection): gfo_differential_evolution: Type[GFODifferentialEvolution] = ( GFODifferentialEvolution ) + gfo_downhillsimplex: Type[GFODownhillSimplex] = GFODownhillSimplex gfo_evolution_strategy: Type[GFOEvolutionStrategy] = GFOEvolutionStrategy gfo_genetic_algorithm: Type[GFOGeneticAlgorithm] = GFOGeneticAlgorithm + gfo_hillclimbing: Type[GFOHillClimbing] = GFOHillClimbing gfo_parallel_tempering: Type[GFOParallelTempering] = GFOParallelTempering gfo_pso: Type[GFOParticleSwarmOptimization] = GFOParticleSwarmOptimization + gfo_powells_method: Type[GFOPowellsMethod] = GFOPowellsMethod + gfo_repulsinghillclimbing: Type[GFORepulsingHillClimbing] = GFORepulsingHillClimbing + gfo_simulatedannealing: Type[GFOSimulatedAnnealing] = GFOSimulatedAnnealing gfo_spiral_optimization: Type[GFOSpiralOptimization] = GFOSpiralOptimization + gfo_stochastichillclimbing: Type[GFOStochasticHillClimbing] = ( + GFOStochasticHillClimbing + ) iminuit_migrad: Type[IminuitMigrad] = IminuitMigrad ipopt: Type[Ipopt] = Ipopt nag_dfols: Type[NagDFOLS] = NagDFOLS diff --git a/src/optimagic/mark.py b/src/optimagic/mark.py index a3567fcb3..94a3850ec 100644 --- a/src/optimagic/mark.py +++ b/src/optimagic/mark.py @@ -80,6 +80,7 @@ def minimizer( supports_linear_constraints: bool, supports_nonlinear_constraints: bool, disable_history: bool = False, + experimental: bool = False, ) -> Callable[[AlgorithmSubclass], AlgorithmSubclass]: """Mark an algorithm as a optimagic minimizer and add AlgoInfo. @@ -115,6 +116,7 @@ def minimizer( constraints. This needs to be True if the algorithm previously took `nonlinear_constraints` as an argument. disable_history: Whether the algorithm should disable history collection. + experimental: Whether the algorithm is experimental and should skip tests. 
""" @@ -133,6 +135,7 @@ def decorator(cls: AlgorithmSubclass) -> AlgorithmSubclass: supports_linear_constraints=supports_linear_constraints, supports_nonlinear_constraints=supports_nonlinear_constraints, disable_history=disable_history, + experimental=experimental, ) cls.__algo_info__ = algo_info # type: ignore return cls diff --git a/src/optimagic/optimization/algorithm.py b/src/optimagic/optimization/algorithm.py index ac83fe4dd..334c40daf 100644 --- a/src/optimagic/optimization/algorithm.py +++ b/src/optimagic/optimization/algorithm.py @@ -33,6 +33,7 @@ class AlgoInfo: supports_linear_constraints: bool supports_nonlinear_constraints: bool disable_history: bool = False + experimental: bool = False def __post_init__(self) -> None: report: list[str] = [] diff --git a/src/optimagic/optimization/internal_optimization_problem.py b/src/optimagic/optimization/internal_optimization_problem.py index a53a48150..f2ddcda8b 100644 --- a/src/optimagic/optimization/internal_optimization_problem.py +++ b/src/optimagic/optimization/internal_optimization_problem.py @@ -1011,10 +1011,10 @@ def likelihood_sphere(params: PyTree) -> SpecificFunctionValue: } def sphere_gradient(params: PyTree) -> PyTree: - return {params[f"x{i}"]: 2 * v for i, v in enumerate(params.values())} + return {f"x{i}": 2 * v for i, v in enumerate(params.values())} def likelihood_sphere_gradient(params: PyTree) -> PyTree: - return {params[f"x{i}"]: 2 * v for i, v in enumerate(params.values())} + return {f"x{i}": 2 * v for i, v in enumerate(params.values())} def ls_sphere_jac(params: PyTree) -> PyTree: return { diff --git a/src/optimagic/optimizers/gfo_optimizers.py b/src/optimagic/optimizers/gfo_optimizers.py index 192ff9fd1..39db14c7f 100644 --- a/src/optimagic/optimizers/gfo_optimizers.py +++ b/src/optimagic/optimizers/gfo_optimizers.py @@ -25,6 +25,7 @@ from optimagic.typing import ( AggregationLevel, NonNegativeFloat, + PositiveFloat, PositiveInt, PyTree, ) @@ -108,6 +109,484 @@ class GFOCommonOptions: seed: int | None = None """Random seed for reproducibility.""" + rand_rest_p: ProbabilityFloat = 0 + """Probability for the optimization algorithm to jump to a random position in an + iteration step.""" + + +# ================================================================================== +# Local optimizers +# ================================================================================== + + +@mark.minimizer( + name="gfo_hillclimbing", + solver_type=AggregationLevel.SCALAR, + is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED, + is_global=True, + needs_jac=False, + needs_hess=False, + needs_bounds=True, + supports_parallelism=False, + supports_bounds=True, + supports_infinite_bounds=False, + supports_linear_constraints=False, + supports_nonlinear_constraints=False, + disable_history=False, +) +@dataclass(frozen=True) +class GFOHillClimbing(GFOCommonOptions, Algorithm): + """Minimize a scalar function using the HillClimbing algorithm. + + This algorithm is a Python implementation of the HillClimbing algorithm through the + gradient_free_optimizers package. + + Hill climbing is a local search algorithm suited for exploring combinatorial search + spaces. + + “It starts at an initial point, which is the best point chosen from `n_init` + initialization runs, and continues to move to positions within its + neighbourhood with a better solution. It has no method against getting stuck in + local optima. + + """ + + epsilon: PositiveFloat = 0.03 + """The step-size of the hill climbing algorithm. 
If step_size is too large the newly
+    selected positions will be at the edge of the search space.
+
+    If its value is very low it might not find new positions.
+
+    """
+
+    distribution: Literal["normal", "laplace", "logistic", "gumbel"] = "normal"
+    """The mathematical distribution the algorithm draws samples from.
+
+    All available distributions are taken from the numpy-package.
+
+    """
+
+    n_neighbours: PositiveInt = 3
+    """The number of positions the algorithm explores from its current position before
+    setting its current position to the best of those neighbour positions.
+
+    If the value of n_neighbours is large the hill-climbing-based algorithm will take a
+    lot of time to choose the next position to move to, but the choice will probably be
+    a good one. It might be a prudent approach to increase n_neighbours if the search
+    space has a lot of dimensions, because there are more possible directions to move
+    to.
+
+    """
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        import gradient_free_optimizers as gfo
+
+        opt = gfo.HillClimbingOptimizer
+        optimizer = partial(
+            opt,
+            epsilon=self.epsilon,
+            distribution=self.distribution,
+            n_neighbours=self.n_neighbours,
+        )
+        res = _gfo_internal(
+            common_options=self,
+            problem=problem,
+            x0=x0,
+            optimizer=optimizer,
+        )
+
+        return res
+
+
+@mark.minimizer(
+    name="gfo_stochastichillclimbing",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    needs_bounds=True,
+    supports_parallelism=False,
+    supports_bounds=True,
+    supports_infinite_bounds=False,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class GFOStochasticHillClimbing(Algorithm, GFOCommonOptions):
+    """Minimize a scalar function using the Stochastic Hill Climbing algorithm.
+
+    This algorithm is a Python implementation of the StochasticHillClimbing algorithm
+    through the gradient_free_optimizers package.
+
+    Stochastic hill climbing extends normal hill climbing by accepting a worse position
+    as the next position with probability `p_accept`, which helps against getting
+    stuck in local optima.
+
+    """
+
+    epsilon: PositiveFloat = 0.03
+    """The step-size of the hill climbing algorithm. If step_size is too large the newly
+    selected positions will be at the edge of the search space.
+
+    If its value is very low it might not find new positions.
+
+    """
+
+    distribution: Literal["normal", "laplace", "logistic", "gumbel"] = "normal"
+    """The mathematical distribution the algorithm draws samples from.
+
+    All available distributions are taken from the numpy-package.
+
+    """
+
+    n_neighbours: PositiveInt = 3
+    """The number of positions the algorithm explores from its current position before
+    setting its current position to the best of those neighbour positions.
+
+    If the value of n_neighbours is large the hill-climbing-based algorithm will take a
+    lot of time to choose the next position to move to, but the choice will probably be
+    a good one. It might be a prudent approach to increase n_neighbours if the search
+    space has a lot of dimensions, because there are more possible directions to move
+    to.
+
+    """
+
+    p_accept: ProbabilityFloat = 0.5
+    """The probability factor used in the equation to calculate if a worse position is
+    accepted as the new position.
+ + If the new score is not better than the previous one the algorithm accepts worse + positions with probability p_accept. + + .. math:: + score_{normalized} = norm * \\frac{score_{current} - score_{new}} + {score_{current} + score_{new}} + .. math:: + p = \\exp^{-score_{normalized}} + + If p is less than p_accept the new position gets accepted anyways. + + """ + + def _solve_internal_problem( + self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] + ) -> InternalOptimizeResult: + import gradient_free_optimizers as gfo + + opt = gfo.StochasticHillClimbingOptimizer + optimizer = partial( + opt, + epsilon=self.epsilon, + distribution=self.distribution, + n_neighbours=self.n_neighbours, + p_accept=self.p_accept, + ) + res = _gfo_internal( + common_options=self, + problem=problem, + x0=x0, + optimizer=optimizer, + ) + + return res + + +@mark.minimizer( + name="gfo_repulsinghillclimbing", + solver_type=AggregationLevel.SCALAR, + is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED, + is_global=True, + needs_jac=False, + needs_hess=False, + needs_bounds=True, + supports_parallelism=False, + supports_bounds=True, + supports_infinite_bounds=False, + supports_linear_constraints=False, + supports_nonlinear_constraints=False, + disable_history=False, +) +@dataclass(frozen=True) +class GFORepulsingHillClimbing(Algorithm, GFOCommonOptions): + """Minimize a scalar function using the Repulsing Hill Climbing algorithm. + + This algorithm is a Python implementation of the Repulsing Hill Climbing algorithm + through the gradient_free_optimizers package. + + The algorithm inherits from the Hill climbing which is a local search algorithm but + always activates its methods to escape local optima. + + """ + + epsilon: PositiveFloat = 0.03 + """The step-size of the hill climbing algorithm. If step_size is too large the newly + selected positions will be at the edge of the search space. + + If its value is very low it might not find new positions. + + """ + + distribution: Literal["normal", "laplace", "logistic", "gumbel"] = "normal" + """The mathematical distribution the algorithm draws samples from. + + All available distributions are taken from the numpy-package. + + """ + + n_neighbours: PositiveInt = 3 + """The number of positions the algorithm explores from its current position before + setting its current position to the best of those neighbour positions.""" + + repulsion_factor: PositiveFloat = 5 + """The algorithm increases the step size by multiplying it with the repulsion_factor + for the next iteration. This way the algorithm escapes the region that does not + offer better positions. + + .. 
math::
+        \\epsilon \\leftarrow \\epsilon * repulsion\\_factor
+
+    """
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        import gradient_free_optimizers as gfo
+
+        opt = gfo.RepulsingHillClimbingOptimizer
+        optimizer = partial(
+            opt,
+            epsilon=self.epsilon,
+            distribution=self.distribution,
+            n_neighbours=self.n_neighbours,
+            repulsion_factor=self.repulsion_factor,
+        )
+        res = _gfo_internal(
+            common_options=self,
+            problem=problem,
+            x0=x0,
+            optimizer=optimizer,
+        )
+
+        return res
+
+
+@mark.minimizer(
+    name="gfo_simulatedannealing",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    needs_bounds=True,
+    supports_parallelism=False,
+    supports_bounds=True,
+    supports_infinite_bounds=False,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+    experimental=True,
+)
+@dataclass(frozen=True)
+class GFOSimulatedAnnealing(Algorithm, GFOCommonOptions):
+    """Minimize a scalar function using the Simulated Annealing algorithm.
+
+    This algorithm is a Python implementation of Simulated Annealing through the
+    gradient_free_optimizers package.
+
+    Simulated annealing chooses its next possible position similar to hill climbing, but
+    it accepts worse results with a probability that decreases with time. It simulates a
+    temperature that decreases with each iteration, similar to a material cooling down.
+
+    """
+
+    epsilon: PositiveFloat = 0.03
+    """The step-size of the algorithm.
+
+    If step_size is too large the newly selected positions will be at the edge of the
+    search space. If its value is very low it might not find new positions.
+
+    """
+
+    distribution: Literal["normal", "laplace", "logistic", "gumbel"] = "normal"
+    """The mathematical distribution the algorithm draws samples from.
+
+    All available distributions are taken from the numpy-package.
+
+    """
+
+    n_neighbours: PositiveInt = 3
+    """The number of positions the algorithm explores from its current position before
+    setting its current position to the best of those neighbour positions."""
+
+    start_temp: PositiveFloat = 1
+    """The start_temp is a factor in the probability p of accepting a worse position.
+
+    .. math::
+        p = \\exp\\left(-\\frac{score_{normalized}}{temp}\\right)
+
+    """
+
+    annealing_rate: PositiveFloat = 0.97
+    """Rate at which the temperature value of the algorithm decreases. An annealing rate
+    above 1 increases the temperature over time.
+
+    .. math::
+        start\\_temp \\leftarrow start\\_temp * annealing\\_rate
+
+    """
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        import gradient_free_optimizers as gfo
+
+        opt = gfo.SimulatedAnnealingOptimizer
+        optimizer = partial(
+            opt,
+            epsilon=self.epsilon,
+            distribution=self.distribution,
+            n_neighbours=self.n_neighbours,
+            start_temp=self.start_temp,
+            annealing_rate=self.annealing_rate,
+        )
+        res = _gfo_internal(
+            common_options=self,
+            problem=problem,
+            x0=x0,
+            optimizer=optimizer,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="gfo_downhillsimplex",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    needs_bounds=True,
+    supports_parallelism=False,
+    supports_bounds=True,
+    supports_infinite_bounds=False,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+    experimental=True,
+)
+@dataclass(frozen=True)
+class GFODownhillSimplex(Algorithm, GFOCommonOptions):
+    """Minimize a scalar function using the Downhill Simplex algorithm.
+
+    This algorithm is a Python implementation of the Downhill Simplex algorithm through
+    the gradient_free_optimizers package.
+
+    The Downhill simplex or Nelder-Mead algorithm works by grouping `number of
+    dimensions + 1` positions into a simplex, which can explore the search-space by
+    changing shape. The simplex changes shape by reflecting, expanding, contracting or
+    shrinking via the alpha, gamma, beta or sigma parameters. It needs at least `number
+    of dimensions + 1` initial positions to form a simplex in the search-space and the
+    movements of the positions in the simplex are affected by each other.
+
+    """
+
+    simplex_reflection: PositiveFloat = 1
+    """The reflection parameter of the simplex algorithm."""
+
+    simplex_expansion: PositiveFloat = 2
+    """The expansion parameter of the simplex algorithm."""
+
+    simplex_contraction: PositiveFloat = 0.5
+    """The contraction parameter of the simplex algorithm."""
+
+    simplex_shrinking: PositiveFloat = 0.5
+    """The shrinking parameter of the simplex algorithm."""
+
+    def _solve_internal_problem(
+        self, problem: InternalOptimizationProblem, x0: NDArray[np.float64]
+    ) -> InternalOptimizeResult:
+        import gradient_free_optimizers as gfo
+
+        opt = gfo.DownhillSimplexOptimizer
+        optimizer = partial(
+            opt,
+            alpha=self.simplex_reflection,
+            gamma=self.simplex_expansion,
+            beta=self.simplex_contraction,
+            sigma=self.simplex_shrinking,
+        )
+        res = _gfo_internal(
+            common_options=self,
+            problem=problem,
+            x0=x0,
+            optimizer=optimizer,
+        )
+        return res
+
+
+@mark.minimizer(
+    name="gfo_powells_method",
+    solver_type=AggregationLevel.SCALAR,
+    is_available=IS_GRADIENT_FREE_OPTIMIZERS_INSTALLED,
+    is_global=True,
+    needs_jac=False,
+    needs_hess=False,
+    needs_bounds=True,
+    supports_parallelism=False,
+    supports_bounds=True,
+    supports_infinite_bounds=False,
+    supports_linear_constraints=False,
+    supports_nonlinear_constraints=False,
+    disable_history=False,
+)
+@dataclass(frozen=True)
+class GFOPowellsMethod(Algorithm, GFOCommonOptions):
+    """Minimize a scalar function using Powell's Method.
+
+    This algorithm is a Python implementation of Powell's method through
+    the gradient_free_optimizers package.
+
+    This Powell's method implementation works by optimizing each search space dimension
+    at a time with the hill climbing algorithm. 
It works by setting the search space + range for all dimensions except one to a single value. The hill climbing algorithms + searches the best position within this dimension. After `iters_p_dim` iterations the + next dimension is searched, while the search space range from the + previously searched dimension is set to the best position, + This way the algorithm finds new best positions one dimension at a time. + + """ + + iters_p_dim: PositiveInt = 10 + """Number of iterations the algorithm will let the hill-climbing algorithm search to + find the best position before it changes to the next dimension of the search space. + + Typical range: 5 to 15. + + """ + + def _solve_internal_problem( + self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] + ) -> InternalOptimizeResult: + import gradient_free_optimizers as gfo + + opt = gfo.PowellsMethod + optimizer = partial( + opt, + iters_p_dim=self.iters_p_dim, + ) + + res = _gfo_internal( + common_options=self, + problem=problem, + x0=x0, + optimizer=optimizer, + ) + return res + # ================================================================================== # Population Based @@ -169,10 +648,6 @@ class GFOParticleSwarmOptimization(Algorithm, GFOCommonOptions): """A factor of the movement towards the global best position of the individual particles in the population.""" - rand_rest_p: NonNegativeFloat = 0.01 - """Probability for the optimization algorithm to jump to a random position in an - iteration step.""" - def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: @@ -248,10 +723,6 @@ class GFOParallelTempering(Algorithm, GFOCommonOptions): """The number of iterations the algorithm performs before switching temperatures of the individual optimizers in the population.""" - rand_rest_p: NonNegativeFloat = 0 - """Probability for the optimization algorithm to jump to a random position in an - iteration step.""" - def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: @@ -342,10 +813,6 @@ class GFOSpiralOptimization(Algorithm, GFOCommonOptions): """ - rand_rest_p: NonNegativeFloat = 0 - """Probability for the optimization algorithm to jump to a random position in an - iteration step.""" - def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: @@ -546,10 +1013,6 @@ class GFOEvolutionStrategy(Algorithm, GFOCommonOptions): """Probability of an individual to perform a crossover with the best individual in the population.""" - rand_rest_p: NonNegativeFloat = 0 - """Probability for the optimization algorithm to jump to a random position in an - iteration step.""" - def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] ) -> InternalOptimizeResult: diff --git a/tests/optimagic/optimization/test_many_algorithms.py b/tests/optimagic/optimization/test_many_algorithms.py index 71298fb56..3712af517 100644 --- a/tests/optimagic/optimization/test_many_algorithms.py +++ b/tests/optimagic/optimization/test_many_algorithms.py @@ -43,7 +43,7 @@ def _get_options(algo): options = {} "Max time before termination" if hasattr(algo, "stopping_maxtime"): - options.update({"stopping_maxtime": 10}) + options.update({"stopping_maxtime": 1}) "Fix seed if algorithm is stochastic" if hasattr(algo, "seed"): @@ -52,6 +52,10 @@ def _get_options(algo): def _get_required_decimals(algorithm, algo): + # if algo is 
experimental, do not expect solution + if algo.algo_info.experimental: + return 0 + if algorithm in PRECISION_LOOKUP: return PRECISION_LOOKUP[algorithm] else:
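Beyond the patch itself, here is a minimal smoke-test sketch for the newly registered optimizers. It is not part of the diff; it assumes a checkout with this change plus `gradient_free_optimizers` installed, and it only reuses names that appear above (`gfo_hillclimbing`, `stopping_maxiter`, `seed`, `om.Bounds`, and the `__algo_info__` attribute set by `mark.minimizer`).

```python
import numpy as np
import optimagic as om

# Run one of the newly added GFO local optimizers on a simple quadratic.
# The option names (stopping_maxiter, seed) mirror the docs and dataclass
# fields added in this diff; treat the snippet as illustrative, not canonical.
res = om.minimize(
    fun=lambda x: x @ x,
    params=np.array([2.0, 3.0, 4.0]),
    algorithm=om.algos.gfo_hillclimbing(stopping_maxiter=1_000, seed=0),
    bounds=om.Bounds(lower=np.full(3, -5.0), upper=np.full(3, 5.0)),
)
print(res.params)

# The new `experimental` flag is stored on the algorithm class via the
# `__algo_info__` attribute that `mark.minimizer` assigns (see mark.py above);
# the adjusted test helper reads it to skip precision checks.
print(om.algos.gfo_downhillsimplex.__algo_info__.experimental)  # True
```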