Skip to content

Commit 279f9f2

Browse files
Merge pull request #340 from KernelTuner/skopt-support
Add initial support for scikit-optimize minimize methods (`skopt`)
2 parents 6f9f54f + 99e9e6d commit 279f9f2

File tree

4 files changed

+123
-1
lines changed

4 files changed

+123
-1
lines changed

kernel_tuner/interface.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,7 @@
6565
pyatf_strategies,
6666
random_sample,
6767
simulated_annealing,
68+
skopt
6869
)
6970
from kernel_tuner.strategies.wrapper import OptAlgWrapper
7071

@@ -82,6 +83,7 @@
8283
"mls": mls,
8384
"pso": pso,
8485
"simulated_annealing": simulated_annealing,
86+
"skopt": skopt,
8587
"firefly_algorithm": firefly_algorithm,
8688
"bayes_opt": bayes_opt,
8789
"pyatf_strategies": pyatf_strategies,
@@ -394,6 +396,7 @@ def __deepcopy__(self, _):
394396
* "pso" particle swarm optimization
395397
* "random_sample" takes a random sample of the search space
396398
* "simulated_annealing" simulated annealing strategy
399+
* "skopt" uses the minimization methods from `skopt`
397400
398401
Strategy-specific parameters and options are explained under strategy_options.
399402

kernel_tuner/strategies/common.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,7 @@ def __init__(
7373
snap=True,
7474
return_invalid=False,
7575
return_raw=None,
76+
invalid_value=sys.float_info.max,
7677
):
7778
"""An abstract method to handle evaluation of configurations.
7879
@@ -100,6 +101,7 @@ def __init__(
100101
self.return_raw = f"{tuning_options['objective']}s"
101102
self.results = []
102103
self.budget_spent_fraction = 0.0
104+
self.invalid_return_value = invalid_value
103105

104106

105107
def __call__(self, x, check_restrictions=True):
@@ -168,7 +170,7 @@ def __call__(self, x, check_restrictions=True):
168170
else:
169171
# this is not a valid configuration, replace with the configured invalid value if needed
170172
if not self.return_invalid:
171-
return_value = sys.float_info.max
173+
return_value = self.invalid_return_value
172174

173175
# include raw data in return if requested
174176
if self.return_raw is not None:

kernel_tuner/strategies/skopt.py

Lines changed: 116 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,116 @@
"""The strategy that uses the optimizer from skopt for searching through the parameter space."""

import numpy as np

from kernel_tuner.util import StopCriterionReached
from kernel_tuner.searchspace import Searchspace
from kernel_tuner.strategies.common import (
    CostFunc,
    get_options,
    get_strategy_docstring,
)

# Surrogate models accepted by skopt's Optimizer as `base_estimator`.
supported_learners = ["RF", "ET", "GBRT", "DUMMY", "GP"]
# Acquisition functions accepted by skopt's Optimizer as `acq_func`.
supported_acq = ["LCB", "EI", "PI", "gp_hedge"]
# Constant-liar strategies used by skopt when asking for a batch of points.
supported_liars = ["cl_min", "cl_mean", "cl_max"]

# Strategy options: name -> (user-facing description, default value).
# Rendered into the strategy docstring via get_strategy_docstring below.
_options = dict(
    learner=(f"The learner to use (supported: {supported_learners})", "RF"),
    acq_func=(f"The acquisition function to use (supported: {supported_acq})", "gp_hedge"),
    lie_strategy=(f"The lie strategy to use when using batches (supported: {supported_liars})", "cl_max"),
    kappa=("The value of kappa", 1.96),
    num_initial=("Number of initial samples. If `None`, let skopt choose the initial population", None),
    batch_size=("The number of points to ask per batch", 1),
    skopt_kwargs=("Additional options passed to the skopt `Optimizer` as kwargs.", dict()),
)
def tune(searchspace: Searchspace, runner, tuning_options):
    # NOTE: the docstring is assigned after the definition (see bottom of file),
    # so no inline docstring is used here — it would be overwritten anyway.
    learner, acq_func, lie_strategy, kappa, num_initial, batch_size, skopt_kwargs = \
        get_options(tuning_options.strategy_options, _options)

    # Get maximum number of evaluations; never exceed the search space size
    max_fevals = min(tuning_options.get("max_fevals", np.inf), searchspace.size)

    # Best configuration and cost function result found so far
    opt_config, opt_result = None, None

    # The dimensions. Parameters are encoded as indices into their value lists;
    # parameters with only one value become categorical
    from skopt.space.space import Categorical, Integer
    tune_params_values = list(searchspace.tune_params.values())
    bounds = [Integer(0, len(p) - 1) if len(p) > 1 else Categorical([0]) for p in tune_params_values]

    # Space constraint: only index-vectors that map to valid configurations
    space_constraint = lambda x: searchspace.is_param_config_valid(
        searchspace.get_param_config_from_param_indices(x))

    # Create skopt optimizer kwargs. Copy first so the shared default dict in
    # `_options` is never mutated.
    skopt_kwargs = dict(skopt_kwargs)
    skopt_kwargs["base_estimator"] = learner
    skopt_kwargs["acq_func"] = acq_func

    # Only set n_initial_points if not None, otherwise keep skopt's default
    if num_initial is not None:
        skopt_kwargs["n_initial_points"] = num_initial

    # Set kappa if not None; merge into any user-supplied acq_func_kwargs
    if kappa is not None:
        skopt_kwargs.setdefault("acq_func_kwargs", {})["kappa"] = kappa

    if tuning_options.verbose:
        print(f"Initialize scikit-optimize Optimizer object: {skopt_kwargs}")

    from skopt import Optimizer as SkOptimizer
    optimizer = SkOptimizer(
        dimensions=bounds,
        space_constraint=space_constraint,
        **skopt_kwargs
    )

    # Ask initial batch of configs.
    # NOTE(review): `_n_initial_points` is a private skopt attribute — used here to
    # read back the number of initial points skopt actually chose; may break on
    # skopt upgrades, hence the pinned dependency version.
    num_initial = optimizer._n_initial_points
    batch = optimizer.ask(num_initial, lie_strategy)
    xs, ys = [], []
    eval_count = 0

    if tuning_options.verbose:
        print(f"Asked optimizer for {num_initial} points: {batch}")

    # Create cost function
    cost_func = CostFunc(searchspace, tuning_options, runner)
    x0 = cost_func.get_start_pos()

    # Add x0 (as parameter indices) at the front of the batch if the user has
    # requested a starting position
    if x0 is not None:
        batch.insert(0, searchspace.get_param_indices(x0))

    try:
        while eval_count < max_fevals:
            # Batch exhausted: report collected results and ask for a new batch
            if not batch:
                optimizer.tell(xs, ys)
                batch = optimizer.ask(batch_size, lie_strategy)
                xs, ys = [], []

                if tuning_options.verbose:
                    print(f"Asked optimizer for {batch_size} points: {batch}")

            # Evaluate the next point: decode indices to a configuration
            x = batch.pop(0)
            y = cost_func(searchspace.get_param_config_from_param_indices(x))
            eval_count += 1

            xs.append(x)
            ys.append(y)

            # Track the best (lowest-cost) configuration seen so far
            if opt_result is None or y < opt_result:
                opt_config, opt_result = x, y

    except StopCriterionReached as e:
        # Stop criterion (e.g. time limit) reached inside the cost function;
        # results collected so far are still returned below
        if tuning_options.verbose:
            print(e)

    if opt_result is not None and tuning_options.verbose:
        print(f"Best configuration: {opt_config}")

    return cost_func.results


tune.__doc__ = get_strategy_docstring("skopt minimize", _options)

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -128,6 +128,7 @@ nox-poetry = "^1.0.3"
128128
ruff = "^0.4.8"
129129
pep440 = "^0.1.2"
130130
tomli = "^2.0.1" # held back by Python <= 3.10, can be replaced by built-in [tomllib](https://docs.python.org/3.11/library/tomllib.html) from Python 3.11 onwards
131+
scikit-optimize = "0.10.2"
131132

132133
# development dependencies are unused for now, as this is already covered by test and docs
133134
# # ATTENTION: if anything is changed here, run `poetry update`

0 commit comments

Comments
 (0)