|
| 1 | +"""The strategy that uses the optimizer from skopt for searching through the parameter space.""" |
| 2 | + |
| 3 | +import numpy as np |
| 4 | +from kernel_tuner.util import StopCriterionReached |
| 5 | +from kernel_tuner.searchspace import Searchspace |
| 6 | +from kernel_tuner.strategies.common import ( |
| 7 | + CostFunc, |
| 8 | + get_options, |
| 9 | + get_strategy_docstring, |
| 10 | +) |
| 11 | + |
# Estimators, acquisition functions, and lie strategies accepted by skopt's Optimizer.
supported_learners = ["RF", "ET", "GBRT", "DUMMY", "GP"]
supported_acq = ["LCB", "EI", "PI", "gp_hedge"]
supported_liars = ["cl_min", "cl_mean", "cl_max"]

# Strategy options exposed to the user: name -> (help string, default value).
_options = dict(
    learner=(f"The learner to use (supported: {supported_learners})", "RF"),
    acq_func=(f"The acquisition function to use (supported: {supported_acq})", "gp_hedge"),
    lie_strategy=(f"The lie strategy to use when using batches (supported: {supported_liars})", "cl_max"),
    kappa=("The value of kappa", 1.96),
    num_initial=("Number of initial samples. If `None`, let skopt choose the initial population", None),
    batch_size=("The number of points to ask per batch", 1),
    skopt_kwargs=("Additional options passed to the skopt `Optimizer` as kwargs.", dict()),
)
| 25 | + |
| 26 | + |
def tune(searchspace: Searchspace, runner, tuning_options):
    """Tune the kernel using the scikit-optimize ask/tell Optimizer.

    Parameters are searched over their *index* space: each tunable parameter is
    mapped to an Integer dimension over its value indices (or a single-value
    Categorical), and indices are translated back to real parameter values
    before each evaluation.

    :param searchspace: the Searchspace to tune over
    :param runner: the runner used by the cost function to benchmark configurations
    :param tuning_options: the tuning options, including `strategy_options`
    :returns: the list of evaluated configuration results collected by the cost function
    """
    learner, acq_func, lie_strategy, kappa, num_initial, batch_size, skopt_kwargs = \
        get_options(tuning_options.strategy_options, _options)

    # Maximum number of evaluations, capped at the search space size
    max_fevals = min(tuning_options.get("max_fevals", np.inf), searchspace.size)

    # Best observed point (as parameter indices) and its cost so far
    opt_config, opt_result = None, None

    # The dimensions. Parameters with one value become categorical
    from skopt.space.space import Categorical, Integer
    tune_params_values = list(searchspace.tune_params.values())
    bounds = [Integer(0, len(p) - 1) if len(p) > 1 else Categorical([0]) for p in tune_params_values]

    # Space constraint: only index tuples that map to a valid parameter configuration
    space_constraint = lambda x: searchspace.is_param_config_valid(
        searchspace.get_param_config_from_param_indices(x))

    # Create skopt optimizer kwargs; copy first so the user's dict is not mutated
    skopt_kwargs = dict(skopt_kwargs)
    skopt_kwargs["base_estimator"] = learner
    skopt_kwargs["acq_func"] = acq_func

    # Only set n_initial_points if not None (otherwise let skopt use its default)
    if num_initial is not None:
        skopt_kwargs["n_initial_points"] = num_initial

    # Set kappa if not None
    if kappa is not None:
        skopt_kwargs.setdefault("acq_func_kwargs", {})["kappa"] = kappa

    if tuning_options.verbose:
        print(f"Initialize scikit-optimize Optimizer object: {skopt_kwargs}")

    from skopt import Optimizer as SkOptimizer
    optimizer = SkOptimizer(
        dimensions=bounds,
        space_constraint=space_constraint,
        **skopt_kwargs
    )

    # Ask initial batch of configs.
    # NOTE(review): reads the private attribute `_n_initial_points`; confirm it
    # exists in the skopt version pinned by the project.
    num_initial = optimizer._n_initial_points
    batch = optimizer.ask(num_initial, lie_strategy)
    xs, ys = [], []
    eval_count = 0

    if tuning_options.verbose:
        print(f"Asked optimizer for {num_initial} points: {batch}")

    # Create cost function
    cost_func = CostFunc(searchspace, tuning_options, runner)
    x0 = cost_func.get_start_pos()

    # Evaluate x0 first if the user has requested a starting position
    if x0 is not None:
        batch.insert(0, searchspace.get_param_indices(x0))

    try:
        while eval_count < max_fevals:
            # Batch exhausted: report collected observations, then ask for new points
            if not batch:
                optimizer.tell(xs, ys)
                batch = optimizer.ask(batch_size, lie_strategy)
                xs, ys = [], []

                # Only report when an ask actually happened (previously printed
                # on every iteration, which was misleading)
                if tuning_options.verbose:
                    print(f"Asked optimizer for {batch_size} points: {batch}")

            x = batch.pop(0)
            y = cost_func(searchspace.get_param_config_from_param_indices(x))
            eval_count += 1

            xs.append(x)
            ys.append(y)

            if opt_result is None or y < opt_result:
                opt_config, opt_result = x, y

    except StopCriterionReached as e:
        if tuning_options.verbose:
            print(e)

    if opt_result is not None and tuning_options.verbose:
        # Report the actual parameter values, not the internal index representation
        print(f"Best configuration: {searchspace.get_param_config_from_param_indices(opt_config)}")

    return cost_func.results
| 114 | + |
| 115 | + |
| 116 | +tune.__doc__ = get_strategy_docstring("skopt minimize", _options) |
0 commit comments