
blackboxopt.optimizers.bohb

BOHB (StagedIterationOptimizer)

Source code in blackboxopt/optimizers/bohb.py
class BOHB(StagedIterationOptimizer):
    def __init__(
        self,
        search_space: ParameterSpace,
        objective: Objective,
        min_fidelity: float,
        max_fidelity: float,
        num_iterations: int,
        eta: float = 3.0,
        top_n_percent: int = 15,
        min_samples_in_model: Optional[int] = None,
        num_samples: int = 64,
        random_fraction: float = 1 / 3,
        bandwidth_factor: float = 3.0,
        min_bandwidth: float = 1e-3,
        seed: Optional[int] = None,
        logger: Optional[logging.Logger] = None,
    ):
        """BOHB Optimizer.

        BOHB performs robust and efficient hyperparameter optimization
        at scale by combining the speed of Hyperband searches with the
        guidance and guarantees of convergence of Bayesian
        Optimization. Instead of sampling new configurations at random,
        BOHB uses kernel density estimators to select promising candidates.

        For reference:
        ```
        @InProceedings{falkner-icml-18,
            title =     {{BOHB}: Robust and Efficient Hyperparameter Optimization at
                Scale},
            author =    {Falkner, Stefan and Klein, Aaron and Hutter, Frank},
            booktitle = {Proceedings of the 35th International Conference on Machine
                Learning},
            pages =     {1436--1445},
            year =      {2018},
        }
        ```

        Args:
            search_space: The space of parameters that is searched during the
                optimization.
            objective: The objective that is optimized.
            min_fidelity: The smallest fidelity value that is still meaningful.
                Must be strictly greater than zero!
            max_fidelity: The largest fidelity value used during the optimization.
                Must not be smaller than `min_fidelity`.
            num_iterations: The number of iterations that the optimizer will run.
            eta: Scaling parameter to control the aggressiveness of Hyperband's racing.
            top_n_percent: Determines the percentile of configurations that will be
                used as training data for the kernel density estimator of the good
                configurations, e.g. if set to 10, the best 10% of configurations
                will be considered for training.
            min_samples_in_model: Minimum number of datapoints needed to fit a model.
            num_samples: Number of samples drawn to optimize EI via sampling.
            random_fraction: Fraction of random configurations returned.
            bandwidth_factor: Factor by which the bandwidth for continuous parameters
                is widened when sampling proposed points to optimize EI.
            min_bandwidth: To keep diversity, even when all (good) samples have the
                same value for one of the parameters, a minimum bandwidth
                (reasonable default: 1e-3) is used instead of zero.
            seed: Seed for the random number generator to make runs reproducible.
            logger: Custom logger instance; if not provided, a default logger is used.
        """
        if min_samples_in_model is None:
            min_samples_in_model = 3 * len(search_space)

        self.min_fidelity = min_fidelity
        self.max_fidelity = max_fidelity
        self.eta = eta

        self.config_sampler = BOHBSampler(
            search_space=search_space,
            objective=objective,
            min_samples_in_model=min_samples_in_model,
            top_n_percent=top_n_percent,
            num_samples=num_samples,
            random_fraction=random_fraction,
            bandwidth_factor=bandwidth_factor,
            min_bandwidth=min_bandwidth,
            seed=seed,
        )

        super().__init__(
            search_space=search_space,
            objective=objective,
            num_iterations=num_iterations,
            seed=seed,
            logger=logger,
        )

    def _create_new_iteration(self, iteration_index):
        """Optimizer specific way to create a new
        `blackboxopt.optimizer.utils.staged_iteration.StagedIteration` object
        """
        return create_hyperband_iteration(
            iteration_index,
            self.min_fidelity,
            self.max_fidelity,
            self.eta,
            self.config_sampler,
            self.objective,
            self.logger,
        )
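
A minimal usage sketch for constructing the optimizer (the parameter name `lr`, the bounds, and the fidelity values below are illustrative assumptions, not part of this module):

```python
import parameterspace as ps

from blackboxopt import Objective
from blackboxopt.optimizers.bohb import BOHB

# Hypothetical one-dimensional search space.
space = ps.ParameterSpace()
space.add(ps.ContinuousParameter(name="lr", bounds=(1e-5, 1e-1)))

optimizer = BOHB(
    search_space=space,
    objective=Objective("loss", greater_is_better=False),
    min_fidelity=1.0,  # e.g. one training epoch
    max_fidelity=27.0,  # with eta=3.0 this yields the fidelity schedule 1, 3, 9, 27
    num_iterations=4,
)
```

Note that when `min_samples_in_model` is not given, it defaults to `3 * len(search_space)` per the constructor above, so the kernel density estimators are only fit once enough results have been reported.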

generate_evaluation_specification(self) inherited

Get next configuration and settings to evaluate.

Exceptions:

OptimizationComplete: When the optimization run is finished, e.g. when the budget has been exhausted.
OptimizerNotReady: When the optimizer is not ready to propose a new evaluation specification.

Source code in blackboxopt/optimizers/bohb.py
def generate_evaluation_specification(self) -> EvaluationSpecification:
    """Get next configuration and settings to evaluate.

    Raises:
        OptimizationComplete: When the optimization run is finished, e.g. when the
            budget has been exhausted.
        OptimizerNotReady: When the optimizer is not ready to propose a new
            evaluation specification.
    """
    # check if any of the already active iterations returns a configuration and
    # simply return that
    for idx, iteration in enumerate(self.iterations):
        es = iteration.generate_evaluation_specification()

        if es is not None:
            self.evaluation_uuid_to_iteration[str(es.optimizer_info["id"])] = idx
            self.pending_configurations[str(es.optimizer_info["id"])] = es
            return es

    # if that didn't work, check whether another iteration can be started and then
    # ask it for a configuration
    if len(self.iterations) < self.num_iterations:
        self.iterations.append(self._create_new_iteration(len(self.iterations)))
        es = self.iterations[-1].generate_evaluation_specification()
        self.evaluation_uuid_to_iteration[str(es.optimizer_info["id"])] = (
            len(self.iterations) - 1
        )
        self.pending_configurations[str(es.optimizer_info["id"])] = es
        return es

    # check if the optimization is already complete or whether the optimizer is
    # waiting for evaluation results -> raise corresponding error
    if all(iteration.finished for iteration in self.iterations):
        raise OptimizationComplete

    raise OptimizerNotReady
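
In practice, the two exceptions suggest a driver loop like the following sketch (the objective function `evaluate`, the fidelity lookup via `es.settings`, and the import location are assumptions for illustration):

```python
from blackboxopt import OptimizationComplete, OptimizerNotReady  # assumed import location

while True:
    try:
        es = optimizer.generate_evaluation_specification()
    except OptimizationComplete:
        break  # all iterations are finished
    except OptimizerNotReady:
        continue  # results pending; in a distributed setup, wait for workers instead

    # `evaluate` is a hypothetical objective function; the staged iteration
    # optimizer is assumed to communicate the fidelity via the specification's
    # settings.
    loss = evaluate(es.configuration, fidelity=es.settings["fidelity"])
    optimizer.report(es.create_evaluation(objectives={"loss": loss}))
```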

report(self, evaluations) inherited

Report one or multiple evaluations to the optimizer.

All valid evaluations are processed. Faulty evaluations are not processed; instead, an EvaluationsError is raised, which includes the problematic evaluations together with their respective exceptions in its evaluations_with_errors attribute.

Parameters:

evaluations (Union[blackboxopt.evaluation.Evaluation, Iterable[blackboxopt.evaluation.Evaluation]], required):
    A single evaluated evaluation specification, or an iterable of many.

Exceptions:

EvaluationsError: Raised when an evaluation could not be processed.

Source code in blackboxopt/optimizers/bohb.py
def report(self, evaluations: Union[Evaluation, Iterable[Evaluation]]) -> None:
    _evals = [evaluations] if isinstance(evaluations, Evaluation) else evaluations

    call_functions_with_evaluations_and_collect_errors(
        [super().report, _validate_optimizer_info_id, self._report],
        _evals,
    )
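
Since the description above states that the raised EvaluationsError carries the problematic evaluations with their exceptions in its `evaluations_with_errors` attribute, faulty reports can be inspected as in this sketch (the import location and the pairwise structure of the attribute are assumptions):

```python
from blackboxopt.base import EvaluationsError  # assumed import location

try:
    optimizer.report(evaluations)
except EvaluationsError as e:
    # Valid evaluations were still processed; only the faulty ones are
    # returned here, assumed as (evaluation, exception) pairs.
    for evaluation, error in e.evaluations_with_errors:
        print(f"Could not process {evaluation}: {error}")
```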