Skip to content

blackboxopt.optimizers.staged.iteration

Datum dataclass

Small container for bookkeeping only.

StagedIteration

digest_evaluation(self, evaluation_specificiation_id, evaluation)

Registers the result of an evaluation.

Parameters:

Name Type Description Default
evaluation_specificiation_id UUID

ID of the evaluation specification this result belongs to.

required
evaluation Evaluation

The evaluated specification, including objective values.

required
Source code in blackboxopt/optimizers/staged/iteration.py
def digest_evaluation(
    self, evaluation_specificiation_id: UUID, evaluation: Evaluation
):
    """Register the result of a previously issued evaluation.

    Forwards the evaluation to the configuration sampler, marks the
    corresponding bookkeeping entry as ``FINISHED`` (or ``CRASHED`` when all
    objectives are ``None``), records the observed loss if available, and
    progresses to the next stage once every configuration of the current
    stage has terminated.

    Args:
        evaluation_specificiation_id: ID of the evaluation specification
            (``optimizer_info["id"]``) this result belongs to.
        evaluation: The evaluated specification, including objective values.
    """
    self.config_sampler.digest_evaluation(evaluation)
    i = self.pending_evaluations.pop(evaluation_specificiation_id)
    d = self.evaluation_data[self.current_stage][i]
    d.status = "FINISHED" if not evaluation.all_objectives_none else "CRASHED"
    loss = evaluation.objectives[self.objective.name]
    if loss is not None:
        d.loss = loss

    # Progress to the next stage once all slots of the current stage are
    # filled and every one of them has terminated.
    stage_data = self.evaluation_data[self.current_stage]
    if len(stage_data) == self.num_configs[self.current_stage] and all(
        e.status in ("FINISHED", "CRASHED") for e in stage_data
    ):
        self._progress_to_next_stage()

get_evaluation_specification(self)

Pick the next evaluation specification with a budget i.e. fidelity to run.

Returns:

Type Description
Optional[blackboxopt.evaluation.EvaluationSpecification]

The next evaluation specification to run, or None if this iteration is finished or all of its configurations are currently pending.

Source code in blackboxopt/optimizers/staged/iteration.py
def get_evaluation_specification(self) -> Optional[EvaluationSpecification]:
    """Pick the next evaluation specification with a budget, i.e. fidelity, to run.

    Queued configurations of the current stage are handed out first; if the
    stage still has empty slots, a new configuration is sampled and queued.

    Returns:
        An evaluation specification with the current stage's fidelity set in
        its ``settings``, or ``None`` if this iteration is finished or all of
        its configurations are currently pending.
    """
    if self.finished:
        return None

    # Prefer a configuration that was already sampled and is still queued.
    for i, datum in enumerate(self.evaluation_data[self.current_stage]):
        if datum.status == "QUEUED":
            eval_spec = copy.deepcopy(self.eval_specs[datum.config_key])
            eval_spec.settings["fidelity"] = self.fidelities[self.current_stage]
            datum.status = "RUNNING"
            self.pending_evaluations[eval_spec.optimizer_info["id"]] = i
            return eval_spec

    # Sample a new configuration if there are empty slots to be filled.
    if (
        len(self.evaluation_data[self.current_stage])
        < self.num_configs[self.current_stage]
    ):
        config_key = (
            self.iteration,
            self.current_stage,
            len(self.evaluation_data[self.current_stage]),
        )
        configuration, opt_info = self.config_sampler.sample_configuration()
        opt_info.update({"configuration_key": config_key, "id": str(uuid4())})
        self.eval_specs[config_key] = EvaluationSpecification(
            configuration=configuration, settings={}, optimizer_info=opt_info
        )
        self.evaluation_data[self.current_stage].append(Datum(config_key, "QUEUED"))
        # The freshly queued entry is picked up by the QUEUED scan in this
        # recursive call and returned from there.
        return self.get_evaluation_specification()

    # At this point there are pending evaluations and this iteration has to wait.
    return None