Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
995cbf8
Multi fidelity searchspaces and surrogate modelling
jpenn2023 Jan 7, 2026
686fc86
Typing fixes
jpenn2023 Jan 13, 2026
18ec63e
More typing fixes
jpenn2023 Jan 14, 2026
d3d0c2f
More typing fixes with some unresolved
jpenn2023 Jan 16, 2026
585ad16
Typo fix
jpenn2023 Feb 5, 2026
e4b28a7
Integrating multi fidelity surrogate models with multitask refactor
jpenn2023 Feb 27, 2026
eca58ef
Integrate typing
jpenn2023 Feb 27, 2026
88db97f
Integrating kernel factories with multi fidelity
jpenn2023 Mar 6, 2026
26b8ba6
Add acquisition functions
jpenn2023 Feb 27, 2026
76d39d6
Add acquisition functions
jpenn2023 Feb 27, 2026
4bf2576
Moving generic dict comparison validator
jpenn2023 Mar 5, 2026
e168b7e
Adding qMFKG botorch attributes and including qKG current_value attri…
jpenn2023 Mar 5, 2026
915ed51
Add SearchSpaceTaskType to searchspace init file
jpenn2023 Mar 5, 2026
d6a8520
Add multi fidelity acqfs to acquisition init files
jpenn2023 Mar 5, 2026
b23ca4e
Add multi-fidelity acquisition arguments
jpenn2023 Mar 5, 2026
dd2974f
Add recommender logic.
jpenn2023 Mar 5, 2026
53d7758
Checkpoint files should not be in the merge.
jpenn2023 Mar 9, 2026
daa3823
Attrs usage for custom acqf, minor bug fixes and docstring updates
jpenn2023 Mar 9, 2026
1004af5
Validation typing fix
jpenn2023 Mar 9, 2026
3850677
Typing fixes: working around broad Botorch typing
jpenn2023 Mar 13, 2026
ed90903
searchspace dependent acquisition function choice
jpenn2023 Mar 13, 2026
9aa977d
MFUCB fidelities costs and values set at acqf build time
jpenn2023 Mar 13, 2026
2c031e7
Comp rep fidelity dictionary for MFUCB
jpenn2023 Mar 13, 2026
b5c258d
Restricted searchspace for MFUCB stage one
jpenn2023 Mar 13, 2026
d0fd18e
Fixed custom acqf imports
jpenn2023 Mar 13, 2026
5f8fc78
Fix typing in acqf builder
jpenn2023 Mar 13, 2026
93a3b74
Docstring and typing fixes.
jpenn2023 Mar 27, 2026
951af19
Minor fixes
jpenn2023 Mar 27, 2026
bb7b334
Docstrings, error messages, variable names, file structure.
jpenn2023 Mar 31, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions baybe/acquisition/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from baybe.acquisition.acqfs import (
ExpectedImprovement,
LogExpectedImprovement,
MultiFidelityUpperConfidenceBound,
PosteriorMean,
PosteriorStandardDeviation,
ProbabilityOfImprovement,
Expand All @@ -13,6 +14,7 @@
qLogNoisyExpectedHypervolumeImprovement,
qLogNoisyExpectedImprovement,
qLogNParEGO,
qMultiFidelityKnowledgeGradient,
qNegIntegratedPosteriorVariance,
qNoisyExpectedHypervolumeImprovement,
qNoisyExpectedImprovement,
Expand All @@ -30,6 +32,7 @@
EI = ExpectedImprovement
qEI = qExpectedImprovement
qKG = qKnowledgeGradient
qMFKG = qMultiFidelityKnowledgeGradient
LogEI = LogExpectedImprovement
qLogEI = qLogExpectedImprovement
qNEI = qNoisyExpectedImprovement
Expand All @@ -38,6 +41,7 @@
PI = ProbabilityOfImprovement
qPI = qProbabilityOfImprovement
UCB = UpperConfidenceBound
MFUCB = MultiFidelityUpperConfidenceBound
qUCB = qUpperConfidenceBound
qTS = qThompsonSampling
qNEHVI = qNoisyExpectedHypervolumeImprovement
Expand All @@ -47,6 +51,7 @@
######################### Acquisition functions
# Knowledge Gradient
"qKnowledgeGradient",
"qMultiFidelityKnowledgeGradient",
# Posterior Statistics
"PosteriorMean",
"PosteriorStandardDeviation",
Expand All @@ -67,6 +72,7 @@
# Upper Confidence Bound
"UpperConfidenceBound",
"qUpperConfidenceBound",
"MultiFidelityUpperConfidenceBound",
# Thompson Sampling
"qThompsonSampling",
# Hypervolume Improvement
Expand All @@ -77,6 +83,7 @@
######################### Abbreviations
# Knowledge Gradient
"qKG",
"qMFKG",
# Posterior Statistics
"PM",
"PSTD",
Expand All @@ -97,6 +104,7 @@
# Upper Confidence Bound
"UCB",
"qUCB",
"MFUCB",
# Thompson Sampling
"qTS",
# Hypervolume Improvement
Expand Down
87 changes: 86 additions & 1 deletion baybe/acquisition/_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,13 @@
_ExpectedHypervolumeImprovement,
qExpectedHypervolumeImprovement,
qLogExpectedHypervolumeImprovement,
qMultiFidelityKnowledgeGradient,
qNegIntegratedPosteriorVariance,
qThompsonSampling,
)
from baybe.acquisition.base import AcquisitionFunction, _get_botorch_acqf_class
from baybe.acquisition.utils import make_partitioning
from baybe.acquisition.custom_acqfs import MultiFidelityUpperConfidenceBound
from baybe.acquisition.utils import make_MFUCB_dicts, make_partitioning
from baybe.exceptions import (
IncompatibilityError,
IncompleteMeasurementsError,
Expand Down Expand Up @@ -75,16 +77,21 @@ class BotorchAcquisitionArgs:
# Optional, depending on the specific acquisition function being used
best_f: float | None = _OPT_FIELD
beta: float | None = _OPT_FIELD
costs_dict: dict[Any, tuple[float, ...]] = _OPT_FIELD
current_value: Tensor | None = _OPT_FIELD
fidelities_dict: dict[Any, tuple[Any, ...]] = _OPT_FIELD
maximize: bool | None = _OPT_FIELD
mc_points: Tensor | None = _OPT_FIELD
num_fantasies: int | None = _OPT_FIELD
objective: MCAcquisitionObjective | None = _OPT_FIELD
partitioning: BoxDecomposition | None = _OPT_FIELD
posterior_transform: PosteriorTransform | None = _OPT_FIELD
project: Callable[[Tensor], Tensor] | None = _OPT_FIELD
prune_baseline: bool | None = _OPT_FIELD
ref_point: Tensor | None = _OPT_FIELD
X_baseline: Tensor | None = _OPT_FIELD
X_pending: Tensor | None = _OPT_FIELD
zetas_dict: dict[Any, tuple[float, ...]] = _OPT_FIELD

def collect(self) -> dict[str, Any]:
"""Collect the assigned arguments into a dictionary."""
Expand Down Expand Up @@ -202,6 +209,9 @@ def build(self) -> BoAcquisitionFunction:
self._set_mc_points()
self._set_ref_point()
self._set_partitioning()
self._set_current_value()
self._set_projection()
self._set_MFUCB_dicts()

botorch_acqf = self._botorch_acqf_cls(**self._args.collect())
self.set_default_sample_shape(botorch_acqf)
Expand Down Expand Up @@ -264,6 +274,81 @@ def _set_best_f(self) -> None:
case _:
raise NotImplementedError("This line should be impossible to reach.")

def _set_current_value(self) -> None:
    """Set the current value (max posterior mean at target fidelity) for qMFKG.

    The knowledge gradient needs the incumbent value of the posterior mean,
    evaluated with the fidelity feature fixed to the target fidelity (1.0),
    as its baseline. For all other acquisition functions this is a no-op.
    """
    if not isinstance(self.acqf, qMultiFidelityKnowledgeGradient):
        return

    from botorch.acquisition import PosteriorMean
    from botorch.acquisition.fixed_feature import FixedFeatureAcquisitionFunction
    from botorch.optim import optimize_acqf_mixed

    assert self.searchspace.fidelity_idx is not None  # for mypy

    # Posterior mean with the fidelity column pinned to the target fidelity.
    curr_val_acqf = FixedFeatureAcquisitionFunction(
        acq_function=PosteriorMean(self._botorch_surrogate),
        d=len(self.searchspace.parameters),
        columns=[self.searchspace.fidelity_idx],
        values=[1.0],
    )

    # NOTE(review): This is fast-and-loose use of mixed space optimization.
    # Changes will be made with the next PR which uses a notion of wrapped
    # acqfs for setting a current value but also for defining cost aware
    # wrappers.

    # Work on a copy: renaming the columns in place would mutate the
    # searchspace's computational representation, corrupting it for all
    # subsequent users of `comp_rep`.
    candidates_comp = self.searchspace.discrete.comp_rep.copy()
    num_comp_columns = len(candidates_comp.columns)
    candidates_comp.columns = list(range(num_comp_columns))  # type: ignore
    candidates_comp_dict = candidates_comp.to_dict("records")

    # Possible TODO: Align num_restarts and raw_samples with those defined by
    # the user for the main acquisition function.
    _, current_value = optimize_acqf_mixed(
        acq_function=curr_val_acqf,
        bounds=torch.from_numpy(self.searchspace.comp_rep_bounds.values),
        fixed_features_list=candidates_comp_dict,  # type: ignore[arg-type]
        q=1,
        num_restarts=10,
        raw_samples=64,
    )

    self._args.current_value = current_value

def _set_projection(self) -> None:
    """Set projection to the target fidelity for qMFKG."""
    if not isinstance(self.acqf, qMultiFidelityKnowledgeGradient):
        return

    fidelity_idx = self.searchspace.fidelity_idx
    assert fidelity_idx is not None  # for mypy

    # Map the fidelity column to the target fidelity value of 1.0.
    fidelity_targets = {fidelity_idx: 1.0}
    dimension = len(self.searchspace.parameters)

    def _project_to_target(X: Tensor) -> Tensor:
        from botorch.acquisition.utils import project_to_target_fidelity

        return project_to_target_fidelity(X, fidelity_targets, dimension)

    self._args.project = _project_to_target

def _set_MFUCB_dicts(self) -> None:
    """Set value, fidelities and cost dictionaries for MFUCB."""
    if not isinstance(self.acqf, MultiFidelityUpperConfidenceBound):
        return

    # The helper derives all three lookups from the searchspace in one pass.
    (
        self._args.fidelities_dict,
        self._args.costs_dict,
        self._args.zetas_dict,
    ) = make_MFUCB_dicts(self.searchspace)

def set_default_sample_shape(self, acqf: BoAcquisitionFunction, /):
"""Apply temporary workaround for Thompson sampling."""
# TODO: Needs redesign once bandits are supported more generally
Expand Down
43 changes: 42 additions & 1 deletion baybe/acquisition/acqfs.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from attr.converters import optional as optional_c
from attr.validators import optional as optional_v
from attrs import AttrsInstance, define, field, fields
from attrs.validators import gt, instance_of, le
from attrs.validators import ge, gt, instance_of, le
from typing_extensions import override

from baybe.acquisition.base import AcquisitionFunction
Expand Down Expand Up @@ -156,6 +156,22 @@ class qKnowledgeGradient(AcquisitionFunction):
memory footprint and wall time."""


@define(frozen=True)
class qMultiFidelityKnowledgeGradient(AcquisitionFunction):
    """Monte Carlo based multi-fidelity knowledge gradient.

    This acquisition function currently only supports purely continuous spaces.
    """

    abbreviation: ClassVar[str] = "qMFKG"

    num_fantasies: int = field(validator=[instance_of(int), gt(0)], default=128)
    """Number of fantasies to draw for approximating the knowledge gradient.

    More samples result in a better approximation, at the expense of both increased
    memory footprint and wall time."""


########################################################################################
### Posterior Statistics
@define(frozen=True)
Expand Down Expand Up @@ -289,6 +305,31 @@ class qUpperConfidenceBound(AcquisitionFunction):
"""See :paramref:`UpperConfidenceBound.beta`."""


@define(frozen=True)
class MultiFidelityUpperConfidenceBound(AcquisitionFunction):
    """Two stage acquisition function of Kandasamy et al. (2016).

    Stage 1: Choose design features based on argmax_x (softmin_m (UCB_m(x) + zeta_m)).

    Stage 2: Choose cheapest fidelity satisfying a cost-aware informativeness threshold.
    """

    abbreviation: ClassVar[str] = "MFUCB"

    softmin_temperature: float = field(
        converter=float, validator=[finite_float, ge(0.0)], default=1e-2
    )
    """Softmin smoothing parameter."""

    beta: float = field(converter=float, validator=finite_float, default=0.2)
    """See :paramref:`UpperConfidenceBound.beta`."""

    @override
    @classproperty
    def supports_batching(cls) -> bool:
        # The two-stage selection scheme recommends a single point at a time,
        # so q-batched evaluation is not supported.
        return False


########################################################################################
### ThompsonSampling
@define(frozen=True)
Expand Down
3 changes: 3 additions & 0 deletions baybe/acquisition/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,11 +165,14 @@ def _get_botorch_acqf_class(
"""Extract the BoTorch acquisition class for the given BayBE acquisition class."""
import botorch

from baybe.acquisition import custom_acqfs

for cls in baybe_acqf_cls.mro():
if (
acqf_cls := getattr(botorch.acquisition, cls.__name__, False)
or getattr(botorch.acquisition.multi_objective, cls.__name__, False)
or getattr(botorch.acquisition.multi_objective.parego, cls.__name__, False)
or getattr(custom_acqfs, cls.__name__, False)
):
if is_abstract(acqf_cls):
continue
Expand Down
10 changes: 10 additions & 0 deletions baybe/acquisition/custom_acqfs/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
"""Custom acquisition functions."""

from baybe.acquisition.custom_acqfs.two_stage import (
MultiFidelityUpperConfidenceBound,
)

__all__ = [
# Multi fidelity acquisition functions
"MultiFidelityUpperConfidenceBound",
]
Loading
Loading