Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- `identify_non_dominated_configurations` method to `Campaign` and `Objective`
for determining the Pareto front

### Changed
- The `Campaign.allow_*` flag mechanism is now based on `AutoBool` logic, providing
well-defined Boolean values at query time while exposing the `AUTO` option to the user
Copy link

Copilot AI Feb 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The changelog mentions an AUTO option, but the user-facing input in tests/messages appears to be the string 'auto' (and/or AutoBool.AUTO). Consider clarifying the exact supported user inputs (e.g., true/false/'auto' in JSON/configs, and/or AutoBool.AUTO in Python) so external users know what to pass.

Suggested change
well-defined Boolean values at query time while exposing the `AUTO` option to the user
well-defined Boolean values at query time while exposing an automatic mode to the user
(e.g., via the string `"auto"` in JSON/configs or `AutoBool.AUTO` in Python code)

Copilot uses AI. Check for mistakes.

### Fixed
- Broken cache validation for certain `Campaign.recommend` cases

### Removed
- `parallel_runs` argument from `simulate_scenarios`, since parallelization
can now be conveniently controlled via the new `Settings` mechanism
Expand Down
110 changes: 56 additions & 54 deletions baybe/campaign.py
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

is there anything in the doc that will make it easy to understand what the (finally resolved Boolean) values are for the respective cases? Currently I think users would have to bother with the logic, or execute code, to understand that.

Can we add a table? Or can the docstrings of the flags be expanded?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You mean something like a table that shows how the auto-values resolve for the different flags and search space types? I have nothing against that, but do you see any way how we can ensure that stuff stays in sync (especially given that meddling with the flags sort of has been our yearly fun exercise over and over again)?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

well then let's have a static table; it won't change frequently anyway

Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,14 @@
import gc
import json
import warnings
from collections.abc import Callable, Collection, Sequence
from collections.abc import Collection, Sequence
from functools import reduce
from typing import TYPE_CHECKING, Any, TypeVar

import cattrs
import numpy as np
import pandas as pd
from attrs import Attribute, Factory, define, evolve, field, fields
from attrs import Attribute, define, evolve, field, fields
from attrs.converters import optional
from attrs.validators import instance_of
from typing_extensions import override
Expand Down Expand Up @@ -42,8 +42,8 @@
from baybe.settings import Settings, active_settings
from baybe.surrogates.base import PosteriorStatistic, SurrogateProtocol
from baybe.targets.base import Target
from baybe.utils.basic import UNSPECIFIED, UnspecifiedType, is_all_instance
from baybe.utils.boolean import eq_dataframe
from baybe.utils.basic import is_all_instance
from baybe.utils.boolean import AutoBool, eq_dataframe
from baybe.utils.conversion import to_string
from baybe.utils.dataframe import filter_df, fuzzy_row_match
from baybe.utils.validation import (
Expand All @@ -67,43 +67,27 @@
_METADATA_COLUMNS = [_RECOMMENDED, _MEASURED, _EXCLUDED]


def _make_allow_flag_default_factory(
    default: bool,
) -> Callable[[Campaign], bool | UnspecifiedType]:
    """Make a default factory for allow_* flags.

    The returned factory resolves to ``default`` when the campaign's search
    space is discrete and to ``UNSPECIFIED`` for any other search space type.
    """

    def _factory(campaign: Campaign) -> bool | UnspecifiedType:
        """Attrs-compatible default factory for allow_* flags."""
        is_discrete = campaign.searchspace.type is SearchSpaceType.DISCRETE
        return default if is_discrete else UNSPECIFIED

    return _factory


def _set_with_cache_cleared(instance: Campaign, attribute: Attribute, value: _T) -> _T:
"""Attrs-compatible hook to clear the cache when changing an attribute."""
if value != getattr(instance, attribute.name):
instance.clear_cache()
return value


def _validate_allow_flag(campaign: Campaign, attribute: Attribute, value: Any) -> None:
def _validate_allow_flag(
campaign: Campaign, attribute: Attribute, value: AutoBool
) -> None:
"""Attrs-compatible validator for context-aware validation of allow_* flags."""
match campaign.searchspace.type:
case SearchSpaceType.DISCRETE:
if not isinstance(value, bool):
raise ValueError(
f"For search spaces of '{SearchSpaceType.DISCRETE}', "
f"'{attribute.name}' must be a Boolean."
)
case _:
if value is not UNSPECIFIED:
raise ValueError(
f"For search spaces of type other than "
f"'{SearchSpaceType.DISCRETE}', '{attribute.name}' cannot be set "
f"since the flag is meaningless in such contexts.",
)
if campaign.searchspace.type is SearchSpaceType.DISCRETE:
return

if value is AutoBool.FALSE:
raise IncompatibilityError(
f"For search spaces involving a continuous subspace, the flag "
f"'{attribute.alias}' cannot be set to 'False' for algorithmic reasons. "
f"Either let the value be automatically determined by not setting it "
f"explicitly / setting it to 'auto' or explicitly set it to 'True'."
)


@define
Expand Down Expand Up @@ -151,38 +135,35 @@ def _validate_objective( # noqa: DOC101, DOC103
)
"""The employed recommender"""

allow_recommending_already_measured: bool | UnspecifiedType = field(
default=Factory(
_make_allow_flag_default_factory(default=True), takes_self=True
),
_allow_recommending_already_measured: AutoBool = field(
alias="allow_recommending_already_measured",
default=AutoBool.AUTO,
converter=AutoBool.from_unstructured,
validator=_validate_allow_flag,
on_setattr=_set_with_cache_cleared,
kw_only=True,
)
"""Allow to recommend experiments that were already measured earlier.
Can only be set for discrete search spaces."""
"""Allow recommending experiments that have already been measured."""

allow_recommending_already_recommended: bool | UnspecifiedType = field(
default=Factory(
_make_allow_flag_default_factory(default=False), takes_self=True
),
_allow_recommending_already_recommended: AutoBool = field(
alias="allow_recommending_already_recommended",
default=AutoBool.AUTO,
converter=AutoBool.from_unstructured,
validator=_validate_allow_flag,
on_setattr=_set_with_cache_cleared,
kw_only=True,
)
"""Allow to recommend experiments that were already recommended earlier.
Can only be set for discrete search spaces."""
"""Allow recommending experiments that have already been recommended."""

allow_recommending_pending_experiments: bool | UnspecifiedType = field(
default=Factory(
_make_allow_flag_default_factory(default=False), takes_self=True
),
_allow_recommending_pending_experiments: AutoBool = field(
alias="allow_recommending_pending_experiments",
default=AutoBool.AUTO,
converter=AutoBool.from_unstructured,
validator=_validate_allow_flag,
on_setattr=_set_with_cache_cleared,
kw_only=True,
)
Comment on lines +138 to 165
Copy link

Copilot AI Feb 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These attrs fields were renamed to private _allow_* names with public-facing alias=.... Any existing code that introspects attrs via fields(Campaign).allow_recommending_* will now break (those fields no longer exist as attrs attributes). Update such call sites to reference the new attrs fields (e.g., fields(Campaign)._allow_recommending_already_measured) and use .alias when you need the user-facing name for messages/config output.

Copilot uses AI. Check for mistakes.
"""Allow pending experiments to be part of the recommendations.
Can only be set for discrete search spaces."""
"""Allow recommending pending experiments."""

# Metadata
_searchspace_metadata: pd.DataFrame = field(init=False, eq=eq_dataframe)
Expand Down Expand Up @@ -264,6 +245,27 @@ def targets(self) -> tuple[Target, ...]:
"""The targets of the underlying objective."""
return self.objective.targets if self.objective is not None else ()

@property
def allow_recommending_already_measured(self) -> bool:
    """Allow recommending experiments that have already been measured."""
    flag = self._allow_recommending_already_measured
    # AUTO resolves to True for this flag; explicit values pass through.
    return True if flag is AutoBool.AUTO else bool(flag)

@property
def allow_recommending_already_recommended(self) -> bool:
    """Allow recommending experiments that have already been recommended."""
    flag = self._allow_recommending_already_recommended
    if flag is not AutoBool.AUTO:
        return bool(flag)
    # AUTO resolves to False for discrete search spaces, True otherwise.
    return self.searchspace.type is not SearchSpaceType.DISCRETE

@property
def allow_recommending_pending_experiments(self) -> bool:
    """Allow recommending pending experiments."""
    flag = self._allow_recommending_pending_experiments
    if flag is not AutoBool.AUTO:
        return bool(flag)
    # AUTO resolves to False for discrete search spaces, True otherwise.
    return self.searchspace.type is not SearchSpaceType.DISCRETE
Comment on lines +253 to +267
Copy link

Copilot AI Feb 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These properties rely on bool(AutoBoolValue) for non-AUTO cases. That’s easy to misread and can be brittle depending on how AutoBool is implemented (e.g., Enum truthiness vs IntEnum vs custom __bool__). Consider making the mapping explicit (e.g., comparing to AutoBool.TRUE/AutoBool.FALSE or using a dedicated AutoBool.to_bool() helper) to keep semantics unambiguous.

Suggested change
return bool(self._allow_recommending_already_measured)
@property
def allow_recommending_already_recommended(self) -> bool:
"""Allow recommending experiments that have already been recommended."""
if self._allow_recommending_already_recommended is AutoBool.AUTO:
return False
return bool(self._allow_recommending_already_recommended)
@property
def allow_recommending_pending_experiments(self) -> bool:
"""Allow recommending pending experiments."""
if self._allow_recommending_pending_experiments is AutoBool.AUTO:
return False
return bool(self._allow_recommending_pending_experiments)
return self._allow_recommending_already_measured is AutoBool.TRUE
@property
def allow_recommending_already_recommended(self) -> bool:
"""Allow recommending experiments that have already been recommended."""
if self._allow_recommending_already_recommended is AutoBool.AUTO:
return False
return self._allow_recommending_already_recommended is AutoBool.TRUE
@property
def allow_recommending_pending_experiments(self) -> bool:
"""Allow recommending pending experiments."""
if self._allow_recommending_pending_experiments is AutoBool.AUTO:
return False
return self._allow_recommending_pending_experiments is AutoBool.TRUE

Copilot uses AI. Check for mistakes.

@classmethod
def from_config(cls, config_json: str) -> Campaign:
"""Create a campaign from a configuration JSON.
Expand Down Expand Up @@ -578,9 +580,9 @@ def recommend(
ok_m = self.allow_recommending_already_measured
ok_r = self.allow_recommending_already_recommended
ok_p = self.allow_recommending_pending_experiments
ok_m_name = f.allow_recommending_already_measured.name
ok_r_name = f.allow_recommending_already_recommended.name
ok_p_name = f.allow_recommending_pending_experiments.name
ok_m_name = f._allow_recommending_already_measured.alias
ok_r_name = f._allow_recommending_already_recommended.alias
ok_p_name = f._allow_recommending_pending_experiments.alias
no_blocked_pending_points = ok_p or (pending_experiments is None)

# If there are no candidate restrictions to be relaxed
Expand Down
9 changes: 8 additions & 1 deletion baybe/serialization/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,12 @@
from cattrs.strategies import configure_union_passthrough

from baybe.utils.basic import find_subclass
from baybe.utils.boolean import is_abstract
from baybe.utils.boolean import (
AutoBool,
is_abstract,
structure_autobool,
unstructure_autobool,
)

if TYPE_CHECKING:
from cattrs.dispatch import UnstructureHook
Expand Down Expand Up @@ -169,3 +174,5 @@ def select_constructor_hook(specs: dict, cls: type[_T]) -> _T:
converter.register_structure_hook(
timedelta, lambda x, _: timedelta(seconds=float(x.removesuffix("s")))
)
converter.register_unstructure_hook(AutoBool, unstructure_autobool)
converter.register_structure_hook(AutoBool, structure_autobool)
12 changes: 12 additions & 0 deletions baybe/utils/boolean.py
Original file line number Diff line number Diff line change
Expand Up @@ -221,3 +221,15 @@ def from_unstructured(cls, value: AutoBool | bool | str | None, /) -> AutoBool:
pass

raise ValueError(f"Cannot convert '{value}' to '{cls.__name__}'.")


def unstructure_autobool(value: AutoBool, /) -> bool | str:
    """Unstructure an :class:`AutoBool`.

    ``AUTO`` is serialized as its raw enum value; ``TRUE``/``FALSE`` are
    serialized as plain Booleans.
    """
    return AutoBool.AUTO.value if value is AutoBool.AUTO else bool(value)


def structure_autobool(raw: bool | str, _, /) -> AutoBool:
    """Structure an :class:`AutoBool`.

    Thin cattrs hook delegating to :meth:`AutoBool.from_unstructured`; the
    second (type) argument required by the hook protocol is ignored.
    """
    result = AutoBool.from_unstructured(raw)
    return result
40 changes: 21 additions & 19 deletions tests/test_campaign.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from baybe.campaign import _EXCLUDED, Campaign
from baybe.constraints.conditions import SubSelectionCondition
from baybe.constraints.discrete import DiscreteExcludeConstraint
from baybe.exceptions import NotEnoughPointsLeftError
from baybe.exceptions import IncompatibilityError, NotEnoughPointsLeftError
from baybe.objectives import DesirabilityObjective, ParetoObjective
from baybe.parameters.numerical import (
NumericalContinuousParameter,
Expand All @@ -33,7 +33,6 @@
GaussianProcessSurrogate,
)
from baybe.targets import BinaryTarget, NumericalTarget
from baybe.utils.basic import UNSPECIFIED
from baybe.utils.dataframe import add_fake_measurements
from tests.conftest import run_iterations

Expand Down Expand Up @@ -119,36 +118,39 @@ def test_candidate_toggling(constraints, exclude, complement):


@pytest.mark.parametrize(
"flag",
("flag", "discrete_value"),
[
"allow_recommending_already_measured",
"allow_recommending_already_recommended",
"allow_recommending_pending_experiments",
["allow_recommending_already_measured", True],
["allow_recommending_already_recommended", False],
["allow_recommending_pending_experiments", False],
],
ids=lambda x: x.removeprefix("allow_recommending_"),
ids=["already_measured", "already_recommended", "pending_experiments"],
)
@pytest.mark.parametrize(
"space_type",
[SearchSpaceType.DISCRETE, SearchSpaceType.CONTINUOUS],
[SearchSpaceType.DISCRETE, SearchSpaceType.CONTINUOUS, SearchSpaceType.HYBRID],
ids=lambda x: x.name,
)
@pytest.mark.parametrize(
"value", [True, False, param(UNSPECIFIED, id=repr(UNSPECIFIED))]
)
def test_setting_allow_flags(flag, space_type, value):
"""Passed allow_* flags are rejected if incompatible with the search space type."""
kwargs = {flag: value}
expect_error = (space_type is SearchSpaceType.DISCRETE) != (
value is not UNSPECIFIED
)
@pytest.mark.parametrize("value", [True, False, "auto"])
def test_setting_allow_flags(flag, space_type, value, discrete_value):
"""Passed allow_* flags are rejected if incompatible with the search space type
but otherwise properly resolved into Booleans.""" # noqa
expect_error = (space_type is not SearchSpaceType.DISCRETE) and (value is False)

if space_type is SearchSpaceType.DISCRETE:
parameter = NumericalDiscreteParameter("p", [0, 1])
else:
parameter = NumericalContinuousParameter("p", [0, 1])
Comment on lines 140 to 143
Copy link

Copilot AI Feb 10, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The test parametrizes space_type with SearchSpaceType.HYBRID, but the constructed parameter only yields either a discrete or continuous search space. As a result, the HYBRID case is not actually exercised (and Campaign.searchspace.type likely won’t be HYBRID). Consider constructing an actual hybrid search space for the HYBRID branch (e.g., by explicitly building a SearchSpace with both discrete and continuous subspaces) so the new HYBRID expectations are truly validated.

Copilot uses AI. Check for mistakes.

with pytest.raises(ValueError) if expect_error else nullcontext():
Campaign(parameter, **kwargs)
with pytest.raises(IncompatibilityError) if expect_error else nullcontext():
campaign = Campaign(parameter, **{flag: value})

if expect_error:
return

fallback = discrete_value if space_type is SearchSpaceType.DISCRETE else True
resolved = value if isinstance(value, bool) else fallback
assert getattr(campaign, flag) == resolved


@pytest.mark.parametrize(
Expand Down
Loading