Skip to content

Commit d931c30

Browse files
shrutipatel31 and facebook-github-bot
authored and committed
Support BOPE experiments in get_trace with preference model utilities (#4792)
Summary: This diff updates the `get_trace` function in `ax/service/utils/best_point.py` to support preference learning (BOPE) experiments with `PreferenceOptimizationConfig`. When a BOPE experiment has an associated PE_EXPERIMENT auxiliary experiment with preference data, `get_trace` now: 1. Fits a PairwiseGP preference model to the PE_EXPERIMENT data 2. Uses the learned preference model to predict utility values for each arm's metric values 3. Returns a trace based on predicted utilities Adds `_compute_utility_from_preference_model()` helper function and corresponding unit tests. Differential Revision: D91073267
1 parent 6d84008 commit d931c30

File tree

2 files changed

+297
-5
lines changed

2 files changed

+297
-5
lines changed

ax/service/tests/test_best_point.py

Lines changed: 174 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,13 +10,23 @@
1010
import pandas as pd
1111
from ax.adapter.registry import Generators
1212
from ax.core.arm import Arm
13+
from ax.core.auxiliary import AuxiliaryExperiment, AuxiliaryExperimentPurpose
1314
from ax.core.batch_trial import BatchTrial
1415
from ax.core.data import Data
15-
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
16+
from ax.core.experiment import Experiment
17+
from ax.core.metric import Metric
18+
from ax.core.objective import MultiObjective, Objective
19+
from ax.core.optimization_config import (
20+
MultiObjectiveOptimizationConfig,
21+
PreferenceOptimizationConfig,
22+
)
23+
from ax.core.parameter import ParameterType, RangeParameter
24+
from ax.core.search_space import SearchSpace
1625
from ax.core.trial import Trial
17-
from ax.exceptions.core import DataRequiredError
26+
from ax.exceptions.core import DataRequiredError, UserInputError
1827
from ax.service.utils.best_point import get_trace
1928
from ax.service.utils.best_point_mixin import BestPointMixin
29+
from ax.utils.common.constants import Keys
2030
from ax.utils.common.testutils import TestCase
2131
from ax.utils.testing.core_stubs import (
2232
get_experiment_with_batch_trial,
@@ -367,3 +377,165 @@ def test_get_best_observed_value(self) -> None:
367377
minimize=True,
368378
)
369379
self.assertEqual(get_best(exp), 10) # 5 and 9 are out of design
380+
381+
def _get_pe_search_space(self) -> SearchSpace:
    """Build the canonical PE_EXPERIMENT search space over metrics m1 and m2.

    Each metric is modeled as a float range parameter on [0.0, 10.0].
    """
    metric_parameters = [
        RangeParameter(
            name=metric_name,
            parameter_type=ParameterType.FLOAT,
            lower=0.0,
            upper=10.0,
        )
        for metric_name in ("m1", "m2")
    ]
    return SearchSpace(parameters=metric_parameters)
399+
400+
def _make_pref_opt_config(self, profile_name: str) -> PreferenceOptimizationConfig:
    """Build a PreferenceOptimizationConfig maximizing metrics m1 and m2.

    Args:
        profile_name: Name of the preference profile (the PE_EXPERIMENT)
            the config should be associated with.
    """
    objectives = [
        Objective(metric=Metric(name=name), minimize=False)
        for name in ("m1", "m2")
    ]
    return PreferenceOptimizationConfig(
        objective=MultiObjective(objectives=objectives),
        preference_profile_name=profile_name,
    )
411+
412+
def _assert_valid_trace(
413+
self,
414+
trace: list[float],
415+
expected_len: int,
416+
check_monotonic: bool = True,
417+
) -> None:
418+
"""Assert trace has expected length, contains floats, and is monotonic."""
419+
self.assertEqual(len(trace), expected_len)
420+
for value in trace:
421+
self.assertIsInstance(value, float)
422+
if check_monotonic:
423+
for i in range(1, len(trace)):
424+
self.assertGreaterEqual(
425+
trace[i],
426+
trace[i - 1],
427+
msg=f"Trace not monotonic at index {i}: {trace}",
428+
)
429+
430+
def test_get_trace_preference_learning_config(self) -> None:
    """Exercise get_trace with a PreferenceOptimizationConfig (BOPE).

    Covers three scenarios: no PE_EXPERIMENT attached, a PE_EXPERIMENT
    attached without preference data, and a PE_EXPERIMENT carrying
    minimal pairwise preference data.
    """
    with self.subTest("without_pe_experiment_raises_error"):
        # No PE_EXPERIMENT is attached, so the named profile cannot exist.
        experiment = get_experiment_with_observations(
            observations=[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
        )
        experiment.name = "main_experiment"
        config = self._make_pref_opt_config(profile_name="nonexistent_profile")

        with self.assertRaisesRegex(
            UserInputError,
            "Preference profile 'nonexistent_profile' not found",
        ):
            get_trace(experiment, config)

    with self.subTest("with_pe_experiment_empty_data_raises_error"):
        experiment = get_experiment_with_observations(
            observations=[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
        )
        experiment.name = "main_experiment_empty"

        # A PE_EXPERIMENT is attached, but it carries no preference
        # comparisons at all (data=None).
        pe_experiment = Experiment(
            name="test_profile_empty",
            search_space=self._get_pe_search_space(),
        )
        experiment.add_auxiliary_experiment(
            auxiliary_experiment=AuxiliaryExperiment(
                experiment=pe_experiment, data=None
            ),
            purpose=AuxiliaryExperimentPurpose.PE_EXPERIMENT,
        )
        config = self._make_pref_opt_config(profile_name="test_profile_empty")

        with self.assertRaisesRegex(
            DataRequiredError,
            "No preference data found in PE_EXPERIMENT",
        ):
            get_trace(experiment, config)

    with self.subTest("with_pe_experiment_valid_data_computes_utility"):
        # With preference data present, get_trace should use the learned
        # preference model (or fall back gracefully) and return a trace.
        experiment = get_experiment_with_observations(
            observations=[[1.0, 2.0], [2.0, 3.0], [3.0, 4.0]],
        )
        experiment.name = "main_experiment_with_pe"

        pe_experiment = Experiment(
            name="test_profile_with_minimal_data",
            search_space=self._get_pe_search_space(),
        )

        # One completed batch trial holding a single pairwise comparison.
        comparison_trial = pe_experiment.new_batch_trial()
        comparison_trial.add_arm(
            Arm(name="0_0", parameters={"m1": 0.5, "m2": 1.0})
        )
        comparison_trial.add_arm(
            Arm(name="0_1", parameters={"m1": 1.0, "m2": 1.5})
        )
        comparison_trial.mark_running(no_runner_required=True).mark_completed()

        # Minimal preference outcome: arm 0_1 preferred (mean=1.0) over
        # arm 0_0 (mean=0.0).
        records = [
            {
                "trial_index": 0,
                "arm_name": arm_name,
                "metric_name": Keys.PAIRWISE_PREFERENCE_QUERY.value,
                "mean": mean,
                "sem": 0.0,
                "metric_signature": Keys.PAIRWISE_PREFERENCE_QUERY.value,
            }
            for arm_name, mean in (("0_0", 0.0), ("0_1", 1.0))
        ]
        preference_data = Data(df=pd.DataFrame.from_records(records))
        pe_experiment.attach_data(preference_data)

        experiment.add_auxiliary_experiment(
            auxiliary_experiment=AuxiliaryExperiment(
                experiment=pe_experiment, data=preference_data
            ),
            purpose=AuxiliaryExperimentPurpose.PE_EXPERIMENT,
        )
        config = self._make_pref_opt_config(
            profile_name="test_profile_with_minimal_data"
        )

        # Even with minimal data the model may fail and fall back
        # gracefully; either way a valid monotonic trace is expected.
        trace = get_trace(experiment, config)
        self._assert_valid_trace(trace, expected_len=3, check_monotonic=True)

0 commit comments

Comments
 (0)