Commit a970c06

shrutipatel31 authored and facebook-github-bot committed
Support BOPE experiments in get_trace with preference model utilities (facebook#4792)
Summary:

This diff updates the `get_trace` function in `ax/service/utils/best_point.py` to support preference learning (BOPE) experiments with `PreferenceOptimizationConfig`. When a BOPE experiment has an associated PE_EXPERIMENT auxiliary experiment with preference data, `get_trace` now:

1. Fits a PairwiseGP preference model to the PE_EXPERIMENT data.
2. Uses the learned preference model to predict utility values for each arm's metric values.
3. Returns a trace based on the predicted utilities.

If the PE_EXPERIMENT is missing or has no data, the function gracefully falls back to standard hypervolume computation for multi-objective optimization.

Adds a `_compute_utility_from_preference_model()` helper function and corresponding unit tests.

Differential Revision: D91073267
1 parent 0311f8d commit a970c06
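As a rough, standalone illustration of steps 1-2 in the summary (this is not the code in this commit: the real `_compute_utility_from_preference_model()` lives in `best_point.py`, whose diff is not shown below, so all inputs, shapes, and arguments here are assumptions), fitting a BoTorch PairwiseGP to pairwise comparisons and scoring arm metric values might look like this:

    # Sketch only: NOT the Ax implementation. Inputs and shapes are illustrative.
    import torch
    from botorch.fit import fit_gpytorch_mll
    from botorch.models.pairwise_gp import (
        PairwiseGP,
        PairwiseLaplaceMarginalLogLikelihood,
    )

    # Metric vectors (m1, m2) that were shown to the decision-maker.
    datapoints = torch.tensor([[0.5, 1.0], [1.0, 1.5]], dtype=torch.double)
    # One comparison per row: [winner_index, loser_index] into `datapoints`.
    comparisons = torch.tensor([[1, 0]], dtype=torch.long)

    model = PairwiseGP(datapoints, comparisons)
    mll = PairwiseLaplaceMarginalLogLikelihood(model.likelihood, model)
    fit_gpytorch_mll(mll)  # can be numerically fragile with this little data

    # Predicted latent utility at each arm's observed metric values; a running
    # max over these per-arm utilities would form the returned trace.
    arm_metrics = torch.tensor(
        [[1.0, 2.0], [2.0, 3.0], [3.0, 4.0]], dtype=torch.double
    )
    utility = model.posterior(arm_metrics).mean.squeeze(-1)

With as little data as the test below attaches (a single comparison), the fit can fail numerically, which is why the test only requires that `get_trace` degrade gracefully.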

File tree

2 files changed (+302, -3 lines)

ax/service/tests/test_best_point.py

Lines changed: 171 additions & 1 deletion
@@ -10,13 +10,23 @@
 import pandas as pd
 from ax.adapter.registry import Generators
 from ax.core.arm import Arm
+from ax.core.auxiliary import AuxiliaryExperiment, AuxiliaryExperimentPurpose
 from ax.core.batch_trial import BatchTrial
 from ax.core.data import Data
-from ax.core.optimization_config import MultiObjectiveOptimizationConfig
+from ax.core.experiment import Experiment
+from ax.core.metric import Metric
+from ax.core.objective import MultiObjective, Objective
+from ax.core.optimization_config import (
+    MultiObjectiveOptimizationConfig,
+    PreferenceOptimizationConfig,
+)
+from ax.core.parameter import ParameterType, RangeParameter
+from ax.core.search_space import SearchSpace
 from ax.core.trial import Trial
 from ax.exceptions.core import DataRequiredError
 from ax.service.utils.best_point import get_trace
 from ax.service.utils.best_point_mixin import BestPointMixin
+from ax.utils.common.constants import Keys
 from ax.utils.common.testutils import TestCase
 from ax.utils.testing.core_stubs import (
     get_experiment_with_batch_trial,
@@ -367,3 +377,163 @@ def test_get_best_observed_value(self) -> None:
             minimize=True,
         )
         self.assertEqual(get_best(exp), 10)  # 5 and 9 are out of design
+
+    def _get_pe_search_space(self) -> SearchSpace:
+        """Create a standard PE_EXPERIMENT search space with m1 and m2 parameters."""
+        return SearchSpace(
+            parameters=[
+                RangeParameter(
+                    name="m1",
+                    parameter_type=ParameterType.FLOAT,
+                    lower=0.0,
+                    upper=10.0,
+                ),
+                RangeParameter(
+                    name="m2",
+                    parameter_type=ParameterType.FLOAT,
+                    lower=0.0,
+                    upper=10.0,
+                ),
+            ]
+        )
+
+    def _make_pref_opt_config(self, profile_name: str) -> PreferenceOptimizationConfig:
+        """Create a PreferenceOptimizationConfig with m1 and m2 objectives."""
+        return PreferenceOptimizationConfig(
+            objective=MultiObjective(
+                objectives=[
+                    Objective(metric=Metric(name="m1"), minimize=False),
+                    Objective(metric=Metric(name="m2"), minimize=False),
+                ]
+            ),
+            preference_profile_name=profile_name,
+        )
+
+    def _assert_valid_trace(
+        self,
+        trace: list[float],
+        expected_len: int,
+        check_monotonic: bool = True,
+    ) -> None:
+        """Assert trace has expected length, contains floats, and is monotonic."""
+        self.assertEqual(len(trace), expected_len)
+        for value in trace:
+            self.assertIsInstance(value, float)
+        if check_monotonic:
+            for i in range(1, len(trace)):
+                self.assertGreaterEqual(
+                    trace[i],
+                    trace[i - 1],
+                    msg=f"Trace not monotonic at index {i}: {trace}",
+                )
+
+    def test_get_trace_preference_learning_config(self) -> None:
+        """Test that get_trace works correctly with PreferenceOptimizationConfig.
+
+        This test verifies various scenarios for BOPE experiments,
+        including cases with and without PE_EXPERIMENT data.
+        """
+        with self.subTest("without_pe_experiment_falls_back_to_hypervolume"):
+            # Setup: Create a multi-objective experiment WITHOUT PE_EXPERIMENT
+            exp = get_experiment_with_observations(
+                observations=[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
+            )
+            exp.name = "main_experiment"
+            pref_opt_config = self._make_pref_opt_config(
+                profile_name="nonexistent_profile"
+            )
+
+            # Execute: Should fall back to hypervolume computation
+            trace = get_trace(exp, pref_opt_config)
+
+            # Assert: Verify trace is valid and monotonically increasing
+            self._assert_valid_trace(trace, expected_len=3, check_monotonic=True)
+
+        with self.subTest("with_pe_experiment_empty_data_falls_back"):
+            # Setup: Create main experiment
+            exp = get_experiment_with_observations(
+                observations=[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
+            )
+            exp.name = "main_experiment_empty"
+
+            # Setup: Create PE_EXPERIMENT with no preference comparisons
+            pe_experiment = Experiment(
+                name="test_profile_empty",
+                search_space=self._get_pe_search_space(),
+            )
+
+            # Setup: Attach PE_EXPERIMENT without any data
+            aux_exp = AuxiliaryExperiment(experiment=pe_experiment, data=None)
+            exp.add_auxiliary_experiment(
+                auxiliary_experiment=aux_exp,
+                purpose=AuxiliaryExperimentPurpose.PE_EXPERIMENT,
+            )
+            pref_opt_config = self._make_pref_opt_config(
+                profile_name="test_profile_empty"
+            )
+
+            # Execute: Should fall back to hypervolume due to empty data
+            trace = get_trace(exp, pref_opt_config)
+
+            # Assert: Verify trace is valid
+            self._assert_valid_trace(trace, expected_len=3, check_monotonic=False)
+
+        with self.subTest("with_pe_experiment_valid_data_computes_utility"):
+            # This subtest verifies that when PE_EXPERIMENT exists with data,
+            # the code attempts to use the preference model (may fall back).
+
+            # Setup: Create main experiment with tracking data
+            exp = get_experiment_with_observations(
+                observations=[[1.0, 2.0], [2.0, 3.0], [3.0, 4.0]],
+            )
+            exp.name = "main_experiment_with_pe"
+
+            # Setup: Create PE_EXPERIMENT with minimal preference data
+            pe_experiment = Experiment(
+                name="test_profile_with_minimal_data",
+                search_space=self._get_pe_search_space(),
+            )
+
+            # Setup: Add one pairwise preference comparison (minimal data)
+            trial1 = pe_experiment.new_batch_trial()
+            trial1.add_arm(Arm(name="0_0", parameters={"m1": 0.5, "m2": 1.0}))
+            trial1.add_arm(Arm(name="0_1", parameters={"m1": 1.0, "m2": 1.5}))
+            trial1.mark_running(no_runner_required=True).mark_completed()
+
+            # Setup: Create minimal preference data
+            pe_data_records = [
+                {
+                    "trial_index": 0,
+                    "arm_name": "0_0",
+                    "metric_name": Keys.PAIRWISE_PREFERENCE_QUERY.value,
+                    "mean": 0.0,
+                    "sem": 0.0,
+                    "metric_signature": Keys.PAIRWISE_PREFERENCE_QUERY.value,
+                },
+                {
+                    "trial_index": 0,
+                    "arm_name": "0_1",
+                    "metric_name": Keys.PAIRWISE_PREFERENCE_QUERY.value,
+                    "mean": 1.0,
+                    "sem": 0.0,
+                    "metric_signature": Keys.PAIRWISE_PREFERENCE_QUERY.value,
+                },
+            ]
+            pe_data = Data(df=pd.DataFrame.from_records(pe_data_records))
+            pe_experiment.attach_data(pe_data)
+
+            # Setup: Attach PE_EXPERIMENT to main experiment
+            aux_exp = AuxiliaryExperiment(experiment=pe_experiment, data=pe_data)
+            exp.add_auxiliary_experiment(
+                auxiliary_experiment=aux_exp,
+                purpose=AuxiliaryExperimentPurpose.PE_EXPERIMENT,
+            )
+            pref_opt_config = self._make_pref_opt_config(
+                profile_name="test_profile_with_minimal_data"
+            )
+
+            # Execute: With minimal data, model may fail and fall back gracefully
+            trace = get_trace(exp, pref_opt_config)
+
+            # Assert: Verify trace is valid and monotonically increasing
+            self._assert_valid_trace(trace, expected_len=3, check_monotonic=True)
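A note on the encoding in the test's pe_data_records: each pairwise comparison is stored as a two-arm batch trial, and the PAIRWISE_PREFERENCE_QUERY metric marks the chosen arm with mean 1.0 and the rejected arm with mean 0.0. The helper below is hypothetical (it is not part of Ax) and only sketches how such a dataframe maps to the (winner, loser) index pairs a pairwise model consumes:

    import pandas as pd

    def comparisons_from_pe_df(
        df: pd.DataFrame, arm_order: list[str]
    ) -> list[tuple[int, int]]:
        """Hypothetical helper: convert 0/1 preference rows into
        (winner_index, loser_index) pairs indexing into arm_order."""
        pairs = []
        for _, group in df.groupby("trial_index"):
            winner = group.loc[group["mean"] == 1.0, "arm_name"].item()
            loser = group.loc[group["mean"] == 0.0, "arm_name"].item()
            pairs.append((arm_order.index(winner), arm_order.index(loser)))
        return pairs

    # The test's single comparison: arm "0_1" (mean 1.0) beat arm "0_0" (mean 0.0).
    df = pd.DataFrame.from_records(
        [
            {"trial_index": 0, "arm_name": "0_0", "mean": 0.0},
            {"trial_index": 0, "arm_name": "0_1", "mean": 1.0},
        ]
    )
    print(comparisons_from_pe_df(df, arm_order=["0_0", "0_1"]))  # [(1, 0)]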
