Skip to content

Commit c4e22e0

Browse files
shrutipatel31 authored and facebook-github-bot committed
Support BOPE experiments in get_trace with preference model utilities (#4792)
Summary: This diff updates the `get_trace` function in `ax/service/utils/best_point.py` to support preference learning (BOPE) experiments with `PreferenceOptimizationConfig`. When a BOPE experiment has an associated PE_EXPERIMENT auxiliary experiment with preference data, `get_trace` now: 1. Fits a PairwiseGP preference model to the PE_EXPERIMENT data 2. Uses the learned preference model to predict utility values for each arm's metric values 3. Returns a trace based on predicted utilities Adds `_compute_utility_from_preference_model()` helper function and corresponding unit tests. Differential Revision: D91073267
1 parent 71fbd1d commit c4e22e0

File tree

2 files changed

+352
-5
lines changed

2 files changed

+352
-5
lines changed

ax/service/tests/test_best_point.py

Lines changed: 228 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,13 +10,23 @@
1010
import pandas as pd
1111
from ax.adapter.registry import Generators
1212
from ax.core.arm import Arm
13+
from ax.core.auxiliary import AuxiliaryExperiment, AuxiliaryExperimentPurpose
1314
from ax.core.batch_trial import BatchTrial
1415
from ax.core.data import Data
15-
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
16+
from ax.core.experiment import Experiment
17+
from ax.core.metric import Metric
18+
from ax.core.objective import MultiObjective, Objective
19+
from ax.core.optimization_config import (
20+
MultiObjectiveOptimizationConfig,
21+
PreferenceOptimizationConfig,
22+
)
23+
from ax.core.parameter import ParameterType, RangeParameter
24+
from ax.core.search_space import SearchSpace
1625
from ax.core.trial import Trial
17-
from ax.exceptions.core import DataRequiredError
26+
from ax.exceptions.core import DataRequiredError, UserInputError
1827
from ax.service.utils.best_point import get_trace
1928
from ax.service.utils.best_point_mixin import BestPointMixin
29+
from ax.utils.common.constants import Keys
2030
from ax.utils.common.testutils import TestCase
2131
from ax.utils.testing.core_stubs import (
2232
get_experiment_with_batch_trial,
@@ -367,3 +377,219 @@ def test_get_best_observed_value(self) -> None:
367377
minimize=True,
368378
)
369379
self.assertEqual(get_best(exp), 10) # 5 and 9 are out of design
380+
381+
def _get_pe_search_space(self) -> SearchSpace:
    """Create a standard PE_EXPERIMENT search space with m1 and m2 parameters."""
    # Both metric-valued parameters share the same FLOAT range [0, 10],
    # so build them uniformly rather than spelling each one out.
    metric_parameters = [
        RangeParameter(
            name=metric_name,
            parameter_type=ParameterType.FLOAT,
            lower=0.0,
            upper=10.0,
        )
        for metric_name in ("m1", "m2")
    ]
    return SearchSpace(parameters=metric_parameters)
399+
400+
def _make_pref_opt_config(self, profile_name: str) -> PreferenceOptimizationConfig:
    """Create a PreferenceOptimizationConfig with m1 and m2 objectives."""
    # Both metrics are maximized; the named preference profile supplies
    # the learned trade-off between them.
    maximization_objectives = [
        Objective(metric=Metric(name=metric_name), minimize=False)
        for metric_name in ("m1", "m2")
    ]
    return PreferenceOptimizationConfig(
        objective=MultiObjective(objectives=maximization_objectives),
        preference_profile_name=profile_name,
    )
411+
412+
def _assert_valid_trace(self, trace: list[float], expected_len: int) -> None:
413+
"""Assert trace has expected length, contains floats, is non-decreasing and has
414+
more than one unique value."""
415+
self.assertEqual(len(trace), expected_len)
416+
for value in trace:
417+
self.assertIsInstance(value, float)
418+
for i in range(1, len(trace)):
419+
self.assertGreaterEqual(
420+
trace[i],
421+
trace[i - 1],
422+
msg=f"Trace not monotonically increasing at index {i}: {trace}",
423+
)
424+
unique_values = set(trace)
425+
self.assertGreater(
426+
len(unique_values),
427+
1,
428+
msg=f"Trace has only trivial values (all same): {trace}",
429+
)
430+
431+
def test_get_trace_preference_learning_config(self) -> None:
    """Test that get_trace works correctly with PreferenceOptimizationConfig.

    This test verifies various scenarios for BOPE experiments,
    including cases with and without PE_EXPERIMENT data.

    Covered scenarios:
        1. No PE_EXPERIMENT auxiliary experiment attached -> UserInputError.
        2. PE_EXPERIMENT attached but with no preference data -> DataRequiredError.
        3. PE_EXPERIMENT with valid pairwise preference data -> a valid,
           monotonically non-decreasing, non-trivial utility trace.
    """
    with self.subTest("without_pe_experiment_raises_error"):
        # Setup: Create a multi-objective experiment WITHOUT PE_EXPERIMENT
        exp = get_experiment_with_observations(
            observations=[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
        )
        exp.name = "main_experiment"
        pref_opt_config = self._make_pref_opt_config(
            profile_name="nonexistent_profile"
        )

        # Execute & Assert: Should raise UserInputError without PE_EXPERIMENT
        with self.assertRaisesRegex(
            UserInputError,
            "Preference profile 'nonexistent_profile' not found",
        ):
            get_trace(exp, pref_opt_config)

    with self.subTest("with_pe_experiment_empty_data_raises_error"):
        # Setup: Create main experiment
        exp = get_experiment_with_observations(
            observations=[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
        )
        exp.name = "main_experiment_empty"

        # Setup: Create PE_EXPERIMENT with no preference comparisons
        pe_experiment = Experiment(
            name="test_profile_empty",
            search_space=self._get_pe_search_space(),
        )

        # Setup: Attach PE_EXPERIMENT without any data
        aux_exp = AuxiliaryExperiment(experiment=pe_experiment, data=None)
        exp.add_auxiliary_experiment(
            auxiliary_experiment=aux_exp,
            purpose=AuxiliaryExperimentPurpose.PE_EXPERIMENT,
        )
        pref_opt_config = self._make_pref_opt_config(
            profile_name="test_profile_empty"
        )

        # Execute & Assert: Should raise DataRequiredError due to empty data
        with self.assertRaisesRegex(
            DataRequiredError,
            "No preference data found in preference profile",
        ):
            get_trace(exp, pref_opt_config)

    with self.subTest("with_pe_experiment_valid_data_computes_utility"):
        # This subtest verifies that when PE_EXPERIMENT exists with valid data,
        # the code uses the preference model to compute utility-based traces.

        # Setup: Create main experiment with tracking data
        # Values are spread across the parameter space to get different utilities
        exp = get_experiment_with_observations(
            observations=[[1.0, 1.0], [5.0, 5.0], [9.0, 9.0]],
        )
        exp.name = "main_experiment_with_pe"

        # Setup: Create PE_EXPERIMENT with sufficient preference data
        pe_experiment = Experiment(
            name="test_profile_with_data",
            search_space=self._get_pe_search_space(),
        )

        # Setup: Add multiple pairwise preference comparisons
        # Trial 1: User prefers higher values (0_1 over 0_0)
        trial1 = pe_experiment.new_batch_trial()
        trial1.add_arm(Arm(name="0_0", parameters={"m1": 1.0, "m2": 1.0}))
        trial1.add_arm(Arm(name="0_1", parameters={"m1": 5.0, "m2": 5.0}))
        trial1.mark_running(no_runner_required=True).mark_completed()

        # Trial 2: Another comparison reinforcing preference for higher values
        trial2 = pe_experiment.new_batch_trial()
        trial2.add_arm(Arm(name="1_0", parameters={"m1": 3.0, "m2": 3.0}))
        trial2.add_arm(Arm(name="1_1", parameters={"m1": 8.0, "m2": 8.0}))
        trial2.mark_running(no_runner_required=True).mark_completed()

        # Trial 3: Another comparison to strengthen the preference model
        trial3 = pe_experiment.new_batch_trial()
        trial3.add_arm(Arm(name="2_0", parameters={"m1": 2.0, "m2": 2.0}))
        trial3.add_arm(Arm(name="2_1", parameters={"m1": 7.0, "m2": 7.0}))
        trial3.mark_running(no_runner_required=True).mark_completed()

        # Setup: Create preference data indicating user prefers higher metric values
        # In each trial, the second arm (higher values) is preferred (mean=1.0)
        # NOTE(review): each record uses PAIRWISE_PREFERENCE_QUERY for both
        # metric_name and metric_signature, with mean 0.0/1.0 marking the
        # comparison outcome — presumably 1.0 = preferred; confirm against
        # the preference-model fitting code in best_point.py.
        pe_data_records = [
            # Trial 1
            {
                "trial_index": 0,
                "arm_name": "0_0",
                "metric_name": Keys.PAIRWISE_PREFERENCE_QUERY.value,
                "mean": 0.0,
                "sem": 0.0,
                "metric_signature": Keys.PAIRWISE_PREFERENCE_QUERY.value,
            },
            {
                "trial_index": 0,
                "arm_name": "0_1",
                "metric_name": Keys.PAIRWISE_PREFERENCE_QUERY.value,
                "mean": 1.0,
                "sem": 0.0,
                "metric_signature": Keys.PAIRWISE_PREFERENCE_QUERY.value,
            },
            # Trial 2
            {
                "trial_index": 1,
                "arm_name": "1_0",
                "metric_name": Keys.PAIRWISE_PREFERENCE_QUERY.value,
                "mean": 0.0,
                "sem": 0.0,
                "metric_signature": Keys.PAIRWISE_PREFERENCE_QUERY.value,
            },
            {
                "trial_index": 1,
                "arm_name": "1_1",
                "metric_name": Keys.PAIRWISE_PREFERENCE_QUERY.value,
                "mean": 1.0,
                "sem": 0.0,
                "metric_signature": Keys.PAIRWISE_PREFERENCE_QUERY.value,
            },
            # Trial 3
            {
                "trial_index": 2,
                "arm_name": "2_0",
                "metric_name": Keys.PAIRWISE_PREFERENCE_QUERY.value,
                "mean": 0.0,
                "sem": 0.0,
                "metric_signature": Keys.PAIRWISE_PREFERENCE_QUERY.value,
            },
            {
                "trial_index": 2,
                "arm_name": "2_1",
                "metric_name": Keys.PAIRWISE_PREFERENCE_QUERY.value,
                "mean": 1.0,
                "sem": 0.0,
                "metric_signature": Keys.PAIRWISE_PREFERENCE_QUERY.value,
            },
        ]
        pe_data = Data(df=pd.DataFrame.from_records(pe_data_records))
        pe_experiment.attach_data(pe_data)

        # Setup: Attach PE_EXPERIMENT to main experiment
        aux_exp = AuxiliaryExperiment(experiment=pe_experiment, data=pe_data)
        exp.add_auxiliary_experiment(
            auxiliary_experiment=aux_exp,
            purpose=AuxiliaryExperimentPurpose.PE_EXPERIMENT,
        )
        pref_opt_config = self._make_pref_opt_config(
            profile_name="test_profile_with_data"
        )

        # Execute: With valid data, model computes utility-based trace
        trace = get_trace(exp, pref_opt_config)

        # Assert: Verify trace is valid, monotonically increasing, and non-trivial
        self._assert_valid_trace(
            trace,
            expected_len=3,
        )

0 commit comments

Comments
 (0)