Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 30 additions & 6 deletions src/evidently/llm/optimization/optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -364,10 +364,10 @@ class OptimizerConfig(AutoAliasMixin, EvidentlyBaseModel):
class Config:
is_base_type = True

provider: str = "openai"
"""LLM provider name."""
model: str = "gpt-4o-mini"
"""LLM model name."""
provider: Optional[str] = None
"""LLM provider name. When None, inherited from the executor's judge if available."""
model: Optional[str] = None
"""LLM model name. When None, inherited from the executor's judge if available."""
verbose: bool = False
"""Whether to print optimization progress."""
seed: Optional[int] = None
Expand Down Expand Up @@ -586,14 +586,38 @@ async def new_run(self) -> OptimizerRun:
self.runs.append(run)
return run

def resolve_provider_model(self) -> Tuple[str, str]:
    """Resolve the effective LLM provider and model for this context.

    Explicit values on the config always win. If either is missing, the
    executor's judge (when the executor has one) supplies the missing value,
    so a PromptOptimizer driven by an LLMJudge-based executor inherits that
    judge's provider/model without duplicated configuration. Anything still
    unresolved falls back to the library defaults.

    Returns:
        * Tuple of (provider, model) as concrete strings.
    """
    provider, model = self.config.provider, self.config.model
    if provider is None or model is None:
        # Inherit missing values from the executor's judge, when present.
        executor = self.params.get(Params.Executor)
        judge = getattr(executor, "judge", None) if executor is not None else None
        if judge is not None:
            if provider is None and hasattr(judge, "provider"):
                provider = judge.provider
            if model is None and hasattr(judge, "model"):
                model = judge.model
    # Final fallback: library-wide defaults.
    return provider or "openai", model or "gpt-4o-mini"

@property
def llm_wrapper(self) -> LLMWrapper:
    """Get the LLM wrapper for this context.

    Returns:
        * `LLMWrapper` configured with the resolved provider and model
          (explicit config values, else inherited from the executor's
          judge, else library defaults).
    """
    provider, model = self.resolve_provider_model()
    return get_llm_wrapper(provider, model, self.params[Params.Options])

@property
def options(self) -> Options:
Expand Down
8 changes: 6 additions & 2 deletions src/evidently/llm/optimization/prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -563,8 +563,12 @@ async def _build_judge(self, run: OptimizerRun) -> LLMJudge:
raise OptimizationConfigurationError("Target is required for BlankLLMJudge executor")
inputs = dataset.input_values
labels = target.unique()
model = context.config.model
provider = context.config.provider
if context.config.provider is None or context.config.model is None:
raise OptimizationConfigurationError(
"BlankLLMJudge requires provider and model to be set on the optimizer. "
"Pass provider=... and model=... to PromptOptimizer()."
)
provider, model = context.resolve_provider_model()
if len(labels) < 2:
raise OptimizationConfigurationError(f"Cannot create judge, target column has {len(labels)} labels")
if len(labels) == 2:
Expand Down
46 changes: 46 additions & 0 deletions tests/future/llm/test_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from evidently.llm.optimization.optimizer import OptimizationConfigurationError
from evidently.llm.optimization.optimizer import OptimizerConfig
from evidently.llm.optimization.optimizer import OptimizerContext
from evidently.llm.optimization.optimizer import Params


def test_optimizer_context_set_get_param():
Expand Down Expand Up @@ -44,3 +45,48 @@ async def test_optimizer_context_add_log_and_get_log():
assert run.get_log("logid") == log
with pytest.raises(KeyError):
run.get_log("notfound")


def test_resolve_provider_model_from_config():
    """Explicit provider/model on config are returned as-is."""
    context = OptimizerContext(
        config=OptimizerConfig(provider="anthropic", model="claude-3-haiku"),
        params={Params.Options: MagicMock()},
        runs=[],
    )
    context.locked = True
    # No executor is set, so the config values must come back unchanged.
    assert context.resolve_provider_model() == ("anthropic", "claude-3-haiku")


def test_resolve_provider_model_inherits_from_executor_judge():
    """When config has no provider/model, they are inherited from the executor's judge."""
    judge = MagicMock()
    judge.provider = "vertex_ai"
    judge.model = "gemini-2.5-flash"
    executor = MagicMock()
    executor.judge = judge

    context = OptimizerContext(
        config=OptimizerConfig(),
        params={Params.Options: MagicMock(), Params.Executor: executor},
        runs=[],
    )
    context.locked = True
    # Config left blank: both values should come from the judge.
    assert context.resolve_provider_model() == ("vertex_ai", "gemini-2.5-flash")


def test_resolve_provider_model_falls_back_to_openai():
    """With no config and no executor judge, defaults to openai/gpt-4o-mini."""
    context = OptimizerContext(
        config=OptimizerConfig(),
        params={Params.Options: MagicMock()},
        runs=[],
    )
    context.locked = True
    resolved = context.resolve_provider_model()
    # Nothing to inherit from: library defaults apply.
    assert resolved == ("openai", "gpt-4o-mini")