
Commit a1e0b1a

Add LLM Answer Match evaluator
1 parent 6d75898 commit a1e0b1a

File tree

22 files changed: +2040 -199 lines changed


evaluators/azure/poetry.lock

Lines changed: 11 additions & 10 deletions
Some generated files are not rendered by default.

evaluators/example/poetry.lock

Lines changed: 11 additions & 10 deletions
Some generated files are not rendered by default.

evaluators/huggingface/poetry.lock

Lines changed: 12 additions & 11 deletions
Some generated files are not rendered by default.

evaluators/huggingface/pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ license = "MIT"
 python = "^3.11"
 langevals-core = { path = "../../langevals_core", develop = true }
 httpx = "^0.27.0"
-litellm = "^1.52.14"
+litellm = "^1.53.7"

 [tool.poetry.group.test.dependencies]
 pytest = "^7.4.2"
New file

Lines changed: 115 additions & 0 deletions
@@ -0,0 +1,115 @@

import litellm
from pydantic import Field
from typing import Optional
import dspy

from langevals_core.base_evaluator import (
    BaseEvaluator,
    EvaluatorEntry,
    EvaluationResult,
    LLMEvaluatorSettings,
    SingleEvaluationResult,
    EvaluationResultSkipped,
    Money,
)
from litellm.cost_calculator import cost_per_token


class LLMAnswerMatchEntry(EvaluatorEntry):
    input: Optional[str] = Field(default="")
    output: Optional[str] = Field(default="")
    expected_output: Optional[str] = Field(default="")


class LLMAnswerMatchSettings(LLMEvaluatorSettings):
    pass


class LLMAnswerMatchResult(EvaluationResult):
    passed: bool = Field(default=True)
    details: Optional[str] = Field(default=None)


class LLMAnswerMatchSignature(dspy.Signature):
    """Verify that the predicted answer matches the gold answer for the question. Style does not matter, for example the gold answer may be more direct while the predicted answer more verbose and still be correct."""

    question = dspy.InputField()
    gold_answer = dspy.InputField(desc="correct answer for question")
    predicted_answer = dspy.InputField(desc="predicted answer for question")
    reasoning = dspy.OutputField(desc="reasoning for the answer")
    is_correct = dspy.OutputField(desc="True or False")


class LLMAnswerMatchEvaluator(
    BaseEvaluator[
        LLMAnswerMatchEntry,
        LLMAnswerMatchSettings,
        LLMAnswerMatchResult,
    ]
):
    """
    Uses an LLM to check if the generated output answers a question correctly the same way as the expected output, even if their style is different.
    """

    name = "LLM Answer Match"
    category = "quality"
    env_vars = []
    is_guardrail = False

    def evaluate(self, entry: LLMAnswerMatchEntry) -> SingleEvaluationResult:
        total_tokens = len(
            litellm.encode(
                model=self.settings.model,
                text=f"{entry.input} {entry.output} {entry.expected_output}",
            )
        )
        max_tokens = min(self.settings.max_tokens, 32768)
        if total_tokens > max_tokens:
            return EvaluationResultSkipped(
                details=f"Total tokens exceed the maximum of {max_tokens}: {total_tokens}"
            )

        lm = model_to_dspy_lm(self.settings.model)
        dspy.settings.configure(experimental=True)

        answer_match = dspy.Predict(LLMAnswerMatchSignature)
        answer_match.set_lm(lm)

        result = answer_match(
            question=entry.input,
            gold_answer=entry.expected_output,
            predicted_answer=entry.output,
        )

        last_response = lm.history[-1]
        cost = None
        if last_response:
            input_cost, output_cost = cost_per_token(
                model=self.settings.model,
                prompt_tokens=last_response.get("usage", {}).get("prompt_tokens", 0),
                completion_tokens=last_response.get("usage", {}).get(
                    "completion_tokens", 0
                ),
            )
            cost = input_cost + output_cost

        return LLMAnswerMatchResult(
            passed="true" in str(result.is_correct).lower(),
            details=result.reasoning,
            cost=Money(amount=cost, currency="USD") if cost is not None else None,
        )


def model_to_dspy_lm(model: str) -> dspy.LM:
    llm_params = {}
    if "azure/" in model:
        llm_params["api_version"] = "2023-07-01-preview"

    lm = dspy.LM(
        model=model,
        temperature=0,
        drop_params=True,
        model_type="chat",
        **llm_params,
    )
    return lm
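
For context (not part of the commit), below is a minimal usage sketch of the evaluator added in this file. It assumes the new module is importable as llm_answer_match (the actual path is not shown in this diff), that the BaseEvaluator constructor accepts a settings keyword argument, and that the default model from LLMEvaluatorSettings is configured and reachable; all three are assumptions, not details taken from the commit.

# Hypothetical usage sketch, not part of the commit.
# Assumes the new file is importable as `llm_answer_match` (module name is a guess)
# and that LLMEvaluatorSettings supplies a default, reachable model.
from llm_answer_match import (
    LLMAnswerMatchEntry,
    LLMAnswerMatchEvaluator,
    LLMAnswerMatchSettings,
)

evaluator = LLMAnswerMatchEvaluator(settings=LLMAnswerMatchSettings())
result = evaluator.evaluate(
    LLMAnswerMatchEntry(
        input="What is the boiling point of water at sea level?",
        output="Water boils at 100 degrees Celsius at sea level.",
        expected_output="100 C",
    )
)

# `result` is an LLMAnswerMatchResult (or EvaluationResultSkipped when the combined
# inputs exceed the token limit); `passed` is True when the LLM judges the two
# answers equivalent despite differences in style.
print(result.passed, result.details)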
