Skip to content

Commit 24429cf

Browse files
authored
Adding Hallucination Example (#23)
1 parent 1c00878 commit 24429cf

File tree

4 files changed

+125
-1
lines changed

4 files changed

+125
-1
lines changed

pyproject.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ dependencies = [
4747
"addict>=2.4.0",
4848
"deepdiff>=6.0.0",
4949
"pandas>=1.5.0",
50+
"fireworks-ai>=0.19.12",
5051
]
5152

5253
[project.urls]
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
{"knowledge": "Arthur's Magazine (1844–1846) was an American literary periodical published in Philadelphia in the 19th century.First for Women is a woman's magazine published by Bauer Media Group in the USA.", "question": "Which magazine was started first Arthur's Magazine or First for Women?", "right_answer": "Arthur's Magazine", "hallucinated_answer": "First for Women was started first."}
2+
{"knowledge": "The Oberoi family is an Indian family that is famous for its involvement in hotels, namely through The Oberoi Group.The Oberoi Group is a hotel company with its head office in Delhi.", "question": "The Oberoi family is part of a hotel company that has a head office in what city?", "right_answer": "Delhi", "hallucinated_answer": "The Oberoi family's hotel company is based in Mumbai."}
3+
{"knowledge": "Allison Beth \"Allie\" Goertz (born March 2, 1991) is an American musician. Goertz is known for her satirical songs based on various pop culture topics. Her videos are posted on YouTube under the name of Cossbysweater.Milhouse Mussolini van Houten is a fictional character featured in the animated television series \"The Simpsons\", voiced by Pamela Hayden, and created by Matt Groening who named the character after President Richard Nixon's middle name.", "question": "Musician and satirist Allie Goertz wrote a song about the \"The Simpsons\" character Milhouse, who Matt Groening named after who?", "right_answer": "President Richard Nixon", "hallucinated_answer": "Allie Goertz wrote a song about Milhouse, a popular TV character, named after an influential political figure."}

tests/pytest/test_hallucination.py

Lines changed: 118 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,118 @@
1+
"""
2+
Hallucination detection test using LLM-as-judge.
3+
4+
This test demonstrates how to detect factual inaccuracies in model responses
5+
by comparing them against provided knowledge using an LLM judge, similar to
6+
tau's evaluate_nl_assertions approach.
7+
"""
8+
9+
import json
10+
from typing import Any, Dict, List
11+
12+
from fireworks import LLM
13+
14+
from eval_protocol.models import EvaluateResult, EvaluationRow, Message, MetricResult
15+
from eval_protocol.pytest import default_single_turn_rollout_processor, evaluation_test
16+
17+
# Serverless judge model shared by the evaluation below (LLM-as-judge).
judge_llm = LLM(deployment_type="serverless", model="accounts/fireworks/models/kimi-k2-instruct")
18+
19+
def hallucination_dataset_adapter(data: List[Dict[str, Any]]) -> List[EvaluationRow]:
    """Convert HaluEval-style records into EvaluationRow objects.

    Each record carries background knowledge and a question; the two are
    combined into a single user message, and the record's reference answer
    becomes the row's ground truth.
    """
    rows: List[EvaluationRow] = []
    for record in data:
        prompt = f"Knowledge: {record['knowledge']}\n\nQuestion: {record['question']}"
        rows.append(
            EvaluationRow(
                messages=[Message(role="user", content=prompt)],
                ground_truth=record["right_answer"],
            )
        )
    return rows
28+
29+
30+
@evaluation_test(
    input_dataset=["tests/pytest/data/halueval_sample_dataset.jsonl"],
    dataset_adapter=hallucination_dataset_adapter,
    model=["accounts/fireworks/models/kimi-k2-instruct"],
    rollout_input_params=[{"temperature": 0.0, "max_tokens": 512}],
    rollout_processor=default_single_turn_rollout_processor,
    threshold_of_success=1.0,
    num_runs=1,
    mode="pointwise",
)
def test_hallucination_detection(row: EvaluationRow) -> EvaluationRow:
    """
    Test for response correctness using LLM-as-judge.

    Compares the assistant's final message against the row's ground truth by
    asking a judge model for a JSON verdict, then attaches an EvaluateResult
    to ``row.evaluation_result`` and returns the row.
    """
    messages = row.messages
    assistant_response = messages[-1].content

    if not assistant_response:
        # BUG FIX: this branch previously returned a bare EvaluateResult,
        # breaking the declared EvaluationRow return contract that the
        # success path (and the pointwise pipeline) relies on. Attach the
        # result to the row instead, mirroring the normal path.
        row.evaluation_result = EvaluateResult(score=0.0, reason="❌ No assistant response found")
        return row

    correct_answer = row.ground_truth

    system_prompt = """
TASK
- You will be given an assistant's response and the correct answer.
- Your job is to evaluate whether the assistant's response is factually consistent with the correct answer.
- Grade whether the assistant got it right or wrong.

FORMAT
- Your response should be a JSON object with the following fields:
- `reasoning`: a short explanation for your classification
- `is_correct`: `true` if the assistant's response matches the correct answer, `false` otherwise

Example response structure:
{
"reasoning": "<reasoning trace>",
"is_correct": <true or false>
}
"""

    user_prompt = f"""
assistant_response:
{assistant_response}

correct_answer:
{correct_answer}
"""

    try:
        response = judge_llm.chat.completions.create(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            temperature=0.1,
            max_tokens=500,
        )

        # The judge is instructed to reply with a JSON object; any deviation
        # (markdown fences, truncation) raises and falls into the except arm.
        result_data = json.loads(response.choices[0].message.content)
        is_correct = bool(result_data.get("is_correct", False))
        reasoning = result_data.get("reasoning", "Could not parse reasoning")

    except Exception as e:
        # Fallback if the judge call or JSON parsing fails: score as
        # incorrect rather than crashing the whole evaluation run.
        is_correct = False
        reasoning = f"Evaluation failed: {str(e)}"

    score = 1.0 if is_correct else 0.0
    assessment = "✅ Response is correct" if is_correct else "❌ Response is incorrect"

    row.evaluation_result = EvaluateResult(
        score=score,
        reason=f"{assessment}\nReasoning: {reasoning}",
        metrics={
            "llm_judge": MetricResult(
                score=score,
                reason=reasoning,
                is_score_valid=True,
            )
        },
    )

    return row

uv.lock

Lines changed: 3 additions & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)