-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmtbench.py
More file actions
168 lines (134 loc) · 6.19 KB
/
mtbench.py
File metadata and controls
168 lines (134 loc) · 6.19 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
import json
import os
import re

from openai import OpenAI
# Initialize the OpenAI client.  The API key is read from the environment
# (OPENAI_API_KEY) so no secret is hard-coded in source; the original line
# `api_key=,` was a syntax error that prevented the file from parsing.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
)
def format_prompt(question, answer):
    """Build the LLM-as-judge evaluation prompt for a question/answer pair.

    Args:
        question: The user question that was posed to the assistant.
        answer: The assistant's reply to be judged.

    Returns:
        The fully assembled judge prompt as a single string, instructing the
        judge to end with a "Rating: [[n]]" marker.
    """
    return (
        "[Instruction]\n"
        "Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. "
        "Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. "
        "Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format: \"[[rating]]\", for example: \"Rating: [[5]]\".\n\n"
        f"[Question]\n{question}\n\n"
        f"[The Start of Assistant's Answer]\n{answer}\n"
        "[The End of Assistant's Answer]"
    )
# Pattern matching the judge's "[[<rating>]]" marker; compiled once so the
# per-call cost is just the search.
_SCORE_PATTERN = re.compile(r"\[\[(\d+(\.\d+)?)\]\]")


def get_response_from_gpt(question, answer):
    """Ask the GPT-4 judge to rate *answer* for *question*.

    Sends the formatted judge prompt to the chat completions API and parses
    the numeric rating out of the reply.

    Returns:
        The rating as a float parsed from the "[[rating]]" marker, or the
        fallback value 0.3 when the judge's reply contains no parseable
        rating (sentinel kept from the original implementation for
        backward compatibility).
    """
    formatted_prompt = format_prompt(question, answer)
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {
                "role": "user",
                "content": formatted_prompt,
            }
        ],
    )
    # The judge's text lives on the first (and only) choice.
    response_text = response.choices[0].message.content
    score_match = _SCORE_PATTERN.search(response_text)
    if score_match:
        return float(score_match.group(1))
    # No "[[n]]" marker found in the reply.
    return 0.3
def evaluate_questions(questions, context):
    """Answer each question on-device and score the answers with the GPT judge.

    NOTE(review): relies on `get_answer_on_device`, which is not defined in
    this file — confirm it is provided/imported before calling.

    Args:
        questions: Iterable of question strings.
        context: Context string forwarded to the on-device model.

    Returns:
        A (average_score, total_score, scores) tuple; the average is 0 when
        no scores were collected.
    """
    collected = []
    for q in questions:
        reply = get_answer_on_device(q, context, model='llama2:7b-chat-q2_K')
        rating = get_response_from_gpt(q, reply)
        if rating is not None:
            collected.append(rating)
    running_total = sum(collected)
    mean = running_total / len(collected) if collected else 0
    return mean, running_total, collected
def clean_text(text):
    """Normalize *text*: trim, collapse runs of whitespace to single spaces,
    strip punctuation (anything outside \\w and \\s), and lowercase."""
    collapsed = re.sub(r'\s+', ' ', text.strip())
    stripped = re.sub(r'[^\w\s]', '', collapsed)
    return stripped.lower()
if __name__ == "__main__":
    # Display label for each MT-Bench category, keyed by the "category"
    # field found in question.jsonl.
    CATEGORY_LABELS = {
        "writing": "Writing",
        "roleplay": "Roleplay",
        "reasoning": "Reasoning",
        "stem": "STEM",
        "humanities": "Humanities",
        "coding": "Coding",
        "extraction": "Extraction",
        "math": "Math",
    }
    category_scores = {name: [] for name in CATEGORY_LABELS}
    scores = []

    # Judge instruction forwarded to the on-device model; loop-invariant,
    # so it is hoisted out of the per-line loop.
    context = (
        "Please act as an impartial judge and evaluate the quality of the "
        "response provided by an AI assistant to the user question displayed "
        "below. Your evaluation should consider factors such as the "
        "helpfulness, relevance, accuracy, depth, creativity, and level of "
        "detail of the response."
    )

    with open("question.jsonl", 'r') as file:
        for line in file:
            data = json.loads(line)
            question = data.get("turns")[0]
            category_value = data.get("category")
            # NOTE(review): get_answer_on_device is not defined in this file;
            # confirm it is imported/provided before running.
            answer = get_answer_on_device(question, context, 'llama3.2:3b-instruct-q4_K_M')
            score = float(get_response_from_gpt(question, answer))
            # A score of exactly 0 is discarded (truthiness check kept from
            # the original).
            if score:
                scores.append(score)
                if category_value in category_scores:
                    category_scores[category_value].append(score)
                    print(f"{CATEGORY_LABELS[category_value]}:{score}")

    # Print per-category and overall averages.  Each category is guarded
    # individually: the original divided by len() of all eight lists after
    # checking only three of them, raising ZeroDivisionError whenever any
    # category had no scores.
    if scores:
        for name in ("writing", "roleplay", "reasoning", "math", "coding",
                     "extraction", "stem", "humanities"):
            cat = category_scores[name]
            if cat:
                print(f"Average {name} Score: {sum(cat) / len(cat)}")
        print(f"Average Score: {sum(scores) / len(scores)}")
    else:
        print("No scores were calculated.")