evaluation.py
135 lines (102 loc) · 4.81 KB
import re
import json
import os
import argparse


# These functions are heavily influenced by the HF squad_metrics.py script.
def normalize_text(s):
    """Removing articles and punctuation, and standardizing whitespace are all typical text processing steps."""
    import string

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
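
# Illustrative sanity check (not part of the original script): the normalizer
# lowercases, strips punctuation and articles, and collapses whitespace, e.g.
#   normalize_text("The  Quick, Brown Fox!")  ->  "quick brown fox"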

def compute_exact_match(prediction, truth):
    return int(normalize_text(prediction) == normalize_text(truth))
    # return prediction == truth


def compute_f1(prediction, truth):
    pred_tokens = normalize_text(prediction).split()
    truth_tokens = normalize_text(truth).split()

    # If either the prediction or the truth is no-answer, then F1 = 1 if they agree, 0 otherwise.
    if len(pred_tokens) == 0 or len(truth_tokens) == 0:
        return int(pred_tokens == truth_tokens)

    common_tokens = set(pred_tokens) & set(truth_tokens)

    # If there are no common tokens, then F1 = 0.
    if len(common_tokens) == 0:
        return 0

    prec = len(common_tokens) / len(pred_tokens)
    rec = len(common_tokens) / len(truth_tokens)
    return 2 * (prec * rec) / (prec + rec)
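
# Illustrative example (not part of the original file): with prediction
# "the cat sat" and truth "cat sat down", the normalized token sets share
# {"cat", "sat"}, so precision = 1.0, recall = 2/3, and F1 = 0.8.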

def evaluate_sample(prediction, gold_answers):
    em_score = max(compute_exact_match(prediction, answer) for answer in gold_answers)
    f1_score = max(compute_f1(prediction, answer) for answer in gold_answers)
    return em_score, f1_score


# Make sure the returned answer is the bare letter, e.g. 'A' rather than 'A.' or 'A)'.
def get_choice(answer_str):
    choices = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
               'A)', 'B)', 'C)', 'D)', 'E)', 'F)', 'G)', 'H)',
               'A.', 'B.', 'C.', 'D.', 'E.', 'F.', 'G.', 'H.']
    for c in choices:
        if answer_str.startswith(c):
            return c.replace(')', '')
    if answer_str.startswith(':'):
        return answer_str.replace(':', '').replace('.', '').strip()
    return None
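
# For example (illustrative only): get_choice("B) because ...") and
# get_choice("B. because ...") both return "B", while get_choice("because ...")
# returns None.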

def evaluate_QA(QA_results):
    total_em = 0.0
    count = 0
    for sample in QA_results:
        gold_answer = sample['answer'].replace('(', '').replace(')', '').strip()
        answer_str = sample['predicted_answer'].strip() if sample['predicted_answer'] is not None else ''
        prediction = get_choice(answer_str)

        indicators = ['the correct option is', 'the correct answer is',
                      'The correct answer is', 'The correct option is',
                      'Thus, the answer is']
        if prediction is None:
            for indicator in indicators:
                if answer_str.find(indicator) >= 0:
                    answer_str = answer_str.split(indicator)[1].strip()
                    prediction = get_choice(answer_str)
                    break

        # if prediction is None:
        #     print(answer_str)
        # print(f"prediction: {prediction} \t gold_answers: {gold_answer} \t match: {prediction == gold_answer}")
        em_score = 1.0 if prediction == gold_answer else 0.0
        total_em += em_score
        count += 1

    avg_em = total_em / count
    # print(f"Accuracy: {avg_em}")
    return avg_em
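
# Illustrative input (field names taken from the code above; values assumed):
#   evaluate_QA([{"answer": "(A)", "predicted_answer": "The correct answer is A."}])
#   -> 1.0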

def full_evaluation(result_file):
    with open(result_file, 'r') as f:
        all_samples = json.load(f)

    executable_samples = [sample for sample in all_samples if sample['flag'] == 'success']
    print(f"Overall accuracy: {evaluate_QA(all_samples)}")
    print(f"Executable rate (Exe_Rate): {len(executable_samples) / len(all_samples)}")
    print(f"Executable accuracy (Exe_Acc): {evaluate_QA(executable_samples)}")

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', type=str)
    parser.add_argument('--depth', type=str, default='d5')
    parser.add_argument('--World', type=str, default='')
    parser.add_argument('--shot', type=int, default=1)
    parser.add_argument('--model_name', type=str, default='text-davinci-003')
    parser.add_argument('--solver', type=str)
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parse_args()
    if args.dataset_name == "ProofWriter":
        result_file = f'Processed_Datasets/{args.dataset_name}_{args.World}_{args.depth}_{args.solver}_{args.model_name}.json'
    elif args.dataset_name == "FOLIO" and args.shot > 1:
        result_file = f'Processed_Datasets/{args.dataset_name}_{args.shot}Shot_{args.solver}_{args.model_name}.json'
    else:
        result_file = f'Processed_Datasets/{args.dataset_name}_{args.solver}_{args.model_name}.json'
    # result_file = f'Processed_Datasets/{args.dataset_name}_{args.solver}_{args.model_name}.json'
    # evaluate_QA(result_file)
    full_evaluation(result_file)
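
# Example invocation (illustrative; the world, solver, and model values below are
# assumptions, not taken from the original file):
#   python evaluation.py --dataset_name ProofWriter --World CWA --depth d5 \
#       --solver pyke --model_name text-davinci-003
# which would read Processed_Datasets/ProofWriter_CWA_d5_pyke_text-davinci-003.json.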