import os
import pickle
from contextlib import nullcontext
import torch
import tiktoken
from model import GPTConfig, GPT
import sacrebleu
import pandas as pd
# -----------------------------------------------------------------------------
init_from = 'gpt2' # either 'resume' (from an out_dir) or a GPT-2 variant (e.g. 'gpt2-xl')
# init_from = 'out-arc'
out_dir = 'out' # ignored if init_from is not 'resume'
start = "\n" # or "<|endoftext|>" or etc. Can also specify a file, use as: "FILE:prompt.txt"
temperature = 0.8 # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
top_k = 200 # retain only the top_k most likely tokens, clamp others to have 0 probability
max_new_tokens = 100 # tokens generated per translation (the original hard-coded 1, which cannot yield a full sentence)
seed = 1337
device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.
dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16' # 'float32' or 'bfloat16' or 'float16'
compile = False # use PyTorch 2.0 to compile the model to be faster
sparse = 0.
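# configurator.py (from nanoGPT) overrides any of the variables above from the
# command line; an illustrative invocation (the flag values are examples only):
#   python eval_BLEU.py --init_from=resume --out_dir=out-enfr --temperature=0.7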
exec(open('configurator.py').read()) # overrides from command line or config file
# -----------------------------------------------------------------------------
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
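# note: ctx runs the forward pass under mixed-precision autocast on GPU;
# on CPU it is a no-op context manager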
# model
if init_from == 'resume':
    # init from a model saved in a specific directory
    ckpt_path = os.path.join(out_dir, 'ckpt.pt')
    checkpoint = torch.load(ckpt_path, map_location=device)
    gptconf = GPTConfig(**checkpoint['model_args'])
    model = GPT(gptconf)
    state_dict = checkpoint['model']
    unwanted_prefix = '_orig_mod.' # prefix torch.compile adds to parameter names
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
    model.load_state_dict(state_dict)
elif init_from.startswith('gpt2'):
    # init from a given GPT-2 model
    model = GPT.from_pretrained(init_from, dict(dropout=0.0))
model.eval()
model.to(device)
if compile:
    model = torch.compile(model) # requires PyTorch 2.0 (optional)
# look for the meta pickle in case it is available in the dataset folder
load_meta = False
if init_from == 'resume' and 'config' in checkpoint and 'dataset' in checkpoint['config']: # older checkpoints might not have these...
    meta_path = os.path.join('data', checkpoint['config']['dataset'], 'meta.pkl')
    load_meta = os.path.exists(meta_path)
if load_meta:
    print(f"Loading meta from {meta_path}...")
    with open(meta_path, 'rb') as f:
        meta = pickle.load(f)
    # TODO want to make this more general to arbitrary encoder/decoder schemes
    stoi, itos = meta['stoi'], meta['itos']
    encode = lambda s: [stoi[c] for c in s]
    decode = lambda l: ''.join([itos[i] for i in l])
else:
    # ok let's assume gpt-2 encodings by default
    print("No meta.pkl found, assuming GPT-2 encodings...")
    enc = tiktoken.get_encoding("gpt2")
    encode = lambda s: enc.encode(s, allowed_special={"<|endoftext|>"})
    decode = lambda l: enc.decode(l)
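# quick sanity check of the tokenizer round trip (illustrative; holds for plain
# ASCII text under the GPT-2 BPE, and for char-level metas whose vocab covers it):
#   assert decode(encode("hello world")) == "hello world"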
def ans(question):
    """Generate a French translation of `question` with the loaded model."""
    prompt = "Translate this English sentence to French and only include the French sentence and no English words: " + question
    start_ids = encode(prompt)
    x = torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]
    with torch.no_grad():
        with ctx:
            y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
    # decode only the newly generated tokens, not the echoed prompt; the output is
    # not uppercased, since sacrebleu is case-sensitive and the references are not uppercase
    answer = decode(y[0].tolist()[len(start_ids):])
    return answer
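# example call (hypothetical input sentence; output quality depends on the model):
#   print(ans("The cat sleeps on the mat."))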
def getscoreBLEU(question, answer):
    """Score the model's translation of `question` against the reference `answer`."""
    ref = [[answer]] # one reference stream containing the single reference
    sys = [ans(question)] # one hypothesis
    bleu = sacrebleu.corpus_bleu(sys, ref)
    return bleu.score
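# sacrebleu.corpus_bleu expects hypotheses as a flat list and references as a list
# of reference streams (one inner list per reference set), so each pair is scored
# here as a one-sentence corpus; identical strings score 100.0, e.g.:
#   sacrebleu.corpus_bleu(["le chat dort ."], [["le chat dort ."]]).score  # 100.0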
def get_accuracy(df):
    """Score every (en, fr) pair in `df` and return the mean, max, and min BLEU plus the number of pairs."""
    score_bleu = 0
    max_bleu = 0
    min_bleu = float('inf') # initializing to 0 would pin the minimum at 0, since BLEU is non-negative
    for q, a in zip(df.en, df.fr):
        score = getscoreBLEU(q, a)
        max_bleu = max(max_bleu, score)
        min_bleu = min(min_bleu, score)
        score_bleu += score
    score_bleu = score_bleu / df.shape[0]
    return score_bleu, max_bleu, min_bleu, df.shape[0]
# Evaluate the test split (TODO: double-check this is the test data, not the train data)
df = pd.read_csv("data/en-fr/en-fr-dataset/en-fr-test_dataframe.csv")
score_bleu, max_bleu, min_bleu, tests = get_accuracy(df)
print("----------------------------")
print(f"There are {tests} translations in total.\n\
Using the BLEU metric The GPT-2 model had an average of {score_bleu}, \n\
a max of {max_bleu} and a min of {min_bleu}. \n")