-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathngrams_train.py
More file actions
60 lines (50 loc) · 3.56 KB
/
ngrams_train.py
File metadata and controls
60 lines (50 loc) · 3.56 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
"""Train an n-gram language model and evaluate its perplexity under several
smoothing schemes: none, Laplace, additive (a sweep over k), and Good-Turing.

Outputs (per scheme and per split):
    perplexities/<scheme>/perplexity_<ngram>_<split>[...].csv
    language_models/<scheme>/<ngram>_<split>[...].csv
"""
from NGramProcessor import *
import os
import pandas as pd  # fix: pd was used below but never imported explicitly

# Fix: the calc_perplexity calls write into per-scheme SUBdirectories, but the
# original only created the two top-level folders — create every target dir
# up front so the csv writes cannot fail on a fresh checkout.
for _scheme in ('no_smoothing', 'laplace', 'additive', 'turing'):
    os.makedirs(f'language_models/{_scheme}', exist_ok=True)
    os.makedirs(f'perplexities/{_scheme}', exist_ok=True)
os.makedirs('average_perplexity', exist_ok=True)

# Preprocessed datasets; the validation split serves as the "test" set here.
df_train = pd.read_csv('dataset/train_dataset.csv')
df_test = pd.read_csv('dataset/validate_dataset.csv')

# Order of the model: 1 = unigram, 2 = bigram, 3 = trigram, 4 = quadgram.
n = 4
n_gram = {1: 'unigram', 2: 'bigram', 3: 'trigram', 4: 'quadgram'}.get(n, f'{n}-gram')

ngram = NGramProcessor(df_train, n)
ngram.train()

# Both splits, evaluated identically, so loop instead of copy-pasting calls.
_splits = (('train', df_train), ('test', df_test))

# No smoothing and Laplace smoothing.
for split, df in _splits:
    ngram.calc_perplexity(
        df,
        perplexity_csv=f'perplexities/no_smoothing/perplexity_{n_gram}_{split}.csv',
        log_prob_save_csv=f'language_models/no_smoothing/{n_gram}_{split}.csv')
    ngram.calc_perplexity(
        df, smoothing='laplace',
        perplexity_csv=f'perplexities/laplace/perplexity_{n_gram}_{split}.csv',
        log_prob_save_csv=f'language_models/laplace/{n_gram}_{split}.csv')

# Additive-smoothing sweep. Fix: the original wrote every k to the SAME file
# paths, so each run overwrote the previous one's results — include k in the
# filenames so all four runs survive.
for k in (0.01, 0.1, 10, 100):
    for split, df in _splits:
        ngram.calc_perplexity(
            df, smoothing='additive', k=k,
            perplexity_csv=f'perplexities/additive/perplexity_{n_gram}_{split}_k{k}.csv',
            log_prob_save_csv=f'language_models/additive/{n_gram}_{split}_k{k}.csv')

# Good-Turing smoothing.
for split, df in _splits:
    ngram.calc_perplexity(
        df, smoothing='turing',
        perplexity_csv=f'perplexities/turing/perplexity_{n_gram}_{split}.csv',
        log_prob_save_csv=f'language_models/turing/{n_gram}_{split}.csv')