-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathutils.py
More file actions
164 lines (132 loc) · 5.33 KB
/
utils.py
File metadata and controls
164 lines (132 loc) · 5.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
import os
import random
import numpy as np
import pandas as pd
import torch
from lxml import etree
import xml.etree.ElementTree as ET
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader, TensorDataset, RandomSampler
import param
import re
class InputFeatures(object):
    """Container for one tokenized example.

    Holds the padded token ids, the attention mask (1 = real token,
    0 = padding), and the integer class label for a single review.
    """

    def __init__(self, input_ids, input_mask, label_id):
        # Token ids, already padded/truncated to max_seq_length.
        self.input_ids = input_ids
        # Attention mask aligned with input_ids.
        self.input_mask = input_mask
        # Integer class label (0 = negative, 1 = positive in this file).
        self.label_id = label_id
def XML2Array(neg_path, pos_path):
    """Parse negative and positive review XML dumps into parallel arrays.

    Args:
        neg_path: path to the XML file of negative reviews.
        pos_path: path to the XML file of positive reviews.

    Returns:
        (reviews, labels): numpy array of review strings (negatives first,
        then positives) and a numpy int array of 0s (negative) / 1s (positive)
        in the same order.
    """
    # lxml's recovering parser tolerates the malformed markup these
    # review dumps are known for; ET.parse happily drives it via feed/close.
    parser = etree.XMLParser(recover=True)
    # NOTE(review): '+' inside the character class is a literal plus, so
    # '+' characters are also replaced by spaces — confirm that is intended
    # (perhaps '[\n\r\t]+' was meant).
    regex = re.compile(r'[\n\r\t+]')

    reviews = []
    labels = []
    for path, label in ((neg_path, 0), (pos_path, 1)):
        root = ET.parse(path, parser=parser).getroot()
        count = 0
        for rev in root.iter('review_text'):
            # rev.text is None for an empty <review_text/> element;
            # substitute the empty string instead of crashing in regex.sub.
            reviews.append(regex.sub(" ", rev.text or ""))
            count += 1
        # Same ordering as before: a run of 0s, then a run of 1s.
        labels.extend(np.full(count, label, dtype=int))

    return np.array(reviews), np.array(labels)
def CSV2Array(path):
    """Read a CSV with 'reviews' and 'labels' columns into two Python lists.

    The file is decoded as Latin-1, matching the encoding of the datasets
    this project ships with.
    """
    frame = pd.read_csv(path, encoding='latin')
    return frame.reviews.values.tolist(), frame.labels.values.tolist()
def make_cuda(tensor):
    """Move *tensor* to the GPU when CUDA is available; otherwise return it as-is."""
    return tensor.cuda() if torch.cuda.is_available() else tensor
def init_random_seed(manual_seed):
    """Seed all RNGs used by this project for reproducibility.

    Args:
        manual_seed: the seed to use, or None to draw a random one
            (the chosen seed is printed either way).

    Seeds Python's `random`, NumPy's global RNG, and torch (all CUDA
    devices too when available).
    """
    if manual_seed is None:
        seed = random.randint(1, 10000)
    else:
        seed = manual_seed
    print("use random seed: {}".format(seed))
    random.seed(seed)
    # Fix: numpy was previously left unseeded even though the data
    # pipeline in this file relies on it.
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def init_model(net, restore=None):
    """Optionally restore weights into *net* and move it to the GPU.

    Args:
        net: the torch module to initialize.
        restore: optional path to a saved state_dict; silently skipped
            when None or when the file does not exist (pre-existing
            best-effort behavior, kept as-is).

    Returns:
        The (possibly restored, possibly CUDA-resident) module.
    """
    if restore is not None and os.path.exists(restore):
        # Fix: map GPU-saved checkpoints onto the CPU when no CUDA device
        # is present; a bare torch.load would raise in that case.
        device = None if torch.cuda.is_available() else torch.device("cpu")
        net.load_state_dict(torch.load(restore, map_location=device))
        print("Restore model from: {}".format(os.path.abspath(restore)))

    if torch.cuda.is_available():
        # Let cuDNN pick the fastest kernels for fixed-size inputs.
        cudnn.benchmark = True
        net.cuda()
    return net
def save_model(net, path):
    """Save *net*'s state_dict to *path*, creating parent directories as needed.

    Args:
        net: the torch module to save.
        path: destination file path; may be a bare filename or a nested path.
    """
    folder_name = os.path.dirname(path)
    # Fix: a bare filename has dirname "" and os.makedirs("") raises;
    # exist_ok also avoids a race between the exists() check and makedirs.
    if folder_name:
        os.makedirs(folder_name, exist_ok=True)
    torch.save(net.state_dict(), path)
    print("save pretrained model to: {}".format(path))
def convert_examples_to_features(reviews, labels, max_seq_length, tokenizer,
                                 cls_token='[CLS]', sep_token='[SEP]',
                                 pad_token=0):
    """Convert (review, label) pairs into fixed-length BERT-style InputFeatures.

    Each review is tokenized, truncated to fit, wrapped in CLS/SEP special
    tokens, converted to ids, and right-padded with *pad_token* so that
    every example is exactly *max_seq_length* long.
    """
    features = []
    total = len(reviews)
    for idx, (text, label) in enumerate(zip(reviews, labels)):
        # Periodic progress report (every 200 examples).
        if (idx + 1) % 200 == 0:
            print("writing example %d of %d" % (idx + 1, total))

        # Reserve two slots for the CLS and SEP special tokens.
        wordpieces = tokenizer.tokenize(text)[:max_seq_length - 2]
        token_ids = tokenizer.convert_tokens_to_ids(
            [cls_token] + wordpieces + [sep_token])

        pad_amount = max_seq_length - len(token_ids)
        mask = [1] * len(token_ids) + [0] * pad_amount
        token_ids = token_ids + [pad_token] * pad_amount

        assert len(token_ids) == max_seq_length
        assert len(mask) == max_seq_length
        features.append(InputFeatures(input_ids=token_ids,
                                      input_mask=mask,
                                      label_id=label))
    return features
def roberta_convert_examples_to_features(reviews, labels, max_seq_length, tokenizer,
                                         cls_token='<s>', sep_token='</s>',
                                         pad_token=1):
    """Convert (review, label) pairs into fixed-length RoBERTa-style InputFeatures.

    Identical pipeline to convert_examples_to_features but with RoBERTa's
    special tokens (<s>, </s>) and pad id (1) as defaults.
    """
    features = []
    total = len(reviews)
    for idx, (text, label) in enumerate(zip(reviews, labels)):
        # Periodic progress report (every 200 examples).
        if (idx + 1) % 200 == 0:
            print("writing example %d of %d" % (idx + 1, total))

        # Reserve two slots for the <s> and </s> special tokens.
        wordpieces = tokenizer.tokenize(text)[:max_seq_length - 2]
        token_ids = tokenizer.convert_tokens_to_ids(
            [cls_token] + wordpieces + [sep_token])

        pad_amount = max_seq_length - len(token_ids)
        mask = [1] * len(token_ids) + [0] * pad_amount
        token_ids = token_ids + [pad_token] * pad_amount

        assert len(token_ids) == max_seq_length
        assert len(mask) == max_seq_length
        features.append(InputFeatures(input_ids=token_ids,
                                      input_mask=mask,
                                      label_id=label))
    return features
def get_data_loader(features, batch_size):
    """Wrap a list of InputFeatures into a shuffled DataLoader.

    Each batch yields three long tensors: (input_ids, input_mask, label_ids).
    """
    ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    masks = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    targets = torch.tensor([f.label_id for f in features], dtype=torch.long)

    dataset = TensorDataset(ids, masks, targets)
    return DataLoader(dataset,
                      sampler=RandomSampler(dataset),
                      batch_size=batch_size)