# model.py
import torch
import torch.nn as nn
import torchvision.models as models


class EncoderCNN(nn.Module):
    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        # Load a ResNet-50 pre-trained on ImageNet and freeze its weights
        # (newer torchvision versions use weights=models.ResNet50_Weights.DEFAULT
        # instead of pretrained=True)
        resnet = models.resnet50(pretrained=True)
        for param in resnet.parameters():
            param.requires_grad_(False)
        # Drop the final classification layer; keep the convolutional backbone
        modules = list(resnet.children())[:-1]
        self.resnet = nn.Sequential(*modules)
        # Map the 2048-dim ResNet feature vector to the embedding size
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)

    def forward(self, images):
        features = self.resnet(images)
        # Flatten (batch, 2048, 1, 1) to (batch, 2048)
        features = features.view(features.size(0), -1)
        features = self.embed(features)
        return features
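

# --- Illustrative usage (not part of the original file) ----------------------
# A minimal sketch of how EncoderCNN is typically called. The batch size,
# image resolution, and embed_size below are assumed demonstration values,
# not anything fixed by this file.
def _demo_encoder():
    encoder = EncoderCNN(embed_size=256)
    encoder.eval()  # freeze batch-norm statistics in the ResNet backbone
    images = torch.randn(4, 3, 224, 224)  # batch of 4 pre-processed RGB images
    with torch.no_grad():
        features = encoder(images)
    print(features.shape)  # torch.Size([4, 256])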


class DecoderRNN(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=2):
        super(DecoderRNN, self).__init__()
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        # Embedding layer that turns word indices into vectors of a specified size
        self.word_embeddings = nn.Embedding(vocab_size, embed_size)
        # LSTM that maps embedded inputs to hidden states
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        # Final fully-connected layer that maps hidden states to vocabulary scores
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.init_weights()

    def init_weights(self):
        # Xavier-initialize the output and embedding weights
        torch.nn.init.xavier_uniform_(self.linear.weight)
        torch.nn.init.xavier_uniform_(self.word_embeddings.weight)

    def forward(self, features, captions):
        # Embed the captions, dropping the final <end> token so that the image
        # feature can serve as the input at the first time step
        # (see _demo_decoder_forward at the end of this file for an example call)
        embeds = self.word_embeddings(captions[:, :-1])
        inputs = torch.cat((features.unsqueeze(dim=1), embeds), dim=1)
        x, (h, c) = self.lstm(inputs)
        # Map the LSTM output at every time step to vocabulary scores
        x = self.linear(x)
        return x

    def sample(self, inputs, states=None, max_len=20):
        """Accepts a pre-processed image tensor (inputs) and returns a
        predicted sentence (a list of token ids of length at most max_len)."""
        preds = []
        count = 0
        word_item = None
        # Generate greedily, stopping at the <end> token (assumed to have
        # id 1 in this project's vocabulary) or after max_len words
        while count < max_len and word_item != 1:
            # Run one step of the LSTM and score the vocabulary
            output_lstm, states = self.lstm(inputs, states)
            output = self.linear(output_lstm.squeeze(1))
            # Take the most probable word
            prob, word = output.max(1)
            word_item = word.item()
            preds.append(word_item)
            # Feed the predicted word back in as the next input
            inputs = self.word_embeddings(word).unsqueeze(1)
            count += 1
        return preds
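

# --- Illustrative usage (not part of the original file) ----------------------
# The two sketches below show how the classes above fit together. All sizes
# (embed_size, hidden_size, vocab_size, batch size, caption length) are
# assumed demonstration values, and idx2word is a hypothetical id-to-word
# lookup that a real vocabulary object would provide.

def _demo_decoder_forward():
    # A training-style forward pass with dummy data (teacher forcing): the
    # image feature is prepended as the first input, so the decoder emits one
    # score vector over the vocabulary per caption position.
    embed_size, hidden_size, vocab_size = 256, 512, 1000
    decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
    features = torch.randn(4, embed_size)             # stand-in for EncoderCNN output
    captions = torch.randint(0, vocab_size, (4, 12))  # dummy token ids
    outputs = decoder(features, captions)
    print(outputs.shape)  # torch.Size([4, 12, 1000])
    # Training would score the outputs against the target captions, e.g.:
    criterion = nn.CrossEntropyLoss()
    loss = criterion(outputs.view(-1, vocab_size), captions.view(-1))
    print(loss.item())


def _demo_sampling():
    # Greedy caption generation end to end for a single image. The <end>
    # token id (1) matches the assumption made in DecoderRNN.sample.
    embed_size, hidden_size, vocab_size = 256, 512, 1000
    encoder = EncoderCNN(embed_size)
    decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
    encoder.eval()
    decoder.eval()
    image = torch.randn(1, 3, 224, 224)  # a single pre-processed image
    with torch.no_grad():
        features = encoder(image)                          # (1, embed_size)
        token_ids = decoder.sample(features.unsqueeze(1))  # list of ints
    print(token_ids)
    # A real pipeline would map ids back to words, e.g.:
    # caption = " ".join(idx2word[i] for i in token_ids)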