-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathvoice.py
More file actions
143 lines (108 loc) · 4.07 KB
/
voice.py
File metadata and controls
143 lines (108 loc) · 4.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class FrequencyDelayedStack(nn.Module):
    """Scan a GRU along the mel-frequency axis over the sum of the
    time-delayed and frequency-delayed feature streams."""

    def __init__(self, dims):
        super().__init__()
        self.rnn = nn.GRU(dims, dims, batch_first=True)

    def forward(self, x_time, x_freq):
        """Combine both streams and run the mel-axis GRU.

        Both inputs are (Batch, Timesteps, Mels, Dims); the output keeps
        that same four-axis layout.
        """
        combined = x_time + x_freq
        batch, steps, mels, dims = combined.size()
        # Fold batch and time together so the GRU sees (B*T, M, D) and
        # treats the mel axis as its sequence dimension.
        flat = combined.reshape(-1, mels, dims)
        scanned, _ = self.rnn(flat)
        # Restore the original (B, T, M, D) layout.
        return scanned.reshape(batch, steps, mels, dims)
class TimeDelayedStack(nn.Module):
    """Time-delayed feature stack: a unidirectional GRU along the time
    axis plus a bidirectional GRU along the mel axis, concatenated."""

    def __init__(self, dims):
        super().__init__()
        self.bi_freq_rnn = nn.GRU(dims, dims, batch_first=True, bidirectional=True)
        self.time_rnn = nn.GRU(dims, dims, batch_first=True)

    def forward(self, x_time):
        """Input is (Batch, Timesteps, Mels, Dims); output is
        (Batch, Timesteps, Mels, 3 * Dims)."""
        batch, steps, mels, dims = x_time.size()
        # (B, M, T, D) flattened to (B*M, T, D): the time RNN scans along T.
        along_time = x_time.transpose(1, 2).reshape(-1, steps, dims)
        # (B*T, M, D): the bidirectional RNN scans along the mel axis.
        along_freq = x_time.reshape(-1, mels, dims)
        time_feats, _ = self.time_rnn(along_time)
        freq_feats, _ = self.bi_freq_rnn(along_freq)
        # Undo the flattening; the bidirectional pass doubles the feature dim.
        time_feats = time_feats.reshape(batch, mels, steps, dims).transpose(1, 2)
        freq_feats = freq_feats.reshape(batch, steps, mels, 2 * dims)
        # Concatenate forward-time and both freq directions: (B, T, M, 3*D).
        return torch.cat([time_feats, freq_feats], dim=3)
class Layer(nn.Module):
    """One MelNet block: the time-delayed stack followed by the
    frequency-delayed stack, each with a linear projection and a
    residual connection."""

    def __init__(self, dims):
        super().__init__()
        self.freq_stack = FrequencyDelayedStack(dims)
        self.freq_out = nn.Linear(dims, dims)
        self.time_stack = TimeDelayedStack(dims)
        # The time stack outputs 3*dims features; project back to dims.
        self.time_out = nn.Linear(3 * dims, dims)

    def forward(self, x):
        """Take and return a [x_time, x_freq] pair of (B, T, M, D) tensors."""
        x_time, x_freq = x
        # Time branch: stack -> projection -> residual add.
        x_time = self.time_out(self.time_stack(x_time)) + x_time
        # Freq branch conditions on the *updated* time features.
        # TODO: is the freq_out projection even needed? (dims -> dims)
        x_freq = self.freq_out(self.freq_stack(x_time, x_freq)) + x_freq
        return [x_time, x_freq]
class MelNet(nn.Module):
    """Toy single-tier MelNet: delayed-stack layers over a mel spectrogram,
    emitting mixture parameters per (timestep, mel) cell.

    Input to forward: (Batch, Timesteps, Mels); output:
    (Batch, Timesteps, Mels, 3 * n_mixtures).
    """

    def __init__(self, dims, n_layers, n_mixtures=10):
        super().__init__()
        # Input layers: project each scalar mel value up to `dims` features.
        self.freq_input = nn.Linear(1, dims)
        self.time_input = nn.Linear(1, dims)
        # Main layers; each consumes and returns the (x_time, x_freq) pair.
        self.layers = nn.Sequential(
            *[Layer(dims) for _ in range(n_layers)]
        )
        # Output layer: 3 parameters per mixture component.
        self.fc_out = nn.Linear(2 * dims, 3 * n_mixtures)
        # Print model size
        self.num_params()

    def forward(self, x):
        # F.pad pairs apply from the LAST axis backward; a negative pad trims.
        # Shift the inputs left for time-delay inputs: trim the first frame
        # along T (axis 1) and append a zero frame.
        x_time = F.pad(x, [0, 0, -1, 1, 0, 0]).unsqueeze(-1)
        # Shift the inputs along the mel axis for freq-delay inputs.
        # BUG FIX: the original used F.pad(x, [0, 0, 0, 0, -1, 1]), which
        # pads/trims the *batch* dimension, shifting samples across the
        # batch. The shift must act on the last (mel) axis instead.
        x_freq = F.pad(x, [-1, 1]).unsqueeze(-1)
        # Initial transform from 1 to dims
        x_time = self.time_input(x_time)
        x_freq = self.freq_input(x_freq)
        # Run through the layers
        x_time, x_freq = self.layers((x_time, x_freq))
        # Concatenate both streams and get the mixture params.
        params = self.fc_out(torch.cat([x_time, x_freq], dim=-1))
        return params

    def num_params(self):
        """Print the number of trainable parameters, in millions."""
        parameters = filter(lambda p: p.requires_grad, self.parameters())
        parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
        print('Trainable Parameters: %.3fM' % parameters)
def _demo():
    """Smoke test: build a small MelNet and push a dummy batch through it."""
    batchsize = 4
    timesteps = 10
    num_mels = 8
    dims = 512
    n_layers = 5
    model = MelNet(dims, n_layers)
    x = torch.ones(batchsize, timesteps, num_mels)
    print("Input Shape:", x.shape)
    y = model(x)
    print("Output Shape", y.shape)


# Guard the demo so importing this module no longer builds the model
# as a side effect.
if __name__ == "__main__":
    _demo()