-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathapplication.py
More file actions
162 lines (107 loc) · 5.08 KB
/
application.py
File metadata and controls
162 lines (107 loc) · 5.08 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from keras.layers import Conv1D, Input, Add, Activation, Dropout
from keras.models import Sequential, Model
from keras.regularizers import l2
from keras.initializers import TruncatedNormal
from keras.layers.advanced_activations import LeakyReLU, ELU
from keras import optimizers
def DC_CNN_Block(nb_filter, filter_length, dilation, l2_layer_reg):
    """Build one dilated-causal-convolution block (WaveNet style).

    Returns a closure that, applied to a Keras tensor, yields the pair
    (residual_output, skip_output): a dilated causal Conv1D followed by
    SELU, then two 1x1 convolutions — one feeding the skip path, the
    other added back onto the block input as the residual path.
    """
    def build(tensor_in):
        # All three convolutions share the same initializer/regularizer
        # configuration (seeded for reproducibility, no bias terms).
        init = TruncatedNormal(mean=0.0, stddev=0.05, seed=42)
        reg = l2(l2_layer_reg)

        conv = Conv1D(filters=nb_filter, kernel_size=filter_length,
                      dilation_rate=dilation, activation='linear',
                      padding='causal', use_bias=False,
                      kernel_initializer=init,
                      kernel_regularizer=reg)(tensor_in)
        activated = Activation('selu')(conv)

        # 1x1 conv producing the skip-connection contribution.
        skip = Conv1D(1, 1, activation='linear', use_bias=False,
                      kernel_initializer=init,
                      kernel_regularizer=reg)(activated)
        # 1x1 conv projecting back to the input width for the residual sum.
        projected = Conv1D(1, 1, activation='linear', use_bias=False,
                           kernel_initializer=init,
                           kernel_regularizer=reg)(activated)
        return Add()([tensor_in, projected]), skip
    return build
def DC_CNN_Model(length):
    """Build and compile the stacked dilated-causal CNN forecaster.

    Seven DC_CNN_Block layers with exponentially growing dilation
    (1..64) give a wide causal receptive field; their skip outputs are
    summed, passed through ReLU and a final 1x1 convolution.

    Parameters
    ----------
    length : int
        Length of the input (and output) time-series window.

    Returns
    -------
    A compiled Keras Model mapping shape (length, 1) -> (length, 1),
    trained with MAE loss and Adam.
    """
    # Renamed from `input` to avoid shadowing the builtin.
    model_input = Input(shape=(length, 1))
    l1a, l1b = DC_CNN_Block(32, 2, 1, 0.001)(model_input)
    l2a, l2b = DC_CNN_Block(32, 2, 2, 0.001)(l1a)
    l3a, l3b = DC_CNN_Block(32, 2, 4, 0.001)(l2a)
    l4a, l4b = DC_CNN_Block(32, 2, 8, 0.001)(l3a)
    l5a, l5b = DC_CNN_Block(32, 2, 16, 0.001)(l4a)
    l6a, l6b = DC_CNN_Block(32, 2, 32, 0.001)(l5a)
    l6b = Dropout(0.8)(l6b)  # dropout used to limit influence of earlier data
    l7a, l7b = DC_CNN_Block(32, 2, 64, 0.001)(l6a)
    l7b = Dropout(0.8)(l7b)  # dropout used to limit influence of earlier data
    # Sum the skip connections from every block, then the output head.
    l8 = Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b])
    l9 = Activation('relu')(l8)
    l21 = Conv1D(1, 1, activation='linear', use_bias=False,
                 kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05,
                                                    seed=42),
                 kernel_regularizer=l2(0.001))(l9)
    # Fixed: the legacy `input=`/`output=` keywords were deprecated and
    # later removed from keras.Model; `inputs=`/`outputs=` is the
    # supported spelling in every Keras version this file can run on.
    model = Model(inputs=model_input, outputs=l21)
    # Fixed: `lr` was renamed `learning_rate` (Keras >= 2.3). The other
    # arguments previously passed (beta_1, beta_2, epsilon=None, decay,
    # amsgrad) were already the defaults, so they are omitted.
    adam = optimizers.Adam(learning_rate=0.00075)
    model.compile(loss='mae', optimizer=adam, metrics=['mse'])
    return model
def evaluate_timeseries(timeseries, predict_size, epochs=3000):
    """Fit the DC-CNN on a 1-D series and forecast `predict_size` steps.

    Parameters
    ----------
    timeseries : 1-D array-like
        The series to model; NaN entries are dropped first.
    predict_size : int
        Forecast horizon: number of future steps to predict.
    epochs : int, optional
        Training epochs (default 3000, the previously hard-coded value).

    Returns
    -------
    numpy.ndarray of shape (predict_size,) holding the forecast.
    """
    timeseries = timeseries[~pd.isna(timeseries)]
    length = len(timeseries) - 1
    timeseries = np.atleast_2d(np.asarray(timeseries))
    if timeseries.shape[0] == 1:
        timeseries = timeseries.T  # ensure column vector (length + 1, 1)

    model = DC_CNN_Model(length)
    print('\n\nModel with input size {}, output size {}'.
          format(model.input_shape, model.output_shape))
    model.summary()

    # Train the network to predict the series shifted by one step.
    X = timeseries[:-1].reshape(1, length, 1)
    y = timeseries[1:].reshape(1, length, 1)
    model.fit(X, y, epochs=epochs)

    pred_array = np.zeros((1, predict_size, 1))
    X_test_initial = timeseries[1:].reshape(1, length, 1)
    # pred_array = model.predict(X_test_initial) if predictions of training samples required
    # Iterative forecast: each step feeds earlier predictions back in as input.
    pred_array[:, 0, :] = model.predict(X_test_initial)[:, -1:, :]
    for i in range(predict_size - 1):
        # Sliding window: drop the oldest i+1 observations and append the
        # i+1 predictions made so far, keeping the window length constant.
        window = np.append(X_test_initial[:, i + 1:, :],
                           pred_array[:, :i + 1, :]).reshape(1, length, 1)
        # Fixed: write only position i+1. The original assigned the whole
        # tail slice `[:, i + 1:, :]`, redundantly broadcasting the single
        # predicted value over every remaining slot on each iteration.
        pred_array[:, i + 1:i + 2, :] = model.predict(window)[:, -1:, :]
    return pred_array.flatten()
def lorenz(X, l, sigma, beta, rho):
    """Right-hand side of the Lorenz equations, for use with odeint.

    Parameters
    ----------
    X : tuple (u, v, w)
        Current state of the system.
    l : float
        Time point (unused; required by odeint's callback signature).
    sigma, beta, rho : float
        Lorenz system parameters.

    Returns
    -------
    Tuple (du/dt, dv/dt, dw/dt) of state derivatives.
    """
    u, v, w = X
    du_dt = -sigma * (u - v)
    dv_dt = rho * u - v - u * w
    dw_dt = -beta * w + u * v
    return du_dt, dv_dt, dw_dt
def main():
    """Integrate the Lorenz system and forecast its x-component with the DC-CNN.

    Fixed: the docstring was previously a stray no-op string literal in
    the middle of the function body; a large tail of commented-out dead
    code has been removed.
    """
    # Lorenz parameters and initial conditions.
    sigma, beta, rho = 10, 2.667, 28
    u0, v0, w0 = 0, 1, 1.05
    # Maximum time point and total number of time points.
    tmax, n = 100, 10000
    t = np.linspace(0, tmax, n)  # renamed from `l` (easily misread as `1`)
    # Integrate the Lorenz equations on the time grid t.
    f = odeint(lorenz, (u0, v0, w0), t, args=(sigma, beta, rho))
    x, y, z = f.T
    window_size = 50  # forecast horizon passed to evaluate_timeseries
    # Model only the first 1000 points of the x-component.
    timeseries = x[0:1000]
    evaluate_timeseries(timeseries, window_size)
if __name__ == '__main__':
    main()