-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathstandard.py
More file actions
70 lines (53 loc) · 2.33 KB
/
standard.py
File metadata and controls
70 lines (53 loc) · 2.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import numpy as np
import tensorflow as tf
from summarization import create_var_summaries
# ----------------------------------------------------------------------------
def parametric_relu(_x):
    """Parametric ReLU: relu(x) plus a learnable per-channel slope on the negative part.

    Creates one `alpha` variable per input channel (initialized to 0, so the
    layer starts out as a plain ReLU) under the current variable scope.
    """
    n_channels = _x.get_shape()[-1]
    alphas = tf.get_variable('alpha', n_channels,
                             initializer=tf.constant_initializer(0.0),
                             dtype=tf.float32)
    # (x - |x|) * 0.5 is the negative part of x (0 where x >= 0, x where x < 0).
    negative_part = (_x - abs(_x)) * 0.5
    return tf.nn.relu(_x) + alphas * negative_part
def conv1d(x, n_filters, n_size, stride=1, nl='relu', name='conv1d', dropOut=False):
    """1-D convolution with bias, optional dropout, non-linearity, and summaries.

    Args:
        x: input tensor; unpacked as (batch, dim, channels) via get_shape().
        n_filters: number of output channels.
        n_size: convolution filter width.
        stride: convolution stride. BUG FIX: this was previously ignored
            (the call hard-coded stride=1); it is now passed through.
        nl: non-linearity — 'relu', 'prelu', or None for linear output.
        name: layer name. NOTE(review): currently unused because the
            variable_scope below is commented out; calling this twice in the
            same graph will collide on variables 'W'/'b'. Restoring the scope
            would rename existing checkpoint variables, so it is left as-is.
        dropOut: if truthy, applies tf.layers.dropout (default rate 0.5) with
            training=dropOut before the convolution.

    Returns:
        The activated (or linear, if nl is None) convolution output.

    Raises:
        ValueError: if `nl` is not 'relu', 'prelu', or None.
    """
    n_batch, n_dim, n_input_chan = x.get_shape()
    #with tf.variable_scope(name):
    # create and track weights
    W = tf.get_variable('W', shape=[n_size, n_input_chan, n_filters],
                        initializer=tf.random_normal_initializer(stddev=1e-3))
    create_var_summaries(W)
    # create and track biases
    b = tf.get_variable('b', [n_filters], initializer=tf.constant_initializer(0.))
    create_var_summaries(b)
    # optional dropout before the convolution
    if dropOut:
        x = tf.layers.dropout(x, training=dropOut)  # default rate = 0.5
    # pre-activations: stride now honors the caller's argument (was fixed at 1)
    x = tf.nn.conv1d(x, W, stride=stride, padding='SAME')
    x = tf.nn.bias_add(x, b)
    tf.summary.histogram('preactivations', x)
    # non-linearity
    if nl == 'relu':
        x = tf.nn.relu(x)
    elif nl == 'prelu':
        x = parametric_relu(x)
    elif nl is None:
        pass
    else:
        raise ValueError('Invalid non-linearity')
    tf.summary.histogram('activations', x)
    return x
def deconv1d(x, r, n_chan, n_in_dim, n_in_chan, name='deconv1d'):
    """1-D transposed convolution (upsampling by factor `r`) via conv2d_transpose.

    Reshapes the input to a height-1 2-D tensor, applies a transposed 2-D
    convolution with stride r along the width axis, and reshapes back to
    (batch, r * n_in_dim, n_chan).

    Args:
        x: input tensor with n_in_dim * n_in_chan trailing elements per example.
        r: integer upsampling ratio.
        n_chan: number of output channels.
        n_in_dim: input length (time/width dimension).
        n_in_chan: number of input channels.
        name: variable scope for 'W' and 'b' (reused via AUTO_REUSE).

    Returns:
        Tensor of shape (batch, r * n_in_dim, n_chan).
    """
    # GENERALIZED: batch size was hard-coded to 128 in both the reshape and
    # output_shape, failing for any other batch size. -1 / tf.shape make the
    # batch dimension dynamic; behavior for batch-128 inputs is unchanged.
    x = tf.reshape(x, [-1, 1, n_in_dim, n_in_chan])
    n_batch = tf.shape(x)[0]
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        # filter layout for conv2d_transpose: [height, width, output_channels, in_channels]
        W = tf.get_variable('W', shape=[1, r, n_chan, n_in_chan],
                            initializer=tf.random_normal_initializer(stddev=1e-3))
        b = tf.get_variable('b', [n_chan], initializer=tf.constant_initializer(0.))
        x = tf.nn.conv2d_transpose(x, W,
                                   output_shape=tf.stack([n_batch, 1, r * n_in_dim, n_chan]),
                                   strides=[1, 1, r, 1])
        x = tf.nn.bias_add(x, b)
    return tf.reshape(x, [-1, r * n_in_dim, n_chan])