# TCN.py
import tensorflow as tf
import numpy as np


# Redefining CausalConv1D to simplify its return values
class CausalConv1D(tf.keras.layers.Conv1D):
    def __init__(self, filters,
                 kernel_size,
                 strides=1,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer=None,
                 bias_initializer=tf.zeros_initializer(),
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        super(CausalConv1D, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding='valid',
            data_format='channels_last',
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            trainable=trainable,
            name=name, **kwargs
        )

    def call(self, inputs):
        # Left-pad the time axis by (kernel_size - 1) * dilation_rate so the
        # 'valid' convolution becomes causal: the output at step t depends only
        # on inputs at steps <= t, and the time length is preserved.
        padding = (self.kernel_size[0] - 1) * self.dilation_rate[0]
        inputs = tf.pad(inputs, tf.constant([(0, 0,), (1, 0), (0, 0)]) * padding)
        return super(CausalConv1D, self).call(inputs)
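

# Illustrative shape check (not part of the original file; the filter/kernel
# values below are arbitrary). It shows that the causal left-padding keeps the
# time length, assuming TF 2.x eager execution:
#
#   layer = CausalConv1D(filters=8, kernel_size=3, dilation_rate=2)
#   y = layer(tf.zeros([1, 20, 4]))
#   print(y.shape)  # (1, 20, 8)
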
class TemporalBlock(tf.keras.layers.Layer):
    def __init__(self, n_outputs, kernel_size, strides, dilation_rate, dropout=0.2,
                 trainable=True, name=None, dtype=None,
                 activity_regularizer=None, reduction_dim=None, drop_residual=False, **kwargs):
        super(TemporalBlock, self).__init__(
            trainable=trainable, dtype=dtype,
            activity_regularizer=activity_regularizer,
            name=name, **kwargs
        )
        self.dropout = dropout
        self.n_outputs = n_outputs
        self.first_1x1, self.second_1x1 = None, None
        self.drop_residual = drop_residual  # flag that indicates whether the residual connection should be removed
        # Optional 1x1 (bottleneck) layer before the first causal convolution
        if reduction_dim is not None:
            self.first_1x1 = tf.keras.layers.Dense(reduction_dim, activation=tf.nn.relu)
        self.conv1 = CausalConv1D(
            n_outputs, kernel_size, strides=strides,
            dilation_rate=dilation_rate, activation=tf.nn.relu,
            name="conv1")
        # Optional 1x1 (bottleneck) layer before the second causal convolution
        if reduction_dim is not None:
            self.second_1x1 = tf.keras.layers.Dense(reduction_dim, activation=tf.nn.relu)
        self.conv2 = CausalConv1D(
            n_outputs, kernel_size, strides=strides,
            dilation_rate=dilation_rate, activation=tf.nn.relu,
            name="conv2")
        self.down_sample = None

    def build(self, input_shape):
        channel_dim = 2
        # noise_shape (1, 1, n_outputs) broadcasts the dropout mask over the batch
        # and time dimensions, i.e. whole channels are dropped at once.
        self.dropout1 = tf.keras.layers.Dropout(self.dropout, [tf.constant(1), tf.constant(1), tf.constant(self.n_outputs)])
        self.dropout2 = tf.keras.layers.Dropout(self.dropout, [tf.constant(1), tf.constant(1), tf.constant(self.n_outputs)])
        # Create the layer norms once here rather than inside call(), so their
        # weights are built a single time and tracked by the parent layer.
        self.norm1 = tf.keras.layers.LayerNormalization(axis=-1)
        self.norm2 = tf.keras.layers.LayerNormalization(axis=-1)
        # 1x1 projection on the residual branch when the channel counts differ
        if input_shape[channel_dim] != self.n_outputs:
            self.down_sample = tf.keras.layers.Dense(self.n_outputs, activation=None)
        self.built = True

    def call(self, inputs, training=True):
        x = inputs
        if self.first_1x1 is not None:
            x = self.first_1x1(x)
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.dropout1(x, training=training)
        if self.second_1x1 is not None:
            x = self.second_1x1(x)
        x = self.conv2(x)
        x = self.norm2(x)
        x = self.dropout2(x, training=training)
        if self.down_sample is not None:
            inputs = self.down_sample(inputs)
        if self.drop_residual:
            return tf.nn.relu(x)
        else:
            return tf.nn.relu(x + inputs)
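

# Illustrative sketch of a single block (assumed hyper-parameters, not from the
# original file): input (batch, time, channels) -> output (batch, time, n_outputs);
# the Dense "down_sample" projection is built automatically when the channel
# counts differ, so the residual addition is well-defined.
#
#   block = TemporalBlock(n_outputs=16, kernel_size=3, strides=1, dilation_rate=1)
#   y = block(tf.random.normal([2, 50, 8]), training=False)
#   print(y.shape)  # (2, 50, 16)
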
class TemporalConvNet(tf.keras.layers.Layer):
    def __init__(self, num_channels, kernel_size=2, dropout=0.2,
                 trainable=True, name=None, dtype=None,
                 activity_regularizer=None, reduction_dim=None, drop_first_res=False, **kwargs):
        super(TemporalConvNet, self).__init__(
            trainable=trainable, dtype=dtype,
            activity_regularizer=activity_regularizer,
            name=name, **kwargs
        )
        self.layers = []
        num_levels = len(num_channels)
        for i in range(num_levels):
            # Dilation doubles at every level, so the receptive field grows exponentially.
            dilation_size = 2 ** i
            out_channels = num_channels[i]
            self.layers.append(
                TemporalBlock(out_channels, kernel_size, strides=1, dilation_rate=dilation_size,
                              dropout=dropout, reduction_dim=reduction_dim,
                              drop_residual=(drop_first_res and i == 0), name="tblock_{}".format(i))
            )

    def call(self, inputs, training=True):
        outputs = inputs
        for layer in self.layers:
            outputs = layer(outputs, training=training)
        return outputs
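

# Receptive-field note (illustrative, not from the original file): each level
# applies two causal convolutions with dilation 2**i, and each convolution widens
# the receptive field by (kernel_size - 1) * 2**i, so a stack of L levels sees about
#     1 + 2 * (kernel_size - 1) * (2**L - 1)
# past time steps (e.g. kernel_size=4 and L=4 levels -> 91 steps).
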
# Example (smoke test): build a 4-level TCN and run it on a dummy input;
# the causal padding keeps the time length, so the output shape is (1, 1, 40).
if __name__ == "__main__":
    tcn = TemporalConvNet([40] * 4, kernel_size=4, dropout=0.1, reduction_dim=None, drop_first_res=False)
    print(tcn(np.ones(shape=(1, 1, 10)) * 3).shape)  # (1, 1, 40)