layers.py
import random
from typing import List, Tuple, Union
import numpy as np


def sigmoid(x):
    return 1 / (1 + np.exp(-x))


def sigmoid_prime(x):
    return sigmoid(x) * (1 - sigmoid(x))


def relu(x):
    return np.maximum(x, 0)


def relu_prime(x):
    return np.where(x > 0, 1, 0)


def linear(x):
    return x


def linear_prime(x):
    return np.ones(x.shape)


ACTIVATION_FN_PRIME = {relu: relu_prime, sigmoid: sigmoid_prime, linear: linear_prime}


class Layer:
    def __init__(
        self,
        units: int,
        activation_fn: str,
    ) -> None:
        """
        units: number of neurons in this layer
        activation_fn: one of "relu", "sigmoid", or "linear"
        """
        self.units = units
        if activation_fn == "relu":
            self.activation_fn = relu
        elif activation_fn == "sigmoid":
            self.activation_fn = sigmoid
        elif activation_fn == "linear":
            self.activation_fn = linear
        else:
            raise Exception(f"{activation_fn} activation fn not supported")
        self._is_compiled = False
        # Below params are set by compilation
        self.input_size = None
        self.W = None  # shape = (# of units, input_size)
        self.b = None  # shape = (# of units, 1)
        self.id = None
        self.next_layer = None
        self.is_last_layer = False

    def compile(self, input_size, id, next_layer):
        self.id = id  # 1st hidden layer is layer id 1
        self._init_params(input_size)
        self.next_layer = next_layer
        if next_layer is None:
            self.is_last_layer = True
        self._is_compiled = True

    def apply_activation(self, x):
        # x can be a real number, vector, or matrix of real numbers
        return self.activation_fn(x)

    def compute(
        self, input: np.ndarray, include_z=False
    ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
        """
        input can be a vector or a matrix
        If matrix, each column represents a data sample
        Output is of shape = (# of units, # of input samples)
        """
        if not self._is_compiled:
            raise Exception(
                "Layers are not compiled. Must be used in context of NeuralNetwork class"
            )
        if len(input.shape) == 1:
            # Treat a 1D vector as a single sample (one column) so W @ input and the
            # tiled bias below have compatible shapes
            input = input.reshape(-1, 1)
        num_samples = input.shape[1]
        b = np.tile(self.b, (1, num_samples))  # Add b to each sample
        WX_b = np.dot(self.W, input) + b
        output = self.apply_activation(WX_b)
        if include_z:
            return output, WX_b
        return output
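
    # The backprop helpers below implement the chain rule for Z = W @ X_in + b and
    # X_out = activation(Z). Writing dloss/dX_out for the gradient handed back by the
    # next layer (next_layer_dloss_dinput), they compute:
    #   dloss/dW     = (dloss/dX_out * dX_out/dZ) @ X_in.T
    #   dloss/db     = sum over samples of (dloss/dX_out * dX_out/dZ)
    #   dloss/dinput = W.T @ (dloss/dX_out * dX_out/dZ)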

    def dloss_dW(self, next_layer_dloss_dinput, X_out, Z_out, X_in):
        """
        next_layer_dloss_dinput shape = (# of units, # of samples)
        _dX_dZ shape = (# of units, # of samples)
        _dZ_dW shape = (# of samples, input size)
        Return value shape = (# of units, input_size)
        """
        dX_dZ = self._dX_dZ(X_out)
        dZ_dW = self._dZ_dW(X_in)
        matrix_mult = next_layer_dloss_dinput * dX_dZ
        return np.dot(matrix_mult, dZ_dW)

    def dloss_db(self, next_layer_dloss_dinput, X_out, Z_out, X_in):
        """
        next_layer_dloss_dinput shape = (# of units, # of samples)
        _dX_dZ shape = (# of units, # of samples)
        _dZ_dB shape = (# of units, # of samples)
        Return value shape = (# of units, 1)
        """
        dX_dZ = self._dX_dZ(X_out)
        dZ_dB = self._dZ_dB(Z_out)
        dloss_dB = next_layer_dloss_dinput * dX_dZ * dZ_dB
        # Convert to b.shape by taking sum across samples (along the column)
        return np.sum(dloss_dB, axis=1).reshape(self.b.shape)

    def _dX_dZ(self, X_out):
        """
        SPECIAL EXCEPTION: for log loss and the last layer, don't multiply by
        dX_dZ since it's already factored into the gradient of the log loss;
        no-op (return 1) instead.
        X_out shape = (# of units, # of samples)
        Returns X_out shape
        """
        if self.is_last_layer and self.activation_fn == sigmoid:
            return 1
        # Note: the derivative is evaluated on the post-activation X_out rather than Z.
        # For relu (X_out > 0 exactly when Z > 0) and linear (derivative is 1 everywhere)
        # this is equivalent to evaluating it on Z.
        return ACTIVATION_FN_PRIME[self.activation_fn](X_out)

    def _dZ_dW(self, X_in):
        """
        X_in shape = (# of prev_layer units, # of samples)
        note: input size == # of prev_layer units
        Returns (# of samples, input size)
        """
        return X_in.T

    def _dZ_dB(self, Z_out):
        """
        Z_out shape = (# of units, # of samples)
        Return value shape = Z_out shape
        """
        return 1

    def dloss_dinput(self, next_layer_dloss_dinput, X_out) -> np.ndarray:
        """
        We expose this function in each layer as it's used to backprop to the prev layer
        next_layer_dloss_dinput shape = (# of units, # of samples)
        dX_dZ shape = (# of units, # of samples)
        dZ_dinput shape = (# of units, input_size)
        Return value shape = (input_size, # of samples)
        """
        dX_dZ = self._dX_dZ(X_out)
        dZ_dinput = self._dZ_dinput()
        return np.dot(dZ_dinput.T, next_layer_dloss_dinput * dX_dZ)

    def _dZ_dinput(self):
        """
        This is just self.W
        Return value shape = (# of units, input_size)
        """
        return self.W

    def _init_params(self, input_size) -> None:
        self.input_size = input_size
        weight_vectors = []
        bias_terms = []
        for _ in range(self.units):
            weight_vectors.append(np.random.randn(self.input_size))
            bias_terms.append(float(0))
        weight_vectors = np.array(weight_vectors)
        bias_terms = np.array(bias_terms)
        self.W = weight_vectors
        self.b = bias_terms.reshape(-1, 1)
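

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original file: compile a single 3-unit
    # relu layer directly (instead of through the NeuralNetwork class referenced in
    # the error message above) and run a forward and backward pass on a small batch.
    # Shapes follow the docstrings above.
    layer = Layer(units=3, activation_fn="relu")
    layer.compile(input_size=4, id=1, next_layer=None)  # no next layer => last layer

    X = np.random.randn(4, 5)  # 5 samples, each a column of 4 features
    X_out, Z_out = layer.compute(X, include_z=True)
    print(X_out.shape)  # (3, 5) = (# of units, # of samples)

    # Pretend the loss gradient w.r.t. this layer's output is all ones
    dloss_dX = np.ones_like(X_out)
    dW = layer.dloss_dW(dloss_dX, X_out, Z_out, X)
    db = layer.dloss_db(dloss_dX, X_out, Z_out, X)
    print(dW.shape, db.shape)  # (3, 4) and (3, 1), matching W and b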