-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathAlexNet.py
More file actions
99 lines (80 loc) · 3.64 KB
/
AlexNet.py
File metadata and controls
99 lines (80 loc) · 3.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import tensorflow as tf
import numpy as np
def train(x, preTrainedData):
    """Build the AlexNet forward pass from pre-trained weights and return fc7.

    NOTE(review): despite its name, this function performs no training — it
    only constructs the inference graph (conv1..conv5, fc6, fc7) and returns
    the fc7 activations. The name is kept for caller compatibility.

    Args:
        x: input image tensor; assumed NHWC (batch, height, width, channels)
           at the size AlexNet expects — TODO confirm against caller.
        preTrainedData: dict mapping layer names ("conv1".."conv5", "fc6",
           "fc7") to [weights, biases] pairs (e.g. loaded from the
           bvlc_alexnet.npy weight dump).

    Returns:
        The ReLU-activated output of fully-connected layer fc7.
    """

    def _grouped_conv(inputs, kernel, strides, groups):
        # Conv2d optionally split into channel groups along axis 3 —
        # AlexNet's original two-GPU layout (conv2/conv4/conv5 use groups=2).
        if groups == 1:
            return tf.nn.conv2d(inputs, kernel, strides, padding='SAME')
        # tf.split raises if the channel count is not divisible by `groups`,
        # which replaces the old trivially-true `% 1` asserts.
        input_parts = tf.split(inputs, groups, 3)
        kernel_parts = tf.split(kernel, groups, 3)
        output_parts = [tf.nn.conv2d(i, k, strides, padding='SAME')
                        for i, k in zip(input_parts, kernel_parts)]
        return tf.concat(output_parts, 3)

    def _conv_layer(inputs, name, strides, groups):
        # One convolutional layer: load pre-trained W/B, convolve, bias, ReLU.
        # (The original code reshaped the bias_add output to its own shape —
        # a no-op — which is dropped here.)
        W = tf.Variable(preTrainedData[name][0])
        B = tf.Variable(preTrainedData[name][1])
        return tf.nn.relu(tf.nn.bias_add(_grouped_conv(inputs, W, strides, groups), B))

    def _lrn(inputs):
        # Local response normalization with AlexNet's published parameters.
        return tf.nn.local_response_normalization(
            inputs, depth_radius=2, alpha=2e-05, beta=0.75, bias=1.0)

    def _max_pool(inputs):
        # Overlapping 3x3/stride-2 max pooling used after conv1, conv2, conv5.
        return tf.nn.max_pool(inputs, ksize=[1, 3, 3, 1],
                              strides=[1, 2, 2, 1], padding='VALID')

    def _fc_layer(inputs, name):
        # One fully-connected layer with ReLU activation.
        W = tf.Variable(preTrainedData[name][0])
        B = tf.Variable(preTrainedData[name][1])
        return tf.nn.relu(tf.matmul(inputs, W) + B)

    # Layer 01: conv (stride 4) -> LRN -> maxpool.
    conv1 = _conv_layer(x, "conv1", [1, 4, 4, 1], groups=1)
    pool1 = _max_pool(_lrn(conv1))
    # Layer 02: grouped conv -> LRN -> maxpool.
    conv2 = _conv_layer(pool1, "conv2", [1, 1, 1, 1], groups=2)
    pool2 = _max_pool(_lrn(conv2))
    # Layer 03: plain conv (no norm/pool).
    conv3 = _conv_layer(pool2, "conv3", [1, 1, 1, 1], groups=1)
    # Layer 04: grouped conv.
    conv4 = _conv_layer(conv3, "conv4", [1, 1, 1, 1], groups=2)
    # Layer 05: grouped conv -> maxpool (no LRN here).
    conv5 = _conv_layer(conv4, "conv5", [1, 1, 1, 1], groups=2)
    pool5 = _max_pool(conv5)
    # Layer 06: flatten spatial dims, then fully connected.
    flat = tf.reshape(pool5, [-1, int(np.prod(pool5.get_shape()[1:]))])
    fc6 = _fc_layer(flat, "fc6")
    # Layer 07: fully connected; its activations are the returned features.
    fc7 = _fc_layer(fc6, "fc7")
    return fc7