20 changes: 14 additions & 6 deletions backpropogation.py
@@ -1,10 +1,12 @@
import numpy as np

def sigmoid(z):
    return 1.0/(1.0+np.exp(-z))
def sigmoid(z): # The sigmoid function: gives an output between 0 and 1,
    # which represents the activation of that neuron.
    return 1.0/(1.0+np.exp(-z))

def sigmoid_prime(z):
    return sigmoid(z)*(1-sigmoid(z))
    return sigmoid(z)*(1-sigmoid(z)) # This gives the derivative of the sigmoid function

# The backpropagation function
def backprop(net, x, y):
    '''
@@ -16,22 +18,28 @@ def backprop(net, x, y):
    '''
    nabla_b = [np.zeros(b.shape) for b in net.biases]
    nabla_w = [np.zeros(w.shape) for w in net.weights]
    #feedforward
    activation = x
    activations = [x]
    zs = []
    activations = [x] #list to store all the activations, layer by layer
    zs = [] #list to store all z vectors, layer by layer
    for b, w in zip(net.biases, net.weights):
        z = np.dot(w, activation)+b
        zs.append(z)
        activation = sigmoid(z)
        activations.append(activation)
    #backward pass
    # transpose() returns the numpy array with its rows and columns swapped
    delta = net.cost_derivative(activations[-1], y) * \
        sigmoid_prime(zs[-1])
    nabla_b[-1] = delta
    nabla_w[-1] = np.dot(delta, activations[-2].transpose())
    # l = 1 means the last layer of neurons, l = 2 is the
    # second-last layer, and so on.
    for l in range(2, net.num_layers):
        z = zs[-l]
        sp = sigmoid_prime(z)
        delta = np.dot(net.weights[-l+1].transpose(), delta) * sp
        nabla_b[-l] = delta
        nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
    return (net,nabla_b, nabla_w)
    return (net, nabla_b, nabla_w) # Return the network together with ``(nabla_b, nabla_w)``,
                                   # the gradient of the cost function.
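
Not part of the PR: a minimal sketch of how this backprop() could be exercised, assuming a Nielsen-style network object that exposes biases, weights, num_layers, and a quadratic cost_derivative. The TinyNet stub, feedforward, and quadratic_cost helpers below are hypothetical illustrations, not code from this repository.

import numpy as np

# Hypothetical stand-in for the `net` object backprop() expects: it only
# needs `biases`, `weights`, `num_layers`, and `cost_derivative`.
class TinyNet:
    def __init__(self, sizes):
        self.num_layers = len(sizes)
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def cost_derivative(self, output_activations, y):
        # Gradient of the quadratic cost 0.5*||a - y||^2 w.r.t. the output.
        return output_activations - y

def feedforward(net, x):
    a = x
    for b, w in zip(net.biases, net.weights):
        a = sigmoid(np.dot(w, a) + b)
    return a

def quadratic_cost(net, x, y):
    return 0.5 * np.sum((feedforward(net, x) - y) ** 2)

net = TinyNet([2, 3, 1])
x = np.array([[0.5], [0.8]])   # one training input (column vector)
y = np.array([[1.0]])          # its desired output
_, nabla_b, nabla_w = backprop(net, x, y)

# Finite-difference check: perturb one weight and compare the numerical
# slope of the cost with the gradient backprop computed for that weight.
eps = 1e-6
net.weights[0][0, 0] += eps
c_plus = quadratic_cost(net, x, y)
net.weights[0][0, 0] -= 2 * eps
c_minus = quadratic_cost(net, x, y)
net.weights[0][0, 0] += eps
numeric = (c_plus - c_minus) / (2 * eps)
print(np.isclose(numeric, nabla_w[0][0, 0]))   # expect True

If the two numbers agree to several decimal places, the backward pass is computing the gradient that the cost function actually has.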