Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
67 commits
Select commit Hold shift + click to select a range
c362a8f
Create .travis.yml
lishulincug Jan 17, 2018
973e9bb
Add files via upload
lishulincug Jan 17, 2018
c3b684f
Update .travis.yml
lishulincug Jan 17, 2018
8df77fc
Add files via upload
lishulincug Jan 17, 2018
0173302
Update macd_back_test.py
lishulincug Jan 17, 2018
4af3248
Update __init__.py
lishulincug Jan 17, 2018
35030ba
Update __init__.py
lishulincug Jan 17, 2018
3252891
Add files via upload
lishulincug Jan 17, 2018
d3ef3c7
Add files via upload
lishulincug Jan 17, 2018
99cbec2
Update macd_back_test.py
lishulincug Jan 17, 2018
afdac25
Update init.py
lishulincug Jan 18, 2018
22648b3
Update efund_mail2.py
lishulincug Jan 18, 2018
21f8ec2
Update tread_tracking.py
lishulincug Jan 18, 2018
e03173f
Update macd_back_test.py
lishulincug Jan 18, 2018
d07e951
Update macd_back_test.py
lishulincug Jan 18, 2018
a935030
Update macd_back_test.py
lishulincug Jan 18, 2018
ede9b81
Update macd_back_test.py
lishulincug Jan 18, 2018
6eaba5d
Update macd_back_test.py
lishulincug Jan 18, 2018
464257f
Update macd_back_test.py
lishulincug Jan 18, 2018
619f0ce
择时策略
lishulincug Jan 21, 2018
a1dc8af
Update efund_mail2.py
lishulincug Jan 21, 2018
0f6e977
Update efund_mail2.py
lishulincug Jan 22, 2018
7fc60cd
Update efund_mail2.py
lishulincug Jan 22, 2018
aed312c
Update stock_trader.py
lishulincug Jan 22, 2018
9bdfe1f
Update efund_mail2.py
lishulincug Jan 22, 2018
a858eb3
Update requirements.txt
lishulincug Jan 22, 2018
2c2c60c
Update requirements.txt
lishulincug Jan 22, 2018
9f745af
Update requirements.txt
lishulincug Jan 22, 2018
491a1a0
Update requirements.txt
lishulincug Jan 22, 2018
d97e639
Update requirements.txt
lishulincug Jan 22, 2018
11d71e0
Update macd_back_test.py
lishulincug Jan 22, 2018
23152d7
Update stock_trader.py
lishulincug Jan 22, 2018
8a301cd
Update stock_trader.py
lishulincug Jan 22, 2018
faa8a02
Update macd_back_test.py
lishulincug Jan 22, 2018
95ca4de
Update stock_trader.py
lishulincug Jan 22, 2018
6d0f272
ss
lishulincug Jan 28, 2018
6bafe11
优化
lishulincug Feb 3, 2018
3e28f77
1
lishulincug Feb 4, 2018
b6fc38a
修改bug
lishulincug Feb 4, 2018
5892561
12
lishulincug Feb 4, 2018
05640a9
1
lishulincug Feb 4, 2018
798e00c
111
lishulincug Feb 4, 2018
2f98c67
2
lishulincug Feb 4, 2018
192f799
超额收益
lishulincug Feb 4, 2018
6f6e1f4
3
lishulincug Feb 4, 2018
aea7977
3
lishulincug Feb 4, 2018
8a862ad
Update efund_mail2.py
lishulincug Feb 4, 2018
cbbb11a
Update efund_mail2.py
lishulincug Feb 5, 2018
74075bc
Update efund_mail2.py
lishulincug Feb 5, 2018
d0ac128
Update efund_mail2.py
lishulincug Feb 5, 2018
6147364
Update efund_mail2.py
lishulincug Feb 5, 2018
545250e
Update fund_zf.py
lishulincug Feb 5, 2018
f6b6e58
Update efund_mail2.py
lishulincug Feb 5, 2018
d3a37b3
Update fund_zf.py
lishulincug Feb 26, 2018
a6ac86a
Update efund_mail2.py
lishulincug Mar 9, 2018
8cc23d9
Update fund_zf.py
lishulincug Mar 9, 2018
9bb2262
Update fund_zf.py
lishulincug Mar 21, 2018
f697f42
Update efund_mail2.py
lishulincug Mar 21, 2018
0813454
Update efund_mail2.py
lishulincug Mar 21, 2018
6bef55d
Update efund_mail2.py
lishulincug Mar 21, 2018
48dda85
Update efund_mail2.py
lishulincug Jul 11, 2018
57309ee
Update macd_back_test.py
lishulincug Jul 11, 2018
a5b92e3
Update efund_mail2.py
lishulincug Jul 11, 2018
6e1747e
Update macd_back_test.py
lishulincug Jul 20, 2018
fabbc3f
Update macd_back_test.py
lishulincug Jul 20, 2018
85eab7e
Update efund_mail2.py
lishulincug Jul 20, 2018
206598b
Update README.md
lishulincug Aug 27, 2018
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions .travis.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
language: python
python:
- "2.7"
# command to install dependencies
install: "pip install -r requirements.txt"
# command to run tests
script:
- python stock_trader.py
386 changes: 193 additions & 193 deletions BP.py
Original file line number Diff line number Diff line change
@@ -1,193 +1,193 @@
# coding=utf-8
# Back-Propagation Neural Networks
# http://blog.csdn.net/akunainiannian/article/details/40073903

import math
import random
import string

# Seed the global RNG so weight initialization is reproducible across runs.
random.seed(0)

# calculate a random number where: a <= rand < b
def rand(a, b):
    """Return a pseudo-random float drawn uniformly from [a, b)."""
    return random.uniform(a, b)

# Make a matrix (we could use NumPy to speed this up)
def makeMatrix(I, J, fill=0.0):
    """Return an I-by-J matrix (list of distinct row lists) filled with `fill`."""
    return [[fill] * J for _ in range(I)]

# our sigmoid function, tanh is a little nicer than the standard 1/(1+e^-x)
def sigmoid(x):
    """Squashing activation: the hyperbolic tangent of x (range (-1, 1))."""
    return math.tanh(x)

# derivative of our sigmoid function, in terms of the output (i.e. y)
def dsigmoid(y):
    """Derivative of tanh expressed through its output y: 1 - y**2."""
    return 1.0 - y * y

class NN:
    """A three-layer (input, hidden, output) back-propagation neural network.

    Uses tanh activations, trained by gradient descent with a momentum term.
    Relies on the module-level helpers rand(), makeMatrix(), sigmoid() and
    dsigmoid().
    """

    def __init__(self, ni, nh, no):
        """Build the network with ni input, nh hidden and no output nodes."""
        # layer sizes; one extra input node serves as the bias
        self.ni = ni + 1  # +1 for bias node
        self.nh = nh
        self.no = no

        # activations for all nodes
        self.ai = [1.0] * self.ni
        self.ah = [1.0] * self.nh
        self.ao = [1.0] * self.no

        # weight matrices: wi is ni x nh (input->hidden), wo is nh x no
        # (hidden->output); the layers are fully connected
        self.wi = makeMatrix(self.ni, self.nh)
        self.wo = makeMatrix(self.nh, self.no)
        # set them to random values: input weights in [-0.2, 0.2),
        # output weights in the wider range [-2.0, 2.0)
        for i in range(self.ni):
            for j in range(self.nh):
                self.wi[i][j] = rand(-0.2, 0.2)
        for j in range(self.nh):
            for k in range(self.no):
                self.wo[j][k] = rand(-2.0, 2.0)

        # last change in weights, kept for the momentum term
        self.ci = makeMatrix(self.ni, self.nh)
        self.co = makeMatrix(self.nh, self.no)

    def update(self, inputs):
        """Propagate `inputs` forward; return a copy of the output activations.

        Raises ValueError if len(inputs) does not match the input layer size
        (excluding the bias node).
        """
        if len(inputs) != self.ni - 1:
            raise ValueError('wrong number of inputs')

        # input activations are the identity (y = x)
        for i in range(self.ni - 1):
            self.ai[i] = inputs[i]

        # hidden activations: weighted sum ("net") squashed through tanh
        # (accumulator renamed from `sum` to avoid shadowing the builtin)
        for j in range(self.nh):
            net = 0.0
            for i in range(self.ni):
                net = net + self.ai[i] * self.wi[i][j]
            self.ah[j] = sigmoid(net)

        # output activations
        for k in range(self.no):
            net = 0.0
            for j in range(self.nh):
                net = net + self.ah[j] * self.wo[j][k]
            self.ao[k] = sigmoid(net)

        return self.ao[:]

    def backPropagate(self, targets, N, M):
        """Run one back-propagation step toward `targets`.

        N is the learning rate and M the momentum factor.  Returns the
        squared error E(w) for this pattern.  Raises ValueError if `targets`
        does not match the output layer size.
        """
        if len(targets) != self.no:
            raise ValueError('wrong number of target values')

        # error terms (deltas) for the output layer: dsigmoid(o) * (t - o)
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            error = targets[k] - self.ao[k]
            output_deltas[k] = dsigmoid(self.ao[k]) * error

        # error terms for the hidden layer, back-propagated from the output
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            error = 0.0
            for k in range(self.no):
                error = error + output_deltas[k] * self.wo[j][k]
            hidden_deltas[j] = dsigmoid(self.ah[j]) * error

        # update hidden->output weights with a momentum term
        # (self.co remembers the previous change)
        for j in range(self.nh):
            for k in range(self.no):
                change = output_deltas[k] * self.ah[j]
                self.wo[j][k] = self.wo[j][k] + N * change + M * self.co[j][k]
                self.co[j][k] = change

        # update input->hidden weights the same way
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j] * self.ai[i]
                self.wi[i][j] = self.wi[i][j] + N * change + M * self.ci[i][j]
                self.ci[i][j] = change

        # total squared error E(w) for this pattern
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5 * (targets[k] - self.ao[k]) ** 2
        return error

    def test(self, patterns):
        """Print each pattern's input alongside the network's prediction."""
        for p in patterns:
            print(p[0], '->', self.update(p[0]))

    def weights(self):
        """Print the current input->hidden and hidden->output weight matrices."""
        print('Input weights:')
        for i in range(self.ni):
            print(self.wi[i])
        print()
        print('Output weights:')
        for j in range(self.nh):
            print(self.wo[j])

    def train(self, patterns, iterations=1000, N=0.5, M=0.1):
        """Train on `patterns` ([inputs, targets] pairs) for `iterations` epochs.

        N is the learning rate and M the momentum factor.  Prints the running
        error every 100 iterations.
        """
        for i in range(iterations):
            error = 0.0
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            if i % 100 == 0:
                print('error %-.5f' % error)


def demo():
    # Train the network on a small sample mapping.  NOTE(review): despite the
    # original "XOR" comment, this data is NOT the XOR problem — it maps the
    # single inputs 0/2/3/4 to targets 0/1/1/5.
    pat = [
        [[0], [0]],
        [[2], [1]],
        [[3], [1]],
        [[4], [5]]
    ]

    # create a network with one input, two hidden, and one output node
    n = NN(1, 2, 1)
    # train it with some patterns
    n.train(pat)
    # test it
    n.test(pat)



# Script entry point: run the training demo when executed directly.
if __name__ == '__main__':
    demo()
# coding=utf-8
# Back-Propagation Neural Networks
# http://blog.csdn.net/akunainiannian/article/details/40073903
import math
import random
import string
# Seed the global RNG so weight initialization is reproducible across runs.
random.seed(0)
# calculate a random number where: a <= rand < b
def rand(a, b):
    """Draw one float uniformly at random from the half-open range [a, b)."""
    span = b - a
    return span * random.random() + a
# Make a matrix (we could use NumPy to speed this up)
def makeMatrix(I, J, fill=0.0):
    """Build an I x J matrix as a list of I independent rows of `fill`."""
    rows = []
    row_count = 0
    while row_count < I:
        rows.append([fill for _ in range(J)])
        row_count += 1
    return rows
# our sigmoid function, tanh is a little nicer than the standard 1/(1+e^-x)
def sigmoid(x):
    """Activation function: tanh, mapping any real x into (-1, 1)."""
    return math.tanh(x)
# derivative of our sigmoid function, in terms of the output (i.e. y)
def dsigmoid(y):
    """tanh derivative written in terms of the activation output y."""
    return 1.0 - y ** 2
class NN:
    """A three-layer (input, hidden, output) back-propagation neural network.

    Uses tanh activations and momentum-based gradient descent; depends on
    the module-level helpers rand(), makeMatrix(), sigmoid() and dsigmoid().
    """
    def __init__(self, ni, nh, no):
        # number of input, hidden, and output nodes (three-layer network)
        self.ni = ni + 1 # +1 for bias node
        self.nh = nh
        self.no = no
        # activations for nodes
        self.ai = [1.0]*self.ni
        self.ah = [1.0]*self.nh
        self.ao = [1.0]*self.no
        # create weights: every input node connects to every hidden node,
        # and every hidden node connects to every output node
        # size: self.ni x self.nh
        self.wi = makeMatrix(self.ni, self.nh)
        # size: self.nh x self.no
        self.wo = makeMatrix(self.nh, self.no)
        # set them to random values: input weights in [-0.2, 0.2),
        # output weights in the wider range [-2.0, 2.0)
        for i in range(self.ni):
            for j in range(self.nh):
                self.wi[i][j] = rand(-0.2, 0.2)
        for j in range(self.nh):
            for k in range(self.no):
                self.wo[j][k] = rand(-2.0, 2.0)
        # last change in weights, kept for the momentum term
        self.ci = makeMatrix(self.ni, self.nh)
        self.co = makeMatrix(self.nh, self.no)
    def update(self, inputs):
        """Feed `inputs` forward through the network; return the outputs."""
        if len(inputs) != self.ni-1:
            raise ValueError('wrong number of inputs')
        # input activations: the identity function (y = x)
        for i in range(self.ni-1):
            #self.ai[i] = sigmoid(inputs[i])
            self.ai[i] = inputs[i]
        # hidden activations: weighted sum squashed by the sigmoid
        # NOTE(review): local `sum` shadows the builtin of the same name
        for j in range(self.nh):
            sum = 0.0
            for i in range(self.ni):
                # `sum` is the "net" input of the node
                sum = sum + self.ai[i] * self.wi[i][j]
            self.ah[j] = sigmoid(sum)
        # output activations
        for k in range(self.no):
            sum = 0.0
            for j in range(self.nh):
                sum = sum + self.ah[j] * self.wo[j][k]
            self.ao[k] = sigmoid(sum)
        return self.ao[:]
    # back-propagation step; `targets` is the sample's expected output
    def backPropagate(self, targets, N, M):
        """One gradient step toward `targets`; N = learning rate, M = momentum.

        Returns the squared error E(w) for this pattern.
        """
        if len(targets) != self.no:
            raise ValueError('wrong number of target values')
        # calculate error terms (deltas) for the output layer
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            # compute (t - o)
            error = targets[k]-self.ao[k]
            # delta = dsigmoid(o) * (t - o)
            output_deltas[k] = dsigmoid(self.ao[k]) * error
        # calculate error terms for the hidden layer,
        # back-propagated from the output deltas
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            error = 0.0
            for k in range(self.no):
                error = error + output_deltas[k]*self.wo[j][k]
            hidden_deltas[j] = dsigmoid(self.ah[j]) * error
        # update output weights; this BP variant adds a momentum term:
        # self.co holds the previous weight change
        # N: learning rate
        # M: momentum factor
        for j in range(self.nh):
            for k in range(self.no):
                change = output_deltas[k]*self.ah[j]
                self.wo[j][k] = self.wo[j][k] + N*change + M*self.co[j][k]
                self.co[j][k] = change
                #print N*change, M*self.co[j][k]
        # update input weights the same way
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j]*self.ai[i]
                self.wi[i][j] = self.wi[i][j] + N*change + M*self.ci[i][j]
                self.ci[i][j] = change
        # calculate the total squared error E(w)
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5*(targets[k]-self.ao[k])**2
        return error
    # test helper: show how well training worked
    def test(self, patterns):
        """Print each pattern's input next to the network's prediction."""
        for p in patterns:
            print(p[0], '->', self.update(p[0]))
    def weights(self):
        """Print the current weight matrices for both layers."""
        print('Input weights:')
        for i in range(self.ni):
            print(self.wi[i])
        print()
        print('Output weights:')
        for j in range(self.nh):
            print(self.wo[j])
    def train(self, patterns, iterations=1000, N=0.5, M=0.1):
        """Train on `patterns` ([inputs, targets] pairs) for `iterations` epochs."""
        # N: learning rate
        # M: momentum factor
        for i in range(iterations):
            error = 0.0
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            # report the running error every 100 epochs
            if i % 100 == 0:
                print('error %-.5f' % error)
def demo():
    # Train the network on a small sample mapping.  NOTE(review): despite the
    # original "XOR" comment, this data is NOT the XOR problem — it maps the
    # single inputs 0/2/3/4 to targets 0/1/1/5.
    pat = [
        [[0], [0]],
        [[2], [1]],
        [[3], [1]],
        [[4], [5]]
    ]
    # create a network with one input, two hidden, and one output node
    n = NN(1, 2, 1)
    # train it with some patterns
    n.train(pat)
    # test it
    n.test(pat)
# Script entry point: run the training demo when executed directly.
if __name__ == '__main__':
    demo()
Binary file added BP.pyc
Binary file not shown.
Loading