I've been teaching myself neural networks lately and wanted to try a hands-on implementation. I couldn't find a Python version online, so I had a go at writing one myself.
# -*- coding: utf-8 -*-
import numpy as np
import math

# 20 training samples with 4 features each; transposed below so that each
# column holds one sample
testData = np.arange(0.1, 0.9, 0.01).reshape(-1, 4)
# td holds four 3-element patterns (the XOR truth table encoded with 0.1/0.9
# instead of 0/1); transposed so each column is one pattern. It is not used
# by test() below, but see the XOR demo at the end.
td = np.array([
    [0.1, 0.9, 0.9],
    [0.9, 0.1, 0.9],
    [0.1, 0.1, 0.1],
    [0.9, 0.9, 0.1],
])
td = np.transpose(td)
def addBias(arr):
    # append a row of ones so a bias column can be folded into a weight matrix
    ones = np.ones((1, arr.shape[1]))
    return np.vstack((arr, ones))
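# addBias() is not called below (Bpnn keeps explicit b1/b2 vectors); it would
# let the bias be folded into the weight matrix instead, along the lines of
#   wb = np.hstack((w, b)); z = np.dot(wb, addBias(x))   # == np.dot(w, x) + b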
testData = np.transpose(testData)   # one column per sample
#testData = addBias(testData)
def sigmoid(x):
    # logistic activation; math.exp overflows for very large |x|, hence the
    # (currently disabled) clamping below
    # if x > 100: return 1.0
    # if x < -100: return 0.0
    return 1.0 / (1.0 + math.exp(-x))

def dsigmoid(y):
    # derivative of the sigmoid, expressed in terms of its output y = sigmoid(x)
    return y * (1.0 - y)

# otypes uses the built-in float (the old np.float alias no longer exists in NumPy)
sigmoid_ufunc = np.vectorize(sigmoid, otypes=[float])
dsigmoid_ufunc = np.vectorize(dsigmoid, otypes=[float])
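# Note: np.vectorize loops in Python, so it is slow on large arrays. Since
# np.exp is already elementwise, an equivalent vectorised pair would simply be
#   def sigmoid(x): return 1.0 / (1.0 + np.exp(-x))
#   def dsigmoid(y): return y * (1.0 - y)
# The np.vectorize versions are kept to mirror the scalar definitions above.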
class Bpnn(object):
    """A minimal three-layer (input-hidden-output) BP neural network."""
    def __init__(self, ni, nh, no):
        super(Bpnn, self).__init__()
        self.ni = ni         # number of input units
        self.nh = nh         # number of hidden units
        self.no = no         # number of output units
        self.alpha = 0.2     # learning rate
        self.beta = 0.5      # L2 weight-decay coefficient
        self.a1 = self.a2 = self.a3 = 0   # layer activations
        self.z2 = self.z3 = 0             # pre-activations
        self.d2 = self.d3 = 0             # error terms (deltas)
        self.gradw1 = self.gradw2 = 0
        self.gradb1 = self.gradb2 = 0
        # weights and biases start uniform in [-0.1, 0.1)
        self.w1 = np.random.rand(nh, ni) * 0.2 - 0.1   # input -> hidden
        self.w2 = np.random.rand(no, nh) * 0.2 - 0.1   # hidden -> output
        self.b1 = np.random.rand(nh, 1) * 0.2 - 0.1
        self.b2 = np.random.rand(no, 1) * 0.2 - 0.1
        ''' all-ones matrices, handy when checking the arithmetic by hand:
        self.w1 = np.zeros((nh, ni)) + 1
        self.w2 = np.zeros((no, nh)) + 1
        '''
    def feedforward(self, x):
        # forward pass; x has one column per sample
        self.a1 = x
        self.z2 = np.dot(self.w1, x) + self.b1
        self.a2 = sigmoid_ufunc(self.z2)
        self.z3 = np.dot(self.w2, self.a2) + self.b2
        self.a3 = sigmoid_ufunc(self.z3)
    def backPropagate(self, y):
        self.d3 = -(y - self.a3) * dsigmoid_ufunc(self.a3)                          # d3 = -(y - a3) .* f'(z3)
        self.d2 = np.dot(np.transpose(self.w2), self.d3) * dsigmoid_ufunc(self.a2)  # d2 = (w2^T d3) .* f'(z2)
        self.gradw2 = np.dot(self.d3, np.transpose(self.a2)) + self.beta * self.w2  # dJ/dw2 = d3 a2^T + beta*w2
        self.gradw1 = np.dot(self.d2, np.transpose(self.a1)) + self.beta * self.w1  # dJ/dw1 = d2 a1^T + beta*w1
        # bias gradients are summed over the sample columns so that b1/b2
        # keep their (n, 1) shape for any batch size
        self.gradb2 = np.sum(self.d3, axis=1, keepdims=True)
        self.gradb1 = np.sum(self.d2, axis=1, keepdims=True)
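    # Derivation sketch: with J = 0.5*||y - a3||^2 + (beta/2)*(||w1||^2 + ||w2||^2)
    # and a3 = f(z3), the chain rule gives d3 = dJ/dz3 = -(y - a3) .* f'(z3).
    # From z3 = w2 a2 + b2 it follows that dJ/dw2 = d3 a2^T + beta*w2 and
    # dJ/da2 = w2^T d3, which multiplied elementwise by f'(z2) yields d2.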
    def weightUpdate(self, x, y):
        self.feedforward(x)
        self.backPropagate(y)
        # the dcheck() calls below replace the analytic gradients with numeric
        # ones; useful for debugging, far too slow for real training
        # self.gradw2 = self.dcheck(self.w2, x, y)
        # self.gradb2 = self.dcheck(self.b2, x, y)
        # self.gradw1 = self.dcheck(self.w1, x, y)
        # self.gradb1 = self.dcheck(self.b1, x, y)
        self.w2 = self.w2 - self.gradw2 * self.alpha
        self.w1 = self.w1 - self.gradw1 * self.alpha
        self.b2 = self.b2 - self.gradb2 * self.alpha
        self.b1 = self.b1 - self.gradb1 * self.alpha
    def train(self, x, y, iterations=1000, alpha=0.2, beta=0.5):
        self.alpha = alpha
        self.beta = beta
        for i in range(iterations):
            # full-batch update; the commented loop would train per sample instead
            # for j in range(np.shape(x)[1]):
            #     self.weightUpdate(x[:, j:j+1], y[:, j:j+1])
            self.weightUpdate(x, y)
            # error1 is the data term; error adds the weight-decay penalty so it
            # matches the objective whose gradients backPropagate() computes
            error1 = 0.5 * np.sum((y - self.a3) ** 2)
            error = error1 + 0.5 * self.beta * (np.sum(self.w1 ** 2) + np.sum(self.w2 ** 2))
            if error < 0.001:
                break
            if i % 100 == 0:
                print('error ', error)
                print('error1', error1)
    def predict(self, x):
        self.feedforward(x)
        print(self.a3)
        return self.a3
    def dcheck(self, w, x, y):
        # numeric gradient via central differences: perturb each entry of w by
        # +/-eps and compare (J(w+eps) - J(w-eps)) / (2*eps) with the analytic
        # gradient. Only the data term is differentiated, so check with beta = 0.
        ww = np.copy(w)
        eps = 0.0001
        for i in range(w.shape[0]):
            for j in range(w.shape[1]):
                wij = w[i, j]
                w[i, j] = wij + eps
                self.feedforward(x)
                you = 0.5 * np.sum((y - self.a3) ** 2)   # error to the "right" (+eps)
                w[i, j] = wij - eps
                self.feedforward(x)
                zuo = 0.5 * np.sum((y - self.a3) ** 2)   # error to the "left" (-eps)
                w[i, j] = wij                            # restore the weight
                ww[i, j] = (you - zuo) / (2 * eps)
        return ww
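# Example of verifying backPropagate() with dcheck() (a hypothetical snippet;
# set beta to 0 first, since dcheck differentiates only the data term):
#   net = Bpnn(4, 3, 4); net.beta = 0.0
#   x = testData[:, 0:1]
#   numeric = net.dcheck(net.w2, x, x)
#   net.feedforward(x); net.backPropagate(x)     # recompute analytic gradients
#   print(np.max(np.abs(numeric - net.gradw2)))  # should be on the order of 1e-8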
def test():
    # a 4-3-4 autoencoder: the network learns to reproduce its input
    # through a 3-unit hidden bottleneck
    bpnn = Bpnn(4, 3, 4)
    bpnn.train(testData, testData, 2000, 0.2, 0.2)
    print('testData', testData)
    bpnn.predict(testData)

if __name__ == '__main__':
    test()
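As a further exercise, the td array defined at the top is the XOR truth table encoded with 0.1/0.9 in place of 0/1: the first two entries of each original row are the inputs and the third is the target. Here is a minimal, hypothetical sketch (the xorDemo name is mine) of training on it under that reading; convergence depends on the random initialisation, so more iterations may be needed:

def xorDemo():
    # hypothetical demo: rows 0-1 of td (after the transpose) are the two
    # inputs, row 2 is the XOR target
    xorIn = td[0:2, :]     # shape (2, 4): one column per sample
    xorOut = td[2:3, :]    # shape (1, 4)
    net = Bpnn(2, 3, 1)
    net.train(xorIn, xorOut, 5000, 0.5, 0.0)   # beta=0: no weight decay
    net.predict(xorIn)     # columns should approach 0.9, 0.9, 0.1, 0.1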
I taught myself Python, so the syntax and overall code quality are rough. Comments and suggestions are very welcome.