使用numpy写的神经网络

本文详细介绍了如何使用Python实现神经网络,并通过实例代码展示了训练过程和预测应用。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

这段时间在自学神经网络,想实战一下,发现网上找不到python版的,自己试着写一个

# -*- coding: utf-8 -*-
import numpy as np
import math

# 20 demo input vectors drawn from [0.1, 0.9) in steps of 0.01,
# one vector per row for now (transposed to column-major further down).
testData = np.arange(0.1, 0.9, 0.01).reshape(-1, 4)

# Target table: four 3-dimensional patterns; transposing makes each
# pattern a column, matching the network's (features, samples) layout.
td = np.array([
	[0.1, 0.9, 0.9],
	[0.9, 0.1, 0.9],
	[0.1, 0.1, 0.1],
	[0.9, 0.9, 0.1],
]).transpose()
def addBias(arr):
	"""Return a copy of 2-D array `arr` with a row of ones appended.

	The all-ones row acts as the bias input for a network layer:
	`arr` is (features, samples); the result is (features + 1, samples).

	Replaces the original np.repeat of a 1x1 array with np.ones, which
	states the intent directly; dtype=int matches the original's integer
	bias row so result dtypes are unchanged for any input dtype.
	"""
	bias_row = np.ones((1, arr.shape[1]), dtype=int)
	return np.vstack((arr, bias_row))

# Arrange samples column-wise: each column of testData is one input vector.
testData = testData.T

def sigmoid(x):
	"""Numerically stable logistic function 1 / (1 + e^-x).

	The original computed math.exp(-x) unconditionally, which raises
	OverflowError once x drops below about -709 (the commented-out
	clamps in the original hint the author hit exactly this).  Routing
	negative inputs through the algebraically equivalent
	exp(x) / (1 + exp(x)) keeps the exponent non-positive, so exp can
	only underflow to 0.0, never overflow.
	"""
	if x >= 0:
		return 1.0 / (1.0 + math.exp(-x))
	ex = math.exp(x)  # x < 0, so ex <= 1: no overflow possible
	return ex / (1.0 + ex)

def dsingmoid(y):
	"""Derivative of the logistic function, expressed via its output y.

	For s = sigmoid(z), s'(z) = s * (1 - s), so the caller passes the
	activation value rather than the pre-activation sum.
	"""
	slope = 1.0 - y
	return y * slope

# Elementwise versions of the activations so they apply to whole matrices.
# The np.float alias was removed in NumPy 1.24 (deprecated since 1.20), so
# the original raised AttributeError on modern numpy; the builtin float is
# the documented replacement for the otypes specification.
sigmoid_ufunc = np.vectorize(sigmoid, otypes=[float])
dsingmoid_ufunc = np.vectorize(dsingmoid, otypes=[float])

class Bpnn(object):
	"""Three-layer (input -> hidden -> output) backpropagation network.

	Data is laid out column-wise: inputs x and targets y are matrices of
	shape (features, samples), so each column is one example.  Activations
	use the module-level sigmoid ufuncs; training is plain full-batch
	gradient descent with an L2 weight-decay term scaled by `beata`.
	"""
	def __init__(self, ni,nh,no):
		# ni/nh/no: number of input, hidden and output units.
		super(Bpnn, self).__init__()
		self.ni = ni
		self.nh = nh
		self.no = no

		# Learning rate and weight-decay coefficient; both are
		# overwritten by train().
		self.alpha = 0.2
		self.beata = 0.5

		# Layer activations: a1 = input, a2 = hidden, a3 = output.
		self.a2 = 0
		self.a3 = 0
		self.a1 = 0

		# Pre-activation sums for the hidden (z2) and output (z3) layers.
		self.z2 = 0
		self.z3 = 0

		# Error deltas and gradients, filled in by backPropagate().
		self.d3 = 0
		self.d2 = 0
		self.gradw2 = 0
		self.gradw1 = 0
		self.gradb2 = 0
		self.gradb1 = 0

		# Weights drawn uniformly from [-0.1, 0.1).  NOTE: w1/w2 are
		# aliases of wih/who (same arrays) until weightUpdate rebinds
		# w1/w2 to new arrays on the first training step.
		self.wih = np.random.rand(nh,ni) * 0.2 - 0.1
		self.who = np.random.rand(no,nh) * 0.2 - 0.1
		self.w1 = self.wih
		self.w2 = self.who

		self.b1 = np.random.rand(nh,1) * 0.2 - 0.1
		self.b2 = np.random.rand(no,1) * 0.2 - 0.1

		''' all one matrix in order to check whether it is correct
		self.wih = np.zeros((nh,ni)) + 1
		self.who = np.zeros((no,nh)) + 1
		'''


	def feedforward(self,x):
		"""Forward pass for input x of shape (ni, samples).

		Stores intermediate activations on self for backPropagate();
		self.a3 holds the network output afterwards.
		"""
		self.a1 = x
		self.z2 = np.dot(self.w1 , x) + self.b1
		self.a2 = sigmoid_ufunc(self.z2)
		self.z3 = np.dot(self.w2,self.a2) +self.b2
		self.a3 = sigmoid_ufunc(self.z3)

	def backPropagate(self,y):
		"""Compute gradients for targets y; call feedforward() first.

		dsingmoid takes the layer *output*, so dsingmoid_ufunc(a) equals
		f'(z) for the sigmoid.  The `beata` terms add L2 weight decay to
		the weight gradients (biases are not decayed).
		"""
		self.d3 = -(y - self.a3) * dsingmoid_ufunc(self.a3) #d3=-(y-a3).f'(z3)
		self.d2 = np.dot(np.transpose(self.w2),self.d3) * dsingmoid_ufunc(self.a2) #d2=((w2Td3)).f'(z2)
		self.gradw2 = np.dot(self.d3,np.transpose(self.a2)) + self.beata *self.w2 #grad2=d3(a2T)
		self.gradb2 = self.d3
		self.gradw1 = np.dot(self.d2,np.transpose(self.a1)) + self.beata *self.w1 #grad1=d2(a1T)
		self.gradb1 = self.d2

	def weightUpdate(self,x,y):
		"""One gradient-descent step on the batch (x, y)."""
		self.feedforward(x)
		self.backPropagate(y)

		# (The commented-out dcheck() calls can replace the analytic
		# gradients with numerical ones when debugging backPropagate.)
		# self.gradw2 = self.dcheck(self.w2,x,y)
		# self.gradb2 = self.dcheck(self.b2,x,y)
		# self.gradw1 = self.dcheck(self.w1,x,y)
		# self.gradb1 = self.dcheck(self.b1,x,y)

		self.w2 = self.w2 - self.gradw2 * self.alpha
		self.w1 = self.w1 - self.gradw1 * self.alpha
		self.b2 = self.b2 - self.gradb2 * self.alpha
		self.b1 = self.b1 - self.gradb1 * self.alpha

	def train (self,x,y,iterations=1000,alpha=0.2,beata=0.5):
		"""Full-batch training for at most `iterations` steps.

		Prints the regularized (`error`) and plain (`error1`) squared
		errors every 100 iterations and stops early when `error` drops
		below 0.001.  NOTE(review): `error` adds the raw squared weight
		norms, not scaled by beata, so the early-stop threshold is
		effectively unreachable — confirm whether that is intended.
		"""
		self.alpha = alpha
		self.beata = beata
		for i in range(iterations):

			#for j in range (np.shape(x)[1]):

				#self.weightUpdate(x[:,j:j+1],y[:,j:j+1])
				self.weightUpdate(x,y)
				error = np.sum((y - self.a3) ** 2) + np.sum(self.w2 ** 2) +np.sum(self.w1 ** 2)
				error1 = np.sum((y - self.a3) ** 2)
				if (error < 0.001):break
				if i % 100 == 0:
					print('error ' , error)
					print('error1',error1)

	def predict(self,x):
		"""Run a forward pass on x and print (not return) the output a3."""
		self.feedforward(x)
		print (self.a3)

	def dcheck(self,w,x,y):
		"""Numerical gradient of the squared error w.r.t. weight matrix w.

		Central differences: each entry is nudged by +/-0.0001 in turn,
		the error re-evaluated, and the entry restored; returns a matrix
		of the same shape as w.  Used to validate backPropagate().
		"""
		ww = np.copy(w)
		for i in range(w.shape[0]):
			for j in range(w.shape[1]):
				wij = w[i,j]
				w[i,j] += 0.0001
				self.feedforward(x)
				self.backPropagate(y)
				you = np.sum((y - self.a3) ** 2)  # error at +eps ("you" = right)
				w[i,j] = wij

				w[i,j] -= 0.0001
				self.feedforward(x)
				self.backPropagate(y)
				zuo = np.sum((y - self.a3) ** 2)  # error at -eps ("zuo" = left)
				w[i,j] = wij

				ww[i,j] = (you - zuo) / 0.0002
		return ww


def test():
	"""Train a 4-3-4 autoencoder on testData and print its reconstruction."""
	net = Bpnn(4, 3, 4)
	net.train(testData, testData, 2000, 0.2, 0.2)
	print ('testData',testData)
	net.predict(testData)

# Run the demo training only when this file is executed as a script.
if __name__ == '__main__':
	test()
	
自学的python ,语法差,代码质量差,欢迎大家提出宝贵意见

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值