AI Course Assignment: Three-Layer Neural Network with Early-Stage SGD and Mid-Stage Mini-Batch Training

This post walks through the implementation details of a three-layer neural network, including the key steps of forward propagation and backpropagation, and demonstrates the training process with concrete Python code. It covers weight initialization, activation functions, error computation, and weight updates.

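The code imports its helper functions (rand, makeMatrix, sigmoid, dsigmoid) from a local module named fuction that the post does not include. A minimal sketch of what that module plausibly contains, inferred from how the helpers are used below (the exact definitions are assumptions):

import math
import random

def rand(a, b):
    # uniform random float in [a, b)
    return (b - a) * random.random() + a

def makeMatrix(rows, cols, fill=0.0):
    # build a rows x cols matrix as a list of lists
    return [[fill] * cols for _ in range(rows)]

def sigmoid(x):
    # logistic activation function
    return 1.0 / (1.0 + math.exp(-x))

def dsigmoid(y):
    # derivative of the logistic function, written in terms of its output y = sigmoid(x)
    return y * (1.0 - y)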

import numpy as np
from fuction import *  # local helper module providing rand, makeMatrix, sigmoid, dsigmoid ('fuction' is the module's actual file name)
from classNN_for_parallel import NN_P  # parallel variant of the network; imported but not used in this snippet

# Neural network
class NN:
 def __init__(self, ni, nh, no):  # number of input, hidden, and output nodes
  self.ni = ni + 1  # +1 for the bias node on the input layer
  self.nh = nh
  self.no = no

  # activation vectors for the input, hidden, and output layers
  self.ai = [1.0]*self.ni
  self.ah = [1.0]*self.nh
  self.ao = [1.0]*self.no

  # initialize the weights to random values in [-1, 1]
  self.wi = makeMatrix(self.ni, self.nh)
  self.wo = makeMatrix(self.nh, self.no)
  for i in range(self.ni):
   for j in range(self.nh):
    self.wi[i][j] = rand(-1, 1)
  for j in range(self.nh):
   for k in range(self.no):
    self.wo[j][k] = rand(-1, 1)

  # matrices that record the most recent weight changes, filled in by backPropagate
  # (note: these are not biases; the bias is handled by the extra input node)
  self.ci = makeMatrix(self.ni, self.nh)
  self.co = makeMatrix(self.nh, self.no)
 def update(self, inputs):
  if len(inputs) != self.ni-1:
   raise ValueError('input length does not match the number of input nodes!')
  # activate the input layer (the extra node self.ai[-1] stays at 1.0 and acts as the bias)
  for i in range(self.ni-1):
   self.ai[i] = inputs[i]
  # activate the hidden layer
  for j in range(self.nh):
   total = 0.0
   for i in range(self.ni):
    total = total + self.ai[i] * self.wi[i][j]
   self.ah[j] = sigmoid(total)
  # activate the output layer
  for k in range(self.no):
   total = 0.0
   for j in range(self.nh):
    total = total + self.ah[j] * self.wo[j][k]
   self.ao[k] = sigmoid(total)
  return self.ao[:]
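 # The same forward pass in matrix form, as a sketch (assumes the weight lists
 # are converted with np.array; not part of the original code):
 #   ah = sigmoid(ai @ wi)   # shapes: (ni,) @ (ni, nh) -> (nh,)
 #   ao = sigmoid(ah @ wo)   # shapes: (nh,) @ (nh, no) -> (no,)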


 # backpropagation
 def backPropagate(self, targets, N):  # N: learning rate
  if len(targets) != self.no:
   raise ValueError('target length does not match the number of output nodes!')
  # compute the output layer deltas
  output_deltas = [0.0] * self.no
  for k in range(self.no):
   error = targets[k]-self.ao[k]
   output_deltas[k] = dsigmoid(self.ao[k]) * error
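  # For squared error E = 0.5*(t_k - ao_k)^2 on a logistic output unit,
  # delta_k = (t_k - ao_k) * ao_k * (1 - ao_k); this is why dsigmoid is applied
  # to the stored activation ao[k] rather than to the raw net input.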
  # compute the hidden layer deltas
  hidden_deltas = [0.0] * self.nh
  for j in range(self.nh):
   error = 0.0
   for k in range(self.no):
    error = error + output_deltas[k]*self.wo[j][k]
   hidden_deltas[j] = dsigmoid(self.ah[j]) * error
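  # Chain rule through every output unit: delta_j = dsigmoid(ah_j) * sum_k(delta_k * wo[j][k])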
  # update hidden-to-output weights
  for j in range(self.nh):
   for k in range(self.no):
    change = output_deltas[k]*self.ah[j]
    self.wo[j][k] = self.wo[j][k] + N*change
    self.co[j][k] = change
  # update input-to-hidden weights
  for i in range(self.ni):
   for j in range(self.nh):
    change = hidden_deltas[j]*self.ai[i]
    self.wi[i][j] = self.wi[i][j] + N*change
    self.ci[i][j] = change
  # total error on this sample: E = 0.5 * sum_k (targets[k] - ao[k])^2
  error = 0.0
  for k in range(len(targets)):
   error = error + 0.5*(targets[k]-self.ao[k])**2
  return error, self.wi, self.wo, self.ci, self.co
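 # Besides the error, backPropagate returns the weight and change matrices;
 # presumably these feed the parallel variant (NN_P) imported above.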


 # def weights(self):
 #  print('input layer weights:')
 #  for i in range(self.ni):
 #   print(self.wi[i])
 #  print()
 #  print('output layer weights:')
 #  for j in range(self.nh):
 #   print(self.wo[j])


 def train(self, patterns, N=0.0005):
  train_size = len(patterns)
  batch_size = 3  # adjust batch_size here
  error = 1
  count = 0
  # stage 1: plain SGD, one randomly drawn sample per step
  while error > 0.01:
   i = np.random.randint(0, len(patterns))
   p = patterns[i]
   inputs = p[0]
   targets = p[1]
   self.update(inputs)
   error,_,_,_,_ = self.backPropagate(targets, N)
   count = count + 1
   if count % 100 == 0:
    print('stage 1 error %-.5f' % error)

  # stage 2: mini-batch sampling; each batch is drawn without replacement, and the
  # weights are still updated once per sample (the gradients are not averaged)
  while error > 0.00001:
   batch_mask = np.random.choice(train_size, batch_size, replace=False)
   p_batch = []
   for i in batch_mask:
    p_batch.append(patterns[i])
   for p in p_batch:
    inputs = p[0]
    targets = p[1]
    self.update(inputs)
    error, wi1, wo1, ci1, co1 = self.backPropagate(targets, N)
    count = count + 1
   print('stage 2 error %-.5f' % error)

  print('iterations', count)
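A minimal usage sketch, training the network on XOR. The pattern format [inputs, targets] matches what train expects; the layer sizes, learning rate, and data here are illustrative assumptions, not part of the original assignment:

if __name__ == '__main__':
    # XOR: two inputs, one output
    patterns = [
        [[0, 0], [0]],
        [[0, 1], [1]],
        [[1, 0], [1]],
        [[1, 1], [0]],
    ]
    net = NN(2, 4, 1)  # 2 input, 4 hidden, 1 output node
    net.train(patterns, N=0.5)  # larger than the 0.0005 default, to speed up this toy example
    for inputs, _ in patterns:
        print(inputs, '->', net.update(inputs))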