# encoding:utf-8
import numpy as np
import random
class Network(object):
    """A simple fully-connected feed-forward neural network.

    ``sizes`` lists the number of neurons per layer: ``[3, 4, 1]`` builds a
    network with 3 inputs, one hidden layer of 4 neurons, and 1 output.
    Biases and weights are initialised from a standard normal distribution.
    """

    def __init__(self, sizes):
        """Initialise layer count, biases and weights.

        :param sizes: list of layer sizes, input layer first.
        """
        self.num_layers = len(sizes)
        self.sizes = sizes
        # One bias column vector per non-input layer: shape (y, 1).
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        # One weight matrix per layer transition: shape (y, x) maps a layer
        # of x activations onto the next layer's y neurons via np.dot(w, a).
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(sizes[:-1], sizes[1:])]

    def sigmoid(self, z):
        """Element-wise logistic sigmoid 1 / (1 + e^-z)."""
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoid_prime(self, z):
        """Derivative of the sigmoid, evaluated element-wise at ``z``."""
        s = self.sigmoid(z)
        return s * (1 - s)

    def cost_derivative(self, output_activations, y):
        """Gradient of the quadratic cost w.r.t. the output activations."""
        return output_activations - y

    def backprop(self, x, y):
        """Return a tuple ``(nabla_b, nabla_w)`` — the gradient of the
        quadratic cost for a single training example ``(x, y)``.

        ``nabla_b`` and ``nabla_w`` are lists of arrays with the same
        shapes as ``self.biases`` and ``self.weights`` respectively.
        ``x`` is a column vector of inputs, ``y`` the desired output.
        """
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # Feed-forward: record every weighted input z and every activation.
        activation = x
        activations = [x]   # activations, layer by layer
        zs = []             # z vectors, layer by layer
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = self.sigmoid(z)
            activations.append(activation)
        # Backward pass.  The output-layer error is cost' * sigma'(z);
        # the original code mistakenly multiplied by sigmoid(z) itself.
        delta = self.cost_derivative(activations[-1], y) * \
            self.sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Propagate the error back through the hidden layers.
        # l = 2 is the second-to-last layer, l = 3 the one before, etc.
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = self.sigmoid_prime(z)
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
        return (nabla_b, nabla_w)
# Example: code implementing the backpropagation (BP) algorithm.
# (Source article published 2025-05-19 16:47:35.)