(Method 1) A simple neural network with 30 units in the first hidden layer, 60 units in the second hidden layer, and 10 units in the output layer:
import numpy as np
from dataset.mnist import load_mnist
import matplotlib.pylab as plt
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def sigmoid_grad(x):
    return (1.0 - sigmoid(x)) * sigmoid(x)
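sigmoid_grad is the analytic derivative of sigmoid, sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)). A quick illustrative probe (not part of the original listing):

print(sigmoid_grad(0.0))                    # 0.25, the derivative's maximum
print(sigmoid_grad(np.array([-2.0, 2.0])))  # approx [0.105 0.105], symmetric around 0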
def softmax(x):
    if x.ndim == 2:
        x = x.T
        x = x - np.max(x, axis=0)
        y = np.exp(x) / np.sum(np.exp(x), axis=0)
        return y.T
    x = x - np.max(x)  # guard against overflow
    return np.exp(x) / np.sum(np.exp(x))
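The max-subtraction is a numerical-stability trick: softmax(x) equals softmax(x - c) for any constant c, so subtracting np.max(x) keeps np.exp from overflowing without changing the result. A quick illustrative check (the input values are hypothetical, not from the original):

p = softmax(np.array([0.3, 2.9, 4.0]))
print(p)          # approx [0.018 0.245 0.737]
print(np.sum(p))  # 1.0, i.e. the outputs form a probability distribution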
def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    # If the labels are one-hot vectors, convert them to class indices
    if t.size == y.size:
        t = t.argmax(axis=1)
    batch_size = y.shape[0]
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
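cross_entropy_error therefore accepts both one-hot labels and plain class indices, and the 1e-7 inside the log guards against log(0). An illustrative call with hypothetical values:

y_demo = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
t_demo = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])   # one-hot label for class 2
print(cross_entropy_error(y_demo, t_demo))          # approx 0.51, i.e. -log(0.6)
print(cross_entropy_error(y_demo, np.array([2])))   # same result with an index label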
def numerical_gradient(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)
        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val  # restore the original value
        it.iternext()
    return grad
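numerical_gradient approximates each partial derivative with a central difference, (f(x+h) - f(x-h)) / 2h, iterating over every element of x. Verifying it on a hypothetical test function f(x) = x0^2 + x1^2, whose true gradient is 2x (not part of the original listing):

def f_sum_sq(x):  # hypothetical test function, true gradient is 2*x
    return np.sum(x ** 2)

print(numerical_gradient(f_sum_sq, np.array([3.0, 4.0])))  # approx [6. 8.]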
# (x_train,t_train),(x_test,t_test)=load_mnist(normalize=True,one_hot_label=True)
# Network class (named TwoLayerNet, but it has two hidden layers plus the output layer, i.e. three weight matrices)
class TwoLayerNet:
    def __init__(self, input_size, hidden_size1, hidden_size2, output_size, weight_init_std=0.01):
        # Initialize the weights with small Gaussian noise and the biases with zeros
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size1)
        self.params['b1'] = np.zeros(hidden_size1)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size1, hidden_size2)
        self.params['b2'] = np.zeros(hidden_size2)
        self.params['W3'] = weight_init_std * np.random.randn(hidden_size2, output_size)
        self.params['b3'] = np.zeros(output_size)
    def predict(self, x):
        # Forward pass: two sigmoid hidden layers followed by a softmax output
        W1, W2, W3 = self.params['W1'], self.params['W2'], self.params['W3']
        b1, b2, b3 = self.params['b1'], self.params['b2'], self.params['b3']
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        z2 = sigmoid(a2)
        a3 = np.dot(z2, W3) + b3
        y = softmax(a3)
        return y
    # Loss function: cross-entropy between the predictions and the labels
    def loss(self, x, t):
        y = self.predict(x)
        return cross_entropy_error(y, t)
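A minimal usage sketch, assuming MNIST-sized inputs (784 = 28x28) and the 30/60/10 layer sizes from the title; the dummy batch below is hypothetical and only checks the output shape and the initial loss:

network = TwoLayerNet(input_size=784, hidden_size1=30, hidden_size2=60, output_size=10)
x_dummy = np.random.rand(5, 784)                    # hypothetical batch of 5 flattened images
t_dummy = np.eye(10)[np.random.randint(0, 10, 5)]   # random one-hot labels
print(network.predict(x_dummy).shape)               # (5, 10)
print(network.loss(x_dummy, t_dummy))               # roughly log(10), about 2.3, before training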