# coding: utf-8
# [Deep Learning Framework] Implementing a Regression Neural Network in Theano
import numpy as np
import theano.tensor as T
import matplotlib.pyplot as plt
import theano
from theano import function
class Layer(object):
    def __init__(self, inputs, input_size, output_size, activation_function=None):
        # weights drawn from a standard normal; biases start at a small positive 0.1
        self.W = theano.shared(np.random.normal(0, 1, (input_size, output_size)))
        self.b = theano.shared(np.zeros((output_size,)) + 0.1)
        self.y = theano.dot(inputs, self.W) + self.b
        self.activation_function = activation_function
        if activation_function is None:
            self.outputs = self.y
        else:
            self.outputs = self.activation_function(self.y)
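# The layer computes outputs = activation(inputs.dot(W) + b); b is broadcast
# across rows, so an (N, input_size) batch maps to an (N, output_size) batch.
# A quick shape check (a minimal sketch with made-up names, not part of the script):
#   x_demo = T.dmatrix('x_demo')
#   demo = function([x_demo], Layer(x_demo, 1, 10, T.nnet.softplus).outputs)
#   demo(np.zeros((5, 1))).shape   # -> (5, 10)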
# make up data
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
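# underlying target: y = x^2 - 0.5; the Gaussian noise (sigma = 0.05) keeps
# the fit from being exact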
# show the data
#plt.scatter(x_data, y_data)
#plt.show()
# determine the inputs dtype
x = T.dmatrix('x')
y = T.dmatrix('y')
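# dmatrix declares a symbolic float64 matrix; x and y are placeholders that
# only receive concrete values when a compiled function is called on them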
# add layers
l1 = Layer(x, 1, 10, T.nnet.softplus)
l2 = Layer(l1.outputs, 10, 1, None)
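# architecture: 1 input -> 10 hidden units (softplus) -> 1 linear output.
# softplus(z) = log(1 + exp(z)) is a smooth ReLU-like nonlinearity, which
# lets the hidden layer capture the curvature of x^2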
# compute the cost
cost = T.mean(T.square(l2.outputs - y))
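# mean-squared error: cost = (1/N) * sum_i (prediction_i - target_i)^2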
# compute the gradients
gW1, gb1, gW2, gb2 = T.grad(cost, [l1.W, l1.b, l2.W, l2.b])
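# T.grad differentiates cost symbolically, returning one gradient expression
# per parameter, in the same order as the list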
# apply gradient descent
learning_rate = 0.05
train = function(
    inputs=[x, y],
    outputs=cost,
    updates=[(l1.W, l1.W - learning_rate * gW1),
             (l1.b, l1.b - learning_rate * gb1),
             (l2.W, l2.W - learning_rate * gW2),
             (l2.b, l2.b - learning_rate * gb2)]
)
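# Each (shared_variable, new_value) pair in `updates` is applied after every
# call, so train(x_data, y_data) performs one full-batch gradient-descent
# step. Roughly what happens for each parameter p with gradient g (a sketch,
# assuming g has already been evaluated to a numeric array):
#   p.set_value(p.get_value() - learning_rate * g)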
# predict
predict = function(inputs=[x], outputs=l2.outputs)
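# predict compiles the forward pass only (no updates); once training has
# converged, predict(np.array([[0.0]])) should be close to [[-0.5]],
# since the target curve is y = x^2 - 0.5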
# plot the data
#fig = plt.figure()
#ax = fig.add_subplot(1, 1, 1)
#ax.scatter(x_data, y_data)
#plt.ion()
#plt.show()
for i in range(1000):
    # one full-batch training step; returns the current cost
    err = train(x_data, y_data)
    if i % 50 == 0:
        print(i, err)
    # interactive re-plotting during training (left disabled):
    #try:
    #    ax.lines.remove(lines[0])
    #except Exception:
    #    pass
    #predict_y = predict(x_data)
    #lines = ax.plot(x_data, predict_y, 'r-', lw=5)
    #plt.pause(1)
# plot the fitted curve over the training data
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
predict_y = predict(x_data)
lines = ax.plot(x_data, predict_y, 'r-', lw=5)
plt.show()