import numpy as np


# tanh activation function
def tanh(x):
    return np.tanh(x)


# derivative of the tanh function
def tan_deriv(x):
    return 1.0 - np.tanh(x) * np.tanh(x)
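
# Quick finite-difference sanity check for tan_deriv (a minimal added sketch;
# the probe point 0.5 and the step size 1e-6 are arbitrary choices, not part
# of the original code).
assert abs(tan_deriv(0.5) - (tanh(0.5 + 1e-6) - tanh(0.5 - 1e-6)) / 2e-6) < 1e-6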

# sigmoid (logistic) activation function
def logistic(x):
    return 1 / (1 + np.exp(-x))


# derivative of the sigmoid function
def logistic_derivative(x):
    return logistic(x) * (1 - logistic(x))


class NeuralNetwork:
    def __init__(self, layers, activation='tanh'):
        """Neural network constructor.
        :param layers: list containing the number of units in each layer
        :param activation: activation function to use (default: tanh)
        :return: none
        """
        if activation == 'logistic':
            self.activation = logistic
            self.activation_deriv = logistic_derivative
        elif activation == 'tanh':
            self.activation = tanh
            self.activation_deriv = tan_deriv

        # list of weight matrices
        self.weights = []
        # initialize the weights randomly in (-0.25, 0.25)
        for i in range(1, len(layers) - 1):
            self.weights.append((2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1) * 0.25)
            self.weights.append((2 * np.random.random((layers[i] + 1, layers[i + 1])) - 1) * 0.25)

    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        """Train the neural network.
        :param X: training data set (usually 2-D)
        :param y: class labels
        :param learning_rate: learning rate (default 0.2)
        :param epochs: number of training iterations (maximum loop count, default 10000)
        :return: none
        """
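        # The training loop below performs stochastic gradient descent: for
        # one randomly chosen sample, a forward pass stores every layer
        # activation in a, the output error is propagated backwards as
        #     delta_out = (y - a_out) * f'(a_out)
        #     delta_l   = (delta_{l+1} . W_l^T) * f'(a_l)
        # and each weight matrix W_l is nudged by learning_rate * a_l^T . delta_l.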
        # make sure the data set is 2-D
        X = np.atleast_2d(X)
        # append a bias column of ones to every sample
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X
        X = temp
        y = np.array(y)

        for k in range(epochs):
            # pick a random row of X
            i = np.random.randint(X.shape[0])
            # update the network with this randomly drawn sample
            a = [X[i]]

            # forward pass
            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))
            error = y[i] - a[-1]
            deltas = [error * self.activation_deriv(a[-1])]

            # backward pass
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)

    def predict(self, x):
        x = np.array(x)
        # append the bias term to the input vector
        temp = np.ones(x.shape[0] + 1)
        temp[0:-1] = x
        a = temp
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a
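

# A minimal usage sketch: train the network on XOR. The 2-2-1 layer layout and
# reliance on fit()'s default hyper-parameters are assumptions for this demo,
# not part of the original code.
if __name__ == "__main__":
    nn = NeuralNetwork([2, 2, 1], 'tanh')
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([0, 1, 1, 0])
    nn.fit(X, y)
    for sample in [[0, 0], [0, 1], [1, 0], [1, 1]]:
        print(sample, nn.predict(sample))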