import numpy as np
import os
import time
def sigmoid(x): # 定义激活函数
return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
    """Derivative of the sigmoid, expressed in terms of its OUTPUT.

    Callers must pass s = sigmoid(z), not z itself: d/dz sigmoid(z) = s * (1 - s).
    This is how the training loop below uses it (it passes `output_layer`).
    """
    complement = 1 - x
    return x * complement
# Training samples: six rows of three binary features each.
inputs = np.array([
    [0, 0, 1],
    [1, 1, 0],
    [0, 0, 0],
    [1, 1, 1],
    [1, 0, 1],
    [0, 1, 1],
])

# Target label per sample; note each label equals the sample's first feature,
# which is the pattern the network ends up learning.
outputs = np.array([[0], [1], [0], [1], [1], [0]])

# Reproducible init: one weight per input column, drawn uniformly from [-1, 1).
np.random.seed(1)
weights = 2 * np.random.random((inputs.shape[1], 1)) - 1
# --- Training loop: full-batch gradient descent on a single sigmoid neuron ---
# Unlabeled inputs to classify: all 8 combinations of three binary features.
# This array never changes, so it is built ONCE here instead of on every one
# of the 10,000,000 iterations (the original rebuilt it inside the loop; it
# needs its own name because `test` is reassigned at the bottom of the loop).
test_inputs = np.array([
    [0, 1, 0], [0, 1, 1], [0, 0, 0], [0, 0, 1],
    [1, 1, 0], [1, 1, 1], [1, 0, 0], [1, 0, 1],
])

# `range` replaces the original manual counter (i = 0 / i = i + 1); with the
# 0.1 s pause per iteration this is effectively "run until interrupted".
for i in range(1, 10000001):
    input_layer = inputs
    weighted_sum = np.dot(input_layer, weights)   # linear combination of features
    output_layer = sigmoid(weighted_sum)          # prediction for each training sample

    output = sigmoid(np.dot(test_inputs, weights))  # predictions for the unlabeled set

    error = outputs - output_layer                # per-sample prediction error
    # Gradient step; sigmoid_derivative expects the sigmoid OUTPUT, not the raw sum.
    adjustments = error * sigmoid_derivative(output_layer)
    weights += np.dot(input_layer.T, adjustments)

    # The original gated this on `if not i % 1`, which is always true, so the
    # state is simply printed every iteration.
    print(
        "样本标注:", "\n", outputs, "\n", "\n",
        "样本:", "\n", input_layer, "\n", "\n",
        "权重和:", "\n", weighted_sum, "\n", "\n",
        "输出层:", "\n", output_layer, "\n", "\n",
        "误差:", "\n", error, "\n", "\n",
        "调整:", "\n", adjustments, "\n", "\n",
        "权重:", "\n", weights, "\n", "\n", "\n",
        "重复次数:", i, "\n",
        "输出:", "\n", output, "\n"
    )
    # Side demo: sigmoid of a slowly growing value, to visualize the curve.
    test = np.array([[i / 1000.0, i / 1000.0, i / 1000.0, i / 1000.0]])
    print("test:\n", sigmoid(test))
    time.sleep(0.1)  # pause 0.1 s so the output is readable (original comment wrongly said 1 s)
    os.system('cls' if os.name == 'nt' else 'clear')  # clear screen, Windows or POSIX
# Test the neural network: print the final predictions for the training set.
print("输出结果:", output_layer)
# The file originally ended with a bare, non-comment Chinese sentence ("seems
# able to judge whether a vector's first dimension is 1"); as code it would
# raise a NameError at runtime, so it is preserved here as a comment instead.