1. Write a simple neural network that predicts y from multi-dimensional x
In machine learning and deep learning work, I personally find feature engineering to be the hardest part; once the feature engineering is done, much of the rest falls into place.
Task description: write a simple neural network that uses x to predict y.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.model_selection import train_test_split
import numpy as np
# Assume x is a matrix of feature vectors and y is the corresponding label vector
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)  # example: 2-dimensional features
y = np.array([0, 1, 1, 0], dtype=np.float32)  # example: corresponding labels
# Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
# Convert to PyTorch tensors
x_train_tensor = torch.tensor(x_train)
y_train_tensor = torch.tensor(y_train)
x_test_tensor = torch.tensor(x_test)
y_test_tensor = torch.tensor(y_test)
# Build the datasets
train_dataset = TensorDataset(x_train_tensor, y_train_tensor)
test_dataset = TensorDataset(x_test_tensor, y_test_tensor)
Neural network code:
# Define the neural network model
class SimpleNN(nn.Module):
    def __init__(self):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(2, 4)  # input layer to first hidden layer
        self.fc2 = nn.Linear(4, 2)  # first hidden layer to second hidden layer
        self.fc3 = nn.Linear(2, 1)  # second hidden layer to output layer

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x

# Initialize the model
model = SimpleNN()

# Define the loss function and optimizer
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
Training code:
# Define the training function
def train_model(model, train_loader, criterion, optimizer, num_epochs=1000):
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        for inputs, labels in train_loader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels.unsqueeze(1))
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        if (epoch + 1) % 100 == 0:
            print(f"Epoch {epoch+1}/{num_epochs}, Loss: {running_loss/len(train_loader)}")

# Load the training data into a DataLoader
train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)

# Train the model
train_model(model, train_loader, criterion, optimizer)

# Evaluate the model on the test set
model.eval()
with torch.no_grad():
    outputs = model(x_test_tensor)
    test_loss = criterion(outputs, y_test_tensor.unsqueeze(1))
    print("Test set mean squared error:", test_loss.item())
The final output here is a single 1-dimensional value per sample. It can be mapped to a score between 0 and 1; alternatively, the network could output two values representing the probabilities of y=0 and y=1; or a threshold can be applied to convert the output directly into a 0/1 label, as sketched below.
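Below is a minimal sketch of the score/threshold option, reusing the trained model and x_test_tensor from above; the sigmoid mapping and the 0.5 cutoff are illustrative choices, not part of the original setup.
# Sketch: map the raw 1-D output to a 0-1 score, then threshold it into a label.
# Assumes the trained `model` and `x_test_tensor` defined above; the sigmoid and
# the 0.5 cutoff are illustrative choices.
model.eval()
with torch.no_grad():
    raw = model(x_test_tensor)       # shape (num_samples, 1), raw regression output
    scores = torch.sigmoid(raw)      # squash into (0, 1) so it can be read as a score
    preds = (scores > 0.5).long()    # threshold to get hard 0/1 labels
    print("scores:", scores.squeeze(1).tolist())
    print("labels:", preds.squeeze(1).tolist())
For the two-output variant, the final layer would be nn.Linear(2, 2), torch.softmax over the outputs would give the probabilities of y=0 and y=1, and a classification loss such as nn.CrossEntropyLoss would typically replace MSE.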

