Table of Contents
1. Building a Model by Hand
1.0 Basic Concepts of Model Training
| Term | Definition |
|---|---|
| Epoch | One complete pass of the model over the entire training set, known as "one epoch of training". |
| Batch | A small subset of training samples used for one backpropagation-driven parameter update; this subset is called "one batch of data". |
| Iteration | One parameter update performed with a single batch of data, known as "one iteration". |
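These three quantities relate by simple arithmetic. For example (the numbers below are illustrative, matching the dataset built later in this section):

import math
# With 1,000 training samples and a batch size of 16,
# one epoch consists of ceil(1000 / 16) = 63 iterations.
num_samples = 1000
batch_size = 16
iterations_per_epoch = math.ceil(num_samples / batch_size)
print(iterations_per_epoch)  # 63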
1.1 Data Processing
1.1.1 Building the Dataset
# Build the dataset
def build_dataset():
    '''
    Use sklearn's make_regression to build a synthetic regression dataset.
    Parameters of make_regression:
    - n_samples: number of samples to generate, which determines the dataset size.
    - n_features: number of features to generate, which determines the data dimensionality.
    - noise: standard deviation of the noise added to the target, simulating the imperfection of real-world data.
    - coef: if True, also return the true coefficients used to generate the data, revealing the true relationship between features and target.
    - random_state: seed for the random number generator, ensuring the same results can be reproduced across runs.
    Returns:
    - X: the generated feature matrix.
    - y: the generated target variable.
    - coef: the true coefficients, returned when coef=True.
    '''
    noise = random.randint(1, 5)
    bias = 14.5
    x, y, coef = make_regression(
        n_samples=1000,
        n_features=5,
        bias=bias,
        noise=noise,
        coef=True,
        random_state=666
    )
    # Convert the data to tensors
    x = torch.tensor(x, dtype=torch.float32)
    y = torch.tensor(y, dtype=torch.float32)
    coef = torch.tensor(coef, dtype=torch.float32)
    return x, y, coef, bias
1.1.2 Building the Data Loader
The data needs to be fed to the model in batches for training.
def data_loader(x, y, batch_size=16):
    """
    Turn the dataset into an iterator so it can be processed in batches during training.
    """
    # Number of samples
    num_samples = x.shape[0]
    # Build an index list
    indices = list(range(num_samples))
    # Shuffle the order
    random.shuffle(indices)
    # Total number of batches, rounded up
    num_batches = math.ceil(num_samples / batch_size)
    for i in range(num_batches):
        start = i * batch_size
        end = min((i + 1) * batch_size, num_samples)
        # Slice the data
        train_x = x[indices[start:end]]
        train_y = y[indices[start:end]]
        # Yield the batch lazily (this makes the function a generator)
        yield train_x, train_y
1.2 Model Function
'''
Build the model: weight and bias parameters
'''
def linear_regression(x, w, b):
    return torch.matmul(x, w) + b
1.3 Loss Function
# Model components: the loss function
def mean_squared_error(y_pred, y_true):
    return torch.mean((y_pred - y_true) ** 2)
1.4 Optimizer
Use gradient descent to adjust the parameters.
# Optimizer: SGD
def sgd(w, b, dw, db, learning_rate, batch_size):
    w.data -= learning_rate * dw.data / batch_size
    b.data -= learning_rate * db.data / batch_size
    return w, b
1.5 Parameter Initialization
# Initialize the parameters
def initialize_params(n_features):
    # Randomly initialize the weights w; initialize the bias to 0
    w = torch.randn(n_features, requires_grad=True, dtype=torch.float32)
    b = torch.tensor(0.0, requires_grad=True, dtype=torch.float32)
    return w, b
1.6 Training Function
The training function runs the full training loop: feeding data through the model and adjusting the parameters.
def train():
    # 1. Build the dataset
    x, y, coef, bias = build_dataset()
    # 2. Initialize the model parameters
    w, b = initialize_params(x.shape[1])
    # 3. Define the training hyperparameters
    learning_rate = 0.01
    epochs = 120
    batch_size = 16
    # 4. Train
    for epoch in range(epochs):
        epoch_loss = 0
        num_batches = 0
        for train_x, train_y in data_loader(x, y, batch_size):
            num_batches += 1
            # 5. Forward pass
            y_pred = linear_regression(train_x, w, b)
            # 6. Compute the loss
            loss = mean_squared_error(y_pred, train_y)
            # Zero the gradients
            if w.grad is not None:
                w.grad.data.zero_()
            if b.grad is not None:
                b.grad.data.zero_()
            # Backward pass
            loss.backward()
            # Update the parameters
            w, b = sgd(w, b, w.grad, b.grad, learning_rate, batch_size)
            # Accumulate the batch loss
            epoch_loss += loss.item()
        print(f'Epoch:{epoch},Loss:{epoch_loss / num_batches}')
    return coef, bias, w, b
1.7 Complete Code
import math
import random
import torch
from sklearn.datasets import make_regression

# Build the dataset
def build_dataset():
    noise = random.randint(1, 5)
    bias = 14.5
    x, y, coef = make_regression(
        n_samples=1000,
        n_features=5,
        bias=bias,
        noise=noise,
        coef=True,
        random_state=666
    )
    # Convert the data to tensors
    x = torch.tensor(x, dtype=torch.float32)
    y = torch.tensor(y, dtype=torch.float32)
    coef = torch.tensor(coef, dtype=torch.float32)
    return x, y, coef, bias

def data_loader(x, y, batch_size=16):
    """
    Turn the dataset into an iterator so it can be processed in batches during training.
    """
    # Number of samples
    num_samples = x.shape[0]
    # Build an index list
    indices = list(range(num_samples))
    # Shuffle the order
    random.shuffle(indices)
    # Total number of batches, rounded up
    num_batches = math.ceil(num_samples / batch_size)
    for i in range(num_batches):
        start = i * batch_size
        end = min((i + 1) * batch_size, num_samples)
        # Slice the data
        train_x = x[indices[start:end]]
        train_y = y[indices[start:end]]
        # Yield the batch lazily (generator)
        yield train_x, train_y

'''
Build the model: weight and bias parameters
'''
# Initialize the parameters
def initialize_params(n_features):
    # Randomly initialize the weights w; initialize the bias to 0
    w = torch.randn(n_features, requires_grad=True, dtype=torch.float32)
    b = torch.tensor(0.0, requires_grad=True, dtype=torch.float32)
    return w, b

# Model: linear regression
def linear_regression(x, w, b):
    return torch.matmul(x, w) + b

# Loss function
def mean_squared_error(y_pred, y_true):
    return torch.mean((y_pred - y_true) ** 2)

# Optimizer: SGD
def sgd(w, b, dw, db, learning_rate, batch_size):
    w.data -= learning_rate * dw.data / batch_size
    b.data -= learning_rate * db.data / batch_size
    return w, b

def train():
    # 1. Build the dataset
    x, y, coef, bias = build_dataset()
    # 2. Initialize the model parameters
    w, b = initialize_params(x.shape[1])
    # 3. Define the training hyperparameters
    learning_rate = 0.01
    epochs = 120
    batch_size = 16
    # 4. Train
    for epoch in range(epochs):
        epoch_loss = 0
        num_batches = 0
        for train_x, train_y in data_loader(x, y, batch_size):
            num_batches += 1
            # 5. Forward pass
            y_pred = linear_regression(train_x, w, b)
            # 6. Compute the loss
            loss = mean_squared_error(y_pred, train_y)
            # Zero the gradients
            if w.grad is not None:
                w.grad.data.zero_()
            if b.grad is not None:
                b.grad.data.zero_()
            # Backward pass
            loss.backward()
            # Update the parameters
            w, b = sgd(w, b, w.grad, b.grad, learning_rate, batch_size)
            # Accumulate the batch loss
            epoch_loss += loss.item()
        print(f'Epoch:{epoch},Loss:{epoch_loss / num_batches}')
    return coef, bias, w, b

if __name__ == '__main__':
    coef, bias, w, b = train()
    print('True coefficients:', coef)
    print('Learned coefficients:', w)
    print('True bias:', bias)
    print('Learned bias:', b)
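As a quick sanity check (a minimal sketch assuming training has converged; the tolerances are arbitrary), the learned parameters can be compared against the true ones:

# Run after train(); atol=1.0 is an arbitrary tolerance
print(torch.allclose(w.detach(), coef, atol=1.0))
print(abs(b.item() - bias) < 1.0)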
2. Model Definition Components
Model (neural network, deep neural network, deep learning) definition components help us define, train, and evaluate models in PyTorch.
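Before looking at each component, here is a minimal sketch of how they fit together (the shapes and hyperparameters are illustrative assumptions):

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(5, 1)                              # model component
criterion = nn.MSELoss()                             # loss component
optimizer = optim.SGD(model.parameters(), lr=0.01)   # optimizer component

x = torch.randn(8, 5)
y = torch.randn(8, 1)
loss = criterion(model(x), y)  # forward pass + loss
optimizer.zero_grad()          # clear old gradients
loss.backward()                # backward pass
optimizer.step()               # parameter update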
2.1 Getting to Know the Basic Components
2.1.1 Loss Function Component
PyTorch ships with many loss functions built in.
import torch
import torch.nn as nn

criterion = nn.MSELoss()
y_true = torch.randn(5, 3)
y_pred = torch.randn(5, 3, requires_grad=True)
print(y_true)
# nn.MSELoss takes (input, target), i.e. prediction first
print(criterion(y_pred, y_true))
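By default nn.MSELoss averages over all elements (reduction='mean'); the reduction parameter changes this when a sum or per-element losses are needed:

# reduction='sum' returns the summed squared error instead of the mean
criterion_sum = nn.MSELoss(reduction='sum')
# reduction='none' returns one loss value per element
criterion_none = nn.MSELoss(reduction='none')
print(criterion_sum(y_pred, y_true))
print(criterion_none(y_pred, y_true).shape)  # torch.Size([5, 3])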
2.1.2 Linear Layer Component
Build a simple linear layer; convolutional layers, pooling layers, activations, normalization, and so on come later.
model = nn.Linear(in_features=20, out_features=60)
input = torch.randn(128, 20)
output = model(input)
print(output.size())
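The output shape is (128, 60) because nn.Linear applies y = x @ W.T + b to the last dimension of the input; the layer's parameters can be inspected directly:

print(model.weight.shape)  # torch.Size([60, 20]): (out_features, in_features)
print(model.bias.shape)    # torch.Size([60])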
2.1.3 Optimizer Methods
The APIs involved here:
- import torch.optim as optim: import the optimizer module;
- params=model.parameters(): fetch the model's parameters;
- optimizer=optim.SGD(params): construct the optimizer;
- optimizer.zero_grad(): zero the gradients;
- optimizer.step(): update the parameters.
import torch.optim as optim

# Build the dataset
input_x = torch.randint(1, 10, (400, 5)).type(torch.float32)
target = torch.randint(1, 10, (400, 1)).type(torch.float32)
# Linear model
model = nn.Linear(5, 1)
# Optimizer object
sgd = optim.SGD(model.parameters(), lr=0.01)
# Predict
y_pred = model(input_x)
# Loss function
loss_fn = nn.MSELoss()
loss = loss_fn(y_pred, target)
print(loss)
# Zero the gradients
sgd.zero_grad()
# Backward pass
loss.backward()
# Update the parameters
sgd.step()
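This example performs a single parameter update; in real training these calls repeat per batch and per epoch. A minimal sketch reusing the objects above (the epoch count is arbitrary):

# Repeat forward / backward / step for a few epochs
for epoch in range(10):
    y_pred = model(input_x)
    loss = loss_fn(y_pred, target)
    sgd.zero_grad()
    loss.backward()
    sgd.step()
    print(f'epoch {epoch}: loss {loss.item():.4f}')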
2.2 Data Loaders
This splits into two steps: the dataset and the loader.
2.2.1 Building a Dataset Class
In PyTorch, a custom dataset class usually inherits from torch.utils.data.Dataset and implements the following methods:
- __init__: initialize the dataset object. This is typically where you load the data, or define the paths and logic for fetching it from storage.
    def __init__(self, data, labels):
        self.data = data
        self.labels = labels
- __len__: return the number of samples, so that the DataLoader knows the dataset's size.
    def __len__(self):
        return len(self.data)
- __getitem__: return the sample at a given index, optionally applying preprocessing or transforms to it.
    def __getitem__(self, index):
        sample = self.data[index]
        label = self.labels[index]
        return sample, label
If you need more preprocessing or data transforms, add the extra logic inside __getitem__.
import torch
from torch.utils.data import Dataset, DataLoader

class CustomDataset(Dataset):
    def __init__(self, data, labels):
        self.data = data
        self.labels = labels
        self.len = len(self.data)

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        # Clamp the index:
        # below the smallest valid index -> return the first sample
        # above the largest valid index -> return the last sample
        # otherwise -> return the sample at that index
        index = min(max(index, 0), self.len - 1)
        sample = self.data[index]
        label = self.labels[index]
        return sample, label

data_x = torch.randn(size=(666, 20), requires_grad=True, dtype=torch.float32)
data_y = torch.randn((data_x.shape[0], 1), dtype=torch.float32)
dataset = CustomDataset(data_x, data_y)
print(dataset[0])
2.2.2 The DataLoader
During training or validation, a DataLoader is used to load samples in batches; it wraps a Dataset, such as the CustomDataset defined in 2.2.1, and handles batching and shuffling, as sketched below.
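A minimal usage sketch, reusing the dataset instance from 2.2.1 (the batch size and options here are illustrative):

from torch.utils.data import DataLoader

# dataset is the CustomDataset instance created in 2.2.1
loader = DataLoader(
    dataset,
    batch_size=16,    # samples per batch
    shuffle=True,     # reshuffle the indices every epoch
    drop_last=False,  # keep the final, possibly smaller batch
)
for batch_x, batch_y in loader:
    print(batch_x.shape, batch_y.shape)  # torch.Size([16, 20]) torch.Size([16, 1])
    break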
2.3 Dataset Loading Examples
A few dataset-loading examples to get properly acquainted with the dataset class and the data loader.
2.3.1 Loading an Excel Dataset
import torch
from torch.utils.data import Dataset, DataLoader
import pandas as pd

class ExcelDataset(Dataset):
    def __init__(self, file_path):
        super(ExcelDataset, self).__init__()
        data1 = pd.read_excel(file_path)
        # Drop columns that are entirely NaN
        data1 = data1.dropna(axis=1, how='all')
        df = pd.DataFrame(data1)
        df.columns = [
            "zubie",
            "student_id",
            "name",
            "expression",
            "ppt",
            "answer",
            "present",
            "defense",
            "comments",
        ]
        df = df.drop(["zubie", "student_id", "name", "comments"], axis=1)
        # Convert to a tensor
        data = torch.tensor(df.values)
        self.data = data[:, :-1]
        self.target = data[:, -1]
        self.len = len(self.data)

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        index = min(max(index, 0), self.len - 1)
        return self.data[index], self.target[index]

excel_path = './test.xlsx'
dataset = ExcelDataset(excel_path)
loader = DataLoader(dataset, batch_size=4, shuffle=True)
for i, (data, target) in enumerate(loader):
    print(i, data, target)
2.3.2 Loading an Image Dataset
import torch
from torch.utils.data import Dataset, DataLoader
import os
import cv2

class ImageDataset(Dataset):
    def __init__(self, folder):
        self.img_size = (224, 224)
        # Image file paths
        self.imgs_path_list = []
        # Class labels
        self.target = []
        # Directory (class) names
        dir_names = []
        for root, dirs, files in os.walk(folder):
            if len(dirs) > 0:
                dir_names = dirs
                print(dir_names)
            else:
                for file in files:
                    file_path = os.path.join(root, file)
                    self.imgs_path_list.append(file_path)
                    # Split the path with the system separator to get the class directory name
                    class_name = os.path.split(root)[-1]
                    # Map the class name to a numeric id
                    class_id = dir_names.index(class_name)
                    self.target.append(class_id)
        self.len = len(self.imgs_path_list)

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        idx = min(max(index, 0), self.len - 1)
        file_path = self.imgs_path_list[idx]
        # Read the image file
        img = cv2.imread(file_path)
        # Resize all images to a common size
        img = cv2.resize(img, self.img_size)
        # Convert to a tensor
        img_tensor = torch.from_numpy(img)
        # Rearrange from HWC to CHW
        img_tensor = img_tensor.permute(2, 0, 1)
        # Fetch the label
        target = self.target[idx]
        return img_tensor, target

dataset = ImageDataset('.')
loader = DataLoader(dataset, batch_size=4, shuffle=True)
for i, (data, target) in enumerate(loader):
    print(i, data.shape, target)
    if i > 5:
        break
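For real training you would usually also convert the uint8 pixels to floats in a normalized range; a common follow-up step inside __getitem__ (a sketch, not part of the original pipeline) is:

# Convert uint8 pixels to float32 and scale to [0, 1] before returning
img_tensor = img_tensor.float() / 255.0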
2.3.3 Loading the Official Datasets
PyTorch officially provides some classic datasets, such as CIFAR-10, MNIST, and ImageNet, which can be used directly for training and testing.
MNIST dataset
from torchvision import transforms, datasets
from torch.utils.data import DataLoader

transform = transforms.Compose([transforms.ToTensor()])
# Training set
data_train = datasets.MNIST(
    root='./data',
    train=True,
    download=True,
    transform=transform
)
train_loader = DataLoader(data_train, batch_size=5, shuffle=True)
for data, target in train_loader:
    print(data.shape, target)
    break
# Test set
data_test = datasets.MNIST(
    root='./data',
    train=False,
    download=True,
    transform=transform
)
test_loader = DataLoader(data_test, batch_size=5, shuffle=True)
for data, target in test_loader:
    print(data.shape, target)
    break
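transforms.ToTensor scales pixels to [0, 1]; it is common to also normalize with the dataset statistics. A sketch using the widely quoted MNIST mean and std (treat the exact values as an assumption):

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),  # per-channel mean and std for MNIST
])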
CIFAR10 dataset
transform = transforms.Compose([transforms.ToTensor()])
# Training set
data_train = datasets.CIFAR10(
    root='./data',
    train=True,
    download=True,
    transform=transform,
)
train_loader = DataLoader(
    data_train,
    batch_size=5,
    shuffle=True,
    num_workers=2)
for x, y in train_loader:
    print(x.shape)
    print(y)
    break
# Test set
data_test = datasets.CIFAR10(
    root='./data',
    train=False,
    download=True,
    transform=transform,
)
test_loader = DataLoader(
    data_test,
    batch_size=5,
    shuffle=True,
    num_workers=2)
for x, y in test_loader:
    print(x.shape)
    print(y)
    break
2.4 Refactoring the Linear Regression
Refactor the hand-built linear regression project from section 1 using PyTorch's components.
import torch
from sklearn.datasets import make_regression
import random
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
import torch.optim as optim

# Number of features
n_features = 5

def build_datasets():
    bias = 5.0
    noise = random.randint(1, 3)
    x, y, coef = make_regression(
        n_samples=1000,
        n_features=n_features,
        bias=bias,
        noise=noise,
        shuffle=True,
        random_state=40,
        coef=True)
    x = torch.tensor(x, dtype=torch.float32)
    y = torch.tensor(y, dtype=torch.float32)
    coef = torch.tensor(coef, dtype=torch.float32)
    bias = torch.tensor(bias, dtype=torch.float32)
    return x, y, coef, bias

def train():
    # Create the model
    model = nn.Linear(in_features=n_features, out_features=1)
    # Dataset
    train_x, train_y, coef, bias = build_datasets()
    dataset = TensorDataset(train_x, train_y)
    # Hyperparameters
    learning_rate = 0.1
    epochs = 100
    batch_size = 16
    # Loss function
    criterion = nn.MSELoss()
    # Optimizer
    optimizer = optim.SGD(params=model.parameters(), lr=learning_rate)
    for epoch in range(epochs):
        loader = DataLoader(dataset, batch_size, shuffle=True)
        # loss_sum and num_batches are used to compute the average loss
        num_batches = 0
        loss_sum = 0
        for batch_x, batch_y in loader:
            # Forward pass on the current batch to get predictions
            y_pred = model(batch_x)
            # Compute the loss from the predictions and the targets
            loss = criterion(y_pred, batch_y.reshape(-1, 1))
            # Zero the gradients accumulated on the parameters
            optimizer.zero_grad()
            # Backward pass: compute gradients that reduce the loss
            loss.backward()
            # Update the parameters
            optimizer.step()
            loss_sum += loss.item()
            num_batches += 1
        print(f"Epoch: {epoch}, Loss: {loss_sum / num_batches}")
    w = model.weight.detach().flatten()
    b = model.bias.detach().item()
    return w, b, coef, bias

w, b, coef, bias = train()
print(w, b)
print(coef, bias)
3. Saving and Loading Models
Training a model usually takes a lot of data, time, and compute. Saving a trained model serves later business needs such as deployment, model updates, transfer learning, and resuming training.
3.1 Building a Standard Network Model
import torch
import torch.nn as nn

class MyModel(nn.Module):
    def __init__(self, input_size, output_size):
        super(MyModel, self).__init__()
        self.fc1 = nn.Linear(input_size, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, output_size)

    def forward(self, x):
        x = self.fc1(x)
        x = self.fc2(x)
        output = self.fc3(x)
        return output
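Note that stacked linear layers with no nonlinearities collapse into a single linear map. A hypothetical variant of MyModel.forward with ReLU activations (an illustrative change, not the original design):

# Drop-in replacement for MyModel.forward, with ReLU between the layers
def forward(self, x):
    x = torch.relu(self.fc1(x))
    x = torch.relu(self.fc2(x))
    return self.fc3(x)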
3.2 Serializing the Model Object
Saving and loading the serialized model object:
import torch
import torch.nn as nn

class MyModel(nn.Module):
    def __init__(self, input_size, output_size):
        super(MyModel, self).__init__()
        self.fc1 = nn.Linear(input_size, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, output_size)

    def forward(self, x):
        x = self.fc1(x)
        x = self.fc2(x)
        output = self.fc3(x)
        return output

model = MyModel(input_size=128, output_size=32)
# Save the whole model by serialization
torch.save(model, './data/model.pkl')
# Load it back (map_location controls which device the tensors land on)
model = torch.load('./data/model.pkl', map_location='cuda')
print(model)
One caveat with this approach: the object is pickled by reference to its class, so the class definition (here MyModel) must be importable when torch.load is called; saving only the state dict, as in the next section, avoids that coupling.
3.3 Saving the Model Parameters
This form is more common: save only the weights, biases, accuracy, and other relevant parameters.
import torch
import torch.nn as nn
import torch.optim as optim

class MyModel(nn.Module):
    def __init__(self, input_size, output_size):
        super(MyModel, self).__init__()
        self.fc1 = nn.Linear(input_size, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, output_size)

    def forward(self, x):
        x = self.fc1(x)
        x = self.fc2(x)
        output = self.fc3(x)
        return output

model = MyModel(input_size=128, output_size=32)
optimizer = optim.SGD(model.parameters(), lr=0.01)
# The model parameters to store
save_dict = {
    'init_params': {'input_size': 128, 'output_size': 32},
    'accuracy': 0.99,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict()}
# Save the parameters
torch.save(save_dict, './data/model_dict.pth')
# Load the model
save_dict = torch.load('./data/model_dict.pth')
model = MyModel(
    input_size=save_dict['init_params']['input_size'],
    output_size=save_dict['init_params']['output_size'])
# Restore the model parameters
model.load_state_dict(save_dict['model_state_dict'])
optimizer = optim.SGD(model.parameters(), lr=0.01)
# Restore the optimizer parameters
optimizer.load_state_dict(save_dict['optimizer_state_dict'])
print(save_dict['accuracy'])
print(model)
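After restoring the weights, a typical inference pass looks like this (a minimal sketch; the input size matches the init_params above):

model.eval()  # switch layers such as dropout/batchnorm to eval mode
with torch.no_grad():  # no gradient tracking is needed for inference
    x = torch.randn(1, 128)  # one sample with input_size features
    y = model(x)
print(y.shape)  # torch.Size([1, 32])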