Course Code for 刘二大人's "PyTorch Deep Learning Practice" (《PyTorch深度学习实践》)

Course link: http://www.bilibili.com/video/BV1Y7411d7Ys/?share_source=copy_web&vd_source=1a32dd27a726236a74603cf06b7302aa

Note: some of the code below does not follow the instructor's version exactly.

Contents

1. Linear Model

2. Gradient Descent

3. Backpropagation

4. Linear Regression with PyTorch

5. Logistic Regression

6. Multi-Dimensional Input Features

7. Loading Datasets

8. Multi-Class Classification

9. Convolutional Neural Networks: Basics

10. Convolutional Neural Networks: Advanced

Inception network

ResNet network

11. Recurrent Neural Networks: Basics

12. Recurrent Neural Networks: Advanced

1. Linear Model

import matplotlib
matplotlib.use('TkAgg')

import matplotlib.pyplot as plt
import numpy as np

x_data = [1.0,2.0,3.0]
y_data = [2.0,4.0,6.0]

def forward(x):
    return x * w  # w is a global, set by the sweep loop below

def loss(x, y):
    y_pred = forward(x)
    return (y_pred - y) * (y_pred - y)  # squared error for a single sample

w_list = []
mse_list = []

for w in np.arange(0.0,4.1,0.1):
    print("w=",w)
    loss_sum = 0

    for x_val,y_val in zip(x_data,y_data):
        y_pred_val = forward(x_val)
        loss_val = loss(x_val,y_val)
        loss_sum += loss_val
        print("\t",x_val,y_val,y_pred_val,loss_val)
    print("MSE:",loss_sum/len(x_data))
    w_list.append(w)
    mse_list.append(loss_sum/len(x_data))

plt.plot(w_list,mse_list)
plt.ylabel("loss")
plt.xlabel("w")
plt.show()
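The same sweep can be written without the explicit double loop by using NumPy broadcasting; a minimal sketch reusing x_data and y_data from above (the names ws, xs, ys are illustrative):

ws = np.arange(0.0, 4.1, 0.1)                     # candidate weights, shape (41,)
xs = np.array(x_data)                             # shape (3,)
ys = np.array(y_data)
preds = ws[:, None] * xs[None, :]                 # shape (41, 3): every w applied to every x
mse = ((preds - ys[None, :]) ** 2).mean(axis=1)   # MSE for each candidate w
print("best w:", ws[mse.argmin()])                # should print 2.0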

2. Gradient Descent

Gradient descent can be parallelized across samples, so it is fast; stochastic gradient descent tends to find a better optimum but cannot be parallelized. How to get the best of both? Mini-batch gradient descent (a sketch follows the two implementations below).

import matplotlib.pyplot as plt
import numpy as np

# -------------------------- gradient descent ------------------------------------
x_data = [1.0,2.0,3.0]
y_data = [2.0,4.0,6.0]

w = 1.0  # initial guess for w

def forward(x):
    return x * w

def cost(xs, ys):  # mean squared error over the full dataset
    cost = 0
    for x, y in zip(xs, ys):
        y_pred = forward(x)
        cost += (y_pred - y) ** 2
    return cost / len(xs)

def gradient(xs, ys):  # d(cost)/dw, averaged over the full dataset
    grad = 0
    for x, y in zip(xs, ys):
        grad += 2 * x * (x * w - y)
    return grad / len(xs)

print("predict_before training",4,forward(4))

epoch_list = []
cost_list = []

for epoch in range(100):
    cost_val = cost(x_data,y_data)
    grad_val = gradient(x_data,y_data)
    w -= 0.01 * grad_val

    epoch_list.append(epoch)
    cost_list.append(cost_val)

    print("epoch:",epoch,"w:",w,"loss=",cost_val)

print("predict_after training", 4, forward(4))

plt.plot(epoch_list, cost_list)
plt.xlabel("epoch")
plt.ylabel("cost")
plt.show()


# -------------------------- stochastic gradient descent ------------------------------------
# x_data = [1.0,2.0,3.0]
# y_data = [2.0,4.0,6.0]
#
# w = 1.0  # initial guess for w
#
# def forward(x):
#     return x * w
#
# def loss(x, y):  # squared error for a single sample
#     y_pred = forward(x)
#     return (y_pred - y) ** 2
#
# def gradient(x, y):  # d(loss)/dw for a single sample
#     return 2 * x * (x * w - y)
#
# print("prediction before training:", 4, forward(4))
#
# epoch_list = []
# loss_list = []
#
# for epoch in range(100):
#     for x, y in zip(x_data, y_data):  # w is updated after every individual sample
#         grad = gradient(x, y)
#         l = loss(x, y)
#         w -= 0.01 * grad
#         epoch_list.append(epoch)
#         loss_list.append(l)
#
#         print("epoch:", epoch, "w=", w, "x=", x, "y=", y, "loss=", l)
#
# print("prediction after training:", 4, forward(4))
#
# plt.plot(epoch_list, loss_list)
# plt.xlabel("epoch")
# plt.ylabel("loss")
# plt.show()
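Mini-batch gradient descent sits between the two versions above: the gradient is averaged over a small batch rather than one sample or the whole dataset. A minimal sketch reusing x_data and y_data from the gradient-descent code above; the batch size of 2 and the per-epoch shuffle are illustrative choices, not from the course code:

import random

w = 1.0
batch_size = 2  # illustrative choice

for epoch in range(100):
    pairs = list(zip(x_data, y_data))
    random.shuffle(pairs)  # visit the samples in a new order each epoch
    for i in range(0, len(pairs), batch_size):
        batch = pairs[i:i + batch_size]
        # gradient averaged over the mini-batch only
        grad = sum(2 * x * (x * w - y) for x, y in batch) / len(batch)
        w -= 0.01 * grad

print("prediction after training:", 4, 4 * w)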

3. Backpropagation

A Tensor stores both data (the value, here w) and grad (the gradient computed for it).

import torch

x_data = [1.0,2.0,3.0]
y_data = [2.0,4.0,6.0]

w = torch.tensor([1.0])
w.requires_grad = True  # gradients must be computed with respect to w

def forward(x):
    return w * x  # w is a tensor, so x is promoted to a tensor and the result tracks gradients too

def loss(x,y):
    y_pred = forward(x)
    return (y_pred-y)**2

print("predict_before training",4,forward(4).item())

for epoch in range(100):
    for x,y in zip(x_data,y_data):
        l = loss(x,y)
        l.backward()  # backward pass: computes the gradient, stores it in w.grad, and frees the computation graph

        print("x=",x,"y=",y,"grad",w.grad.item(),"w:",w) #可以分别打印w,w.data,w.grad.data加深理解
        print("epoch:", epoch, "loss=", l.item())  # 使用item取出python标量

        print("---------------------------------")

        w.data = w.data - 0.01 * w.grad.data  # update via .data so the step itself is not recorded in the graph
        w.grad.data.zero_()  # zero the gradient; backward() accumulates otherwise


print("predict_after training",4,forward(4).item())

4. Linear Regression with PyTorch

nn.MSELoss(size_average=False): the size_average flag controls whether the squared errors are averaged or summed. Averaging keeps the loss comparable across batches when the last batch is smaller than the rest. The flag is deprecated in current PyTorch; use reduction='mean' or reduction='sum' instead.
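A quick standalone check of the two reduction modes (the tensors here are just illustrative):

import torch
from torch import nn

pred = torch.tensor([[1.0], [2.0], [3.0]])
target = torch.tensor([[2.0], [4.0], [6.0]])

print(nn.MSELoss(reduction='sum')(pred, target).item())   # 1 + 4 + 9 = 14.0
print(nn.MSELoss(reduction='mean')(pred, target).item())  # 14 / 3 ≈ 4.667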

import torch
from torch import nn

#data
x_data = torch.tensor([[1.0],[2.0],[3.0]])
y_data = torch.tensor([[2.0],[4.0],[6.0]])

#model
class LinearModel(nn.Module):
    def __init__(self):
        super(LinearModel,self).__init__()
        self.linear = nn.Linear(1,1)

    def forward(self,x):
        y_pred = self.linear(x)
        return y_pred

model = LinearModel()

#loss and optimizer
loss = nn.MSELoss(reduction='sum')  # sum instead of mean; equivalent to the deprecated size_average=False
optimizer = torch.optim.SGD(model.parameters(),lr= 0.01)

#train
for epoch in range(100):
    y_pred = model(x_data)
    l = loss(y_pred,y_data)

    print("epoch",epoch, "loss:",l)

    optimizer.zero_grad()  # zero the gradients
    l.backward()  # backward pass
    optimizer.step()  # update the parameters

print("w:",model.linear.weight.item(),"b:",model.linear.bias.item())

#test
x_test = torch.tensor([[4.0]])
y_test = model(x_test)
print("y_pred:",y_test.data)

5. Logistic Regression

import torch
import torch.nn.functional as F
from torch import nn

#data
x_data = torch.Tensor([[1.0],[2.0],[3.0]])
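A minimal sketch of how this section continues, mirroring the structure of Section 4; the binary labels in y_data and the choice of BCELoss are assumptions rather than the course's exact code:

y_data = torch.Tensor([[0.0],[0.0],[1.0]])  # assumed binary labels

#model
class LogisticRegressionModel(nn.Module):
    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        return torch.sigmoid(self.linear(x))  # squash the linear output into (0, 1)

model = LogisticRegressionModel()

#loss and optimizer
criterion = nn.BCELoss(reduction='sum')  # binary cross-entropy
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

#train
for epoch in range(100):
    y_pred = model(x_data)
    l = criterion(y_pred, y_data)
    print("epoch", epoch, "loss:", l.item())

    optimizer.zero_grad()
    l.backward()
    optimizer.step()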