PyTorch (Long Liangqu) Hands-On Code and Fixes for Problems Encountered


Abstract:
This post collects the code I wrote while following Long Liangqu's PyTorch course. Because of version and environment differences, the code in the original videos cannot always be copied verbatim. Below are the changes I made for the problems I ran into, shared in the hope they help others.


Link: Bilibili - PyTorch by Long Liangqu

Lesson 4 - Simple Linear Regression

# Generate random data with numpy
import numpy as np
import pandas as pd

# Generate 100 (x, y) pairs of uniform random numbers
data = np.random.uniform(low=0, high=100, size=(100, 2))

# Put the data into a DataFrame
df = pd.DataFrame(data=data)

# Write the DataFrame to a CSV file.
# header=False matters here: np.genfromtxt below would otherwise
# parse the default "0,1" header row as an extra data point.
df.to_csv('data.csv', index=False, header=False)

# Print the DataFrame as a table
print(df.to_string(index=False))

import numpy as np

def compute_error_for_line(b, w, points):
	totalError = 0
	for i in range(0,len(points)):
		x = points[i,0]
		y = points[i,1]
		totalError += (y - (w*x+b))**2
	return totalError / float(len(points))

def step_gradient(b_current,w_current,points,learningRate):
	b_gradient = 0
	w_gradient = 0
	N = float(len(points))
	for i in range(0,len(points)):
		x = points[i,0]
		y = points[i,1]
		b_gradient += -(2/N) * (y -((w_current * x) + b_current))
		w_gradient += -(2/N) * x * (y - ((w_current * x) + b_current))
	new_b = b_current - (learningRate * b_gradient)
	new_m = w_current - (learningRate * w_gradient)
	return [new_b,new_m]

def gradient_descent_runner(points,starting_b,starting_m,learning_rate,num_iterations):
	b = starting_b
	m = starting_m
	for i in range(num_iterations):
		b,m = step_gradient(b,m,np.array(points),learning_rate)
	return [b,m]

def run():
	points = np.genfromtxt("data.csv", delimiter=",")
	learning_rate = 0.0001
	initial_b = 0
	initial_m = 0
	num_iterations = 1000
	print("Starting gradient descent at b = {0}, m = {1}, error = {2}".format(initial_b,
		initial_m, compute_error_for_line(initial_b, initial_m, points)))
	print("Running..")
	[b, m] = gradient_descent_runner(points, initial_b, initial_m, learning_rate,
		num_iterations)
	print("After {0} iterations b = {1}, m = {2}, error = {3}".format(num_iterations, b, m,
		compute_error_for_line(b, m, points)))

run()
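
As a sanity check (my addition, not from the video), the fitted (b, m) can be compared against numpy's closed-form least-squares solution on the same data; gradient descent should approach these values:

import numpy as np

points = np.genfromtxt("data.csv", delimiter=",")
m_ref, b_ref = np.polyfit(points[:, 0], points[:, 1], deg=1)  # returns [slope, intercept]
print("polyfit reference: b = {0}, m = {1}".format(b_ref, m_ref))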

Lesson 9 - First Taste of Handwritten Digit Recognition

#utils.py
import torch
from matplotlib import pyplot as plt

def plot_curve(data):
    fig = plt.figure()
    plt.plot(range(len(data)),data,color='blue')
    plt.legend(['value'],loc='upper right')
    plt.xlabel('step')
    plt.ylabel('value')
    plt.show()

def plot_img(img,label,name):
    fig = plt.figure()
    for i in range(6):
        plt.subplot(2,3,i+1)
        plt.tight_layout()
        plt.imshow(img[i][0]*0.3081+0.1307,cmap='gray',interpolation='none')
        plt.title("{}: {}".format(name,label[i].item()))
        plt.xticks([])
        plt.yticks([])
    plt.show()

def one_hot(label, depth=10):
    out = torch.zeros(label.size(0), depth)
    # label is already a tensor; wrapping it in torch.LongTensor() is deprecated
    idx = label.long().view(-1, 1)
    out.scatter_(dim=1, index=idx, value=1)
    return out
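
Side note: recent PyTorch versions ship a built-in one-hot helper, so the hand-rolled version above can be replaced by a single call (a sketch, assuming `label` holds integer class indices):

import torch.nn.functional as F
out = F.one_hot(label, num_classes=10).float()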
'''
First taste of handwritten digit recognition
'''
import torch
from torch import nn
from torch.nn import functional as F
from torch import optim

import torchvision
from matplotlib import pyplot as plt

from utils import plot_img, plot_curve,one_hot

batch_size = 512

train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,),(0.3081,))
                               ])),
    batch_size = batch_size, shuffle=True)

test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('mnist_data/',train=False,download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,),(0.3081,))
                               ])),
    batch_size=batch_size, shuffle = False)

x, y = next(iter(train_loader))
print(x.shape, y.shape,x.min(),x.max())
#plot_img(x, y, 'image sample')

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()

        self.fc1 = nn.Linear(28*28,256)
        self.fc2 = nn.Linear(256,64)
        self.fc3 = nn.Linear(64,10)

    def forward(self,x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)

        return x

net = Net()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
train_loss = []

for epoch in range(3):
    for batch_idx,(x,y) in enumerate(train_loader):
        x = x.view(x.size(0),28*28)
        out = net(x)
        y_onehot = one_hot(y)
        loss = F.mse_loss(out,y_onehot)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss.append(loss.item())

        if batch_idx % 10 == 0:
            print(epoch, batch_idx, loss.item())

#plot_curve(train_loss)

total_correct = 0
for x,y in test_loader:
    x = x.view(x.size(0),28*28)
    out = net(x)
    pred = out.argmax(dim = 1)
    correct = pred.eq(y).sum().float().item()
    total_correct += correct

total_num = len(test_loader.dataset)
acc = total_correct / total_num
print('test acc:',acc)

x,y = next(iter(test_loader))
out = net(x.view(x.size(0),28*28))
pred = out.argmax(dim = 1)
plot_img(x,pred,'test')

Lesson 46 - Optimization in Practice


Errors encountered: 1) `projection` is not recognized; 2) the `Axes3D` variant ran without error but produced no plot.

import numpy as np
from matplotlib import pyplot as plt
import torch
from mpl_toolkits.mplot3d import Axes3D

def himmelblau(x):
    return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2

x = np.arange(-6, 6, 0.1)
y = np.arange(-6, 6, 0.1)
print('x,y range:', x.shape, y.shape)
X, Y = np.meshgrid(x, y)
print('X,Y maps:', X.shape, Y.shape)
Z =himmelblau([X, Y])

fig = plt.figure('himmelblau')
# ax = fig.gca(projection='3d')
# ax = Axes3D(fig)
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z)
ax.view_init(60, -30)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.show()
'''
当你使用`fig.gca(projection='3d')`时,`gca`代表"Get Current Axes",它会尝试获取当前的Axes对象。  
然而,在你的代码中,因为`fig`是通过`fig = plt.figure('himmelblau')`创建的,它是一个新创建  
的Figure对象,并没有任何Axes对象。因此,尝试调用`gca()`方法时会报错。  
而使用`fig.add_subplot(111, projection='3d')`方法,你创建了一个具有指定投影类型(3D)  
的Axes对象,并将其添加到Figure中。`111`参数表示将Figure分割为1行1列的子图网格,  
并选择第一个子图(也是唯一的一个子图)。这种方法可以成功创建带有3D投影的Axes对象,使用  
`fig.add_subplot(111, projection='3d')`的方式更直接地创建了一个具有指定投影类型的Axes对象,  
并将其添加到Figure中,从而避免了使用`gca()`方法时的报错。
'''

x = torch.tensor([0., 0],requires_grad=True)
optimizer = torch.optim.Adam([x], lr=1e-3)
for step in range(20000):

    pred = himmelblau(x)

    optimizer.zero_grad()
    pred.backward()
    optimizer.step()

    if step % 2000 == 0:
        print('step {}:x = {}, f(x) = {}'
              .format(step,x.tolist(),pred.item()))
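
Himmelblau's function has four global minima, all with value 0 (one of them at (3.0, 2.0)), so a quick check (my addition) is to evaluate the function there and compare with the optimizer's final f(x):

print(himmelblau([3.0, 2.0]))  # expected: 0.0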

Lesson 50 - Multi-class Classification in Practice and visdom

Since Lesson 51 is missing from the playlist, I changed what gets plotted on the y-axis here; the main goal was simply to get visdom running.
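
For reference, the minimal visdom workflow is: start the server with `python -m visdom.server`, create each window once, then append points to it. A self-contained sketch (my addition):

from visdom import Visdom

viz = Visdom()
viz.line([0.], [0.], win='train_loss', opts=dict(title='train loss'))
for step in range(1, 10):
    viz.line([1.0 / step], [step], win='train_loss', update='append')  # one point per call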

import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets,transforms
from visdom import Visdom

batch_size=200
learning_rate=0.01
epochs=10

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True
)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True
)

w1, b1 = torch.randn(200, 784, requires_grad=True),\
    torch.zeros(200, requires_grad=True)
w2, b2 = torch.randn(200, 200, requires_grad=True),\
    torch.zeros(200, requires_grad=True)
w3, b3 = torch.randn(10, 200, requires_grad=True),\
    torch.zeros(10, requires_grad=True)

torch.nn.init.kaiming_normal_(w1)
torch.nn.init.kaiming_normal_(w2)
torch.nn.init.kaiming_normal_(w3)

def forward(x):
    x = x@w1.t() + b1
    x = F.relu(x)
    x = x @ w2.t() + b2
    x = F.relu(x)
    x = x @ w3.t() + b3
    x = F.relu(x)
    return x

optimizer = torch.optim.SGD([w1, b1, w2, b2, w3, b3], lr=learning_rate)
criteon = nn.CrossEntropyLoss()

for epoch in range(epochs):

    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28*28)

        logits = forward(data)
        loss = criteon(logits, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            print('Train Epoch:{} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()
            ))

    test_loss = 0
    correct = 0
    for data, target in test_loader:
        data = data.view(-1, 28*28)
        logits = forward(data)
        test_loss += criteon(logits, target).item()

        pred = logits.data.max(1)[1]
        correct += pred.eq(target.data).sum()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)
    ))

    # Start the visdom server first: python -m visdom.server
    # Create the client and the window once, then append one point per epoch;
    # re-creating the window every epoch (as originally written) resets the curve.
    if epoch == 0:
        viz = Visdom()
        viz.line([0.], [0.], win='train_loss', opts=dict(title='train loss'))
    viz.line([loss.item()], [epoch], win='train_loss', update='append')

Lesson 57 - Cross-Validation (train/val/test split) and GPU Computation

Error: Torch not compiled with CUDA enabled
This is essentially a compatibility issue between the PyTorch build and CUDA. My PyTorch had been installed together with Anaconda, so I had never paid attention to this; the error only surfaced here. I reinstalled the current official build, PyTorch 2.0.1 + CUDA 11.8.
Reference: the article "Torch not compiled with CUDA enabled看这一篇就足够了" (roughly: "this one article is all you need").
Note also that the CUDA version installed system-wide on Windows and the CUDA version inside the Python environment do not have to match; I have confirmed this on two machines.
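
Before forcing `cuda:0`, a quick sanity check (my addition) shows whether the installed build actually has CUDA support, and falls back to CPU if not:

import torch

print(torch.__version__, torch.version.cuda)  # build version and its CUDA version (None on CPU-only builds)
print(torch.cuda.is_available())              # False => CPU-only build or driver problem
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')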

import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets,transforms
# from visdom import Visdom

batch_size=200
learning_rate=0.01
epochs=10

train_db = datasets.MNIST('../data', train=True, download=True,
                          transform=transforms.Compose([
                              transforms.ToTensor(),
                              transforms.Normalize((0.1307,), (0.3081,))
                          ]))
train_loader = torch.utils.data.DataLoader(
    train_db,
    batch_size=batch_size, shuffle=True)
test_db = datasets.MNIST('../data', train=False, transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,),(0.3081,))
]))
test_loader = torch.utils.data.DataLoader(test_db,
                                          batch_size=batch_size, shuffle=True)

print('train:', len(train_db), 'test:', len(test_db))
train_db, val_db = torch.utils.data.random_split(train_db, [50000, 10000])
print('db1:', len(train_db), 'db2:', len(val_db))
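# Optional (my addition): random_split accepts a seeded generator for a
# reproducible split, e.g.
# torch.utils.data.random_split(train_db, [50000, 10000],
#                               generator=torch.Generator().manual_seed(1234))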
train_loader = torch.utils.data.DataLoader(
    train_db,
    batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(
    val_db,
    batch_size=batch_size, shuffle=True)

class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()

        self.model = nn.Sequential(
            nn.Linear(784,200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 10),
            nn.LeakyReLU(inplace=True),
        )

    def forward(self, x):
        x = self.model(x)
        return x

device = torch.device('cuda:0')
#Torch not compiled with CUDA enabled
net = MLP().to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)
criteon = nn.CrossEntropyLoss().to(device)

for epoch in range(epochs):

    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28*28)
        data, target = data.to(device), target.to(device)

        logits = net(data)
        loss = criteon(logits, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            print('Train Epoch:{} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()
            ))

    test_loss = 0
    correct = 0
    for data, target in val_loader:
        data = data.view(-1, 28 * 28)
        data, target = data.to(device), target.to(device)
        logits = net(data)
        test_loss += criteon(logits, target).item()

        pred = logits.data.max(1)[1]
        correct += pred.eq(target.data).sum()

    test_loss /= len(val_loader.dataset)
    print('\nVal set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(val_loader.dataset),
        100. * correct / len(val_loader.dataset)))

test_loss = 0
correct = 0
for data, target in test_loader:
    data = data.view(-1, 28 * 28)
    data, target = data.to(device), target.to(device)
    logits = net(data)
    test_loss += criteon(logits, target).item()

    pred = logits.data.max(1)[1]
    correct += pred.eq(target.data).sum()

test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
    test_loss, correct, len(test_loader.dataset),
    100. * correct / len(test_loader.dataset)))

Lesson 76 - Convolutional Network in Practice: LeNet-5

Error: AttributeError: '_SingleProcessDataLoaderIter' object has no attribute 'next'
A minor issue: `.next()` was Python 2 iterator syntax; in Python 3 the method is `__next__()` (or simply use the built-in `next(iter(cifar_train))`). The change:

#   wrong:  x, label = iter(cifar_train).next()
    x, label = iter(cifar_train).__next__()

Full code, main.py:

import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from torch import nn, optim

from  lenet5 import  Lenet5

def main():
    batchsz = 32
    cifar_train = datasets.CIFAR10('cifar', True, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor()
    ]), download=True)
    cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)

    cifar_test = datasets.CIFAR10('cifar', False, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor()
    ]), download=True)
    cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=True)

# AttributeError: '_SingleProcessDataLoaderIter' object has no attribute 'next'
#   wrong:  x, label = iter(cifar_train).next()
    x, label = iter(cifar_train).__next__()
    print('x:', x.shape, 'label:', label.shape)

    device = torch.device('cuda')
    model = Lenet5().to(device)
    criteon = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    print(model)

    for epoch in range(1000):

        model.train()
        for batchidx, (x, label) in enumerate(cifar_train):
            # [b, 3, 32, 32]
            # [b]
            x, label = x.to(device), label.to(device)

            # CrossEntropyLoss applies softmax internally, so the model outputs raw logits here
            logits = model(x)
            # logits [b, 10]
            # label [b]
            # loss: tensor scalar
            loss = criteon(logits, label)
            #backprop
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        #
        print(epoch, loss.item())

        model.eval()
        with torch.no_grad():
            # test
            total_correct = 0
            total_num = 0
            for x, label in cifar_test:
                # [b, 3, 32, 32]
                # [b]
                x, label = x.to(device), label.to(device)

                # [b, 10]
                logits = model(x)
                # [b]
                pred = logits.argmax(dim=1)
                # [b] vs [b] => scalar tensor
                total_correct += torch.eq(pred, label).float().sum().item()
                total_num += x.size(0)

            acc = total_correct / total_num
            print(epoch, acc)


if __name__ == '__main__':
    main()

lenet5.py:

import torch
from torch import nn
from torch.nn import functional as F

class Lenet5(nn.Module):
    '''
    for  cifar10 dataset
    '''
    def __init__(self):
        super(Lenet5, self).__init__()

        self.conv_unit = nn.Sequential(
            # x: [b, 3, 32, 32] => [b, 6, 28, 28]
            # kernel_size: the patch of pixels the kernel looks at in one step
            # stride: how many pixels the kernel moves per step
            # padding: zero-pads the border so edge pixels are covered as well
            nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
            # [b, 6, 14, 14] => [b, 16, 10, 10] => [b, 16, 5, 5]
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
        )
        # flatten
        # fc unit
        self.fc_unit = nn.Sequential(
            nn.Linear(16*5*5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, 10),
        )

        # [b, 3, 32, 32]
        tmp = torch.randn(2, 3, 32, 32)
        out = self.conv_unit(tmp)
        #[2, 16, 5, 5]
        print('conv_out:', out.shape)

        # use Cross Entropy Loss
        # self.criteon = nn.CrossEntropyLoss()

    def forward(self, x):
        '''
        :param x:[b, 3, 32, 32]
        :return:
        '''
        batchsz = x.size(0)
        # [b, 3, 32, 32] => [b, 16, 5, 5]
        x = self.conv_unit(x)
        # [b, 16, 5, 5] => [b, 16*5*5]
        x = x.view(batchsz, 16*5*5)
        # [b, 16*5*5] =>[b, 10]
        logits = self.fc_unit(x)

        # loss = self.criteon(logits, y)
        return logits

def main():
    net = Lenet5()

    tmp = torch.randn(2, 3, 32, 32)
    out = net(tmp)
    # [2, 16, 5, 5]
    print('lenet out:', out.shape)

if __name__ == '__main__':
    main()

Lesson 80 - ResNet

import torch
from torch import nn
from torch.nn import functional as F

class ResBlk(nn.Module):
    '''
    resnet block
    '''

    def __init__(self, ch_in, ch_out, stride=1):
        '''

        :param ch_in:
        :param ch_out:
        '''
        super(ResBlk, self).__init__()

        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)

        self.extra = nn.Sequential()
        if ch_out != ch_in:
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        '''

        :param x:
        :return:
        '''
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # short cut,
        # extra module: [b,ch_in, h, w] => [b, ch_out, h, w]
        # element-wise add :
        out = self.extra(x) + out
        out = F.relu(out)

        return  out

class ResNet18(nn.Module):

    def __init__(self):
        super(ResNet18, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=3, padding=0),
            nn.BatchNorm2d(64)
        )
        # followed 4 blocks
        self.blk1 = ResBlk(64, 128, stride=2)
        self.blk2 = ResBlk(128, 256, stride=2)
        self.blk3 = ResBlk(256, 512, stride=2)
        self.blk4 = ResBlk(512, 1024, stride=2)

        # blk4 outputs 1024 channels, so the classifier must match;
        # the original nn.Linear(512*1*1, 10) here crashed at runtime
        self.outlayer = nn.Linear(1024, 10)

    def forward(self, x):
        '''

        :param x:
        :return:
        '''
        x = F.relu(self.conv1(x))

        x = self.blk1(x)
        x = self.blk2(x)
        x = self.blk3(x)
        x = self.blk4(x)

        # [b, 1024, h, w] => [b, 1024, 1, 1] => [b, 1024];
        # without pooling and flattening, the Linear layer cannot take a 4D tensor
        x = F.adaptive_avg_pool2d(x, [1, 1])
        x = x.view(x.size(0), -1)
        x = self.outlayer(x)

        return x

def main():
    # blk = ResBlk(64, 128, stride=2)
    blk = ResBlk(64, 128, stride=4)
    tmp = torch.randn(2, 64, 32, 32)
    out = blk(tmp)
    print('block:', out.shape)

main()
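
With the classifier head fixed as above, a quick shape check on a CIFAR-sized batch (my addition) confirms the whole network runs:

model = ResNet18()
tmp = torch.randn(2, 3, 32, 32)
print('resnet18:', model(tmp).shape)  # expected: torch.Size([2, 10])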

Lesson 96 - Pokemon Dataset

Baidu Netdisk link: https://pan.baidu.com/s/1V_ZJ7ufjUUFZwD2NHSNMFw
Note that the folder inside the netdisk is spelled pokeman; keep that in mind when entering paths.
utils.py

import torch
from torch import nn
from matplotlib import pyplot as plt

class Flatten(nn.Module):
    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        shape = torch.prod(torch.tensor(x.shape[1:])).item()
        return x.view(-1, shape)

def plot_image(img, label, name):
    fig = plt.figure()
    for i in range(6):
        plt.subplot(2, 3, i+1)
        plt.tight_layout()
        plt.imshow(img[i][0]*0.3081+0.1307, cmap='gray', interpolation='none')
        plt.title("{}: {}".format(name, label[i].item()))
        plt.xticks([])
        plt.yticks([])
    plt.show()

resnet.py

import torch
from torch import nn
from torch.nn import functional as F

class ResBlk(nn.Module):
    '''
    resnet block
    '''

    def __init__(self, ch_in, ch_out, stride=1):
        '''

        :param ch_in:
        :param ch_out:
        '''
        super(ResBlk, self).__init__()

        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)

        self.extra = nn.Sequential()
        if ch_out != ch_in:
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        '''

        :param x:
        :return:
        '''
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # short cut,
        # extra module: [b,ch_in, h, w] => [b, ch_out, h, w]
        # element-wise add :
        out = self.extra(x) + out
        out = F.relu(out)

        return  out

class ResNet18(nn.Module):

    def __init__(self, num_class):
        super(ResNet18, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=3, padding=0),
            nn.BatchNorm2d(16)
        )
        # followed 4 blocks
        self.blk1 = ResBlk(16, 32, stride=3)
        self.blk2 = ResBlk(32, 64, stride=3)
        self.blk3 = ResBlk(64, 128, stride=2)
        self.blk4 = ResBlk(128, 256, stride=2)

        self.outlayer = nn.Linear(256*3*3, num_class)

    def forward(self, x):
        '''

        :param x:
        :return:
        '''
        x = F.relu(self.conv1(x))

        x = self.blk1(x)
        x = self.blk2(x)
        x = self.blk3(x)
        x = self.blk4(x)

        # print(x.shape)
        x = x.view(x.size(0), -1)
        x = self.outlayer(x)

        return x

def main():
    # blk = ResBlk(64, 128, stride=2)
    blk = ResBlk(64, 128)
    tmp = torch.randn(2, 64, 224, 224)
    out = blk(tmp)
    print('block:', out.shape)

    model = ResNet18(5)
    tmp = torch.randn(2, 3, 224, 224)  # 224 to match the training resolution (244 was a typo)
    out = model(tmp)
    print('resnet:', out.shape)

    p = sum(map(lambda p:p.numel(), model.parameters()))
    print('parameters size:', p)

if __name__ == '__main__':
    main()

pokemon.py

import torch
import os, glob
import random, csv

from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image

class Pokemon(Dataset):
    def __init__(self, root, resize, mode):
        super(Pokemon, self).__init__()

        self.root = root
        self.resize = resize

        self.name2label = {}
        for name in sorted(os.listdir(os.path.join(root))):
            if not os.path.isdir(os.path.join(root, name)):
                continue

            self.name2label[name] = len(self.name2label.keys())

        # print(self.name2label)
        self.images, self.labels = self.load_csv('images.csv')

        if mode =='train':#60%
            self.images = self.images[:int(0.6*len(self.images))]
            self.labels = self.labels[:int(0.6*len(self.labels))]
        elif mode=='val':
            self.images = self.images[int(0.6 * len(self.images)):int(0.8 * len(self.images))]
            self.labels = self.labels[int(0.6 * len(self.labels)):int(0.8 * len(self.labels))]
        else:
            self.images = self.images[int(0.8 * len(self.images)):]
            self.labels = self.labels[int(0.8 * len(self.labels)):]

    def load_csv(self, filename):

        if not os.path.exists(os.path.join(self.root, filename)):
            images = []
            for name in self.name2label.keys():
                images += glob.glob(os.path.join(self.root, name, '*.png'))
                images += glob.glob(os.path.join(self.root, name, '*.jpg'))
                images += glob.glob(os.path.join(self.root, name, '*.jpeg'))

            # pokeman\\bulbasaur\\00000000.png
            print(len(images), images)

            random.shuffle(images)
            with open(os.path.join(self.root, filename), mode='w', newline='') as f:
                writer = csv.writer(f)
                for img in images:
                    name = img.split(os.sep)[-2]
                    label = self.name2label[name]
                    writer.writerow([img, label])
                print('written into csv file:', filename)
        # read from the csv file
        images, labels = [], []
        with open(os.path.join(self.root, filename)) as f:
            reader = csv.reader(f)
            for row in reader:
                img, label = row
                label = int(label)

                images.append(img)
                labels.append(label)
        assert len(images) == len(labels)

        return images, labels


    def __len__(self):

        return len(self.images)

    def denormalize(self, x_hat):
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]  # ImageNet std; 0.255 in the last slot was a typo

        # x_hat = (x - mean) / std
        # x = x_hat * std + mean
        # x: [c, h, w]
        # mean: [3] => [3, 1, 1]
        mean = torch.tensor(mean).unsqueeze(1).unsqueeze(1)
        std = torch.tensor(std).unsqueeze(1).unsqueeze(1)
        # print(mean.shape, std.shape)
        x = x_hat * std + mean

        return x


    def __getitem__(self, idx):
        # self.images, self.labels
        # img: 'pokemon\\bu...\\...
        # label:0, 1, 2...
        img , label = self.images[idx], self.labels[idx]

        tf = transforms.Compose([
            lambda x: Image.open(x).convert('RGB'),  # path => PIL image
            transforms.Resize((int(self.resize*1.25), int(self.resize*1.25))),
            transforms.RandomRotation(15),  # data augmentation: random rotation up to 15 degrees
            transforms.CenterCrop(self.resize),  # center crop to drop the black border from rotation
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])  # 0.225, not 0.255
        ])

        img = tf(img)
        label = torch.tensor(label)

        return img, label


def main():
    import visdom
    import time

    viz = visdom.Visdom()

    db = Pokemon('pokeman', 64, 'train')

    x, y = next(iter(db))
    print('sample:', x.shape, y.shape)

    viz.image(db.denormalize(x), win='sample_x', opts=dict(title='sample_x'))

    loader = DataLoader(db, batch_size=32, shuffle=True, num_workers=8)

    for x,y in loader:
        viz.images(db.denormalize(x), nrow=8, win='batch', opts=dict(title='batch'))
        viz.text(str(y.numpy()), win='label', opts=dict(title='batch-y'))

        time.sleep(15)

if __name__ == '__main__':
    main()

train_scratch.py

import torch
from torch import optim, nn
import visdom
import torchvision
from torch.utils.data import DataLoader

from pokemon import Pokemon
from resnet import ResNet18

batchsz = 32
lr = 1e-3
epochs = 10

device = torch.device('cuda')
torch.manual_seed(1234)

train_db = Pokemon('pokeman', 224, mode='train')
val_db = Pokemon('pokeman', 224, mode='val')
test_db = Pokemon('pokeman', 224, mode='test')
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True,
                          num_workers=4)
val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)
test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)


viz = visdom.Visdom()


def evalute(model, loader):
    correct = 0
    total = len(loader.dataset)

    for x,y in loader:
        x,y = x.to(device), y.to(device)
        with torch.no_grad():
            logits = model(x)
            pred = logits.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()

    return correct / total
def main():
    model = ResNet18(5).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()

    best_acc, best_epoch = 0, 0
    global_step = 0
    viz.line([0], [-1], win='loss', opts=dict(title='loss'))
    viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
    for epoch in range(epochs):
        for step, (x,y) in enumerate(train_loader):
            x, y = x.to(device), y.to(device)

            logits = model(x)
            loss = criteon(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1

        if epoch % 1 ==0:
            val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc

                torch.save(model.state_dict(), 'best.mdl')  # must match the path loaded below

                viz.line([val_acc], [global_step], win='val_acc', update='append')

    print('best acc:', best_acc, 'best epoch:', best_epoch)

    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt!')

    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)

if __name__ == '__main__':
    main()

train_transfer.py

import torch
from torch import optim, nn
import visdom
import torchvision
from torch.utils.data import DataLoader

from pokemon import Pokemon
# from resnet import ResNet18
from torchvision.models import resnet18

from utils import Flatten

batchsz = 32
lr = 1e-3
epochs = 10

device = torch.device('cuda')
torch.manual_seed(1234)

train_db = Pokemon('pokeman', 224, mode='train')
val_db = Pokemon('pokeman', 224, mode='val')
test_db = Pokemon('pokeman', 224, mode='test')
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True,
                          num_workers=4)
val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)
test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)


viz = visdom.Visdom()


def evalute(model, loader):
    correct = 0
    total = len(loader.dataset)

    for x,y in loader:
        x,y = x.to(device), y.to(device)
        with torch.no_grad():
            logits = model(x)
            pred = logits.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()

    return correct / total
def main():
    # model = ResNet18(5).to(device)
    # note: on torchvision >= 0.13 `pretrained=True` is deprecated;
    # use resnet18(weights='IMAGENET1K_V1') instead
    trained_model = resnet18(pretrained=True)
    # children()[:-1] keeps everything up to the global avg-pool => [b, 512, 1, 1]
    model = nn.Sequential(*list(trained_model.children())[:-1],
                          Flatten(),
                          nn.Linear(512, 5)).to(device)

    # x = torch.randn(2, 3, 224, 224)
    # print(model(x).shape)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()

    best_acc, best_epoch = 0, 0
    global_step = 0
    viz.line([0], [-1], win='loss', opts=dict(title='loss'))
    viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
    for epoch in range(epochs):
        for step, (x,y) in enumerate(train_loader):
            x, y = x.to(device), y.to(device)

            logits = model(x)
            loss = criteon(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1

        if epoch % 1 ==0:
            val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc

                torch.save(model.state_dict(), 'best.mdl')  # must match the path loaded below

                viz.line([val_acc], [global_step], win='val_acc', update='append')

    print('best acc:', best_acc, 'best epoch:', best_epoch)

    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt!')

    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)

if __name__ == '__main__':
    main()

Lesson 115 - Auto-Encoder

Remember to start the visdom server first: python -m visdom.server
ae.py

import torch
from torch import nn

class AE(nn.Module):

    def __init__(self):
        super(AE, self).__init__()

        # [b, 784] => [b, 20]
        self.encoder = nn.Sequential(
            nn.Linear(784, 256),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.ReLU(),
            nn.Linear(64, 20),
            nn.ReLU()
        )
        # [b, 20] => [b, 784]
        self.decoder = nn.Sequential(
            nn.Linear(20, 64),
            nn.ReLU(),
            nn.Linear(64, 256),
            nn.ReLU(),
            nn.Linear(256, 784),
            nn.Sigmoid()
        )

    def forward(self, x):
        '''

        :param x:
        :return:
        '''
        batchsz = x.size(0)
        x = x.view(batchsz,784)
        x = self.encoder(x)
        x = self.decoder(x)
        x = x.view(batchsz, 1, 28, 28)

        return x

ae_main.py

import torch
from torch.utils.data import DataLoader
from torch import nn, optim

from torchvision import transforms, datasets
from ae import AE
import visdom


def main():
    mnist_train = datasets.MNIST('mnist', True, transform=transforms.Compose([
        transforms.ToTensor()
    ]), download=True)
    mnist_train = DataLoader(mnist_train, batch_size=32, shuffle=True)

    mnist_test = datasets.MNIST('mnist', False, transform=transforms.Compose([
        transforms.ToTensor()
    ]), download=True)
    mnist_test = DataLoader(mnist_test, batch_size=32, shuffle=True)

    x, _ = iter(mnist_train).__next__()
    print('x:', x.shape)

    device = torch.device('cuda')
    model = AE().to(device)
    criteon = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    print(model)

    viz = visdom.Visdom()

    for epoch in range(1000):
        for batchidx, (x, _) in enumerate(mnist_train):
            x = x.to(device)

            x_hat = model(x)
            loss = criteon(x_hat, x)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print(epoch, 'loss', loss.item())

        x, _ = iter(mnist_test).__next__()
        x = x.to(device)
        with torch.no_grad():
            x_hat = model(x)
        viz.images(x, nrow=8, win='x', opts=dict(title='x'))
        viz.images(x_hat, nrow=8, win='x_hat', opts=dict(title='x_hat'))


if __name__ == '__main__':
    main()

Lesson 117 - VAE

vae.py

import numpy as np
import torch
from torch import nn


class VAE(nn.Module):

    def __init__(self):
        super(VAE, self).__init__()

        # [b, 784] => [b, 20]
        self.encoder = nn.Sequential(
            nn.Linear(784, 256),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.ReLU(),
            nn.Linear(64, 20),
            nn.ReLU()
        )
        # [b, 10] => [b, 784]
        self.decoder = nn.Sequential(
            nn.Linear(10, 64),
            nn.ReLU(),
            nn.Linear(64, 256),
            nn.ReLU(),
            nn.Linear(256, 784),
            nn.Sigmoid()
        )

    def forward(self, x):
        '''

        :param x:
        :return:
        '''
        batchsz = x.size(0)
        x = x.view(batchsz, 784)
        h_ = self.encoder(x)
        # [b, 20] => two [b, 10] halves: mean and std
        mu, sigma = h_.chunk(2, dim=1)
        # reparameterization trick: eps ~ N(0, 1), so randn_like, not rand_like
        h = mu + sigma * torch.randn_like(sigma)

        x_hat = self.decoder(h)
        # was `x.view(...)`, which silently returned the input instead of the reconstruction
        x_hat = x_hat.view(batchsz, 1, 28, 28)

        kld = 0.5 * torch.sum(
            torch.pow(mu, 2) +
            torch.pow(sigma, 2) -
            torch.log(1e-8 + torch.pow(sigma, 2)) -1
        ) / (batchsz*28*28)



        return x_hat, kld
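
For reference, the `kld` term above is the closed-form KL divergence between the approximate posterior N(mu, sigma^2) and the standard normal prior: KL = 1/2 * sum(mu^2 + sigma^2 - log(sigma^2) - 1). It is divided by batchsz*28*28 so that it sits on the same per-pixel scale as the MSE reconstruction loss.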

vae_main.py

import torch
from torch.utils.data import DataLoader
from torch import nn, optim

from torchvision import transforms, datasets
from vae import VAE
import visdom


def main():
    mnist_train = datasets.MNIST('mnist', True, transform=transforms.Compose([
        transforms.ToTensor()
    ]), download=True)
    mnist_train = DataLoader(mnist_train, batch_size=32, shuffle=True)

    mnist_test = datasets.MNIST('mnist', False, transform=transforms.Compose([
        transforms.ToTensor()
    ]), download=True)
    mnist_test = DataLoader(mnist_test, batch_size=32, shuffle=True)

    x, _ = iter(mnist_train).__next__()
    print('x:', x.shape)

    device = torch.device('cuda')
    model = VAE().to(device)
    criteon = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    print(model)

    viz = visdom.Visdom()

    for epoch in range(1000):
        for batchidx, (x, _) in enumerate(mnist_train):
            x = x.to(device)

            x_hat, kld = model(x)
            loss = criteon(x_hat, x)

            if kld is not None:
                elbo = - loss - 1.0 * kld
                loss = -elbo

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print(epoch, 'loss', loss.item(), 'kld:', kld.item())

        x, _ = iter(mnist_test).__next__()
        x = x.to(device)
        with torch.no_grad():
            x_hat, kld= model(x)
        viz.images(x, nrow=8, win='x', opts=dict(title='x'))
        viz.images(x_hat, nrow=8, win='x_hat', opts=dict(title='x_hat'))


if __name__ == '__main__':
    main()

Lesson 127 - GAN

import torch
from torch import nn, optim, autograd
import numpy as np
import visdom
import random

h_dim = 400
batchsz = 512
# viz = visdom.Visdom()

class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()

        self.net = nn.Sequential(
            nn.Linear(2, h_dim),
            nn.ReLU(True),
            nn.Linear(h_dim, h_dim),
            nn.ReLU(True),
            nn.Linear(h_dim, h_dim),
            nn.ReLU(True),
            nn.Linear(h_dim, 2)
        )

    def forward(self, z):
        output = self.net(z)
        return output

class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()

        self.net = nn.Sequential(
            nn.Linear(2, h_dim),
            nn.ReLU(True),
            nn.Linear(h_dim, h_dim),
            nn.ReLU(True),
            nn.Linear(h_dim, h_dim),
            nn.ReLU(True),
            nn.Linear(h_dim, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        output = self.net(x)
        return output.view(-1)

def data_generator():
    scale = 2.
    centers = [
        (1, 0),
        (-1, 0),
        (0, 1),
        (0, -1),
        (1. / np.sqrt(2), 1. / np.sqrt(2)),
        (1. / np.sqrt(2), -1. / np.sqrt(2)),
        (-1. / np.sqrt(2), 1. / np.sqrt(2)),
        (-1. / np.sqrt(2), -1. / np.sqrt(2)),
    ]
    centers = [(scale * x, scale * y) for x,y in centers]

    while True:
        dataset = []

        for i in range(batchsz):

            point = np.random.randn(2) * 0.02
            center = random.choice(centers)
            point[0] += center[0]
            point[1] += center[1]
            dataset.append(point)

        dataset = np.array(dataset).astype(np.float32)
        dataset /= 1.414
        yield dataset

def main():

    torch.manual_seed(23)
    np.random.seed(23)

    data_iter = data_generator()
    x = next(data_iter)
    # print(x.shape)

    G = Generator().cuda()
    D = Discriminator().cuda()
    # print(G)
    # print(D)
    optim_G = optim.Adam(G.parameters(), lr=5e-4, betas=(0.5, 0.9))
    optim_D = optim.Adam(D.parameters(), lr=5e-4, betas=(0.5, 0.9))

    for epoch in range(50000):

        for _ in range(5):
            x = next(data_iter)
            x = torch.from_numpy(x).cuda()
            predr = D(x)
            lossr = -predr.mean()

            z = torch.randn(batchsz, 2).cuda()
            xf = G(z).detach()
            predf = D(xf)
            lossf = predf.mean()

            loss_D = lossr + lossf

            optim_D.zero_grad()
            loss_D.backward()
            optim_D.step()

        z = torch.randn(batchsz, 2).cuda()
        xf = G(z)
        predf = D(xf)
        loss_G = -predf.mean()

        optim_G.zero_grad()
        loss_G.backward()
        optim_G.step()

        if epoch % 100 ==0:

            print(loss_D.item(), loss_G.item())


if __name__ == '__main__':
    main()
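
The unused `autograd` import hints at the course's next step: adding a gradient penalty to the discriminator (as written, this snippet trains with raw WGAN-style losses and a Sigmoid output). A minimal WGAN-GP-style sketch (my addition, not the lesson's code):

def gradient_penalty(D, xr, xf):
    # xr, xf: [b, 2] real and fake batches on the GPU
    t = torch.rand(xr.size(0), 1).cuda()            # per-sample interpolation weight
    mid = (t * xr + (1 - t) * xf).requires_grad_()  # random point between real and fake
    pred = D(mid)
    grads = autograd.grad(outputs=pred, inputs=mid,
                          grad_outputs=torch.ones_like(pred),
                          create_graph=True, retain_graph=True)[0]
    # penalize deviation of the gradient norm from 1
    return torch.pow(grads.norm(2, dim=1) - 1, 2).mean()

It would be added to loss_D with a small weight before loss_D.backward().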

Lesson 144 - A Neural Network in NumPy

import numpy as np
import random

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_prime(z):
    # derivative of the sigmoid: s(z) * (1 - s(z)); the original was missing the '*'
    return sigmoid(z) * (1 - sigmoid(z))


class MLP_np:

    def __init__(self, sizes):
        '''

        :param sizes:[784, 30, 10]
        '''
        self.sizes = sizes
        self.num_layers = len(sizes) - 1

        self.weights = [np.random.randn(ch2, ch1) for ch1,ch2 in zip(sizes[:-1], sizes[1:])]
        self.biases = [np.random.randn(ch, 1) for ch in sizes[1:]]

    def forward(self, x):
        '''

        :param x:[784, 1]
        :return: [10, 1]
        '''

        for b,w in zip(self.biases, self.weights):
            z = np.dot(w, x) + b
            x = sigmoid(z)

        return x

    def backprop(self, x, y):
        '''

        :param x: 784 1
        :param y: 10 1
        :return:
        '''

        nabla_w = [np.zeros(w.shape) for w in self.weights]
        nabla_b = [np.zeros(b.shape) for b in self.biases]

        activations = [x]
        zs = []
        activation = x

        for b,w in zip(self.biases, self.weights):
            z = np.dot(w, activation) + b
            activation = sigmoid(z)
            zs.append(z)
            activations.append(activation)

        delta = activations[-1] * (1 - activations[-1]) * (activations[-1] -y)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].T)

        # propagate delta backwards through the remaining layers
        for l in range(2, self.num_layers + 1):
            l = -l  # negative index from the back; the original's `l = -1` froze the index

            z = zs[l]
            a = activations[l]

            delta = np.dot(self.weights[l + 1].T, delta) * a * (1 - a)

            nabla_b[l] = delta
            nabla_w[l] = np.dot(delta, activations[l - 1].T)

        # the original returned inside the loop, and returned nabla_w twice
        return nabla_w, nabla_b

    def train(self, training_data, epochs, batchsz, lr , test_data):
        '''

        :param training_data: list
        :param epochs: 1000
        :param batchsz: 10
        :param lr: 0.01
        :param test_data:
        :return:
        '''
        if test_data:
            n_test = len(test_data)

        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+batchsz]
                for k in range(0, n ,batchsz)
            ]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, lr)

            if test_data:
                print('Epoch {0}: {1} / {2}'.format(
                    j, self.evaluate(test_data), n_test
                ))
            else:
                print('Epoch {0} complete'.format(j))

    def update_mini_batch(self, batch, lr):
        '''

        :param batch:
        :param lr:
        :return:
        '''
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        nabla_b = [np.zeros(b.shape) for b in self.biases]

        for x, y in batch:
            # accumulate per-sample gradients; the original overwrote the
            # accumulators and then zipped nabla_w with itself
            nabla_w_, nabla_b_ = self.backprop(x, y)
            nabla_w = [accu + cur for accu, cur in zip(nabla_w, nabla_w_)]
            nabla_b = [accu + cur for accu, cur in zip(nabla_b, nabla_b_)]

        nabla_w = [w/len(batch) for w in nabla_w]
        nabla_b = [b/len(batch) for b in nabla_b]

        self.weights = [w - lr * nabla for w, nabla in zip(self.weights, nabla_w)]
        self.biases = [b - lr * nabla for b, nabla in zip(self.biases, nabla_b)]

    def evaluate(self, test_data):
        '''

        :param test_data:
        :return:
        '''
        result = [(np.argmax(self.forward(x)), y) for x,y in test_data]

        correct = sum(int(pred == y) for pred,y in result)

        return correct


def main():
    pass

if __name__ == '__main__':
    main()
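
main() above is a stub. A tiny smoke test with random MNIST-shaped data (my addition; the names and shapes are illustrative only) confirms the forward/backward plumbing runs end to end:

def main():
    net = MLP_np([784, 30, 10])
    # 100 fake training pairs: x is [784, 1], y is a one-hot [10, 1] column
    train_data = [(np.random.randn(784, 1), np.eye(10)[:, [i % 10]]) for i in range(100)]
    # evaluate() compares argmax against integer labels
    test_data = [(np.random.randn(784, 1), i % 10) for i in range(20)]
    net.train(train_data, epochs=2, batchsz=10, lr=0.1, test_data=test_data)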
