A PyTorch Implementation of the Original Generative Adversarial Network (GAN)

1. Code and Comments

# 1. Preprocessing
import os
import numpy as np
import math

import torch
from torch.utils.data import DataLoader
import torch.nn as nn  
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable  # deprecated since PyTorch 0.4; plain tensors work the same way

import torchvision
from torchvision import datasets
import torchvision.transforms as transforms
from torchvision.utils import save_image

from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()

cuda = True if torch.cuda.is_available() else False
os.makedirs("image", exist_ok=True) # 如果已经存在该目录则不报错

n_epochs = 200
batch_size = 64
lr = 2e-4
img_size = 28
channels = 1
z_dim = 100
sample_interval = 1000
img_shape = (channels,img_size, img_size)
# print(*img_shape) outputs: 1 28 28

os.makedirs("./data/mnist", exist_ok=True)
transform = transforms.Compose([transforms.Resize(img_size), 
                                transforms.ToTensor(), 
                                transforms.Normalize([0.5], [0.5])])
trainset = datasets.MNIST(root='./data/mnist', train=True,
                          download=True, transform=transform)
dataloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=16)
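
# A quick sanity check (my addition, not from the original post): one batch from the
# dataloader should have shape (64, 1, 28, 28), with pixels scaled to [-1, 1] by
# Normalize([0.5], [0.5]) -- matching the Tanh output range of the generator below.
imgs_check, _ = next(iter(dataloader))
print(imgs_check.shape)                                   # torch.Size([64, 1, 28, 28])
print(imgs_check.min().item(), imgs_check.max().item())   # roughly -1.0 and 1.0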

# 2. Build the networks
class Generator(nn.Module):
    def __init__(self):
        super(Generator,self).__init__()
        def block(in_feat, out_feat, normalize = True):
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize: 
                layers.append(nn.BatchNorm1d(out_feat, 0.8)) # BatchNorm1d because the features here are 1-D vectors from Linear layers. Note: the second positional argument of BatchNorm1d is eps, not momentum; to set the running-stats coefficient, pass momentum=0.8 explicitly.
            layers.append(nn.LeakyReLU(0.2, inplace=True)) # inplace=True overwrites the input tensor directly instead of allocating a new one, saving memory
            return layers
        self.model = nn.Sequential(
            *block(z_dim, 128, normalize=False), # no batch norm in the first layer
            *block(128,256),
            *block(256,512),
            *block(512, 1024),
            nn.Linear(1024, int(np.prod(img_shape))),
            nn.Tanh()
        )
    def forward(self, z):
        img = self.model(z)
        img = img.view(img.size(0),*img_shape)
        return img

class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator,self).__init__()
        self.model = nn.Sequential(
            nn.Linear(int(np.prod(img_shape)),512),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Linear(512,256),
            nn.LeakyReLU(0.2, inplace = True),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )

    def forward(self, img):
        img_flat = img.view(img.size(0), -1)  # img.size(0) is the batch size
        validity = self.model(img_flat)
        return validity
generator = Generator()
discriminator = Discriminator()
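
# A minimal smoke test (my addition): push random noise through both untrained networks
# to confirm the wiring and shapes. This runs on CPU, since the models are only moved
# to the GPU in step 3 below.
z_test = torch.randn(4, z_dim)
img_test = generator(z_test)           # -> torch.Size([4, 1, 28, 28])
score_test = discriminator(img_test)   # -> torch.Size([4, 1]), values in (0, 1)
print(img_test.shape, score_test.shape)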


# 3. Define the loss function and optimizers
adversarial_loss = nn.BCELoss()
optimizer_G = torch.optim.Adam(generator.parameters(), lr=lr, betas=(0.9, 0.999))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(0.9, 0.999))
if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
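
# Worked example (my addition) of the BCE loss used above: BCELoss computes
# -[y*log(p) + (1-y)*log(1-p)] averaged over the batch, so labeling generated
# images as "valid" (1.0) in g_loss below yields the non-saturating generator
# objective -log D(G(z)).
p_demo = torch.tensor([[0.9], [0.1]])  # hypothetical discriminator outputs
y_demo = torch.ones(2, 1)              # "valid" labels
manual = -(y_demo * p_demo.log() + (1 - y_demo) * (1 - p_demo).log()).mean()
print(manual.item(), nn.BCELoss()(p_demo, y_demo).item())  # both ~1.2040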

# 4. Train the networks
# How do we update one network while keeping the other fixed? optimizer_G.step() and
# optimizer_D.step() each update only the parameters of the model they were built for.
#  https://zhuanlan.zhihu.com/p/43843694
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
for epoch in range(n_epochs):
    for i, (imgs, _) in enumerate(dataloader): # discard the labels; imgs: torch.Size([64, 1, 28, 28])
        valid = Variable(Tensor(imgs.size(0), 1).fill_(1.0), requires_grad=False) # the trailing _ means in-place
        fake = Variable(Tensor(imgs.size(0), 1).fill_(0.0), requires_grad=False)
        real_imgs = Variable(imgs.type(Tensor))
        z = Variable(Tensor(np.random.normal(0,1,(imgs.shape[0],z_dim))))
        # Train the generator
        optimizer_G.zero_grad()
        gen_imgs = generator(z)
        g_loss = adversarial_loss(discriminator(gen_imgs), valid)
        # BCELoss averages the (64, 1) per-sample losses into a scalar: https://blog.youkuaiyun.com/qq_22210253/article/details/85222093
        g_loss.backward()
        optimizer_G.step() # one parameter update per mini-batch
        # Train the discriminator
        optimizer_D.zero_grad()
        real_loss = adversarial_loss(discriminator(real_imgs), valid)
        fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake) # detach so no gradients flow back into the generator
        d_loss = (real_loss + fake_loss) / 2
        d_loss.backward()
        optimizer_D.step()

        # item() extracts the Python number from a one-element tensor
        # print("[Epoch %d/%d][Batch %d/%d][D_loss:%f][G_loss:%f]"
        #         %(epoch, n_epochs, i, len(dataloader) ,d_loss.item(), g_loss.item()))
        writer.add_scalar('g_loss', g_loss.item(), epoch*len(dataloader) + i)
        writer.add_scalar('d_loss', d_loss.item(), epoch*len(dataloader) + i)
        writer.flush()
        # tensorboard --logdir=/home/workstation/lyx_code/first_gan/runs/Jan21_21-15-39_ubuntu-18-04
        
        # 5. Save generated images
        batches_done = epoch*len(dataloader) + i
        if batches_done % sample_interval == 0:
            save_image(gen_imgs.data[:25], "image/%d.png" % (batches_done // sample_interval), nrow=5, normalize=True)
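
# After the training loop (my addition; the file names are my choice, not from the
# original post): save the trained generator and sample a fresh grid of digits from it.
torch.save(generator.state_dict(), "gan_generator.pth")
generator.eval()  # put the BatchNorm layers into eval mode for sampling
with torch.no_grad():
    z_sample = Tensor(np.random.normal(0, 1, (25, z_dim)))
    final_samples = generator(z_sample)
save_image(final_samples, "image/final_samples.png", nrow=5, normalize=True)
writer.close()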



2. Training Results

Figure 1: generated samples (images omitted)

Training instability case (images omitted)
