Hands-On PyTorch: RNN

This post works through the RNN chapters of Dive into Deep Learning in PyTorch: preparing the Jay Chou lyrics language-model dataset, random vs. consecutive sampling, a character-level RNN implemented from scratch, the nn.RNN-based version, and GRU/LSTM variants (both from scratch and with PyTorch's built-in layers).


tool.py

#coding=utf-8
import time
import math
import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F

import sys
sys.path.append("..")
# import d2lzh_pytorch as d2l
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

import zipfile
import random

print(torch.__version__)
print(device)


"""6.2 rnn """
def test_P154():
    X, W_xh = torch.randn(3, 1), torch.randn(1, 4)
    H, W_hh = torch.randn(3, 4), torch.randn(4, 4)
    R1 = torch.matmul(X, W_xh) + torch.matmul(H, W_hh)
    R2 = torch.matmul(torch.cat((X, H), dim=1), torch.cat((W_xh, W_hh), dim=0))
    print(R1, R2)  # the two results are equal: the two formulations are equivalent


def test_howrnn():
    H = torch.randn(3, 4)
    X = torch.randn(3, 1)
    W_xh = torch.randn(1, 4)
    W_hh = torch.randn(4, 4)
    H = torch.matmul(X, W_xh) + torch.matmul(H, W_hh)
    print(H)  # H is reused across steps (the recurrent update)


"""6.3 语言模型数据集处理"""


def load_data_jay_lyrics():
    """
    corpus_indices, idx_to_char, char_to_list, vocab_size = load_data_jay_lyrics()

    print(vocab_size)  >> 1028
    print(idx_to_char[1000:1028])
    >> ['度', '怯', '妈', '卷', '药', '悲', '居', '代', '殿', '湖', '子', '武', '悄', '魂', '沟', '喘', '爽', '吴', '往', '宇', '乡', '神', '碰', '医', '别', '左', '?', '刚']
    print(char_to_list['告'])
    >> 279

    corpus_chars[i] equals idx_to_char[corpus_indices[i]]
    """
    with zipfile.ZipFile('../dive/data/jaychou_lyrics.txt.zip') as zin:
        with zin.open('jaychou_lyrics.txt') as f:
            corpus_chars = f.read().decode('utf-8')
    corpus_chars = corpus_chars.replace('\n', ' ').replace('\r', ' ')
    corpus_chars = corpus_chars[:10000]

    idx_to_char = list(set(corpus_chars))
    char_to_list = dict([(char, i) for i, char in enumerate(idx_to_char)])
    vocab_size = len(idx_to_char)
    corpus_indices = [char_to_list[c] for c in corpus_chars]
    return corpus_indices, idx_to_char, char_to_list, vocab_size



def data_iter_random(corpus_indices, batch_size, num_steps, device=None):
    """
    Random sampling: each minibatch is drawn from random, non-overlapping positions in the sequence.

    my_seq = list(range(30))
    for X, Y in data_iter_random(my_seq, batch_size=2, num_steps=6):
        print('X: ', X, '\nY:', Y, '\n')

    >> X:  tensor([[18., 19., 20., 21., 22., 23.],
                   [12., 13., 14., 15., 16., 17.]], device='cuda:0')
       Y:  tensor([[19., 20., 21., 22., 23., 24.],
                   [13., 14., 15., 16., 17., 18.]], device='cuda:0')

       X:  tensor([[ 0.,  1.,  2.,  3.,  4.,  5.],
                   [ 6.,  7.,  8.,  9., 10., 11.]], device='cuda:0')
       Y:  tensor([[ 1.,  2.,  3.,  4.,  5.,  6.],
                   [ 7.,  8.,  9., 10., 11., 12.]], device='cuda:0')
    """
    # Subtract 1 because the label Y is the input X shifted one position to the right
    num_examples = (len(corpus_indices) - 1) // num_steps
    epoch_size = num_examples // batch_size
    example_indices = list(range(num_examples))
    random.shuffle(example_indices)

    # Return the subsequence of length num_steps starting at pos
    def _data(pos):
        return corpus_indices[pos: pos + num_steps]

    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    for i in range(epoch_size):
        # Read batch_size random examples at a time
        i = i * batch_size
        batch_indices = example_indices[i: i + batch_size]
        X = [_data(j * num_steps) for j in batch_indices]
        Y = [_data(j * num_steps + 1) for j in batch_indices]
        yield torch.tensor(X, dtype=torch.float32, device=device), torch.tensor(Y, dtype=torch.float32, device=device)


def data_iter_consecutive(corpus_indices, batch_size, num_steps, device=None):
    """
    Consecutive (adjacent) sampling: successive minibatches are adjacent in the original sequence.
    for X, Y in data_iter_consecutive(my_seq, batch_size=2, num_steps=6):
        print('X: ', X, '\nY:', Y, '\n')

    X:  tensor([[ 0.,  1.,  2.,  3.,  4.,  5.],
                [15., 16., 17., 18., 19., 20.]])
    Y:  tensor([[ 1.,  2.,  3.,  4.,  5.,  6.],
                [16., 17., 18., 19., 20., 21.]])

    X:  tensor([[ 6.,  7.,  8.,  9., 10., 11.],
                [21., 22., 23., 24., 25., 26.]])
    Y:  tensor([[ 7.,  8.,  9., 10., 11., 12.],
                [22., 23., 24., 25., 26., 27.]])
    """
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    corpus_indices = torch.tensor(corpus_indices, dtype=torch.float32, device=device)
    data_len = len(corpus_indices)
    batch_len = data_len // batch_size
    indices = corpus_indices[0: batch_size * batch_len].view(batch_size, batch_len)
    epoch_size = (batch_len - 1) // num_steps
    for i in range(epoch_size):
        i = i * num_steps
        X = indices[:, i: i + num_steps]
        Y = indices[:, i + 1: i + num_steps + 1]
        yield X, Y



"""6.4 RNN从零开始实现"""

def one_hot(x, n_class, dtype=torch.float32):
    # X shape: (batch), output shape: (batch, n_class)
    """
    x = torch.tensor([0, 2])
    one_hot(x, vocab_size)
    >> tensor([[ 1.,  0.,  0.,  ...,  0.,  0.,  0.],
               [ 0.,  0.,  1.,  ...,  0.,  0.,  0.]])
    """
    x = x.long()
    res = torch.zeros(x.shape[0], n_class, dtype=dtype, device=x.device)
    res.scatter_(1, x.view(-1, 1), 1)
    return res
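# Note (added): newer PyTorch versions also provide F.one_hot; for example
# F.one_hot(x.long(), n_class).to(dtype) gives the same result as the scatter_-based
# one_hot above. The explicit version is kept here to mirror the book.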


def to_onehot(X, n_class):
    # X shape: (batch, seq_len), output: seq_len elements of (batch, n_class)
    """
    X = torch.arange(10).view(2, 5)
	inputs = to_onehot(X, vocab_size)
	print(len(inputs), inputs[0].shape)
	>> 5 torch.Size([2, 1027])
	"""
    return [one_hot(X[:, i], n_class) for i in range(X.shape[1])]


# 2. Model parameters
def get_params(num_inputs, num_hiddens, num_outputs, device):
    """
    	初始化模型参数 隐藏层:W_xh, W_hh, b_h 输出层: W_hq, b_q

    	name = ['W_xh:', 'W_hh:', 'b_h:', 'W_hq:', 'b_q:']
    	params = get_params(10, 4, 2, 'cpu')
    	for i, p in enumerate(params):
        	print(name[i], p.shape)
    	>> 	W_xh: torch.Size([10, 4])
    		W_hh: torch.Size([4, 4])
    		b_h: torch.Size([4])
    		W_hq: torch.Size([4, 2])
    		b_q: torch.Size([2])
    	"""
    def _one(shape):
        ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)
        return torch.nn.Parameter(ts, requires_grad=True)

    # Hidden-layer parameters
    W_xh = _one((num_inputs, num_hiddens))
    W_hh = _one((num_hiddens, num_hiddens))
    b_h = torch.nn.Parameter(torch.zeros(num_hiddens, device=device, requires_grad=True))
    # Output-layer parameters
    W_hq = _one((num_hiddens, num_outputs))
    b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, requires_grad=True))
    return nn.ParameterList([W_xh, W_hh, b_h, W_hq, b_q])


def init_rnn_state(batch_size, num_hiddens, device):
    """
    Initialize the hidden state H to all zeros.
    H.shape: (batch_size, num_hiddens)
    """
    return (torch.zeros((batch_size, num_hiddens), device=device),)


"""
总结:输入与参数大小
batch_size: 批量大小
num_steps: 时间步数
vocab_size: 词典大小

num_inputs: 输入元个数
num_hiddles: 隐藏元个数
num_outputs: 输出元个数

H : 隐藏状态

采样 -->  样本 [batch_size, num_steps]
		tensor([[ 0.,  1.,  2.,  3.,  4.,  5.],   # 2 * 6
        		[15., 16., 17., 18., 19., 20.]])
to_onehot --> 输入数据 (num_steps, [batch_size, vocab_size])
		tensor([[ 1.,  0.,  0.,  ...,  0.,  0.,  0.],  # 2 * 1027
                [ 0.,  0.,  1.,  ...,  0.,  0.,  0.]])

num_inputs = num_outputs = 1027
设 num_hiddles=256

输入: X.shape  (num_steps, [batch_size, vocab_size])

	  W_xh.shape  [num_inputs, num_hiddles]
	  W_hh.shape  [num_hiddles, num_hiddles]
	  b_h.shape   [num_hiddles,]
	  W_hq.shape  [num_hiddles, num_outputs]
	  b_q.shape   [num_outputs,]

	  H.shape  [batch_size, num_hiddles]

	assert H = X*W_xh + H*W_hh
	"""


def rnn(inputs, state, params):
    # inputs and outputs are both lists of num_steps matrices of shape (batch_size, vocab_size)
    """
    RNN model.
    inputs: input data (list of one-hot tensors)
    state: initial hidden state H
    params: model parameters (W_xh, W_hh, b_h, W_hq, b_q)

    run.py:
    (corpus_indices, idx_to_char, char_to_idx, vocab_size) = load_data_jay_lyrics()

    device = 'cpu'
    num_inputs = vocab_size
    num_hiddens = 256
    num_outputs = vocab_size

    X = torch.arange(10).view(2, 5)   # 2*5
    inputs = to_onehot(X.to(device), vocab_size)  # list of 5 tensors of shape (2, vocab_size)

    state = init_rnn_state(X.shape[0], num_hiddens, device)

    params = get_params(num_inputs, num_hiddens, num_outputs, device)

    outputs, state_new = rnn(inputs, state, params)
    print(len(outputs), outputs[0].shape, state_new[0].shape)
    """
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state    # shape (batch_size, num_hiddens), e.g. (2, 256)
    outputs = []
    for X in inputs:    # num_steps iterations, reusing (recurring over) the hidden state H
        H = torch.tanh(torch.matmul(X, W_xh) + torch.matmul(H, W_hh) + b_h)
        Y = torch.matmul(H, W_hq) + b_q
        outputs.append(Y)
    return outputs, (H,)


def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
                num_hiddens, vocab_size, device, idx_to_char, char_to_idx):
    """
    预测函数, 由prefix来预测接下来num_chars个字符(无训练)
    上接rnn函数
    predict_rnn('塞纳河', 12, rnn, params, init_rnn_state, num_hiddens, vocab_size,
    device, idx_to_char, char_to_idx)
    >> '塞纳河瞎土摩漫代手画鹰专W誓病'
    """
    state = init_rnn_state(1, num_hiddens, device)
    output = [char_to_idx[prefix[0]]]
    for t in range(num_chars + len(prefix) - 1):
        # Use the previous time step's output as the current time step's input
        X = to_onehot(torch.tensor([[output[-1]]], device=device), vocab_size)
        # Compute the output and update the hidden state
        (Y, state) = rnn(X, state, params)
        # The next input is the next character of prefix, or the current best prediction
        if t < len(prefix) - 1:
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y[0].argmax(dim=1).item()))
    return ''.join([idx_to_char[i] for i in output])



def grad_clipping(params, theta, device):
    # Clip gradients so that their global L2 norm does not exceed theta
    norm = torch.tensor([0.0], device=device)
    for param in params:
        norm += (param.grad.data ** 2).sum()
    norm = norm.sqrt().item()
    if norm > theta:
        for param in params:
            param.grad.data *= (theta / norm)


def sgd(params, lr, batch_size):
    # To stay consistent with the book we divide by batch_size here, though it is not strictly
    # needed: PyTorch's loss functions already average over the batch dimension by default.
    for param in params:
        param.data -= lr * param.grad / batch_size  # note: update through param.data
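
def _loss_mean_example():
    # A quick check (added sketch, not from the book): nn.CrossEntropyLoss averages over the
    # batch by default, which is why train_and_predict_rnn below calls sgd(params, lr, 1).
    logits = torch.randn(4, 10)
    labels = torch.randint(0, 10, (4,))
    mean_loss = nn.CrossEntropyLoss()(logits, labels)
    per_sample = nn.CrossEntropyLoss(reduction='none')(logits, labels)
    assert torch.allclose(mean_loss, per_sample.mean())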


def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                          vocab_size, device, corpus_indices, idx_to_char,
                          char_to_idx, is_random_iter, num_epochs, num_steps,
                          lr, clipping_theta, batch_size, pred_period,
                          pred_len, prefixes):
    """
    Training loop; see run.py (scratch) for a usage example.
    """
    if is_random_iter:
        data_iter_fn = data_iter_random
    else:
        data_iter_fn = data_iter_consecutive
    params = get_params(vocab_size, num_hiddens, vocab_size, device)
    loss = nn.CrossEntropyLoss()

    for epoch in range(num_epochs):
        if not is_random_iter:  # with consecutive sampling, initialize the hidden state at the start of the epoch
            state = init_rnn_state(batch_size, num_hiddens, device)
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)
        for X, Y in data_iter:  # X, Y: (batch_size, num_steps), e.g. (2, 5)
            if is_random_iter:  # with random sampling, re-initialize the hidden state before each minibatch
                state = init_rnn_state(batch_size, num_hiddens, device)
            else:  # otherwise detach the hidden state from the computation graph
                for s in state:
                    s.detach_()

            inputs = to_onehot(X, vocab_size)
            # outputs is a list of num_steps matrices of shape (batch_size, vocab_size)
            (outputs, state) = rnn(inputs, state, params)
            # after concatenation the shape is (num_steps * batch_size, vocab_size)
            outputs = torch.cat(outputs, dim=0)
            # Y has shape (batch_size, num_steps); transpose and flatten it into a vector of
            # length batch_size * num_steps so that it lines up row-by-row with outputs
            y = torch.transpose(Y, 0, 1).contiguous().view(-1)
            # average classification error via cross-entropy loss
            l = loss(outputs, y.long())

            # zero the gradients
            if params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            grad_clipping(params, clipping_theta, device)  # clip gradients
            sgd(params, lr, 1)  # the loss is already a mean, so the gradient needs no further averaging
            l_sum += l.item() * y.shape[0]
            n += y.shape[0]

        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, math.exp(l_sum / n), time.time() - start))
            for prefix in prefixes:
                print(' -', predict_rnn(prefix, pred_len, rnn, params, init_rnn_state,
                                        num_hiddens, vocab_size, device, idx_to_char, char_to_idx))

def train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,
                                  corpus_indices, idx_to_char, char_to_idx,
                                  num_epochs, num_steps, lr, clipping_theta,
                                  batch_size, pred_period, pred_len, prefixes):
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.to(device)
    state = None
    for epoch in range(num_epochs):
        l_sum, n, start = 0.0, 0, time.time()
        data_iter = data_iter_consecutive(corpus_indices, batch_size, num_steps, device)  # consecutive sampling
        for X, Y in data_iter:
            if state is not None:
                # Detach the hidden state from the computation graph so that the gradient
                # only depends on the current minibatch (keeps backprop cheap)
                if isinstance(state, tuple):  # LSTM: state is (h, c)
                    state = (state[0].detach(), state[1].detach())
                else:
                    state = state.detach()

            (output, state) = model(X, state)  # output: shape (num_steps * batch_size, vocab_size)

            # Y has shape (batch_size, num_steps); transpose and flatten it into a vector of
            # length batch_size * num_steps so that it lines up row-by-row with output
            y = torch.transpose(Y, 0, 1).contiguous().view(-1)
            l = loss(output, y.long())

            optimizer.zero_grad()
            l.backward()
            # clip gradients
            grad_clipping(model.parameters(), clipping_theta, device)
            optimizer.step()
            l_sum += l.item() * y.shape[0]
            n += y.shape[0]

        try:
            perplexity = math.exp(l_sum / n)
        except OverflowError:
            perplexity = float('inf')
        if (epoch + 1) % pred_period == 0:
            print('epoch %d, perplexity %f, time %.2f sec' % (
                epoch + 1, perplexity, time.time() - start))
            for prefix in prefixes:
                print(' -', model.predict(
                    prefix, pred_len, vocab_size, device, idx_to_char,
                    char_to_idx))

run.py (scratch)

#coding=utf-8
import torch
import tool

(corpus_indices, idx_to_char, char_to_idx, vocab_size) = tool.load_data_jay_lyrics()

device = 'cpu'
num_inputs = vocab_size
num_hiddens = 256
num_outputs = vocab_size

X = torch.arange(10).view(2, 5)  # 2*5
inputs = tool.to_onehot(X.to(device), vocab_size)  # list of 5 tensors of shape (2, vocab_size)

state = tool.init_rnn_state(X.shape[0], num_hiddens, device)
params = tool.get_params(num_inputs, num_hiddens, num_outputs, device)

num_epochs, num_steps, batch_size, lr, clipping_theta = 250, 35, 32, 1e2, 1e-2
pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']

rnn = tool.rnn
get_params = tool.get_params
init_rnn_state = tool.init_rnn_state

"""
batch_size = 2   
num_steps = 5 步长

num_inputs = num_outputs = 1028 词典长
num_hiddens = 256
"""

tool.train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
                      vocab_size, device, corpus_indices, idx_to_char,
                      char_to_idx, True, num_epochs, num_steps, lr,
                      clipping_theta, batch_size, pred_period, pred_len,
                      prefixes)

model.py (pytorch)

#coding=utf-8
import time
import math
import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F

import sys
sys.path.append("..")
# import d2lzh_pytorch as d2l
import tool
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class RNNModel(nn.Module):
    def __init__(self, num_hiddens, vocab_size):
        super(RNNModel, self).__init__()
        rnn_layer = nn.RNN(input_size=vocab_size, hidden_size=num_hiddens)
        self.rnn = rnn_layer
        self.hidden_size = rnn_layer.hidden_size * (2 if rnn_layer.bidirectional else 1)
        self.vocab_size = vocab_size
        self.dense = nn.Linear(self.hidden_size, vocab_size)
        self.state = None

    def forward(self, inputs, state): # inputs: (batch, seq_len)
        # Get the one-hot representation
        X = tool.to_onehot(inputs, self.vocab_size)  # X is a list of seq_len tensors
        Y, self.state = self.rnn(torch.stack(X), state)
        # Reshape Y to (num_steps * batch_size, num_hiddens); the fully connected layer then
        # produces an output of shape (num_steps * batch_size, vocab_size)
        output = self.dense(Y.view(-1, Y.shape[-1]))
        return output, self.state

    def predict(self, prefix, num_chars, vocab_size, device, idx_to_char, char_to_idx):
        state = None
        output = [char_to_idx[prefix[0]]]  # output records prefix plus the generated characters
        for t in range(num_chars + len(prefix) - 1):
            X = torch.tensor([output[-1]], device=device).view(1, 1)
            if state is not None:
                if isinstance(state, tuple):  # LSTM, state:(h, c)
                    state = (state[0].to(device), state[1].to(device))
                else:
                    state = state.to(device)
            (Y, state) = self.forward(X, state)  # forward pass; the parameters live inside the module
            if t < len(prefix) - 1:
                output.append(char_to_idx[prefix[t + 1]])
            else:
                output.append(int(Y.argmax(dim=1).item()))
        return ''.join([idx_to_char[i] for i in output])

run.py (pytorch)

import tool
from model import RNNModel
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

(corpus_indices, idx_to_char, char_to_idx, vocab_size) = tool.load_data_jay_lyrics()

num_steps = 5
num_hiddens = 256
model = RNNModel(num_hiddens, vocab_size).to(device)

num_epochs, batch_size, lr, clipping_theta = 250, 32, 1e-3, 1e-2
pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']
tool.train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,
                            corpus_indices, idx_to_char, char_to_idx,
                            num_epochs, num_steps, lr, clipping_theta,
                            batch_size, pred_period, pred_len, prefixes)

GRU (Gated Recurrent Unit)
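
For reference, a restatement (added, not from the original post) of the updates that GRU.forward below implements, with \sigma the sigmoid and \odot the elementwise product:

$$
\begin{aligned}
Z_t &= \sigma(X_t W_{xz} + H_{t-1} W_{hz} + b_z) \\
R_t &= \sigma(X_t W_{xr} + H_{t-1} W_{hr} + b_r) \\
\tilde{H}_t &= \tanh(X_t W_{xh} + (R_t \odot H_{t-1}) W_{hh} + b_h) \\
H_t &= Z_t \odot H_{t-1} + (1 - Z_t) \odot \tilde{H}_t
\end{aligned}
$$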

tmodel.py (scratch)

#coding=utf-8
import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
import time
import math
import sys
sys.path.append("..")
import tool
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

(corpus_indices, idx_to_char, char_to_idx,  vocab_size) = tool.load_data_jay_lyrics()
print(torch.__version__, device)


class GRU(nn.Module):

    @staticmethod
    def get_params(num_inputs, num_hiddens, num_outputs, device):
        def _one(shape):
            ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)
            return torch.nn.Parameter(ts, requires_grad=True)

        def _three():
            return (_one((num_inputs, num_hiddens)),
                    _one((num_hiddens, num_hiddens)),
                    torch.nn.Parameter(torch.zeros(num_hiddens, device=device, dtype=torch.float32),
                                       requires_grad=True))

        W_xz, W_hz, b_z = _three()  # update gate parameters
        W_xr, W_hr, b_r = _three()  # reset gate parameters
        W_xh, W_hh, b_h = _three()  # candidate hidden state parameters

        # output layer parameters
        W_hq = _one((num_hiddens, num_outputs))
        b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, dtype=torch.float32), requires_grad=True)
        return nn.ParameterList([W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q])


    @staticmethod
    def init_gru_state(batch_size, num_hiddens, device):
        return (torch.zeros((batch_size, num_hiddens), device=device),)


    def __init__(self, num_hiddens, vocab_size, device):
        super(GRU, self).__init__()
        self.params = self.get_params(vocab_size, num_hiddens, vocab_size, device)
        self.W_xz, self.W_hz, self.b_z, \
        self.W_xr, self.W_hr, self.b_r, \
        self.W_xh, self.W_hh, self.b_h, \
        self.W_hq, self.b_q = self.params


    def forward(self, inputs, state):
        H, = state
        outputs = []
        for X in inputs:
            Z = torch.sigmoid(torch.matmul(X, self.W_xz) + torch.matmul(H, self.W_hz) + self.b_z)
            R = torch.sigmoid(torch.matmul(X, self.W_xr) + torch.matmul(H, self.W_hr) + self.b_r)
            H_tilda = torch.tanh(torch.matmul(X, self.W_xh) + R * torch.matmul(H, self.W_hh) + self.b_h)
            H = Z * H + (1 - Z) * H_tilda
            Y = torch.matmul(H, self.W_hq) + self.b_q
            outputs.append(Y)
        return outputs, (H,)

    def predict_rnn(self, prefix, num_chars,
                    num_hiddens, vocab_size, device, idx_to_char, char_to_idx):
        """
        预测函数, 由prefix来预测接下来num_chars个字符(无训练)
        上接rnn函数
        predict_rnn('塞纳河', 12, rnn, params, init_rnn_state, num_hiddens, vocab_size,
        device, idx_to_char, char_to_idx)
        >> '塞纳河瞎土摩漫代手画鹰专W誓病'
        """
        state = self.init_gru_state(1, num_hiddens, device)
        output = [char_to_idx[prefix[0]]]
        for t in range(num_chars + len(prefix) - 1):
            # 将上一时间步的输出作为当前时间步的输入
            X = tool.to_onehot(torch.tensor([[output[-1]]], device=device), vocab_size)
            # 计算输出和更新隐藏状态
            (Y, state) = self.forward(X, state)
            # 下一个时间步的输入是prefix里的字符或者当前的最佳预测字符
            if t < len(prefix) - 1:
                output.append(char_to_idx[prefix[t + 1]])
            else:
                output.append(int(Y[0].argmax(dim=1).item()))
        return ''.join([idx_to_char[i] for i in output])


    def fit(self, num_hiddens,
                  vocab_size, device, corpus_indices, idx_to_char,
                  char_to_idx, is_random_iter, num_epochs, num_steps,
                  lr, clipping_theta, batch_size, pred_period,
                  pred_len, prefixes):

        if is_random_iter:
            data_iter_fn = tool.data_iter_random
        else:
            data_iter_fn = tool.data_iter_consecutive
        # params = get_params(vocab_size, num_hiddens, vocab_size, device)
        # params = get_params()
        loss = nn.CrossEntropyLoss()

        for epoch in range(num_epochs):
            if not is_random_iter:  # with consecutive sampling, initialize the hidden state at the start of the epoch
                state = self.init_gru_state(batch_size, num_hiddens, device)
            l_sum, n, start = 0.0, 0, time.time()
            data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)
            for X, Y in data_iter:  # X, Y: (batch_size, num_steps), e.g. (2, 5)
                if is_random_iter:  # with random sampling, re-initialize the hidden state before each minibatch
                    state = self.init_gru_state(batch_size, num_hiddens, device)
                else:  # otherwise detach the hidden state from the computation graph
                    for s in state:
                        s.detach_()

                inputs = tool.to_onehot(X, vocab_size)
                # outputs is a list of num_steps matrices of shape (batch_size, vocab_size)
                (outputs, state) = self.forward(inputs, state)
                # after concatenation the shape is (num_steps * batch_size, vocab_size)
                outputs = torch.cat(outputs, dim=0)
                # Y has shape (batch_size, num_steps); transpose and flatten it into a vector of
                # length batch_size * num_steps so that it lines up row-by-row with outputs
                y = torch.transpose(Y, 0, 1).contiguous().view(-1)
                # average classification error via cross-entropy loss
                l = loss(outputs, y.long())

                # zero the gradients
                if self.params[0].grad is not None:
                    for param in self.params:
                        param.grad.data.zero_()
                l.backward()
                tool.grad_clipping(self.params, clipping_theta, device)  # clip gradients
                tool.sgd(self.params, lr, 1)  # the loss is already a mean, so the gradient needs no further averaging
                l_sum += l.item() * y.shape[0]
                n += y.shape[0]

            if (epoch + 1) % pred_period == 0:
                print('epoch %d, perplexity %f, time %.2f sec' % (
                    epoch + 1, math.exp(l_sum / n), time.time() - start))
                for prefix in prefixes:
                    print(' -', self.predict_rnn(prefix, pred_len,
                                            num_hiddens, vocab_size, device, idx_to_char, char_to_idx))

run.py(scratch)

import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys
sys.path.append("..")
import tool
from tmodel import GRU

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

(corpus_indices, idx_to_char, char_to_idx, vocab_size) = tool.load_data_jay_lyrics()
print(torch.__version__, device)

num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
print('will use', device)

num_epochs, num_steps, batch_size, lr, clipping_theta = 160, 35, 32, 1e2, 1e-2
pred_period, pred_len, prefixes = 40, 50, ['分开', '不分开']

net = GRU(num_hiddens, vocab_size, device)
net.fit(num_hiddens,vocab_size, device, corpus_indices, idx_to_char,
                          char_to_idx, False, num_epochs, num_steps, lr,
                          clipping_theta, batch_size, pred_period, pred_len,
                          prefixes)

LSTM
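
For reference, the updates that LSTM.forward below implements (again a restatement added here, not from the original post); C is the memory cell:

$$
\begin{aligned}
I_t &= \sigma(X_t W_{xi} + H_{t-1} W_{hi} + b_i) \\
F_t &= \sigma(X_t W_{xf} + H_{t-1} W_{hf} + b_f) \\
O_t &= \sigma(X_t W_{xo} + H_{t-1} W_{ho} + b_o) \\
\tilde{C}_t &= \tanh(X_t W_{xc} + H_{t-1} W_{hc} + b_c) \\
C_t &= F_t \odot C_{t-1} + I_t \odot \tilde{C}_t \\
H_t &= O_t \odot \tanh(C_t)
\end{aligned}
$$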

tmodel.py (scratch)

#coding=utf-8
import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
import time
import math
import sys
sys.path.append("..")
import tool
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

(corpus_indices, idx_to_char, char_to_idx,  vocab_size) = tool.load_data_jay_lyrics()
print(torch.__version__, device)


class LSTM(nn.Module):

    @staticmethod
    def get_params(num_inputs, num_hiddens, num_outputs, device):
        def _one(shape):
            ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)
            return torch.nn.Parameter(ts, requires_grad=True)

        def _three():
            return (_one((num_inputs, num_hiddens)),
                    _one((num_hiddens, num_hiddens)),
                    torch.nn.Parameter(torch.zeros(num_hiddens, device=device, dtype=torch.float32),
                                       requires_grad=True))

        W_xi, W_hi, b_i = _three()  # input gate parameters
        W_xf, W_hf, b_f = _three()  # forget gate parameters
        W_xo, W_ho, b_o = _three()  # output gate parameters
        W_xc, W_hc, b_c = _three()  # candidate memory cell parameters

        # output layer parameters
        W_hq = _one((num_hiddens, num_outputs))
        b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, dtype=torch.float32), requires_grad=True)
        return nn.ParameterList([W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q])


    @staticmethod
    def init_state(batch_size, num_hiddens, device):
        return (torch.zeros((batch_size, num_hiddens), device=device),
                torch.zeros((batch_size, num_hiddens), device=device))


    def __init__(self, num_hiddens, vocab_size, device):
        super(LSTM, self).__init__()
        self.params = self.get_params(vocab_size, num_hiddens, vocab_size, device)
        self.W_xi, self.W_hi, self.b_i,\
        self.W_xf, self.W_hf, self.b_f,\
        self.W_xo, self.W_ho, self.b_o,\
        self.W_xc, self.W_hc, self.b_c,\
        self.W_hq, self.b_q = self.params


    def forward(self, inputs, state):
        (H, C) = state
        outputs = []
        for X in inputs:
            I = torch.sigmoid(torch.matmul(X, self.W_xi) + torch.matmul(H, self.W_hi) + self.b_i)      # input gate (p.176, eq. 1)
            F = torch.sigmoid(torch.matmul(X, self.W_xf) + torch.matmul(H, self.W_hf) + self.b_f)      # forget gate (eq. 2; note F shadows torch.nn.functional here)
            O = torch.sigmoid(torch.matmul(X, self.W_xo) + torch.matmul(H, self.W_ho) + self.b_o)      # output gate (eq. 3)
            C_tilda = torch.tanh(torch.matmul(X, self.W_xc) + torch.matmul(H, self.W_hc) + self.b_c)   # candidate memory cell (p.177, eq. 4)
            C = F * C + I * C_tilda     # memory cell (p.178, eq. 5)
            H = O * C.tanh()            # hidden state (p.178, eq. 6)
            Y = torch.matmul(H, self.W_hq) + self.b_q
            outputs.append(Y)
        return outputs, (H, C)

    def predict_rnn(self, prefix, num_chars,
                    num_hiddens, vocab_size, device, idx_to_char, char_to_idx):
        """
        预测函数, 由prefix来预测接下来num_chars个字符(无训练)
        上接rnn函数
        predict_rnn('塞纳河', 12, rnn, params, init_rnn_state, num_hiddens, vocab_size,
        device, idx_to_char, char_to_idx)
        >> '塞纳河瞎土摩漫代手画鹰专W誓病'
        """
        state = self.init_state(1, num_hiddens, device)
        output = [char_to_idx[prefix[0]]]
        for t in range(num_chars + len(prefix) - 1):
            # 将上一时间步的输出作为当前时间步的输入
            X = tool.to_onehot(torch.tensor([[output[-1]]], device=device), vocab_size)
            # 计算输出和更新隐藏状态
            (Y, state) = self.forward(X, state)
            # 下一个时间步的输入是prefix里的字符或者当前的最佳预测字符
            if t < len(prefix) - 1:
                output.append(char_to_idx[prefix[t + 1]])
            else:
                output.append(int(Y[0].argmax(dim=1).item()))
        return ''.join([idx_to_char[i] for i in output])


    def fit(self, num_hiddens,
                  vocab_size, device, corpus_indices, idx_to_char,
                  char_to_idx, is_random_iter, num_epochs, num_steps,
                  lr, clipping_theta, batch_size, pred_period,
                  pred_len, prefixes):

        if is_random_iter:
            data_iter_fn = tool.data_iter_random
        else:
            data_iter_fn = tool.data_iter_consecutive
        # params = get_params(vocab_size, num_hiddens, vocab_size, device)
        # params = get_params()
        loss = nn.CrossEntropyLoss()

        for epoch in range(num_epochs):
            if not is_random_iter:  # with consecutive sampling, initialize the hidden state at the start of the epoch
                state = self.init_state(batch_size, num_hiddens, device)
            l_sum, n, start = 0.0, 0, time.time()
            data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)
            for X, Y in data_iter:  # X, Y: (batch_size, num_steps), e.g. (2, 5)
                if is_random_iter:  # with random sampling, re-initialize the hidden state before each minibatch
                    state = self.init_state(batch_size, num_hiddens, device)
                else:  # otherwise detach the hidden state from the computation graph
                    for s in state:
                        s.detach_()

                inputs = tool.to_onehot(X, vocab_size)
                # outputs is a list of num_steps matrices of shape (batch_size, vocab_size)
                (outputs, state) = self.forward(inputs, state)
                # after concatenation the shape is (num_steps * batch_size, vocab_size)
                outputs = torch.cat(outputs, dim=0)
                # Y has shape (batch_size, num_steps); transpose and flatten it into a vector of
                # length batch_size * num_steps so that it lines up row-by-row with outputs
                y = torch.transpose(Y, 0, 1).contiguous().view(-1)
                # average classification error via cross-entropy loss
                l = loss(outputs, y.long())

                # zero the gradients
                if self.params[0].grad is not None:
                    for param in self.params:
                        param.grad.data.zero_()
                l.backward()
                tool.grad_clipping(self.params, clipping_theta, device)  # clip gradients
                tool.sgd(self.params, lr, 1)  # the loss is already a mean, so the gradient needs no further averaging
                l_sum += l.item() * y.shape[0]
                n += y.shape[0]

            if (epoch + 1) % pred_period == 0:
                print('epoch %d, perplexity %f, time %.2f sec' % (
                    epoch + 1, math.exp(l_sum / n), time.time() - start))
                for prefix in prefixes:
                    print(' -', self.predict_rnn(prefix, pred_len,
                                            num_hiddens, vocab_size, device, idx_to_char, char_to_idx))

run.py(scratch)

import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys
sys.path.append("..")
import tool
from tmodel import LSTM

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

(corpus_indices, idx_to_char, char_to_idx, vocab_size) = tool.load_data_jay_lyrics()
print(torch.__version__, device)

num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
print('will use', device)

num_epochs, num_steps, batch_size, lr, clipping_theta = 160, 35, 32, 1e2, 1e-2
pred_period, pred_len, prefixes = 40, 50, ['分开', '不分开']

net = LSTM(num_hiddens, vocab_size, device)
net.fit(num_hiddens,vocab_size, device, corpus_indices, idx_to_char,
                          char_to_idx, False, num_epochs, num_steps, lr,
                          clipping_theta, batch_size, pred_period, pred_len,
                          prefixes)

tmodel.py (pytorch)

GRU2 differs from RNNModel by a single line: the recurrent layer is nn.GRU (or nn.LSTM) instead of nn.RNN. The three built-in layers are otherwise interchangeable here; see the sketch below for the interface difference.
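
A minimal sketch (added for illustration, not from the original) of how nn.RNN, nn.GRU and nn.LSTM share the same call interface and differ only in the state they return; the sizes are just example values:

import torch
from torch import nn

seq_len, batch, vocab, hidden = 5, 2, 1027, 256
x = torch.randn(seq_len, batch, vocab)          # same layout that torch.stack(to_onehot(...)) produces

rnn_layer = nn.RNN(input_size=vocab, hidden_size=hidden)
gru_layer = nn.GRU(input_size=vocab, hidden_size=hidden)
lstm_layer = nn.LSTM(input_size=vocab, hidden_size=hidden)

y1, h = rnn_layer(x)        # h: (1, batch, hidden)
y2, h = gru_layer(x)        # h: (1, batch, hidden)
y3, (h, c) = lstm_layer(x)  # LSTM state is a tuple (h, c), hence the isinstance(state, tuple) checks in the code
print(y1.shape, y2.shape, y3.shape)  # all (seq_len, batch, hidden)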

class GRU2(nn.Module):
    def __init__(self, num_hiddens, vocab_size, device):
        super(GRU2, self).__init__()
        # rnn_layer = nn.RNN(input_size=vocab_size, hidden_size=num_hiddens)
        # self.rnn = rnn_layer

        rnn_layer = nn.GRU(input_size=vocab_size, hidden_size=num_hiddens)      # the only line that differs from RNNModel
        # rnn_layer = nn.LSTM(input_size=vocab_size, hidden_size=num_hiddens)   # LSTM
        self.rnn = rnn_layer
        self.hidden_size = rnn_layer.hidden_size * (2 if rnn_layer.bidirectional else 1)
        self.vocab_size = vocab_size
        self.dense = nn.Linear(self.hidden_size, vocab_size)
        self.state = None

    def forward(self, inputs, state): # inputs: (batch, seq_len)
        # Get the one-hot representation
        X = tool.to_onehot(inputs, self.vocab_size)  # X is a list of seq_len tensors
        Y, self.state = self.rnn(torch.stack(X), state)
        # Reshape Y to (num_steps * batch_size, num_hiddens); the fully connected layer then
        # produces an output of shape (num_steps * batch_size, vocab_size)
        output = self.dense(Y.view(-1, Y.shape[-1]))
        return output, self.state

    def predict(self, prefix, num_chars, vocab_size, device, idx_to_char, char_to_idx):
        state = None
        output = [char_to_idx[prefix[0]]]  # output records prefix plus the generated characters
        for t in range(num_chars + len(prefix) - 1):
            X = torch.tensor([output[-1]], device=device).view(1, 1)
            if state is not None:
                if isinstance(state, tuple):  # LSTM, state:(h, c)
                    state = (state[0].to(device), state[1].to(device))
                else:
                    state = state.to(device)
            (Y, state) = self.forward(X, state)  # forward pass; the parameters live inside the module
            if t < len(prefix) - 1:
                output.append(char_to_idx[prefix[t + 1]])
            else:
                output.append(int(Y.argmax(dim=1).item()))
        return ''.join([idx_to_char[i] for i in output])


    def fit(self, num_hiddens, vocab_size, device,
                    corpus_indices, idx_to_char, char_to_idx,
                    num_epochs, num_steps, lr, clipping_theta,
                    batch_size, pred_period, pred_len, prefixes):
        loss = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        self.to(device)  # move parameters to the target device (was commented out; needed when device is CUDA)
        state = None
        for epoch in range(num_epochs):
            l_sum, n, start = 0.0, 0, time.time()
            data_iter = tool.data_iter_consecutive(corpus_indices, batch_size, num_steps, device)  # consecutive sampling
            for X, Y in data_iter:
                if state is not None:
                    # Detach the hidden state from the computation graph so that the gradient
                    # only depends on the current minibatch (keeps backprop cheap)
                    if isinstance(state, tuple):  # LSTM: state is (h, c)
                        state = (state[0].detach(), state[1].detach())
                    else:
                        state = state.detach()

                (output, state) = self.forward(X, state)  # output: shape (num_steps * batch_size, vocab_size)

                # Y has shape (batch_size, num_steps); transpose and flatten it into a vector of
                # length batch_size * num_steps so that it lines up row-by-row with output
                y = torch.transpose(Y, 0, 1).contiguous().view(-1)
                l = loss(output, y.long())

                optimizer.zero_grad()
                l.backward()
                # clip gradients
                tool.grad_clipping(self.parameters(), clipping_theta, device)
                optimizer.step()
                l_sum += l.item() * y.shape[0]
                n += y.shape[0]

            try:
                perplexity = math.exp(l_sum / n)
            except OverflowError:
                perplexity = float('inf')
            if (epoch + 1) % pred_period == 0:
                print('epoch %d, perplexity %f, time %.2f sec' % (
                    epoch + 1, perplexity, time.time() - start))
                for prefix in prefixes:
                    print(' -', self.predict(
                        prefix, pred_len, vocab_size, device, idx_to_char,
                        char_to_idx))

run.py(pytorch)

import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys
sys.path.append("..")
import tool
from tmodel import GRU2

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

(corpus_indices, idx_to_char, char_to_idx, vocab_size) = tool.load_data_jay_lyrics()
print(torch.__version__, device)

num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size
print('will use', device)

num_epochs, num_steps, batch_size, lr, clipping_theta = 160, 35, 32, 1e-2, 1e-2  # Adam: use a much smaller lr than the from-scratch SGD runs
pred_period, pred_len, prefixes = 40, 50, ['分开', '不分开']

net = GRU2(num_hiddens, vocab_size, device)
net.fit(num_hiddens,vocab_size, device, corpus_indices, idx_to_char,
                          char_to_idx, num_epochs, num_steps, lr,
                          clipping_theta, batch_size, pred_period, pred_len,
                          prefixes)