1. Overview of the LLM Model Architecture
Models such as GPT and Llama generate text one token at a time and are built on the decoder part of the transformer architecture.
For this reason, these LLMs are often referred to as "decoder-only" architectures.
Compared with traditional deep learning models, LLMs are larger mainly because of their enormous number of parameters, not the amount of code.
In what follows, we use embedding and model sizes comparable to a small GPT-2 model; specifically, we will code the architecture of the smallest GPT-2 model, which has 124 million parameters.
For the sentence "every effort moves you", passing the input through the architecture described above yields the predicted next word, "forward".
import torch
import torch.nn as nn

GPT_CONFIG_124M = {
    "vocab_size": 50257,     # Vocabulary size: maximum number of distinct tokens the model can handle
    "context_length": 1024,  # Context length: maximum number of input tokens per sequence
    "emb_dim": 768,          # Embedding dimension
    "n_heads": 12,           # Number of attention heads
    "n_layers": 12,          # Number of transformer layers
    "drop_rate": 0.1,        # Dropout rate
    "qkv_bias": False        # Query-Key-Value bias
}
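
As a quick side calculation (not part of the original listing), the embedding sizes implied by this configuration can be read off directly; assuming the output head shares its weights with the token embedding, as in the original GPT-2, the two embedding layers account for roughly 39 million of the 124 million parameters, and the transformer blocks account for most of the rest.

token_emb_params = GPT_CONFIG_124M["vocab_size"] * GPT_CONFIG_124M["emb_dim"]      # 50257 * 768 = 38,597,376
pos_emb_params = GPT_CONFIG_124M["context_length"] * GPT_CONFIG_124M["emb_dim"]    # 1024 * 768 = 786,432
print(token_emb_params + pos_emb_params)  # 39,383,808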

class DummyGPTModel(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.tok_emb = nn.Embedding(cfg["vocab_size"], cfg["emb_dim"])
        self.pos_emb = nn.Embedding(cfg["context_length"], cfg["emb_dim"])
        self.drop_emb = nn.Dropout(cfg["drop_rate"])
        # Use a placeholder for TransformerBlock
        self.trf_blocks = nn.Sequential(
            *[DummyTransformerBlock(cfg) for _ in range(cfg["n_layers"])])
        # Use a placeholder for LayerNorm
        self.final_norm = DummyLayerNorm(cfg["emb_dim"])
        self.out_head = nn.Linear(
            cfg["emb_dim"], cfg["vocab_size"], bias=False
        )

    def forward(self, in_idx):
        batch_size, seq_len = in_idx.shape
        tok_embeds = self.tok_emb(in_idx)
        pos_embeds = self.pos_emb(torch.arange(seq_len, device=in_idx.device))
        x = tok_embeds + pos_embeds  # add positional embeddings to the token embeddings
        x = self.drop_emb(x)
        x = self.trf_blocks(x)
        x = self.final_norm(x)
        logits = self.out_head(x)  # shape: [batch_size, seq_len, vocab_size]
        return logits

class DummyTransformerBlock(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        # A simple placeholder; the real block is implemented later
        # (the same applies to the other Dummy classes).

    def forward(self, x):
        # This block does nothing and just returns its input.
        return x

class DummyLayerNorm(nn.Module):
    def __init__(self, normalized_shape, eps=1e-5):
        super().__init__()
        # The parameters here are just to mimic the LayerNorm interface.

    def forward(self, x):
        # This placeholder does nothing and just returns its input.
        return x
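
To see the data flow end to end, here is a minimal usage sketch (not part of the original listing): it assumes the tiktoken package is available for the GPT-2 BPE tokenizer, and the second sentence is simply an arbitrary example with the same number of tokens so the two sequences can be stacked into one batch.

import tiktoken  # assumption: tiktoken is installed and provides the GPT-2 BPE tokenizer

tokenizer = tiktoken.get_encoding("gpt2")
batch = torch.stack([
    torch.tensor(tokenizer.encode("Every effort moves you")),
    torch.tensor(tokenizer.encode("Every day holds a")),  # arbitrary second sentence, also 4 tokens
], dim=0)

torch.manual_seed(123)
model = DummyGPTModel(GPT_CONFIG_124M)
logits = model(batch)
print(logits.shape)  # expected: torch.Size([2, 4, 50257]) = [batch_size, seq_len, vocab_size]

Each position in the output carries one logit per vocabulary entry; the next word ("forward" in the example above) would later be chosen from the logits at the last position, once the real transformer blocks replace the placeholders.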