torch.Tensor.view(*args)

This article walks through PyTorch's view() function, which changes the shape of a tensor while keeping its data and number of elements unchanged. The examples below show how to use view() to transform tensor dimensions, including passing -1 to let PyTorch infer a dimension automatically.

view(*args) → Tensor
Returns a new tensor with the same data but a different shape. The returned tensor has the same data and the same number of elements as the original tensor, but its dimensions may differ. The tensor must be contiguous (see contiguous()) for view() to work.
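The contiguity requirement matters in practice: operations such as transpose() return a non-contiguous tensor, and calling view() on it raises a RuntimeError. A minimal sketch of the usual workaround (variable names here are illustrative, not from the original example):

import torch

a = torch.randn(4, 5)
b = a.t()                    # transpose returns a non-contiguous tensor

print(b.is_contiguous())     # False

# b.view(20) would raise a RuntimeError because b is not contiguous.
c = b.contiguous().view(20)  # copy into contiguous memory first, then view
d = b.reshape(20)            # reshape() performs the copy automatically if needed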

import torch

x = torch.randn(4, 5)

print('Original tensor:', x)

print('Reshape from (4, 5) to (20, 1):', x.view(20, 1))
# Reshape from (4, 5) to (-1, 1): -1 tells PyTorch to infer that dimension
# from the remaining one, i.e. 20 elements / 1 = 20, so -1 resolves to 20.
print('Reshape from (4, 5) to (-1, 1):', x.view(-1, 1))

print('Reshape from (4, 5) to (1, 20):', x.view(1, 20))
# Reshape from (4, 5) to (1, -1): again -1 is inferred as 20 / 1 = 20.
print('Reshape from (4, 5) to (1, -1):', x.view(1, -1))

Output:

Original tensor: tensor([[ 0.2278, -0.6850,  0.6527, -0.3206, -2.5704],
        [ 0.8447,  0.2473, -0.5029,  0.6311, -0.4551],
        [ 0.8049, -0.3084,  0.5642,  0.2411,  0.5785],
        [-0.6099, -0.8746, -0.9222,  2.0989,  1.5902]])
Reshape from (4, 5) to (20, 1): tensor([[ 0.2278],
        [-0.6850],
        [ 0.6527],
        [-0.3206],
        [-2.5704],
        [ 0.8447],
        [ 0.2473],
        [-0.5029],
        [ 0.6311],
        [-0.4551],
        [ 0.8049],
        [-0.3084],
        [ 0.5642],
        [ 0.2411],
        [ 0.5785],
        [-0.6099],
        [-0.8746],
        [-0.9222],
        [ 2.0989],
        [ 1.5902]])
Reshape from (4, 5) to (-1, 1): tensor([[ 0.2278],
        [-0.6850],
        [ 0.6527],
        [-0.3206],
        [-2.5704],
        [ 0.8447],
        [ 0.2473],
        [-0.5029],
        [ 0.6311],
        [-0.4551],
        [ 0.8049],
        [-0.3084],
        [ 0.5642],
        [ 0.2411],
        [ 0.5785],
        [-0.6099],
        [-0.8746],
        [-0.9222],
        [ 2.0989],
        [ 1.5902]])
Reshape from (4, 5) to (1, 20): tensor([[ 0.2278, -0.6850,  0.6527, -0.3206, -2.5704,  0.8447,  0.2473, -0.5029,
          0.6311, -0.4551,  0.8049, -0.3084,  0.5642,  0.2411,  0.5785, -0.6099,
         -0.8746, -0.9222,  2.0989,  1.5902]])
Reshape from (4, 5) to (1, -1): tensor([[ 0.2278, -0.6850,  0.6527, -0.3206, -2.5704,  0.8447,  0.2473, -0.5029,
          0.6311, -0.4551,  0.8049, -0.3084,  0.5642,  0.2411,  0.5785, -0.6099,
         -0.8746, -0.9222,  2.0989,  1.5902]])
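One more point worth noting: view() does not copy data. The returned tensor shares the same underlying storage as the original, so an in-place write through one is visible through the other. A minimal sketch continuing the example above:

y = x.view(1, 20)
y[0, 0] = 100.0
print(x[0, 0])   # tensor(100.), because x and y share the same storage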