self.layer2 = torch.nn.TransformerEncoderLayer(d_model=LSTM_hidden_size, nhead=Attention_head) 这句代码的意思是什么?

代码创建了一个TransformerEncoderLayer,设置了模型维度d_model等于LSTM隐层大小,注意力头nhead等于Attention_head的数量。此层在Transformer模型中用于编码序列数据。
部署运行你感兴趣的模型镜像

这句代码定义了一个 TransformerEncoderLayer 层,并将其命名为 self.layer2。它的参数如下:

  • d_model:TransformerEncoderLayer 层的模型维度,值为 LSTM_hidden_size。
  • nhead:TransformerEncoderLayer 层的注意力头数量,值为 Attention_head。

这个 TransformerEncoderLayer 层是 PyTorch 中的一个编码器层,它可以在 Transformer 模型中用于对序列数据进行编码。

您可能感兴趣的与本文相关的镜像

PyTorch 2.6

PyTorch 2.6

PyTorch
Cuda

PyTorch 是一个开源的 Python 机器学习库,基于 Torch 库,底层由 C++ 实现,应用于人工智能领域,如计算机视觉和自然语言处理

# Original forum post reported:
#     RuntimeError: CUDA error: device-side assert triggered
# Root cause: the dataset stores padding positions of `labels` as -100 (so the
# loss ignores them), but train_model() fed those same labels straight into the
# decoder embedding; nn.Embedding cannot look up index -100, which fires the
# device-side assert.  Fixed below by building a separate decoder input in
# which -100 is mapped back to the PAD id (0) before it reaches any embedding.
#
# This was originally a notebook cell; install dependencies first with:
#     pip install transformers datasets torch rouge-score matplotlib
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizerFast
import time
import numpy as np
from datasets import load_dataset
from rouge_score import rouge_scorer
import matplotlib.pyplot as plt
from IPython.display import clear_output

# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")


class SummaryDataset(Dataset):
    """CNN/DailyMail subset that strictly filters invalid samples.

    Keeps only articles longer than 20 chars with summaries longer than
    10 chars whose every token is in the BERT vocabulary.  Padding positions
    of the label sequence are replaced by -100 so the loss ignores them.
    """

    def __init__(self, dataset_split, tokenizer, max_article_len=384,
                 max_summary_len=96, subset_size=0.01):
        self.tokenizer = tokenizer
        self.max_article_len = max_article_len
        self.max_summary_len = max_summary_len
        self.subset = dataset_split.select(
            range(int(len(dataset_split) * subset_size)))
        # Strictly filter invalid samples
        self.articles = []
        self.summaries = []
        self.vocab = set(tokenizer.vocab.keys())
        for item in self.subset:
            article = item['article'].strip()
            summary = item['highlights'].strip()
            if len(article) > 20 and len(summary) > 10:
                article_tokens = tokenizer.tokenize(article)
                summary_tokens = tokenizer.tokenize(summary)
                if (all(t in self.vocab for t in article_tokens)
                        and all(t in self.vocab for t in summary_tokens)):
                    self.articles.append(article)
                    self.summaries.append(summary)
        self.pad_token_id = tokenizer.pad_token_id
        self.unk_token_id = tokenizer.unk_token_id

    def __len__(self):
        return len(self.articles)

    def __getitem__(self, idx):
        src = self.tokenizer(
            self.articles[idx], max_length=self.max_article_len,
            truncation=True, padding='max_length', return_tensors='pt',
            add_special_tokens=True)
        tgt = self.tokenizer(
            self.summaries[idx], max_length=self.max_summary_len,
            truncation=True, padding='max_length', return_tensors='pt',
            add_special_tokens=True)
        tgt_labels = tgt['input_ids'].squeeze().clone()
        # BUGFIX: sanitize out-of-range ids *before* masking padding, so the
        # -100 sentinel cannot be clobbered back to a "valid" UNK id.
        tgt_labels[tgt_labels >= len(self.tokenizer.vocab)] = self.unk_token_id
        tgt_labels[tgt_labels == self.pad_token_id] = -100  # ignored by the loss
        return {
            'input_ids': src['input_ids'].squeeze(),
            'attention_mask': src['attention_mask'].squeeze(),
            'labels': tgt_labels,
        }


def suppress_early_punct(tokenizer, generated, next_token):
    """Avoid emitting punctuation in the first few decoding steps.

    BUGFIX: the original version lived inside ``generate`` and referenced
    ``self.tokenizer`` which was never assigned (AttributeError), and called
    ``.item()`` on a batched tensor (crash for batch size > 1).  The guard is
    now optional (skipped when no tokenizer is supplied) and batch-safe
    (applied only for single-sample generation).
    """
    if tokenizer is None or generated.size(1) >= 5 or next_token.size(0) != 1:
        return next_token
    punctuation = [',', '.', ';', ':', '!', '?', "'", '"', '`', '~']
    punct_ids = {tokenizer.convert_tokens_to_ids(p) for p in punctuation}
    if next_token.item() in punct_ids:
        # Replace with the most common content word
        next_token = torch.tensor(
            [[tokenizer.convert_tokens_to_ids('the')]], device=next_token.device)
    return next_token


# Basic Seq2Seq model
class BasicEncoder(nn.Module):
    """Two-layer bidirectional GRU encoder producing a single hidden state."""

    def __init__(self, vocab_size, emb_dim=128, hidden_dim=256):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
        self.gru = nn.GRU(emb_dim, hidden_dim, num_layers=2,
                          batch_first=True, bidirectional=True)
        self.fc_hidden = nn.Linear(hidden_dim * 2, hidden_dim)

    def forward(self, src):
        embedded = self.embedding(src)
        outputs, hidden = self.gru(embedded)
        # Concatenate the second layer's forward/backward hidden states
        forward_hidden = hidden[-2, :, :]   # layer 2, forward direction
        backward_hidden = hidden[-1, :, :]  # layer 2, backward direction
        hidden = torch.cat([forward_hidden, backward_hidden], dim=1)  # (batch, 2*hidden_dim)
        hidden = self.fc_hidden(hidden).unsqueeze(0)  # (1, batch, hidden_dim)
        return hidden


class BasicDecoder(nn.Module):
    """Single-layer GRU decoder conditioned on a fixed context vector."""

    def __init__(self, vocab_size, emb_dim=128, hidden_dim=256):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
        self.gru = nn.GRU(emb_dim + hidden_dim, hidden_dim,
                          num_layers=1, batch_first=True)
        self.fc = nn.Linear(hidden_dim * 2 + emb_dim, vocab_size)

    def forward(self, input_ids, hidden, context):
        input_embedded = self.embedding(input_ids.unsqueeze(1))  # (batch, 1, emb_dim)
        input_combined = torch.cat([input_embedded, context.unsqueeze(1)], dim=2)
        output, hidden = self.gru(input_combined, hidden)  # (batch, 1, hidden_dim)
        output = output.squeeze(1)
        combined = torch.cat([output, context, input_embedded.squeeze(1)], dim=1)
        logits = self.fc(combined)
        return logits, hidden


class BasicSeq2Seq(nn.Module):
    """GRU encoder-decoder; ``tokenizer`` is optional and only enables the
    early-punctuation guard during generation (backward-compatible addition)."""

    def __init__(self, vocab_size, emb_dim=128, hidden_dim=256, tokenizer=None):
        super().__init__()
        self.encoder = BasicEncoder(vocab_size, emb_dim, hidden_dim)
        self.decoder = BasicDecoder(vocab_size, emb_dim, hidden_dim)
        self.device = device
        self.tokenizer = tokenizer
        self.sos_token_id = 101  # [CLS]
        self.eos_token_id = 102  # [SEP]
        self.unk_token_id = 100  # [UNK]

    def forward(self, src, tgt):
        # Teacher forcing: tgt must contain valid token ids (no -100 sentinel).
        hidden = self.encoder(src)
        context = hidden.squeeze(0)
        batch_size, tgt_len = tgt.size()
        outputs = torch.zeros(batch_size, tgt_len,
                              self.decoder.fc.out_features).to(device)
        input_ids = tgt[:, 0]
        for t in range(1, tgt_len):
            logits, hidden = self.decoder(input_ids, hidden, context)
            outputs[:, t] = logits
            input_ids = tgt[:, t]
        return outputs

    def generate(self, src, max_length=80):
        src = src.to(device)
        hidden = self.encoder(src)
        context = hidden.squeeze(0)
        generated = torch.full((src.size(0), 1), self.sos_token_id, device=device)
        for _ in range(max_length - 1):
            logits, hidden = self.decoder(generated[:, -1], hidden, context)
            next_token = torch.argmax(logits, dim=1, keepdim=True)
            next_token = suppress_early_punct(self.tokenizer, generated, next_token)
            generated = torch.cat([generated, next_token], dim=1)
            if (next_token == self.eos_token_id).all():
                break
        return generated


# Attention Seq2Seq model
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over encoder outputs."""

    def __init__(self, hidden_dim):
        super().__init__()
        self.W = nn.Linear(2 * hidden_dim, hidden_dim)
        self.v = nn.Linear(hidden_dim, 1, bias=False)

    def forward(self, hidden, encoder_outputs):
        src_len = encoder_outputs.size(1)
        hidden = hidden.unsqueeze(1).repeat(1, src_len, 1)           # (batch, src_len, hidden_dim)
        combined = torch.cat([hidden, encoder_outputs], dim=2)        # (batch, src_len, 2*hidden_dim)
        energy = self.v(torch.tanh(self.W(combined))).squeeze(2)      # (batch, src_len)
        return torch.softmax(energy, dim=1)


class AttnEncoder(nn.Module):
    """Two-layer bidirectional LSTM encoder."""

    def __init__(self, vocab_size, emb_dim=128, hidden_dim=256):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
        self.lstm = nn.LSTM(emb_dim, hidden_dim, num_layers=2, batch_first=True,
                            bidirectional=True, dropout=0.1)
        self.fc_hidden = nn.Linear(hidden_dim * 2, hidden_dim)  # merge both directions
        self.fc_cell = nn.Linear(hidden_dim * 2, hidden_dim)

    def forward(self, src):
        embedded = self.embedding(src)
        outputs, (hidden, cell) = self.lstm(embedded)  # outputs: (batch, src_len, 2*hidden_dim)
        # Concatenate the top layer's forward/backward states
        hidden = torch.cat([hidden[-2, :, :], hidden[-1, :, :]], dim=1)
        cell = torch.cat([cell[-2, :, :], cell[-1, :, :]], dim=1)
        hidden = self.fc_hidden(hidden).unsqueeze(0)  # (1, batch, hidden_dim)
        cell = self.fc_cell(cell).unsqueeze(0)
        return outputs, (hidden, cell)


class AttnDecoder(nn.Module):
    """LSTM decoder that attends over encoder outputs each step."""

    def __init__(self, vocab_size, emb_dim=128, hidden_dim=256):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
        self.attention = Attention(hidden_dim)
        self.lstm = nn.LSTM(emb_dim + 2 * hidden_dim, hidden_dim,
                            num_layers=1, batch_first=True)
        self.fc = nn.Linear(hidden_dim + emb_dim, vocab_size)

    def forward(self, input_ids, hidden, cell, encoder_outputs):
        input_embedded = self.embedding(input_ids.unsqueeze(1))           # (batch, 1, emb_dim)
        attn_weights = self.attention(hidden.squeeze(0), encoder_outputs)  # (batch, src_len)
        context = torch.bmm(attn_weights.unsqueeze(1), encoder_outputs)    # (batch, 1, 2*hidden_dim)
        lstm_input = torch.cat([input_embedded, context], dim=2)
        output, (hidden, cell) = self.lstm(lstm_input, (hidden, cell))
        logits = self.fc(torch.cat([output.squeeze(1),
                                    input_embedded.squeeze(1)], dim=1))
        return logits, hidden, cell


class AttnSeq2Seq(nn.Module):
    """Attention-based encoder-decoder; see BasicSeq2Seq for ``tokenizer``."""

    def __init__(self, vocab_size, emb_dim=128, hidden_dim=256, tokenizer=None):
        super().__init__()
        self.encoder = AttnEncoder(vocab_size, emb_dim, hidden_dim)
        self.decoder = AttnDecoder(vocab_size, emb_dim, hidden_dim)
        self.device = device
        self.tokenizer = tokenizer
        self.sos_token_id = 101  # [CLS]
        self.eos_token_id = 102  # [SEP]
        self.unk_token_id = 100  # [UNK]

    def forward(self, src, tgt):
        encoder_outputs, (hidden, cell) = self.encoder(src)
        batch_size, tgt_len = tgt.size()
        outputs = torch.zeros(batch_size, tgt_len,
                              self.decoder.fc.out_features).to(device)
        input_ids = tgt[:, 0]
        for t in range(1, tgt_len):
            logits, hidden, cell = self.decoder(input_ids, hidden, cell, encoder_outputs)
            outputs[:, t] = logits
            input_ids = tgt[:, t]
        return outputs

    def generate(self, src, max_length=80):
        encoder_outputs, (hidden, cell) = self.encoder(src)
        generated = torch.full((src.size(0), 1), self.sos_token_id, device=device)
        for _ in range(max_length - 1):
            logits, hidden, cell = self.decoder(generated[:, -1], hidden, cell,
                                                encoder_outputs)
            next_token = torch.argmax(logits, dim=1, keepdim=True)
            next_token = suppress_early_punct(self.tokenizer, generated, next_token)
            generated = torch.cat([generated, next_token], dim=1)
            if (next_token == self.eos_token_id).all():
                break
        return generated


# Transformer model
class PositionalEncoding(nn.Module):
    """Standard fixed sinusoidal positional encoding (batch-first)."""

    def __init__(self, d_model, max_len=5000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float()
                             * (-np.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        return x + self.pe[:, :x.size(1)]


class TransformerModel(nn.Module):
    """Vanilla encoder-decoder Transformer with greedy generation."""

    def __init__(self, vocab_size, d_model=128, nhead=8, num_layers=3,
                 dim_feedforward=512, max_len=5000):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, d_model, padding_idx=0)
        self.pos_encoder = PositionalEncoding(d_model, max_len)
        encoder_layer = nn.TransformerEncoderLayer(d_model, nhead,
                                                   dim_feedforward, dropout=0.1)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers)
        decoder_layer = nn.TransformerDecoderLayer(d_model, nhead,
                                                   dim_feedforward, dropout=0.1)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers)
        self.fc = nn.Linear(d_model, vocab_size)
        self.d_model = d_model
        self.sos_token_id = 101  # [CLS]
        self.eos_token_id = 102  # [SEP]

    def _generate_square_subsequent_mask(self, sz):
        # Causal mask: -inf above the diagonal so a position cannot attend ahead.
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')) \
                           .masked_fill(mask == 1, float(0.0))
        return mask

    def forward(self, src, tgt):
        src_mask = None
        tgt_mask = self._generate_square_subsequent_mask(tgt.size(1)).to(device)
        src_key_padding_mask = (src == 0)
        tgt_key_padding_mask = (tgt == 0)
        src = self.embedding(src) * np.sqrt(self.d_model)
        src = self.pos_encoder(src)
        tgt = self.embedding(tgt) * np.sqrt(self.d_model)
        tgt = self.pos_encoder(tgt)
        # nn.Transformer* modules expect (seq, batch, dim) by default.
        memory = self.transformer_encoder(
            src.transpose(0, 1), mask=src_mask,
            src_key_padding_mask=src_key_padding_mask)
        output = self.transformer_decoder(
            tgt.transpose(0, 1), memory, tgt_mask=tgt_mask, memory_mask=None,
            tgt_key_padding_mask=tgt_key_padding_mask,
            memory_key_padding_mask=src_key_padding_mask)
        return self.fc(output.transpose(0, 1))

    def generate(self, src, max_length=80):
        src_mask = None
        src_key_padding_mask = (src == 0)
        batch_size = src.size(0)
        src = self.embedding(src) * np.sqrt(self.d_model)
        src = self.pos_encoder(src)
        memory = self.transformer_encoder(
            src.transpose(0, 1), mask=src_mask,
            src_key_padding_mask=src_key_padding_mask)
        generated = torch.full((batch_size, 1), self.sos_token_id, device=device)
        for _ in range(max_length - 1):
            tgt_mask = self._generate_square_subsequent_mask(
                generated.size(1)).to(device)
            tgt_key_padding_mask = (generated == 0)
            tgt = self.embedding(generated) * np.sqrt(self.d_model)
            tgt = self.pos_encoder(tgt)
            output = self.transformer_decoder(
                tgt.transpose(0, 1), memory, tgt_mask=tgt_mask, memory_mask=None,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=src_key_padding_mask)
            output = self.fc(output.transpose(0, 1)[:, -1, :])
            next_token = torch.argmax(output, dim=1, keepdim=True)
            generated = torch.cat([generated, next_token], dim=1)
            if (next_token == self.eos_token_id).all():
                break
        return generated


# Training function
def train_model(model, train_loader, optimizer, criterion, epochs=3):
    """Train with teacher forcing; returns (model, elapsed_seconds).

    The ``optimizer`` argument is kept for interface compatibility, but a
    fresh Adam optimizer is always created internally (as in the original).
    """
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min',
                                                     patience=1, factor=0.5)
    start_time = time.time()
    for epoch in range(epochs):
        total_loss = 0
        model.train()
        for i, batch in enumerate(train_loader):
            src = batch['input_ids'].to(device)
            tgt = batch['labels'].to(device)
            # BUGFIX (CUDA device-side assert): labels mark padding with -100,
            # which is NOT a valid embedding index.  Build the decoder input
            # with the PAD id (0) instead; the loss still sees -100 and the
            # CrossEntropyLoss(ignore_index=-100) skips those positions.
            dec_in = tgt.clone()
            dec_in[dec_in == -100] = 0
            optimizer.zero_grad()
            outputs = model(src, dec_in[:, :-1])
            # Sanity-check model output
            if torch.isnan(outputs).any():
                print("警告:模型输出包含NaN,跳过此批次")
                continue
            loss = criterion(outputs.reshape(-1, outputs.size(-1)),
                             tgt[:, 1:].reshape(-1))
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)  # gradient clipping
            optimizer.step()
            total_loss += loss.item()
            if (i + 1) % 10 == 0:
                print(f"Epoch {epoch+1}/{epochs} | Batch {i+1}/{len(train_loader)} | Loss: {loss.item():.4f}")
        avg_loss = total_loss / len(train_loader)
        scheduler.step(avg_loss)
        print(f"Epoch {epoch+1} | 平均损失: {avg_loss:.4f}")
        torch.cuda.empty_cache()
    total_time = time.time() - start_time
    print(f"训练完成!总耗时: {total_time:.2f}s ({total_time/60:.2f}分钟)")
    return model, total_time


# Evaluation function
def evaluate_model(model, val_loader, tokenizer, num_examples=2):
    """Greedy-decode the validation set and report average ROUGE F1 scores."""
    model.eval()
    scorer = rouge_scorer.RougeScorer(['rouge1', 'rouge2', 'rougeL'],
                                      use_stemmer=True)
    rouge_scores = {'rouge1': [], 'rouge2': [], 'rougeL': []}
    valid_count = 0
    with torch.no_grad():
        for i, batch in enumerate(val_loader):
            src = batch['input_ids'].to(device)
            tgt = batch['labels'].to(device)
            generated = model.generate(src)
            for s, p, t in zip(src, generated, tgt):
                src_txt = tokenizer.decode(s, skip_special_tokens=True)
                pred_txt = tokenizer.decode(p, skip_special_tokens=True)
                # Drop the -100 loss sentinels before decoding the reference.
                true_txt = tokenizer.decode(t[t != -100], skip_special_tokens=True)
                if len(pred_txt.split()) > 3 and len(true_txt.split()) > 3:
                    valid_count += 1
                    if valid_count <= num_examples:
                        print(f"\n原文: {src_txt[:100]}...")
                        print(f"生成: {pred_txt}")
                        print(f"参考: {true_txt[:80]}...")
                        print("-" * 60)
                    if true_txt and pred_txt:
                        scores = scorer.score(true_txt, pred_txt)
                        for key in rouge_scores:
                            rouge_scores[key].append(scores[key].fmeasure)
    if valid_count > 0:
        avg_scores = {key: sum(rouge_scores[key]) / len(rouge_scores[key])
                      for key in rouge_scores}
        print(f"\n评估结果 (基于{valid_count}个样本):")
        print(f"ROUGE-1: {avg_scores['rouge1']*100:.2f}%")
        print(f"ROUGE-2: {avg_scores['rouge2']*100:.2f}%")
        print(f"ROUGE-L: {avg_scores['rougeL']*100:.2f}%")
    else:
        print("警告:未生成有效摘要")
        avg_scores = {key: 0.0 for key in rouge_scores}
    return avg_scores


# Visualize model performance
def visualize_model_performance(model_names, train_times, rouge_scores):
    """Bar charts: training time and ROUGE F1 per model; saves a PNG."""
    plt.figure(figsize=(15, 6))
    # Training-time comparison
    plt.subplot(1, 2, 1)
    bars = plt.bar(model_names, train_times)
    plt.title('模型训练时间对比')
    plt.ylabel('时间 (分钟)')
    for bar in bars:
        height = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2., height,
                 f'{height:.1f} min', ha='center', va='bottom')
    # ROUGE comparison
    plt.subplot(1, 2, 2)
    x = np.arange(len(model_names))
    width = 0.25
    plt.bar(x - width, [scores['rouge1'] for scores in rouge_scores],
            width, label='ROUGE-1')
    plt.bar(x, [scores['rouge2'] for scores in rouge_scores],
            width, label='ROUGE-2')
    plt.bar(x + width, [scores['rougeL'] for scores in rouge_scores],
            width, label='ROUGE-L')
    plt.title('模型ROUGE分数对比')
    plt.ylabel('F1分数')
    plt.xticks(x, model_names)
    plt.legend()
    plt.tight_layout()
    plt.savefig('performance_comparison.png')
    plt.show()
    print("性能对比图已保存为 performance_comparison.png")


# Interactive summarization
def interactive_summarization(models, tokenizer, model_names, max_length=80):
    """REPL loop: read text, print each model's generated summary ('q' quits)."""
    while True:
        print("\n" + "=" * 60)
        print("文本摘要交互式测试 (输入 'q' 退出)")
        print("=" * 60)
        input_text = input("请输入要摘要的文本:\n")
        if input_text.lower() == 'q':
            break
        if len(input_text) < 50:
            print("请输入更长的文本(至少50个字符)")
            continue
        inputs = tokenizer(input_text, max_length=384, truncation=True,
                           padding='max_length', return_tensors='pt').to(device)
        print("\n生成摘要中...")
        all_summaries = []
        for i, model in enumerate(models):
            model.eval()
            with torch.no_grad():
                generated = model.generate(inputs["input_ids"])
            summary = tokenizer.decode(generated[0], skip_special_tokens=True)
            all_summaries.append(summary)
            print(f"\n{model_names[i]} 摘要:")
            print("-" * 50)
            print(summary)
            print("-" * 50)
        print("\n所有模型摘要对比:")
        for i, (name, summary) in enumerate(zip(model_names, all_summaries)):
            print(f"{i+1}. {name}: {summary}")


# Main program
print("加载数据集...")
dataset = load_dataset("cnn_dailymail", "3.0.0")
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
vocab_size = len(tokenizer.vocab)

print("准备训练数据...")
train_ds = SummaryDataset(dataset['train'], tokenizer, subset_size=0.01)  # 1% of the data
val_ds = SummaryDataset(dataset['validation'], tokenizer, subset_size=0.01)
train_loader = DataLoader(train_ds, batch_size=4, shuffle=True, num_workers=0)
val_loader = DataLoader(val_ds, batch_size=8, shuffle=False, num_workers=0)

criterion = nn.CrossEntropyLoss(ignore_index=-100)

print("\n" + "=" * 60)
print("训练基础Seq2Seq模型")
print("=" * 60)
basic_model = BasicSeq2Seq(vocab_size, tokenizer=tokenizer).to(device)
trained_basic, basic_time = train_model(basic_model, train_loader, None,
                                        criterion, epochs=3)
basic_rouge = evaluate_model(trained_basic, val_loader, tokenizer)

print("\n" + "=" * 60)
print("训练注意力Seq2Seq模型")
print("=" * 60)
attn_model = AttnSeq2Seq(vocab_size, tokenizer=tokenizer).to(device)
trained_attn, attn_time = train_model(attn_model, train_loader, None,
                                      criterion, epochs=3)
attn_rouge = evaluate_model(trained_attn, val_loader, tokenizer)

print("\n" + "=" * 60)
print("训练Transformer模型")
print("=" * 60)
transformer_model = TransformerModel(vocab_size).to(device)
trained_transformer, transformer_time = train_model(transformer_model,
                                                    train_loader, None,
                                                    criterion, epochs=3)
transformer_rouge = evaluate_model(trained_transformer, val_loader, tokenizer)

print("\n" + "=" * 60)
print("模型性能对比")
print("=" * 60)
model_names = ['基础Seq2Seq', '注意力Seq2Seq', 'Transformer']
train_times = [basic_time / 60, attn_time / 60, transformer_time / 60]
rouge_scores = [basic_rouge, attn_rouge, transformer_rouge]
visualize_model_performance(model_names, train_times, rouge_scores)

print("\n" + "=" * 60)
print("交互式文本摘要测试")
print("=" * 60)
print("提示:输入一段文本,将同时生成三个模型的摘要结果")
interactive_summarization(
    [trained_basic, trained_attn, trained_transformer],
    tokenizer,
    model_names,
)
06-09
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from sklearn.preprocessing import MinMaxScaler
from vmdpy import VMD
from sklearn.cluster import OPTICS
import math
import matplotlib.pyplot as plt
from torch.cuda import amp
import time
import logging
from collections import Counter

# Logging setup
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# -------------------- Configuration --------------------
CONFIG = {
    'seq_length': 12,          # input sequence length
    'batch_size': 256,         # batch size
    'epochs': 150,             # training epochs
    'd_model': 64,             # model dimension
    'nhead': 4,                # attention heads
    'num_layers': 2,           # Transformer layers
    'dim_feedforward': 128,    # feed-forward dimension
    'dropout': 0.1,            # dropout rate
    'vmd_k': 8,                # number of VMD modes
    'vmd_alpha': 2000,         # VMD bandwidth constraint
    'vmd_tau': 0.1,            # VMD noise tolerance
    'sru_hidden': 32,          # SRU hidden units
    'sru_layers': 2,           # SRU layers
    'cluster_min_samples': 3,  # min samples for clustering
    'use_amp': True,           # mixed-precision training
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
}


# -------------------- Data preprocessing --------------------
def load_data(file_path):
    """Load and preprocess wind-power data.

    Returns a 2-D numpy array with columns [Power, EffectiveWind] (wind is
    simulated from power if the CSV has no wind-speed columns).
    """
    logger.info(f"加载数据: {file_path}")
    try:
        data = pd.read_csv(file_path)
        logger.info(f"数据列: {data.columns.tolist()}")
    except Exception as e:
        logger.error(f"加载文件错误: {e}")
        raise

    time_col = 'Time'
    power_col = 'Power'
    required_cols = [time_col, power_col]
    missing_cols = [col for col in required_cols if col not in data.columns]
    if missing_cols:
        logger.warning(f"警告: 缺少必要列 {missing_cols}")
    if time_col not in data.columns:
        logger.info("创建简单时间索引")
        data[time_col] = pd.date_range(start='2023-01-01',
                                       periods=len(data), freq='H')

    # Time-series regularization: hourly frequency
    data[time_col] = pd.to_datetime(data[time_col])
    data = data.set_index(time_col).resample('1h').asfreq()
    data.index = data.index.fillna(pd.Timestamp.now())

    if power_col not in data.columns:
        logger.error("错误: 缺少功率列")
        raise ValueError("CSV文件必须包含功率列")
    # Treat zero power as missing, interpolate over time, then backstop with 0.
    data[power_col] = data[power_col].replace(0, np.nan)
    data[power_col] = data[power_col].interpolate(method='time')
    data[power_col] = data[power_col].fillna(0)

    # Wind-speed feature: average of 10 m and 100 m measurements when present
    if 'windspeed_10m' in data.columns and 'windspeed_100m' in data.columns:
        logger.info("使用10m和100m风速数据计算有效风速")
        data['windspeed_10m'] = (data['windspeed_10m']
                                 .interpolate(method='time')
                                 .fillna(method='bfill'))
        data['windspeed_100m'] = (data['windspeed_100m']
                                  .interpolate(method='time')
                                  .fillna(method='bfill'))
        data['EffectiveWind'] = (data['windspeed_10m'] + data['windspeed_100m']) / 2
    else:
        logger.warning("风速数据缺失,生成模拟风速数据")
        # Synthesize a plausible wind speed from power (nonlinear, clipped)
        max_power = data[power_col].max()
        base_wind = np.clip(data[power_col] / max_power * 10, 2, 25)
        random_fluctuation = np.random.normal(0, 1.5, len(data))
        data['EffectiveWind'] = base_wind + random_fluctuation

    result_cols = ['Power']
    if 'EffectiveWind' in data.columns:
        result_cols.append('EffectiveWind')
    logger.info(f"返回列: {result_cols}")
    return data[result_cols].values


# -------------------- VMD decomposition (replaces CEEMDAN) --------------------
def vmd_decomposition(data, alpha=2000, tau=0.1, K=8):
    """Decompose the power series with VMD; returns K+1 components, each
    stacked column-wise with the meteorological feature."""
    logger.info("开始VMD分解...")
    start_time = time.time()
    power_data = data[:, 0].flatten()
    u, u_hat, omega = VMD(power_data, alpha, tau, K, DC=0, init=1, tol=1e-7)
    # BUGFIX: vmdpy truncates odd-length signals to an even length, so the
    # modes can be one sample shorter than the input.  Align lengths before
    # computing the residual, otherwise the subtraction shape-mismatches.
    n = u.shape[1]
    power_aligned = power_data[:n]
    imfs = u
    residual = power_aligned - np.sum(imfs, axis=0)
    components = np.vstack([imfs, residual])  # residual as the last component
    components_with_features = []
    for comp in components:
        # Attach the meteorological feature column to each component; when the
        # input has a single (power) column, pad with zeros instead.
        if data.shape[1] == 1:
            comp_with_features = np.column_stack((comp, np.zeros_like(comp)))
        else:
            comp_with_features = np.column_stack((comp, data[:n, 1]))
        components_with_features.append(comp_with_features)
    logger.info(f"VMD分解完成,耗时: {time.time() - start_time:.2f}秒")
    return components_with_features


# -------------------- Multi-dimensional feature extraction --------------------
def extract_features(component):
    """Return [sample entropy, permutation entropy, spectral energy] of the
    component's power column."""
    power_series = component[:, 0]

    # 1. Sample entropy
    def sample_entropy(series, m=2, alpha=0.2):
        n = len(series)
        if n < m + 1:
            return 0
        std = np.std(series)
        r = alpha * std

        def _phi(_m):
            x = np.array([series[i:i + _m] for i in range(n - _m + 1)])
            C = 0
            for i in range(len(x)):
                dist = np.max(np.abs(x[i] - x), axis=1)
                C += np.sum((dist < r) & (dist > 0))
            return C / ((n - _m) * (n - _m + 1))

        return -np.log(_phi(m + 1) / _phi(m)) if _phi(m) != 0 else 0

    # 2. Permutation entropy
    def permutation_entropy(series, d=3, tau=1):
        n = len(series)
        if n < d * tau:
            return 0
        permutations = []
        for i in range(n - d * tau + 1):
            segment = series[i:i + d * tau:tau]
            permutations.append(tuple(np.argsort(segment)))
        # BUGFIX: np.unique on a list of tuples flattens it to single symbols
        # and yields the wrong distribution; count whole permutations instead.
        counts = np.array(list(Counter(permutations).values()), dtype=float)
        probs = counts / counts.sum()
        return -np.sum(probs * np.log(probs))

    # 3. Spectral energy (share of low-frequency magnitude)
    fft_vals = np.abs(np.fft.rfft(power_series))
    spectral_energy = np.sum(fft_vals[:len(fft_vals) // 2]) / np.sum(fft_vals)

    return np.array([
        sample_entropy(power_series),
        permutation_entropy(power_series),
        spectral_energy,
    ])


# -------------------- Lightweight Transformer (high-frequency series) ----------
class ProbSparseAttention(nn.Module):
    """Probabilistic sparse attention - samples keys to reduce complexity."""

    def __init__(self, d_model, n_heads, factor=5):
        super().__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        self.factor = factor
        self.head_dim = d_model // n_heads

    def forward(self, Q, K, V):
        batch_size, seq_len, _ = Q.size()
        # Sample a logarithmic number of key positions
        M = self.factor * int(np.ceil(np.log(seq_len)))
        sample_indices = torch.randperm(seq_len)[:M]
        K_sampled = K[:, sample_indices, :]
        V_sampled = V[:, sample_indices, :]
        Q = Q.view(batch_size, seq_len, self.n_heads, self.head_dim).transpose(1, 2)
        K_sampled = K_sampled.view(batch_size, M, self.n_heads,
                                   self.head_dim).transpose(1, 2)
        V_sampled = V_sampled.view(batch_size, M, self.n_heads,
                                   self.head_dim).transpose(1, 2)
        attn_scores = torch.matmul(Q, K_sampled.transpose(-2, -1)) / np.sqrt(self.head_dim)
        attn_weights = F.softmax(attn_scores, dim=-1)
        output = torch.matmul(attn_weights, V_sampled)
        output = output.transpose(1, 2).contiguous().view(batch_size, seq_len,
                                                          self.d_model)
        return output


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding with dropout (batch-first)."""

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float()
                             * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)


class DistillingLayer(nn.Module):
    """Distilling layer - halves the sequence length via strided conv."""

    def __init__(self, d_model):
        super().__init__()
        self.conv = nn.Conv1d(in_channels=d_model, out_channels=d_model,
                              kernel_size=3, stride=2, padding=1)
        self.activation = nn.ReLU()

    def forward(self, x):
        # x: [batch, seq_len, d_model] -> conv over the time axis
        x = x.permute(0, 2, 1)
        x = self.conv(x)
        x = self.activation(x)
        return x.permute(0, 2, 1)  # [batch, new_seq, d_model]


class EfficientTransformer(nn.Module):
    """Efficient Transformer using distilling layers between encoder blocks."""

    def __init__(self, input_dim, d_model=64, nhead=4, num_layers=2,
                 dim_feedforward=128, dropout=0.1):
        super().__init__()
        self.d_model = d_model
        self.embedding = nn.Linear(input_dim, d_model)
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        self.encoder_layers = nn.ModuleList()
        for _ in range(num_layers):
            self.encoder_layers.append(nn.TransformerEncoderLayer(
                d_model=d_model, nhead=nhead,
                dim_feedforward=dim_feedforward, dropout=dropout,
                batch_first=True))
        # One distilling layer between each pair of encoder layers
        self.distill_layers = nn.ModuleList([
            DistillingLayer(d_model) for _ in range(num_layers - 1)
        ])
        self.decoder = nn.TransformerDecoder(
            nn.TransformerDecoderLayer(
                d_model=d_model, nhead=nhead,
                dim_feedforward=dim_feedforward, dropout=dropout,
                batch_first=True),
            num_layers=1)
        self.output_layer = nn.Linear(d_model, 1)

    def forward(self, src, tgt=None):
        src = self.embedding(src) * math.sqrt(self.d_model)
        src = self.pos_encoder(src)
        # Encode with interleaved distillation (sequence shrinks each step)
        for i, layer in enumerate(self.encoder_layers):
            src = layer(src)
            if i < len(self.distill_layers):
                src = self.distill_layers[i](src)
        # Decode; a zero query is used when no target is given
        if tgt is None:
            tgt = torch.zeros(src.size(0), src.size(1), self.d_model,
                              device=src.device)
        else:
            tgt = self.embedding(tgt)
            tgt = self.pos_encoder(tgt)
        output = self.decoder(tgt, src)
        output = self.output_layer(output)
        # Prediction is the last time step, squeezed to (batch,)
        return output[:, -1, :].squeeze(-1)


# -------------------- SRU model (low-frequency series) --------------------
class SRU(nn.Module):
    """Simple Recurrent Unit - a lighter alternative to GRU."""

    def __init__(self, input_size, hidden_size, num_layers=2):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # One gate projection per layer: produces forget/reset/candidate
        self.gates = nn.ModuleList()
        for i in range(num_layers):
            in_dim = input_size if i == 0 else hidden_size
            self.gates.append(nn.Linear(in_dim + hidden_size, 3 * hidden_size))

    def forward(self, x):
        batch_size, seq_len, _ = x.size()
        h = torch.zeros(self.num_layers, batch_size, self.hidden_size,
                        device=x.device)
        outputs = []
        for t in range(seq_len):
            input_t = x[:, t, :]
            new_h = []
            for i in range(self.num_layers):
                layer_input = input_t if i == 0 else new_h[i - 1]
                combined = torch.cat((layer_input, h[i]), dim=1)
                gates = self.gates[i](combined)
                f, r, c = torch.chunk(gates, 3, dim=1)
                f = torch.sigmoid(f)
                r = torch.sigmoid(r)
                c = torch.tanh(c)
                # Leaky state update gated by f; output gated by r
                h_i = f * h[i] + (1 - f) * c
                output = r * h_i
                new_h.append(h_i)
                input_t = output
            outputs.append(output)
            h = torch.stack(new_h, dim=0)
        return torch.stack(outputs, dim=1)


class SRUAttention(nn.Module):
    """SRU with a learned attention pooling over time steps."""

    def __init__(self, input_dim, hidden_size=32, num_layers=2):
        super().__init__()
        self.sru = SRU(input_dim, hidden_size, num_layers)
        self.attention = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1),
            nn.Softmax(dim=1),
        )
        self.fc = nn.Linear(hidden_size, 1)

    def forward(self, x):
        sru_out = self.sru(x)                       # [batch, seq_len, hidden]
        attn_weights = self.attention(sru_out)      # [batch, seq_len, 1]
        context = torch.sum(attn_weights * sru_out, dim=1)
        return self.fc(context).squeeze(-1)


# -------------------- Model routing network --------------------
class RoutingNetwork(nn.Module):
    """Dynamic router - blends Transformer and SRU predictions by features."""

    def __init__(self, transformer, sru_model):
        super().__init__()
        self.transformer = transformer
        self.sru_model = sru_model
        self.router = nn.Sequential(
            nn.Linear(3, 16),  # 3 input features
            nn.ReLU(),
            nn.Linear(16, 2),
            nn.Softmax(dim=1),
        )

    def forward(self, x, features):
        # features: [sample entropy, permutation entropy, spectral energy]
        route_probs = self.router(features)
        trans_pred = self.transformer(x)
        sru_pred = self.sru_model(x)
        # Soft (weighted) combination of the two experts
        return route_probs[:, 0] * trans_pred + route_probs[:, 1] * sru_pred


# -------------------- Main pipeline (with dynamic routing) --------------------
if __name__ == "__main__":
    try:
        # Reproducibility
        torch.manual_seed(42)
        np.random.seed(42)

        file_path = 'G:/shuju/Location1.csv'
        raw_data = load_data(file_path)
        logger.info(f"数据形状: {raw_data.shape}")
        logger.info(f"前5行数据:\n{raw_data[:5]}")

        # Per-column min-max scaling (scalers kept for inverse transform)
        scalers = []
        scaled_data = np.zeros_like(raw_data)
        for i in range(raw_data.shape[1]):
            scaler = MinMaxScaler(feature_range=(0, 1))
            scaled_data[:, i] = scaler.fit_transform(
                raw_data[:, i].reshape(-1, 1)).flatten()
            scalers.append(scaler)

        # VMD decomposition (replaces CEEMDAN)
        components = vmd_decomposition(scaled_data,
                                       alpha=CONFIG['vmd_alpha'],
                                       tau=CONFIG['vmd_tau'],
                                       K=CONFIG['vmd_k'])

        logger.info("提取特征并聚类分组...")
        features = np.array([extract_features(comp) for comp in components])
        clusterer = OPTICS(min_samples=CONFIG['cluster_min_samples'], xi=0.05)
        labels = clusterer.fit_predict(features)

        # Group components by cluster label (power column only)
        grouped_components = {}
        for i, label in enumerate(labels):
            grouped_components.setdefault(label, []).append(components[i][:, 0])

        # Reconstruct one series per cluster
        reconstructed_series = []
        for label, comp_list in grouped_components.items():
            if len(comp_list) > 1:
                reconstructed_series.append(np.sum(comp_list, axis=0))
            else:
                reconstructed_series.append(comp_list[0])
        logger.info(f"重构为 {len(reconstructed_series)} 个序列")

        def create_sequences(data, seq_length):
            """Sliding-window supervised pairs: X=[t..t+L), y=t+L."""
            X, y = [], []
            for i in range(len(data) - seq_length):
                X.append(data[i:i + seq_length])
                y.append(data[i + seq_length])
            return np.array(X), np.array(y)

        datasets = []
        for series in reconstructed_series:
            X, y = create_sequences(series, CONFIG['seq_length'])
            datasets.append((X, y))

        # Model initialization
        transformer = EfficientTransformer(
            input_dim=1, d_model=CONFIG['d_model'], nhead=CONFIG['nhead'],
            num_layers=CONFIG['num_layers'],
            dim_feedforward=CONFIG['dim_feedforward'],
            dropout=CONFIG['dropout']).to(CONFIG['device'])
        sru_model = SRUAttention(
            input_dim=1, hidden_size=CONFIG['sru_hidden'],
            num_layers=CONFIG['sru_layers']).to(CONFIG['device'])
        routing_net = RoutingNetwork(transformer, sru_model).to(CONFIG['device'])

        criterion = nn.MSELoss()
        optimizer = torch.optim.Adam(routing_net.parameters(), lr=0.001)
        scaler = amp.GradScaler(enabled=CONFIG['use_amp'])  # for mixed precision

        logger.info("开始训练...")
        start_time = time.time()

        # Merge all per-cluster datasets; replicate each series' features
        # onto every one of its samples.
        all_X, all_y, all_features = [], [], []
        for i, (X, y) in enumerate(datasets):
            seq_features = features[i]
            all_X.append(X)
            all_y.append(y)
            all_features.extend([seq_features] * len(X))
        all_X = np.concatenate(all_X)
        all_y = np.concatenate(all_y)
        all_features = np.array(all_features)

        # Chronological 90/10 split
        split_index = int(len(all_X) * 0.9)
        train_dataset = TensorDataset(
            torch.tensor(all_X[:split_index], dtype=torch.float32),
            torch.tensor(all_y[:split_index], dtype=torch.float32),
            torch.tensor(all_features[:split_index], dtype=torch.float32))
        test_dataset = TensorDataset(
            torch.tensor(all_X[split_index:], dtype=torch.float32),
            torch.tensor(all_y[split_index:], dtype=torch.float32),
            torch.tensor(all_features[split_index:], dtype=torch.float32))
        train_loader = DataLoader(train_dataset,
                                  batch_size=CONFIG['batch_size'], shuffle=True)
        test_loader = DataLoader(test_dataset, batch_size=CONFIG['batch_size'])

        # Training loop
        for epoch in range(CONFIG['epochs']):
            routing_net.train()
            epoch_loss = 0
            for inputs, targets, feat in train_loader:
                inputs = inputs.to(CONFIG['device'])
                targets = targets.to(CONFIG['device'])
                feat = feat.to(CONFIG['device'])
                optimizer.zero_grad()
                with amp.autocast(enabled=CONFIG['use_amp']):
                    outputs = routing_net(inputs.unsqueeze(-1), feat)
                    loss = criterion(outputs, targets)
                scaler.scale(loss).backward()
                scaler.step(optimizer)
                scaler.update()
                epoch_loss += loss.item()
            avg_loss = epoch_loss / len(train_loader)
            logger.info(f"Epoch {epoch + 1}/{CONFIG['epochs']}, Loss: {avg_loss:.6f}")
        logger.info(f"训练完成,总耗时: {time.time() - start_time:.2f}秒")

        def inverse_transform(predictions, feature_idx=0):
            """Undo the min-max scaling of the given feature column."""
            return scalers[feature_idx].inverse_transform(
                predictions.reshape(-1, 1))

        # Prediction & evaluation
        routing_net.eval()
        all_preds, all_targets = [], []
        with torch.no_grad():
            for inputs, targets, feat in test_loader:
                inputs = inputs.to(CONFIG['device'])
                targets = targets.to(CONFIG['device'])
                feat = feat.to(CONFIG['device'])
                outputs = routing_net(inputs.unsqueeze(-1), feat)
                all_preds.append(outputs.cpu().numpy())
                all_targets.append(targets.cpu().numpy())
        all_preds = np.concatenate(all_preds)
        all_targets = np.concatenate(all_targets)

        final_pred = inverse_transform(all_preds)
        y_true = inverse_transform(all_targets)

        from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
        rmse = np.sqrt(mean_squared_error(y_true, final_pred))
        mae = mean_absolute_error(y_true, final_pred)
        r2 = r2_score(y_true, final_pred)
        # BUGFIX: the scraped source contained the HTML entity "R&sup2;";
        # restored to the intended "R²".
        logger.info(f"最终评估 - RMSE: {rmse:.4f}, MAE: {mae:.4f}, R²: {r2:.4f}")

        # Plot prediction vs ground truth
        plt.figure(figsize=(15, 6))
        plt.plot(y_true[:500], label='True', linewidth=2)
        plt.plot(final_pred[:500], label='Predicted', linestyle='--')
        plt.title(f'Wind Power Prediction\nRMSE: {rmse:.2f}, MAE: {mae:.2f}, R²: {r2:.4f}')
        plt.xlabel('Time Steps')
        plt.ylabel('Power (MW)')
        plt.legend()
        plt.grid(True)
        plt.savefig('optimized_prediction_comparison.png', dpi=300)
        plt.show()

        # Save the trained model
        torch.save(routing_net.state_dict(), 'optimized_wind_power_model.pth')
        logger.info("模型已保存")
    except Exception as e:
        logger.error(f"程序出错: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
08-26
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值