Error when calling an ordinary function module with STARTING NEW TASK

When a function module is called with STARTING NEW TASK, it must be a remote-enabled function module, because this addition executes the call as an asynchronous RFC in a new session.
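If it is not obvious whether the target function module is remote-enabled, its processing type can be checked in SE37 (Attributes tab) or read from the function directory. The following is only a minimal sketch, assuming the standard directory table TFDIR, where FMODE = 'R' marks a remote-enabled module:

DATA lv_fmode TYPE tfdir-fmode.

* Read the processing type of the function module from the function directory.
SELECT SINGLE fmode FROM tfdir INTO lv_fmode
  WHERE funcname = 'ZMMFM0201'.

IF sy-subrc <> 0 OR lv_fmode <> 'R'.
  " Not remote-enabled: STARTING NEW TASK will raise the error shown below.
  WRITE: / 'ZMMFM0201 cannot be used with STARTING NEW TASK.'.
ENDIF.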

 

Error: FUNCTION module 'ZMMFM0021' cannot be used for 'remote' CALLS.

 CALL FUNCTION 'ZMMFM0201' STARTING NEW TASK pv_zproid
    TABLES
      it_item = lt_item.
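
The direct fix is to open the function module in SE37 and set its processing type to "Remote-Enabled Module" on the Attributes tab (keeping in mind that RFC-enabled function modules must pass their IMPORTING/EXPORTING parameters by value). If the original module cannot be changed, a common workaround is a thin RFC-enabled wrapper. The sketch below is only an outline of that approach; the wrapper name ZMMFM0201_RFC is hypothetical.

* Hypothetical wrapper, created in SE37 with processing type "Remote-Enabled Module"
* and the same TABLES parameter IT_ITEM as the original function module.
FUNCTION zmmfm0201_rfc.

  " Inside the new session the ordinary function module can be called locally,
  " so the RFC restriction no longer applies.
  CALL FUNCTION 'ZMMFM0201'
    TABLES
      it_item = it_item.

ENDFUNCTION.

* The asynchronous call then targets the wrapper instead of the ordinary module.
CALL FUNCTION 'ZMMFM0201_RFC' STARTING NEW TASK pv_zproid
  TABLES
    it_item = lt_item.

If results are needed back in the calling program, add PERFORMING ... ON END OF TASK (or CALLING ... ON END OF TASK) to the call and read them with RECEIVE RESULTS FROM FUNCTION in the callback.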

Reposted from: https://www.cnblogs.com/rainysblog/p/11057859.html
