Adding compute_metrics to the Transformers Trainer

from transformers import DistilBertForSequenceClassification, Trainer, TrainingArguments
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(pred):
    # pred is an EvalPrediction: pred.predictions holds the model logits,
    # pred.label_ids the gold labels
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='binary')
    acc = accuracy_score(labels, preds)
    return {
        'accuracy': acc,
        'f1': f1,
        'precision': precision,
        'recall': recall
    }
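
As a quick sanity check (a sketch, not part of the training flow), compute_metrics can be called directly on a hand-built EvalPrediction; the logits below are dummy values for illustration only:

import numpy as np
from transformers import EvalPrediction

# Dummy logits for two examples and their gold labels (illustrative only)
dummy = EvalPrediction(predictions=np.array([[0.1, 0.9], [0.8, 0.2]]),
                       label_ids=np.array([1, 0]))
print(compute_metrics(dummy))  # all four metrics should come out as 1.0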


# Add evaluation_strategy
training_args = TrainingArguments(
    output_dir='./classification_results',          # output directory
    num_train_epochs=5,              # total number of training epochs
    per_device_train_batch_size=16,  # batch size per device during training
    per_device_eval_batch_size=64,   # batch size for evaluation
    warmup_steps=5000,                # number of warmup steps for learning rate scheduler
    weight_decay=0.01,               # strength of weight decay
    logging_dir='./classification_logs',            # directory for storing logs
    evaluation_strategy='steps',      # "no": no evaluation during training
                                      # "steps": evaluate (and log) every eval_steps
                                      #          (defaults to logging_steps when unset)
                                      # "epoch": evaluate at the end of each epoch
    logging_steps=100,
)

model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")
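
Note that distilbert-base-uncased ships without a classification head, so the classifier weights here are newly initialized; the warning transformers prints about untrained weights is expected and goes away once the model is fine-tuned.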

# Pass compute_metrics to the Trainer (the function is defined above)
trainer = Trainer(
    model=model,                         # the instantiated 🤗 Transformers model to be trained
    args=training_args,                  # training arguments, defined above
    train_dataset=train_dataset,         # training dataset
    eval_dataset=val_dataset,            # evaluation dataset
    compute_metrics=compute_metrics
)

trainer.train()
trainer.evaluate()
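
Trainer.evaluate() runs the evaluation loop and returns the metrics as a dictionary; each key produced by compute_metrics comes back prefixed with eval_, alongside eval_loss and runtime statistics. A minimal way to inspect the result:

metrics = trainer.evaluate()
print(metrics)
# e.g. {'eval_loss': ..., 'eval_accuracy': ..., 'eval_f1': ...,
#       'eval_precision': ..., 'eval_recall': ..., 'eval_runtime': ..., ...}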

A related question: the following script fine-tunes BERT for sequence classification on a CPU-only machine, freezing the base model and training only the classification head.

from transformers import BertTokenizer, BertForSequenceClassification, TrainingArguments, Trainer
from datasets import load_from_disk
import numpy as np
from sklearn.metrics import accuracy_score

def load_processed_data():
    dataset = load_from_disk("../data/processed_imdb")
    return dataset['train'], dataset['test']

# Preprocessing function
def preprocess_function(examples):
    return tokenizer(
        examples['text'],
        truncation=True,
        padding='max_length',
        max_length=64
    )

# Evaluation metric
def compute_metrics(p):
    preds = np.argmax(p.predictions, axis=1)
    return {'accuracy': accuracy_score(p.label_ids, preds)}

if __name__ == '__main__':
    train_data, test_data = load_processed_data()

    # Load the BERT tokenizer
    tokenizer = BertTokenizer.from_pretrained("C:/Users/Administrator/bert_cache")

    # Apply preprocessing
    train_data = train_data.map(preprocess_function, batched=True)
    test_data = test_data.map(preprocess_function, batched=True)

    # Rename the label column to match the Hugging Face convention
    train_data = train_data.rename_column('label', 'labels')
    test_data = test_data.rename_column('label', 'labels')

    # Set PyTorch tensor format
    train_data.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])
    test_data.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])

    # Load the pretrained model
    model = BertForSequenceClassification.from_pretrained(
        "C:/Users/Administrator/bert_cache",
        num_labels=2
    )

    # Freeze all base BERT parameters (embeddings + transformer layers)
    for param in model.bert.parameters():
        param.requires_grad = False  # ⭐ disable gradient computation

    # Keep only the classification head trainable (the original
    # `model.classifier.requires_grad = True` set an attribute on the
    # module, not on its parameters; iterate over the parameters instead)
    for param in model.classifier.parameters():
        param.requires_grad = True

    training_args = TrainingArguments(
        output_dir='./results',             # output directory
        num_train_epochs=1,                 # number of training epochs
        per_device_train_batch_size=4,      # training batch size
        gradient_accumulation_steps=1,      # no accumulation (effective batch size = 4)
        per_device_eval_batch_size=16,      # evaluation batch size
        warmup_steps=500,                   # learning rate warmup steps
        weight_decay=0.01,                  # weight decay
        logging_dir='./logs',               # log directory
        logging_steps=100,                  # reduce logging frequency
        eval_strategy='epoch',              # evaluate once per epoch
        save_strategy='epoch',
        gradient_checkpointing=False,       # disabled; checkpointing trades compute time for memory
        optim="adamw_torch",                # PyTorch's native AdamW implementation
    )

    # Create the Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_data,
        eval_dataset=test_data,             # in practice, use a held-out validation set
        compute_metrics=compute_metrics
    )

    # Start training
    trainer.train()

    # Save the model
    model.save_pretrained('./my_bert_model')
    tokenizer.save_pretrained('./my_bert_model')

    # Reloading the model later:
    # from_pretrained_model = BertForSequenceClassification.from_pretrained('./my_bert_model')

This is the current code; it is CPU-only and cannot use a GPU. How can training be made faster by changing only the code?
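
One direction worth trying (a sketch under stated assumptions, not a definitive answer): with padding='max_length' every batch is padded to the full 64 tokens, so tokenizing without fixed-length padding and letting a DataCollatorWithPadding pad each batch only to its own longest sequence avoids wasted computation; explicitly pinning the PyTorch thread count to the machine's physical core count also tends to help on CPU. The thread count of 8 below is an assumption, not a measured value.

import torch
from transformers import DataCollatorWithPadding

torch.set_num_threads(8)  # assumption: 8 physical cores; adjust to your machine

# Tokenize WITHOUT fixed-length padding; the collator pads each
# batch to its own longest sequence instead
def preprocess_function(examples):
    return tokenizer(examples['text'], truncation=True, max_length=64)

data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_data,
    eval_dataset=test_data,
    data_collator=data_collator,   # dynamic per-batch padding
    compute_metrics=compute_metrics
)

A further step in the same spirit, since the BERT base is frozen anyway: run the base model over the dataset once, cache its outputs, and train only the classification head on the cached features, which removes the repeated forward passes through the frozen layers.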