Text Classification with XLNet

This article shows how to load a pretrained XLNet model with HuggingFace's Transformers library in Python and fine-tune it for sentiment classification. The code covers data preprocessing, model training, and testing, and runs on either GPU or CPU.
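The example assumes that torch and transformers are already installed (for example via pip install torch transformers).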

import torch
from transformers import XLNetTokenizer, XLNetForSequenceClassification
from torch.utils.data import DataLoader, TensorDataset

# Example text data
texts = ["This is a positive example.", "This is a negative example.", "Another positive example."]

# Example labels: 1 = positive, 0 = negative
labels = [1, 0, 1]

# Load the XLNet model and tokenizer
model_name = "xlnet-base-cased"
tokenizer = XLNetTokenizer.from_pretrained(model_name)
model = XLNetForSequenceClassification.from_pretrained(model_name)

# Tokenize and encode the texts
tokenized_texts = tokenizer(texts, padding=True, truncation=True, return_tensors='pt')

# Convert the labels to a PyTorch tensor
labels = torch.tensor(labels)

# Build the dataset
dataset = TensorDataset(tokenized_texts['input_ids'], tokenized_texts['attention_mask'], labels)

# Build the data loader
batch_size = 2
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Select the training device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Define the optimizer; no separate loss function is needed, because the model
# computes the cross-entropy loss internally when `labels` is passed
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)

# Train the model
epochs = 3
model.train()
for epoch in range(epochs):
    for input_ids, attention_mask, batch_labels in dataloader:
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        batch_labels = batch_labels.to(device)

        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask=attention_mask, labels=batch_labels)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
    print(f"Epoch {epoch + 1}/{epochs}, loss: {loss.item():.4f}")

# Test the model
model.eval()
with torch.no_grad():
    test_texts = ["This is a test sentence.", "Another test sentence."]
    tokenized_test_texts = tokenizer(test_texts, padding=True, truncation=True, return_tensors='pt')
    input_ids = tokenized_test_texts['input_ids'].to(device)
    attention_mask = tokenized_test_texts['attention_mask'].to(device)

    outputs = model(input_ids, attention_mask=attention_mask)
    logits = outputs.logits
    predictions = torch.argmax(logits, dim=1)
    print("Predictions:", predictions.tolist())
 

