import torch
from transformers import XLNetTokenizer, XLNetForSequenceClassification
from torch.utils.data import DataLoader, TensorDataset

# Toy corpus for a binary sentiment-style classification demo.
texts = ["This is a positive example.", "This is a negative example.", "Another positive example."]
# 1 = positive, 0 = negative
labels = [1, 0, 1]

# Load the pretrained XLNet model and its matching tokenizer.
model_name = "xlnet-base-cased"
tokenizer = XLNetTokenizer.from_pretrained(model_name)
model = XLNetForSequenceClassification.from_pretrained(model_name)

# Tokenize and encode all texts at once, padded/truncated to a common length.
tokenized_texts = tokenizer(texts, padding=True, truncation=True, return_tensors='pt')
# Convert the labels to a PyTorch tensor.
label_tensor = torch.tensor(labels)

# Build the dataset and a shuffling loader.
dataset = TensorDataset(tokenized_texts['input_ids'], tokenized_texts['attention_mask'], label_tensor)
batch_size = 2
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Select the training device (GPU when available).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Optimizer only: when `labels=` is passed to the model it computes the
# cross-entropy loss internally, so no separate criterion is needed.
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)

# Train for a few epochs. `model.train()` enables dropout etc., mirroring
# the explicit `model.eval()` used below for inference.
epochs = 3
model.train()
for epoch in range(epochs):
    for input_ids, attention_mask, batch_labels in dataloader:
        # Renamed from `labels` to avoid shadowing the module-level list.
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        batch_labels = batch_labels.to(device)
        optimizer.zero_grad()
        outputs = model(input_ids, attention_mask=attention_mask, labels=batch_labels)
        loss = outputs.loss
        loss.backward()
        optimizer.step()

# Inference on held-out sentences.
model.eval()
with torch.no_grad():
    test_texts = ["This is a test sentence.", "Another test sentence."]
    tokenized_test_texts = tokenizer(test_texts, padding=True, truncation=True, return_tensors='pt')
    input_ids = tokenized_test_texts['input_ids'].to(device)
    attention_mask = tokenized_test_texts['attention_mask'].to(device)
    outputs = model(input_ids, attention_mask=attention_mask)
    logits = outputs.logits
    # Predicted class index per sentence (argmax over the 2 logits).
    predictions = torch.argmax(logits, dim=1)
    print("Predictions:", predictions.tolist())
import torch
from transformers import XLNetTokenizer, XLNetForSequenceClassification
from torch.utils.data import DataLoader, TensorDataset

# NOTE(review): this block duplicates the training/eval script earlier in this
# file. It is fixed to be runnable here, but one copy should likely be removed.

# Toy corpus: 1 = positive class, 0 = negative class.
texts = ["This is a positive example.", "This is a negative example.", "Another positive example."]
labels = [1, 0, 1]

# Pretrained XLNet checkpoint plus its tokenizer.
model_name = "xlnet-base-cased"
tokenizer = XLNetTokenizer.from_pretrained(model_name)
model = XLNetForSequenceClassification.from_pretrained(model_name)

# Batch-encode the texts to fixed-length tensors.
tokenized_texts = tokenizer(texts, padding=True, truncation=True, return_tensors='pt')

dataset = TensorDataset(
    tokenized_texts['input_ids'],
    tokenized_texts['attention_mask'],
    torch.tensor(labels),
)
batch_size = 2
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Prefer GPU when one is present.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# No explicit loss function: the HF model returns the cross-entropy loss
# itself whenever `labels=` is supplied in the forward call.
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)

# Fine-tune; train() switches dropout on, matching the eval() call below.
epochs = 3
model.train()
for epoch in range(epochs):
    for batch in dataloader:
        ids, mask, targets = (t.to(device) for t in batch)
        optimizer.zero_grad()
        loss = model(ids, attention_mask=mask, labels=targets).loss
        loss.backward()
        optimizer.step()

# Evaluate on two unseen sentences.
model.eval()
with torch.no_grad():
    test_texts = ["This is a test sentence.", "Another test sentence."]
    tokenized_test_texts = tokenizer(test_texts, padding=True, truncation=True, return_tensors='pt')
    input_ids = tokenized_test_texts['input_ids'].to(device)
    attention_mask = tokenized_test_texts['attention_mask'].to(device)
    logits = model(input_ids, attention_mask=attention_mask).logits
    predictions = torch.argmax(logits, dim=1)  # class index per sentence
    print("Predictions:", predictions.tolist())