pytorch-task4

This post presents a deep-learning model for Chinese text classification built on Word2Vec and TextCNN. The model first obtains word vectors via Word2Vec, then uses a TextCNN for feature extraction and classification. Data preprocessing, model construction, and the training procedure are described in detail.
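For reference, each line of the cnews files read below holds a tab-separated label and document body. A hypothetical example line (assuming the standard THUCNews format, whose ten category names match class_num = 10):

体育	马晓旭意外受伤让国奥警惕 无奈大雨格外青睐殷家军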


import os
import csv
import jieba
import warnings
import numpy as np
import pandas as pd
from tqdm import tqdm
from gensim.models import Word2Vec
import torch
import torch.nn as nn
from torch.optim import Adam

warnings.filterwarnings("ignore")

class_num = 10
batch_size = 256
maxlen = 500
word2vec_size = 100

train_dir = './data/cnews/cnews.train.txt'
valid_dir = './data/cnews/cnews.val.txt'
test_dir = './data/cnews/cnews.test.txt'
word2vec_dir = './word2vec/word2vec.hdf5'
userdict_dir = './dict/userdict.txt'
stopword_dir = './dict/stopword.txt'

def cut_word(x, stop_word):
    words = []
    for word in list(jieba.cut(x)):
        if word not in stop_word and len(word) != 1:
            words.append(word)
    return words
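
# Illustrative example (actual tokens depend on jieba's dictionary and the stop
# word list): single-character tokens and stop words are dropped, so
#   cut_word('今天天气真不错', stop_word)  ->  e.g. ['今天', '天气', '不错']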


def get_word_vocab(content):
    # Collect the distinct words across all tokenised documents
    word_vocab = []
    for sentence in content:
        word_vocab.extend(list(set(sentence)))
    return list(set(word_vocab))


def get_x(content, word_index):
    # Map each tokenised document to a fixed-length array of word indices
    # (np.zeros, not np.array, so short documents are zero-padded)
    X = np.zeros((len(content), maxlen))
    for i in range(len(content)):
        for j in range(min(len(content[i]), maxlen)):
            X[i][j] = word_index[content[i][j]]
    return X


def get_label_vector(label):
    label_code = pd.get_dummies(list(set(label)))
    label_vector = dict()
    for col in label_code.columns:
        label_vector[col] = label_code[col].tolist()
    return label_vector
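
# Illustration (row order depends on set iteration, so the exact vectors may differ):
#   get_label_vector(['体育', '财经', '体育'])  ->  e.g. {'体育': [1, 0], '财经': [0, 1]}
# i.e. each distinct label is mapped to its one-hot column from pd.get_dummies.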


print('read data')
# Note: cnews.train.txt is not loaded here; the smaller test split serves as the
# train/valid pool and the val split as the held-out test set.
data = pd.read_csv(test_dir, delimiter='\t', index_col=None, names=['label', 'content'])
test = pd.read_csv(valid_dir, delimiter='\t', index_col=None, names=['label', 'content'])

print(data.shape)
print(test.shape)

print('cut word')
jieba.load_userdict(userdict_dir)
stop_word = pd.read_csv(stopword_dir, quoting=csv.QUOTE_NONE, index_col=None, names=['word'])['word'].tolist()
data['content'] = data['content'].apply(lambda x: cut_word(x, stop_word))
test['content'] = test['content'].apply(lambda x: cut_word(x, stop_word))
content = pd.concat([data['content'], test['content']], axis=0, ignore_index=True)

print('word vocab')
word_vocab = get_word_vocab(content)
word_index = dict(zip(word_vocab, range(1, len(word_vocab) + 1)))
index_word = dict(zip(list(word_index.values()), list(word_index.keys())))

print('word2vec')
if not os.path.exists(word2vec_dir):
    # gensim < 4.0 API; in gensim >= 4.0 these arguments are vector_size= and epochs=
    model = Word2Vec(content, size=word2vec_size, seed=2019, min_count=6, window=10, iter=8, workers=8)
    model.save(word2vec_dir)
else:
    model = Word2Vec.load(word2vec_dir)

# Row 0 is reserved for padding; words pruned by min_count keep all-zero vectors
embedding_matrix = np.zeros((len(word_index) + 1, word2vec_size))
for word, i in word_index.items():
    if word in model.wv:
        embedding_matrix[i] = model.wv[word]
print(embedding_matrix.shape)
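# Optional coverage check (sketch): how many vocabulary words actually received
# a pretrained vector rather than the zero row
#   hits = sum(1 for w in word_index if w in model.wv)
#   print('word2vec coverage:', hits, '/', len(word_index))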

print('label_vector')
label_vector = get_label_vector(data['label'])

class DataLoader(object):
    def __init__(self, data, config):
        self.data = data
        self.batch_size = config['batch_size']
        self.maxlen = config['maxlen']
        self.word_index = config['word_index']
        self.label_vector = config['label_vector']

    def pad_sequences(self, content):
        # Truncate/zero-pad each tokenised document to self.maxlen word indices;
        # list() avoids label-based indexing issues when a pandas Series slice is passed
        content = list(content)
        sequences = np.zeros((len(content), self.maxlen))
        for i in range(len(content)):
            for j in range(min(len(content[i]), self.maxlen)):
                sequences[i][j] = self.word_index[content[i][j]]
        return sequences

    def train_batch_data(self, is_shuffle=True):
        if is_shuffle:
            self.data = self.data.sample(frac=1).reset_index(drop=True)

        length = len(self.data) // self.batch_size

        if self.batch_size * length < len(self.data):
            length += 1

        for i in tqdm(range(length)):
            # pandas iloc clips slices past the end, so the last partial batch is safe
            batch_content = self.data['content'].iloc[self.batch_size * i:self.batch_size * (i + 1)]
            batch_label = self.data['label'].iloc[self.batch_size * i:self.batch_size * (i + 1)]
            sequences = self.pad_sequences(batch_content)
            # CrossEntropyLoss expects class indices, so convert the one-hot
            # vectors from label_vector into integer class ids
            label = np.argmax(np.array(batch_label.map(self.label_vector).tolist()), axis=1)
            yield torch.LongTensor(sequences), torch.LongTensor(label)

    def test_batch_data(self):
        length = len(self.data) // self.batch_size

        if self.batch_size * length < len(self.data):
            length += 1

        for i in tqdm(range(length)):
            # pandas iloc clips slices past the end, so the last partial batch is safe
            batch_content = self.data['content'].iloc[self.batch_size * i:self.batch_size * (i + 1)]
            yield torch.LongTensor(self.pad_sequences(batch_content))
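
# Usage sketch (assumes the config dict assembled below):
#   loader = DataLoader(train, config)
#   for seqs, labels in loader.train_batch_data():
#       ...  # seqs: LongTensor (batch, maxlen); labels: LongTensor (batch,)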

class TextCNN(nn.Module):
    def __init__(self, config):
        super(TextCNN, self).__init__()
        self.class_num = config['class_num']
        self.embedding_matrix = config['embedding_matrix']

        self.embedding = nn.Embedding(self.embedding_matrix.shape[0], self.embedding_matrix.shape[1], _weight=self.embedding_matrix)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2))
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # Four MaxPool2d(2) layers halve each spatial dim four times, shrinking
        # (maxlen, word2vec_size) to (maxlen // 16, word2vec_size // 16); with
        # 128 output channels the flattened size is 128 * 31 * 6 = 23808
        self.out = nn.Linear(128 * (maxlen // 16) * (word2vec_size // 16), self.class_num)

    def forward(self, x):
        x = self.embedding(x)
        x = x.view(x.size(0), 1, maxlen, word2vec_size)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = x.view(x.size(0), -1)  # flatten (batch, channels, h, w) to (batch, channels * h * w)
        output = self.out(x)
        return output
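
# Shape sanity check: pooling gives (500, 100) -> (250, 50) -> (125, 25)
# -> (62, 12) -> (31, 6), so self.out sees 128 * 31 * 6 = 23808 features.
# A minimal sketch (assumes the config dict built below):
#   cnn = TextCNN(config)
#   dummy = torch.randint(0, config['embedding_matrix'].shape[0], (2, maxlen))
#   print(cnn(dummy).shape)  # expected: torch.Size([2, class_num])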

class Model(object):
    def __init__(self, train_loader, valid_loader, test_loader, config):
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.test_loader = test_loader
        self.model = TextCNN(config=config)
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = Adam(self.model.parameters(), lr=config['learning_rate'])
        self.best_acc = 0.0

    def verification(self):
        # Evaluate accuracy on the validation set and checkpoint the best model
        correct, total = 0, 0
        self.model.eval()
        with torch.no_grad():
            for sequences, label in self.valid_loader.train_batch_data(is_shuffle=False):
                out = self.model(sequences)
                pred = out.argmax(dim=1)
                correct += (pred == label).sum().item()
                total += label.size(0)
        self.model.train()

        acc = correct / total
        print('valid accuracy:', acc)
        if acc > self.best_acc:
            self.best_acc = acc
            torch.save(self.model, './wnd/model/model.pkl')

    def fit(self, epoch):
        for i in range(epoch):
            for sequences, label in self.train_loader.train_batch_data():
                out = self.model(sequences)        # forward pass
                self.optimizer.zero_grad()         # reset accumulated gradients
                loss = self.criterion(out, label)  # cross-entropy on class indices
                loss.backward()                    # backpropagate
                self.optimizer.step()              # update parameters

            self.verification()

    def restore(self):
        self.model = torch.load('./wnd/model/model.pkl')

    def predict(self):
        res = []
        for sequences in self.test_loader.test_batch_data():
            out = self.model(sequences)
            # take the arg-max class index for each sample
            res.extend(out.argmax(dim=1).tolist())

        res = pd.DataFrame(res, columns=['pred'])
        res.to_csv('./nn_res.csv', header=None, index=None, sep=',')

config = dict()
config['batch_size'] = batch_size
config['maxlen'] = maxlen
config['word_index'] = word_index
config['label_vector'] = label_vector
config['class_num'] = class_num
config['learning_rate'] = 1e-3
config['embedding_matrix'] = torch.Tensor(embedding_matrix)

data = data.sample(frac=1).reset_index(drop=True)
train = data.head(8000)
valid = data.tail(2000)

print('data', data.shape)
print('train', train.shape)
print('valid', valid.shape)
print('test', test.shape)

train_loader = DataLoader(train, config)
valid_loader = DataLoader(valid, config)
test_loader = DataLoader(test, config)
model = Model(train_loader, valid_loader, test_loader, config)
model.fit(2)
# model = Model(train_loader, valid_loader, test_loader, config)
# model.restore()
model.predict()

 
