[头歌 Practice Labs - NLP] Chapter 9: NLP Algorithms Based on Machine Learning

Hands-On Machine Learning for NLP

Level 1: Unsupervised Text Clustering


Level 2: Text Clustering with the K-Means Algorithm

import jieba
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cluster import KMeans


class KmeansClustering():
    def __init__(self, stopwords_path=None):
        self.stopwords = self.load_stopwords(stopwords_path)
        self.vectorizer = CountVectorizer()
        self.transformer = TfidfTransformer()

    def load_stopwords(self, stopwords=None):
        # Load the stop-word list
        if stopwords:
            with open(stopwords, 'r', encoding='utf-8') as f:
                return [line.strip() for line in f]
        else:
            return []

    def preprocess_data(self, corpus_path):
        # Preprocess the corpus: one document per line
        corpus = []
        with open(corpus_path, 'r', encoding='utf-8') as f:
            for line in f:
                corpus.append(' '.join([word for word in jieba.lcut(line.strip()) if word not in self.stopwords]))
        return corpus

    def get_text_tfidf_matrix(self, corpus):
        # Compute the TF-IDF matrix from the raw term counts
        tfidf = self.transformer.fit_transform(self.vectorizer.fit_transform(corpus))
        # Extract the TF-IDF weights as a dense array
        weights = tfidf.toarray()
        return weights

    def kmeans(self, corpus_path, n_clusters=2):
        """
        KMeans文本聚类
        :param corpus_path: 语料路径(每行一篇),文章id从0开始
        :param n_clusters: :聚类类别数目
        :return: 聚类标签数组,索引为文本id,值为对应的聚类标签
        """
        corpus = self.preprocess_data(corpus_path)
        weights = self.get_text_tfidf_matrix(corpus)
        result = {}
        
        # Task: cluster the documents with K-Means and save the result in the variable result.
        # ********** Begin *********#
        # Initialize the KMeans model
        kmeans_model = KMeans(n_clusters=n_clusters)
        # Fit the model on the TF-IDF weight matrix
        kmeans_model.fit(weights)
        # Cluster label of each document (array indexed by document id)
        result = kmeans_model.labels_
        # ********** End **********#
        
        return result
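
A minimal usage sketch (not part of the exercise), assuming a corpus file with one document per line; the file paths below are placeholders.

# Hypothetical usage of KmeansClustering; both paths are placeholders.
if __name__ == '__main__':
    km = KmeansClustering(stopwords_path='data/stop_words.txt')
    labels = km.kmeans('data/corpus.txt', n_clusters=5)
    print(labels)  # labels[i] is the cluster id assigned to document i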

Level 3: Text Clustering with DBSCAN

import jieba
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt

class DbscanClustering():
    def __init__(self, stopwords_path=None):
        self.stopwords = self.load_stopwords(stopwords_path)
        self.vectorizer = CountVectorizer()
        self.transformer = TfidfTransformer()

    def load_stopwords(self, stopwords=None): # load the stop-word list
        if stopwords:
            with open(stopwords, 'r', encoding='utf-8') as f:
                return [line.strip() for line in f]
        else:
            return []

    def preprocess_data(self, corpus_path): # preprocess the corpus, one document per line
        corpus = []
        with open(corpus_path, 'r', encoding='utf-8') as f:
            for line in f:
                corpus.append(' '.join([word for word in jieba.lcut(line.strip()) if word not in self.stopwords]))
        return corpus

    def get_text_tfidf_matrix(self, corpus): # build the TF-IDF matrix
        tfidf = self.transformer.fit_transform(self.vectorizer.fit_transform(corpus))
        weights = tfidf.toarray() # extract the TF-IDF weights as a dense array
        return weights

    def pca(self, weights, n_components=2): # reduce the dimensionality of the data with PCA
        pca = PCA(n_components=n_components)
        return pca.fit_transform(weights)

    def dbscan(self, corpus_path, eps=0.1, min_samples=3, fig=True): # density-based text clustering (fig is kept for plotting but unused in this excerpt)

        # Task: complete the DBSCAN clustering algorithm
        # ********** Begin *********#
        corpus = self.preprocess_data(corpus_path)      # segment the text and drop stop words
        weights = self.get_text_tfidf_matrix(corpus)    # TF-IDF weight matrix
        pca_weights = self.pca(weights)                  # reduce to 2 dimensions with PCA
        clf = DBSCAN(eps=eps, min_samples=min_samples)   # density-based clustering model
        
        
        # ********** End **********#
        y = clf.fit_predict(pca_weights)  # fit the model; each document gets a cluster label, or -1 for noise

       
        result = {}  # map each cluster label to the list of documents it contains
        for text_idx, label_idx in enumerate(y):
            if label_idx not in result:
                result[label_idx] = [text_idx]
            else:
                result[label_idx].append(text_idx)
        return result
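
A minimal usage sketch (not part of the exercise), with the same placeholder file layout as in the K-Means level.

# Hypothetical usage of DbscanClustering; both paths are placeholders.
if __name__ == '__main__':
    db = DbscanClustering(stopwords_path='data/stop_words.txt')
    clusters = db.dbscan('data/corpus.txt', eps=0.05, min_samples=3)
    print(clusters)  # {cluster id: [document ids]}; key -1 collects the noise points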

Level 4: Machine-Learning-Based Sentiment Analysis

import xlwt
import pickle
import itertools
import nltk
import os
import sklearn
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

pos_f = 'src/step3/pkl_data/1000/pos_review.pkl'
neg_f = 'src/step3/pkl_data/1000/neg_review.pkl'

def load_data():  # load the training data
    global pos_review, neg_review
    pos_review = pickle.load(open(pos_f, 'rb'))
    neg_review = pickle.load(open(neg_f, 'rb'))

def create_word_bigram_scores(): # compute an information score for every word and bigram collocation in the corpus
    posdata = pickle.load(open(pos_f, 'rb'))
    negdata = pickle.load(open(neg_f, 'rb'))

    posWords = list(itertools.chain(*posdata))
    negWords = list(itertools.chain(*negdata))

    bigram_finder = BigramCollocationFinder.from_words(posWords)
    posBigrams = bigram_finder.nbest(BigramAssocMeasures.chi_sq, 5000)
    bigram_finder = BigramCollocationFinder.from_words(negWords)
    negBigrams = bigram_finder.nbest(BigramAssocMeasures.chi_sq, 5000)

    pos = posWords + posBigrams  # words plus bigram collocations
    neg = negWords + negBigrams

    word_fd = FreqDist()
    cond_word_fd = ConditionalFreqDist()
    for word in pos:
        word_fd[word] += 1
        cond_word_fd["pos"][word] += 1
    for word in neg:
        word_fd[word] += 1
        cond_word_fd["neg"][word] += 1

    pos_word_count = cond_word_fd['pos'].N()
    neg_word_count = cond_word_fd['neg'].N()
    total_word_count = pos_word_count + neg_word_count

    word_scores = {}
    for word, freq in word_fd.items():
        pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count)  # chi-square score of the word for the positive class (other measures such as mutual information could be used instead)
        neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count)
        word_scores[word] = pos_score + neg_score

    return word_scores

def find_best_words(word_scores, number): # sort words by information score in descending order and keep the top-ranked ones
    best_vals = sorted(word_scores.items(), key=lambda w_s: w_s[1], reverse=True)[:number]  # number is the feature dimensionality and can be tuned for best results
    best_words = set([w for w, s in best_vals])
    return best_words

def pos_features(feature_extraction_method): # label the positive documents
    posFeatures = []
    for i in pos_review:
        posWords = [feature_extraction_method(i), 'pos']  # attach the "pos" label to each positive document
        posFeatures.append(posWords)
    return posFeatures

def neg_features(feature_extraction_method): # label the negative documents

    negFeatures = []
    for j in neg_review:
        negWords = [feature_extraction_method(j), 'neg']  # attach the "neg" label to each negative document
        negFeatures.append(negWords)
    return negFeatures

def best_word_features(words): # keep only the selected high-information words as features
    global best_words
    return dict([(word, True) for word in words if word in best_words])

def score(classifier):
    # train, dev and tag_dev are the splits produced elsewhere by cut_data() (not shown in this excerpt)
    # Task: build the classifier model and train it
    # ********** Begin *********#
    classifier = SklearnClassifier(classifier)  # wrap the sklearn estimator as an NLTK classifier
    classifier.train(train)                     # train on the training split

    # ********** End **********#

    pred = classifier.classify_many(dev)  # predict labels for the development set
    return accuracy_score(tag_dev, pred)  # compare predictions with the gold labels to get accuracy

# Evaluate the final classifier on the held-out test set
def use_the_best():
    word_scores = create_word_bigram_scores()  # use words and bigram collocations as features
    global best_words
    best_words = find_best_words(word_scores, 4000)  # feature dimensionality: 4000
    load_data()
    posFeatures = pos_features(best_word_features)
    negFeatures = neg_features(best_word_features)
    cut_data(posFeatures, negFeatures)
    trainSet = posFeatures[1500:] + negFeatures[1500:]  # train on more of the data
    testSet = posFeatures[:500] + negFeatures[:500]
    test, tag_test = zip(*testSet)

    # Train a classifier on the full training set and score it on the test set
    def final_score(classifier):
        classifier = SklearnClassifier(classifier)
        classifier.train(trainSet)
        pred = classifier.classify_many(test)
        return accuracy_score(tag_test, pred)

    print(final_score(MultinomialNB()))  # use the best classifier found on the development set


# Persist the classifier (same procedure as above, but trained on more data so the stored classifier is more accurate)
def store_classifier():
    load_data()
    word_scores = create_word_bigram_scores()
    global best_words
    best_words = find_best_words(word_scores, 7500)

    posFeatures = pos_features(best_word_features)
    negFeatures = neg_features(best_word_features)

    trainSet = posFeatures + negFeatures

    MultinomialNB_classifier = SklearnClassifier(MultinomialNB())
    MultinomialNB_classifier.train(trainSet)
    pickle.dump(MultinomialNB_classifier, open('src/step3/out/classifier.pkl', 'wb'))
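
A minimal sketch of how the persisted classifier could be used afterwards (not part of the exercise): it assumes store_classifier() has already run, so the pickle file exists and the global best_words is populated; the example word list is invented.

# Hypothetical usage of the stored classifier; assumes store_classifier() has been called first.
loaded_clf = pickle.load(open('src/step3/out/classifier.pkl', 'rb'))
example_words = ['房间', '干净', '服务', '周到']  # placeholder pre-segmented review
features = best_word_features(example_words)       # relies on the global best_words set in store_classifier()
print(loaded_clf.classify(features))               # prints 'pos' or 'neg'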
    

Classifier Methods

Level 1: Classifier Methods


Level 2: Text Classification with Naive Bayes

from functools import reduce
import operator
from numpy import array, zeros
 
def trainNB(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)  # number of documents
    numWords = len(trainMatrix[0])  # length of the first document vector, i.e. the vocabulary size
    pAbusive = sum(trainCategory) / float(numTrainDocs)  # fraction of documents that are abusive (negative)
    p0Num = zeros(numWords)  # initialize the word-count vectors
    p1Num = zeros(numWords)
    p0Denom = 0
    p1Denom = 0
    
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:  # abusive (negative) document
            p1Num += trainMatrix[i]  # add the document's word-count vector element-wise
            p1Denom += sum(trainMatrix[i])  # total number of words in negative documents
        else:
            p0Num += trainMatrix[i]  # word-count vector of normal documents
            p0Denom += sum(trainMatrix[i])  # total number of words in normal documents

    p1Vect = p1Num / p1Denom   # element-wise division: probability of each word given the negative class
    p0Vect = p0Num / p0Denom   # element-wise division: probability of each word given the normal class
    return p0Vect, p1Vect, pAbusive
    
def classifyNB(vec2Classify, trainMatrix, trainCategory):
    p0Vect, p1Vect, pAb = trainNB(trainMatrix, trainCategory)
    # Conditional probabilities of the words that appear in the document to classify
    p1VectClassify = vec2Classify * p1Vect
    p0VectClassify = vec2Classify * p0Vect
    p1Cond = []
    p0Cond = []
    
    for i in range(len(p1VectClassify)):
        if p1VectClassify[i] == 0:
            continue
        else:
            p1Cond.append(p1VectClassify[i])
            
    for i in range(len(p0VectClassify)):
        if p0VectClassify[i] == 0:
            continue
        else:
            p0Cond.append(p0VectClassify[i])
    # Task: compute the class probabilities from the probability vectors
    # ********** Begin *********#
    if len(p0Cond):                               # p0Cond is non-empty, i.e. p0VectClassify is not all zeros
        pC0 = reduce(operator.mul, p0Cond, 1)     # product of the non-zero conditional probabilities
    else:
        pC0 = 0
    if len(p1Cond):                               # same for the negative class
        pC1 = reduce(operator.mul, p1Cond, 1)
    else:
        pC1 = 0
    p1 = pC1 * pAb          # P(doc | negative) * P(negative)
    p0 = pC0 * (1.0 - pAb)  # P(doc | normal) * P(normal)
    # ********** End **********#
    if p1 > p0:
        return 1
    else:
        return 0
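
A toy run of the two functions above on a hand-made bag-of-words matrix; the 4-word vocabulary, documents and labels are invented purely for illustration.

# Invented toy data: 4-word vocabulary, 4 training documents (label 1 = abusive, 0 = normal).
trainMatrix = array([[2, 1, 0, 1],   # normal
                     [2, 0, 1, 0],   # normal
                     [0, 2, 0, 2],   # abusive
                     [0, 1, 1, 2]])  # abusive
trainCategory = [0, 0, 1, 1]
doc = array([0, 1, 0, 1])            # document to classify: contains words 1 and 3
print(classifyNB(doc, trainMatrix, trainCategory))  # prints 1, i.e. classified as abusive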

Level 3: Text Classification with Logistic Regression

import numpy as np
import pandas as pd
import re
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import nltk

def Text_categorization():
    file_name = input()
    dataset = pd.read_csv('src/step2/data/' + file_name)
    
    # Download the stop-word list if it is not already available
    try:
        words = stopwords.words("english")
    except LookupError:
        nltk.download('stopwords')
        words = stopwords.words("english")
    
    # Text cleaning and preprocessing
    stemmer = PorterStemmer()
    dataset['cleaned'] = dataset['text'].apply(
        lambda x: " ".join([
            stemmer.stem(i) for i in re.sub("[^a-zA-Z]", " ", x).split()  # strip non-letter characters, then stem each token
            if i.lower() not in words  # drop stop words
        ]).lower()  # lowercase everything
    )
    
    # Split into training and test sets
    X = dataset['cleaned']  # cleaned text features
    Y = dataset['category']  # class labels
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=42)
    
    # Task: build the logistic-regression text-classification model
    # ********** Begin *********#
    # Build a Pipeline that chains the TF-IDF vectorizer and the logistic-regression classifier
    pipeline = Pipeline([
        ('tfidf', TfidfVectorizer(
            min_df=3,               # ignore terms that appear in fewer than 3 documents
            stop_words="english",   # built-in English stop-word list
            sublinear_tf=True,      # sublinear TF scaling (replace tf with 1 + log(tf))
            norm='l2',              # L2 normalization
            ngram_range=(1, 2)      # use unigrams and bigrams
        )),
        ('clf', LogisticRegression(
            max_iter=1000,     # more iterations to ensure convergence
            random_state=42    # fixed random seed for reproducibility
        ))
    ])
    
    # Train the model
    model = pipeline.fit(X_train, y_train)
    
    # Predict on the test set
    y_pred = model.predict(X_test)
    
    # Print the classification report (precision, recall, F1)
    print("Classification report:\n", classification_report(y_test, y_pred))
    # ********** End **********#
    
    ytest = np.array(y_test)
    return X_test, ytest, model


# Example invocation (uncomment to test)
# X_test, ytest, model = Text_categorization()
# print("Sample test document:", X_test.iloc[0])
# print("True label:", ytest[0])
# print("Predicted label:", model.predict([X_test.iloc[0]]))

Level 4: Text Classification with Support Vector Machines

import numpy as np
from sklearn.svm import SVC
from sklearn.feature_extraction.text import CountVectorizer
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfTransformer
from test import get_data, get_result

# Load the data and keep only the 10 most frequent varieties
df = get_data()
counter = Counter(df['variety'].tolist())
top_10_varieties = {i[0]: idx for idx, i in enumerate(counter.most_common(10))}
df = df[df['variety'].map(lambda x: x in top_10_varieties)]

# Prepare the text features and the labels
description_list = df['description'].tolist()
varietal_list = [top_10_varieties[i] for i in df['variety'].tolist()]
varietal_list = np.array(varietal_list)

# Term-frequency (bag-of-words) vectorization
count_vect = CountVectorizer() 
x_train_counts = count_vect.fit_transform(description_list)

# Task: compute the TF-IDF values of the documents
# ********** Begin *********#
# Initialize the TF-IDF transformer and compute the TF-IDF matrix
tfidf_transformer = TfidfTransformer()
x_train_tfidf = tfidf_transformer.fit_transform(x_train_counts)  # term-frequency matrix -> TF-IDF matrix
# ********** End *********#

# Split into training and test sets
train_x, test_x, train_y, test_y = train_test_split(x_train_tfidf, varietal_list, test_size=0.3)

# Train an SVM classifier with a linear kernel
clf = SVC(kernel='linear').fit(train_x, train_y)
y_score = clf.predict(test_x)

# Count the correct predictions to compute accuracy
n_right = 0
for i in range(len(y_score)):
    if y_score[i] == test_y[i]:
        n_right += 1

# Report the result (as required by the platform's test module)
get_result(n_right, test_y)
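
For a quick cross-check (not required, since the platform grades via get_result), the same accuracy could also be computed with scikit-learn's accuracy_score:

# Optional cross-check with sklearn's accuracy metric:
# from sklearn.metrics import accuracy_score
# print(accuracy_score(test_y, y_score))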

Getting Started with Machine Learning

Level 1: Getting Started with Machine Learning


Level 2: Common Machine Learning Methods

