TF-IDF Code Implementation

This is a worked implementation of the TF-IDF algorithm for extracting keywords from text data. The program first reads the file contents, tokenizes them with jieba and removes stop words, then computes each word's term frequency (TF) within a document, its inverse document frequency (IDF) across the corpus, and the resulting TF-IDF weight. Finally, the program can extract the top keywords of each article and print the TF-IDF values of all articles.


#encoding:utf8
import re
import os
from collections import Counter

import jieba
import jieba.posseg as pseg
import numpy as np

class TFIDFCounter:
    #constructor
    def __init__(self):
        #instance attributes
        self.tfs = {}#term frequency of every word per document {"docid1":{'Apple':tf, 'Jobs':tf, ...}, "docid2":{'phone':tf, 'Jobs':tf, ...}}
        self.tfidfs = {}#TF-IDF value of every word per document {"docid1":{'Apple':tfidf, 'Jobs':tfidf, ...}, "docid2":{'phone':tfidf, 'Jobs':tfidf, ...}}
        self.idfs = {}#IDF value of every word in the corpus vocabulary {'Apple':idf, 'Jobs':idf, 'phone':idf, ...}
        self.termset = set()#corpus vocabulary; a set avoids duplicates and accumulates words across all documents
        self.stopwordsPath = r'C:\Users\adcar\PycharmProjects\pythonProject2\NLP\datas\stopwords.txt'
        
    def load_data(self, filepath):#load one document
        with open(filepath, 'r', encoding='gbk') as fin:
            content = fin.read()
            return re.sub(r'[\n]', '', content)#read the whole file and strip the newlines
    
    #add a document to the corpus
    def add(self, docid, content):
        #tokenize with part-of-speech tags
        #document = jieba.lcut(content)#plain token list without POS information
        psresult = pseg.cut(content)
        #POS filtering: drop punctuation ('x'), numerals ('m') and single-character tokens
        document = [x.word for x in psresult if x.flag not in ['x', 'm'] and len(x.word) > 1]
        print(document)
        #stop-word filtering
        stopwords = self.get_stopwords()
        document = [word for word in document if word not in stopwords]
        #add the remaining tokens to the corpus vocabulary
        self.termset.update(document)
        #compute the term frequencies of this document
        self.getTf(docid, document)
        
    #load the stop words
    def get_stopwords(self):
        stopwordset = set()
        with open(self.stopwordsPath, 'r', encoding='gbk') as fin:
            for line in fin:
                stopwordset.add(line.strip())
        return stopwordset
    
    #run the full computation: TF is already collected per document in add(),
    #so only the corpus-level IDF and TF-IDF steps remain
    def computer(self):
        self.getIdf()
        self.getTfIdf()
    #compute TF values
    def getTf(self, docid, document):
        #term frequency of every word in one document
        #formula: (occurrences of the word in the document) / (total number of words in the document)
        total = len(document)
        doc_tfs = {}#{'Apple':tf, 'Jobs':tf, ...}
        if total != 0:
            for word, count in Counter(document).items():
                doc_tfs[word] = count / total
        self.tfs.update({docid: doc_tfs})
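        # Worked example with assumed toy numbers: in a 100-word document where
        # "Apple" appears 5 times, tf = 5 / 100 = 0.05.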
        
    #compute IDF values
    def getIdf(self):
        #input: self.tfs {"docid1":{'Apple':tf, 'Jobs':tf, ...}, "docid2":{'phone':tf, 'Jobs':tf, ...}}
        #formula: log(total number of documents / (number of documents containing the word + 1))
        #output: IDF value of every word in the corpus vocabulary {'Apple':idf, 'Jobs':idf, 'phone':idf, ...}
        total = len(self.tfs)#total number of documents in the corpus
        for word in self.termset:
            count = 1#the +1 smoothing term of the formula
            for document in self.tfs.values():
                if word in document:
                    count += 1
            word_idf = np.log(total / count)
            self.idfs.update({word: word_idf})
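        # Worked example with assumed toy numbers: in a corpus of 10 documents,
        # a word that appears in 4 of them gets idf = log(10 / (4 + 1)) = log(2) ≈ 0.693.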
    #compute TF-IDF values
    def getTfIdf(self):
        #formula: tf * idf
        #output: TF-IDF value of every word per document {"docid1":{'Apple':tfidf, 'Jobs':tfidf, ...}, "docid2":{'phone':tfidf, 'Jobs':tfidf, ...}}
        for docid, document in self.tfs.items():
            doc_tfidfs = {}
            for word in document:
                if word in self.termset:
                    word_tf = document.get(word)
                    word_idf = self.idfs.get(word)
                    word_tfidf = word_tf * word_idf
                    doc_tfidfs.update({word: word_tfidf})
            self.tfidfs.update({docid: doc_tfidfs})#store once per document, after the inner loop
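        # Continuing the toy numbers above: tfidf = tf * idf = 0.05 * 0.693 ≈ 0.035.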
            
        
    #extract the topN keywords of the given document
    def getKeywordsTopN(self, docid, topN):
        doc_tfidfs = self.tfidfs.get(docid, {})
        #sort the document's words by TF-IDF weight, highest first
        return sorted(doc_tfidfs.items(), key=lambda kv: kv[1], reverse=True)[:topN]
    
    
if __name__ == '__main__':
    dirName = r'C:\Users\adcar\PycharmProjects\pythonProject2\NLP\datas\datas'
    counter = TFIDFCounter()
    #1. add the corpus
    for fileName in os.listdir(dirName):
        filepath = os.path.join(dirName, fileName)
        content = counter.load_data(filepath)
        docid = re.sub(r'\.txt', '', fileName)#use the file name without the .txt extension as the document id
        counter.add(docid, content)
    #2. compute the corpus-level IDF and TF-IDF values
    counter.getIdf()
    counter.getTfIdf()
    print(counter.tfidfs.keys())
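The __main__ block above only prints the document IDs. As a quick check, here is a minimal usage sketch, appended to that block, which also prints each document's highest-weighted keywords via getKeywordsTopN (the topN value of 5 is an arbitrary choice, not from the original):

    #3. print the top-5 keywords of every document
    for docid in counter.tfidfs:
        print(docid, counter.getKeywordsTopN(docid, topN=5))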
