# Naive Bayes text classification
# Builds a vocabulary, vectorizes documents, trains a naive Bayes
# classifier, and demonstrates abusive-text filtering on a toy dataset.
from numpy import *

def trainNB0(trainMatrix, trainCategory):
    # Train a naive Bayes classifier for a binary problem where one
    # class is labeled 1 (abusive).
    # Returns:
    #   p0Vect:   log-probability of each word given class 0
    #   p1Vect:   log-probability of each word given class 1
    #   pAbusive: prior probability of class 1
    numTrainDoc = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDoc)
    # Laplace smoothing: start every word count at 1 so that a word never
    # seen in one class cannot zero out the whole product of probabilities.
    # p0Num / p1Num: per-word count vectors over class-0 / class-1 documents
    p0Num = ones(numWords)
    p1Num = ones(numWords)
    # p0Denom / p1Denom: total word counts in class 0 / class 1 (scalars).
    # Initialized to 2 to match the smoothing above: each word has two
    # possible values (present / absent), so the smoothed estimate is
    # (count + 1) / (total + 2).
    p0Denom = 2.0
    p1Denom = 2.0
    for i in range(numTrainDoc):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # Work in log space: a product of many small probabilities would
    # otherwise underflow to zero (see the demo below).
    p1Vect = log(p1Num / p1Denom)
    p0Vect = log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive
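# Why the switch to logs matters -- a minimal demo added for illustration
# (not part of the original code). Multiplying a few hundred small
# per-word probabilities underflows double precision to exactly 0.0,
# while the sum of their logs stays finite.
def _underflowDemo():
    probs = [0.01] * 200                    # 200 hypothetical word likelihoods
    product = 1.0
    for p in probs:
        product *= p                        # 1e-400 underflows to 0.0
    logSum = sum([log(p) for p in probs])   # about -921, perfectly usable
    print(product, logSum)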
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # Element-wise multiplication selects the log-likelihoods of the words
    # that appear in the document. Only the log of the numerator is
    # computed: since we only compare the two scores, the shared
    # denominator (and the sign of the log values) is irrelevant.
    p1 = sum(vec2Classify * p1Vec) + log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0
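# Added note (not in the original): the comparison above is the Bayes
# decision rule. For class c and binary word vector x,
#   P(c|x) is proportional to P(c) * prod_i P(w_i|c)^x_i,
# and taking logs gives  log P(c) + sum_i x_i * log P(w_i|c).
# A tiny self-contained check with made-up probabilities:
def _classifyDemo():
    p0Vec = log(array([0.5, 0.3, 0.2]))  # hypothetical P(w_i | class 0)
    p1Vec = log(array([0.2, 0.3, 0.5]))  # hypothetical P(w_i | class 1)
    doc = array([0, 1, 1])               # document contains words 1 and 2
    print(classifyNB(doc, p0Vec, p1Vec, 0.5))  # 1: word 2 favors class 1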
####################
def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 is abusive, 0 not
    return postingList, classVec
def createVocabList(dataSet):
    vocabSet = set([])  # create empty set
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union of the two sets
    return list(vocabSet)
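# Added illustration: the vocabulary is the union of all unique words.
# Python sets are unordered, so the word order can differ between runs;
# sorting here just makes the output deterministic.
def _vocabDemo():
    docs = [['my', 'dog'], ['dog', 'park']]
    print(sorted(createVocabList(docs)))  # ['dog', 'my', 'park']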
def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
def testingNB():
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
    testEntry = ['stupid', 'garbage']
    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))
    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))
def bagOfWords2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
    return returnVec
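# Added comparison (not in the original): setOfWords2Vec records only
# presence/absence, while bagOfWords2VecMN counts repetitions, which is
# what the multinomial model expects.
def _bagVsSetDemo():
    vocab = ['dog', 'stupid', 'my']  # toy vocabulary for illustration
    print(setOfWords2Vec(vocab, ['my', 'dog', 'my']))    # [1, 0, 1]
    print(bagOfWords2VecMN(vocab, ['my', 'dog', 'my']))  # [1, 0, 2]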
def textParse(bigString):  # input is a big string, output is a word list
    import re
    # split on runs of non-word characters; the book's r'\W*' also matches
    # the empty string, which breaks re.split in Python 3
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
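# Added illustration: textParse splits on non-word characters, lowercases,
# and keeps only tokens longer than two characters.
def _textParseDemo():
    print(textParse("This book is the BEST book on Python!"))
    # -> ['this', 'book', 'the', 'best', 'book', 'python']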
if __name__ == "__main__":
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    #print(myVocabList)
    trainMat = []
    for postinDoc in listOPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))
    testingNB()