from __future__ import division
from nltk.book import *
"""
搜索文本
"""
"""
计数词汇
"""
"""
链表(list)
连接 +
追加 append()
索引 text[10]
切片 text[10:100]
排序 sorted()
去重 set()
"""
"""
频率分布:FreqDist()来寻找频率
找到一本书使用最频繁的50个词
"""
"""
细粒度选择词:
选择长度大于15的词
"""
"""
词语搭配和双连词
"""
# collocations() prints its results itself and returns None, so wrapping the
# call in `print` appended a spurious "None" line after each listing.
# Call the methods directly instead.
text4.collocations()
text8.collocations()
"""
计数的其他东西:
fdist = FreqDist(samples) 创建包含给定样本的频率分布
fdist.inc(sample) 增加样本
fdist['monstrous'] 计数给定样本出现的次数
fdist.freq('monstrous') 给定样本的频率
fdist.N() 样本总数
fdist.keys() 以频率递减顺序排序的样本链表
for sample in fdist: 以频率递减的顺序遍历样本
fdist.max() 数值最大的样本
fdist.tabulate() 绘制频率分布表
fdist.plot() 绘制频率分布图
fdist.plot(cumulative=True) 绘制累积频率分布图
fdist1<fdist2 测试样本在fdist1中出现的频率是否小于fdist2
"""
"""
s.startswith(t) 测试 s是否以t开头
s.endswith(t) 测试 s是否以t结尾
t in s 测试 s是否包含t
s.islower() 测试 s中所有字符是否都是小写字母
s.isupper() 测试 s中所有字符是否都是大写字母
s.isalpha() 测试 s中所有字符是否都是字母
s.isalnum() 测试 s中所有字符是否都是字母或数字
s.isdigit() 测试 s中所有字符是否都是数字
s.istitle() 测试 s是否首字母大写( s中所有的词都首字母大写
"""
"""
思考:既然NLTK这么有用,那么对于中文的处理又该如何处理呢??
方法:
1、使用中文分词器(jieba)
2、对中文字符做编码处理,使用unicode编码方式
3、python的源码统一声明为gbk
4、使用支持中文的语料库
"""
"""
原始数据,用于建立模型
"""
# Raw corpus the similarity model is built from: a mix of English course
# titles, app-store style descriptions, and short Chinese test strings
# crafted to probe how the model handles near-duplicate documents.
courses = [
    u'Writing II: Rhetorical Composing',
    u'Genetics and Society: A Course for Educators',
    u'General Game Playing',
    u'Genes and the Human Condition (From Behavior to Biotechnology)',
    u'A Brief History of Humankind',
    u'New Models of Business in Society',
    u'Analyse Numrique pour Ingnieurs',
    u'Evolution: A Course for Educators',
    u'Coding the Matrix: Linear Algebra through Computer Science Applications',
    u'The Dynamic Earth: A Course for Educators',
    u'Tiny Wings\tYou have always dreamed of flying - but your wings are tiny. Luckily the world is full of beautiful hills. Use the hills as jumps - slide down, flap your wings and fly! At least for a moment - until this annoying gravity brings you back down to earth. But the next hill is waiting for you already. Watch out for the night and fly as fast as you can. ',
    u'Angry Birds Free',
    # NOTE(review): '\它' is not a valid escape, so the backslash is kept
    # literally — possibly a typo for '\t它' like the neighbouring entries;
    # preserved as-is to avoid changing the test data.
    u'没有\它很相似',
    u'没有\t它很相似',
    u'没有\t他很相似',
    u'没有\t他不很相似',
    u'没有',
    u'可以没有',
    u'也没有',
    u'有没有也不管',
    u'Angry Birds Stella',
    u'Flappy Wings - FREE\tFly into freedom!A parody of the #1 smash hit game!',
    u'没有一个',
    u'没有一个2',
]
# Alias kept so query results can be printed back by document index.
courses_name = courses
"""
预处理(easy_install nltk)
"""
def pre_process_cn(courses, low_freq_filter=True):
    """
    Simplified preprocessing for mixed Chinese/English documents.

    Steps:
      1. Tokenize each document with NLTK's word_tokenize, then re-segment
         each token with jieba keyword extraction (handles Chinese text).
      2. Drop common English punctuation tokens.
      3. Stem every remaining token with the Lancaster stemmer.
      4. Optionally drop tokens that occur exactly once in the whole corpus.

    :param courses: iterable of unicode document strings
    :param low_freq_filter: when True, remove tokens whose corpus-wide
        frequency is exactly 1
    :return: list of token lists, one per input document
    """
    import jieba.analyse
    from collections import Counter
    from nltk.tokenize import word_tokenize
    from nltk.stem.lancaster import LancasterStemmer

    texts_tokenized = []
    for document in courses:
        tokens = []
        for word in word_tokenize(document):
            # extract_tags re-segments the token and keeps up to the 10
            # highest-weight terms, which effectively splits Chinese runs.
            tokens += jieba.analyse.extract_tags(word, 10)
        texts_tokenized.append(tokens)

    # NOTE(review): despite the docstring's step 1 ("remove stop words"),
    # no stop-word list is actually applied here — kept as-is to preserve
    # the original behavior.
    english_punctuations = {',', '.', ':', ';', '?', '(', ')', '[', ']',
                            '&', '!', '*', '@', '#', '$', '%'}
    texts_filtered = [[word for word in document
                       if word not in english_punctuations]
                      for document in texts_tokenized]

    st = LancasterStemmer()
    texts_stemmed = [[st.stem(word) for word in document]
                     for document in texts_filtered]

    if low_freq_filter:
        # Counter counts the whole corpus in one pass, replacing the
        # original O(n^2) list.count() call per distinct stem.
        stem_counts = Counter(stem for text in texts_stemmed for stem in text)
        texts = [[stem for stem in text if stem_counts[stem] > 1]
                 for text in texts_stemmed]
    else:
        texts = texts_stemmed
    return texts
# Preprocess the whole corpus once; the result is the training input for LSI.
lib_texts = pre_process_cn(courses)
"""
引入gensim,正式开始处理(easy_install gensim)
"""
def train_by_lsi(lib_texts, num_topics=10):
    """
    Train an LSI (Latent Semantic Indexing) model over the given corpus.

    :param lib_texts: list of token lists (the output of pre_process_cn)
    :param num_topics: number of latent topics for the LSI projection;
        defaults to 10, matching the previously hard-coded value
    :return: tuple (index, dictionary, lsi) where
        index      -- gensim MatrixSimilarity over the LSI-projected corpus
        dictionary -- token <-> id mapping built from lib_texts
        lsi        -- the trained LsiModel
    """
    from gensim import corpora, models, similarities

    # Map each token to an integer id, then express every document as a
    # (token_id, count) bag-of-words vector.
    dictionary = corpora.Dictionary(lib_texts)
    corpus = [dictionary.doc2bow(text) for text in lib_texts]

    # Re-weight raw counts by TF-IDF before training the LSI model.
    tfidf = models.TfidfModel(corpus)
    corpus_tfidf = tfidf[corpus]

    lsi = models.LsiModel(corpus_tfidf, id2word=dictionary,
                          num_topics=num_topics)
    # NOTE(review): the similarity index is built from lsi[corpus] (raw
    # bag-of-words through LSI), not lsi[corpus_tfidf] — preserved as-is.
    index = similarities.MatrixSimilarity(lsi[corpus])
    return (index, dictionary, lsi)
(index, dictionary, lsi) = train_by_lsi(lib_texts)
target_courses = [u'没有']
target_text = pre_process_cn(target_courses, low_freq_filter=False)
"""
对具体对象相似度匹配
"""
ml_course = target_text[0]
ml_bow = dictionary.doc2bow(ml_course)
ml_lsi = lsi[ml_bow]
sims = index[ml_lsi]
sort_sims = sorted(enumerate(sims), key=lambda item: -item[1])
print sort_sims[0:10]
print courses_name[sort_sims[1][0]]
print courses_name[sort_sims[2][0]]
print courses_name[sort_sims[3][0]]