import jieba
target_file ="stress_words.txt"# 读取空白行之前的句子为个性化词典
custom_dict ={}withopen(target_file,"r", encoding="utf-8")as f:for line in f.readlines():# 之前做的标记,跳过此行if(line.strip()=="||"):continue# 新做的标记,结束个性词典if(line.strip()==""):break# 切分出前半部分,将长度大于1的词加入词典
line = line.split("||")[0]for word in line.strip().split(" "):if(len(word)>1):
freq = custom_dict.get(word,0)
custom_dict[word]= freq+1# 将词典保存
special_dict ="""
布艺 床品 家纺 窗帘
"""for w in special_dict.replace("/n","").strip().split(" "):
custom_dict[w]=20000000withopen("format_dict.txt","w", encoding="utf-8")as out:
out.write("\n".join([k+" "+str(custom_dict[k])for k in custom_dict.keys()]))# 加载词典
# Load the saved dictionary and re-segment the file with it.
jieba.load_userdict("format_dict.txt")

words = []
cut_start = False  # becomes True once the blank marker line has been passed
with open(target_file, "r", encoding="utf-8") as f:
    for line in f:
        stripped = line.strip()
        # A lone "||" is a marker left by a previous pass: skip it.
        if stripped == "||":
            continue
        # Blank marker line: flip the flag, emit a separator row, and start
        # re-cutting from the next line on.
        # BUGFIX: the original wrote `line.strip == ""`, comparing the bound
        # method object to "" — always False, so cut_start never flipped and
        # re-segmentation never started.
        if stripped == "":
            cut_start = True
            words.append(" || ")
            continue
        # Drop any annotation already appended after "||".
        if "||" in line:
            line = line.split("||")[0]
        if not cut_start:
            # Before the blank line: keep the existing segmentation as-is.
            res = line.strip().split(" ")
        else:
            # After the blank line: remove spaces and re-cut with jieba.
            res = list(jieba.cut(line.strip().replace(" ", "")))
        # Words on this line that the dictionary does not contain yet.
        unique_word = [w for w in res if w not in custom_dict]
        cut_res = " ".join(res)
        # Pad to a fixed visual width of 60 columns, assuming CJK characters
        # render double-width (len*2) minus the single-width joining spaces.
        # (A negative count simply yields an empty padding string.)
        true_len = 60 - (len(cut_res) * 2 - len(res) + 1)
        words.append(cut_res + true_len * " " + "|| " + " ".join(unique_word))

# Overwrite the source file with the aligned, re-segmented lines.
with open(target_file, "w", encoding="utf-8") as out:
    out.write("\n".join(words))