# -*- coding: utf-8 -*-
# Build a first-order Markov chain from randomly sampled chunks of a text file
# and generate a new phrase by walking the chain.
import random

import jieba
def autotxt(fname1, times):
    # Read the source file; the original script assumed GBK-encoded text.
    with open(fname1, encoding="gbk") as f:
        listzk = f.read().split()
    # Randomly sample `times` whitespace-separated chunks as the working corpus.
    dataset_file = [random.choice(listzk) for _ in range(times)]
print("\n分词前:", dataset_file)
for i, each_sentence in enumerate(dataset_file):
dataset_file[i] = " ".join(jieba.cut(each_sentence))
print("\n分词后:", dataset_file)
    # Build a first-order Markov model: each word maps to the list of words that
    # follow it; 'START' collects first words and 'END' collects last words.
    model = {}
    for line in dataset_file:
        line = line.lower().split()
        for i, word in enumerate(line):
            if i == len(line) - 1:
                model['END'] = model.get('END', []) + [word]
            else:
                if i == 0:
                    model['START'] = model.get('START', []) + [word]
                model[word] = model.get(word, []) + [line[i + 1]]
    print("\nModel:", model)
    # Walk the chain: start from a 'START' word, follow random successors, and
    # stop once the latest word is one of the 'END' words.
    generated = []
    while True:
        if not generated:
            words = model['START']
        elif generated[-1] in model['END']:
            break
        else:
            words = model[generated[-1]]
        generated.append(random.choice(words))
    print("\nGenerated result: " + "".join(generated))

#########################
autotxt('readme.txt', 6)  # Randomly sample 6 chunks from the file and generate a new phrase from them.
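
# --- Illustration (not part of the original script) --------------------------
# A minimal sketch of the model that autotxt() builds, shown on a tiny
# hardcoded English corpus so the dict shape is easy to inspect. The corpus
# and the helper name below are made up for this example.
def _markov_demo():
    corpus = ["the cat sat", "the dog sat down"]
    model = {}
    for line in corpus:
        words = line.lower().split()
        for i, word in enumerate(words):
            if i == len(words) - 1:
                model['END'] = model.get('END', []) + [word]
            else:
                if i == 0:
                    model['START'] = model.get('START', []) + [word]
                model[word] = model.get(word, []) + [words[i + 1]]
    # model is now:
    # {'START': ['the', 'the'], 'the': ['cat', 'dog'], 'cat': ['sat'],
    #  'END': ['sat', 'down'], 'dog': ['sat'], 'sat': ['down']}
    print(model)

# _markov_demo()  # uncomment to print the example model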