# In[4]:
# Source:
# https://github.com/fchollet/keras/blob/master/examples/imdb_lstm.py
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.datasets import imdb
import numpy as np
max_features = 20000
maxlen = 80 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print('Loading data...')
#(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
#np.savetxt("x_train.txt", x_train)
#np.savetxt("x_test.txt", x_test)
#np.savetxt(" y_train.txt", y_train)
#np.savetxt("y_test.txt", y_test)
# In[2]:
x_train = np.loadtxt("/mfsdata/pachong/RNN/x_train.txt")
y_train = np.loadtxt("/mfsdata/pachong/RNN/y_train.txt")
x_test = np.loadtxt("/mfsdata/pachong/RNN/x_test.txt")
y_test = np.loadtxt("/mfsdata/pachong/RNN/y_test.txt")
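# np.loadtxt returns float64 arrays; pad_sequences below casts the word
# indices back to integers (its default dtype is 'int32').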
# In[5]:
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
# In[6]:
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
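# A small illustration of pad_sequences on toy input (not the IMDB data):
# sequences shorter than maxlen are zero-padded at the front by default.
print(sequence.pad_sequences([[1, 2, 3], [4, 5]], maxlen=4))
# -> [[0 1 2 3]
#     [0 0 4 5]]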
# In[10]:
print('Build model...')
model = Sequential()
# Map each of the top max_features word indices to a 128-dimensional vector.
model.add(Embedding(max_features, 128))
# LSTM layer with dropout on the inputs and on the recurrent connections.
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
# Single sigmoid unit for binary (positive/negative) sentiment.
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
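# Optional: print layer output shapes and parameter counts before training.
model.summary()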
print('Train...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=15,
          validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
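# A short inference sketch: predictions are sigmoid probabilities in [0, 1],
# where values above 0.5 can be read as positive sentiment.
preds = model.predict(x_test[:5], batch_size=batch_size)
print('Sample predictions:', preds.ravel())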