# Load the usual data-analysis libraries
import pandas as pd
from pandas import concat
from pandas import DataFrame,Series
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from keras.layers import Dense, Activation, LSTM
from keras.models import Sequential
train_url = ""
industrydata = pd.read_csv(train_url+'1.csv',header=None)
des = industrydata.describe()
industrydata2 = pd.read_csv(train_url+'2.csv',header=None)
#csv文件描述
industrydata2.describe()
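# `des` is the describe() summary of the raw frame, indexed by statistic name,
# so `des.loc['min'][3]` / `des.loc['max'][3]` in the normalization below pick
# the minimum and maximum of raw column 3 (an added note; the actual columns
# depend on the contents of 1.csv).
print(des.index.tolist())  # ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max']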
# Min-max (0-1) normalization of the daily max/min/mean temperature, humidity, and output columns
industrydata['WDmax']=industrydata.iloc[:,3].apply(lambda x:((x-des.loc['min'][3])/(des.loc['max'][3]-des.loc['min'][3])))
industrydata['WDmin']=industrydata.iloc[:,4].apply(lambda x:((x-des.loc['min'][4])/(des.loc['max'][4]-des.loc['min'][4])))
industrydata['WDave']=industrydata.iloc[:,5].apply(lambda x:((x-des.loc['min'][5])/(des.loc['max'][5]-des.loc['min'][5])))
industrydata['SD']=industrydata.iloc[:,6].apply(lambda x:((x-des.loc['min'][6])/(des.loc['max'][6]-des.loc['min'][6])))
industrydata['OUT']=industrydata.iloc[:,7].apply(lambda x:((x-des.loc['min'][7])/(des.loc['max'][7]-des.loc['min'][7])))
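# Quick check of the scaling (an added sanity check, not in the original
# walkthrough): each normalized column should now lie in [0, 1], so its
# minimum should be 0 and its maximum 1.
print(industrydata[['WDmax', 'WDmin', 'WDave', 'SD', 'OUT']].describe().loc[['min', 'max']])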
# data: the dataset; n_in: size of the input time window; n_out: length of the output sequence
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """
    Frame a time series as a supervised learning dataset.
    Arguments:
        data: Sequence of observations as a list or NumPy array.
        n_in: Number of lag observations as input (X).
        n_out: Number of observations as output (y).
        dropnan: Boolean whether or not to drop rows with NaN values.
    Returns:
        Pandas DataFrame of series framed for supervised learning.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols, names = list(), list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
    # put it all together
    agg = concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    return agg
# Build supervised-learning features from the data: window size 3, forecast horizon 1 (n_in=3, n_out=1)
industrydata_filter = series_to_supervised(industrydata[['WDmax', 'WDmin', 'WDave', 'SD', 'OUT']], 3, 1)
# Drop the time-t feature columns that we do not want to predict
industrydata_filter.drop(industrydata_filter.columns[[15, 16, 17, 18]], axis=1, inplace=True)
# After the drop, var1(t-3) ... var5(t-1) are the LSTM input features and var5(t) is the value to predict (see the check below)
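# Sanity check (an added sketch, not part of the original pipeline): the
# remaining columns should be the 15 input features var1(t-3) ... var5(t-1)
# plus the single target column var5(t).
print(industrydata_filter.columns.tolist())
print(industrydata_filter.shape)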
# Split the data for model training
# Divide the windowed data into training, validation and test sets
# (built from the windowed 'WDmax', 'WDmin', 'WDave', 'SD', 'OUT' features above)
def split_industrydata(industrydata_filter):
    # Use np.array here to prepare the data for model training later
    industrydata_train = np.array(industrydata_filter[:350])
    industrydata_validation = np.array(industrydata_filter[350:450])
    industrydata_test = np.array(industrydata_filter[450:])
    return industrydata_train, industrydata_validation, industrydata_test
#industrydata_train,industrydata_validation,industrydata_test=split_industrydata(industrydata_filter)
def split_features_result(industrydata_filter, in_timesteps):
    # Split each subset into a feature matrix and a target vector (single output value)
    industrydata_train, industrydata_validation, industrydata_test = split_industrydata(industrydata_filter)
    train_X, train_y = industrydata_train[:, :-1], industrydata_train[:, -1]
    validation_X, validation_y = industrydata_validation[:, :-1], industrydata_validation[:, -1]
    test_X, test_y = industrydata_test[:, :-1], industrydata_test[:, -1]
    # reshape input to be 3D [samples, timesteps, features]
    train_X = train_X.reshape((train_X.shape[0], in_timesteps, train_X.shape[1] // in_timesteps))
    validation_X = validation_X.reshape((validation_X.shape[0], in_timesteps, validation_X.shape[1] // in_timesteps))
    test_X = test_X.reshape((test_X.shape[0], in_timesteps, test_X.shape[1] // in_timesteps))
    print("Shapes of the features and labels in the training, validation and test sets:")
    print(train_X.shape, train_y.shape, validation_X.shape, validation_y.shape, test_X.shape, test_y.shape)
    return train_X, train_y, validation_X, validation_y, test_X, test_y
train_X, train_y, validation_X, validation_y, test_X, test_y = split_features_result(industrydata_filter, 3)
'''
Build an LSTM model with Keras.
The model has one LSTM layer followed by one Dense layer;
the loss is MAE and the optimizer is Adam.
'''
def lstm_model(train_X, train_y, validation_X, validation_y, hiddencells=50, epochs=100, batch_size=32):
    # design network
    model = Sequential()
    model.add(LSTM(hiddencells, input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(Dense(1))
    model.compile(loss='mae', optimizer='adam')
    # fit network
    history = model.fit(train_X, train_y, epochs=epochs, batch_size=batch_size,
                        validation_data=(validation_X, validation_y), verbose=2, shuffle=False)
    # plot history
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='validation')
    plt.legend()
    plt.show()
    return model
data_model=lstm_model(train_X,train_y,validation_X,validation_y)
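# Optional: inspect the layer structure and parameter counts of the fitted
# model (an added check; `data_model` is the model returned by lstm_model above).
data_model.summary()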
def test_model(data_model, test_X, test_y):
    predict_y = data_model.predict(test_X)
    # plot predictions against the true test values
    plt.plot(predict_y, label='predict')
    plt.plot(test_y, label='test')
    plt.legend()
    plt.show()
    return data_model.evaluate(test_X, test_y)
score=test_model(data_model,test_X,test_y)
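# The score above is the MAE on the 0-1 normalized output. To interpret the
# error in the original units, map the predictions back through the inverse
# min-max transform (an added sketch, using raw column 7, from which 'OUT' was scaled):
out_min, out_max = des.loc['min'][7], des.loc['max'][7]
predict_y_orig = data_model.predict(test_X).flatten() * (out_max - out_min) + out_min
test_y_orig = test_y * (out_max - out_min) + out_min
print('MAE in original units:', np.mean(np.abs(predict_y_orig - test_y_orig)))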
data_model=lstm_model(train_X,train_y,validation_X,validation_y,hiddencells=100,epochs=200,batch_size=32)
score=test_model(data_model,test_X,test_y)
'''
Comparing the two runs shows that the validation loss is essentially flat by about 150 epochs, i.e. training has converged on the validation set, and adding more epochs leads to overfitting. Increasing the number of LSTM units also makes the final score worse, again a sign of overfitting.
Next, test with hiddencells=20, epochs=150, batch_size=32:
'''
data_model=lstm_model(train_X,train_y,validation_X,validation_y,hiddencells=20,epochs=150,batch_size=32)
score=test_model(data_model,test_X,test_y)
score
'''
Comparison shows that reducing the number of LSTM units also makes the final score worse, which suggests increasing the number of hidden units to improve performance.
Next, test with hiddencells=60, epochs=150, batch_size=32:
'''
data_model=lstm_model(train_X,train_y,validation_X,validation_y,hiddencells=60,epochs=150,batch_size=32)
score=test_model(data_model,test_X,test_y)
score
'''
With an input window of 3 and an output length of 1, adjusting the number of hidden LSTM units and the number of training epochs gives the following results:
1. hiddencells=50,  epochs=100, batch_size=32  score: 0.048210320770740507
2. hiddencells=100, epochs=200, batch_size=32  score: 0.053018131256103516
3. hiddencells=20,  epochs=150, batch_size=32  score: 0.051017957806587216
4. hiddencells=60,  epochs=150, batch_size=32  score: 0.047194169998168944
'''
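# The comparisons above suggest the validation loss levels off around 150
# epochs, after which extra training mainly overfits. Instead of fixing the
# epoch count by hand, Keras' EarlyStopping callback can stop training once
# val_loss stops improving. A minimal sketch (the patience value is an
# assumption, not taken from the experiments above):
from keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', patience=10)
model = Sequential()
model.add(LSTM(60, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
model.fit(train_X, train_y, epochs=300, batch_size=32,
          validation_data=(validation_X, validation_y),
          callbacks=[early_stop], verbose=2, shuffle=False)
print(model.evaluate(test_X, test_y))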