A Simple Wind Power Forecasting Model

This post walks through a complete wind-speed forecasting workflow built on an LSTM (long short-term memory) network: data preprocessing, model definition, training, validation, and prediction. An LSTM regression model implemented in PyTorch is fitted to a wind turbine's daily mean wind speed, with TensorBoard used to monitor training. After training, the predictions are compared against the actual data and plotted.





from time import time
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import torch
from torch import nn
from torch.optim import lr_scheduler
from tensorboardX import SummaryWriter
import pandas as pd
import set  # local configuration module (DB credentials, table name, etc.)
import pymysql

start= time()

TABLE_NAME = set.TABLE_NAME
# whether to print the SQL statements
SHOW_SQL = set.SHOW_SQL
config = set.CONFIG
conn = pymysql.connect(**config)

df = pd.read_sql("select * from winderturbine_power_per_day where wt_date >'2018/1/1'",conn)
wind_speed=df.mean_wind_speed
# Clean the data: wind-speed records <= 0 are invalid; replace them with a fixed value of 5 (standing in for the mean)
wind_speed[wind_speed<=0]=5
df.mean_wind_speed=wind_speed
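# (Alternative sketch, not in the original post: replace the invalid records
# with the mean of the valid ones instead of the hard-coded 5.)
# valid_mean = df.loc[df.mean_wind_speed > 0, 'mean_wind_speed'].mean()
# df.loc[df.mean_wind_speed <= 0, 'mean_wind_speed'] = valid_mean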

wind_speed= df.groupby('wt_date').mean_wind_speed.mean()

g= df.groupby('wt_date').active_power_generation.sum()  # daily total active power generation (not used further below)

writer = SummaryWriter()
DAYS_FOR_TRAIN = 10
EPOCHS = 200
learning_rate=0.1
min_learning_rate=2e-5
HS=8  # hidden_size
NL=2 #num_layers


class LSTM_Regression(nn.Module):
    """
        LSTM-based regression.
        Args:
        - input_size: feature size
        - hidden_size: number of hidden units
        - output_size: number of output
        - num_layers: layers of LSTM to stack
    """

    def __init__(self, input_size, hidden_size, output_size=1, num_layers=2):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, _x):
        x, _ = self.lstm(_x)  # _x is the input, shape (seq_len, batch, input_size)
        s, b, h = x.shape  # x is the LSTM output, shape (seq_len, batch, hidden_size)
        x = x.view(s * b, h)
        x = self.fc(x)
        x = x.view(s, b, -1)  # reshape back to (seq_len, batch, output_size)
        return x
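# Quick shape check (illustrative, not in the original post): with the values
# used below (input_size=10, hidden_size=8, output_size=1, num_layers=2), a
# dummy input of shape (seq_len=5, batch=1, input_size=10) comes back as
# (5, 1, 1), i.e. one prediction per time step:
#   m = LSTM_Regression(10, 8, output_size=1, num_layers=2)
#   print(m(torch.zeros(5, 1, 10)).shape)   # torch.Size([5, 1, 1])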


def create_dataset(data, days_for_train=5) -> (np.array, np.array):
    """
        根据给定的序列data,生成数据集。
        数据集分为输入和输出,每一个输入的长度为days_for_train,每一个输出的长度为1。
        也就是说用days_for_train天的数据,对应下一天的数据。
        若给定序列的长度为d,将输出长度为(d-days_for_train)个输入/输出对
        根据10天数据预测第11天数据  
    """
    dataset_x, dataset_y = [], []
    for i in range(len(data) - days_for_train):
        _x = data[i:(i + days_for_train)]
        dataset_x.append(_x)
        dataset_y.append(data[i + days_for_train])
    return (np.array(dataset_x), np.array(dataset_y))
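# Toy example (illustrative): for data = [1, 2, 3, 4, 5] and days_for_train=3,
# create_dataset returns
#   dataset_x = [[1, 2, 3], [2, 3, 4]]   # two sliding windows of length 3
#   dataset_y = [4, 5]                   # the value right after each window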




if __name__ == '__main__':

    speed_series = np.array(wind_speed).astype('float32')

    # plot the daily mean wind speed
    plt.plot(speed_series)
    plt.savefig('wind_speed.png', format='png', dpi=200)
    plt.close()
    # scale the data to the [0, 1] range
    scaler = MinMaxScaler(feature_range=(0, 1))
    speed_series = scaler.fit_transform(speed_series.reshape(-1, 1))
    # turn the series into (input, target) pairs
    dataset_x, dataset_y = create_dataset(speed_series, DAYS_FOR_TRAIN)
    # split into training and test sets: 90% for training, 10% for testing
    train_size = int(len(dataset_x) * 0.90)
    train_x = dataset_x[:train_size]
    train_y = dataset_y[:train_size]
    test_x = dataset_x[train_size:]
    test_y = dataset_y[train_size:]
    # reshape for the RNN, which expects (seq_len, batch_size, feature_size)
    train_x = train_x.reshape(-1, 1, DAYS_FOR_TRAIN)
    train_y = train_y.reshape(-1, 1, 1)
    # convert to PyTorch tensors
    train_x = torch.from_numpy(train_x)
    train_y = torch.from_numpy(train_y)
    # build and train the model
    model = LSTM_Regression(DAYS_FOR_TRAIN, hidden_size=HS, output_size=1, num_layers=NL)  # initialize the network
    loss_function = nn.MSELoss()  # loss function
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)  # optimizer
    # learning-rate schedule: decay the multiplier by 0.54 every 150 epochs, floored at min_learning_rate
    lr_lambda = lambda epoch: max(0.54 ** (epoch // 150), min_learning_rate)
    lr_list = []
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
    # scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08)
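    # Illustration (not in the original post): LambdaLR multiplies the base lr
    # by lr_lambda(epoch), so with learning_rate = 0.1 the schedule is
    #   epochs   0-149: 0.1 * 0.54**0 = 0.1
    #   epochs 150-299: 0.1 * 0.54**1 = 0.054, and so on,
    # while max(..., min_learning_rate) floors the multiplier (not the lr itself) at 2e-5.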
    
    for epoch in range(EPOCHS):
        out = model(train_x)
        loss = loss_function(out, train_y)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        lr_list.append(optimizer.state_dict()['param_groups'][0]['lr'])  # lr used in this epoch
        scheduler.step()  # update the learning rate (after the optimizer step)
        writer.add_scalar('data/loss', loss.item(), epoch)
        writer.add_scalar('data/lr', lr_list[-1], epoch)
        if (epoch + 1) % 20 == 0:
            print('HS{} NL{} Epoch: {}, Loss: {:.5f}'.format(HS, NL, epoch + 1, loss.item()))

            if loss.item() < 0.011:
                torch.save(model.state_dict(), './model/model_params_wind_speed' +
                           '{}{}_Epoch_{}, Loss_{:.5f},DFT{}'.format(HS, NL, epoch + 1, loss.item(), DAYS_FOR_TRAIN) + '.pkl')  # checkpoint the parameters for later use

    torch.save(model.state_dict(), './model/model_params_wind_speed' +
               '{}{}_Epoch_{}, Loss_{:.5f},DFT{}'.format(HS, NL, epoch + 1, loss.item(), DAYS_FOR_TRAIN) + '.pkl')  # save the final parameters for later use


    # prediction
    model = model.eval()  # switch to evaluation mode
    # to reload saved parameters:
    # model.load_state_dict(torch.load('./model/model_params_wind_speed93_Epoch_3000, Loss_0.01684,DFT18.pkl'))
    # run the model on the full dataset_x; the output is DAYS_FOR_TRAIN samples shorter than the raw series
    dataset_x = dataset_x.reshape(-1, 1, DAYS_FOR_TRAIN)  # (seq_len, batch_size, feature_size)
    dataset_x = torch.from_numpy(dataset_x)  # convert to a PyTorch tensor
    pred_y = model(dataset_x)  # model output over the full dataset, (seq_len, batch_size, output_size)
    pred_y = pred_y.view(-1).data.numpy()
    # log the model graph to TensorBoard
    with SummaryWriter(comment='LSTM{}{}'.format(HS, NL)) as w:
        w.add_graph(model, (dataset_x, ))
    # invert the normalization to get back to the original units
    actual_pred_y = scaler.inverse_transform(pred_y.reshape(-1, 1))
    actual_pred_y = actual_pred_y.reshape(-1, 1).flatten()
    test_y = scaler.inverse_transform(test_y.reshape(-1, 1))
    test_y = test_y.reshape(-1, 1).flatten()
    actual_pred_y = actual_pred_y[-len(test_y):]
    test_y = test_y.reshape(-1, 1)
    assert len(actual_pred_y) == len(test_y)
    writer.export_scalars_to_json("./test.json")
    writer.close()

    # prediction vs. actual
    plt.plot(actual_pred_y, 'r', label='prediction{}{}_Epoch_{}, Loss_{:.5f},DFT{}'.format(HS, NL, epoch + 1, loss.item(), DAYS_FOR_TRAIN))
    plt.plot(test_y, 'b', label='real')
    plt.plot((len(actual_pred_y), len(test_y)), (0, 1), 'g--')  # vertical marker at the right edge (both curves cover only the test period)
    plt.legend(loc='best')
    plt.savefig('./img/result{}{}_Epoch_{}, Loss_{:.5f},DFT{}'.format(HS,NL,epoch + 1, loss.item(),DAYS_FOR_TRAIN)+'.png', format='png', dpi=1200)
    plt.show()
    plt.close()
    end = time()
    print('Elapsed: %.2f s' % (end - start))
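Beyond the visual comparison above, the held-out segment can also be scored numerically. The snippet below is not part of the original script; it is a small sketch that assumes the `actual_pred_y` and `test_y` arrays computed above and uses scikit-learn's metric functions:

from sklearn.metrics import mean_absolute_error, mean_squared_error

# test_y is (n, 1) after the final reshape; flatten both arrays before scoring
rmse = np.sqrt(mean_squared_error(test_y.flatten(), actual_pred_y.flatten()))
mae = mean_absolute_error(test_y.flatten(), actual_pred_y.flatten())
print('Test RMSE: %.3f m/s, MAE: %.3f m/s' % (rmse, mae))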

    
