Predicting A-Share Trends with an LSTM Model on a Securities Dataset

This follows the earlier "Predicting A-Share Trends with an LSTM Model on a Securities Dataset" tutorial, with the data interface updated from the legacy Tushare API to the current Tushare Pro.

import numpy as np
import pandas as pd
import re
from matplotlib import pyplot as plt
from matplotlib.ticker import MultipleLocator
import mplfinance as mpl
import matplotlib.dates as mpdates
from sklearn.preprocessing import MinMaxScaler

# Import PaddlePaddle
import paddle
import paddle.nn.functional as F


class LSTM(paddle.nn.Layer):
    def __init__(self, num_classes, input_size, hidden_size, num_layers):
        super(LSTM, self).__init__()

        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        # paddle.nn.LSTM defaults to time_major=False, i.e. it expects inputs of
        # shape [batch_size, seq_len, input_size]
        self.lstm = paddle.nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
        )
        self.fc = paddle.nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # x: [batch_size, seq_len, input_size]
        # h: [num_layers, batch_size, hidden_size]
        x, (h, c) = self.lstm(x)
        # Use the last layer's final hidden state as the summary of the sequence
        h = h[-1]
        return self.fc(h)
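
As a quick sanity check (a minimal sketch, assuming the batch-first layout [batch_size, seq_len, input_size] used below), the head maps the final hidden state to one output per sequence:

# Hypothetical smoke test: 4 sequences of 30 steps with 15 features each
net = LSTM(num_classes=1, input_size=15, hidden_size=300, num_layers=1)
dummy = paddle.randn([4, 30, 15], dtype="float32")
print(net(dummy).shape)  # expected: [4, 1]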

print(paddle.__version__)

import warnings

warnings.filterwarnings("ignore")

def MA_next(df, date_idx, price_type, n):
    # Mean of the next n values of `price_type`, starting at row `date_idx`
    return df[price_type][date_idx : date_idx + n].mean()
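
For example, with hypothetical prices, MA_next(df, i, "close", n) is simply the average of the n closes starting at row i:

demo = pd.DataFrame({"close": [10.0, 11.0, 12.0, 13.0]})
print(MA_next(demo, 0, "close", 2))  # (10.0 + 11.0) / 2 = 10.5
print(MA_next(demo, 1, "close", 3))  # (11.0 + 12.0 + 13.0) / 3 = 12.0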

def ifnone(if_, else_, transform=None):
    """Return `if_` if it is not None (applying `transform` to it when given),
    otherwise return `else_`. `transform` can be any callable."""
    if if_ is None:
        return else_
    if transform is not None:
        return transform(if_)
    return if_
def make_date(df, date_field):
    "Make sure `df[date_field]` is of the right date type."
    field_dtype = df[date_field].dtype
    if isinstance(field_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
        field_dtype = np.datetime64
    if not np.issubdtype(field_dtype, np.datetime64):
        df[date_field] = pd.to_datetime(
            df[date_field], infer_datetime_format=True
        )
def add_datepart(df, field_name, prefix=None, drop=True, time=False):
    "Helper function that adds columns relevant to a date in the column `field_name` of `df`."
    make_date(df, field_name)
    field = df[field_name]
    prefix = ifnone(prefix, re.sub("[Dd]ate$", "", field_name))
    attr = [
        "Year",
        "Month",
        "Week",
        "Day",
        "Dayofweek",
        "Dayofyear",
        "Is_month_end",
        "Is_month_start",
        "Is_quarter_end",
        "Is_quarter_start",
        "Is_year_end",
        "Is_year_start",
    ]
    if time:
        attr = attr + ["Hour", "Minute", "Second"]
    # Pandas removed `dt.week` in v1.1.10
    week = (
        field.dt.isocalendar().week.astype(field.dt.day.dtype)
        if hasattr(field.dt, "isocalendar")
        else field.dt.week
    )
    for n in attr:
        df[prefix + n] = getattr(field.dt, n.lower()) if n != "Week" else week
    mask = ~field.isna()
    df[prefix + "Elapsed"] = np.where(
        mask, field.values.astype(np.int64) // 10**9, np.nan
    )
    if drop:
        df.drop(field_name, axis=1, inplace=True)
    return df
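
A quick illustration (with a made-up two-row frame) of what add_datepart produces; note that for a column literally named "date" the prefix is empty, so the generated columns are Year, Month, ..., Elapsed:

demo_dates = pd.DataFrame({"date": ["20250102", "20250103"]})
add_datepart(demo_dates, "date", drop=False)
print(demo_dates.columns.tolist())
# ['date', 'Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
#  'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start',
#  'Is_year_end', 'Is_year_start', 'Elapsed']
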
import tushare as ts
ts.set_token('************************************')  # replace with your own Tushare Pro token
pro = ts.pro_api()
df = pro.daily(ts_code='000001.SZ', start_date='20240718', end_date='20250718')
df.rename(
    columns={
        "trade_date": "date"
    },
    inplace=True
)
print(df)
# Realtime quotes from Eastmoney (alternative data source)
# df = ts.realtime_quote(ts_code='600000.SH', src='dc')
df_plots = df[["date", "open", "high", "low", "close", "vol"]].copy()
# Rename the columns: mplfinance expects Date, Open, High, Low, Close, Volume
df_plots.rename(
    columns={
        "date": "Date",
        "open": "Open",
        "high": "High",
        "low": "Low",
        "close": "Close",
        "vol": "Volume",
    },
    inplace=True,
)
# Tushare returns rows newest-first; reverse into chronological order
df_plots = df_plots[::-1]
# Use the Date column as the index
df_plots.set_index(["Date"], inplace=True)
# Convert the Date index to datetime
df_plots.index = pd.to_datetime(df_plots.index)
mpl.plot(df_plots, type="candle", mav=(3, 6, 9), volume=True)
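
If this script runs outside a notebook or on a headless machine, mplfinance can save the chart to a file instead of opening a window (an optional variant; the file name here is arbitrary):

# mpl.plot(df_plots, type="candle", mav=(3, 6, 9), volume=True, savefig="kline.png")
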
print(df)
float_type = [
    "open",
    "high",
    "close",
    "pre_close",
    "low",
    "vol",
    "pct_chg",
    "change",
    "amount",
]

for item in float_type:
    df[item] = df[item].astype("float")
s_time = 2   # short moving-average window
m_time = 6   # medium moving-average window
l_time = 15  # long moving-average window
# Labels: 2 = buy, 0 = sell, 1 = hold
for i in range(len(df) - l_time):
    # Python chains the comparison: long MA > 1.03 * medium MA, and
    # 1.03 * medium MA > 1.03 * short MA (i.e. medium MA > short MA)
    if (
        MA_next(df, i, "close", l_time)
        > MA_next(df, i, "close", m_time) * 1.03
        > MA_next(df, i, "close", s_time) * 1.03
    ):
        df.loc[i, "buy_flag"] = 2
    elif MA_next(df, i, "close", s_time) > MA_next(df, i, "close", m_time):
        df.loc[i, "buy_flag"] = 0
    else:
        df.loc[i, "buy_flag"] = 1
print(df)
add_datepart(df, "date", drop=False)
seq_length = 30
train_df = df[seq_length:-seq_length]
# Drop features that are not useful for training
train_df = train_df.drop(
    [
        "ts_code",
        "date",
        "Is_month_end",
        "Is_month_start",
        "Is_quarter_end",
        "Is_quarter_start",
        "Is_year_end",
        "Is_year_start",
        "Dayofyear",
    ],
    axis=1,
)

train_df = train_df.fillna(0)
print(train_df)

def sliding_windows(data, label, seq_length):
    x = []
    y = []

    for i in range(len(data) - seq_length - 1):
        _x = data[i : (i + seq_length)]
        _y = label[i + seq_length]
        x.append(_x)
        y.append(_y)

    return np.array(x), np.array(y)
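
Shape-wise (a sketch with made-up sizes), each window of seq_length rows becomes one sample, and its label is the buy_flag of the row right after the window:

# Hypothetical check: 100 rows with 15 features, windows of 30 steps
dummy_data = np.random.rand(100, 15)
dummy_label = np.random.randint(0, 3, size=(100, 1))
wx, wy = sliding_windows(dummy_data, dummy_label, 30)
print(wx.shape, wy.shape)  # (69, 30, 15) (69, 1)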

# Scale the input features to [0, 1]; the buy_flag labels are kept as raw 0/1/2 values
x_scaler = MinMaxScaler()
print(train_df)
X = train_df.drop(["buy_flag"], axis=1).values
print(X)
X = x_scaler.fit_transform(X)
Y = train_df["buy_flag"]
Y = np.array(Y).reshape(-1, 1)

x, y = sliding_windows(X, Y, seq_length)

y_train, y_test = y[: int(y.shape[0] * 0.8)], y[int(y.shape[0] * 0.8) :]
x_train, x_test = x[: int(x.shape[0] * 0.8)], x[int(x.shape[0] * 0.8) :]


class MyDataset(paddle.io.Dataset):
    """
    Step 1: inherit from paddle.io.Dataset.
    """

    def __init__(self, x, y):
        """
        Step 2: implement __init__ to map the samples and labels into tensors.
        """
        super(MyDataset, self).__init__()
        # Keep the batch-first layout [num_samples, seq_length, num_features]
        # expected by paddle.nn.LSTM with the default time_major=False
        self.data = paddle.to_tensor(x, dtype="float32")
        self.label = paddle.to_tensor(y, dtype="float32")

    def __getitem__(self, index):
        """
        Step 3: implement __getitem__ to return one (sample, label) pair for a given index.
        """
        data = self.data[index]
        label = self.label[index]
        return data, label

    def __len__(self):
        """
        Step 4: implement __len__ to return the total number of samples.
        """
        return len(self.data)
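
A minimal sketch (assuming the batch-first layout above) to confirm what the model receives per batch:

loader = paddle.io.DataLoader(MyDataset(x_train, y_train), batch_size=8, shuffle=False)
batch_x, batch_y = next(iter(loader))
print(batch_x.shape, batch_y.shape)  # e.g. [8, 30, num_features] and [8, 1]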

class LossCallback(paddle.callbacks.Callback):
    def __init__(self):
        super(LossCallback, self).__init__()
        self.losses = []

    def on_train_begin(self, logs={}):
        # Reset the loss history before fit() starts
        self.losses = []

    def on_train_batch_end(self, step, logs={}):
        # Called after each training batch; record the current batch loss
        self.losses.append(logs.get("loss"))


loss_log = LossCallback()


# Instantiate the datasets
train_dataset = MyDataset(x_train, y_train)
eval_dataset = MyDataset(x_test, y_test)

# Inspect one element of a training sample
print(x_train[0][8])

# Print a parameter summary; the input size must match the number of feature columns
feat_dim = train_df.shape[1] - 1
paddle.summary(LSTM(1, feat_dim, 300, 1), (8, seq_length, feat_dim))


from paddle.static import InputSpec

# Hyperparameters
num_epochs = 15
learning_rate = 5e-4

input_size = train_df.shape[1] - 1  # number of input features per time step
hidden_size = 300  # size of the hidden state h
num_layers = 1  # number of stacked LSTM layers

num_classes = 1  # number of output values
batch_size = 8

model = paddle.Model(LSTM(num_classes, input_size, hidden_size, num_layers))

lr_schedual = paddle.optimizer.lr.CosineAnnealingDecay(
    learning_rate=learning_rate, T_max=num_epochs, verbose=False
)
# Set up the Adam optimizer with the cosine-annealing schedule and the model parameters
opt = paddle.optimizer.Adam(
    learning_rate=lr_schedual,
    parameters=model.parameters(),
    beta1=0.9,
    beta2=0.999,
)

# buy_flag is regressed with an MSE loss, so no classification metric is attached
model.prepare(opt, paddle.nn.MSELoss())

model.fit(
    train_dataset,
    eval_dataset,
    epochs=num_epochs,
    batch_size=batch_size,
    eval_freq=10,
    save_freq=10,
    save_dir="lstm_checkpoint",
    verbose=1,
    drop_last=False,
    shuffle=False,
    callbacks=[loss_log],
)
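
The per-batch losses collected by loss_log can be plotted to check convergence (a minimal sketch; entries may be scalars or small arrays, hence the conversion):

losses = [float(np.asarray(l).ravel()[0]) for l in loss_log.losses]
plt.figure(figsize=(8, 4))
plt.plot(losses)
plt.xlabel("batch")
plt.ylabel("MSE loss")
plt.show()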

model.load("lstm_checkpoint/final")
test_result = model.predict(eval_dataset)
# The model has a single output, so test_result has shape [1, N], where N is the
# number of test samples. Print the prediction for the first sample.
print(len(test_result))
print(test_result[0][0])
print(
    "Predicted: {0}, actual: {1}".format(test_result[0][0][0], eval_dataset[0][1][0])
)
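
Since buy_flag is regressed with an MSE loss, one simple (hypothetical) way to read the predictions back as trading signals is to round and clip them to {0, 1, 2} and compare against the held-out labels:

preds = np.concatenate([np.asarray(p).reshape(-1) for p in test_result[0]])
signals = np.clip(np.rint(preds), 0, 2).astype(int)  # 2 = buy, 1 = hold, 0 = sell
actual = y_test[: len(signals)].reshape(-1).astype(int)
print("signal match rate:", (signals == actual).mean())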
