While working through 莫烦 (Morvan)'s reinforcement learning course, I rewrote the Double DQN example in PyTorch.
The main changes are:
1. Ported the code to PyTorch.
2. Updated the environment to Pendulum-v1 and changed the reward normalization accordingly (the v1 reward lies roughly in [-16.27, 0]; see the short check below for where that constant comes from).
3. Tuned some hyperparameters for better training results.
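As a quick sanity check, here is a minimal sketch of where the 16.27 comes from, assuming the standard Pendulum reward -(theta^2 + 0.1*theta_dot^2 + 0.001*torque^2) with theta in [-pi, pi], theta_dot in [-8, 8], and torque in [-2, 2]:

import numpy as np

# Largest possible penalty magnitude under the assumed reward formula
max_penalty = np.pi ** 2 + 0.1 * 8 ** 2 + 0.001 * 2 ** 2
print(max_penalty)  # ~16.2736, hence reward /= 16.27 in the main script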
1. RL_Brain
"""
agent代码
"""
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
np.random.seed(42)
torch.manual_seed(2)
class Network(nn.Module):
    def __init__(self, n_features, n_actions, n_neuron=128):
        super(Network, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(in_features=n_features, out_features=n_neuron, bias=True),
            nn.ReLU(),
            nn.Linear(in_features=n_neuron, out_features=n_actions, bias=True)
        )

    def forward(self, s):
        s = s.float()
        q = self.net(s)
        return q
class DoubleDQN:
    def __init__(
            self,
            n_actions,
            n_features,
            learning_rate=0.005,
            reward_decay=0.9,
            e_greedy=0.9,
            replace_target_iter=200,
            memory_size=3000,
            batch_size=64,
            e_greedy_increment=None,
            output_graph=True
    ):
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.e_greedy_increment = e_greedy_increment
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
        self.learn_step_counter = 0
        self.memory = pd.DataFrame(np.zeros((self.memory_size, self.n_features * 2 + 2)))
        self.eval_net = Network(self.n_features, self.n_actions)
        self.target_net = Network(self.n_features, self.n_actions)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=self.lr)
        self.cost_his = []
    def store_transition(self, s, a, r, s_):
        # Create the counter lazily on the first call
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        # Pack the transition into one flat row [s, r, a, s_] so the dtypes stay consistent
        transition = np.hstack((s, [r, a], s_))
        # Overwrite the oldest experience once the buffer is full
        index = self.memory_counter % self.memory_size
        self.memory.iloc[index, :] = transition
        self.memory_counter += 1
    def choose_action(self, observation):
        # Turn the 1-D observation into a batch of size 1
        observation = observation[np.newaxis, :]
        if np.random.uniform() < self.epsilon:
            s = torch.tensor(observation)
            actions_value = self.eval_net(s)
            action = int(torch.argmax(actions_value).item())
        else:
            action = np.random.randint(0, self.n_actions)
        return action
    def replace_target_params(self):
        self.target_net.load_state_dict(self.eval_net.state_dict())
    def learn(self):
        # Periodically copy the eval-net weights into the target net
        if self.learn_step_counter % self.replace_target_iter == 0:
            self.replace_target_params()
            print('\ntarget params replaced\n')
        # Sample a batch: from the whole buffer once it is full, otherwise from the filled part
        if self.memory_counter > self.memory_size:
            batch_memory = self.memory.sample(self.batch_size)
        else:
            batch_memory = self.memory.iloc[:self.memory_counter].sample(
                self.batch_size, replace=True
            )
        s = torch.tensor(batch_memory.iloc[:, :self.n_features].values)
        s_ = torch.tensor(batch_memory.iloc[:, -self.n_features:].values)
        q_eval = self.eval_net(s)
        with torch.no_grad():
            q_next = self.target_net(s_)    # target net evaluates the next state
            q_eval_ = self.eval_net(s_)     # eval net selects the next action (Double DQN)
            max_action_index = torch.argmax(q_eval_, dim=1)
        # Build the TD target; detach so gradients only flow through q_eval
        q_target = q_eval.detach().clone()
        batch_index = np.arange(self.batch_size, dtype=np.int32)
        eval_act_index = batch_memory.iloc[:, self.n_features + 1].values.astype(int)
        # Note: pandas .values returns the underlying ndarray, unlike torch's .values (e.g. from max()), which returns a tensor of values
        reward = batch_memory.iloc[:, self.n_features].values
        q_target[batch_index, eval_act_index] = torch.tensor(reward).float() + \
            self.gamma * q_next.gather(dim=1, index=max_action_index.unsqueeze(1)).squeeze(1)
        loss = self.loss_function(q_eval, q_target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.cost_his.append(loss.item())
        if self.e_greedy_increment is not None:
            self.epsilon = min(self.epsilon + self.e_greedy_increment, self.epsilon_max)
        self.learn_step_counter += 1
    def plot_cost(self):
        plt.figure()
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.show()
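The heart of learn() above is the Double DQN target: the eval net picks the greedy action in the next state, while the target net evaluates that action, which reduces the value overestimation of a vanilla DQN target. A minimal standalone sketch of the difference (random tensors stand in for real Q-values; this is not part of the class above):

import torch

q_next = torch.rand(4, 11)       # target-net Q-values for s_
q_eval_next = torch.rand(4, 11)  # eval-net Q-values for s_
reward = torch.rand(4)
gamma = 0.9

# Vanilla DQN: the target net both selects and evaluates the next action
target_dqn = reward + gamma * q_next.max(dim=1).values

# Double DQN: the eval net selects, the target net evaluates
a_star = q_eval_next.argmax(dim=1)
target_double_dqn = reward + gamma * q_next.gather(1, a_star.unsqueeze(1)).squeeze(1)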
2. Main script
"""
主函数
"""
import gym
from RL_Brain import DoubleDQN
import numpy as np
import matplotlib.pyplot as plt
def train(RL):
    step = 0
    observation = env.reset()
    rewards = []       # reward received at each step
    avg_rewards = []   # moving average of the reward
    # A single loop with a total-step budget suits this continuing (non-episodic) task
    while True:
        env.render()
        action = RL.choose_action(observation)
        # Map the discrete action index 0..ACTION_SPACE-1 to a continuous torque in [-2, 2]
        f_action = (action - (ACTION_SPACE - 1) / 2) / ((ACTION_SPACE - 1) / 4)
        # The environment expects the action as a numpy array
        observation_, reward, done, info = env.step(np.array([f_action]))
        # Normalize the reward to roughly [-1, 0] (the Pendulum-v1 reward lies in about [-16.27, 0])
        reward /= 16.27
        RL.store_transition(observation, action, reward, observation_)
        # Start learning only after the replay buffer has been filled
        if step > MEMORY_SIZE:
            RL.learn()
        rewards.append(reward)
        if len(rewards) >= 100:   # moving average over the last 100 steps
            avg_rewards.append(np.mean(rewards[-100:]))
        if step - MEMORY_SIZE > 20000:
            break
        observation = observation_
        step += 1

    plt.plot(rewards, label='Reward')
    plt.plot(avg_rewards, label='Average Reward (last 100 steps)')
    plt.xlabel('Steps')
    plt.ylabel('Reward')
    plt.legend()
    plt.show()
if __name__ == '__main__':
    env = gym.make('Pendulum-v1')
    env = env.unwrapped
    env.seed(1)
    ACTION_SPACE = 11
    MEMORY_SIZE = 3000
    RL = DoubleDQN(n_actions=ACTION_SPACE, n_features=3,
                   learning_rate=0.01,
                   reward_decay=0.9,
                   e_greedy=0.9,
                   replace_target_iter=200,
                   memory_size=MEMORY_SIZE,
                   e_greedy_increment=0.001,
                   output_graph=True
                   )
    train(RL)
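Optionally, after train(RL) returns you can append the following inside the __main__ block to plot the training loss (via the plot_cost() helper defined in RL_Brain) and roll out the learned policy greedily. This is my own sketch, not part of the original course code; setting RL.epsilon = 1.0 makes choose_action() always exploit, and the 500-step horizon is arbitrary:

    # Optional extras (own sketch): loss curve + greedy rollout of the trained policy
    RL.plot_cost()
    RL.epsilon = 1.0  # always exploit (choose_action explores only when the uniform draw is >= epsilon)
    observation = env.reset()
    for _ in range(500):
        env.render()
        action = RL.choose_action(observation)
        f_action = (action - (ACTION_SPACE - 1) / 2) / ((ACTION_SPACE - 1) / 4)
        observation, reward, done, info = env.step(np.array([f_action]))
    env.close()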