1. Install the dependencies
Testing shows that, besides gym, pygame is also required.
pip install tensorflow
pip install keras
pip install keras-rl
pip install PyOpenGL
pip install pyglet==1.5.11
pip install gym==0.19.0  # 0.19.0 runs normally in testing; 0.26.0 raises a fit input error
pip install pygame
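To double-check that the pinned packages are actually in place, a minimal sanity check like the following can be run (it only prints the installed versions; pygame.version.ver is pygame's version string):

import gym
import tensorflow as tf
import pygame

# Print the installed versions to confirm the pinned packages are in place
print("gym:", gym.__version__)
print("tensorflow:", tf.__version__)
print("pygame:", pygame.version.ver)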
2. Test code
In newer gym versions (such as 0.26), gym.make() needs the render mode specified to show the game window, e.g. env = gym.make("CartPole-v1", render_mode="human"). Testing shows that render_mode="human" displays the game window, while render_mode="rgb_array" does not.
env.step() now returns 5 values instead of the previous 4:
observation, reward, terminated, truncated, info = env.step(action)
import gym

env = gym.make("CartPole-v1", render_mode="human")  # create the environment
for i_episode in range(20):
    observation, info = env.reset()  # reset and get the initial observation
    for t in range(100):
        env.render()
        # print(observation)  # [cart position, cart velocity, pole angle, pole angular velocity]
        action = env.action_space.sample()  # sample a random action
        observation, reward, terminated, truncated, info = env.step(action)  # execute the action
        done = terminated or truncated
        if done:
            print("Episode finished after {} timesteps".format(t + 1))
            break
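Since the install step pins gym 0.19.0 while the notes above describe the 0.26 API, a small compatibility wrapper can smooth over the difference. This is only a sketch; the helper names reset_env and step_env are made up here for illustration:

import gym

def reset_env(env):
    # Return just the observation, whether reset() gives obs or (obs, info)
    result = env.reset()
    return result[0] if isinstance(result, tuple) else result

def step_env(env, action):
    # Return (observation, reward, done, info) for both the 4-value and 5-value step APIs
    result = env.step(action)
    if len(result) == 5:  # newer gym: obs, reward, terminated, truncated, info
        observation, reward, terminated, truncated, info = result
        return observation, reward, terminated or truncated, info
    return result  # older gym: obs, reward, done, info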
3. Source code
import gym
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.optimizers import Adam
from rl.agents import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory

def build_agent(nb_actions, model):
    memory = SequentialMemory(limit=50000, window_length=1)
    policy = BoltzmannQPolicy()
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000,
                   target_model_update=1e-2, policy=policy)
    return dqn

def build_model(status, nb_actions):
    model = Sequential()
    model.add(Flatten(input_shape=(1,) + status))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(nb_actions, activation='linear'))
    return model

env = gym.make('CartPole-v0')  # pass render_mode="human" to display the game window
nb_actions = env.action_space.n
status = env.observation_space.shape
model = build_model(status, nb_actions)
dqn = build_agent(nb_actions, model)
dqn.compile(optimizer=Adam(learning_rate=1e-3), metrics=['mae'])
dqn.fit(env, nb_steps=10000, visualize=True, verbose=1)
dqn.save_weights('dqn_weights.h5f', overwrite=True)
dqn.test(env, nb_episodes=20, visualize=True)

# Load the saved weights and test:
# model = build_model(status, nb_actions)
# dqn = build_agent(nb_actions, model)
# dqn.load_weights('dqn_weights.h5f')
# dqn.test(env, nb_episodes=20, visualize=True)
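Training progress can also be inspected: keras-rl's fit() returns a Keras-style History object, so the dqn.fit(...) line above can be replaced by the version below that keeps the return value. The 'episode_reward' key is an assumption about what keras-rl records per episode, so check history.history.keys() if it differs:

import matplotlib.pyplot as plt

history = dqn.fit(env, nb_steps=10000, visualize=True, verbose=1)

# Plot the per-episode reward collected during training
# ('episode_reward' is the key keras-rl is assumed to log per episode).
plt.plot(history.history['episode_reward'])
plt.xlabel('episode')
plt.ylabel('episode reward')
plt.title('DQN training reward on CartPole-v0')
plt.show()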
4. Testing the DQN agent
Use the DQN trained above to play the game. The key call is dqn.forward(observation), which takes a state observation and returns an action; the initial observation comes from env.reset(): observation = env.reset()
import gym
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.optimizers import Adam
from rl.agents import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory

def build_agent(nb_actions, model):
    memory = SequentialMemory(limit=50000, window_length=1)
    policy = BoltzmannQPolicy()
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000,
                   target_model_update=1e-2, policy=policy)
    dqn.compile(optimizer=Adam(learning_rate=1e-3), metrics=['mae'])
    return dqn

def build_model(status, nb_actions):
    model = Sequential()
    # model.add(Flatten(input_shape=(1,) + status))
    model.add(Flatten(input_shape=(1, 4)))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(nb_actions, activation='linear'))
    return model

env = gym.make("CartPole-v1")
nb_actions = env.action_space.n
status = env.observation_space.shape
model = build_model(status, nb_actions)
dqn = build_agent(nb_actions, model)
dqn.load_weights('dqn_weights.h5f')

observation = env.reset()
t = 0
for step in range(5000):
    env.render()
    action = dqn.forward(observation)
    observation, reward, done, info = env.step(action)
    if done:
        observation = env.reset()
        # step - t is how many timesteps this episode lasted
        print('observation:{}, reward:{}, done:{}, info:{}, episode length:{}, total steps:{}'.format(
            observation, reward, done, info, step - t, step))
        t = step
env.close()
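If you only need greedy playback and don't want to go through the keras-rl agent, the trained Q-network can in principle be used directly: pick the action with the largest predicted Q-value. This is a sketch under the assumptions that the weights were loaded into `model` as above and that the network expects inputs shaped (batch, window_length=1, 4); the helper name greedy_action is made up for illustration:

import numpy as np

# Assumes `model` was built with build_model(status, nb_actions) and the
# trained weights were loaded into it (e.g. via dqn.load_weights above).
def greedy_action(model, observation):
    # Reshape to (batch=1, window_length=1, observation_dim=4) to match the Flatten input
    q_values = model.predict(observation.reshape(1, 1, 4), verbose=0)
    return int(np.argmax(q_values[0]))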
This article describes how to set up a gym reinforcement-learning environment, highlighting that pygame must be installed as a dependency and that render_mode must be set when creating the environment in order to display the game window. It also covers the change in env.step()'s return values and the testing of the DQN agent, in particular how dqn.forward() is used with observations.