Reinforcement Learning

# -*- coding: utf-8 -*-

import numpy as np
import pandas as pd
import time

np.random.seed(2)

N_STATES = 6    # number of states in the 1-D world
ACTIONS = ['left','right']  # available actions
EPSILON = 0.9   # greedy policy: probability of exploiting the best known action
ALPHA = 0.01     # learning rate
LAMBDA = 0.9    # discount factor for future rewards
MAX_EPISODES = 2   # maximum number of training episodes
FRESH_TIME = 0.3    # refresh interval (seconds) between moves

# Build the Q-table: one row per state, one column per action
def build_q_table(n_states,actions):
    table = pd.DataFrame(
        np.zeros((n_states,len(actions))),
        columns=actions
    )

    # print(table)
    return table
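
# A freshly built table is all zeros; build_q_table(N_STATES, ACTIONS) prints as:
#    left  right
# 0   0.0    0.0
# 1   0.0    0.0
# 2   0.0    0.0
# 3   0.0    0.0
# 4   0.0    0.0
# 5   0.0    0.0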

# Choose an action for the current state (epsilon-greedy)
def choose_action(state,q_table):
    state_actions = q_table.iloc[state,:]
    # explore: random action with probability 1 - EPSILON, or when the state is still unvisited
    if np.random.uniform() > EPSILON or (state_actions == 0).all():
        action_name = np.random.choice(ACTIONS)
    else:
        action_name = state_actions.idxmax()  # exploit: pick the action with the largest Q-value
    return action_name
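
# Epsilon-greedy in a sentence: with EPSILON = 0.9, roughly 90% of choices exploit
# the highest Q-value in the row, ~10% explore at random, and an all-zero
# (unvisited) row also falls back to a random action so early episodes keep exploring.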

# Get the environment's feedback for taking action A in state S
# S: current state
# A: chosen action
# Returns the next state S_ and the reward R
def get_env_feedback(S,A):
    if A == 'right':
        if S == N_STATES - 2:
            S_ = 'terminal'
            R = 1
        else:
            S_ = S + 1
            R = 0
    else:
        R = 0
        if S == 0:
            S_ = S
        else:
            S_ = S - 1
    return S_,R
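
# Environment sketch: the world is rendered as '-----T'; the agent starts at S = 0
# and only the move (S = N_STATES - 2, 'right') reaches 'terminal' with R = 1.
# Every other transition pays R = 0, and moving left at S = 0 stays in place.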

# Render the environment (the 1-D world) in the console
def update_env(S,episode,step_counter):
    env_list = ['-'] * (N_STATES-1)+['T']   # e.g. '-----T', where T is the goal
    if S == 'terminal':
        interaction = 'Episode %s: total_steps = %s' % (episode+1,step_counter)
        print('\r{}'.format(interaction), end='')
        time.sleep(2)
        print('\r                         ', end='')
    else:
        env_list[S] = 'o'   # mark the agent's current position
        interaction = ''.join(env_list)
        print('\r{}'.format(interaction), end='')
        time.sleep(FRESH_TIME)

# Main training loop
def rl():
    q_table = build_q_table(N_STATES,ACTIONS)
    for episode in range(MAX_EPISODES):
        step_counter = 0
        S = 0
        is_terminated = False
        update_env(S,episode,step_counter)
        while not is_terminated:
            A = choose_action(S,q_table)

            S_,R = get_env_feedback(S,A)
            q_predict = q_table.loc[S,A]   # current estimate of Q(S, A)

            if S_ != 'terminal':
                q_target = R + LAMBDA * q_table.iloc[S_,:].max()   # bootstrap from the best action in the next state
                print(S_, q_predict, q_target)
            else:
                q_target = R
                is_terminated = True
            q_table.loc[S,A] += ALPHA * (q_target - q_predict)   # Q-learning update
            '''
                The reward only appears in episode 1 when the agent steps right
                from s4 into the terminal state; the Q-table entry is updated by
                0.01 * (1 - 0) = 0.01.
                In episode 2, choosing right at s3 then picks up that value
                through the discount factor: 0.01 * (0.9 * 0.01).
                Only choosing right at s3 leads to s4, the state next to the
                reward; choosing left at s3 leads to s2, which has nothing to
                propagate yet.
            '''
            if ALPHA * (q_target - q_predict) > 0:
                print(q_table)   # show the table whenever a value actually changed
            S = S_

            update_env(S,episode,step_counter+1)
            step_counter += 1
    return q_table

if __name__ == "__main__":
    q_table = rl()
    print('\r\nQ-table:\n')
    print(q_table)


# Build the Q-table
# Observe the state and choose an action
# Receive the reward from the environment
# Update the Q-table with the rule:  Q(S, A) += ALPHA * (q_target - q_predict)
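
A minimal, self-contained sketch of that update rule (assuming the same ALPHA = 0.01 and LAMBDA = 0.9 as above; the numbers are the ones worked out in the episode comments inside rl()):

# Standalone illustration of Q(S, A) += ALPHA * (q_target - q_predict)
ALPHA, LAMBDA = 0.01, 0.9

# Episode 1, S = 4, A = 'right': the move reaches the terminal state, so
# q_target = R = 1 and the entry grows by 0.01 * (1 - 0) = 0.01.
q_predict, q_target = 0.0, 1.0
print(ALPHA * (q_target - q_predict))    # 0.01

# Episode 2, S = 3, A = 'right': no immediate reward, but the value stored at
# S = 4 is bootstrapped back through the discount factor.
q_next_max = 0.01                        # max Q-value of state 4 after episode 1
q_predict, q_target = 0.0, LAMBDA * q_next_max
print(ALPHA * (q_target - q_predict))    # 0.01 * 0.009 = 9e-05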

References:
https://blog.youkuaiyun.com/hecongqing/article/details/61927615
https://morvanzhou.github.io/tutorials/machine-learning/reinforcement-learning/
