CartPole is also known as the inverted pendulum problem. A pole is mounted on a cart, and gravity makes the pole fall over. To keep the pole upright, we move the cart left or right. At every time step the model's input is a 4-dimensional vector describing the current state of the cart and the pole, and the model's output is a signal that pushes the cart to the left or to the right. As long as the pole has not fallen, the environment gives a reward of 1 per time step; once the pole falls, no further reward is given and the episode ends. This chapter introduces DQN, the classic algorithm for this problem, implemented with Baidu PaddlePaddle's PARL library.
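To make the observation, action, and reward concrete, here is a minimal sketch of interacting with CartPole-v0 through gym, assuming the classic gym API (env.step returning four values, as used throughout this chapter) and simply taking random actions:

import gym

env = gym.make('CartPole-v0')
obs = env.reset()  # 4-dim state: [cart position, cart velocity, pole angle, pole angular velocity]
total_reward = 0
done = False
while not done:
    action = env.action_space.sample()  # 0 = push the cart left, 1 = push the cart right
    obs, reward, done, info = env.step(action)
    total_reward += reward  # +1 for every step the pole stays upright
print('episode reward:', total_reward)
env.close()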
Preface
The previous chapters covered the table-based SARSA and Q-Learning algorithms, so why introduce DQN? Imagine a real-world problem: the Q table becomes so large that it no longer fits in memory. What then? DQN replaces the Q table with a Q function, and this Q function is what we usually call a neural network. Indeed, a neural network is, in essence, a function. DQN stands for Deep Q-Networks.
Note: in this chapter we use the simplest fully connected network, because the CartPole problem is very simple.
Teaching Environment
This chapter still uses the gym simulator. The original authors of DQN used the ALE simulator; see the References section at the end of this chapter.
The convolutional network used by the original DQN algorithm differs from this example. The original network structure is:
| Layer | w shape | # of w params | b shape | # of b params | Output shape |
|-------|---------|---------------|---------|---------------|--------------|
| conv | (20,4,8,8) | 5120 | (16) | 16 | (4,16,20,20) |
| conv | (32,16,4,4) | 8192 | (32) | 32 | (4,32,8,8) |
| FC | (256,32) | 8192 | 32 | 32 | (4,32) |
The CNN in the DQN paper can have up to 32 action classes, while in practice only 4 to 18 action classes are needed, which is enough for controlling Atari games.
The w data format is (Cout, Cin, Kh, Kw); the output data format is (N, C, H, W). See the earlier tutorials for the meaning of these parameters.
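As a quick sanity check on the table, a convolution layer has Cout × Cin × Kh × Kw weight parameters and a fully connected layer has out_features × in_features; a minimal calculation for the second conv layer and the FC layer above:

conv2_w_params = 32 * 16 * 4 * 4  # = 8192, matches the table
fc_w_params = 256 * 32            # = 8192, matches the table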
DQN – PaddlePaddle Implementation
The DQN algorithm was first published in 2013; Nature DQN refers to the improved version published in 2015. Below is a hand-written implementation of the algorithm (without PARL's built-in algorithm library):
1.Agent
It inherits from the parl.Agent class. Its build_program() method defines two fluid.Program instances. PaddlePaddle operators are added to the default program unless told otherwise, so pred_program and learn_program each get their own, separate computation graph.
import numpy as np
import paddle.fluid as fluid
import parl
from parl import layers


class Agent(parl.Agent):
    def __init__(self,
                 algorithm,
                 obs_dim,
                 act_dim,
                 e_greed=0.1,
                 e_greed_decrement=0):
        ...

    def build_program(self):
        self.pred_program = fluid.Program()
        self.learn_program = fluid.Program()

        with fluid.program_guard(self.pred_program):  # build the graph used to predict actions; define input/output variables
            obs = layers.data(
                name='obs', shape=[self.obs_dim], dtype='float32')
            self.value = self.alg.predict(obs)

        with fluid.program_guard(self.learn_program):  # build the graph used to update the Q network; define input/output variables
            obs = layers.data(
                name='obs', shape=[self.obs_dim], dtype='float32')
            action = layers.data(name='act', shape=[1], dtype='int32')
            reward = layers.data(name='reward', shape=[], dtype='float32')
            next_obs = layers.data(
                name='next_obs', shape=[self.obs_dim], dtype='float32')
            terminal = layers.data(name='terminal', shape=[], dtype='bool')
            self.cost = self.alg.learn(obs, action, reward, next_obs, terminal)

    def sample(self, obs):
        ...

    def predict(self, obs):  # choose the best action
        ...

    def learn(self, obs, act, reward, next_obs, terminal):
        ...
The sample function:
def sample(self, obs):
    sample = np.random.rand()  # random float in [0, 1)
    if sample < self.e_greed:
        act = np.random.randint(self.act_dim)  # explore: every action has some probability of being chosen
    else:
        act = self.predict(obs)  # choose the best action
    self.e_greed = max(
        0.01, self.e_greed - self.e_greed_decrement)  # gradually reduce exploration as training converges
    return act
The learn function:
def learn(self, obs, act, reward, next_obs, terminal):
    # sync the parameters of model and target_model every 200 training steps
    if self.global_step % self.update_target_steps == 0:
        self.alg.sync_target()
    self.global_step += 1

    act = np.expand_dims(act, -1)
    feed = {
        'obs': obs.astype('float32'),
        'act': act.astype('int32'),
        'reward': reward,
        'next_obs': next_obs.astype('float32'),
        'terminal': terminal
    }
    cost = self.fluid_executor.run(
        self.learn_program, feed=feed, fetch_list=[self.cost])[0]  # run one training step
    return cost
Note the line self.alg.sync_target(): every 200 steps it updates the target_Q network (i.e., copies the parameters of the training Q network into target_Q). This is the key innovation of Nature DQN (2015) over the original DQN (2013): target_Q is temporarily frozen and only refreshed every fixed number of steps. Because the Q network is updated at every training step, consecutive updates to its parameters are strongly correlated, and freezing the target network reduces this correlation.
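In equation form (standard DQN notation, matching what the Algorithm's learn function below computes, averaged over a batch):

$$ y = r + (1 - \text{terminal}) \cdot \gamma \max_{a'} Q(s', a'; \theta^{-}), \qquad L(\theta) = \big(y - Q(s, a; \theta)\big)^{2} $$

where θ are the parameters of the training Q network and θ⁻ are the frozen parameters of target_Q, refreshed from θ every update_target_steps steps.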
You can change the parameter update_target_steps from 200 to 20; the algorithm then converges faster, but each episode runs a little slower.
The predict function:
def predict(self, obs):  # choose the best action
    obs = np.expand_dims(obs, axis=0)
    pred_Q = self.fluid_executor.run(
        self.pred_program,
        feed={'obs': obs.astype('float32')},
        fetch_list=[self.value])[0]
    pred_Q = np.squeeze(pred_Q, axis=0)
    act = np.argmax(pred_Q)  # index of the largest Q value, i.e. the corresponding action
    return act
2.Algorithm
import copy
import paddle.fluid as fluid
import parl
from parl import layers


class DQN(parl.Algorithm):
    def __init__(self, model, act_dim=None, gamma=None, lr=None):
        """ DQN algorithm

        Args:
            model (parl.Model): forward network defining the Q function
            act_dim (int): dimension of the action space, i.e. the number of actions
            gamma (float): discount factor for rewards
            lr (float): learning rate
        """
        self.model = model
        self.target_model = copy.deepcopy(model)

        assert isinstance(act_dim, int)
        assert isinstance(gamma, float)
        assert isinstance(lr, float)
        self.act_dim = act_dim
        self.gamma = gamma
        self.lr = lr

    def predict(self, obs):
        """ Use self.model's value network to get [Q(s,a1), Q(s,a2), ...]
        """
        return self.model.value(obs)

    def learn(self, obs, action, reward, next_obs, terminal):
        ...

    def sync_target(self):
        """ Copy the parameter values of self.model into self.target_model
        """
        self.model.sync_weights_to(self.target_model)
The learn function in Algorithm:
def learn(self, obs, action, reward, next_obs, terminal):
    """ Update self.model's value network with the DQN algorithm
    """
    # get max Q' from target_model, used to compute target_Q
    next_pred_value = self.target_model.value(next_obs)
    best_v = layers.reduce_max(next_pred_value, dim=1)
    best_v.stop_gradient = True  # block gradient flow through the target
    terminal = layers.cast(terminal, dtype='float32')
    target = reward + (1.0 - terminal) * self.gamma * best_v

    pred_value = self.model.value(obs)  # predicted Q values
    # convert action to a one-hot vector, e.g. 3 => [0,0,0,1,0]
    action_onehot = layers.one_hot(action, self.act_dim)
    action_onehot = layers.cast(action_onehot, dtype='float32')
    # the next lines do an element-wise multiply to pick out Q(s,a) for the chosen action
    # e.g. pred_value = [[2.3, 5.7, 1.2, 3.9, 1.4]], action_onehot = [[0,0,0,1,0]]
    #  ==> pred_action_value = [[3.9]]
    pred_action_value = layers.reduce_sum(
        layers.elementwise_mul(action_onehot, pred_value), dim=1)

    # the loss is the mean squared error between Q(s,a) and target_Q
    cost = layers.square_error_cost(pred_action_value, target)
    cost = layers.reduce_mean(cost)
    optimizer = fluid.optimizer.Adam(learning_rate=self.lr)  # Adam optimizer
    optimizer.minimize(cost)
    return cost
3.Model
Define the Q function (a three-layer fully connected network):
import parl
from parl import layers  # wraps the paddle.fluid.layers API


class Model(parl.Model):
    def __init__(self, act_dim):
        hid1_size = 128
        hid2_size = 128
        # 3-layer fully connected network
        self.fc1 = layers.fc(size=hid1_size, act='relu')
        self.fc2 = layers.fc(size=hid2_size, act='relu')
        self.fc3 = layers.fc(size=act_dim, act=None)

    def value(self, obs):
        h1 = self.fc1(obs)
        h2 = self.fc2(h1)
        Q = self.fc3(h2)
        return Q
- Inherit from the parl.Model class.
- Declare the layers to be used in the constructor __init__.
- Build the network in the forward function (named value here).
4. replay_memory
We already have the classic Agent -> Algorithm -> Model structure, so why does a replay_memory show up?
Experience replay is one of DQN's key innovations. It mainly addresses the correlation between consecutive transitions: by sampling from experiences collected over many steps, it helps avoid local optima and even divergence.
import random
import collections
import numpy as np


class ReplayMemory(object):
    def __init__(self, max_size):
        self.buffer = collections.deque(maxlen=max_size)

    def append(self, exp):
        self.buffer.append(exp)

    def sample(self, batch_size):
        ...

    def __len__(self):
        return len(self.buffer)
Definition of the sample function in ReplayMemory:
def sample(self, batch_size):
    mini_batch = random.sample(self.buffer, batch_size)  # random sampling smooths out the correlation between consecutive transitions
    obs_batch, action_batch, reward_batch, next_obs_batch, done_batch = [], [], [], [], []

    for experience in mini_batch:
        s, a, r, s_p, done = experience
        obs_batch.append(s)
        action_batch.append(a)
        reward_batch.append(r)
        next_obs_batch.append(s_p)
        done_batch.append(done)

    return np.array(obs_batch).astype('float32'), \
        np.array(action_batch).astype('float32'), np.array(reward_batch).astype('float32'), \
        np.array(next_obs_batch).astype('float32'), np.array(done_batch).astype('float32')
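A quick usage sketch (obs, action, reward, next_obs, done stand in for one transition collected from the environment; the buffer size and batch size are illustrative):

rpm = ReplayMemory(max_size=20000)
rpm.append((obs, action, reward, next_obs, done))  # store one transition
if len(rpm) > 32:
    obs_b, act_b, rew_b, next_obs_b, done_b = rpm.sample(32)  # random mini-batch for one training step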
5. The main function
def main():
    env = gym.make(
        'CartPole-v0'
    )  # CartPole-v0: expected reward > 180; MountainCar-v0: expected reward > -120
    action_dim = env.action_space.n  # CartPole-v0: 2
    obs_shape = env.observation_space.shape  # CartPole-v0: (4,)

    rpm = ReplayMemory(MEMORY_SIZE)  # DQN's experience replay buffer

    # build the agent with the parl framework
    model = Model(act_dim=action_dim)
    algorithm = DQN(model, act_dim=action_dim, gamma=GAMMA, lr=LEARNING_RATE)
    agent = Agent(
        algorithm,
        obs_dim=obs_shape[0],
        act_dim=action_dim,
        e_greed=0.1,  # explore: take a random action with some probability
        e_greed_decrement=1e-6)  # gradually reduce exploration as training converges

    # pre-fill the replay buffer so the first training batches are diverse enough
    while len(rpm) < MEMORY_WARMUP_SIZE:
        run_episode(env, agent, rpm)
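main relies on run_episode, which is not listed in this chapter. Below is a sketch of one plausible implementation, modelled on the PARL CartPole example; the constants LEARN_FREQ and BATCH_SIZE and the exact warm-up condition are assumptions, not the original code:

def run_episode(env, agent, rpm):
    # play one episode, store every transition in rpm, and train periodically
    total_reward = 0
    obs = env.reset()
    step = 0
    while True:
        step += 1
        action = agent.sample(obs)  # epsilon-greedy action
        next_obs, reward, done, _ = env.step(action)
        rpm.append((obs, action, reward, next_obs, done))

        # train only after the warm-up phase, every LEARN_FREQ steps (assumed constants)
        if len(rpm) > MEMORY_WARMUP_SIZE and step % LEARN_FREQ == 0:
            (batch_obs, batch_action, batch_reward, batch_next_obs,
             batch_done) = rpm.sample(BATCH_SIZE)
            agent.learn(batch_obs, batch_action, batch_reward,
                        batch_next_obs, batch_done)

        total_reward += reward
        obs = next_obs
        if done:
            break
    return total_reward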
Simplifying the Code with the PARL Library
With PARL's built-in algorithm library, we can drop our own implementations of the Algorithm class and the ReplayMemory class:
def main():
    env = gym.make('CartPole-v0')
    action_dim = env.action_space.n
    obs_shape = env.observation_space.shape

    rpm = ReplayMemory(MEMORY_SIZE)

    model = CartpoleModel(act_dim=action_dim)
    # use the built-in DQN algorithm from parl.algorithms, replacing our own Algorithm class
    algorithm = parl.algorithms.DQN(
        model, act_dim=action_dim, gamma=GAMMA, lr=LEARNING_RATE)
    agent = CartpoleAgent(
        algorithm,
        obs_dim=obs_shape[0],
        act_dim=action_dim,
        e_greed=0.1,  # explore
        e_greed_decrement=1e-6
    )  # probability of exploring is decreasing during training
View the results
Sample code
Nature DQN – Keras Implementation
Keras with TensorFlow 2 can also implement DQN (2015) with a similar code structure, as follows:
1.Network
Similar to the Model in PARL.
from keras.models import Model
from keras.layers import Input, Dense


class Network:
    def __init__(self):
        """Basic network structure.
        """
        inputs = Input(shape=(4,))
        x = Dense(16, activation='relu')(inputs)
        x = Dense(16, activation='relu')(x)
        x = Dense(2, activation='linear')(x)
        self.model = Model(inputs=inputs, outputs=x)

    def build_model(self):
        return self.model
2.Algorithm
Import the dependencies
import os
# allow duplicate loading of the OpenMP dynamic library
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import gym
import random
import numpy as np
from collections import deque
from network import Network
Initialization
class DQN:
    def __init__(self):
        self.model = Network().build_model()
        self.target_model = Network().build_model()
        self.update_target_model()

        if os.path.exists('dqn.h5'):
            self.model.load_weights('dqn.h5')

        # experience replay buffer
        self.memory_buffer = deque(maxlen=2000)
        # discount rate applied to Q_value when computing discounted future rewards
        self.gamma = 0.95
        # probability of taking a random action in epsilon-greedy exploration
        self.epsilon = 1.0
        # decay rate of the above parameter
        self.epsilon_decay = 0.995
        # minimum exploration probability
        self.epsilon_min = 0.01

        self.env = gym.make('CartPole-v0')

    def update_target_model(self):
        ...

    def sample(self, state):
        ...

    def remember(self, state, action, reward, next_state, done):
        ...

    def update_epsilon(self):
        ...

    def process_batch(self, batch):
        ...
The sample function: chooses an action with ε-greedy.
The remember function: stores a transition in the experience replay buffer.
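The bodies of these methods, along with update_target_model, update_epsilon and process_batch, are not listed in this chapter. The sketch below shows one way they might look, consistent with how the training loop further down calls them; treat it as an illustration under those assumptions, not the original code:

def update_target_model(self):
    # copy the online network's weights into the target network
    self.target_model.set_weights(self.model.get_weights())

def sample(self, state):
    # epsilon-greedy: random action with probability epsilon, otherwise the greedy action
    if np.random.rand() <= self.epsilon:
        return random.randint(0, 1)
    q_values = self.model.predict(state)[0]
    return int(np.argmax(q_values))

def remember(self, state, action, reward, next_state, done):
    # store one transition in the replay buffer
    self.memory_buffer.append((state, action, reward, next_state, done))

def update_epsilon(self):
    # decay the exploration rate, but never below epsilon_min
    if self.epsilon > self.epsilon_min:
        self.epsilon *= self.epsilon_decay

def process_batch(self, batch):
    # sample a mini-batch and build (X, y) training targets using the target network
    data = random.sample(self.memory_buffer, batch)
    states = np.array([d[0] for d in data])
    next_states = np.array([d[3] for d in data])

    y = self.model.predict(states)                    # current Q estimates
    q_next = self.target_model.predict(next_states)   # target-network estimates

    for i, (_, action, reward, _, done) in enumerate(data):
        target = reward
        if not done:
            target += self.gamma * np.amax(q_next[i])
        y[i][action] = target

    return states, y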
3. The main program
Import the dependencies
import os
# allow duplicate loading of the OpenMP dynamic library
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import gym
import random
import numpy as np
from keras.optimizers import Adam
from algorithm import DQN
Define the training function
def train(env, algorithm, episode, batch):
    """Training.

    Arguments:
        episode: number of episodes to play
        batch: batch size

    Returns:
        history: training history
    """
    algorithm.model.compile(loss='mse', optimizer=Adam(1e-3))
    history = {'episode': [], 'Episode_reward': [], 'Loss': []}
    count = 0

    for i in range(episode):
        observation = env.reset()
        reward_sum = 0
        loss = np.infty
        done = False

        while not done:
            # choose an action with epsilon-greedy
            x = observation.reshape(-1, 4)
            action = algorithm.sample(x)
            observation, reward, done, _ = env.step(action)

            # add the transition to the replay buffer
            reward_sum += reward
            algorithm.remember(x[0], action, reward, observation, done)

            if len(algorithm.memory_buffer) > batch:
                # train on one mini-batch
                X, y = algorithm.process_batch(batch)
                loss = algorithm.model.train_on_batch(X, y)
                count += 1
                # decay the epsilon of epsilon-greedy
                algorithm.update_epsilon()
                # update target_model every fixed number of training steps
                if count != 0 and count % 20 == 0:
                    algorithm.update_target_model()

        if reward_sum == 200:
            break

        if i % 5 == 0:
            history['episode'].append(i)
            history['Episode_reward'].append(reward_sum)
            history['Loss'].append(loss)
            print('Episode: {} | Episode reward: {} | loss: {:.3f} | e:{:.2f}'.format(
                i, reward_sum, loss, algorithm.epsilon))

    algorithm.model.save_weights('dqn.h5')

    return history
Define the test function
def test(env, model):
    """Play the game with the trained model.
    """
    observation = env.reset()

    count = 0
    reward_sum = 0
    random_episodes = 0

    while random_episodes < 5:
        env.render()

        x = observation.reshape(-1, 4)
        q_values = model.predict(x)[0]
        action = np.argmax(q_values)
        observation, reward, done, _ = env.step(action)

        count += 1
        reward_sum += reward

        if done:
            print("Reward for this episode was: {}, turns was: {}".format(reward_sum, count))
            random_episodes += 1
            reward_sum = 0
            count = 0
            observation = env.reset()

    env.close()
The main function
if __name__ == '__main__':
    env = gym.make('CartPole-v0')
    algorithm = DQN()
    history = train(env, algorithm, 600, 32)
    test(env, algorithm.model)
View the results
Sample code
References
The DQN paper uses the Arcade-Learning-Environment (ALE) to test Atari games.