How to accumulate and apply gradients for Async n-step DQNetwork updates in Tensorflow?

I am trying to implement Asynchronous Methods for Deep Reinforcement Learning, and one of the steps requires accumulating the gradient over several steps and then applying it. What is the best way to achieve this in tensorflow? I got as far as accumulating the gradient, but I don't think this is the fastest way (lots of transfers from tensorflow to python and back). Any suggestions are welcome. This is my code for a toy NN; it does not model or compute anything, it just exercises the operations that I intend to use.
import tensorflow as tf
import numpy as np

from model import *


graph = tf.Graph()

with graph.as_default():

    state = tf.placeholder(tf.float32, shape=[None, 80,80,1])

    with tf.variable_scope('layer1'):
        W = weight_variable([8, 8, 1, 32])
        variable_summaries(W, "layer1/W")
        b = bias_variable([32])
        variable_summaries(b, "layer1/b")
        h = conv2d(state, W, 4) + b
        activation = tf.nn.relu(h)
        pool1 = max_pool_2x2(activation)

    print(pool1.get_shape())
    pool1 = tf.reshape(pool1, [-1, 3200])

    with tf.variable_scope('readout'):
        W = weight_variable([3200, 3])
        b = bias_variable([3])
        logits = tf.matmul(pool1, W) + b
        variable_summaries(logits, "y")

    action_indexes = tf.placeholder(tf.int32, shape=[None], name="action_indexes")

    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, action_indexes)

    starter_learning_rate = 1e-6

    global_step = tf.Variable(0, trainable=False)

    # decay every 10000 steps with a base of 0.96:
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
        global_step,
        10000, 0.96, staircase=True)

    optimizer = tf.train.RMSPropOptimizer(learning_rate)

    gradients_and_variables = optimizer.compute_gradients(loss, tf.trainable_variables())

    discounted_values = tf.placeholder(tf.float32, shape=[None, 1])

with tf.Session(graph=graph) as s:

    for v in tf.trainable_variables():
        print(v.name, v.dtype, v.get_shape())

    s.run(tf.initialize_all_variables())

    feed_dict= {
        state : np.zeros([1, 80, 80, 1]),
        action_indexes: [1],
    }


    var_to_grad = dict((var.name, grad) for grad, var in gradients_and_variables)
    keys = sorted(var_to_grad.keys())
    print(keys)

    name_to_var = dict((var.name, var) for _, var in gradients_and_variables)

    # accumulate the fetched gradient values on the Python side, in a
    # separate dict so the gradient tensors themselves stay untouched
    accumulated = dict((k, 0.0) for k in keys)

    for i in range(10):

        gradients = s.run([var_to_grad[k] for k in keys], feed_dict=feed_dict)

        for k, v in zip(keys, gradients):
            accumulated[k] += v

    for k in keys:
        print(accumulated[k])

    # the accumulated arrays are shipped back into the graph as constants;
    # this round trip is exactly the traffic the question is about
    s.run(optimizer.apply_gradients(
        [(tf.constant(accumulated[k]), name_to_var[k]) for k in keys],
        global_step=global_step))

Updated code after @yaroslave's suggestion:

import tensorflow as tf
import numpy as np

from model import *


graph = tf.Graph()

with graph.as_default():

    minibatch = 32
    state = tf.placeholder(tf.float32, shape=[minibatch, 80,80,1], name="input")

    with tf.variable_scope('layer1'):
        W = weight_variable([8, 8, 1, 32])
        variable_summaries(W, "layer1/W")
        b = bias_variable([32])
        variable_summaries(b, "layer1/b")
        h = conv2d(state, W, 4) + b
        activation = tf.nn.relu(h)
        pool1 = max_pool_2x2(activation)

    print(pool1.get_shape())
    pool1 = tf.reshape(pool1, [-1, 3200])

    with tf.variable_scope('readout'):
        W = weight_variable([3200, 3])
        b = bias_variable([3])
        logits = tf.matmul(pool1, W) + b
        variable_summaries(logits, "y")

    action_indexes = tf.placeholder(tf.int32, shape=[minibatch], name="action_indexes")

    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, action_indexes)

    starter_learning_rate = 1e-6

    global_step = tf.Variable(0, trainable=False)

    # decay every 10000 steps with a base of 0.96:
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
        global_step,
        10000, 0.96, staircase=True)

    optimizer = tf.train.RMSPropOptimizer(learning_rate)

    trainable_variables = tf.trainable_variables()
    varname_to_var = dict( (v.name, v) for v in trainable_variables )
    keys = sorted(varname_to_var.keys())

    gradients_and_variables = optimizer.compute_gradients(loss, [ varname_to_var[k] for k in keys])

    var_to_grad = dict((var.name, grad) for grad, var in gradients_and_variables)

    name_to_var = dict((var.name, var) for _, var in gradients_and_variables)

    # save the gradients in memory: fetching var_to_ref_grad[k] runs the
    # assign_add, i.e. it adds the current gradient into the accumulator
    var_to_ref_grad = {}
    for k in keys:
        grad = var_to_grad[k]
        print(k, grad.get_shape())
        ref = tf.Variable(tf.zeros_like(grad))
        ref = ref.assign_add(grad)
        var_to_ref_grad[k] = ref

    discounted_values = tf.placeholder(tf.float32, shape=[None, 1], name='discounted_values')

    # control when to apply gradients
    compute_gradients_flag = tf.placeholder(tf.int32, name="compute_gradients")

    def fn1():
        var_grad_list = []
        for k in keys:
            grad = var_to_ref_grad[k]
            var  = varname_to_var[k]
            var_grad_list.append((grad, var))

        # the branch output must depend on the apply op, otherwise the
        # session prunes it and the update never runs; both cond branches
        # also have to return tensors of matching type
        apply_op = optimizer.apply_gradients(var_grad_list, global_step=global_step)
        with tf.control_dependencies([apply_op]):
            return tf.constant(0)

    fn2 = lambda: tf.constant(0)

    last_op = tf.cond(tf.equal(compute_gradients_flag, 1), fn1, fn2)

with tf.Session(graph=graph) as s:

    feed_dict = {
        state: np.zeros([minibatch, 80, 80, 1]),
        action_indexes: np.zeros([minibatch], dtype=np.int32),
        compute_gradients_flag: 0,
    }

    # the accumulators are initialized with zeros_like(grad), so their
    # initializers depend on the forward pass and need the feed
    s.run(tf.initialize_all_variables(), feed_dict=feed_dict)

    for i in range(10):

        # accumulate gradients
        s.run(last_op, feed_dict=feed_dict)

You can keep everything in TF by saving the gradients into variables with "assign" ops instead of fetching their values, and using "assign_add" to accumulate. - Yaroslav Bulatov
@YaroslavBulatov, do you think TF will add more interfaces to support this kind of thing? It would be great if we could also use TF for reinforcement learning. - Sung Kim
@SungKim, an imperative execution interface is [in the works](https://github.com/tensorflow/tensorflow/pull/2595), so you will be able to use standard Python constructs while keeping the data on the GPU. - Yaroslav Bulatov
@YaroslavBulatov, I updated the code following your suggestion, but interestingly tf.initialize_all_variables() needs to run ops in the graph in order to initialize the variables. Maybe I am doing it wrong and shouldn't use initialize_all_variables, just initialize manually instead. - fabrizioM
Hmm, I don't see what the problem is: "initialize_all_variables" should only run the initializers that set the variables' initial values. What did it run that it shouldn't have? - Yaroslav Bulatov
2 Answers


You don't have to accumulate the gradients manually. You can have Tensorflow accumulate them for you by applying the rollout update as a single batch.

s_list = list_of_states_visited
a_list = list_of_actions_taken
R_list = list_of_value_targets

# a single sess.run computes and applies the update over the whole rollout batch
sess.run(local_net.update, feed_dict={
    local_net.input: s_list,
    local_net.a: a_list,
    local_net.R: R_list
})
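
For context, here is a sketch of how those three lists might be collected and turned into n-step value targets before the single sess.run above; choose_action, estimate_value, env_step, gamma and n_steps are hypothetical stand-ins, not part of this answer:

import numpy as np

# hypothetical stand-ins so the sketch runs; a real agent would use its
# environment and value network here
n_steps, gamma = 5, 0.99
def choose_action(s): return np.random.randint(3)
def estimate_value(s): return 0.0
def env_step(a): return np.zeros([80, 80, 1]), 1.0, False

s_list, a_list, R_list = [], [], []
s = np.zeros([80, 80, 1])
done = False
for _ in range(n_steps):
    a = choose_action(s)
    s_next, r, done = env_step(a)
    s_list.append(s)
    a_list.append(a)
    R_list.append(r)  # raw rewards for now
    s = s_next
    if done:
        break

# convert the raw rewards into n-step discounted returns, bootstrapping
# from the value estimate of the last state if the episode did not finish
R = 0.0 if done else estimate_value(s)
for i in reversed(range(len(R_list))):
    R = R_list[i] + gamma * R
    R_list[i] = R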


Ops for creating the accumulated gradients, resetting them, and applying them could look something like this (untested!):

def build_gradient_accumulators(optimizer, gradients_and_variables):
    accum_grads_and_vars = []
    accumulators = []
    resetters = []

    for grad, var in gradients_and_variables:
        # initialize from the static shape so the initializer does not
        # depend on the gradient computation itself
        accum = tf.Variable(tf.zeros(grad.get_shape()), trainable=False)
        # keep `accum` bound to the Variable; collect the ops separately
        accumulators.append(accum.assign_add(grad))
        accum_grads_and_vars.append((accum, var))
        resetters.append(accum.assign(tf.zeros_like(accum)))

    reset_op = tf.group(*resetters)
    accum_op = tf.group(*accumulators)
    apply_op = optimizer.apply_gradients(accum_grads_and_vars)
    return reset_op, accum_op, apply_op
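
A sketch of how these three ops might be wired into the n-step loop from the question, reusing the optimizer, loss and feed_dict defined there:

# built inside the same `with graph.as_default():` block as the model
grads_and_vars = optimizer.compute_gradients(loss)
reset_op, accum_op, apply_op = build_gradient_accumulators(
    optimizer, grads_and_vars)

with tf.Session(graph=graph) as s:
    s.run(tf.initialize_all_variables())

    s.run(reset_op)                           # zero the accumulators
    for i in range(10):
        s.run(accum_op, feed_dict=feed_dict)  # accumulate inside the graph
    s.run(apply_op)                           # one weight update, no fetches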
