
Commit 6de805d

HW5c fix: use activation functions consistently between the RNN and FC paths
add a comment clarifying activation use
1 parent 0d43fed commit 6de805d

1 file changed (+2, -1 lines)
hw5/meta/train_policy.py (+2, -1)

@@ -64,6 +64,7 @@ def build_rnn(x, h, output_size, scope, n_layers, size, activation=tf.tanh, outp
 
         make MLP layers with `size` number of units
         make the GRU with `output_size` number of units
+        use `activation` as the activation function for both MLP and GRU
 
     arguments:
         (see `build_policy()`)
@@ -99,7 +100,7 @@ def build_policy(x, h, output_size, scope, n_layers, size, gru_size, recurrent=T
     """
     with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
         if recurrent:
-            x, h = build_rnn(x, h, gru_size, scope, n_layers, size, activation=activation, output_activation=output_activation)
+            x, h = build_rnn(x, h, gru_size, scope, n_layers, size, activation=activation, output_activation=activation)
         else:
             x = tf.reshape(x, (-1, x.get_shape()[1]*x.get_shape()[2]))
             x = build_mlp(x, gru_size, scope, n_layers + 1, size, activation=activation, output_activation=activation)
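For context, a minimal sketch of how a build_rnn with consistent activations could be wired, so that the call above with output_activation=activation makes the recurrent branch use the same nonlinearity as the feed-forward branch. The internals shown here (tf.layers.dense for the MLP stack, tf.nn.rnn_cell.GRUCell plus tf.nn.dynamic_rnn for the recurrence) are an illustrative assumption, not the actual hw5 source.

import tensorflow as tf

def build_rnn(x, h, output_size, scope, n_layers, size,
              activation=tf.tanh, output_activation=None):
    # Sketch only: the MLP layers and the GRU both receive the activation
    # that build_policy passes in, matching the intent of this commit.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        for _ in range(n_layers):
            # per-timestep dense layers, all using `activation`
            x = tf.layers.dense(x, size, activation=activation)
        # the GRU uses `output_activation`, which build_policy now sets to
        # the same `activation` as the MLP layers above
        cell = tf.nn.rnn_cell.GRUCell(output_size, activation=output_activation)
        x, h = tf.nn.dynamic_rnn(cell, x, initial_state=h)
    return x, h

Under that wiring, the one-argument change in build_policy is what makes the RNN and FC paths apply the same activation throughout, which is what the commit title describes.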
