@conference {cFojo, title = {Comparing Fixed and Adaptive Computation Time for Recurrent Neural Networks}, booktitle = {International Conference on Learning Representations (ICLR)}, year = {2018}, month = {02/2018}, address = {Vancouver, Canada}, abstract = {

Deep networks commonly perform better than shallow ones, but allocating the proper amount of computation for each particular input sample remains an open problem. This issue is particularly challenging in sequential tasks, where the required complexity may vary for different tokens in the input sequence. Adaptive Computation Time (ACT) was proposed as a method for dynamically adapting the computation at each step for Recurrent Neural Networks (RNN). ACT introduces two main modifications to the regular RNN formulation: (1) more than one RNN step may be executed between the moment an input sample is fed to the layer and the moment this layer generates an output, and (2) this number of steps is dynamically predicted depending on the input token and the hidden state of the network. In our work, we aim to gain intuition about the contribution of these two factors to the overall performance boost observed when augmenting RNNs with ACT. We design a new baseline, Repeat-RNN, which performs a constant number of RNN state updates larger than one before generating an output. Surprisingly, such a uniform distribution of the computational resources matches the performance of ACT in the studied tasks. We hope that this finding motivates new research efforts towards designing RNN architectures that are able to dynamically allocate computational resources.

Reproducing and Analyzing Adaptive Computation Time in PyTorch and TensorFlow, from Universitat Polit{\`e}cnica de Catalunya.
}, author = {Fojo, Daniel and Campos, V{\'\i}ctor and Gir{\'o}-i-Nieto, Xavier} }
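
Note: the abstract describes the Repeat-RNN baseline as an RNN layer that applies a fixed number (greater than one) of state updates per input token before emitting an output, in contrast to ACT, which predicts that number dynamically. The following is a minimal illustrative sketch of that idea in PyTorch; it is not the authors' released code, and the names RepeatRNN and n_repeats are assumptions made for this example.

# Minimal sketch of the Repeat-RNN idea: each input token is fed through the
# recurrent cell a fixed number of times before the layer emits an output.
# Illustrative only; not the authors' implementation.
import torch
import torch.nn as nn

class RepeatRNN(nn.Module):
    def __init__(self, input_size, hidden_size, n_repeats=2):
        super().__init__()
        self.cell = nn.GRUCell(input_size, hidden_size)
        self.n_repeats = n_repeats  # fixed number of state updates per token

    def forward(self, x, h=None):
        # x: (seq_len, batch, input_size)
        seq_len, batch, _ = x.shape
        if h is None:
            h = x.new_zeros(batch, self.cell.hidden_size)
        outputs = []
        for t in range(seq_len):
            # Repeat the same recurrent update n_repeats times on the same
            # token, instead of predicting the number of steps as ACT does.
            for _ in range(self.n_repeats):
                h = self.cell(x[t], h)
            outputs.append(h)
        return torch.stack(outputs), h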