@article{oai:oist.repo.nii.ac.jp:00001545,
  author   = {Han, Dongqi and Doya, Kenji and Tani, Jun},
  title    = {Self-organization of action hierarchy and compositionality by reinforcement learning with recurrent neural networks},
  journal  = {Neural Networks},
  volume   = {129},
  pages    = {149--162},
  month    = jun,
  year     = {2020},
  abstract = {Recurrent neural networks (RNNs) for reinforcement learning (RL) have shown distinct advantages, e.g., solving memory-dependent tasks and meta-learning. However, little effort has been spent on improving RNN architectures and on understanding the underlying neural mechanisms for performance gain. In this paper, we propose a novel, multiple-timescale, stochastic RNN for RL. Empirical results show that the network can autonomously learn to abstract sub-goals and can self-develop an action hierarchy using internal dynamics in a challenging continuous control task. Furthermore, we show that the self-developed compositionality of the network enhances faster re-learning when adapting to a new task that is a re-composition of previously learned sub-goals, than when starting from scratch. We also found that improved performance can be achieved when neural activities are subject to stochastic rather than deterministic dynamics.},
}