@article{oai:oist.repo.nii.ac.jp:00000680,
  author  = {Hwang, Jungsik and Kim, Jinhyung and Ahmadi, Ahmadreza and Choi, Minkyu and Tani, Jun},
  title   = {Dealing With Large-Scale Spatio-Temporal Patterns in Imitative Interaction Between a Robot and a Human by Using the Predictive Coding Framework},
  journal = {IEEE Transactions on Systems, Man, and Cybernetics: Systems},
  year    = {2020},
  volume  = {50},
  number  = {5},
  pages   = {1918--1931},
  month   = {Apr},
  note    = {This paper aims to investigate how adequate cognitive functions for recognizing, predicting, and generating a variety of actions can be developed through iterative learning of action-caused dynamic perceptual patterns. Particularly, we examined the capabilities of mental simulation of one's own actions as well as the inference of others' intention because they play a crucial role, especially in social cognition. We propose a dynamic neural network model based on predictive coding which can generate and recognize dynamic visuo-proprioceptive patterns. The proposed model was examined by conducting a set of robotic simulation experiments in which a robot was trained to imitate visually perceived gesture patterns of human subjects in a simulation environment. The experimental results showed that the proposed model was able to develop a predictive model of imitative interaction through iterative learning of large-scale spatio-temporal patterns in visuo-proprioceptive input streams. Also, the experiment verified that the model was able to generate mental imagery of dynamic visuo-proprioceptive patterns without feeding the external inputs. Furthermore, the model was able to recognize the intention of others by minimizing prediction error in the observations of the others' action patterns in an online manner. These findings suggest that the error minimization principle in predictive coding could provide a primal account for the mirror neuron functions for generating actions as well as recognizing those generated by others in a social cognitive context.}
}