{"created":"2023-06-26T11:01:35.900245+00:00","id":2339,"links":{},"metadata":{"_buckets":{"deposit":"7887e1ad-e85a-4eec-8b7f-5df7364bb467"},"_deposit":{"created_by":32,"id":"2339","owners":[32],"pid":{"revision_id":0,"type":"depid","value":"2339"},"status":"published"},"_oai":{"id":"oai:oist.repo.nii.ac.jp:00002339","sets":["6:26"]},"author_link":["15468","15467"],"item_10001_biblio_info_7":{"attribute_name":"Bibliographic Information","attribute_value_mlt":[{"bibliographicIssueDates":{"bibliographicIssueDate":"2021-08-20","bibliographicIssueDateType":"Issued"},"bibliographicPageEnd":"153","bibliographicPageStart":"138","bibliographicVolumeNumber":"144","bibliographic_titles":[{},{"bibliographic_title":"Neural Networks","bibliographic_titleLang":"en"}]}]},"item_10001_creator_3":{"attribute_name":"Author","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Uchibe, Eiji"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Doya, Kenji"}],"nameIdentifiers":[{}]}]},"item_10001_description_5":{"attribute_name":"Abstract","attribute_value_mlt":[{"subitem_description":"This paper proposes model-free imitation learning named Entropy-Regularized Imitation Learning (ERIL) that minimizes the reverse Kullback–Leibler (KL) divergence. ERIL combines forward and inverse reinforcement learning (RL) under the framework of an entropy-regularized Markov decision process. An inverse RL step computes the log-ratio between two distributions by evaluating two binary discriminators. The first discriminator distinguishes the state generated by the forward RL step from the expert’s state. The second discriminator, which is structured by the theory of entropy regularization, distinguishes the state–action–next-state tuples generated by the learner from the expert ones. One notable feature is that the second discriminator shares hyperparameters with the forward RL, which can be used to control the discriminator’s ability. A forward RL step minimizes the reverse KL estimated by the inverse RL step. We show that minimizing the reverse KL divergence is equivalent to finding an optimal policy. Our experimental results on MuJoCo-simulated environments and vision-based reaching tasks with a robotic arm show that ERIL is more sample-efficient than the baseline methods. 
We apply the method to human behaviors that perform a pole-balancing task and describe how the estimated reward functions show how every subject achieves her goal.","subitem_description_type":"Other"}]},"item_10001_publisher_8":{"attribute_name":"Publisher","attribute_value_mlt":[{"subitem_publisher":"Elsevier Ltd"}]},"item_10001_relation_13":{"attribute_name":"PubMedNo.","attribute_value_mlt":[{"subitem_relation_type":"isIdenticalTo","subitem_relation_type_id":{"subitem_relation_type_id_text":"info:pmid/34492548","subitem_relation_type_select":"PMID"}}]},"item_10001_relation_14":{"attribute_name":"DOI","attribute_value_mlt":[{"subitem_relation_type":"isIdenticalTo","subitem_relation_type_id":{"subitem_relation_type_id_text":"info:doi/10.1016/j.neunet.2021.08.017","subitem_relation_type_select":"DOI"}}]},"item_10001_relation_17":{"attribute_name":"Related site","attribute_value_mlt":[{"subitem_relation_type_id":{"subitem_relation_type_id_text":"https://www.sciencedirect.com/science/article/pii/S0893608021003221?via%3Dihub","subitem_relation_type_select":"URI"}}]},"item_10001_rights_15":{"attribute_name":"Rights","attribute_value_mlt":[{"subitem_rights":"© 2021 The Author(s)."}]},"item_10001_source_id_9":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"0893-6080","subitem_source_identifier_type":"ISSN"}]},"item_10001_version_type_20":{"attribute_name":"Author's flag","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_970fb48d4fbd8a85","subitem_version_type":"VoR"}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2021-11-19"}],"displaytype":"detail","filename":"1-s2.0-S0893608021003221-main.pdf","filesize":[{"value":"2.1 MB"}],"format":"application/pdf","license_note":"Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (https://creativecommons.org/licenses/by-nc-nd/4.0/)","licensetype":"license_note","mimetype":"application/pdf","url":{"label":"1-s2.0-S0893608021003221-main","url":"https://oist.repo.nii.ac.jp/record/2339/files/1-s2.0-S0893608021003221-main.pdf"},"version_id":"13ebbd7c-4154-475c-b267-cd374f8645a5"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"Reinforcement learning","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"Inverse reinforcement learning","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"Imitation learning","subitem_subject_language":"en","subitem_subject_scheme":"Other"},{"subitem_subject":"Entropy regularization","subitem_subject_language":"en","subitem_subject_scheme":"Other"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"journal article","resourceuri":"http://purl.org/coar/resource_type/c_6501"}]},"item_title":"Forward and inverse reinforcement learning sharing network weights and hyperparameters","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Forward and inverse reinforcement learning sharing network weights and hyperparameters","subitem_title_language":"en"}]},"item_type_id":"10001","owner":"32","path":["26"],"pubdate":{"attribute_name":"公開日","attribute_value":"2021-10-19"},"publish_date":"2021-10-19","publish_status":"0","recid":"2339","relation_version_is_last":true,"title":["Forward and 
inverse reinforcement learning sharing network weights and hyperparameters"],"weko_creator_id":"32","weko_shared_id":32},"updated":"2023-06-26T11:32:32.208561+00:00"}
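
As an illustrative aside to the abstract above: the inverse RL step is described as recovering a log-ratio between the expert's and the learner's distributions from binary discriminators. Below is a minimal sketch of that density-ratio trick, not the authors' implementation; the 1-D Gaussian stand-ins for the expert and learner distributions and the use of scikit-learn's LogisticRegression are assumptions made purely for illustration.

# Minimal sketch (assumed setup, not the paper's code): a binary discriminator
# trained to tell "expert" samples from "learner" samples yields an estimate of
# log(p_expert / p_learner) through its logit.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)

# Toy 1-D Gaussian stand-ins for the expert and learner state distributions.
expert_samples = rng.normal(loc=1.0, scale=1.0, size=(5000, 1))
learner_samples = rng.normal(loc=0.0, scale=1.0, size=(5000, 1))

# Train the discriminator: label 1 = expert, 0 = learner (balanced classes).
features = np.vstack([expert_samples, learner_samples])
labels = np.concatenate([np.ones(5000), np.zeros(5000)])
discriminator = LogisticRegression().fit(features, labels)

# With balanced classes, the Bayes-optimal logit equals
# log p_expert(x) - log p_learner(x), so decision_function() returns a
# log-ratio estimate at any query point.
queries = np.linspace(-3.0, 4.0, 8).reshape(-1, 1)
estimated_log_ratio = discriminator.decision_function(queries)
analytic_log_ratio = queries.ravel() - 0.5  # log N(x;1,1) - log N(x;0,1) = x - 0.5

for x, est, exact in zip(queries.ravel(), estimated_log_ratio, analytic_log_ratio):
    print(f"x = {x:+.2f}   estimated log-ratio = {est:+.2f}   analytic = {exact:+.2f}")

Maximizing the expectation of such a log-ratio under the learner's own distribution is the same as minimizing the reverse KL divergence, which is the quantity the abstract says the forward RL step minimizes using the inverse RL step's estimate.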