{"created":"2023-06-26T11:01:38.681937+00:00","id":2393,"links":{},"metadata":{"_buckets":{"deposit":"be89fc03-fee3-4185-a09b-9607928cc8db"},"_deposit":{"created_by":31,"id":"2393","owners":[31],"pid":{"revision_id":0,"type":"depid","value":"2393"},"status":"published"},"_oai":{"id":"oai:oist.repo.nii.ac.jp:00002393","sets":["6:226"]},"author_link":["15882","15881","15883","15879","15878","15880"],"item_10001_biblio_info_7":{"attribute_name":"Bibliographic Information","attribute_value_mlt":[{"bibliographicIssueDates":{"bibliographicIssueDate":"2021-07-07","bibliographicIssueDateType":"Issued"},"bibliographicPageEnd":"394","bibliographicPageStart":"383","bibliographicVolumeNumber":"459","bibliographic_titles":[{},{"bibliographic_title":"Neurocomputing","bibliographic_titleLang":"en"}]}]},"item_10001_creator_3":{"attribute_name":"Author","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Zhang, Xianjie"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Liu, Yu"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Xu, Xiujuan"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Huang, Qiong"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Mao, Hangyu"}],"nameIdentifiers":[{}]},{"creatorNames":[{"creatorName":"Carie, Anil"}],"nameIdentifiers":[{}]}]},"item_10001_description_5":{"attribute_name":"Abstract","attribute_value_mlt":[{"subitem_description":"Multi-agent reinforcement learning (MARL) is essential for a wide range of high-dimensional scenarios and complicated tasks with multiple agents. Many attempts have been made for agents with prior domain knowledge and predefined structure. However, the interaction relationship between agents in a multi-agent system (MAS) in general is usually unknown, and previous methods could not tackle dynamical activities in an ever-changing environment. Here we propose a multi-agent Actor-Critic algorithm called Structural Relational Inference Actor-Critic (SRI-AC), which is based on the framework of centralized training and decentralized execution. SRI-AC utilizes the latent codes in variational autoencoder (VAE) to represent interactions between paired agents, and the reconstruction error is based on Graph Neural Network (GNN). With this framework, we test whether the reinforcement learning learners could form an interpretable structure while achieving better performance in both cooperative and competitive scenarios. The results indicate that SRI-AC could be applied to complex dynamic environments to find an interpretable structure while obtaining better performance compared to baseline algorithms.","subitem_description_type":"Other"}]},"item_10001_publisher_8":{"attribute_name":"Publisher","attribute_value_mlt":[{"subitem_publisher":"Elsevier"}]},"item_10001_relation_14":{"attribute_name":"DOI","attribute_value_mlt":[{"subitem_relation_type":"isVersionOf","subitem_relation_type_id":{"subitem_relation_type_id_text":"info:doi/10.1016/j.neucom.2021.07.014","subitem_relation_type_select":"DOI"}}]},"item_10001_relation_17":{"attribute_name":"Related site","attribute_value_mlt":[{"subitem_relation_type_id":{"subitem_relation_type_id_text":"https://www.sciencedirect.com/science/article/pii/S0925231221010481?via%3Dihub","subitem_relation_type_select":"URI"}}]},"item_10001_rights_15":{"attribute_name":"Rights","attribute_value_mlt":[{"subitem_rights":"This article/chapter was published in Neurocomputing, 459, X. J. Zhang, Y. Liu, X. J. Xu, Q. Huang, H. Y. Mao and A. 
Carie, Structural relational inference actor-critic for multi-agent reinforcement learning, 383-394, Copyright Elsevier 2021."}]},"item_10001_source_id_9":{"attribute_name":"ISSN","attribute_value_mlt":[{"subitem_source_identifier":"0925-2312","subitem_source_identifier_type":"ISSN"}]},"item_10001_version_type_20":{"attribute_name":"Author's flag","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_ab4af688f83e57aa","subitem_version_type":"AM"}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2023-07-07"}],"displaytype":"detail","filename":"NEUCOM-D-20-03673_R1.pdf","filesize":[{"value":"1.3 MB"}],"format":"application/pdf","license_note":"CC BY-NC-ND 4.0\nCreative Commons Attribution-NonCommercial-NoDerivatives 4.0 International(https://creativecommons.org/licenses/by-nc-nd/4.0/)","licensetype":"license_note","mimetype":"application/pdf","url":{"label":"NEUCOM-D-20-03673_R1","url":"https://oist.repo.nii.ac.jp/record/2393/files/NEUCOM-D-20-03673_R1.pdf"},"version_id":"03a10235-98bd-48ea-af74-7b56bde616f0"}]},"item_keyword":{"attribute_name":"キーワード","attribute_value_mlt":[{"subitem_subject":"Multi-agent systems","subitem_subject_scheme":"Other"},{"subitem_subject":"Deep reinforcement learning","subitem_subject_scheme":"Other"},{"subitem_subject":"Variational autoencoder","subitem_subject_scheme":"Other"},{"subitem_subject":"Actor-critic","subitem_subject_scheme":"Other"},{"subitem_subject":"Graph neural network","subitem_subject_scheme":"Other"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"journal article","resourceuri":"http://purl.org/coar/resource_type/c_6501"}]},"item_title":"Structural relational inference actor-critic for multi-agent reinforcement learning","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Structural relational inference actor-critic for multi-agent reinforcement learning","subitem_title_language":"en"}]},"item_type_id":"10001","owner":"31","path":["226"],"pubdate":{"attribute_name":"公開日","attribute_value":"2021-11-29"},"publish_date":"2021-11-29","publish_status":"0","recid":"2393","relation_version_is_last":true,"title":["Structural relational inference actor-critic for multi-agent reinforcement learning"],"weko_creator_id":"31","weko_shared_id":31},"updated":"2023-06-26T11:31:12.232361+00:00"}