{"created":"2023-06-26T11:00:49.932948+00:00","id":1423,"links":{},"metadata":{"_buckets":{"deposit":"c8990eaa-4b72-488e-941a-7d98f09c37ac"},"_deposit":{"created_by":29,"id":"1423","owners":[29],"pid":{"revision_id":0,"type":"depid","value":"1423"},"status":"published"},"_oai":{"id":"oai:oist.repo.nii.ac.jp:00001423","sets":["86:203"]},"author_link":["8640"],"item_10003_biblio_info_7":{"attribute_name":"書誌情報","attribute_value_mlt":[{"bibliographicIssueDates":{"bibliographicIssueDate":"2018-10-24","bibliographicIssueDateType":"Issued"},"bibliographicPageEnd":"55","bibliographicPageStart":"54","bibliographic_titles":[{"bibliographic_title":"第 28 回 日本神経回路学会全国大会 講演論文集"},{"bibliographic_title":"The Proceedings of the 28th Annual Conference of the Japanese Neural Network Society","bibliographic_titleLang":"en"}]}]},"item_10003_description_5":{"attribute_name":"抄録","attribute_value_mlt":[{"subitem_description":"Reinforcement learning is a useful ap-proach to solve machine learning problems by self-exploration when training samples are not provided.However, researchers usually ignore the importance ofthe choice of exploration noise. In this paper, I showthat temporally self-correlated exploration stochastic-ity, generated by Ornstein-Uhlenbeck process, can sig-nificantly enhance the performance of reinforcementlearning tasks by improving exploration.","subitem_description_type":"Abstract"}]},"item_10003_publisher_8":{"attribute_name":"出版者","attribute_value_mlt":[{"subitem_publisher":"Japanese Neural Network Society"}]},"item_10003_relation_17":{"attribute_name":"関連サイト","attribute_value_mlt":[{"subitem_relation_type_id":{"subitem_relation_type_id_text":"http://jnns.org/conference/2018/en/program.html","subitem_relation_type_select":"URI"}}]},"item_10003_rights_15":{"attribute_name":"権利","attribute_value_mlt":[{"subitem_rights":" © 2018 Japanese Neural Network Society"}]},"item_10003_version_type_20":{"attribute_name":"著者版フラグ","attribute_value_mlt":[{"subitem_version_resource":"http://purl.org/coar/version/c_ab4af688f83e57aa","subitem_version_type":"AM"}]},"item_creator":{"attribute_name":"著者","attribute_type":"creator","attribute_value_mlt":[{"creatorNames":[{"creatorName":"Han, Dongqi","creatorNameLang":"en"}],"nameIdentifiers":[{}]}]},"item_files":{"attribute_name":"ファイル情報","attribute_type":"file","attribute_value_mlt":[{"accessrole":"open_date","date":[{"dateType":"Available","dateValue":"2020-04-28"}],"displaytype":"detail","filename":"jnns2018_dongqi.pdf","filesize":[{"value":"2.4 MB"}],"format":"application/pdf","licensetype":"license_note","mimetype":"application/pdf","url":{"label":"jnns2018_dongqi","url":"https://oist.repo.nii.ac.jp/record/1423/files/jnns2018_dongqi.pdf"},"version_id":"5bf16d22-69f6-48c2-95b4-7ded149c31ba"}]},"item_language":{"attribute_name":"言語","attribute_value_mlt":[{"subitem_language":"eng"}]},"item_resource_type":{"attribute_name":"資源タイプ","attribute_value_mlt":[{"resourcetype":"conference paper","resourceuri":"http://purl.org/coar/resource_type/c_5794"}]},"item_title":"Improving exploration in reinforcement learning with temporally correlated stochasticity","item_titles":{"attribute_name":"タイトル","attribute_value_mlt":[{"subitem_title":"Improving exploration in reinforcement learning with temporally correlated 
stochasticity","subitem_title_language":"en"}]},"item_type_id":"10003","owner":"29","path":["203"],"pubdate":{"attribute_name":"公開日","attribute_value":"2020-04-28"},"publish_date":"2020-04-28","publish_status":"0","recid":"1423","relation_version_is_last":true,"title":["Improving exploration in reinforcement learning with temporally correlated stochasticity"],"weko_creator_id":"29","weko_shared_id":29},"updated":"2023-06-26T11:51:19.664213+00:00"}