{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T13:42:28Z","timestamp":1730209348753,"version":"3.28.0"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,8,21]],"date-time":"2023-08-21T00:00:00Z","timestamp":1692576000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,8,21]],"date-time":"2023-08-21T00:00:00Z","timestamp":1692576000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,8,21]]},"DOI":"10.1109\/cog57401.2023.10333135","type":"proceedings-article","created":{"date-parts":[[2023,12,4]],"date-time":"2023-12-04T18:48:21Z","timestamp":1701715701000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Mastering Curling with RL-revised Decision Tree"],"prefix":"10.1109","author":[{"given":"Junjie","family":"Lin","sequence":"first","affiliation":[{"name":"University of Science and Technology of China,Hefei,China,230026"}]},{"given":"Yuhao","family":"Gong","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China,Hefei,China,230026"}]},{"given":"Jian","family":"Zhao","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China,Hefei,China,230026"}]},{"given":"Wengang","family":"Zhou","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence,Hefei Comprehensive National Science Center"}]},{"given":"Houqiang","family":"Li","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence,Hefei Comprehensive National Science Center"}]}],"member":"263","reference":[{"volume-title":"Curling: the history, the players, the game","year":"1999","author":"Hansen","key":"ref1"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5878"},{"article-title":"Fever basketball: a complex, flexible, and asynchronized sports game environment for multi-agent reinforcement learning","year":"2020","author":"Jia","key":"ref3"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"article-title":"Suphx: Mastering mahjong with deep reinforcement learning","year":"2020","author":"Li","key":"ref6"},{"first-page":"12 333","article-title":"Douzero: mastering doudizhu with self-play deep reinforcement learning","volume-title":"Proceedings of International Conference on Machine Learning","author":"Zha","key":"ref7"},{"volume-title":"The rules of curling","key":"ref8"},{"issue":"2","key":"ref9","first-page":"130","article-title":"Decision tree methods: applications for classification and prediction","volume":"27","author":"Song","year":"2015","journal-title":"Shanghai Archives of Psychiatry"},{"volume":"14","article-title":"An introduction to classification and regression tree (cart) analysis","volume-title":"Proceedings of Annual Meeting of the Society for Academic Emergency Medicine in San Francisco, California","author":"Lewis","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/BF00116251"},{"article-title":"Conservative q-improvement: Reinforcement learning for an interpretable decision-tree 
policy","year":"2019","author":"Roth","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.5391\/IJFIS.2016.16.1.27"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00642"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICMLA.2019.00016"},{"key":"ref16","first-page":"70","article-title":"Decision tree function approximation in reinforcement learning","volume-title":"Proceedings of the Third International Symposium on Adaptive Systems: Evolutionary Computation and Probabilistic Graphical Models","volume":"2","author":"Pyeatt"},{"article-title":"Learning decision trees with reinforcement learning","volume-title":"Proceedings of NIPS Workshop on Meta-Learning","author":"Xiong","key":"ref17"},{"first-page":"1855","article-title":"Optimization methods for interpretable differentiable decision trees applied to reinforcement learning","volume-title":"Proceedings of International Conference on Artificial Intelligence and Statistics","author":"Silva","key":"ref18"},{"volume-title":"Reinforcement learning: An introduction","year":"2018","author":"Sutton","key":"ref19"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2022.11.065"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(01)00129-1"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TCIAIG.2012.2186810"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1038\/d41586-019-02156-9"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"article-title":"Dota 2 with large scale deep reinforcement learning","year":"2019","author":"Berner","key":"ref25"},{"first-page":"387","article-title":"Deterministic policy gradient algorithms","volume-title":"Proceedings of International Conference on Machine Learning","author":"Silver","key":"ref26"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.32657\/10356\/90191"},{"first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proceedings of International Conference on Machine Learning","author":"Fujimoto","key":"ref28"},{"first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proceedings of International Conference on Machine Learning","author":"Haarnoja","key":"ref29"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4615-3618-5_2"},{"first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proceedings of International Conference on Machine Learning","author":"Mnih","key":"ref31"},{"first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proceedings of International Conference on Machine Learning","author":"Schulman","key":"ref32"},{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"Schulman","key":"ref33"},{"key":"ref34","first-page":"5","article-title":"Multilayer perceptron tutorial","volume":"4","author":"Noriega","year":"2005","journal-title":"School of Computing. 
Staffordshire University"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1613\/jair.639"},{"first-page":"3540","article-title":"Feudal networks for hierarchical reinforcement learning","volume-title":"Proceedings of International Conference on Machine Learning","author":"Vezhnevets","key":"ref36"},{"key":"ref37","article-title":"Data-efficient hierarchical reinforcement learning","volume":"31","author":"Nachum","year":"2018","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.330110009"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/3453160"}],"event":{"name":"2023 IEEE Conference on Games (CoG)","start":{"date-parts":[[2023,8,21]]},"location":"Boston, MA, USA","end":{"date-parts":[[2023,8,24]]}},"container-title":["2023 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10333091\/10333129\/10333135.pdf?arnumber=10333135","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,6]],"date-time":"2024-02-06T18:42:51Z","timestamp":1707244971000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10333135\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,21]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/cog57401.2023.10333135","relation":{},"subject":[],"published":{"date-parts":[[2023,8,21]]}}}
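The record above is a Crossref REST API work record for DOI 10.1109/cog57401.2023.10333135, as returned by the public GET /works/{doi} endpoint on api.crossref.org, which wraps the work in the {"status", "message-type", "message"} envelope shown. Below is a minimal sketch (Python, standard library only) of fetching such a record and reading a few of its fields; the script itself and the mailto address in the User-Agent are illustrative placeholders, not part of the record, while the field paths match the JSON above.

import json
import urllib.request

DOI = "10.1109/cog57401.2023.10333135"
url = f"https://api.crossref.org/works/{DOI}"

# Crossref asks API clients to identify themselves with a contact address
# (the "polite pool" convention); the address here is a placeholder.
req = urllib.request.Request(
    url,
    headers={"User-Agent": "metadata-example/0.1 (mailto:you@example.org)"},
)

with urllib.request.urlopen(req) as resp:
    # Unwrap the {"status": ..., "message": {...}} envelope to get the work.
    work = json.load(resp)["message"]

print(work["title"][0])            # Mastering Curling with RL-revised Decision Tree
print(work["container-title"][0])  # 2023 IEEE Conference on Games (CoG)
print(work["page"])                # 1-8
print(work["references-count"])    # 39
for author in work["author"]:      # five authors, in "sequence" order
    print(author["given"], author["family"])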