{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T09:51:33Z","timestamp":1730195493517,"version":"3.28.0"},"reference-count":11,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,10]]},"DOI":"10.1109\/aiam54119.2021.00063","type":"proceedings-article","created":{"date-parts":[[2022,3,8]],"date-time":"2022-03-08T22:00:41Z","timestamp":1646776841000},"page":"276-279","source":"Crossref","is-referenced-by-count":2,"title":["Performance of Reinforcement Learning on Traditional Video Games"],"prefix":"10.1109","author":[{"given":"Yuanxi","family":"Sun","sequence":"first","affiliation":[{"name":"New York University Center for Data Science,New York,the United States,10003"}]}],"member":"263","reference":[{"journal-title":"Dota 2 with large scale deep reinforcement learning","year":"2019","author":"open","key":"ref4"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3319619.3321894"},{"key":"ref10","article-title":"Asynchronous methods for deep reinforcement learning","volume":"abs 1602 l783","author":"mnih","year":"2016","journal-title":"CaRR"},{"key":"ref6","article-title":"Deep reinforcement learning with double q-learning","volume":"abs 1509 6461","author":"hasselt","year":"0","journal-title":"CoRR"},{"key":"ref11","article-title":"Proximal policy optimization algorithms","volume":"abs 1707 6347","author":"schulman","year":"2017","journal-title":"CoRR"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CoG52621.2021.9619124"},{"journal-title":"Pytorch-flappy-bird","year":"2015","author":"gao","key":"ref8"},{"key":"ref7","article-title":"Dueling network architectures for deep reinforcement learning","volume":"abs 1511 6581","author":"wang","year":"0","journal-title":"CaRR"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"journal-title":"Rl-flappybird","year":"2020","author":"xu","key":"ref9"},{"key":"ref1","article-title":"Playing atari with deep reinforcement learning","volume":"abs 1312 s602","author":"mnih","year":"2013","journal-title":"CoRR"}],"event":{"name":"2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture (AIAM)","start":{"date-parts":[[2021,10,23]]},"location":"Manchester, United Kingdom","end":{"date-parts":[[2021,10,25]]}},"container-title":["2021 3rd International Conference on Artificial Intelligence and Advanced Manufacture 
(AIAM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9724649\/9724654\/09724774.pdf?arnumber=9724774","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,10]],"date-time":"2022-06-10T21:34:51Z","timestamp":1654896891000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9724774\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,10]]},"references-count":11,"URL":"https:\/\/doi.org\/10.1109\/aiam54119.2021.00063","relation":{},"subject":[],"published":{"date-parts":[[2021,10]]}}}