{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,28]],"date-time":"2025-03-28T09:20:56Z","timestamp":1743153656313,"version":"3.40.3"},"publisher-location":"Cham","reference-count":16,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031606915"},{"type":"electronic","value":"9783031606922"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-60692-2_19","type":"book-chapter","created":{"date-parts":[[2024,5,31]],"date-time":"2024-05-31T06:02:11Z","timestamp":1717135331000},"page":"275-294","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Enhancing Pok\u00e9mon VGC Player Performance: Intelligent Agents Through Deep Reinforcement Learning and\u00a0Neuroevolution"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-3663-0311","authenticated-orcid":false,"given":"Gian","family":"Rodriguez","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6540-1230","authenticated-orcid":false,"given":"Edwin","family":"Villanueva","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7975-2608","authenticated-orcid":false,"given":"Johan","family":"Balde\u00f3n","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,6,1]]},"reference":[{"key":"19_CR1","doi-asserted-by":"publisher","unstructured":"Abukhait, J., Aljaafreh, A., Al-Oudat, N.: A multi-agent design of a computer player for nine men\u2019s morris board game using deep reinforcement learning. In: 2019 6th International Conference on Social Networks Analysis, Management and Security, SNAMS 2019, pp. 489\u2013493 (2019). https:\/\/doi.org\/10.1109\/SNAMS.2019.8931879","DOI":"10.1109\/SNAMS.2019.8931879"},{"key":"19_CR2","doi-asserted-by":"publisher","unstructured":"Aljaafreh, A., Al-Oudat, N.: Development of a computer player for seejeh (a.k.a seega, siga, kharbga) board game with deep reinforcement learning. In: Procedia Computer Science. vol.\u00a0160, pp. 241\u2013247 (2019). https:\/\/doi.org\/10.1016\/j.procs.2019.09.463","DOI":"10.1016\/j.procs.2019.09.463"},{"key":"19_CR3","doi-asserted-by":"publisher","unstructured":"Arun, E., Rajesh, H., Chakrabarti, D., Cherala, H., George, K.: Monopoly using reinforcement learning. In: IEEE Region 10 Annual International Conference, Proceedings\/TENCON, pp. 858\u2013862 (2019). https:\/\/doi.org\/10.1109\/TENCON.2019.8929523","DOI":"10.1109\/TENCON.2019.8929523"},{"key":"19_CR4","doi-asserted-by":"publisher","first-page":"364","DOI":"10.1016\/j.neunet.2022.03.013","volume":"150","author":"P Barros","year":"2022","unstructured":"Barros, P., Sciutti, A.: All by myself: learning individualized competitive behavior with a contrastive reinforcement learning optimization. Neural Netw. 150, 364\u2013376 (2022). https:\/\/doi.org\/10.1016\/j.neunet.2022.03.013","journal-title":"Neural Netw."},{"key":"19_CR5","unstructured":"Chen, K., Lin, E.: Gotta train\u2019em all: learning to play pokemon showdown with reinforcement learning (2018). https:\/\/cs230.stanford.edu\/projects_fall_2018\/reports\/12447633.pdf"},{"key":"19_CR6","doi-asserted-by":"publisher","unstructured":"Czech, J., Willig, M., Beyer, A., Kersting, K., F\u00fcrnkranz, J.: Learning to play the chess variant crazyhouse above world champion level with deep neural networks and human data. Front. Artif. Intell. 3 (2020). https:\/\/doi.org\/10.3389\/frai.2020.00024","DOI":"10.3389\/frai.2020.00024"},{"key":"19_CR7","unstructured":"Gu, S., Chen, T., Li, J., Wang, W., Liu, H., He, R.: Deep reinforcement learning for real-time strategy games. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a033, pp. 1286\u20131293 (2019)"},{"key":"19_CR8","doi-asserted-by":"publisher","unstructured":"Hu, J., Zhao, F., Meng, J., Wu, S.: Application of deep reinforcement learning in the board game. In: 2020 IEEE International Conference on Information Technology,Big Data and Artificial Intelligence (ICIBA), vol.\u00a01, pp. 809\u2013812 (2020). https:\/\/doi.org\/10.1109\/ICIBA50161.2020.9277188","DOI":"10.1109\/ICIBA50161.2020.9277188"},{"key":"19_CR9","doi-asserted-by":"publisher","unstructured":"Huang, D., Lee, S.: A self-play policy optimization approach to battling pok\u00e9mon. In: 2019 IEEE Conference on Games (CoG), pp.\u00a01\u20134 (2019). https:\/\/doi.org\/10.1109\/CIG.2019.8848014","DOI":"10.1109\/CIG.2019.8848014"},{"key":"19_CR10","unstructured":"Khosla, K., Lin, L., Qi, C.: Artificial Intelligence for Pokemon Showdown. Ph.D. thesis, PhD thesis, Stanford University (2017). https:\/\/docplayer.net\/63514819-Artificial-intelligence-for-pokemon-showdown.html"},{"key":"19_CR11","unstructured":"Llobet\u00a0Sanchez, M.: Learning complex games through self play-Pok\u00e9mon battles. B.S. thesis, Universitat Polit\u00e8cnica de Catalunya (2018). http:\/\/hdl.handle.net\/2117\/121655"},{"key":"19_CR12","doi-asserted-by":"publisher","unstructured":"Mnih, V., Kavukcuoglu, K., Silver, D., Graves, A., Antonoglou, I., Wierstra, D., Riedmiller, M.: Playing atari with deep reinforcement learning (2013). https:\/\/doi.org\/10.48550\/ARXIV.1312.5602","DOI":"10.48550\/ARXIV.1312.5602"},{"key":"19_CR13","doi-asserted-by":"publisher","unstructured":"Reis, S., Reis, L.P., Lau, N.: Vgc ai competition - a new model of meta-game balance AI competition. In: 2021 IEEE Conference on Games (CoG). IEEE (Aug 2021). https:\/\/doi.org\/10.1109\/cog52621.2021.9618985","DOI":"10.1109\/cog52621.2021.9618985"},{"key":"19_CR14","doi-asserted-by":"publisher","unstructured":"Silver, D., et al.: Mastering the game of go with deep neural networks and tree search. Nature 529(7587), 484\u2013489 (2016). https:\/\/doi.org\/10.1038\/nature16961","DOI":"10.1038\/nature16961"},{"key":"19_CR15","doi-asserted-by":"publisher","unstructured":"Sim\u00f5es, D., Reis, S., Lau, N., Reis, L.P.: Competitive deep reinforcement learning over a pok\u00e9mon battling simulator. In: 2020 IEEE International Conference on Autonomous Robot Systems and Competitions (ICARSC), pp. 40\u201345 (2020). https:\/\/doi.org\/10.1109\/ICARSC49921.2020.9096092","DOI":"10.1109\/ICARSC49921.2020.9096092"},{"issue":"1\u20132","key":"19_CR16","doi-asserted-by":"publisher","first-page":"181","DOI":"10.1016\/s0004-3702(01)00110-2","volume":"134","author":"G Tesauro","year":"2002","unstructured":"Tesauro, G.: Programming backgammon using self-teaching neural nets. Artif. Intell. 134(1\u20132), 181\u2013199 (2002). https:\/\/doi.org\/10.1016\/s0004-3702(01)00110-2","journal-title":"Artif. Intell."}],"container-title":["Lecture Notes in Computer Science","HCI in Games"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-60692-2_19","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,5,31]],"date-time":"2024-05-31T06:04:41Z","timestamp":1717135481000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-60692-2_19"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031606915","9783031606922"],"references-count":16,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-60692-2_19","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"1 June 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"HCII","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Human-Computer Interaction","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Washington DC","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 June 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 July 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"hcii2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/2024.hci.international\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}