{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2023,9,19]],"date-time":"2023-09-19T10:10:48Z","timestamp":1695118248389},"reference-count":51,"publisher":"Springer Science and Business Media LLC","issue":"3-4","license":[{"start":{"date-parts":[[2019,8,1]],"date-time":"2019-08-01T00:00:00Z","timestamp":1564617600000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2019,8,1]],"date-time":"2019-08-01T00:00:00Z","timestamp":1564617600000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Comput Game J"],"published-print":{"date-parts":[[2019,12]]},"DOI":"10.1007\/s40869-019-00083-3","type":"journal-article","created":{"date-parts":[[2019,8,1]],"date-time":"2019-08-01T10:03:02Z","timestamp":1564653782000},"page":"143-156","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Intelligent Adjustment of Game Properties at Run Time Using Multi-armed Bandits"],"prefix":"10.1007","volume":"8","author":[{"given":"Zahra","family":"Amiri","sequence":"first","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0003-3654-9583","authenticated-orcid":false,"given":"Yoones A.","family":"Sekhavat","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2019,8,1]]},"reference":[{"key":"83_CR1","doi-asserted-by":"publisher","first-page":"1054","DOI":"10.2307\/1427934","volume":"27","author":"R Agrawal","year":"1995","unstructured":"Agrawal, R. (1995). Sample mean-based index policies with O(log n) regret for the multi-armed bandit problem. Advances in Applied Probability,27, 1054\u20131078.","journal-title":"Advances in Applied Probability"},{"key":"83_CR2","doi-asserted-by":"crossref","unstructured":"Andersen, E., Liu, Y. E., Apter, E., Boucher-Genesse, F. & Popovic, Z. (2010). Gameplay analysis through state projection. In Proceedings of FDG (pp. 1\u20138). ACM Press.","DOI":"10.1145\/1822348.1822349"},{"key":"83_CR3","unstructured":"Audibert, J. Y., Bubeck, S., & Munos, R. (2010). Best arm identification in multi-armed bandits. In Proceedings of the 23rd conference on learning theory."},{"key":"83_CR4","doi-asserted-by":"publisher","first-page":"235","DOI":"10.1023\/A:1013689704352","volume":"47","author":"P Auer","year":"2002","unstructured":"Auer, P., Cesa-Bianchi, N., & Fischer, P. (2002). Finite-time analysis of the multiarmed bandit problem. Machine Learning,47, 235\u2013256.","journal-title":"Machine Learning"},{"key":"83_CR5","doi-asserted-by":"crossref","unstructured":"Belluz, J., Gaudesi, M., & Tonda, A. (2015). Operator selection using improved dynamic multi-armed bandit. In GECCO\u201915, July 11\u201315, 2015, Madrid, Spain. Copyright \u00a9ACM.","DOI":"10.1145\/2739480.2754712"},{"key":"83_CR7","doi-asserted-by":"publisher","first-page":"16","DOI":"10.1016\/j.neucom.2016.02.052","volume":"205:C","author":"PD Bouneffouf","year":"2016","unstructured":"Bouneffouf, P. D., & Feraud, R. (2016). Multi-armed bandit problem with known trend. Neurocomputing Archive,205:C, 16\u201321.","journal-title":"Neurocomputing Archive"},{"key":"83_CR8","unstructured":"Broden, B., Hammar, M., Nilsson, B. J., & Paraschakis, D. (2017). Bandit algorithms for e-Commerce recommender systems. 
In: RecSys 2017, Como, Italy."},{"issue":"1","key":"83_CR9","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1561\/2200000024","volume":"5","author":"S Bubeck","year":"2012","unstructured":"Bubeck, S., & Cesa-Bianchi, N. (2012). Regret analysis of stochastic and nonstochastic multi-armed bandit problems. Foundations and Trends in Machine Learning,5(1), 1\u2013122.","journal-title":"Foundations and Trends in Machine Learning"},{"key":"83_CR10","doi-asserted-by":"publisher","first-page":"1404","DOI":"10.1016\/j.jcss.2012.01.001","volume":"78","author":"N Cesa-Bianchi","year":"2012","unstructured":"Cesa-Bianchi, N., & Lugosi, G. (2012). Combinatorial bandits. Journal of Computer and System Sciences,78, 1404\u20131422.","journal-title":"Journal of Computer and System Sciences"},{"key":"83_CR11","first-page":"1","volume":"17","author":"W Chen","year":"2016","unstructured":"Chen, W., Wang, Y., Yuan, Y., & Wang, Q. (2016). Combinatorial multi-armed bandit and its extension to probabilistically triggered arms. Journal of Machine Learning Research,17, 1\u201333.","journal-title":"Journal of Machine Learning Research"},{"key":"83_CR12","unstructured":"Desurvire, H., Caplan, M., & Toth, J. A. (2004). Using heuristics to evaluate the playability of games. In: Extended abstracts CHI 2004 (pp. 1509\u20131512). ACM Press."},{"key":"83_CR13","doi-asserted-by":"crossref","unstructured":"Desurvire, H., & Wiberg, C. (2009). Game usability heuristics (play) for evaluating and designing better games: the next iteration. In Proceedings of OCSC 2009 (pp. 557\u2013566).","DOI":"10.1007\/978-3-642-02774-1_60"},{"key":"83_CR14","unstructured":"Dixit, P. N., Youngblood, G. M. (2008). Understanding playtest data through visual data mining in interactive 3D environments. In: Proceedings of CGAMES."},{"key":"83_CR15","doi-asserted-by":"crossref","unstructured":"Drachen, A., & Canossa, A. (2009a). Towards gameplay analysis via gameplay metrics. In: Proceedings of Mind Trek (pp. 202\u2013209). ACM Press.","DOI":"10.1145\/1621841.1621878"},{"key":"83_CR16","doi-asserted-by":"crossref","unstructured":"Drachen, A., & Canossa, A. (2009b). Analyzing spatial user behavior in computer games using geo-graphic information systems. In: Proceedings of MindTrek (pp. 182\u2013189). ACM Press.","DOI":"10.1145\/1621841.1621875"},{"key":"83_CR17","unstructured":"Dumas, J. S. (2002). User-based evaluations. In The human\u2013computer interaction handbook (pp. 1093\u20131117). L. Erlbaum Associates Inc."},{"issue":"1\u20132","key":"83_CR18","doi-asserted-by":"publisher","first-page":"25","DOI":"10.1007\/s10472-010-9213-y","volume":"60","author":"A Fialho","year":"2010","unstructured":"Fialho, A., Da Costa, L., Schoenauer, M., & Sebag, M. (2010). Analyzing bandit-based adaptive operator selection mechanisms. Annals of Mathematics and Artificial Intelligence,60(1\u20132), 25\u201364.","journal-title":"Annals of Mathematics and Artificial Intelligence"},{"key":"83_CR19","doi-asserted-by":"crossref","unstructured":"Gajos, K., & Weld, D. S. (2005). Preference elicitation for interface optimization. In Proceedings of the 18th annual ACM symposium on user interface software and technology (pp. 173\u2013182).","DOI":"10.1145\/1095034.1095063"},{"key":"83_CR20","unstructured":"Garivier, A., Kaufmann, E., & Koolen, W. (2016). Maximum action identification: A new bandit framework for games. In JMLR: Workshop and Conference Proceedings (vol 49, pp. 
1\u201323)."},{"key":"83_CR21","doi-asserted-by":"publisher","DOI":"10.1155\/2016\/5182768","author":"E Geslin","year":"2016","unstructured":"Geslin, E., Jegou, L., & Beaudoin, D. (2016). How color properties can be used to elicit emotions in video games. International Journal of Computer Games Technology. https:\/\/doi.org\/10.1155\/2016\/5182768 .","journal-title":"International Journal of Computer Games Technology"},{"key":"83_CR22","first-page":"228","volume-title":"Using frustration in the design of adaptive videogames","author":"KM Gilleade","year":"2004","unstructured":"Gilleade, K. M., & Dix, A. (2004). Using frustration in the design of adaptive videogames (pp. 228\u2013232). New York: ACM Press."},{"key":"83_CR23","unstructured":"Ijsselsteijn, W. A., de Kort, Y. A. W., & Poels, K. (2013). The game experience questionnaire. Technische Universiteit Eindhoven."},{"key":"83_CR24","unstructured":"Kaufmann, E., & Garivier, A. (2017). Learning the distribution with largest mean: Two bandit frameworks. In ESAIM: Proceedings and surveys (pp. 1\u201310)."},{"key":"83_CR25","unstructured":"Kaufmann, E., & Kalyanakrishnan, S. (2013). Information complexity in bandit subset selection. In Proceedings of the 26th conference on learning theory (pp. 228\u2013251)."},{"key":"83_CR26","doi-asserted-by":"crossref","unstructured":"Kohavi, R., Deng, A., Frasca, B., Longbotham, R., Walker, T., & Ya, X. (2012) Trustworthy online controlled experiments: Five puzzling outcomes explained. In Proceedings of the 18th ACM SIGKDD international conference on knowledge discovery and data mining (pp. 786\u2013794).","DOI":"10.1145\/2339530.2339653"},{"key":"83_CR27","doi-asserted-by":"publisher","first-page":"140","DOI":"10.1007\/s10618-008-0114-1","volume":"18","author":"R Kohavi","year":"2009","unstructured":"Kohavi, R., Longbotham, R., Sommerfield, D., & Henne, R. M. (2009). Controlled experiments on the web: Survey and practical guide. Data Mining and Knowledge Discovery,18, 140\u2013181.","journal-title":"Data Mining and Knowledge Discovery"},{"key":"83_CR28","first-page":"1","volume":"1","author":"V Kuleshov","year":"2000","unstructured":"Kuleshov, V., & Precup, D. (2000). Algorithms for the multi-armed bandit problem. Journal of Machine Learning Research,1, 1\u201348.","journal-title":"Journal of Machine Learning Research"},{"key":"83_CR29","volume-title":"Observing the user experience: A practitioner\u2019s guide to user research","author":"M Kuniavsky","year":"2003","unstructured":"Kuniavsky, M. (2003). Observing the user experience: A practitioner\u2019s guide to user research. Amsterdam: Elsevier."},{"key":"83_CR30","doi-asserted-by":"crossref","unstructured":"Tran-Thanh, L. Chapman, A., de Cote, E. M., Rogers, A., & Jennings, N. R. (2010). First policies for budget-limited multi-armed bandits. In Proceedings of the 24th AAAI conference on artificial intelligence, 11\u201315 July 2010, Georgia, USA (pp. 1211\u20131216).","DOI":"10.1609\/aaai.v24i1.7758"},{"key":"83_CR31","doi-asserted-by":"crossref","unstructured":"Li, L., Chu, W., Langford, J., & Schapire, R. E. (2010). A contextual-bandit approach to personalized news article recommendation. In Proceedings of the 19th international conference on World Wide Web (pp. 661\u2013670).","DOI":"10.1145\/1772690.1772758"},{"key":"83_CR32","unstructured":"Liu, J., Togelius, J., Perez-Liebana, D., & Lucas, S. M. (2017). Evolving game skill-depth using general video game AI agents. 
In: IEEE congress on evolutionary computation (CEC) San Sebastian, Spain."},{"key":"83_CR33","unstructured":"Liu, Y., Mandel, T., Brunskill, E., & Popovic, Z. (2014). Trading off scientific knowledge and user learning with multi-armed bandits. In EDM (pp. 161\u2013168)."},{"key":"83_CR34","doi-asserted-by":"crossref","unstructured":"Lomas, D., Forlizzi, J., Poonawala, N., Patel, N., Shodhan, S., Patel, K., Koedinger, K. R., & Brunskill, E. (2016). Interface design optimization as a multi-armed bandit problem. In Proceedings of the 2016 CHI conference on human factors in computing systems (pp. 4142\u20134153).","DOI":"10.1145\/2858036.2858425"},{"key":"83_CR36","volume-title":"Introduction to string field theory","year":"1999","unstructured":"Loren, R., & Benson, D. B. (Eds.). (1999). Introduction to string field theory (2nd ed.). New York: Springer.","edition":"2"},{"key":"83_CR37","doi-asserted-by":"crossref","unstructured":"Lu, J., Li, L., Shen, D., Chen, G., Jia, B., Blasch, E., & Pham, K. (2017). Dynamic multi-arm bandit game based multi-agents spectrum sharing strategy design. Cornell University. Submitted 12 Nov 2017.","DOI":"10.1109\/DASC.2017.8102137"},{"key":"83_CR38","first-page":"412","volume-title":"Noise-free multi-armed bandit game","author":"A Nakamura","year":"2016","unstructured":"Nakamura, A., Helmbold, D. P., & Warmuth, M. K. (2016). Noise-free multi-armed bandit game (pp. 412\u2013423). Switzerland: Springer."},{"key":"83_CR39","doi-asserted-by":"publisher","first-page":"665","DOI":"10.1613\/jair.5398","volume":"58","author":"S Ontanon","year":"2017","unstructured":"Ontanon, S. (2017). Combinatorial multi-armed bandits for real-time strategy games. Journal of Artificial Intelligence Research,58, 665\u2013702.","journal-title":"Journal of Artificial Intelligence Research"},{"key":"83_CR40","doi-asserted-by":"crossref","unstructured":"Ontanon, S., & Zhu, J. (2011). The SAM algorithm for analogy-based story generation. In Proceedings of the seventh AAAI conference on artificial intelligence and interactive digital entertainment, AIIDE 2011, 10\u201314 October 2011, Stanford, California, USA (pp. 67\u201372).","DOI":"10.1609\/aiide.v7i1.12426"},{"key":"83_CR42","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.eswa.2018.02.032","volume":"103","author":"PC Pendharkar","year":"2017","unstructured":"Pendharkar, P. C., & Cusatis, P. (2017). Trading financial indices with reinforcement learning agents. Expert Systems with Applications,103, 1\u201313.","journal-title":"Expert Systems with Applications"},{"key":"83_CR43","first-page":"397","volume":"3","author":"K Raharjo","year":"2002","unstructured":"Raharjo, K. (2002). Using confidence bounds for exploitation\u2013exploration trade-offs. Journal of Machine Learning Research,3, 397\u2013422.","journal-title":"Journal of Machine Learning Research"},{"issue":"10","key":"83_CR44","first-page":"1758","volume":"10","author":"K Raharjo","year":"2016","unstructured":"Raharjo, K., & Lawrence, R. (2016). Using multi-armed bandits to optimize game play metrics and effective game design. International Journal of Computer and Information Engineering,10(10), 1758\u20131761.","journal-title":"International Journal of Computer and Information Engineering"},{"key":"83_CR45","unstructured":"Ramirez, A. J., & Bulitko, V. (2014). Automated planning and player modeling for interactive story telling. In IEEE transactions on computer intelligence and AI in games (pp. 
375\u2013386)."},{"key":"83_CR46","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1142\/S0218213017300010","volume":"26","author":"YA Sekhavat","year":"2017","unstructured":"Sekhavat, Y. A. (2017). Behavior trees for computer games. International Journal on Artificial Intelligence Tools,26, 1\u201328.","journal-title":"International Journal on Artificial Intelligence Tools"},{"issue":"6","key":"83_CR47","doi-asserted-by":"publisher","first-page":"626","DOI":"10.1109\/THMS.2018.2860579","volume":"48","author":"YA Sekhavat","year":"2018","unstructured":"Sekhavat, Y. A., & Namani, M. S. (2018). Projection-based AR: Effective visual feedback in gait rehabilitation. IEEE Transactions on Human\u2013Machine Systems,48(6), 626\u2013636.","journal-title":"IEEE Transactions on Human\u2013Machine Systems"},{"key":"83_CR48","volume-title":"Introduction to reinforcement learning","author":"RS Sutton","year":"1988","unstructured":"Sutton, R. S., & Barto, A. G. (1988). Introduction to reinforcement learning. Cambridge, MA: MIT Press."},{"key":"83_CR49","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/1077246.1077253","volume":"3","author":"P Sweetser","year":"2005","unstructured":"Sweetser, P., & Wyeth, P. (2005). Gameflow: A model for evaluating player enjoyment in games. Computer Entertainment,3, 1\u201324.","journal-title":"Computer Entertainment"},{"key":"83_CR50","doi-asserted-by":"crossref","first-page":"11","DOI":"10.20870\/IJVR.2007.6.4.2704","volume":"6","author":"R Thawonmas","year":"2007","unstructured":"Thawonmas, R., Kurashige, M., & Chen, K. T. (2007). Detection of landmarks for clustering of online-game players. International Journal of Virtual Reality,6, 11\u201316.","journal-title":"International Journal of Virtual Reality"},{"key":"83_CR51","doi-asserted-by":"publisher","first-page":"119","DOI":"10.1007\/BFb0030565","volume-title":"Creating personalities for synthetic actors: Towards autonomous personality agents","author":"R Trappl","year":"1997","unstructured":"Trappl, R., & Petta, P. (1997). Creating personalities for synthetic actors: Towards autonomous personality agents (p. 119). Berlin: Springer."},{"key":"83_CR52","doi-asserted-by":"crossref","unstructured":"Tychsen, A., & Canossa, A. (2008). Defining personas in games using metrics. In Proceedings of future play 2008 (pp. 400\u2013433). ACM Press.","DOI":"10.1145\/1496984.1496997"},{"key":"83_CR53","doi-asserted-by":"crossref","unstructured":"Vermeulen, I. E., Roth, C., Vorderer, P., & Klimmt, C. (2010). Measuring user responses to inter-active stories: Towards a standardized assessment tool. In Proceedings of the international conference on interactive digital storytelling (ICIDS) (pp. 38\u201343).","DOI":"10.1007\/978-3-642-16638-9_7"},{"key":"83_CR54","doi-asserted-by":"crossref","unstructured":"Zhao, Z., & Liu, A. L. (2017) Intelligent demand response for electricity consumers: A multi-armed bandit game approach. 
In Intelligent system application to power systems (ISAP).","DOI":"10.1109\/ISAP.2017.8071376"}],"container-title":["The Computer Games Journal"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s40869-019-00083-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/article\/10.1007\/s40869-019-00083-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s40869-019-00083-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,18]],"date-time":"2023-09-18T19:16:31Z","timestamp":1695064591000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/s40869-019-00083-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,8,1]]},"references-count":51,"journal-issue":{"issue":"3-4","published-print":{"date-parts":[[2019,12]]}},"alternative-id":["83"],"URL":"https:\/\/doi.org\/10.1007\/s40869-019-00083-3","relation":{},"ISSN":["2052-773X"],"issn-type":[{"value":"2052-773X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019,8,1]]},"assertion":[{"value":"17 April 2019","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 July 2019","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 August 2019","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}
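The record above is the Crossref work metadata for DOI 10.1007/s40869-019-00083-3. As a minimal sketch of how such a record can be pulled back out of Crossref and a few fields read from it: the public REST endpoint https://api.crossref.org/works/{doi} and the third-party `requests` package are assumptions here (they are not stated anywhere in the record itself); the DOI and the field names (title, container-title, author, reference) are taken directly from the record above.

    # Minimal sketch: fetch this Crossref work record by DOI and read a few fields.
    # Assumptions: the public Crossref REST API (api.crossref.org/works/{doi})
    # and the `requests` package; the DOI comes from the record above.
    import requests

    DOI = "10.1007/s40869-019-00083-3"

    resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
    resp.raise_for_status()
    work = resp.json()["message"]  # envelope matches the record: status / message-type / message

    print(work["title"][0])            # article title
    print(work["container-title"][0])  # journal: The Computer Games Journal
    print(", ".join(f'{a.get("given", "")} {a["family"]}' for a in work["author"]))
    print("deposited references:", len(work.get("reference", [])))

Note that "title", "container-title", and similar fields are JSON arrays in Crossref responses (hence the [0]), and "reference" may be absent for works whose citations were not deposited, which is why it is read with .get() above.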