{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,6]],"date-time":"2024-09-06T01:02:34Z","timestamp":1725584554792},"reference-count":65,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2020]]},"DOI":"10.1109\/access.2020.3024979","type":"journal-article","created":{"date-parts":[[2020,9,18]],"date-time":"2020-09-18T20:17:42Z","timestamp":1600460262000},"page":"171528-171541","source":"Crossref","is-referenced-by-count":11,"title":["Optimizing Transportation Dynamics at a City-Scale Using a Reinforcement Learning Framework"],"prefix":"10.1109","volume":"8","author":[{"given":"Luckyson","family":"Khaidem","sequence":"first","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0001-6964-9877","authenticated-orcid":false,"given":"Massimiliano","family":"Luca","sequence":"additional","affiliation":[]},{"given":"Fan","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Ankit","family":"Anand","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0003-1275-2333","authenticated-orcid":false,"given":"Bruno","family":"Lepri","sequence":"additional","affiliation":[]},{"given":"Wen","family":"Dong","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref38","article-title":"Learning from delayed rewards","author":"watkins","year":"1989"},{"key":"ref33","article-title":"Deep reinforcement learning for traffic light control in vehicular networks","author":"liang","year":"2018","journal-title":"arXiv 1803 11115"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2013.2255286"},{"key":"ref31","doi-asserted-by":"crossref","first-page":"247","DOI":"10.1109\/JAS.2016.7508798","article-title":"Traffic signal timing via deep reinforcement learning","volume":"3","author":"li","year":"2016","journal-title":"IEEE\/CAA Journal of Automatica Sinica"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1061\/(ASCE)0733-947X(2003)129:3(278)"},{"key":"ref37","article-title":"Deep deterministic policy gradient for urban traffic light control","author":"casas","year":"2017","journal-title":"arXiv 1703 09035"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1049\/iet-its.2017.0153"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1049\/iet-its.2009.0070"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3219819.3220096"},{"key":"ref60","first-page":"-387i","article-title":"Deterministic policy gradient algorithms","volume":"32","author":"silver","year":"2014","journal-title":"Proc 31st Int Conf Int Conf Mach Learn"},{"key":"ref62","author":"kickh\u00f6fer","year":"2016","journal-title":"Creating an open matsim scenario from open data The case of santiago de chile"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.3141\/2493-13"},{"key":"ref28","article-title":"Coordinated deep reinforcement learners for traffic light control","author":"van der pol","year":"2016","journal-title":"Proc Learn Inference Control Multi-Agent Syst"},{"key":"ref63","article-title":"D4D-senegal: The second mobile phone data for 
development challenge","author":"de montjoye","year":"2014","journal-title":"arXiv 1407 4885"},{"key":"ref27","first-page":"2976","article-title":"Overview of the ACS-lite adaptive control system","author":"gettman","year":"2006","journal-title":"Proc 13th World Congr Intell Transp Syst Services"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.3141\/1894-07"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1287\/trsc.1100.0367"},{"key":"ref29","article-title":"Adaptive traffic signal control: Deep reinforcement learning algorithm with experience replay and target network","author":"gao","year":"2017","journal-title":"arXiv 1705 02755"},{"key":"ref2","first-page":"294","article-title":"The geography of urban transportation finance","volume":"3","author":"taylor","year":"2004","journal-title":"The Geography of Urban Transportation"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1080\/17538947.2018.1512662"},{"key":"ref20","first-page":"1","article-title":"VISSIM: A microscopic simulation tool to evaluate actuated signal control including bus priority","volume":"32","author":"fellendorf","year":"1994","journal-title":"Proc 64th Inst Transp Eng Annu Meet"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1061\/(ASCE)0733-947X(2003)129:4(342)"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/S0968-090X(00)00047-4"},{"key":"ref24","article-title":"Scoot and scats: A closer look into their operations","author":"stevanovic","year":"2009","journal-title":"Proc 88th Annu Meeting Transp Res Board"},{"key":"ref23","article-title":"Traffic signals: Capacity and timing analysis","author":"akcelik","year":"1981"},{"key":"ref26","first-page":"105","article-title":"Evaluation of optimized policies for adaptive control strategy","author":"gartner","year":"1991","journal-title":"Transp Res Rec"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/MIS.2005.15"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1038\/nature02541"},{"key":"ref59","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv 1509 02971"},{"key":"ref58","first-page":"3528","article-title":"Gradient estimation using stochastic computation graphs","author":"schulman","year":"2015","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref57","first-page":"4008","article-title":"Guided policy search via approximate mirror descent","author":"montgomery","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref56","first-page":"1071","article-title":"Learning neural network policies with guided policy search under unknown dynamics","author":"levine","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref55","first-page":"2029","article-title":"Expectation propagation with stochastic kinetic model in complex interaction systems","author":"fang","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref54","first-page":"2783","article-title":"Using social dynamics to make individual predictions: variational inference with a stochastic kinetic model","author":"xu","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref53","first-page":"942","article-title":"Bethe and related pairwise entropy approximations","author":"weller","year":"2015","journal-title":"Proc UAI"},{"key":"ref52","article-title":"OpenAI 
gym","author":"brockman","year":"2016","journal-title":"arXiv 1606 01540 [cs]"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/1247660.1247686"},{"key":"ref40","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013","journal-title":"arXiv 1312 5602"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2013.2247040"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2864318"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2938236"},{"key":"ref14","first-page":"128","article-title":"Recent development and applications of SUMO-Simulation of Urban MObility","volume":"5","author":"krajzewicz","year":"2012","journal-title":"Int J Adv Syst Meas"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.5334\/baw"},{"key":"ref16","article-title":"Using a deep reinforcement learning agent for traffic signal control","author":"genders","year":"2016","journal-title":"arXiv 1611 01142"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-87479-9_61"},{"key":"ref18","first-page":"1557","article-title":"Optimal control in partially observable complex social systems","author":"yang","year":"2020","journal-title":"Proc 8th Int Conf Auton Agents Multiagent Syst"},{"key":"ref19","first-page":"296","article-title":"Optimal control of complex systems through variational inference with a discrete event decision process","author":"yang","year":"2019","journal-title":"Proc 8th Int Conf Auton Agents Multiagent Syst"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1080\/23288604.2019.1661212"},{"key":"ref3","author":"shatz","year":"2011","journal-title":"Highway Infrastructure Economy Implications for Federal Policy"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.envint.2017.11.025"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1086\/692115"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/1409635.1409677"},{"key":"ref49","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/1869790.1869807"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1140\/epjst\/e2013-01715-5"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1007\/s11116-004-8287-y"},{"key":"ref45","volume":"9","author":"ben-akiva","year":"1985","journal-title":"Discrete Choice Analysis Theory and Application to Travel Demand"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1140\/epjds\/s13688-015-0046-0"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.2172\/88648"},{"key":"ref42","first-page":"1008","article-title":"Actor-critic algorithms","author":"konda","year":"2000","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref41","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"2000","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref44","author":"russel","year":"2013","journal-title":"Artificial Intelligence A Modern Approach"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1080\/01944360208976274"}],"container-title":["IEEE 
Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/8948470\/09200608.pdf?arnumber=9200608","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,12,17]],"date-time":"2021-12-17T19:56:01Z","timestamp":1639770961000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9200608\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"references-count":65,"URL":"https:\/\/doi.org\/10.1109\/access.2020.3024979","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]}}}