{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T10:14:41Z","timestamp":1740132881974,"version":"3.37.3"},"reference-count":18,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2019,4,1]],"date-time":"2019-04-01T00:00:00Z","timestamp":1554076800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,4,1]],"date-time":"2019-04-01T00:00:00Z","timestamp":1554076800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,4,1]],"date-time":"2019-04-01T00:00:00Z","timestamp":1554076800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000104","name":"National Aeronautics and Space Administration","doi-asserted-by":"publisher","award":["NNXI4AIlIG"],"id":[{"id":"10.13039\/100000104","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000183","name":"Army Research Office","doi-asserted-by":"publisher","award":["W911NF-15-1-0127"],"id":[{"id":"10.13039\/100000183","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Intell. Transport. Syst."],"published-print":{"date-parts":[[2019,4]]},"DOI":"10.1109\/tits.2018.2848264","type":"journal-article","created":{"date-parts":[[2018,7,25]],"date-time":"2018-07-25T18:50:23Z","timestamp":1532544623000},"page":"1259-1268","source":"Crossref","is-referenced-by-count":36,"title":["Deep Reinforcement Learning for Event-Driven Multi-Agent Decision Processes"],"prefix":"10.1109","volume":"20","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1830-0637","authenticated-orcid":false,"given":"Kunal","family":"Menda","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3272-3460","authenticated-orcid":false,"given":"Yi-Chun","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Justin","family":"Grana","sequence":"additional","affiliation":[]},{"given":"James W.","family":"Bono","sequence":"additional","affiliation":[]},{"given":"Brendan D.","family":"Tracey","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7238-9663","authenticated-orcid":false,"given":"Mykel J.","family":"Kochenderfer","sequence":"additional","affiliation":[]},{"given":"David","family":"Wolpert","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-71682-4_5"},{"key":"ref11","first-page":"1","article-title":"High-dimensional continuous control using generalized advantage estimation","author":"schulman","year":"2016","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref12","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref13","first-page":"1273","article-title":"Planning with macro-actions in decentralized POMDPs","author":"amato","year":"2014","journal-title":"Proc Int'l Joint Conf Autonomous Agents and Multiagent Systems (AAMAS)"},{"journal-title":"Simpy Simulating Systems in Python","year":"2002","author":"m\u00fcller","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2016.7795538"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1023\/A:1008202821328"},{"journal-title":"SciPy Open source scientific tools for Python","year":"2001","author":"jones","key":"ref17"},{"key":"ref18","first-page":"1329","article-title":"Benchmarking deep reinforcement learning for continuous control","author":"duan","year":"2016","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-006-7035-4"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3171"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/IMSCCS.2006.90"},{"key":"ref5","first-page":"2523","article-title":"Learning for decentralized control of multiagent systems in large, partially-observable stochastic environments","author":"liu","year":"2016","journal-title":"Proc AAAI Conf Artif Intell (AAAI)"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.2514\/6.2016-1407"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/SMC.2016.7844563"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1023\/A:1025696116075"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1177\/0278364916679611"}],"container-title":["IEEE Transactions on Intelligent Transportation Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6979\/8678528\/08419722.pdf?arnumber=8419722","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,13]],"date-time":"2022-07-13T21:08:40Z","timestamp":1657746520000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8419722\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,4]]},"references-count":18,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tits.2018.2848264","relation":{},"ISSN":["1524-9050","1558-0016"],"issn-type":[{"type":"print","value":"1524-9050"},{"type":"electronic","value":"1558-0016"}],"subject":[],"published":{"date-parts":[[2019,4]]}}}