{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,8,9]],"date-time":"2024-08-09T05:49:49Z","timestamp":1723182589353},"reference-count":22,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2019,6,1]],"date-time":"2019-06-01T00:00:00Z","timestamp":1559347200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,6,1]],"date-time":"2019-06-01T00:00:00Z","timestamp":1559347200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,6,1]],"date-time":"2019-06-01T00:00:00Z","timestamp":1559347200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Tencent Rhino-bird Gift Funds"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2019,6]]},"DOI":"10.1109\/tnnls.2018.2871361","type":"journal-article","created":{"date-parts":[[2018,10,30]],"date-time":"2018-10-30T19:01:01Z","timestamp":1540926061000},"page":"1831-1840","source":"Crossref","is-referenced-by-count":8,"title":["Stable and Efficient Policy Evaluation"],"prefix":"10.1109","volume":"30","author":[{"ORCID":"http:\/\/orcid.org\/0000-0003-2625-9865","authenticated-orcid":false,"given":"Daoming","family":"Lyu","sequence":"first","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0003-2519-6196","authenticated-orcid":false,"given":"Bo","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Matthieu","family":"Geist","sequence":"additional","affiliation":[]},{"given":"Wen","family":"Dong","sequence":"additional","affiliation":[]},{"given":"Saad","family":"Biaz","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-7028-4956","authenticated-orcid":false,"given":"Qi","family":"Wang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2017.2712188"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1137\/1.9780898718003"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-377-6.50013-X"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/9.580874"},{"key":"ref14","first-page":"809","article-title":"Policy evaluation with temporal differences: A survey and comparison","volume":"15","author":"dann","year":"2014","journal-title":"J Mach Learn Res"},{"key":"ref15","first-page":"289","article-title":"Off-policy learning with eligibility traces: A survey","volume":"15","author":"geist","year":"2014","journal-title":"J Mach Learn Res"},{"key":"ref16","first-page":"2603","article-title":"An emphatic approach to the problem of off-policy temporal-difference learning","volume":"17","author":"sutton","year":"2015","journal-title":"J Mach Learn Res"},{"key":"ref17","first-page":"1","article-title":"Safe and efficient off-policy reinforcement learning","author":"munos","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref18","first-page":"759","article-title":"Eligibility traces for off-policy policy evaluation","author":"precup","year":"2000","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref19","first-page":"1724","article-title":"On convergence of emphatic temporal-difference 
learning","author":"yu","year":"2015","journal-title":"Proc Conf Learn Theory"},{"key":"ref4","first-page":"504","article-title":"Finite-sample analysis of proximal gradient TD algorithms","author":"liu","year":"2015","journal-title":"Proc Conf Uncertainty Artif Intell"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553501"},{"key":"ref6","first-page":"959","article-title":"Should one compute the temporal difference fix point or minimize the Bellman Residual? The unified oblique projection view","author":"scherrer","year":"2010","journal-title":"Proc 27th Int Conf Mach Learn"},{"key":"ref5","first-page":"494","article-title":"Investigating practical linear temporal difference learning","author":"adam","year":"2016","journal-title":"Proc 1st Int Conf Autonomous Agents Multiagent Syst"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2017.2773458"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"ref2","first-page":"1","article-title":"Convergence of least squares temporal difference methods under general conditions","author":"yu","year":"2010","journal-title":"Proc 27th Int Conf Mach Learn"},{"key":"ref1","first-page":"1052","article-title":"Stable fitted reinforcement learning","author":"gordon","year":"1996","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"621","DOI":"10.1109\/TNNLS.2013.2281663","article-title":"Policy iteration adaptive dynamic programming algorithm for discrete-time nonlinear systems","volume":"25","author":"liu","year":"2014","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"ref20","article-title":"Incremental off-policy reinforcement learning algorithms","author":"mahmood","year":"2017"},{"key":"ref22","first-page":"380","article-title":"Value function approximation in reinforcement learning using the Fourier basis","author":"konidaris","year":"2011","journal-title":"Proc 25th Conf Artif Intell"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1023\/A:1017936530646"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/8721184\/08515047.pdf?arnumber=8515047","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,13]],"date-time":"2022-07-13T20:53:30Z","timestamp":1657745610000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8515047\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,6]]},"references-count":22,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2018.2871361","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019,6]]}}}