{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,20]],"date-time":"2024-09-20T15:53:01Z","timestamp":1726847581598},"publisher-location":"Berlin, Heidelberg","reference-count":18,"publisher":"Springer Berlin Heidelberg","isbn-type":[{"type":"print","value":"9783642106767"},{"type":"electronic","value":"9783642106774"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2009]]},"DOI":"10.1007\/978-3-642-10677-4_57","type":"book-chapter","created":{"date-parts":[[2009,12,14]],"date-time":"2009-12-14T17:12:56Z","timestamp":1260810776000},"page":"502-511","source":"Crossref","is-referenced-by-count":14,"title":["Tracking in Reinforcement Learning"],"prefix":"10.1007","author":[{"given":"Matthieu","family":"Geist","sequence":"first","affiliation":[]},{"given":"Olivier","family":"Pietquin","sequence":"additional","affiliation":[]},{"given":"Gabriel","family":"Fricout","sequence":"additional","affiliation":[]}],"member":"297","reference":[{"key":"57_CR1","volume-title":"Reinforcement Learning: An Introduction","author":"R.S. Sutton","year":"1996","unstructured":"Sutton, R.S., Barto, A.G.: Reinforcement Learning: An Introduction. MIT Press, Cambridge (1996)"},{"key":"57_CR2","doi-asserted-by":"crossref","unstructured":"Phua, C.W., Fitch, R.: Tracking Value Function Dynamics to Improve Reinforcement Learning with Piecewise Linear Function Approximation. In: International Conference on Machine Learning, ICML 2007 (2007)","DOI":"10.1145\/1273496.1273591"},{"key":"57_CR3","doi-asserted-by":"crossref","unstructured":"Sutton, R.S., Koop, A., Silver, D.: On the role of tracking in stationary environments. In: Proceedings of the 24th international conference on Machine learning, pp. 871\u2013878 (2007)","DOI":"10.1145\/1273496.1273606"},{"key":"57_CR4","doi-asserted-by":"crossref","unstructured":"Geist, M., Pietquin, O., Fricout, G.: Kalman Temporal Differences: the deterministic case. In: Proceedings of the IEEE International Symposium on Adaptive Dynamic Programming and Reinforcement Learning (ADPRL 2009), Nashville, TN, USA (April 2009)","DOI":"10.1109\/ADPRL.2009.4927543"},{"key":"57_CR5","doi-asserted-by":"crossref","first-page":"35","DOI":"10.1115\/1.3662552","volume":"82","author":"R.E. Kalman","year":"1960","unstructured":"Kalman, R.E.: A New Approach to Linear Filtering and Prediction Problems. Transactions of the ASME\u2013Journal of Basic Engineering\u00a082(Series D), 35\u201345 (1960)","journal-title":"Transactions of the ASME\u2013Journal of Basic Engineering"},{"issue":"3","key":"57_CR6","doi-asserted-by":"publisher","first-page":"401","DOI":"10.1109\/JPROC.2003.823141","volume":"92","author":"S.J. Julier","year":"2004","unstructured":"Julier, S.J., Uhlmann, J.K.: Unscented filtering and nonlinear estimation. Proceedings of the IEEE\u00a092(3), 401\u2013422 (2004)","journal-title":"Proceedings of the IEEE"},{"key":"57_CR7","unstructured":"van der Merwe, R.: Sigma-Point Kalman Filters for Probabilistic Inference in Dynamic State-Space Models. PhD thesis, Oregon Health&Science University, Portland, USA (2004)"},{"issue":"1-3","key":"57_CR8","doi-asserted-by":"publisher","first-page":"33","DOI":"10.1007\/BF00114723","volume":"22","author":"S.J. Bradtke","year":"1996","unstructured":"Bradtke, S.J., Barto, A.G.: Linear Least-Squares Algorithms for Temporal Difference Learning. 
Machine Learning\u00a022(1-3), 33\u201357 (1996)","journal-title":"Machine Learning"},{"key":"57_CR9","doi-asserted-by":"crossref","unstructured":"Baird, L.C.: Residual Algorithms: Reinforcement Learning with Function Approximation. In: Proceedings of the International Conference on Machine Learning, pp. 30\u201337 (1995)","DOI":"10.1016\/B978-1-55860-377-6.50013-X"},{"issue":"1","key":"57_CR10","doi-asserted-by":"publisher","first-page":"89","DOI":"10.1007\/s10994-007-5038-2","volume":"71","author":"A. Antos","year":"2008","unstructured":"Antos, A., Szepesv\u00e1ri, C., Munos, R.: Learning near-optimal policies with Bellman-residual minimization based fitted policy iteration and a single sample path. Machine Learning\u00a071(1), 89\u2013129 (2008)","journal-title":"Machine Learning"},{"key":"57_CR11","unstructured":"Kakade, S.: A natural policy gradient. In: Advances in Neural Information Processing Systems 14 (NIPS 2001), Vancouver, British Columbia, Canada, pp. 1531\u20131538 (2001)"},{"key":"57_CR12","series-title":"Lecture Notes in Artificial Intelligence","doi-asserted-by":"publisher","first-page":"280","DOI":"10.1007\/11564096_29","volume-title":"Machine Learning: ECML 2005","author":"J. Peters","year":"2005","unstructured":"Peters, J., Vijayakumar, S., Schaal, S.: Natural actor-critic. In: Gama, J., Camacho, R., Brazdil, P.B., Jorge, A.M., Torgo, L. (eds.) ECML 2005. LNCS (LNAI), vol.\u00a03720, pp. 280\u2013291. Springer, Heidelberg (2005)"},{"issue":"2-3","key":"57_CR13","first-page":"233","volume":"49","author":"J.A. Boyan","year":"1999","unstructured":"Boyan, J.A.: Technical Update: Least-Squares Temporal Difference Learning. Machine Learning\u00a049(2-3), 233\u2013246 (1999)","journal-title":"Machine Learning"},{"key":"57_CR14","doi-asserted-by":"publisher","first-page":"1107","DOI":"10.1162\/jmlr.2003.4.6.1107","volume":"4","author":"M.G. Lagoudakis","year":"2003","unstructured":"Lagoudakis, M.G., Parr, R.: Least-Squares Policy Iteration. Journal of Machine Learning Research\u00a04, 1107\u20131149 (2003)","journal-title":"Journal of Machine Learning Research"},{"issue":"6","key":"57_CR15","doi-asserted-by":"publisher","first-page":"2112","DOI":"10.1109\/TSP.2005.847845","volume":"53","author":"S. Jo","year":"2005","unstructured":"Jo, S., Kim, S.W.: Consistent Normalized Least Mean Square Filtering with Noisy Data Matrix. IEEE Transactions on Signal Processing\u00a053(6), 2112\u20132123 (2005)","journal-title":"IEEE Transactions on Signal Processing"},{"key":"57_CR16","doi-asserted-by":"crossref","unstructured":"Engel, Y., Mannor, S., Meir, R.: Reinforcement Learning with Gaussian Processes. In: Proceedings of Internation Conference on Machine Learning, ICML 2005 (2005)","DOI":"10.1145\/1102351.1102377"},{"key":"57_CR17","unstructured":"Bhatnagar, S., Sutton, R.S., Ghavamzadeh, M., Lee, M.: Incremental Natural Actor-Critic Algorithms. In: Advances in Neural Information Processing Systems, Vancouver, vol.\u00a021 (2008)"},{"key":"57_CR18","series-title":"Lecture Notes in Artificial Intelligence","doi-asserted-by":"publisher","first-page":"96","DOI":"10.1007\/978-3-540-89722-4_8","volume-title":"Recent Advances in Reinforcement Learning","author":"M. Geist","year":"2008","unstructured":"Geist, M., Pietquin, O., Fricout, G.: Bayesian Reward Filtering. In: Girgin, S., Loth, M., Munos, R., Preux, P., Ryabko, D. (eds.) EWRL 2008. LNCS (LNAI), vol.\u00a05323, pp. 96\u2013109. 
Springer, Heidelberg (2008)"}],"container-title":["Lecture Notes in Computer Science","Neural Information Processing"],"original-title":[],"link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-642-10677-4_57.pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,4,30]],"date-time":"2021-04-30T07:38:12Z","timestamp":1619768292000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-642-10677-4_57"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2009]]},"ISBN":["9783642106767","9783642106774"],"references-count":18,"URL":"https:\/\/doi.org\/10.1007\/978-3-642-10677-4_57","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2009]]}}}