{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T07:24:27Z","timestamp":1740122667037,"version":"3.37.3"},"reference-count":33,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2017,8,10]],"date-time":"2017-08-10T00:00:00Z","timestamp":1502323200000},"content-version":"unspecified","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61374067 and 11471341"],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Discrete Event Dyn Syst"],"published-print":{"date-parts":[[2017,12]]},"DOI":"10.1007\/s10626-017-0257-6","type":"journal-article","created":{"date-parts":[[2017,8,10]],"date-time":"2017-08-10T03:51:06Z","timestamp":1502337066000},"page":"675-699","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":15,"title":["The risk probability criterion for discounted continuous-time Markov decision processes"],"prefix":"10.1007","volume":"27","author":[{"given":"Haifeng","family":"Huo","sequence":"first","affiliation":[]},{"given":"Xiaolong","family":"Zou","sequence":"additional","affiliation":[]},{"given":"Xianping","family":"Guo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2017,8,10]]},"reference":[{"key":"257_CR1","doi-asserted-by":"crossref","unstructured":"Anderson WJ (1991) Continuous-time Markov chains. Springer","DOI":"10.1007\/978-1-4612-3038-0"},{"key":"257_CR2","doi-asserted-by":"crossref","unstructured":"B\u00e4uerle N, Rieder U (2011) Markov decision processes with applications to finance. Springer","DOI":"10.1007\/978-3-642-18324-9"},{"key":"257_CR3","unstructured":"Bertsekas D, Shreve S (1996) Stochastic optimal control: the discrete-time case. Academic Press, Inc"},{"key":"257_CR4","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1007\/BF02193458","volume":"86","author":"M Bouakiz","year":"1995","unstructured":"Bouakiz M, Kebir Y (1995) Target-level criterion in Markov decision process. J Optim Theory Appl 86:1\u201315","journal-title":"J Optim Theory Appl"},{"key":"257_CR5","unstructured":"Chung KL (1967) Markov chains with stationary transition probabilities. Springer"},{"key":"257_CR6","doi-asserted-by":"crossref","first-page":"1749","DOI":"10.1016\/j.automatica.2004.05.003","volume":"40","author":"XR Cao","year":"2004","unstructured":"Cao XR, Guo XP (2004) A unified approach to Markov decision problems and performance sensitivity analysis with discounted and average criteria: multichain cases. Automatica 40:1749\u20131759","journal-title":"Automatica"},{"key":"257_CR7","doi-asserted-by":"crossref","unstructured":"Cao XR (2007) Stochastic learning and optimization-a sensitivity-based approach. Springer","DOI":"10.1007\/978-0-387-69082-7"},{"key":"257_CR8","doi-asserted-by":"crossref","first-page":"11","DOI":"10.1007\/s10626-010-0093-4","volume":"21","author":"XR Cao","year":"2011","unstructured":"Cao XR, Wang DX, Lu T, Xu YF (2011) Stochastic control via direct comparison. Discrete Event Dyn Syst 21:11\u201338","journal-title":"Discrete Event Dyn Syst"},{"key":"257_CR9","doi-asserted-by":"crossref","unstructured":"Feinberg E (2012) Reduction of discounted continuous-time MDPs with unbounded jump and reward rates to discrete-time total-reward MDPs. Optimization Control and Applications of Stochastic Systems Springer pp 77\u201397","DOI":"10.1007\/978-0-8176-8337-5_5"},{"key":"257_CR10","doi-asserted-by":"crossref","first-page":"73","DOI":"10.1287\/moor.1060.0210","volume":"32","author":"XP Guo","year":"2007","unstructured":"Guo XP (2007) Continuous-time Markov decision processes with discounted rewards: the case of Polish spaces. Math Oper Res 32:73\u201387","journal-title":"Math Oper Res"},{"key":"257_CR11","doi-asserted-by":"crossref","unstructured":"Guo XP, Hern\u00e1ndez-Lerma O (2009) Continuous-time Markov decision processes. Springer","DOI":"10.1007\/978-3-642-02547-1"},{"key":"257_CR12","doi-asserted-by":"crossref","first-page":"105","DOI":"10.1287\/moor.1100.0477","volume":"36","author":"XP Guo","year":"2011","unstructured":"Guo XP, Piunovskiy A (2011) Discounted continuous-time Markov decision processes with constraints: unbounded transition and loss rates. Math Oper Res 36:105\u2013132","journal-title":"Math Oper Res"},{"key":"257_CR13","doi-asserted-by":"crossref","first-page":"23","DOI":"10.1137\/100805169","volume":"50","author":"XP Guo","year":"2012","unstructured":"Guo XP, Huang YH, Song XY (2012) Linear programming and constrained average optimality for general continuous-time Markov decision processes in history-dependent policies. SIAM J Control Optim 50:23\u201347","journal-title":"SIAM J Control Optim"},{"issue":"4","key":"257_CR14","doi-asserted-by":"crossref","first-page":"1064","DOI":"10.1017\/S0001867800049016","volume":"47","author":"XP Guo","year":"2015","unstructured":"Guo XP, Huang XX, Huang YH (2015) Finite-horizon optimality for continuous-time Markov decision processes with unbounded transition rates. Adv Appl Prob 47(4):1064\u20131087","journal-title":"Adv Appl Prob"},{"key":"257_CR15","doi-asserted-by":"crossref","first-page":"395","DOI":"10.1007\/s10957-011-9813-7","volume":"150","author":"YH Huang","year":"2011","unstructured":"Huang YH, Guo XP, Song XY (2011) Performance analysis for controlled semi-Markov process. J Optim Theory Appl 150:395\u2013415","journal-title":"J Optim Theory Appl"},{"key":"257_CR16","doi-asserted-by":"crossref","first-page":"378","DOI":"10.1016\/j.jmaa.2013.01.021","volume":"402","author":"YH Huang","year":"2013","unstructured":"Huang YH, Guo XP, Li ZF (2013) Minimum risk probability for finite horizon semi-Markov decision process. J Math Anal Appl 402:378\u2013391","journal-title":"J Math Anal Appl"},{"key":"257_CR17","doi-asserted-by":"crossref","first-page":"1923","DOI":"10.1007\/s11425-015-5029-x","volume":"58","author":"XX Huang","year":"2015","unstructured":"Huang XX, Zou XL, Guo XP (2015) A minimization problem of the risk probability in first passage semi-Markov decision processes with loss rates. Sci China Math 58:1923\u20131938","journal-title":"Sci China Math"},{"key":"257_CR18","doi-asserted-by":"crossref","first-page":"281","DOI":"10.1287\/mnsc.1080.0901","volume":"55","author":"LJ Hong","year":"2009","unstructured":"Hong LJ, Liu G (2009) Simulating sensitivities of conditional value at risk. Manag Sci 55:281\u2013293","journal-title":"Manag Sci"},{"key":"257_CR19","doi-asserted-by":"crossref","unstructured":"Hern\u00e1ndez-Lerma O, Lasserre JB (1996) Discrete-time Markov control processes: basic optimality criteria. Springer","DOI":"10.1007\/978-1-4612-0729-0"},{"key":"257_CR20","first-page":"24","volume":"319","author":"J Janssen","year":"2006","unstructured":"Janssen J, Manca R (2006) Semi-Markov risk models for finance, insurance, and reliability. Springer Mathematics 319:24\u201337","journal-title":"Springer Mathematics"},{"key":"257_CR21","doi-asserted-by":"crossref","first-page":"333","DOI":"10.1016\/j.ejor.2012.08.010","volume":"224","author":"YJ Li","year":"2013","unstructured":"Li YJ, Cao F (2013) A basic formula for performance gradient estimation of semi-Markov decision processes. Eur J Oper Res 224:333\u2013339","journal-title":"Eur J Oper Res"},{"key":"257_CR22","doi-asserted-by":"crossref","first-page":"66","DOI":"10.1016\/S0022-247X(02)00097-5","volume":"271","author":"Y Ohtsubo","year":"2002","unstructured":"Ohtsubo Y, Toyonaga K (2002) Optimal policy for minimizing risk models in Markov decision processes. J Math Anal Appl 271:66\u201381","journal-title":"J Math Anal Appl"},{"key":"257_CR23","doi-asserted-by":"crossref","first-page":"79","DOI":"10.1007\/s001860200246","volume":"57","author":"Y Ohtsubo","year":"2003","unstructured":"Ohtsubo Y (2003) Minimizing risk models in stochastic shortest path problems. Math Meth Oper Res 57:79\u201388","journal-title":"Math Meth Oper Res"},{"key":"257_CR24","doi-asserted-by":"crossref","first-page":"548","DOI":"10.1007\/s11768-013-2194-8","volume":"11","author":"M Sakaguchi","year":"2013","unstructured":"Sakaguchi M, Ohtsubo Y (2013) Markov decision processes associated with two threshold probability criteria. J Control Theory Appl 11:548\u2013557","journal-title":"J Control Theory Appl"},{"key":"257_CR25","doi-asserted-by":"crossref","first-page":"1072","DOI":"10.1239\/jap\/1354716658","volume":"49","author":"T Prieto-Rumeau","year":"2012","unstructured":"Prieto-Rumeau T, Hern\u00e1ndez-Lerma O (2012) Discounted continuous-time controlled Markov chains: convergence of control models. J Appl Probab 49:1072\u20131090","journal-title":"J Appl Probab"},{"key":"257_CR26","doi-asserted-by":"crossref","first-page":"2032","DOI":"10.1137\/10081366X","volume":"49","author":"A Piunovskiy","year":"2011","unstructured":"Piunovskiy A, Zhang Y (2011) Discounted continuous-time Markov decision processes with unbounded rates: the convex analytic approach. SIAM J Control Optim 49:2032\u20132061","journal-title":"SIAM J Control Optim"},{"key":"257_CR27","doi-asserted-by":"crossref","DOI":"10.1002\/9780470316887","volume-title":"Markov decision processes: discrete stochastic dynamic programming","author":"ML Puterman","year":"1994","unstructured":"Puterman ML (1994) Markov decision processes: discrete stochastic dynamic programming. Wiley, New York"},{"key":"257_CR28","volume-title":"Estimating distribution sensitivity using generalized likelihood ratio method","author":"YJ Peng","year":"2016","unstructured":"Peng YJ, Fu M, Hu JQ (2016) Estimating distribution sensitivity using generalized likelihood ratio method. WODES, Xi\u2019an, China"},{"key":"257_CR29","doi-asserted-by":"crossref","first-page":"744","DOI":"10.1017\/S0021900200023123","volume":"19","author":"MJ Sobel","year":"1982","unstructured":"Sobel MJ (1982) The variance of discounted Markov decision processes. J Appl Probab 19:744\u2013802","journal-title":"J Appl Probab"},{"key":"257_CR30","doi-asserted-by":"crossref","first-page":"634","DOI":"10.1006\/jmaa.1993.1093","volume":"173","author":"DJ White","year":"1993","unstructured":"White DJ (1993) Minimizing a threshold probability in discounted Markov decision processes. J Math Anal Appl 173:634\u2013646","journal-title":"J Math Anal Appl"},{"key":"257_CR31","doi-asserted-by":"crossref","first-page":"47","DOI":"10.1006\/jmaa.1998.6203","volume":"231","author":"CB Wu","year":"1999","unstructured":"Wu CB, Lin YL (1999) Minimizing risk models in Markov decision processes with policies depending on target values. J Math Anal Appl 231:47\u201367","journal-title":"J Math Anal Appl"},{"key":"257_CR32","first-page":"206","volume":"29","author":"HS Xi","year":"2003","unstructured":"Xi HS, Tang H, Yin BQ (2003) Optimal policies for a continuous time MCP with compact action set. Acta Automat Sinica 29:206\u2013211","journal-title":"Acta Automat Sinica"},{"key":"257_CR33","doi-asserted-by":"crossref","first-page":"29","DOI":"10.1016\/j.automatica.2015.01.006","volume":"54","author":"L Xia","year":"2015","unstructured":"Xia L, Jia QS (2015) Parameterized Markov decision process and its application to service rate control. Automatica 54:29\u201335","journal-title":"Automatica"}],"container-title":["Discrete Event Dynamic Systems"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/article\/10.1007\/s10626-017-0257-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s10626-017-0257-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s10626-017-0257-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,10,2]],"date-time":"2019-10-02T02:47:05Z","timestamp":1569984425000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/s10626-017-0257-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017,8,10]]},"references-count":33,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2017,12]]}},"alternative-id":["257"],"URL":"https:\/\/doi.org\/10.1007\/s10626-017-0257-6","relation":{},"ISSN":["0924-6703","1573-7594"],"issn-type":[{"type":"print","value":"0924-6703"},{"type":"electronic","value":"1573-7594"}],"subject":[],"published":{"date-parts":[[2017,8,10]]}}}