@article{15055,
  abstract     = {Markov decision processes (MDPs) are the de facto framework for sequential decision making in the presence of stochastic uncertainty. A classical optimization criterion for MDPs is to maximize the expected discounted-sum payoff, which ignores low-probability catastrophic events with highly negative impact on the system. On the other hand, risk-averse policies require the probability of undesirable events to be below a given threshold, but they do not account for optimization of the expected payoff. We consider MDPs with discounted-sum payoff with failure states which represent catastrophic outcomes. The objective of risk-constrained planning is to maximize the expected discounted-sum payoff among risk-averse policies that ensure the probability to encounter a failure state is below a desired threshold. Our main contribution is an efficient risk-constrained planning algorithm that combines UCT-like search with a predictor learned through interaction with the MDP (in the style of AlphaZero) and with a risk-constrained action selection via linear programming. We demonstrate the effectiveness of our approach with experiments on classical MDPs from the literature, including benchmarks with an order of $10^6$ states.},
  author       = {Brázdil, Tomáš and Chatterjee, Krishnendu and Novotný, Petr and Vahala, Jiří},
  issn         = {2374-3468},
  journal      = {Proceedings of the 34th AAAI Conference on Artificial Intelligence},
  location     = {New York, NY, United States},
  number       = {06},
  pages        = {9794--9801},
  publisher    = {Association for the Advancement of Artificial Intelligence},
  title        = {{Reinforcement learning of risk-constrained policies in Markov decision processes}},
  doi          = {10.1609/aaai.v34i06.6531},
  volume       = {34},
  year         = {2020},
}