@inproceedings{628,
  abstract     = {We consider the problem of developing automated techniques for solving recurrence relations to aid the expected-runtime analysis of programs. The motivation is that several classical textbook algorithms have quite efficient expected-runtime complexity, whereas the corresponding worst-case bounds are either inefficient (e.g., Quick-Sort) or completely ineffective (e.g., Coupon-Collector). Since the main focus of expected-runtime analysis is to obtain efficient bounds, we consider bounds that are either logarithmic, linear, or almost-linear (O(log n), O(n), and O(n · log n), respectively, where n represents the input size). Our main contribution is an efficient and sound approach, namely a simple linear-time algorithm, for deriving such expected-runtime bounds from recurrence relations induced by randomized algorithms. The experimental results show that our approach can efficiently derive asymptotically optimal expected-runtime bounds for the recurrences of classical randomized algorithms, including Randomized-Search, Quick-Sort, Quick-Select, and Coupon-Collector, where the worst-case bounds are either inefficient (e.g., linear as compared to logarithmic expected-runtime complexity, or quadratic as compared to linear or almost-linear expected-runtime complexity) or ineffective.},
  author       = {Chatterjee, Krishnendu and Fu, Hongfei and Murhekar, Aniket},
  editor       = {Majumdar, Rupak and Kunčak, Viktor},
  booktitle    = {Computer Aided Verification (CAV 2017)},
  isbn         = {978-3-319-63386-2},
  location     = {Heidelberg, Germany},
  pages        = {118--139},
  publisher    = {Springer},
  title        = {{Automated recurrence analysis for almost-linear expected-runtime bounds}},
  doi          = {10.1007/978-3-319-63387-9_6},
  volume       = {10426},
  year         = {2017},
}

@inproceedings{645,
  abstract     = {Markov decision processes (MDPs) are standard models for probabilistic systems with non-deterministic behaviours. Long-run average rewards provide a mathematically elegant formalism for expressing long-term performance. Value iteration (VI) is one of the simplest and most efficient algorithmic approaches to MDPs with other objectives, such as reachability. Unfortunately, a naive extension of VI does not work for MDPs with long-run average rewards, as there is no known stopping criterion. In this work, our contributions are threefold. (1) We refute a conjecture related to stopping criteria for MDPs with long-run average rewards. (2) We present two practical VI-based algorithms for MDPs with long-run average rewards. First, we show that a combination of applying VI locally to each maximal end-component (MEC) and VI for reachability objectives provides approximation guarantees. Second, extending this approach with a simulation-guided on-demand variant of VI, we obtain an anytime algorithm that can handle very large models. (3) Finally, we present experimental results showing that our methods significantly outperform the standard approaches on several benchmarks.},
  author       = {Ashok, Pranav and Chatterjee, Krishnendu and Daca, Przemysław and Křetínský, Jan and Meggendorfer, Tobias},
  editor       = {Majumdar, Rupak and Kunčak, Viktor},
  booktitle    = {Computer Aided Verification (CAV 2017)},
  isbn         = {978-3-319-63386-2},
  location     = {Heidelberg, Germany},
  pages        = {201--221},
  publisher    = {Springer},
  title        = {{Value iteration for long-run average reward in Markov decision processes}},
  doi          = {10.1007/978-3-319-63387-9_10},
  volume       = {10426},
  year         = {2017},
}