@inproceedings{81,
  abstract     = {We solve the offline monitoring problem for timed propositional temporal logic (TPTL), interpreted over dense-time Boolean signals. The variant of TPTL we consider extends linear temporal logic (LTL) with clock variables and reset quantifiers, providing a mechanism to specify real-time constraints. We first describe a general monitoring algorithm based on an exhaustive computation of the set of satisfying clock assignments as a finite union of zones. We then propose a specialized monitoring algorithm for the one-variable case using a partition of the time domain based on the notion of region equivalence, whose complexity is linear in the length of the signal, thereby generalizing a known result regarding the monitoring of metric temporal logic (MTL). The region and zone representations of time constraints are known from timed automata verification and can also be used in the discrete-time case. Our prototype implementation appears to outperform previous discrete-time implementations of TPTL monitoring.},
  author       = {Elgyütt, Adrian and Ferrère, Thomas and Henzinger, Thomas A.},
  booktitle    = {Formal Modeling and Analysis of Timed Systems ({FORMATS} 2018)},
  location     = {Beijing, China},
  pages        = {53--70},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {{Monitoring temporal logic with clock variables}},
  doi          = {10.1007/978-3-030-00151-3_4},
  volume       = {11022},
  year         = {2018},
}

@inproceedings{24,
  abstract     = {Partially-observable Markov decision processes (POMDPs) with discounted-sum payoff are a standard framework to model a wide range of problems related to decision making under uncertainty. Traditionally, the goal has been to obtain policies that optimize the expectation of the discounted-sum payoff. A key drawback of the expectation measure is that even low probability events with extreme payoff can significantly affect the expectation, and thus the obtained policies are not necessarily risk-averse. An alternate approach is to optimize the probability that the payoff is above a certain threshold, which allows obtaining risk-averse policies, but ignores optimization of the expectation. We consider the expectation optimization with probabilistic guarantee (EOPG) problem, where the goal is to optimize the expectation ensuring that the payoff is above a given threshold with at least a specified probability. We present several results on the EOPG problem, including the first algorithm to solve it.},
  author       = {Chatterjee, Krishnendu and Elgyütt, Adrian and Novotný, Petr and Rouillé, Owen},
  booktitle    = {Proceedings of the 27th International Joint Conference on Artificial Intelligence ({IJCAI} 2018)},
  location     = {Stockholm, Sweden},
  pages        = {4692--4699},
  publisher    = {IJCAI},
  title        = {{Expectation optimization with probabilistic guarantees in POMDPs with discounted-sum objectives}},
  doi          = {10.24963/ijcai.2018/652},
  volume       = {2018},
  year         = {2018},
}

