@inproceedings{13967,
  abstract     = {A classic solution technique for Markov decision processes (MDP) and stochastic games (SG) is value iteration (VI). Due to its good practical performance, this approximative approach is typically preferred over exact techniques, even though no practical bounds on the imprecision of the result could be given until recently. As a consequence, even the most used model checkers could return arbitrarily wrong results. Over the past decade, different works derived stopping criteria, indicating when the precision reaches the desired level, for various settings, in particular MDP with reachability, total reward, and mean payoff, and SG with reachability. In this paper, we provide the first stopping criteria for VI on SG with total reward and mean payoff, yielding the first anytime algorithms in these settings. To this end, we provide the solution in two flavours: First through a reduction to the MDP case and second directly on SG. The former is simpler and automatically utilizes any advances on MDP. The latter allows for more local computations, heading towards better practical efficiency. Our solution unifies the previously mentioned approaches for MDP and SG and their underlying ideas. To achieve this, we isolate objective-specific subroutines as well as identify objective-independent concepts. These structural concepts, while surprisingly simple, form the very essence of the unified solution.},
  author       = {Kretinsky, Jan and Meggendorfer, Tobias and Weininger, Maximilian},
  booktitle    = {38th Annual ACM/IEEE Symposium on Logic in Computer Science},
  isbn         = {9798350335873},
  issn         = {1043-6871},
  location     = {Boston, MA, United States},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Stopping criteria for value iteration on stochastic games with quantitative objectives}},
  doi          = {10.1109/LICS56636.2023.10175771},
  volume       = {2023},
  year         = {2023},
}

% AAAI-23 conference paper (IEEE/AAAI vol. 37, issue 12); same proceedings as entry 14243.
@inproceedings{14242,
  abstract     = {We study the problem of training and certifying adversarially robust quantized neural networks (QNNs). Quantization is a technique for making neural networks more efficient by running them using low-bit integer arithmetic and is therefore commonly adopted in industry. Recent work has shown that floating-point neural networks that have been verified to be robust can become vulnerable to adversarial attacks after quantization, and certification of the quantized representation is necessary to guarantee robustness. In this work, we present quantization-aware interval bound propagation (QA-IBP), a novel method for training robust QNNs. Inspired by advances in robust learning of non-quantized networks, our training algorithm computes the gradient of an abstract representation of the actual network. Unlike existing approaches, our method can handle the discrete semantics of QNNs. Based on QA-IBP, we also develop a complete verification procedure for verifying the adversarial robustness of QNNs, which is guaranteed to terminate and produce a correct answer. Compared to existing approaches, the key advantage of our verification procedure is that it runs entirely on GPU or other accelerator devices. We demonstrate experimentally that our approach significantly outperforms existing methods and establish the new state-of-the-art for training and certifying the robustness of QNNs.},
  author       = {Lechner, Mathias and Zikelic, Dorde and Chatterjee, Krishnendu and Henzinger, Thomas A and Rus, Daniela},
  booktitle    = {Proceedings of the 37th AAAI Conference on Artificial Intelligence},
  isbn         = {9781577358800},
  location     = {Washington, DC, United States},
  number       = {12},
  pages        = {14964--14973},
  publisher    = {Association for the Advancement of Artificial Intelligence},
  title        = {{Quantization-aware interval bound propagation for training certifiably robust quantized neural networks}},
  doi          = {10.1609/aaai.v37i12.26747},
  volume       = {37},
  year         = {2023},
}

% Publisher added for consistency with entry 14242 (same proceedings volume and ISBN).
@inproceedings{14243,
  abstract     = {Two-player zero-sum "graph games" are central in logic, verification, and multi-agent systems. The game proceeds by placing a token on a vertex of a graph, and allowing the players to move it to produce an infinite path, which determines the winner or payoff of the game. Traditionally, the players alternate turns in moving the token. In "bidding games", however, the players have budgets and in each turn, an auction (bidding) determines which player moves the token. So far, bidding games have only been studied as full-information games. In this work we initiate the study of partial-information bidding games: we study bidding games in which a player's initial budget is drawn from a known probability distribution. We show that while for some bidding mechanisms and objectives, it is straightforward to adapt the results from the full-information setting to the partial-information setting, for others, the analysis is significantly more challenging, requires new techniques, and gives rise to interesting results. Specifically, we study games with "mean-payoff" objectives in combination with "poorman" bidding. We construct optimal strategies for a partially-informed player who plays against a fully-informed adversary. We show that, somewhat surprisingly, the "value" under pure strategies does not necessarily exist in such games.},
  author       = {Avni, Guy and Jecker, Ismael R and Zikelic, Dorde},
  booktitle    = {Proceedings of the 37th AAAI Conference on Artificial Intelligence},
  isbn         = {9781577358800},
  location     = {Washington, DC, United States},
  number       = {5},
  pages        = {5464--5471},
  publisher    = {Association for the Advancement of Artificial Intelligence},
  title        = {{Bidding graph games with partially-observable budgets}},
  doi          = {10.1609/aaai.v37i5.25679},
  volume       = {37},
  year         = {2023},
}

% NOTE(review): booktitle style ("35th International Conference on ...") differs from
% entry 12000 ("Proceedings of the 34th International Conference on ...") for the same
% conference series (CAV) — consider normalising via an @string macro.
@inproceedings{14259,
  abstract     = {We provide a learning-based technique for guessing a winning strategy in a parity game originating from an LTL synthesis problem. A cheaply obtained guess can be useful in several applications. Not only can the guessed strategy be applied as best-effort in cases where the game’s huge size prohibits rigorous approaches, but it can also increase the scalability of rigorous LTL synthesis in several ways. Firstly, checking whether a guessed strategy is winning is easier than constructing one. Secondly, even if the guess is wrong in some places, it can be fixed by strategy iteration faster than constructing one from scratch. Thirdly, the guess can be used in on-the-fly approaches to prioritize exploration in the most fruitful directions.
In contrast to previous works, we (i) reflect the highly structured logical information in game’s states, the so-called semantic labelling, coming from the recent LTL-to-automata translations, and (ii) learn to reflect it properly by learning from previously solved games, bringing the solving process closer to human-like reasoning.},
  author       = {Kretinsky, Jan and Meggendorfer, Tobias and Prokop, Maximilian and Rieder, Sabine},
  booktitle    = {35th International Conference on Computer Aided Verification},
  isbn         = {9783031377051},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {390--414},
  publisher    = {Springer Nature},
  title        = {{Guessing winning policies in LTL synthesis by semantic learning}},
  doi          = {10.1007/978-3-031-37706-8_20},
  volume       = {13964},
  year         = {2023},
}

% NOTE(review): the running-time expression was garbled by text extraction; reconstructed
% as O((t n^2 log D)^(t log n)), which is consistent with the abstract's claim of
% quasi-polynomial time for constant treewidth — confirm against the published paper.
@inproceedings{12676,
  abstract     = {Turn-based stochastic games (aka simple stochastic games) are two-player zero-sum games played on directed graphs with probabilistic transitions. The goal of player-max is to maximize the probability to reach a target state against the adversarial player-min. These games lie in NP ∩ coNP and are among the rare combinatorial problems that belong to this complexity class for which the existence of a polynomial-time algorithm is a major open question. While a randomized sub-exponential time algorithm exists, all known deterministic algorithms require exponential time in the worst-case. An important open question has been whether faster algorithms can be obtained parametrized by the treewidth of the game graph. Even a deterministic sub-exponential time algorithm for constant treewidth turn-based stochastic games has remained elusive. In this work our main result is a deterministic algorithm to solve turn-based stochastic games that, given a game with n states, treewidth at most t, and the bit-complexity of the probabilistic transition function log D, has running time O((t n^2 log D)^(t log n)). In particular, our algorithm is quasi-polynomial time for games with constant or poly-logarithmic treewidth.},
  author       = {Chatterjee, Krishnendu and Meggendorfer, Tobias and Saona Urmeneta, Raimundo J and Svoboda, Jakub},
  booktitle    = {Proceedings of the 2023 Annual ACM-SIAM Symposium on Discrete Algorithms},
  isbn         = {9781611977554},
  location     = {Florence, Italy},
  pages        = {4590--4605},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Faster algorithm for turn-based stochastic games with bounded treewidth}},
  doi          = {10.1137/1.9781611977554.ch173},
  year         = {2023},
}

% NOTE(review): surname casing corrected to "McKerral" (was "Mckerral") — verify against
% the published author list.
@article{12706,
  abstract     = {Allometric settings of population dynamics models are appealing due to their parsimonious nature and broad utility when studying system level effects. Here, we parameterise the size-scaled Rosenzweig-MacArthur differential equations to eliminate prey-mass dependency, facilitating an in depth analytic study of the equations which incorporates scaling parameters’ contributions to coexistence. We define the functional response term to match empirical findings, and examine situations where metabolic theory derivations and observation diverge. The dynamical properties of the Rosenzweig-MacArthur system, encompassing the distribution of size-abundance equilibria, the scaling of period and amplitude of population cycling, and relationships between predator and prey abundances, are consistent with empirical observation. Our parameterisation is an accurate minimal model across 15+ orders of mass magnitude.},
  author       = {McKerral, Jody C. and Kleshnina, Maria and Ejov, Vladimir and Bartle, Louise and Mitchell, James G. and Filar, Jerzy A.},
  issn         = {1932-6203},
  journal      = {PLoS One},
  number       = {2},
  pages        = {e0279838},
  publisher    = {Public Library of Science},
  title        = {{Empirical parameterisation and dynamical analysis of the allometric Rosenzweig-MacArthur equations}},
  doi          = {10.1371/journal.pone.0279838},
  volume       = {18},
  year         = {2023},
}

% Journal article; no volume/number/pages recorded — presumably published online first,
% TODO add once the print assignment is available.
@article{12738,
  abstract     = {We study turn-based stochastic zero-sum games with lexicographic preferences over objectives. Stochastic games are standard models in control, verification, and synthesis of stochastic reactive systems that exhibit both randomness as well as controllable and adversarial non-determinism. Lexicographic order allows one to consider multiple objectives with a strict preference order. To the best of our knowledge, stochastic games with lexicographic objectives have not been studied before. For a mixture of reachability and safety objectives, we show that deterministic lexicographically optimal strategies exist and memory is only required to remember the already satisfied and violated objectives. For a constant number of objectives, we show that the relevant decision problem is in NP∩coNP, matching the current known bound for single objectives; and in general the decision problem is PSPACE-hard and can be solved in NEXPTIME∩coNEXPTIME. We present an algorithm that computes the lexicographically optimal strategies via a reduction to the computation of optimal strategies in a sequence of single-objectives games. For omega-regular objectives, we restrict our analysis to one-player games, also known as Markov decision processes. We show that lexicographically optimal strategies exist and need either randomization or finite memory. We present an algorithm that solves the relevant decision problem in polynomial time. We have implemented our algorithms and report experimental results on various case studies.},
  author       = {Chatterjee, Krishnendu and Katoen, Joost P and Mohr, Stefanie and Weininger, Maximilian and Winkler, Tobias},
  issn         = {1572-8102},
  journal      = {Formal Methods in System Design},
  publisher    = {Springer Nature},
  title        = {{Stochastic games with lexicographic objectives}},
  doi          = {10.1007/s10703-023-00411-4},
  year         = {2023},
}

@article{12787,
  abstract     = {Populations evolve in spatially heterogeneous environments. While a certain trait might bring a fitness advantage in some patch of the environment, a different trait might be advantageous in another patch. Here, we study the Moran birth–death process with two types of individuals in a population stretched across two patches of size N, each patch favouring one of the two types. We show that the long-term fate of such populations crucially depends on the migration rate μ between the patches. To classify the possible fates, we use the distinction between polynomial (short) and exponential (long) timescales. We show that when μ is high then one of the two types fixates on the whole population after a number of steps that is only polynomial in N. By contrast, when μ is low then each type holds majority in the patch where it is favoured for a number of steps that is at least exponential in N. Moreover, we precisely identify the threshold migration rate μ⋆ that separates those two scenarios, thereby exactly delineating the situations that support long-term coexistence of the two types. We also discuss the case of various cycle graphs and we present computer simulations that perfectly match our analytical results.},
  author       = {Svoboda, Jakub and Tkadlec, Josef and Kaveh, Kamran and Chatterjee, Krishnendu},
  issn         = {1471-2946},
  journal      = {Proceedings of the Royal Society A: Mathematical, Physical and Engineering Sciences},
  number       = {2271},
  publisher    = {The Royal Society},
  title        = {{Coexistence times in the Moran process with environmental heterogeneity}},
  doi          = {10.1098/rspa.2022.0685},
  volume       = {479},
  year         = {2023},
}

@article{12833,
  abstract     = {The input to the token swapping problem is a graph with vertices v1, v2, . . . , vn, and n tokens with labels 1,2, . . . , n, one on each vertex. The goal is to get token i to vertex vi for all i = 1, . . . , n using a minimum number of swaps, where a swap exchanges the tokens on the endpoints of an edge. Token swapping on a tree, also known as “sorting with a transposition tree,” is not known to be in P nor NP-complete. We present some partial results: 1. An optimum swap sequence may need to perform a swap on a leaf vertex that has the correct token (a “happy leaf”), disproving a conjecture of Vaughan. 2. Any algorithm that fixes happy leaves—as all known approximation algorithms for the problem do—has approximation factor at least 4/3. Furthermore, the two best-known 2-approximation algorithms have approximation factor exactly 2. 3. A generalized problem—weighted coloured token swapping—is NP-complete on trees, but solvable in polynomial time on paths and stars. In this version, tokens and vertices have colours, and colours have weights. The goal is to get every token to a vertex of the same colour, and the cost of a swap is the sum of the weights of the two tokens involved.},
  author       = {Biniaz, Ahmad and Jain, Kshitij and Lubiw, Anna and Masárová, Zuzana and Miltzow, Tillmann and Mondal, Debajyoti and Naredla, Anurag Murty and Tkadlec, Josef and Turcotte, Alexi},
  issn         = {1365-8050},
  journal      = {Discrete Mathematics and Theoretical Computer Science},
  number       = {2},
  publisher    = {EPI Sciences},
  title        = {{Token swapping on trees}},
  doi          = {10.46298/DMTCS.8383},
  volume       = {24},
  year         = {2023},
}

% NOTE(review): no issue number or pages/article number recorded — TODO add if available.
@article{12861,
  abstract     = {The field of indirect reciprocity investigates how social norms can foster cooperation when individuals continuously monitor and assess each other’s social interactions. By adhering to certain social norms, cooperating individuals can improve their reputation and, in turn, receive benefits from others. Eight social norms, known as the “leading eight,” have been shown to effectively promote the evolution of cooperation as long as information is public and reliable. These norms categorize group members as either ‘good’ or ‘bad’. In this study, we examine a scenario where individuals instead assign nuanced reputation scores to each other, and only cooperate with those whose reputation exceeds a certain threshold. We find both analytically and through simulations that such quantitative assessments are error-correcting, thus facilitating cooperation in situations where information is private and unreliable. Moreover, our results identify four specific norms that are robust to such conditions, and may be relevant for helping to sustain cooperation in natural populations.},
  author       = {Schmid, Laura and Ekbatani, Farbod and Hilbe, Christian and Chatterjee, Krishnendu},
  issn         = {2041-1723},
  journal      = {Nature Communications},
  publisher    = {Springer Nature},
  title        = {{Quantitative assessment can stabilize indirect reciprocity under imperfect information}},
  doi          = {10.1038/s41467-023-37817-x},
  volume       = {14},
  year         = {2023},
}

% Scientific Reports 12(1), 2022; no pages/article number recorded — TODO confirm.
@article{10731,
  abstract     = {Motivated by COVID-19, we develop and analyze a simple stochastic model for the spread of disease in human population. We track how the number of infected and critically ill people develops over time in order to estimate the demand that is imposed on the hospital system. To keep this demand under control, we consider a class of simple policies for slowing down and reopening society and we compare their efficiency in mitigating the spread of the virus from several different points of view. We find that in order to avoid overwhelming of the hospital system, a policy must impose a harsh lockdown or it must react swiftly (or both). While reacting swiftly is universally beneficial, being harsh pays off only when the country is patient about reopening and when the neighboring countries coordinate their mitigation efforts. Our work highlights the importance of acting decisively when closing down and the importance of patience and coordination between neighboring countries when reopening.},
  author       = {Svoboda, Jakub and Tkadlec, Josef and Pavlogiannis, Andreas and Chatterjee, Krishnendu and Nowak, Martin A.},
  issn         = {2045-2322},
  journal      = {Scientific Reports},
  number       = {1},
  publisher    = {Springer Nature},
  title        = {{Infection dynamics of COVID-19 virus under lockdown and reopening}},
  doi          = {10.1038/s41598-022-05333-5},
  volume       = {12},
  year         = {2022},
}

% Journal of Computer and System Sciences 129 (2022), pp. 1-21.
@article{11402,
  abstract     = {Fixed-horizon planning considers a weighted graph and asks to construct a path that maximizes the sum of weights for a given time horizon T. However, in many scenarios, the time horizon is not fixed, but the stopping time is chosen according to some distribution such that the expected stopping time is T. If the stopping-time distribution is not known, then to ensure robustness, the distribution is chosen by an adversary as the worst-case scenario. A stationary plan for every vertex always chooses the same outgoing edge. For fixed horizon or fixed stopping-time distribution, stationary plans are not sufficient for optimality. Quite surprisingly we show that when an adversary chooses the stopping-time distribution with expected stopping-time T, then stationary plans are sufficient. While computing optimal stationary plans for fixed horizon is NP-complete, we show that computing optimal stationary plans under adversarial stopping-time distribution can be achieved in polynomial time.},
  author       = {Chatterjee, Krishnendu and Doyen, Laurent},
  issn         = {1090-2724},
  journal      = {Journal of Computer and System Sciences},
  pages        = {1--21},
  publisher    = {Elsevier},
  title        = {{Graph planning with expected finite horizon}},
  doi          = {10.1016/j.jcss.2022.04.003},
  volume       = {129},
  year         = {2022},
}

% PLDI 2022 conference paper (ACM); no volume/series recorded.
@inproceedings{11459,
  abstract     = {We present a novel approach to differential cost analysis that, given a program revision, attempts to statically bound the difference in resource usage, or cost, between the two program versions. Differential cost analysis is particularly interesting because of the many compelling applications for it, such as detecting resource-use regressions at code-review time or proving the absence of certain side-channel vulnerabilities. One prior approach to differential cost analysis is to apply relational reasoning that conceptually constructs a product program on which one can over-approximate the difference in costs between the two program versions. However, a significant challenge in any relational approach is effectively aligning the program versions to get precise results. In this paper, our key insight is that we can avoid the need for and the limitations of program alignment if, instead, we bound the difference of two cost-bound summaries rather than directly bounding the concrete cost difference. In particular, our method computes a threshold value for the maximal difference in cost between two program versions simultaneously using two kinds of cost-bound summaries---a potential function that evaluates to an upper bound for the cost incurred in the first program and an anti-potential function that evaluates to a lower bound for the cost incurred in the second. Our method has a number of desirable properties: it can be fully automated, it allows optimizing the threshold value on relative cost, it is suitable for programs that are not syntactically similar, and it supports non-determinism. We have evaluated an implementation of our approach on a number of program pairs collected from the literature, and we find that our method computes tight threshold values on relative cost in most examples.},
  author       = {Zikelic, Dorde and Chang, Bor-Yuh Evan and Bolignano, Pauline and Raimondi, Franco},
  booktitle    = {Proceedings of the 43rd ACM SIGPLAN International Conference on Programming Language Design and Implementation},
  isbn         = {9781450392655},
  location     = {San Diego, CA, United States},
  pages        = {442--457},
  publisher    = {Association for Computing Machinery},
  title        = {{Differential cost analysis with simultaneous potentials and anti-potentials}},
  doi          = {10.1145/3519939.3523435},
  year         = {2022},
}

% arXiv preprint. @unpublished requires a note field (classic BibTeX warns otherwise);
% eprint/eprinttype added per arXiv convention (identifier taken from the DataCite DOI).
% DOI case normalised to match entry 14601 (DOIs are case-insensitive).
@unpublished{14600,
  abstract     = {We study the problem of learning controllers for discrete-time non-linear stochastic dynamical systems with formal reach-avoid guarantees. This work presents the first method for providing formal reach-avoid guarantees, which combine and generalize stability and safety guarantees, with a tolerable probability threshold $p\in[0,1]$ over the infinite time horizon. Our method leverages advances in machine learning literature and it represents formal certificates as neural networks. In particular, we learn a certificate in the form of a reach-avoid supermartingale (RASM), a novel notion that we introduce in this work. Our RASMs provide reachability and avoidance guarantees by imposing constraints on what can be viewed as a stochastic extension of level sets of Lyapunov functions for deterministic systems. Our approach solves several important problems -- it can be used to learn a control policy from scratch, to verify a reach-avoid specification for a fixed control policy, or to fine-tune a pre-trained policy if it does not satisfy the reach-avoid specification. We validate our approach on $3$ stochastic non-linear reinforcement learning tasks.},
  author       = {Zikelic, Dorde and Lechner, Mathias and Henzinger, Thomas A and Chatterjee, Krishnendu},
  booktitle    = {arXiv},
  eprint       = {2210.05308},
  eprinttype   = {arXiv},
  note         = {arXiv preprint},
  title        = {{Learning control policies for stochastic systems with reach-avoid guarantees}},
  doi          = {10.48550/arXiv.2210.05308},
  year         = {2022},
}

% arXiv preprint. @unpublished requires a note field (classic BibTeX warns otherwise);
% eprint/eprinttype added per arXiv convention (identifier taken from the DataCite DOI).
% Hard-wrapped abstract joined onto one line to match the rest of the file.
@unpublished{14601,
  abstract     = {In this work, we address the problem of learning provably stable neural network policies for stochastic control systems. While recent work has demonstrated the feasibility of certifying given policies using martingale theory, the problem of how to learn such policies is little explored. Here, we study the effectiveness of jointly learning a policy together with a martingale certificate that proves its stability using a single learning algorithm. We observe that the joint optimization problem becomes easily stuck in local minima when starting from a randomly initialized policy. Our results suggest that some form of pre-training of the policy is required for the joint optimization to repair and verify the policy successfully.},
  author       = {Zikelic, Dorde and Lechner, Mathias and Chatterjee, Krishnendu and Henzinger, Thomas A},
  booktitle    = {arXiv},
  eprint       = {2205.11991},
  eprinttype   = {arXiv},
  note         = {arXiv preprint},
  title        = {{Learning stabilizing policies in stochastic control systems}},
  doi          = {10.48550/arXiv.2205.11991},
  year         = {2022},
}

% Trailing whitespace removed from the end of the abstract field.
@article{9311,
  abstract     = {Partially observable Markov decision processes (POMDPs) are standard models for dynamic systems with probabilistic and nondeterministic behaviour in uncertain environments. We prove that in POMDPs with long-run average objective, the decision maker has approximately optimal strategies with finite memory. This implies notably that approximating the long-run value is recursively enumerable, as well as a weak continuity property of the value with respect to the transition function.},
  author       = {Chatterjee, Krishnendu and Saona Urmeneta, Raimundo J and Ziliotto, Bruno},
  issn         = {1526-5471},
  journal      = {Mathematics of Operations Research},
  keywords     = {Management Science and Operations Research, General Mathematics, Computer Science Applications},
  number       = {1},
  pages        = {100--119},
  publisher    = {Institute for Operations Research and the Management Sciences},
  title        = {{Finite-memory strategies in POMDPs with long-run average objectives}},
  doi          = {10.1287/moor.2020.1116},
  volume       = {47},
  year         = {2022},
}

% Acta Informatica 59 (2022); no issue number recorded — TODO confirm.
@article{10602,
  abstract     = {Transforming ω-automata into parity automata is traditionally done using appearance records. We present an efficient variant of this idea, tailored to Rabin automata, and several optimizations applicable to all appearance records. We compare the methods experimentally and show that our method produces significantly smaller automata than previous approaches.},
  author       = {Kretinsky, Jan and Meggendorfer, Tobias and Waldmann, Clara and Weininger, Maximilian},
  issn         = {1432-0525},
  journal      = {Acta Informatica},
  keywords     = {computer networks and communications, information systems, software},
  pages        = {585--618},
  publisher    = {Springer Nature},
  title        = {{Index appearance record with preorders}},
  doi          = {10.1007/s00236-021-00412-y},
  volume       = {59},
  year         = {2022},
}

% NOTE(review): the asymptotic bounds were garbled by text extraction (superscripts lost);
% reconstructed as Ω(n^(1/ℓ)), O(n^(2/(ℓ+1))) and ⌊√(2n+1) − 1⌋ — confirm against the
% published abstract.
@article{11938,
  abstract     = {A matching is compatible to two or more labeled point sets of size n with labels {1, . . . , n} if its straight-line drawing on each of these point sets is crossing-free. We study the maximum number of edges in a matching compatible to two or more labeled point sets in general position in the plane. We show that for any two labeled sets of n points in convex position there exists a compatible matching with ⌊√(2n+1) − 1⌋ edges. More generally, for any ℓ labeled point sets we construct compatible matchings of size Ω(n^(1/ℓ)). As a corresponding upper bound, we use probabilistic arguments to show that for any ℓ given sets of n points there exists a labeling of each set such that the largest compatible matching has O(n^(2/(ℓ+1))) edges. Finally, we show that Θ(log n) copies of any set of n points are necessary and sufficient for the existence of labelings of these point sets such that any compatible matching consists only of a single edge.},
  author       = {Aichholzer, Oswin and Arroyo Guevara, Alan M and Masárová, Zuzana and Parada, Irene and Perz, Daniel and Pilz, Alexander and Tkadlec, Josef and Vogtenhuber, Birgit},
  issn         = {1526-1719},
  journal      = {Journal of Graph Algorithms and Applications},
  number       = {2},
  pages        = {225--240},
  publisher    = {Brown University},
  title        = {{On compatible matchings}},
  doi          = {10.7155/jgaa.00591},
  volume       = {26},
  year         = {2022},
}

% Title typo fixed: "auantitative" -> "quantitative" (matches the abstract's terminology
% and the DOI landing title).
@inproceedings{12000,
  abstract     = {We consider the quantitative problem of obtaining lower-bounds on the probability of termination of a given non-deterministic probabilistic program. Specifically, given a non-termination threshold p∈[0,1], we aim for certificates proving that the program terminates with probability at least 1−p. The basic idea of our approach is to find a terminating stochastic invariant, i.e. a subset SI of program states such that (i) the probability of the program ever leaving SI is no more than p, and (ii) almost-surely, the program either leaves SI or terminates.

While stochastic invariants are already well-known, we provide the first proof that the idea above is not only sound, but also complete for quantitative termination analysis. We then introduce a novel sound and complete characterization of stochastic invariants that enables template-based approaches for easy synthesis of quantitative termination certificates, especially in affine or polynomial forms. Finally, by combining this idea with the existing martingale-based methods that are relatively complete for qualitative termination analysis, we obtain the first automated, sound, and relatively complete algorithm for quantitative termination analysis. Notably, our completeness guarantees for quantitative termination analysis are as strong as the best-known methods for the qualitative variant.

Our prototype implementation demonstrates the effectiveness of our approach on various probabilistic programs. We also demonstrate that our algorithm certifies lower bounds on termination probability for probabilistic programs that are beyond the reach of previous methods.},
  author       = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Meggendorfer, Tobias and Zikelic, Dorde},
  booktitle    = {Proceedings of the 34th International Conference on Computer Aided Verification},
  isbn         = {9783031131844},
  issn         = {1611-3349},
  location     = {Haifa, Israel},
  pages        = {55--78},
  publisher    = {Springer},
  title        = {{Sound and complete certificates for quantitative termination analysis of probabilistic programs}},
  doi          = {10.1007/978-3-031-13185-1_4},
  volume       = {13371},
  year         = {2022},
}

% FSTTCS 2022 (LIPIcs vol. 250). NOTE(review): location given as "Madras, India" —
% confirm whether the venue city should read "Chennai, India". No pages recorded — TODO.
@inproceedings{12101,
  abstract     = {Spatial games form a widely-studied class of games from biology and physics modeling the evolution of social behavior. Formally, such a game is defined by a square (d by d) payoff matrix M and an undirected graph G. Each vertex of G represents an individual, that initially follows some strategy i ∈ {1,2,…,d}. In each round of the game, every individual plays the matrix game with each of its neighbors: An individual following strategy i meeting a neighbor following strategy j receives a payoff equal to the entry (i,j) of M. Then, each individual updates its strategy to its neighbors' strategy with the highest sum of payoffs, and the next round starts. The basic computational problems consist of reachability between configurations and the average frequency of a strategy. For general spatial games and graphs, these problems are in PSPACE. In this paper, we examine restricted setting: the game is a prisoner’s dilemma; and G is a subgraph of grid. We prove that basic computational problems for spatial games with prisoner’s dilemma on a subgraph of a grid are PSPACE-hard.},
  author       = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Jecker, Ismael R and Svoboda, Jakub},
  booktitle    = {42nd IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science},
  isbn         = {9783959772617},
  issn         = {1868-8969},
  location     = {Madras, India},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Complexity of spatial games}},
  doi          = {10.4230/LIPIcs.FSTTCS.2022.11},
  volume       = {250},
  year         = {2022},
}

