@inproceedings{9644,
  abstract     = {We present a new approach to proving non-termination of non-deterministic integer programs. Our technique is rather simple but efficient. It relies on a purely syntactic reversal of the program's transition system followed by a constraint-based invariant synthesis with constraints coming from both the original and the reversed transition system. The latter task is performed by a simple call to an off-the-shelf SMT-solver, which allows us to leverage the latest advances in SMT-solving. Moreover, our method offers a combination of features not present (as a whole) in previous approaches: it handles programs with non-determinism, provides relative completeness guarantees and supports programs with polynomial arithmetic. The experiments performed with our prototype tool RevTerm show that our approach, despite its simplicity and stronger theoretical guarantees, is at least on par with the state-of-the-art tools, often achieving a non-trivial improvement under a proper configuration of its parameters.},
  author       = {Chatterjee, Krishnendu and Goharshady, Ehsan Kafshdar and Novotný, Petr and Zikelic, Dorde},
  booktitle    = {Proceedings of the 42nd ACM SIGPLAN International Conference on Programming Language Design and Implementation},
  isbn         = {9781450383912},
  location     = {Online},
  pages        = {1033--1048},
  publisher    = {Association for Computing Machinery},
  title        = {{Proving non-termination by program reversal}},
  doi          = {10.1145/3453483.3454093},
  year         = {2021},
}

@inproceedings{9645,
  abstract     = {We consider the fundamental problem of reachability analysis over imperative programs with real variables. Previous works that tackle reachability are either unable to handle programs consisting of general loops (e.g. symbolic execution), or lack completeness guarantees (e.g. abstract interpretation), or are not automated (e.g. incorrectness logic). In contrast, we propose a novel approach for reachability analysis that can handle general and complex loops, is complete, and can be entirely automated for a wide family of programs. Through the notion of Inductive Reachability Witnesses (IRWs), our approach extends ideas from both invariant generation and termination to reachability analysis.

We first show that our IRW-based approach is sound and complete for reachability analysis of imperative programs. Then, we focus on linear and polynomial programs and develop automated methods for synthesizing linear and polynomial IRWs. In the linear case, we follow the well-known approaches using Farkas' Lemma. Our main contribution is in the polynomial case, where we present a push-button semi-complete algorithm. We achieve this using a novel combination of classical theorems in real algebraic geometry, such as Putinar's Positivstellensatz and Hilbert's Strong Nullstellensatz. Finally, our experimental results show we can prove complex reachability objectives over various benchmarks that were beyond the reach of previous methods.},
  author       = {Asadi, Ali and Chatterjee, Krishnendu and Fu, Hongfei and Goharshady, Amir Kafshdar and Mahdavi, Mohammad},
  booktitle    = {Proceedings of the 42nd ACM SIGPLAN International Conference on Programming Language Design and Implementation},
  isbn         = {9781450383912},
  location     = {Online},
  pages        = {772--787},
  publisher    = {Association for Computing Machinery},
  title        = {{Polynomial reachability witnesses via Stellensätze}},
  doi          = {10.1145/3453483.3454076},
  year         = {2021},
}

@inproceedings{9646,
  abstract     = {We consider the fundamental problem of deriving quantitative bounds on the probability that a given assertion is violated in a probabilistic program. We provide automated algorithms that obtain both lower and upper bounds on the assertion violation probability. The main novelty of our approach is that we prove new and dedicated fixed-point theorems which serve as the theoretical basis of our algorithms and enable us to reason about assertion violation bounds in terms of pre and post fixed-point functions. To synthesize such fixed-points, we devise algorithms that utilize a wide range of mathematical tools, including repulsing ranking supermartingales, Hoeffding's lemma, Minkowski decompositions, Jensen's inequality, and convex optimization. On the theoretical side, we provide (i) the first automated algorithm for lower-bounds on assertion violation probabilities, (ii) the first complete algorithm for upper-bounds of exponential form in affine programs, and (iii) provably and significantly tighter upper-bounds than the previous approaches. On the practical side, we show our algorithms can handle a wide variety of programs from the literature and synthesize bounds that are remarkably tighter than previous results, in some cases by thousands of orders of magnitude.},
  author       = {Wang, Jinyi and Sun, Yican and Fu, Hongfei and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar},
  booktitle    = {Proceedings of the 42nd ACM SIGPLAN International Conference on Programming Language Design and Implementation},
  isbn         = {9781450383912},
  location     = {Online},
  pages        = {1171--1186},
  publisher    = {Association for Computing Machinery},
  title        = {{Quantitative analysis of assertion violations in probabilistic programs}},
  doi          = {10.1145/3453483.3454102},
  year         = {2021},
}

@inproceedings{10002,
  abstract     = {We present a faster symbolic algorithm for the following central problem in probabilistic verification: Compute the maximal end-component (MEC) decomposition of Markov decision processes (MDPs). This problem generalizes the SCC decomposition problem of graphs and closed recurrent sets of Markov chains. The model of symbolic algorithms is widely used in formal verification and model-checking, where access to the input model is restricted to only symbolic operations (e.g., basic set operations and computation of one-step neighborhood). For an input MDP with n vertices and m edges, the classical symbolic algorithm from the 1990s for the MEC decomposition requires O(n²) symbolic operations and O(1) symbolic space. The only other symbolic algorithm for the MEC decomposition requires O(n√m) symbolic operations and O(√m) symbolic space. A main open question is whether the worst-case O(n²) bound for symbolic operations can be beaten. We present a symbolic algorithm that requires Õ(n^{1.5}) symbolic operations and Õ(√n) symbolic space. Moreover, the parametrization of our algorithm provides a trade-off between symbolic operations and symbolic space: for all 0 < ϵ ≤ 1/2 the symbolic algorithm requires Õ(n^{2−ϵ}) symbolic operations and Õ(n^ϵ) symbolic space (Õ hides poly-logarithmic factors). Using our techniques we present faster algorithms for computing the almost-sure winning regions of ω-regular objectives for MDPs. We consider the canonical parity objectives for ω-regular objectives, and for parity objectives with d priorities we present an algorithm that computes the almost-sure winning region with Õ(n^{2−ϵ}) symbolic operations and Õ(n^ϵ) symbolic space, for all 0 < ϵ ≤ 1/2.},
  author       = {Chatterjee, Krishnendu and Dvorak, Wolfgang and Henzinger, Monika H and Svozil, Alexander},
  booktitle    = {Proceedings of the 36th Annual ACM/IEEE Symposium on Logic in Computer Science},
  isbn         = {978-1-6654-4896-3},
  issn         = {1043-6871},
  keywords     = {Computer science, Computational modeling, Markov processes, Probabilistic logic, Formal verification, Game Theory},
  location     = {Rome, Italy},
  pages        = {1--13},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Symbolic time and space tradeoffs for probabilistic verification}},
  doi          = {10.1109/LICS52264.2021.9470739},
  year         = {2021},
}

@inproceedings{10004,
  abstract     = {Markov chains are the de facto finite-state model for stochastic dynamical systems, and Markov decision processes (MDPs) extend Markov chains by incorporating non-deterministic behaviors. Given an MDP and rewards on states, a classical optimization criterion is the maximal expected total reward where the MDP stops after T steps, which can be computed by a simple dynamic programming algorithm. We consider a natural generalization of the problem where the stopping times can be chosen according to a probability distribution, such that the expected stopping time is T, to optimize the expected total reward. Quite surprisingly we establish inter-reducibility of the expected stopping-time problem for Markov chains with the Positivity problem (which is related to the well-known Skolem problem), for which establishing either decidability or undecidability would be a major breakthrough. Given the hardness of the exact problem, we consider the approximate version of the problem: we show that it can be solved in exponential time for Markov chains and in exponential space for MDPs.},
  author       = {Chatterjee, Krishnendu and Doyen, Laurent},
  booktitle    = {Proceedings of the 36th Annual ACM/IEEE Symposium on Logic in Computer Science},
  isbn         = {978-1-6654-4896-3},
  issn         = {1043-6871},
  keywords     = {Computer science, Heuristic algorithms, Memory management, Automata, Markov processes, Probability distribution, Complexity theory},
  location     = {Rome, Italy},
  pages        = {1--13},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Stochastic processes with expected stopping time}},
  doi          = {10.1109/LICS52264.2021.9470595},
  year         = {2021},
}

@inproceedings{10052,
  abstract     = {A deterministic finite automaton (DFA) 𝒜 is composite if its language L(𝒜) can be decomposed into an intersection ⋂_{i = 1}^k L(𝒜_i) of languages of smaller DFAs. Otherwise, 𝒜 is prime. This notion of primality was introduced by Kupferman and Mosheiff in 2013, and while they proved that we can decide whether a DFA is composite, the precise complexity of this problem is still open, with a doubly-exponential gap between the upper and lower bounds. In this work, we focus on permutation DFAs, i.e., those for which the transition monoid is a group. We provide an NP algorithm to decide whether a permutation DFA is composite, and show that the difficulty of this problem comes from the number of non-accepting states of the instance: we give a fixed-parameter tractable algorithm with the number of rejecting states as the parameter. Moreover, we investigate the class of commutative permutation DFAs. Their structural properties allow us to decide compositionality in NL, and even in LOGSPACE if the alphabet size is fixed. Despite this low complexity, we show that complex behaviors still arise in this class: we provide a family of composite DFAs each requiring polynomially many factors with respect to its size. We also consider the variant of the problem that asks whether a DFA is k-factor composite, that is, decomposable into k smaller DFAs, for some given integer k ∈ ℕ. We show that, for commutative permutation DFAs, restricting the number of factors makes the decision computationally harder, and yields a problem with tight bounds: it is NP-complete. Finally, we show that in general, this problem is in PSPACE, and it is in LOGSPACE for DFAs with a singleton alphabet.},
  author       = {Jecker, Ismael R and Mazzocchi, Nicolas and Wolf, Petra},
  booktitle    = {32nd International Conference on Concurrency Theory},
  isbn         = {978-3-95977-203-7},
  issn         = {1868-8969},
  location     = {Paris, France},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Decomposing permutation automata}},
  doi          = {10.4230/LIPIcs.CONCUR.2021.18},
  volume       = {203},
  year         = {2021},
}

@inproceedings{10054,
  abstract     = {Graphs and games on graphs are fundamental models for the analysis of reactive systems, in particular, for model-checking and the synthesis of reactive systems. The class of ω-regular languages provides a robust specification formalism for the desired properties of reactive systems. In the classical infinitary formulation of the liveness part of an ω-regular specification, a "good" event must happen eventually without any bound between the good events. A stronger notion of liveness is bounded liveness, which requires that good events happen within d transitions. Given a graph or a game graph with n vertices, m edges, and a bounded liveness objective, the previous best-known algorithmic bounds are as follows: (i) O(dm) for graphs, which in the worst-case is O(n³); and (ii) O(n² d²) for games on graphs. Our main contributions improve these long-standing algorithmic bounds. For graphs we present: (i) a randomized algorithm with one-sided error with running time O(n^{2.5} log n) for the bounded liveness objectives; and (ii) a deterministic linear-time algorithm for the complement of bounded liveness objectives. For games on graphs, we present an O(n² d) time algorithm for the bounded liveness objectives.},
  author       = {Chatterjee, Krishnendu and Henzinger, Monika H and Kale, Sagar Sudhir and Svozil, Alexander},
  booktitle    = {48th International Colloquium on Automata, Languages, and Programming},
  isbn         = {978-3-95977-195-5},
  issn         = {1868-8969},
  location     = {Glasgow, Scotland},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Faster algorithms for bounded liveness in graphs and game graphs}},
  doi          = {10.4230/LIPIcs.ICALP.2021.124},
  volume       = {198},
  year         = {2021},
}

@inproceedings{10055,
  abstract     = {Repeated idempotent elements are commonly used to characterise iterable behaviours in abstract models of computation. Therefore, given a monoid M, it is natural to ask how long a sequence of elements of M needs to be to ensure the presence of consecutive idempotent factors. This question is formalised through the notion of the Ramsey function R_M associated to M, obtained by mapping every k ∈ ℕ to the minimal integer R_M(k) such that every word u ∈ M^* of length R_M(k) contains k consecutive non-empty factors that correspond to the same idempotent element of M. In this work, we study the behaviour of the Ramsey function R_M by investigating the regular 𝒟-length of M, defined as the largest size L(M) of a submonoid of M isomorphic to the set of natural numbers {1,2, …, L(M)} equipped with the max operation. We show that the regular 𝒟-length of M determines the degree of R_M, by proving that k^L(M) ≤ R_M(k) ≤ (k|M|⁴)^L(M). To allow applications of this result, we provide the value of the regular 𝒟-length of diverse monoids. In particular, we prove that the full monoid of n × n Boolean matrices, which is used to express transition monoids of non-deterministic automata, has a regular 𝒟-length of (n²+n+2)/2.},
  author       = {Jecker, Ismael R},
  booktitle    = {38th International Symposium on Theoretical Aspects of Computer Science},
  isbn         = {978-3-95977-180-1},
  issn         = {1868-8969},
  location     = {Saarbrücken, Germany},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{A Ramsey theorem for finite monoids}},
  doi          = {10.4230/LIPIcs.STACS.2021.44},
  volume       = {187},
  year         = {2021},
}

@inproceedings{10075,
  abstract     = {We study the expressiveness and succinctness of good-for-games pushdown automata (GFG-PDA) over finite words, that is, pushdown automata whose nondeterminism can be resolved based on the run constructed so far, but independently of the remainder of the input word. We prove that GFG-PDA recognise more languages than deterministic PDA (DPDA) but not all context-free languages (CFL). This class is orthogonal to unambiguous CFL. We further show that GFG-PDA can be exponentially more succinct than DPDA, while PDA can be double-exponentially more succinct than GFG-PDA. We also study GFGness in visibly pushdown automata (VPA), which enjoy better closure properties than PDA, and for which we show GFGness to be ExpTime-complete. GFG-VPA can be exponentially more succinct than deterministic VPA, while VPA can be exponentially more succinct than GFG-VPA. Both of these lower bounds are tight. Finally, we study the complexity of resolving nondeterminism in GFG-PDA. Every GFG-PDA has a positional resolver, a function that resolves nondeterminism and that is only dependent on the current configuration. Pushdown transducers are sufficient to implement the resolvers of GFG-VPA, but not those of GFG-PDA. GFG-PDA with finite-state resolvers are determinisable.},
  author       = {Guha, Shibashis and Jecker, Ismael R and Lehtinen, Karoliina and Zimmermann, Martin},
  booktitle    = {46th International Symposium on Mathematical Foundations of Computer Science},
  isbn         = {978-3-95977-201-3},
  issn         = {1868-8969},
  location     = {Tallinn, Estonia},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{A bit of nondeterminism makes pushdown automata expressive and succinct}},
  doi          = {10.4230/LIPIcs.MFCS.2021.53},
  volume       = {202},
  year         = {2021},
}

@article{10191,
  abstract     = {In this work we solve the algorithmic problem of consistency verification for the TSO and PSO memory models given a reads-from map, denoted VTSO-rf and VPSO-rf, respectively. For an execution of n events over k threads and d variables, we establish novel bounds that scale as n^{k+1} for TSO and as n^{k+1} · min(n^{k²}, 2^{k·d}) for PSO. Moreover, based on our solution to these problems, we develop an SMC algorithm under TSO and PSO that uses the RF equivalence. The algorithm is exploration-optimal, in the sense that it is guaranteed to explore each class of the RF partitioning exactly once, and spends polynomial time per class when k is bounded. Finally, we implement all our algorithms in the SMC tool Nidhugg, and perform a large number of experiments over benchmarks from existing literature. Our experimental results show that our algorithms for VTSO-rf and VPSO-rf provide significant scalability improvements over standard alternatives. Moreover, when used for SMC, the RF partitioning is often much coarser than the standard Shasha-Snir partitioning for TSO/PSO, which yields a significant speedup in the model checking task.},
  author       = {Bui, Truc Lam and Chatterjee, Krishnendu and Gautam, Tushar and Pavlogiannis, Andreas and Toman, Viktor},
  issn         = {2475-1421},
  journal      = {Proceedings of the ACM on Programming Languages},
  keywords     = {safety, risk, reliability and quality, software},
  number       = {OOPSLA},
  publisher    = {Association for Computing Machinery},
  title        = {{The reads-from equivalence for the TSO and PSO memory models}},
  doi          = {10.1145/3485541},
  volume       = {5},
  year         = {2021},
}

@phdthesis{10199,
  abstract     = {The design and verification of concurrent systems remains an open challenge due to the non-determinism that arises from the inter-process communication. In particular, concurrent programs are notoriously difficult both to be written correctly and to be analyzed formally, as complex thread interaction has to be accounted for. The difficulties are further exacerbated when concurrent programs get executed on modern-day hardware, which contains various buffering and caching mechanisms for efficiency reasons. This causes further subtle non-determinism, which can often produce very unintuitive behavior of the concurrent programs. Model checking is at the forefront of tackling the verification problem, where the task is to decide, given as input a concurrent system and a desired property, whether the system satisfies the property. The inherent state-space explosion problem in model checking of concurrent systems causes naïve explicit methods not to scale, thus more inventive methods are required. One such method is stateless model checking (SMC), which explores in memory-efficient manner the program executions rather than the states of the program. State-of-the-art SMC is typically coupled with partial order reduction (POR) techniques, which argue that certain executions provably produce identical system behavior, thus limiting the amount of executions one needs to explore in order to cover all possible behaviors. Another method to tackle the state-space explosion is symbolic model checking, where the considered techniques operate on a succinct implicit representation of the input system rather than explicitly accessing the system. In this thesis we present new techniques for verification of concurrent systems. We present several novel POR methods for SMC of concurrent programs under various models of semantics, some of which account for write-buffering mechanisms. 
Additionally, we present novel algorithms for symbolic model checking of finite-state concurrent systems, where the desired property of the systems is to ensure a formally defined notion of fairness.},
  author       = {Toman, Viktor},
  issn         = {2663-337X},
  keywords     = {concurrency, verification, model checking},
  pages        = {166},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Improved verification techniques for concurrent systems}},
  doi          = {10.15479/at:ista:10199},
  year         = {2021},
}

@phdthesis{10293,
  abstract     = {Indirect reciprocity in evolutionary game theory is a prominent mechanism for explaining the evolution of cooperation among unrelated individuals. In contrast to direct reciprocity, which is based on individuals meeting repeatedly, and conditionally cooperating by using their own experiences, indirect reciprocity is based on individuals’ reputations. If a player helps another, this increases the helper’s public standing, benefitting them in the future. This lets cooperation in the population emerge without individuals having to meet more than once. While the two modes of reciprocity are intertwined, they are difficult to compare. Thus, they are usually studied in isolation. Direct reciprocity can maintain cooperation with simple strategies, and is robust against noise even when players do not remember more
than their partner’s last action. Meanwhile, indirect reciprocity requires its successful strategies, or social norms, to be more complex. Exhaustive search previously identified eight such norms, called the “leading eight”, which excel at maintaining cooperation. However, as the first result of this thesis, we show that the leading eight break down once we remove the fundamental assumption that information is synchronized and public, such that everyone agrees on reputations. Once we consider a more realistic scenario of imperfect information, where reputations are private, and individuals occasionally misinterpret or miss observations, the leading eight do not promote cooperation anymore. Instead, minor initial disagreements can proliferate, fragmenting populations into subgroups. In a next step, we consider ways to mitigate this issue. We first explore whether introducing “generosity” can stabilize cooperation when players use the leading eight strategies in noisy environments. This approach of modifying strategies to include probabilistic elements for coping with errors is known to work well in direct reciprocity. However, as we show here, it fails for the more complex norms of indirect reciprocity. Imperfect information still prevents cooperation from evolving. On the other hand, we succeeded to show in this thesis that modifying the leading eight to use “quantitative assessment”, i.e. tracking reputation scores on a scale beyond good and bad, and making overall judgments of others based on a threshold, is highly successful, even when noise increases in the environment. Cooperation can flourish when reputations
are more nuanced, and players have a broader understanding what it means to be “good.” Finally, we present a single theoretical framework that unites the two modes of reciprocity despite their differences. Within this framework, we identify a novel simple and successful strategy for indirect reciprocity, which can cope with noisy environments and has an analogue in direct reciprocity. We can also analyze decision making when different sources of information are available. Our results help highlight that for sustaining cooperation, already the most simple rules of reciprocity can be sufficient.},
  author       = {Schmid, Laura},
  issn         = {2663-337X},
  pages        = {171},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Evolution of cooperation via (in)direct reciprocity under imperfect information}},
  doi          = {10.15479/at:ista:10293},
  year         = {2021},
}

@inproceedings{10414,
  abstract     = {We consider the almost-sure (a.s.) termination problem for probabilistic programs, which are a stochastic extension of classical imperative programs. Lexicographic ranking functions provide a sound and practical approach for termination of non-probabilistic programs, and their extension to probabilistic programs is achieved via lexicographic ranking supermartingales (LexRSMs). However, LexRSMs introduced in the previous work have a limitation that impedes their automation: all of their components have to be non-negative in all reachable states. This might result in LexRSM not existing even for simple terminating programs. Our contributions are twofold: First, we introduce a generalization of LexRSMs which allows for some components to be negative. This standard feature of non-probabilistic termination proofs was hitherto not known to be sound in the probabilistic setting, as the soundness proof requires a careful analysis of the underlying stochastic process. Second, we present polynomial-time algorithms using our generalized LexRSMs for proving a.s. termination in broad classes of linear-arithmetic programs.},
  author       = {Chatterjee, Krishnendu and Goharshady, Ehsan Kafshdar and Novotný, Petr and Zárevúcky, Jiří and Zikelic, Dorde},
  booktitle    = {24th International Symposium on Formal Methods},
  isbn         = {978-3-030-90869-0},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {619--639},
  publisher    = {Springer Nature},
  title        = {{On lexicographic proof rules for probabilistic termination}},
  doi          = {10.1007/978-3-030-90870-6_33},
  volume       = {13047},
  year         = {2021},
}

@inproceedings{10629,
  abstract     = {Product graphs arise naturally in formal verification and program analysis. For example, the analysis of two concurrent threads requires the product of two component control-flow graphs, and for language inclusion of deterministic automata the product of two automata is constructed. In many cases, the component graphs have constant treewidth, e.g., when the input contains control-flow graphs of programs. We consider the algorithmic analysis of products of two constant-treewidth graphs with respect to three classic specification languages, namely, (a) algebraic properties, (b) mean-payoff properties, and (c) initial credit for energy properties.
Our main contributions are as follows. Consider a graph G that is the product of two constant-treewidth graphs of size n each. First, given an idempotent semiring, we present an algorithm that computes the semiring transitive closure of G in time Õ(n⁴). Since the output has size Θ(n⁴), our algorithm is optimal (up to polylog factors). Second, given a mean-payoff objective, we present an O(n³)-time algorithm for deciding whether the value of a starting state is non-negative, improving the previously known O(n⁴) bound. Third, given an initial credit for energy objective, we present an O(n⁵)-time algorithm for computing the minimum initial credit for all nodes of G, improving the previously known O(n⁸) bound. At the heart of our approach lies an algorithm for the efficient construction of strongly-balanced tree decompositions of constant-treewidth graphs. Given a constant-treewidth graph G' of n nodes and a positive integer λ, our algorithm constructs a binary tree decomposition of G' of width O(λ) with the property that the size of each subtree decreases geometrically with rate (1/2 + 2^{-λ}).},
  author       = {Chatterjee, Krishnendu and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas},
  booktitle    = {41st IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science},
  isbn         = {978-3-95977-215-0},
  issn         = {1868-8969},
  location     = {Virtual},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Quantitative verification on product graphs of small treewidth}},
  doi          = {10.4230/LIPIcs.FSTTCS.2021.42},
  volume       = {213},
  year         = {2021},
}

@inproceedings{10630,
  abstract     = {In the Intersection Non-emptiness problem, we are given a list of finite automata A_1, A_2,… , A_m over a common alphabet Σ as input, and the goal is to determine whether some string w ∈ Σ^* lies in the intersection of the languages accepted by the automata in the list. We analyze the complexity of the Intersection Non-emptiness problem under the promise that all input automata accept a language in some level of the dot-depth hierarchy, or some level of the Straubing-Thérien hierarchy. Automata accepting languages from the lowest levels of these hierarchies arise naturally in the context of model checking. We identify a dichotomy in the dot-depth hierarchy by showing that the problem is already NP-complete when all input automata accept languages of the levels B_0 or B_{1/2} and already PSPACE-hard when all automata accept a language from the level B_1. Conversely, we identify a tetrachotomy in the Straubing-Thérien hierarchy. More precisely, we show that the problem is in AC^0 when restricted to level L_0; complete for L or NL, depending on the input representation, when restricted to languages in the level L_{1/2}; NP-complete when the input is given as DFAs accepting a language in L_1 or L_{3/2}; and finally, PSPACE-complete when the input automata accept languages in level L_2 or higher. Moreover, we show that the proof technique used to show containment in NP for DFAs accepting languages in L_1 or L_{3/2} does not generalize to the context of NFAs. To prove this, we identify a family of languages that provide an exponential separation between the state complexity of general NFAs and that of partially ordered NFAs. To the best of our knowledge, this is the first superpolynomial separation between these two models of computation.},
  author       = {Arrighi, Emmanuel and Fernau, Henning and Hoffmann, Stefan and Holzer, Markus and Jecker, Ismael R and De Oliveira Oliveira, Mateus and Wolf, Petra},
  booktitle    = {41st IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science},
  isbn         = {978-3-95977-215-0},
  issn         = {1868-8969},
  location     = {Virtual},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{On the complexity of intersection non-emptiness for star-free language classes}},
  doi          = {10.4230/LIPIcs.FSTTCS.2021.34},
  volume       = {213},
  year         = {2021},
}

@inproceedings{12767,
  abstract     = {Several problems in planning and reactive synthesis can be reduced to the analysis of two-player quantitative graph games. Optimization is one form of analysis. We argue that in many cases it may be better to replace the optimization problem with the satisficing problem, where instead of searching for optimal solutions, the goal is to search for solutions that adhere to a given threshold bound.
This work defines and investigates the satisficing problem on a two-player graph game with the discounted-sum cost model. We show that while the satisficing problem can be solved using numerical methods just like the optimization problem, this approach does not render compelling benefits over optimization. When the discount factor is, however, an integer, we present another approach to satisficing, which is purely based on automata methods. We show that this approach is algorithmically more performant – both theoretically and empirically – and demonstrates the broader applicability of satisficing over optimization.},
  author       = {Bansal, Suguman and Chatterjee, Krishnendu and Vardi, Moshe Y.},
  booktitle    = {27th International Conference on Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783030720155},
  issn         = {1611-3349},
  location     = {Luxembourg City, Luxembourg},
  pages        = {20--37},
  publisher    = {Springer Nature},
  title        = {{On satisficing in quantitative games}},
  doi          = {10.1007/978-3-030-72016-2},
  volume       = {12651},
  year         = {2021},
}

@inproceedings{9987,
  abstract     = {Stateless model checking (SMC) is one of the standard approaches to the verification of concurrent programs. As scheduling non-determinism creates exponentially large spaces of thread interleavings, SMC attempts to partition this space into equivalence classes and explore only a few representatives from each class. The efficiency of this approach depends on two factors: (a) the coarseness of the partitioning, and (b) the time to generate representatives in each class. For this reason, the search for coarse partitionings that are efficiently explorable is an active research challenge. In this work we present RVF-SMC, a new SMC algorithm that uses a novel reads-value-from (RVF) partitioning. Intuitively, two interleavings are deemed equivalent if they agree on the value obtained in each read event, and read events induce consistent causal orderings between them. The RVF partitioning is provably coarser than recent approaches based on Mazurkiewicz and “reads-from” partitionings. Our experimental evaluation reveals that RVF is quite often a very effective equivalence, as the underlying partitioning is exponentially coarser than other approaches. Moreover, RVF-SMC generates representatives very efficiently, as the reduction in the partitioning is often met with significant speed-ups in the model checking task.},
  author       = {Agarwal, Pratyush and Chatterjee, Krishnendu and Pathak, Shreya and Pavlogiannis, Andreas and Toman, Viktor},
  booktitle    = {33rd International Conference on Computer-Aided Verification},
  isbn         = {978-3-030-81684-1},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {341--366},
  publisher    = {Springer Nature},
  title        = {{Stateless model checking under a reads-value-from equivalence}},
  doi          = {10.1007/978-3-030-81685-8_16},
  volume       = {12759},
  year         = {2021},
}

@article{9997,
  abstract     = {Indirect reciprocity is a mechanism for the evolution of cooperation based on social norms. This mechanism requires that individuals in a population observe and judge each other’s behaviors. Individuals with a good reputation are more likely to receive help from others. Previous work suggests that indirect reciprocity is only effective when all relevant information is reliable and publicly available. Otherwise, individuals may disagree on how to assess others, even if they all apply the same social norm. Such disagreements can lead to a breakdown of cooperation. Here we explore whether the predominantly studied ‘leading eight’ social norms of indirect reciprocity can be made more robust by equipping them with an element of generosity. To this end, we distinguish between two kinds of generosity. According to assessment generosity, individuals occasionally assign a good reputation to group members who would usually be regarded as bad. According to action generosity, individuals occasionally cooperate with group members with whom they would usually defect. Using individual-based simulations, we show that the two kinds of generosity have a very different effect on the resulting reputation dynamics. Assessment generosity tends to add to the overall noise and allows defectors to invade. In contrast, a limited amount of action generosity can be beneficial in a few cases. However, even when action generosity is beneficial, the respective simulations do not result in full cooperation. Our results suggest that while generosity can favor cooperation when individuals use the most simple strategies of reciprocity, it is disadvantageous when individuals use more complex social norms.},
  author       = {Schmid, Laura and Shati, Pouya and Hilbe, Christian and Chatterjee, Krishnendu},
  issn         = {2045-2322},
  journal      = {Scientific Reports},
  keywords     = {Multidisciplinary},
  number       = {1},
  publisher    = {Springer Nature},
  title        = {{The evolution of indirect reciprocity under action and assessment generosity}},
  doi          = {10.1038/s41598-021-96932-1},
  volume       = {11},
  year         = {2021},
}

@inproceedings{7810,
  abstract     = {Interprocedural data-flow analyses form an expressive and useful paradigm of numerous static analysis applications, such as live variables analysis, alias analysis and null pointers analysis. The most widely-used framework for interprocedural data-flow analysis is IFDS, which encompasses distributive data-flow functions over a finite domain. On-demand data-flow analyses restrict the focus of the analysis on specific program locations and data facts. This setting provides a natural split between (i) an offline (or preprocessing) phase, where the program is partially analyzed and analysis summaries are created, and (ii) an online (or query) phase, where analysis queries arrive on demand and the summaries are used to speed up answering queries.
In this work, we consider on-demand IFDS analyses where the queries concern program locations of the same procedure (aka same-context queries). We exploit the fact that flow graphs of programs have low treewidth to develop faster algorithms that are space and time optimal for many common data-flow analyses, in both the preprocessing and the query phase. We also use treewidth to develop query solutions that are embarrassingly parallelizable, i.e. the total work for answering each query is split to a number of threads such that each thread performs only a constant amount of work. Finally, we implement a static analyzer based on our algorithms, and perform a series of on-demand analysis experiments on standard benchmarks. Our experimental results show a drastic speed-up of the queries after only a lightweight preprocessing phase, which significantly outperforms existing techniques.},
  author       = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Ibsen-Jensen, Rasmus and Pavlogiannis, Andreas},
  booktitle    = {European Symposium on Programming},
  isbn         = {9783030449131},
  issn         = {1611-3349},
  location     = {Dublin, Ireland},
  pages        = {112--140},
  publisher    = {Springer Nature},
  title        = {{Optimal and perfectly parallel algorithms for on-demand data-flow analysis}},
  doi          = {10.1007/978-3-030-44914-8_5},
  volume       = {12075},
  year         = {2020},
}

@inproceedings{7955,
  abstract     = {Simple stochastic games are turn-based 2½-player games with a reachability objective. The basic question asks whether one player can ensure reaching a given target with at least a given probability. A natural extension is games with a conjunction of such conditions as objective. Despite a plethora of recent results on the analysis of systems with multiple objectives, the decidability of this basic problem remains open. In this paper, we present an algorithm approximating the Pareto frontier of the achievable values to a given precision. Moreover, it is an anytime algorithm, meaning it can be stopped at any time returning the current approximation and its error bound.},
  author       = {Ashok, Pranav and Chatterjee, Krishnendu and Kretinsky, Jan and Weininger, Maximilian and Winkler, Tobias},
  booktitle    = {Proceedings of the 35th Annual ACM/IEEE Symposium on Logic in Computer Science},
  isbn         = {9781450371049},
  location     = {Saarbrücken, Germany},
  pages        = {102--115},
  publisher    = {Association for Computing Machinery},
  title        = {{Approximating values of generalized-reachability stochastic games}},
  doi          = {10.1145/3373718.3394761},
  year         = {2020},
}

