@inproceedings{15006,
  abstract     = {Graphical games are a useful framework for modeling the interactions of (selfish) agents who are connected via an underlying topology and whose behaviors influence each other. They have wide applications ranging from computer science to economics and biology. Yet, even though an agent’s payoff only depends on the actions of their direct neighbors in graphical games, computing the Nash equilibria and making statements about the convergence time of "natural" local dynamics in particular can be highly challenging. In this work, we present a novel approach for classifying complexity of Nash equilibria in graphical games by establishing a connection to local graph algorithms, a subfield of distributed computing. In particular, we make the observation that the equilibria of graphical games are equivalent to locally verifiable labelings (LVL) in graphs; vertex labelings which are verifiable with constant-round local algorithms. This connection allows us to derive novel lower bounds on the convergence time to equilibrium of best-response dynamics in graphical games. Since we establish that distributed convergence can sometimes be provably slow, we also introduce and give bounds on an intuitive notion of "time-constrained" inefficiency of best responses. We exemplify how our results can be used in the implementation of mechanisms that ensure convergence of best responses to a Nash equilibrium. Our results thus also give insight into the convergence of strategy-proof algorithms for graphical games, which is still not well understood.},
  author       = {Hirvonen, Juho and Schmid, Laura and Chatterjee, Krishnendu and Schmid, Stefan},
  booktitle    = {27th International Conference on Principles of Distributed Systems},
  isbn         = {9783959773089},
  issn         = {1868-8969},
  location     = {Tokyo, Japan},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{On the convergence time in graphical games: A locality-sensitive approach}},
  doi          = {10.4230/LIPIcs.OPODIS.2023.11},
  volume       = {286},
  year         = {2024},
}

@article{10770,
  abstract     = {Mathematical models often aim to describe a complicated mechanism in a cohesive and simple manner. However, reaching perfect balance between being simple enough or overly simplistic is a challenging task. Frequently, game-theoretic models have an underlying assumption that players, whenever they choose to execute a specific action, do so perfectly. In fact, it is rare that action execution perfectly coincides with intentions of individuals, giving rise to behavioural mistakes. The concept of incompetence of players was suggested to address this issue in game-theoretic settings. Under the assumption of incompetence, players have non-zero probabilities of executing a different strategy from the one they chose, leading to stochastic outcomes of the interactions. In this article, we survey results related to the concept of incompetence in classic as well as evolutionary game theory and provide several new results. We also suggest future extensions of the model and argue why it is important to take into account behavioural mistakes when analysing interactions among players in both economic and biological settings.},
  author       = {Graham, Thomas and Kleshnina, Maria and Filar, Jerzy A.},
  issn         = {2153-0793},
  journal      = {Dynamic Games and Applications},
  pages        = {231--264},
  publisher    = {Springer Nature},
  title        = {{Where do mistakes lead? A survey of games with incompetent players}},
  doi          = {10.1007/s13235-022-00425-3},
  volume       = {13},
  year         = {2023},
}

@inproceedings{14317,
  abstract     = {Markov decision processes can be viewed as transformers of probability distributions. While this view is useful from a practical standpoint to reason about trajectories of distributions, basic reachability and safety problems are known to be computationally intractable (i.e., Skolem-hard) to solve in such models. Further, we show that even for simple examples of MDPs, strategies for safety objectives over distributions can require infinite memory and randomization.
In light of this, we present a novel overapproximation approach to synthesize strategies in an MDP, such that a safety objective over the distributions is met. More precisely, we develop a new framework for template-based synthesis of certificates as affine distributional and inductive invariants for safety objectives in MDPs. We provide two algorithms within this framework. One can only synthesize memoryless strategies, but has relative completeness guarantees, while the other can synthesize general strategies. The runtime complexity of both algorithms is in PSPACE. We implement these algorithms and show that they can solve several non-trivial examples.},
  author       = {Akshay, S. and Chatterjee, Krishnendu and Meggendorfer, Tobias and Zikelic, Dorde},
  booktitle    = {International Conference on Computer Aided Verification},
  isbn         = {9783031377082},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {86--112},
  publisher    = {Springer Nature},
  title        = {{MDPs as distribution transformers: Affine invariant synthesis for safety objectives}},
  doi          = {10.1007/978-3-031-37709-9_5},
  volume       = {13966},
  year         = {2023},
}

@inproceedings{14318,
  abstract     = {Probabilistic recurrence relations (PRRs) are a standard formalism for describing the runtime of a randomized algorithm. Given a PRR and a time limit κ, we consider the tail probability Pr[T≥κ], i.e., the probability that the randomized runtime T of the PRR exceeds κ. Our focus is the formal analysis of tail bounds that aims at finding a tight asymptotic upper bound u≥Pr[T≥κ]. To address this problem, the classical and most well-known approach is the cookbook method by Karp (JACM 1994), while other approaches are mostly limited to deriving tail bounds of specific PRRs via involved custom analysis.
In this work, we propose a novel approach for deriving the common exponentially-decreasing tail bounds for PRRs whose preprocessing time and random passed sizes observe discrete or (piecewise) uniform distribution and whose recursive call is either a single procedure call or a divide-and-conquer. We first establish a theoretical approach via Markov’s inequality, and then instantiate the theoretical approach with a template-based algorithmic approach via a refined treatment of exponentiation. Experimental evaluation shows that our algorithmic approach is capable of deriving tail bounds that are (i) asymptotically tighter than Karp’s method, (ii) match the best-known manually-derived asymptotic tail bound for QuickSelect, and (iii) is only slightly worse (with a log log n factor) than the manually-proven optimal asymptotic tail bound for QuickSort. Moreover, our algorithmic approach handles all examples (including realistic PRRs such as QuickSort, QuickSelect, DiameterComputation, etc.) in less than 0.1 s, showing that our approach is efficient in practice.},
  author       = {Sun, Yican and Fu, Hongfei and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar},
  booktitle    = {Computer Aided Verification},
  isbn         = {9783031377082},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {16--39},
  publisher    = {Springer Nature},
  title        = {{Automated tail bound analysis for probabilistic recurrence relations}},
  doi          = {10.1007/978-3-031-37709-9_2},
  volume       = {13966},
  year         = {2023},
}

@inproceedings{14417,
  abstract     = {Entropic risk (ERisk) is an established risk measure in finance, quantifying risk by an exponential re-weighting of rewards. We study ERisk for the first time in the context of turn-based stochastic games with the total reward objective. This gives rise to an objective function that demands the control of systems in a risk-averse manner. We show that the resulting games are determined and, in particular, admit optimal memoryless deterministic strategies. This contrasts risk measures that previously have been considered in the special case of Markov decision processes and that require randomization and/or memory. We provide several results on the decidability and the computational complexity of the threshold problem, i.e. whether the optimal value of ERisk exceeds a given threshold. In the most general case, the problem is decidable subject to Schanuel’s conjecture. If all inputs are rational, the resulting threshold problem can be solved using algebraic numbers, leading to decidability via a polynomial-time reduction to the existential theory of the reals. Further restrictions on the encoding of the input allow the solution of the threshold problem in NP∩coNP. Finally, an approximation algorithm for the optimal value of ERisk is provided.},
  author       = {Baier, Christel and Chatterjee, Krishnendu and Meggendorfer, Tobias and Piribauer, Jakob},
  booktitle    = {48th International Symposium on Mathematical Foundations of Computer Science},
  isbn         = {9783959772921},
  issn         = {1868-8969},
  location     = {Bordeaux, France},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Entropic risk for turn-based stochastic games}},
  doi          = {10.4230/LIPIcs.MFCS.2023.15},
  volume       = {272},
  year         = {2023},
}

@inproceedings{14456,
  abstract     = {In this paper, we present novel algorithms that efficiently compute a shortest reconfiguration sequence between two given dominating sets in trees and interval graphs under the TOKEN SLIDING model. In this problem, a graph is provided along with its two dominating sets, which can be imagined as tokens placed on vertices. The objective is to find a shortest sequence of dominating sets that transforms one set into the other, with each set in the sequence resulting from sliding a single token in the previous set. While identifying any sequence has been well studied, our work presents the first polynomial algorithms for this optimization variant in the context of dominating sets.},
  author       = {Křišťan, Jan Matyáš and Svoboda, Jakub},
  booktitle    = {24th International Symposium on Fundamentals of Computation Theory},
  isbn         = {9783031435867},
  issn         = {1611-3349},
  location     = {Trier, Germany},
  pages        = {333--347},
  publisher    = {Springer Nature},
  title        = {{Shortest dominating set reconfiguration under token sliding}},
  doi          = {10.1007/978-3-031-43587-4_24},
  volume       = {14292},
  year         = {2023},
}

@inproceedings{14518,
  abstract     = {We consider bidding games, a class of two-player zero-sum graph games. The game proceeds as follows. Both players have bounded budgets. A token is placed on a vertex of a graph, in each turn the players simultaneously submit bids, and the higher bidder moves the token, where we break bidding ties in favor of Player 1. Player 1 wins the game iff the token visits a designated target vertex. We consider, for the first time, poorman discrete-bidding in which the granularity of the bids is restricted and the higher bid is paid to the bank. Previous work either did not impose granularity restrictions or considered Richman bidding (bids are paid to the opponent). While the latter mechanisms are technically more accessible, the former is more appealing from a practical standpoint. Our study focuses on threshold budgets, which is the necessary and sufficient initial budget required for Player 1 to ensure winning against a given Player 2 budget. We first show existence of thresholds. In DAGs, we show that threshold budgets can be approximated with error bounds by thresholds under continuous-bidding and that they exhibit a periodic behavior. We identify closed-form solutions in special cases. We implement and experiment with an algorithm to find threshold budgets.},
  author       = {Avni, Guy and Meggendorfer, Tobias and Sadhukhan, Suman and Tkadlec, Josef and Zikelic, Dorde},
  booktitle    = {Frontiers in Artificial Intelligence and Applications},
  isbn         = {9781643684369},
  issn         = {0922-6389},
  location     = {Krakow, Poland},
  pages        = {141--148},
  publisher    = {IOS Press},
  title        = {{Reachability poorman discrete-bidding games}},
  doi          = {10.3233/FAIA230264},
  volume       = {372},
  year         = {2023},
}

@phdthesis{14539,
  abstract     = {Stochastic systems provide a formal framework for modelling and quantifying uncertainty in systems and have been widely adopted in many application domains. Formal
verification and control of finite state stochastic systems, a subfield of formal methods
also known as probabilistic model checking, is well studied. In contrast, formal verification and control of infinite state stochastic systems have received comparatively
less attention. However, infinite state stochastic systems commonly arise in practice.
For instance, probabilistic models that contain continuous probability distributions such
as normal or uniform, or stochastic dynamical systems which are a classical model for
control under uncertainty, both give rise to infinite state systems.
The goal of this thesis is to contribute to laying theoretical and algorithmic foundations
of fully automated formal verification and control of infinite state stochastic systems,
with a particular focus on systems that may be executed over a long or infinite time.
We consider formal verification of infinite state stochastic systems in the setting of
static analysis of probabilistic programs and formal control in the setting of controller
synthesis in stochastic dynamical systems. For both problems, we present some of the
first fully automated methods for probabilistic (a.k.a. quantitative) reachability and
safety analysis applicable to infinite time horizon systems. We also advance the state
of the art of probability 1 (a.k.a. qualitative) reachability analysis for both problems.
Finally, for formal controller synthesis in stochastic dynamical systems, we present a
novel framework for learning neural network control policies in stochastic dynamical
systems with formal guarantees on correctness with respect to quantitative reachability,
safety or reach-avoid specifications.
},
  author       = {Zikelic, Dorde},
  isbn         = {978-3-99078-036-7},
  issn         = {2663-337X},
  pages        = {256},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Automated verification and control of infinite state stochastic systems}},
  doi          = {10.15479/14539},
  year         = {2023},
}

@inproceedings{14559,
  abstract     = {We consider the problem of learning control policies in discrete-time stochastic systems which guarantee that the system stabilizes within some specified stabilization region with probability 1. Our approach is based on the novel notion of stabilizing ranking supermartingales (sRSMs) that we introduce in this work. Our sRSMs overcome the limitation of methods proposed in previous works whose applicability is restricted to systems in which the stabilizing region cannot be left once entered under any control policy. We present a learning procedure that learns a control policy together with an sRSM that formally certifies probability 1 stability, both learned as neural networks. We show that this procedure can also be adapted to formally verifying that, under a given Lipschitz continuous control policy, the stochastic system stabilizes within some stabilizing region with probability 1. Our experimental evaluation shows that our learning procedure can successfully learn provably stabilizing policies in practice.},
  author       = {Ansaripour, Matin and Chatterjee, Krishnendu and Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde},
  booktitle    = {21st International Symposium on Automated Technology for Verification and Analysis},
  isbn         = {9783031453281},
  issn         = {1611-3349},
  location     = {Singapore, Singapore},
  pages        = {357--379},
  publisher    = {Springer Nature},
  title        = {{Learning provably stabilizing neural controllers for discrete-time stochastic systems}},
  doi          = {10.1007/978-3-031-45329-8_17},
  volume       = {14215},
  year         = {2023},
}

@article{14657,
  abstract     = {Natural selection is usually studied between mutants that differ in reproductive rate, but are subject to the same population structure. Here we explore how natural selection acts on mutants that have the same reproductive rate, but different population structures. In our framework, population structure is given by a graph that specifies where offspring can disperse. The invading mutant disperses offspring on a different graph than the resident wild-type. We find that more densely connected dispersal graphs tend to increase the invader’s fixation probability, but the exact relationship between structure and fixation probability is subtle. We present three main results. First, we prove that if both invader and resident are on complete dispersal graphs, then removing a single edge in the invader’s dispersal graph reduces its fixation probability. Second, we show that for certain island models higher invader’s connectivity increases its fixation probability, but the magnitude of the effect depends on the exact layout of the connections. Third, we show that for lattices the effect of different connectivity is comparable to that of different fitness: for large population size, the invader’s fixation probability is either constant or exponentially small, depending on whether it is more or less connected than the resident.},
  author       = {Tkadlec, Josef and Kaveh, Kamran and Chatterjee, Krishnendu and Nowak, Martin A.},
  issn         = {1742-5662},
  journal      = {Journal of the Royal Society, Interface},
  number       = {208},
  publisher    = {The Royal Society},
  title        = {{Evolutionary dynamics of mutants that modify population structure}},
  doi          = {10.1098/rsif.2023.0355},
  volume       = {20},
  year         = {2023},
}

@inproceedings{14736,
  abstract     = {Payment channel networks (PCNs) are a promising technology to improve the scalability of cryptocurrencies. PCNs, however, face the challenge that the frequent usage of certain routes may deplete channels in one direction, and hence prevent further transactions. In order to reap the full potential of PCNs, recharging and rebalancing mechanisms are required to provision channels, as well as an admission control logic to decide which transactions to reject in case capacity is insufficient. This paper presents a formal model of this optimisation problem. In particular, we consider an online algorithms perspective, where transactions arrive over time in an unpredictable manner. Our main contributions are competitive online algorithms which come with provable guarantees over time. We empirically evaluate our algorithms on randomly generated transactions to compare the average performance of our algorithms to our theoretical bounds. We also show how this model and approach differs from related problems in classic communication networks.},
  author       = {Bastankhah, Mahsa and Chatterjee, Krishnendu and Maddah-Ali, Mohammad Ali and Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X},
  booktitle    = {27th International Conference on Financial Cryptography and Data Security},
  isbn         = {9783031477539},
  issn         = {1611-3349},
  location     = {Bol, Brac, Croatia},
  pages        = {309--325},
  publisher    = {Springer Nature},
  title        = {{R2: Boosting liquidity in payment channel networks with online admission control}},
  doi          = {10.1007/978-3-031-47754-6_18},
  volume       = {13950},
  year         = {2023},
}

@article{14778,
  abstract     = {We consider the almost-sure (a.s.) termination problem for probabilistic programs, which are a stochastic extension of classical imperative programs. Lexicographic ranking functions provide a sound and practical approach for termination of non-probabilistic programs, and their extension to probabilistic programs is achieved via lexicographic ranking supermartingales (LexRSMs). However, LexRSMs introduced in the previous work have a limitation that impedes their automation: all of their components have to be non-negative in all reachable states. This might result in a LexRSM not existing even for simple terminating programs. Our contributions are twofold. First, we introduce a generalization of LexRSMs that allows for some components to be negative. This standard feature of non-probabilistic termination proofs was hitherto not known to be sound in the probabilistic setting, as the soundness proof requires a careful analysis of the underlying stochastic process. Second, we present polynomial-time algorithms using our generalized LexRSMs for proving a.s. termination in broad classes of linear-arithmetic programs.},
  author       = {Chatterjee, Krishnendu and Kafshdar Goharshady, Ehsan and Novotný, Petr and Zárevúcky, Jiří and Zikelic, Dorde},
  issn         = {1433-299X},
  journal      = {Formal Aspects of Computing},
  keywords     = {Theoretical Computer Science, Software},
  number       = {2},
  publisher    = {Association for Computing Machinery},
  title        = {{On lexicographic proof rules for probabilistic termination}},
  doi          = {10.1145/3585391},
  volume       = {35},
  year         = {2023},
}

@inproceedings{14830,
  abstract     = {We study the problem of learning controllers for discrete-time non-linear stochastic dynamical systems with formal reach-avoid guarantees. This work presents the first method for providing formal reach-avoid guarantees, which combine and generalize stability and safety guarantees, with a tolerable probability threshold p in [0,1] over the infinite time horizon. Our method leverages advances in machine learning literature and it represents formal certificates as neural networks. In particular, we learn a certificate in the form of a reach-avoid supermartingale (RASM), a novel notion that we introduce in this work. Our RASMs provide reachability and avoidance guarantees by imposing constraints on what can be viewed as a stochastic extension of level sets of Lyapunov functions for deterministic systems. Our approach solves several important problems -- it can be used to learn a control policy from scratch, to verify a reach-avoid specification for a fixed control policy, or to fine-tune a pre-trained policy if it does not satisfy the reach-avoid specification. We validate our approach on 3 stochastic non-linear reinforcement learning tasks.},
  author       = {Zikelic, Dorde and Lechner, Mathias and Henzinger, Thomas A and Chatterjee, Krishnendu},
  booktitle    = {Proceedings of the 37th AAAI Conference on Artificial Intelligence},
  issn         = {2374-3468},
  keywords     = {General Medicine},
  location     = {Washington, DC, United States},
  number       = {10},
  pages        = {11926--11935},
  publisher    = {Association for the Advancement of Artificial Intelligence},
  title        = {{Learning control policies for stochastic systems with reach-avoid guarantees}},
  doi          = {10.1609/aaai.v37i10.26407},
  volume       = {37},
  year         = {2023},
}

@misc{14990,
  abstract     = {The software artefact to evaluate the approximation of stationary distributions implementation.},
  author       = {Meggendorfer, Tobias},
  publisher    = {Zenodo},
  title        = {{Artefact for: Correct Approximation of Stationary Distributions}},
  doi          = {10.5281/ZENODO.7548214},
  year         = {2023},
}

@inproceedings{15023,
  abstract     = {Reinforcement learning has shown promising results in learning neural network policies for complicated control tasks. However, the lack of formal guarantees about the behavior of such policies remains an impediment to their deployment. We propose a novel method for learning a composition of neural network policies in stochastic environments, along with a formal certificate which guarantees that a specification over the policy's behavior is satisfied with the desired probability. Unlike prior work on verifiable RL, our approach leverages the compositional nature of logical specifications provided in SpectRL, to learn over graphs of probabilistic reach-avoid specifications. The formal guarantees are provided by learning neural network policies together with reach-avoid supermartingales (RASM) for the graph’s sub-tasks and then composing them into a global policy. We also derive a tighter lower bound compared to previous work on the probability of reach-avoidance implied by a RASM, which is required to find a compositional policy with an acceptable probabilistic threshold for complex tasks with multiple edge policies. We implement a prototype of our approach and evaluate it on a Stochastic Nine Rooms environment.},
  author       = {Zikelic, Dorde and Lechner, Mathias and Verma, Abhinav and Chatterjee, Krishnendu and Henzinger, Thomas A},
  booktitle    = {37th Conference on Neural Information Processing Systems},
  location     = {New Orleans, LA, United States},
  title        = {{Compositional policy learning in stochastic control systems with formal guarantees}},
  year         = {2023},
}

@inproceedings{13139,
  abstract     = {A classical problem for Markov chains is determining their stationary (or steady-state) distribution. This problem has an equally classical solution based on eigenvectors and linear equation systems. However, this approach does not scale to large instances, and iterative solutions are desirable. It turns out that a naive approach, as used by current model checkers, may yield completely wrong results. We present a new approach, which utilizes recent advances in partial exploration and mean payoff computation to obtain a correct, converging approximation.},
  author       = {Meggendorfer, Tobias},
  booktitle    = {TACAS 2023: Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783031308222},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {489--507},
  publisher    = {Springer Nature},
  title        = {{Correct approximation of stationary distributions}},
  doi          = {10.1007/978-3-031-30823-9_25},
  volume       = {13993},
  year         = {2023},
}

@inproceedings{13142,
  abstract     = {Reinforcement learning has received much attention for learning controllers of deterministic systems. We consider a learner-verifier framework for stochastic control systems and survey recent methods that formally guarantee a conjunction of reachability and safety properties. Given a property and a lower bound on the probability of the property being satisfied, our framework jointly learns a control policy and a formal certificate to ensure the satisfaction of the property with a desired probability threshold. Both the control policy and the formal certificate are continuous functions from states to reals, which are learned as parameterized neural networks. While in the deterministic case, the certificates are invariant and barrier functions for safety, or Lyapunov and ranking functions for liveness, in the stochastic case the certificates are supermartingales. For certificate verification, we use interval arithmetic abstract interpretation to bound the expected values of neural network functions.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde},
  booktitle    = {Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783031308222},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {3--25},
  publisher    = {Springer Nature},
  title        = {{A learner-verifier framework for neural network controllers and certificates of stochastic systems}},
  doi          = {10.1007/978-3-031-30823-9_1},
  volume       = {13993},
  year         = {2023},
}

@inproceedings{13238,
  abstract     = {We consider a natural problem dealing with weighted packet selection across a rechargeable link, which e.g., finds applications in cryptocurrency networks. The capacity of a link (u, v) is determined by how much nodes u and v allocate for this link. Specifically, the input is a finite ordered sequence of packets that arrive in both directions along a link. Given (u, v) and a packet of weight x going from u to v, node u can either accept or reject the packet. If u accepts the packet, the capacity on link (u, v) decreases by x. Correspondingly, v’s capacity on (u, v) increases by x. If a node rejects the packet, this will entail a cost affinely linear in the weight of the packet. A link is “rechargeable” in the sense that the total capacity of the link has to remain constant, but the allocation of capacity at the ends of the link can depend arbitrarily on the nodes’ decisions. The goal is to minimise the sum of the capacity injected into the link and the cost of rejecting packets. We show that the problem is NP-hard, but can be approximated efficiently with a ratio of (1+ε)⋅(1+√3) for some arbitrary ε>0.},
  author       = {Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X},
  booktitle    = {SIROCCO 2023: Structural Information and Communication Complexity},
  isbn         = {9783031327322},
  issn         = {1611-3349},
  location     = {Alcala de Henares, Spain},
  pages        = {576--594},
  publisher    = {Springer Nature},
  title        = {{Weighted packet selection for rechargeable links in cryptocurrency networks: Complexity and approximation}},
  doi          = {10.1007/978-3-031-32733-9_26},
  volume       = {13892},
  year         = {2023},
}

@article{13258,
  abstract     = {Many human interactions feature the characteristics of social dilemmas where individual actions have consequences for the group and the environment. The feedback between behavior and environment can be studied with the framework of stochastic games. In stochastic games, the state of the environment can change, depending on the choices made by group members. Past work suggests that such feedback can reinforce cooperative behaviors. In particular, cooperation can evolve in stochastic games even if it is infeasible in each separate repeated game. In stochastic games, participants have an interest in conditioning their strategies on the state of the environment. Yet in many applications, precise information about the state could be scarce. Here, we study how the availability of information (or lack thereof) shapes evolution of cooperation. Already for simple examples of two state games we find surprising effects. In some cases, cooperation is only possible if there is precise information about the state of the environment. In other cases, cooperation is most abundant when there is no information about the state of the environment. We systematically analyze all stochastic games of a given complexity class, to determine when receiving information about the environment is better, neutral, or worse for evolution of cooperation.},
  author       = {Kleshnina, Maria and Hilbe, Christian and Simsa, Stepan and Chatterjee, Krishnendu and Nowak, Martin A.},
  issn         = {2041-1723},
  journal      = {Nature Communications},
  publisher    = {Springer Nature},
  title        = {{The effect of environmental information on evolution of cooperation in stochastic games}},
  doi          = {10.1038/s41467-023-39625-9},
  volume       = {14},
  year         = {2023},
}

@misc{13336,
  author       = {Kleshnina, Maria},
  publisher    = {Zenodo},
  title        = {{kleshnina/stochgames_info: The effect of environmental information on evolution of cooperation in stochastic games}},
  doi          = {10.5281/ZENODO.8059564},
  year         = {2023},
}

