@inproceedings{14888,
  abstract     = {A face in a curve arrangement is called popular if it is bounded by the same curve multiple times. Motivated by the automatic generation of curved nonogram puzzles, we investigate possibilities to eliminate the popular faces in an arrangement by inserting a single additional curve. This turns out to be NP-hard; however, it becomes tractable when the number of popular faces is small: We present a probabilistic FPT-approach in the number of popular faces.},
  author       = {De Nooijer, Phoebe and Terziadis, Soeren and Weinberger, Alexandra and Masárová, Zuzana and Mchedlidze, Tamara and Löffler, Maarten and Rote, Günter},
  booktitle    = {31st International Symposium on Graph Drawing and Network Visualization},
  isbn         = {9783031492747},
  issn         = {1611-3349},
  location     = {Isola delle Femmine, Palermo, Italy},
  pages        = {18--33},
  publisher    = {Springer Nature},
  title        = {{Removing popular faces in curve arrangements}},
  doi          = {10.1007/978-3-031-49275-4_2},
  volume       = {14466},
  year         = {2024},
}

@inproceedings{14317,
  abstract     = {Markov decision processes can be viewed as transformers of probability distributions. While this view is useful from a practical standpoint to reason about trajectories of distributions, basic reachability and safety problems are known to be computationally intractable (i.e., Skolem-hard) to solve in such models. Further, we show that even for simple examples of MDPs, strategies for safety objectives over distributions can require infinite memory and randomization.
In light of this, we present a novel overapproximation approach to synthesize strategies in an MDP, such that a safety objective over the distributions is met. More precisely, we develop a new framework for template-based synthesis of certificates as affine distributional and inductive invariants for safety objectives in MDPs. We provide two algorithms within this framework. One can only synthesize memoryless strategies, but has relative completeness guarantees, while the other can synthesize general strategies. The runtime complexity of both algorithms is in PSPACE. We implement these algorithms and show that they can solve several non-trivial examples.},
  author       = {Akshay, S. and Chatterjee, Krishnendu and Meggendorfer, Tobias and Zikelic, Dorde},
  booktitle    = {35th International Conference on Computer Aided Verification},
  isbn         = {9783031377082},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {86--112},
  publisher    = {Springer Nature},
  title        = {{MDPs as distribution transformers: Affine invariant synthesis for safety objectives}},
  doi          = {10.1007/978-3-031-37709-9_5},
  volume       = {13966},
  year         = {2023},
}

@inproceedings{14318,
  abstract     = {Probabilistic recurrence relations (PRRs) are a standard formalism for describing the runtime of a randomized algorithm. Given a PRR and a time limit κ, we consider the tail probability Pr[T≥κ], i.e., the probability that the randomized runtime T of the PRR exceeds κ. Our focus is the formal analysis of tail bounds that aims at finding a tight asymptotic upper bound u≥Pr[T≥κ]. To address this problem, the classical and most well-known approach is the cookbook method by Karp (JACM 1994), while other approaches are mostly limited to deriving tail bounds of specific PRRs via involved custom analysis.
In this work, we propose a novel approach for deriving the common exponentially-decreasing tail bounds for PRRs whose preprocessing time and random passed sizes observe discrete or (piecewise) uniform distribution and whose recursive call is either a single procedure call or a divide-and-conquer. We first establish a theoretical approach via Markov’s inequality, and then instantiate the theoretical approach with a template-based algorithmic approach via a refined treatment of exponentiation. Experimental evaluation shows that our algorithmic approach is capable of deriving tail bounds that are (i) asymptotically tighter than Karp’s method, (ii) match the best-known manually-derived asymptotic tail bound for QuickSelect, and (iii) is only slightly worse (with a log log n factor) than the manually-proven optimal asymptotic tail bound for QuickSort. Moreover, our algorithmic approach handles all examples (including realistic PRRs such as QuickSort, QuickSelect, DiameterComputation, etc.) in less than 0.1 s, showing that our approach is efficient in practice.},
  author       = {Sun, Yican and Fu, Hongfei and Chatterjee, Krishnendu and Goharshady, Amir Kafshdar},
  booktitle    = {35th International Conference on Computer Aided Verification},
  isbn         = {9783031377082},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {16--39},
  publisher    = {Springer Nature},
  title        = {{Automated tail bound analysis for probabilistic recurrence relations}},
  doi          = {10.1007/978-3-031-37709-9_2},
  volume       = {13966},
  year         = {2023},
}

@inproceedings{14410,
  abstract     = {This paper focuses on the implementation details of the baseline methods and a recent lightweight conditional model extrapolation algorithm LIMES [5] for streaming data under class-prior shift. LIMES achieves superior performance over the baseline methods, especially concerning the minimum-across-day accuracy, which is important for the users of the system. In this work, the key measures to facilitate reproducibility and enhance the credibility of the results are described.},
  author       = {Tomaszewska, Paulina and Lampert, Christoph},
  booktitle    = {International Workshop on Reproducible Research in Pattern Recognition},
  isbn         = {9783031407727},
  issn         = {1611-3349},
  location     = {Montreal, Canada},
  pages        = {67--73},
  publisher    = {Springer Nature},
  title        = {{On the implementation of baselines and lightweight conditional model extrapolation (LIMES) under class-prior shift}},
  doi          = {10.1007/978-3-031-40773-4_6},
  volume       = {14068},
  year         = {2023},
}

@inproceedings{14411,
  abstract     = {Partially specified Boolean networks (PSBNs) represent a promising framework for the qualitative modelling of biological systems in which the logic of interactions is not completely known. Phenotype control aims to stabilise the network in states exhibiting specific traits.
In this paper, we define the phenotype control problem in the context of asynchronous PSBNs and propose a novel semi-symbolic algorithm for solving this problem with permanent variable perturbations.},
  author       = {Beneš, Nikola and Brim, Luboš and Pastva, Samuel and Šafránek, David and Šmijáková, Eva},
  booktitle    = {21st International Conference on Computational Methods in Systems Biology},
  isbn         = {9783031426964},
  issn         = {1611-3349},
  location     = {Luxembourg City, Luxembourg},
  pages        = {18--35},
  publisher    = {Springer Nature},
  title        = {{Phenotype control of partially specified Boolean networks}},
  doi          = {10.1007/978-3-031-42697-1_2},
  volume       = {14137},
  year         = {2023},
}

@inproceedings{14428,
  abstract     = {Suppose we have two hash functions h1 and h2, but we trust the security of only one of them. To mitigate this worry, we wish to build a hash combiner Ch1,h2 which is secure so long as one of the underlying hash functions is. This question has been well-studied in the regime of collision resistance. In this case, concatenating the two hash function outputs clearly works. Unfortunately, a long series of works (Boneh and Boyen, CRYPTO’06; Pietrzak, Eurocrypt’07; Pietrzak, CRYPTO’08) showed no (noticeably) shorter combiner for collision resistance is possible.
In this work, we revisit this pessimistic state of affairs, motivated by the observation that collision-resistance is insufficient for many interesting applications of cryptographic hash functions anyway. We argue the right formulation of the “hash combiner” is to build what we call random oracle (RO) combiners, utilizing stronger assumptions for stronger constructions.
Indeed, we circumvent the previous lower bounds for collision resistance by constructing a simple length-preserving RO combiner C˜h1,h2Z1,Z2(M)=h1(M,Z1)⊕h2(M,Z2),where Z1,Z2
 are random salts of appropriate length. We show that this extra randomness is necessary for RO combiners, and indeed our construction is somewhat tight with this lower bound.
On the negative side, we show that one cannot generically apply the composition theorem to further replace “monolithic” hash functions h1 and h2 by some simpler indifferentiable construction (such as the Merkle-Damgård transformation) from smaller components, such as fixed-length compression functions. Finally, despite this issue, we directly prove collision resistance of the Merkle-Damgård variant of our combiner, where h1 and h2 are replaced by iterative Merkle-Damgård hashes applied to a fixed-length compression function. Thus, we can still subvert the concatenation barrier for collision-resistance combiners while utilizing practically small fixed-length components underneath.},
  author       = {Dodis, Yevgeniy and Ferguson, Niels and Goldin, Eli and Hall, Peter and Pietrzak, Krzysztof Z},
  booktitle    = {43rd Annual International Cryptology Conference},
  isbn         = {9783031385445},
  issn         = {1611-3349},
  location     = {Santa Barbara, CA, United States},
  pages        = {514--546},
  publisher    = {Springer Nature},
  title        = {{Random oracle combiners: Breaking the concatenation barrier for collision-resistance}},
  doi          = {10.1007/978-3-031-38545-2_17},
  volume       = {14082},
  year         = {2023},
}

@inproceedings{14454,
  abstract     = {As AI and machine-learned software are used increasingly for making decisions that affect humans, it is imperative that they remain fair and unbiased in their decisions. To complement design-time bias mitigation measures, runtime verification techniques have been introduced recently to monitor the algorithmic fairness of deployed systems. Previous monitoring techniques assume full observability of the states of the (unknown) monitored system. Moreover, they can monitor only fairness properties that are specified as arithmetic expressions over the probabilities of different events. In this work, we extend fairness monitoring to systems modeled as partially observed Markov chains (POMC), and to specifications containing arithmetic expressions over the expected values of numerical functions on event sequences. The only assumptions we make are that the underlying POMC is aperiodic and starts in the stationary distribution, with a bound on its mixing time being known. These assumptions enable us to estimate a given property for the entire distribution of possible executions of the monitored POMC, by observing only a single execution. Our monitors observe a long run of the system and, after each new observation, output updated PAC-estimates of how fair or biased the system is. The monitors are computationally lightweight and, using a prototype implementation, we demonstrate their effectiveness on several real-world examples.},
  author       = {Henzinger, Thomas A and Kueffner, Konstantin and Mallik, Kaushik},
  booktitle    = {23rd International Conference on Runtime Verification},
  isbn         = {9783031442667},
  issn         = {1611-3349},
  location     = {Thessaloniki, Greece},
  pages        = {291--311},
  publisher    = {Springer Nature},
  title        = {{Monitoring algorithmic fairness under partial observations}},
  doi          = {10.1007/978-3-031-44267-4_15},
  volume       = {14245},
  year         = {2023},
}

@inproceedings{14456,
  abstract     = {In this paper, we present novel algorithms that efficiently compute a shortest reconfiguration sequence between two given dominating sets in trees and interval graphs under the TOKEN SLIDING model. In this problem, a graph is provided along with its two dominating sets, which can be imagined as tokens placed on vertices. The objective is to find a shortest sequence of dominating sets that transforms one set into the other, with each set in the sequence resulting from sliding a single token in the previous set. While identifying any sequence has been well studied, our work presents the first polynomial algorithms for this optimization variant in the context of dominating sets.},
  author       = {Křišťan, Jan Matyáš and Svoboda, Jakub},
  booktitle    = {24th International Symposium on Fundamentals of Computation Theory},
  isbn         = {9783031435867},
  issn         = {1611-3349},
  location     = {Trier, Germany},
  pages        = {333--347},
  publisher    = {Springer Nature},
  title        = {{Shortest dominating set reconfiguration under token sliding}},
  doi          = {10.1007/978-3-031-43587-4_24},
  volume       = {14292},
  year         = {2023},
}

@inproceedings{14457,
  abstract     = {Threshold secret sharing allows a dealer to split a secret s into n shares, such that any t shares allow for reconstructing s, but no t-1 shares reveal any information about s. Leakage-resilient secret sharing requires that the secret remains hidden, even when an adversary additionally obtains a limited amount of leakage from every share. Benhamouda et al. (CRYPTO’18) proved that Shamir’s secret sharing scheme is one bit leakage-resilient for reconstruction threshold t≥0.85n and conjectured that the same holds for t = c·n for any constant 0≤c≤1.  Nielsen and Simkin (EUROCRYPT’20) showed that this is the best one can hope for by proving that Shamir’s scheme is not secure against one-bit leakage when t ≤ c·n/log(n).
In this work, we strengthen the lower bound of Nielsen and Simkin. We consider noisy leakage-resilience, where a random subset of leakages is replaced by uniformly random noise. We prove a lower bound for Shamir’s secret sharing, similar to that of Nielsen and Simkin, which holds even when a constant fraction of leakages is replaced by random noise. To this end, we first prove a lower bound on the share size of any noisy-leakage-resilient sharing scheme. We then use this lower bound to show that there exist universal constants c1, c2,  such that for sufficiently large n it holds that Shamir’s secret sharing scheme is not noisy-leakage-resilient for t≤c1·n/log(n), even when a c2 fraction of leakages are replaced by random noise.},
  author       = {Hoffmann, Charlotte and Simkin, Mark},
  booktitle    = {8th International Conference on Cryptology and Information Security in Latin America},
  isbn         = {9783031444685},
  issn         = {1611-3349},
  location     = {Quito, Ecuador},
  pages        = {215--228},
  publisher    = {Springer Nature},
  title        = {{Stronger lower bounds for leakage-resilient secret sharing}},
  doi          = {10.1007/978-3-031-44469-2_11},
  volume       = {14168},
  year         = {2023},
}

@inproceedings{14559,
  abstract     = {We consider the problem of learning control policies in discrete-time stochastic systems which guarantee that the system stabilizes within some specified stabilization region with probability 1. Our approach is based on the novel notion of stabilizing ranking supermartingales (sRSMs) that we introduce in this work. Our sRSMs overcome the limitation of methods proposed in previous works whose applicability is restricted to systems in which the stabilizing region cannot be left once entered under any control policy. We present a learning procedure that learns a control policy together with an sRSM that formally certifies probability 1 stability, both learned as neural networks. We show that this procedure can also be adapted to formally verifying that, under a given Lipschitz continuous control policy, the stochastic system stabilizes within some stabilizing region with probability 1. Our experimental evaluation shows that our learning procedure can successfully learn provably stabilizing policies in practice.},
  author       = {Ansaripour, Matin and Chatterjee, Krishnendu and Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde},
  booktitle    = {21st International Symposium on Automated Technology for Verification and Analysis},
  isbn         = {9783031453281},
  issn         = {1611-3349},
  location     = {Singapore, Singapore},
  pages        = {357--379},
  publisher    = {Springer Nature},
  title        = {{Learning provably stabilizing neural controllers for discrete-time stochastic systems}},
  doi          = {10.1007/978-3-031-45329-8_17},
  volume       = {14215},
  year         = {2023},
}

@inproceedings{14691,
  abstract     = {Continuous Group-Key Agreement (CGKA) allows a group of users to maintain a shared key. It is the fundamental cryptographic primitive underlying group messaging schemes and related protocols, most notably TreeKEM, the underlying key agreement protocol of the Messaging Layer Security (MLS) protocol, a standard for group messaging by the IETF. CGKA works in an asynchronous setting where parties only occasionally must come online, and their messages are relayed by an untrusted server. The most expensive operation provided by CGKA is that which allows for a user to refresh their key material in order to achieve forward secrecy (old messages are secure when a user is compromised) and post-compromise security (users can heal from compromise). One caveat of early CGKA protocols is that these update operations had to be performed sequentially, with any user wanting to update their key material having had to receive and process all previous updates. Late versions of TreeKEM do allow for concurrent updates at the cost of a communication overhead per update message that is linear in the number of updating parties. This was shown to be indeed necessary when achieving PCS in just two rounds of communication by [Bienstock et al. TCC’20].
The recently proposed protocol CoCoA [Alwen et al. Eurocrypt’22], however, shows that this overhead can be reduced if PCS requirements are relaxed, and only a logarithmic number of rounds is required. The natural question, thus, is whether CoCoA is optimal in this setting.
In this work we answer this question, providing a lower bound on the cost (concretely, the amount of data to be uploaded to the server) for CGKA protocols that heal in an arbitrary k number of rounds, that shows that CoCoA is very close to optimal. Additionally, we extend CoCoA to heal in an arbitrary number of rounds, and propose a modification of it, with a reduced communication cost for certain k.
We prove our bound in a combinatorial setting where the state of the protocol progresses in rounds, and the state of the protocol in each round is captured by a set system, each set specifying a set of users who share a secret key. We show this combinatorial model is equivalent to a symbolic model capturing building blocks including PRFs and public-key encryption, related to the one used by Bienstock et al.
Our lower bound is of order k•n1+1/(k-1)/log(k), where 2≤k≤log(n) is the number of updates per user the protocol requires to heal. This generalizes the n2 bound for k=2 from Bienstock et al.. This bound almost matches the k⋅n1+2/(k-1) or k2⋅n1+1/(k-1) efficiency we get for the variants of the CoCoA protocol also introduced in this paper.},
  author       = {Auerbach, Benedikt and Cueto Noval, Miguel and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z},
  booktitle    = {21st International Conference on Theory of Cryptography},
  isbn         = {9783031486203},
  issn         = {1611-3349},
  location     = {Taipei, Taiwan},
  pages        = {271--300},
  publisher    = {Springer Nature},
  title        = {{On the cost of post-compromise security in concurrent Continuous Group-Key Agreement}},
  doi          = {10.1007/978-3-031-48621-0_10},
  volume       = {14371},
  year         = {2023},
}

@inproceedings{14692,
  abstract     = {The generic-group model (GGM) aims to capture algorithms working over groups of prime order that only rely on the group operation, but do not exploit any additional structure given by the concrete implementation of the group. In it, it is possible to prove information-theoretic lower bounds on the hardness of problems like the discrete logarithm (DL) or computational Diffie-Hellman (CDH). Thus, since its introduction, it has served as a valuable tool to assess the concrete security provided by cryptographic schemes based on such problems. A work on the related algebraic-group model (AGM) introduced a method, used by many subsequent works, to adapt GGM lower bounds for one problem to another, by means of conceptually simple reductions.
In this work, we propose an alternative approach to extend GGM bounds from one problem to another. Following an idea by Yun [EC15], we show that, in the GGM, the security of a large class of problems can be reduced to that of geometric search-problems. By reducing the security of the resulting geometric-search problems to variants of the search-by-hypersurface problem, for which information theoretic lower bounds exist, we give alternative proofs of several results that used the AGM approach.
The main advantage of our approach is that our reduction from geometric search-problems works, as well, for the GGM with preprocessing (more precisely the bit-fixing GGM introduced by Coretti, Dodis and Guo [Crypto18]). As a consequence, this opens up the possibility of transferring preprocessing GGM bounds from one problem to another, also by means of simple reductions. Concretely, we prove novel preprocessing bounds on the hardness of the d-strong discrete logarithm, the d-strong Diffie-Hellman inversion, and multi-instance CDH problems, as well as a large class of Uber assumptions. Additionally, our approach applies to Shoup’s GGM without additional restrictions on the query behavior of the adversary, while the recent works of Zhang, Zhou, and Katz [AC22] and Zhandry [Crypto22] highlight that this is not the case for the AGM approach.},
  author       = {Auerbach, Benedikt and Hoffmann, Charlotte and Pascual Perez, Guillermo},
  booktitle    = {21st International Conference on Theory of Cryptography},
  isbn         = {9783031486203},
  issn         = {1611-3349},
  location     = {Taipei, Taiwan},
  pages        = {301--330},
  publisher    = {Springer Nature},
  title        = {{Generic-group lower bounds via reductions between geometric-search problems: With and without preprocessing}},
  doi          = {10.1007/978-3-031-48621-0_11},
  volume       = {14371},
  year         = {2023},
}

@inproceedings{14693,
  abstract     = {Lucas sequences are constant-recursive integer sequences with a long history of applications in cryptography, both in the design of cryptographic schemes and cryptanalysis. In this work, we study the sequential hardness of computing Lucas sequences over an RSA modulus.
First, we show that modular Lucas sequences are at least as sequentially hard as the classical delay function given by iterated modular squaring proposed by Rivest, Shamir, and Wagner (MIT Tech. Rep. 1996) in the context of time-lock puzzles. Moreover, there is no obvious reduction in the other direction, which suggests that the assumption of sequential hardness of modular Lucas sequences is strictly weaker than that of iterated modular squaring. In other words, the sequential hardness of modular Lucas sequences might hold even in the case of an algorithmic improvement violating the sequential hardness of iterated modular squaring.
Second, we demonstrate the feasibility of constructing practically-efficient verifiable delay functions based on the sequential hardness of modular Lucas sequences. Our construction builds on the work of Pietrzak (ITCS 2019) by leveraging the intrinsic connection between the problem of computing modular Lucas sequences and exponentiation in an appropriate extension field.},
  author       = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Krňák, Tomáš},
  booktitle    = {21st International Conference on Theory of Cryptography},
  isbn         = {9783031486234},
  issn         = {1611-3349},
  location     = {Taipei, Taiwan},
  pages        = {336--362},
  publisher    = {Springer Nature},
  title        = {{(Verifiable) delay functions from Lucas sequences}},
  doi          = {10.1007/978-3-031-48624-1_13},
  volume       = {14372},
  year         = {2023},
}

@inproceedings{14736,
  abstract     = {Payment channel networks (PCNs) are a promising technology to improve the scalability of cryptocurrencies. PCNs, however, face the challenge that the frequent usage of certain routes may deplete channels in one direction, and hence prevent further transactions. In order to reap the full potential of PCNs, recharging and rebalancing mechanisms are required to provision channels, as well as an admission control logic to decide which transactions to reject in case capacity is insufficient. This paper presents a formal model of this optimisation problem. In particular, we consider an online algorithms perspective, where transactions arrive over time in an unpredictable manner. Our main contributions are competitive online algorithms which come with provable guarantees over time. We empirically evaluate our algorithms on randomly generated transactions to compare the average performance of our algorithms to our theoretical bounds. We also show how this model and approach differs from related problems in classic communication networks.},
  author       = {Bastankhah, Mahsa and Chatterjee, Krishnendu and Maddah-Ali, Mohammad Ali and Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X},
  booktitle    = {27th International Conference on Financial Cryptography and Data Security},
  isbn         = {9783031477539},
  issn         = {1611-3349},
  location     = {Bol, Brac, Croatia},
  pages        = {309--325},
  publisher    = {Springer Nature},
  title        = {{R2: Boosting liquidity in payment channel networks with online admission control}},
  doi          = {10.1007/978-3-031-47754-6_18},
  volume       = {13950},
  year         = {2023},
}

@inproceedings{14744,
  abstract     = {Sharding distributed ledgers is a promising on-chain solution for scaling blockchains but lacks formal grounds, nurturing skepticism on whether such complex systems can scale blockchains securely. We fill this gap by introducing the first formal framework as well as a roadmap to robust sharding. In particular, we first define the properties sharded distributed ledgers should fulfill. We build upon and extend the Bitcoin backbone protocol by defining consistency and scalability. Consistency encompasses the need for atomic execution of cross-shard transactions to preserve safety, whereas scalability encapsulates the speedup a sharded system can gain in comparison to a non-sharded system.
Using our model, we explore the limitations of sharding. We show that a sharded ledger with n participants cannot scale under a fully adaptive adversary, but it can scale up to m shards where n=c'm log m, under an epoch-adaptive adversary; the constant c' encompasses the trade-off between security and scalability. This is possible only if the sharded ledgers create succinct proofs of the valid state updates at every epoch. We leverage our results to identify the sufficient components for robust sharding, which we incorporate in a protocol abstraction termed Divide & Scale. To demonstrate the power of our framework, we analyze the most prominent sharded blockchains (Elastico, Monoxide, OmniLedger, RapidChain) and pinpoint where they fail to meet the desired properties.},
  author       = {Avarikioti, Zeta and Desjardins, Antoine and Kokoris Kogias, Eleftherios and Wattenhofer, Roger},
  booktitle    = {30th International Colloquium on Structural Information and Communication Complexity},
  isbn         = {9783031327322},
  issn         = {1611-3349},
  location     = {Alcalá de Henares, Spain},
  pages        = {199--245},
  publisher    = {Springer Nature},
  title        = {{Divide \& Scale: Formalization and roadmap to robust sharding}},
  doi          = {10.1007/978-3-031-32733-9_10},
  volume       = {13892},
  year         = {2023},
}

@inproceedings{14758,
  abstract     = {We present a flexible and efficient toolchain to symbolically solve (standard) Rabin games, fair-adversarial Rabin games, and 2 1/2-player Rabin games. To our best knowledge, our tools are the first ones to be able to solve these problems. Furthermore, using these flexible game solvers as a back-end, we implemented a tool for computing correct-by-construction controllers for stochastic dynamical systems under LTL specifications. Our implementations use the recent theoretical result that all of these games can be solved using the same symbolic fixpoint algorithm but utilizing different, domain specific calculations of the involved predecessor operators. The main feature of our toolchain is the utilization of two programming abstractions: one to separate the symbolic fixpoint computations from the predecessor calculations, and another one to allow the integration of different BDD libraries as back-ends. In particular, we employ a multi-threaded execution of the fixpoint algorithm by using the multi-threaded BDD library Sylvan, which leads to enormous computational savings.},
  author       = {Majumdar, Rupak and Mallik, Kaushik and Rychlicki, Mateusz and Schmuck, Anne-Kathrin and Soudjani, Sadegh},
  booktitle    = {35th International Conference on Computer Aided Verification},
  isbn         = {9783031377082},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {3--15},
  publisher    = {Springer Nature},
  title        = {{A flexible toolchain for symbolic Rabin games under fair and stochastic uncertainties}},
  doi          = {10.1007/978-3-031-37709-9_1},
  volume       = {13966},
  year         = {2023},
}

@inproceedings{14829,
  abstract     = {This paper explores a modular design architecture aimed at helping blockchains (and other SMR implementation) to scale to a very large number of processes. This comes in contrast to existing monolithic architectures that interleave transaction dissemination, ordering, and execution in a single functionality. To achieve this we first split the monolith to multiple layers which can use existing distributed computing primitives. The exact specifications of the data dissemination part are formally defined by the Proof of Availability & Retrieval (PoA &R) abstraction. Solutions to the PoA &R problem contain two related sub-protocols: one that “pushes” information into the network and another that “pulls” this information. Regarding the latter, there is a dearth of research literature which is rectified in this paper. We present a family of pulling sub-protocols and rigorously analyze them. Extensive simulations support the theoretical claims of efficiency and robustness in case of a very large number of players. Finally, actual implementation and deployment on a small number of machines (roughly the size of several industrial systems) demonstrates the viability of the architecture’s paradigm.},
  author       = {Cohen, Shir and Goren, Guy and Kokoris Kogias, Eleftherios and Sonnino, Alberto and Spiegelman, Alexander},
  booktitle    = {27th International Conference on Financial Cryptography and Data Security},
  isbn         = {9783031477508},
  issn         = {1611-3349},
  location     = {Bol, Brac, Croatia},
  pages        = {36--53},
  publisher    = {Springer Nature},
  title        = {{Proof of availability and retrieval in a modular blockchain architecture}},
  doi          = {10.1007/978-3-031-47751-5_3},
  volume       = {13951},
  year         = {2023},
}

@inproceedings{13139,
  abstract     = {A classical problem for Markov chains is determining their stationary (or steady-state) distribution. This problem has an equally classical solution based on eigenvectors and linear equation systems. However, this approach does not scale to large instances, and iterative solutions are desirable. It turns out that a naive approach, as used by current model checkers, may yield completely wrong results. We present a new approach, which utilizes recent advances in partial exploration and mean payoff computation to obtain a correct, converging approximation.},
  author       = {Meggendorfer, Tobias},
  booktitle    = {TACAS 2023: Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783031308222},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {489--507},
  publisher    = {Springer Nature},
  title        = {{Correct approximation of stationary distributions}},
  doi          = {10.1007/978-3-031-30823-9_25},
  volume       = {13993},
  year         = {2023},
  internal-note = {NOTE(review): entry checked; volume 13993 and ISBN 9783031308222 agree with sibling entry 13142 (same proceedings volume). Ignored field, invisible to BibTeX styles.},
}

@inproceedings{13141,
  abstract     = {We automatically compute a new class of environment assumptions in two-player turn-based finite graph games which characterize an “adequate cooperation” needed from the environment to allow the system player to win. Given an ω-regular winning condition Φ for the system player, we compute an ω-regular assumption Ψ for the environment player, such that (i) every environment strategy compliant with Ψ allows the system to fulfill Φ (sufficiency), (ii) Ψ can be fulfilled by the environment for every strategy of the system (implementability), and (iii) Ψ does not prevent any cooperative strategy choice (permissiveness).
For parity games, which are canonical representations of ω-regular games, we present a polynomial-time algorithm for the symbolic computation of adequately permissive assumptions and show that our algorithm runs faster and produces better assumptions than existing approaches—both theoretically and empirically. To the best of our knowledge, for ω-regular games, we provide the first algorithm to compute sufficient and implementable environment assumptions that are also permissive.},
  author       = {Anand, Ashwani and Mallik, Kaushik and Nayak, Satya Prakash and Schmuck, Anne-Kathrin},
  booktitle    = {TACAS 2023: Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783031308192},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {211--228},
  publisher    = {Springer Nature},
  title        = {{Computing adequately permissive assumptions for synthesis}},
  doi          = {10.1007/978-3-031-30820-8_15},
  volume       = {13994},
  year         = {2023},
}

@inproceedings{13142,
  abstract     = {Reinforcement learning has received much attention for learning controllers of deterministic systems. We consider a learner-verifier framework for stochastic control systems and survey recent methods that formally guarantee a conjunction of reachability and safety properties. Given a property and a lower bound on the probability of the property being satisfied, our framework jointly learns a control policy and a formal certificate to ensure the satisfaction of the property with a desired probability threshold. Both the control policy and the formal certificate are continuous functions from states to reals, which are learned as parameterized neural networks. While in the deterministic case, the certificates are invariant and barrier functions for safety, or Lyapunov and ranking functions for liveness, in the stochastic case the certificates are supermartingales. For certificate verification, we use interval arithmetic abstract interpretation to bound the expected values of neural network functions.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A. and Lechner, Mathias and Žikelić, Đorđe},
  booktitle    = {TACAS 2023: Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783031308222},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {3--25},
  publisher    = {Springer Nature},
  title        = {{A learner-verifier framework for neural network controllers and certificates of stochastic systems}},
  doi          = {10.1007/978-3-031-30823-9_1},
  volume       = {13993},
  year         = {2023},
}

