@inproceedings{13143,
  abstract     = {GIMPS and PrimeGrid are large-scale distributed projects dedicated to searching giant prime numbers, usually of special forms like Mersenne and Proth primes. The numbers in the current search-space are millions of digits large and the participating volunteers need to run resource-consuming primality tests. Once a candidate prime N has been found, the only way for another party to independently verify the primality of N used to be by repeating the expensive primality test. To avoid the need for second recomputation of each primality test, these projects have recently adopted certifying mechanisms that enable efficient verification of performed tests. However, the mechanisms presently in place only detect benign errors and there is no guarantee against adversarial behavior: a malicious volunteer can mislead the project to reject a giant prime as being non-prime.
In this paper, we propose a practical, cryptographically-sound mechanism for certifying the non-primality of Proth numbers. That is, a volunteer can – parallel to running the primality test for N – generate an efficiently verifiable proof at a little extra cost certifying that N is not prime. The interactive protocol has statistical soundness and can be made non-interactive using the Fiat-Shamir heuristic.
Our approach is based on a cryptographic primitive called Proof of Exponentiation (PoE) which, for a group G, certifies that a tuple (x,y,T)∈G^2×N satisfies x^{2^T}=y (Pietrzak, ITCS 2019 and Wesolowski, J. Cryptol. 2020). In particular, we show how to adapt Pietrzak’s PoE at a moderate additional cost to make it a cryptographically-sound certificate of non-primality.},
  author       = {Hoffmann, Charlotte and Hubáček, Pavel and Kamath, Chethan and Pietrzak, Krzysztof Z},
  booktitle    = {Public-Key Cryptography - PKC 2023},
  isbn         = {9783031313677},
  issn         = {1611-3349},
  location     = {Atlanta, GA, United States},
  pages        = {530--553},
  publisher    = {Springer Nature},
  title        = {{Certifying giant nonprimes}},
  doi          = {10.1007/978-3-031-31368-4_19},
  volume       = {13940},
  year         = {2023},
}

@inproceedings{13236,
  abstract     = {We present an auction algorithm using multiplicative instead of constant weight updates to compute a (1−ε)-approximate maximum weight matching (MWM) in a bipartite graph with n vertices and m edges in time O(mε^{−1}log(ε^{−1})), matching the running time of the linear-time approximation algorithm of Duan and Pettie [JACM ’14]. Our algorithm is very simple and it can be extended to give a dynamic data structure that maintains a (1−ε)-approximate maximum weight matching under (1) one-sided vertex deletions (with incident edges) and (2) one-sided vertex insertions (with incident edges sorted by weight) to the other side. The total time used is O(mε^{−1}log(ε^{−1})), where m is the sum of the number of initially existing and inserted edges.},
  author       = {Zheng, Da Wei and Henzinger, Monika H},
  booktitle    = {International Conference on Integer Programming and Combinatorial Optimization},
  isbn         = {9783031327254},
  issn         = {1611-3349},
  location     = {Madison, WI, United States},
  pages        = {453--465},
  publisher    = {Springer Nature},
  title        = {{Multiplicative auction algorithm for approximate maximum weight bipartite matching}},
  doi          = {10.1007/978-3-031-32726-1_32},
  volume       = {13904},
  year         = {2023},
}

@inproceedings{13238,
  abstract     = {We consider a natural problem dealing with weighted packet selection across a rechargeable link, which e.g., finds applications in cryptocurrency networks. The capacity of a link (u, v) is determined by how much nodes u and v allocate for this link. Specifically, the input is a finite ordered sequence of packets that arrive in both directions along a link. Given (u, v) and a packet of weight x going from u to v, node u can either accept or reject the packet. If u accepts the packet, the capacity on link (u, v) decreases by x. Correspondingly, v’s capacity on (u, v) increases by x. If a node rejects the packet, this will entail a cost affinely linear in the weight of the packet. A link is “rechargeable” in the sense that the total capacity of the link has to remain constant, but the allocation of capacity at the ends of the link can depend arbitrarily on the nodes’ decisions. The goal is to minimise the sum of the capacity injected into the link and the cost of rejecting packets. We show that the problem is NP-hard, but can be approximated efficiently with a ratio of (1+ε)⋅(1+√3) for some arbitrary ε>0.},
  author       = {Schmid, Stefan and Svoboda, Jakub and Yeo, Michelle X},
  booktitle    = {SIROCCO 2023: Structural Information and Communication Complexity},
  isbn         = {9783031327322},
  issn         = {1611-3349},
  location     = {Alcala de Henares, Spain},
  pages        = {576--594},
  publisher    = {Springer Nature},
  title        = {{Weighted packet selection for rechargeable links in cryptocurrency networks: Complexity and approximation}},
  doi          = {10.1007/978-3-031-32733-9_26},
  volume       = {13892},
  year         = {2023},
}

@inproceedings{13310,
  abstract     = {Machine-learned systems are in widespread use for making decisions about humans, and it is important that they are fair, i.e., not biased against individuals based on sensitive attributes. We present runtime verification of algorithmic fairness for systems whose models are unknown, but are assumed to have a Markov chain structure. We introduce a specification language that can model many common algorithmic fairness properties, such as demographic parity, equal opportunity, and social burden. We build monitors that observe a long sequence of events as generated by a given system, and output, after each observation, a quantitative estimate of how fair or biased the system was on that run until that point in time. The estimate is proven to be correct modulo a variable error bound and a given confidence level, where the error bound gets tighter as the observed sequence gets longer. Our monitors are of two types, and use, respectively, frequentist and Bayesian statistical inference techniques. While the frequentist monitors compute estimates that are objectively correct with respect to the ground truth, the Bayesian monitors compute estimates that are correct subject to a given prior belief about the system’s model. Using a prototype implementation, we show how we can monitor if a bank is fair in giving loans to applicants from different social backgrounds, and if a college is fair in admitting students while maintaining a reasonable financial burden on the society. Although they exhibit different theoretical complexities in certain cases, in our experiments, both frequentist and Bayesian monitors took less than a millisecond to update their verdicts after each observation.},
  author       = {Henzinger, Thomas A and Karimi, Mahyar and Kueffner, Konstantin and Mallik, Kaushik},
  booktitle    = {Computer Aided Verification},
  isbn         = {9783031377020},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {358--382},
  publisher    = {Springer Nature},
  title        = {{Monitoring algorithmic fairness}},
  doi          = {10.1007/978-3-031-37703-7_17},
  volume       = {13965},
  year         = {2023},
}

@inproceedings{14259,
  abstract     = {We provide a learning-based technique for guessing a winning strategy in a parity game originating from an LTL synthesis problem. A cheaply obtained guess can be useful in several applications. Not only can the guessed strategy be applied as best-effort in cases where the game’s huge size prohibits rigorous approaches, but it can also increase the scalability of rigorous LTL synthesis in several ways. Firstly, checking whether a guessed strategy is winning is easier than constructing one. Secondly, even if the guess is wrong in some places, it can be fixed by strategy iteration faster than constructing one from scratch. Thirdly, the guess can be used in on-the-fly approaches to prioritize exploration in the most fruitful directions.
In contrast to previous works, we (i) reflect the highly structured logical information in game’s states, the so-called semantic labelling, coming from the recent LTL-to-automata translations, and (ii) learn to reflect it properly by learning from previously solved games, bringing the solving process closer to human-like reasoning.},
  author       = {Kretinsky, Jan and Meggendorfer, Tobias and Prokop, Maximilian and Rieder, Sabine},
  booktitle    = {35th International Conference on Computer Aided Verification},
  isbn         = {9783031377051},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {390--414},
  publisher    = {Springer Nature},
  title        = {{Guessing winning policies in LTL synthesis by semantic learning}},
  doi          = {10.1007/978-3-031-37706-8_20},
  volume       = {13964},
  year         = {2023},
}

@inproceedings{14260,
  abstract     = {This paper presents Lincheck, a new practical and user-friendly framework for testing concurrent algorithms on the Java Virtual Machine (JVM). Lincheck provides a simple and declarative way to write concurrent tests: instead of describing how to perform the test, users specify what to test by declaring all the operations to examine; the framework automatically handles the rest. As a result, tests written with Lincheck are concise and easy to understand. The framework automatically generates a set of concurrent scenarios, examines them using stress-testing or bounded model checking, and verifies that the results of each invocation are correct. Notably, if an error is detected via model checking, Lincheck provides an easy-to-follow trace to reproduce it, significantly simplifying the bug investigation.

To the best of our knowledge, Lincheck is the first production-ready tool on the JVM that offers such a simple way of writing concurrent tests, without requiring special skills or expertise. We successfully integrated Lincheck in the development process of several large projects, such as Kotlin Coroutines, and identified new bugs in popular concurrency libraries, such as a race in Java’s standard ConcurrentLinkedDeque and a liveliness bug in Java’s AbstractQueuedSynchronizer framework, which is used in most of the synchronization primitives. We believe that Lincheck can significantly improve the quality and productivity of concurrent algorithms research and development and become the state-of-the-art tool for checking their correctness.},
  author       = {Koval, Nikita and Fedorov, Alexander and Sokolova, Maria and Tsitelov, Dmitry and Alistarh, Dan-Adrian},
  booktitle    = {35th International Conference on Computer Aided Verification},
  isbn         = {9783031377051},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {156--169},
  publisher    = {Springer Nature},
  title        = {{Lincheck: A practical framework for testing concurrent data structures on JVM}},
  doi          = {10.1007/978-3-031-37706-8_8},
  volume       = {13964},
  year         = {2023},
}

@inproceedings{12467,
  abstract     = {Safety and liveness are elementary concepts of computation, and the foundation of many verification paradigms. The safety-liveness classification of boolean properties characterizes whether a given property can be falsified by observing a finite prefix of an infinite computation trace (always for safety, never for liveness). In quantitative specification and verification, properties assign not truth values, but quantitative values to infinite traces (e.g., a cost, or the distance to a boolean property). We introduce quantitative safety and liveness, and we prove that our definitions induce conservative quantitative generalizations of both (1) the safety-progress hierarchy of boolean properties and (2) the safety-liveness decomposition of boolean properties. In particular, we show that every quantitative property can be written as the pointwise minimum of a quantitative safety property and a quantitative liveness property. Consequently, like boolean properties, also quantitative properties can be min-decomposed into safety and liveness parts, or alternatively, max-decomposed into co-safety and co-liveness parts. Moreover, quantitative properties can be approximated naturally. We prove that every quantitative property that has both safe and co-safe approximations can be monitored arbitrarily precisely by a monitor that uses only a finite number of states.},
  author       = {Henzinger, Thomas A and Mazzocchi, Nicolas Adrien and Sarac, Naci E},
  booktitle    = {26th International Conference Foundations of Software Science and Computation Structures},
  isbn         = {9783031308284},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {349--370},
  publisher    = {Springer Nature},
  title        = {{Quantitative safety and liveness}},
  doi          = {10.1007/978-3-031-30829-1_17},
  volume       = {13992},
  year         = {2023},
}

@inproceedings{12854,
  abstract     = {The main idea behind BUBAAK is to run multiple program analyses in parallel and use runtime monitoring and enforcement to observe and control their progress in real time. The analyses send information about (un)explored states of the program and discovered invariants to a monitor. The monitor processes the received data and can force an analysis to stop the search of certain program parts (which have already been analyzed by other analyses), or to make it utilize a program invariant found by another analysis.
At SV-COMP  2023, the implementation of data exchange between the monitor and the analyses was not yet completed, which is why BUBAAK only ran several analyses in parallel, without any coordination. Still, BUBAAK won the meta-category FalsificationOverall and placed very well in several other (sub)-categories of the competition.},
  author       = {Chalupa, Marek and Henzinger, Thomas A},
  booktitle    = {Tools and Algorithms for the Construction and Analysis of Systems},
  isbn         = {9783031308192},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {535--540},
  publisher    = {Springer Nature},
  title        = {{Bubaak: Runtime monitoring of program verifiers}},
  doi          = {10.1007/978-3-031-30820-8_32},
  volume       = {13994},
  year         = {2023},
}

@inproceedings{12856,
  abstract     = {As the complexity and criticality of software increase every year, so does the importance of run-time monitoring. Third-party monitoring, with limited knowledge of the monitored software, and best-effort monitoring, which keeps pace with the monitored software, are especially valuable, yet underexplored areas of run-time monitoring. Most existing monitoring frameworks do not support their combination because they either require access to the monitored code for instrumentation purposes or the processing of all observed events, or both.

We present a middleware framework, VAMOS, for the run-time monitoring of software which is explicitly designed to support third-party and best-effort scenarios. The design goals of VAMOS are (i) efficiency (keeping pace at low overhead), (ii) flexibility (the ability to monitor black-box code through a variety of different event channels, and the connectability to monitors written in different specification languages), and (iii) ease-of-use. To achieve its goals, VAMOS combines aspects of event broker and event recognition systems with aspects of stream processing systems.
We implemented a prototype toolchain for VAMOS and conducted experiments including a case study of monitoring for data races. The results indicate that VAMOS enables writing useful yet efficient monitors, is compatible with a variety of event sources and monitor specifications, and simplifies key aspects of setting up a monitoring system from scratch.},
  author       = {Chalupa, Marek and Mühlböck, Fabian and Muroya Lei, Stefanie and Henzinger, Thomas A},
  booktitle    = {Fundamental Approaches to Software Engineering},
  isbn         = {9783031308253},
  issn         = {1611-3349},
  location     = {Paris, France},
  pages        = {260--281},
  publisher    = {Springer Nature},
  title        = {{Vamos: Middleware for best-effort third-party monitoring}},
  doi          = {10.1007/978-3-031-30826-0_15},
  volume       = {13991},
  year         = {2023},
}

@inproceedings{10891,
  abstract     = {We present a formal framework for the online black-box monitoring of software using monitors with quantitative verdict functions. Quantitative verdict functions have several advantages. First, quantitative monitors can be approximate, i.e., the value of the verdict function does not need to correspond exactly to the value of the property under observation. Second, quantitative monitors can be quantified universally, i.e., for every possible observed behavior, the monitor tries to make the best effort to estimate the value of the property under observation. Third, quantitative monitors can watch boolean as well as quantitative properties, such as average response time. Fourth, quantitative monitors can use non-finite-state resources, such as counters. As a consequence, quantitative monitors can be compared according to how many resources they use (e.g., the number of counters) and how precisely they approximate the property under observation. This allows for a rich spectrum of cost-precision trade-offs in monitoring software.},
  author       = {Henzinger, Thomas A},
  booktitle    = {Software Verification},
  isbn         = {9783030955601},
  issn         = {1611-3349},
  location     = {New Haven, CT, United States},
  pages        = {3--6},
  publisher    = {Springer Nature},
  title        = {{Quantitative monitoring of software}},
  doi          = {10.1007/978-3-030-95561-8_1},
  volume       = {13124},
  year         = {2022},
}

@inproceedings{11185,
  abstract     = {Bundling crossings is a strategy which can enhance the readability of graph drawings. In this paper we consider bundlings for families of pseudosegments, i.e., simple curves such that any two share at most one point at which they cross. Our main result is that there is a polynomial-time algorithm to compute an 8-approximation of the bundled crossing number of such instances (up to adding a term depending on the facial structure). This 8-approximation also holds for bundlings of good drawings of graphs. In the special case of circular drawings the approximation factor is 8 (no extra term); this improves upon the 10-approximation of Fink et al. [6]. We also show how to compute a 9/2-approximation when the intersection graph of the pseudosegments is bipartite.},
  author       = {Arroyo Guevara, Alan M and Felsner, Stefan},
  booktitle    = {WALCOM 2022: Algorithms and Computation},
  isbn         = {9783030967307},
  issn         = {1611-3349},
  location     = {Jember, Indonesia},
  pages        = {383--395},
  publisher    = {Springer Nature},
  title        = {{Approximating the bundled crossing number}},
  doi          = {10.1007/978-3-030-96731-4_31},
  volume       = {13174},
  year         = {2022},
}

@inproceedings{11355,
  abstract     = {Contract-based design is a promising methodology for taming the complexity of developing sophisticated systems. A formal contract distinguishes between assumptions, which are constraints that the designer of a component puts on the environments in which the component can be used safely, and guarantees, which are promises that the designer asks from the team that implements the component. A theory of formal contracts can be formalized as an interface theory, which supports the composition and refinement of both assumptions and guarantees.
Although there is a rich landscape of contract-based design methods that address functional and extra-functional properties, we present the first interface theory that is designed for ensuring system-wide security properties. Our framework provides a refinement relation and a composition operation that support both incremental design and independent implementability. We develop our theory for both stateless and stateful interfaces. We illustrate the applicability of our framework with an example inspired from the automotive domain.},
  author       = {Bartocci, Ezio and Ferrere, Thomas and Henzinger, Thomas A and Nickovic, Dejan and Da Costa, Ana Oliveira},
  booktitle    = {Fundamental Approaches to Software Engineering},
  isbn         = {9783030994280},
  issn         = {1611-3349},
  location     = {Munich, Germany},
  pages        = {3--22},
  publisher    = {Springer Nature},
  title        = {{Information-flow interfaces}},
  doi          = {10.1007/978-3-030-99429-7_1},
  volume       = {13241},
  year         = {2022},
}

@book{11429,
  abstract     = {This book constitutes the refereed proceedings of the 18th International Symposium on Web and Wireless Geographical Information Systems, W2GIS 2022, held in Konstanz, Germany, in April 2022.
The 7 full papers presented together with 6 short papers in the volume were carefully reviewed and selected from 16 submissions.  The papers cover topics that range from mobile GIS and Location-Based Services to Spatial Information Retrieval and Wireless Sensor Networks.},
  editor       = {Karimipour, Farid and Storandt, Sabine},
  isbn         = {9783031062445},
  issn         = {1611-3349},
  pages        = {153},
  publisher    = {Springer Nature},
  title        = {{Web and Wireless Geographical Information Systems}},
  doi          = {10.1007/978-3-031-06245-2},
  volume       = {13238},
  year         = {2022},
}

@inproceedings{11476,
  abstract     = {Messaging platforms like Signal are widely deployed and provide strong security in an asynchronous setting. It is a challenging problem to construct a protocol with similar security guarantees that can efficiently scale to large groups. A major bottleneck are the frequent key rotations users need to perform to achieve post compromise forward security.

In current proposals – most notably in TreeKEM (which is part of the IETF’s Messaging Layer Security (MLS) protocol draft) – for users in a group of size n to rotate their keys, they must each craft a message of size log(n) to be broadcast to the group using an (untrusted) delivery server.

In larger groups, having users sequentially rotate their keys requires too much bandwidth (or takes too long), so variants allowing any T≤n users to simultaneously rotate their keys in just 2 communication rounds have been suggested (e.g. “Propose and Commit” by MLS). Unfortunately, 2-round concurrent updates are either damaging or expensive (or both); i.e. they either result in future operations being more costly (e.g. via “blanking” or “tainting”) or are costly themselves requiring Ω(T) communication for each user [Bienstock et al., TCC’20].

In this paper we propose CoCoA; a new scheme that allows for T concurrent updates that are neither damaging nor costly. That is, they add no cost to future operations yet they only require Ω(log^2(n)) communication per user. To circumvent the [Bienstock et al.] lower bound, CoCoA increases the number of rounds needed to complete all updates from 2 up to (at most) log(n); though typically fewer rounds are needed.

The key insight of our protocol is the following: in the (non-concurrent version of) TreeKEM, a delivery server which gets T concurrent update requests will approve one and reject the remaining T−1. In contrast, our server attempts to apply all of them. If more than one user requests to rotate the same key during a round, the server arbitrarily picks a winner. Surprisingly, we prove that regardless of how the server chooses the winners, all previously compromised users will recover after at most log(n) such update rounds.

To keep the communication complexity low, CoCoA is a server-aided CGKA. That is, the delivery server no longer blindly forwards packets, but instead actively computes individualized packets tailored to each user. As the server is untrusted, this change requires us to develop new mechanisms ensuring robustness of the protocol.},
  author       = {Alwen, Joël and Auerbach, Benedikt and Cueto Noval, Miguel and Klein, Karen and Pascual Perez, Guillermo and Pietrzak, Krzysztof Z and Walter, Michael},
  booktitle    = {Advances in Cryptology – EUROCRYPT 2022},
  isbn         = {9783031070846},
  issn         = {1611-3349},
  location     = {Trondheim, Norway},
  pages        = {815--844},
  publisher    = {Springer Nature},
  title        = {{CoCoA: Concurrent continuous group key agreement}},
  doi          = {10.1007/978-3-031-07085-3_28},
  volume       = {13276},
  year         = {2022},
}

@inproceedings{11707,
  abstract     = {In this work we introduce the graph-theoretic notion of mendability: for each locally checkable graph problem we can define its mending radius, which captures the idea of how far one needs to modify a partial solution in order to “patch a hole.” We explore how mendability is connected to the existence of efficient algorithms, especially in distributed, parallel, and fault-tolerant settings. It is easy to see that O(1)-mendable problems are also solvable in O(log∗n) rounds in the LOCAL model of distributed computing. One of the surprises is that in paths and cycles, a converse also holds in the following sense: if a problem Π can be solved in O(log∗n), there is always a restriction Π′⊆Π that is still efficiently solvable but that is also O(1)-mendable. We also explore the structure of the landscape of mendability. For example, we show that in trees, the mending radius of any locally checkable problem is O(1), Θ(logn), or Θ(n), while in general graphs the structure is much more diverse.},
  author       = {Balliu, Alkida and Hirvonen, Juho and Melnyk, Darya and Olivetti, Dennis and Rybicki, Joel and Suomela, Jukka},
  booktitle    = {International Colloquium on Structural Information and Communication Complexity},
  editor       = {Parter, Merav},
  isbn         = {9783031099922},
  issn         = {1611-3349},
  location     = {Paderborn, Germany},
  pages        = {1--20},
  publisher    = {Springer Nature},
  title        = {{Local mending}},
  doi          = {10.1007/978-3-031-09993-9_1},
  volume       = {13298},
  year         = {2022},
}

@inproceedings{11775,
  abstract     = {Quantitative monitoring can be universal and approximate: For every finite sequence of observations, the specification provides a value and the monitor outputs a best-effort approximation of it. The quality of the approximation may depend on the resources that are available to the monitor. By taking to the limit the sequences of specification values and monitor outputs, we obtain precision-resource trade-offs also for limit monitoring. This paper provides a formal framework for studying such trade-offs using an abstract interpretation for monitors: For each natural number n, the aggregate semantics of a monitor at time n is an equivalence relation over all sequences of at most n observations so that two equivalent sequences are indistinguishable to the monitor and thus mapped to the same output. This abstract interpretation of quantitative monitors allows us to measure the number of equivalence classes (or “resource use”) that is necessary for a certain precision up to a certain time, or at any time. Our framework offers several insights. For example, we identify a family of specifications for which any resource-optimal exact limit monitor is independent of any error permitted over finite traces. Moreover, we present a specification for which any resource-optimal approximate limit monitor does not minimize its resource use at any time. },
  author       = {Henzinger, Thomas A and Mazzocchi, Nicolas Adrien and Sarac, Naci E},
  booktitle    = {22nd International Conference on Runtime Verification},
  issn         = {0302-9743},
  location     = {Tbilisi, Georgia},
  pages        = {200--220},
  publisher    = {Springer Nature},
  title        = {{Abstract monitors for quantitative specifications}},
  doi          = {10.1007/978-3-031-17196-3_11},
  volume       = {13498},
  year         = {2022},
}

@inproceedings{12000,
  abstract     = {We consider the quantitative problem of obtaining lower-bounds on the probability of termination of a given non-deterministic probabilistic program. Specifically, given a non-termination threshold p∈[0,1], we aim for certificates proving that the program terminates with probability at least 1−p. The basic idea of our approach is to find a terminating stochastic invariant, i.e. a subset SI of program states such that (i) the probability of the program ever leaving SI is no more than p, and (ii) almost-surely, the program either leaves SI or terminates.

While stochastic invariants are already well-known, we provide the first proof that the idea above is not only sound, but also complete for quantitative termination analysis. We then introduce a novel sound and complete characterization of stochastic invariants that enables template-based approaches for easy synthesis of quantitative termination certificates, especially in affine or polynomial forms. Finally, by combining this idea with the existing martingale-based methods that are relatively complete for qualitative termination analysis, we obtain the first automated, sound, and relatively complete algorithm for quantitative termination analysis. Notably, our completeness guarantees for quantitative termination analysis are as strong as the best-known methods for the qualitative variant.

Our prototype implementation demonstrates the effectiveness of our approach on various probabilistic programs. We also demonstrate that our algorithm certifies lower bounds on termination probability for probabilistic programs that are beyond the reach of previous methods.},
  author       = {Chatterjee, Krishnendu and Goharshady, Amir Kafshdar and Meggendorfer, Tobias and Zikelic, Dorde},
  booktitle    = {Proceedings of the 34th International Conference on Computer Aided Verification},
  isbn         = {9783031131844},
  issn         = {1611-3349},
  location     = {Haifa, Israel},
  pages        = {55--78},
  publisher    = {Springer Nature},
  title        = {{Sound and complete certificates for quantitative termination analysis of probabilistic programs}},
  doi          = {10.1007/978-3-031-13185-1_4},
  volume       = {13371},
  year         = {2022},
}

@inproceedings{12167,
  abstract     = {Payment channels effectively move the transaction load off-chain thereby successfully addressing the inherent scalability problem most cryptocurrencies face. A major drawback of payment channels is the need to “top up” funds on-chain when a channel is depleted. Rebalancing was proposed to alleviate this issue, where parties with depleting channels move their funds along a cycle to replenish their channels off-chain. Protocols for rebalancing so far either introduce local solutions or compromise privacy.
In this work, we present an opt-in rebalancing protocol that is both private and globally optimal, meaning our protocol maximizes the total amount of rebalanced funds. We study rebalancing from the framework of linear programming. To obtain full privacy guarantees, we leverage multi-party computation in solving the linear program, which is executed by selected participants to maintain efficiency. Finally, we efficiently decompose the rebalancing solution into incentive-compatible cycles which conserve user balances when executed atomically.},
  author       = {Avarikioti, Georgia and Pietrzak, Krzysztof Z and Salem, Iosif and Schmid, Stefan and Tiwari, Samarth and Yeo, Michelle X},
  booktitle    = {Financial Cryptography and Data Security},
  isbn         = {9783031182822},
  issn         = {1611-3349},
  location     = {Grenada},
  pages        = {358--373},
  publisher    = {Springer Nature},
  title        = {{Hide & Seek: Privacy-preserving rebalancing on payment channel networks}},
  doi          = {10.1007/978-3-031-18283-9_17},
  volume       = {13411},
  year         = {2022},
}

@inproceedings{12168,
  abstract     = {Advances in blockchains have influenced the State-Machine-Replication (SMR) world and many state-of-the-art blockchain-SMR solutions are based on two pillars: Chaining and Leader-rotation. A predetermined round-robin mechanism used for Leader-rotation, however, has an undesirable behavior: crashed parties become designated leaders infinitely often, slowing down overall system performance. In this paper, we provide a new Leader-Aware SMR framework that, among other desirable properties, formalizes a Leader-utilization requirement that bounds the number of rounds whose leaders are faulty in crash-only executions.
We introduce Carousel, a novel, reputation-based Leader-rotation solution to achieve Leader-Aware SMR. The challenge in adaptive Leader-rotation is that it cannot rely on consensus to determine a leader, since consensus itself needs a leader. Carousel uses the available on-chain information to determine a leader locally and achieves Liveness despite this difficulty. A HotStuff implementation fitted with Carousel demonstrates drastic performance improvements: it increases throughput over 2x in faultless settings and provided a 20x throughput increase and 5x latency reduction in the presence of faults.},
  author       = {Cohen, Shir and Gelashvili, Rati and Kokoris Kogias, Eleftherios and Li, Zekun and Malkhi, Dahlia and Sonnino, Alberto and Spiegelman, Alexander},
  booktitle    = {International Conference on Financial Cryptography and Data Security},
  isbn         = {9783031182822},
  issn         = {1611-3349},
  location     = {Grenada},
  pages        = {279--295},
  publisher    = {Springer Nature},
  title        = {{Be aware of your leaders}},
  doi          = {10.1007/978-3-031-18283-9_13},
  volume       = {13411},
  year         = {2022},
}

@inproceedings{12170,
  abstract     = {We present PET, a specialized and highly optimized framework for partial exploration on probabilistic systems. Over the last decade, several significant advances in the analysis of Markov decision processes employed partial exploration. In a nutshell, this idea allows to focus computation on specific parts of the system, guided by heuristics, while maintaining correctness. In particular, only relevant parts of the system are constructed on demand, which in turn potentially allows to omit constructing large parts of the system. Depending on the model, this leads to dramatic speed-ups, in extreme cases even up to an arbitrary factor. PET unifies several previous implementations and provides a flexible framework to easily implement partial exploration for many further problems. Our experimental evaluation shows significant improvements compared to the previous implementations while vastly reducing the overhead required to add support for additional properties.},
  author       = {Meggendorfer, Tobias},
  booktitle    = {20th International Symposium on Automated Technology for Verification and Analysis},
  isbn         = {9783031199912},
  issn         = {1611-3349},
  location     = {Virtual},
  pages        = {320--326},
  publisher    = {Springer Nature},
  title        = {{PET – A partial exploration tool for probabilistic verification}},
  doi          = {10.1007/978-3-031-19992-9_20},
  volume       = {13505},
  year         = {2022},
}

