@article{12159,
  abstract     = {The term “haplotype block” is commonly used in the developing field of haplotype-based inference methods. We argue that the term should be defined based on the structure of the Ancestral Recombination Graph (ARG), which contains complete information on the ancestry of a sample. We use simulated examples to demonstrate key features of the relationship between haplotype blocks and ancestral structure, emphasizing the stochasticity of the processes that generate them. Even the simplest cases of neutrality or of a “hard” selective sweep produce a rich structure, often missed by commonly used statistics. We highlight a number of novel methods for inferring haplotype structure, based on the full ARG, or on a sequence of trees, and illustrate how they can be used to define haplotype blocks using an empirical data set. While the advent of new, computationally efficient methods makes it possible to apply these concepts broadly, they (and additional new methods) could benefit from adding features to explore haplotype blocks, as we define them. Understanding and applying the concept of the haplotype block will be essential to fully exploit long and linked-read sequencing technologies.},
  author       = {Shipilina, Daria and Pal, Arka and Stankowski, Sean and Chan, Yingguang Frank and Barton, Nicholas H},
  issn         = {1365-294X},
  journal      = {Molecular Ecology},
  keywords     = {Genetics, Ecology, Evolution, Behavior and Systematics},
  number       = {6},
  pages        = {1441--1457},
  publisher    = {Wiley},
  title        = {{On the origin and structure of haplotype blocks}},
  doi          = {10.1111/mec.16793},
  volume       = {32},
  year         = {2023},
}

@article{12861,
  abstract     = {The field of indirect reciprocity investigates how social norms can foster cooperation when individuals continuously monitor and assess each other’s social interactions. By adhering to certain social norms, cooperating individuals can improve their reputation and, in turn, receive benefits from others. Eight social norms, known as the “leading eight,” have been shown to effectively promote the evolution of cooperation as long as information is public and reliable. These norms categorize group members as either ‘good’ or ‘bad’. In this study, we examine a scenario where individuals instead assign nuanced reputation scores to each other, and only cooperate with those whose reputation exceeds a certain threshold. We find both analytically and through simulations that such quantitative assessments are error-correcting, thus facilitating cooperation in situations where information is private and unreliable. Moreover, our results identify four specific norms that are robust to such conditions, and may be relevant for helping to sustain cooperation in natural populations.},
  author       = {Schmid, Laura and Ekbatani, Farbod and Hilbe, Christian and Chatterjee, Krishnendu},
  issn         = {2041-1723},
  journal      = {Nature Communications},
  publisher    = {Springer Nature},
  title        = {{Quantitative assessment can stabilize indirect reciprocity under imperfect information}},
  doi          = {10.1038/s41467-023-37817-x},
  volume       = {14},
  year         = {2023},
}

@inproceedings{10774,
  abstract     = {We study the problem of specifying sequential information-flow properties of systems. Information-flow properties are hyperproperties, as they compare different traces of a system. Sequential information-flow properties can express changes, over time, in the information-flow constraints. For example, information-flow constraints during an initialization phase of a system may be different from information-flow constraints that are required during the operation phase. We formalize several variants of interpreting sequential information-flow constraints, which arise from different assumptions about what can be observed of the system. For this purpose, we introduce a first-order logic, called Hypertrace Logic, with both trace and time quantifiers for specifying linear-time hyperproperties. We prove that HyperLTL, which corresponds to a fragment of Hypertrace Logic with restricted quantifier prefixes, cannot specify the majority of the studied variants of sequential information flow, including all variants in which the transition between sequential phases (such as initialization and operation) happens asynchronously. Our results rely on new equivalences between sets of traces that cannot be distinguished by certain classes of formulas from Hypertrace Logic. This presents a new approach to proving inexpressiveness results for HyperLTL.},
  author       = {Bartocci, Ezio and Ferrere, Thomas and Henzinger, Thomas A and Nickovic, Dejan and Da Costa, Ana Oliveira},
  booktitle    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  isbn         = {9783030945824},
  issn         = {1611-3349},
  location     = {Philadelphia, PA, United States},
  pages        = {1--19},
  publisher    = {Springer Nature},
  title        = {{Flavors of sequential information flow}},
  doi          = {10.1007/978-3-030-94583-1_1},
  volume       = {13182},
  year         = {2022},
}

@inproceedings{10891,
  abstract     = {We present a formal framework for the online black-box monitoring of software using monitors with quantitative verdict functions. Quantitative verdict functions have several advantages. First, quantitative monitors can be approximate, i.e., the value of the verdict function does not need to correspond exactly to the value of the property under observation. Second, quantitative monitors can be quantified universally, i.e., for every possible observed behavior, the monitor tries to make the best effort to estimate the value of the property under observation. Third, quantitative monitors can watch boolean as well as quantitative properties, such as average response time. Fourth, quantitative monitors can use non-finite-state resources, such as counters. As a consequence, quantitative monitors can be compared according to how many resources they use (e.g., the number of counters) and how precisely they approximate the property under observation. This allows for a rich spectrum of cost-precision trade-offs in monitoring software.},
  author       = {Henzinger, Thomas A},
  booktitle    = {Software Verification},
  isbn         = {9783030955601},
  issn         = {1611-3349},
  location     = {New Haven, CT, United States},
  pages        = {3--6},
  publisher    = {Springer Nature},
  title        = {{Quantitative monitoring of software}},
  doi          = {10.1007/978-3-030-95561-8_1},
  volume       = {13124},
  year         = {2022},
}

@phdthesis{11362,
  abstract     = {Deep learning has enabled breakthroughs in challenging computing problems and has emerged as the standard problem-solving tool for computer vision and natural language processing tasks.
One exception to this trend is safety-critical tasks where robustness and resilience requirements contradict the black-box nature of neural networks. 
To deploy deep learning methods for these tasks, it is vital to provide guarantees on neural network agents' safety and robustness criteria. 
This can be achieved by developing formal verification methods to verify the safety and robustness properties of neural networks.

Our goal is to design, develop and assess safety verification methods for neural networks to improve their reliability and trustworthiness in real-world applications.
This thesis establishes techniques for the verification of compressed and adversarially trained models as well as the design of novel neural networks for verifiably safe decision-making.

First, we establish the problem of verifying quantized neural networks. Quantization is a technique that trades numerical precision for the computational efficiency of running a neural network and is widely adopted in industry.
We show that neglecting the reduced precision when verifying a neural network can lead to wrong conclusions about the robustness and safety of the network, highlighting that novel techniques for quantized network verification are necessary. We introduce several bit-exact verification methods explicitly designed for quantized neural networks and experimentally confirm on realistic networks that the network's robustness and other formal properties are affected by the quantization.

Furthermore, we perform a case study providing evidence that adversarial training, a standard technique for making neural networks more robust, has detrimental effects on the network's performance. This robustness-accuracy tradeoff has been studied before regarding the accuracy obtained on classification datasets where each data point is independent of all other data points. On the other hand, we investigate the tradeoff empirically in robot learning settings where a both, a high accuracy and a high robustness, are desirable.
Our results suggest that the negative side-effects of adversarial training outweigh its robustness benefits in practice.

Finally, we consider the problem of verifying safety when running a Bayesian neural network policy in a feedback loop with systems over the infinite time horizon. Bayesian neural networks are probabilistic models for learning uncertainties in the data and are therefore often used on robotic and healthcare applications where data is inherently stochastic.
We introduce a method for recalibrating Bayesian neural networks so that they yield probability distributions over safe decisions only.
Our method learns a safety certificate that guarantees safety over the infinite time horizon to determine which decisions are safe in every possible state of the system.
We demonstrate the effectiveness of our approach on a series of reinforcement learning benchmarks.},
  author       = {Lechner, Mathias},
  isbn         = {978-3-99078-017-6},
  keywords     = {neural networks, verification, machine learning},
  pages        = {124},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Learning verifiable representations}},
  doi          = {10.15479/at:ista:11362},
  year         = {2022},
}

@inproceedings{12010,
  abstract     = {World models learn behaviors in a latent imagination space to enhance the sample-efficiency of deep reinforcement learning (RL) algorithms. While learning world models for high-dimensional observations (e.g., pixel inputs) has become practicable on standard RL benchmarks and some games, their effectiveness in real-world robotics applications has not been explored. In this paper, we investigate how such agents generalize to real-world autonomous vehicle control tasks, where advanced model-free deep RL algorithms fail. In particular, we set up a series of time-lap tasks for an F1TENTH racing robot, equipped with a high-dimensional LiDAR sensor, on a set of test tracks with a gradual increase in their complexity. In this continuous-control setting, we show that model-based agents capable of learning in imagination substantially outperform model-free agents with respect to performance, sample efficiency, successful task completion, and generalization. Moreover, we show that the generalization ability of model-based agents strongly depends on the choice of their observation model. We provide extensive empirical evidence for the effectiveness of world models provided with long enough memory horizons in sim2real tasks.},
  author       = {Brunnbauer, Axel and Berducci, Luigi and Brandstatter, Andreas and Lechner, Mathias and Hasani, Ramin and Rus, Daniela and Grosu, Radu},
  booktitle    = {2022 International Conference on Robotics and Automation},
  isbn         = {9781728196817},
  issn         = {1050-4729},
  location     = {Philadelphia, PA, United States},
  pages        = {7513--7520},
  publisher    = {IEEE},
  title        = {{Latent imagination facilitates zero-shot transfer in autonomous racing}},
  doi          = {10.1109/ICRA46639.2022.9811650},
  year         = {2022},
}

@article{12147,
  abstract     = {Continuous-time neural networks are a class of machine learning systems that can tackle representation learning on spatiotemporal decision-making tasks. These models are typically represented by continuous differential equations. However, their expressive power when they are deployed on computers is bottlenecked by numerical differential equation solvers. This limitation has notably slowed down the scaling and understanding of numerous natural physical phenomena such as the dynamics of nervous systems. Ideally, we would circumvent this bottleneck by solving the given dynamical system in closed form. This is known to be intractable in general. Here, we show that it is possible to closely approximate the interaction between neurons and synapses—the building blocks of natural and artificial neural networks—constructed by liquid time-constant networks efficiently in closed form. To this end, we compute a tightly bounded approximation of the solution of an integral appearing in liquid time-constant dynamics that has had no known closed-form solution so far. This closed-form solution impacts the design of continuous-time and continuous-depth neural models. For instance, since time appears explicitly in closed form, the formulation relaxes the need for complex numerical solvers. Consequently, we obtain models that are between one and five orders of magnitude faster in training and inference compared with differential equation-based counterparts. More importantly, in contrast to ordinary differential equation-based continuous networks, closed-form networks can scale remarkably well compared with other deep learning instances. Lastly, as these models are derived from liquid networks, they show good performance in time-series modelling compared with advanced recurrent neural network models.},
  author       = {Hasani, Ramin and Lechner, Mathias and Amini, Alexander and Liebenwein, Lucas and Ray, Aaron and Tschaikowski, Max and Teschl, Gerald and Rus, Daniela},
  issn         = {2522-5839},
  journal      = {Nature Machine Intelligence},
  keywords     = {Artificial Intelligence, Computer Networks and Communications, Computer Vision and Pattern Recognition, Human-Computer Interaction, Software},
  number       = {11},
  pages        = {992--1003},
  publisher    = {Springer Nature},
  title        = {{Closed-form continuous-time neural networks}},
  doi          = {10.1038/s42256-022-00556-7},
  volume       = {4},
  year         = {2022},
}

@article{12510,
  abstract     = {We introduce a new statistical verification algorithm that formally quantifies the behavioral robustness of any time-continuous process formulated as a continuous-depth model. Our algorithm solves a set of global optimization (Go) problems over a given time horizon to construct a tight enclosure (Tube) of the set of all process executions starting from a ball of initial states. We call our algorithm GoTube. Through its construction, GoTube ensures that the bounding tube is conservative up to a desired probability and up to a desired tightness.
 GoTube is implemented in JAX and optimized to scale to complex continuous-depth neural network models. Compared to advanced reachability analysis tools for time-continuous neural networks, GoTube does not accumulate overapproximation errors between time steps and avoids the infamous wrapping effect inherent in symbolic techniques. We show that GoTube substantially outperforms state-of-the-art verification tools in terms of the size of the initial ball, speed, time-horizon, task completion, and scalability on a large set of experiments.
 GoTube is stable and sets the state-of-the-art in terms of its ability to scale to time horizons well beyond what has been previously possible.},
  author       = {Gruenbacher, Sophie A. and Lechner, Mathias and Hasani, Ramin and Rus, Daniela and Henzinger, Thomas A and Smolka, Scott A. and Grosu, Radu},
  isbn         = {9781577358350},
  issn         = {2374-3468},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  keywords     = {General Medicine},
  number       = {6},
  pages        = {6755--6764},
  publisher    = {Association for the Advancement of Artificial Intelligence},
  title        = {{GoTube: Scalable statistical verification of continuous-depth models}},
  doi          = {10.1609/aaai.v36i6.20631},
  volume       = {36},
  year         = {2022},
}

@inproceedings{10665,
  abstract     = {Formal verification of neural networks is an active topic of research, and recent advances have significantly increased the size of the networks that verification tools can handle. However, most methods are designed for verification of an idealized model of the actual network which works over real arithmetic and ignores rounding imprecisions. This idealization is in stark contrast to network quantization, which is a technique that trades numerical precision for computational efficiency and is, therefore, often applied in practice. Neglecting rounding errors of such low-bit quantized neural networks has been shown to lead to wrong conclusions about the network’s correctness. Thus, the desired approach for verifying quantized neural networks would be one that takes these rounding errors
into account. In this paper, we show that verifying the bitexact implementation of quantized neural networks with bitvector specifications is PSPACE-hard, even though verifying idealized real-valued networks and satisfiability of bit-vector specifications alone are each in NP. Furthermore, we explore several practical heuristics toward closing the complexity gap between idealized and bit-exact verification. In particular, we propose three techniques for making SMT-based verification of quantized neural networks more scalable. Our experiments demonstrate that our proposed methods allow a speedup of up to three orders of magnitude over existing approaches.},
  author       = {Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde},
  booktitle    = {Proceedings of the AAAI Conference on Artificial Intelligence},
  isbn         = {978-1-57735-866-4},
  issn         = {2374-3468},
  location     = {Virtual},
  number       = {5A},
  pages        = {3787--3795},
  publisher    = {AAAI Press},
  title        = {{Scalable verification of quantized neural networks}},
  volume       = {35},
  year         = {2021},
}

@inproceedings{10666,
  abstract     = {Adversarial training is an effective method to train deep learning models that are resilient to norm-bounded perturbations, with the cost of nominal performance drop. While adversarial training appears to enhance the robustness and safety of a deep model deployed in open-world decision-critical applications, counterintuitively, it induces undesired behaviors in robot learning settings. In this paper, we show theoretically and experimentally that neural controllers obtained via adversarial training are subjected to three types of defects, namely transient, systematic, and conditional errors. We first generalize adversarial training to a safety-domain optimization scheme allowing for more generic specifications. We then prove that such a learning process tends to cause certain error profiles. We support our theoretical results by a thorough experimental safety analysis in a robot-learning task. Our results suggest that adversarial training is not yet ready for robot learning.},
  author       = {Lechner, Mathias and Hasani, Ramin and Grosu, Radu and Rus, Daniela and Henzinger, Thomas A},
  booktitle    = {2021 IEEE International Conference on Robotics and Automation},
  isbn         = {978-1-7281-9078-5},
  issn         = {2577-087X},
  location     = {Xi'an, China},
  pages        = {4140--4147},
  publisher    = {IEEE},
  title        = {{Adversarial training is not ready for robot learning}},
  doi          = {10.1109/ICRA48506.2021.9561036},
  year         = {2021},
}

@inproceedings{10667,
  abstract     = {Bayesian neural networks (BNNs) place distributions over the weights of a neural network to model uncertainty in the data and the network's prediction. We consider the problem of verifying safety when running a Bayesian neural network policy in a feedback loop with infinite time horizon systems. Compared to the existing sampling-based approaches, which are inapplicable to the infinite time horizon setting, we train a separate deterministic neural network that serves as an infinite time horizon safety certificate. In particular, we show that the certificate network guarantees the safety of the system over a subset of the BNN weight posterior's support. Our method first computes a safe weight set and then alters the BNN's weight posterior to reject samples outside this set. Moreover, we show how to extend our approach to a safe-exploration reinforcement learning setting, in order to avoid unsafe trajectories during the training of the policy. We evaluate our approach on a series of reinforcement learning benchmarks, including non-Lyapunovian safety specifications.},
  author       = {Lechner, Mathias and Žikelić, Đorđe and Chatterjee, Krishnendu and Henzinger, Thomas A},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  location     = {Virtual},
  title        = {{Infinite time horizon safety of Bayesian neural networks}},
  doi          = {10.48550/arXiv.2111.03165},
  year         = {2021},
}

@inproceedings{10668,
  abstract     = {Robustness to variations in lighting conditions is a key objective for any deep vision system. To this end, our paper extends the receptive field of convolutional neural networks with two residual components, ubiquitous in the visual processing system of vertebrates: On-center and off-center pathways, with an excitatory center and inhibitory surround; OOCS for short. The On-center pathway is excited by the presence of a light stimulus in its center, but not in its surround, whereas the Off-center pathway is excited by the absence of a light stimulus in its center, but not in its surround. We design OOCS pathways via a difference of Gaussians, with their variance computed analytically from the size of the receptive fields. OOCS pathways complement each other in their response to light stimuli, ensuring this way a strong edge-detection capability, and as a result an accurate and robust inference under challenging lighting conditions. We provide extensive empirical evidence showing that networks supplied with OOCS pathways gain accuracy and illumination-robustness from the novel edge representation, compared to other baselines.},
  author       = {Babaiee, Zahra and Hasani, Ramin and Lechner, Mathias and Rus, Daniela and Grosu, Radu},
  booktitle    = {Proceedings of the 38th International Conference on Machine Learning},
  issn         = {2640-3498},
  location     = {Virtual},
  pages        = {478--489},
  publisher    = {ML Research Press},
  title        = {{On-off center-surround receptive fields for accurate and robust image classification}},
  volume       = {139},
  year         = {2021},
}

@inproceedings{10669,
  abstract     = {We show that Neural ODEs, an emerging class of timecontinuous neural networks, can be verified by solving a set of global-optimization problems. For this purpose, we introduce Stochastic Lagrangian Reachability (SLR), an
abstraction-based technique for constructing a tight Reachtube (an over-approximation of the set of reachable states
over a given time-horizon), and provide stochastic guarantees in the form of confidence intervals for the Reachtube bounds. SLR inherently avoids the infamous wrapping effect (accumulation of over-approximation errors) by performing local optimization steps to expand safe regions instead of repeatedly forward-propagating them as is done by deterministic reachability methods. To enable fast local optimizations, we introduce a novel forward-mode adjoint sensitivity method to compute gradients without the need for backpropagation. Finally, we establish asymptotic and non-asymptotic convergence rates for SLR.},
  author       = {Grunbacher, Sophie and Hasani, Ramin and Lechner, Mathias and Cyranka, Jacek and Smolka, Scott A and Grosu, Radu},
  booktitle    = {Proceedings of the AAAI Conference on Artificial Intelligence},
  isbn         = {978-1-57735-866-4},
  issn         = {2374-3468},
  location     = {Virtual},
  number       = {13},
  pages        = {11525--11535},
  publisher    = {AAAI Press},
  title        = {{On the verification of neural ODEs with stochastic guarantees}},
  volume       = {35},
  year         = {2021},
}

@inproceedings{10670,
  abstract     = {Imitation learning enables high-fidelity, vision-based learning of policies within rich, photorealistic environments. However, such techniques often rely on traditional discrete-time neural models and face difficulties in generalizing to domain shifts by failing to account for the causal relationships between the agent and the environment. In this paper, we propose a theoretical and experimental framework for learning causal representations using continuous-time neural networks, specifically over their discrete-time counterparts. We evaluate our method in the context of visual-control learning of drones over a series of complex tasks, ranging from short- and long-term navigation, to chasing static and dynamic objects through photorealistic environments. Our results demonstrate that causal continuous-time
deep models can perform robust navigation tasks, where advanced recurrent models fail. These models learn complex causal control representations directly from raw visual inputs and scale to solve a variety of tasks using imitation learning.},
  author       = {Vorbach, Charles J and Hasani, Ramin and Amini, Alexander and Lechner, Mathias and Rus, Daniela},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  location     = {Virtual},
  title        = {{Causal navigation by continuous-time neural networks}},
  year         = {2021},
}

@inproceedings{10671,
  abstract     = {We introduce a new class of time-continuous recurrent neural network models. Instead of declaring a learning system’s dynamics by implicit nonlinearities, we construct networks of linear first-order dynamical systems modulated via nonlinear interlinked gates. The resulting models represent dynamical systems with varying (i.e., liquid) time-constants coupled to their hidden state, with outputs being computed by numerical differential equation solvers. These neural networks exhibit stable and bounded behavior, yield superior expressivity within the family of neural ordinary differential equations, and give rise to improved performance on time-series prediction tasks. To demonstrate these properties, we first take a theoretical approach to find bounds over their dynamics, and compute their expressive power by the trajectory length measure in a latent trajectory space. We then conduct a series of time-series prediction experiments to manifest the approximation capability of Liquid Time-Constant Networks (LTCs) compared to classical and modern RNNs.},
  author       = {Hasani, Ramin and Lechner, Mathias and Amini, Alexander and Rus, Daniela and Grosu, Radu},
  booktitle    = {Proceedings of the AAAI Conference on Artificial Intelligence},
  isbn         = {978-1-57735-866-4},
  issn         = {2374-3468},
  location     = {Virtual},
  number       = {9},
  pages        = {7657--7666},
  publisher    = {AAAI Press},
  title        = {{Liquid time-constant networks}},
  volume       = {35},
  year         = {2021},
}

@article{10674,
  abstract     = {In two-player games on graphs, the players move a token through a graph to produce an infinite path, which determines the winner of the game. Such games are central in formal methods since they model the interaction between a non-terminating system and its environment. In bidding games the players bid for the right to move the token: in each round, the players simultaneously submit bids, and the higher bidder moves the token and pays the other player. Bidding games are known to have a clean and elegant mathematical structure that relies on the ability of the players to submit arbitrarily small bids. Many applications, however, require a fixed granularity for the bids, which can represent, for example, the monetary value expressed in cents. We study, for the first time, the combination of discrete-bidding and infinite-duration games. Our most important result proves that these games form a large determined subclass of concurrent games, where determinacy is the strong property that there always exists exactly one player who can guarantee winning the game. In particular, we show that, in contrast to non-discrete bidding games, the mechanism with which tied bids are resolved plays an important role in discrete-bidding games. We study several natural tie-breaking mechanisms and show that, while some do not admit determinacy, most natural mechanisms imply determinacy for every pair of initial budgets.},
  author       = {Aghajohari, Milad and Avni, Guy and Henzinger, Thomas A},
  issn         = {1860-5974},
  journal      = {Logical Methods in Computer Science},
  keywords     = {computer science, computer science and game theory, logic in computer science},
  number       = {1},
  pages        = {10:1--10:23},
  publisher    = {International Federation for Computational Logic},
  title        = {{Determinacy in discrete-bidding infinite-duration games}},
  doi          = {10.23638/LMCS-17(1:10)2021},
  volume       = {17},
  year         = {2021},
}

@inproceedings{10688,
  abstract     = {Civl is a static verifier for concurrent programs designed around the conceptual framework of layered refinement,
which views the task of verifying a program as a sequence of program simplification steps each justified by its own invariant. Civl verifies a layered concurrent program that compactly expresses all the programs in this sequence and the supporting invariants. This paper presents the design and implementation of the Civl verifier.},
  author       = {Kragl, Bernhard and Qadeer, Shaz},
  booktitle    = {Proceedings of the 21st Conference on Formal Methods in Computer-Aided Design},
  editor       = {Piskac, Ruzica and Whalen, Michael W.},
  isbn         = {978-3-85448-046-4},
  location     = {Virtual},
  pages        = {143--152},
  publisher    = {TU Wien Academic Press},
  title        = {{The Civl verifier}},
  doi          = {10.34727/2021/isbn.978-3-85448-046-4_23},
  volume       = {2},
  year         = {2021},
}

% NOTE(review): entry verified against its DOI; author "Zikelic, Dorde" looks like an
% ASCII folding of "Žikelić, Đorđe" -- confirm the author's preferred spelling before
% changing, since sibling entries in this file mix UTF-8 (Dániel) and folded names.
% (BibTeX ignores text outside @...{...} entries, so these lines are comments.)
@inproceedings{10694,
  abstract     = {In a two-player zero-sum graph game the players move a token throughout a graph to produce an infinite path, which determines the winner or payoff of the game. Traditionally, the players alternate turns in moving the token. In bidding games, however, the players have budgets, and in each turn, we hold an “auction” (bidding) to determine which player moves the token: both players simultaneously submit bids and the higher bidder moves the token. The bidding mechanisms differ in their payment schemes. Bidding games were largely studied with variants of first-price bidding in which only the higher bidder pays his bid. We focus on all-pay bidding, where both players pay their bids. Finite-duration all-pay bidding games were studied and shown to be technically more challenging than their first-price counterparts. We study for the first time, infinite-duration all-pay bidding games. Our most interesting results are for mean-payoff objectives: we portray a complete picture for games played on strongly-connected graphs. We study both pure (deterministic) and mixed (probabilistic) strategies and completely characterize the optimal and almost-sure (with probability 1) payoffs the players can respectively guarantee. We show that mean-payoff games under all-pay bidding exhibit the intriguing mathematical properties of their first-price counterparts; namely, an equivalence with random-turn games in which in each turn, the player who moves is selected according to a (biased) coin toss. The equivalences for all-pay bidding are more intricate and unexpected than for first-price bidding.},
  author       = {Avni, Guy and Jecker, Ismael R and Zikelic, Dorde},
  booktitle    = {Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms},
  editor       = {Marx, Dániel},
  isbn         = {978-1-61197-646-5},
  location     = {Virtual},
  pages        = {617--636},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Infinite-duration all-pay bidding games}},
  doi          = {10.1137/1.9781611976465.38},
  year         = {2021},
}

@article{8912,
  abstract     = {For automata, synchronization, the problem of bringing an automaton to a particular state regardless of its initial state, is important. It has several applications in practice and is related to a fifty-year-old conjecture on the length of the shortest synchronizing word. Although using shorter words increases the effectiveness in practice, finding a shortest one (which is not necessarily unique) is NP-hard. For this reason, there exist various heuristics in the literature. However, high-quality heuristics such as SynchroP producing relatively shorter sequences are very expensive and can take hours when the automaton has tens of thousands of states. The SynchroP heuristic has been frequently used as a benchmark to evaluate the performance of the new heuristics. In this work, we first improve the runtime of SynchroP and its variants by using algorithmic techniques. We then focus on adapting SynchroP for many-core architectures, and overall, we obtain more than 1000× speedup on GPUs compared to naive sequential implementation that has been frequently used as a benchmark to evaluate new heuristics in the literature. We also propose two SynchroP variants and evaluate their performance.},
  author       = {Sarac, Naci E and Altun, Ömer Faruk and Atam, Kamil Tolga and Karahoda, Sertac and Kaya, Kamer and Yenigün, Hüsnü},
  issn         = {0957-4174},
  journal      = {Expert Systems with Applications},
  number       = {4},
  pages        = {114203},
  publisher    = {Elsevier},
  title        = {{Boosting expensive synchronizing heuristics}},
  doi          = {10.1016/j.eswa.2020.114203},
  volume       = {167},
  year         = {2021},
}

@inproceedings{9200,
  abstract     = {Formal design of embedded and cyber-physical systems relies on mathematical modeling. In this paper, we consider the model class of hybrid automata whose dynamics are defined by affine differential equations. Given a set of time-series data, we present an algorithmic approach to synthesize a hybrid automaton exhibiting behavior that is close to the data, up to a specified precision, and changes in synchrony with the data. A fundamental problem in our synthesis algorithm is to check membership of a time series in a hybrid automaton. Our solution integrates reachability and optimization techniques for affine dynamical systems to obtain both a sufficient and a necessary condition for membership, combined in a refinement framework. The algorithm processes one time series at a time and hence can be interrupted, provide an intermediate result, and be resumed. We report experimental results demonstrating the applicability of our synthesis approach.},
  author       = {Garcia Soto, Miriam and Henzinger, Thomas A and Schilling, Christian},
  booktitle    = {HSCC '21: Proceedings of the 24th International Conference on Hybrid Systems: Computation and Control},
  isbn         = {9781450383394},
  keywords     = {hybrid automaton, membership, system identification},
  location     = {Nashville, TN, United States},
  publisher    = {Association for Computing Machinery},
  title        = {{Synthesis of hybrid automata with affine dynamics from time-series data}},
  doi          = {10.1145/3447928.3456704},
  archiveprefix = {arXiv},
  eprint       = {2102.12734},
  year         = {2021},
}

