@inproceedings{1875,
  abstract     = {We present a formal framework for repairing infinite-state, imperative, sequential programs, with (possibly recursive) procedures and multiple assertions; the framework can generate repaired programs by modifying the original erroneous program in multiple program locations, and can ensure the readability of the repaired program using user-defined expression templates; the framework also generates a set of inductive assertions that serve as a proof of correctness of the repaired program. As a step toward integrating programmer intent and intuition in automated program repair, we present a cost-aware formulation: given a cost function associated with permissible statement modifications, the goal is to ensure that the total program modification cost does not exceed a given repair budget. As part of our predicate abstraction-based solution framework, we present a sound and complete algorithm for repair of Boolean programs. We have developed a prototype tool based on SMT solving and used it successfully to repair diverse errors in benchmark C programs.},
  author       = {Samanta, Roopsha and Olivo, Oswaldo and Emerson, E. Allen},
  editor       = {Müller-Olm, Markus and Seidl, Helmut},
  location     = {Munich, Germany},
  pages        = {268 -- 284},
  publisher    = {Springer},
  title        = {{Cost-aware automatic program repair}},
  doi          = {10.1007/978-3-319-10936-7_17},
  volume       = {8723},
  year         = {2014},
}

@inproceedings{2026,
  abstract     = {We present a tool for translating LTL formulae into deterministic ω-automata. It is the first tool covering the whole of LTL that does not use Safra's determinization or any of its variants. This leads to smaller automata. The tool produces several outputs: firstly, deterministic Rabin automata, which are the standard input for probabilistic model checking, e.g. for the probabilistic model checker PRISM; secondly, deterministic generalized Rabin automata, which can also be used for probabilistic model checking and are sometimes smaller by orders of magnitude. We also link our tool to PRISM and show that this leads to a significant speed-up of probabilistic LTL model checking, especially with the generalized Rabin automata.},
  author       = {Komárková, Zuzana and Kretinsky, Jan},
  booktitle    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  editor       = {Cassez, Franck and Raskin, Jean-François},
  location     = {Sydney, Australia},
  pages        = {235 -- 241},
  publisher    = {Springer},
  title        = {{Rabinizer 3: Safraless translation of LTL to small deterministic automata}},
  doi          = {10.1007/978-3-319-11936-6_17},
  volume       = {8837},
  year         = {2014},
}

@inproceedings{2027,
  abstract     = {We present a general framework for applying machine-learning algorithms to the verification of Markov decision processes (MDPs). The primary goal of these techniques is to improve performance by avoiding an exhaustive exploration of the state space. Our framework focuses on probabilistic reachability, which is a core property for verification, and is illustrated through two distinct instantiations. The first assumes that full knowledge of the MDP is available, and performs a heuristic-driven partial exploration of the model, yielding precise lower and upper bounds on the required probability. The second tackles the case where we may only sample the MDP, and yields probabilistic guarantees, again in terms of both the lower and upper bounds, which provide efficient stopping criteria for the approximation. The latter is the first extension of statistical model checking for unbounded properties in MDPs. In contrast with other related techniques, our approach is not restricted to time-bounded (finite-horizon) or discounted properties, nor does it assume any particular properties of the MDP. We also show how our methods extend to LTL objectives. We present experimental results showing the performance of our framework on several examples.},
  author       = {Brázdil, Tomáš and Chatterjee, Krishnendu and Chmelik, Martin and Forejt, Vojtěch and Kretinsky, Jan and Kwiatkowska, Marta and Parker, David and Ujma, Mateusz},
  booktitle    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  editor       = {Cassez, Franck and Raskin, Jean-François},
  location     = {Sydney, Australia},
  pages        = {98 -- 114},
  publisher    = {Springer},
  title        = {{Verification of Markov decision processes using learning algorithms}},
  doi          = {10.1007/978-3-319-11936-6_8},
  volume       = {8837},
  year         = {2014},
}

@article{2038,
  abstract     = {Recently, there has been an effort to add quantitative objectives to formal verification and synthesis. We introduce and investigate the extension of temporal logics with quantitative atomic assertions. At the heart of quantitative objectives lies the accumulation of values along a computation. It is often the accumulated sum, as with energy objectives, or the accumulated average, as with mean-payoff objectives. We investigate the extension of temporal logics with the prefix-accumulation assertions Sum(v) ≥ c and Avg(v) ≥ c, where v is a numeric (or Boolean) variable of the system, c is a constant rational number, and Sum(v) and Avg(v) denote the accumulated sum and average of the values of v from the beginning of the computation up to the current point in time. We also allow the path-accumulation assertions LimInfAvg(v) ≥ c and LimSupAvg(v) ≥ c, referring to the average value along an entire infinite computation. We study the border of decidability for such quantitative extensions of various temporal logics. In particular, we show that extending the fragment of CTL that has only the EX, EF, AX, and AG temporal modalities with both prefix-accumulation assertions, or extending LTL with both path-accumulation assertions, results in temporal logics whose model-checking problem is decidable. Moreover, the prefix-accumulation assertions may be generalized with "controlled accumulation," allowing one, for example, to specify constraints on the average waiting time between a request and a grant. On the negative side, we show that this branching-time logic is, in a sense, the maximal logic with one or both of the prefix-accumulation assertions that permits a decidable model-checking procedure. Extending a temporal logic that has the EG or EU modalities, such as CTL or LTL, makes the problem undecidable.},
  author       = {Boker, Udi and Chatterjee, Krishnendu and Henzinger, Thomas A and Kupferman, Orna},
  journal      = {ACM Transactions on Computational Logic (TOCL)},
  number       = {4},
  publisher    = {ACM},
  title        = {{Temporal specifications with accumulative values}},
  doi          = {10.1145/2629686},
  volume       = {15},
  year         = {2014},
}

@inproceedings{2053,
  abstract     = {In contrast to the usual understanding of probabilistic systems as stochastic processes, recently these systems have also been regarded as transformers of probabilities. In this paper, we give a natural definition of strong bisimulation for probabilistic systems corresponding to this view that treats probability distributions as first-class citizens. Our definition applies in the same way to discrete systems as well as to systems with uncountable state and action spaces. Several examples demonstrate that our definition refines the understanding of behavioural equivalences of probabilistic systems. In particular, it solves a longstanding open problem concerning the representation of memoryless continuous time by memoryfull continuous time. Finally, we give algorithms for computing this bisimulation not only for finite but also for classes of uncountably infinite systems.},
  author       = {Hermanns, Holger and Krčál, Jan and Kretinsky, Jan},
  booktitle    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  editor       = {Baldan, Paolo and Gorla, Daniele},
  location     = {Rome, Italy},
  pages        = {249 -- 265},
  publisher    = {Springer},
  title        = {{Probabilistic bisimulation: Naturally on distributions}},
  doi          = {10.1007/978-3-662-44584-6_18},
  volume       = {8704},
  year         = {2014},
}

@article{2056,
  abstract     = {We consider a continuous-time Markov chain (CTMC) whose state space is partitioned into aggregates, and each aggregate is assigned a probability measure. A sufficient condition for defining a CTMC over the aggregates is presented as a variant of weak lumpability, which also characterizes when the measure over the original process can be recovered from that of the aggregated one. We show how the applicability of de-aggregation depends on the initial distribution. The application section is devoted to illustrating how the developed theory aids in reducing CTMC models of biochemical systems, particularly in connection to protein-protein interactions. We assume that the model is written by a biologist in the form of site-graph rewrite rules. Site-graph rewrite rules compactly express that, often, only a local context of a protein (instead of a full molecular species) needs to be in a certain configuration in order to trigger a reaction event. This observation leads to suitable aggregate Markov chains with smaller state spaces, thereby providing sufficient reduction in computational complexity. This is further exemplified in two case studies: simple unbounded polymerization and early EGFR/insulin crosstalk.},
  author       = {Ganguly, Arnab and Petrov, Tatjana and Koeppl, Heinz},
  journal      = {Journal of Mathematical Biology},
  number       = {3},
  pages        = {767 -- 797},
  publisher    = {Springer},
  title        = {{Markov chain aggregation and its applications to combinatorial reaction networks}},
  doi          = {10.1007/s00285-013-0738-7},
  volume       = {69},
  year         = {2014},
}

@inproceedings{2063,
  abstract     = {We consider Markov decision processes (MDPs), which are a standard model for probabilistic systems. We focus on qualitative properties for MDPs that can express that desired behaviors of the system arise almost-surely (with probability 1) or with positive probability. We introduce a new simulation relation to capture the refinement relation of MDPs with respect to qualitative properties, and present discrete graph-theoretic algorithms with quadratic complexity to compute the simulation relation. We present an automated technique for assume-guarantee style reasoning for compositional analysis of MDPs with qualitative properties by giving a counterexample-guided abstraction-refinement approach to compute our new simulation relation. We have implemented our algorithms and show that the compositional analysis leads to significant improvements.},
  author       = {Chatterjee, Krishnendu and Chmelik, Martin and Daca, Przemyslaw},
  location     = {Vienna, Austria},
  pages        = {473 -- 490},
  publisher    = {Springer},
  title        = {{CEGAR for qualitative analysis of probabilistic systems}},
  doi          = {10.1007/978-3-319-08867-9_31},
  volume       = {8559},
  year         = {2014},
}

@inproceedings{2167,
  abstract     = {Model-based testing is a promising technology for black-box software and hardware testing, in which test cases are generated automatically from high-level specifications. Nowadays, systems typically consist of multiple interacting components and, due to their complexity, testing presents a considerable portion of the effort and cost in the design process. Exploiting the compositional structure of system specifications can considerably reduce the effort in model-based testing. Moreover, inferring properties about the system from testing its individual components allows the designer to reduce the amount of integration testing. In this paper, we study compositional properties of the ioco-testing theory. We propose a new approach to composition and hiding operations, inspired by contract-based design and interface theories. These operations preserve behaviors that are compatible under composition and hiding, and prune away incompatible ones. The resulting specification characterizes the input sequences for which the unit testing of components is sufficient to infer the correctness of component integration without the need for further tests. We provide a methodology that uses these results to minimize integration testing effort, but also to detect potential weaknesses in specifications. While we focus on asynchronous models and the ioco conformance relation, the resulting methodology can be applied to a broader class of systems.},
  author       = {Daca, Przemyslaw and Henzinger, Thomas A and Krenn, Willibald and Nickovic, Dejan},
  booktitle    = {IEEE 7th International Conference on Software Testing, Verification and Validation},
  isbn         = {978-1-4799-2255-0},
  issn         = {2159-4848},
  location     = {Cleveland, USA},
  publisher    = {IEEE},
  title        = {{Compositional specifications for IOCO testing}},
  doi          = {10.1109/ICST.2014.50},
  year         = {2014},
}

@article{2187,
  abstract     = {Systems should not only be correct but also robust in the sense that they behave reasonably in unexpected situations. This article addresses the synthesis of robust reactive systems from temporal specifications. Existing methods allow arbitrary behavior if assumptions in the specification are violated. To overcome this, we define two robustness notions, combine them, and show how to enforce them in synthesis. The first notion applies to safety properties: If safety assumptions are violated temporarily, we require that the system recovers to normal operation with as few errors as possible. The second notion requires that, if liveness assumptions are violated, as many guarantees as possible should be fulfilled nevertheless. We present a synthesis procedure achieving this for the important class of GR(1) specifications, and establish complexity bounds. We also present an implementation of a special case of robustness, and show experimental results.},
  author       = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Hofferek, Georg and Jobstmann, Barbara and Könighofer, Bettina and Könighofer, Robert},
  journal      = {Acta Informatica},
  number       = {3-4},
  pages        = {193 -- 220},
  publisher    = {Springer},
  title        = {{Synthesizing robust systems}},
  doi          = {10.1007/s00236-013-0191-5},
  volume       = {51},
  year         = {2014},
}

@inproceedings{2190,
  abstract     = {We present a new algorithm to construct a (generalized) deterministic Rabin automaton for an LTL formula φ. The automaton is the product of a master automaton and an array of slave automata, one for each G-subformula of φ. The slave automaton for G ψ is in charge of recognizing whether FG ψ holds. As opposed to standard determinization procedures, the states of all our automata have a clear logical structure, which allows for various optimizations. Our construction subsumes former algorithms for fragments of LTL. Experimental results show improvement in the sizes of the resulting automata compared to existing methods.},
  author       = {Esparza, Javier and Kretinsky, Jan},
  pages        = {192 -- 208},
  publisher    = {Springer},
  title        = {{From LTL to deterministic automata: A Safraless compositional approach}},
  doi          = {10.1007/978-3-319-08867-9_13},
  volume       = {8559},
  year         = {2014},
}

@inproceedings{2217,
  abstract     = {As hybrid systems involve continuous behaviors, they should be evaluated by quantitative methods, rather than qualitative methods. In this paper we adapt a quantitative framework, called model measuring, to the hybrid systems domain. The model-measuring problem asks, given a model M and a specification, what is the maximal distance such that all models within that distance from M satisfy (or violate) the specification. A distance function on models is given as part of the input of the problem. Distances, especially those related to continuous behaviors, are more natural in the hybrid case than in the discrete case. We are interested in distances represented by monotonic hybrid automata, a hybrid counterpart of (discrete) weighted automata, whose recognized timed languages are monotone (w.r.t. inclusion) in the values of parameters.

The contributions of this paper are twofold. First, we give sufficient conditions under which the model-measuring problem can be solved. Second, we discuss the modeling of distances and applications of the model-measuring problem.},
  author       = {Henzinger, Thomas A and Otop, Jan},
  booktitle    = {Proceedings of the 17th International Conference on Hybrid Systems: Computation and Control},
  location     = {Berlin, Germany},
  pages        = {213 -- 222},
  publisher    = {ACM},
  title        = {{Model measuring for hybrid systems}},
  doi          = {10.1145/2562059.2562130},
  year         = {2014},
}

@inproceedings{2218,
  abstract     = {While fixing concurrency bugs, program repair algorithms may introduce new concurrency bugs. We present an algorithm that avoids such regressions. The solution space is given by a set of program transformations we consider in the repair process. These include reordering of instructions within a thread and inserting atomic sections. The new algorithm learns a constraint on the space of candidate solutions, from both positive examples (error-free traces) and counterexamples (error traces). From each counterexample, the algorithm learns a constraint necessary to remove the errors. From each positive example, it learns a constraint that is necessary in order to prevent the repair from turning the trace into an error trace. We implemented the algorithm and evaluated it on simplified Linux device drivers with known bugs.},
  author       = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun and Ryzhyk, Leonid and Tarrach, Thorsten},
  isbn         = {978-3-319-08866-2},
  location     = {Vienna, Austria},
  pages        = {568 -- 584},
  publisher    = {Springer},
  title        = {{Regression-free synthesis for concurrency}},
  doi          = {10.1007/978-3-319-08867-9_38},
  volume       = {8559},
  year         = {2014},
}

@article{2233,
  abstract     = {A discounted-sum automaton (NDA) is a nondeterministic finite automaton with edge weights, valuing a run by the discounted sum of visited edge weights. More precisely, the weight in the i-th position of the run is divided by λ^i, where the discount factor λ is a fixed rational number greater than 1. The value of a word is the minimal value of the automaton runs on it. Discounted summation is a common and useful measuring scheme, especially for infinite sequences, reflecting the assumption that earlier weights are more important than later weights. Unfortunately, determinization of NDAs, which is often essential in formal verification, is, in general, not possible. We provide positive news, showing that every NDA with an integral discount factor is determinizable. We complete the picture by proving that the integers characterize exactly the discount factors that guarantee determinizability: for every nonintegral rational discount factor λ, there is a nondeterminizable λ-NDA. We also prove that the class of NDAs with integral discount factors enjoys closure under the algebraic operations min, max, addition, and subtraction, which is not the case for general NDAs nor for deterministic NDAs. For general NDAs, we look into approximate determinization, which is always possible as the influence of a word's suffix decays. We show that the naive approach, of unfolding the automaton computations up to a sufficient level, is doubly exponential in the discount factor. We provide an alternative construction for approximate determinization, which is singly exponential in the discount factor, in the precision, and in the number of states. We also prove matching lower bounds, showing that the exponential dependency on each of these three parameters cannot be avoided. All our results hold equally for automata over finite words and for automata over infinite words.},
  author       = {Boker, Udi and Henzinger, Thomas A},
  issn         = {1860-5974},
  journal      = {Logical Methods in Computer Science},
  number       = {1},
  publisher    = {International Federation of Computational Logic},
  title        = {{Exact and approximate determinization of discounted-sum automata}},
  doi          = {10.2168/LMCS-10(1:10)2014},
  volume       = {10},
  year         = {2014},
}

@inproceedings{2239,
  abstract     = {The analysis of the energy consumption of software is an important goal for quantitative formal methods. Current methods, using weighted transition systems or energy games, model the energy source as an ideal resource whose status is characterized by one number, namely the amount of remaining energy. Real batteries, however, exhibit behaviors that can deviate substantially from an ideal energy resource. Based on a discretization of a standard continuous battery model, we introduce battery transition systems. In this model, a battery is viewed as consisting of two parts: the available-charge tank and the bound-charge tank. Any charge or discharge is applied to the available-charge tank. Over time, the energy from each tank diffuses to the other tank. Battery transition systems are infinite-state systems that, not being well-structured, fall into no decidable class that is known to us. Nonetheless, we are able to prove that the ω-regular model-checking problem is decidable for battery transition systems. We also present a case study on the verification of control programs for energy-constrained semi-autonomous robots.},
  author       = {Boker, Udi and Henzinger, Thomas A and Radhakrishna, Arjun},
  isbn         = {978-1-4503-2544-8},
  location     = {San Diego, USA},
  number       = {1},
  pages        = {595 -- 606},
  publisher    = {ACM},
  title        = {{Battery transition systems}},
  doi          = {10.1145/2535838.2535875},
  volume       = {49},
  year         = {2014},
}

@misc{5411,
  abstract     = {Model-based testing is a promising technology for black-box software and hardware testing, in which test cases are generated automatically from high-level specifications. Nowadays, systems typically consist of multiple interacting components and, due to their complexity, testing presents a considerable portion of the effort and cost in the design process. Exploiting the compositional structure of system specifications can considerably reduce the effort in model-based testing. Moreover, inferring properties about the system from testing its individual components allows the designer to reduce the amount of integration testing.
In this paper, we study compositional properties of the IOCO-testing theory. We propose a new approach to composition and hiding operations, inspired by contract-based design and interface theories. These operations preserve behaviors that are compatible under composition and hiding, and prune away incompatible ones. The resulting specification characterizes the input sequences for which the unit testing of components is sufficient to infer the correctness of component integration without the need for further tests. We provide a methodology that uses these results to minimize integration testing effort, but also to detect potential weaknesses in specifications. While we focus on asynchronous models and the IOCO conformance relation, the resulting methodology can be applied to a broader class of systems.},
  author       = {Daca, Przemyslaw and Henzinger, Thomas A and Krenn, Willibald and Nickovic, Dejan},
  issn         = {2664-1690},
  pages        = {20},
  publisher    = {IST Austria},
  title        = {{Compositional specifications for IOCO testing}},
  doi          = {10.15479/AT:IST-2014-148-v2-1},
  year         = {2014},
}

@misc{5415,
  abstract     = {Recently there has been a significant effort to add quantitative properties to formal verification and synthesis. While weighted automata over finite and infinite words provide a natural and flexible framework to express quantitative properties, perhaps surprisingly, several basic system properties such as average response time cannot be expressed with weighted automata. In this work, we introduce nested weighted automata as a new formalism for expressing important quantitative properties such as average response time. We establish an almost complete decidability picture for the basic decision problems for nested weighted automata, and illustrate its applicability in several domains.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan},
  issn         = {2664-1690},
  pages        = {27},
  publisher    = {IST Austria},
  title        = {{Nested weighted automata}},
  doi          = {10.15479/AT:IST-2014-170-v1-1},
  year         = {2014},
}

@misc{5416,
  abstract     = {As hybrid systems involve continuous behaviors, they should be evaluated by quantitative methods, rather than qualitative methods. In this paper we adapt a quantitative framework, called model measuring, to the hybrid systems domain. The model-measuring problem asks, given a model M and a specification, what is the maximal distance such that all models within that distance from M satisfy (or violate) the specification. A distance function on models is given as part of the input of the problem. Distances, especially those related to continuous behaviors, are more natural in the hybrid case than in the discrete case. We are interested in distances represented by monotonic hybrid automata, a hybrid counterpart of (discrete) weighted automata, whose recognized timed languages are monotone (w.r.t. inclusion) in the values of parameters. The contributions of this paper are twofold. First, we give sufficient conditions under which the model-measuring problem can be solved. Second, we discuss the modeling of distances and applications of the model-measuring problem.},
  author       = {Henzinger, Thomas A and Otop, Jan},
  issn         = {2664-1690},
  pages        = {22},
  publisher    = {IST Austria},
  title        = {{Model measuring for hybrid systems}},
  doi          = {10.15479/AT:IST-2014-171-v1-1},
  year         = {2014},
}

@misc{5417,
  abstract     = {We define the model-measuring problem: given a model M and specification φ, what is the maximal distance ρ such that all models M' within distance ρ from M satisfy (or violate) φ. The model-measuring problem presupposes a distance function on models. We concentrate on automatic distance functions, which are defined by weighted automata.
The model-measuring problem subsumes several generalizations of the classical model-checking problem, in particular, quantitative model-checking problems that measure the degree of satisfaction of a specification, and robustness problems that measure how much a model can be perturbed without violating the specification.
We show that for automatic distance functions, and ω-regular linear-time and branching-time specifications, the model-measuring problem can be solved.
We use automata-theoretic model-checking methods for model measuring, replacing the emptiness question for standard word and tree automata by the optimal-weight question for the weighted versions of these automata. We consider weighted automata that accumulate weights by maximizing, summing, discounting, and limit averaging. 
We give several examples of using the model-measuring problem to compute various notions of robustness and quantitative satisfaction for temporal specifications.},
  author       = {Henzinger, Thomas A and Otop, Jan},
  issn         = {2664-1690},
  pages        = {14},
  publisher    = {IST Austria},
  title        = {{From model checking to model measuring}},
  doi          = {10.15479/AT:IST-2014-172-v1-1},
  year         = {2014},
}

@misc{5428,
  abstract     = {Simulation is an attractive alternative to language inclusion for automata, as it is an under-approximation of language inclusion but usually has much lower complexity. For non-deterministic automata, while language inclusion is PSPACE-complete, simulation can be computed in polynomial time. Simulation has also been extended in two orthogonal directions, namely, (1) fair simulation, for simulation over a specified set of infinite runs; and (2) quantitative simulation, for simulation between weighted automata. Again, while fair trace inclusion is PSPACE-complete, fair simulation can be computed in polynomial time. For weighted automata, the (quantitative) language inclusion problem is undecidable for mean-payoff automata and the decidability is open for discounted-sum automata, whereas the (quantitative) simulation problems reduce to mean-payoff games and discounted-sum games, which admit pseudo-polynomial time algorithms.

In this work, we study (quantitative) simulation for weighted automata with Büchi acceptance conditions, i.e., we generalize fair simulation from non-weighted automata to weighted automata. We show that imposing Büchi acceptance conditions on weighted automata changes many fundamental properties of the simulation games. For example, whereas for mean-payoff and discounted-sum games, the players do not need memory to play optimally; we show in contrast that for simulation games with Büchi acceptance conditions, (i) for mean-payoff objectives, optimal strategies for both players require infinite memory in general, and (ii) for discounted-sum objectives, optimal strategies need not exist for both players. While the simulation games with Büchi acceptance conditions are more complicated (e.g., due to infinite-memory requirements for mean-payoff objectives) as compared to their counterpart without Büchi acceptance conditions, we still present pseudo-polynomial time algorithms to solve simulation games with Büchi acceptance conditions for both weighted mean-payoff and weighted discounted-sum automata.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Otop, Jan and Velner, Yaron},
  issn         = {2664-1690},
  pages        = {26},
  publisher    = {IST Austria},
  title        = {{Quantitative fair simulation games}},
  doi          = {10.15479/AT:IST-2014-315-v1-1},
  year         = {2014},
}

@inproceedings{1392,
  abstract     = {Fault-tolerant distributed algorithms play an important role in ensuring the reliability of many software applications. In this paper we consider distributed algorithms whose computations are organized in rounds. To verify the correctness of such algorithms, we reason about (i) properties (such as invariants) of the state, (ii) the transitions controlled by the algorithm, and (iii) the communication graph. We introduce a logic that addresses these points, and contains set comprehensions with cardinality constraints, function symbols to describe the local states of each process, and a limited form of quantifier alternation to express the verification conditions. We show its use in automating the verification of consensus algorithms. In particular, we give a semi-decision procedure for the unsatisfiability problem of the logic and identify a decidable fragment. We successfully applied our framework to verify the correctness of a variety of consensus algorithms tolerant to both benign faults (message loss, process crashes) and value faults (message corruption).},
  author       = {Dragoi, Cezara and Henzinger, Thomas A and Veith, Helmut and Widder, Josef and Zufferey, Damien},
  location     = {San Diego, USA},
  pages        = {161 -- 181},
  publisher    = {Springer},
  title        = {{A logic-based framework for verifying consensus algorithms}},
  doi          = {10.1007/978-3-642-54013-4_10},
  volume       = {8318},
  year         = {2014},
}

