@misc{5439,
  abstract     = {The target discounted-sum problem is the following: Given a rational discount factor 0 < λ < 1 and three rational values a, b, and t, does there exist a finite or an infinite sequence w ∈ (a, b)^* or w ∈ (a, b)^ω, such that Σ_{i=0}^{|w|} w(i)λ^i equals t? The problem turns out to relate to many fields of mathematics and computer science, and its decidability question is surprisingly hard to solve. We solve the finite version of the problem, and show the hardness of the infinite version, linking it to various areas and open problems in mathematics and computer science: β-expansions, discounted-sum automata, piecewise affine maps, and generalizations of the Cantor set. We provide some partial results to the infinite version, among which are solutions to its restriction to eventually-periodic sequences and to the cases that λ ≥ 1/2 or λ = 1/n, for every n ∈ N. We use our results for solving some open problems on discounted-sum automata, among which are the exact-value problem for nondeterministic automata over finite words and the universality and inclusion problems for functional automata.},
  author       = {Boker, Udi and Henzinger, Thomas A and Otop, Jan},
  issn         = {2664-1690},
  pages        = {20},
  publisher    = {IST Austria},
  title        = {{The target discounted-sum problem}},
  doi          = {10.15479/AT:IST-2015-335-v1-1},
  year         = {2015},
}

@misc{5549,
  abstract     = {This repository contains the experimental part of the CAV 2015 publication Counterexample Explanation by Learning Small Strategies in Markov Decision Processes.
We extended the probabilistic model checker PRISM to represent strategies of Markov Decision Processes as Decision Trees.
The archive contains a Java executable version of the extended tool (prism_dectree.jar) together with a few examples of the PRISM benchmark library.
To execute the program, please have a look at the README.txt, which provides instructions and further information on the archive.
The archive contains scripts that (if run often enough) reproduce the data presented in the publication.},
  author       = {Fellner, Andreas},
  keywords     = {Markov Decision Process, Decision Tree, Probabilistic Verification, Counterexample Explanation},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Experimental part of CAV 2015 publication: Counterexample Explanation by Learning Small Strategies in Markov Decision Processes}},
  doi          = {10.15479/AT:ISTA:28},
  year         = {2015},
}

@inproceedings{1498,
  abstract     = {Fault-tolerant distributed algorithms play an important role in many critical/high-availability applications. These algorithms are notoriously difficult to implement correctly, due to asynchronous communication and the occurrence of faults, such as the network dropping messages or computers crashing. Nonetheless there is surprisingly little language and verification support to build distributed systems based on fault-tolerant algorithms. In this paper, we present some of the challenges that a designer has to overcome to implement a fault-tolerant distributed system. Then we review different models that have been proposed to reason about distributed algorithms and sketch how such a model can form the basis for a domain-specific programming language. Adopting a high-level programming model can simplify the programmer's life and make the code amenable to automated verification, while still compiling to efficiently executable code. We conclude by summarizing the current status of an ongoing language design and implementation project that is based on this idea.},
  author       = {Dragoi, Cezara and Henzinger, Thomas A and Zufferey, Damien},
  booktitle    = {1st Summit on Advances in Programming Languages (SNAPL 2015)},
  isbn         = {978-3-939897-80-4},
  location     = {Asilomar, CA, United States},
  pages        = {90 -- 102},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{The need for language support for fault-tolerant distributed systems}},
  doi          = {10.4230/LIPIcs.SNAPL.2015.90},
  volume       = {32},
  year         = {2015},
}

@inproceedings{1499,
  abstract     = {We consider weighted automata with both positive and negative integer weights on edges and
study the problem of synchronization using adaptive strategies that may only observe whether
the current weight-level is negative or nonnegative. We show that the synchronization problem is decidable in polynomial time for deterministic weighted automata.},
  author       = {Kretinsky, Jan and Larsen, Kim and Laursen, Simon and Srba, Jiří},
  booktitle    = {26th International Conference on Concurrency Theory (CONCUR 2015)},
  location     = {Madrid, Spain},
  pages        = {142 -- 154},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Polynomial time decidability of weighted synchronization under partial observability}},
  doi          = {10.4230/LIPIcs.CONCUR.2015.142},
  volume       = {42},
  year         = {2015},
}

@article{1501,
  abstract     = {We consider Markov decision processes (MDPs) which are a standard model for probabilistic systems. We focus on qualitative properties for MDPs that can express that desired behaviors of the system arise almost-surely (with probability 1) or with positive probability. We introduce a new simulation relation to capture the refinement relation of MDPs with respect to qualitative properties, and present discrete graph algorithms with quadratic complexity to compute the simulation relation. We present an automated technique for assume-guarantee style reasoning for compositional analysis of two-player games by giving a counterexample guided abstraction-refinement approach to compute our new simulation relation. We show a tight link between two-player games and MDPs, and as a consequence the results for games are lifted to MDPs with qualitative properties. We have implemented our algorithms and show that the compositional analysis leads to significant improvements.},
  author       = {Chatterjee, Krishnendu and Chmelik, Martin and Daca, Przemyslaw},
  journal      = {Formal Methods in System Design},
  number       = {2},
  pages        = {230 -- 264},
  publisher    = {Springer},
  title        = {{CEGAR for compositional analysis of qualitative properties in Markov decision processes}},
  doi          = {10.1007/s10703-015-0235-2},
  volume       = {47},
  year         = {2015},
}

@inproceedings{1502,
  abstract     = {We extend the theory of input-output conformance with operators for merge and quotient. The former is useful when testing against multiple requirements or views. The latter can be used to generate tests for patches of an already tested system. Both operators can combine systems with different action alphabets, which is usually the case when constructing complex systems and specifications from parts, for instance different views as well as newly defined functionality of a~previous version of the system.},
  author       = {Beneš, Nikola and Daca, Przemyslaw and Henzinger, Thomas A and Kretinsky, Jan and Nickovic, Dejan},
  booktitle    = {18th International ACM SIGSOFT Symposium on Component-Based Software Engineering (CBSE 2015)},
  isbn         = {978-1-4503-3471-6},
  location     = {Montreal, QC, Canada},
  pages        = {101 -- 110},
  publisher    = {ACM},
  title        = {{Complete composition operators for IOCO-testing theory}},
  doi          = {10.1145/2737166.2737175},
  year         = {2015},
}

@article{1538,
  abstract     = {Systems biology rests on the idea that biological complexity can be better unraveled through the interplay of modeling and experimentation. However, the success of this approach depends critically on the informativeness of the chosen experiments, which is usually unknown a priori. Here, we propose a systematic scheme based on iterations of optimal experiment design, flow cytometry experiments, and Bayesian parameter inference to guide the discovery process in the case of stochastic biochemical reaction networks. To illustrate the benefit of our methodology, we apply it to the characterization of an engineered light-inducible gene expression circuit in yeast and compare the performance of the resulting model with models identified from nonoptimal experiments. In particular, we compare the parameter posterior distributions and the precision to which the outcome of future experiments can be predicted. Moreover, we illustrate how the identified stochastic model can be used to determine light induction patterns that make either the average amount of protein or the variability in a population of cells follow a desired profile. Our results show that optimal experiment design allows one to derive models that are accurate enough to precisely predict and regulate the protein expression in heterogeneous cell populations over extended periods of time.},
  author       = {Ruess, Jakob and Parise, Francesca and Milias Argeitis, Andreas and Khammash, Mustafa and Lygeros, John},
  journal      = {Proceedings of the National Academy of Sciences},
  number       = {26},
  pages        = {8148 -- 8153},
  publisher    = {National Academy of Sciences},
  title        = {{Iterative experiment design guides the characterization of a light-inducible gene expression circuit}},
  doi          = {10.1073/pnas.1423947112},
  volume       = {112},
  year         = {2015},
}

@article{1539,
  abstract     = {Many stochastic models of biochemical reaction networks contain some chemical species for which the number of molecules that are present in the system can only be finite (for instance due to conservation laws), but also other species that can be present in arbitrarily large amounts. The prime example of such networks are models of gene expression, which typically contain a small and finite number of possible states for the promoter but an infinite number of possible states for the amount of mRNA and protein. One of the main approaches to analyze such models is through the use of equations for the time evolution of moments of the chemical species. Recently, a new approach based on conditional moments of the species with infinite state space given all the different possible states of the finite species has been proposed. It was argued that this approach allows one to capture more details about the full underlying probability distribution with a smaller number of equations. Here, I show that the result that less moments provide more information can only stem from an unnecessarily complicated description of the system in the classical formulation. The foundation of this argument will be the derivation of moment equations that describe the complete probability distribution over the finite state space but only low-order moments over the infinite state space. I will show that the number of equations that is needed is always less than what was previously claimed and always less than the number of conditional moment equations up to the same order. To support these arguments, a symbolic algorithm is provided that can be used to derive minimal systems of unconditional moment equations for models with partially finite state space.},
  author       = {Ruess, Jakob},
  journal      = {Journal of Chemical Physics},
  number       = {24},
  publisher    = {American Institute of Physics},
  title        = {{Minimal moment equations for stochastic models of biochemical reaction networks with partially finite state space}},
  doi          = {10.1063/1.4937937},
  volume       = {143},
  year         = {2015},
}

@inproceedings{1541,
  abstract     = {We present XSpeed, a parallel state-space exploration algorithm for continuous systems with linear dynamics and nondeterministic inputs. The motivation of having parallel algorithms is to exploit the computational power of multi-core processors to speed-up performance. The parallelization is achieved on two fronts. First, we propose a parallel implementation of the support function algorithm by sampling functions in parallel. Second, we propose a parallel state-space exploration by slicing the time horizon and computing the reachable states in the time slices in parallel. The second method can be however applied only to a class of linear systems with invertible dynamics and fixed input. A GP-GPU implementation is also presented following a lazy evaluation strategy on support functions. The parallel algorithms are implemented in the tool XSpeed. We evaluated the performance on two benchmarks including a 28-dimensional helicopter model. Comparison with the sequential counterpart shows a maximum speed-up of almost 7× on a 6 core, 12 thread Intel Xeon CPU E5-2420 processor. Our GP-GPU implementation shows a maximum speed-up of 12× over the sequential implementation and 53× over SpaceEx (LGG scenario), the state of the art tool for reachability analysis of linear hybrid systems. Experiments illustrate that our parallel algorithm with time slicing not only speeds-up performance but also improves precision.},
  author       = {Ray, Rajarshi and Gurung, Amit and Das, Binayak and Bartocci, Ezio and Bogomolov, Sergiy and Grosu, Radu},
  booktitle    = {Hardware and Software: Verification and Testing (HVC 2015)},
  location     = {Haifa, Israel},
  pages        = {3 -- 18},
  publisher    = {Springer},
  title        = {{XSpeed: Accelerating reachability analysis on multi-core processors}},
  doi          = {10.1007/978-3-319-26287-1_1},
  volume       = {9434},
  year         = {2015},
}

@inproceedings{1594,
  abstract     = {Quantitative extensions of temporal logics have recently attracted significant attention. In this work, we study frequency LTL (fLTL), an extension of LTL which allows to speak about frequencies of events along an execution. Such an extension is particularly useful for probabilistic systems that often cannot fulfil strict qualitative guarantees on the behaviour. It has been recently shown that controller synthesis for Markov decision processes and fLTL is decidable when all the bounds on frequencies are 1. As a step towards a complete quantitative solution, we show that the problem is decidable for the fragment fLTL$\setminus$GU, where U does not occur in the scope of G (but still F can). Our solution is based on a novel translation of such quantitative formulae into equivalent deterministic automata.},
  author       = {Forejt, Vojtěch and Krčál, Jan and Kretinsky, Jan},
  booktitle    = {Logic for Programming, Artificial Intelligence, and Reasoning (LPAR-20)},
  location     = {Suva, Fiji},
  pages        = {162 -- 177},
  publisher    = {Springer},
  title        = {{Controller synthesis for MDPs and frequency LTL$\setminus$GU}},
  doi          = {10.1007/978-3-662-48899-7_12},
  volume       = {9450},
  year         = {2015},
}

@inproceedings{1601,
  abstract     = {We propose a flexible exchange format for ω-automata, as typically used in formal verification, and implement support for it in a range of established tools. Our aim is to simplify the interaction of tools, helping the research community to build upon other people’s work. A key feature of the format is the use of very generic acceptance conditions, specified by Boolean combinations of acceptance primitives, rather than being limited to common cases such as Büchi, Streett, or Rabin. Such flexibility in the choice of acceptance conditions can be exploited in applications, for example in probabilistic model checking, and furthermore encourages the development of acceptance-agnostic tools for automata manipulations. The format allows acceptance conditions that are either state-based or transition-based, and also supports alternating automata.},
  author       = {Babiak, Tomáš and Blahoudek, František and Duret-Lutz, Alexandre and Klein, Joachim and Kretinsky, Jan and Mueller, Daniel and Parker, David and Strejček, Jan},
  booktitle    = {Computer Aided Verification (CAV 2015)},
  location     = {San Francisco, CA, United States},
  pages        = {479 -- 486},
  publisher    = {Springer},
  title        = {{The Hanoi omega-automata format}},
  doi          = {10.1007/978-3-319-21690-4_31},
  volume       = {9206},
  year         = {2015},
}

@inproceedings{1603,
  abstract     = {For deterministic systems, a counterexample to a property can simply be an error trace, whereas counterexamples in probabilistic systems are necessarily more complex. For instance, a set of erroneous traces with a sufficient cumulative probability mass can be used. Since these are too large objects to understand and manipulate, compact representations such as subchains have been considered. In the case of probabilistic systems with non-determinism, the situation is even more complex. While a subchain for a given strategy (or scheduler, resolving non-determinism) is a straightforward choice, we take a different approach. Instead, we focus on the strategy itself, and extract the most important decisions it makes, and present its succinct representation.
The key tools we employ to achieve this are (1) introducing a concept of importance of a state w.r.t. the strategy, and (2) learning using decision trees. There are three main consequent advantages of our approach. Firstly, it exploits the quantitative information on states, stressing the more important decisions. Secondly, it leads to a greater variability and degree of freedom in representing the strategies. Thirdly, the representation uses a self-explanatory data structure. In summary, our approach produces more succinct and more explainable strategies, as opposed to e.g. binary decision diagrams. Finally, our experimental results show that we can extract several rules describing the strategy even for very large systems that do not fit in memory, and based on the rules explain the erroneous behaviour.},
  author       = {Brázdil, Tomáš and Chatterjee, Krishnendu and Chmelik, Martin and Fellner, Andreas and Kretinsky, Jan},
  booktitle    = {Computer Aided Verification (CAV 2015)},
  location     = {San Francisco, CA, United States},
  pages        = {158 -- 177},
  publisher    = {Springer},
  title        = {{Counterexample explanation by learning small strategies in Markov decision processes}},
  doi          = {10.1007/978-3-319-21690-4_10},
  volume       = {9206},
  year         = {2015},
}

@inproceedings{1605,
  abstract     = {Multiaffine hybrid automata (MHA) represent a powerful formalism to model complex dynamical systems. This formalism is particularly suited for the representation of biological systems which often exhibit highly non-linear behavior. In this paper, we consider the problem of parameter identification for MHA. We present an abstraction of MHA based on linear hybrid automata, which can be analyzed by the SpaceEx model checker. This abstraction enables a precise handling of time-dependent properties. We demonstrate the potential of our approach on a model of a genetic regulatory network and a myocyte model.},
  author       = {Bogomolov, Sergiy and Schilling, Christian and Bartocci, Ezio and Batt, Grégory and Kong, Hui and Grosu, Radu},
  booktitle    = {Hardware and Software: Verification and Testing (HVC 2015)},
  location     = {Haifa, Israel},
  pages        = {19 -- 35},
  publisher    = {Springer},
  title        = {{Abstraction-based parameter synthesis for multiaffine systems}},
  doi          = {10.1007/978-3-319-26287-1_2},
  volume       = {9434},
  year         = {2015},
}

@inproceedings{1606,
  abstract     = {In this paper, we present the first steps toward a runtime verification framework for monitoring hybrid and cyber-physical systems (CPS) development tools based on randomized differential testing. The development tools include hybrid systems reachability analysis tools, model-based development environments like Simulink/Stateflow (SLSF), etc. First, hybrid automaton models are randomly generated. Next, these hybrid automaton models are translated to a number of different tools (currently, SpaceEx, dReach, Flow*, HyCreate, and the MathWorks’ Simulink/Stateflow) using the HyST source transformation and translation tool. Then, the hybrid automaton models are executed in the different tools and their outputs are parsed. The final step is the differential comparison: the outputs of the different tools are compared. If the results do not agree (in the sense that an analysis or verification result from one tool does not match that of another tool, ignoring timeouts, etc.), a candidate bug is flagged and the model is saved for future analysis by the user. The process then repeats and the monitoring continues until the user terminates the process. We present preliminary results that have been useful in identifying a few bugs in the analysis methods of different development tools, and in an earlier version of HyST.},
  author       = {Nguyen, Luan and Schilling, Christian and Bogomolov, Sergiy and Johnson, Taylor},
  booktitle    = {6th International Conference on Runtime Verification (RV 2015)},
  isbn         = {978-3-319-23819-7},
  location     = {Vienna, Austria},
  pages        = {281 -- 286},
  publisher    = {Springer Nature},
  title        = {{Runtime verification for hybrid analysis tools}},
  doi          = {10.1007/978-3-319-23820-3_19},
  volume       = {9333},
  year         = {2015},
}

@inproceedings{1610,
  abstract     = {The edit distance between two words w1, w2 is the minimal number of word operations (letter insertions, deletions, and substitutions) necessary to transform w1 to w2. The edit distance generalizes to languages L1,L2, where the edit distance is the minimal number k such that for every word from L1 there exists a word in L2 with edit distance at most k. We study the edit distance computation problem between pushdown automata and their subclasses. The problem of computing edit distance to pushdown automata is undecidable, and in practice, the interesting question is to compute the edit distance from a pushdown automaton (the implementation, a standard model for programs with recursion) to a regular language (the specification). In this work, we present a complete picture of decidability and complexity for deciding whether, for a given threshold k, the edit distance from a pushdown automaton to a finite automaton is at most k.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Ibsen-Jensen, Rasmus and Otop, Jan},
  booktitle    = {42nd International Colloquium on Automata, Languages, and Programming (ICALP 2015)},
  isbn         = {978-3-662-47665-9},
  location     = {Kyoto, Japan},
  number       = {Part II},
  pages        = {121 -- 133},
  publisher    = {Springer Nature},
  title        = {{Edit distance for pushdown automata}},
  doi          = {10.1007/978-3-662-47666-6_10},
  volume       = {9135},
  year         = {2015},
}

@inproceedings{1702,
  abstract     = {In this paper we present INTERHORN, a solver for recursion-free Horn clauses. The main application domain of INTERHORN lies in solving interpolation problems arising in software verification. We show how a range of interpolation problems, including path, transition, nested, state/transition and well-founded interpolation can be handled directly by INTERHORN. By detailing these interpolation problems and their Horn clause representations, we hope to encourage the emergence of a common back-end interpolation interface useful for diverse verification tools.},
  author       = {Gupta, Ashutosh and Popeea, Corneliu and Rybalchenko, Andrey},
  booktitle    = {Electronic Proceedings in Theoretical Computer Science, EPTCS},
  location     = {Vienna, Austria},
  pages        = {31 -- 38},
  publisher    = {Open Publishing Association},
  title        = {{Generalised interpolation by solving recursion-free Horn clauses}},
  doi          = {10.4204/EPTCS.169.5},
  volume       = {169},
  year         = {2014},
}

@article{1733,
  author       = {Cerny, Pavol and Chmelik, Martin and Henzinger, Thomas A and Radhakrishna, Arjun},
  title        = {{Interface simulation distances}},
  abstract     = {The classical (boolean) notion of refinement for behavioral interfaces of system components is the alternating refinement preorder. In this paper, we define a distance for interfaces, called interface simulation distance. It makes the alternating refinement preorder quantitative by, intuitively, tolerating errors (while counting them) in the alternating simulation game. We show that the interface simulation distance satisfies the triangle inequality, that the distance between two interfaces does not increase under parallel composition with a third interface, that the distance between two interfaces can be bounded from above and below by distances between abstractions of the two interfaces, and how to synthesize an interface from incompatible requirements. We illustrate the framework, and the properties of the distances under composition of interfaces, with two case studies.},
  journal      = {Theoretical Computer Science},
  volume       = {560},
  number       = {3},
  pages        = {348 -- 363},
  publisher    = {Elsevier},
  year         = {2014},
  doi          = {10.1016/j.tcs.2014.08.019},
}

@inproceedings{1869,
  abstract     = {Boolean controllers for systems with complex datapaths are often very difficult to implement correctly, in particular when concurrency is involved. Yet, in many instances it is easy to formally specify correctness. For example, the specification for the controller of a pipelined processor only has to state that the pipelined processor gives the same results as a non-pipelined reference design. This makes such controllers a good target for automated synthesis. However, an efficient abstraction for the complex datapath elements is needed, as a bit-precise description is often infeasible. We present Suraq, the first controller synthesis tool which uses uninterpreted functions for the abstraction. Quantified firstorder formulas (with specific quantifier structure) serve as the specification language from which Suraq synthesizes Boolean controllers. Suraq transforms the specification into an unsatisfiable SMT formula, and uses Craig interpolation to compute its results. Using Suraq, we were able to synthesize a controller (consisting of two Boolean signals) for a five-stage pipelined DLX processor in roughly one hour and 15 minutes.},
  author       = {Hofferek, Georg and Gupta, Ashutosh},
  booktitle    = {Hardware and Software: Verification and Testing (HVC 2014)},
  editor       = {Yahav, Eran},
  location     = {Haifa, Israel},
  pages        = {68 -- 74},
  publisher    = {Springer},
  title        = {{Suraq - a controller synthesis tool using uninterpreted functions}},
  doi          = {10.1007/978-3-319-13338-6_6},
  volume       = {8855},
  year         = {2014},
}

@inproceedings{1870,
  abstract     = {We investigate the problem of checking if a finite-state transducer is robust to uncertainty in its input. Our notion of robustness is based on the analytic notion of Lipschitz continuity - a transducer is K-(Lipschitz) robust if the perturbation in its output is at most K times the perturbation in its input. We quantify input and output perturbation using similarity functions. We show that K-robustness is undecidable even for deterministic transducers. We identify a class of functional transducers, which admits a polynomial time automata-theoretic decision procedure for K-robustness. This class includes Mealy machines and functional letter-to-letter transducers. We also study K-robustness of nondeterministic transducers. Since a nondeterministic transducer generates a set of output words for each input word, we quantify output perturbation using setsimilarity functions. We show that K-robustness of nondeterministic transducers is undecidable, even for letter-to-letter transducers. We identify a class of set-similarity functions which admit decidable K-robustness of letter-to-letter transducers.},
  author       = {Henzinger, Thomas A and Otop, Jan and Samanta, Roopsha},
  booktitle    = {34th International Conference on Foundation of Software Technology and Theoretical Computer Science (FSTTCS 2014)},
  location     = {Delhi, India},
  pages        = {431 -- 443},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Lipschitz robustness of finite-state transducers}},
  doi          = {10.4230/LIPIcs.FSTTCS.2014.431},
  volume       = {29},
  year         = {2014},
}

@inproceedings{1872,
  abstract     = {Extensionality axioms are common when reasoning about data collections, such as arrays and functions in program analysis, or sets in mathematics. An extensionality axiom asserts that two collections are equal if they consist of the same elements at the same indices. Using extensionality is often required to show that two collections are equal. A typical example is the set theory theorem (∀x)(∀y)x∪y = y ∪x. Interestingly, while humans have no problem with proving such set identities using extensionality, they are very hard for superposition theorem provers because of the calculi they use. In this paper we show how addition of a new inference rule, called extensionality resolution, allows first-order theorem provers to easily solve problems no modern first-order theorem prover can solve. We illustrate this by running the VAMPIRE theorem prover with extensionality resolution on a number of set theory and array problems. Extensionality resolution helps VAMPIRE to solve problems from the TPTP library of first-order problems that were never solved before by any prover.},
  author       = {Gupta, Ashutosh and Kovács, Laura and Kragl, Bernhard and Voronkov, Andrei},
  booktitle    = {Automated Technology for Verification and Analysis (ATVA 2014)},
  editor       = {Cassez, Franck and Raskin, Jean-François},
  location     = {Sydney, Australia},
  pages        = {185 -- 200},
  publisher    = {Springer},
  title        = {{Extensional crisis and proving identity}},
  doi          = {10.1007/978-3-319-11936-6_14},
  volume       = {8837},
  year         = {2014},
}

