@phdthesis{4409,
  abstract     = {Models of timed systems must incorporate not only the sequence of system events, but the timings of these events as well to capture the real-time aspects of physical systems. Timed automata are models of real-time systems in which states consist of discrete locations and values for real-time clocks. The presence of real-time clocks leads to an uncountable state space. This thesis studies verification problems on timed automata in a game theoretic framework.

For untimed systems, two systems are close if every sequence of events of one system is also observable in the second system. For timed systems, the difference in timings of the two corresponding sequences is also of importance. We propose the notion of bisimulation distance which quantifies timing differences; if the bisimulation distance between two systems is epsilon, then (a) every sequence of events of one system has a corresponding matching sequence in the other, and (b) the timings of matching events in between the two corresponding traces do not differ by more than epsilon. We show that we can compute the bisimulation distance between two timed automata to within any desired degree of accuracy. We also show that the timed verification logic TCTL is robust with respect to our notion of quantitative bisimilarity, in particular, if a system satisfies a formula, then every close system satisfies a close formula.

Timed games are used for distinguishing between the actions of several agents, typically a controller and an environment. The controller must achieve its objective against all possible choices of the environment. The modeling of the passage of time leads to the presence of zeno executions, and corresponding unrealizable strategies of the controller which may achieve objectives by blocking time. We disallow such unreasonable strategies by restricting all agents to use only receptive strategies --- strategies which while not being required to ensure time divergence by any agent, are such that no agent is responsible for blocking time. Time divergence is guaranteed when all players use receptive strategies. We show that timed automaton games with receptive strategies can be solved by a reduction to finite state turn based game graphs. We define the logic timed alternating-time temporal logic for verification of timed automaton games and show that the logic can be model checked in EXPTIME. We also show that the minimum time required by an agent to reach a desired location, and the maximum time an agent can stay safe within a set of locations, against all possible actions of its adversaries are both computable.

We next study the memory requirements of winning strategies for timed automaton games. We prove that finite memory strategies suffice for safety objectives, and that winning strategies for reachability objectives may require infinite memory in general. We introduce randomized strategies in which an agent can propose a probabilistic distribution of moves and show that finite memory randomized strategies suffice for all omega-regular objectives. We also show that while randomization helps in simplifying winning strategies, and thus allows the construction of simpler controllers, it does not help a player in winning at more states, and thus does not allow the construction of more powerful controllers.

Finally we study robust winning strategies in timed games. In a physical system, a controller may propose an action together with a time delay, but the action cannot be assumed to be executed at the exact proposed time delay. We present robust strategies which incorporate such jitters and show that the set of states from which an agent can win robustly is computable.},
  author       = {Prabhu, Vinayak},
  pages        = {1 -- 137},
  school       = {University of California, Berkeley},
  title        = {{Games for the verification of timed systems}},
  year         = {2008},
}

@phdthesis{4415,
  abstract     = {Many computing applications, especially those in safety critical embedded systems, require highly predictable timing properties. However, time is often not present in the prevailing computing and networking abstractions. In fact, most advances in computer architecture, software, and networking favor average-case performance over timing predictability. This thesis studies several methods for the design of concurrent and/or distributed embedded systems with precise timing guarantees. The focus is on flexible and compositional methods for programming and verification of the timing properties. The presented methods together with related formalisms cover two levels of design: (1) Programming language/model level. We propose the distributed variant of Giotto, a coordination programming language with an explicit temporal semantics—the logical execution time (LET) semantics. The LET of a task is an interval of time that specifies the time instants at which task inputs and outputs become available (task release and termination instants). The LET of a task is always non-zero. This allows us to communicate values across the network without changing the timing information of the task, and without introducing nondeterminism. We show how this methodology supports distributed code generation for distributed real-time systems. The method gives up some performance in favor of composability and predictability. We characterize the tradeoff by comparing the LET semantics with the semantics used in Simulink. (2) Abstract task graph level. We study interface-based design and verification of applications represented with task graphs. We consider task sequence graphs with general event models, and cyclic graphs with periodic event models with jitter and phase.
For efficient and flexible composability checking two properties are important: incremental design and independent refinement. According to the incremental design property the composition of interfaces can be performed in any order, even if interfaces for some components are not known. The refinement relation is defined such that in a design we can always substitute a refined interface for an abstract one. We show that the framework supports independent refinement, i.e., the refinement relation is preserved under composition operations.},
  author       = {Matic, Slobodan},
  pages        = {1 -- 148},
  school       = {University of California, Berkeley},
  title        = {{Compositionality in deterministic real-time embedded systems}},
  year         = {2008},
}

@inproceedings{4452,
  abstract     = {We describe Valigator, a software tool for imperative program verification that efficiently combines symbolic computation and automated reasoning in a uniform framework. The system offers support for automatically generating and proving verification conditions and, most importantly, for automatically inferring loop invariants and bound assertions by means of symbolic summation, Gröbner basis computation, and quantifier elimination. We present general principles of the implementation and illustrate them on examples.},
  author       = {Henzinger, Thomas and Hottelier, Thibaud and Kovács, Laura},
  booktitle    = {Proceedings of the 15th International Conference on Logic for Programming, Artificial Intelligence, and Reasoning ({LPAR})},
  pages        = {333 -- 342},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {{Valigator: A verification tool with bound and invariant generation}},
  doi          = {10.1007/978-3-540-89439-1_24},
  volume       = {5330},
  year         = {2008},
}

@article{4509,
  abstract     = {I discuss two main challenges in embedded systems design: the challenge to build predictable systems, and that to build robust systems. I suggest how predictability can be formalized as a form of determinism, and robustness as a form of continuity.},
  author       = {Henzinger, Thomas},
  journal      = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences},
  number       = {1881},
  pages        = {3727 -- 3736},
  publisher    = {Royal Society of London},
  title        = {{Two challenges in embedded systems design: Predictability and robustness}},
  doi          = {10.1098/rsta.2008.0141},
  volume       = {366},
  year         = {2008},
}

@inproceedings{4521,
  abstract     = {The search for proof and the search for counterexamples (bugs) are complementary activities that need to be pursued concurrently in order to maximize the practical success rate of verification tools. While this is well-understood in safety verification, the current focus of liveness verification has been almost exclusively on the search for termination proofs. A counterexample to termination is an infinite program execution. In this paper, we propose a method to search for such counterexamples. The search proceeds in two phases. We first dynamically enumerate lasso-shaped candidate paths for counterexamples, and then statically prove their feasibility. We illustrate the utility of our nontermination prover, called TNT, on several nontrivial examples, some of which require bit-level reasoning about integer representations.},
  author       = {Gupta, Ashutosh and Henzinger, Thomas and Majumdar, Ritankar S. and Rybalchenko, Andrey and Xu, Ru-Gang},
  booktitle    = {Proceedings of the 35th {ACM} {SIGPLAN}-{SIGACT} Symposium on Principles of Programming Languages ({POPL})},
  pages        = {147 -- 158},
  publisher    = {ACM},
  title        = {{Proving non-termination}},
  doi          = {10.1145/1328438.1328459},
  year         = {2008},
}

@phdthesis{4524,
  abstract     = {Complex requirements, time-to-market pressure and regulatory constraints have made the designing of embedded systems extremely challenging. This is evident by the increase in effort and expenditure for design of safety-driven real-time control-dominated applications like automotive and avionic controllers. Design processes are often challenged by lack of proper programming tools for specifying and verifying critical requirements (e.g. timing and reliability) of such applications. Platform based design, an approach for designing embedded systems, addresses the above concerns by separating requirement from architecture. The requirement specifies the intended behavior of an application while the architecture specifies the guarantees (e.g. execution speed, failure rate etc). An implementation, a mapping of the requirement on the architecture, is then analyzed for correctness. The orthogonalization of concerns makes the specification and analyses simpler. An effective use of such design methodology has been proposed in Logical Execution Time (LET) model of real-time tasks. The model separates the timing requirements (specified by release and termination instances of a task) from the architecture guarantees (specified by worst-case execution time of the task).

This dissertation proposes a coordination language, Hierarchical Timing Language (HTL), that captures the timing and reliability requirements of real-time applications. An implementation of the program on an architecture is then analyzed to check whether desired timing and reliability requirements are met or not. The core framework extends the LET model by accounting for reliability and refinement. The reliability model separates the reliability requirements of tasks from the reliability guarantees of the architecture. The requirement expresses the desired long-term reliability while the architecture provides a short-term reliability guarantee (e.g. failure rate for each iteration). The analysis checks if the short-term guarantee ensures the desired long-term reliability. The refinement model allows replacing a task by another task during program execution. Refinement preserves schedulability and reliability, i.e., if a refined task is schedulable and reliable for an implementation, then the refining task is also schedulable and reliable for the implementation. Refinement helps in concise specification without overloading analysis.

The work presents the formal model, the analyses (both with and without refinement), and a compiler for HTL programs. The compiler checks composition and refinement constraints, performs schedulability and reliability analyses, and generates code for implementation of an HTL program on a virtual machine. Three real-time controllers, one each from automatic control, automotive control and avionic control, are used to illustrate the steps in modeling and analyzing HTL programs.},
  author       = {Ghosal, Arkadeb},
  pages        = {1 -- 210},
  school       = {University of California, Berkeley},
  title        = {{A hierarchical coordination language for reliable real-time tasks}},
  year         = {2008},
}

@inproceedings{4527,
  abstract     = {We introduce bounded asynchrony, a notion of concurrency tailored to the modeling of biological cell-cell interactions. Bounded asynchrony is the result of a scheduler that bounds the number of steps that one process gets ahead of other processes; this allows the components of a system to move independently while keeping them coupled. Bounded asynchrony accurately reproduces the experimental observations made about certain cell-cell interactions: its constrained nondeterminism captures the variability observed in cells that, although equally potent, assume distinct fates. Real-life cells are not “scheduled”, but we show that distributed real-time behavior can lead to component interactions that are observationally equivalent to bounded asynchrony; this provides a possible mechanistic explanation for the phenomena observed during cell fate specification.
We use model checking to determine cell fates. The nondeterminism of bounded asynchrony causes state explosion during model checking, but partial-order methods are not directly applicable. We present a new algorithm that reduces the number of states that need to be explored: our optimization takes advantage of the bounded-asynchronous progress and the spatially local interactions of components that model cells. We compare our own communication-based reduction with partial-order reduction (on a restricted form of bounded asynchrony) and experiments illustrate that our algorithm leads to significant savings.},
  author       = {Fisher, Jasmin and Henzinger, Thomas and Mateescu, Maria and Piterman, Nir},
  booktitle    = {Proceedings of the 1st International Workshop on Formal Methods in Systems Biology ({FMSB})},
  pages        = {17 -- 32},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {{Bounded asynchrony: Concurrency for modeling cell-cell interactions}},
  doi          = {10.1007/978-3-540-68413-8_2},
  volume       = {5054},
  year         = {2008},
}

@article{4532,
  abstract     = {We consider the equivalence problem for labeled Markov chains (LMCs), where each state is labeled with an observation. Two LMCs are equivalent if every finite sequence of observations has the same probability of occurrence in the two LMCs. We show that equivalence can be decided in polynomial time, using a reduction to the equivalence problem for probabilistic automata, which is known to be solvable in polynomial time. We provide an alternative algorithm to solve the equivalence problem, which is based on a new definition of bisimulation for probabilistic automata. We also extend the technique to decide the equivalence of weighted probabilistic automata.},
  author       = {Doyen, Laurent and Henzinger, Thomas and Raskin, Jean-François},
  journal      = {International Journal of Foundations of Computer Science},
  number       = {3},
  pages        = {549 -- 563},
  publisher    = {World Scientific Publishing},
  title        = {{Equivalence of labeled Markov chains}},
  doi          = {10.1142/S0129054108005814},
  volume       = {19},
  year         = {2008},
}

@inproceedings{4533,
  abstract     = {Interface theories have been proposed to support incremental design and independent implementability. Incremental design means that the compatibility checking of interfaces can proceed for partial system descriptions, without knowing the interfaces of all components. Independent implementability means that compatible interfaces can be refined separately, maintaining compatibility. We show that these interface theories provide no formal support for component reuse, meaning that the same component cannot be used to implement several different interfaces in a design. We add a new operation to interface theories in order to support such reuse. For example, different interfaces for the same component may refer to different aspects such as functionality, timing, and power consumption. We give both stateless and stateful examples for interface theories with component reuse. To illustrate component reuse in interface-based design, we show how the stateful theory provides a natural framework for specifying and refining PCI bus clients.},
  author       = {Doyen, Laurent and Henzinger, Thomas and Jobstmann, Barbara and Petrov, Tatjana},
  booktitle    = {Proceedings of the 8th {ACM} International Conference on Embedded Software ({EMSOFT})},
  pages        = {79 -- 88},
  publisher    = {ACM},
  title        = {{Interface theories with component reuse}},
  doi          = {10.1145/1450058.1450070},
  year         = {2008},
}

@article{4534,
  abstract     = {A stochastic graph game is played by two players on a game graph with probabilistic transitions. We consider stochastic graph games with ω-regular winning conditions specified as parity objectives, and mean-payoff (or limit-average) objectives. These games lie in NP ∩ coNP. We present a polynomial-time Turing reduction of stochastic parity games to stochastic mean-payoff games.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas},
  journal      = {Information Processing Letters},
  number       = {1},
  pages        = {1 -- 7},
  publisher    = {Elsevier},
  title        = {{Reduction of stochastic parity to stochastic mean-payoff games}},
  doi          = {10.1016/j.ipl.2007.08.035},
  volume       = {106},
  year         = {2008},
}

@inproceedings{4546,
  abstract     = {We propose the notion of logical reliability for real-time program tasks that interact through periodically updated program variables. We describe a reliability analysis that checks if the given short-term (e.g., single-period) reliability of a program variable update in an implementation is sufficient to meet the logical reliability requirement (of the program variable) in the long run. We then present a notion of design by refinement where a task can be refined by another task that writes to program variables with less logical reliability. The resulting analysis can be combined with an incremental schedulability analysis for interacting real-time tasks proposed earlier for the Hierarchical Timing Language (HTL), a coordination language for distributed real-time systems. We implemented a logical-reliability-enhanced prototype of the compiler and runtime infrastructure for HTL.},
  author       = {Chatterjee, Krishnendu and Ghosal, Arkadeb and Henzinger, Thomas and Iercan, Daniel and Kirsch, Christoph M. and Pinello, Claudio and Sangiovanni-Vincentelli, Alberto},
  booktitle    = {Proceedings of the Conference on Design, Automation and Test in Europe ({DATE})},
  pages        = {909 -- 914},
  publisher    = {IEEE},
  title        = {{Logical reliability of interacting real-time tasks}},
  doi          = {10.1145/1403375.1403595},
  year         = {2008},
}

@article{4548,
  abstract     = {The value of a finite-state two-player zero-sum stochastic game with limit-average payoff can be approximated to within ε in time exponential in a polynomial in the size of the game times polynomial in logarithmic in 1/ε, for all ε > 0.},
  author       = {Chatterjee, Krishnendu and Majumdar, Ritankar S. and Henzinger, Thomas},
  journal      = {International Journal of Game Theory},
  number       = {2},
  pages        = {219 -- 234},
  publisher    = {Springer},
  title        = {{Stochastic limit-average games are in EXPTIME}},
  doi          = {10.1007/s00182-007-0110-5},
  volume       = {37},
  year         = {2008},
}

