@article{4193,
  abstract     = {The controlled adhesion of cells to each other and to the extracellular matrix is crucial for tissue development and maintenance. Numerous assays have been developed to quantify cell adhesion. Among these, the use of atomic force microscopy (AFM) for single-cell force spectroscopy (SCFS) has recently been established. This assay permits the adhesion of living cells to be studied in near-physiological conditions. This implementation of AFM allows unrivaled spatial and temporal control of cells, as well as highly quantitative force actuation and force measurement that is sufficiently sensitive to characterize the interaction of single molecules. Therefore, not only overall cell adhesion but also the properties of single adhesion-receptor-ligand interactions can be studied. Here we describe current implementations and applications of SCFS, as well as potential pitfalls, and outline how developments will provide insight into the forces, energetics and kinetics of cell-adhesion processes.},
  author       = {Helenius, Jonne and Heisenberg, Carl-Philipp J and Gaub, Hermann and Mueller, Daniel},
  journal      = {Journal of Cell Science},
  number       = {11},
  pages        = {1785 -- 1791},
  publisher    = {Company of Biologists},
  title        = {{Single-cell force spectroscopy}},
  doi          = {10.1242/jcs.030999},
  volume       = {121},
  year         = {2008},
}

@article{4198,
  abstract     = {Animal body plan arises during gastrulation and organogenesis by the coordination of inductive events and cell movements. Several signaling pathways, such as BMP, FGF, Hedgehog, Nodal, and Wnt have well-recognized instructive roles in cell fate specification during vertebrate embryogenesis. Growing evidence indicates that BMP, Nodal, and FGF signaling also regulate cell movements, and that they do so through mechanisms distinct from those that specify cell fates. Moreover, pathways controlling cell movements can also indirectly influence cell fate specification by regulating dimensions and relative positions of interacting tissues. The current challenge is to delineate the molecular mechanisms via which the major signaling pathways regulate cell fate specification and movements, and how these two processes are coordinated to ensure normal development.},
  author       = {Heisenberg, Carl-Philipp J and Solnica-Krezel, Lilianna},
  journal      = {Current Opinion in Genetics \& Development},
  number       = {4},
  pages        = {311 -- 316},
  publisher    = {Elsevier},
  title        = {{Back and forth between cell fate specification and movement during vertebrate gastrulation}},
  doi          = {10.1016/j.gde.2008.07.011},
  volume       = {18},
  year         = {2008},
}

@article{4227,
  abstract     = {Morphogen concentration gradients provide positional information by activating target genes in a concentration-dependent manner. Recent reports show that the gradient of the syncytial morphogen Bicoid seems to provide precise positional information to determine target gene domains. For secreted morphogenetic ligands, the precision of the gradients, the signal transduction and the reliability of target gene expression domains have not been studied. Here we investigate these issues for the TGF-beta-type morphogen Dpp. We first studied theoretically how cell-to-cell variability in the source, the target tissue, or both, contribute to the variations of the gradient. Fluctuations in the source and target generate a local maximum of precision at a finite distance to the source. We then determined experimentally in the wing epithelium: (1) the precision of the Dpp concentration gradient; (2) the precision of the Dpp signaling activity profile; and (3) the precision of activation of the Dpp target gene spalt. As captured by our theoretical description, the Dpp gradient provides positional information with a maximal precision a few cells away from the source. This maximal precision corresponds to a positional uncertainly of about a single cell diameter. The precision of the Dpp gradient accounts for the precision of the spalt expression range, implying that Dpp can act as a morphogen to coarsely determine the expression pattern of target genes.},
  author       = {Bollenbach, Tobias and Pantazis, Periklis and Kicheva, Anna and Bökel, Christian and González-Gaitán, Marcos and Jülicher, Frank},
  journal      = {Development},
  number       = {6},
  pages        = {1137 -- 1146},
  publisher    = {Company of Biologists},
  title        = {{Precision of the Dpp gradient}},
  doi          = {10.1242/dev.012062},
  volume       = {135},
  year         = {2008},
}

@inproceedings{4244,
  abstract     = {This paper presents a new approach to optimization of an energy-constrained modulation scheme for wireless sensor networks by taking advantage of a novel bio-inspired optimization algorithm. The algorithm is inspired by Wrightpsilas shifting balance theory (SBT) of evolution in population genetics. The total energy consumption of an energy-constrained modulation scheme is minimized by using the new SBT-based optimization algorithm. The results obtained by this new algorithm are compared with other popular optimization algorithms. Numerical experiments are performed to demonstrate that the SBT-based algorithm could be used as an efficient optimizer for solving the optimization problems arising from currently emerging energy-efficient wireless sensor networks.},
  author       = {Yang, Erfu and Barton, Nicholas and Arslan, Tughrul and Erdogan, Ahmet T},
  booktitle    = {{IEEE} Congress on Evolutionary Computation ({CEC})},
  pages        = {2749 -- 2756},
  publisher    = {IEEE},
  title        = {{A novel shifting balance theory-based approach to optimization of an energy-constrained modulation scheme for wireless sensor networks}},
  doi          = {10.1109/CEC.2008.4631167},
  year         = {2008},
}

@article{4245,
  abstract     = {Sex allocation theory has proved extremely successful at predicting when individuals should adjust the sex of their offspring in response to environmental conditions. However, we know rather little about the underlying genetics of sex ratio or how genetic architecture might constrain adaptive sex-ratio behavior. We examined how mutation influenced genetic variation in the sex ratios produced by the parasitoid wasp Nasonia vitripennis. In a mutation accumulation experiment, we determined the mutability of sex ratio, and compared this with the amount of genetic variation observed in natural populations. We found that the mutability (h2m) ranges from 0.001 to 0.002, similar to estimates for life-history traits in other organisms. These estimates suggest one mutation every 5–60 generations, which shift the sex ratio by approximately 0.01 (proportion males). In this and other studies, the genetic variation in N. vitripennis sex ratio ranged from 0.02 to 0.17 (broad-sense heritability, H2). If sex ratio is maintained by mutation–selection balance, a higher genetic variance would be expected given our mutational parameters. Instead, the observed genetic variance perhaps suggests additional selection against sex-ratio mutations with deleterious effects on other fitness traits as well as sex ratio (i.e., pleiotropy), as has been argued to be the case more generally.},
  author       = {Pannebakker, Bart A and Halligan, Daniel and Reynolds, K Tracy and Ballantyne, Gavin A and Shuker, David M and Barton, Nicholas and West, Stuart A},
  journal      = {Evolution; International Journal of Organic Evolution},
  number       = {8},
  pages        = {1921 -- 1935},
  publisher    = {Wiley-Blackwell},
  title        = {{Effects of spontaneous mutation accumulation on sex ratio traits}},
  doi          = {10.1111/j.1558-5646.2008.00434.x},
  volume       = {62},
  year         = {2008},
}

@inproceedings{4366,
  abstract     = {Termination of a heap-manipulating program generally depends on preconditions that express heap assumptions (i.e., assertions describing reachability, aliasing, separation and sharing in the heap). We present an algorithm for the inference of such preconditions. The algorithm exploits a unique interplay between counterexample-producing abstract termination checker and shape analysis. The shape analysis produces heap assumptions on demand to eliminate counterexamples, i.e., non-terminating abstract computations. The experiments with our prototype implementation indicate its practical potential.},
  author       = {Podelski, Andreas and Rybalchenko, Andrey and Wies, Thomas},
  booktitle    = {Computer Aided Verification ({CAV})},
  pages        = {314 -- 327},
  publisher    = {Springer},
  title        = {{Heap Assumptions on Demand}},
  doi          = {10.1007/978-3-540-70545-1_31},
  volume       = {5123},
  year         = {2008},
}

@incollection{4371,
  abstract     = {We survey some of the problems associated with checking whether a given behavior (a sequence, a Boolean signal or a continuous signal) satisfies a property specified in an appropriate temporal logic and describe two such monitoring algorithms for the real-time logic MITL.},
  author       = {Maler, Oded and Nickovic, Dejan and Pnueli, Amir},
  booktitle    = {Pillars of Computer Science: Essays Dedicated to Boris (Boaz) Trakhtenbrot on the Occasion of His 85th Birthday},
  isbn         = {9783540781264},
  pages        = {475 -- 505},
  publisher    = {Springer},
  title        = {{Checking Temporal Properties of Discrete, Timed and Continuous Behaviors}},
  doi          = {10.1007/978-3-540-78127-1_26},
  year         = {2008},
}

@inproceedings{4384,
  abstract     = {Model checking software transactional memories (STMs) is difficult because of the unbounded number, length, and delay of concurrent transactions and the unbounded size of the memory. We show that, under certain conditions, the verification problem can be reduced to a finite-state problem, and we illustrate the use of the method by proving the correctness of several STMs, including two-phase locking, DSTM, TL2, and optimistic concurrency control. The safety properties we consider include strict serializability and opacity; the liveness properties include obstruction freedom, livelock freedom, and wait freedom.

Our main contribution lies in the structure of the proofs, which are largely automated and not restricted to the STMs mentioned above. In a first step we show that every STM that enjoys certain structural properties either violates a safety or liveness requirement on some program with two threads and two shared variables, or satisfies the requirement on all programs. In the second step we use a model checker to prove the requirement for the STM applied to a most general program with two threads and two variables. In the safety case, the model checker constructs a simulation relation between two carefully constructed finite-state transition systems, one representing the given STM applied to a most general program, and the other representing a most liberal safe STM applied to the same program. In the liveness case, the model checker analyzes fairness conditions on the given STM transition system.},
  author       = {Guerraoui, Rachid and Henzinger, Thomas and Jobstmann, Barbara and Singh, Vasu},
  booktitle    = {Proceedings of the {ACM} {SIGPLAN} Conference on Programming Language Design and Implementation ({PLDI})},
  pages        = {372 -- 382},
  publisher    = {ACM},
  title        = {{Model checking transactional memories}},
  doi          = {10.1145/1375581.1375626},
  year         = {2008},
}

@inproceedings{4386,
  abstract     = {We introduce the notion of permissiveness in transactional memories (TM). Intuitively, a TM is permissive if it never aborts a transaction when it need not. More specifically, a TM is permissive with respect to a safety property p if the TM accepts every history that satisfies p. Permissiveness, like safety and liveness, can be used as a metric to compare TMs. We illustrate that it is impractical to achieve permissiveness deterministically, and then show how randomization can be used to achieve permissiveness efficiently. We introduce Adaptive Validation STM (AVSTM), which is probabilistically permissive with respect to opacity; that is, every opaque history is accepted by AVSTM with positive probability. Moreover, AVSTM guarantees lock freedom. Owing to its permissiveness, AVSTM outperforms other STMs by up to 40% in read dominated workloads in high contention scenarios. But, in low contention scenarios, the book-keeping done by AVSTM to achieve permissiveness makes AVSTM, on average, 20-30% worse than existing STMs.},
  author       = {Guerraoui, Rachid and Henzinger, Thomas and Singh, Vasu},
  booktitle    = {Distributed Computing ({DISC})},
  pages        = {305 -- 319},
  publisher    = {Springer},
  title        = {{Permissiveness in transactional memories}},
  doi          = {10.1007/978-3-540-87779-0_21},
  volume       = {5218},
  year         = {2008},
}

@inproceedings{4387,
  abstract     = {Software transactional memory (STM) offers a disciplined concurrent programming model for exploiting the parallelism of modern processor architectures. This paper presents the first deterministic specification automata for strict serializability and opacity in STMs. Using an antichain-based tool, we show our deterministic specifications to be equivalent to more intuitive, nondeterministic specification automata (which are too large to be determinized automatically). Using deterministic specification automata, we obtain a complete verification tool for STMs. We also show how to model and verify contention management within STMs. We automatically check the opacity of popular STM algorithms, such as TL2 and DSTM, with a universal contention manager. The universal contention manager is nondeterministic and establishes correctness for all possible contention management schemes.},
  author       = {Guerraoui, Rachid and Henzinger, Thomas and Singh, Vasu},
  booktitle    = {{CONCUR} 2008 -- Concurrency Theory},
  pages        = {21 -- 35},
  publisher    = {Springer},
  title        = {{Completeness and nondeterminism in model checking transactional memories}},
  doi          = {10.1007/978-3-540-85361-9_6},
  volume       = {5201},
  year         = {2008},
}

@inproceedings{4397,
  author       = {Beyer, Dirk and Zufferey, Damien and Majumdar, Rupak},
  booktitle    = {Computer Aided Verification ({CAV})},
  pages        = {304 -- 308},
  publisher    = {Springer},
  title        = {{CSIsat: Interpolation for LA+EUF}},
  year         = {2008},
}

@inproceedings{4400,
  author       = {Aviv, Adam J. and Cerny, Pavol and Clark, Sandy and Cronin, Eric and Shah, Gaurav and Sherr, Micah and Blaze, Matt},
  booktitle    = {{USENIX/ACCURATE} Electronic Voting Technology Workshop ({EVT})},
  publisher    = {USENIX},
  title        = {{Security Evaluation of ES\&S Voting Machines and Election Management System}},
  year         = {2008},
}

@phdthesis{4409,
  abstract     = {Models of timed systems must incorporate not only the sequence of system events, but the timings of these events as well to capture the real-time aspects of physical systems. Timed automata are models of real-time systems in which states consist of discrete locations and values for real-time clocks. The presence of real-time clocks leads to an uncountable state space. This thesis studies verification problems on timed automata in a game theoretic framework.

For untimed systems, two systems are close if every sequence of events of one system is also observable in the second system. For timed systems, the difference in timings of the two corresponding sequences is also of importance. We propose the notion of bisimulation distance which quantifies timing differences; if the bisimulation distance between two systems is epsilon, then (a) every sequence of events of one system has a corresponding matching sequence in the other, and (b) the timings of matching events in between the two corresponding traces do not differ by more than epsilon. We show that we can compute the bisimulation distance between two timed automata to within any desired degree of accuracy. We also show that the timed verification logic TCTL is robust with respect to our notion of quantitative bisimilarity, in particular, if a system satisfies a formula, then every close system satisfies a close formula.

Timed games are used for distinguishing between the actions of several agents, typically a controller and an environment. The controller must achieve its objective against all possible choices of the environment. The modeling of the passage of time leads to the presence of zeno executions, and corresponding unrealizable strategies of the controller which may achieve objectives by blocking time. We disallow such unreasonable strategies by restricting all agents to use only receptive strategies --strategies which while not being required to ensure time divergence by any agent, are such that no agent is responsible for blocking time. Time divergence is guaranteed when all players use receptive strategies. We show that timed automaton games with receptive strategies can be solved by a reduction to finite state turn based game graphs. We define the logic timed alternating-time temporal logic for verification of timed automaton games and show that the logic can be model checked in EXPTIME. We also show that the minimum time required by an agent to reach a desired location, and the maximum time an agent can stay safe within a set of locations, against all possible actions of its adversaries are both computable.

We next study the memory requirements of winning strategies for timed automaton games. We prove that finite memory strategies suffice for safety objectives, and that winning strategies for reachability objectives may require infinite memory in general. We introduce randomized strategies in which an agent can propose a probabilistic distribution of moves and show that finite memory randomized strategies suffice for all omega-regular objectives. We also show that while randomization helps in simplifying winning strategies, and thus allows the construction of simpler controllers, it does not help a player in winning at more states, and thus does not allow the construction of more powerful controllers.

Finally we study robust winning strategies in timed games. In a physical system, a controller may propose an action together with a time delay, but the action cannot be assumed to be executed at the exact proposed time delay. We present robust strategies which incorporate such jitters and show that the set of states from which an agent can win robustly is computable.},
  author       = {Prabhu, Vinayak},
  pages        = {1 -- 137},
  school       = {University of California, Berkeley},
  title        = {{Games for the verification of timed systems}},
  year         = {2008},
}

@phdthesis{4415,
  abstract     = {Many computing applications, especially those in safety critical embedded systems, require highly predictable timing properties. However, time is often not present in the prevailing computing and networking abstractions. In fact, most advances in computer architecture, software, and networking favor average-case performance over timing predictability. This thesis studies several methods for the design of concurrent and/or distributed embedded systems with precise timing guarantees. The focus is on flexible and compositional methods for programming and verification of the timing properties. The presented methods together with related formalisms cover two levels of design: (1) Programming language/model level. We propose the distributed variant of Giotto, a coordination programming language with an explicit temporal semantics—the logical execution time (LET) semantics. The LET of a task is an interval of time that specifies the time instants at which task inputs and outputs become available (task release and termination instants). The LET of a task is always non-zero. This allows us to communicate values across the network without changing the timing information of the task, and without introducing nondeterminism. We show how this methodology supports distributed code generation for distributed real-time systems. The method gives up some performance in favor of composability and predictability. We characterize the tradeoff by comparing the LET semantics with the semantics used in Simulink. (2) Abstract task graph level. We study interface-based design and verification of applications represented with task graphs. We consider task sequence graphs with general event models, and cyclic graphs with periodic event models with jitter and phase. Here an interface of a component exposes time and resource constraints of the component. Together with interfaces we formally define interface composition operations and the refinement relation. 
For efficient and flexible composability checking two properties are important: incremental design and independent refinement. According to the incremental design property the composition of interfaces can be performed in any order, even if interfaces for some components are not known. The refinement relation is defined such that in a design we can always substitute a refined interface for an abstract one. We show that the framework supports independent refinement, i.e., the refinement relation is preserved under composition operations.},
  author       = {Matic, Slobodan},
  pages        = {1 -- 148},
  school       = {University of California, Berkeley},
  title        = {{Compositionality in deterministic real-time embedded systems}},
  year         = {2008},
}

@inproceedings{4452,
  abstract     = {We describe Valigator, a software tool for imperative program verification that efficiently combines symbolic computation and automated reasoning in a uniform framework. The system offers support for automatically generating and proving verification conditions and, most importantly, for automatically inferring loop invariants and bound assertions by means of symbolic summation, Gröbner basis computation, and quantifier elimination. We present general principles of the implementation and illustrate them on examples.},
  author       = {Henzinger, Thomas and Hottelier, Thibaud and Kovács, Laura},
  booktitle    = {Logic for Programming, Artificial Intelligence, and Reasoning ({LPAR})},
  pages        = {333 -- 342},
  publisher    = {Springer},
  title        = {{Valigator: A verification tool with bound and invariant generation}},
  doi          = {10.1007/978-3-540-89439-1_24},
  volume       = {5330},
  year         = {2008},
}

@article{4509,
  abstract     = {I discuss two main challenges in embedded systems design: the challenge to build predictable systems, and that to build robust systems. I suggest how predictability can be formalized as a form of determinism, and robustness as a form of continuity.},
  author       = {Henzinger, Thomas},
  journal      = {Philosophical Transactions of the Royal Society A: Mathematical, Physical and Engineering Sciences},
  number       = {1881},
  pages        = {3727 -- 3736},
  publisher    = {Royal Society of London},
  title        = {{Two challenges in embedded systems design: Predictability and robustness}},
  doi          = {10.1098/rsta.2008.0141},
  volume       = {366},
  year         = {2008},
}

@inproceedings{4521,
  abstract     = {The search for proof and the search for counterexamples (bugs) are complementary activities that need to be pursued concurrently in order to maximize the practical success rate of verification tools. While this is well-understood in safety verification, the current focus of liveness verification has been almost exclusively on the search for termination proofs. A counterexample to termination is an infinite program execution. In this paper, we propose a method to search for such counterexamples. The search proceeds in two phases. We first dynamically enumerate lasso-shaped candidate paths for counterexamples, and then statically prove their feasibility. We illustrate the utility of our nontermination prover, called TNT, on several nontrivial examples, some of which require bit-level reasoning about integer representations.},
  author       = {Gupta, Ashutosh and Henzinger, Thomas and Majumdar, Rupak and Rybalchenko, Andrey and Xu, Ru-Gang},
  booktitle    = {Proceedings of the {ACM} {SIGPLAN}-{SIGACT} Symposium on Principles of Programming Languages ({POPL})},
  pages        = {147 -- 158},
  publisher    = {ACM},
  title        = {{Proving non-termination}},
  doi          = {10.1145/1328438.1328459},
  year         = {2008},
}

@phdthesis{4524,
  abstract     = {Complex requirements, time-to-market pressure and regulatory constraints have made the designing of embedded systems extremely challenging. This is evident by the increase in effort and expenditure for design of safety-driven real-time control-dominated applications like automotive and avionic controllers. Design processes are often challenged by lack of proper programming tools for specifying and verifying critical requirements (e.g. timing and reliability) of such applications. Platform based design, an approach for designing embedded systems, addresses the above concerns by separating requirement from architecture. The requirement specifies the intended behavior of an application while the architecture specifies the guarantees (e.g. execution speed, failure rate etc). An implementation, a mapping of the requirement on the architecture, is then analyzed for correctness. The orthogonalization of concerns makes the specification and analyses simpler. An effective use of such design methodology has been proposed in Logical Execution Time (LET) model of real-time tasks. The model separates the timing requirements (specified by release and termination instances of a task) from the architecture guarantees (specified by worst-case execution time of the task).

This dissertation proposes a coordination language, Hierarchical Timing Language (HTL), that captures the timing and reliability requirements of real-time applications. An implementation of the program on an architecture is then analyzed to check whether desired timing and reliability requirements are met or not. The core framework extends the LET model by accounting for reliability and refinement. The reliability model separates the reliability requirements of tasks from the reliability guarantees of the architecture. The requirement expresses the desired long-term reliability while the architecture provides a short-term reliability guarantee (e.g. failure rate for each iteration). The analysis checks if the short-term guarantee ensures the desired long-term reliability. The refinement model allows replacing a task by another task during program execution. Refinement preserves schedulability and reliability, i.e., if a refined task is schedulable and reliable for an implementation, then the refining task is also schedulable and reliable for the implementation. Refinement helps in concise specification without overloading analysis.

The work presents the formal model, the analyses (both with and without refinement), and a compiler for HTL programs. The compiler checks composition and refinement constraints, performs schedulability and reliability analyses, and generates code for implementation of an HTL program on a virtual machine. Three real-time controllers, one each from automatic control, automotive control and avionic control, are used to illustrate the steps in modeling and analyzing HTL programs.},
  author       = {Ghosal, Arkadeb},
  pages        = {1 -- 210},
  school       = {University of California, Berkeley},
  title        = {{A hierarchical coordination language for reliable real-time tasks}},
  year         = {2008},
}

@inproceedings{4527,
  abstract     = {We introduce bounded asynchrony, a notion of concurrency tailored to the modeling of biological cell-cell interactions. Bounded asynchrony is the result of a scheduler that bounds the number of steps that one process gets ahead of other processes; this allows the components of a system to move independently while keeping them coupled. Bounded asynchrony accurately reproduces the experimental observations made about certain cell-cell interactions: its constrained nondeterminism captures the variability observed in cells that, although equally potent, assume distinct fates. Real-life cells are not “scheduled”, but we show that distributed real-time behavior can lead to component interactions that are observationally equivalent to bounded asynchrony; this provides a possible mechanistic explanation for the phenomena observed during cell fate specification.
We use model checking to determine cell fates. The nondeterminism of bounded asynchrony causes state explosion during model checking, but partial-order methods are not directly applicable. We present a new algorithm that reduces the number of states that need to be explored: our optimization takes advantage of the bounded-asynchronous progress and the spatially local interactions of components that model cells. We compare our own communication-based reduction with partial-order reduction (on a restricted form of bounded asynchrony) and experiments illustrate that our algorithm leads to significant savings.},
  author       = {Fisher, Jasmin and Henzinger, Thomas and Mateescu, Maria and Piterman, Nir},
  pages        = {17 -- 32},
  publisher    = {Springer},
  title        = {{Bounded asynchrony: Concurrency for modeling cell-cell interactions}},
  doi          = {10.1007/978-3-540-68413-8_2},
  volume       = {5054},
  year         = {2008},
}

@article{4532,
  abstract     = {We consider the equivalence problem for labeled Markov chains (LMCs), where each state is labeled with an observation. Two LMCs are equivalent if every finite sequence of observations has the same probability of occurrence in the two LMCs. We show that equivalence can be decided in polynomial time, using a reduction to the equivalence problem for probabilistic automata, which is known to be solvable in polynomial time. We provide an alternative algorithm to solve the equivalence problem, which is based on a new definition of bisimulation for probabilistic automata. We also extend the technique to decide the equivalence of weighted probabilistic automata.},
  author       = {Doyen, Laurent and Henzinger, Thomas and Raskin, Jean-François},
  journal      = {International Journal of Foundations of Computer Science},
  number       = {3},
  pages        = {549 -- 563},
  publisher    = {World Scientific Publishing},
  title        = {{Equivalence of labeled Markov chains}},
  doi          = {10.1142/S0129054108005814},
  volume       = {19},
  year         = {2008},
}

