@article{3290,
  abstract     = {Analysis of genomic data requires an efficient way to calculate likelihoods across very large numbers of loci. We describe a general method for finding the distribution of genealogies: we allow migration between demes, splitting of demes [as in the isolation-with-migration (IM) model], and recombination between linked loci. These processes are described by a set of linear recursions for the generating function of branch lengths. Under the infinite-sites model, the probability of any configuration of mutations can be found by differentiating this generating function. Such calculations are feasible for small numbers of sampled genomes: as an example, we show how the generating function can be derived explicitly for three genes under the two-deme IM model. This derivation is done automatically, using Mathematica. Given data from a large number of unlinked and nonrecombining blocks of sequence, these results can be used to find maximum-likelihood estimates of model parameters by tabulating the probabilities of all relevant mutational configurations and then multiplying across loci. The feasibility of the method is demonstrated by applying it to simulated data and to a data set previously analyzed by Wang and Hey (2010) consisting of 26,141 loci sampled from Drosophila simulans and D. melanogaster. Our results suggest that such likelihood calculations are scalable to genomic data as long as the numbers of sampled individuals and mutations per sequence block are small.},
  author       = {Lohse, Konrad and Harrison, Richard and Barton, Nicholas H},
  journal      = {Genetics},
  number       = {3},
  pages        = {977 -- 987},
  publisher    = {Genetics Society of America},
  title        = {{A general method for calculating likelihoods under the coalescent process}},
  doi          = {10.1534/genetics.111.129569},
  volume       = {189},
  year         = {2011},
}

@inproceedings{3297,
  abstract     = {Animating detailed liquid surfaces has always been a challenge for computer graphics researchers and visual effects artists. Over the past few years, researchers in this field have focused on mesh-based surface tracking to synthesize extremely detailed liquid surfaces as efficiently as possible. This course provides a solid understanding of the steps required to create a fluid simulator with a mesh-based liquid surface.

The course begins with an overview of several existing liquid-surface-tracking techniques and the pros and cons of each method. Then it explains how to embed a triangle mesh into a finite-difference-based fluid simulator and describes several methods for allowing the liquid surface to merge together or break apart. The final section showcases the benefits and further applications of a mesh-based liquid surface, highlighting state-of-the-art methods for tracking colors and textures, maintaining liquid volume, preserving small surface features, and simulating realistic surface-tension waves.},
  author       = {Wojtan, Christopher J and Müller Fischer, Matthias and Brochu, Tyson},
  booktitle    = {ACM SIGGRAPH 2011 Courses},
  location     = {Vancouver, BC, Canada},
  publisher    = {ACM},
  title        = {{Liquid simulation with mesh-based surface tracking}},
  doi          = {10.1145/2037636.2037644},
  year         = {2011},
}

@inproceedings{3298,
  abstract     = {We present a new algorithm for enforcing incompressibility for Smoothed Particle Hydrodynamics (SPH) by preserving uniform density across the domain. We propose a hybrid method that uses a Poisson solve on a coarse grid to enforce a divergence free velocity field, followed by a local density correction of the particles. This avoids typical grid artifacts and maintains the Lagrangian nature of SPH by directly transferring pressures onto particles. Our method can be easily integrated with existing SPH techniques such as the incompressible PCISPH method as well as weakly compressible SPH by adding an additional force term. We show that this hybrid method accelerates convergence towards uniform density and permits a significantly larger time step compared to earlier approaches while producing similar results. We demonstrate our approach in a variety of scenarios with significant pressure gradients such as splashing liquids.},
  author       = {Raveendran, Karthik and Wojtan, Christopher J and Turk, Greg},
  booktitle    = {Proceedings of the 2011 ACM SIGGRAPH/Eurographics Symposium on Computer Animation},
  editor       = {Spencer, Stephen},
  location     = {Vancouver, Canada},
  pages        = {33 -- 42},
  publisher    = {ACM},
  title        = {{Hybrid smoothed particle hydrodynamics}},
  doi          = {10.1145/2019406.2019411},
  year         = {2011},
}

@inproceedings{3299,
  abstract     = {We introduce propagation models, a formalism designed to support general and efficient data structures for the transient analysis of biochemical reaction networks. We give two use cases for propagation abstract data types: the uniformization method and numerical integration. We also sketch an implementation of a propagation abstract data type, which uses abstraction to approximate states.},
  author       = {Henzinger, Thomas A and Mateescu, Maria},
  location     = {Paris, France},
  pages        = {1 -- 3},
  publisher    = {Springer},
  title        = {{Propagation models for computing biochemical reaction networks}},
  doi          = {10.1145/2037509.2037510},
  year         = {2011},
}

@inproceedings{3301,
  abstract     = {The chemical master equation is a differential equation describing the time evolution of the probability distribution over the possible “states” of a biochemical system. The solution of this equation is of interest within the systems biology field ever since the importance of the molecular noise has been acknowledged. Unfortunately, most of the systems do not have analytical solutions, and numerical solutions suffer from the curse of dimensionality and therefore need to be approximated. Here, we introduce the concept of tail approximation, which retrieves an approximation of the probabilities in the tail of a distribution from the total probability of the tail and its conditional expectation. This approximation method can then be used to numerically compute the solution of the chemical master equation on a subset of the state space, thus fighting the explosion of the state space, for which this problem is renowned.},
  author       = {Henzinger, Thomas A and Mateescu, Maria},
  publisher    = {Tampere International Center for Signal Processing},
  title        = {{Tail approximation for the chemical master equation}},
  year         = {2011},
}

@inproceedings{3302,
  abstract     = {Cloud computing aims to give users virtually unlimited pay-per-use computing resources without the burden of managing the underlying infrastructure. We present a new job execution environment Flextic that exploits scalable static scheduling techniques to provide the user with a flexible pricing model, such as a tradeoff between different degrees of execution speed and execution price, and at the same time, reduce scheduling overhead for the cloud provider. We have evaluated a prototype of Flextic on Amazon EC2 and compared it against Hadoop. For various data parallel jobs from machine learning, image processing, and gene sequencing that we considered, Flextic has low scheduling overhead and reduces job duration by up to 15% compared to Hadoop, a dynamic cloud scheduler.},
  author       = {Henzinger, Thomas A and Singh, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
  booktitle    = {Proceedings of the 3rd USENIX Workshop on Hot Topics in Cloud Computing},
  pages        = {1 -- 6},
  publisher    = {USENIX},
  title        = {{Static scheduling in clouds}},
  year         = {2011},
}

@incollection{3311,
  abstract     = {Alpha shapes have been conceived in 1981 as an attempt to define the shape of a finite set of point in the plane. Since then, connections to diverse areas in the sciences and engineering have developed, including to pattern recognition, digital shape sampling and processing, and structural molecular biology. This survey begins with a historical account and discusses geometric, algorithmic, topological, and combinatorial aspects of alpha shapes in this sequence.},
  author       = {Edelsbrunner, Herbert},
  booktitle    = {Tessellations in the Sciences: Virtues, Techniques and Applications of Geometric Tilings},
  editor       = {van de Weygaert, R and Vegter, G and Ritzerveld, J and Icke, V},
  publisher    = {Springer},
  title        = {{Alpha shapes - a survey}},
  year         = {2011},
}

@inproceedings{3312,
  abstract     = {We study the 3D reconstruction of plant roots from multiple 2D images. To meet the challenge caused by the delicate nature of thin branches, we make three innovations to cope with the sensitivity to image quality and calibration. First, we model the background as a harmonic function to improve the segmentation of the root in each 2D image. Second, we develop the concept of the regularized visual hull which reduces the effect of jittering and refraction by ensuring consistency with one 2D image. Third, we guarantee connectedness through adjustments to the 3D reconstruction that minimize global error. Our software is part of a biological phenotype/genotype study of agricultural root systems. It has been tested on more than 40 plant roots and results are promising in terms of reconstruction quality and efficiency.},
  author       = {Zheng, Ying and Gu, Steve and Edelsbrunner, Herbert and Tomasi, Carlo and Benfey, Philip},
  booktitle    = {Proceedings of the IEEE International Conference on Computer Vision},
  location     = {Barcelona, Spain},
  publisher    = {IEEE},
  title        = {{Detailed reconstruction of 3D plant root shape}},
  doi          = {10.1109/ICCV.2011.6126475},
  year         = {2011},
}

@inproceedings{3313,
  abstract     = {Interpreting an image as a function on a compact subset of the Euclidean plane, we get its scale-space by diffusion, spreading the image over the entire plane. This generates a 1-parameter family of functions alternatively defined as convolutions with a progressively wider Gaussian kernel. We prove that the corresponding 1-parameter family of persistence diagrams have norms that go rapidly to zero as time goes to infinity. This result rationalizes experimental observations about scale-space. We hope this will lead to targeted improvements of related computer vision methods.},
  author       = {Chen, Chao and Edelsbrunner, Herbert},
  booktitle    = {Proceedings of the IEEE International Conference on Computer Vision},
  location     = {Barcelona, Spain},
  publisher    = {IEEE},
  title        = {{Diffusion runs low on persistence fast}},
  doi          = {10.1109/ICCV.2011.6126271},
  year         = {2011},
}

@article{3315,
  abstract     = {We consider two-player games played in real time on game structures with clocks where the objectives of players are described using parity conditions. The games are concurrent in that at each turn, both players independently propose a time delay and an action, and the action with the shorter delay is chosen. To prevent a player from winning by blocking time, we restrict each player to play strategies that ensure that the player cannot be responsible for causing a zeno run. First, we present an efficient reduction of these games to turn-based (i.e., not concurrent) finite-state (i.e., untimed) parity games. Our reduction improves the best known complexity for solving timed parity games. Moreover, the rich class of algorithms for classical parity games can now be applied to timed parity games. The states of the resulting game are based on clock regions of the original game, and the state space of the finite game is linear in the size of the region graph. Second, we consider two restricted classes of strategies for the player that represents the controller in a real-time synthesis problem, namely, limit-robust and bounded-robust winning strategies. Using a limit-robust winning strategy, the controller cannot choose an exact real-valued time delay but must allow for some nonzero jitter in each of its actions. If there is a given lower bound on the jitter, then the strategy is bounded-robust winning. We show that exact strategies are more powerful than limit-robust strategies, which are more powerful than bounded-robust winning strategies for any bound. For both kinds of robust strategies, we present efficient reductions to standard timed automaton games. These reductions provide algorithms for the synthesis of robust real-time controllers.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Prabhu, Vinayak},
  journal      = {Logical Methods in Computer Science},
  number       = {4},
  publisher    = {International Federation of Computational Logic},
  title        = {{Timed parity games: Complexity and robustness}},
  doi          = {10.2168/LMCS-7(4:8)2011},
  volume       = {7},
  year         = {2011},
}

@inproceedings{3316,
  abstract     = {In addition to being correct, a system should be robust, that is, it should behave reasonably even after receiving unexpected inputs. In this paper, we summarize two formal notions of robustness that we have introduced previously for reactive systems. One of the notions is based on assigning costs for failures on a user-provided notion of incorrect transitions in a specification. Here, we define a system to be robust if a finite number of incorrect inputs does not lead to an infinite number of incorrect outputs. We also give a more refined notion of robustness that aims to minimize the ratio of output failures to input failures. The second notion is aimed at liveness. In contrast to the previous notion, it has no concept of recovery from an error. Instead, it compares the ratio of the number of liveness constraints that the system violates to the number of liveness constraints that the environment violates.},
  author       = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Jobstmann, Barbara},
  booktitle    = {6th IEEE International Symposium on Industrial and Embedded Systems},
  location     = {Vasteras, Sweden},
  pages        = {176 -- 185},
  publisher    = {IEEE},
  title        = {{Specification-centered robustness}},
  doi          = {10.1109/SIES.2011.5953660},
  year         = {2011},
}

@article{3318,
  abstract     = {Parvalbumin is thought to act in a manner similar to EGTA, but how a slow Ca2+ buffer affects nanodomain-coupling regimes at GABAergic synapses is unclear. Direct measurements of parvalbumin concentration and paired recordings in rodent hippocampus and cerebellum revealed that parvalbumin affects synaptic dynamics only when expressed at high levels. Modeling suggests that, in high concentrations, parvalbumin may exert BAPTA-like effects, modulating nanodomain coupling via competition with local saturation of endogenous fixed buffers.},
  author       = {Eggermann, Emmanuel and Jonas, Peter M},
  journal      = {Nature Neuroscience},
  pages        = {20 -- 22},
  publisher    = {Nature Publishing Group},
  title        = {{How the “slow” Ca(2+) buffer parvalbumin affects transmitter release in nanodomain coupling regimes at GABAergic synapses}},
  doi          = {10.1038/nn.3002},
  volume       = {15},
  year         = {2011},
}

@inproceedings{3319,
  abstract     = {We address the problem of metric learning for multi-view data, namely the construction of embedding projections from data in different representations into a shared feature space, such that the Euclidean distance in this space provides a meaningful within-view as well as between-view similarity. Our motivation stems from the problem of cross-media retrieval tasks, where the availability of a joint Euclidean distance function is a pre-requisite to allow fast, in particular hashing-based, nearest neighbor queries. We formulate an objective function that expresses the intuitive concept that matching samples are mapped closely together in the output space, whereas non-matching samples are pushed apart, no matter in which view they are available. The resulting optimization problem is not convex, but it can be decomposed explicitly into a convex and a concave part, thereby allowing efficient optimization using the convex-concave procedure. Experiments on an image retrieval task show that nearest-neighbor based cross-view retrieval is indeed possible, and the proposed technique improves the retrieval accuracy over baseline techniques.},
  author       = {Quadrianto, Novi and Lampert, Christoph},
  booktitle    = {Proceedings of the 28th International Conference on Machine Learning},
  location     = {Bellevue, United States},
  pages        = {425 -- 432},
  publisher    = {ML Research Press},
  title        = {{Learning multi-view neighborhood preserving projections}},
  year         = {2011},
}

@article{3320,
  abstract     = {Powerful statistical models that can be learned efficiently from large amounts of data are currently revolutionizing computer vision. These models possess a rich internal structure reflecting task-specific relations and constraints. This monograph introduces the reader to the most popular classes of structured models in computer vision. Our focus is discrete undirected graphical models which we cover in detail together with a description of algorithms for both probabilistic inference and maximum a posteriori inference. We discuss separately recently successful techniques for prediction in general structured models. In the second part of this monograph we describe methods for parameter learning where we distinguish the classic maximum likelihood based methods from the more recent prediction-based parameter learning methods. We highlight developments to enhance current models and discuss kernelized models and latent variable models. To make the monograph more practical and to provide links to further study we provide examples of successful application of many methods in the computer vision literature.},
  author       = {Nowozin, Sebastian and Lampert, Christoph},
  journal      = {Foundations and Trends in Computer Graphics and Vision},
  number       = {3-4},
  pages        = {185 -- 365},
  publisher    = {Now Publishers},
  title        = {{Structured learning and prediction in computer vision}},
  doi          = {10.1561/0600000033},
  volume       = {6},
  year         = {2011},
}

@inproceedings{3322,
  abstract     = {We study multi-label prediction for structured output spaces, a problem that occurs, for example, in object detection in images, secondary structure prediction in computational biology, and graph matching with symmetries. Conventional multi-label classification techniques are typically not applicable in this situation, because they require explicit enumeration of the label space, which is infeasible in case of structured outputs. Relying on techniques originally designed for single- label structured prediction, in particular structured support vector machines, results in reduced prediction accuracy, or leads to infeasible optimization problems. In this work we derive a maximum-margin training formulation for multi-label structured prediction that remains computationally tractable while achieving high prediction accuracy. It also shares most beneficial properties with single-label maximum-margin approaches, in particular a formulation as a convex optimization problem, efficient working set training, and PAC-Bayesian generalization bounds.},
  author       = {Lampert, Christoph},
  booktitle    = {NIPS: Neural Information Processing Systems},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Maximum margin multi label structured prediction}},
  year         = {2011},
}

@inproceedings{3323,
  abstract     = {We present a new decidable logic called TREX for expressing constraints about imperative tree data structures. In particular, TREX supports a transitive closure operator that can express reachability constraints, which often appear in data structure invariants. We show that our logic is closed under weakest precondition computation, which enables its use for automated software verification. We further show that satisfiability of formulas in TREX is decidable in NP. The low complexity makes it an attractive alternative to more expensive logics such as monadic second-order logic (MSOL) over trees, which have been traditionally used for reasoning about tree data structures.},
  author       = {Wies, Thomas and Muñiz, Marco and Kuncak, Viktor},
  booktitle    = {Automated Deduction -- CADE-23},
  location     = {Wrocław, Poland},
  pages        = {476 -- 491},
  publisher    = {Springer},
  title        = {{An efficient decision procedure for imperative tree data structures}},
  doi          = {10.1007/978-3-642-22438-6_36},
  volume       = {6803},
  year         = {2011},
}

@inproceedings{3324,
  abstract     = {Automated termination provers often use the following schema to prove that a program terminates: construct a relational abstraction of the program's transition relation and then show that the relational abstraction is well-founded. The focus of current tools has been on developing sophisticated techniques for constructing the abstractions while relying on known decidable logics (such as linear arithmetic) to express them. We believe we can significantly increase the class of programs that are amenable to automated termination proofs by identifying more expressive decidable logics for reasoning about well-founded relations. We therefore present a new decision procedure for reasoning about multiset orderings, which are among the most powerful orderings used to prove termination. We show that, using our decision procedure, one can automatically prove termination of natural abstractions of programs.},
  author       = {Piskac, Ruzica and Wies, Thomas},
  booktitle    = {Verification, Model Checking, and Abstract Interpretation},
  editor       = {Jhala, Ranjit and Schmidt, David},
  location     = {Texas, USA},
  pages        = {371 -- 386},
  publisher    = {Springer},
  title        = {{Decision procedures for automating termination proofs}},
  doi          = {10.1007/978-3-642-18275-4_26},
  volume       = {6538},
  year         = {2011},
}

@inproceedings{3325,
  abstract     = {We introduce streaming data string transducers that map input data strings to output data strings in a single left-to-right pass in linear time. Data strings are (unbounded) sequences of data values, tagged with symbols from a finite set, over a potentially infinite data do- main that supports only the operations of equality and ordering. The transducer uses a finite set of states, a finite set of variables ranging over the data domain, and a finite set of variables ranging over data strings. At every step, it can make decisions based on the next in- put symbol, updating its state, remembering the input data value in its data variables, and updating data-string variables by concatenat- ing data-string variables and new symbols formed from data vari- ables, while avoiding duplication. We establish that the problems of checking functional equivalence of two streaming transducers, and of checking whether a streaming transducer satisfies pre/post verification conditions specified by streaming acceptors over in- put/output data-strings, are in PSPACE. We identify a class of imperative and a class of functional pro- grams, manipulating lists of data items, which can be effectively translated to streaming data-string transducers. The imperative pro- grams dynamically modify a singly-linked heap by changing next- pointers of heap-nodes and by adding new nodes. The main re- striction specifies how the next-pointers can be used for traversal. We also identify an expressively equivalent fragment of functional programs that traverse a list using syntactically restricted recursive calls. Our results lead to algorithms for assertion checking and for checking functional equivalence of two programs, written possibly in different programming styles, for commonly used routines such as insert, delete, and reverse.},
  author       = {Alur, Rajeev and Cerny, Pavol},
  booktitle    = {Proceedings of the 38th ACM SIGPLAN-SIGACT Symposium on Principles of Programming Languages},
  location     = {Texas, USA},
  number       = {1},
  pages        = {599 -- 610},
  publisher    = {ACM},
  title        = {{Streaming transducers for algorithmic verification of single pass list processing programs}},
  doi          = {10.1145/1926385.1926454},
  volume       = {46},
  year         = {2011},
}

@inproceedings{3326,
  abstract     = {Weighted automata map input words to numerical values. Applications of weighted automata include formal verification of quantitative properties, as well as text, speech, and image processing. A weighted automaton is defined with respect to a semiring. For the tropical semiring, the weight of a run is the sum of the weights of the transitions taken along the run, and the value of a word is the minimal weight of an accepting run on it. In the 90’s, Krob studied the decidability of problems on rational series defined with respect to the tropical semiring. Rational series are strongly related to weighted automata, and Krob’s results apply to them. In particular, it follows from Krob’s results that the universality problem (that is, deciding whether the values of all words are below some threshold) is decidable for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and that the equality problem is undecidable when the domain is ℤ ∪ {∞}. In this paper we continue the study of the borders of decidability in weighted automata, describe alternative and direct proofs of the above results, and tighten them further. Unlike the proofs of Krob, which are algebraic in their nature, our proofs stay in the terrain of state machines, and the reduction is from the halting problem of a two-counter machine. This enables us to significantly simplify Krob’s reasoning, make the undecidability result accessible to the automata-theoretic community, and strengthen it to apply already to a very simple class of automata: all the states are accepting, there are no initial nor final weights, and all the weights on the transitions are from the set {−1, 0, 1}. The fact we work directly with the automata enables us to tighten also the decidability results and to show that the universality problem for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and in fact even with domain ℚ≥0 ∪ {∞}, is PSPACE-complete.
Our results thus draw a sharper picture about the decidability of decision problems for weighted automata, in both the front of containment vs. universality and the front of the ℕ ∪ {∞} vs. the ℤ ∪ {∞} domains.},
  author       = {Almagor, Shaull and Boker, Udi and Kupferman, Orna},
  booktitle    = {Automated Technology for Verification and Analysis},
  location     = {Taipei, Taiwan},
  pages        = {482 -- 491},
  publisher    = {Springer},
  title        = {{What’s decidable about weighted automata?}},
  doi          = {10.1007/978-3-642-24372-1_37},
  volume       = {6996},
  year         = {2011},
}

@inproceedings{3327,
  abstract     = {We solve the open problems of translating, when possible, all common classes of nondeterministic word automata to deterministic and nondeterministic co-Büchi word automata. The handled classes include Büchi, parity, Rabin, Streett and Muller automata. The translations follow a unified approach and are all asymptotically tight. The problem of translating Büchi automata to equivalent co-Büchi automata was solved in [2], leaving open the problems of translating automata with richer acceptance conditions. For these classes, one cannot easily extend or use the construction in [2]. In particular, going via an intermediate Büchi automaton is not optimal and might involve a blow-up exponentially higher than the known lower bound. Other known translations are also not optimal and involve a doubly exponential blow-up. We describe direct, simple, and asymptotically tight constructions, involving a 2Θ(n) blow-up. The constructions are variants of the subset construction, and allow for symbolic implementations. Beyond the theoretical importance of the results, the new constructions have various applications, among which is an improved algorithm for translating, when possible, LTL formulas to deterministic Büchi word automata.},
  author       = {Boker, Udi and Kupferman, Orna},
  booktitle    = {Foundations of Software Science and Computational Structures},
  editor       = {Hofmann, Martin},
  location     = {Saarbrücken, Germany},
  pages        = {184 -- 198},
  publisher    = {Springer},
  title        = {{Co-Büching them all}},
  doi          = {10.1007/978-3-642-19805-2_13},
  volume       = {6604},
  year         = {2011},
}

