@inproceedings{3298,
  abstract     = {We present a new algorithm for enforcing incompressibility for Smoothed Particle Hydrodynamics (SPH) by preserving uniform density across the domain. We propose a hybrid method that uses a Poisson solve on a coarse grid to enforce a divergence-free velocity field, followed by a local density correction of the particles. This avoids typical grid artifacts and maintains the Lagrangian nature of SPH by directly transferring pressures onto particles. Our method can be easily integrated with existing SPH techniques such as the incompressible PCISPH method as well as weakly compressible SPH by adding an additional force term. We show that this hybrid method accelerates convergence towards uniform density and permits a significantly larger time step compared to earlier approaches while producing similar results. We demonstrate our approach in a variety of scenarios with significant pressure gradients such as splashing liquids.},
  author       = {Raveendran, Karthik and Wojtan, Christopher J and Turk, Greg},
  editor       = {Spencer, Stephen},
  location     = {Vancouver, Canada},
  pages        = {33 -- 42},
  publisher    = {ACM},
  title        = {{Hybrid smoothed particle hydrodynamics}},
  doi          = {10.1145/2019406.2019411},
  year         = {2011},
}
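
A minimal sketch of the two-stage step the abstract describes: a coarse-grid Poisson projection followed by a local per-particle density correction. The grid layout, kernels, and constants below are illustrative assumptions, not the authors' implementation.

import numpy as np

def coarse_projection(pos, vel, n=16, iters=60):
    """Push particle velocities toward a divergence-free field via a
    collocated coarse grid and Jacobi iterations on the unit square."""
    h = 1.0 / n
    cells = np.clip((pos / h).astype(int), 0, n - 1)
    u = np.zeros((n, n, 2))
    w = np.zeros((n, n))
    for (i, j), v in zip(cells, vel):           # splat velocities to grid
        u[i, j] += v
        w[i, j] += 1.0
    mask = w > 0
    u[mask] /= w[mask][:, None]
    div = np.zeros((n, n))                      # central-difference divergence
    div[1:-1, 1:-1] = ((u[2:, 1:-1, 0] - u[:-2, 1:-1, 0]) +
                       (u[1:-1, 2:, 1] - u[1:-1, :-2, 1])) / (2 * h)
    p = np.zeros((n, n))
    for _ in range(iters):                      # Jacobi solve of lap(p) = div
        p[1:-1, 1:-1] = 0.25 * (p[2:, 1:-1] + p[:-2, 1:-1] + p[1:-1, 2:]
                                + p[1:-1, :-2] - h * h * div[1:-1, 1:-1])
    u[1:-1, 1:-1, 0] -= (p[2:, 1:-1] - p[:-2, 1:-1]) / (2 * h)
    u[1:-1, 1:-1, 1] -= (p[1:-1, 2:] - p[1:-1, :-2]) / (2 * h)
    return u[cells[:, 0], cells[:, 1]]          # sample corrected field

def density_correction(pos, radius=0.05, stiffness=0.5):
    """Local pairwise push-apart toward uniform density (O(n^2) toy)."""
    d = pos[:, None, :] - pos[None, :, :]
    r = np.linalg.norm(d, axis=2) + 1e-12
    overlap = np.maximum(radius - r, 0.0)
    np.fill_diagonal(overlap, 0.0)
    return stiffness * (d / r[..., None] * overlap[..., None]).sum(axis=1)

# One hybrid step on random particles in the unit square:
rng = np.random.default_rng(0)
pos = rng.random((200, 2))
vel = rng.normal(0.0, 0.1, (200, 2))
vel = coarse_projection(pos, vel)               # grid pass
vel += density_correction(pos)                  # particle pass
pos = np.clip(pos + 0.01 * vel, 0.0, 1.0)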

@inproceedings{3299,
  abstract     = {We introduce propagation models, a formalism designed to support general and efficient data structures for the transient analysis of biochemical reaction networks. We give two use cases for propagation abstract data types: the uniformization method and numerical integration. We also sketch an implementation of a propagation abstract data type, which uses abstraction to approximate states.},
  author       = {Henzinger, Thomas A and Mateescu, Maria},
  location     = {Paris, France},
  pages        = {1 -- 3},
  publisher    = {Springer},
  title        = {{Propagation models for computing biochemical reaction networks}},
  doi          = {10.1145/2037509.2037510},
  year         = {2011},
}

@inproceedings{3301,
  abstract     = {The chemical master equation is a differential equation describing the time evolution of the probability distribution over the possible “states” of a biochemical system. The solution of this equation has been of interest within the systems biology field ever since the importance of molecular noise was acknowledged. Unfortunately, most systems do not have analytical solutions, and numerical solutions suffer from the curse of dimensionality and therefore need to be approximated. Here, we introduce the concept of tail approximation, which retrieves an approximation of the probabilities in the tail of a distribution from the total probability of the tail and its conditional expectation. This approximation method can then be used to numerically compute the solution of the chemical master equation on a subset of the state space, thus fighting the explosion of the state space, for which this problem is renowned.},
  author       = {Henzinger, Thomas A and Mateescu, Maria},
  publisher    = {Tampere International Center for Signal Processing},
  title        = {{Tail approximation for the chemical master equation}},
  year         = {2011},
}
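
For concreteness, a small worked example of recovering tail probabilities from the two summary statistics the abstract names; the geometric tail profile is our assumption, purely for illustration.

\documentclass{article}
\usepackage{amsmath}
\begin{document}
% Illustrative tail reconstruction; the geometric profile is an assumption.
Truncate the state space at $N$ and summarize the tail of the solution
$\pi$ of the chemical master equation by its total probability and its
conditional expectation,
\[
  p_T = \sum_{i > N} \pi_i, \qquad
  e_T = \frac{1}{p_T} \sum_{i > N} i \, \pi_i .
\]
Positing a geometric profile $\pi_i \approx p_T (1 - \rho) \rho^{\,i-N-1}$
for $i > N$ recovers individual tail probabilities, with $\rho$ fixed by
matching the conditional mean:
\[
  e_T = N + \frac{1}{1 - \rho}
  \quad\Longrightarrow\quad
  \rho = 1 - \frac{1}{e_T - N} .
\]
\end{document}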

@inproceedings{3302,
  abstract     = {Cloud computing aims to give users virtually unlimited pay-per-use computing resources without the burden of managing the underlying infrastructure. We present a new job execution environment, Flextic, that exploits scalable static scheduling techniques to provide the user with a flexible pricing model, such as a tradeoff between different degrees of execution speed and execution price, and at the same time to reduce scheduling overhead for the cloud provider. We have evaluated a prototype of Flextic on Amazon EC2 and compared it against Hadoop. For various data parallel jobs from machine learning, image processing, and gene sequencing that we considered, Flextic has low scheduling overhead and reduces job duration by up to 15% compared to Hadoop, a dynamic cloud scheduler.},
  author       = {Henzinger, Thomas A and Singh, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
  pages        = {1 -- 6},
  publisher    = {USENIX},
  title        = {{Static scheduling in clouds}},
  year         = {2011},
}

@inbook{3311,
  abstract     = {Alpha shapes were conceived in 1981 as an attempt to define the shape of a finite set of points in the plane. Since then, connections to diverse areas in the sciences and engineering have developed, including pattern recognition, digital shape sampling and processing, and structural molecular biology. This survey begins with a historical account and then discusses geometric, algorithmic, topological, and combinatorial aspects of alpha shapes, in this sequence.},
  author       = {Edelsbrunner, Herbert},
  booktitle    = {Tessellations in the Sciences: Virtues, Techniques and Applications of Geometric Tilings},
  editor       = {van de Weygaert, R and Vegter, G and Ritzerveld, J and Icke, V},
  publisher    = {Springer},
  title        = {{Alpha shapes - a survey}},
  year         = {2011},
}

@inproceedings{3312,
  abstract     = {We study the 3D reconstruction of plant roots from multiple 2D images. To meet the challenge caused by the delicate nature of thin branches, we make three innovations to cope with the sensitivity to image quality and calibration. First, we model the background as a harmonic function to improve the segmentation of the root in each 2D image. Second, we develop the concept of the regularized visual hull which reduces the effect of jittering and refraction by ensuring consistency with one 2D image. Third, we guarantee connectedness through adjustments to the 3D reconstruction that minimize global error. Our software is part of a biological phenotype/genotype study of agricultural root systems. It has been tested on more than 40 plant roots and results are promising in terms of reconstruction quality and efficiency.},
  author       = {Zheng, Ying and Gu, Steve and Edelsbrunner, Herbert and Tomasi, Carlo and Benfey, Philip},
  booktitle    = {Proceedings of the IEEE International Conference on Computer Vision},
  location     = {Barcelona, Spain},
  publisher    = {IEEE},
  title        = {{Detailed reconstruction of 3D plant root shape}},
  doi          = {10.1109/ICCV.2011.6126475},
  year         = {2011},
}

@inproceedings{3313,
  abstract     = {Interpreting an image as a function on a compact subset of the Euclidean plane, we get its scale-space by diffusion, spreading the image over the entire plane. This generates a 1-parameter family of functions alternatively defined as convolutions with a progressively wider Gaussian kernel. We prove that the corresponding 1-parameter family of persistence diagrams has norms that go rapidly to zero as time goes to infinity. This result rationalizes experimental observations about scale-space. We hope this will lead to targeted improvements of related computer vision methods.},
  author       = {Chen, Chao and Edelsbrunner, Herbert},
  booktitle    = {Proceedings of the IEEE International Conference on Computer Vision},
  location     = {Barcelona, Spain},
  publisher    = {IEEE},
  title        = {{Diffusion runs low on persistence fast}},
  doi          = {10.1109/ICCV.2011.6126271},
  year         = {2011},
}
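
A toy numerical illustration of the stated decay, not a persistence computation: as the Gaussian widens, we track the function's range, max f minus min f, which upper-bounds the persistence of any single feature. Image size and sigma schedule are arbitrary choices.

import numpy as np
from scipy.ndimage import gaussian_filter

rng = np.random.default_rng(1)
image = rng.random((128, 128))                  # noisy test image on [0, 1]

for sigma in [1, 2, 4, 8, 16, 32]:
    smoothed = gaussian_filter(image, sigma)    # scale-space slice at "time" sigma
    spread = smoothed.max() - smoothed.min()
    print(f"sigma={sigma:3d}  range={spread:.5f}")
# The range shrinks rapidly with sigma, consistent with the theorem that
# persistence-diagram norms go to zero as diffusion time increases.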

@article{3315,
  abstract     = {We consider two-player games played in real time on game structures with clocks where the objectives of players are described using parity conditions. The games are concurrent in that at each turn, both players independently propose a time delay and an action, and the action with the shorter delay is chosen. To prevent a player from winning by blocking time, we restrict each player to play strategies that ensure that the player cannot be responsible for causing a zeno run. First, we present an efficient reduction of these games to turn-based (i.e., not concurrent) finite-state (i.e., untimed) parity games. Our reduction improves the best known complexity for solving timed parity games. Moreover, the rich class of algorithms for classical parity games can now be applied to timed parity games. The states of the resulting game are based on clock regions of the original game, and the state space of the finite game is linear in the size of the region graph. Second, we consider two restricted classes of strategies for the player that represents the controller in a real-time synthesis problem, namely, limit-robust and bounded-robust winning strategies. Using a limit-robust winning strategy, the controller cannot choose an exact real-valued time delay but must allow for some nonzero jitter in each of its actions. If there is a given lower bound on the jitter, then the strategy is bounded-robust winning. We show that exact strategies are more powerful than limit-robust strategies, which are more powerful than bounded-robust winning strategies for any bound. For both kinds of robust strategies, we present efficient reductions to standard timed automaton games. These reductions provide algorithms for the synthesis of robust real-time controllers.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Prabhu, Vinayak},
  journal      = {Logical Methods in Computer Science},
  number       = {4},
  publisher    = {International Federation of Computational Logic},
  title        = {{Timed parity games: Complexity and robustness}},
  doi          = {10.2168/LMCS-7(4:8)2011},
  volume       = {7},
  year         = {2011},
}

@inproceedings{3316,
  abstract     = {In addition to being correct, a system should be robust, that is, it should behave reasonably even after receiving unexpected inputs. In this paper, we summarize two formal notions of robustness that we have introduced previously for reactive systems. The first notion is based on assigning costs to failures, relative to a user-provided notion of incorrect transitions in a specification. Here, we define a system to be robust if a finite number of incorrect inputs does not lead to an infinite number of incorrect outputs. We also give a more refined notion of robustness that aims to minimize the ratio of output failures to input failures. The second notion is aimed at liveness. In contrast to the first notion, it has no concept of recovery from an error. Instead, it considers the ratio of the number of liveness constraints that the system violates to the number of liveness constraints that the environment violates.},
  author       = {Bloem, Roderick and Chatterjee, Krishnendu and Greimel, Karin and Henzinger, Thomas A and Jobstmann, Barbara},
  booktitle    = {6th IEEE International Symposium on Industrial and Embedded Systems},
  location     = {Vasteras, Sweden},
  pages        = {176 -- 185},
  publisher    = {IEEE},
  title        = {{Specification-centered robustness}},
  doi          = {10.1109/SIES.2011.5953660},
  year         = {2011},
}

@article{3318,
  abstract     = {Parvalbumin is thought to act in a manner similar to EGTA, but how a slow Ca2+ buffer affects nanodomain-coupling regimes at GABAergic synapses is unclear. Direct measurements of parvalbumin concentration and paired recordings in rodent hippocampus and cerebellum revealed that parvalbumin affects synaptic dynamics only when expressed at high levels. Modeling suggests that, in high concentrations, parvalbumin may exert BAPTA-like effects, modulating nanodomain coupling via competition with local saturation of endogenous fixed buffers.},
  author       = {Eggermann, Emmanuel and Jonas, Peter M},
  journal      = {Nature Neuroscience},
  pages        = {20 -- 22},
  publisher    = {Nature Publishing Group},
  title        = {{How the “slow” Ca$^{2+}$ buffer parvalbumin affects transmitter release in nanodomain-coupling regimes at GABAergic synapses}},
  doi          = {10.1038/nn.3002},
  volume       = {15},
  year         = {2011},
}

@inproceedings{3319,
  abstract     = {We address the problem of metric learning for multi-view data, namely the construction of embedding projections from data in different representations into a shared feature space, such that the Euclidean distance in this space provides a meaningful within-view as well as between-view similarity. Our motivation stems from the problem of cross-media retrieval tasks, where the availability of a joint Euclidean distance function is a pre-requisite to allow fast, in particular hashing-based, nearest neighbor queries. We formulate an objective function that expresses the intuitive concept that matching samples are mapped closely together in the output space, whereas non-matching samples are pushed apart, no matter in which view they are available. The resulting optimization problem is not convex, but it can be decomposed explicitly into a convex and a concave part, thereby allowing efficient optimization using the convex-concave procedure. Experiments on an image retrieval task show that nearest-neighbor based cross-view retrieval is indeed possible, and the proposed technique improves the retrieval accuracy over baseline techniques.},
  author       = {Quadrianto, Novi and Lampert, Christoph},
  location     = {Bellevue, United States},
  pages        = {425 -- 432},
  publisher    = {ML Research Press},
  title        = {{Learning multi-view neighborhood preserving projections}},
  year         = {2011},
}
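
The optimization device named in the abstract, the convex-concave procedure, in its generic textbook form; this is our summary of the standard technique, not the paper's specific objective.

\documentclass{article}
\usepackage{amsmath}
\begin{document}
% Generic convex-concave procedure (CCCP); standard form, not the
% paper's exact objective.
Write the non-convex objective as $J(W) = J_{\mathrm{vex}}(W) +
J_{\mathrm{cave}}(W)$ with $J_{\mathrm{vex}}$ convex and $J_{\mathrm{cave}}$
concave. Each CCCP step linearizes the concave part at the current
iterate and solves the resulting convex problem:
\[
  W_{t+1} \in \operatorname*{arg\,min}_{W} \;
  J_{\mathrm{vex}}(W) + \bigl\langle \nabla J_{\mathrm{cave}}(W_t),\, W \bigr\rangle .
\]
Each step does not increase $J$, and under mild conditions the iterates
converge to a stationary point.
\end{document}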

@article{3320,
  abstract     = {Powerful statistical models that can be learned efficiently from large amounts of data are currently revolutionizing computer vision. These models possess a rich internal structure reflecting task-specific relations and constraints. This monograph introduces the reader to the most popular classes of structured models in computer vision. Our focus is discrete undirected graphical models which we cover in detail together with a description of algorithms for both probabilistic inference and maximum a posteriori inference. We discuss separately recently successful techniques for prediction in general structured models. In the second part of this monograph we describe methods for parameter learning where we distinguish the classic maximum likelihood based methods from the more recent prediction-based parameter learning methods. We highlight developments to enhance current models and discuss kernelized models and latent variable models. To make the monograph more practical and to provide links to further study we provide examples of successful application of many methods in the computer vision literature.},
  author       = {Nowozin, Sebastian and Lampert, Christoph},
  journal      = {Foundations and Trends in Computer Graphics and Vision},
  number       = {3-4},
  pages        = {185 -- 365},
  publisher    = {Now Publishers},
  title        = {{Structured learning and prediction in computer vision}},
  doi          = {10.1561/0600000033},
  volume       = {6},
  year         = {2011},
}

@inproceedings{3322,
  abstract     = {We study multi-label prediction for structured output spaces, a problem that occurs, for example, in object detection in images, secondary structure prediction in computational biology, and graph matching with symmetries. Conventional multi-label classification techniques are typically not applicable in this situation, because they require explicit enumeration of the label space, which is infeasible in case of structured outputs. Relying on techniques originally designed for single-label structured prediction, in particular structured support vector machines, results in reduced prediction accuracy, or leads to infeasible optimization problems. In this work we derive a maximum-margin training formulation for multi-label structured prediction that remains computationally tractable while achieving high prediction accuracy. It also shares most beneficial properties with single-label maximum-margin approaches, in particular a formulation as a convex optimization problem, efficient working set training, and PAC-Bayesian generalization bounds.},
  author       = {Lampert, Christoph},
  booktitle    = {NIPS: Neural Information Processing Systems},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Maximum margin multi-label structured prediction}},
  year         = {2011},
}

@inproceedings{3323,
  abstract     = {We present a new decidable logic called TREX for expressing constraints about imperative tree data structures. In particular, TREX supports a transitive closure operator that can express reachability constraints, which often appear in data structure invariants. We show that our logic is closed under weakest precondition computation, which enables its use for automated software verification. We further show that satisfiability of formulas in TREX is decidable in NP. The low complexity makes it an attractive alternative to more expensive logics such as monadic second-order logic (MSOL) over trees, which have been traditionally used for reasoning about tree data structures.},
  author       = {Wies, Thomas and Muñiz, Marco and Kuncak, Viktor},
  location     = {Wrocław, Poland},
  pages        = {476 -- 491},
  publisher    = {Springer},
  title        = {{An efficient decision procedure for imperative tree data structures}},
  doi          = {10.1007/978-3-642-22438-6_36},
  volume       = {6803},
  year         = {2011},
}

@inproceedings{3324,
  abstract     = {Automated termination provers often use the following schema to prove that a program terminates: construct a relational abstraction of the program's transition relation and then show that the relational abstraction is well-founded. The focus of current tools has been on developing sophisticated techniques for constructing the abstractions while relying on known decidable logics (such as linear arithmetic) to express them. We believe we can significantly increase the class of programs that are amenable to automated termination proofs by identifying more expressive decidable logics for reasoning about well-founded relations. We therefore present a new decision procedure for reasoning about multiset orderings, which are among the most powerful orderings used to prove termination. We show that, using our decision procedure, one can automatically prove termination of natural abstractions of programs.},
  author       = {Piskac, Ruzica and Wies, Thomas},
  editor       = {Jhala, Ranjit and Schmidt, David},
  location     = {Texas, USA},
  pages        = {371 -- 386},
  publisher    = {Springer},
  title        = {{Decision procedures for automating termination proofs}},
  doi          = {10.1007/978-3-642-18275-4_26},
  volume       = {6538},
  year         = {2011},
}
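
For context, the standard Dershowitz–Manna multiset extension that such orderings build on; this is the textbook definition, not notation taken from the paper.

\documentclass{article}
\usepackage{amsmath}
\begin{document}
% Standard multiset extension of a strict order (textbook definition).
Given a strict order $>$ on a set $S$, its multiset extension compares
finite multisets over $S$ by
\[
  M >_{\mathrm{mul}} N \;\iff\;
  \exists X, Y : \;
  \emptyset \neq X \subseteq M, \;\;
  N = (M \setminus X) \uplus Y, \;\;
  \forall y \in Y \; \exists x \in X : \; x > y .
\]
If $>$ is well-founded then so is $>_{\mathrm{mul}}$, which is why
multiset orderings can certify termination.
\end{document}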

@inproceedings{3325,
  abstract     = {We introduce streaming data string transducers that map input data strings to output data strings in a single left-to-right pass in linear time. Data strings are (unbounded) sequences of data values, tagged with symbols from a finite set, over a potentially infinite data domain that supports only the operations of equality and ordering. The transducer uses a finite set of states, a finite set of variables ranging over the data domain, and a finite set of variables ranging over data strings. At every step, it can make decisions based on the next input symbol, updating its state, remembering the input data value in its data variables, and updating data-string variables by concatenating data-string variables and new symbols formed from data variables, while avoiding duplication. We establish that the problems of checking functional equivalence of two streaming transducers, and of checking whether a streaming transducer satisfies pre/post verification conditions specified by streaming acceptors over input/output data-strings, are in PSPACE. We identify a class of imperative and a class of functional programs, manipulating lists of data items, which can be effectively translated to streaming data-string transducers. The imperative programs dynamically modify a singly-linked heap by changing next-pointers of heap-nodes and by adding new nodes. The main restriction specifies how the next-pointers can be used for traversal. We also identify an expressively equivalent fragment of functional programs that traverse a list using syntactically restricted recursive calls. Our results lead to algorithms for assertion checking and for checking functional equivalence of two programs, written possibly in different programming styles, for commonly used routines such as insert, delete, and reverse.},
  author       = {Alur, Rajeev and Cerny, Pavol},
  location     = {Texas, USA},
  number       = {1},
  pages        = {599 -- 610},
  publisher    = {ACM},
  title        = {{Streaming transducers for algorithmic verification of single pass list processing programs}},
  doi          = {10.1145/1926385.1926454},
  volume       = {46},
  year         = {2011},
}
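
A toy Python rendering of the single-pass discipline the abstract describes, for the reverse routine: one data-string variable, updated only by prepending the current data value. This mimics the copyless updates of the formal model; it is not the model itself (no finite control or data variables shown).

def reverse_transducer(stream):
    x = ()                       # data-string variable, initially empty
    for d in stream:             # single left-to-right pass
        x = (d,) + x             # update x := d . x (x is never duplicated)
    return list(x)               # emit output when the input ends

assert reverse_transducer([3, 1, 4, 1, 5]) == [5, 1, 4, 1, 3]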

@inproceedings{3326,
  abstract     = {Weighted automata map input words to numerical values. Applications of weighted automata include formal verification of quantitative properties, as well as text, speech, and image processing. A weighted automaton is defined with respect to a semiring. For the tropical semiring, the weight of a run is the sum of the weights of the transitions taken along the run, and the value of a word is the minimal weight of an accepting run on it. In the 1990s, Krob studied the decidability of problems on rational series defined with respect to the tropical semiring. Rational series are strongly related to weighted automata, and Krob’s results apply to them. In particular, it follows from Krob’s results that the universality problem (that is, deciding whether the values of all words are below some threshold) is decidable for weighted automata defined with respect to the tropical semiring with domain ℕ ∪ {∞}, and that the equality problem is undecidable when the domain is ℤ ∪ {∞}. In this paper we continue the study of the borders of decidability in weighted automata, describe alternative and direct proofs of the above results, and tighten them further. Unlike the proofs of Krob, which are algebraic in their nature, our proofs stay in the terrain of state machines, and the reduction is from the halting problem of a two-counter machine. This enables us to significantly simplify Krob’s reasoning, make the undecidability result accessible to the automata-theoretic community, and strengthen it to apply already to a very simple class of automata: all the states are accepting, there are no initial nor final weights, and all the weights on the transitions are from the set {−1, 0, 1}. The fact that we work directly with the automata enables us to also tighten the decidability results and to show that the universality problem for weighted automata defined with respect to the tropical semiring with domain ℤ ∪ {∞}, and in fact even with domain ℚ≥0 ∪ {∞}, is PSPACE-complete. Our results thus draw a sharper picture about the decidability of decision problems for weighted automata, in both the front of containment vs. universality and the front of the ℕ ∪ {∞} vs. the ℤ ∪ {∞} domains.},
  author       = {Almagor, Shaull and Boker, Udi and Kupferman, Orna},
  location     = {Taipei, Taiwan},
  pages        = {482 -- 491},
  publisher    = {Springer},
  title        = {{What’s decidable about weighted automata?}},
  doi          = {10.1007/978-3-642-24372-1_37},
  volume       = {6996},
  year         = {2011},
}
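
The tropical-semiring semantics stated in prose in the abstract, written out in symbols (the notation is ours).

\documentclass{article}
\usepackage{amsmath}
\begin{document}
% Tropical-semiring semantics of a weighted automaton, per the abstract.
For a run $r = q_0 \xrightarrow{w_1} q_1 \xrightarrow{w_2} \cdots
\xrightarrow{w_n} q_n$ of a weighted automaton $\mathcal{A}$ over the
tropical semiring, the weight of the run and the value of a word $u$ are
\[
  \mathrm{val}(r) = \sum_{i=1}^{n} w_i, \qquad
  \mathcal{A}(u) = \min \,\{ \mathrm{val}(r) : r \text{ is an accepting
  run of } \mathcal{A} \text{ on } u \} ,
\]
and universality asks whether $\mathcal{A}(u)$ is below a given
threshold $\nu$ for all words $u$.
\end{document}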

@inproceedings{3328,
  abstract     = {We report on a generic uni- and bivariate algebraic kernel that is publicly available with CGAL 3.7. It comprises complete and correct, yet efficient, state-of-the-art implementations of operations on polynomials and roots of polynomial systems, and the support to analyze algebraic curves defined by bivariate polynomials. The kernel design is generic, that is, various number types and substeps can be exchanged. It is accompanied by a ready-to-use interface to enable arrangements induced by algebraic curves, which have already been used as the basis for various geometric applications, such as arrangements on Dupin cyclides or the triangulation of algebraic surfaces. We present two novel applications: arrangements of rotated algebraic curves and Boolean set operations on polygons bounded by segments of algebraic curves. We also provide experiments showing that our general implementation is competitive and often clearly outperforms existing implementations that are explicitly tailored for specific types of non-linear curves available in CGAL.},
  author       = {Berberich, Eric and Hemmer, Michael and Kerber, Michael},
  location     = {Paris, France},
  pages        = {179 -- 186},
  publisher    = {ACM},
  title        = {{A generic algebraic kernel for non-linear geometric applications}},
  doi          = {10.1145/1998196.1998224},
  year         = {2011},
}

@inproceedings{3329,
  abstract     = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance μ in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If so, we also seek a preferably simple-looking solution shape P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(n log n)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. An alternative algorithm, based purely on rational arithmetic, answers the same deconstruction problem up to an uncertainty parameter δ, and its running time depends on δ (in addition to the other input parameters: n, μ, and the radius of the disk). If the input shape is found to be approximable, the rational-arithmetic algorithm also computes an approximate solution shape for the problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution shape P with at most one more vertex than a vertex-minimal one. Our study is motivated by applications from two different domains. However, since the offset operation has numerous uses, we anticipate that the reverse question that we study here will be still more broadly applicable. We present results obtained with our implementation of the rational-arithmetic algorithm.},
  author       = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
  booktitle    = {Proceedings of the twenty-seventh annual symposium on Computational geometry},
  location     = {Paris, France},
  pages        = {187 -- 196},
  publisher    = {ACM},
  title        = {{Deconstructing approximate offsets}},
  doi          = {10.1145/1998196.1998225},
  year         = {2011},
}
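
Stated formally, the decision question from the abstract reads as follows (the notation is ours).

\documentclass{article}
\usepackage{amsmath}
\begin{document}
% The offset-deconstruction decision problem from the abstract.
Given a polygonal shape $Q$ with $n$ vertices, a radius $r$, and a
tolerance $\mu$, decide whether there is a polygonal shape $P$ with
\[
  d_H\bigl( Q, \; P \oplus D_r \bigr) \le \mu ,
\]
where $\oplus$ is the Minkowski sum, $D_r$ the disk of radius $r$, and
$d_H$ the Hausdorff distance; if so, compute a preferably simple-looking
witness $P$.
\end{document}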

@inproceedings{3330,
  abstract     = {We consider the problem of approximating all real roots of a square-free polynomial f. Given isolating intervals, our algorithm refines each of them to a width of at most 2^{-L}, that is, each of the roots is approximated to L bits after the binary point. Our method provides a certified answer for arbitrary real polynomials, only requiring finite approximations of the polynomial coefficients, and chooses a suitable working precision adaptively. In this way, we get a correct algorithm that is simple to implement and practically efficient. Our algorithm uses the quadratic interval refinement method; we adapt that method to cope with inaccuracies when evaluating f, without sacrificing its quadratic convergence behavior. We prove a bound on the bit complexity of our algorithm in terms of degree, coefficient size, and discriminant. Our bound improves previous work on integer polynomials by a factor of deg f and essentially matches the best known theoretical bounds on root approximation, which are obtained by very sophisticated algorithms.},
  author       = {Kerber, Michael and Sagraloff, Michael},
  location     = {California, USA},
  pages        = {209 -- 216},
  publisher    = {Springer},
  title        = {{Root refinement for real polynomials}},
  doi          = {10.1145/1993886.1993920},
  year         = {2011},
}
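
A simplified sketch of the task the paper solves: refining an isolating interval of a square-free polynomial to width 2^-L. The sketch uses plain bisection over exact rationals; the paper's quadratic interval refinement with adaptive working precision converges much faster and also handles approximate coefficients.

from fractions import Fraction

def refine_root(coeffs, lo, hi, L):
    """coeffs lists coefficients, lowest degree first; (lo, hi) must
    isolate a simple real root, i.e. f changes sign on the interval."""
    f = lambda x: sum(c * x**k for k, c in enumerate(coeffs))
    lo, hi = Fraction(lo), Fraction(hi)
    assert f(lo) * f(hi) < 0, "interval must isolate a sign change"
    width = Fraction(1, 2**L)
    while hi - lo > width:
        mid = (lo + hi) / 2
        fm = f(mid)
        if fm == 0:
            return mid, mid                 # hit the root exactly
        if f(lo) * fm < 0:
            hi = mid
        else:
            lo = mid
    return lo, hi

# Example: the root of x^2 - 2 in [1, 2], refined to 20 bits.
lo, hi = refine_root([-2, 0, 1], 1, 2, 20)
print(float(lo), float(hi))                 # brackets sqrt(2) to width 2^-20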

