@article{3771,
  abstract     = {The small-sized frugivorous bat Carollia perspicillata is an understory specialist and occurs in a wide range of lowland habitats, tending to be more common in tropical dry or moist forests of South and Central America. Its sister species, Carollia brevicauda, occurs almost exclusively in the Amazon rainforest. A recent phylogeographic study proposed a hypothesis of origin and subsequent diversification for C. perspicillata along the Atlantic coastal forest of Brazil. Additionally, it also found two allopatric clades for C. brevicauda separated by the Amazon Basin. We used cytochrome b gene sequences and a more extensive sampling to test hypotheses related to the origin and diversification of C. perspicillata plus C. brevicauda clade in South America. The results obtained indicate that there are two sympatric evolutionary lineages within each species. In C. perspicillata, one lineage is limited to the Southern Atlantic Forest, whereas the other is widely distributed. Coalescent analysis points to a simultaneous origin for C. perspicillata and C. brevicauda, although no place for the diversification of each species can be firmly suggested. The phylogeographic pattern shown by C. perspicillata is also congruent with the Pleistocene refugia hypothesis as a likely vicariant phenomenon shaping the present distribution of its intraspecific lineages.},
  author       = {Pavan, Ana and Martins, Felipe and Santos, Fabrício and Ditchfield, Albert and Fernandes Redondo, Rodrigo A},
  journal      = {Biological Journal of the Linnean Society},
  number       = {3},
  pages        = {527--539},
  publisher    = {Wiley-Blackwell},
  title        = {{Patterns of diversification in two species of short-tailed bats (Carollia Gray, 1838): the effects of historical fragmentation of Brazilian rainforests}},
  doi          = {10.1111/j.1095-8312.2010.01601.x},
  volume       = {102},
  year         = {2011},
}

@article{3778,
  author       = {Barton, Nicholas H},
  journal      = {Heredity},
  number       = {2},
  pages        = {205--206},
  publisher    = {Nature Publishing Group},
  title        = {{Estimating linkage disequilibria}},
  doi          = {10.1038/hdy.2010.67},
  volume       = {106},
  year         = {2011},
}

@article{3781,
  abstract     = {We bound the difference in length of two curves in terms of their total curvatures and the Fréchet distance. The bound is independent of the dimension of the ambient Euclidean space, it improves upon a bound by Cohen-Steiner and Edelsbrunner, and it generalizes a result by Fáry and Chakerian.},
  author       = {Fasy, Brittany Terese},
  journal      = {Acta Sci. Math. (Szeged)},
  number       = {1-2},
  pages        = {359--367},
  publisher    = {Szegedi Tudományegyetem},
  title        = {{The difference in length of curves in $\mathbb{R}^n$}},
  volume       = {77},
  year         = {2011},
}

@article{3784,
  abstract     = {Advanced stages of Scyllarus phyllosoma larvae were collected by demersal trawling during fishery research surveys in the western Mediterranean Sea in 2003–2005. Nucleotide sequence analysis of the mitochondrial 16S rDNA gene allowed the final-stage phyllosoma of Scyllarus arctus to be identified among these larvae. Its morphology is described and illustrated. This constitutes the second complete description of a Scyllaridae phyllosoma with its specific identity being validated by molecular techniques (the first was S. pygmaeus). These results also solved a long lasting taxonomic anomaly of several species assigned to the ancient genus Phyllosoma Leach, 1814. Detailed examination indicated that the final-stage phyllosoma of S. arctus shows closer affinities with the American scyllarid Scyllarus depressus or with the Australian Scyllarus sp. b (sensu Phillips et al., 1981) than to its sympatric species S. pygmaeus.},
  author       = {Palero, Ferran and Guerao, Guillermo and Clark, Paul and Abello, Pere},
  journal      = {Journal of the Marine Biological Association of the United Kingdom},
  number       = {2},
  pages        = {485--492},
  publisher    = {Cambridge University Press},
  title        = {{Scyllarus arctus (Crustacea: Decapoda: Scyllaridae) final stage phyllosoma identified by DNA analysis, with morphological description}},
  doi          = {10.1017/S0025315410000287},
  volume       = {91},
  year         = {2011},
}

@inbook{3791,
  abstract     = {During the development of multicellular organisms, cell fate specification is followed by the sorting of different cell types into distinct domains from where the different tissues and organs are formed. Cell sorting involves both the segregation of a mixed population of cells with different fates and properties into distinct domains, and the active maintenance of their segregated state. Because of its biological importance and apparent resemblance to fluid segregation in physics, cell sorting was extensively studied by both biologists and physicists over the last decades. Different theories were developed that try to explain cell sorting on the basis of the physical properties of the constituent cells. However, only recently the molecular and cellular mechanisms that control the physical properties driving cell sorting, have begun to be unraveled. In this review, we will provide an overview of different cell-sorting processes in development and discuss how these processes can be explained by the different sorting theories, and how these theories in turn can be connected to the molecular and cellular mechanisms driving these processes.},
  author       = {Krens, Gabriel and Heisenberg, Carl-Philipp J},
  booktitle    = {Forces and Tension in Development},
  editor       = {Labouesse, Michel},
  pages        = {189--213},
  publisher    = {Elsevier},
  title        = {{Cell sorting in development}},
  doi          = {10.1016/B978-0-12-385065-2.00006-2},
  volume       = {95},
  year         = {2011},
}

@inbook{3796,
  abstract     = {We address the problem of covering $\mathbb{R}^n$ with congruent balls, while minimizing the number of balls that contain an average point. Considering the 1-parameter family of lattices defined by stretching or compressing the integer grid in diagonal direction, we give a closed formula for the covering density that depends on the distortion parameter. We observe that our family contains the thinnest lattice coverings in dimensions 2 to 5. We also consider the problem of packing congruent balls in $\mathbb{R}^n$, for which we give a closed formula for the packing density as well. Again we observe that our family contains optimal configurations, this time densest packings in dimensions 2 and 3.},
  author       = {Edelsbrunner, Herbert and Kerber, Michael},
  booktitle    = {Rainbow of Computer Science},
  editor       = {Calude, Cristian and Rozenberg, Grzegorz and Salomaa, Arto},
  pages        = {20--35},
  publisher    = {Springer},
  title        = {{Covering and packing with spheres by diagonal distortion in $\mathbb{R}^n$}},
  doi          = {10.1007/978-3-642-19391-0_2},
  volume       = {6570},
  year         = {2011},
}

@article{3965,
  abstract     = {The elevation function on a smoothly embedded 2-manifold in R-3 reflects the multiscale topography of cavities and protrusions as local maxima. The function has been useful in identifying coarse docking configurations for protein pairs. Transporting the concept from the smooth to the piecewise linear category, this paper describes an algorithm for finding all local maxima. While its worst-case running time is the same as of the algorithm used in prior work, its performance in practice is orders of magnitudes superior. We cast light on this improvement by relating the running time to the total absolute Gaussian curvature of the 2-manifold.},
  author       = {Wang, Bei and Edelsbrunner, Herbert and Morozov, Dmitriy},
  journal      = {Journal of Experimental Algorithmics},
  number       = {2.2},
  pages        = {1--13},
  publisher    = {ACM},
  title        = {{Computing elevation maxima by searching the Gauss sphere}},
  doi          = {10.1145/1963190.1970375},
  volume       = {16},
  year         = {2011},
}

@article{3303,
  abstract     = {Biological traits result in part from interactions between different genetic loci. This can lead to sign epistasis, in which a beneficial adaptation involves a combination of individually deleterious or neutral mutations; in this case, a population must cross a “fitness valley” to adapt. Recombination can assist this process by combining mutations from different individuals or retard it by breaking up the adaptive combination. Here, we analyze the simplest fitness valley, in which an adaptation requires one mutation at each of two loci to provide a fitness benefit. We present a theoretical analysis of the effect of recombination on the valley-crossing process across the full spectrum of possible parameter regimes. We find that low recombination rates can speed up valley crossing relative to the asexual case, while higher recombination rates slow down valley crossing, with the transition between the two regimes occurring when the recombination rate between the loci is approximately equal to the selective advantage provided by the adaptation. In large populations, if the recombination rate is high and selection against single mutants is substantial, the time to cross the valley grows exponentially with population size, effectively meaning that the population cannot acquire the adaptation. Recombination at the optimal (low) rate can reduce the valley-crossing time by up to several orders of magnitude relative to that in an asexual population. },
  author       = {Weissman, Daniel and Feldman, Marcus and Fisher, Daniel},
  journal      = {Genetics},
  number       = {4},
  pages        = {1389--1410},
  publisher    = {Genetics Society of America},
  title        = {{The rate of fitness-valley crossing in sexual populations}},
  doi          = {10.1534/genetics.110.123240},
  volume       = {186},
  year         = {2010},
}

@inproceedings{10908,
  abstract     = {We present ABC, a software tool for automatically computing symbolic upper bounds on the number of iterations of nested program loops. The system combines static analysis of programs with symbolic summation techniques to derive loop invariant relations between program variables. Iteration bounds are obtained from the inferred invariants, by replacing variables with bounds on their greatest values. We have successfully applied ABC to a large number of examples. The derived symbolic bounds express non-trivial polynomial relations over loop variables. We also report on results to automatically infer symbolic expressions over harmonic numbers as upper bounds on loop iteration counts.},
  author       = {Blanc, Régis and Henzinger, Thomas A and Hottelier, Thibaud and Kovács, Laura},
  booktitle    = {Logic for Programming, Artificial Intelligence, and Reasoning},
  editor       = {Clarke, Edmund M and Voronkov, Andrei},
  isbn         = {9783642175107},
  issn         = {1611-3349},
  location     = {Dakar, Senegal},
  pages        = {103--118},
  publisher    = {Springer Nature},
  title        = {{ABC: Algebraic Bound Computation for loops}},
  doi          = {10.1007/978-3-642-17511-4_7},
  volume       = {6355},
  year         = {2010},
}

@inproceedings{10909,
  abstract     = {We address the problem of localizing homology classes, namely, finding the cycle representing a given class with the most concise geometric measure. We focus on the volume measure, that is, the 1-norm of a cycle. Two main results are presented. First, we prove the problem is NP-hard to approximate within any constant factor. Second, we prove that for homology of dimension two or higher, the problem is NP-hard to approximate even when the Betti number is O(1). A side effect is the inapproximability of the problem of computing the nonbounding cycle with the smallest volume, and computing cycles representing a homology basis with the minimal total volume. We also discuss other geometric measures (diameter and radius) and show their disadvantages in homology localization. Our work is restricted to homology over the $\mathbb{Z}_2$ field.},
  author       = {Chen, Chao and Freedman, Daniel},
  booktitle    = {Proceedings of the 2010 Annual ACM-SIAM Symposium on Discrete Algorithms},
  location     = {Austin, TX, United States},
  pages        = {1594--1604},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Hardness results for homology localization}},
  doi          = {10.1137/1.9781611973075.129},
  year         = {2010},
}

@article{2409,
  abstract     = {Background: The availability of many gene alignments with overlapping taxon sets raises the question of which strategy is the best to infer species phylogenies from multiple gene information. Methods and programs abound that use the gene alignment in different ways to reconstruct the species tree. In particular, different methods combine the original data at different points along the way from the underlying sequences to the final tree. Accordingly, they are classified into superalignment, supertree and medium-level approaches. Here, we present a simulation study to compare different methods from each of these three approaches.

Results: We observe that superalignment methods usually outperform the other approaches over a wide range of parameters including sparse data and gene-specific evolutionary parameters. In the presence of high incongruency among gene trees, however, other combination methods show better performance than the superalignment approach. Surprisingly, some supertree and medium-level methods exhibit, on average, worse results than a single gene phylogeny with complete taxon information.

Conclusions: For some methods, using the reconstructed gene tree as an estimation of the species tree is superior to the combination of incomplete information. Superalignment usually performs best since it is less susceptible to stochastic error. Supertree methods can outperform superalignment in the presence of gene-tree conflict.},
  author       = {Kupczok, Anne and Schmidt, Heiko and Von Haeseler, Arndt},
  journal      = {Algorithms for Molecular Biology},
  number       = {1},
  publisher    = {BioMed Central},
  title        = {{Accuracy of phylogeny reconstruction methods combining overlapping gene data sets}},
  doi          = {10.1186/1748-7188-5-37},
  volume       = {5},
  year         = {2010},
}

@article{474,
  abstract     = {Classical models of gene flow fail in three ways: they cannot explain large-scale patterns; they predict much more genetic diversity than is observed; and they assume that loosely linked genetic loci evolve independently. We propose a new model that deals with these problems. Extinction events kill some fraction of individuals in a region. These are replaced by offspring from a small number of parents, drawn from the preexisting population. This model of evolution forwards in time corresponds to a backwards model, in which ancestral lineages jump to a new location if they are hit by an event, and may coalesce with other lineages that are hit by the same event. We derive an expression for the identity in allelic state, and show that, over scales much larger than the largest event, this converges to the classical value derived by Wright and Malécot. However, rare events that cover large areas cause low genetic diversity, large-scale patterns, and correlations in ancestry between unlinked loci.},
  author       = {Barton, Nicholas H and Kelleher, Jerome and Etheridge, Alison},
  journal      = {Evolution},
  number       = {9},
  pages        = {2701--2715},
  publisher    = {Wiley-Blackwell},
  title        = {{A new model for extinction and recolonization in two dimensions: Quantifying phylogeography}},
  doi          = {10.1111/j.1558-5646.2010.01019.x},
  volume       = {64},
  year         = {2010},
}

@inproceedings{488,
  abstract     = {Streaming string transducers [1] define (partial) functions from input strings to output strings. A streaming string transducer makes a single pass through the input string and uses a finite set of variables that range over strings from the output alphabet. At every step, the transducer processes an input symbol, and updates all the variables in parallel using assignments whose right-hand-sides are concatenations of output symbols and variables with the restriction that a variable can be used at most once in a right-hand-side expression. It has been shown that streaming string transducers operating on strings over infinite data domains are of interest in algorithmic verification of list-processing programs, as they lead to PSPACE decision procedures for checking pre/post conditions and for checking semantic equivalence, for a well-defined class of heap-manipulating programs. In order to understand the theoretical expressiveness of streaming transducers, we focus on streaming transducers processing strings over finite alphabets, given the existence of a robust and well-studied class of ``regular'' transductions for this case. Such regular transductions can be defined either by two-way deterministic finite-state transducers, or using a logical MSO-based characterization. Our main result is that the expressiveness of streaming string transducers coincides exactly with this class of regular transductions.},
  author       = {Alur, Rajeev and Cerny, Pavol},
  booktitle    = {IARCS Annual Conference on Foundations of Software Technology and Theoretical Computer Science (FSTTCS 2010)},
  location     = {Chennai, India},
  pages        = {1--12},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Expressiveness of streaming string transducers}},
  doi          = {10.4230/LIPIcs.FSTTCS.2010.1},
  volume       = {8},
  year         = {2010},
}

@inproceedings{489,
  abstract     = {Graph games of infinite length are a natural model for open reactive processes: one player represents the controller, trying to ensure a given specification, and the other represents a hostile environment. The evolution of the system depends on the decisions of both players, supplemented by chance. In this work, we focus on the notion of randomised strategy. More specifically, we show that three natural definitions may lead to very different results: in the most general cases, an almost-surely winning situation may become almost-surely losing if the player is only allowed to use a weaker notion of strategy. In more reasonable settings, translations exist, but they require infinite memory, even in simple cases. Finally, some traditional problems becomes undecidable for the strongest type of strategies.},
  author       = {Cristau, Julien and David, Claire and Horn, Florian},
  booktitle    = {Proceedings of GandALF 2010},
  location     = {Minori, Amalfi Coast, Italy},
  pages        = {30--39},
  publisher    = {Open Publishing Association},
  title        = {{How do we remember the past in randomised strategies?}},
  doi          = {10.4204/EPTCS.25.7},
  volume       = {25},
  year         = {2010},
}

@article{533,
  abstract     = {Any programming error that can be revealed before compiling a program saves precious time for the programmer. While integrated development environments already do a good job by detecting, e.g., data-flow abnormalities, current static analysis tools suffer from false positives (``noise'') or require strong user interaction. We propose to avoid this deficiency by defining a new class of errors. A program fragment is doomed if its execution will inevitably fail, regardless of which state it is started in. We use a formal verification method to identify such errors fully automatically and, most significantly, without producing noise. We report on experiments with a prototype tool.},
  author       = {Hoenicke, Jochen and Leino, Kari and Podelski, Andreas and Schäf, Martin and Wies, Thomas},
  journal      = {Formal Methods in System Design},
  number       = {2-3},
  pages        = {171--199},
  publisher    = {Springer},
  title        = {{Doomed program points}},
  doi          = {10.1007/s10703-010-0102-0},
  volume       = {37},
  year         = {2010},
}

@misc{5388,
  abstract     = {We present an algorithmic method for the synthesis of concurrent programs that are optimal with respect to quantitative performance measures. The input consists of a sequential sketch, that is, a program that does not contain synchronization constructs, and of a parametric performance model that assigns costs to actions such as locking, context switching, and idling. The quantitative synthesis problem is to automatically introduce synchronization constructs into the sequential sketch so that both correctness is guaranteed and worst-case (or average-case) performance is optimized. Correctness is formalized as race freedom or linearizability.

We show that for worst-case performance, the problem can be modeled
as a 2-player graph game with quantitative (limit-average) objectives, and
for average-case performance, as a 2 1/2-player graph game (with probabilistic transitions). In both cases, the optimal correct program is derived from an optimal strategy in the corresponding quantitative game. We prove that the respective game problems are computationally expensive (NP-complete), and present several techniques that overcome the theoretical difficulty in cases of concurrent programs of practical interest.

We have implemented a prototype tool and used it for the automatic synthesis of programs that access a concurrent list. For certain parameter values, our method automatically synthesizes various classical synchronization schemes for implementing a concurrent list, such as fine-grained locking or a lazy algorithm. For other parameter values, a new, hybrid synchronization style is synthesized, which uses both the lazy approach and coarse-grained locks (instead of standard fine-grained locks). The trade-off occurs because while fine-grained locking tends to decrease the cost that is due to waiting for locks, it increases cache size requirements.},
  author       = {Chatterjee, Krishnendu and Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun and Singh, Rohit},
  issn         = {2664-1690},
  pages        = {17},
  publisher    = {IST Austria},
  title        = {{Quantitative synthesis for concurrent programs}},
  doi          = {10.15479/AT:IST-2010-0004},
  year         = {2010},
}

@misc{5389,
  abstract     = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of “fit” or “desirability.” We extend the simulation preorder to the quantitative setting, by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
  author       = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
  issn         = {2664-1690},
  pages        = {24},
  publisher    = {IST Austria},
  title        = {{Simulation distances}},
  doi          = {10.15479/AT:IST-2010-0003},
  year         = {2010},
}

@misc{5390,
  abstract     = {The class of ω-regular languages provides a robust specification language in verification. Every ω-regular condition can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens “eventually.” Two main strengths of the classical, infinite-limit formulation of liveness are robustness (independence from the granularity of transitions) and simplicity (abstraction of complicated time bounds). However, the classical liveness formulation suffers from the drawback that the time until something good happens may be unbounded. A stronger formulation of liveness, so-called finitary liveness, overcomes this drawback, while still retaining robustness and simplicity. Finitary liveness requires that there exists an unknown, fixed bound b such that something good happens within b transitions. In this work we consider the finitary parity and Streett (fairness) conditions. We present the topological, automata-theoretic and logical characterization of finitary languages defined by finitary parity and Streett conditions. We (a) show that the finitary parity and Streett languages are Σ2-complete; (b) present a complete characterization of the expressive power of various classes of automata with finitary and infinitary conditions (in particular we show that non-deterministic finitary parity and Streett automata cannot be determinized to deterministic finitary parity or Streett automata); and (c) show that the languages defined by non-deterministic finitary parity automata exactly characterize the star-free fragment of ωB-regular languages.},
  author       = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
  issn         = {2664-1690},
  pages        = {21},
  publisher    = {IST Austria},
  title        = {{Topological, automata-theoretic and logical characterization of finitary languages}},
  doi          = {10.15479/AT:IST-2010-0002},
  year         = {2010},
}

@misc{5391,
  abstract     = {Concurrent data structures with fine-grained synchronization are notoriously difficult to implement correctly. The difficulty of reasoning about these implementations does not stem from the number of variables or the program size, but rather from the large number of possible interleavings. These implementations are therefore prime candidates for model checking. We introduce an algorithm for verifying linearizability of singly-linked heap-based concurrent data structures. We consider a model consisting of an unbounded heap where each node consists of an element from an unbounded data domain, with a restricted set of operations for testing and updating pointers and data elements. Our main result is that linearizability is decidable for programs that invoke a fixed number of methods, possibly in parallel. This decidable fragment covers many of the common implementation techniques — fine-grained locking, lazy synchronization, and lock-free synchronization. We also show how the technique can be used to verify optimistic implementations with the help of programmer annotations. We developed a verification tool CoLT and evaluated it on a representative sample of Java implementations of the concurrent set data structure. The tool verified linearizability of a number of implementations, found a known error in a lock-free implementation and proved that the corrected version is linearizable.},
  author       = {Cerny, Pavol and Radhakrishna, Arjun and Zufferey, Damien and Chaudhuri, Swarat and Alur, Rajeev},
  issn         = {2664-1690},
  pages        = {27},
  publisher    = {IST Austria},
  title        = {{Model checking of linearizability of concurrent list implementations}},
  doi          = {10.15479/AT:IST-2010-0001},
  year         = {2010},
}

@inbook{14983,
  abstract     = {This chapter tackles a difficult challenge: presenting signal processing material to non-experts. This chapter is meant to be comprehensible to people who have some math background, including a course in linear algebra and basic statistics, but do not specialize in mathematics, engineering, or related fields. Some formulas assume the reader is familiar with matrices and basic matrix operations, but not more advanced material. Furthermore, we tried to make the chapter readable even if you skip the formulas. Nevertheless, we include some simple methods to demonstrate the basics of adaptive data processing, then we proceed with some advanced methods that are fundamental in adaptive signal processing, and are likely to be useful in a variety of applications. The advanced algorithms are also online available [30]. In the second part, these techniques are applied to some real-world BCI data.},
  author       = {Schlögl, Alois and Vidaurre, Carmen and Müller, Klaus-Robert},
  booktitle    = {Brain-Computer Interfaces},
  editor       = {Graimann, Bernhard and Pfurtscheller, Gert and Allison, Brendan},
  isbn         = {9783642020902},
  issn         = {1612-3018},
  pages        = {331--355},
  publisher    = {Springer},
  title        = {{Adaptive Methods in BCI Research - An Introductory Tutorial}},
  doi          = {10.1007/978-3-642-02091-9_18},
  year         = {2010},
}

