@inproceedings{488,
  abstract     = {Streaming string transducers [1] define (partial) functions from input strings to output strings. A streaming string transducer makes a single pass through the input string and uses a finite set of variables that range over strings from the output alphabet. At every step, the transducer processes an input symbol, and updates all the variables in parallel using assignments whose right-hand-sides are concatenations of output symbols and variables with the restriction that a variable can be used at most once in a right-hand-side expression. It has been shown that streaming string transducers operating on strings over infinite data domains are of interest in algorithmic verification of list-processing programs, as they lead to PSPACE decision procedures for checking pre/post conditions and for checking semantic equivalence, for a well-defined class of heap-manipulating programs. In order to understand the theoretical expressiveness of streaming transducers, we focus on streaming transducers processing strings over finite alphabets, given the existence of a robust and well-studied class of ``regular'' transductions for this case. Such regular transductions can be defined either by two-way deterministic finite-state transducers, or using a logical MSO-based characterization. Our main result is that the expressiveness of streaming string transducers coincides exactly with this class of regular transductions.},
  author       = {Alur, Rajeev and Cerny, Pavol},
  booktitle    = {Proceedings of FSTTCS 2010},
  location     = {Chennai, India},
  pages        = {1--12},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Expressiveness of streaming string transducers}},
  doi          = {10.4230/LIPIcs.FSTTCS.2010.1},
  volume       = {8},
  year         = {2010},
}

@inproceedings{489,
  abstract     = {Graph games of infinite length are a natural model for open reactive processes: one player represents the controller, trying to ensure a given specification, and the other represents a hostile environment. The evolution of the system depends on the decisions of both players, supplemented by chance. In this work, we focus on the notion of randomised strategy. More specifically, we show that three natural definitions may lead to very different results: in the most general cases, an almost-surely winning situation may become almost-surely losing if the player is only allowed to use a weaker notion of strategy. In more reasonable settings, translations exist, but they require infinite memory, even in simple cases. Finally, some traditional problems become undecidable for the strongest type of strategies.},
  author       = {Cristau, Julien and David, Claire and Horn, Florian},
  booktitle    = {Proceedings of GandALF 2010},
  location     = {Minori, Amalfi Coast, Italy},
  pages        = {30--39},
  publisher    = {Open Publishing Association},
  title        = {{How do we remember the past in randomised strategies?}},
  doi          = {10.4204/EPTCS.25.7},
  volume       = {25},
  year         = {2010},
}

@article{533,
  abstract     = {Any programming error that can be revealed before compiling a program saves precious time for the programmer. While integrated development environments already do a good job by detecting, e.g., data-flow abnormalities, current static analysis tools suffer from false positives (``noise'') or require strong user interaction. We propose to avoid this deficiency by defining a new class of errors. A program fragment is doomed if its execution will inevitably fail, regardless of which state it is started in. We use a formal verification method to identify such errors fully automatically and, most significantly, without producing noise. We report on experiments with a prototype tool.},
  author       = {Hoenicke, Jochen and Leino, K. Rustan M. and Podelski, Andreas and Schäf, Martin and Wies, Thomas},
  journal      = {Formal Methods in System Design},
  number       = {2-3},
  pages        = {171--199},
  publisher    = {Springer},
  title        = {{Doomed program points}},
  doi          = {10.1007/s10703-010-0102-0},
  volume       = {37},
  year         = {2010},
}

@techreport{5388,
  abstract     = {We present an algorithmic method for the synthesis of concurrent programs that are optimal with respect to quantitative performance measures. The input consists of a sequential sketch, that is, a program that does not contain synchronization constructs, and of a parametric performance model that assigns costs to actions such as locking, context switching, and idling. The quantitative synthesis problem is to automatically introduce synchronization constructs into the sequential sketch so that both correctness is guaranteed and worst-case (or average-case) performance is optimized. Correctness is formalized as race freedom or linearizability.

We show that for worst-case performance, the problem can be modeled
as a 2-player graph game with quantitative (limit-average) objectives, and
for average-case performance, as a 2 1/2-player graph game (with probabilistic transitions). In both cases, the optimal correct program is derived from an optimal strategy in the corresponding quantitative game. We prove that the respective game problems are computationally expensive (NP-complete), and present several techniques that overcome the theoretical difficulty in cases of concurrent programs of practical interest.

We have implemented a prototype tool and used it for the automatic synthesis of programs that access a concurrent list. For certain parameter values, our method automatically synthesizes various classical synchronization schemes for implementing a concurrent list, such as fine-grained locking or a lazy algorithm. For other parameter values, a new, hybrid synchronization style is synthesized, which uses both the lazy approach and coarse-grained locks (instead of standard fine-grained locks). The trade-off occurs because while fine-grained locking tends to decrease the cost that is due to waiting for locks, it increases cache size requirements.},
  author       = {Chatterjee, Krishnendu and Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun and Singh, Rohit},
  issn         = {2664-1690},
  pages        = {17},
  institution  = {IST Austria},
  title        = {{Quantitative synthesis for concurrent programs}},
  doi          = {10.15479/AT:IST-2010-0004},
  year         = {2010},
}

@techreport{5389,
  abstract     = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of “fit” or “desirability.” We extend the simulation preorder to the quantitative setting, by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
  author       = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
  issn         = {2664-1690},
  pages        = {24},
  institution  = {IST Austria},
  title        = {{Simulation distances}},
  doi          = {10.15479/AT:IST-2010-0003},
  year         = {2010},
}

@techreport{5390,
  abstract     = {The class of ω-regular languages provides a robust specification language in verification. Every ω-regular condition can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens “eventually.” Two main strengths of the classical, infinite-limit formulation of liveness are robustness (independence from the granularity of transitions) and simplicity (abstraction of complicated time bounds). However, the classical liveness formulation suffers from the drawback that the time until something good happens may be unbounded. A stronger formulation of liveness, so-called finitary liveness, overcomes this drawback, while still retaining robustness and simplicity. Finitary liveness requires that there exists an unknown, fixed bound b such that something good happens within b transitions. In this work we consider the finitary parity and Streett (fairness) conditions. We present the topological, automata-theoretic and logical characterization of finitary languages defined by finitary parity and Streett conditions. We (a) show that the finitary parity and Streett languages are Σ2-complete; (b) present a complete characterization of the expressive power of various classes of automata with finitary and infinitary conditions (in particular we show that non-deterministic finitary parity and Streett automata cannot be determinized to deterministic finitary parity or Streett automata); and (c) show that the languages defined by non-deterministic finitary parity automata exactly characterize the star-free fragment of ωB-regular languages.},
  author       = {Chatterjee, Krishnendu and Fijalkow, Nathanaël},
  issn         = {2664-1690},
  pages        = {21},
  institution  = {IST Austria},
  title        = {{Topological, automata-theoretic and logical characterization of finitary languages}},
  doi          = {10.15479/AT:IST-2010-0002},
  year         = {2010},
}

@techreport{5391,
  abstract     = {Concurrent data structures with fine-grained synchronization are notoriously difficult to implement correctly. The difficulty of reasoning about these implementations does not stem from the number of variables or the program size, but rather from the large number of possible interleavings. These implementations are therefore prime candidates for model checking. We introduce an algorithm for verifying linearizability of singly-linked heap-based concurrent data structures. We consider a model consisting of an unbounded heap where each node consists of an element from an unbounded data domain, with a restricted set of operations for testing and updating pointers and data elements. Our main result is that linearizability is decidable for programs that invoke a fixed number of methods, possibly in parallel. This decidable fragment covers many of the common implementation techniques — fine-grained locking, lazy synchronization, and lock-free synchronization. We also show how the technique can be used to verify optimistic implementations with the help of programmer annotations. We developed a verification tool CoLT and evaluated it on a representative sample of Java implementations of the concurrent set data structure. The tool verified linearizability of a number of implementations, found a known error in a lock-free implementation and proved that the corrected version is linearizable.},
  author       = {Cerny, Pavol and Radhakrishna, Arjun and Zufferey, Damien and Chaudhuri, Swarat and Alur, Rajeev},
  issn         = {2664-1690},
  pages        = {27},
  institution  = {IST Austria},
  title        = {{Model checking of linearizability of concurrent list implementations}},
  doi          = {10.15479/AT:IST-2010-0001},
  year         = {2010},
}

@incollection{14983,
  abstract     = {This chapter tackles a difficult challenge: presenting signal processing material to non-experts. This chapter is meant to be comprehensible to people who have some math background, including a course in linear algebra and basic statistics, but do not specialize in mathematics, engineering, or related fields. Some formulas assume the reader is familiar with matrices and basic matrix operations, but not more advanced material. Furthermore, we tried to make the chapter readable even if you skip the formulas. Nevertheless, we include some simple methods to demonstrate the basics of adaptive data processing, then we proceed with some advanced methods that are fundamental in adaptive signal processing, and are likely to be useful in a variety of applications. The advanced algorithms are also available online [30]. In the second part, these techniques are applied to some real-world BCI data.},
  author       = {Schlögl, Alois and Vidaurre, Carmen and Müller, Klaus-Robert},
  booktitle    = {Brain-Computer Interfaces},
  editor       = {Graimann, Bernhard and Pfurtscheller, Gert and Allison, Brendan},
  isbn         = {9783642020902},
  issn         = {1612-3018},
  pages        = {331--355},
  publisher    = {Springer},
  title        = {{Adaptive Methods in BCI Research - An Introductory Tutorial}},
  doi          = {10.1007/978-3-642-02091-9_18},
  year         = {2010},
}

@misc{9764,
  author       = {Rosas, Ulises and Barton, Nicholas H and Copsey, Lucy and Barbier De Reuille, Pierre and Coen, Enrico},
  title        = {{Heterosis and the drift load}},
  publisher    = {Public Library of Science},
  doi          = {10.1371/journal.pbio.1000429.s003},
  year         = {2010},
}

@article{3498,
  abstract     = {Purpose
Calcifying tendinitis is a common condition of the shoulder. In many cases, arthroscopic reduction in the deposit is indicated. The localization of the deposit is sometimes challenging and time-consuming. Pre-operative ultrasound (US)-guided needle placement in the deposit and pre-operative US marking of the deposit at the skin with a ballpoint are described and recommended methods to alleviate the procedure without using ionizing radiation by fluoroscopy.
Methods
Intra-operative sonography of the shoulder is introduced as a new method to localize the calcific deposit with high accuracy. After standard arthroscopic bursectomy, the surgeon performs an ultrasound examination under sterile conditions to localize the deposits. A ventral longitudinal US section is recommended, and the upper arm is rotated until the deposit is visible. Subsequently, perpendicular to the skin at the position of the transducer, a needle is introduced under arthroscopic and ultrasound visualization to puncture the deposit.
Results
The presence of snow-white crystals at the tip of the needle proves the exact localization. Consecutively, the curettage can be accomplished. Another intra-operative sonography evaluates possible calcific remnants and the tendon structure.
Conclusion
This new technique may alleviate arthroscopic calcific deposit curettage by visualizing the deposit without using ionizing radiation. Additionally, soft tissue damage due to decreased number of punctures to detect the deposit may be achieved. Both factors may contribute to reduced operation time.},
  author       = {Sabeti-Aschraf, M. and Gonano, C. and Nemecek, E. and Cichocki, Lisa and Schueller-Weidekamm, C.},
  journal      = {Knee Surgery, Sports Traumatology, Arthroscopy},
  number       = {12},
  pages        = {1792--1794},
  publisher    = {Springer},
  title        = {{Intra-operative ultrasound facilitates the localization of the calcific deposit during arthroscopic treatment of calcifying tendinitis}},
  doi          = {10.1007/s00167-010-1227-9},
  volume       = {18},
  year         = {2010},
}

@article{3604,
  abstract     = {We investigated temporal changes in hybridization and introgression between native red deer (Cervus elaphus) and invasive Japanese sika (Cervus nippon) on the Kintyre Peninsula, Scotland, over 15 years, through analysis of 1513 samples of deer at 20 microsatellite loci and a mtDNA marker. We found no evidence that either the proportion of recent hybrids, or the levels of introgression had changed over the study period. Nevertheless, in one population where the two species have been in contact since ∼1970, 44% of individuals sampled during the study were hybrids. This suggests that hybridization between these species can proceed fairly rapidly. By analysing the number of alleles that have introgressed from polymorphic red deer into the genetically homogenous sika population, we reconstructed the haplotypes of red deer alleles introduced by backcrossing. Five separate hybridization events could account for all the recently hybridized sika-like individuals found across a large section of the Peninsula. Although we demonstrate that low rates of F1 hybridization can lead to substantial introgression, the progress of hybridization and introgression appears to be unpredictable over the short timescales.},
  author       = {Senn, Helen and Goodman, Simon and Swanson, Graeme and Barton, Nicholas H and Pemberton, Josephine},
  journal      = {Molecular Ecology},
  number       = {5},
  pages        = {910--924},
  publisher    = {Wiley-Blackwell},
  title        = {{Investigating temporal changes in hybridisation and introgression between invasive sika (Cervus nippon) and native red deer (Cervus elaphus) on the Kintyre Peninsula, Scotland}},
  doi          = {10.1111/j.1365-294X.2009.04497.x},
  volume       = {19},
  year         = {2010},
}

@article{3718,
  abstract     = {Long-term depression (LTD) is a form of synaptic plasticity that may contribute to information storage in the central nervous system. Here we report that LTD can be elicited in layer 5 pyramidal neurons of the rat prefrontal cortex by pairing low frequency stimulation with a modest postsynaptic depolarization. The induction of LTD required the activation of both metabotropic glutamate receptors of the mGlu1 subtype and voltage-sensitive Ca(2+) channels (VSCCs) of the T/R, P/Q and N types, leading to the stimulation of intracellular inositol trisphosphate (IP3) receptors by IP3 and Ca(2+). The subsequent release of Ca(2+) from intracellular stores activated the protein phosphatase cascade involving calcineurin and protein phosphatase 1. The activation of purinergic P2Y(1) receptors blocked LTD. This effect was prevented by P2Y(1) receptor antagonists and was absent in mice lacking P2Y(1) but not P2Y(2) receptors. We also found that activation of P2Y(1) receptors inhibits Ca(2+) transients via VSCCs in the apical dendrites and spines of pyramidal neurons. In addition, we show that the release of ATP under hypoxia is able to inhibit LTD by acting on postsynaptic P2Y(1) receptors. In conclusion, these data suggest that the reduction of Ca(2+) influx via VSCCs caused by the activation of P2Y(1) receptors by ATP is the possible mechanism for the inhibition of LTD in prefrontal cortex.},
  author       = {Guzmán, José and Schmidt, Hartmut and Franke, Heike and Krügel, Ute and Eilers, Jens and Illes, Peter and Gerevich, Zoltan},
  journal      = {Neuropharmacology},
  number       = {6},
  pages        = {406--415},
  publisher    = {Elsevier},
  title        = {{P2Y1 receptors inhibit long-term depression in the prefrontal cortex}},
  doi          = {10.1016/j.neuropharm.2010.05.013},
  volume       = {59},
  year         = {2010},
}

@inproceedings{3719,
  abstract     = {The induction of a signaling pathway is characterized by transient complex formation and mutual posttranslational modification of proteins. To faithfully capture this combinatorial process in a mathematical model is an important challenge in systems biology. Exploiting the limited context on which most binding and modification events are conditioned, attempts have been made to reduce the combinatorial complexity by quotienting the reachable set of molecular species, into species aggregates while preserving the deterministic semantics of the thermodynamic limit. Recently we proposed a quotienting that also preserves the stochastic semantics and that is complete in the sense that the semantics of individual species can be recovered from the aggregate semantics. In this paper we prove that this quotienting yields a sufficient condition for weak lumpability and that it gives rise to a backward Markov bisimulation between the original and aggregated transition system. We illustrate the framework on a case study of the EGF/insulin receptor crosstalk.},
  author       = {Feret, Jérôme and Henzinger, Thomas A and Koeppl, Heinz and Petrov, Tatjana},
  booktitle    = {Proceedings of MeCBIC 2010},
  location     = {Jena, Germany},
  pages        = {142--161},
  publisher    = {Open Publishing Association},
  title        = {{Lumpability abstractions of rule-based systems}},
  volume       = {40},
  year         = {2010},
}

@article{3772,
  author       = {Barton, Nicholas H},
  title        = {{Understanding adaptation in large populations}},
  journal      = {PLoS Genetics},
  volume       = {6},
  number       = {6},
  publisher    = {Public Library of Science},
  doi          = {10.1371/journal.pgen.1000987},
  year         = {2010},
}

@article{3773,
  abstract     = {If distinct biological species are to coexist in sympatry, they must be reproductively isolated and must exploit different limiting resources. A two-niche Levene model is analysed, in which habitat preference and survival depend on underlying additive traits. The population genetics of preference and viability are equivalent. However, there is a linear trade-off between the chances of settling in either niche, whereas viabilities may be constrained arbitrarily. With a convex trade-off, a sexual population evolves a single generalist genotype, whereas with a concave trade-off, disruptive selection favours maximal variance. A pure habitat preference evolves to global linkage equilibrium if mating occurs in a single pool, but remarkably, evolves to pairwise linkage equilibrium within niches if mating is within those niches--independent of the genetics. With a concave trade-off, the population shifts sharply between a unimodal distribution with high gene flow and a bimodal distribution with strong isolation, as the underlying genetic variance increases. However, these alternative states are only simultaneously stable for a narrow parameter range. A sharp threshold is only seen if survival in the 'wrong' niche is low; otherwise, strong isolation is impossible. Gene flow from divergent demes makes speciation much easier in parapatry than in sympatry.},
  author       = {Barton, Nicholas H},
  journal      = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
  number       = {1547},
  pages        = {1825--1840},
  publisher    = {Royal Society},
  title        = {{What role does natural selection play in speciation?}},
  doi          = {10.1098/rstb.2010.0001},
  volume       = {365},
  year         = {2010},
}

@article{3774,
  abstract     = {1. Hybridisation with an invasive species has the potential to alter the phenotype and hence the ecology of a native counterpart. 2. Here data from populations of native red deer Cervus elaphus and invasive sika deer Cervus nippon in Scotland is used to assess the extent to which hybridisation between them is causing phenotypic change. This is done by regression of phenotypic traits against genetic hybrid scores. 3. Hybridisation is causing increases in the body weight of sika-like deer and decreases in the body weight of red-like females. Hybridisation is causing increases in jaw length and increases in incisor arcade breadth in sika-like females. Hybridisation is also causing decreases in incisor arcade breadth in red-like females. 4. There is currently no evidence that hybridisation is causing changes in the kidney fat weight or pregnancy rates of either population. 5. Increased phenotypic similarity between the two species is likely to lead to further hybridisation. The ecological consequences of this are difficult to predict.},
  author       = {Senn, Helen and Swanson, Graeme and Goodman, Simon and Barton, Nicholas H and Pemberton, Josephine},
  journal      = {Journal of Animal Ecology},
  number       = {2},
  pages        = {414--425},
  publisher    = {Wiley-Blackwell},
  title        = {{Phenotypic correlates of hybridisation between red and sika deer (genus Cervus)}},
  doi          = {10.1111/j.1365-2656.2009.01633.x},
  volume       = {79},
  year         = {2010},
}

@article{3776,
  abstract     = {The prevalence of recombination in eukaryotes poses one of the most puzzling questions in biology. The most compelling general explanation is that recombination facilitates selection by breaking down the negative associations generated by random drift (i.e. Hill-Robertson interference, HRI). I classify the effects of HRI owing to: deleterious mutation, balancing selection and selective sweeps on: neutral diversity, rates of adaptation and the mutation load. These effects are mediated primarily by the density of deleterious mutations and of selective sweeps. Sequence polymorphism and divergence suggest that these rates may be high enough to cause significant interference even in genomic regions of high recombination. However, neither seems able to generate enough variance in fitness to select strongly for high rates of recombination. It is plausible that spatial and temporal fluctuations in selection generate much more fitness variance, and hence selection for recombination, than can be explained by uniformly deleterious mutations or species-wide selective sweeps.},
  author       = {Barton, Nicholas H},
  journal      = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
  number       = {1552},
  pages        = {2559--2569},
  publisher    = {Royal Society},
  title        = {{Genetic linkage and natural selection}},
  doi          = {10.1098/rstb.2010.0106},
  volume       = {365},
  year         = {2010},
}

@article{3777,
  abstract     = {Under the classical view, selection depends more or less directly on mutation: standing genetic variance is maintained by a balance between selection and mutation, and adaptation is fuelled by new favourable mutations. Recombination is favoured if it breaks negative associations among selected alleles, which interfere with adaptation. Such associations may be generated by negative epistasis, or by random drift (leading to the Hill-Robertson effect). Both deterministic and stochastic explanations depend primarily on the genomic mutation rate, U. This may be large enough to explain high recombination rates in some organisms, but seems unlikely to be so in general. Random drift is a more general source of negative linkage disequilibria, and can cause selection for recombination even in large populations, through the chance loss of new favourable mutations. The rate of species-wide substitutions is much too low to drive this mechanism, but local fluctuations in selection, combined with gene flow, may suffice. These arguments are illustrated by comparing the interaction between good and bad mutations at unlinked loci under the infinitesimal model.},
  author       = {Barton, Nicholas H},
  journal      = {Philosophical Transactions of the Royal Society of London. Series B, Biological Sciences},
  number       = {1544},
  pages        = {1281--1294},
  publisher    = {Royal Society},
  title        = {{Mutation and the evolution of recombination}},
  doi          = {10.1098/rstb.2009.0320},
  volume       = {365},
  year         = {2010},
}

@article{3779,
  author       = {Rosas, Ulises and Barton, Nicholas H and Copsey, Lucy and Barbier De Reuille, Pierre and Coen, Enrico},
  title        = {{Cryptic variation between species and the basis of hybrid performance}},
  journal      = {PLoS Biology},
  volume       = {8},
  number       = {7},
  publisher    = {Public Library of Science},
  doi          = {10.1371/journal.pbio.1000429},
  year         = {2010},
  abstract     = {Crosses between closely related species give two contrasting results. One result is that species hybrids may be inferior to their parents, for example, being less fertile [1]. The other is that F1 hybrids may display superior performance (heterosis), for example with increased vigour [2]. Although various hypotheses have been proposed to account for these two aspects of hybridisation, their biological basis is still poorly understood [3]. To gain further insights into this issue, we analysed the role that variation in gene expression may play. We took a conserved trait, flower asymmetry in Antirrhinum, and determined the extent to which the underlying regulatory genes varied in expression among closely related species. We show that expression of both genes analysed, CYC and RAD, varies significantly between species because of cis-acting differences. By making a quantitative genotype-phenotype map, using a range of mutant alleles, we demonstrate that the species lie on a plateau in gene expression-morphology space, so that the variation has no detectable phenotypic effect. However, phenotypic differences can be revealed by shifting genotypes off the plateau through genetic crosses. Our results can be readily explained if genomes are free to evolve within an effectively neutral zone in gene expression space. The consequences of this drift will be negligible for individual loci, but when multiple loci across the genome are considered, we show that the variation may have significant effects on phenotype and fitness, causing a significant drift load. By considering these consequences for various gene-expression-fitness landscapes, we conclude that F1 hybrids might be expected to show increased performance with regard to conserved traits, such as basic physiology, but reduced performance with regard to others. Thus, our study provides a new way of explaining how various aspects of hybrid performance may arise through natural variation in gene activity.},
}

@inproceedings{3782,
  abstract     = {In cortex surface segmentation, the extracted surface is required to have a particular topology, namely, a two-sphere. We present a new method for removing topology noise of a curve or surface within the level set framework, and thus produce a cortical surface with correct topology. We define a new energy term which quantifies topology noise. We then show how to minimize this term by computing its functional derivative with respect to the level set function. This method differs from existing methods in that it is inherently continuous and not digital; and in the way that our energy directly relates to the topology of the underlying curve or surface, versus existing knot-based measures which are related in a more indirect fashion. The proposed flow is validated empirically.},
  author       = {Chen, Chao and Freedman, Daniel},
  booktitle    = {Conference proceedings MCV 2010},
  location     = {Beijing, China},
  pages        = {31--42},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  title        = {{Topology noise removal for curve and surface evolution}},
  doi          = {10.1007/978-3-642-18421-5_4},
  volume       = {6533},
  year         = {2010},
}

