@inproceedings{3280,
  abstract     = {The (decisional) learning with errors problem (LWE) asks to distinguish ``noisy'' inner products of a secret vector with random vectors from uniform. The learning parities with noise problem (LPN) is the special case where the elements of the vectors are bits. In recent years, the LWE and LPN problems have found many applications in cryptography. In this paper we introduce a (seemingly) much stronger adaptive assumption, called ``subspace LWE'' (SLWE), where the adversary can learn the inner product of the secret and random vectors after they were projected into an adaptively and adversarially chosen subspace. We prove that, surprisingly, the SLWE problem mapping into subspaces of dimension d is almost as hard as LWE using secrets of length d (the other direction is trivial.) This result immediately implies that several existing cryptosystems whose security is based on the hardness of the LWE/LPN problems are provably secure in a much stronger sense than anticipated. As an illustrative example we show that the standard way of using LPN for symmetric CPA secure encryption is even secure against a very powerful class of related key attacks.},
  author       = {Pietrzak, Krzysztof Z},
  location     = {Taormina, Sicily, Italy},
  pages        = {548 -- 563},
  publisher    = {Springer},
  title        = {{Subspace LWE}},
  doi          = {10.1007/978-3-642-28914-9_31},
  volume       = {7194},
  year         = {2012},
}

@inproceedings{3281,
  abstract     = {We consider the problem of amplifying the ``lossiness'' of functions. We say that an oracle circuit C*: {0,1} m → {0,1}* amplifies relative lossiness from ℓ/n to L/m if for every function f:{0,1} n → {0,1} n it holds that 1 If f is injective then so is C f. 2 If f has image size of at most 2 n-ℓ, then C f has image size at most 2 m-L. The question is whether such C* exists for L/m ≫ ℓ/n. This problem arises naturally in the context of cryptographic ``lossy functions,'' where the relative lossiness is the key parameter. We show that for every circuit C* that makes at most t queries to f, the relative lossiness of C f is at most L/m ≤ ℓ/n + O(log t)/n. In particular, no black-box method making a polynomial t = poly(n) number of queries can amplify relative lossiness by more than an O(logn)/n additive term. We show that this is tight by giving a simple construction (cascading with some randomization) that achieves such amplification.},
  author       = {Pietrzak, Krzysztof Z and Rosen, Alon and Segev, Gil},
  location     = {Taormina, Sicily, Italy},
  pages        = {458 -- 475},
  publisher    = {Springer},
  title        = {{Lossy functions do not amplify well}},
  doi          = {10.1007/978-3-642-28914-9_26},
  volume       = {7194},
  year         = {2012},
}

@inproceedings{3282,
  abstract     = {Traditionally, symmetric-key message authentication codes (MACs) are easily built from pseudorandom functions (PRFs). In this work we propose a wide variety of other approaches to building efficient MACs, without going through a PRF first. In particular, unlike deterministic PRF-based MACs, where each message has a unique valid tag, we give a number of probabilistic MAC constructions from various other primitives/assumptions. Our main results are summarized as follows: We show several new probabilistic MAC constructions from a variety of general assumptions, including CCA-secure encryption, Hash Proof Systems and key-homomorphic weak PRFs. By instantiating these frameworks under concrete number theoretic assumptions, we get several schemes which are more efficient than just using a state-of-the-art PRF instantiation under the corresponding assumption. For probabilistic MACs, unlike deterministic ones, unforgeability against a chosen message attack (uf-cma ) alone does not imply security if the adversary can additionally make verification queries (uf-cmva ). We give an efficient generic transformation from any uf-cma secure MAC which is ``message-hiding'' into a uf-cmva secure MAC. This resolves the main open problem of Kiltz et al. from Eurocrypt'11; By using our transformation on their constructions, we get the first efficient MACs from the LPN assumption. While all our new MAC constructions immediately give efficient actively secure, two-round symmetric-key identification schemes, we also show a very simple, three-round actively secure identification protocol from any weak PRF. In particular, the resulting protocol is much more efficient than the trivial approach of building a regular PRF from a weak PRF. © 2012 International Association for Cryptologic Research.},
  author       = {Dodis, Yevgeniy and Pietrzak, Krzysztof Z and Kiltz, Eike and Wichs, Daniel},
  location     = {Cambridge, UK},
  pages        = {355 -- 374},
  publisher    = {Springer},
  title        = {{Message authentication, revisited}},
  doi          = {10.1007/978-3-642-29011-4_22},
  volume       = {7237},
  year         = {2012},
}

@article{3289,
  abstract     = {Viral manipulation of transduction pathways associated with key cellular functions such as survival, response to microbial infection, and cytoskeleton reorganization can provide the supportive milieu for a productive infection. Here, we demonstrate that vaccinia virus (VACV) infection leads to activation of the stress-activated protein kinase (SAPK)/extracellular signal-regulated kinase (ERK) 4/7 (MKK4/7)-c-Jun N-terminal protein kinase 1/2 (JNK1/2) pathway; further, the stimulation of this pathway requires postpenetration, prereplicative events in the viral replication cycle. Although the formation of intracellular mature virus (IMV) was not affected in MKK4/7- or JNK1/2-knockout (KO) cells, we did note an accentuated deregulation of microtubule and actin network organization in infected JNK1/2-KO cells. This was followed by deregulated viral trafficking to the periphery and enhanced enveloped particle release. Furthermore, VACV infection induced alterations in the cell contractility and morphology, and cell migration was reduced in the JNK-KO cells. In addition, phosphorylation of proteins implicated with early cell contractility and cell migration, such as microtubule-associated protein 1B and paxillin, respectively, was not detected in the VACV-infected KO cells. In sum, our findings uncover a regulatory role played by the MKK4/7-JNK1/2 pathway in cytoskeleton reorganization during VACV infection.},
  author       = {Pereira, Anna and Leite, Flávia and Brasil, Bruno and Soares Martins, Jamaria and Torres, Alice and Pimenta, Paulo and Souto Padrón, Thais and Traktman, Paula and Ferreira, Paulo and Kroon, Erna and Bonjardim, Cláudio},
  journal      = {Journal of Virology},
  number       = {1},
  pages        = {172 -- 184},
  publisher    = {ASM},
  title        = {{A vaccinia virus-driven interplay between the MKK4/7-JNK1/2 pathway and cytoskeleton reorganization}},
  doi          = {10.1128/JVI.05638-11},
  volume       = {86},
  year         = {2012},
}

@article{3310,
  abstract     = {The theory of persistent homology opens up the possibility to reason about topological features of a space or a function quantitatively and in combinatorial terms. We refer to this new angle at a classical subject within algebraic topology as a point calculus, which we present for the family of interlevel sets of a real-valued function. Our account of the subject is expository, devoid of proofs, and written for non-experts in algebraic topology.},
  author       = {Bendich, Paul and Cabello, Sergio and Edelsbrunner, Herbert},
  journal      = {Pattern Recognition Letters},
  number       = {11},
  pages        = {1436 -- 1444},
  publisher    = {Elsevier},
  title        = {{A point calculus for interlevel set homology}},
  doi          = {10.1016/j.patrec.2011.10.007},
  volume       = {33},
  year         = {2012},
}

@article{3314,
  abstract     = {We introduce two-level discounted and mean-payoff games played by two players on a perfect-information stochastic game graph. The upper level game is a discounted or mean-payoff game and the lower level game is a (undiscounted) reachability game. Two-level games model hierarchical and sequential decision making under uncertainty across different time scales. For both discounted and mean-payoff two-level games, we show the existence of pure memoryless optimal strategies for both players and an ordered field property. We show that if there is only one player (Markov decision processes), then the values can be computed in polynomial time. It follows that whether the value of a player is equal to a given rational constant in two-level discounted or mean-payoff games can be decided in NP ∩ coNP. We also give an alternate strategy improvement algorithm to compute the value. © 2012 World Scientific Publishing Company.},
  author       = {Chatterjee, Krishnendu and Majumdar, Rupak},
  journal      = {International Journal of Foundations of Computer Science},
  number       = {3},
  pages        = {609 -- 625},
  publisher    = {World Scientific Publishing},
  title        = {{Discounting and averaging in games across time scales}},
  doi          = {10.1142/S0129054112400308},
  volume       = {23},
  year         = {2012},
}

@article{3317,
  abstract     = {The physical distance between presynaptic Ca2+ channels and the Ca2+ sensors that trigger exocytosis of neurotransmitter-containing vesicles is a key determinant of the signalling properties of synapses in the nervous system. Recent functional analysis indicates that in some fast central synapses, transmitter release is triggered by a small number of Ca2+ channels that are coupled to Ca2+ sensors at the nanometre scale. Molecular analysis suggests that this tight coupling is generated by protein–protein interactions involving Ca2+ channels, Ca2+ sensors and various other synaptic proteins. Nanodomain coupling has several functional advantages, as it increases the efficacy, speed and energy efficiency of synaptic transmission.},
  author       = {Eggermann, Emmanuel and Bucurenciu, Iancu and Goswami, Sarit and Jonas, Peter M},
  journal      = {Nature Reviews Neuroscience},
  number       = {1},
  pages        = {7 -- 21},
  publisher    = {Nature Publishing Group},
  title        = {{Nanodomain coupling between Ca(2+) channels and sensors of exocytosis at fast mammalian synapses}},
  doi          = {10.1038/nrn3125},
  volume       = {13},
  year         = {2012},
}

@article{3331,
  abstract     = {Computing the topology of an algebraic plane curve C means computing a combinatorial graph that is isotopic to C and thus represents its topology in $\mathbb{R}^2$. We prove that, for a polynomial of degree n with integer coefficients bounded by $2^\rho$, the topology of the induced curve can be computed with $\tilde{O}(n^8(n+\rho^2))$ bit operations ($\tilde{O}$ indicates that we omit logarithmic factors). Our analysis improves the previous best known complexity bounds by a factor of n2. The improvement is based on new techniques to compute and refine isolating intervals for the real roots of polynomials, and on the consequent amortized analysis of the critical fibers of the algebraic curve.},
  author       = {Kerber, Michael and Sagraloff, Michael},
  journal      = {Journal of Symbolic Computation},
  number       = {3},
  pages        = {239 -- 258},
  publisher    = {Elsevier},
  title        = {{A worst case bound for topology computation of algebraic curves}},
  doi          = {10.1016/j.jsc.2011.11.001},
  volume       = {47},
  year         = {2012},
}

@inproceedings{3341,
  abstract     = {We consider two-player stochastic games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves independently and simultaneously; the current state and the two moves determine a probability distribution over the successor states. We also consider the important special case of turn-based stochastic games where players make moves in turns, rather than concurrently. We study concurrent games with \omega-regular winning conditions specified as parity objectives. The value for player 1 for a parity objective is the maximal probability with which the player can guarantee the satisfaction of the objective against all strategies of the opponent. We study the problem of continuity and robustness of the value function in concurrent and turn-based stochastic parity games with respect to imprecision in the transition probabilities. We present quantitative bounds on the difference of the value function (in terms of the imprecision of the transition probabilities) and show the value continuity for structurally equivalent concurrent games (two games are structurally equivalent if the support of the transition function is the same and the probabilities differ). We also show robustness of optimal strategies for structurally equivalent turn-based stochastic parity games. Finally we show that the value continuity property breaks without the structurally equivalent assumption (even for Markov chains) and show that our quantitative bound is asymptotically optimal. Hence our results are tight (the assumption is both necessary and sufficient) and optimal (our quantitative bound is asymptotically optimal).},
  author       = {Chatterjee, Krishnendu},
  location     = {Tallinn, Estonia},
  pages        = {270 -- 285},
  publisher    = {Springer},
  title        = {{Robustness of structurally equivalent concurrent parity games}},
  doi          = {10.1007/978-3-642-28729-9_18},
  volume       = {7213},
  year         = {2012},
}

@inbook{10896,
  abstract     = {Under physiological conditions the brain, via the purine salvage pathway, reuses the preformed purine bases hypoxanthine, derived from ATP degradation, and adenine (Ade), derived from polyamine synthesis, to restore its ATP pool. However, the massive degradation of ATP during ischemia, although providing valuable neuroprotective adenosine, results in the accumulation and loss of diffusible purine metabolites and thereby leads to a protracted reduction in the post-ischemic ATP pool size. In vivo, this may both limit the ability to deploy ATP-dependent reparative mechanisms and reduce the subsequent availability of adenosine, whilst in brain slices results in tissue with substantially lower levels of ATP than in vivo. In the present review, we describe the mechanisms by which brain tissue replenishes its ATP, how this can be improved with the clinically tolerated chemicals D-ribose and adenine, and the functional, and potential therapeutic, implications of doing so.},
  author       = {zur Nedden, Stephanie and Doney, Alexander S. and Frenguelli, Bruno G.},
  booktitle    = {Adenosine},
  editor       = {Masino, Susan and Boison, Detlev},
  isbn         = {9781461439028},
  pages        = {109--129},
  publisher    = {Springer},
  title        = {{The double-edged sword: Gaining Adenosine at the expense of ATP. How to balance the books}},
  doi          = {10.1007/978-1-4614-3903-5_6},
  year         = {2012},
}

@inproceedings{10903,
  abstract     = {We propose a logic-based framework for automated reasoning about sequential programs manipulating singly-linked lists and arrays with unbounded data. We introduce the logic SLAD, which allows combining shape constraints, written in a fragment of Separation Logic, with data and size constraints. We address the problem of checking the entailment between SLAD formulas, which is crucial in performing pre-post condition reasoning. Although this problem is undecidable in general for SLAD, we propose a sound and powerful procedure that is able to solve this problem for a large class of formulas, beyond the capabilities of existing techniques and tools. We prove that this procedure is complete, i.e., it is actually a decision procedure for this problem, for an important fragment of SLAD including known decidable logics. We implemented this procedure and shown its preciseness and its efficiency on a significant benchmark of formulas.},
  author       = {Bouajjani, Ahmed and Dragoi, Cezara and Enea, Constantin and Sighireanu, Mihaela},
  booktitle    = {Automated Technology for Verification and Analysis},
  isbn         = {9783642333859},
  issn         = {1611-3349},
  location     = {Thiruvananthapuram, India},
  pages        = {167--182},
  publisher    = {Springer},
  title        = {{Accurate invariant checking for programs manipulating lists and arrays with infinite data}},
  doi          = {10.1007/978-3-642-33386-6_14},
  volume       = {7561},
  year         = {2012},
}

@inproceedings{10904,
  abstract     = {Multi-dimensional mean-payoff and energy games provide the mathematical foundation for the quantitative study of reactive systems, and play a central role in the emerging quantitative theory of verification and synthesis. In this work, we study the strategy synthesis problem for games with such multi-dimensional objectives along with a parity condition, a canonical way to express ω-regular conditions. While in general, the winning strategies in such games may require infinite memory, for synthesis the most relevant problem is the construction of a finite-memory winning strategy (if one exists). Our main contributions are as follows. First, we show a tight exponential bound (matching upper and lower bounds) on the memory required for finite-memory winning strategies in both multi-dimensional mean-payoff and energy games along with parity objectives. This significantly improves the triple exponential upper bound for multi energy games (without parity) that could be derived from results in literature for games on VASS (vector addition systems with states). Second, we present an optimal symbolic and incremental algorithm to compute a finite-memory winning strategy (if one exists) in such games. Finally, we give a complete characterization of when finite memory of strategies can be traded off for randomness. In particular, we show that for one-dimension mean-payoff parity games, randomized memoryless strategies are as powerful as their pure finite-memory counterparts.},
  author       = {Chatterjee, Krishnendu and Randour, Mickael and Raskin, Jean-François},
  booktitle    = {CONCUR 2012 - Concurrency Theory},
  editor       = {Koutny, Maciej and Ulidowski, Irek},
  isbn         = {9783642329395},
  issn         = {0302-9743},
  location     = {Newcastle upon Tyne, United Kingdom},
  pages        = {115--131},
  publisher    = {Springer},
  title        = {{Strategy synthesis for multi-dimensional quantitative objectives}},
  doi          = {10.1007/978-3-642-32940-1_10},
  volume       = {7454},
  year         = {2012},
}

@inproceedings{10905,
  abstract     = {Energy games belong to a class of turn-based two-player infinite-duration games played on a weighted directed graph. It is one of the rare and intriguing combinatorial problems that lie in NP ∩ co−NP, but are not known to be in P. While the existence of polynomial-time algorithms has been a major open problem for decades, there is no algorithm that solves any non-trivial subclass in polynomial time.
In this paper, we give several results based on the weight structures of the graph. First, we identify a notion of penalty and present a polynomial-time algorithm when the penalty is large. Our algorithm is the first polynomial-time algorithm on a large class of weighted graphs. It includes several counter examples that show that many previous algorithms, such as value iteration and random facet algorithms, require at least sub-exponential time. Our main technique is developing the first non-trivial approximation algorithm and showing how to convert it to an exact algorithm. Moreover, we show that in a practical case in verification where weights are clustered around a constant number of values, the energy game problem can be solved in polynomial time. We also show that the problem is still as hard as in general when the clique-width is bounded or the graph is strongly ergodic, suggesting that restricting graph structures need not help.},
  author       = {Chatterjee, Krishnendu and Henzinger, Monika H and Krinninger, Sebastian and Nanongkai, Danupon},
  booktitle    = {Algorithms – ESA 2012},
  isbn         = {9783642330896},
  issn         = {1611-3349},
  location     = {Ljubljana, Slovenia},
  pages        = {301--312},
  publisher    = {Springer},
  title        = {{Polynomial-time algorithms for energy games with special weight structures}},
  doi          = {10.1007/978-3-642-33090-2_27},
  volume       = {7501},
  year         = {2012},
}

@inproceedings{10906,
  abstract     = {HSF(C) is a tool that automates verification of safety and liveness properties for C programs. This paper describes the verification approach taken by HSF(C) and provides instructions on how to install and use the tool.},
  author       = {Grebenshchikov, Sergey and Gupta, Ashutosh and Lopes, Nuno P. and Popeea, Corneliu and Rybalchenko, Andrey},
  booktitle    = {Tools and Algorithms for the Construction and Analysis of Systems},
  editor       = {Flanagan, Cormac and König, Barbara},
  isbn         = {9783642287558},
  issn         = {1611-3349},
  location     = {Tallinn, Estonia},
  pages        = {549--551},
  publisher    = {Springer},
  title        = {{HSF(C): A software verifier based on Horn clauses}},
  doi          = {10.1007/978-3-642-28756-5_46},
  volume       = {7214},
  year         = {2012},
}

@inproceedings{2048,
  abstract     = {Leakage resilient cryptography attempts to incorporate side-channel leakage into the black-box security model and designs cryptographic schemes that are provably secure within it. Informally, a scheme is leakage-resilient if it remains secure even if an adversary learns a bounded amount of arbitrary information about the scheme's internal state. Unfortunately, most leakage resilient schemes are unnecessarily complicated in order to achieve strong provable security guarantees. As advocated by Yu et al. [CCS’10], this mostly is an artefact of the security proof and in practice much simpler construction may already suffice to protect against realistic side-channel attacks. In this paper, we show that indeed for simpler constructions leakage-resilience can be obtained when we aim for relaxed security notions where the leakage-functions and/or the inputs to the primitive are chosen non-adaptively. For example, we show that a three round Feistel network instantiated with a leakage resilient PRF yields a leakage resilient PRP if the inputs are chosen non-adaptively (This complements the result of Dodis and Pietrzak [CRYPTO’10] who show that if adaptive queries are allowed, a superlogarithmic number of rounds is necessary.) We also show that a minor variation of the classical GGM construction gives a leakage resilient PRF if both, the leakage-function and the inputs, are chosen non-adaptively.},
  author       = {Faust, Sebastian and Pietrzak, Krzysztof Z and Schipper, Joachim},
  booktitle    = {Conference proceedings CHES 2012},
  location     = {Leuven, Belgium},
  pages        = {213 -- 232},
  publisher    = {Springer},
  title        = {{Practical leakage-resilient symmetric cryptography}},
  doi          = {10.1007/978-3-642-33027-8_13},
  volume       = {7428},
  year         = {2012},
}

@inproceedings{2049,
  abstract     = {We propose a new authentication protocol that is provably secure based on a ring variant of the learning parity with noise (LPN) problem. The protocol follows the design principle of the LPN-based protocol from Eurocrypt’11 (Kiltz et al.), and like it, is a two round protocol secure against active attacks. Moreover, our protocol has small communication complexity and a very small footprint which makes it applicable in scenarios that involve low-cost, resource-constrained devices.

Performance-wise, our protocol is more efficient than previous LPN-based schemes, such as the many variants of the Hopper-Blum (HB) protocol and the aforementioned protocol from Eurocrypt’11. Our implementation results show that it is even comparable to the standard challenge-and-response protocols based on the AES block-cipher. Our basic protocol is roughly 20 times slower than AES, but with the advantage of having 10 times smaller code size. Furthermore, if a few hundred bytes of non-volatile memory are available to allow the storage of some off-line pre-computations, then the online phase of our protocols is only twice as slow as AES.
},
  author       = {Heyse, Stefan and Kiltz, Eike and Lyubashevsky, Vadim and Paar, Christof and Pietrzak, Krzysztof Z},
  booktitle    = {Conference proceedings FSE 2012},
  location     = {Washington, DC, USA},
  pages        = {346 -- 365},
  publisher    = {Springer},
  title        = {{Lapin: An efficient authentication protocol based on ring-LPN}},
  doi          = {10.1007/978-3-642-34047-5_20},
  volume       = {7549},
  year         = {2012},
}

@article{2263,
  abstract     = {Nestin-cre transgenic mice have been widely used to direct recombination to neural stem cells (NSCs) and intermediate neural progenitor cells (NPCs). Here we report that a readily utilized, and the only commercially available, Nestin-cre line is insufficient for directing recombination in early embryonic NSCs and NPCs. Analysis of recombination efficiency in multiple cre-dependent reporters and a genetic mosaic line revealed consistent temporal and spatial patterns of recombination in NSCs and NPCs. For comparison we utilized a knock-in Emx1cre line and found robust recombination in NSCs and NPCs in ventricular and subventricular zones of the cerebral cortices as early as embryonic day 12.5. In addition we found that the rate of Nestin-cre driven recombination only reaches sufficiently high levels in NSCs and NPCs during late embryonic and early postnatal periods. These findings are important when commercially available cre lines are considered for directing recombination to embryonic NSCs and NPCs.},
  author       = {Liang, Huixuan and Hippenmeyer, Simon and Ghashghaei, H.},
  journal      = {Biology open},
  number       = {12},
  pages        = {1200 -- 1203},
  publisher    = {The Company of Biologists},
  title        = {{A Nestin-cre transgenic mouse is insufficient for recombination in early embryonic neural progenitors}},
  doi          = {10.1242/bio.20122287},
  volume       = {1},
  year         = {2012},
}

@article{2302,
  abstract     = {We introduce propagation models (PMs), a formalism able to express several kinds of equations that describe the behavior of biochemical reaction networks. Furthermore, we introduce the propagation abstract data type (PADT), which separates concerns regarding different numerical algorithms for the transient analysis of biochemical reaction networks from concerns regarding their implementation, thus allowing for portable and efficient solutions. The state of a propagation abstract data type is given by a vector that assigns mass values to a set of nodes, and its (next) operator propagates mass values through this set of nodes. We propose an approximate implementation of the (next) operator, based on threshold abstraction, which propagates only ``significant'' mass values and thus achieves a compromise between efficiency and accuracy. Finally, we give three use cases for propagation models: the chemical master equation (CME), the reaction rate equation (RRE), and a hybrid method that combines these two equations. These three applications use propagation models in order to propagate probabilities and/or expected values and variances of the model's variables.},
  author       = {Henzinger, Thomas A and Mateescu, Maria},
  journal      = {IEEE ACM Transactions on Computational Biology and Bioinformatics},
  number       = {2},
  pages        = {310 -- 322},
  publisher    = {IEEE},
  title        = {{The propagation approach for computing biochemical reaction networks}},
  doi          = {10.1109/TCBB.2012.91},
  volume       = {10},
  year         = {2012},
}

@article{2318,
  abstract     = {We show that bosons interacting via pair potentials with negative scattering length form bound states for a suitable number of particles. In other words, the absence of many-particle bound states of any kind implies the non-negativity of the scattering length of the interaction potential. },
  author       = {Seiringer, Robert},
  journal      = {Journal of Spectral Theory},
  number       = {3},
  pages        = {321--328},
  publisher    = {European Mathematical Society},
  title        = {{Absence of bound states implies non-negativity of the scattering length}},
  doi          = {10.4171/JST/31},
  volume       = {2},
  year         = {2012},
}

@article{2411,
  abstract     = {The kingdom of fungi provides model organisms for biotechnology, cell biology, genetics, and life sciences in general. Only when their phylogenetic relationships are stably resolved, can individual results from fungal research be integrated into a holistic picture of biology. However, and despite recent progress, many deep relationships within the fungi remain unclear. Here, we present the first phylogenomic study of an entire eukaryotic kingdom that uses a consistency criterion to strengthen phylogenetic conclusions. We reason that branches (splits) recovered with independent data and different tree reconstruction methods are likely to reflect true evolutionary relationships. Two complementary phylogenomic data sets based on 99 fungal genomes and 109 fungal expressed sequence tag (EST) sets analyzed with four different tree reconstruction methods shed light from different angles on the fungal tree of life. Eleven additional data sets address specifically the phylogenetic position of Blastocladiomycota, Ustilaginomycotina, and Dothideomycetes, respectively. The combined evidence from the resulting trees supports the deep-level stability of the fungal groups toward a comprehensive natural system of the fungi. In addition, our analysis reveals methodologically interesting aspects. Enrichment for EST encoded data-a common practice in phylogenomic analyses-introduces a strong bias toward slowly evolving and functionally correlated genes. Consequently, the generalization of phylogenomic data sets as collections of randomly selected genes cannot be taken for granted. A thorough characterization of the data to assess possible influences on the tree reconstruction should therefore become a standard in phylogenomic analyses.},
  author       = {Ebersberger, Ingo and De Matos Simoes, Ricardo and Kupczok, Anne and Gube, Matthias and Kothe, Erika and Voigt, Kerstin and Von Haeseler, Arndt},
  journal      = {Molecular Biology and Evolution},
  number       = {5},
  pages        = {1319 -- 1334},
  publisher    = {Oxford University Press},
  title        = {{A consistent phylogenetic backbone for the fungi}},
  doi          = {10.1093/molbev/msr285},
  volume       = {29},
  year         = {2012},
}

