@book{4346,
  abstract     = {With the term "Library 2.0" the editors mean an institution which applies the principles of the Web 2.0 such as openness, re-use, collaboration and interaction in the entire organization. Libraries are extending their service offerings and work processes to include the potential of Web 2.0 technologies. This changes the job description and self-image of librarians. The collective volume offers a complete overview of the topic Library 2.0 and the current state of developments from a technological, sociological, information theoretical and practice-oriented perspective.},
  editor       = {Danowski, Patrick and Bergmann, Julia},
  isbn         = {978-3-11-023209-7},
  pages        = {405},
  publisher    = {De Gruyter},
  title        = {{Handbuch Bibliothek 2.0}},
  doi          = {10.1515/9783110232103},
  volume       = {41},
  year         = {2010},
}

@inproceedings{4361,
  abstract     = {Depth-bounded processes form the most expressive known fragment of the π-calculus for which interesting verification problems are still decidable. In this paper we develop an adequate domain of limits for the well-structured transition systems that are induced by depth-bounded processes. An immediate consequence of our result is that there exists a forward algorithm that decides the covering problem for this class. Unlike backward algorithms, the forward algorithm terminates even if the depth of the process is not known a priori. More importantly, our result suggests a whole spectrum of forward algorithms that enable the effective verification of a large class of mobile systems.},
  author       = {Wies, Thomas and Zufferey, Damien and Henzinger, Thomas A},
  editor       = {Ong, Luke},
  location     = {Paphos, Cyprus},
  pages        = {94 -- 108},
  publisher    = {Springer},
  title        = {{Forward analysis of depth-bounded processes}},
  doi          = {10.1007/978-3-642-12032-9_8},
  volume       = {6014},
  year         = {2010},
}

@inproceedings{4369,
  abstract     = {In this paper we propose a novel technique for constructing timed automata from properties expressed in the logic mtl, under bounded-variability assumptions. We handle full mtl and include all future operators. Our construction is based on separation of the continuous time monitoring of the input sequence and discrete predictions regarding the future. The separation of the continuous from the discrete allows us to determinize our automata in an exponential construction that does not increase the number of clocks. This leads to a doubly exponential construction from mtl to deterministic timed automata, compared with triply exponential using existing approaches. We offer an alternative to the existing approach to linear real-time model checking, which has never been implemented. It further offers a unified framework for model checking, runtime monitoring, and synthesis, in an approach that can reuse tools, implementations, and insights from the discrete setting.},
  author       = {Nickovic, Dejan and Piterman, Nir},
  editor       = {Henzinger, Thomas A. and Chatterjee, Krishnendu},
  location     = {Klosterneuburg, Austria},
  pages        = {152 -- 167},
  publisher    = {Springer},
  title        = {{From MTL to deterministic timed automata}},
  doi          = {10.1007/978-3-642-15297-9_13},
  volume       = {6246},
  year         = {2010},
}

@inproceedings{4378,
  abstract     = {Techniques such as verification condition generation, predicate abstraction, and expressive type systems reduce software verification to proving formulas in expressive logics. Programs and their specifications often make use of data structures such as sets, multisets, algebraic data types, or graphs. Consequently, formulas generated from verification also involve such data structures. To automate the proofs of such formulas we propose a logic (a “calculus”) of such data structures. We build the calculus by starting from decidable logics of individual data structures, and connecting them through functions and sets, in ways that go beyond the frameworks such as Nelson-Oppen. The result are new decidable logics that can simultaneously specify properties of different kinds of data structures and overcome the limitations of the individual logics. Several of our decidable logics include abstraction functions that map a data structure into its more abstract view (a tree into a multiset, a multiset into a set), into a numerical quantity (the size or the height), or into the truth value of a candidate data structure invariant (sortedness, or the heap property). For algebraic data types, we identify an asymptotic many-to-one condition on the abstraction function that guarantees the existence of a decision procedure. In addition to the combination based on abstraction functions, we can combine multiple data structure theories if they all reduce to the same data structure logic. As an instance of this approach, we describe a decidable logic whose formulas are propositional combinations of formulas in: weak monadic second-order logic of two successors, two-variable logic with counting, multiset algebra with Presburger arithmetic, the Bernays-Schönfinkel-Ramsey class of first-order logic, and the logic of algebraic data types with the set content function. 
The subformulas in this combination can share common variables that refer to sets of objects along with the common set algebra operations. Such sound and complete combination is possible because the relations on sets definable in the component logics are all expressible in Boolean Algebra with Presburger Arithmetic. Presburger arithmetic and its new extensions play an important role in our decidability results. In several cases, when we combine logics that belong to NP, we can prove the satisfiability for the combined logic is still in NP.},
  author       = {Kuncak, Viktor and Piskac, Ruzica and Suter, Philippe and Wies, Thomas},
  editor       = {Barthe, Gilles and Hermenegildo, Manuel},
  location     = {Madrid, Spain},
  pages        = {26 -- 44},
  publisher    = {Springer},
  title        = {{Building a calculus of data structures}},
  doi          = {10.1007/978-3-642-11319-2_6},
  volume       = {5944},
  year         = {2010},
}

@article{4379,
  abstract     = {The formal specification component of verification can be exported to simulation through the idea of property checkers. The essence of this approach is the automatic construction of an observer from the specification in the form of a program that can be interfaced with a simulator and alert the user if the property is violated by a simulation trace. Although not complete, this lighter approach to formal verification has been effectively used in software and digital hardware to detect errors. Recently, the idea of property checkers has been extended to analog and mixed-signal systems.

In this paper, we apply the property-based checking methodology to an industrial and realistic example of a DDR2 memory interface. The properties describing the DDR2 analog behavior are expressed in the formal specification language stl/psl in form of assertions. The simulation traces generated from an actual DDR2 interface design are checked with respect to the stl/psl assertions using the amt tool. The focus of this paper is on the translation of the official (informal and descriptive) specification of two non-trivial DDR2 properties into stl/psl assertions. We study both the benefits and the current limits of such approach.
},
  author       = {Jones, Kevin D and Konrad, Victor and Nickovic, Dejan},
  journal      = {Formal Methods in System Design},
  number       = {2},
  pages        = {114 -- 130},
  publisher    = {Springer},
  title        = {{Analog property checkers: a DDR2 case study}},
  doi          = {10.1007/s10703-009-0085-x},
  volume       = {36},
  year         = {2010},
}

@inproceedings{4380,
  abstract     = {Cloud computing is an emerging paradigm aimed to offer users pay-per-use computing resources, while leaving the burden of managing the computing infrastructure to the cloud provider. We present a new programming and pricing model that gives the cloud user the flexibility of trading execution speed and price on a per-job basis. We discuss the scheduling and resource management challenges for the cloud provider that arise in the implementation of this model. We argue that techniques from real-time and embedded software can be useful in this context.},
  author       = {Henzinger, Thomas A and Tomar, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
  location     = {Arizona, USA},
  pages        = {1 -- 8},
  publisher    = {ACM},
  title        = {{A marketplace for cloud resources}},
  doi          = {10.1145/1879021.1879022},
  year         = {2010},
}

@inproceedings{4381,
  abstract     = {Cloud computing aims to give users virtually unlimited pay-per-use computing resources without the burden of managing the underlying infrastructure. We claim that, in order to realize the full potential of cloud computing, the user must be presented with a pricing model that offers flexibility at the requirements level, such as a choice between different degrees of execution speed and the cloud provider must be presented with a programming model that offers flexibility at the execution level, such as a choice between different scheduling policies. In such a flexible framework, with each job, the user purchases a virtual computer with the desired speed and cost characteristics, and the cloud provider can optimize the utilization of resources across a stream of jobs from different users. We designed a flexible framework to test our hypothesis, which is called FlexPRICE (Flexible Provisioning of Resources in a Cloud Environment) and works as follows. A user presents a job to the cloud. The cloud finds different schedules to execute the job and presents a set of quotes to the user in terms of price and duration for the execution. The user then chooses a particular quote and the cloud is obliged to execute the job according to the chosen quote. FlexPRICE thus hides the complexity of the actual scheduling decisions from the user, but still provides enough flexibility to meet the users actual demands. We implemented FlexPRICE in a simulator called PRICES that allows us to experiment with our framework. We observe that FlexPRICE provides a wide range of execution options-from fast and expensive to slow and cheap-- for the whole spectrum of data-intensive and computation-intensive jobs. We also observe that the set of quotes computed by FlexPRICE do not vary as the number of simultaneous jobs increases.},
  author       = {Henzinger, Thomas A and Tomar, Anmol and Singh, Vasu and Wies, Thomas and Zufferey, Damien},
  location     = {Miami, USA},
  pages        = {83 -- 90},
  publisher    = {IEEE},
  title        = {{FlexPRICE: Flexible provisioning of resources in a cloud environment}},
  doi          = {10.1109/CLOUD.2010.71},
  year         = {2010},
}

@inproceedings{4382,
  abstract     = {Transactional memory (TM) has shown potential to simplify the task of writing concurrent programs. Inspired by classical work on databases, formal definitions of the semantics of TM executions have been proposed. Many of these definitions assumed that accesses to shared data are solely performed through transactions. In practice, due to legacy code and concurrency libraries, transactions in a TM have to share data with non-transactional operations. The semantics of such interaction, while widely discussed by practitioners, lacks a clear formal specification. Those interactions can vary, sometimes in subtle ways, between TM implementations and underlying memory models. We propose a correctness condition for TMs, parametrized opacity, to formally capture the now folklore notion of strong atomicity by stipulating the two following intuitive requirements: first, every transaction appears as if it is executed instantaneously with respect to other transactions and non-transactional operations, and second, non-transactional operations conform to the given underlying memory model. We investigate the inherent cost of implementing parametrized opacity. We first prove that parametrized opacity requires either instrumenting non-transactional operations (for most memory models) or writing to memory by transactions using potentially expensive read-modify-write instructions (such as compare-and-swap). Then, we show that for a class of practical relaxed memory models, parametrized opacity can indeed be implemented with constant-time instrumentation of non-transactional writes and no instrumentation of non-transactional reads. We show that, in practice, parametrizing the notion of correctness allows developing more efficient TM implementations.},
  author       = {Guerraoui, Rachid and Henzinger, Thomas A and Kapalka, Michal and Singh, Vasu},
  location     = {Santorini, Greece},
  pages        = {263 -- 272},
  publisher    = {ACM},
  title        = {{Transactions in the jungle}},
  doi          = {10.1145/1810479.1810529},
  year         = {2010},
}

@inproceedings{4388,
  abstract     = {GIST is a tool that (a) solves the qualitative analysis problem of turn-based probabilistic games with ω-regular objectives; and (b) synthesizes reasonable environment assumptions for synthesis of unrealizable specifications. Our tool provides the first and efficient implementations of several reduction-based techniques to solve turn-based probabilistic games, and uses the analysis of turn-based probabilistic games for synthesizing environment assumptions for unrealizable specifications.},
  author       = {Chatterjee, Krishnendu and Henzinger, Thomas A and Jobstmann, Barbara and Radhakrishna, Arjun},
  location     = {Edinburgh, UK},
  pages        = {665 -- 669},
  publisher    = {Springer},
  title        = {{GIST: A solver for probabilistic games}},
  doi          = {10.1007/978-3-642-14295-6_57},
  volume       = {6174},
  year         = {2010},
}

@inproceedings{4389,
  abstract     = {Digital components play a central role in the design of complex embedded systems. These components are interconnected with other, possibly analog, devices and the physical environment. This environment cannot be entirely captured and can provide inaccurate input data to the component. It is thus important for digital components to have a robust behavior, i.e. the presence of a small change in the input sequences should not result in a drastic change in the output sequences. In this paper, we study a notion of robustness for sequential circuits. However, since sequential circuits may have parts that are naturally discontinuous (e.g., digital controllers with switching behavior), we need a flexible framework that accommodates this fact and leaves discontinuous parts of the circuit out from the robustness analysis. As a consequence, we consider sequential circuits that have their input variables partitioned into two disjoint sets: control and disturbance variables. Our contributions are (1) a definition of robustness for sequential circuits as a form of continuity with respect to disturbance variables, (2) the characterization of the exact class of sequential circuits that are robust according to our definition, (3) an algorithm to decide whether a sequential circuit is robust or not.},
  author       = {Doyen, Laurent and Henzinger, Thomas A and Legay, Axel and Nickovic, Dejan},
  pages        = {77 -- 84},
  publisher    = {IEEE},
  title        = {{Robustness of sequential circuits}},
  doi          = {10.1109/ACSD.2010.26},
  year         = {2010},
}

@inproceedings{4390,
  abstract     = {Concurrent data structures with fine-grained synchronization are notoriously difficult to implement correctly. The difficulty of reasoning about these implementations does not stem from the number of variables or the program size, but rather from the large number of possible interleavings. These implementations are therefore prime candidates for model checking. We introduce an algorithm for verifying linearizability of singly-linked heap-based concurrent data structures. We consider a model consisting of an unbounded heap where each vertex stores an element from an unbounded data domain, with a restricted set of operations for testing and updating pointers and data elements. Our main result is that linearizability is decidable for programs that invoke a fixed number of methods, possibly in parallel. This decidable fragment covers many of the common implementation techniques — fine-grained locking, lazy synchronization, and lock-free synchronization. We also show how the technique can be used to verify optimistic implementations with the help of programmer annotations. We developed a verification tool CoLT and evaluated it on a representative sample of Java implementations of the concurrent set data structure. The tool verified linearizability of a number of implementations, found a known error in a lock-free implementation and proved that the corrected version is linearizable.},
  author       = {Cerny, Pavol and Radhakrishna, Arjun and Zufferey, Damien and Chaudhuri, Swarat and Alur, Rajeev},
  location     = {Edinburgh, UK},
  pages        = {465 -- 479},
  publisher    = {Springer},
  title        = {{Model checking of linearizability of concurrent list implementations}},
  doi          = {10.1007/978-3-642-14295-6_41},
  volume       = {6174},
  year         = {2010},
}

@inproceedings{4393,
  abstract     = {Boolean notions of correctness are formalized by preorders on systems. Quantitative measures of correctness can be formalized by real-valued distance functions between systems, where the distance between implementation and specification provides a measure of “fit” or “desirability.” We extend the simulation preorder to the quantitative setting, by making each player of a simulation game pay a certain price for her choices. We use the resulting games with quantitative objectives to define three different simulation distances. The correctness distance measures how much the specification must be changed in order to be satisfied by the implementation. The coverage distance measures how much the implementation restricts the degrees of freedom offered by the specification. The robustness distance measures how much a system can deviate from the implementation description without violating the specification. We consider these distances for safety as well as liveness specifications. The distances can be computed in polynomial time for safety specifications, and for liveness specifications given by weak fairness constraints. We show that the distance functions satisfy the triangle inequality, that the distance between two systems does not increase under parallel composition with a third system, and that the distance between two systems can be bounded from above and below by distances between abstractions of the two systems. These properties suggest that our simulation distances provide an appropriate basis for a quantitative theory of discrete systems. We also demonstrate how the robustness distance can be used to measure how many transmission errors are tolerated by error correcting codes.},
  author       = {Cerny, Pavol and Henzinger, Thomas A and Radhakrishna, Arjun},
  location     = {Paris, France},
  pages        = {235 -- 268},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Simulation distances}},
  doi          = {10.1007/978-3-642-15375-4_18},
  volume       = {6269},
  year         = {2010},
}

@inproceedings{4396,
  abstract     = {Shape analysis is a promising technique to prove program properties about recursive data structures. The challenge is to automatically determine the data-structure type, and to supply the shape analysis with the necessary information about the data structure. We present a stepwise approach to the selection of instrumentation predicates for a TVLA-based shape analysis, which takes us a step closer towards the fully automatic verification of data structures. The approach uses two techniques to guide the refinement of shape abstractions: (1) during program exploration, an explicit heap analysis collects sample instances of the heap structures, which are used to identify the data structures that are manipulated by the program; and (2) during abstraction refinement along an infeasible error path, we consider different possible heap abstractions and choose the coarsest one that eliminates the infeasible path. We have implemented this combined approach for automatic shape refinement as an extension of the software model checker BLAST. Example programs from a data-structure library that manipulate doubly-linked lists and trees were successfully verified by our tool.},
  author       = {Beyer, Dirk and Henzinger, Thomas A and Théoduloz, Grégory and Zufferey, Damien},
  editor       = {Rosenblum, David and Taenzer, Gabriele},
  location     = {Paphos, Cyprus},
  pages        = {263 -- 277},
  publisher    = {Springer},
  title        = {{Shape refinement through explicit heap analysis}},
  doi          = {10.1007/978-3-642-12029-9_19},
  volume       = {6013},
  year         = {2010},
}

@article{2499,
  abstract     = {G protein-coupled receptors (GPCRs) have critical functions in intercellular communication. Although a wide range of different receptors have been identified in the same cells, the mechanism by which signals are integrated remains elusive. The ability of GPCRs to form dimers or larger hetero-oligomers is thought to generate such signal integration. We examined the molecular mechanisms responsible for the GABAB receptor-mediated potentiation of the mGlu receptor signalling reported in Purkinje neurons. We showed that this effect does not require a physical interaction between both receptors. Instead, it is the result of a more general mechanism in which the βγ subunits produced by the Gi-coupled GABAB receptor enhance the mGlu-mediated Gq response. Most importantly, this mechanism could be generally applied to other pairs of Gi- and Gq-coupled receptors and the signal integration varied depending on the time delay between activation of each receptor. Such a mechanism helps explain specific properties of cells expressing two different Gi- and Gq-coupled receptors activated by a single transmitter, or properties of GPCRs naturally coupled to both types of the G protein.},
  author       = {Rives, Marie L and Vol, Claire and Fukazawa, Yugo and Tinel, Norbert and Trinquet, Eric and Ayoub, Mohammed A and Shigemoto, Ryuichi and Pin, Jean-Philippe and Prezèau, Laurent},
  journal      = {EMBO Journal},
  number       = {15},
  pages        = {2195 -- 2208},
  publisher    = {Wiley-Blackwell},
  title        = {{Crosstalk between GABAB and mGlu1a receptors reveals new insight into GPCR signal integration}},
  doi          = {10.1038/emboj.2009.177},
  volume       = {28},
  year         = {2009},
}

@article{3051,
  author       = {Weijers, Dolf and Friml, Jirí},
  journal      = {Cell},
  number       = {6},
  pages        = {1172 -- 1172},
  publisher    = {Cell Press},
  title        = {{SnapShot: Auxin signaling and transport}},
  doi          = {10.1016/j.cell.2009.03.009},
  volume       = {136},
  year         = {2009},
}

@article{3052,
  abstract     = {The dynamic, differential distribution of the hormone auxin within plant tissues controls an impressive variety of developmental processes, which tailor plant growth and morphology to environmental conditions. Various environmental and endogenous signals can be integrated into changes in auxin distribution through their effects on local auxin biosynthesis and intercellular auxin transport. Individual cells interpret auxin largely by a nuclear signaling pathway that involves the F box protein TIR1 acting as an auxin receptor. Auxin-dependent TIR1 activity leads to ubiquitination-based degradation of transcriptional repressors and complex transcriptional reprogramming. Thus, auxin appears to be a versatile trigger of preprogrammed developmental changes in plant cells.},
  author       = {Vanneste, Steffen and Friml, Jirí},
  journal      = {Cell},
  number       = {6},
  pages        = {1005 -- 1016},
  publisher    = {Cell Press},
  title        = {{Auxin: A trigger for change in plant development}},
  doi          = {10.1016/j.cell.2009.03.001},
  volume       = {136},
  year         = {2009},
}

@article{3057,
  abstract     = {The differential distribution of the plant signaling molecule auxin is required for many aspects of plant development. Local auxin maxima and gradients arise as a result of local auxin metabolism and, predominantly, from directional cell-to-cell transport. In this primer, we discuss how the coordinated activity of several auxin influx and efflux systems, which transport auxin across the plasma membrane, mediates directional auxin flow. This activity crucially contributes to the correct setting of developmental cues in embryogenesis, organogenesis, vascular tissue formation and directional growth in response to environmental stimuli.},
  author       = {Petrášek, Jan and Friml, Jirí},
  journal      = {Development},
  number       = {16},
  pages        = {2675 -- 2688},
  publisher    = {Company of Biologists},
  title        = {{Auxin transport routes in plant development}},
  doi          = {10.1242/dev.030353},
  volume       = {136},
  year         = {2009},
}

@article{3061,
  abstract     = {The PIN-FORMED (PIN) proteins are secondary transporters acting in the efflux of the plant signal molecule auxin from cells. They are asymmetrically localized within cells and their polarity determines the directionality of intercellular auxin flow. PIN genes are found exclusively in the genomes of multicellular plants and play an important role in regulating asymmetric auxin distribution in multiple developmental processes, including embryogenesis, organogenesis, tissue differentiation and tropic responses. All PIN proteins have a similar structure with amino- and carboxy-terminal hydrophobic, membrane-spanning domains separated by a central hydrophilic domain. The structure of the hydrophobic domains is well conserved. The hydrophilic domain is more divergent and it determines eight groups within the protein family. The activity of PIN proteins is regulated at multiple levels, including transcription, protein stability, subcellular localization and transport activity. Different endogenous and environmental signals can modulate PIN activity and thus modulate auxin-distribution-dependent development. A large group of PIN proteins, including the most ancient members known from mosses, localize to the endoplasmic reticulum and they regulate the subcellular compartmentalization of auxin and thus auxin metabolism. Further work is needed to establish the physiological importance of this unexpected mode of auxin homeostasis regulation. Furthermore, the evolution of PIN-based transport, PIN protein structure and more detailed biochemical characterization of the transport function are important topics for further studies.},
  author       = {Křeček, Pavel and Skůpa, Petr and Libus, Jiří and Naramoto, Satoshi and Tejos, Ricardo and Friml, Jirí and Zažímalová, Eva},
  journal      = {Genome Biology},
  number       = {12},
  publisher    = {BioMed Central},
  title        = {{The PIN-FORMED (PIN) protein family of auxin transporters}},
  doi          = {10.1186/gb-2009-10-12-249},
  volume       = {10},
  year         = {2009},
}

@article{3197,
  abstract     = {The problem of obtaining the maximum a posteriori estimate of a general discrete Markov random field (i.e., a Markov random field defined using a discrete set of labels) is known to be NP-hard. However, due to its central importance in many applications, several approximation algorithms have been proposed in the literature. In this paper, we present an analysis of three such algorithms based on convex relaxations: (i) LP-S: the linear programming (LP) relaxation proposed by Schlesinger (1976) for a special case and independently in Chekuri et al. (2001), Koster et al. (1998), and Wainwright et al. (2005) for the general case; (ii) QP-RL: the quadratic programming (QP) relaxation of Ravikumar and Lafferty (2006); and (iii) SOCP-MS: the second order cone programming (SOCP) relaxation first proposed by Muramatsu and Suzuki (2003) for two label problems and later extended by Kumar et al. (2006) for a general label set.

We show that the SOCP-MS and the QP-RL relaxations are equivalent. Furthermore, we prove that despite the flexibility in the form of the constraints/objective function offered by QP and SOCP, the LP-S relaxation strictly dominates (i.e., provides a better approximation than) QP-RL and SOCP-MS. We generalize these results by defining a large class of SOCP (and equivalent QP) relaxations which is dominated by the LP-S relaxation. Based on these results we propose some novel SOCP relaxations which define constraints using random variables that form cycles or cliques in the graphical model representation of the random field. Using some examples we show that the new SOCP relaxations strictly dominate the previous approaches.},
  author       = {Kumar, M Pawan and Kolmogorov, Vladimir and Torr, Philip H},
  journal      = {Journal of Machine Learning Research},
  pages        = {71 -- 106},
  publisher    = {Microtome Publishing},
  title        = {{An analysis of convex relaxations for MAP estimation of discrete MRFs}},
  volume       = {10},
  year         = {2009},
}

@article{11103,
  abstract     = {Over the last decade, the nuclear envelope (NE) has emerged as a key component in the organization and function of the nuclear genome. As many as 100 different proteins are thought to specifically localize to this double membrane that separates the cytoplasm and the nucleoplasm of eukaryotic cells. Selective portals through the NE are formed at sites where the inner and outer nuclear membranes are fused, and the coincident assembly of ∼30 proteins into nuclear pore complexes occurs. These nuclear pore complexes are essential for the control of nucleocytoplasmic exchange. Many of the NE and nuclear pore proteins are thought to play crucial roles in gene regulation and thus are increasingly linked to human diseases.},
  author       = {Hetzer, Martin W. and Wente, Susan R.},
  issn         = {1534-5807},
  journal      = {Developmental Cell},
  keywords     = {Developmental Biology, Cell Biology, General Biochemistry, Genetics and Molecular Biology, Molecular Biology},
  number       = {5},
  pages        = {606--616},
  publisher    = {Elsevier},
  title        = {{Border control at the nucleus: Biogenesis and organization of the nuclear membrane and pore complexes}},
  doi          = {10.1016/j.devcel.2009.10.007},
  volume       = {17},
  year         = {2009},
}

