@inproceedings{3328,
  abstract     = {We report on a generic uni- and bivariate algebraic kernel that is publicly available with CGAL 3.7. It comprises complete, correct, though efficient state-of-the-art implementations on polynomials, roots of polynomial systems, and the support to analyze algebraic curves defined by bivariate polynomials. The kernel design is generic, that is, various number types and substeps can be exchanged. It is accompanied with a ready-to-use interface to enable arrangements induced by algebraic curves, that have already been used as basis for various geometric applications, as arrangements on Dupin cyclides or the triangulation of algebraic surfaces. We present two novel applications: arrangements of rotated algebraic curves and Boolean set operations on polygons bounded by segments of algebraic curves. We also provide experiments showing that our general implementation is competitive and even often clearly outperforms existing implementations that are explicitly tailored for specific types of non-linear curves that are available in CGAL.},
  author       = {Berberich, Eric and Hemmer, Michael and Kerber, Michael},
  booktitle    = {Proceedings of the twenty-seventh annual symposium on Computational geometry},
  location     = {Paris, France},
  pages        = {179 -- 186},
  publisher    = {ACM},
  title        = {{A generic algebraic kernel for non-linear geometric applications}},
  doi          = {10.1145/1998196.1998224},
  year         = {2011},
}

@inproceedings{3329,
  abstract     = {We consider the offset-deconstruction problem: Given a polygonal shape Q with n vertices, can it be expressed, up to a tolerance µ in Hausdorff distance, as the Minkowski sum of another polygonal shape P with a disk of fixed radius? If it does, we also seek a preferably simple-looking solution shape P; then, P's offset constitutes an accurate, vertex-reduced, and smoothened approximation of Q. We give an O(n log n)-time exact decision algorithm that handles any polygonal shape, assuming the real-RAM model of computation. An alternative algorithm, based purely on rational arithmetic, answers the same deconstruction problem, up to an uncertainty parameter, and its running time depends on the parameter δ (in addition to the other input parameters: n, δ and the radius of the disk). If the input shape is found to be approximable, the rational-arithmetic algorithm also computes an approximate solution shape for the problem. For convex shapes, the complexity of the exact decision algorithm drops to O(n), which is also the time required to compute a solution shape P with at most one more vertex than a vertex-minimal one. Our study is motivated by applications from two different domains. However, since the offset operation has numerous uses, we anticipate that the reverse question that we study here will be still more broadly applicable. We present results obtained with our implementation of the rational-arithmetic algorithm.},
  author       = {Berberich, Eric and Halperin, Dan and Kerber, Michael and Pogalnikova, Roza},
  booktitle    = {Proceedings of the twenty-seventh annual symposium on Computational geometry},
  location     = {Paris, France},
  pages        = {187 -- 196},
  publisher    = {ACM},
  title        = {{Deconstructing approximate offsets}},
  doi          = {10.1145/1998196.1998225},
  year         = {2011},
}

@inproceedings{3330,
  abstract     = {We consider the problem of approximating all real roots of a square-free polynomial f. Given isolating intervals, our algorithm refines each of them to a width at most 2-L, that is, each of the roots is approximated to L bits after the binary point. Our method provides a certified answer for arbitrary real polynomials, only requiring finite approximations of the polynomial coefficient and choosing a suitable working precision adaptively. In this way, we get a correct algorithm that is simple to implement and practically efficient. Our algorithm uses the quadratic interval refinement method; we adapt that method to be able to cope with inaccuracies when evaluating f, without sacrificing its quadratic convergence behavior. We prove a bound on the bit complexity of our algorithm in terms of degree, coefficient size and discriminant. Our bound improves previous work on integer polynomials by a factor of deg f and essentially matches best known theoretical bounds on root approximation which are obtained by very sophisticated algorithms.},
  author       = {Kerber, Michael and Sagraloff, Michael},
  booktitle    = {Proceedings of the 36th International Symposium on Symbolic and Algebraic Computation},
  location     = {San Jose, California, USA},
  pages        = {209 -- 216},
  publisher    = {ACM},
  title        = {{Root refinement for real polynomials}},
  doi          = {10.1145/1993886.1993920},
  year         = {2011},
}

@article{3332,
  abstract     = {Given an algebraic hypersurface O in ℝd, how many simplices are necessary for a simplicial complex isotopic to O? We address this problem and the variant where all vertices of the complex must lie on O. We give asymptotically tight worst-case bounds for algebraic plane curves. Our results gradually improve known bounds in higher dimensions; however, the question for tight bounds remains unsolved for d ≥ 3.},
  author       = {Kerber, Michael and Sagraloff, Michael},
  journal      = {Graphs and Combinatorics},
  number       = {3},
  pages        = {419 -- 430},
  publisher    = {Springer},
  title        = {{A note on the complexity of real algebraic hypersurfaces}},
  doi          = {10.1007/s00373-011-1020-7},
  volume       = {27},
  year         = {2011},
}

@article{3334,
  author       = {Edelsbrunner, Herbert and Pach, János and Ziegler, Günter},
  journal      = {Discrete & Computational Geometry},
  number       = {1},
  pages        = {1 -- 2},
  publisher    = {Springer},
  title        = {{Letter from the new editors-in-chief}},
  doi          = {10.1007/s00454-010-9313-9},
  volume       = {45},
  year         = {2011},
}

@incollection{3335,
  abstract     = {We study the topology of the Megaparsec Cosmic Web in terms of the scale-dependent Betti numbers, which formalize the topological information content of the cosmic mass distribution. While the Betti numbers do not fully quantify topology, they extend the information beyond conventional cosmological studies of topology in terms of genus and Euler characteristic. The richer information content of Betti numbers goes along the availability of fast algorithms to compute them. For continuous density fields, we determine the scale-dependence of Betti numbers by invoking the cosmologically familiar filtration of sublevel or superlevel sets defined by density thresholds. For the discrete galaxy distribution, however, the analysis is based on the alpha shapes of the particles. These simplicial complexes constitute an ordered sequence of nested subsets of the Delaunay tessellation, a filtration defined by the scale parameter, α. As they are homotopy equivalent to the sublevel sets of the distance field, they are an excellent tool for assessing the topological structure of a discrete point distribution. In order to develop an intuitive understanding for the behavior of Betti numbers as a function of α, and their relation to the morphological patterns in the Cosmic Web, we first study them within the context of simple heuristic Voronoi clustering models. These can be tuned to consist of specific morphological elements of the Cosmic Web, i.e. clusters, filaments, or sheets. To elucidate the relative prominence of the various Betti numbers in different stages of morphological evolution, we introduce the concept of alpha tracks. Subsequently, we address the topology of structures emerging in the standard LCDM scenario and in cosmological scenarios with alternative dark energy content. The evolution of the Betti numbers is shown to reflect the hierarchical evolution of the Cosmic Web. 
We also demonstrate that the scale-dependence of the Betti numbers yields a promising measure of cosmological parameters, with a potential to help in determining the nature of dark energy and to probe primordial non-Gaussianities. We also discuss the expected Betti numbers as a function of the density threshold for superlevel sets of a Gaussian random field. Finally, we introduce the concept of persistent homology. It measures scale levels of the mass distribution and allows us to separate small from large scale features. Within the context of the hierarchical cosmic structure formation, persistence provides a natural formalism for a multiscale topology study of the Cosmic Web.},
  author       = {van de Weygaert, Rien and Vegter, Gert and Edelsbrunner, Herbert and Jones, Bernard and Pranav, Pratyush and Park, Changbom and Hellwing, Wojciech and Eldering, Bob and Kruithof, Nico and Bos, Patrick and Hidding, Johan and Feldbrugge, Job and ten Have, Eline and van Engelen, Matti and Caroli, Manuel and Teillaud, Monique},
  booktitle    = {Transactions on Computational Science XIV},
  editor       = {Gavrilova, Marina and Tan, Kenneth and Mostafavi, Mir},
  pages        = {60 -- 101},
  publisher    = {Springer},
  title        = {{Alpha, Betti and the Megaparsec Universe: On the topology of the Cosmic Web}},
  doi          = {10.1007/978-3-642-25249-5_3},
  volume       = {6970},
  year         = {2011},
}

@inproceedings{3336,
  abstract     = {We introduce TopoCut: a new way to integrate knowledge about topological properties (TPs) into random field image segmentation model. Instead of including TPs as additional constraints during minimization of the energy function, we devise an efficient algorithm for modifying the unary potentials such that the resulting segmentation is guaranteed with the desired properties. Our method is more flexible in the sense that it handles more topology constraints than previous methods, which were only able to enforce pairwise or global connectivity. In particular, our method is very fast, making it for the first time possible to enforce global topological properties in practical image segmentation tasks.},
  author       = {Chen, Chao and Freedman, Daniel and Lampert, Christoph},
  booktitle    = {CVPR: Computer Vision and Pattern Recognition},
  isbn         = {978-1-4577-0394-2},
  location     = {Colorado Springs, CO, United States},
  pages        = {2089 -- 2096},
  publisher    = {IEEE},
  title        = {{Enforcing topological constraints in random field image segmentation}},
  doi          = {10.1109/CVPR.2011.5995503},
  year         = {2011},
}

@inproceedings{3337,
  abstract     = {Playing table tennis is a difficult task for robots, especially due to their limitations of acceleration. A key bottleneck is the amount of time needed to reach the desired hitting position and velocity of the racket for returning the incoming ball. Here, it often does not suffice to simply extrapolate the ball's trajectory after the opponent returns it but more information is needed. Humans are able to predict the ball's trajectory based on the opponent's moves and, thus, have a considerable advantage. Hence, we propose to incorporate an anticipation system into robot table tennis players, which enables the robot to react earlier while the opponent is performing the striking movement. Based on visual observation of the opponent's racket movement, the robot can predict the aim of the opponent and adjust its movement generation accordingly. The policies for deciding how and when to react are obtained by reinforcement learning. We conduct experiments with an existing robot player to show that the learned reaction policy can significantly improve the performance of the overall system.},
  author       = {Wang, Zhikun and Lampert, Christoph and Mülling, Katharina and Schölkopf, Bernhard and Peters, Jan},
  booktitle    = {2011 IEEE/RSJ International Conference on Intelligent Robots and Systems},
  location     = {San Francisco, USA},
  pages        = {332 -- 337},
  publisher    = {IEEE},
  title        = {{Learning anticipation policies for robot table tennis}},
  doi          = {10.1109/IROS.2011.6094892},
  year         = {2011},
}

@unpublished{3338,
  abstract     = {We consider 2-player games played on a finite state space for an infinite number of rounds. The games are concurrent: in each round, the two players (player 1 and player 2) choose their moves inde- pendently and simultaneously; the current state and the two moves determine the successor state. We study concurrent games with ω-regular winning conditions specified as parity objectives. We consider the qualitative analysis problems: the computation of the almost-sure and limit-sure winning set of states, where player 1 can ensure to win with probability 1 and with probability arbitrarily close to 1, respec- tively. In general the almost-sure and limit-sure winning strategies require both infinite-memory as well as infinite-precision (to describe probabilities). We study the bounded-rationality problem for qualitative analysis of concurrent parity games, where the strategy set for player 1 is restricted to bounded-resource strategies. In terms of precision, strategies can be deterministic, uniform, finite-precision or infinite- precision; and in terms of memory, strategies can be memoryless, finite-memory or infinite-memory. We present a precise and complete characterization of the qualitative winning sets for all combinations of classes of strategies. In particular, we show that uniform memoryless strategies are as powerful as finite-precision infinite-memory strategies, and infinite-precision memoryless strategies are as power- ful as infinite-precision finite-memory strategies. We show that the winning sets can be computed in O(n2d+3) time, where n is the size of the game structure and 2d is the number of priorities (or colors), and our algorithms are symbolic. The membership problem of whether a state belongs to a winning set can be decided in NP ∩ coNP. 
While this complexity is the same as for the simpler class of turn-based parity games, where in each state only one of the two players has a choice of moves, our algorithms, that are obtained by characterization of the winning sets as μ-calculus formulas, are considerably more involved than those for turn-based games.},
  author       = {Chatterjee, Krishnendu},
  booktitle    = {arXiv},
  pages        = {1 -- 51},
  note         = {arXiv preprint},
  publisher    = {ArXiv},
  title        = {{Bounded rationality in concurrent parity games}},
  year         = {2011},
}

@unpublished{3339,
  abstract     = {Turn-based stochastic games and its important subclass Markov decision processes (MDPs) provide models for systems with both probabilistic and nondeterministic behaviors. We consider turn-based stochastic games with two classical quantitative objectives: discounted-sum and long-run average objectives. The game models and the quantitative objectives are widely used in probabilistic verification, planning, optimal inventory control, network protocol and performance analysis. Games and MDPs that model realistic systems often have very large state spaces, and probabilistic abstraction techniques are necessary to handle the state-space explosion. The commonly used full-abstraction techniques do not yield space-savings for systems that have many states with similar value, but does not necessarily have similar transition structure. A semi-abstraction technique, namely Magnifying-lens abstractions (MLA), that clusters states based on value only, disregarding differences in their transition relation was proposed for qualitative objectives (reachability and safety objectives). In this paper we extend the MLA technique to solve stochastic games with discounted-sum and long-run average objectives. We present the MLA technique based abstraction-refinement algorithm for stochastic games and MDPs with discounted-sum objectives. For long-run average objectives, our solution works for all MDPs and a sub-class of stochastic games where every state has the same value. },
  author       = {Chatterjee, Krishnendu and De Alfaro, Luca and Roy, Pritam},
  booktitle    = {arXiv},
  pages        = {17},
  note         = {arXiv preprint},
  publisher    = {ArXiv},
  title        = {{Magnifying lens abstraction for stochastic games with discounted and long-run average objectives}},
  year         = {2011},
}

@inproceedings{3342,
  abstract     = {We consider Markov decision processes (MDPs) with ω-regular specifications given as parity objectives. We consider the problem of computing the set of almost-sure winning states from where the objective can be ensured with probability 1. The algorithms for the computation of the almost-sure winning set for parity objectives iteratively use the solutions for the almost-sure winning set for Büchi objectives (a special case of parity objectives). Our contributions are as follows: First, we present the first subquadratic symbolic algorithm to compute the almost-sure winning set for MDPs with Büchi objectives; our algorithm takes O(nm)  symbolic steps as compared to the previous known algorithm that takes O(n 2) symbolic steps, where n is the number of states and m is the number of edges of the MDP. In practice MDPs often have constant out-degree, and then our symbolic algorithm takes O(nn)  symbolic steps, as compared to the previous known O(n 2) symbolic steps algorithm. Second, we present a new algorithm, namely win-lose algorithm, with the following two properties: (a) the algorithm iteratively computes subsets of the almost-sure winning set and its complement, as compared to all previous algorithms that discover the almost-sure winning set upon termination; and (b) requires O(nK)  symbolic steps, where K is the maximal number of edges of strongly connected components (scc’s) of the MDP. The win-lose algorithm requires symbolic computation of scc’s. Third, we improve the algorithm for symbolic scc computation; the previous known algorithm takes linear symbolic steps, and our new algorithm improves the constants associated with the linear number of steps. In the worst case the previous known algorithm takes 5·n symbolic steps, whereas our new algorithm takes 4 ·n symbolic steps.},
  author       = {Chatterjee, Krishnendu and Henzinger, Monika H and Joglekar, Manas and Shah, Nisarg},
  booktitle    = {Computer Aided Verification},
  editor       = {Gopalakrishnan, Ganesh and Qadeer, Shaz},
  location     = {Snowbird, USA},
  pages        = {260 -- 276},
  publisher    = {Springer},
  title        = {{Symbolic algorithms for qualitative analysis of Markov decision processes with Büchi objectives}},
  doi          = {10.1007/978-3-642-22110-1_21},
  volume       = {6806},
  year         = {2011},
}

@inproceedings{3343,
  abstract     = {We present faster and dynamic algorithms for the following problems arising in probabilistic verification: Computation of the maximal end-component (mec) decomposition of Markov decision processes (MDPs), and of the almost sure winning set for reachability and parity objectives in MDPs. We achieve the following running time for static algorithms in MDPs with graphs of n vertices and m edges: (1) O(m · min{ √m, n2/3 }) for the mec decomposition, improving the longstanding O(m·n) bound; (2) O(m·n2/3) for reachability objectives, improving the previous O(m · √m) bound for m &gt; n4/3; and (3) O(m · min{ √m, n2/3 } · log(d)) for parity objectives with d priorities, improving the previous O(m · √m · d) bound. We also give incremental and decremental algorithms in linear time for mec decomposition and reachability objectives and O(m · log d) time for parity ob jectives.},
  author       = {Chatterjee, Krishnendu and Henzinger, Monika H},
  booktitle    = {Proceedings of the Twenty-Second Annual ACM-SIAM Symposium on Discrete Algorithms},
  location     = {San Francisco, CA, United States},
  pages        = {1318 -- 1336},
  publisher    = {SIAM},
  title        = {{Faster and dynamic algorithms for maximal end-component decomposition and related graph problems in probabilistic verification}},
  doi          = {10.1137/1.9781611973082.101},
  year         = {2011},
}

@inproceedings{3344,
  abstract     = {Games played on graphs provide the mathematical framework to analyze several important problems in computer science as well as mathematics, such as the synthesis problem of Church, model checking of open reactive systems and many others. On the basis of mode of interaction of the players these games can be classified as follows: (a) turn-based (players make moves in turns); and (b) concurrent (players make moves simultaneously). On the basis of the information available to the players these games can be classified as follows: (a) perfect-information (players have perfect view of the game); and (b) partial-information (players have partial view of the game). In this talk we will consider all these classes of games with reachability objectives, where the goal of one player is to reach a set of target vertices of the graph, and the goal of the opponent player is to prevent the player from reaching the target. We will survey the results for various classes of games, and the results range from linear time decision algorithms to EXPTIME-complete problems to undecidable problems.},
  author       = {Chatterjee, Krishnendu},
  booktitle    = {Reachability Problems},
  editor       = {Delzanno, Giorgio and Potapov, Igor},
  location     = {Genoa, Italy},
  pages        = {1 -- 1},
  publisher    = {Springer},
  title        = {{Graph games with reachability objectives}},
  doi          = {10.1007/978-3-642-24288-5_1},
  volume       = {6945},
  year         = {2011},
}

@inproceedings{10907,
  abstract     = {This paper presents a method to create a model of an articulated object using the planar motion in an initialization video. The model consists of rigid parts connected by points of articulation. The rigid parts are described by the positions of salient feature-points tracked throughout the video. Following a filtering step that identifies points that belong to different objects, rigid parts are found by a grouping process in a graph pyramid. Valid articulation points are selected by verifying multiple hypotheses for each pair of parts.},
  author       = {Artner, Nicole M. and Ion, Adrian and Kropatsch, Walter G.},
  booktitle    = {Graph-Based Representations in Pattern Recognition},
  editor       = {Jiang, Xiaoyi and Ferrer, Miquel and Torsello, Andrea},
  isbn         = {9783642208430},
  issn         = {1611-3349},
  location     = {Münster, Germany},
  pages        = {215--224},
  publisher    = {Springer},
  title        = {{Spatio-temporal extraction of articulated models in a graph pyramid}},
  doi          = {10.1007/978-3-642-20844-7_22},
  volume       = {6658},
  year         = {2011},
}

@article{11094,
  abstract     = {Nuclear pore complexes (NPCs) assemble at the end of mitosis during nuclear envelope (NE) reformation and into an intact NE as cells progress through interphase. Although recent studies have shown that NPC formation occurs by two different molecular mechanisms at two distinct cell cycle stages, little is known about the molecular players that mediate the fusion of the outer and inner nuclear membranes to form pores. In this paper, we provide evidence that the transmembrane nucleoporin (Nup), POM121, but not the Nup107–160 complex, is present at new pore assembly sites at a time that coincides with inner nuclear membrane (INM) and outer nuclear membrane (ONM) fusion. Overexpression of POM121 resulted in juxtaposition of the INM and ONM. Additionally, Sun1, an INM protein that is known to interact with the cytoskeleton, was specifically required for interphase assembly and localized with POM121 at forming pores. We propose a model in which POM121 and Sun1 interact transiently to promote early steps of interphase NPC assembly.},
  author       = {Talamas, Jessica A. and Hetzer, Martin W.},
  issn         = {1540-8140},
  journal      = {Journal of Cell Biology},
  keywords     = {Cell Biology},
  number       = {1},
  pages        = {27--37},
  publisher    = {Rockefeller University Press},
  title        = {{POM121 and Sun1 play a role in early steps of interphase NPC assembly}},
  doi          = {10.1083/jcb.201012154},
  volume       = {194},
  year         = {2011},
}

@article{11095,
  author       = {Hetzer, Martin W. and Cavalli, Giacomo},
  issn         = {0955-0674},
  journal      = {Current Opinion in Cell Biology},
  keywords     = {Cell Biology},
  number       = {3},
  pages        = {255--257},
  publisher    = {Elsevier},
  title        = {{Editorial overview}},
  doi          = {10.1016/j.ceb.2011.04.013},
  volume       = {23},
  year         = {2011},
}

@article{11096,
  abstract     = {As the gatekeepers of the eukaryotic cell nucleus, nuclear pore complexes (NPCs) mediate all molecular trafficking between the nucleoplasm and the cytoplasm. In recent years, transport-independent functions of NPC components, nucleoporins, have been identified including roles in chromatin organization and gene regulation. Here, we summarize our current view of the NPC as a dynamic hub for the integration of chromatin regulation and nuclear trafficking and discuss the functional interplay between nucleoporins and the nuclear genome.},
  author       = {Liang, Yun and Hetzer, Martin W.},
  issn         = {0955-0674},
  journal      = {Current Opinion in Cell Biology},
  keywords     = {Cell Biology},
  number       = {1},
  pages        = {65--70},
  publisher    = {Elsevier},
  title        = {{Functional interactions between nucleoporins and chromatin}},
  doi          = {10.1016/j.ceb.2010.09.008},
  volume       = {23},
  year         = {2011},
}

@article{11100,
  abstract     = {Eukaryotic cell function depends on the physical separation of nucleoplasmic and cytoplasmic components by the nuclear envelope (NE). Molecular communication between the two compartments involves active, signal-mediated trafficking, a function that is exclusively performed by nuclear pore complexes (NPCs). The individual NPC components and the mechanisms that are involved in nuclear trafficking are well documented and have become textbook knowledge. However, in addition to their roles as nuclear gatekeepers, NPC components-nucleoporins-have been shown to have critical roles in chromatin organization and gene regulation. These findings have sparked new enthusiasm to study the roles of this multiprotein complex in nuclear organization and explore novel functions that in some cases appear to go beyond a role in transport. Here, we discuss our present view of NPC biogenesis, which is tightly linked to proper cell cycle progression and cell differentiation. In addition, we summarize new data suggesting that NPCs represent dynamic hubs for the integration of gene regulation and nuclear transport processes.},
  author       = {Capelson, Maya and Doucet, Christine and Hetzer, Martin W.},
  isbn         = {9781936113071},
  issn         = {0091-7451},
  journal      = {Cold Spring Harbor Symposia on Quantitative Biology},
  keywords     = {Genetics, Molecular Biology, Biochemistry},
  pages        = {585--597},
  publisher    = {Cold Spring Harbor Laboratory Press},
  title        = {{Nuclear pore complexes: Guardians of the nuclear genome}},
  doi          = {10.1101/sqb.2010.75.059},
  volume       = {75},
  year         = {2011},
}

@article{112,
  abstract     = {Particle beams are important tools for probing atomic and molecular interactions. Here we demonstrate that particle beams also offer a unique opportunity to investigate interactions in macroscopic systems, such as granular media. Motivated by recent experiments on streams of grains that exhibit liquid-like breakup into droplets, we use molecular dynamics simulations to investigate the evolution of a dense stream of macroscopic spheres accelerating out of an opening at the bottom of a reservoir. We show how nanoscale details associated with energy dissipation during collisions modify the stream's macroscopic behavior. We find that inelastic collisions collimate the stream, while the presence of short-range attractive interactions drives structure formation. Parameterizing the collision dynamics by the coefficient of restitution (i.e., the ratio of relative velocities before and after impact) and the strength of the cohesive interaction, we map out a spectrum of behaviors that ranges from gaslike jets in which all grains drift apart to liquid-like streams that break into large droplets containing hundreds of grains. We also find a new, intermediate regime in which small aggregates form by capture from the gas phase, similar to what can be observed in molecular beams. Our results show that nearly all aspects of stream behavior are closely related to the velocity gradient associated with vertical free fall. Led by this observation, we propose a simple energy balance model to explain the droplet formation process. The qualitative as well as many quantitative features of the simulations and the model compare well with available experimental data and provide a first quantitative measure of the role of attractions in freely cooling granular streams.},
  author       = {Waitukaitis, Scott R and Grütjen, Helge and Royer, John and Jaeger, Heinrich},
  journal      = {Physical Review E},
  number       = {5},
  publisher    = {American Physical Society},
  title        = {{Droplet and cluster formation in freely falling granular streams}},
  doi          = {10.1103/PhysRevE.83.051302},
  volume       = {83},
  year         = {2011},
}

@article{11673,
  abstract     = {Given only the URL of a Web page, can we identify its topic? We study this problem in detail by exploring a large number of different feature sets and algorithms on several datasets. We also show that the inherent overlap between topics and the sparsity of the information in URLs makes this a very challenging problem. Web page classification without a page’s content is desirable when the content is not available at all, when a classification is needed before obtaining the content, or when classification speed is of utmost importance. For our experiments we used five different corpora comprising a total of about 3 million (URL, classification) pairs. We evaluated several techniques for feature generation and classification algorithms. The individual binary classifiers were then combined via boosting into metabinary classifiers. We achieve typical F-measure values between 80 and 85, and a typical precision of around 86. The precision can be pushed further over 90 while maintaining a typical level of recall between 30 and 40.},
  author       = {Baykan, Eda and Henzinger, Monika H and Marian, Ludmila and Weber, Ingmar},
  issn         = {1559-114X},
  journal      = {ACM Transactions on the Web},
  keywords     = {Topic classification, URL, ODP},
  number       = {3},
  publisher    = {Association for Computing Machinery},
  title        = {{A comprehensive study of features and algorithms for URL-based topic classification}},
  doi          = {10.1145/1993053.1993057},
  volume       = {5},
  year         = {2011},
}

