@article{1063,
  abstract     = {Severe environmental change can drive a population extinct unless the population adapts in time to the new conditions (“evolutionary rescue”). How does biparental sexual reproduction influence the chances of population persistence compared to clonal reproduction or selfing? In this article, we set up a one‐locus two‐allele model for adaptation in diploid species, where rescue is contingent on the establishment of the mutant homozygote. Reproduction can occur by random mating, selfing, or clonally. Random mating generates and destroys the rescue mutant; selfing is efficient at generating it but at the same time depletes the heterozygote, which can lead to a low mutant frequency in the standing genetic variation. Due to these (and other) antagonistic effects, we find a nontrivial dependence of population survival on the rate of sex/selfing, which is strongly influenced by the dominance coefficient of the mutation before and after the environmental change. Importantly, since mating with the wild‐type breaks the mutant homozygote up, a slow decay of the wild‐type population size can impede rescue in randomly mating populations.},
  author       = {Uecker, Hildegard},
  issn         = {0014-3820},
  journal      = {Evolution},
  number       = {4},
  pages        = {845--858},
  publisher    = {Wiley-Blackwell},
  title        = {{Evolutionary rescue in randomly mating, selfing, and clonal populations}},
  doi          = {10.1111/evo.13191},
  volume       = {71},
  year         = {2017},
}

@article{1065,
  abstract     = {We consider the problem of reachability in pushdown graphs. We study the problem for pushdown graphs with constant treewidth. Even for pushdown graphs with treewidth 1, for the reachability problem we establish the following: (i) the problem is PTIME-complete, and (ii) any subcubic algorithm for the problem would contradict the k-clique conjecture and imply faster combinatorial algorithms for cliques in graphs.},
  author       = {Chatterjee, Krishnendu and Osang, Georg F},
  issn         = {0020-0190},
  journal      = {Information Processing Letters},
  pages        = {25--29},
  publisher    = {Elsevier},
  title        = {{Pushdown reachability with constant treewidth}},
  doi          = {10.1016/j.ipl.2017.02.003},
  volume       = {122},
  year         = {2017},
}

@article{1180,
  abstract     = {In this article we define an algebraic vertex of a generalized polyhedron and show that the set of algebraic vertices is the smallest set of points needed to define the polyhedron. We prove that the indicator function of a generalized polytope P is a linear combination of indicator functions of simplices whose vertices are algebraic vertices of P. We also show that the indicator function of any generalized polyhedron is a linear combination, with integer coefficients, of indicator functions of cones with apices at algebraic vertices and line-cones. The concept of an algebraic vertex is closely related to the Fourier–Laplace transform. We show that a point v is an algebraic vertex of a generalized polyhedron P if and only if the tangent cone of P, at v, has non-zero Fourier–Laplace transform.},
  author       = {Akopyan, Arseniy and Bárány, Imre and Robins, Sinai},
  issn         = {0001-8708},
  journal      = {Advances in Mathematics},
  pages        = {627--644},
  publisher    = {Academic Press},
  title        = {{Algebraic vertices of non-convex polyhedra}},
  doi          = {10.1016/j.aim.2016.12.026},
  volume       = {308},
  year         = {2017},
}

@inproceedings{11829,
  abstract     = {In recent years it has become popular to study dynamic problems in a sensitivity setting: Instead of allowing for an arbitrary sequence of updates, the sensitivity model only allows to apply batch updates of small size to the original input data. The sensitivity model is particularly appealing since recent strong conditional lower bounds ruled out fast algorithms for many dynamic problems, such as shortest paths, reachability, or subgraph connectivity.

In this paper we prove conditional lower bounds for these and additional problems in a sensitivity setting. For example, we show that under the Boolean Matrix Multiplication (BMM) conjecture combinatorial algorithms cannot compute the $(4/3-\varepsilon)$-approximate diameter of an undirected unweighted dense graph with truly subcubic preprocessing time and truly subquadratic update/query time. This result is surprising since in the static setting it is not clear whether a reduction from BMM to diameter is possible. We further show under the BMM conjecture that many problems, such as reachability or approximate shortest paths, cannot be solved faster than by recomputation from scratch even after only one or two edge insertions. We extend our reduction from BMM to Diameter to give a reduction from All Pairs Shortest Paths to Diameter under one deletion in weighted graphs. This is intriguing, as in the static setting it is a big open problem whether Diameter is as hard as APSP. We further get a nearly tight lower bound for shortest paths after two edge deletions based on the APSP conjecture. We give more lower bounds under the Strong Exponential Time Hypothesis. Many of our lower bounds also hold for static oracle data structures where no sensitivity is required.

Finally, we give the first algorithm for the $(1+\varepsilon)$-approximate radius, diameter, and eccentricity problems in directed or undirected unweighted graphs in case of single edges failures. The algorithm has a truly subcubic running time for graphs with a truly subquadratic number of edges; it is tight w.r.t. the conditional lower bounds we obtain.},
  author       = {Henzinger, Monika H and Lincoln, Andrea and Neumann, Stefan and Vassilevska Williams, Virginia},
  booktitle    = {8th Innovations in Theoretical Computer Science Conference},
  isbn         = {9783959770293},
  issn         = {1868-8969},
  location     = {Berkeley, CA, United States},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Conditional hardness for sensitivity problems}},
  doi          = {10.4230/LIPIcs.ITCS.2017.26},
  volume       = {67},
  year         = {2017},
}

@inproceedings{11831,
  abstract     = {Graph Sparsification aims at compressing large graphs into smaller ones while (approximately) preserving important characteristics of the input graph. In this work we study Vertex Sparsifiers, i.e., sparsifiers whose goal is to reduce the number of vertices. Given a weighted graph G=(V,E), and a terminal set K with $|K|=k$, a quality-q vertex cut sparsifier of G is a graph H with K contained in $V_H$ that preserves the value of minimum cuts separating any bipartition of K, up to a factor of q. We show that planar graphs with all the k terminals lying on the same face admit quality-1 vertex cut sparsifier of size $O(k^2)$ that are also planar. Our result extends to vertex flow and distance sparsifiers. It improves the previous best known bound of $O(k^2 2^{2k})$ for cut and flow sparsifiers by an exponential factor, and matches an $\Omega(k^2)$ lower-bound for this class of graphs.

We also study vertex reachability sparsifiers for directed graphs. Given a digraph G=(V,E) and a terminal set K, a vertex reachability sparsifier of G is a digraph $H=(V_H,E_H)$, K contained in $V_H$ that preserves all reachability information among terminal pairs. We introduce the notion of reachability-preserving minors, i.e., we require H to be a minor of G. Among others, for general planar digraphs, we construct reachability-preserving minors of size $O(k^2 \log^2 k)$. We complement our upper-bound by showing that there exists an infinite family of acyclic planar digraphs such that any reachability-preserving minor must have $\Omega(k^2)$ vertices.},
  author       = {Goranci, Gramoz and Henzinger, Monika H and Peng, Pan},
  booktitle    = {25th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-049-1},
  issn         = {1868-8969},
  location     = {Vienna, Austria},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Improved guarantees for vertex sparsification in planar graphs}},
  doi          = {10.4230/LIPIcs.ESA.2017.44},
  volume       = {87},
  year         = {2017},
}

@inproceedings{11832,
  abstract     = {In this paper, we study the problem of opening centers to cluster a set of clients in a metric space so as to minimize the sum of the costs of the centers and of the cluster radii, in a dynamic environment where clients arrive and depart, and the solution must be updated efficiently while remaining competitive with respect to the current optimal solution. We call this dynamic sum-of-radii clustering problem.

We present a data structure that maintains a solution whose cost is within a constant factor of the cost of an optimal solution in metric spaces with bounded doubling dimension and whose worst-case update time is logarithmic in the parameters of the problem.},
  author       = {Henzinger, Monika H and Leniowski, Dariusz and Mathieu, Claire},
  booktitle    = {25th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-049-1},
  issn         = {1868-8969},
  location     = {Vienna, Austria},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Dynamic clustering to minimize the sum of radii}},
  doi          = {10.4230/LIPIcs.ESA.2017.48},
  volume       = {87},
  year         = {2017},
}

@inproceedings{11833,
  abstract     = {We introduce a new algorithmic framework for designing dynamic graph algorithms in minor-free graphs, by exploiting the structure of such graphs and a tool called vertex sparsification, which is a way to compress large graphs into small ones that well preserve relevant properties among a subset of vertices and has previously mainly been used in the design of approximation algorithms.

Using this framework, we obtain a Monte Carlo randomized fully dynamic algorithm for $(1+\epsilon)$-approximating the energy of electrical flows in n-vertex planar graphs with $\tilde{O}(r \epsilon^{-2})$ worst-case update time and $\tilde{O}((r + n/\sqrt{r}) \epsilon^{-2})$ worst-case query time, for any r larger than some constant. For $r=n^{2/3}$, this gives $\tilde{O}(n^{2/3} \epsilon^{-2})$ update time and $\tilde{O}(n^{2/3} \epsilon^{-2})$ query time. We also extend this algorithm to work for minor-free graphs with similar approximation and running time guarantees. Furthermore, we illustrate our framework on the all-pairs max flow and shortest path problems by giving corresponding dynamic algorithms in minor-free graphs with both sublinear update and query times. To the best of our knowledge, our results are the first to systematically establish such a connection between dynamic graph algorithms and vertex sparsification.

We also present both upper bound and lower bound for maintaining the energy of electrical flows in the incremental subgraph model, where updates consist of only vertex activations, which might be of independent interest.},
  author       = {Goranci, Gramoz and Henzinger, Monika H and Peng, Pan},
  booktitle    = {25th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-049-1},
  issn         = {1868-8969},
  location     = {Vienna, Austria},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{The power of vertex sparsifiers in dynamic graph algorithms}},
  doi          = {10.4230/LIPIcs.ESA.2017.45},
  volume       = {87},
  year         = {2017},
}

@article{1187,
  abstract     = {We construct efficient authentication protocols and message authentication codes (MACs) whose security can be reduced to the learning parity with noise (LPN) problem. Despite a large body of work—starting with the HB protocol of Hopper and Blum in 2001—until now it was not even known how to construct an efficient authentication protocol from LPN which is secure against man-in-the-middle attacks. A MAC implies such a (two-round) protocol.},
  author       = {Kiltz, Eike and Pietrzak, Krzysztof Z and Venturi, Daniele and Cash, David and Jain, Abhishek},
  issn         = {0933-2790},
  journal      = {Journal of Cryptology},
  number       = {4},
  pages        = {1238--1275},
  publisher    = {Springer},
  title        = {{Efficient authentication from hard learning problems}},
  doi          = {10.1007/s00145-016-9247-3},
  volume       = {30},
  year         = {2017},
}

@inproceedings{11873,
  abstract     = {We study the problem of computing a minimum cut in a simple, undirected graph and give a deterministic $O(m \log^2 n \log\log^2 n)$ time algorithm. This improves both on the best previously known deterministic running time of $O(m \log^{12} n)$ (Kawarabayashi and Thorup [12]) and the best previously known randomized running time of $O(m \log^3 n)$ (Karger [11]) for this problem, though Karger's algorithm can be further applied to weighted graphs.

Our approach is using the Kawarabayashi and Thorup graph compression technique, which repeatedly finds low-conductance cuts. To find these cuts they use a diffusion-based local algorithm. We use instead a flow-based local algorithm and suitably adjust their framework to work with our flow-based subroutine. Both flow and diffusion based methods have a long history of being applied to finding low conductance cuts. Diffusion algorithms have several variants that are naturally local while it is more complicated to make flow methods local. Some prior work has proven nice properties for local flow based algorithms with respect to improving or cleaning up low conductance cuts. Our flow subroutine, however, is the first that is both local and produces low conductance cuts. Thus, it may be of independent interest.},
  author       = {Henzinger, Monika H and Rao, Satish and Wang, Di},
  booktitle    = {28th Annual ACM-SIAM Symposium on Discrete Algorithms},
  location     = {Barcelona, Spain},
  pages        = {1919--1938},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Local flow partitioning for faster edge connectivity}},
  doi          = {10.1137/1.9781611974782.125},
  year         = {2017},
}

@inproceedings{11874,
  abstract     = {We consider the problem of maintaining an approximately maximum (fractional) matching and an approximately minimum vertex cover in a dynamic graph. Starting with the seminal paper by Onak and Rubinfeld [STOC 2010], this problem has received significant attention in recent years. There remains, however, a polynomial gap between the best known worst case update time and the best known amortised update time for this problem, even after allowing for randomisation. Specifically, Bernstein and Stein [ICALP 2015, SODA 2016] have the best known worst case update time. They present a deterministic data structure with approximation ratio (3/2 + ∊) and worst case update time $O(m^{1/4}/\varepsilon^{2})$, where m is the number of edges in the graph. In recent past, Gupta and Peng [FOCS 2013] gave a deterministic data structure with approximation ratio (1+ ∊) and worst case update time $O(\sqrt{m}/\varepsilon^{2})$. No known randomised data structure beats the worst case update times of these two results. In contrast, the paper by Onak and Rubinfeld [STOC 2010] gave a randomised data structure with approximation ratio O(1) and amortised update time $O(\log^{2} n)$, where n is the number of nodes in the graph. This was later improved by Baswana, Gupta and Sen [FOCS 2011] and Solomon [FOCS 2016], leading to a randomised data structure with approximation ratio 2 and amortised update time O(1).

We bridge the polynomial gap between the worst case and amortised update times for this problem, without using any randomisation. We present a deterministic data structure with approximation ratio (2 + ∊) and worst case update time $O(\log^{3} n)$, for all sufficiently small constants ∊.},
  author       = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon},
  booktitle    = {28th Annual ACM-SIAM Symposium on Discrete Algorithms},
  location     = {Barcelona, Spain},
  pages        = {470--489},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Fully dynamic approximate maximum matching and minimum vertex cover in $O(\log^3 n)$ worst case update time}},
  doi          = {10.1137/1.9781611974782.30},
  year         = {2017},
}

@article{11903,
  abstract     = {Online social networks allow the collection of large amounts of data about the influence between users connected by a friendship-like relationship. When distributing items among agents forming a social network, this information allows us to exploit network externalities that each agent receives from his neighbors that get the same item. In this paper we consider Friends-of-Friends (2-hop) network externalities, i.e., externalities that not only depend on the neighbors that get the same item but also on neighbors of neighbors. For these externalities we study a setting where multiple different items are assigned to unit-demand agents. Specifically, we study the problem of welfare maximization under different types of externality functions. Let n be the number of agents and m be the number of items. Our contributions are the following: (1) We show that welfare maximization is APX-hard; we show that even for step functions with 2-hop (and also with 1-hop) externalities it is NP-hard to approximate social welfare better than (1−1/e). (2) On the positive side we present (i) an $O(\sqrt{n})$-approximation algorithm for general concave externality functions, (ii) an O(log m)-approximation algorithm for linear externality functions, and (iii) a $\frac{5}{18}(1-1/e)$-approximation algorithm for 2-hop step function externalities. We also improve the result from [7] for 1-hop step function externalities by giving a $\frac{1}{2}(1-1/e)$-approximation algorithm.},
  author       = {Bhattacharya, Sayan and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin},
  issn         = {1433-0490},
  journal      = {Theory of Computing Systems},
  number       = {4},
  pages        = {948--986},
  publisher    = {Springer Nature},
  title        = {{Welfare maximization with friends-of-friends network externalities}},
  doi          = {10.1007/s00224-017-9759-8},
  volume       = {61},
  year         = {2017},
}

@article{1191,
  abstract     = {Variation in genotypes may be responsible for differences in dispersal rates, directional biases, and growth rates of individuals. These traits may favor certain genotypes and enhance their spatiotemporal spreading into areas occupied by the less advantageous genotypes. We study how these factors influence the speed of spreading in the case of two competing genotypes under the assumption that spatial variation of the total population is small compared to the spatial variation of the frequencies of the genotypes in the population. In that case, the dynamics of the frequency of one of the genotypes is approximately described by a generalized Fisher–Kolmogorov–Petrovskii–Piskunov (F–KPP) equation. This generalized F–KPP equation with (nonlinear) frequency-dependent diffusion and advection terms admits traveling wave solutions that characterize the invasion of the dominant genotype. Our existence results generalize the classical theory for traveling waves for the F–KPP with constant coefficients. Moreover, in the particular case of the quadratic (monostable) nonlinear growth–decay rate in the generalized F–KPP we study in detail the influence of the variance in diffusion and mean displacement rates of the two genotypes on the minimal wave propagation speed.},
  author       = {Kollár, Richard and Novak, Sebastian},
  issn         = {0092-8240},
  journal      = {Bulletin of Mathematical Biology},
  number       = {3},
  pages        = {525--559},
  publisher    = {Springer},
  title        = {{Existence of traveling waves for the generalized F–KPP equation}},
  doi          = {10.1007/s11538-016-0244-3},
  volume       = {79},
  year         = {2017},
}

@inproceedings{1192,
  abstract     = {The main result of this paper is a generalization of the classical blossom algorithm for finding perfect matchings. Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvorak and Kupec. Knowing that edge CSP is tractable for even Δ-matroid constraints allows us to extend the tractability result to a larger class of Δ-matroids that includes many classes that were known to be tractable before, namely co-independent, compact, local and binary.},
  author       = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal},
  booktitle    = {28th Annual ACM-SIAM Symposium on Discrete Algorithms},
  isbn         = {978-161197478-2},
  location     = {Barcelona, Spain},
  pages        = {307--326},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Even delta-matroids and the complexity of planar Boolean CSPs}},
  doi          = {10.1137/1.9781611974782.20},
  year         = {2017},
}

@inproceedings{1194,
  abstract     = {Termination is one of the basic liveness properties, and we study the termination problem for probabilistic programs with real-valued variables. Previous works focused on the qualitative problem that asks whether an input program terminates with probability~1 (almost-sure termination). A powerful approach for this qualitative problem is the notion of ranking supermartingales with respect to a given set of invariants. The quantitative problem (probabilistic termination) asks for bounds on the termination probability. A fundamental and conceptual drawback of the existing approaches to address probabilistic termination is that even though the supermartingales consider the probabilistic behavior of the programs, the invariants are obtained completely ignoring the probabilistic aspect. In this work we address the probabilistic termination problem for linear-arithmetic probabilistic programs with nondeterminism. We define the notion of stochastic invariants, which are constraints along with a probability bound that the constraints hold. We introduce a concept of repulsing supermartingales. First, we show that repulsing supermartingales can be used to obtain bounds on the probability of the stochastic invariants. Second, we show the effectiveness of repulsing supermartingales in the following three ways: (1)~With a combination of ranking and repulsing supermartingales we can compute lower bounds on the probability of termination; (2)~repulsing supermartingales provide witnesses for refutation of almost-sure termination; and (3)~with a combination of ranking and repulsing supermartingales we can establish persistence properties of probabilistic programs. We also present results on related computational problems and an experimental evaluation of our approach on academic examples.},
  author       = {Chatterjee, Krishnendu and Novotny, Petr and Zikelic, Djordje},
  booktitle    = {Proceedings of the 44th ACM SIGPLAN Symposium on Principles of Programming Languages},
  issn         = {07308566},
  location     = {Paris, France},
  number       = {1},
  pages        = {145--160},
  publisher    = {ACM},
  title        = {{Stochastic invariants for probabilistic termination}},
  doi          = {10.1145/3009837.3009873},
  volume       = {52},
  year         = {2017},
}

@article{1196,
  abstract     = {We define the model-measuring problem: given a model M and specification ϕ, what is the maximal distance ρ such that all models M' within distance ρ from M satisfy (or violate) ϕ. The model-measuring problem presupposes a distance function on models. We concentrate on automatic distance functions, which are defined by weighted automata. The model-measuring problem subsumes several generalizations of the classical model-checking problem, in particular, quantitative model-checking problems that measure the degree of satisfaction of a specification; robustness problems that measure how much a model can be perturbed without violating the specification; and parameter synthesis for hybrid systems. We show that for automatic distance functions, and (a) ω-regular linear-time, (b) ω-regular branching-time, and (c) hybrid specifications, the model-measuring problem can be solved. We use automata-theoretic model-checking methods for model measuring, replacing the emptiness question for word, tree, and hybrid automata by the optimal-value question for the weighted versions of these automata. For automata over words and trees, we consider weighted automata that accumulate weights by maximizing, summing, discounting, and limit averaging. For hybrid automata, we consider monotonic (parametric) hybrid automata, a hybrid counterpart of (discrete) weighted automata. We give several examples of using the model-measuring problem to compute various notions of robustness and quantitative satisfaction for temporal specifications. Further, we propose the modeling framework for model measuring to ease the specification and reduce the likelihood of errors in modeling. Finally, we present a variant of the model-measuring problem, called the model-repair problem. The model-repair problem applies to models that do not satisfy the specification; it can be used to derive restrictions, under which the model satisfies the specification, i.e., to repair the model.},
  author       = {Henzinger, Thomas A and Otop, Jan},
  journal      = {Nonlinear Analysis: Hybrid Systems},
  pages        = {166--190},
  publisher    = {Elsevier},
  title        = {{Model measuring for discrete and hybrid systems}},
  doi          = {10.1016/j.nahs.2016.09.001},
  volume       = {23},
  year         = {2017},
}

@article{11961,
  abstract     = {Flow chemistry involves the use of channels or tubing to conduct a reaction in a continuous stream rather than in a flask. Flow equipment provides chemists with unique control over reaction parameters enhancing reactivity or in some cases enabling new reactions. This relatively young technology has received a remarkable amount of attention in the past decade with many reports on what can be done in flow. Until recently, however, the question, “Should we do this in flow?” has merely been an afterthought. This review introduces readers to the basic principles and fundamentals of flow chemistry and critically discusses recent flow chemistry accounts.},
  author       = {Plutschack, Matthew B. and Pieber, Bartholomäus and Gilmore, Kerry and Seeberger, Peter H.},
  issn         = {1520-6890},
  journal      = {Chemical Reviews},
  number       = {18},
  pages        = {11796--11893},
  publisher    = {American Chemical Society},
  title        = {{The Hitchhiker’s Guide to Flow Chemistry}},
  doi          = {10.1021/acs.chemrev.7b00183},
  volume       = {117},
  year         = {2017},
}

@article{11976,
  abstract     = {The way organic multistep synthesis is performed is changing due to the adoption of flow chemical techniques, which has enabled the development of improved methods to make complex molecules. The modular nature of the technique provides not only access to target molecules via linear flow approaches but also for the targeting of structural cores with single systems. This perspective article summarizes the state of the art of continuous multistep synthesis and discusses the main challenges and opportunities in this area.},
  author       = {Pieber, Bartholomäus and Gilmore, Kerry and Seeberger, Peter H.},
  issn         = {2063-0212},
  journal      = {Journal of Flow Chemistry},
  number       = {3-4},
  pages        = {129--136},
  publisher    = {AKJournals},
  title        = {{Integrated flow processing -- challenges in continuous multistep synthesis}},
  doi          = {10.1556/1846.2017.00016},
  volume       = {7},
  year         = {2017},
}

@article{1198,
  abstract     = {We consider a model of fermions interacting via point interactions, defined via a certain weighted Dirichlet form. While for two particles the interaction corresponds to infinite scattering length, the presence of further particles effectively decreases the interaction strength. We show that the model becomes trivial in the thermodynamic limit, in the sense that the free energy density at any given particle density and temperature agrees with the corresponding expression for non-interacting particles.},
  author       = {Moser, Thomas and Seiringer, Robert},
  issn         = {0377-9017},
  journal      = {Letters in Mathematical Physics},
  number       = {3},
  pages        = {533--552},
  publisher    = {Springer},
  title        = {{Triviality of a model of particles with point interactions in the thermodynamic limit}},
  doi          = {10.1007/s11005-016-0915-x},
  volume       = {107},
  year         = {2017},
}

@article{1199,
  abstract     = {Much of quantitative genetics is based on the ‘infinitesimal model’, under which selection has a negligible effect on the genetic variance. This is typically justified by assuming a very large number of loci with additive effects. However, it applies even when genes interact, provided that the number of loci is large enough that selection on each of them is weak relative to random drift. In the long term, directional selection will change allele frequencies, but even then, the effects of epistasis on the ultimate change in trait mean due to selection may be modest. Stabilising selection can maintain many traits close to their optima, even when the underlying alleles are weakly selected. However, the number of traits that can be optimised is apparently limited to ~4Ne by the ‘drift load’, and this is hard to reconcile with the apparent complexity of many organisms. Just as for the mutation load, this limit can be evaded by a particular form of negative epistasis. A more robust limit is set by the variance in reproductive success. This suggests that selection accumulates information most efficiently in the infinitesimal regime, when selection on individual alleles is weak, and comparable with random drift. A review of evidence on selection strength suggests that although most variance in fitness may be because of alleles with large Nes, substantial amounts of adaptation may be because of alleles in the infinitesimal regime, in which epistasis has modest effects.},
  author       = {Barton, Nicholas H},
  issn         = {0018-067X},
  journal      = {Heredity},
  pages        = {96--109},
  publisher    = {Nature Publishing Group},
  title        = {{How does epistasis influence the response to selection?}},
  doi          = {10.1038/hdy.2016.109},
  volume       = {118},
  year         = {2017},
}

@article{1207,
  abstract     = {The eigenvalue distribution of the sum of two large Hermitian matrices, when one of them is conjugated by a Haar distributed unitary matrix, is asymptotically given by the free convolution of their spectral distributions. We prove that this convergence also holds locally in the bulk of the spectrum, down to the optimal scales larger than the eigenvalue spacing. The corresponding eigenvectors are fully delocalized. Similar results hold for the sum of two real symmetric matrices, when one is conjugated by Haar orthogonal matrix.},
  author       = {Bao, Zhigang and Erdös, László and Schnelli, Kevin},
  issn         = {0010-3616},
  journal      = {Communications in Mathematical Physics},
  number       = {3},
  pages        = {947--990},
  publisher    = {Springer},
  title        = {{Local law of addition of random matrices on optimal scale}},
  doi          = {10.1007/s00220-016-2805-6},
  volume       = {349},
  year         = {2017},
}

