@article{10374,
  abstract     = {The formation of filaments from naturally occurring protein molecules is a process at the core of a range of functional and aberrant biological phenomena, such as the assembly of the cytoskeleton or the appearance of aggregates in Alzheimer's disease. The macroscopic behaviour associated with such processes is remarkably diverse, ranging from simple nucleated growth to highly cooperative processes with a well-defined lagtime. Thus, conventionally, different molecular mechanisms have been used to explain the self-assembly of different proteins. Here we show that this range of behaviour can be quantitatively captured by a single unifying master equation that describes filamentous growth in terms of aggregate number and aggregate mass concentrations. By considering general features associated with a particular network connectivity, we are able to establish directly the rate-determining steps of the overall aggregation reaction from the system's scaling behaviour. We illustrate the power of this framework on a range of different experimental and simulated aggregating systems. The approach is general and will be applicable to any future extensions of the reaction network of filamentous self-assembly.},
  author       = {Meisl, Georg and Rajah, Luke and Cohen, Samuel A. I. and Pfammatter, Manuela and Šarić, Anđela and Hellstrand, Erik and Buell, Alexander K. and Aguzzi, Adriano and Linse, Sara and Vendruscolo, Michele and Dobson, Christopher M. and Knowles, Tuomas P. J.},
  issn         = {2041-6539},
  journal      = {Chemical Science},
  keywords     = {general chemistry},
  number       = {10},
  pages        = {7087--7097},
  publisher    = {Royal Society of Chemistry},
  title        = {{Scaling behaviour and rate-determining steps in filamentous self-assembly}},
  doi          = {10.1039/c7sc01965c},
  volume       = {8},
  year         = {2017},
}

@article{10375,
  abstract     = {Cellular membranes exhibit a large variety of shapes, strongly coupled to their function. Many biological processes involve dynamic reshaping of membranes, usually mediated by proteins. This interaction works both ways: while proteins influence the membrane shape, the membrane shape affects the interactions between the proteins. To study these membrane-mediated interactions on closed and anisotropically curved membranes, we use colloids adhered to ellipsoidal membrane vesicles as a model system. We find that two particles on a closed system always attract each other, and tend to align with the direction of largest curvature. Multiple particles form arcs, or, at large enough numbers, a complete ring surrounding the vesicle in its equatorial plane. The resulting vesicle shape resembles a snowman. Our results indicate that these physical interactions on membranes with anisotropic shapes can be exploited by cells to drive macromolecules to preferred regions of cellular or intracellular membranes, and utilized to initiate dynamic processes such as cell division. The same principle could be used to find the midplane of an artificial vesicle, as a first step towards dividing it into two equal parts.},
  author       = {Vahid, Afshin and Šarić, Anđela and Idema, Timon},
  issn         = {1744-6848},
  journal      = {Soft Matter},
  keywords     = {condensed matter physics, general chemistry},
  number       = {28},
  pages        = {4924--4930},
  publisher    = {Royal Society of Chemistry},
  title        = {{Curvature variation controls particle aggregation on fluid vesicles}},
  doi          = {10.1039/c7sm00433h},
  volume       = {13},
  year         = {2017},
}

@article{10416,
  abstract     = {A fundamental algorithmic problem at the heart of static analysis is Dyck reachability. The input is a graph where the edges are labeled with different types of opening and closing parentheses, and the reachability information is computed via paths whose parentheses are properly matched. We present new results for Dyck reachability problems with applications to alias analysis and data-dependence analysis. Our main contributions, that include improved upper bounds as well as lower bounds that establish optimality guarantees, are as follows: First, we consider Dyck reachability on bidirected graphs, which is the standard way of performing field-sensitive points-to analysis. Given a bidirected graph with n nodes and m edges, we present: (i) an algorithm with worst-case running time O(m + n · α(n)), where α(n) is the inverse Ackermann function, improving the previously known O(n^2) time bound; (ii) a matching lower bound that shows that our algorithm is optimal wrt worst-case complexity; and (iii) an optimal average-case upper bound of O(m) time, improving the previously known O(m · log n) bound. Second, we consider the problem of context-sensitive data-dependence analysis, where the task is to obtain analysis summaries of library code in the presence of callbacks. Our algorithm preprocesses libraries in almost linear time, after which the contribution of the library in the complexity of the client analysis is only linear, and only wrt the number of call sites. Third, we prove that combinatorial algorithms for Dyck reachability on general graphs with truly sub-cubic bounds cannot be obtained without obtaining sub-cubic combinatorial algorithms for Boolean Matrix Multiplication, which is a long-standing open problem. Thus we establish that the existing combinatorial algorithms for Dyck reachability are (conditionally) optimal for general graphs. We also show that the same hardness holds for graphs of constant treewidth.
Finally, we provide a prototype implementation of our algorithms for both alias analysis and data-dependence analysis. Our experimental evaluation demonstrates that the new algorithms significantly outperform all existing methods on the two problems, over real-world benchmarks.},
  author       = {Chatterjee, Krishnendu and Choudhary, Bhavya and Pavlogiannis, Andreas},
  issn         = {2475-1421},
  journal      = {Proceedings of the ACM on Programming Languages},
  location     = {Los Angeles, CA, United States},
  number       = {POPL},
  publisher    = {Association for Computing Machinery},
  title        = {{Optimal Dyck reachability for data-dependence and alias analysis}},
  doi          = {10.1145/3158118},
  volume       = {2},
  year         = {2017},
}

@article{10417,
  abstract     = {We present a new dynamic partial-order reduction method for stateless model checking of concurrent programs. A common approach for exploring program behaviors relies on enumerating the traces of the program, without storing the visited states (aka stateless exploration). As the number of distinct traces grows exponentially, dynamic partial-order reduction (DPOR) techniques have been successfully used to partition the space of traces into equivalence classes (Mazurkiewicz partitioning), with the goal of exploring only few representative traces from each class.

We introduce a new equivalence on traces under sequential consistency semantics, which we call the observation equivalence. Two traces are observationally equivalent if every read event observes the same write event in both traces. While the traditional Mazurkiewicz equivalence is control-centric, our new definition is data-centric. We show that our observation equivalence is coarser than the Mazurkiewicz equivalence, and in many cases even exponentially coarser. We devise a DPOR exploration of the trace space, called data-centric DPOR, based on the observation equivalence.},
  author       = {Chalupa, Marek and Chatterjee, Krishnendu and Pavlogiannis, Andreas and Sinha, Nishant and Vaidya, Kapil},
  issn         = {2475-1421},
  journal      = {Proceedings of the ACM on Programming Languages},
  location     = {Los Angeles, CA, United States},
  number       = {POPL},
  publisher    = {Association for Computing Machinery},
  title        = {{Data-centric dynamic partial order reduction}},
  doi          = {10.1145/3158119},
  volume       = {2},
  year         = {2017},
}

@article{10418,
  abstract     = {We present a new proof rule for proving almost-sure termination of probabilistic programs, including those that contain demonic non-determinism. An important question for a probabilistic program is whether the probability mass of all its diverging runs is zero, that is that it terminates "almost surely". Proving that can be hard, and this paper presents a new method for doing so. It applies directly to the program's source code, even if the program contains demonic choice. Like others, we use variant functions (a.k.a. "super-martingales") that are real-valued and decrease randomly on each loop iteration; but our key innovation is that the amount as well as the probability of the decrease are parametric. We prove the soundness of the new rule, indicate where its applicability goes beyond existing rules, and explain its connection to classical results on denumerable (non-demonic) Markov chains.},
  author       = {McIver, Annabelle and Morgan, Carroll and Kaminski, Benjamin Lucien and Katoen, Joost-Pieter},
  issn         = {2475-1421},
  journal      = {Proceedings of the ACM on Programming Languages},
  location     = {Los Angeles, CA, United States},
  number       = {POPL},
  publisher    = {Association for Computing Machinery},
  title        = {{A new proof rule for almost-sure termination}},
  doi          = {10.1145/3158121},
  volume       = {2},
  year         = {2017},
}

@article{1061,
  abstract     = {Background: Metabolic engineering and synthetic biology of cyanobacteria offer a promising sustainable alternative approach for fossil-based ethylene production, by using sunlight via oxygenic photosynthesis, to convert carbon dioxide directly into ethylene. Towards this, both well-studied cyanobacteria, i.e., Synechocystis sp PCC 6803 and Synechococcus elongatus PCC 7942, have been engineered to produce ethylene by introducing the ethylene-forming enzyme (Efe) from Pseudomonas syringae pv. phaseolicola PK2 (the Kudzu strain), which catalyzes the conversion of the ubiquitous tricarboxylic acid cycle intermediate 2-oxoglutarate into ethylene. Results: This study focuses on Synechocystis sp PCC 6803 and shows stable ethylene production through the integration of a codon-optimized version of the efe gene under control of the Ptrc promoter and the core Shine-Dalgarno sequence (5'-AGGAGG-3') as the ribosome-binding site (RBS), at the slr0168 neutral site. We have increased ethylene production twofold by RBS screening and further investigated improving ethylene production from a single gene copy of efe, using multiple tandem promoters and by putting our best construct on an RSF1010-based broad-host-self-replicating plasmid, which has a higher copy number than the genome. Moreover, to raise the intracellular amounts of the key Efe substrate, 2-oxoglutarate, from which ethylene is formed, we constructed a glycogen-synthesis knockout mutant (glgC) and introduced the ethylene biosynthetic pathway in it. Under nitrogen limiting conditions, the glycogen knockout strain has increased intracellular 2-oxoglutarate levels; however, surprisingly, ethylene production was lower in this strain than in the wild-type background. Conclusion: Making use of different RBS sequences, production of ethylene ranging over a 20-fold difference has been achieved.
However, a further increase of production through multiple tandem promoters and a broad-host plasmid was not achieved speculating that the transcription strength and the gene copy number are not the limiting factors in our system.},
  author       = {Veetil, Vinod and Angermayr, Andreas and Hellingwerf, Klaas},
  issn         = {1475-2859},
  journal      = {Microbial Cell Factories},
  number       = {1},
  publisher    = {BioMed Central},
  title        = {{Ethylene production with engineered Synechocystis sp PCC 6803 strains}},
  doi          = {10.1186/s12934-017-0645-5},
  volume       = {16},
  year         = {2017},
}

@article{1062,
  abstract     = {Mouse chromaffin cells (MCCs) generate action potential (AP) firing that regulates the Ca2+‐dependent release of catecholamines (CAs). Recent findings indicate that MCCs possess a variety of spontaneous firing modes that span from the common ‘tonic‐irregular’ to the less frequent ‘burst’ firing. This latter is evident in a small fraction of MCCs but occurs regularly when Nav1.3/1.7 channels are made less available or when the Slo1β2‐subunit responsible for BK channel inactivation is deleted. Burst firing causes large increases of Ca2+‐entry and potentiates CA release by ∼3.5‐fold and thus may be a key mechanism for regulating MCC function. With the aim to uncover a physiological role for burst‐firing we investigated the effects of acidosis on MCC activity. Lowering the extracellular pH (pHo) from 7.4 to 7.0 and 6.6 induces cell depolarizations of 10–15 mV that generate repeated bursts. Bursts at pHo 6.6 lasted ∼330 ms, occurred at 1–2 Hz and caused an ∼7‐fold increase of CA cumulative release. Burst firing originates from the inhibition of the pH‐sensitive TASK‐1/TASK‐3 channels and from a 40% BK channel conductance reduction at pHo 7.0. The same pHo had little or no effect on Nav, Cav, Kv and SK channels that support AP firing in MCCs. Burst firing of pHo 6.6 could be mimicked by mixtures of the TASK‐1 blocker A1899 (300 nm) and BK blocker paxilline (300 nm) and could be prevented by blocking L‐type channels by adding 3 μm nifedipine. Mixtures of the two blockers raised cumulative CA‐secretion even more than low pHo (∼12‐fold), showing that the action of protons on vesicle release is mainly a result of the ionic conductance changes that increase Ca2+‐entry during bursts. 
Our data provide direct evidence suggesting that MCCs respond to low pHo with sustained depolarization, burst firing and enhanced CA‐secretion, thus mimicking the physiological response of CCs to acute acidosis and hyperkalaemia generated during heavy exercise and muscle fatigue.},
  author       = {Guarina, Laura and Vandael, David H and Carabelli, Valentina and Carbone, Emilio},
  journal      = {Journal of Physiology},
  number       = {8},
  pages        = {2587--2609},
  publisher    = {Wiley-Blackwell},
  title        = {{Low pH$_o$ boosts burst firing and catecholamine release by blocking TASK-1 and BK channels while preserving Cav1 channels in mouse chromaffin cells}},
  doi          = {10.1113/JP273735},
  volume       = {595},
  year         = {2017},
}

@article{1063,
  abstract     = {Severe environmental change can drive a population extinct unless the population adapts in time to the new conditions (“evolutionary rescue”). How does biparental sexual reproduction influence the chances of population persistence compared to clonal reproduction or selfing? In this article, we set up a one‐locus two‐allele model for adaptation in diploid species, where rescue is contingent on the establishment of the mutant homozygote. Reproduction can occur by random mating, selfing, or clonally. Random mating generates and destroys the rescue mutant; selfing is efficient at generating it but at the same time depletes the heterozygote, which can lead to a low mutant frequency in the standing genetic variation. Due to these (and other) antagonistic effects, we find a nontrivial dependence of population survival on the rate of sex/selfing, which is strongly influenced by the dominance coefficient of the mutation before and after the environmental change. Importantly, since mating with the wild‐type breaks the mutant homozygote up, a slow decay of the wild‐type population size can impede rescue in randomly mating populations.},
  author       = {Uecker, Hildegard},
  issn         = {0014-3820},
  journal      = {Evolution},
  number       = {4},
  pages        = {845--858},
  publisher    = {Wiley-Blackwell},
  title        = {{Evolutionary rescue in randomly mating, selfing, and clonal populations}},
  doi          = {10.1111/evo.13191},
  volume       = {71},
  year         = {2017},
}

@article{1065,
  abstract     = {We consider the problem of reachability in pushdown graphs. We study the problem for pushdown graphs with constant treewidth. Even for pushdown graphs with treewidth 1, for the reachability problem we establish the following: (i) the problem is PTIME-complete, and (ii) any subcubic algorithm for the problem would contradict the k-clique conjecture and imply faster combinatorial algorithms for cliques in graphs.},
  author       = {Chatterjee, Krishnendu and Osang, Georg F},
  issn         = {0020-0190},
  journal      = {Information Processing Letters},
  pages        = {25--29},
  publisher    = {Elsevier},
  title        = {{Pushdown reachability with constant treewidth}},
  doi          = {10.1016/j.ipl.2017.02.003},
  volume       = {122},
  year         = {2017},
}

@article{1180,
  abstract     = {In this article we define an algebraic vertex of a generalized polyhedron and show that the set of algebraic vertices is the smallest set of points needed to define the polyhedron. We prove that the indicator function of a generalized polytope P is a linear combination of indicator functions of simplices whose vertices are algebraic vertices of P. We also show that the indicator function of any generalized polyhedron is a linear combination, with integer coefficients, of indicator functions of cones with apices at algebraic vertices and line-cones. The concept of an algebraic vertex is closely related to the Fourier–Laplace transform. We show that a point v is an algebraic vertex of a generalized polyhedron P if and only if the tangent cone of P, at v, has non-zero Fourier–Laplace transform.},
  author       = {Akopyan, Arseniy and Bárány, Imre and Robins, Sinai},
  issn         = {0001-8708},
  journal      = {Advances in Mathematics},
  pages        = {627--644},
  publisher    = {Academic Press},
  title        = {{Algebraic vertices of non-convex polyhedra}},
  doi          = {10.1016/j.aim.2016.12.026},
  volume       = {308},
  year         = {2017},
}

@inproceedings{11829,
  abstract     = {In recent years it has become popular to study dynamic problems in a sensitivity setting: Instead of allowing for an arbitrary sequence of updates, the sensitivity model only allows to apply batch updates of small size to the original input data. The sensitivity model is particularly appealing since recent strong conditional lower bounds ruled out fast algorithms for many dynamic problems, such as shortest paths, reachability, or subgraph connectivity.

In this paper we prove conditional lower bounds for these and additional problems in a sensitivity setting. For example, we show that under the Boolean Matrix Multiplication (BMM) conjecture combinatorial algorithms cannot compute the (4/3-\varepsilon)-approximate diameter of an undirected unweighted dense graph with truly subcubic preprocessing time and truly subquadratic update/query time. This result is surprising since in the static setting it is not clear whether a reduction from BMM to diameter is possible. We further show under the BMM conjecture that many problems, such as reachability or approximate shortest paths, cannot be solved faster than by recomputation from scratch even after only one or two edge insertions. We extend our reduction from BMM to Diameter to give a reduction from All Pairs Shortest Paths to Diameter under one deletion in weighted graphs. This is intriguing, as in the static setting it is a big open problem whether Diameter is as hard as APSP. We further get a nearly tight lower bound for shortest paths after two edge deletions based on the APSP conjecture. We give more lower bounds under the Strong Exponential Time Hypothesis. Many of our lower bounds also hold for static oracle data structures where no sensitivity is required.

Finally, we give the first algorithm for the (1+\varepsilon)-approximate radius, diameter, and eccentricity problems in directed or undirected unweighted graphs in case of single edges failures. The algorithm has a truly subcubic running time for graphs with a truly subquadratic number of edges; it is tight w.r.t. the conditional lower bounds we obtain.},
  author       = {Henzinger, Monika H and Lincoln, Andrea and Neumann, Stefan and Vassilevska Williams, Virginia},
  booktitle    = {8th Innovations in Theoretical Computer Science Conference},
  isbn         = {9783959770293},
  issn         = {1868-8969},
  location     = {Berkeley, CA, United States},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Conditional hardness for sensitivity problems}},
  doi          = {10.4230/LIPICS.ITCS.2017.26},
  volume       = {67},
  year         = {2017},
}

@inproceedings{11831,
  abstract     = {Graph Sparsification aims at compressing large graphs into smaller ones while (approximately) preserving important characteristics of the input graph. In this work we study Vertex Sparsifiers, i.e., sparsifiers whose goal is to reduce the number of vertices. Given a weighted graph G=(V,E), and a terminal set K with |K|=k, a quality-q vertex cut sparsifier of G is a graph H with K contained in V_H that preserves the value of minimum cuts separating any bipartition of K, up to a factor of q. We show that planar graphs with all the k terminals lying on the same face admit quality-1 vertex cut sparsifier of size O(k^2) that are also planar. Our result extends to vertex flow and distance sparsifiers. It improves the previous best known bound of O(k^2 2^(2k)) for cut and flow sparsifiers by an exponential factor, and matches an Omega(k^2) lower-bound for this class of graphs.

We also study vertex reachability sparsifiers for directed graphs. Given a digraph G=(V,E) and a terminal set K, a vertex reachability sparsifier of G is a digraph H=(V_H,E_H), K contained in V_H that preserves all reachability information among terminal pairs. We introduce the notion of reachability-preserving minors, i.e., we require H to be a minor of G. Among others, for general planar digraphs, we construct reachability-preserving minors of size O(k^2 log^2 k). We complement our upper-bound by showing that there exists an infinite family of acyclic planar digraphs such that any reachability-preserving minor must have Omega(k^2) vertices.},
  author       = {Goranci, Gramoz and Henzinger, Monika H and Peng, Pan},
  booktitle    = {25th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-049-1},
  issn         = {1868-8969},
  location     = {Vienna, Austria},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Improved guarantees for vertex sparsification in planar graphs}},
  doi          = {10.4230/LIPICS.ESA.2017.44},
  volume       = {87},
  year         = {2017},
}

@inproceedings{11832,
  abstract     = {In this paper, we study the problem of opening centers to cluster a set of clients in a metric space so as to minimize the sum of the costs of the centers and of the cluster radii, in a dynamic environment where clients arrive and depart, and the solution must be updated efficiently while remaining competitive with respect to the current optimal solution. We call this dynamic sum-of-radii clustering problem.

We present a data structure that maintains a solution whose cost is within a constant factor of the cost of an optimal solution in metric spaces with bounded doubling dimension and whose worst-case update time is logarithmic in the parameters of the problem.},
  author       = {Henzinger, Monika H and Leniowski, Dariusz and Mathieu, Claire},
  booktitle    = {25th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-049-1},
  issn         = {1868-8969},
  location     = {Vienna, Austria},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Dynamic clustering to minimize the sum of radii}},
  doi          = {10.4230/LIPICS.ESA.2017.48},
  volume       = {87},
  year         = {2017},
}

@inproceedings{11833,
  abstract     = {We introduce a new algorithmic framework for designing dynamic graph algorithms in minor-free graphs, by exploiting the structure of such graphs and a tool called vertex sparsification, which is a way to compress large graphs into small ones that well preserve relevant properties among a subset of vertices and has previously mainly been used in the design of approximation algorithms.

Using this framework, we obtain a Monte Carlo randomized fully dynamic algorithm for (1 + epsilon)-approximating the energy of electrical flows in n-vertex planar graphs with tilde{O}(r epsilon^{-2}) worst-case update time and tilde{O}((r + n / sqrt{r}) epsilon^{-2}) worst-case query time, for any r larger than some constant. For r=n^{2/3}, this gives tilde{O}(n^{2/3} epsilon^{-2}) update time and tilde{O}(n^{2/3} epsilon^{-2}) query time. We also extend this algorithm to work for minor-free graphs with similar approximation and running time guarantees. Furthermore, we illustrate our framework on the all-pairs max flow and shortest path problems by giving corresponding dynamic algorithms in minor-free graphs with both sublinear update and query times. To the best of our knowledge, our results are the first to systematically establish such a connection between dynamic graph algorithms and vertex sparsification.

We also present both upper bound and lower bound for maintaining the energy of electrical flows in the incremental subgraph model, where updates consist of only vertex activations, which might be of independent interest.},
  author       = {Goranci, Gramoz and Henzinger, Monika H and Peng, Pan},
  booktitle    = {25th Annual European Symposium on Algorithms},
  isbn         = {978-3-95977-049-1},
  issn         = {1868-8969},
  location     = {Vienna, Austria},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{The power of vertex sparsifiers in dynamic graph algorithms}},
  doi          = {10.4230/LIPICS.ESA.2017.45},
  volume       = {87},
  year         = {2017},
}

@article{1187,
  abstract     = {We construct efficient authentication protocols and message authentication codes (MACs) whose security can be reduced to the learning parity with noise (LPN) problem. Despite a large body of work—starting with the HB protocol of Hopper and Blum in 2001—until now it was not even known how to construct an efficient authentication protocol from LPN which is secure against man-in-the-middle attacks. A MAC implies such a (two-round) protocol.},
  author       = {Kiltz, Eike and Pietrzak, Krzysztof Z and Venturi, Daniele and Cash, David and Jain, Abhishek},
  journal      = {Journal of Cryptology},
  number       = {4},
  pages        = {1238--1275},
  publisher    = {Springer},
  title        = {{Efficient authentication from hard learning problems}},
  doi          = {10.1007/s00145-016-9247-3},
  volume       = {30},
  year         = {2017},
}

@inproceedings{11873,
  abstract     = {We study the problem of computing a minimum cut in a simple, undirected graph and give a deterministic O(m log^2 n log log^2 n) time algorithm. This improves both on the best previously known deterministic running time of O(m log^{12} n) (Kawarabayashi and Thorup [12]) and the best previously known randomized running time of O(m log^3 n) (Karger [11]) for this problem, though Karger's algorithm can be further applied to weighted graphs.

Our approach is using the Kawarabayashi and Thorup graph compression technique, which repeatedly finds low-conductance cuts. To find these cuts they use a diffusion-based local algorithm. We use instead a flow-based local algorithm and suitably adjust their framework to work with our flow-based subroutine. Both flow and diffusion based methods have a long history of being applied to finding low conductance cuts. Diffusion algorithms have several variants that are naturally local while it is more complicated to make flow methods local. Some prior work has proven nice properties for local flow based algorithms with respect to improving or cleaning up low conductance cuts. Our flow subroutine, however, is the first that is both local and produces low conductance cuts. Thus, it may be of independent interest.},
  author       = {Henzinger, Monika H and Rao, Satish and Wang, Di},
  booktitle    = {28th Annual ACM-SIAM Symposium on Discrete Algorithms},
  location     = {Barcelona, Spain},
  pages        = {1919--1938},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Local flow partitioning for faster edge connectivity}},
  doi          = {10.1137/1.9781611974782.125},
  year         = {2017},
}

@inproceedings{11874,
  abstract     = {We consider the problem of maintaining an approximately maximum (fractional) matching and an approximately minimum vertex cover in a dynamic graph. Starting with the seminal paper by Onak and Rubinfeld [STOC 2010], this problem has received significant attention in recent years. There remains, however, a polynomial gap between the best known worst case update time and the best known amortised update time for this problem, even after allowing for randomisation. Specifically, Bernstein and Stein [ICALP 2015, SODA 2016] have the best known worst case update time. They present a deterministic data structure with approximation ratio (3/2 + ∊) and worst case update time O(m^{1/4}/∊^2), where m is the number of edges in the graph. In recent past, Gupta and Peng [FOCS 2013] gave a deterministic data structure with approximation ratio (1+ ∊) and worst case update time O(√m/∊^2). No known randomised data structure beats the worst case update times of these two results. In contrast, the paper by Onak and Rubinfeld [STOC 2010] gave a randomised data structure with approximation ratio O(1) and amortised update time O(log^2 n), where n is the number of nodes in the graph. This was later improved by Baswana, Gupta and Sen [FOCS 2011] and Solomon [FOCS 2016], leading to a randomised data structure with approximation ratio 2 and amortised update time O(1).

We bridge the polynomial gap between the worst case and amortised update times for this problem, without using any randomisation. We present a deterministic data structure with approximation ratio (2 + ∊) and worst case update time O(log^3 n), for all sufficiently small constants ∊.},
  author       = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon},
  booktitle    = {28th Annual ACM-SIAM Symposium on Discrete Algorithms},
  location     = {Barcelona, Spain},
  pages        = {470--489},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Fully dynamic approximate maximum matching and minimum vertex cover in $O(\log^3 n)$ worst case update time}},
  doi          = {10.1137/1.9781611974782.30},
  year         = {2017},
}

@article{11903,
  abstract     = {Online social networks allow the collection of large amounts of data about the influence between users connected by a friendship-like relationship. When distributing items among agents forming a social network, this information allows us to exploit network externalities that each agent receives from his neighbors that get the same item. In this paper we consider Friends-of-Friends (2-hop) network externalities, i.e., externalities that not only depend on the neighbors that get the same item but also on neighbors of neighbors. For these externalities we study a setting where multiple different items are assigned to unit-demand agents. Specifically, we study the problem of welfare maximization under different types of externality functions. Let n be the number of agents and m be the number of items. Our contributions are the following: (1) We show that welfare maximization is APX-hard; we show that even for step functions with 2-hop (and also with 1-hop) externalities it is NP-hard to approximate social welfare better than (1−1/e). (2) On the positive side we present (i) an O(√n)-approximation algorithm for general concave externality functions, (ii) an O(log m)-approximation algorithm for linear externality functions, and (iii) a (5/18)(1−1/e)-approximation algorithm for 2-hop step function externalities. We also improve the result from [7] for 1-hop step function externalities by giving a (1/2)(1−1/e)-approximation algorithm.},
  author       = {Bhattacharya, Sayan and Dvořák, Wolfgang and Henzinger, Monika H and Starnberger, Martin},
  issn         = {1433-0490},
  journal      = {Theory of Computing Systems},
  number       = {4},
  pages        = {948--986},
  publisher    = {Springer Nature},
  title        = {{Welfare maximization with friends-of-friends network externalities}},
  doi          = {10.1007/s00224-017-9759-8},
  volume       = {61},
  year         = {2017},
}

@article{1191,
  abstract     = {Variation in genotypes may be responsible for differences in dispersal rates, directional biases, and growth rates of individuals. These traits may favor certain genotypes and enhance their spatiotemporal spreading into areas occupied by the less advantageous genotypes. We study how these factors influence the speed of spreading in the case of two competing genotypes under the assumption that spatial variation of the total population is small compared to the spatial variation of the frequencies of the genotypes in the population. In that case, the dynamics of the frequency of one of the genotypes is approximately described by a generalized Fisher–Kolmogorov–Petrovskii–Piskunov (F–KPP) equation. This generalized F–KPP equation with (nonlinear) frequency-dependent diffusion and advection terms admits traveling wave solutions that characterize the invasion of the dominant genotype. Our existence results generalize the classical theory for traveling waves for the F–KPP with constant coefficients. Moreover, in the particular case of the quadratic (monostable) nonlinear growth–decay rate in the generalized F–KPP we study in detail the influence of the variance in diffusion and mean displacement rates of the two genotypes on the minimal wave propagation speed.},
  author       = {Kollár, Richard and Novak, Sebastian},
  journal      = {Bulletin of Mathematical Biology},
  number       = {3},
  pages        = {525--559},
  publisher    = {Springer},
  title        = {{Existence of traveling waves for the generalized F–KPP equation}},
  doi          = {10.1007/s11538-016-0244-3},
  volume       = {79},
  year         = {2017},
}

@inproceedings{1192,
  abstract     = {The main result of this paper is a generalization of the classical blossom algorithm for finding perfect matchings. Our algorithm can efficiently solve Boolean CSPs where each variable appears in exactly two constraints (we call it edge CSP) and all constraints are even Δ-matroid relations (represented by lists of tuples). As a consequence of this, we settle the complexity classification of planar Boolean CSPs started by Dvorak and Kupec. Knowing that edge CSP is tractable for even Δ-matroid constraints allows us to extend the tractability result to a larger class of Δ-matroids that includes many classes that were known to be tractable before, namely co-independent, compact, local and binary.},
  author       = {Kazda, Alexandr and Kolmogorov, Vladimir and Rolinek, Michal},
  isbn         = {978-161197478-2},
  location     = {Barcelona, Spain},
  pages        = {307--326},
  publisher    = {SIAM},
  title        = {{Even delta-matroids and the complexity of planar Boolean CSPs}},
  doi          = {10.1137/1.9781611974782.20},
  year         = {2017},
}

