@article{9677,
  abstract     = {Progress in the atomic-scale modeling of matter over the past decade has been tremendous. This progress has been brought about by improvements in methods for evaluating interatomic forces that work by either solving the electronic structure problem explicitly, or by computing accurate approximations of the solution and by the development of techniques that use the Born–Oppenheimer (BO) forces to move the atoms on the BO potential energy surface. As a consequence of these developments it is now possible to identify stable or metastable states, to sample configurations consistent with the appropriate thermodynamic ensemble, and to estimate the kinetics of reactions and phase transitions. All too often, however, progress is slowed down by the bottleneck associated with implementing new optimization algorithms and/or sampling techniques into the many existing electronic-structure and empirical-potential codes. To address this problem, we are thus releasing a new version of the i-PI software. This piece of software is an easily extensible framework for implementing advanced atomistic simulation techniques using interatomic potentials and forces calculated by an external driver code. While the original version of the code (Ceriotti et al., 2014) was developed with a focus on path integral molecular dynamics techniques, this second release of i-PI not only includes several new advanced path integral methods, but also offers other classes of algorithms. In other words, i-PI is moving towards becoming a universal force engine that is both modular and tightly coupled to the driver codes that evaluate the potential energy surface and its derivatives.},
  author       = {Kapil, Venkat and Rossi, Mariana and Marsalek, Ondrej and Petraglia, Riccardo and Litman, Yair and Spura, Thomas and Cheng, Bingqing and Cuzzocrea, Alice and Meißner, Robert H. and Wilkins, David M. and Helfrecht, Benjamin A. and Juda, Przemysław and Bienvenue, Sébastien P. and Fang, Wei and Kessler, Jan and Poltavsky, Igor and Vandenbrande, Steven and Wieme, Jelle and Corminboeuf, Clemence and Kühne, Thomas D. and Manolopoulos, David E. and Markland, Thomas E. and Richardson, Jeremy O. and Tkatchenko, Alexandre and Tribello, Gareth A. and Van Speybroeck, Veronique and Ceriotti, Michele},
  issn         = {0010-4655},
  journal      = {Computer Physics Communications},
  pages        = {214--223},
  publisher    = {Elsevier},
  title        = {{i-PI 2.0: A universal force engine for advanced molecular simulations}},
  doi          = {10.1016/j.cpc.2018.09.020},
  volume       = {236},
  year         = {2019},
}

@article{9680,
  abstract     = {Atomistic modeling of phase transitions, chemical reactions, or other rare events that involve overcoming high free energy barriers usually entails prohibitively long simulation times. Introducing a bias potential as a function of an appropriately chosen set of collective variables can significantly accelerate the exploration of phase space, albeit at the price of distorting the distribution of microstates. Efficient reweighting to recover the unbiased distribution can be nontrivial when employing adaptive sampling techniques such as metadynamics, variationally enhanced sampling, or parallel bias metadynamics, in which the system evolves in a quasi-equilibrium manner under a time-dependent bias. We introduce an iterative unbiasing scheme that makes efficient use of all the trajectory data and that does not require the distribution to be evaluated on a grid. The method can thus be used even when the bias has a high dimensionality. We benchmark this approach against some of the existing schemes on model systems with different complexity and dimensionality.},
  author       = {Giberti, F. and Cheng, Bingqing and Tribello, G. A. and Ceriotti, M.},
  issn         = {1549-9626},
  journal      = {Journal of Chemical Theory and Computation},
  number       = {1},
  pages        = {100--107},
  publisher    = {American Chemical Society},
  title        = {{Iterative unbiasing of quasi-equilibrium sampling}},
  doi          = {10.1021/acs.jctc.9b00907},
  volume       = {16},
  year         = {2019},
}

@article{9689,
  abstract     = {A central goal of computational physics and chemistry is to predict material properties by using first-principles methods based on the fundamental laws of quantum mechanics. However, the high computational costs of these methods typically prevent rigorous predictions of macroscopic quantities at finite temperatures, such as heat capacity, density, and chemical potential. Here, we enable such predictions by marrying advanced free-energy methods with data-driven machine-learning interatomic potentials. We show that, for the ubiquitous and technologically essential system of water, a first-principles thermodynamic description not only leads to excellent agreement with experiments, but also reveals the crucial role of nuclear quantum fluctuations in modulating the thermodynamic stabilities of different phases of water.},
  author       = {Cheng, Bingqing and Engel, Edgar A. and Behler, Jörg and Dellago, Christoph and Ceriotti, Michele},
  issn         = {1091-6490},
  journal      = {Proceedings of the National Academy of Sciences},
  number       = {4},
  pages        = {1110--1115},
  publisher    = {National Academy of Sciences},
  title        = {{Ab initio thermodynamics of liquid and solid water}},
  doi          = {10.1073/pnas.1815117116},
  volume       = {116},
  year         = {2019},
}

@unpublished{10065,
  abstract     = {We study double quantum dots in a Ge/SiGe heterostructure and test their maturity towards singlet-triplet ($S-T_0$) qubits. We demonstrate a large range of tunability, from two single quantum dots to a double quantum dot. We measure Pauli spin blockade and study the anisotropy of the $g$-factor. We use an adjacent quantum dot for sensing charge transitions in the double quantum dot at interest. In conclusion, Ge/SiGe possesses all ingredients necessary for building a singlet-triplet qubit.},
  author       = {Hofmann, Andrea C and Jirovec, Daniel and Borovkov, Maxim and Prieto Gonzalez, Ivan and Ballabio, Andrea and Frigerio, Jacopo and Chrastina, Daniel and Isella, Giovanni and Katsaros, Georgios},
  note         = {arXiv preprint},
  eprint       = {1910.05841},
  archiveprefix = {arXiv},
  title        = {{Assessing the potential of Ge/SiGe quantum dots as hosts for singlet-triplet qubits}},
  doi          = {10.48550/arXiv.1910.05841},
  year         = {2019},
}

@inproceedings{10190,
  abstract     = {The verification of concurrent programs remains an open challenge, as thread interaction has to be accounted for, which leads to state-space explosion. Stateless model checking battles this problem by exploring traces rather than states of the program. As there are exponentially many traces, dynamic partial-order reduction (DPOR) techniques are used to partition the trace space into equivalence classes, and explore a few representatives from each class. The standard equivalence that underlies most DPOR techniques is the happens-before equivalence, however recent works have spawned a vivid interest towards coarser equivalences. The efficiency of such approaches is a product of two parameters: (i) the size of the partitioning induced by the equivalence, and (ii) the time spent by the exploration algorithm in each class of the partitioning. In this work, we present a new equivalence, called value-happens-before and show that it has two appealing features. First, value-happens-before is always at least as coarse as the happens-before equivalence, and can be even exponentially coarser. Second, the value-happens-before partitioning is efficiently explorable when the number of threads is bounded. We present an algorithm called value-centric DPOR (VCDPOR), which explores the underlying partitioning using polynomial time per class. Finally, we perform an experimental evaluation of VCDPOR on various benchmarks, and compare it against other state-of-the-art approaches. Our results show that value-happens-before typically induces a significant reduction in the size of the underlying partitioning, which leads to a considerable reduction in the running time for exploring the whole partitioning.},
  author       = {Chatterjee, Krishnendu and Pavlogiannis, Andreas and Toman, Viktor},
  booktitle    = {Proceedings of the 34th ACM International Conference on Object-Oriented Programming, Systems, Languages, and Applications},
  issn         = {2475-1421},
  keywords     = {safety, risk, reliability and quality, software},
  location     = {Athens, Greece},
  publisher    = {ACM},
  title        = {{Value-centric dynamic partial order reduction}},
  doi          = {10.1145/3360550},
  volume       = {3},
  year         = {2019},
}

@article{10354,
  abstract     = {Background
ESCRT-III is a membrane remodelling filament with the unique ability to cut membranes from the inside of the membrane neck. It is essential for the final stage of cell division, the formation of vesicles, the release of viruses, and membrane repair. Distinct from other cytoskeletal filaments, ESCRT-III filaments do not consume energy themselves, but work in conjunction with another ATP-consuming complex. Despite rapid progress in describing the cell biology of ESCRT-III, we lack an understanding of the physical mechanisms behind its force production and membrane remodelling.
Results
Here we present a minimal coarse-grained model that captures all the experimentally reported cases of ESCRT-III driven membrane sculpting, including the formation of downward and upward cones and tubules. This model suggests that a change in the geometry of membrane bound ESCRT-III filaments—from a flat spiral to a 3D helix—drives membrane deformation. We then show that such repetitive filament geometry transitions can induce the fission of cargo-containing vesicles.
Conclusions
Our model provides a general physical mechanism that explains the full range of ESCRT-III-dependent membrane remodelling and scission events observed in cells. This mechanism for filament force production is distinct from the mechanisms described for other cytoskeletal elements discovered so far. The mechanistic principles revealed here suggest new ways of manipulating ESCRT-III-driven processes in cells and could be used to guide the engineering of synthetic membrane-sculpting systems.},
  author       = {Harker-Kirschneck, Lena and Baum, Buzz and Šarić, Anđela},
  issn         = {1741-7007},
  journal      = {BMC Biology},
  keywords     = {cell biology},
  number       = {1},
  publisher    = {Springer Nature},
  title        = {{Changes in ESCRT-III filament geometry drive membrane remodelling and fission in silico}},
  doi          = {10.1186/s12915-019-0700-2},
  volume       = {17},
  year         = {2019},
}

@article{10355,
  abstract     = {The molecular machinery of life is largely created via self-organisation of individual molecules into functional assemblies. Minimal coarse-grained models, in which a whole macromolecule is represented by a small number of particles, can be of great value in identifying the main driving forces behind self-organisation in cell biology. Such models can incorporate data from both molecular and continuum scales, and their results can be directly compared to experiments. Here we review the state of the art of models for studying the formation and biological function of macromolecular assemblies in living organisms. We outline the key ingredients of each model and their main findings. We illustrate the contribution of this class of simulations to identifying the physical mechanisms behind life and diseases, and discuss their future developments.},
  author       = {Hafner, Anne E and Krausser, Johannes and Šarić, Anđela},
  issn         = {0959-440X},
  journal      = {Current Opinion in Structural Biology},
  keywords     = {molecular biology, structural biology},
  pages        = {43--52},
  publisher    = {Elsevier},
  title        = {{Minimal coarse-grained models for molecular self-organisation in biology}},
  doi          = {10.1016/j.sbi.2019.05.018},
  volume       = {58},
  year         = {2019},
}

@article{105,
  abstract     = {Clinical Utility Gene Card. 1. Name of Disease (Synonyms): Pontocerebellar hypoplasia type 9 (PCH9) and spastic paraplegia-63 (SPG63). 2. OMIM# of the Disease: 615809 and 615686. 3. Name of the Analysed Genes or DNA/Chromosome Segments: AMPD2 at 1p13.3. 4. OMIM# of the Gene(s): 102771.},
  author       = {Marsh, Ashley and Novarino, Gaia and Lockhart, Paul and Leventer, Richard},
  journal      = {European Journal of Human Genetics},
  pages        = {161--166},
  publisher    = {Springer Nature},
  title        = {{CUGC for pontocerebellar hypoplasia type 9 and spastic paraplegia-63}},
  doi          = {10.1038/s41431-018-0231-2},
  volume       = {27},
  year         = {2019},
}

@article{10619,
  abstract     = {The quantum anomalous Hall (QAH) effect combines topology and magnetism to produce precisely quantized Hall resistance at zero magnetic field. We report the observation of a QAH effect in twisted bilayer graphene aligned to hexagonal boron nitride. The effect is driven by intrinsic strong interactions, which polarize the electrons into a single spin- and valley-resolved moiré miniband with Chern number C = 1. In contrast to magnetically doped systems, the measured transport energy gap is larger than the Curie temperature for magnetic ordering, and quantization to within 0.1% of the von Klitzing constant persists to temperatures of several kelvin at zero magnetic field. Electrical currents as small as 1 nanoampere controllably switch the magnetic order between states of opposite polarization, forming an electrically rewritable magnetic memory.},
  author       = {Serlin, M. and Tschirhart, C. L. and Polshyn, Hryhoriy and Zhang, Y. and Zhu, J. and Watanabe, K. and Taniguchi, T. and Balents, L. and Young, A. F.},
  issn         = {1095-9203},
  journal      = {Science},
  keywords     = {multidisciplinary},
  number       = {6480},
  pages        = {900--903},
  publisher    = {American Association for the Advancement of Science},
  title        = {{Intrinsic quantized anomalous Hall effect in a moiré heterostructure}},
  doi          = {10.1126/science.aay5533},
  volume       = {367},
  year         = {2019},
}

@article{10620,
  abstract     = {Partially filled Landau levels host competing electronic orders. For example, electron solids may prevail close to integer filling of the Landau levels before giving way to fractional quantum Hall liquids at higher carrier density1,2. Here, we report the observation of an electron solid with non-collinear spin texture in monolayer graphene, consistent with solidification of skyrmions3—topological spin textures characterized by quantized electrical charge4,5. We probe the spin texture of the solids using a modified Corbino geometry that allows ferromagnetic magnons to be launched and detected6,7. We find that magnon transport is highly efficient when one Landau level is filled (ν=1), consistent with quantum Hall ferromagnetic spin polarization. However, even minimal doping immediately quenches the magnon signal while leaving the vanishing low-temperature charge conductivity unchanged. Our results can be understood by the formation of a solid of charged skyrmions near ν=1, whose non-collinear spin texture leads to rapid magnon decay. Data near fractional fillings show evidence of several fractional skyrmion solids, suggesting that graphene hosts a highly tunable landscape of coupled spin and charge orders.},
  author       = {Zhou, H. and Polshyn, Hryhoriy and Taniguchi, T. and Watanabe, K. and Young, A. F.},
  issn         = {1745-2481},
  journal      = {Nature Physics},
  keywords     = {General Physics and Astronomy},
  number       = {2},
  pages        = {154--158},
  publisher    = {Springer Nature},
  title        = {{Solids of quantum Hall skyrmions in graphene}},
  doi          = {10.1038/s41567-019-0729-8},
  volume       = {16},
  year         = {2019},
}

@article{10621,
  abstract     = {Twisted bilayer graphene has recently emerged as a platform for hosting correlated phenomena. For twist angles near θ ≈ 1.1°, the low-energy electronic structure of twisted bilayer graphene features isolated bands with a flat dispersion1,2. Recent experiments have observed a variety of low-temperature phases that appear to be driven by electron interactions, including insulating states, superconductivity and magnetism3,4,5,6. Here we report electrical transport measurements up to room temperature for twist angles varying between 0.75° and 2°. We find that the resistivity, ρ, scales linearly with temperature, T, over a wide range of T before falling again owing to interband activation. The T-linear response is much larger than observed in monolayer graphene for all measured devices, and in particular increases by more than three orders of magnitude in the range where the flat band exists. Our results point to the dominant role of electron–phonon scattering in twisted bilayer graphene, with possible implications for the origin of the observed superconductivity.},
  author       = {Polshyn, Hryhoriy and Yankowitz, Matthew and Chen, Shaowen and Zhang, Yuxuan and Watanabe, K. and Taniguchi, T. and Dean, Cory R. and Young, Andrea F.},
  issn         = {1745-2481},
  journal      = {Nature Physics},
  keywords     = {general physics and astronomy},
  number       = {10},
  pages        = {1011--1016},
  publisher    = {Springer Nature},
  title        = {{Large linear-in-temperature resistivity in twisted bilayer graphene}},
  doi          = {10.1038/s41567-019-0596-3},
  volume       = {15},
  year         = {2019},
}

@article{10622,
  abstract     = {We demonstrate a method for manipulating small ensembles of vortices in multiply connected superconducting structures. A micron-size magnetic particle attached to the tip of a silicon cantilever is used to locally apply magnetic flux through the superconducting structure. By scanning the tip over the surface of the device and by utilizing the dynamical coupling between the vortices and the cantilever, a high-resolution spatial map of the different vortex configurations is obtained. Moving the tip to a particular location in the map stabilizes a distinct multivortex configuration. Thus, the scanning of the tip over a particular trajectory in space permits nontrivial operations to be performed, such as braiding of individual vortices within a larger vortex ensemble—a key capability required by many proposals for topological quantum computing.},
  author       = {Polshyn, Hryhoriy and Naibert, Tyler and Budakian, Raffi},
  issn         = {1530-6992},
  journal      = {Nano Letters},
  keywords     = {mechanical engineering, condensed matter physics, general materials science, general chemistry, bioengineering},
  number       = {8},
  pages        = {5476--5482},
  publisher    = {American Chemical Society},
  title        = {{Manipulating multivortex states in superconducting structures}},
  doi          = {10.1021/acs.nanolett.9b01983},
  volume       = {19},
  year         = {2019},
}

@article{10625,
  abstract     = {The discovery of superconductivity and exotic insulating phases in twisted bilayer graphene has established this material as a model system of strongly correlated electrons. To achieve superconductivity, the two layers of graphene need to be at a very precise angle with respect to each other. Yankowitz et al. now show that another experimental knob, hydrostatic pressure, can be used to tune the phase diagram of twisted bilayer graphene (see the Perspective by Feldman). Applying pressure increased the coupling between the layers, which shifted the superconducting transition to higher angles and somewhat higher temperatures.},
  author       = {Yankowitz, Matthew and Chen, Shaowen and Polshyn, Hryhoriy and Zhang, Yuxuan and Watanabe, K. and Taniguchi, T. and Graf, David and Young, Andrea F. and Dean, Cory R.},
  issn         = {1095-9203},
  journal      = {Science},
  keywords     = {multidisciplinary},
  number       = {6431},
  pages        = {1059--1064},
  publisher    = {American Association for the Advancement of Science},
  title        = {{Tuning superconductivity in twisted bilayer graphene}},
  doi          = {10.1126/science.aav1910},
  volume       = {363},
  year         = {2019},
}

@inproceedings{11826,
  abstract     = {The diameter, radius and eccentricities are natural graph parameters. While these problems have been studied extensively, there are no known dynamic algorithms for them beyond the ones that follow from trivial recomputation after each update or from solving dynamic All-Pairs Shortest Paths (APSP), which is very computationally intensive. This is the situation for dynamic approximation algorithms as well, and even if only edge insertions or edge deletions need to be supported.
This paper provides a comprehensive study of the dynamic approximation of Diameter, Radius and Eccentricities, providing both conditional lower bounds, and new algorithms whose bounds are optimal under popular hypotheses in fine-grained complexity. Some of the highlights include:
- Under popular hardness hypotheses, there can be no significantly better fully dynamic approximation algorithms than recomputing the answer after each update, or maintaining full APSP.
- Nearly optimal partially dynamic (incremental/decremental) algorithms can be achieved via efficient reductions to (incremental/decremental) maintenance of Single-Source Shortest Paths. For instance, a nearly (3/2+epsilon)-approximation to Diameter in directed or undirected n-vertex, m-edge graphs can be maintained decrementally in total time m^{1+o(1)}sqrt{n}/epsilon^2. This nearly matches the static 3/2-approximation algorithm for the problem that is known to be conditionally optimal.},
  author       = {Ancona, Bertie and Henzinger, Monika H and Roditty, Liam and Vassilevska Williams, Virginia and Wein, Nicole},
  booktitle    = {46th International Colloquium on Automata, Languages, and Programming},
  isbn         = {978-3-95977-109-2},
  issn         = {1868-8969},
  location     = {Patras, Greece},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Algorithms and hardness for diameter in dynamic graphs}},
  doi          = {10.4230/LIPICS.ICALP.2019.13},
  volume       = {132},
  year         = {2019},
}

@incollection{11847,
  abstract     = {This paper serves as a user guide to the Vienna graph clustering framework. We review our general memetic algorithm, VieClus, to tackle the graph clustering problem. A key component of our contribution are natural recombine operators that employ ensemble clusterings as well as multi-level techniques. Lastly, we combine these techniques with a scalable communication protocol, producing a system that is able to compute high-quality solutions in a short amount of time. After giving a description of the algorithms employed, we establish the connection of the graph clustering problem to protein–protein interaction networks and moreover give a description on how the software can be used, what file formats are expected, and how this can be used to find functional groups in protein–protein interaction networks.},
  author       = {Biedermann, Sonja and Henzinger, Monika H and Schulz, Christian and Schuster, Bernhard},
  booktitle    = {Protein-Protein Interaction Networks},
  editor       = {Canzar, Stefan and Rojas Ringeling, Francisca},
  isbn         = {9781493998722},
  issn         = {1940-6029},
  pages        = {215--231},
  publisher    = {Springer Nature},
  title        = {{Vienna Graph Clustering}},
  doi          = {10.1007/978-1-4939-9873-9_16},
  volume       = {2074},
  year         = {2019},
}

@inproceedings{11850,
  abstract     = {Modern networked systems are increasingly reconfigurable, enabling demand-aware infrastructures whose resources can be adjusted according to the workload they currently serve. Such dynamic adjustments can be exploited to improve network utilization and hence performance, by moving frequently interacting communication partners closer, e.g., collocating them in the same server or datacenter. However, dynamically changing the embedding of workloads is algorithmically challenging: communication patterns are often not known ahead of time, but must be learned. During the learning process, overheads related to unnecessary moves (i.e., re-embeddings) should be minimized. This paper studies a fundamental model which captures the tradeoff between the benefits and costs of dynamically collocating communication partners on l servers, in an online manner. Our main contribution is a distributed online algorithm which is asymptotically almost optimal, i.e., almost matches the lower bound (also derived in this paper) on the competitive ratio of any (distributed or centralized) online algorithm.},
  author       = {Henzinger, Monika H and Neumann, Stefan and Schmid, Stefan},
  booktitle    = {SIGMETRICS'19: International Conference on Measurement and Modeling of Computer Systems},
  isbn         = {978-1-4503-6678-6},
  location     = {Phoenix, AZ, United States},
  pages        = {43--44},
  publisher    = {Association for Computing Machinery},
  title        = {{Efficient distributed workload (re-)embedding}},
  doi          = {10.1145/3309697.3331503},
  year         = {2019},
}

@inproceedings{11851,
  abstract     = {The minimum cut problem for an undirected edge-weighted graph asks us to divide its set of nodes into two blocks while minimizing the weighted sum of the cut edges. In this paper, we engineer the fastest known exact algorithm for the problem. State-of-the-art algorithms like the algorithm of Padberg and Rinaldi or the algorithm of Nagamochi, Ono and Ibaraki identify edges that can be contracted to reduce the graph size such that at least one minimum cut is maintained in the contracted graph. Our algorithm achieves improvements in running time over these algorithms by a multitude of techniques. First, we use a recently developed fast and parallel inexact minimum cut algorithm to obtain a better bound for the problem. Afterwards, we use reductions that depend on this bound to reduce the size of the graph much faster than previously possible. We use improved data structures to further lower the running time of our algorithm. Additionally, we parallelize the contraction routines of Nagamochi et al. . Overall, we arrive at a system that significantly outperforms the fastest state-of-the-art solvers for the exact minimum cut problem.},
  author       = {Henzinger, Monika H and Noe, Alexander and Schulz, Christian},
  booktitle    = {33rd International Parallel and Distributed Processing Symposium},
  isbn         = {978-1-7281-1247-3},
  issn         = {1530-2075},
  location     = {Rio de Janeiro, Brazil},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Shared-memory exact minimum cuts}},
  doi          = {10.1109/ipdps.2019.00013},
  year         = {2019},
}

@inproceedings{11853,
  abstract     = {We present a deterministic dynamic algorithm for maintaining a (1+ε)f-approximate minimum cost set cover with O(f log(Cn)/ε^2) amortized update time, when the input set system is undergoing element insertions and deletions. Here, n denotes the number of elements, each element appears in at most f sets, and the cost of each set lies in the range [1/C, 1]. Our result, together with that of Gupta~et~al.~[STOC'17], implies that there is a deterministic algorithm for this problem with O(f log(Cn)) amortized update time and O(min(log n, f)) -approximation ratio, which nearly matches the polynomial-time hardness of approximation for minimum set cover in the static setting. Our update time is only O(log (Cn)) away from a trivial lower bound. Prior to our work, the previous best approximation ratio guaranteed by deterministic algorithms was O(f^2), which was due to Bhattacharya~et~al.~[ICALP`15]. In contrast, the only result that guaranteed O(f) -approximation was obtained very recently by Abboud~et~al.~[STOC`19], who designed a dynamic algorithm with (1+ε)f-approximation ratio and O(f^2 log n/ε) amortized update time. Besides the extra O(f) factor in the update time compared to our and Gupta~et~al.'s results, the Abboud~et~al.~algorithm is randomized, and works only when the adversary is oblivious and the sets are unweighted (each set has the same cost). We achieve our result via the primal-dual approach, by maintaining a fractional packing solution as a dual certificate. This approach was pursued previously by Bhattacharya~et~al.~and Gupta~et~al., but not in the recent paper by Abboud~et~al. Unlike previous primal-dual algorithms that try to satisfy some local constraints for individual sets at all time, our algorithm basically waits until the dual solution changes significantly globally, and fixes the solution only where the fix is needed.},
  author       = {Bhattacharya, Sayan and Henzinger, Monika H and Nanongkai, Danupon},
  booktitle    = {60th Annual Symposium on Foundations of Computer Science},
  isbn         = {978-1-7281-4953-0},
  issn         = {2575-8454},
  location     = {Baltimore, MD, United States},
  pages        = {406--423},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{A new deterministic algorithm for dynamic set cover}},
  doi          = {10.1109/focs.2019.00033},
  year         = {2019},
}

@inproceedings{11865,
  abstract     = {We present the first sublinear-time algorithm that can compute the edge connectivity $\lambda$ of a network exactly on distributed message-passing networks (the CONGEST model), as long as the network contains no multi-edge. Our algorithm takes $\tilde{O}(n^{1-1/353}D^{1/353}+n^{1-1/706})$ time to compute $\lambda$ and a cut of cardinality $\lambda$ with high probability, where $n$ and $D$ are the number of nodes and the diameter of the network, respectively, and $\tilde{O}$ hides polylogarithmic factors. This running time is sublinear in $n$ (i.e. $\tilde{O}(n^{1-\epsilon})$) whenever $D$ is. Previous sublinear-time distributed algorithms can solve this problem either (i) exactly only when $\lambda=O(n^{1/8-\epsilon})$ [Thurimella PODC’95; Pritchard, Thurimella, ACM Trans. Algorithms’11; Nanongkai, Su, DISC’14] or (ii) approximately [Ghaffari, Kuhn, DISC’13; Nanongkai, Su, DISC’14]. To achieve this we develop and combine several new techniques. First, we design the first distributed algorithm that can compute a $k$-edge connectivity certificate for any $k=O(n^{1-\epsilon})$ in time $\tilde{O}(\sqrt{nk}+D)$. The previous sublinear-time algorithm can do so only when $k=o(\sqrt{n})$ [Thurimella PODC’95]. In fact, our algorithm can be turned into the first parallel algorithm with polylogarithmic depth and near-linear work. Previous near-linear work algorithms are essentially sequential and previous polylogarithmic-depth algorithms require $\Omega(mk)$ work in the worst case (e.g. [Karger, Motwani, STOC’93]). Second, we show that by combining the recent distributed expander decomposition technique of [Chang, Pettie, Zhang, SODA’19] with techniques from the sequential deterministic edge connectivity algorithm of [Kawarabayashi, Thorup, STOC’15], we can decompose the network into a sublinear number of clusters with small average diameter and without any mincut separating a cluster (except the “trivial” ones). This leads to a simplification of the Kawarabayashi-Thorup framework (except that we are randomized while they are deterministic). This might make this framework more useful in other models of computation. Finally, by extending the tree packing technique from [Karger STOC’96], we can find the minimum cut in time proportional to the number of components. As a byproduct of this technique, we obtain an $\tilde{O}(n)$-time algorithm for computing exact minimum cut for weighted graphs.},
  author       = {Daga, Mohit and Henzinger, Monika H. and Nanongkai, Danupon and Saranurak, Thatchaphol},
  booktitle    = {Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing},
  isbn         = {978-1-4503-6705-9},
  issn         = {0737-8017},
  location     = {Phoenix, AZ, United States},
  pages        = {343--354},
  publisher    = {Association for Computing Machinery},
  title        = {Distributed edge connectivity in sublinear time},
  doi          = {10.1145/3313276.3316346},
  year         = {2019},
}

@inproceedings{11871,
  abstract     = {Many dynamic graph algorithms have an amortized update time, rather than a stronger worst-case guarantee. But amortized data structures are not suitable for real-time systems, where each individual operation has to be executed quickly. For this reason, there exist many recent randomized results that aim to provide a guarantee stronger than amortized expected. The strongest possible guarantee for a randomized algorithm is that it is always correct (Las Vegas), and has high-probability worst-case update time, which gives a bound on the time for each individual operation that holds with high probability.

In this paper we present the first polylogarithmic high-probability worst-case time bounds for the dynamic spanner and the dynamic maximal matching problem.

1. For dynamic spanner, the only known $o(n)$ worst-case bounds were $O(n^{3/4})$ high-probability worst-case update time for maintaining a 3-spanner, and $O(n^{5/9})$ for maintaining a 5-spanner. We give a $O(1)^{k}\log^{3}(n)$ high-probability worst-case time bound for maintaining a $(2k-1)$-spanner, which yields the first worst-case polylog update time for all constant $k$. (All the results above maintain the optimal tradeoff of stretch $2k-1$ and $\tilde{O}(n^{1+1/k})$ edges.)

2. For dynamic maximal matching, or dynamic 2-approximate maximum matching, no algorithm with $o(n)$ worst-case time bound was known and we present an algorithm with $O(\log^{5}(n))$ high-probability worst-case time; similar worst-case bounds existed only for maintaining a matching that was $(2+\epsilon)$-approximate, and hence not maximal.

Our results are achieved using a new approach for converting amortized guarantees to worst-case ones for randomized data structures by going through a third type of guarantee, which is a middle ground between the two above: an algorithm is said to have worst-case expected update time $\alpha$ if for every update $\sigma$, the expected time to process $\sigma$ is at most $\alpha$. Although stronger than amortized expected, the worst-case expected guarantee does not resolve the fundamental problem of amortization: a worst-case expected update time of $O(1)$ still allows for the possibility that every $1/f(n)$ updates requires $\Theta(f(n))$ time to process, for arbitrarily high $f(n)$. In this paper we present a black-box reduction that converts any data structure with worst-case expected update time into one with a high-probability worst-case update time: the query time remains the same, while the update time increases by a factor of $O(\log^{2}(n))$.

Thus we achieve our results in two steps: (1) First we show how to convert existing dynamic graph algorithms with amortized expected polylogarithmic running times into algorithms with worst-case expected polylogarithmic running times. (2) Then we use our black-box reduction to achieve the polylogarithmic high-probability worst-case time bound. All our algorithms are Las-Vegas-type algorithms.},
  author       = {Bernstein, Aaron and Forster, Sebastian and Henzinger, Monika H.},
  booktitle    = {30th Annual ACM-SIAM Symposium on Discrete Algorithms},
  location     = {San Diego, CA, United States},
  pages        = {1899--1918},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {A deamortization approach for dynamic spanner and dynamic maximal matching},
  doi          = {10.1137/1.9781611975482.115},
  year         = {2019},
}

