@inproceedings{11183,
abstract     = {Subgraph detection has recently been one of the most studied problems in the CONGEST model of distributed computing. In this work, we study the distributed complexity of problems closely related to subgraph detection, mainly focusing on induced subgraph detection. The main line of this work presents lower bounds and parameterized algorithms w.r.t. structural parameters of the input graph:
- On general graphs, we give unconditional lower bounds for induced detection of cycles and patterns of treewidth 2 in CONGEST. Moreover, by adapting reductions from centralized parameterized complexity, we prove lower bounds in CONGEST for detecting patterns with a 4-clique, and for induced path detection conditional on the hardness of triangle detection in the congested clique.
- On graphs of bounded degeneracy, we show that induced paths can be detected fast in CONGEST using techniques from parameterized algorithms, while detecting cycles and patterns of treewidth 2 is hard.
- On graphs of bounded vertex cover number, we show that induced subgraph detection is easy in CONGEST for any pattern graph. More specifically, we adapt a centralized parameterized algorithm for a more general maximum common induced subgraph detection problem to the distributed setting. In addition to these induced subgraph detection results, we study various related problems in the CONGEST and congested clique models, including for multicolored versions of subgraph-detection-like problems.},
  author       = {Nikabadi, Amir and Korhonen, Janne},
  booktitle    = {25th International Conference on Principles of Distributed Systems},
  editor       = {Bramas, Quentin and Gramoli, Vincent and Milani, Alessia},
  isbn         = {9783959772198},
  issn         = {1868-8969},
  location     = {Strasbourg, France},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters}},
  doi          = {10.4230/LIPIcs.OPODIS.2021.15},
  volume       = {217},
  year         = {2022},
}

@inproceedings{11184,
  abstract     = {Let G be a graph on n nodes. In the stochastic population protocol model, a collection of n indistinguishable, resource-limited nodes collectively solve tasks via pairwise interactions. In each interaction, two randomly chosen neighbors first read each other’s states, and then update their local states. A rich line of research has established tight upper and lower bounds on the complexity of fundamental tasks, such as majority and leader election, in this model, when G is a clique. Specifically, in the clique, these tasks can be solved fast, i.e., in n polylog n pairwise interactions, with high probability, using at most polylog n states per node.
In this work, we consider the more general setting where G is an arbitrary regular graph, and present a technique for simulating protocols designed for fully-connected networks in any connected regular graph. Our main result is a simulation that is efficient on many interesting graph families: roughly, the simulation overhead is polylogarithmic in the number of nodes, and quadratic in the inverse of the conductance of the graph. As a sample application, we show that, in any regular graph with conductance φ, both leader election and exact majority can be solved in φ^{-2} ⋅ n polylog n pairwise interactions, with high probability, using at most φ^{-2} ⋅ polylog n states per node. This shows that there are fast and space-efficient population protocols for leader election and exact majority on graphs with good expansion properties. We believe our results will prove generally useful, as they allow efficient technology transfer between the well-mixed (clique) case, and the under-explored spatial setting.},
  author       = {Alistarh, Dan-Adrian and Gelashvili, Rati and Rybicki, Joel},
  booktitle    = {25th International Conference on Principles of Distributed Systems},
  editor       = {Bramas, Quentin and Gramoli, Vincent and Milani, Alessia},
  isbn         = {9783959772198},
  issn         = {1868-8969},
  location     = {Strasbourg, France},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Fast graphical population protocols}},
  doi          = {10.4230/LIPIcs.OPODIS.2021.14},
  volume       = {217},
  year         = {2022},
}

@article{11420,
  abstract     = {Understanding the properties of neural networks trained via stochastic gradient descent (SGD) is at the heart of the theory of deep learning. In this work, we take a mean-field view, and consider a two-layer ReLU network trained via noisy-SGD for a univariate regularized regression problem. Our main result is that SGD with vanishingly small noise injected in the gradients is biased towards a simple solution: at convergence, the ReLU network implements a piecewise linear map of the inputs, and the number of “knot” points -- i.e., points where the tangent of the ReLU network estimator changes -- between two consecutive training inputs is at most three. In particular, as the number of neurons of the network grows, the SGD dynamics is captured by the solution of a gradient flow and, at convergence, the distribution of the weights approaches the unique minimizer of a related free energy, which has a Gibbs form. Our key technical contribution consists in the analysis of the estimator resulting from this minimizer: we show that its second derivative vanishes everywhere, except at some specific locations which represent the “knot” points. We also provide empirical evidence that knots at locations distinct from the data points might occur, as predicted by our theory.},
  author       = {Shevchenko, Aleksandr and Kungurtsev, Vyacheslav and Mondelli, Marco},
  issn         = {1533-7928},
  journal      = {Journal of Machine Learning Research},
  number       = {130},
  pages        = {1--55},
  publisher    = {Journal of Machine Learning Research},
  title        = {{Mean-field analysis of piecewise linear solutions for wide ReLU networks}},
  volume       = {23},
  year         = {2022},
}

@inproceedings{11707,
abstract     = {In this work we introduce the graph-theoretic notion of mendability: for each locally checkable graph problem we can define its mending radius, which captures the idea of how far one needs to modify a partial solution in order to “patch a hole.” We explore how mendability is connected to the existence of efficient algorithms, especially in distributed, parallel, and fault-tolerant settings. It is easy to see that O(1)-mendable problems are also solvable in O(log∗ n) rounds in the LOCAL model of distributed computing. One of the surprises is that in paths and cycles, a converse also holds in the following sense: if a problem Π can be solved in O(log∗ n) rounds, there is always a restriction Π′ ⊆ Π that is still efficiently solvable but that is also O(1)-mendable. We also explore the structure of the landscape of mendability. For example, we show that in trees, the mending radius of any locally checkable problem is O(1), Θ(log n), or Θ(n), while in general graphs the structure is much more diverse.},
  author       = {Balliu, Alkida and Hirvonen, Juho and Melnyk, Darya and Olivetti, Dennis and Rybicki, Joel and Suomela, Jukka},
  booktitle    = {International Colloquium on Structural Information and Communication Complexity},
  editor       = {Parter, Merav},
  isbn         = {9783031099922},
  issn         = {1611-3349},
  location     = {Paderborn, Germany},
  pages        = {1--20},
  publisher    = {Springer Nature},
  title        = {{Local mending}},
  doi          = {10.1007/978-3-031-09993-9_1},
  volume       = {13298},
  year         = {2022},
}

@misc{13076,
  abstract     = {The source code for replicating experiments presented in the paper.

The implementation of the designed priority schedulers can be found in Galois-2.2.1/include/Galois/WorkList/:
StealingMultiQueue.h is the StealingMultiQueue.
MQOptimized/ contains MQ Optimized variants.

We provide images that contain all the dependencies and datasets. Images can be pulled from the npostnikova/mq-based-schedulers repository or downloaded from Zenodo. See the readme for more details.},
  author       = {Postnikova, Anastasiia and Koval, Nikita and Nadiradze, Giorgi and Alistarh, Dan-Adrian},
  publisher    = {Zenodo},
  title        = {{Multi-queues can be state-of-the-art priority schedulers}},
  doi          = {10.5281/ZENODO.5733408},
  year         = {2022},
}

@inproceedings{11844,
abstract     = {In the stochastic population protocol model, we are given a connected graph with n nodes, and in every time step, a scheduler samples an edge of the graph uniformly at random and the nodes connected by this edge interact. A fundamental task in this model is stable leader election, in which all nodes start in an identical state and the aim is to reach a configuration in which (1) exactly one node is elected as leader and (2) this node remains as the unique leader no matter what sequence of interactions follows. On cliques, the complexity of this problem has recently been settled: time-optimal protocols stabilize in Θ(n log n) expected steps using Θ(log log n) states, whereas protocols that use O(1) states require Θ(n^2) expected steps.

In this work, we investigate the complexity of stable leader election on general graphs. We provide the first non-trivial time lower bounds for leader election on general graphs, showing that, when moving beyond cliques, the complexity landscape of leader election becomes very diverse: the time required to elect a leader can range from O(1) to Θ(n^3) expected steps. On the upper bound side, we first observe that there exists a protocol that is time-optimal on many graph families, but uses polynomially many states. In contrast, we give a near-time-optimal protocol that uses only O(log^2 n) states and is at most a factor log n slower. Finally, we show that the constant-state protocol of Beauquier et al. [OPODIS 2013] is at most a factor n log n slower than the fast polynomial-state protocol. Moreover, among constant-state protocols, this protocol has near-optimal average case complexity on dense random graphs.},
  author       = {Alistarh, Dan-Adrian and Rybicki, Joel and Voitovych, Sasha},
  booktitle    = {Proceedings of the Annual ACM Symposium on Principles of Distributed Computing},
  isbn         = {9781450392624},
  location     = {Salerno, Italy},
  pages        = {246--256},
  publisher    = {Association for Computing Machinery},
  title        = {{Near-optimal leader election in population protocols on graphs}},
  doi          = {10.1145/3519270.3538435},
  year         = {2022},
}

@inproceedings{12182,
abstract     = {Online algorithms make decisions based on past inputs, with the goal of being competitive against an algorithm that also sees future inputs. In this work, we introduce time-local online algorithms; these are online algorithms in which the output at any given time is a function of only the T latest inputs. Our main observation is that time-local online algorithms are closely connected to local distributed graph algorithms: distributed algorithms make decisions based on the local information in the spatial dimension, while time-local online algorithms make decisions based on the local information in the temporal dimension. We formalize this connection, and show how we can directly use the tools developed to study distributed approximability of graph optimization problems to prove upper and lower bounds on the competitive ratio achieved with time-local online algorithms. Moreover, we show how to use computational techniques to synthesize optimal time-local algorithms.},
  author       = {Pacut, Maciej and Parham, Mahmoud and Rybicki, Joel and Schmid, Stefan and Suomela, Jukka and Tereshchenko, Aleksandr},
  booktitle    = {36th International Symposium on Distributed Computing},
  issn         = {1868-8969},
  location     = {Augusta, GA, United States},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Brief announcement: Temporal locality in online algorithms}},
  doi          = {10.4230/LIPIcs.DISC.2022.52},
  volume       = {246},
  year         = {2022},
}

@inproceedings{12299,
abstract     = {Transfer learning is a classic paradigm by which models pretrained on large “upstream” datasets are adapted to yield good results on “downstream” specialized datasets. Generally, more accurate models on the “upstream” dataset tend to provide better transfer accuracy “downstream”. In this work, we perform an in-depth investigation of this phenomenon in the context of convolutional neural networks (CNNs) trained on the ImageNet dataset, which have been pruned, that is, compressed by sparsifying their connections. We consider transfer using unstructured pruned models obtained by applying several state-of-the-art pruning methods, including magnitude-based, second-order, regrowth, lottery-ticket, and regularization approaches, in the context of twelve standard transfer tasks. In a nutshell, our study shows that sparse models can match or even outperform the transfer performance of dense models, even at high sparsities, and, while doing so, can lead to significant inference and even training speedups. At the same time, we observe and analyze significant differences in the behaviour of different pruning methods. The code is available at: https://github.com/IST-DASLab/sparse-imagenet-transfer.},
  author       = {Iofinova, Eugenia B and Peste, Elena-Alexandra and Kurtz, Mark and Alistarh, Dan-Adrian},
  booktitle    = {2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  issn         = {2575-7075},
  location     = {New Orleans, LA, United States},
  pages        = {12256--12266},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{How well do sparse ImageNet models transfer?}},
  doi          = {10.1109/cvpr52688.2022.01195},
  year         = {2022},
}

@inproceedings{12780,
abstract     = {The ability to scale out training workloads has been one of the key performance enablers of deep learning. The main scaling approach is data-parallel GPU-based training, which has been boosted by hardware and software support for highly efficient point-to-point communication, and in particular via hardware bandwidth over-provisioning. Over-provisioning comes at a cost: there is an order-of-magnitude price difference between "cloud-grade" servers with such support and their popular "consumer-grade" counterparts, even though individual server-grade and consumer-grade GPUs can have similar computational envelopes.

In this paper, we show that the costly hardware overprovisioning approach can be supplanted via algorithmic and system design, and propose a framework called CGX, which provides efficient software support for compressed communication in ML applications, for both multi-GPU single-node training, as well as larger-scale multi-node training. CGX is based on two technical advances: At the system level, it relies on a re-developed communication stack for ML frameworks, which provides flexible, highly-efficient support for compressed communication. At the application level, it provides seamless, parameter-free integration with popular frameworks, so that end-users do not have to modify training recipes or make significant changes to training code. This is complemented by a layer-wise adaptive compression technique which dynamically balances compression gains with accuracy preservation. CGX integrates with popular ML frameworks, providing up to 3X speedups for multi-GPU nodes based on commodity hardware, and order-of-magnitude improvements in the multi-node setting, with negligible impact on accuracy.},
  author       = {Markov, Ilia and Ramezanikebrya, Hamidreza and Alistarh, Dan-Adrian},
  booktitle    = {Proceedings of the 23rd ACM/IFIP International Middleware Conference},
  isbn         = {9781450393409},
  location     = {Quebec, QC, Canada},
  pages        = {241--254},
  publisher    = {Association for Computing Machinery},
  title        = {{CGX: Adaptive system support for communication-efficient deep learning}},
  doi          = {10.1145/3528535.3565248},
  year         = {2022},
}

@inproceedings{10853,
abstract     = {Dynamic Connectivity is a fundamental algorithmic graph problem, motivated by a wide range of applications to social and communication networks and used as a building block in various other algorithms, such as the bi-connectivity and the dynamic minimum spanning tree problems. In brief, we wish to maintain the connected components of the graph under dynamic edge insertions and deletions. In the sequential case, the problem has been well-studied from both theoretical and practical perspectives. However, much less is known about efficient concurrent solutions to this problem. This is the gap we address in this paper. We start from one of the classic data structures used to solve this problem, the Euler Tour Tree. Our first contribution is a non-blocking single-writer implementation of it. We leverage this data structure to obtain the first truly concurrent generalization of dynamic connectivity, which preserves the time complexity of its sequential counterpart, but is also scalable in practice. To achieve this, we rely on three main techniques. The first is to ensure that connectivity queries, which usually dominate real-world workloads, are non-blocking. The second non-trivial technique expands the above idea by making all queries that do not change the connectivity structure non-blocking. The third ingredient is applying fine-grained locking for updating the connected components, which allows operations on disjoint components to occur in parallel. We evaluate the resulting algorithm on various workloads, executing on both real and synthetic graphs. The results show the efficiency of each of the proposed optimizations; the most efficient variant improves the performance of a coarse-grained implementation on realistic scenarios by up to 6x on average and up to 30x when connectivity queries dominate.},
  author       = {Fedorov, Alexander and Koval, Nikita and Alistarh, Dan-Adrian},
  booktitle    = {Proceedings of the 33rd ACM Symposium on Parallelism in Algorithms and Architectures},
  isbn         = {9781450380706},
  location     = {Virtual, Online},
  pages        = {208--220},
  publisher    = {Association for Computing Machinery},
  title        = {{A scalable concurrent algorithm for dynamic connectivity}},
  doi          = {10.1145/3409964.3461810},
  year         = {2021},
}

@inproceedings{10854,
  abstract     = {Consider a distributed task where the communication network is fixed but the local inputs given to the nodes of the distributed system may change over time. In this work, we explore the following question: if some of the local inputs change, can an existing solution be updated efficiently, in a dynamic and distributed manner?
To address this question, we define the batch dynamic CONGEST model in which we are given a bandwidth-limited communication network and a dynamic edge labelling defines the problem input. The task is to maintain a solution to a graph problem on the labelled graph under batch changes. We investigate, when a batch of α edge label changes arrives, - how much time as a function of α we need to update an existing solution, and - how much information the nodes have to keep in local memory between batches in order to update the solution quickly.
Our work lays the foundations for the theory of input-dynamic distributed network algorithms. We give a general picture of the complexity landscape in this model, design both universal algorithms and algorithms for concrete problems, and present a general framework for lower bounds. The diverse time complexity of our model spans from constant time, through time polynomial in α, up to α time, which we show to be enough for any task.},
  author       = {Foerster, Klaus-Tycho and Korhonen, Janne and Paz, Ami and Rybicki, Joel and Schmid, Stefan},
  booktitle    = {Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems},
  isbn         = {9781450380720},
  location     = {Virtual, Online},
  pages        = {71--72},
  publisher    = {Association for Computing Machinery},
  title        = {{Input-dynamic distributed algorithms for communication networks}},
  doi          = {10.1145/3410220.3453923},
  year         = {2021},
}

@article{10855,
abstract     = {Consider a distributed task where the communication network is fixed but the local inputs given to the nodes of the distributed system may change over time. In this work, we explore the following question: if some of the local inputs change, can an existing solution be updated efficiently, in a dynamic and distributed manner? To address this question, we define the batch dynamic CONGEST model in which we are given a bandwidth-limited communication network and a dynamic edge labelling defines the problem input. The task is to maintain a solution to a graph problem on the labelled graph under batch changes. We investigate, when a batch of α edge label changes arrives, (i) how much time as a function of α we need to update an existing solution, and (ii) how much information the nodes have to keep in local memory between batches in order to update the solution quickly. Our work lays the foundations for the theory of input-dynamic distributed network algorithms. We give a general picture of the complexity landscape in this model, design both universal algorithms and algorithms for concrete problems, and present a general framework for lower bounds. In particular, we derive non-trivial upper bounds for two selected, contrasting problems: maintaining a minimum spanning tree and detecting cliques.},
  author       = {Foerster, Klaus-Tycho and Korhonen, Janne and Paz, Ami and Rybicki, Joel and Schmid, Stefan},
  issn         = {2476-1249},
  journal      = {Proceedings of the ACM on Measurement and Analysis of Computing Systems},
  keywords     = {Computer Networks and Communications, Hardware and Architecture, Safety, Risk, Reliability and Quality, Computer Science (miscellaneous)},
  number       = {1},
  pages        = {1--33},
  publisher    = {Association for Computing Machinery},
  title        = {{Input-dynamic distributed algorithms for communication networks}},
  doi          = {10.1145/3447384},
  volume       = {5},
  year         = {2021},
}

@inproceedings{11436,
abstract     = {Asynchronous distributed algorithms are a popular way to reduce synchronization costs in large-scale optimization, and in particular for neural network training. However, for nonsmooth and nonconvex objectives, few convergence guarantees exist beyond cases where closed-form proximal operator solutions are available. As training most popular deep neural networks corresponds to optimizing nonsmooth and nonconvex objectives, there is a pressing need for such convergence guarantees. In this paper, we analyze for the first time the convergence of stochastic asynchronous optimization for this general class of objectives. In particular, we focus on stochastic subgradient methods allowing for block variable partitioning, where the shared model is asynchronously updated by concurrent processes. To this end, we use a probabilistic model which captures key features of real asynchronous scheduling between concurrent processes. Under this model, we establish convergence with probability one to an invariant set for stochastic subgradient methods with momentum. From a practical perspective, one issue with the family of algorithms that we consider is that they are not efficiently supported by machine learning frameworks, which mostly focus on distributed data-parallel strategies. To address this, we propose a new implementation strategy for shared-memory based training of deep neural networks for a partitioned but shared model in single- and multi-GPU settings. Based on this implementation, we achieve on average a 1.2x speed-up in comparison to state-of-the-art training methods for popular image classification tasks, without compromising accuracy.},
  author       = {Kungurtsev, Vyacheslav and Egan, Malcolm and Chatterjee, Bapi and Alistarh, Dan-Adrian},
  booktitle    = {35th AAAI Conference on Artificial Intelligence, AAAI 2021},
  isbn         = {9781713835974},
  issn         = {2374-3468},
  location     = {Virtual, Online},
  number       = {9B},
  pages        = {8209--8216},
  publisher    = {AAAI Press},
  title        = {{Asynchronous optimization methods for efficient training of deep neural networks with guarantees}},
  volume       = {35},
  year         = {2021},
}

@inproceedings{11452,
  abstract     = {We study efficient distributed algorithms for the fundamental problem of principal component analysis and leading eigenvector computation on the sphere, when the data are randomly distributed among a set of computational nodes. We propose a new quantized variant of Riemannian gradient descent to solve this problem, and prove that the algorithm converges with high probability under a set of necessary spherical-convexity properties. We give bounds on the number of bits transmitted by the algorithm under common initialization schemes, and investigate the dependency on the problem dimension in each case.},
  author       = {Alimisis, Foivos and Davies, Peter and Vandereycken, Bart and Alistarh, Dan-Adrian},
  booktitle    = {Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems},
  isbn         = {9781713845393},
  issn         = {1049-5258},
  location     = {Virtual, Online},
  pages        = {2823--2834},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Distributed principal component analysis with limited communication}},
  volume       = {4},
  year         = {2021},
}

@inproceedings{11458,
  abstract     = {The increasing computational requirements of deep neural networks (DNNs) have led to significant interest in obtaining DNN models that are sparse, yet accurate. Recent work has investigated the even harder case of sparse training, where the DNN weights are, for as much as possible, already sparse to reduce computational costs during training. Existing sparse training methods are often empirical and can have lower accuracy relative to the dense baseline. In this paper, we present a general approach called Alternating Compressed/DeCompressed (AC/DC) training of DNNs, demonstrate convergence for a variant of the algorithm, and show that AC/DC outperforms existing sparse training methods in accuracy at similar computational budgets; at high sparsity levels, AC/DC even outperforms existing methods that rely on accurate pre-trained dense models. An important property of AC/DC is that it allows co-training of dense and sparse models, yielding accurate sparse–dense model pairs at the end of the training process. This is useful in practice, where compressed variants may be desirable for deployment in resource-constrained settings without re-doing the entire training flow, and also provides us with insights into the accuracy gap between dense and compressed models. The code is available at: https://github.com/IST-DASLab/ACDC.},
  author       = {Peste, Elena-Alexandra and Iofinova, Eugenia B and Vladu, Adrian and Alistarh, Dan-Adrian},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  isbn         = {9781713845393},
  issn         = {1049-5258},
  location     = {Virtual, Online},
  pages        = {8557--8570},
  publisher    = {Curran Associates},
  title        = {{AC/DC: Alternating Compressed/DeCompressed training of deep neural networks}},
  volume       = {34},
  year         = {2021},
}

@inproceedings{11463,
abstract     = {Efficiently approximating local curvature information of the loss function is a key tool for optimization and compression of deep neural networks. Yet, most existing methods to approximate second-order information have high computational or storage costs, which limits their practicality. In this work, we investigate matrix-free, linear-time approaches for estimating Inverse-Hessian Vector Products (IHVPs) for the case when the Hessian can be approximated as a sum of rank-one matrices, as in the classic approximation of the Hessian by the empirical Fisher matrix. We propose two new algorithms: the first is tailored towards network compression and can compute the IHVP for dimension d, if the Hessian is given as a sum of m rank-one matrices, using O(dm^2) precomputation, O(dm) cost for computing the IHVP, and query cost O(m) for any single element of the inverse Hessian. The second algorithm targets an optimization setting, where we wish to compute the product between the inverse Hessian, estimated over a sliding window of optimization steps, and a given gradient direction, as required for preconditioned SGD. We give an algorithm with cost O(dm + m^2) for computing the IHVP and O(dm + m^3) for adding or removing any gradient from the sliding window. These two algorithms yield state-of-the-art results for network pruning and optimization with lower computational overhead relative to existing second-order methods. Implementations are available at [9] and [17].},
  author       = {Frantar, Elias and Kurtic, Eldar and Alistarh, Dan-Adrian},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  isbn         = {9781713845393},
  issn         = {1049-5258},
  location     = {Virtual, Online},
  pages        = {14873--14886},
  publisher    = {Curran Associates},
  title        = {{M-FAC: Efficient matrix-free approximations of second-order information}},
  volume       = {34},
  year         = {2021},
}

@inproceedings{11464,
abstract     = {We consider a standard distributed optimisation setting where N machines, each holding a d-dimensional function f_i, aim to jointly minimise the sum of the functions ∑_{i=1}^N f_i(x). This problem arises naturally in large-scale distributed optimisation, where a standard solution is to apply variants of (stochastic) gradient descent. We focus on the communication complexity of this problem: our main result provides the first fully unconditional bounds on the total number of bits which need to be sent and received by the N machines to solve this problem under point-to-point communication, within a given error-tolerance. Specifically, we show that Ω(Nd log d/Nε) total bits need to be communicated between the machines to find an additive ε-approximation to the minimum of ∑_{i=1}^N f_i(x). The result holds for both deterministic and randomised algorithms, and, importantly, requires no assumptions on the algorithm structure. The lower bound is tight under certain restrictions on parameter values, and is matched within constant factors for quadratic objectives by a new variant of quantised gradient descent, which we describe and analyse. Our results bring over tools from communication complexity to distributed optimisation, which has potential for further applications.},
  author       = {Alistarh, Dan-Adrian and Korhonen, Janne},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  isbn         = {9781713845393},
  issn         = {1049-5258},
  location     = {Virtual, Online},
  pages        = {7254--7266},
  publisher    = {Curran Associates},
  title        = {{Towards tight communication lower bounds for distributed optimisation}},
  volume       = {34},
  year         = {2021},
}

@article{7939,
abstract     = {We design fast deterministic algorithms for distance computation in the Congested Clique model. Our key contributions include:
    A (2+ϵ)-approximation for all-pairs shortest paths in O(log^2 n / ϵ) rounds on unweighted undirected graphs. With a small additional additive factor, this also applies for weighted graphs. This is the first sub-polynomial constant-factor approximation for APSP in this model.
    A (1+ϵ)-approximation for multi-source shortest paths from O(√n) sources in O(log^2 n / ϵ) rounds on weighted undirected graphs. This is the first sub-polynomial algorithm obtaining this approximation for a set of sources of polynomial size.

Our main techniques are new distance tools that are obtained via improved algorithms for sparse matrix multiplication, which we leverage to construct efficient hopsets and shortest paths. Furthermore, our techniques extend to additional distance problems for which we improve upon the state-of-the-art, including diameter approximation, and an exact single-source shortest paths algorithm for weighted undirected graphs in Õ(n^{1/6}) rounds.},
  author       = {Censor-Hillel, Keren and Dory, Michal and Korhonen, Janne and Leitersdorf, Dean},
  issn         = {1432-0452},
  journal      = {Distributed Computing},
  pages        = {463--487},
  publisher    = {Springer Nature},
  title        = {{Fast approximate shortest paths in the congested clique}},
  doi          = {10.1007/s00446-020-00380-5},
  volume       = {34},
  year         = {2021},
}

@article{8286,
abstract     = {We consider the following dynamic load-balancing process: given an underlying graph G with n nodes, in each step t ≥ 0, one unit of load is created, and placed at a randomly chosen graph node. In the same step, the chosen node picks a random neighbor, and the two nodes balance their loads by averaging them. We are interested in the expected gap between the minimum and maximum loads at nodes as the process progresses, and its dependence on n and on the graph structure. Variants of the above graphical balanced allocation process have been studied previously by Peres, Talwar, and Wieder [Peres et al., 2015], and by Sauerwald and Sun [Sauerwald and Sun, 2015]. These authors left as open the question of characterizing the gap in the case of cycle graphs in the dynamic case, where weights are created during the algorithm’s execution. For this case, the only known upper bound is of 𝒪(n log n), following from a majorization argument due to [Peres et al., 2015], which analyzes a related graphical allocation process. In this paper, we provide an upper bound of 𝒪(√n log n) on the expected gap of the above process for cycles of length n. We introduce a new potential analysis technique, which enables us to bound the difference in load between k-hop neighbors on the cycle, for any k ≤ n/2. We complement this with a "gap covering" argument, which bounds the maximum value of the gap by bounding its value across all possible subsets of a certain structure, and recursively bounding the gaps within each subset. We provide analytical and experimental evidence that our upper bound on the gap is tight up to a logarithmic factor.},
  author       = {Alistarh, Dan-Adrian and Nadiradze, Giorgi and Sabour, Amirmojtaba},
  issn         = {1432-0541},
  journal      = {Algorithmica},
  publisher    = {Springer Nature},
  title        = {{Dynamic averaging load balancing on cycles}},
  doi          = {10.1007/s00453-021-00905-9},
  year         = {2021},
}

@article{8723,
  abstract     = {Deep learning at scale is dominated by communication time. Distributing samples across nodes usually yields the best performance, but poses scaling challenges due to global information dissemination and load imbalance across uneven sample lengths. State-of-the-art decentralized optimizers mitigate the problem, but require more iterations to achieve the same accuracy as their globally-communicating counterparts. We present Wait-Avoiding Group Model Averaging (WAGMA) SGD, a wait-avoiding stochastic optimizer that reduces global communication via subgroup weight exchange. The key insight is a combination of algorithmic changes to the averaging scheme and the use of a group allreduce operation. We prove the convergence of WAGMA-SGD, and empirically show that it retains convergence rates similar to Allreduce-SGD. For evaluation, we train ResNet-50 on ImageNet; Transformer for machine translation; and deep reinforcement learning for navigation at scale. Compared with state-of-the-art decentralized SGD variants, WAGMA-SGD significantly improves training throughput (e.g., 2.1× on 1,024 GPUs for reinforcement learning), and achieves the fastest time-to-solution (e.g., the highest score using the shortest training time for Transformer).},
  author       = {Li, Shigang and Ben-Nun, Tal and Nadiradze, Giorgi and Di Girolamo, Salvatore and Dryden, Nikoli and Alistarh, Dan-Adrian and Hoefler, Torsten},
  issn         = {1045-9219},
  journal      = {IEEE Transactions on Parallel and Distributed Systems},
  number       = {7},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Breaking (global) barriers in parallel stochastic optimization with wait-avoiding group averaging}},
  doi          = {10.1109/TPDS.2020.3040606},
  volume       = {32},
  year         = {2021},
}

