[{"status":"public","intvolume":"       139","type":"conference","day":"01","file_date_updated":"2023-06-19T10:41:05Z","page":"196-206","publication":"Proceedings of the 38th International Conference on Machine Learning","language":[{"iso":"eng"}],"publisher":"ML Research Press","scopus_import":"1","date_published":"2021-07-01T00:00:00Z","month":"07","file":[{"date_updated":"2023-06-19T10:41:05Z","access_level":"open_access","date_created":"2023-06-19T10:41:05Z","checksum":"7ec0d59bac268b49c76bf2e036dedd7a","file_name":"2021_PMLR_Alimisis.pdf","file_size":429087,"creator":"dernst","file_id":"13154","content_type":"application/pdf","relation":"main_file","success":1}],"date_created":"2023-06-18T22:00:48Z","conference":{"name":"International Conference on Machine Learning","location":"Virtual","end_date":"2021-07-24","start_date":"2021-07-18"},"department":[{"_id":"DaAl"}],"license":"https://creativecommons.org/licenses/by/4.0/","has_accepted_license":"1","author":[{"first_name":"Foivos","full_name":"Alimisis, Foivos","last_name":"Alimisis"},{"first_name":"Peter","last_name":"Davies","full_name":"Davies, Peter","orcid":"0000-0002-5646-9524","id":"11396234-BB50-11E9-B24C-90FCE5697425"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh"}],"abstract":[{"text":"We investigate fast and communication-efficient algorithms for the classic problem of minimizing a sum of strongly convex and smooth functions that are distributed among n\r\n different nodes, which can communicate using a limited number of bits. Most previous communication-efficient approaches for this problem are limited to first-order optimization, and therefore have \\emph{linear} dependence on the condition number in their communication complexity. We show that this dependence is not inherent: communication-efficient methods can in fact have sublinear dependence on the condition number. 
For this, we design and analyze the first communication-efficient distributed variants of preconditioned gradient descent for Generalized Linear Models, and for Newton’s method. Our results rely on a new technique for quantizing both the preconditioner and the descent direction at each step of the algorithms, while controlling their convergence rate. We also validate our findings experimentally, showing faster convergence and reduced communication relative to previous methods.","lang":"eng"}],"publication_status":"published","citation":{"mla":"Alimisis, Foivos, et al. “Communication-Efficient Distributed Optimization with Quantized Preconditioners.” <i>Proceedings of the 38th International Conference on Machine Learning</i>, vol. 139, ML Research Press, 2021, pp. 196–206.","ama":"Alimisis F, Davies P, Alistarh D-A. Communication-efficient distributed optimization with quantized preconditioners. In: <i>Proceedings of the 38th International Conference on Machine Learning</i>. Vol 139. ML Research Press; 2021:196-206.","short":"F. Alimisis, P. Davies, D.-A. Alistarh, in:, Proceedings of the 38th International Conference on Machine Learning, ML Research Press, 2021, pp. 196–206.","ista":"Alimisis F, Davies P, Alistarh D-A. 2021. Communication-efficient distributed optimization with quantized preconditioners. Proceedings of the 38th International Conference on Machine Learning. International Conference on Machine Learning vol. 139, 196–206.","ieee":"F. Alimisis, P. Davies, and D.-A. Alistarh, “Communication-efficient distributed optimization with quantized preconditioners,” in <i>Proceedings of the 38th International Conference on Machine Learning</i>, Virtual, 2021, vol. 139, pp. 196–206.","apa":"Alimisis, F., Davies, P., &#38; Alistarh, D.-A. (2021). Communication-efficient distributed optimization with quantized preconditioners. In <i>Proceedings of the 38th International Conference on Machine Learning</i> (Vol. 139, pp. 196–206). 
Virtual: ML Research Press.","chicago":"Alimisis, Foivos, Peter Davies, and Dan-Adrian Alistarh. “Communication-Efficient Distributed Optimization with Quantized Preconditioners.” In <i>Proceedings of the 38th International Conference on Machine Learning</i>, 139:196–206. ML Research Press, 2021."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","acknowledgement":"The authors would like to thank Janne Korhonen, Aurelien Lucchi, Celestine MendlerDunner and Antonio Orvieto for helpful discussions. FA ¨and DA were supported during this work by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML). PD was supported by the European Union’s Horizon 2020 programme under the Marie Skłodowska-Curie grant agreement No. 754411.","project":[{"grant_number":"805223","call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425"},{"call_identifier":"H2020","name":"ISTplus - Postdoctoral Fellowships","_id":"260C2330-B435-11E9-9278-68D0E5697425","grant_number":"754411"}],"quality_controlled":"1","oa_version":"Published Version","_id":"13147","publication_identifier":{"isbn":["9781713845065"],"eissn":["2640-3498"]},"date_updated":"2023-06-19T10:44:38Z","volume":139,"oa":1,"article_processing_charge":"No","arxiv":1,"title":"Communication-efficient distributed optimization with quantized preconditioners","external_id":{"arxiv":["2102.07214"]},"ec_funded":1,"year":"2021","ddc":["000"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"}},{"doi":"10.1145/3451992","year":"2021","ec_funded":1,"title":"Graph sparsification for derandomizing massively parallel computation with low 
space","external_id":{"isi":["000661311300006"],"arxiv":["1912.05390"]},"isi":1,"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1912.05390"}],"article_number":"16","related_material":{"record":[{"relation":"earlier_version","id":"7802","status":"public"}]},"ddc":["000"],"publication_status":"published","citation":{"short":"A. Czumaj, P. Davies, M. Parter, ACM Transactions on Algorithms 17 (2021).","ista":"Czumaj A, Davies P, Parter M. 2021. Graph sparsification for derandomizing massively parallel computation with low space. ACM Transactions on Algorithms. 17(2), 16.","mla":"Czumaj, Artur, et al. “Graph Sparsification for Derandomizing Massively Parallel Computation with Low Space.” <i>ACM Transactions on Algorithms</i>, vol. 17, no. 2, 16, Association for Computing Machinery, 2021, doi:<a href=\"https://doi.org/10.1145/3451992\">10.1145/3451992</a>.","ama":"Czumaj A, Davies P, Parter M. Graph sparsification for derandomizing massively parallel computation with low space. <i>ACM Transactions on Algorithms</i>. 2021;17(2). doi:<a href=\"https://doi.org/10.1145/3451992\">10.1145/3451992</a>","chicago":"Czumaj, Artur, Peter Davies, and Merav Parter. “Graph Sparsification for Derandomizing Massively Parallel Computation with Low Space.” <i>ACM Transactions on Algorithms</i>. Association for Computing Machinery, 2021. <a href=\"https://doi.org/10.1145/3451992\">https://doi.org/10.1145/3451992</a>.","ieee":"A. Czumaj, P. Davies, and M. Parter, “Graph sparsification for derandomizing massively parallel computation with low space,” <i>ACM Transactions on Algorithms</i>, vol. 17, no. 2. Association for Computing Machinery, 2021.","apa":"Czumaj, A., Davies, P., &#38; Parter, M. (2021). Graph sparsification for derandomizing massively parallel computation with low space. <i>ACM Transactions on Algorithms</i>. Association for Computing Machinery. 
<a href=\"https://doi.org/10.1145/3451992\">https://doi.org/10.1145/3451992</a>"},"abstract":[{"text":"The Massively Parallel Computation (MPC) model is an emerging model that distills core aspects of distributed and parallel computation, developed as a tool to solve combinatorial (typically graph) problems in systems of many machines with limited space. Recent work has focused on the regime in which machines have sublinear (in n, the number of nodes in the input graph) space, with randomized algorithms presented for the fundamental problems of Maximal Matching and Maximal Independent Set. However, there have been no prior corresponding deterministic algorithms. A major challenge underlying the sublinear space setting is that the local space of each machine might be too small to store all edges incident to a single node. This poses a considerable obstacle compared to classical models in which each node is assumed to know and have easy access to its incident edges. To overcome this barrier, we introduce a new graph sparsification technique that deterministically computes a low-degree subgraph, with the additional property that solving the problem on this subgraph provides significant progress towards solving the problem for the original input graph. Using this framework to derandomize the well-known algorithm of Luby [SICOMP’86], we obtain O(log Δ + log log n)-round deterministic MPC algorithms for solving the problems of Maximal Matching and Maximal Independent Set with O(nɛ) space on each machine for any constant ɛ > 0. These algorithms also run in O(log Δ) rounds in the closely related model of CONGESTED CLIQUE, improving upon the state-of-the-art bound of O(log 2Δ) rounds by Censor-Hillel et al. 
[DISC’17].","lang":"eng"}],"author":[{"first_name":"Artur","full_name":"Czumaj, Artur","last_name":"Czumaj"},{"orcid":"0000-0002-5646-9524","last_name":"Davies","full_name":"Davies, Peter","first_name":"Peter","id":"11396234-BB50-11E9-B24C-90FCE5697425"},{"first_name":"Merav","last_name":"Parter","full_name":"Parter, Merav"}],"arxiv":1,"oa":1,"date_updated":"2024-02-28T12:53:09Z","volume":17,"article_processing_charge":"No","_id":"9541","publication_identifier":{"eissn":["1549-6333"],"issn":["1549-6325"]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","acknowledgement":"Institute of Science and Technology Austria (IST Austria). Email: peter.davies@ist.ac.at. Work partially\r\ndone at the Department of Computer Science and Centre for Discrete Mathematics and its Applications (DIMAP),University of Warwick. Research partially supported by the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No 754411, the Centre for Discrete Mathematics and its Applications, a Weizmann-UK Making Connections Grant, and EPSRC award EP/N011163/1.","project":[{"grant_number":"754411","_id":"260C2330-B435-11E9-9278-68D0E5697425","name":"ISTplus - Postdoctoral Fellowships","call_identifier":"H2020"}],"quality_controlled":"1","oa_version":"Submitted Version","month":"06","date_published":"2021-06-01T00:00:00Z","article_type":"original","publisher":"Association for Computing Machinery","language":[{"iso":"eng"}],"has_accepted_license":"1","department":[{"_id":"DaAl"}],"file":[{"date_updated":"2021-06-10T19:33:56Z","access_level":"open_access","date_created":"2021-06-10T19:33:56Z","checksum":"a21c627683890c309a68f6389302c408","file_name":"MISMM-arxiv.pdf","file_size":587404,"creator":"pdavies","file_id":"9542","content_type":"application/pdf","relation":"main_file","success":1}],"date_created":"2021-06-10T19:31:05Z","day":"01","type":"journal_article","intvolume":"        17","status":"public","issue":"2","publication":"ACM 
Transactions on Algorithms","file_date_updated":"2021-06-10T19:33:56Z"},{"oa_version":"Published Version","project":[{"call_identifier":"H2020","name":"ISTplus - Postdoctoral Fellowships","_id":"260C2330-B435-11E9-9278-68D0E5697425","grant_number":"754411"}],"quality_controlled":"1","user_id":"D865714E-FA4E-11E9-B85B-F5C5E5697425","_id":"9543","article_processing_charge":"No","date_updated":"2023-02-23T14:00:40Z","oa":1,"publication":"9th International Conference on Learning Representations","arxiv":1,"status":"public","author":[{"id":"11396234-BB50-11E9-B24C-90FCE5697425","orcid":"0000-0002-5646-9524","last_name":"Davies","full_name":"Davies, Peter","first_name":"Peter"},{"first_name":"Vijaykrishna","last_name":"Gurunanthan","full_name":"Gurunanthan, Vijaykrishna"},{"id":"4db776ff-ce15-11eb-96e3-bc2b90b01c16","last_name":"Moshrefi","full_name":"Moshrefi, Niusha ","first_name":"Niusha "},{"id":"0D0A9058-257B-11EA-A937-9341C3D8BC8A","full_name":"Ashkboos, Saleh","last_name":"Ashkboos","first_name":"Saleh"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","first_name":"Dan-Adrian"}],"abstract":[{"lang":"eng","text":"We consider the problem ofdistributed mean estimation (DME), in which n machines are each given a local d-dimensional vector xv∈Rd, and must cooperate to estimate the mean of their inputs μ=1n∑nv=1xv, while minimizing total communication cost. DME is a fundamental construct in distributed machine learning, and there has been considerable work on variants of this problem, especially in the context of distributed variance reduction for stochastic gradients in parallel SGD. Previous work typically assumes an upper bound on the norm of the input vectors, and achieves an error bound in terms of this norm. However, in many real applications, the input vectors are concentrated around the correct output μ, but μ itself has large norm. 
In such cases, previous output error bounds perform poorly. In this paper, we show that output error bounds need not depend on input norm. We provide a method of quantization which allows distributed mean estimation to be performed with solution quality dependent only on the distance between inputs, not on input norm, and show an analogous result for distributed variance reduction. The technique is based on a new connection with lattice theory. We also provide lower bounds showing that the communication to error trade-off of our algorithms is asymptotically optimal. As the lattices achieving optimal bounds under l2-norm can be computationally impractical, we also present an extension which leverages easy-to-use cubic lattices, and is loose only up to a logarithmic factor ind. We show experimentally that our method yields practical improvements for common applications, relative to prior approaches."}],"type":"conference","citation":{"short":"P. Davies, V. Gurunanthan, N. Moshrefi, S. Ashkboos, D.-A. Alistarh, in:, 9th International Conference on Learning Representations, 2021.","ista":"Davies P, Gurunanthan V, Moshrefi N, Ashkboos S, Alistarh D-A. 2021. New bounds for distributed mean estimation and variance reduction. 9th International Conference on Learning Representations.  ICLR: International Conference on Learning Representations.","ama":"Davies P, Gurunanthan V, Moshrefi N, Ashkboos S, Alistarh D-A. New bounds for distributed mean estimation and variance reduction. In: <i>9th International Conference on Learning Representations</i>. ; 2021.","mla":"Davies, Peter, et al. “New Bounds for Distributed Mean Estimation and Variance Reduction.” <i>9th International Conference on Learning Representations</i>, 2021.","chicago":"Davies, Peter, Vijaykrishna Gurunanthan, Niusha  Moshrefi, Saleh Ashkboos, and Dan-Adrian Alistarh. 
“New Bounds for Distributed Mean Estimation and Variance Reduction.” In <i>9th International Conference on Learning Representations</i>, 2021.","ieee":"P. Davies, V. Gurunanthan, N. Moshrefi, S. Ashkboos, and D.-A. Alistarh, “New bounds for distributed mean estimation and variance reduction,” in <i>9th International Conference on Learning Representations</i>, Virtual, 2021.","apa":"Davies, P., Gurunanthan, V., Moshrefi, N., Ashkboos, S., &#38; Alistarh, D.-A. (2021). New bounds for distributed mean estimation and variance reduction. In <i>9th International Conference on Learning Representations</i>. Virtual."},"day":"01","publication_status":"published","date_created":"2021-06-10T19:46:08Z","conference":{"end_date":"2021-05-07","name":" ICLR: International Conference on Learning Representations","location":"Virtual","start_date":"2021-05-03"},"main_file_link":[{"url":"https://openreview.net/pdf?id=t86MwoUCCNe","open_access":"1"}],"department":[{"_id":"DaAl"}],"language":[{"iso":"eng"}],"title":"New bounds for distributed mean estimation and variance reduction","external_id":{"arxiv":["2002.09268"]},"date_published":"2021-05-01T00:00:00Z","ec_funded":1,"year":"2021","month":"05"},{"year":"2021","external_id":{"arxiv":["1908.06077"]},"title":"NUQSGD: Provably communication-efficient data-parallel SGD via nonuniform quantization","main_file_link":[{"url":"https://www.jmlr.org/papers/v22/20-255.html","open_access":"1"}],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"ddc":["000"],"citation":{"apa":"Ramezani-Kebrya, A., Faghri, F., Markov, I., Aksenov, V., Alistarh, D.-A., &#38; Roy, D. M. (2021). NUQSGD: Provably communication-efficient data-parallel SGD via nonuniform quantization. <i>Journal of Machine Learning Research</i>. Journal of Machine Learning Research.","ieee":"A. Ramezani-Kebrya, F. 
Faghri, I. Markov, V. Aksenov, D.-A. Alistarh, and D. M. Roy, “NUQSGD: Provably communication-efficient data-parallel SGD via nonuniform quantization,” <i>Journal of Machine Learning Research</i>, vol. 22, no. 114. Journal of Machine Learning Research, p. 1−43, 2021.","chicago":"Ramezani-Kebrya, Ali, Fartash Faghri, Ilya Markov, Vitalii Aksenov, Dan-Adrian Alistarh, and Daniel M. Roy. “NUQSGD: Provably Communication-Efficient Data-Parallel SGD via Nonuniform Quantization.” <i>Journal of Machine Learning Research</i>. Journal of Machine Learning Research, 2021.","ama":"Ramezani-Kebrya A, Faghri F, Markov I, Aksenov V, Alistarh D-A, Roy DM. NUQSGD: Provably communication-efficient data-parallel SGD via nonuniform quantization. <i>Journal of Machine Learning Research</i>. 2021;22(114):1−43.","mla":"Ramezani-Kebrya, Ali, et al. “NUQSGD: Provably Communication-Efficient Data-Parallel SGD via Nonuniform Quantization.” <i>Journal of Machine Learning Research</i>, vol. 22, no. 114, Journal of Machine Learning Research, 2021, p. 1−43.","ista":"Ramezani-Kebrya A, Faghri F, Markov I, Aksenov V, Alistarh D-A, Roy DM. 2021. NUQSGD: Provably communication-efficient data-parallel SGD via nonuniform quantization. Journal of Machine Learning Research. 22(114), 1−43.","short":"A. Ramezani-Kebrya, F. Faghri, I. Markov, V. Aksenov, D.-A. Alistarh, D.M. 
Roy, Journal of Machine Learning Research 22 (2021) 1−43."},"publication_status":"published","author":[{"last_name":"Ramezani-Kebrya","full_name":"Ramezani-Kebrya, Ali","first_name":"Ali"},{"first_name":"Fartash","full_name":"Faghri, Fartash","last_name":"Faghri"},{"last_name":"Markov","full_name":"Markov, Ilya","first_name":"Ilya"},{"id":"2980135A-F248-11E8-B48F-1D18A9856A87","first_name":"Vitalii","full_name":"Aksenov, Vitalii","last_name":"Aksenov"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian"},{"first_name":"Daniel M.","full_name":"Roy, Daniel M.","last_name":"Roy"}],"abstract":[{"text":"As the size and complexity of models and datasets grow, so does the need for communication-efficient variants of stochastic gradient descent that can be deployed to perform parallel model training. One popular communication-compression method for data-parallel SGD is QSGD (Alistarh et al., 2017), which quantizes and encodes gradients to reduce communication costs. The baseline variant of QSGD provides strong theoretical guarantees, however, for practical purposes, the authors proposed a heuristic variant which we call QSGDinf, which demonstrated impressive empirical gains for distributed training of large neural networks. 
In this paper, we build on this work to propose a new gradient quantization scheme, and show that it has both stronger theoretical guarantees than QSGD, and matches and exceeds the empirical performance of the QSGDinf heuristic and of other compression methods.","lang":"eng"}],"article_processing_charge":"No","date_updated":"2024-03-06T12:22:07Z","volume":22,"oa":1,"arxiv":1,"oa_version":"Published Version","quality_controlled":"1","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publication_identifier":{"issn":["15324435"],"eissn":["15337928"]},"_id":"9571","article_type":"original","date_published":"2021-04-01T00:00:00Z","month":"04","language":[{"iso":"eng"}],"scopus_import":"1","publisher":"Journal of Machine Learning Research","department":[{"_id":"DaAl"}],"has_accepted_license":"1","date_created":"2021-06-20T22:01:33Z","file":[{"access_level":"open_access","date_updated":"2021-06-23T07:09:41Z","file_size":11237154,"file_name":"2021_JournalOfMachineLearningResearch_Ramezani-Kebrya.pdf","checksum":"6428aa8bcb67768b6949c99b55d5281d","date_created":"2021-06-23T07:09:41Z","relation":"main_file","content_type":"application/pdf","file_id":"9595","creator":"asandaue","success":1}],"type":"journal_article","day":"01","status":"public","intvolume":"        22","page":"1−43","file_date_updated":"2021-06-23T07:09:41Z","publication":"Journal of Machine Learning Research","issue":"114"},{"intvolume":"     12810","status":"public","day":"20","type":"conference","publication":"Structural Information and Communication Complexity","file_date_updated":"2021-07-01T11:21:40Z","page":"3-12","publisher":"Springer Nature","language":[{"iso":"eng"}],"month":"06","date_published":"2021-06-20T00:00:00Z","conference":{"start_date":"2021-06-28","end_date":"2021-07-01","name":" SIROCCO: International Colloquium on Structural Information and Communication Complexity","location":"Wrocław, 
Poland"},"file":[{"relation":"main_file","content_type":"application/pdf","creator":"pdavies","file_id":"9621","access_level":"open_access","date_updated":"2021-07-01T11:21:40Z","file_size":319728,"file_name":"Population_Coupon_Collector.pdf","checksum":"fe37fb9af3f5016c1084af9d6e7109bd","date_created":"2021-07-01T11:21:40Z"}],"date_created":"2021-07-01T11:04:43Z","has_accepted_license":"1","department":[{"_id":"DaAl"}],"abstract":[{"text":"In this note, we introduce a distributed twist on the classic coupon collector problem: a set of m collectors wish to each obtain a set of n coupons; for this, they can each sample coupons uniformly at random, but can also meet in pairwise interactions, during which they can exchange coupons. By doing so, they hope to reduce the number of coupons that must be sampled by each collector in order to obtain a full set. This extension is natural when considering real-world manifestations of the coupon collector phenomenon, and has been remarked upon and studied empirically (Hayes and Hannigan 2006, Ahmad et al. 2014, Delmarcelle 2019).\r\n\r\nWe provide the first theoretical analysis for such a scenario. We find that “coupon collecting with friends” can indeed significantly reduce the number of coupons each collector must sample, and raises interesting connections to the more traditional variants of the problem. 
While our analysis is in most cases asymptotically tight, there are several open questions raised, regarding finer-grained analysis of both “coupon collecting with friends,” and of a long-studied variant of the original problem in which a collector requires multiple full sets of coupons.","lang":"eng"}],"author":[{"first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"},{"id":"11396234-BB50-11E9-B24C-90FCE5697425","first_name":"Peter","orcid":"0000-0002-5646-9524","last_name":"Davies","full_name":"Davies, Peter"}],"citation":{"mla":"Alistarh, Dan-Adrian, and Peter Davies. “Collecting Coupons Is Faster with Friends.” <i>Structural Information and Communication Complexity</i>, vol. 12810, Springer Nature, 2021, pp. 3–12, doi:<a href=\"https://doi.org/10.1007/978-3-030-79527-6_1\">10.1007/978-3-030-79527-6_1</a>.","ama":"Alistarh D-A, Davies P. Collecting coupons is faster with friends. In: <i>Structural Information and Communication Complexity</i>. Vol 12810. Springer Nature; 2021:3-12. doi:<a href=\"https://doi.org/10.1007/978-3-030-79527-6_1\">10.1007/978-3-030-79527-6_1</a>","ista":"Alistarh D-A, Davies P. 2021. Collecting coupons is faster with friends. Structural Information and Communication Complexity.  SIROCCO: International Colloquium on Structural Information and Communication Complexity, LNCS, vol. 12810, 3–12.","short":"D.-A. Alistarh, P. Davies, in:, Structural Information and Communication Complexity, Springer Nature, 2021, pp. 3–12.","ieee":"D.-A. Alistarh and P. Davies, “Collecting coupons is faster with friends,” in <i>Structural Information and Communication Complexity</i>, Wrocław, Poland, 2021, vol. 12810, pp. 3–12.","apa":"Alistarh, D.-A., &#38; Davies, P. (2021). Collecting coupons is faster with friends. In <i>Structural Information and Communication Complexity</i> (Vol. 12810, pp. 3–12). Wrocław, Poland: Springer Nature. 
<a href=\"https://doi.org/10.1007/978-3-030-79527-6_1\">https://doi.org/10.1007/978-3-030-79527-6_1</a>","chicago":"Alistarh, Dan-Adrian, and Peter Davies. “Collecting Coupons Is Faster with Friends.” In <i>Structural Information and Communication Complexity</i>, 12810:3–12. Springer Nature, 2021. <a href=\"https://doi.org/10.1007/978-3-030-79527-6_1\">https://doi.org/10.1007/978-3-030-79527-6_1</a>."},"publication_status":"published","publication_identifier":{"issn":["0302-9743"],"isbn":["9783030795269"],"eisbn":["9783030795276"],"eissn":["1611-3349"]},"_id":"9620","oa_version":"Preprint","project":[{"grant_number":"754411","call_identifier":"H2020","_id":"260C2330-B435-11E9-9278-68D0E5697425","name":"ISTplus - Postdoctoral Fellowships"}],"quality_controlled":"1","acknowledgement":"Peter Davies is supported by the European Union’s Horizon2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No. 754411.","user_id":"D865714E-FA4E-11E9-B85B-F5C5E5697425","article_processing_charge":"No","volume":12810,"date_updated":"2023-02-23T14:02:46Z","oa":1,"title":"Collecting coupons is faster with friends","year":"2021","doi":"10.1007/978-3-030-79527-6_1","ec_funded":1,"ddc":["000"],"alternative_title":["LNCS"]},{"month":"07","date_published":"2021-07-06T00:00:00Z","scopus_import":"1","language":[{"iso":"eng"}],"department":[{"_id":"DaAl"}],"conference":{"start_date":"2021-07-06","location":" Virtual Event, United States","name":"SPAA: Symposium on Parallelism in Algorithms and Architectures ","end_date":"2021-07-08"},"date_created":"2021-07-18T22:01:22Z","day":"06","type":"conference","status":"public","publication":"Annual ACM Symposium on Parallelism in Algorithms and Architectures","page":"129-139","year":"2021","doi":"10.1145/3409964.3461785","ec_funded":1,"title":"Efficient load-balancing through distributed token 
dropping","external_id":{"arxiv":["2005.07761"]},"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2005.07761"}],"related_material":{"record":[{"id":"15074","relation":"earlier_version","status":"public"}]},"publication_status":"published","citation":{"short":"S. Brandt, B. Keller, J. Rybicki, J. Suomela, J. Uitto, in:, Annual ACM Symposium on Parallelism in Algorithms and Architectures, 2021, pp. 129–139.","ista":"Brandt S, Keller B, Rybicki J, Suomela J, Uitto J. 2021. Efficient load-balancing through distributed token dropping. Annual ACM Symposium on Parallelism in Algorithms and Architectures. SPAA: Symposium on Parallelism in Algorithms and Architectures , 129–139.","ama":"Brandt S, Keller B, Rybicki J, Suomela J, Uitto J. Efficient load-balancing through distributed token dropping. In: <i>Annual ACM Symposium on Parallelism in Algorithms and Architectures</i>. ; 2021:129-139. doi:<a href=\"https://doi.org/10.1145/3409964.3461785\">10.1145/3409964.3461785</a>","mla":"Brandt, Sebastian, et al. “Efficient Load-Balancing through Distributed Token Dropping.” <i>Annual ACM Symposium on Parallelism in Algorithms and Architectures</i>, 2021, pp. 129–39, doi:<a href=\"https://doi.org/10.1145/3409964.3461785\">10.1145/3409964.3461785</a>.","chicago":"Brandt, Sebastian, Barbara Keller, Joel Rybicki, Jukka Suomela, and Jara Uitto. “Efficient Load-Balancing through Distributed Token Dropping.” In <i>Annual ACM Symposium on Parallelism in Algorithms and Architectures</i>, 129–39, 2021. <a href=\"https://doi.org/10.1145/3409964.3461785\">https://doi.org/10.1145/3409964.3461785</a>.","ieee":"S. Brandt, B. Keller, J. Rybicki, J. Suomela, and J. Uitto, “Efficient load-balancing through distributed token dropping,” in <i>Annual ACM Symposium on Parallelism in Algorithms and Architectures</i>,  Virtual Event, United States, 2021, pp. 129–139.","apa":"Brandt, S., Keller, B., Rybicki, J., Suomela, J., &#38; Uitto, J. (2021). 
Efficient load-balancing through distributed token dropping. In <i>Annual ACM Symposium on Parallelism in Algorithms and Architectures</i> (pp. 129–139).  Virtual Event, United States. <a href=\"https://doi.org/10.1145/3409964.3461785\">https://doi.org/10.1145/3409964.3461785</a>"},"abstract":[{"lang":"eng","text":"We introduce a new graph problem, the token dropping game, and we show how to solve it efficiently in a distributed setting. We use the token dropping game as a tool to design an efficient distributed algorithm for stable orientations and more generally for locally optimal semi-matchings. The prior work by Czygrinow et al. (DISC 2012) finds a stable orientation in O(Δ^5) rounds in graphs of maximum degree Δ, while we improve it to O(Δ^4) and also prove a lower bound of Ω(Δ). For the more general problem of locally optimal semi-matchings, the prior upper bound is O(S^5) and our new algorithm runs in O(C · S^4) rounds, which is an improvement for C = o(S); here C and S are the maximum degrees of customers and servers, respectively."}],"author":[{"last_name":"Brandt","full_name":"Brandt, Sebastian","first_name":"Sebastian"},{"first_name":"Barbara","full_name":"Keller, Barbara","last_name":"Keller"},{"full_name":"Rybicki, Joel","last_name":"Rybicki","orcid":"0000-0002-6432-6646","first_name":"Joel","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Suomela, Jukka","last_name":"Suomela","first_name":"Jukka"},{"first_name":"Jara","last_name":"Uitto","full_name":"Uitto, Jara"}],"arxiv":1,"date_updated":"2024-03-05T07:13:12Z","oa":1,"article_processing_charge":"No","_id":"9678","publication_identifier":{"isbn":["9781450380706"]},"user_id":"D865714E-FA4E-11E9-B85B-F5C5E5697425","acknowledgement":"We thank Orr Fischer, Juho Hirvonen, and Tuomo Lempiäinen for valuable discussions. This project has received funding from the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No. 
840605.","project":[{"grant_number":"840605","call_identifier":"H2020","_id":"26A5D39A-B435-11E9-9278-68D0E5697425","name":"Coordination in constrained and natural distributed systems"}],"oa_version":"Preprint","quality_controlled":"1"},{"ec_funded":1,"doi":"10.1109/sp40001.2021.00035","year":"2021","title":"Keep the dirt: tainted TreeKEM, adaptively and actively secure continuous group key agreement","main_file_link":[{"open_access":"1","url":"https://eprint.iacr.org/2019/1489"}],"related_material":{"record":[{"id":"10035","relation":"dissertation_contains","status":"public"}]},"publication_status":"published","citation":{"chicago":"Klein, Karen, Guillermo Pascual Perez, Michael Walter, Chethan Kamath Hosdurg, Margarita Capretto, Miguel Cueto Noval, Ilia Markov, Michelle X Yeo, Joel F Alwen, and Krzysztof Z Pietrzak. “Keep the Dirt: Tainted TreeKEM, Adaptively and Actively Secure Continuous Group Key Agreement.” In <i>2021 IEEE Symposium on Security and Privacy </i>, 268–84. IEEE, 2021. <a href=\"https://doi.org/10.1109/sp40001.2021.00035\">https://doi.org/10.1109/sp40001.2021.00035</a>.","apa":"Klein, K., Pascual Perez, G., Walter, M., Kamath Hosdurg, C., Capretto, M., Cueto Noval, M., … Pietrzak, K. Z. (2021). Keep the dirt: tainted TreeKEM, adaptively and actively secure continuous group key agreement. In <i>2021 IEEE Symposium on Security and Privacy </i> (pp. 268–284). San Francisco, CA, United States: IEEE. <a href=\"https://doi.org/10.1109/sp40001.2021.00035\">https://doi.org/10.1109/sp40001.2021.00035</a>","ieee":"K. Klein <i>et al.</i>, “Keep the dirt: tainted TreeKEM, adaptively and actively secure continuous group key agreement,” in <i>2021 IEEE Symposium on Security and Privacy </i>, San Francisco, CA, United States, 2021, pp. 268–284.","short":"K. Klein, G. Pascual Perez, M. Walter, C. Kamath Hosdurg, M. Capretto, M. Cueto Noval, I. Markov, M.X. Yeo, J.F. Alwen, K.Z. Pietrzak, in:, 2021 IEEE Symposium on Security and Privacy , IEEE, 2021, pp. 
268–284.","ista":"Klein K, Pascual Perez G, Walter M, Kamath Hosdurg C, Capretto M, Cueto Noval M, Markov I, Yeo MX, Alwen JF, Pietrzak KZ. 2021. Keep the dirt: tainted TreeKEM, adaptively and actively secure continuous group key agreement. 2021 IEEE Symposium on Security and Privacy . SP: Symposium on Security and Privacy, 268–284.","ama":"Klein K, Pascual Perez G, Walter M, et al. Keep the dirt: tainted TreeKEM, adaptively and actively secure continuous group key agreement. In: <i>2021 IEEE Symposium on Security and Privacy </i>. IEEE; 2021:268-284. doi:<a href=\"https://doi.org/10.1109/sp40001.2021.00035\">10.1109/sp40001.2021.00035</a>","mla":"Klein, Karen, et al. “Keep the Dirt: Tainted TreeKEM, Adaptively and Actively Secure Continuous Group Key Agreement.” <i>2021 IEEE Symposium on Security and Privacy </i>, IEEE, 2021, pp. 268–84, doi:<a href=\"https://doi.org/10.1109/sp40001.2021.00035\">10.1109/sp40001.2021.00035</a>."},"author":[{"first_name":"Karen","last_name":"Klein","full_name":"Klein, Karen","id":"3E83A2F8-F248-11E8-B48F-1D18A9856A87"},{"id":"2D7ABD02-F248-11E8-B48F-1D18A9856A87","first_name":"Guillermo","last_name":"Pascual Perez","full_name":"Pascual Perez, Guillermo","orcid":"0000-0001-8630-415X"},{"id":"488F98B0-F248-11E8-B48F-1D18A9856A87","first_name":"Michael","last_name":"Walter","full_name":"Walter, Michael","orcid":"0000-0003-3186-2482"},{"id":"4BD3F30E-F248-11E8-B48F-1D18A9856A87","last_name":"Kamath Hosdurg","full_name":"Kamath Hosdurg, Chethan","first_name":"Chethan"},{"first_name":"Margarita","full_name":"Capretto, Margarita","last_name":"Capretto"},{"full_name":"Cueto Noval, Miguel","last_name":"Cueto Noval","first_name":"Miguel","id":"ffc563a3-f6e0-11ea-865d-e3cce03d17cc"},{"first_name":"Ilia","last_name":"Markov","full_name":"Markov, Ilia","id":"D0CF4148-C985-11E9-8066-0BDEE5697425"},{"id":"2D82B818-F248-11E8-B48F-1D18A9856A87","last_name":"Yeo","full_name":"Yeo, Michelle X","first_name":"Michelle X"},{"full_name":"Alwen, Joel 
F","last_name":"Alwen","first_name":"Joel F","id":"2A8DFA8C-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Pietrzak","full_name":"Pietrzak, Krzysztof Z","orcid":"0000-0002-9139-1654","first_name":"Krzysztof Z","id":"3E04A7AA-F248-11E8-B48F-1D18A9856A87"}],"abstract":[{"lang":"eng","text":"While messaging systems with strong security guarantees are widely used in practice, designing a protocol that scales efficiently to large groups and enjoys similar security guarantees remains largely open. The two existing proposals to date are ART (Cohn-Gordon et al., CCS18) and TreeKEM (IETF, The Messaging Layer Security Protocol, draft). TreeKEM is the currently considered candidate by the IETF MLS working group, but dynamic group operations (i.e. adding and removing users) can cause efficiency issues. In this paper we formalize and analyze a variant of TreeKEM which we term Tainted TreeKEM (TTKEM for short). The basic idea underlying TTKEM was suggested by Millican (MLS mailing list, February 2018). This version is more efficient than TreeKEM for some natural distributions of group operations, we quantify this through simulations.Our second contribution is two security proofs for TTKEM which establish post compromise and forward secrecy even against adaptive attackers. The security loss (to the underlying PKE) in the Random Oracle Model is a polynomial factor, and a quasipolynomial one in the Standard Model. Our proofs can be adapted to TreeKEM as well. Before our work no security proof for any TreeKEM-like protocol establishing tight security against an adversary who can adaptively choose the sequence of operations was known. We also are the first to prove (or even formalize) active security where the server can arbitrarily deviate from the protocol specification. 
Proving fully active security – where also the users can arbitrarily deviate – remains open."}],"oa":1,"date_updated":"2023-09-07T13:32:11Z","article_processing_charge":"No","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","acknowledgement":"The first three authors contributed equally to this work. Funded by the European Research Council (ERC) under the European Union’s Horizon2020 research and innovation programme (682815-TOCNeT). Funded by the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie Grant Agreement No.665385.","quality_controlled":"1","project":[{"grant_number":"665385","call_identifier":"H2020","_id":"2564DBCA-B435-11E9-9278-68D0E5697425","name":"International IST Doctoral Program"},{"call_identifier":"H2020","_id":"258AA5B2-B435-11E9-9278-68D0E5697425","name":"Teaching Old Crypto New Tricks","grant_number":"682815"}],"oa_version":"Preprint","_id":"10049","date_published":"2021-08-26T00:00:00Z","month":"08","language":[{"iso":"eng"}],"publisher":"IEEE","department":[{"_id":"KrPi"},{"_id":"DaAl"}],"date_created":"2021-09-27T13:46:27Z","conference":{"start_date":"2021-05-24","end_date":"2021-05-27","location":"San Francisco, CA, United States","name":"SP: Symposium on Security and Privacy"},"type":"conference","day":"26","status":"public","page":"268-284","publication":"2021 IEEE Symposium on Security and Privacy "},{"main_file_link":[{"url":"https://www.jmlr.org/papers/v22/21-0366.html","open_access":"1"}],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"ddc":["000"],"year":"2021","external_id":{"arxiv":["2102.00554"]},"title":"Sparsity in deep learning: Pruning and growth for efficient inference and training in neural 
networks","article_processing_charge":"No","date_updated":"2022-05-13T09:36:08Z","volume":22,"oa":1,"arxiv":1,"quality_controlled":"1","oa_version":"Published Version","acknowledgement":"We thank Doug Burger, Steve Scott, Marco Heddes, and the respective teams at Microsoft for inspiring discussions on the topic. We thank Angelika Steger for uplifting debates about the connections to biological brains, Sidak Pal Singh for his support regarding experimental results, and Utku Evci as well as Xin Wang for comments on previous versions of this\r\nwork. Special thanks go to Bernhard Schölkopf, our JMLR editor Samy Bengio, and the three anonymous reviewers who provided excellent comprehensive, pointed, and deep review comments that improved the quality of our manuscript significantly.","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publication_identifier":{"eissn":["1533-7928"],"issn":["1532-4435"]},"_id":"10180","citation":{"ista":"Hoefler T, Alistarh D-A, Ben-Nun T, Dryden N, Peste E-A. 2021. Sparsity in deep learning: Pruning and growth for efficient inference and training in neural networks. Journal of Machine Learning Research. 22(241), 1–124.","short":"T. Hoefler, D.-A. Alistarh, T. Ben-Nun, N. Dryden, E.-A. Peste, Journal of Machine Learning Research 22 (2021) 1–124.","mla":"Hoefler, Torsten, et al. “Sparsity in Deep Learning: Pruning and Growth for Efficient Inference and Training in Neural Networks.” <i>Journal of Machine Learning Research</i>, vol. 22, no. 241, Journal of Machine Learning Research, 2021, pp. 1–124.","ama":"Hoefler T, Alistarh D-A, Ben-Nun T, Dryden N, Peste E-A. Sparsity in deep learning: Pruning and growth for efficient inference and training in neural networks. <i>Journal of Machine Learning Research</i>. 2021;22(241):1-124.","chicago":"Hoefler, Torsten, Dan-Adrian Alistarh, Tal Ben-Nun, Nikoli Dryden, and Elena-Alexandra Peste. 
“Sparsity in Deep Learning: Pruning and Growth for Efficient Inference and Training in Neural Networks.” <i>Journal of Machine Learning Research</i>. Journal of Machine Learning Research, 2021.","apa":"Hoefler, T., Alistarh, D.-A., Ben-Nun, T., Dryden, N., &#38; Peste, E.-A. (2021). Sparsity in deep learning: Pruning and growth for efficient inference and training in neural networks. <i>Journal of Machine Learning Research</i>. Journal of Machine Learning Research.","ieee":"T. Hoefler, D.-A. Alistarh, T. Ben-Nun, N. Dryden, and E.-A. Peste, “Sparsity in deep learning: Pruning and growth for efficient inference and training in neural networks,” <i>Journal of Machine Learning Research</i>, vol. 22, no. 241. Journal of Machine Learning Research, pp. 1–124, 2021."},"publication_status":"published","author":[{"first_name":"Torsten","last_name":"Hoefler","full_name":"Hoefler, Torsten"},{"first_name":"Dan-Adrian","last_name":"Alistarh","full_name":"Alistarh, Dan-Adrian","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Tal","full_name":"Ben-Nun, Tal","last_name":"Ben-Nun"},{"first_name":"Nikoli","full_name":"Dryden, Nikoli","last_name":"Dryden"},{"last_name":"Peste","full_name":"Peste, Elena-Alexandra","first_name":"Elena-Alexandra","id":"32D78294-F248-11E8-B48F-1D18A9856A87"}],"abstract":[{"text":"The growing energy and performance costs of deep learning have driven the community to reduce the size of neural networks by selectively pruning components. Similarly to their biological counterparts, sparse networks generalize just as well, sometimes even better than, the original dense networks. Sparsity promises to reduce the memory footprint of regular networks to fit mobile devices, as well as shorten training time for ever growing networks. In this paper, we survey prior work on sparsity in deep learning and provide an extensive tutorial of sparsification for both inference and training. 
We describe approaches to remove and add elements of neural networks, different training strategies to achieve model sparsity, and mechanisms to exploit sparsity in practice. Our work distills ideas from more than 300 research papers and provides guidance to practitioners who wish to utilize sparsity today, as well as to researchers whose goal is to push the frontier forward. We include the necessary background on mathematical methods in sparsification, describe phenomena such as early structure adaptation, the intricate relations between sparsity and the training process, and show techniques for achieving acceleration on real hardware. We also define a metric of pruned parameter efficiency that could serve as a baseline for comparison of different sparse networks. We close by speculating on how sparsity can improve future workloads and outline major open problems in the field.","lang":"eng"}],"department":[{"_id":"DaAl"}],"has_accepted_license":"1","file":[{"file_name":"2021_JMachLearnRes_Hoefler.pdf","file_size":3527521,"date_created":"2021-10-27T15:34:18Z","checksum":"3389d9d01fc58f8fb4c1a53e14a8abbf","date_updated":"2021-10-27T15:34:18Z","access_level":"open_access","success":1,"content_type":"application/pdf","relation":"main_file","file_id":"10192","creator":"cziletti"}],"date_created":"2021-10-24T22:01:34Z","article_type":"original","date_published":"2021-09-01T00:00:00Z","month":"09","language":[{"iso":"eng"}],"scopus_import":"1","publisher":"Journal of Machine Learning Research","file_date_updated":"2021-10-27T15:34:18Z","page":"1-124","publication":"Journal of Machine Learning Research","issue":"241","type":"journal_article","day":"01","status":"public","intvolume":"        22"},{"publication":"35th International Symposium on Distributed Computing","file_date_updated":"2021-11-12T09:23:22Z","intvolume":"       209","status":"public","day":"04","type":"conference","conference":{"name":"DISC: Distributed Computing","location":"Freiburg, 
Germany","end_date":"2021-10-08","start_date":"2021-10-04"},"file":[{"checksum":"76546df112a0ba1166c864d33d7834e2","date_created":"2021-11-12T09:23:22Z","file_size":795860,"file_name":"2021_LIPIcsDISC_BChatterjee.pdf","access_level":"open_access","date_updated":"2021-11-12T09:23:22Z","success":1,"file_id":"10276","creator":"cchlebak","relation":"main_file","content_type":"application/pdf"}],"date_created":"2021-11-07T23:01:23Z","has_accepted_license":"1","department":[{"_id":"DaAl"}],"publisher":"Schloss Dagstuhl - Leibniz Zentrum für Informatik","scopus_import":"1","language":[{"iso":"eng"}],"month":"10","date_published":"2021-10-04T00:00:00Z","_id":"10216","publication_identifier":{"issn":["1868-8969"],"isbn":["9-783-9597-7210-5"]},"user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","acknowledgement":"This work was partially funded by National Supercomputing Mission, Govt. of India under the project “Concurrent and Distributed Programming primitives and algorithms for Temporal Graphs”(DST/NSM/R&D_Exascale/2021/16).\r\n","quality_controlled":"1","oa_version":"Published Version","arxiv":1,"volume":209,"date_updated":"2021-11-12T09:42:55Z","oa":1,"article_processing_charge":"No","abstract":[{"text":"This paper reports a new concurrent graph data structure that supports updates of both edges and vertices and queries: Breadth-first search, Single-source shortest-path, and Betweenness centrality. The operations are provably linearizable and non-blocking.","lang":"eng"}],"author":[{"last_name":"Chatterjee","full_name":"Chatterjee, Bapi","first_name":"Bapi","id":"3C41A08A-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Peri, Sathya","last_name":"Peri","first_name":"Sathya"},{"first_name":"Muktikanta","last_name":"Sa","full_name":"Sa, Muktikanta"}],"publication_status":"published","citation":{"ama":"Chatterjee B, Peri S, Sa M. Brief announcement: Non-blocking dynamic unbounded graphs with worst-case amortized bounds. 
In: <i>35th International Symposium on Distributed Computing</i>. Vol 209. Schloss Dagstuhl - Leibniz Zentrum für Informatik; 2021. doi:<a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.52\">10.4230/LIPIcs.DISC.2021.52</a>","mla":"Chatterjee, Bapi, et al. “Brief Announcement: Non-Blocking Dynamic Unbounded Graphs with Worst-Case Amortized Bounds.” <i>35th International Symposium on Distributed Computing</i>, vol. 209, 52, Schloss Dagstuhl - Leibniz Zentrum für Informatik, 2021, doi:<a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.52\">10.4230/LIPIcs.DISC.2021.52</a>.","ista":"Chatterjee B, Peri S, Sa M. 2021. Brief announcement: Non-blocking dynamic unbounded graphs with worst-case amortized bounds. 35th International Symposium on Distributed Computing. DISC: Distributed Computing, LIPIcs, vol. 209, 52.","short":"B. Chatterjee, S. Peri, M. Sa, in:, 35th International Symposium on Distributed Computing, Schloss Dagstuhl - Leibniz Zentrum für Informatik, 2021.","ieee":"B. Chatterjee, S. Peri, and M. Sa, “Brief announcement: Non-blocking dynamic unbounded graphs with worst-case amortized bounds,” in <i>35th International Symposium on Distributed Computing</i>, Freiburg, Germany, 2021, vol. 209.","apa":"Chatterjee, B., Peri, S., &#38; Sa, M. (2021). Brief announcement: Non-blocking dynamic unbounded graphs with worst-case amortized bounds. In <i>35th International Symposium on Distributed Computing</i> (Vol. 209). Freiburg, Germany: Schloss Dagstuhl - Leibniz Zentrum für Informatik. <a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.52\">https://doi.org/10.4230/LIPIcs.DISC.2021.52</a>","chicago":"Chatterjee, Bapi, Sathya Peri, and Muktikanta Sa. “Brief Announcement: Non-Blocking Dynamic Unbounded Graphs with Worst-Case Amortized Bounds.” In <i>35th International Symposium on Distributed Computing</i>, Vol. 209. Schloss Dagstuhl - Leibniz Zentrum für Informatik, 2021. 
<a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.52\">https://doi.org/10.4230/LIPIcs.DISC.2021.52</a>."},"ddc":["000"],"alternative_title":["LIPIcs"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"article_number":"52","title":"Brief announcement: Non-blocking dynamic unbounded graphs with worst-case amortized bounds","external_id":{"arxiv":["2003.01697"]},"doi":"10.4230/LIPIcs.DISC.2021.52","year":"2021"},{"title":"Lower bounds for shared-memory leader election under bounded write contention","doi":"10.4230/LIPIcs.DISC.2021.4","year":"2021","ec_funded":1,"ddc":["000"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"alternative_title":["LIPIcs"],"article_number":"4","abstract":[{"lang":"eng","text":"This paper gives tight logarithmic lower bounds on the solo step complexity of leader election in an asynchronous shared-memory model with single-writer multi-reader (SWMR) registers, for both deterministic and randomized obstruction-free algorithms. 
The approach extends to lower bounds for deterministic and randomized obstruction-free algorithms using multi-writer registers under bounded write concurrency, showing a trade-off between the solo step complexity of a leader election algorithm, and the worst-case number of stalls incurred by a processor in an execution."}],"author":[{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh"},{"last_name":"Gelashvili","full_name":"Gelashvili, Rati","first_name":"Rati"},{"id":"3279A00C-F248-11E8-B48F-1D18A9856A87","last_name":"Nadiradze","full_name":"Nadiradze, Giorgi","first_name":"Giorgi"}],"citation":{"ama":"Alistarh D-A, Gelashvili R, Nadiradze G. Lower bounds for shared-memory leader election under bounded write contention. In: <i>35th International Symposium on Distributed Computing</i>. Vol 209. Schloss Dagstuhl - Leibniz Zentrum für Informatik; 2021. doi:<a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.4\">10.4230/LIPIcs.DISC.2021.4</a>","mla":"Alistarh, Dan-Adrian, et al. “Lower Bounds for Shared-Memory Leader Election under Bounded Write Contention.” <i>35th International Symposium on Distributed Computing</i>, vol. 209, 4, Schloss Dagstuhl - Leibniz Zentrum für Informatik, 2021, doi:<a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.4\">10.4230/LIPIcs.DISC.2021.4</a>.","ista":"Alistarh D-A, Gelashvili R, Nadiradze G. 2021. Lower bounds for shared-memory leader election under bounded write contention. 35th International Symposium on Distributed Computing. DISC: Distributed Computing, LIPIcs, vol. 209, 4.","short":"D.-A. Alistarh, R. Gelashvili, G. Nadiradze, in:, 35th International Symposium on Distributed Computing, Schloss Dagstuhl - Leibniz Zentrum für Informatik, 2021.","ieee":"D.-A. Alistarh, R. Gelashvili, and G. 
Nadiradze, “Lower bounds for shared-memory leader election under bounded write contention,” in <i>35th International Symposium on Distributed Computing</i>, Freiburg, Germany, 2021, vol. 209.","apa":"Alistarh, D.-A., Gelashvili, R., &#38; Nadiradze, G. (2021). Lower bounds for shared-memory leader election under bounded write contention. In <i>35th International Symposium on Distributed Computing</i> (Vol. 209). Freiburg, Germany: Schloss Dagstuhl - Leibniz Zentrum für Informatik. <a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.4\">https://doi.org/10.4230/LIPIcs.DISC.2021.4</a>","chicago":"Alistarh, Dan-Adrian, Rati Gelashvili, and Giorgi Nadiradze. “Lower Bounds for Shared-Memory Leader Election under Bounded Write Contention.” In <i>35th International Symposium on Distributed Computing</i>, Vol. 209. Schloss Dagstuhl - Leibniz Zentrum für Informatik, 2021. <a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.4\">https://doi.org/10.4230/LIPIcs.DISC.2021.4</a>."},"publication_status":"published","publication_identifier":{"issn":["1868-8969"],"isbn":["9-783-9597-7210-5"]},"_id":"10217","project":[{"grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","call_identifier":"H2020"}],"quality_controlled":"1","oa_version":"Published Version","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","acknowledgement":"Dan Alistarh: Supported in part by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML). Giorgi Nadiradze: Supported in part by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML). 
The authors would like to thank the DISC anonymous reviewers for their useful\r\nfeedback and comments.","article_processing_charge":"No","date_updated":"2022-08-19T07:23:28Z","oa":1,"volume":209,"scopus_import":"1","publisher":"Schloss Dagstuhl - Leibniz Zentrum für Informatik","language":[{"iso":"eng"}],"month":"10","date_published":"2021-10-04T00:00:00Z","conference":{"start_date":"2021-10-04","location":"Freiburg, Germany","end_date":"2021-10-08","name":"DISC: Distributed Computing"},"file":[{"success":1,"relation":"main_file","content_type":"application/pdf","creator":"cchlebak","file_id":"10277","file_size":706791,"file_name":"2021_LIPIcsDISC_Alistarh.pdf","checksum":"b4cdc6668c899a601c5e6a96b8ca54d9","date_created":"2021-11-12T09:33:26Z","access_level":"open_access","date_updated":"2021-11-12T09:33:26Z"}],"date_created":"2021-11-07T23:01:23Z","has_accepted_license":"1","department":[{"_id":"DaAl"}],"intvolume":"       209","status":"public","day":"04","type":"conference","publication":"35th International Symposium on Distributed Computing","file_date_updated":"2021-11-12T09:33:26Z"},{"publication_status":"published","citation":{"apa":"Alistarh, D.-A., Gelashvili, R., &#38; Rybicki, J. (2021). Brief announcement: Fast graphical population protocols. In <i>35th International Symposium on Distributed Computing</i> (Vol. 209). Freiburg, Germany: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. <a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.43\">https://doi.org/10.4230/LIPIcs.DISC.2021.43</a>","ieee":"D.-A. Alistarh, R. Gelashvili, and J. Rybicki, “Brief announcement: Fast graphical population protocols,” in <i>35th International Symposium on Distributed Computing</i>, Freiburg, Germany, 2021, vol. 209.","chicago":"Alistarh, Dan-Adrian, Rati Gelashvili, and Joel Rybicki. “Brief Announcement: Fast Graphical Population Protocols.” In <i>35th International Symposium on Distributed Computing</i>, Vol. 209. 
Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2021. <a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.43\">https://doi.org/10.4230/LIPIcs.DISC.2021.43</a>.","ama":"Alistarh D-A, Gelashvili R, Rybicki J. Brief announcement: Fast graphical population protocols. In: <i>35th International Symposium on Distributed Computing</i>. Vol 209. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2021. doi:<a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.43\">10.4230/LIPIcs.DISC.2021.43</a>","mla":"Alistarh, Dan-Adrian, et al. “Brief Announcement: Fast Graphical Population Protocols.” <i>35th International Symposium on Distributed Computing</i>, vol. 209, 43, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2021, doi:<a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.43\">10.4230/LIPIcs.DISC.2021.43</a>.","ista":"Alistarh D-A, Gelashvili R, Rybicki J. 2021. Brief announcement: Fast graphical population protocols. 35th International Symposium on Distributed Computing. DISC: Distributed Computing , LIPIcs, vol. 209, 43.","short":"D.-A. Alistarh, R. Gelashvili, J. Rybicki, in:, 35th International Symposium on Distributed Computing, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2021."},"abstract":[{"lang":"eng","text":"Let G be a graph on n nodes. In the stochastic population protocol model, a collection of n indistinguishable, resource-limited nodes collectively solve tasks via pairwise interactions. In each interaction, two randomly chosen neighbors first read each other’s states, and then update their local states. A rich line of research has established tight upper and lower bounds on the complexity of fundamental tasks, such as majority and leader election, in this model, when G is a clique. Specifically, in the clique, these tasks can be solved fast, i.e., in n polylog n pairwise interactions, with high probability, using at most polylog n states per node. 
In this work, we consider the more general setting where G is an arbitrary graph, and present a technique for simulating protocols designed for fully-connected networks in any connected regular graph. Our main result is a simulation that is efficient on many interesting graph families: roughly, the simulation overhead is polylogarithmic in the number of nodes, and quadratic in the conductance of the graph. As an example, this implies that, in any regular graph with conductance φ, both leader election and exact majority can be solved in φ^{-2} ⋅ n polylog n pairwise interactions, with high probability, using at most φ^{-2} ⋅ polylog n states per node. This shows that there are fast and space-efficient population protocols for leader election and exact majority on graphs with good expansion properties."}],"author":[{"first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","last_name":"Alistarh","full_name":"Alistarh, Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Gelashvili","full_name":"Gelashvili, Rati","first_name":"Rati"},{"orcid":"0000-0002-6432-6646","full_name":"Rybicki, Joel","last_name":"Rybicki","first_name":"Joel","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87"}],"arxiv":1,"volume":209,"oa":1,"date_updated":"2023-02-21T09:24:08Z","article_processing_charge":"No","_id":"10218","publication_identifier":{"isbn":["9-783-9597-7210-5"],"issn":["1868-8969"]},"acknowledgement":"This project has received funding from the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No 840605.","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","oa_version":"Published Version","project":[{"name":"Coordination in constrained and natural distributed systems","_id":"26A5D39A-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"840605"}],"quality_controlled":"1","doi":"10.4230/LIPIcs.DISC.2021.43","year":"2021","ec_funded":1,"title":"Brief announcement: Fast graphical population 
protocols","external_id":{"arxiv":["2102.08808"]},"alternative_title":["LIPIcs"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"article_number":"43","ddc":["000"],"day":"04","type":"conference","intvolume":"       209","status":"public","publication":"35th International Symposium on Distributed Computing","file_date_updated":"2021-11-12T08:16:44Z","month":"10","date_published":"2021-10-04T00:00:00Z","publisher":"Schloss Dagstuhl - Leibniz-Zentrum für Informatik","scopus_import":"1","language":[{"iso":"eng"}],"has_accepted_license":"1","department":[{"_id":"DaAl"}],"conference":{"location":"Freiburg, Germany","end_date":"2021-10-08","name":"DISC: Distributed Computing ","start_date":"2021-10-04"},"file":[{"file_size":534219,"file_name":"2021_LIPIcsDISC_Alistarh.pdf","checksum":"fd2a690f6856d21247e9aa952b0e2885","date_created":"2021-11-12T08:16:44Z","access_level":"open_access","date_updated":"2021-11-12T08:16:44Z","success":1,"relation":"main_file","content_type":"application/pdf","creator":"cchlebak","file_id":"10274"}],"date_created":"2021-11-07T23:01:24Z"},{"publication_status":"published","citation":{"chicago":"Korhonen, Janne, Ami Paz, Joel Rybicki, Stefan Schmid, and Jukka Suomela. “Brief Announcement: Sinkless Orientation Is Hard Also in the Supported LOCAL Model.” In <i>35th International Symposium on Distributed Computing</i>, Vol. 209. Schloss Dagstuhl - Leibniz Zentrum für Informatik, 2021. <a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.58\">https://doi.org/10.4230/LIPIcs.DISC.2021.58</a>.","ieee":"J. Korhonen, A. Paz, J. Rybicki, S. Schmid, and J. Suomela, “Brief announcement: Sinkless orientation is hard also in the supported LOCAL model,” in <i>35th International Symposium on Distributed Computing</i>, Freiburg, Germany, 2021, vol. 
209.","apa":"Korhonen, J., Paz, A., Rybicki, J., Schmid, S., &#38; Suomela, J. (2021). Brief announcement: Sinkless orientation is hard also in the supported LOCAL model. In <i>35th International Symposium on Distributed Computing</i> (Vol. 209). Freiburg, Germany: Schloss Dagstuhl - Leibniz Zentrum für Informatik. <a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.58\">https://doi.org/10.4230/LIPIcs.DISC.2021.58</a>","short":"J. Korhonen, A. Paz, J. Rybicki, S. Schmid, J. Suomela, in:, 35th International Symposium on Distributed Computing, Schloss Dagstuhl - Leibniz Zentrum für Informatik, 2021.","ista":"Korhonen J, Paz A, Rybicki J, Schmid S, Suomela J. 2021. Brief announcement: Sinkless orientation is hard also in the supported LOCAL model. 35th International Symposium on Distributed Computing. DISC: Distributed Computing , LIPIcs, vol. 209, 58.","mla":"Korhonen, Janne, et al. “Brief Announcement: Sinkless Orientation Is Hard Also in the Supported LOCAL Model.” <i>35th International Symposium on Distributed Computing</i>, vol. 209, 58, Schloss Dagstuhl - Leibniz Zentrum für Informatik, 2021, doi:<a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.58\">10.4230/LIPIcs.DISC.2021.58</a>.","ama":"Korhonen J, Paz A, Rybicki J, Schmid S, Suomela J. Brief announcement: Sinkless orientation is hard also in the supported LOCAL model. In: <i>35th International Symposium on Distributed Computing</i>. Vol 209. Schloss Dagstuhl - Leibniz Zentrum für Informatik; 2021. 
doi:<a href=\"https://doi.org/10.4230/LIPIcs.DISC.2021.58\">10.4230/LIPIcs.DISC.2021.58</a>"},"author":[{"first_name":"Janne","full_name":"Korhonen, Janne","last_name":"Korhonen","id":"C5402D42-15BC-11E9-A202-CA2BE6697425"},{"first_name":"Ami","full_name":"Paz, Ami","last_name":"Paz"},{"id":"334EFD2E-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-6432-6646","full_name":"Rybicki, Joel","last_name":"Rybicki","first_name":"Joel"},{"first_name":"Stefan","full_name":"Schmid, Stefan","last_name":"Schmid"},{"last_name":"Suomela","full_name":"Suomela, Jukka","first_name":"Jukka"}],"abstract":[{"text":"We show that any algorithm that solves the sinkless orientation problem in the supported LOCAL model requires Ω(log n) rounds, and this is tight. The supported LOCAL is at least as strong as the usual LOCAL model, and as a corollary this also gives a new, short and elementary proof that shows that the round complexity of the sinkless orientation problem in the deterministic LOCAL model is Ω(log n).","lang":"eng"}],"volume":209,"oa":1,"date_updated":"2021-11-12T09:37:18Z","article_processing_charge":"No","arxiv":1,"acknowledgement":"Janne H. Korhonen: Project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML). Ami Paz: We acknowledge the Austrian Science Fund (FWF) and netIDEE SCIENCE project P 33775-N. 
Stefan Schmid: Research supported by the Austrian Science Fund (FWF) project ADVISE, I 4800-N, 2020-2023.\r\n","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","project":[{"grant_number":"805223","call_identifier":"H2020","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning"}],"oa_version":"Published Version","quality_controlled":"1","_id":"10219","publication_identifier":{"issn":["1868-8969"],"isbn":["9-783-9597-7210-5"]},"ec_funded":1,"doi":"10.4230/LIPIcs.DISC.2021.58","year":"2021","title":"Brief announcement: Sinkless orientation is hard also in the supported LOCAL model","external_id":{"arxiv":["2108.02655"]},"article_number":"58","alternative_title":["LIPIcs"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"ddc":["000"],"type":"conference","day":"04","status":"public","intvolume":"       209","file_date_updated":"2021-11-12T08:27:42Z","publication":"35th International Symposium on Distributed Computing","date_published":"2021-10-04T00:00:00Z","month":"10","language":[{"iso":"eng"}],"publisher":"Schloss Dagstuhl - Leibniz Zentrum für Informatik","scopus_import":"1","department":[{"_id":"DaAl"}],"has_accepted_license":"1","file":[{"success":1,"content_type":"application/pdf","relation":"main_file","creator":"cchlebak","file_id":"10275","file_name":"2021_LIPIcsDISC_Korhonen.pdf","file_size":474242,"date_created":"2021-11-12T08:27:42Z","checksum":"c43188dc2070bbd2bf5fd6fdaf9ce36d","date_updated":"2021-11-12T08:27:42Z","access_level":"open_access"}],"date_created":"2021-11-07T23:01:24Z","conference":{"end_date":"2021-10-08","name":"DISC: Distributed Computing ","location":"Freiburg, Germany","start_date":"2021-10-04"}},{"alternative_title":["ISTA 
Thesis"],"ddc":["000"],"related_material":{"record":[{"id":"10432","relation":"part_of_dissertation","status":"public"},{"status":"public","id":"6673","relation":"part_of_dissertation"},{"status":"public","relation":"part_of_dissertation","id":"5965"},{"id":"10435","relation":"part_of_dissertation","status":"public"}]},"ec_funded":1,"doi":"10.15479/at:ista:10429","year":"2021","title":"On achieving scalability through relaxation","date_updated":"2023-10-17T11:48:55Z","oa":1,"article_processing_charge":"No","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","project":[{"grant_number":"805223","name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020"}],"oa_version":"Published Version","_id":"10429","publication_identifier":{"issn":["2663-337X"]},"publication_status":"published","citation":{"short":"G. Nadiradze, On Achieving Scalability through Relaxation, Institute of Science and Technology Austria, 2021.","ista":"Nadiradze G. 2021. On achieving scalability through relaxation. Institute of Science and Technology Austria.","mla":"Nadiradze, Giorgi. <i>On Achieving Scalability through Relaxation</i>. Institute of Science and Technology Austria, 2021, doi:<a href=\"https://doi.org/10.15479/at:ista:10429\">10.15479/at:ista:10429</a>.","ama":"Nadiradze G. On achieving scalability through relaxation. 2021. doi:<a href=\"https://doi.org/10.15479/at:ista:10429\">10.15479/at:ista:10429</a>","chicago":"Nadiradze, Giorgi. “On Achieving Scalability through Relaxation.” Institute of Science and Technology Austria, 2021. <a href=\"https://doi.org/10.15479/at:ista:10429\">https://doi.org/10.15479/at:ista:10429</a>.","ieee":"G. Nadiradze, “On achieving scalability through relaxation,” Institute of Science and Technology Austria, 2021.","apa":"Nadiradze, G. (2021). <i>On achieving scalability through relaxation</i>. Institute of Science and Technology Austria. 
<a href=\"https://doi.org/10.15479/at:ista:10429\">https://doi.org/10.15479/at:ista:10429</a>"},"author":[{"id":"3279A00C-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-5634-0731","full_name":"Nadiradze, Giorgi","last_name":"Nadiradze","first_name":"Giorgi"}],"abstract":[{"lang":"eng","text":"The scalability of concurrent data structures and distributed algorithms strongly depends on\r\nreducing the contention for shared resources and the costs of synchronization and communication. We show how such cost reductions can be attained by relaxing the strict consistency conditions required by sequential implementations. In the first part of the thesis, we consider relaxation in the context of concurrent data structures. Specifically, in data structures \r\nsuch as priority queues, imposing strong semantics renders scalability impossible, since a correct implementation of the remove operation should return only the element with highest priority. Intuitively, attempting to invoke remove operations concurrently  creates a race condition. This bottleneck  can be circumvented by relaxing semantics of the affected data structure, thus allowing removal of the elements which are no longer required to have the highest priority. We prove that the randomized implementations of relaxed data structures provide provable guarantees on the priority of the removed elements even under concurrency. Additionally, we show that in some cases the relaxed data structures can be used to scale the classical algorithms which are usually implemented with the exact ones. In the second part, we study parallel variants of the  stochastic gradient descent (SGD) algorithm, which distribute computation  among the multiple processors, thus reducing the running time. 
Unfortunately, in order for standard parallel SGD to succeed, each processor has to maintain a local copy of the necessary model parameter, which is identical to the local copies of other processors; the overheads from this perfect consistency in terms of communication and synchronization can negate the speedup gained by distributing the computation. We show that the consistency conditions required by SGD can be  relaxed, allowing the algorithm to be more flexible in terms of tolerating quantized communication, asynchrony, or even crash faults, while its convergence remains asymptotically the same."}],"department":[{"_id":"GradSch"},{"_id":"DaAl"}],"has_accepted_license":"1","degree_awarded":"PhD","file":[{"checksum":"6bf14e9a523387328f016c0689f5e10e","date_created":"2021-12-09T17:47:49Z","file_size":2370859,"file_name":"Thesis_Final_09_12_2021.pdf","access_level":"open_access","date_updated":"2021-12-09T17:47:49Z","success":1,"creator":"gnadirad","file_id":"10436","relation":"main_file","content_type":"application/pdf"},{"access_level":"closed","date_updated":"2022-03-28T12:55:12Z","checksum":"914d6c5ca86bd0add471971a8f4c4341","date_created":"2021-12-09T17:47:49Z","file_size":2596924,"file_name":"Thesis_Final_09_12_2021.zip","creator":"gnadirad","file_id":"10437","relation":"source_file","content_type":"application/zip"}],"date_created":"2021-12-08T21:52:28Z","date_published":"2021-12-09T00:00:00Z","month":"12","language":[{"iso":"eng"}],"publisher":"Institute of Science and Technology Austria","file_date_updated":"2022-03-28T12:55:12Z","page":"132","type":"dissertation","day":"09","status":"public","supervisor":[{"first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","last_name":"Alistarh","full_name":"Alistarh, Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"}]},{"article_processing_charge":"No","volume":35,"oa":1,"date_updated":"2023-09-07T13:31:39Z","arxiv":1,"oa_version":"Published Version","project":[{"call_identifier":"H2020","name":"ISTplus - 
Postdoctoral Fellowships","_id":"260C2330-B435-11E9-9278-68D0E5697425","grant_number":"754411"},{"call_identifier":"H2020","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223"}],"quality_controlled":"1","acknowledgement":"We would like to thank Christopher De Sa for his feedback on an earlier draft of this paper, as well as the anonymous AAAI reviewers for their useful comments. This project has received\r\nfunding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML). Bapi\r\nChatterjee was supported by the European Union’s Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No. 754411 (ISTPlus).","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","_id":"10432","citation":{"ieee":"G. Nadiradze, I. Markov, B. Chatterjee, V. Kungurtsev, and D.-A. Alistarh, “Elastic consistency: A practical consistency model for distributed stochastic gradient descent,” in <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, Virtual, 2021, vol. 35, no. 10, pp. 9037–9045.","apa":"Nadiradze, G., Markov, I., Chatterjee, B., Kungurtsev, V., &#38; Alistarh, D.-A. (2021). Elastic consistency: A practical consistency model for distributed stochastic gradient descent. In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i> (Vol. 35, pp. 9037–9045). Virtual.","chicago":"Nadiradze, Giorgi, Ilia Markov, Bapi Chatterjee, Vyacheslav  Kungurtsev, and Dan-Adrian Alistarh. “Elastic Consistency: A Practical Consistency Model for Distributed Stochastic Gradient Descent.” In <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, 35:9037–45, 2021.","ama":"Nadiradze G, Markov I, Chatterjee B, Kungurtsev V, Alistarh D-A. Elastic consistency: A practical consistency model for distributed stochastic gradient descent. 
In: <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>. Vol 35. ; 2021:9037-9045.","mla":"Nadiradze, Giorgi, et al. “Elastic Consistency: A Practical Consistency Model for Distributed Stochastic Gradient Descent.” <i>Proceedings of the AAAI Conference on Artificial Intelligence</i>, vol. 35, no. 10, 2021, pp. 9037–45.","short":"G. Nadiradze, I. Markov, B. Chatterjee, V. Kungurtsev, D.-A. Alistarh, in:, Proceedings of the AAAI Conference on Artificial Intelligence, 2021, pp. 9037–9045.","ista":"Nadiradze G, Markov I, Chatterjee B, Kungurtsev V, Alistarh D-A. 2021. Elastic consistency: A practical consistency model for distributed stochastic gradient descent. Proceedings of the AAAI Conference on Artificial Intelligence. AAAI: Association for the Advancement of Artificial Intelligence vol. 35, 9037–9045."},"publication_status":"published","author":[{"orcid":"0000-0001-5634-0731","full_name":"Nadiradze, Giorgi","last_name":"Nadiradze","first_name":"Giorgi","id":"3279A00C-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Ilia","full_name":"Markov, Ilia","last_name":"Markov","id":"D0CF4148-C985-11E9-8066-0BDEE5697425"},{"first_name":"Bapi","orcid":"0000-0002-2742-4028","last_name":"Chatterjee","full_name":"Chatterjee, Bapi","id":"3C41A08A-F248-11E8-B48F-1D18A9856A87"},{"last_name":"Kungurtsev","full_name":"Kungurtsev, Vyacheslav","first_name":"Vyacheslav"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","first_name":"Dan-Adrian"}],"abstract":[{"lang":"eng","text":"One key element behind the recent progress of machine learning has been the ability to train machine learning models in large-scale distributed shared-memory and message-passing environments. 
Most of these models are trained employing variants of stochastic gradient descent (SGD) based optimization, but most methods involve some type of consistency relaxation relative to sequential SGD, to mitigate its large communication or synchronization costs at scale. In this paper, we introduce a general consistency condition covering communication-reduced and asynchronous distributed SGD implementations. Our framework, called elastic consistency, decouples the system-specific aspects of the implementation from the SGD convergence requirements, giving a general way to obtain convergence bounds for a wide variety of distributed SGD methods used in practice. Elastic consistency can be used to re-derive or improve several previous convergence bounds in message-passing and shared-memory settings, but also to analyze new models and distribution schemes. As a direct application, we propose and analyze a new synchronization-avoiding scheduling scheme for distributed SGD, and show that it can be used to efficiently train deep convolutional models for image classification."}],"main_file_link":[{"open_access":"1","url":"https://ojs.aaai.org/index.php/AAAI/article/view/17092"}],"related_material":{"record":[{"id":"10429","relation":"dissertation_contains","status":"public"}]},"ec_funded":1,"year":"2021","external_id":{"arxiv":["2001.05918"]},"title":"Elastic consistency: A practical consistency model for distributed stochastic gradient descent","page":"9037-9045","publication":"Proceedings of the AAAI Conference on Artificial Intelligence","issue":"10","type":"conference","day":"18","status":"public","intvolume":"        35","department":[{"_id":"DaAl"}],"date_created":"2021-12-09T09:21:35Z","conference":{"start_date":"2021-02-02","location":"Virtual","end_date":"2021-02-09","name":"AAAI: Association for the Advancement of Artificial 
Intelligence"},"date_published":"2021-05-18T00:00:00Z","month":"05","language":[{"iso":"eng"}]},{"abstract":[{"lang":"eng","text":"Decentralized optimization is emerging as a viable alternative for scalable distributed machine learning, but also introduces new challenges in terms of synchronization costs. To this end, several communication-reduction techniques, such as non-blocking communication, quantization, and local steps, have been explored in the decentralized setting. Due to the complexity of analyzing optimization in such a relaxed setting, this line of work often assumes \\emph{global} communication rounds, which require additional synchronization. In this paper, we consider decentralized optimization in the simpler, but harder to analyze, \\emph{asynchronous gossip} model, in which communication occurs in discrete, randomly chosen pairings among nodes. Perhaps surprisingly, we show that a variant of SGD called \\emph{SwarmSGD} still converges in this setting, even if \\emph{non-blocking communication}, \\emph{quantization}, and \\emph{local steps} are all applied \\emph{in conjunction}, and even if the node data distributions and underlying graph topology are both \\emph{heterogenous}. Our analysis is based on a new connection with multi-dimensional load-balancing processes. 
We implement this algorithm and deploy it in a super-computing environment, showing that it can outperform previous decentralized methods in terms of end-to-end training time, and that it can even rival carefully-tuned large-batch SGD for certain tasks."}],"author":[{"first_name":"Giorgi","full_name":"Nadiradze, Giorgi","last_name":"Nadiradze","orcid":"0000-0001-5634-0731","id":"3279A00C-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Amirmojtaba","last_name":"Sabour","full_name":"Sabour, Amirmojtaba","id":"bcc145fd-e77f-11ea-ae8b-80d661dbff67"},{"first_name":"Peter","full_name":"Davies, Peter","last_name":"Davies","orcid":"0000-0002-5646-9524","id":"11396234-BB50-11E9-B24C-90FCE5697425"},{"full_name":"Li, Shigang","last_name":"Li","first_name":"Shigang"},{"first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"}],"citation":{"ieee":"G. Nadiradze, A. Sabour, P. Davies, S. Li, and D.-A. Alistarh, “Asynchronous decentralized SGD with quantized and local updates,” in <i>35th Conference on Neural Information Processing Systems</i>, Sydney, Australia, 2021.","apa":"Nadiradze, G., Sabour, A., Davies, P., Li, S., &#38; Alistarh, D.-A. (2021). Asynchronous decentralized SGD with quantized and local updates. In <i>35th Conference on Neural Information Processing Systems</i>. Sydney, Australia: Neural Information Processing Systems Foundation.","chicago":"Nadiradze, Giorgi, Amirmojtaba Sabour, Peter Davies, Shigang Li, and Dan-Adrian Alistarh. “Asynchronous Decentralized SGD with Quantized and Local Updates.” In <i>35th Conference on Neural Information Processing Systems</i>. Neural Information Processing Systems Foundation, 2021.","ama":"Nadiradze G, Sabour A, Davies P, Li S, Alistarh D-A. Asynchronous decentralized SGD with quantized and local updates. In: <i>35th Conference on Neural Information Processing Systems</i>. 
Neural Information Processing Systems Foundation; 2021.","mla":"Nadiradze, Giorgi, et al. “Asynchronous Decentralized SGD with Quantized and Local Updates.” <i>35th Conference on Neural Information Processing Systems</i>, Neural Information Processing Systems Foundation, 2021.","short":"G. Nadiradze, A. Sabour, P. Davies, S. Li, D.-A. Alistarh, in:, 35th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2021.","ista":"Nadiradze G, Sabour A, Davies P, Li S, Alistarh D-A. 2021. Asynchronous decentralized SGD with quantized and local updates. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems."},"publication_status":"published","_id":"10435","project":[{"name":"ISTplus - Postdoctoral Fellowships","_id":"260C2330-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"754411"},{"grant_number":"805223","call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425"}],"oa_version":"Published Version","quality_controlled":"1","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","acknowledgement":"We gratefully acknowledge funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML). PD partly conducted this work while at IST Austria and was supported by the European Union’s Horizon 2020 programme under the Marie Skłodowska-Curie grant agreement No. 754411. SL was funded in part by European Research Council (ERC) under the European Union’s Horizon 2020 programme (grant agreement DAPP, No. 678880, and EPiGRAM-HS, No. 
801039).\r\n","arxiv":1,"article_processing_charge":"No","date_updated":"2023-10-17T11:48:56Z","oa":1,"external_id":{"arxiv":["1910.12308"]},"title":"Asynchronous decentralized SGD with quantized and local updates","year":"2021","ec_funded":1,"related_material":{"record":[{"status":"public","relation":"dissertation_contains","id":"10429"}]},"main_file_link":[{"open_access":"1","url":"https://papers.nips.cc/paper/2021/hash/362c99307cdc3f2d8b410652386a9dd1-Abstract.html"}],"status":"public","day":"01","type":"conference","publication":"35th Conference on Neural Information Processing Systems","publisher":"Neural Information Processing Systems Foundation","language":[{"iso":"eng"}],"month":"12","date_published":"2021-12-01T00:00:00Z","conference":{"start_date":"2021-12-06","end_date":"2021-12-14","name":"NeurIPS: Neural Information Processing Systems","location":"Sydney, Australia"},"date_created":"2021-12-09T10:59:12Z","department":[{"_id":"DaAl"}]},{"doi":"10.1007/978-3-030-79527-6_6","year":"2021","title":"Wait-free approximate agreement on graphs","external_id":{"arxiv":["2103.08949"]},"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2103.08949"}],"alternative_title":["LNCS"],"citation":{"apa":"Alistarh, D.-A., Ellen, F., &#38; Rybicki, J. (2021). Wait-free approximate agreement on graphs. In <i>Structural Information and Communication Complexity</i> (Vol. 12810, pp. 87–105). Wrocław, Poland: Springer Nature. <a href=\"https://doi.org/10.1007/978-3-030-79527-6_6\">https://doi.org/10.1007/978-3-030-79527-6_6</a>","ieee":"D.-A. Alistarh, F. Ellen, and J. Rybicki, “Wait-free approximate agreement on graphs,” in <i>Structural Information and Communication Complexity</i>, Wrocław, Poland, 2021, vol. 12810, pp. 87–105.","chicago":"Alistarh, Dan-Adrian, Faith Ellen, and Joel Rybicki. “Wait-Free Approximate Agreement on Graphs.” In <i>Structural Information and Communication Complexity</i>, 12810:87–105. Springer Nature, 2021. 
<a href=\"https://doi.org/10.1007/978-3-030-79527-6_6\">https://doi.org/10.1007/978-3-030-79527-6_6</a>.","mla":"Alistarh, Dan-Adrian, et al. “Wait-Free Approximate Agreement on Graphs.” <i>Structural Information and Communication Complexity</i>, vol. 12810, Springer Nature, 2021, pp. 87–105, doi:<a href=\"https://doi.org/10.1007/978-3-030-79527-6_6\">10.1007/978-3-030-79527-6_6</a>.","ama":"Alistarh D-A, Ellen F, Rybicki J. Wait-free approximate agreement on graphs. In: <i>Structural Information and Communication Complexity</i>. Vol 12810. Springer Nature; 2021:87-105. doi:<a href=\"https://doi.org/10.1007/978-3-030-79527-6_6\">10.1007/978-3-030-79527-6_6</a>","ista":"Alistarh D-A, Ellen F, Rybicki J. 2021. Wait-free approximate agreement on graphs. Structural Information and Communication Complexity. SIROCCO: Structural Information and Communication Complexity, LNCS, vol. 12810, 87–105.","short":"D.-A. Alistarh, F. Ellen, J. Rybicki, in:, Structural Information and Communication Complexity, Springer Nature, 2021, pp. 87–105."},"publication_status":"published","author":[{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","last_name":"Alistarh","full_name":"Alistarh, Dan-Adrian","orcid":"0000-0003-3650-940X"},{"full_name":"Ellen, Faith","last_name":"Ellen","first_name":"Faith"},{"id":"334EFD2E-F248-11E8-B48F-1D18A9856A87","first_name":"Joel","orcid":"0000-0002-6432-6646","full_name":"Rybicki, Joel","last_name":"Rybicki"}],"abstract":[{"text":"Approximate agreement is one of the few variants of consensus that can be solved in a wait-free manner in asynchronous systems where processes communicate by reading and writing to shared memory. In this work, we consider a natural generalisation of approximate agreement on arbitrary undirected connected graphs. 
Each process is given a vertex of the graph as input and, if non-faulty, must output a vertex such that\r\nall the outputs are within distance 1 of one another, and\r\n\r\neach output value lies on a shortest path between two input values.\r\n\r\nFrom prior work, it is known that there is no wait-free algorithm among n ≥ 3 processes for this problem on any cycle of length c ≥ 4, by reduction from 2-set agreement (Castañeda et al. 2018).\r\n\r\nIn this work, we investigate the solvability and complexity of this task on general graphs. We give a new, direct proof of the impossibility of approximate agreement on cycles of length c ≥ 4, via a generalisation of Sperner’s Lemma to convex polygons. We also extend the reduction from 2-set agreement to a larger class of graphs, showing that approximate agreement on these graphs is unsolvable. On the positive side, we present a wait-free algorithm for a class of graphs that properly contains the class of chordal graphs.","lang":"eng"}],"article_processing_charge":"No","volume":12810,"date_updated":"2023-02-23T14:09:49Z","oa":1,"arxiv":1,"quality_controlled":"1","oa_version":"Preprint","user_id":"6785fbc1-c503-11eb-8a32-93094b40e1cf","publication_identifier":{"isbn":["9783030795269"],"issn":["03029743"],"eissn":["16113349"]},"_id":"9823","date_published":"2021-06-20T00:00:00Z","month":"06","language":[{"iso":"eng"}],"scopus_import":"1","publisher":"Springer Nature","department":[{"_id":"DaAl"}],"date_created":"2021-08-08T22:01:29Z","conference":{"start_date":"2021-06-28","end_date":"2021-07-01","name":"SIROCCO: Structural Information and Communication Complexity","location":"Wrocław, Poland"},"type":"conference","day":"20","status":"public","intvolume":"     12810","page":"87-105","publication":"Structural Information and Communication 
Complexity"},{"isi":1,"main_file_link":[{"url":"https://publications.lib.chalmers.se/records/fulltext/232185/232185.pdf","open_access":"1"}],"doi":"10.1016/j.tcs.2021.06.041","year":"2021","external_id":{"isi":["000694718900004"]},"title":"Concurrent linearizable nearest neighbour search in LockFree-kD-tree","volume":886,"date_updated":"2023-08-10T14:27:43Z","oa":1,"article_processing_charge":"No","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","quality_controlled":"1","oa_version":"Submitted Version","_id":"9827","publication_identifier":{"issn":["0304-3975"]},"publication_status":"published","citation":{"chicago":"Chatterjee, Bapi, Ivan Walulya, and Philippas Tsigas. “Concurrent Linearizable Nearest Neighbour Search in LockFree-KD-Tree.” <i>Theoretical Computer Science</i>. Elsevier, 2021. <a href=\"https://doi.org/10.1016/j.tcs.2021.06.041\">https://doi.org/10.1016/j.tcs.2021.06.041</a>.","ieee":"B. Chatterjee, I. Walulya, and P. Tsigas, “Concurrent linearizable nearest neighbour search in LockFree-kD-tree,” <i>Theoretical Computer Science</i>, vol. 886. Elsevier, pp. 27–48, 2021.","apa":"Chatterjee, B., Walulya, I., &#38; Tsigas, P. (2021). Concurrent linearizable nearest neighbour search in LockFree-kD-tree. <i>Theoretical Computer Science</i>. Elsevier. <a href=\"https://doi.org/10.1016/j.tcs.2021.06.041\">https://doi.org/10.1016/j.tcs.2021.06.041</a>","ista":"Chatterjee B, Walulya I, Tsigas P. 2021. Concurrent linearizable nearest neighbour search in LockFree-kD-tree. Theoretical Computer Science. 886, 27–48.","short":"B. Chatterjee, I. Walulya, P. Tsigas, Theoretical Computer Science 886 (2021) 27–48.","mla":"Chatterjee, Bapi, et al. “Concurrent Linearizable Nearest Neighbour Search in LockFree-KD-Tree.” <i>Theoretical Computer Science</i>, vol. 886, Elsevier, 2021, pp. 27–48, doi:<a href=\"https://doi.org/10.1016/j.tcs.2021.06.041\">10.1016/j.tcs.2021.06.041</a>.","ama":"Chatterjee B, Walulya I, Tsigas P. 
Concurrent linearizable nearest neighbour search in LockFree-kD-tree. <i>Theoretical Computer Science</i>. 2021;886:27-48. doi:<a href=\"https://doi.org/10.1016/j.tcs.2021.06.041\">10.1016/j.tcs.2021.06.041</a>"},"author":[{"first_name":"Bapi","last_name":"Chatterjee","full_name":"Chatterjee, Bapi","orcid":"0000-0002-2742-4028","id":"3C41A08A-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Ivan","full_name":"Walulya, Ivan","last_name":"Walulya"},{"last_name":"Tsigas","full_name":"Tsigas, Philippas","first_name":"Philippas"}],"keyword":["Concurrent data structure","kD-tree","Nearest neighbor search","Similarity search","Lock-free","Linearizability"],"abstract":[{"text":"The Nearest neighbour search (NNS) is a fundamental problem in many application domains dealing with multidimensional data. In a concurrent setting, where dynamic modifications are allowed, a linearizable implementation of the NNS is highly desirable.This paper introduces the LockFree-kD-tree (LFkD-tree ): a lock-free concurrent kD-tree, which implements an abstract data type (ADT) that provides the operations Add, Remove, Contains, and NNS. Our implementation is linearizable. The operations in the LFkD-tree use single-word read and compare-and-swap (Image 1 ) atomic primitives, which are readily supported on available multi-core processors. We experimentally evaluate the LFkD-tree using several benchmarks comprising real-world and synthetic datasets. 
The experiments show that the presented design is scalable and achieves significant speed-up compared to the implementations of an existing sequential kD-tree and a recently proposed multidimensional indexing structure, PH-tree.","lang":"eng"}],"department":[{"_id":"DaAl"}],"date_created":"2021-08-08T22:01:31Z","article_type":"original","date_published":"2021-09-13T00:00:00Z","month":"09","language":[{"iso":"eng"}],"publisher":"Elsevier","scopus_import":"1","page":"27-48","publication":"Theoretical Computer Science","type":"journal_article","day":"13","status":"public","intvolume":"       886"},{"external_id":{"isi":["000744439800049"],"arxiv":["2106.01880"]},"title":"Component stability in low-space massively parallel computation","ec_funded":1,"year":"2021","doi":"10.1145/3465084.3467903","isi":1,"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2106.01880"}],"author":[{"first_name":"Artur","full_name":"Czumaj, Artur","last_name":"Czumaj"},{"last_name":"Davies","full_name":"Davies, Peter","orcid":"0000-0002-5646-9524","first_name":"Peter","id":"11396234-BB50-11E9-B24C-90FCE5697425"},{"first_name":"Merav","last_name":"Parter","full_name":"Parter, Merav"}],"abstract":[{"lang":"eng","text":"In this paper, we study the power and limitations of component-stable algorithms in the low-space model of Massively Parallel Computation (MPC). Recently Ghaffari, Kuhn and Uitto (FOCS 2019) introduced the class of component-stable low-space MPC algorithms, which are, informally, defined as algorithms for which the outputs reported by the nodes in different connected components are required to be independent. This very natural notion was introduced to capture most (if not all) of the known efficient MPC algorithms to date, and it was the first general class of MPC algorithms for which one can show non-trivial conditional lower bounds. 
In this paper we enhance the framework of component-stable algorithms and investigate its effect on the complexity of randomized and deterministic low-space MPC. Our key contributions include: 1) We revise and formalize the lifting approach of Ghaffari, Kuhn and Uitto. This requires a very delicate amendment of the notion of component stability, which allows us to fill in gaps in the earlier arguments. 2) We also extend the framework to obtain conditional lower bounds for deterministic algorithms and fine-grained lower bounds that depend on the maximum degree Δ. 3) We demonstrate a collection of natural graph problems for which non-component-stable algorithms break the conditional lower bound obtained for component-stable algorithms. This implies that, for both deterministic and randomized algorithms, component-stable algorithms are conditionally weaker than the non-component-stable ones.\r\n\r\nAltogether our results imply that component-stability might limit the computational power of the low-space MPC model, paving the way for improved upper bounds that escape the conditional lower bound setting of Ghaffari, Kuhn, and Uitto."}],"citation":{"chicago":"Czumaj, Artur, Peter Davies, and Merav Parter. “Component Stability in Low-Space Massively Parallel Computation.” In <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>, 481–491. Association for Computing Machinery, 2021. <a href=\"https://doi.org/10.1145/3465084.3467903\">https://doi.org/10.1145/3465084.3467903</a>.","apa":"Czumaj, A., Davies, P., &#38; Parter, M. (2021). Component stability in low-space massively parallel computation. In <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i> (pp. 481–491). Virtual, Italy: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3465084.3467903\">https://doi.org/10.1145/3465084.3467903</a>","ieee":"A. Czumaj, P. Davies, and M. 
Parter, “Component stability in low-space massively parallel computation,” in <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>, Virtual, Italy, 2021, pp. 481–491.","ista":"Czumaj A, Davies P, Parter M. 2021. Component stability in low-space massively parallel computation. Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing. PODC: Principles of Distributed Computing, 481–491.","short":"A. Czumaj, P. Davies, M. Parter, in:, Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing, Association for Computing Machinery, 2021, pp. 481–491.","ama":"Czumaj A, Davies P, Parter M. Component stability in low-space massively parallel computation. In: <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>. Association for Computing Machinery; 2021:481–491. doi:<a href=\"https://doi.org/10.1145/3465084.3467903\">10.1145/3465084.3467903</a>","mla":"Czumaj, Artur, et al. “Component Stability in Low-Space Massively Parallel Computation.” <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>, Association for Computing Machinery, 2021, pp. 481–491, doi:<a href=\"https://doi.org/10.1145/3465084.3467903\">10.1145/3465084.3467903</a>."},"publication_status":"published","quality_controlled":"1","project":[{"grant_number":"754411","_id":"260C2330-B435-11E9-9278-68D0E5697425","name":"ISTplus - Postdoctoral Fellowships","call_identifier":"H2020"}],"oa_version":"Submitted Version","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","acknowledgement":"This work is partially supported by a Weizmann-UK Making Connections Grant, the Centre for Discrete Mathematics and its Applications (DIMAP), IBM Faculty Award, EPSRC award EP/V01305X/1, European Research Council (ERC) Grant No. 949083, the Minerva foundation with funding from the Federal German Ministry for Education and Research No. 
713238, and the European Union’s Horizon 2020 programme under the Marie Skłodowska-Curie grant agreement No 754411.","publication_identifier":{"isbn":["9781450385480"]},"_id":"9933","article_processing_charge":"No","oa":1,"date_updated":"2023-08-17T07:11:32Z","arxiv":1,"language":[{"iso":"eng"}],"publisher":"Association for Computing Machinery","date_published":"2021-07-21T00:00:00Z","month":"07","date_created":"2021-08-17T18:11:16Z","conference":{"location":"Virtual, Italy","name":"PODC: Principles of Distributed Computing","end_date":"2021-07-30","start_date":"2021-07-26"},"department":[{"_id":"DaAl"}],"status":"public","type":"conference","day":"21","page":"481–491","publication":"Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing"},{"conference":{"start_date":"2021-07-26","end_date":"2021-07-30","location":"Virtual, Italy","name":"PODC: Symposium on Principles of Distributed Computing"},"date_created":"2021-08-17T18:14:15Z","department":[{"_id":"DaAl"}],"publisher":"Association for Computing Machinery","language":[{"iso":"eng"}],"month":"07","date_published":"2021-07-21T00:00:00Z","publication":"Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing","page":"469–479","status":"public","day":"21","type":"conference","main_file_link":[{"url":"http://wrap.warwick.ac.uk/153753","open_access":"1"}],"isi":1,"external_id":{"isi":["000744439800048"]},"title":"Improved deterministic (Δ+1) coloring in low-space MPC","year":"2021","doi":"10.1145/3465084.3467937","ec_funded":1,"publication_identifier":{"isbn":["978-1-4503-8548-0"]},"_id":"9935","oa_version":"Submitted Version","quality_controlled":"1","project":[{"grant_number":"754411","call_identifier":"H2020","name":"ISTplus - Postdoctoral Fellowships","_id":"260C2330-B435-11E9-9278-68D0E5697425"}],"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","acknowledgement":"This work is partially supported by a Weizmann-UK Making Connections Grant, the Centre for Discrete 
Mathematics and its Applications (DIMAP), IBM Faculty Award, EPSRC award EP/V01305X/1, European Research Council (ERC) Grant No. 949083, the Minerva foundation with funding from the Federal German Ministry for Education and Research No. 713238, and the European Union’s Horizon 2020 programme under the Marie Skłodowska-Curie grant agreement No 754411.","article_processing_charge":"No","oa":1,"date_updated":"2023-08-17T07:11:03Z","abstract":[{"lang":"eng","text":"We present a deterministic O(log log log n)-round low-space Massively Parallel Computation (MPC) algorithm for the classical problem of (Δ+1)-coloring on n-vertex graphs. In this model, every machine has sublinear local space of size n^φ for any arbitrary constant φ \\in (0,1). Our algorithm works under the relaxed setting where each machine is allowed to perform exponential local computations, while respecting the n^φ space and bandwidth limitations.\r\n\r\nOur key technical contribution is a novel derandomization of the ingenious (Δ+1)-coloring local algorithm by Chang-Li-Pettie (STOC 2018, SIAM J. Comput. 2020). The Chang-Li-Pettie algorithm runs in T_local =poly(loglog n) rounds, which sets the state-of-the-art randomized round complexity for the problem in the local model. Our derandomization employs a combination of tools, notably pseudorandom generators (PRG) and bounded-independence hash functions.\r\n\r\nThe achieved round complexity of O(logloglog n) rounds matches the bound of log(T_local ), which currently serves an upper bound barrier for all known randomized algorithms for locally-checkable problems in this model. 
Furthermore, no deterministic sublogarithmic low-space MPC algorithms for the (Δ+1)-coloring problem have been known before."}],"author":[{"last_name":"Czumaj","full_name":"Czumaj, Artur","first_name":"Artur"},{"last_name":"Davies","full_name":"Davies, Peter","orcid":"0000-0002-5646-9524","first_name":"Peter","id":"11396234-BB50-11E9-B24C-90FCE5697425"},{"first_name":"Merav","last_name":"Parter","full_name":"Parter, Merav"}],"citation":{"mla":"Czumaj, Artur, et al. “Improved Deterministic (Δ+1) Coloring in Low-Space MPC.” <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>, Association for Computing Machinery, 2021, pp. 469–479, doi:<a href=\"https://doi.org/10.1145/3465084.3467937\">10.1145/3465084.3467937</a>.","ama":"Czumaj A, Davies P, Parter M. Improved deterministic (Δ+1) coloring in low-space MPC. In: <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>. Association for Computing Machinery; 2021:469–479. doi:<a href=\"https://doi.org/10.1145/3465084.3467937\">10.1145/3465084.3467937</a>","short":"A. Czumaj, P. Davies, M. Parter, in:, Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing, Association for Computing Machinery, 2021, pp. 469–479.","ista":"Czumaj A, Davies P, Parter M. 2021. Improved deterministic (Δ+1) coloring in low-space MPC. Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing. PODC: Symposium on Principles of Distributed Computing, 469–479.","ieee":"A. Czumaj, P. Davies, and M. Parter, “Improved deterministic (Δ+1) coloring in low-space MPC,” in <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>, Virtual, Italy, 2021, pp. 469–479.","apa":"Czumaj, A., Davies, P., &#38; Parter, M. (2021). Improved deterministic (Δ+1) coloring in low-space MPC. In <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i> (pp. 469–479). Virtual, Italy: Association for Computing Machinery. 
<a href=\"https://doi.org/10.1145/3465084.3467937\">https://doi.org/10.1145/3465084.3467937</a>","chicago":"Czumaj, Artur, Peter Davies, and Merav Parter. “Improved Deterministic (Δ+1) Coloring in Low-Space MPC.” In <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>, 469–479. Association for Computing Machinery, 2021. <a href=\"https://doi.org/10.1145/3465084.3467937\">https://doi.org/10.1145/3465084.3467937</a>."},"publication_status":"published"},{"scopus_import":"1","title":"Comparison dynamics in population protocols","external_id":{"isi":["000744439800005"]},"publisher":"Association for Computing Machinery","language":[{"iso":"eng"}],"doi":"10.1145/3465084.3467915","year":"2021","month":"07","date_published":"2021-07-21T00:00:00Z","conference":{"location":"Virtual, Italy","end_date":"2021-07-30","name":"PODC: Symposium on Principles of Distributed Computing","start_date":"2021-07-26"},"date_created":"2021-08-22T22:01:20Z","isi":1,"department":[{"_id":"DaAl"}],"abstract":[{"text":"There has recently been a surge of interest in the computational and complexity properties of the population model, which assumes n anonymous, computationally-bounded nodes, interacting at random, with the goal of jointly computing global predicates. Significant work has gone towards investigating majority or consensus dynamics in this model: that is, assuming that every node is initially in one of two states X or Y, determine which state had higher initial count.\r\n\r\nIn this paper, we consider a natural generalization of majority/consensus, which we call comparison : in its simplest formulation, we are given two baseline states, X and Y, present in any initial configuration in fixed, but possibly small counts. One of these states has higher count than the other: we will assume |X_0| > C |Y_0| for some constant C > 1. 
The challenge is to design a protocol by which nodes can quickly and reliably decide on which of the baseline states X_0 and Y_0 has higher initial count. We begin by analyzing a simple and general dynamics solving the above comparison problem, which uses O( log n ) states per node, and converges in O(log n) (parallel) time, with high probability, to a state where the whole population votes on opinions X or Y at rates proportional to the initial concentrations of |X_0| vs. |Y_0|. We then describe how this procedure can be bootstrapped to solve comparison, i.e. have every node in the population reach the \"correct'' decision, with probability 1 - o(1), at the cost of O (log log n) additional states. Further, we prove that this dynamics is self-stabilizing, in the sense that it converges to the correct decision from arbitrary initial states, and leak-robust, in the sense that it can withstand spurious faulty reactions, which are known to occur in practical implementations of population protocols. Our analysis is based on a new martingale concentration result relating the discrete-time evolution of a population protocol to its expected (steady-state) analysis, which should be a useful tool when analyzing opinion dynamics and epidemic dissemination in the population model.","lang":"eng"}],"author":[{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","last_name":"Alistarh","full_name":"Alistarh, Dan-Adrian","orcid":"0000-0003-3650-940X"},{"first_name":"Martin","full_name":"Töpfer, Martin","last_name":"Töpfer","id":"4B865388-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Uznański, Przemysław","last_name":"Uznański","first_name":"Przemysław"}],"status":"public","day":"21","citation":{"apa":"Alistarh, D.-A., Töpfer, M., &#38; Uznański, P. (2021). Comparison dynamics in population protocols. In <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i> (pp. 55–65). Virtual, Italy: Association for Computing Machinery. 
<a href=\"https://doi.org/10.1145/3465084.3467915\">https://doi.org/10.1145/3465084.3467915</a>","ieee":"D.-A. Alistarh, M. Töpfer, and P. Uznański, “Comparison dynamics in population protocols,” in <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>, Virtual, Italy, 2021, pp. 55–65.","chicago":"Alistarh, Dan-Adrian, Martin Töpfer, and Przemysław Uznański. “Comparison Dynamics in Population Protocols.” In <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>, 55–65. Association for Computing Machinery, 2021. <a href=\"https://doi.org/10.1145/3465084.3467915\">https://doi.org/10.1145/3465084.3467915</a>.","ama":"Alistarh D-A, Töpfer M, Uznański P. Comparison dynamics in population protocols. In: <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>. Association for Computing Machinery; 2021:55-65. doi:<a href=\"https://doi.org/10.1145/3465084.3467915\">10.1145/3465084.3467915</a>","mla":"Alistarh, Dan-Adrian, et al. “Comparison Dynamics in Population Protocols.” <i>Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing</i>, Association for Computing Machinery, 2021, pp. 55–65, doi:<a href=\"https://doi.org/10.1145/3465084.3467915\">10.1145/3465084.3467915</a>.","short":"D.-A. Alistarh, M. Töpfer, P. Uznański, in:, Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing, Association for Computing Machinery, 2021, pp. 55–65.","ista":"Alistarh D-A, Töpfer M, Uznański P. 2021. Comparison dynamics in population protocols. Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing. 
PODC: Symposium on Principles of Distributed Computing, 55–65."},"publication_status":"published","type":"conference","publication_identifier":{"isbn":["9781450385480"]},"_id":"9951","oa_version":"None","quality_controlled":"1","acknowledgement":"We would like to thank Rati Gelashvili for very useful discussions, and the PODC anonymous reviewers for their careful reading of our paper, and for their useful remarks. This work is partially supported by the Polish National Science Center (NCN) grant UMO2017/25/B/ST6/02010.","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","publication":"Proceedings of the 2021 ACM Symposium on Principles of Distributed Computing","page":"55-65","article_processing_charge":"No","date_updated":"2023-08-11T10:56:04Z"}]
