[{"publication":"SIAM Journal on Computing","external_id":{"arxiv":["1811.01421"],"isi":["001082972300004"]},"related_material":{"record":[{"id":"6676","status":"public","relation":"earlier_version"}]},"title":"Why extension-based proofs fail","main_file_link":[{"url":"https://arxiv.org/abs/1811.01421","open_access":"1"}],"issue":"4","scopus_import":"1","project":[{"name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"805223"}],"citation":{"ieee":"D.-A. Alistarh, J. Aspnes, F. Ellen, R. Gelashvili, and L. Zhu, “Why extension-based proofs fail,” <i>SIAM Journal on Computing</i>, vol. 52, no. 4. Society for Industrial and Applied Mathematics, pp. 913–944, 2023.","chicago":"Alistarh, Dan-Adrian, James Aspnes, Faith Ellen, Rati Gelashvili, and Leqi Zhu. “Why Extension-Based Proofs Fail.” <i>SIAM Journal on Computing</i>. Society for Industrial and Applied Mathematics, 2023. <a href=\"https://doi.org/10.1137/20M1375851\">https://doi.org/10.1137/20M1375851</a>.","ama":"Alistarh D-A, Aspnes J, Ellen F, Gelashvili R, Zhu L. Why extension-based proofs fail. <i>SIAM Journal on Computing</i>. 2023;52(4):913-944. doi:<a href=\"https://doi.org/10.1137/20M1375851\">10.1137/20M1375851</a>","short":"D.-A. Alistarh, J. Aspnes, F. Ellen, R. Gelashvili, L. Zhu, SIAM Journal on Computing 52 (2023) 913–944.","ista":"Alistarh D-A, Aspnes J, Ellen F, Gelashvili R, Zhu L. 2023. Why extension-based proofs fail. SIAM Journal on Computing. 52(4), 913–944.","mla":"Alistarh, Dan-Adrian, et al. “Why Extension-Based Proofs Fail.” <i>SIAM Journal on Computing</i>, vol. 52, no. 4, Society for Industrial and Applied Mathematics, 2023, pp. 913–44, doi:<a href=\"https://doi.org/10.1137/20M1375851\">10.1137/20M1375851</a>.","apa":"Alistarh, D.-A., Aspnes, J., Ellen, F., Gelashvili, R., &#38; Zhu, L. (2023). Why extension-based proofs fail. <i>SIAM Journal on Computing</i>. Society for Industrial and Applied Mathematics. <a href=\"https://doi.org/10.1137/20M1375851\">https://doi.org/10.1137/20M1375851</a>"},"arxiv":1,"publication_status":"published","oa_version":"Preprint","page":"913-944","day":"25","ec_funded":1,"abstract":[{"lang":"eng","text":"We introduce extension-based proofs, a class of impossibility proofs that includes valency arguments. They are modelled as an interaction between a prover and a protocol. Using proofs based on combinatorial topology, it has been shown that it is impossible to deterministically solve -set agreement among  processes or approximate agreement on a cycle of length 4 among  processes in a wait-free manner in asynchronous models where processes communicate using objects that can be constructed from shared registers. However, it was unknown whether proofs based on simpler techniques were possible. 
We show that these impossibility results cannot be obtained by extension-based proofs in the iterated snapshot model and, hence, extension-based proofs are limited in power."}],"status":"public","date_updated":"2023-12-13T12:28:29Z","isi":1,"article_type":"original","volume":52,"month":"07","quality_controlled":"1","publication_identifier":{"eissn":["1095-7111"],"issn":["0097-5397"]},"article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","intvolume":"        52","date_created":"2023-09-24T22:01:11Z","department":[{"_id":"DaAl"}],"acknowledgement":"We would like to thank Valerie King, Toniann Pitassi, and Michael Saks for helpful discussions and Shi Hao Liu for his useful feedback.\r\nThis research was supported by the Natural Science and Engineering Research Council of Canada under grants RGPIN-2015-05080 and RGPIN-2020-04178, a postgraduate scholarship, and a postdoctoral fellowship; a University of Toronto postdoctoral fellowship; the National Science Foundation under grants CCF-1217921, CCF-1301926, CCF-1637385, CCF-1650596, and IIS-1447786; the U.S. Department of Energy under grant ER26116/DE-SC0008923; the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme grant agreement 805223 ScaleML; and the Oracle and Intel corporations. Some of the work on this paper was done while Faith Ellen was visiting IST Austria.","date_published":"2023-07-25T00:00:00Z","author":[{"orcid":"0000-0003-3650-940X","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian"},{"full_name":"Aspnes, James","first_name":"James","last_name":"Aspnes"},{"first_name":"Faith","full_name":"Ellen, Faith","last_name":"Ellen"},{"last_name":"Gelashvili","first_name":"Rati","full_name":"Gelashvili, Rati"},{"last_name":"Zhu","id":"a2117c59-cee4-11ed-b9d0-874ecf0f8ac5","first_name":"Leqi","full_name":"Zhu, Leqi"}],"oa":1,"_id":"14364","type":"journal_article","language":[{"iso":"eng"}],"doi":"10.1137/20M1375851","publisher":"Society for Industrial and Applied Mathematics","year":"2023"},{"abstract":[{"text":"We show for the first time that large-scale generative pretrained transformer (GPT) family models can be pruned to at least 50% sparsity in one-shot, without any retraining, at minimal loss of accuracy. This is achieved via a new pruning method called SparseGPT, specifically designed to work efficiently and accurately on massive GPT-family models. We can execute SparseGPT on the largest available open-source models, OPT-175B and BLOOM-176B, in under 4.5 hours, and can reach 60% unstructured sparsity with negligible increase in perplexity: remarkably, more than 100 billion weights from these models can be ignored at inference time. SparseGPT generalizes to semi-structured (2:4 and 4:8) patterns, and is compatible with weight quantization approaches. 
The code is available at: https://github.com/IST-DASLab/sparsegpt.","lang":"eng"}],"ec_funded":1,"day":"30","publication_status":"published","oa_version":"Preprint","page":"10323-10337","status":"public","acknowledged_ssus":[{"_id":"ScienComp"}],"date_updated":"2023-10-31T09:59:42Z","title":"SparseGPT: Massive language models can be accurately pruned in one-shot","external_id":{"arxiv":["2301.00774"]},"publication":"Proceedings of the 40th International Conference on Machine Learning","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2301.00774"}],"alternative_title":["PMLR"],"scopus_import":"1","arxiv":1,"citation":{"mla":"Frantar, Elias, and Dan-Adrian Alistarh. “SparseGPT: Massive Language Models Can Be Accurately Pruned in One-Shot.” <i>Proceedings of the 40th International Conference on Machine Learning</i>, vol. 202, ML Research Press, 2023, pp. 10323–37.","short":"E. Frantar, D.-A. Alistarh, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 10323–10337.","apa":"Frantar, E., &#38; Alistarh, D.-A. (2023). SparseGPT: Massive language models can be accurately pruned in one-shot. In <i>Proceedings of the 40th International Conference on Machine Learning</i> (Vol. 202, pp. 10323–10337). Honolulu, Hawaii, HI, United States: ML Research Press.","ista":"Frantar E, Alistarh D-A. 2023. SparseGPT: Massive language models can be accurately pruned in one-shot. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 10323–10337.","ama":"Frantar E, Alistarh D-A. SparseGPT: Massive language models can be accurately pruned in one-shot. In: <i>Proceedings of the 40th International Conference on Machine Learning</i>. Vol 202. ML Research Press; 2023:10323-10337.","chicago":"Frantar, Elias, and Dan-Adrian Alistarh. “SparseGPT: Massive Language Models Can Be Accurately Pruned in One-Shot.” In <i>Proceedings of the 40th International Conference on Machine Learning</i>, 202:10323–37. ML Research Press, 2023.","ieee":"E. Frantar and D.-A. Alistarh, “SparseGPT: Massive language models can be accurately pruned in one-shot,” in <i>Proceedings of the 40th International Conference on Machine Learning</i>, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 10323–10337."},"project":[{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223","call_identifier":"H2020"}],"date_published":"2023-07-30T00:00:00Z","_id":"14458","oa":1,"author":[{"first_name":"Elias","full_name":"Frantar, Elias","id":"09a8f98d-ec99-11ea-ae11-c063a7b7fe5f","last_name":"Frantar"},{"full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"}],"type":"conference","year":"2023","publisher":"ML Research Press","language":[{"iso":"eng"}],"volume":202,"publication_identifier":{"eissn":["2640-3498"]},"quality_controlled":"1","month":"07","intvolume":"       202","conference":{"name":"ICML: International Conference on Machine Learning","start_date":"2023-07-23","end_date":"2023-07-29","location":"Honolulu, Hawaii, HI, United States"},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","department":[{"_id":"DaAl"}],"acknowledgement":"The authors gratefully acknowledge funding from the European Research Council (ERC) under the European Union’s Horizon 2020 programme (grant agreement No. 
805223 ScaleML), as well as experimental support from Eldar Kurtic, and from the IST Austria IT department, in particular Stefano Elefante, Andrei Hornoiu, and Alois Schloegl.","date_created":"2023-10-29T23:01:16Z"},{"intvolume":"       202","conference":{"location":"Honolulu, Hawaii, HI, United States","end_date":"2023-07-29","start_date":"2023-07-23","name":"ICML: International Conference on Machine Learning"},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","department":[{"_id":"DaAl"}],"acknowledgement":"We would like to thank Elias Frantar for his valuable assistance and support at the outset of this project, and the anonymous ICML and SNN reviewers for very constructive feedback. EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35. DA acknowledges generous ERC support, via Starting Grant 805223 ScaleML. ","date_created":"2023-10-29T23:01:17Z","volume":202,"publication_identifier":{"eissn":["2640-3498"]},"quality_controlled":"1","month":"07","type":"conference","year":"2023","publisher":"ML Research Press","language":[{"iso":"eng"}],"date_published":"2023-07-30T00:00:00Z","_id":"14460","oa":1,"author":[{"full_name":"Nikdan, Mahdi","first_name":"Mahdi","last_name":"Nikdan","id":"66374281-f394-11eb-9cf6-869147deecc0"},{"first_name":"Tommaso","full_name":"Pegolotti, Tommaso","last_name":"Pegolotti"},{"full_name":"Iofinova, Eugenia B","first_name":"Eugenia B","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","last_name":"Iofinova","orcid":"0000-0002-7778-3221"},{"first_name":"Eldar","full_name":"Kurtic, Eldar","id":"47beb3a5-07b5-11eb-9b87-b108ec578218","last_name":"Kurtic"},{"orcid":"0000-0003-3650-940X","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian"}],"alternative_title":["PMLR"],"scopus_import":"1","arxiv":1,"citation":{"ieee":"M. Nikdan, T. Pegolotti, E. B. Iofinova, E. Kurtic, and D.-A. Alistarh, “SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge,” in <i>Proceedings of the 40th International Conference on Machine Learning</i>, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 26215–26227.","chicago":"Nikdan, Mahdi, Tommaso Pegolotti, Eugenia B Iofinova, Eldar Kurtic, and Dan-Adrian Alistarh. “SparseProp: Efficient Sparse Backpropagation for Faster Training of Neural Networks at the Edge.” In <i>Proceedings of the 40th International Conference on Machine Learning</i>, 202:26215–27. ML Research Press, 2023.","ama":"Nikdan M, Pegolotti T, Iofinova EB, Kurtic E, Alistarh D-A. SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. In: <i>Proceedings of the 40th International Conference on Machine Learning</i>. Vol 202. ML Research Press; 2023:26215-26227.","mla":"Nikdan, Mahdi, et al. “SparseProp: Efficient Sparse Backpropagation for Faster Training of Neural Networks at the Edge.” <i>Proceedings of the 40th International Conference on Machine Learning</i>, vol. 202, ML Research Press, 2023, pp. 26215–27.","apa":"Nikdan, M., Pegolotti, T., Iofinova, E. B., Kurtic, E., &#38; Alistarh, D.-A. (2023). SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. In <i>Proceedings of the 40th International Conference on Machine Learning</i> (Vol. 202, pp. 26215–26227). Honolulu, Hawaii, HI, United States: ML Research Press.","ista":"Nikdan M, Pegolotti T, Iofinova EB, Kurtic E, Alistarh D-A. 2023. 
SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 26215–26227.","short":"M. Nikdan, T. Pegolotti, E.B. Iofinova, E. Kurtic, D.-A. Alistarh, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 26215–26227."},"project":[{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223","call_identifier":"H2020"}],"title":"SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge","external_id":{"arxiv":["2302.04852"]},"publication":"Proceedings of the 40th International Conference on Machine Learning","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2302.04852"}],"status":"public","date_updated":"2023-10-31T09:33:51Z","abstract":[{"text":"We provide an efficient implementation of the backpropagation algorithm, specialized to the case where the weights of the neural network being trained are sparse. Our algorithm is general, as it applies to arbitrary (unstructured) sparsity and common layer types (e.g., convolutional or linear). We provide a fast vectorized implementation on commodity CPUs, and show that it can yield speedups in end-to-end runtime experiments, both in transfer learning using already-sparsified networks, and in training sparse networks from scratch. Thus, our results provide the first support for sparse training on commodity hardware.","lang":"eng"}],"ec_funded":1,"day":"30","oa_version":"Preprint","page":"26215-26227","publication_status":"published"},{"type":"conference","year":"2023","publisher":"ML Research Press","language":[{"iso":"eng"}],"date_published":"2023-07-30T00:00:00Z","_id":"14461","oa":1,"author":[{"first_name":"Ilia","full_name":"Markov, Ilia","id":"D0CF4148-C985-11E9-8066-0BDEE5697425","last_name":"Markov"},{"last_name":"Vladu","first_name":"Adrian","full_name":"Vladu, Adrian"},{"last_name":"Guo","full_name":"Guo, Qi","first_name":"Qi"},{"full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh"}],"intvolume":"       202","conference":{"name":"ICML: International Conference on Machine Learning","start_date":"2023-07-23","end_date":"2023-07-29","location":"Honolulu, Hawaii, HI, United States"},"article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","acknowledgement":"The authors gratefully acknowledge funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), as well as experimental support from the IST Austria IT department, in particular Stefano Elefante, Andrei Hornoiu, and Alois Schloegl. 
AV acknowledges the support of the French Agence Nationale de la Recherche (ANR), under grant ANR-21-CE48-0016 (project COMCOPT), the support of Fondation Hadamard with a PRMO grant, and the support of CNRS with a CoopIntEER IEA grant (project ALFRED).","department":[{"_id":"DaAl"}],"date_created":"2023-10-29T23:01:17Z","volume":202,"publication_identifier":{"eissn":["2640-3498"]},"quality_controlled":"1","month":"07","status":"public","acknowledged_ssus":[{"_id":"ScienComp"}],"date_updated":"2023-10-31T09:40:45Z","abstract":[{"lang":"eng","text":"Communication-reduction techniques are a popular way to improve scalability in data-parallel training of deep neural networks (DNNs). The recent emergence of large language models such as GPT has created the need for new approaches to exploit data-parallelism. Among these, fully-sharded data parallel (FSDP) training is highly popular, yet it still encounters scalability bottlenecks. One reason is that applying compression techniques to FSDP is challenging: as the vast majority of the communication involves the model’s weights, direct compression alters convergence and leads to accuracy loss. We present QSDP, a variant of FSDP which supports both gradient and weight quantization with theoretical guarantees, is simple to implement and has essentially no overheads. To derive QSDP we prove that a natural modification of SGD achieves convergence even when we only maintain quantized weights, and thus the domain over which we train consists of quantized points and is, therefore, highly non-convex. We validate this approach by training GPT-family models with up to 1.3 billion parameters on a multi-node cluster. Experiments show that QSDP preserves model accuracy, while completely removing the communication bottlenecks of FSDP, providing end-to-end speedups of up to 2.2x."}],"ec_funded":1,"day":"30","page":"24020-24044","publication_status":"published","oa_version":"Preprint","alternative_title":["PMLR"],"scopus_import":"1","citation":{"chicago":"Markov, Ilia, Adrian Vladu, Qi Guo, and Dan-Adrian Alistarh. “Quantized Distributed Training of Large Models with Convergence Guarantees.” In <i>Proceedings of the 40th International Conference on Machine Learning</i>, 202:24020–44. ML Research Press, 2023.","ieee":"I. Markov, A. Vladu, Q. Guo, and D.-A. Alistarh, “Quantized distributed training of large models with convergence guarantees,” in <i>Proceedings of the 40th International Conference on Machine Learning</i>, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 24020–24044.","short":"I. Markov, A. Vladu, Q. Guo, D.-A. Alistarh, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 24020–24044.","apa":"Markov, I., Vladu, A., Guo, Q., &#38; Alistarh, D.-A. (2023). Quantized distributed training of large models with convergence guarantees. In <i>Proceedings of the 40th International Conference on Machine Learning</i> (Vol. 202, pp. 24020–24044). Honolulu, Hawaii, HI, United States: ML Research Press.","ista":"Markov I, Vladu A, Guo Q, Alistarh D-A. 2023. Quantized distributed training of large models with convergence guarantees. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 24020–24044.","mla":"Markov, Ilia, et al. “Quantized Distributed Training of Large Models with Convergence Guarantees.” <i>Proceedings of the 40th International Conference on Machine Learning</i>, vol. 202, ML Research Press, 2023, pp. 
24020–44.","ama":"Markov I, Vladu A, Guo Q, Alistarh D-A. Quantized distributed training of large models with convergence guarantees. In: <i>Proceedings of the 40th International Conference on Machine Learning</i>. Vol 202. ML Research Press; 2023:24020-24044."},"arxiv":1,"project":[{"name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223","call_identifier":"H2020"}],"title":"Quantized distributed training of large models with convergence guarantees","external_id":{"arxiv":["2302.02390"]},"publication":"Proceedings of the 40th International Conference on Machine Learning","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2302.02390"}]},{"acknowledgement":"The authors would like to sincerely thank Sara Hooker for her feedback during the development of this work. EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35. AP and DA acknowledge generous ERC support, via Starting Grant 805223 ScaleML.","department":[{"_id":"DaAl"},{"_id":"ChLa"}],"date_created":"2024-01-10T08:42:40Z","conference":{"location":"Vancouver, BC, Canada","name":"CVPR: Conference on Computer Vision and Pattern Recognition","end_date":"2023-06-24","start_date":"2023-06-17"},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","publication_identifier":{"eisbn":["9798350301298"],"eissn":["2575-7075"]},"quality_controlled":"1","month":"08","isi":1,"year":"2023","publisher":"IEEE","doi":"10.1109/cvpr52729.2023.02334","language":[{"iso":"eng"}],"type":"conference","_id":"14771","oa":1,"author":[{"last_name":"Iofinova","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","orcid":"0000-0002-7778-3221","first_name":"Eugenia B","full_name":"Iofinova, Eugenia B"},{"id":"32D78294-F248-11E8-B48F-1D18A9856A87","last_name":"Peste","full_name":"Peste, Elena-Alexandra","first_name":"Elena-Alexandra"},{"full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"}],"date_published":"2023-08-22T00:00:00Z","citation":{"ieee":"E. B. Iofinova, E.-A. Peste, and D.-A. Alistarh, “Bias in pruned vision models: In-depth analysis and countermeasures,” in <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Vancouver, BC, Canada, 2023, pp. 24364–24373.","chicago":"Iofinova, Eugenia B, Elena-Alexandra Peste, and Dan-Adrian Alistarh. “Bias in Pruned Vision Models: In-Depth Analysis and Countermeasures.” In <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 24364–73. IEEE, 2023. <a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">https://doi.org/10.1109/cvpr52729.2023.02334</a>.","ama":"Iofinova EB, Peste E-A, Alistarh D-A. Bias in pruned vision models: In-depth analysis and countermeasures. In: <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. IEEE; 2023:24364-24373. doi:<a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">10.1109/cvpr52729.2023.02334</a>","short":"E.B. Iofinova, E.-A. Peste, D.-A. Alistarh, in:, 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition, IEEE, 2023, pp. 24364–24373.","mla":"Iofinova, Eugenia B., et al. “Bias in Pruned Vision Models: In-Depth Analysis and Countermeasures.” <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, IEEE, 2023, pp. 
24364–73, doi:<a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">10.1109/cvpr52729.2023.02334</a>.","ista":"Iofinova EB, Peste E-A, Alistarh D-A. 2023. Bias in pruned vision models: In-depth analysis and countermeasures. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Conference on Computer Vision and Pattern Recognition, 24364–24373.","apa":"Iofinova, E. B., Peste, E.-A., &#38; Alistarh, D.-A. (2023). Bias in pruned vision models: In-depth analysis and countermeasures. In <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 24364–24373). Vancouver, BC, Canada: IEEE. <a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">https://doi.org/10.1109/cvpr52729.2023.02334</a>"},"arxiv":1,"project":[{"grant_number":" W1260-N35","name":"Vienna Graduate School on Computational Optimization","_id":"9B9290DE-BA93-11EA-9121-9846C619BF3A"},{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","call_identifier":"H2020","grant_number":"805223"}],"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2304.12622"}],"title":"Bias in pruned vision models: In-depth analysis and countermeasures","external_id":{"arxiv":["2304.12622"],"isi":["001062531308068"]},"related_material":{"link":[{"url":"https://github.com/IST-DASLab/pruned-vision-model-bias","relation":"software"}]},"publication":"2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition","date_updated":"2024-01-10T08:59:26Z","status":"public","abstract":[{"text":"Pruning—that is, setting a significant subset of the parameters of a neural network to zero—is one of the most popular methods of model compression. Yet, several recent works have raised the issue that pruning may induce or exacerbate bias in the output of the compressed model. Despite existing evidence for this phenomenon, the relationship between neural network pruning and induced bias is not well-understood. In this work, we systematically investigate and characterize this phenomenon in Convolutional Neural Networks for computer vision. First, we show that it is in fact possible to obtain highly-sparse models, e.g. with less than 10% remaining weights, which do not decrease in accuracy nor substantially increase in bias when compared to dense models. At the same time, we also find that, at higher sparsities, pruned models exhibit higher uncertainty in their outputs, as well as increased correlations, which we directly link to increased bias. We propose easy-to-use criteria which, based only on the uncompressed model, establish whether bias will increase with pruning, and identify the samples most susceptible to biased predictions post-compression. Our code can be found at https://github.com/IST-DASLab/pruned-vision-model-bias.","lang":"eng"}],"ec_funded":1,"day":"22","oa_version":"Preprint","page":"24364-24373","publication_status":"published"},{"acknowledged_ssus":[{"_id":"ScienComp"}],"status":"public","type":"conference","language":[{"iso":"eng"}],"date_updated":"2023-06-01T12:54:45Z","year":"2023","publication_status":"accepted","date_published":"2023-05-01T00:00:00Z","oa_version":"Preprint","ec_funded":1,"abstract":[{"text":"Deep neural networks (DNNs) often have to be compressed, via pruning and/or quantization, before they can be deployed in practical settings. 
In this work we propose a new compression-aware minimizer dubbed CrAM that modifies the optimization step in a principled way, in order to produce models whose local loss behavior is stable under compression operations such as pruning. Thus, dense models trained via CrAM should be compressible post-training, in a single step, without significant accuracy loss. Experimental results on standard benchmarks, such as residual networks for ImageNet classification and BERT models for language modelling, show that CrAM produces dense models that can be more accurate than the standard SGD/Adam-based baselines, but which are stable under weight pruning: specifically, we can prune models in one-shot to 70-80% sparsity with almost no accuracy loss, and to 90% with reasonable (∼1%) accuracy loss, which is competitive with gradual compression methods. Additionally, CrAM can produce sparse models which perform well for transfer learning, and it also works for semi-structured 2:4 pruning patterns supported by GPU hardware. The code for reproducing the results is available at this https URL .","lang":"eng"}],"author":[{"last_name":"Peste","id":"32D78294-F248-11E8-B48F-1D18A9856A87","full_name":"Peste, Elena-Alexandra","first_name":"Elena-Alexandra"},{"last_name":"Vladu","first_name":"Adrian","full_name":"Vladu, Adrian"},{"last_name":"Kurtic","id":"47beb3a5-07b5-11eb-9b87-b108ec578218","first_name":"Eldar","full_name":"Kurtic, Eldar"},{"orcid":"0000-0001-8622-7887","last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","full_name":"Lampert, Christoph","first_name":"Christoph"},{"full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"}],"oa":1,"_id":"13053","article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","conference":{"location":"Kigali, Rwanda ","start_date":"2023-05-01","end_date":"2023-05-05","name":"ICLR: International Conference on Learning Representations"},"date_created":"2023-05-23T11:36:18Z","project":[{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","call_identifier":"H2020","grant_number":"805223"}],"citation":{"ieee":"E.-A. Peste, A. Vladu, E. Kurtic, C. Lampert, and D.-A. Alistarh, “CrAM: A Compression-Aware Minimizer,” in <i>11th International Conference on Learning Representations </i>, Kigali, Rwanda .","chicago":"Peste, Elena-Alexandra, Adrian Vladu, Eldar Kurtic, Christoph Lampert, and Dan-Adrian Alistarh. “CrAM: A Compression-Aware Minimizer.” In <i>11th International Conference on Learning Representations </i>, n.d.","ama":"Peste E-A, Vladu A, Kurtic E, Lampert C, Alistarh D-A. CrAM: A Compression-Aware Minimizer. In: <i>11th International Conference on Learning Representations </i>.","apa":"Peste, E.-A., Vladu, A., Kurtic, E., Lampert, C., &#38; Alistarh, D.-A. (n.d.). CrAM: A Compression-Aware Minimizer. In <i>11th International Conference on Learning Representations </i>. Kigali, Rwanda .","short":"E.-A. Peste, A. Vladu, E. Kurtic, C. Lampert, D.-A. Alistarh, in:, 11th International Conference on Learning Representations , n.d.","mla":"Peste, Elena-Alexandra, et al. “CrAM: A Compression-Aware Minimizer.” <i>11th International Conference on Learning Representations </i>.","ista":"Peste E-A, Vladu A, Kurtic E, Lampert C, Alistarh D-A. CrAM: A Compression-Aware Minimizer. 11th International Conference on Learning Representations . 
ICLR: International Conference on Learning Representations."},"department":[{"_id":"GradSch"},{"_id":"DaAl"},{"_id":"ChLa"}],"arxiv":1,"acknowledgement":"AP, EK, DA received funding from the European Research Council (ERC) under the European\r\nUnion’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML). AV acknowledges the support of the French Agence Nationale de la Recherche (ANR), under grant ANR-21-CE48-0016 (project COMCOPT). We further acknowledge the support from the Scientific Service Units (SSU) of ISTA through resources provided by Scientific Computing (SciComp)-","publication":"11th International Conference on Learning Representations ","external_id":{"arxiv":["2207.14200"]},"related_material":{"record":[{"id":"13074","status":"public","relation":"dissertation_contains"}]},"title":"CrAM: A Compression-Aware Minimizer","month":"05","quality_controlled":"1","main_file_link":[{"url":"https://openreview.net/pdf?id=_eTZBs-yedr","open_access":"1"}]},{"ddc":["000"],"user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","article_processing_charge":"No","file_date_updated":"2023-05-24T16:12:59Z","date_created":"2023-05-23T17:07:53Z","department":[{"_id":"GradSch"},{"_id":"DaAl"},{"_id":"ChLa"}],"supervisor":[{"last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-8622-7887","first_name":"Christoph","full_name":"Lampert, Christoph"},{"first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh"}],"month":"05","publication_identifier":{"issn":["2663-337X"]},"type":"dissertation","language":[{"iso":"eng"}],"doi":"10.15479/at:ista:13074","publisher":"Institute of Science and Technology Austria","year":"2023","date_published":"2023-05-23T00:00:00Z","author":[{"first_name":"Elena-Alexandra","full_name":"Peste, Elena-Alexandra","id":"32D78294-F248-11E8-B48F-1D18A9856A87","last_name":"Peste"}],"oa":1,"_id":"13074","alternative_title":["ISTA Thesis"],"has_accepted_license":"1","project":[{"name":"International IST Doctoral Program","_id":"2564DBCA-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"665385"},{"call_identifier":"H2020","grant_number":"805223","name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425"}],"citation":{"apa":"Peste, E.-A. (2023). <i>Efficiency and generalization of sparse neural networks</i>. Institute of Science and Technology Austria. <a href=\"https://doi.org/10.15479/at:ista:13074\">https://doi.org/10.15479/at:ista:13074</a>","short":"E.-A. Peste, Efficiency and Generalization of Sparse Neural Networks, Institute of Science and Technology Austria, 2023.","ista":"Peste E-A. 2023. Efficiency and generalization of sparse neural networks. Institute of Science and Technology Austria.","mla":"Peste, Elena-Alexandra. <i>Efficiency and Generalization of Sparse Neural Networks</i>. Institute of Science and Technology Austria, 2023, doi:<a href=\"https://doi.org/10.15479/at:ista:13074\">10.15479/at:ista:13074</a>.","ama":"Peste E-A. Efficiency and generalization of sparse neural networks. 2023. doi:<a href=\"https://doi.org/10.15479/at:ista:13074\">10.15479/at:ista:13074</a>","chicago":"Peste, Elena-Alexandra. “Efficiency and Generalization of Sparse Neural Networks.” Institute of Science and Technology Austria, 2023. <a href=\"https://doi.org/10.15479/at:ista:13074\">https://doi.org/10.15479/at:ista:13074</a>.","ieee":"E.-A. 
Peste, “Efficiency and generalization of sparse neural networks,” Institute of Science and Technology Austria, 2023."},"related_material":{"record":[{"relation":"part_of_dissertation","status":"public","id":"11458"},{"status":"public","id":"13053","relation":"part_of_dissertation"},{"relation":"part_of_dissertation","id":"12299","status":"public"}]},"title":"Efficiency and generalization of sparse neural networks","file":[{"creator":"epeste","content_type":"application/pdf","relation":"main_file","access_level":"open_access","file_size":2152072,"file_name":"PhD_Thesis_Alexandra_Peste_final.pdf","success":1,"date_updated":"2023-05-24T16:11:16Z","checksum":"6b3354968403cb9d48cc5a83611fb571","date_created":"2023-05-24T16:11:16Z","file_id":"13087"},{"file_name":"PhD_Thesis_APeste.zip","file_size":1658293,"file_id":"13088","checksum":"8d0df94bbcf4db72c991f22503b3fd60","date_created":"2023-05-24T16:12:59Z","date_updated":"2023-05-24T16:12:59Z","creator":"epeste","access_level":"closed","relation":"source_file","content_type":"application/zip"}],"acknowledged_ssus":[{"_id":"ScienComp"}],"status":"public","date_updated":"2023-08-04T10:33:27Z","publication_status":"published","oa_version":"Published Version","page":"147","day":"23","ec_funded":1,"abstract":[{"text":"Deep learning has become an integral part of a large number of important applications, and many of the recent breakthroughs have been enabled by the ability to train very large models, capable to capture complex patterns and relationships from the data. At the same time, the massive sizes of modern deep learning models have made their deployment to smaller devices more challenging; this is particularly important, as in many applications the users rely on accurate deep learning predictions, but they only have access to devices with limited memory and compute power. One solution to this problem is to prune neural networks, by setting as many of their parameters as possible to zero, to obtain accurate sparse models with lower memory footprint. Despite the great research progress in obtaining sparse models that preserve accuracy, while satisfying memory and computational constraints, there are still many challenges associated with efficiently training sparse models, as well as understanding their generalization properties.\r\n\r\nThe focus of this thesis is to investigate how the training process of sparse models can be made more efficient, and to understand the differences between sparse and dense models in terms of how well they can generalize to changes in the data distribution. We first study a method for co-training sparse and dense models, at a lower cost compared to regular training. With our method we can obtain very accurate sparse networks, and dense models that can recover the baseline accuracy. Furthermore, we are able to more easily analyze the differences, at prediction level, between the sparse-dense model pairs. Next, we investigate the generalization properties of sparse neural networks in more detail, by studying how well different sparse models trained on a larger task can adapt to smaller, more specialized tasks, in a transfer learning scenario. Our analysis across multiple pruning methods and sparsity levels reveals that sparse models provide features that can transfer similarly to or better than the dense baseline. 
However, the choice of the pruning method plays an important role, and can influence the results when the features are fixed (linear finetuning), or when they are allowed to adapt to the new task (full finetuning). Using sparse models with fixed masks for finetuning on new tasks has an important practical advantage, as it enables training neural networks on smaller devices. However, one drawback of current pruning methods is that the entire training cycle has to be repeated to obtain the initial sparse model, for every sparsity target; in consequence, the entire training process is costly and also multiple models need to be stored. In the last part of the thesis we propose a method that can train accurate dense models that are compressible in a single step, to multiple sparsity levels, without additional finetuning. Our method results in sparse models that can be competitive with existing pruning methods, and which can also successfully generalize to new tasks.","lang":"eng"}],"degree_awarded":"PhD"},{"license":"https://creativecommons.org/licenses/by/4.0/","status":"public","date_updated":"2023-08-01T13:17:20Z","abstract":[{"lang":"eng","text":"Approximate agreement is one of the few variants of consensus that can be solved in a wait-free manner in asynchronous systems where processes communicate by reading and writing to shared memory. In this work, we consider a natural generalisation of approximate agreement on arbitrary undirected connected graphs. Each process is given a node of the graph as input and, if non-faulty, must output a node such that\r\n– all the outputs are within distance 1 of one another, and\r\n– each output value lies on a shortest path between two input values.\r\nFrom prior work, it is known that there is no wait-free algorithm among n ≥ 3 processes for this problem on any cycle of length c ≥ 4, by reduction from 2-set agreement (Castañeda et al., 2018).\r\n\r\nIn this work, we investigate the solvability of this task on general graphs. We give a new, direct proof of the impossibility of approximate agreement on cycles of length c ≥ 4, via a generalisation of Sperner's Lemma to convex polygons. We also extend the reduction from 2-set agreement to a larger class of graphs, showing that approximate agreement on these graphs is unsolvable. On the positive side, we present a wait-free algorithm for a different class of graphs, which properly contains the class of chordal graphs."}],"ec_funded":1,"day":"28","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"publication_status":"published","oa_version":"Published Version","has_accepted_license":"1","scopus_import":"1","issue":"2","citation":{"ieee":"D.-A. Alistarh, F. Ellen, and J. Rybicki, “Wait-free approximate agreement on graphs,” <i>Theoretical Computer Science</i>, vol. 948, no. 2. Elsevier, 2023.","chicago":"Alistarh, Dan-Adrian, Faith Ellen, and Joel Rybicki. “Wait-Free Approximate Agreement on Graphs.” <i>Theoretical Computer Science</i>. Elsevier, 2023. <a href=\"https://doi.org/10.1016/j.tcs.2023.113733\">https://doi.org/10.1016/j.tcs.2023.113733</a>.","ama":"Alistarh D-A, Ellen F, Rybicki J. Wait-free approximate agreement on graphs. <i>Theoretical Computer Science</i>. 2023;948(2). doi:<a href=\"https://doi.org/10.1016/j.tcs.2023.113733\">10.1016/j.tcs.2023.113733</a>","apa":"Alistarh, D.-A., Ellen, F., &#38; Rybicki, J. (2023). 
Wait-free approximate agreement on graphs. <i>Theoretical Computer Science</i>. Elsevier. <a href=\"https://doi.org/10.1016/j.tcs.2023.113733\">https://doi.org/10.1016/j.tcs.2023.113733</a>","mla":"Alistarh, Dan-Adrian, et al. “Wait-Free Approximate Agreement on Graphs.” <i>Theoretical Computer Science</i>, vol. 948, no. 2, 113733, Elsevier, 2023, doi:<a href=\"https://doi.org/10.1016/j.tcs.2023.113733\">10.1016/j.tcs.2023.113733</a>.","short":"D.-A. Alistarh, F. Ellen, J. Rybicki, Theoretical Computer Science 948 (2023).","ista":"Alistarh D-A, Ellen F, Rybicki J. 2023. Wait-free approximate agreement on graphs. Theoretical Computer Science. 948(2), 113733."},"project":[{"name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"805223"},{"grant_number":"840605","call_identifier":"H2020","name":"Coordination in constrained and natural distributed systems","_id":"26A5D39A-B435-11E9-9278-68D0E5697425"}],"file":[{"creator":"dernst","access_level":"open_access","content_type":"application/pdf","relation":"main_file","file_name":"2023_TheoreticalCompScience_Alistarh.pdf","file_size":602333,"checksum":"b27c5290f2f1500c403494364ee39c9f","date_created":"2023-02-20T07:30:20Z","file_id":"12570","success":1,"date_updated":"2023-02-20T07:30:20Z"}],"title":"Wait-free approximate agreement on graphs","external_id":{"isi":["000934262700001"]},"publication":"Theoretical Computer Science","type":"journal_article","year":"2023","publisher":"Elsevier","doi":"10.1016/j.tcs.2023.113733","language":[{"iso":"eng"}],"date_published":"2023-02-28T00:00:00Z","_id":"12566","oa":1,"author":[{"orcid":"0000-0003-3650-940X","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian"},{"last_name":"Ellen","first_name":"Faith","full_name":"Ellen, Faith"},{"full_name":"Rybicki, Joel","first_name":"Joel","orcid":"0000-0002-6432-6646","last_name":"Rybicki","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87"}],"intvolume":"       948","file_date_updated":"2023-02-20T07:30:20Z","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","article_processing_charge":"Yes (via OA deal)","ddc":["000"],"acknowledgement":"This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No. 805223 ScaleML) and under the Marie Skłodowska-Curie grant agreement No. 840605 and from the Natural Sciences and Engineering Research Council of Canada grant RGPIN-2020-04178. 
Part of this work was done while Faith Ellen was visiting IST Austria.","department":[{"_id":"DaAl"}],"date_created":"2023-02-19T23:00:55Z","article_number":"113733","volume":948,"article_type":"original","isi":1,"publication_identifier":{"issn":["0304-3975"]},"quality_controlled":"1","month":"02"},{"date_published":"2022-04-02T00:00:00Z","oa":1,"_id":"11180","author":[{"first_name":"Anastasiia","full_name":"Postnikova, Anastasiia","last_name":"Postnikova"},{"last_name":"Koval","id":"2F4DB10C-F248-11E8-B48F-1D18A9856A87","full_name":"Koval, Nikita","first_name":"Nikita"},{"id":"3279A00C-F248-11E8-B48F-1D18A9856A87","last_name":"Nadiradze","full_name":"Nadiradze, Giorgi","first_name":"Giorgi"},{"last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian"}],"type":"conference","publisher":"Association for Computing Machinery","year":"2022","language":[{"iso":"eng"}],"doi":"10.1145/3503221.3508432","isi":1,"quality_controlled":"1","publication_identifier":{"isbn":["9781450392044"]},"month":"04","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","article_processing_charge":"No","conference":{"location":"Seoul, Republic of Korea","name":"PPoPP: Symposium on Principles and Practice of Parallel Programming","start_date":"2022-04-02","end_date":"2022-04-06"},"department":[{"_id":"DaAl"}],"acknowledgement":"We would like to thank the anonymous reviewers for their useful comments. This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML).","date_created":"2022-04-17T22:01:46Z","ec_funded":1,"abstract":[{"text":"Designing and implementing efficient parallel priority schedulers is an active research area. An intriguing proposed design is the Multi-Queue: given n threads and m ≥ n distinct priority queues, task insertions are performed uniformly at random, while, to delete, a thread picks two queues uniformly at random, and removes the observed task of higher priority. This approach scales well, and has probabilistic rank guarantees: roughly, the rank of each task removed, relative to remaining tasks in all other queues, is O (m) in expectation. Yet, the performance of this pattern is below that of well-engineered schedulers, which eschew theoretical guarantees for practical efficiency.\r\n\r\nWe investigate whether it is possible to design and implement a Multi-Queue-based task scheduler that is both highly-efficient and has analytical guarantees. We propose a new variant called the Stealing Multi-Queue (SMQ), a cache-efficient variant of the Multi-Queue, which leverages both queue affinity---each thread has a local queue, from which tasks are usually removed; but, with some probability, threads also attempt to steal higher-priority tasks from the other queues---and task batching, that is, the processing of several tasks in a single insert / remove step. These ideas are well-known for task scheduling without priorities; our theoretical contribution is showing that, despite relaxations, this design can still provide rank guarantees, which in turn implies bounds on total work performed. We provide a general SMQ implementation which can surpass state-of-the-art schedulers such as OBIM and PMOD in terms of performance on popular graph-processing benchmarks. 
Notably, the performance improvement comes mainly from the superior rank guarantees provided by our scheduler, confirming that analytically-reasoned approaches can still provide performance improvements for priority task scheduling.","lang":"eng"}],"oa_version":"Preprint","publication_status":"published","page":"353-367","day":"02","status":"public","date_updated":"2023-08-03T06:48:35Z","related_material":{"record":[{"status":"public","id":"13076","relation":"research_data"}]},"external_id":{"arxiv":["2109.00657"],"isi":["000883318200025"]},"title":"Multi-queues can be state-of-the-art priority schedulers","publication":"Proceedings of the 27th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming","main_file_link":[{"url":" https://doi.org/10.48550/arXiv.2109.00657","open_access":"1"}],"scopus_import":"1","project":[{"name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"805223"}],"arxiv":1,"citation":{"short":"A. Postnikova, N. Koval, G. Nadiradze, D.-A. Alistarh, in:, Proceedings of the 27th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, Association for Computing Machinery, 2022, pp. 353–367.","mla":"Postnikova, Anastasiia, et al. “Multi-Queues Can Be State-of-the-Art Priority Schedulers.” <i>Proceedings of the 27th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming</i>, Association for Computing Machinery, 2022, pp. 353–67, doi:<a href=\"https://doi.org/10.1145/3503221.3508432\">10.1145/3503221.3508432</a>.","ista":"Postnikova A, Koval N, Nadiradze G, Alistarh D-A. 2022. Multi-queues can be state-of-the-art priority schedulers. Proceedings of the 27th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming. PPoPP: Symposium on Principles and Practice of Parallel Programming, 353–367.","apa":"Postnikova, A., Koval, N., Nadiradze, G., &#38; Alistarh, D.-A. (2022). Multi-queues can be state-of-the-art priority schedulers. In <i>Proceedings of the 27th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming</i> (pp. 353–367). Seoul, Republic of Korea: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3503221.3508432\">https://doi.org/10.1145/3503221.3508432</a>","ama":"Postnikova A, Koval N, Nadiradze G, Alistarh D-A. Multi-queues can be state-of-the-art priority schedulers. In: <i>Proceedings of the 27th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming</i>. Association for Computing Machinery; 2022:353-367. doi:<a href=\"https://doi.org/10.1145/3503221.3508432\">10.1145/3503221.3508432</a>","chicago":"Postnikova, Anastasiia, Nikita Koval, Giorgi Nadiradze, and Dan-Adrian Alistarh. “Multi-Queues Can Be State-of-the-Art Priority Schedulers.” In <i>Proceedings of the 27th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming</i>, 353–67. Association for Computing Machinery, 2022. <a href=\"https://doi.org/10.1145/3503221.3508432\">https://doi.org/10.1145/3503221.3508432</a>.","ieee":"A. Postnikova, N. Koval, G. Nadiradze, and D.-A. Alistarh, “Multi-queues can be state-of-the-art priority schedulers,” in <i>Proceedings of the 27th ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming</i>, Seoul, Republic of Korea, 2022, pp. 
353–367."}},{"status":"public","date_updated":"2022-05-02T07:56:35Z","ec_funded":1,"abstract":[{"text":"Subgraph detection has recently been one of the most studied problems in the CONGEST model of distributed computing. In this work, we study the distributed complexity of problems closely related to subgraph detection, mainly focusing on induced subgraph detection. The main line of this work presents lower bounds and parameterized algorithms w.r.t structural parameters of the input graph:\r\n- On general graphs, we give unconditional lower bounds for induced detection of cycles and patterns of treewidth 2 in CONGEST. Moreover, by adapting reductions from centralized parameterized complexity, we prove lower bounds in CONGEST for detecting patterns with a 4-clique, and for induced path detection conditional on the hardness of triangle detection in the congested clique.\r\n- On graphs of bounded degeneracy, we show that induced paths can be detected fast in CONGEST using techniques from parameterized algorithms, while detecting cycles and patterns of treewidth 2 is hard.\r\n- On graphs of bounded vertex cover number, we show that induced subgraph detection is easy in CONGEST for any pattern graph. More specifically, we adapt a centralized parameterized algorithm for a more general maximum common induced subgraph detection problem to the distributed setting. In addition to these induced subgraph detection results, we study various related problems in the CONGEST and congested clique models, including for multicolored versions of subgraph-detection-like problems.","lang":"eng"}],"oa_version":"Published Version","publication_status":"published","day":"01","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"alternative_title":["LIPIcs"],"has_accepted_license":"1","editor":[{"last_name":"Bramas","first_name":"Quentin","full_name":"Bramas, Quentin"},{"last_name":"Gramoli","full_name":"Gramoli, Vincent","first_name":"Vincent"},{"full_name":"Milani, Alessia","first_name":"Alessia","last_name":"Milani"}],"scopus_import":"1","project":[{"call_identifier":"H2020","grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning"}],"citation":{"apa":"Nikabadi, A., &#38; Korhonen, J. (2022). Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters. In Q. Bramas, V. Gramoli, &#38; A. Milani (Eds.), <i>25th International Conference on Principles of Distributed Systems</i> (Vol. 217). Strasbourg, France: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. <a href=\"https://doi.org/10.4230/LIPIcs.OPODIS.2021.15\">https://doi.org/10.4230/LIPIcs.OPODIS.2021.15</a>","ista":"Nikabadi A, Korhonen J. 2022. Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters. 25th International Conference on Principles of Distributed Systems. OPODIS, LIPIcs, vol. 217, 15.","short":"A. Nikabadi, J. Korhonen, in:, Q. Bramas, V. Gramoli, A. Milani (Eds.), 25th International Conference on Principles of Distributed Systems, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022.","mla":"Nikabadi, Amir, and Janne Korhonen. 
“Beyond Distributed Subgraph Detection: Induced Subgraphs, Multicolored Problems and Graph Parameters.” <i>25th International Conference on Principles of Distributed Systems</i>, edited by Quentin Bramas et al., vol. 217, 15, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022, doi:<a href=\"https://doi.org/10.4230/LIPIcs.OPODIS.2021.15\">10.4230/LIPIcs.OPODIS.2021.15</a>.","ama":"Nikabadi A, Korhonen J. Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters. In: Bramas Q, Gramoli V, Milani A, eds. <i>25th International Conference on Principles of Distributed Systems</i>. Vol 217. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2022. doi:<a href=\"https://doi.org/10.4230/LIPIcs.OPODIS.2021.15\">10.4230/LIPIcs.OPODIS.2021.15</a>","chicago":"Nikabadi, Amir, and Janne Korhonen. “Beyond Distributed Subgraph Detection: Induced Subgraphs, Multicolored Problems and Graph Parameters.” In <i>25th International Conference on Principles of Distributed Systems</i>, edited by Quentin Bramas, Vincent Gramoli, and Alessia Milani, Vol. 217. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022. <a href=\"https://doi.org/10.4230/LIPIcs.OPODIS.2021.15\">https://doi.org/10.4230/LIPIcs.OPODIS.2021.15</a>.","ieee":"A. Nikabadi and J. Korhonen, “Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters,” in <i>25th International Conference on Principles of Distributed Systems</i>, Strasbourg, France, 2022, vol. 217."},"title":"Beyond distributed subgraph detection: Induced subgraphs, multicolored problems and graph parameters","file":[{"success":1,"date_updated":"2022-05-02T07:53:00Z","checksum":"626551c14de5d4091573200ed0535752","date_created":"2022-05-02T07:53:00Z","file_id":"11345","file_size":790396,"file_name":"2022_LIPICs_Nikabadi.pdf","content_type":"application/pdf","relation":"main_file","access_level":"open_access","creator":"dernst"}],"publication":"25th International Conference on Principles of Distributed Systems","type":"conference","publisher":"Schloss Dagstuhl - Leibniz-Zentrum für Informatik","year":"2022","language":[{"iso":"eng"}],"doi":"10.4230/LIPIcs.OPODIS.2021.15","date_published":"2022-02-01T00:00:00Z","oa":1,"_id":"11183","author":[{"first_name":"Amir","full_name":"Nikabadi, Amir","last_name":"Nikabadi"},{"last_name":"Korhonen","id":"C5402D42-15BC-11E9-A202-CA2BE6697425","first_name":"Janne","full_name":"Korhonen, Janne"}],"intvolume":"       217","article_processing_charge":"No","ddc":["510"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","conference":{"location":"Strasbourg, France","end_date":"2021-12-15","start_date":"2021-12-13","name":"OPODIS"},"file_date_updated":"2022-05-02T07:53:00Z","department":[{"_id":"DaAl"}],"acknowledgement":"Amir Nikabadi: Supported by the LABEX MILYON (ANR-10-LABX-0070) of Université de Lyon, within the program “Investissements d’Avenir” (ANR-11-IDEX-0007) operated by the French National Research Agency (ANR). Janne H. 
Korhonen: Supported by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML).\r\nWe thank François Le Gall and Masayuki Miyamoto for sharing their work on lower bounds for induced subgraph detection [36].","article_number":"15","date_created":"2022-04-17T22:01:47Z","volume":217,"quality_controlled":"1","publication_identifier":{"isbn":["9783959772198"],"issn":["1868-8969"]},"month":"02"},{"project":[{"grant_number":"805223","call_identifier":"H2020","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning"},{"grant_number":"840605","call_identifier":"H2020","_id":"26A5D39A-B435-11E9-9278-68D0E5697425","name":"Coordination in constrained and natural distributed systems"}],"citation":{"ama":"Alistarh D-A, Gelashvili R, Rybicki J. Fast graphical population protocols. In: Bramas Q, Gramoli V, Milani A, eds. <i>25th International Conference on Principles of Distributed Systems</i>. Vol 217. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2022. doi:<a href=\"https://doi.org/10.4230/LIPIcs.OPODIS.2021.14\">10.4230/LIPIcs.OPODIS.2021.14</a>","short":"D.-A. Alistarh, R. Gelashvili, J. Rybicki, in:, Q. Bramas, V. Gramoli, A. Milani (Eds.), 25th International Conference on Principles of Distributed Systems, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022.","apa":"Alistarh, D.-A., Gelashvili, R., &#38; Rybicki, J. (2022). Fast graphical population protocols. In Q. Bramas, V. Gramoli, &#38; A. Milani (Eds.), <i>25th International Conference on Principles of Distributed Systems</i> (Vol. 217). Strasbourg, France: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. <a href=\"https://doi.org/10.4230/LIPIcs.OPODIS.2021.14\">https://doi.org/10.4230/LIPIcs.OPODIS.2021.14</a>","ista":"Alistarh D-A, Gelashvili R, Rybicki J. 2022. Fast graphical population protocols. 25th International Conference on Principles of Distributed Systems. OPODIS, LIPIcs, vol. 217, 14.","mla":"Alistarh, Dan-Adrian, et al. “Fast Graphical Population Protocols.” <i>25th International Conference on Principles of Distributed Systems</i>, edited by Quentin Bramas et al., vol. 217, 14, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022, doi:<a href=\"https://doi.org/10.4230/LIPIcs.OPODIS.2021.14\">10.4230/LIPIcs.OPODIS.2021.14</a>.","ieee":"D.-A. Alistarh, R. Gelashvili, and J. Rybicki, “Fast graphical population protocols,” in <i>25th International Conference on Principles of Distributed Systems</i>, Strasbourg, France, 2022, vol. 217.","chicago":"Alistarh, Dan-Adrian, Rati Gelashvili, and Joel Rybicki. “Fast Graphical Population Protocols.” In <i>25th International Conference on Principles of Distributed Systems</i>, edited by Quentin Bramas, Vincent Gramoli, and Alessia Milani, Vol. 217. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2022. 
<a href=\"https://doi.org/10.4230/LIPIcs.OPODIS.2021.14\">https://doi.org/10.4230/LIPIcs.OPODIS.2021.14</a>."},"arxiv":1,"alternative_title":["LIPIcs"],"has_accepted_license":"1","editor":[{"last_name":"Bramas","first_name":"Quentin","full_name":"Bramas, Quentin"},{"full_name":"Gramoli, Vincent","first_name":"Vincent","last_name":"Gramoli"},{"full_name":"Milani, Alessia","first_name":"Alessia","last_name":"Milani"}],"scopus_import":"1","external_id":{"arxiv":["2102.08808"]},"title":"Fast graphical population protocols","file":[{"creator":"dernst","access_level":"open_access","relation":"main_file","content_type":"application/pdf","file_name":"2022_LIPICs_Alistarh.pdf","file_size":959406,"date_created":"2022-05-02T08:06:33Z","checksum":"2c7c982174c6f98c4ca6e92539d15086","file_id":"11346","date_updated":"2022-05-02T08:06:33Z","success":1}],"publication":"25th International Conference on Principles of Distributed Systems","date_updated":"2022-05-02T08:09:39Z","status":"public","ec_funded":1,"abstract":[{"lang":"eng","text":"Let G be a graph on n nodes. In the stochastic population protocol model, a collection of n indistinguishable, resource-limited nodes collectively solve tasks via pairwise interactions. In each interaction, two randomly chosen neighbors first read each other’s states, and then update their local states. A rich line of research has established tight upper and lower bounds on the complexity of fundamental tasks, such as majority and leader election, in this model, when G is a clique. Specifically, in the clique, these tasks can be solved fast, i.e., in n polylog n pairwise interactions, with high probability, using at most polylog n states per node.\r\nIn this work, we consider the more general setting where G is an arbitrary regular graph, and present a technique for simulating protocols designed for fully-connected networks in any connected regular graph. Our main result is a simulation that is efficient on many interesting graph families: roughly, the simulation overhead is polylogarithmic in the number of nodes, and quadratic in the conductance of the graph. As a sample application, we show that, in any regular graph with conductance φ, both leader election and exact majority can be solved in φ^{-2} ⋅ n polylog n pairwise interactions, with high probability, using at most φ^{-2} ⋅ polylog n states per node. This shows that there are fast and space-efficient population protocols for leader election and exact majority on graphs with good expansion properties. We believe our results will prove generally useful, as they allow efficient technology transfer between the well-mixed (clique) case, and the under-explored spatial setting."}],"publication_status":"published","oa_version":"Published Version","day":"01","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"acknowledgement":"Dan Alistarh: This project has received funding from the European Research Council (ERC)\r\nunder the European Union’s Horizon 2020 research and innovation programme (grant agreement No.805223 ScaleML).\r\nJoel Rybicki: This project has received from the European Union’s Horizon 2020 research and\r\ninnovation programme under the Marie Skłodowska-Curie grant agreement No. 840605.\r\nAcknowledgements We grateful to Giorgi Nadiradze for pointing out a generalisation of the phase clock construction to non-regular graphs. 
We also thank anonymous reviewers for their useful comments on earlier versions of this manuscript.","department":[{"_id":"DaAl"}],"date_created":"2022-04-17T22:01:47Z","article_number":"14","intvolume":"       217","article_processing_charge":"No","ddc":["510"],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","conference":{"name":"OPODIS","end_date":"2021-12-15","start_date":"2021-12-13","location":"Strasbourg, France"},"file_date_updated":"2022-05-02T08:06:33Z","quality_controlled":"1","publication_identifier":{"isbn":["9783959772198"],"issn":["1868-8969"]},"month":"02","volume":217,"publisher":"Schloss Dagstuhl - Leibniz-Zentrum für Informatik","year":"2022","language":[{"iso":"eng"}],"doi":"10.4230/LIPIcs.OPODIS.2021.14","type":"conference","oa":1,"_id":"11184","author":[{"orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh","first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian"},{"last_name":"Gelashvili","full_name":"Gelashvili, Rati","first_name":"Rati"},{"orcid":"0000-0002-6432-6646","last_name":"Rybicki","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87","first_name":"Joel","full_name":"Rybicki, Joel"}],"date_published":"2022-02-01T00:00:00Z"},{"file_date_updated":"2022-08-16T08:05:15Z","conference":{"location":"Salerno, Italy","name":"PODC: Symposium on Principles of Distributed Computing","start_date":"2022-07-25","end_date":"2022-07-29"},"ddc":["000"],"article_processing_charge":"Yes (via OA deal)","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","department":[{"_id":"DaAl"}],"acknowledgement":"We thank the anonymous reviewers for their helpful comments. We gratefully acknowledge funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML).","date_created":"2022-08-14T22:01:46Z","publication_identifier":{"isbn":["9781450392624"]},"quality_controlled":"1","month":"07","type":"conference","year":"2022","publisher":"Association for Computing Machinery","doi":"10.1145/3519270.3538435","language":[{"iso":"eng"}],"date_published":"2022-07-21T00:00:00Z","_id":"11844","oa":1,"author":[{"first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh"},{"last_name":"Rybicki","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-6432-6646","full_name":"Rybicki, Joel","first_name":"Joel"},{"last_name":"Voitovych","first_name":"Sasha","full_name":"Voitovych, Sasha"}],"has_accepted_license":"1","scopus_import":"1","citation":{"ama":"Alistarh D-A, Rybicki J, Voitovych S. Near-optimal leader election in population protocols on graphs. In: <i>Proceedings of the Annual ACM Symposium on Principles of Distributed Computing</i>. Association for Computing Machinery; 2022:246-256. doi:<a href=\"https://doi.org/10.1145/3519270.3538435\">10.1145/3519270.3538435</a>","apa":"Alistarh, D.-A., Rybicki, J., &#38; Voitovych, S. (2022). Near-optimal leader election in population protocols on graphs. In <i>Proceedings of the Annual ACM Symposium on Principles of Distributed Computing</i> (pp. 246–256). Salerno, Italy: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3519270.3538435\">https://doi.org/10.1145/3519270.3538435</a>","mla":"Alistarh, Dan-Adrian, et al. 
“Near-Optimal Leader Election in Population Protocols on Graphs.” <i>Proceedings of the Annual ACM Symposium on Principles of Distributed Computing</i>, Association for Computing Machinery, 2022, pp. 246–56, doi:<a href=\"https://doi.org/10.1145/3519270.3538435\">10.1145/3519270.3538435</a>.","ista":"Alistarh D-A, Rybicki J, Voitovych S. 2022. Near-optimal leader election in population protocols on graphs. Proceedings of the Annual ACM Symposium on Principles of Distributed Computing. PODC: Symposium on Principles of Distributed Computing, 246–256.","short":"D.-A. Alistarh, J. Rybicki, S. Voitovych, in:, Proceedings of the Annual ACM Symposium on Principles of Distributed Computing, Association for Computing Machinery, 2022, pp. 246–256.","ieee":"D.-A. Alistarh, J. Rybicki, and S. Voitovych, “Near-optimal leader election in population protocols on graphs,” in <i>Proceedings of the Annual ACM Symposium on Principles of Distributed Computing</i>, Salerno, Italy, 2022, pp. 246–256.","chicago":"Alistarh, Dan-Adrian, Joel Rybicki, and Sasha Voitovych. “Near-Optimal Leader Election in Population Protocols on Graphs.” In <i>Proceedings of the Annual ACM Symposium on Principles of Distributed Computing</i>, 246–56. Association for Computing Machinery, 2022. <a href=\"https://doi.org/10.1145/3519270.3538435\">https://doi.org/10.1145/3519270.3538435</a>."},"arxiv":1,"project":[{"name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223","call_identifier":"H2020"}],"title":"Near-optimal leader election in population protocols on graphs","file":[{"creator":"cchlebak","content_type":"application/pdf","relation":"main_file","access_level":"open_access","file_size":1593474,"file_name":"2022_PODC_Alistarh.pdf","success":1,"date_updated":"2022-08-16T08:05:15Z","date_created":"2022-08-16T08:05:15Z","file_id":"11854","checksum":"4c6b29172b8e355b4fbc364a2e0827b2"}],"external_id":{"arxiv":["2205.12597"]},"publication":"Proceedings of the Annual ACM Symposium on Principles of Distributed Computing","status":"public","date_updated":"2023-06-14T12:06:01Z","abstract":[{"lang":"eng","text":"In the stochastic population protocol model, we are given a connected graph with n nodes, and in every time step, a scheduler samples an edge of the graph uniformly at random and the nodes connected by this edge interact. A fundamental task in this model is stable leader election, in which all nodes start in an identical state and the aim is to reach a configuration in which (1) exactly one node is elected as leader and (2) this node remains as the unique leader no matter what sequence of interactions follows. On cliques, the complexity of this problem has recently been settled: time-optimal protocols stabilize in Θ(n log n) expected steps using Θ(log log n) states, whereas protocols that use O(1) states require Θ(n^2) expected steps.\r\n\r\nIn this work, we investigate the complexity of stable leader election on general graphs. We provide the first non-trivial time lower bounds for leader election on general graphs, showing that, when moving beyond cliques, the complexity landscape of leader election becomes very diverse: the time required to elect a leader can range from O(1) to Θ(n^3) expected steps. On the upper bound side, we first observe that there exists a protocol that is time-optimal on many graph families, but uses polynomially-many states. 
In contrast, we give a near-time-optimal protocol that uses only O(log^2 n) states that is at most a factor log n slower. Finally, we show that the constant-state protocol of Beauquier et al. [OPODIS 2013] is at most a factor n log n slower than the fast polynomial-state protocol. Moreover, among constant-state protocols, this protocol has near-optimal average case complexity on dense random graphs."}],"ec_funded":1,"day":"21","tmp":{"legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"page":"246-256","oa_version":"Published Version","publication_status":"published"},{"project":[{"grant_number":" W1260-N35","name":"Vienna Graduate School on Computational Optimization","_id":"9B9290DE-BA93-11EA-9121-9846C619BF3A"},{"name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223","call_identifier":"H2020"}],"citation":{"ama":"Iofinova EB, Peste E-A, Kurtz M, Alistarh D-A. How well do sparse ImageNet models transfer? In: <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. Institute of Electrical and Electronics Engineers; 2022:12256-12266. doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">10.1109/cvpr52688.2022.01195</a>","ista":"Iofinova EB, Peste E-A, Kurtz M, Alistarh D-A. 2022. How well do sparse ImageNet models transfer? 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Computer Vision and Pattern Recognition, 12256–12266.","mla":"Iofinova, Eugenia B., et al. “How Well Do Sparse ImageNet Models Transfer?” <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Institute of Electrical and Electronics Engineers, 2022, pp. 12256–66, doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">10.1109/cvpr52688.2022.01195</a>.","short":"E.B. Iofinova, E.-A. Peste, M. Kurtz, D.-A. Alistarh, in:, 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 12256–12266.","apa":"Iofinova, E. B., Peste, E.-A., Kurtz, M., &#38; Alistarh, D.-A. (2022). How well do sparse ImageNet models transfer? In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 12256–12266). New Orleans, LA, United States: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">https://doi.org/10.1109/cvpr52688.2022.01195</a>","ieee":"E. B. Iofinova, E.-A. Peste, M. Kurtz, and D.-A. Alistarh, “How well do sparse ImageNet models transfer?,” in <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, New Orleans, LA, United States, 2022, pp. 12256–12266.","chicago":"Iofinova, Eugenia B, Elena-Alexandra Peste, Mark Kurtz, and Dan-Adrian Alistarh. “How Well Do Sparse ImageNet Models Transfer?” In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 12256–66. Institute of Electrical and Electronics Engineers, 2022. 
<a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">https://doi.org/10.1109/cvpr52688.2022.01195</a>."},"arxiv":1,"scopus_import":"1","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2111.13445","open_access":"1"}],"external_id":{"isi":["000870759105034"],"arxiv":["2111.13445"]},"related_material":{"record":[{"status":"public","id":"13074","relation":"dissertation_contains"}]},"title":"How well do sparse ImageNet models transfer?","publication":"2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition","date_updated":"2023-08-04T10:33:28Z","status":"public","ec_funded":1,"abstract":[{"text":"Transfer learning is a classic paradigm by which models pretrained on large “upstream” datasets are adapted to yield good results on “downstream” specialized datasets. Generally, more accurate models on the “upstream” dataset tend to provide better transfer accuracy “downstream”. In this work, we perform an in-depth investigation of this phenomenon in the context of convolutional neural networks (CNNs) trained on the ImageNet dataset, which have been pruned-that is, compressed by sparsifiying their connections. We consider transfer using unstructured pruned models obtained by applying several state-of-the-art pruning methods, including magnitude-based, second-order, regrowth, lottery-ticket, and regularization approaches, in the context of twelve standard transfer tasks. In a nutshell, our study shows that sparse models can match or even outperform the transfer performance of dense models, even at high sparsities, and, while doing so, can lead to significant inference and even training speedups. At the same time, we observe and analyze significant differences in the behaviour of different pruning methods. The code is available at: https://github.com/IST-DASLab/sparse-imagenet-transfer.","lang":"eng"}],"page":"12256-12266","publication_status":"published","oa_version":"Preprint","day":"27","department":[{"_id":"DaAl"},{"_id":"ChLa"}],"acknowledgement":"he authors would like to sincerely thank Christoph Lampert and Nir Shavit for fruitful discussions during the development of this work, and Eldar Kurtic for experimental support. 
EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35, while AP and DA acknowledge generous support by the ERC, via Starting Grant 805223 ScaleML.","date_created":"2023-01-16T10:06:00Z","article_processing_charge":"No","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","conference":{"start_date":"2022-06-18","end_date":"2022-06-24","name":"CVPR: Computer Vision and Pattern Recognition","location":"New Orleans, LA, United States"},"quality_controlled":"1","publication_identifier":{"eissn":["2575-7075"]},"month":"09","isi":1,"publisher":"Institute of Electrical and Electronics Engineers","year":"2022","language":[{"iso":"eng"}],"doi":"10.1109/cvpr52688.2022.01195","type":"conference","oa":1,"_id":"12299","author":[{"orcid":"0000-0002-7778-3221","last_name":"Iofinova","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","first_name":"Eugenia B","full_name":"Iofinova, Eugenia B"},{"id":"32D78294-F248-11E8-B48F-1D18A9856A87","last_name":"Peste","full_name":"Peste, Elena-Alexandra","first_name":"Elena-Alexandra"},{"last_name":"Kurtz","first_name":"Mark","full_name":"Kurtz, Mark"},{"full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","last_name":"Alistarh","orcid":"0000-0003-3650-940X"}],"date_published":"2022-09-27T00:00:00Z"},{"citation":{"ama":"Foerster K-T, Korhonen J, Paz A, Rybicki J, Schmid S. Input-dynamic distributed algorithms for communication networks. In: <i>Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems</i>. Association for Computing Machinery; 2021:71-72. doi:<a href=\"https://doi.org/10.1145/3410220.3453923\">10.1145/3410220.3453923</a>","apa":"Foerster, K.-T., Korhonen, J., Paz, A., Rybicki, J., &#38; Schmid, S. (2021). Input-dynamic distributed algorithms for communication networks. In <i>Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems</i> (pp. 71–72). Virtual, Online: Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3410220.3453923\">https://doi.org/10.1145/3410220.3453923</a>","mla":"Foerster, Klaus-Tycho, et al. “Input-Dynamic Distributed Algorithms for Communication Networks.” <i>Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems</i>, Association for Computing Machinery, 2021, pp. 71–72, doi:<a href=\"https://doi.org/10.1145/3410220.3453923\">10.1145/3410220.3453923</a>.","short":"K.-T. Foerster, J. Korhonen, A. Paz, J. Rybicki, S. Schmid, in:, Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems, Association for Computing Machinery, 2021, pp. 71–72.","ista":"Foerster K-T, Korhonen J, Paz A, Rybicki J, Schmid S. 2021. Input-dynamic distributed algorithms for communication networks. Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems. SIGMETRICS: International Conference on Measurement and Modeling of Computer Systems, 71–72.","ieee":"K.-T. Foerster, J. Korhonen, A. Paz, J. Rybicki, and S. Schmid, “Input-dynamic distributed algorithms for communication networks,” in <i>Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems</i>, Virtual, Online, 2021, pp. 71–72.","chicago":"Foerster, Klaus-Tycho, Janne Korhonen, Ami Paz, Joel Rybicki, and Stefan Schmid. 
“Input-Dynamic Distributed Algorithms for Communication Networks.” In <i>Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems</i>, 71–72. Association for Computing Machinery, 2021. <a href=\"https://doi.org/10.1145/3410220.3453923\">https://doi.org/10.1145/3410220.3453923</a>."},"arxiv":1,"project":[{"call_identifier":"H2020","grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning"},{"call_identifier":"H2020","grant_number":"840605","name":"Coordination in constrained and natural distributed systems","_id":"26A5D39A-B435-11E9-9278-68D0E5697425"}],"scopus_import":"1","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2005.07637"}],"publication":"Abstract Proceedings of the 2021 ACM SIGMETRICS / International Conference on Measurement and Modeling of Computer Systems","title":"Input-dynamic distributed algorithms for communication networks","related_material":{"record":[{"relation":"extended_version","id":"10855","status":"public"}]},"external_id":{"arxiv":["2005.07637"]},"date_updated":"2023-09-26T10:40:55Z","status":"public","day":"01","publication_status":"published","oa_version":"Preprint","page":"71-72","abstract":[{"lang":"eng","text":"Consider a distributed task where the communication network is fixed but the local inputs given to the nodes of the distributed system may change over time. In this work, we explore the following question: if some of the local inputs change, can an existing solution be updated efficiently, in a dynamic and distributed manner?\r\nTo address this question, we define the batch dynamic CONGEST model in which we are given a bandwidth-limited communication network and a dynamic edge labelling defines the problem input. The task is to maintain a solution to a graph problem on the labelled graph under batch changes. We investigate, when a batch of alpha edge label changes arrive, - how much time as a function of alpha we need to update an existing solution, and - how much information the nodes have to keep in local memory between batches in order to update the solution quickly.\r\nOur work lays the foundations for the theory of input-dynamic distributed network algorithms. We give a general picture of the complexity landscape in this model, design both universal algorithms and algorithms for concrete problems, and present a general framework for lower bounds. The diverse time complexity of our model spans from constant time, through time polynomial in alpha, and to alpha time, which we show to be enough for any task."}],"ec_funded":1,"date_created":"2022-03-18T08:48:41Z","department":[{"_id":"DaAl"}],"acknowledgement":"We thank Jukka Suomela for discussions. We also thank our shepherd Mohammad Hajiesmaili and the reviewers for their time and suggestions on how to improve the paper. This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), from the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska–Curie grant agreement No. 
840605, from the Vienna Science and Technology Fund (WWTF) project WHATIF, ICT19-045, 2020-2024, and from the Austrian Science Fund (FWF) and netIDEE SCIENCE project P 33775-N.","conference":{"location":"Virtual, Online","name":"SIGMETRICS: International Conference on Measurement and Modeling of Computer Systems","end_date":"2021-06-18","start_date":"2021-06-14"},"article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","month":"05","publication_identifier":{"isbn":["9781450380720"]},"quality_controlled":"1","doi":"10.1145/3410220.3453923","language":[{"iso":"eng"}],"year":"2021","publisher":"Association for Computing Machinery","type":"conference","author":[{"last_name":"Foerster","first_name":"Klaus-Tycho","full_name":"Foerster, Klaus-Tycho"},{"full_name":"Korhonen, Janne","first_name":"Janne","id":"C5402D42-15BC-11E9-A202-CA2BE6697425","last_name":"Korhonen"},{"last_name":"Paz","first_name":"Ami","full_name":"Paz, Ami"},{"first_name":"Joel","full_name":"Rybicki, Joel","last_name":"Rybicki","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-6432-6646"},{"first_name":"Stefan","full_name":"Schmid, Stefan","last_name":"Schmid"}],"_id":"10854","oa":1,"date_published":"2021-05-01T00:00:00Z"},{"article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","keyword":["Computer Networks and Communications","Hardware and Architecture","Safety","Risk","Reliability and Quality","Computer Science (miscellaneous)"],"intvolume":"         5","date_created":"2022-03-18T09:10:27Z","department":[{"_id":"DaAl"}],"acknowledgement":"We thank Jukka Suomela for discussions. We also thank our shepherd Mohammad Hajiesmaili\r\nand the reviewers for their time and suggestions on how to improve the paper. This project\r\nhas received funding from the European Research Council (ERC) under the European Union’s\r\nHorizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), from the European Union’s Horizon 2020 research and innovation programme under the Marie\r\nSk lodowska–Curie grant agreement No. 840605, from the Vienna Science and Technology Fund (WWTF) project WHATIF, ICT19-045, 2020-2024, and from the Austrian Science Fund (FWF) and netIDEE SCIENCE project P 33775-N.","article_type":"original","volume":5,"month":"03","quality_controlled":"1","publication_identifier":{"issn":["2476-1249"]},"type":"journal_article","language":[{"iso":"eng"}],"doi":"10.1145/3447384","publisher":"Association for Computing Machinery","year":"2021","date_published":"2021-03-01T00:00:00Z","author":[{"last_name":"Foerster","full_name":"Foerster, Klaus-Tycho","first_name":"Klaus-Tycho"},{"first_name":"Janne","full_name":"Korhonen, Janne","id":"C5402D42-15BC-11E9-A202-CA2BE6697425","last_name":"Korhonen"},{"last_name":"Paz","first_name":"Ami","full_name":"Paz, Ami"},{"full_name":"Rybicki, Joel","first_name":"Joel","id":"334EFD2E-F248-11E8-B48F-1D18A9856A87","last_name":"Rybicki","orcid":"0000-0002-6432-6646"},{"last_name":"Schmid","full_name":"Schmid, Stefan","first_name":"Stefan"}],"oa":1,"_id":"10855","issue":"1","scopus_import":"1","project":[{"name":"Coordination in constrained and natural distributed systems","_id":"26A5D39A-B435-11E9-9278-68D0E5697425","grant_number":"840605","call_identifier":"H2020"},{"grant_number":"805223","call_identifier":"H2020","name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425"}],"citation":{"ieee":"K.-T. Foerster, J. Korhonen, A. Paz, J. Rybicki, and S. 
Schmid, “Input-dynamic distributed algorithms for communication networks,” <i>Proceedings of the ACM on Measurement and Analysis of Computing Systems</i>, vol. 5, no. 1. Association for Computing Machinery, pp. 1–33, 2021.","chicago":"Foerster, Klaus-Tycho, Janne Korhonen, Ami Paz, Joel Rybicki, and Stefan Schmid. “Input-Dynamic Distributed Algorithms for Communication Networks.” <i>Proceedings of the ACM on Measurement and Analysis of Computing Systems</i>. Association for Computing Machinery, 2021. <a href=\"https://doi.org/10.1145/3447384\">https://doi.org/10.1145/3447384</a>.","ama":"Foerster K-T, Korhonen J, Paz A, Rybicki J, Schmid S. Input-dynamic distributed algorithms for communication networks. <i>Proceedings of the ACM on Measurement and Analysis of Computing Systems</i>. 2021;5(1):1-33. doi:<a href=\"https://doi.org/10.1145/3447384\">10.1145/3447384</a>","short":"K.-T. Foerster, J. Korhonen, A. Paz, J. Rybicki, S. Schmid, Proceedings of the ACM on Measurement and Analysis of Computing Systems 5 (2021) 1–33.","ista":"Foerster K-T, Korhonen J, Paz A, Rybicki J, Schmid S. 2021. Input-dynamic distributed algorithms for communication networks. Proceedings of the ACM on Measurement and Analysis of Computing Systems. 5(1), 1–33.","mla":"Foerster, Klaus-Tycho, et al. “Input-Dynamic Distributed Algorithms for Communication Networks.” <i>Proceedings of the ACM on Measurement and Analysis of Computing Systems</i>, vol. 5, no. 1, Association for Computing Machinery, 2021, pp. 1–33, doi:<a href=\"https://doi.org/10.1145/3447384\">10.1145/3447384</a>.","apa":"Foerster, K.-T., Korhonen, J., Paz, A., Rybicki, J., &#38; Schmid, S. (2021). Input-dynamic distributed algorithms for communication networks. <i>Proceedings of the ACM on Measurement and Analysis of Computing Systems</i>. Association for Computing Machinery. <a href=\"https://doi.org/10.1145/3447384\">https://doi.org/10.1145/3447384</a>"},"arxiv":1,"publication":"Proceedings of the ACM on Measurement and Analysis of Computing Systems","related_material":{"record":[{"relation":"shorter_version","id":"10854","status":"public"}]},"external_id":{"arxiv":["2005.07637"]},"title":"Input-dynamic distributed algorithms for communication networks","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2005.07637"}],"status":"public","date_updated":"2023-09-26T10:40:55Z","oa_version":"Preprint","publication_status":"published","page":"1-33","day":"01","ec_funded":1,"abstract":[{"text":"Consider a distributed task where the communication network is fixed but the local inputs given to the nodes of the distributed system may change over time. In this work, we explore the following question: if some of the local inputs change, can an existing solution be updated efficiently, in a dynamic and distributed manner? To address this question, we define the batch dynamic CONGEST model in which we are given a bandwidth-limited communication network and a dynamic edge labelling defines the problem input. The task is to maintain a solution to a graph problem on the labeled graph under batch changes. We investigate, when a batch of α edge label changes arrive, (i) how much time as a function of α we need to update an existing solution, and (ii) how much information the nodes have to keep in local memory between batches in order to update the solution quickly. Our work lays the foundations for the theory of input-dynamic distributed network algorithms. 
We give a general picture of the complexity landscape in this model, design both universal algorithms and algorithms for concrete problems, and present a general framework for lower bounds. In particular, we derive non-trivial upper bounds for two selected, contrasting problems: maintaining a minimum spanning tree and detecting cliques.","lang":"eng"}]},{"conference":{"location":"Virtual, Online","end_date":"2021-02-09","start_date":"2021-02-02","name":"AAAI: Conference on Artificial Intelligence"},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","intvolume":"        35","date_created":"2022-06-05T22:01:52Z","acknowledgement":"Vyacheslav Kungurtsev was supported by the OP VVV project CZ.02.1.01/0.0/0.0/16 019/0000765 “Research Center for Informatics. Bapi Chatterjee was supported by the European Union’s Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No. 754411 (ISTPlus). Dan Alistarh has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML).","department":[{"_id":"DaAl"}],"volume":35,"month":"05","publication_identifier":{"isbn":["9781713835974"],"issn":["2159-5399"],"eissn":["2374-3468"]},"quality_controlled":"1","type":"conference","language":[{"iso":"eng"}],"year":"2021","publisher":"AAAI Press","date_published":"2021-05-18T00:00:00Z","author":[{"full_name":"Kungurtsev, Vyacheslav","first_name":"Vyacheslav","last_name":"Kungurtsev"},{"last_name":"Egan","full_name":"Egan, Malcolm","first_name":"Malcolm"},{"full_name":"Chatterjee, Bapi","first_name":"Bapi","last_name":"Chatterjee","id":"3C41A08A-F248-11E8-B48F-1D18A9856A87"},{"orcid":"0000-0003-3650-940X","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian"}],"_id":"11436","oa":1,"scopus_import":"1","issue":"9B","arxiv":1,"citation":{"ama":"Kungurtsev V, Egan M, Chatterjee B, Alistarh D-A. Asynchronous optimization methods for efficient training of deep neural networks with guarantees. In: <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>. Vol 35. AAAI Press; 2021:8209-8216.","mla":"Kungurtsev, Vyacheslav, et al. “Asynchronous Optimization Methods for Efficient Training of Deep Neural Networks with Guarantees.” <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>, vol. 35, no. 9B, AAAI Press, 2021, pp. 8209–16.","apa":"Kungurtsev, V., Egan, M., Chatterjee, B., &#38; Alistarh, D.-A. (2021). Asynchronous optimization methods for efficient training of deep neural networks with guarantees. In <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i> (Vol. 35, pp. 8209–8216). Virtual, Online: AAAI Press.","short":"V. Kungurtsev, M. Egan, B. Chatterjee, D.-A. Alistarh, in:, 35th AAAI Conference on Artificial Intelligence, AAAI 2021, AAAI Press, 2021, pp. 8209–8216.","ista":"Kungurtsev V, Egan M, Chatterjee B, Alistarh D-A. 2021. Asynchronous optimization methods for efficient training of deep neural networks with guarantees. 35th AAAI Conference on Artificial Intelligence, AAAI 2021. AAAI: Conference on Artificial Intelligence vol. 35, 8209–8216.","ieee":"V. Kungurtsev, M. Egan, B. Chatterjee, and D.-A. Alistarh, “Asynchronous optimization methods for efficient training of deep neural networks with guarantees,” in <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>, Virtual, Online, 2021, vol. 35, no. 9B, pp. 
8209–8216.","chicago":"Kungurtsev, Vyacheslav, Malcolm Egan, Bapi Chatterjee, and Dan-Adrian Alistarh. “Asynchronous Optimization Methods for Efficient Training of Deep Neural Networks with Guarantees.” In <i>35th AAAI Conference on Artificial Intelligence, AAAI 2021</i>, 35:8209–16. AAAI Press, 2021."},"project":[{"call_identifier":"H2020","grant_number":"754411","name":"ISTplus - Postdoctoral Fellowships","_id":"260C2330-B435-11E9-9278-68D0E5697425"},{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","call_identifier":"H2020","grant_number":"805223"}],"publication":"35th AAAI Conference on Artificial Intelligence, AAAI 2021","title":"Asynchronous optimization methods for efficient training of deep neural networks with guarantees","external_id":{"arxiv":["1905.11845"]},"main_file_link":[{"open_access":"1","url":" https://doi.org/10.48550/arXiv.1905.11845"}],"status":"public","date_updated":"2022-06-07T06:53:36Z","day":"18","oa_version":"Preprint","page":"8209-8216","publication_status":"published","abstract":[{"lang":"eng","text":"Asynchronous distributed algorithms are a popular way to reduce synchronization costs in large-scale optimization, and in particular for neural network training. However, for nonsmooth and nonconvex objectives, few convergence guarantees exist beyond cases where closed-form proximal operator solutions are available. As training most popular deep neural networks corresponds to optimizing nonsmooth and nonconvex objectives, there is a pressing need for such convergence guarantees. In this paper, we analyze for the first time the convergence of stochastic asynchronous optimization for this general class of objectives. In particular, we focus on stochastic subgradient methods allowing for block variable partitioning, where the shared model is asynchronously updated by concurrent processes. To this end, we use a probabilistic model which captures key features of real asynchronous scheduling between concurrent processes. Under this model, we establish convergence with probability one to an invariant set for stochastic subgradient methods with momentum. From a practical perspective, one issue with the family of algorithms that we consider is that they are not efficiently supported by machine learning frameworks, which mostly focus on distributed data-parallel strategies. To address this, we propose a new implementation strategy for shared-memory based training of deep neural networks for a partitioned but shared model in single- and multi-GPU settings. Based on this implementation, we achieve on average1.2x speed-up in comparison to state-of-the-art training methods for popular image classification tasks, without compromising accuracy."}],"ec_funded":1},{"status":"public","date_updated":"2022-06-20T08:31:52Z","abstract":[{"lang":"eng","text":"We study efficient distributed algorithms for the fundamental problem of principal component analysis and leading eigenvector computation on the sphere, when the data are randomly distributed among a set of computational nodes. We propose a new quantized variant of Riemannian gradient descent to solve this problem, and prove that the algorithm converges with high probability under a set of necessary spherical-convexity properties. 
We give bounds on the number of bits transmitted by the algorithm under common initialization schemes, and investigate the dependency on the problem dimension in each case."}],"ec_funded":1,"day":"01","oa_version":"Published Version","page":"2823-2834","publication_status":"published","scopus_import":"1","citation":{"chicago":"Alimisis, Foivos, Peter Davies, Bart Vandereycken, and Dan-Adrian Alistarh. “Distributed Principal Component Analysis with Limited Communication.” In <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>, 4:2823–34. Neural Information Processing Systems Foundation, 2021.","ieee":"F. Alimisis, P. Davies, B. Vandereycken, and D.-A. Alistarh, “Distributed principal component analysis with limited communication,” in <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>, Virtual, Online, 2021, vol. 4, pp. 2823–2834.","short":"F. Alimisis, P. Davies, B. Vandereycken, D.-A. Alistarh, in:, Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2021, pp. 2823–2834.","apa":"Alimisis, F., Davies, P., Vandereycken, B., &#38; Alistarh, D.-A. (2021). Distributed principal component analysis with limited communication. In <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i> (Vol. 4, pp. 2823–2834). Virtual, Online: Neural Information Processing Systems Foundation.","ista":"Alimisis F, Davies P, Vandereycken B, Alistarh D-A. 2021. Distributed principal component analysis with limited communication. Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 4, 2823–2834.","mla":"Alimisis, Foivos, et al. “Distributed Principal Component Analysis with Limited Communication.” <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>, vol. 4, Neural Information Processing Systems Foundation, 2021, pp. 2823–34.","ama":"Alimisis F, Davies P, Vandereycken B, Alistarh D-A. Distributed principal component analysis with limited communication. In: <i>Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems</i>. Vol 4. 
Neural Information Processing Systems Foundation; 2021:2823-2834."},"arxiv":1,"project":[{"name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"805223"},{"grant_number":"754411","call_identifier":"H2020","name":"ISTplus - Postdoctoral Fellowships","_id":"260C2330-B435-11E9-9278-68D0E5697425"}],"title":"Distributed principal component analysis with limited communication","external_id":{"arxiv":["2110.14391"]},"publication":"Advances in Neural Information Processing Systems - 35th Conference on Neural Information Processing Systems","main_file_link":[{"open_access":"1","url":"https://proceedings.neurips.cc/paper/2021/file/1680e9fa7b4dd5d62ece800239bb53bd-Paper.pdf"}],"type":"conference","year":"2021","publisher":"Neural Information Processing Systems Foundation","language":[{"iso":"eng"}],"date_published":"2021-12-01T00:00:00Z","_id":"11452","oa":1,"author":[{"last_name":"Alimisis","full_name":"Alimisis, Foivos","first_name":"Foivos"},{"full_name":"Davies, Peter","first_name":"Peter","orcid":"0000-0002-5646-9524","last_name":"Davies","id":"11396234-BB50-11E9-B24C-90FCE5697425"},{"full_name":"Vandereycken, Bart","first_name":"Bart","last_name":"Vandereycken"},{"first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","orcid":"0000-0003-3650-940X","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"}],"intvolume":"         4","conference":{"location":"Virtual, Online","end_date":"2021-12-14","start_date":"2021-12-06","name":"NeurIPS: Neural Information Processing Systems"},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","department":[{"_id":"DaAl"}],"acknowledgement":"We would like to thank the anonymous reviewers for helpful comments and suggestions. We also thank Aurelien Lucchi and Antonio Orvieto for fruitful discussions at an early stage of this work. FA is partially supported by the SNSF under research project No. 192363 and conducted part of this work while at IST Austria under the European Union’s Horizon 2020 research and innovation programme (grant agreement No. 805223 ScaleML). PD partly conducted this work while at IST Austria and was supported by the European Union’s Horizon 2020 programme under the Marie Skłodowska-Curie grant agreement No. 754411.","date_created":"2022-06-19T22:01:58Z","volume":4,"publication_identifier":{"issn":["1049-5258"],"isbn":["9781713845393"]},"quality_controlled":"1","month":"12"},{"quality_controlled":"1","publication_identifier":{"issn":["1049-5258"],"isbn":["9781713845393"]},"month":"12","volume":34,"department":[{"_id":"GradSch"},{"_id":"DaAl"}],"acknowledgement":"This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), and a CNRS PEPS grant. This research was supported by the Scientific Service Units (SSU) of IST Austria through resources provided by Scientific Computing (SciComp). 
We would also like to thank Christoph Lampert for his feedback on an earlier version of this work, as well as for providing hardware for the Transformer-XL experiments.","date_created":"2022-06-20T12:11:53Z","intvolume":"        34","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","conference":{"location":"Virtual, Online","end_date":"2021-12-14","start_date":"2021-12-06","name":"NeurIPS: Neural Information Processing Systems"},"oa":1,"_id":"11458","author":[{"last_name":"Peste","id":"32D78294-F248-11E8-B48F-1D18A9856A87","full_name":"Peste, Elena-Alexandra","first_name":"Elena-Alexandra"},{"id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","last_name":"Iofinova","orcid":"0000-0002-7778-3221","first_name":"Eugenia B","full_name":"Iofinova, Eugenia B"},{"first_name":"Adrian","full_name":"Vladu, Adrian","last_name":"Vladu"},{"first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X"}],"date_published":"2021-12-06T00:00:00Z","publisher":"Curran Associates","year":"2021","language":[{"iso":"eng"}],"type":"conference","main_file_link":[{"open_access":"1","url":"https://proceedings.neurips.cc/paper/2021/file/48000647b315f6f00f913caa757a70b3-Paper.pdf"}],"external_id":{"arxiv":["2106.12379"]},"related_material":{"record":[{"relation":"dissertation_contains","id":"13074","status":"public"}]},"title":"AC/DC: Alternating Compressed/DeCompressed training of deep neural networks","publication":"35th Conference on Neural Information Processing Systems","project":[{"grant_number":"805223","call_identifier":"H2020","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning"}],"arxiv":1,"citation":{"ama":"Peste E-A, Iofinova EB, Vladu A, Alistarh D-A. AC/DC: Alternating Compressed/DeCompressed training of deep neural networks. In: <i>35th Conference on Neural Information Processing Systems</i>. Vol 34. Curran Associates; 2021:8557-8570.","mla":"Peste, Elena-Alexandra, et al. “AC/DC: Alternating Compressed/DeCompressed Training of Deep Neural Networks.” <i>35th Conference on Neural Information Processing Systems</i>, vol. 34, Curran Associates, 2021, pp. 8557–70.","apa":"Peste, E.-A., Iofinova, E. B., Vladu, A., &#38; Alistarh, D.-A. (2021). AC/DC: Alternating Compressed/DeCompressed training of deep neural networks. In <i>35th Conference on Neural Information Processing Systems</i> (Vol. 34, pp. 8557–8570). Virtual, Online: Curran Associates.","short":"E.-A. Peste, E.B. Iofinova, A. Vladu, D.-A. Alistarh, in:, 35th Conference on Neural Information Processing Systems, Curran Associates, 2021, pp. 8557–8570.","ista":"Peste E-A, Iofinova EB, Vladu A, Alistarh D-A. 2021. AC/DC: Alternating Compressed/DeCompressed training of deep neural networks. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 34, 8557–8570.","ieee":"E.-A. Peste, E. B. Iofinova, A. Vladu, and D.-A. Alistarh, “AC/DC: Alternating Compressed/DeCompressed training of deep neural networks,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, Online, 2021, vol. 34, pp. 8557–8570.","chicago":"Peste, Elena-Alexandra, Eugenia B Iofinova, Adrian Vladu, and Dan-Adrian Alistarh. “AC/DC: Alternating Compressed/DeCompressed Training of Deep Neural Networks.” In <i>35th Conference on Neural Information Processing Systems</i>, 34:8557–70. 
Curran Associates, 2021."},"scopus_import":"1","ec_funded":1,"abstract":[{"text":"The increasing computational requirements of deep neural networks (DNNs) have led to significant interest in obtaining DNN models that are sparse, yet accurate. Recent work has investigated the even harder case of sparse training, where the DNN weights are, for as much as possible, already sparse to reduce computational costs during training. Existing sparse training methods are often empirical and can have lower accuracy relative to the dense baseline. In this paper, we present a general approach called Alternating Compressed/DeCompressed (AC/DC) training of DNNs, demonstrate convergence for a variant of the algorithm, and show that AC/DC outperforms existing sparse training methods in accuracy at similar computational budgets; at high sparsity levels, AC/DC even outperforms existing methods that rely on accurate pre-trained dense models. An important property of AC/DC is that it allows co-training of dense and sparse models, yielding accurate sparse–dense model pairs at the end of the training process. This is useful in practice, where compressed variants may be desirable for deployment in resource-constrained settings without re-doing the entire training flow, and also provides us with insights into the accuracy gap between dense and compressed models. The code is available at: https://github.com/IST-DASLab/ACDC.","lang":"eng"}],"oa_version":"Published Version","publication_status":"published","page":"8557-8570","day":"6","date_updated":"2023-06-01T12:54:45Z","acknowledged_ssus":[{"_id":"ScienComp"}],"status":"public"},{"volume":34,"month":"12","publication_identifier":{"isbn":["9781713845393"],"issn":["1049-5258"]},"quality_controlled":"1","conference":{"name":"NeurIPS: Neural Information Processing Systems","end_date":"2021-12-14","start_date":"2021-12-06","location":"Virtual, Online"},"article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","intvolume":"        34","date_created":"2022-06-26T22:01:35Z","department":[{"_id":"DaAl"}],"acknowledgement":"We gratefully acknowledge funding the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), as well as computational support from Amazon Web Services (AWS) EC2.","date_published":"2021-12-06T00:00:00Z","author":[{"last_name":"Frantar","id":"09a8f98d-ec99-11ea-ae11-c063a7b7fe5f","full_name":"Frantar, Elias","first_name":"Elias"},{"full_name":"Kurtic, Eldar","first_name":"Eldar","last_name":"Kurtic","id":"47beb3a5-07b5-11eb-9b87-b108ec578218"},{"first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X"}],"_id":"11463","oa":1,"type":"conference","language":[{"iso":"eng"}],"year":"2021","publisher":"Curran Associates","publication":"35th Conference on Neural Information Processing Systems","title":"M-FAC: Efficient matrix-free approximations of second-order information","external_id":{"arxiv":["2010.08222"]},"main_file_link":[{"open_access":"1","url":"https://proceedings.neurips.cc/paper/2021/file/7cfd5df443b4eb0d69886a583b33de4c-Paper.pdf"}],"scopus_import":"1","arxiv":1,"citation":{"ieee":"E. Frantar, E. Kurtic, and D.-A. Alistarh, “M-FAC: Efficient matrix-free approximations of second-order information,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, Online, 2021, vol. 34, pp. 
14873–14886.","chicago":"Frantar, Elias, Eldar Kurtic, and Dan-Adrian Alistarh. “M-FAC: Efficient Matrix-Free Approximations of Second-Order Information.” In <i>35th Conference on Neural Information Processing Systems</i>, 34:14873–86. Curran Associates, 2021.","ama":"Frantar E, Kurtic E, Alistarh D-A. M-FAC: Efficient matrix-free approximations of second-order information. In: <i>35th Conference on Neural Information Processing Systems</i>. Vol 34. Curran Associates; 2021:14873-14886.","mla":"Frantar, Elias, et al. “M-FAC: Efficient Matrix-Free Approximations of Second-Order Information.” <i>35th Conference on Neural Information Processing Systems</i>, vol. 34, Curran Associates, 2021, pp. 14873–86.","apa":"Frantar, E., Kurtic, E., &#38; Alistarh, D.-A. (2021). M-FAC: Efficient matrix-free approximations of second-order information. In <i>35th Conference on Neural Information Processing Systems</i> (Vol. 34, pp. 14873–14886). Virtual, Online: Curran Associates.","ista":"Frantar E, Kurtic E, Alistarh D-A. 2021. M-FAC: Efficient matrix-free approximations of second-order information. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 34, 14873–14886.","short":"E. Frantar, E. Kurtic, D.-A. Alistarh, in:, 35th Conference on Neural Information Processing Systems, Curran Associates, 2021, pp. 14873–14886."},"project":[{"name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425","grant_number":"805223","call_identifier":"H2020"}],"day":"06","publication_status":"published","oa_version":"Published Version","page":"14873-14886","abstract":[{"text":"Efficiently approximating local curvature information of the loss function is a key tool for optimization and compression of deep neural networks. Yet, most existing methods to approximate second-order information have high computational\r\nor storage costs, which limits their practicality. In this work, we investigate matrix-free, linear-time approaches for estimating Inverse-Hessian Vector Products (IHVPs) for the case when the Hessian can be approximated as a sum of rank-one matrices, as in the classic approximation of the Hessian by the empirical Fisher matrix. We propose two new algorithms: the first is tailored towards network compression and can compute the IHVP for dimension d, if the Hessian is given as a sum of m rank-one matrices, using O(dm^2) precomputation, O(dm) cost for computing the IHVP, and query cost O(m) for any single element of the inverse Hessian. The second algorithm targets an optimization setting, where we wish to compute the product between the inverse Hessian, estimated over a sliding window of optimization steps, and a given gradient direction, as required for preconditioned SGD. We give an algorithm with cost O(dm + m^2) for computing the IHVP and O(dm + m^3) for adding or removing any gradient from the sliding window. These\r\ntwo algorithms yield state-of-the-art results for network pruning and optimization with lower computational overhead relative to existing second-order methods. 
Implementations are available at [9] and [17].","lang":"eng"}],"ec_funded":1},{"status":"public","date_updated":"2022-06-27T06:54:31Z","page":"7254-7266","publication_status":"published","oa_version":"Published Version","day":"06","ec_funded":1,"abstract":[{"text":"We consider a standard distributed optimisation setting where N machines, each holding a d-dimensional function\r\nf_i, aim to jointly minimise the sum of the functions ∑_{i=1}^N f_i(x). This problem arises naturally in large-scale distributed optimisation, where a standard solution is to apply variants of (stochastic) gradient descent. We focus on the communication complexity of this problem: our main result provides the first fully unconditional bounds on total number of bits which need to be sent and received by the N machines to solve this problem under point-to-point communication, within a given error-tolerance. Specifically, we show that Ω(Nd log d/Nε) total bits need to be communicated between the machines to find an additive ϵ-approximation to the minimum of ∑_{i=1}^N f_i(x). The result holds for both deterministic and randomised algorithms, and, importantly, requires no assumptions on the algorithm structure. The lower bound is tight under certain restrictions on parameter values, and is matched within constant factors for quadratic objectives by a new variant of quantised gradient descent, which we describe and analyse. Our results bring over tools from communication complexity to distributed optimisation, which has potential for further applications.","lang":"eng"}],"scopus_import":"1","project":[{"call_identifier":"H2020","grant_number":"805223","_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning"}],"citation":{"ama":"Alistarh D-A, Korhonen J. Towards tight communication lower bounds for distributed optimisation. In: <i>35th Conference on Neural Information Processing Systems</i>. Vol 34. Curran Associates; 2021:7254-7266.","ista":"Alistarh D-A, Korhonen J. 2021. Towards tight communication lower bounds for distributed optimisation. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 34, 7254–7266.","apa":"Alistarh, D.-A., &#38; Korhonen, J. (2021). Towards tight communication lower bounds for distributed optimisation. In <i>35th Conference on Neural Information Processing Systems</i> (Vol. 34, pp. 7254–7266). Virtual, Online: Curran Associates.","short":"D.-A. Alistarh, J. Korhonen, in:, 35th Conference on Neural Information Processing Systems, Curran Associates, 2021, pp. 7254–7266.","mla":"Alistarh, Dan-Adrian, and Janne Korhonen. “Towards Tight Communication Lower Bounds for Distributed Optimisation.” <i>35th Conference on Neural Information Processing Systems</i>, vol. 34, Curran Associates, 2021, pp. 7254–66.","ieee":"D.-A. Alistarh and J. Korhonen, “Towards tight communication lower bounds for distributed optimisation,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, Online, 2021, vol. 34, pp. 7254–7266.","chicago":"Alistarh, Dan-Adrian, and Janne Korhonen. “Towards Tight Communication Lower Bounds for Distributed Optimisation.” In <i>35th Conference on Neural Information Processing Systems</i>, 34:7254–66. 
Curran Associates, 2021."},"arxiv":1,"publication":"35th Conference on Neural Information Processing Systems","external_id":{"arxiv":["2010.08222"]},"title":"Towards tight communication lower bounds for distributed optimisation","main_file_link":[{"open_access":"1","url":"https://proceedings.neurips.cc/paper/2021/file/3b92d18aa7a6176dd37d372bc2f1eb71-Paper.pdf"}],"type":"conference","language":[{"iso":"eng"}],"publisher":"Curran Associates","year":"2021","date_published":"2021-12-06T00:00:00Z","author":[{"last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian","full_name":"Alistarh, Dan-Adrian"},{"id":"C5402D42-15BC-11E9-A202-CA2BE6697425","last_name":"Korhonen","first_name":"Janne","full_name":"Korhonen, Janne"}],"oa":1,"_id":"11464","article_processing_charge":"No","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","conference":{"name":"NeurIPS: Neural Information Processing Systems","end_date":"2021-12-14","start_date":"2021-12-06","location":"Virtual, Online"},"intvolume":"        34","date_created":"2022-06-26T22:01:35Z","acknowledgement":"We thank the NeurIPS reviewers for insightful comments that helped us improve the positioning of our results, as well as for pointing out the subsampling approach for complementing the randomised lower bound. We also thank Foivos Alimisis and Peter Davies for useful discussions. This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML).","department":[{"_id":"DaAl"}],"volume":34,"month":"12","quality_controlled":"1","publication_identifier":{"isbn":["9781713845393"],"issn":["1049-5258"]}}]
