[{"year":"2023","external_id":{"arxiv":["2302.04852"]},"publication":"Proceedings of the 40th International Conference on Machine Learning","status":"public","project":[{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223","call_identifier":"H2020"}],"ec_funded":1,"conference":{"location":"Honolulu, Hawaii, HI, United States","name":"ICML: International Conference on Machine Learning","start_date":"2023-07-23","end_date":"2023-07-29"},"date_published":"2023-07-30T00:00:00Z","acknowledgement":"We would like to thank Elias Frantar for his valuable assistance and support at the outset of this project, and the anonymous ICML and SNN reviewers for very constructive feedback. EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35. DA acknowledges generous ERC support, via Starting Grant 805223 ScaleML. ","article_processing_charge":"No","alternative_title":["PMLR"],"publisher":"ML Research Press","_id":"14460","date_updated":"2023-10-31T09:33:51Z","type":"conference","page":"26215-26227","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2302.04852","open_access":"1"}],"quality_controlled":"1","arxiv":1,"month":"07","department":[{"_id":"DaAl"}],"oa":1,"language":[{"iso":"eng"}],"citation":{"ama":"Nikdan M, Pegolotti T, Iofinova EB, Kurtic E, Alistarh D-A. SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. In: <i>Proceedings of the 40th International Conference on Machine Learning</i>. Vol 202. ML Research Press; 2023:26215-26227.","short":"M. Nikdan, T. Pegolotti, E.B. Iofinova, E. Kurtic, D.-A. Alistarh, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 26215–26227.","ieee":"M. Nikdan, T. Pegolotti, E. B. Iofinova, E. Kurtic, and D.-A. Alistarh, “SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge,” in <i>Proceedings of the 40th International Conference on Machine Learning</i>, Honolulu, Hawaii, HI, United States, 2023, vol. 202, pp. 26215–26227.","ista":"Nikdan M, Pegolotti T, Iofinova EB, Kurtic E, Alistarh D-A. 2023. SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. Proceedings of the 40th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 202, 26215–26227.","chicago":"Nikdan, Mahdi, Tommaso Pegolotti, Eugenia B Iofinova, Eldar Kurtic, and Dan-Adrian Alistarh. “SparseProp: Efficient Sparse Backpropagation for Faster Training of Neural Networks at the Edge.” In <i>Proceedings of the 40th International Conference on Machine Learning</i>, 202:26215–27. ML Research Press, 2023.","mla":"Nikdan, Mahdi, et al. “SparseProp: Efficient Sparse Backpropagation for Faster Training of Neural Networks at the Edge.” <i>Proceedings of the 40th International Conference on Machine Learning</i>, vol. 202, ML Research Press, 2023, pp. 26215–27.","apa":"Nikdan, M., Pegolotti, T., Iofinova, E. B., Kurtic, E., &#38; Alistarh, D.-A. (2023). SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge. In <i>Proceedings of the 40th International Conference on Machine Learning</i> (Vol. 202, pp. 26215–26227). 
Honolulu, Hawaii, HI, United States: ML Research Press."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","day":"30","scopus_import":"1","author":[{"first_name":"Mahdi","id":"66374281-f394-11eb-9cf6-869147deecc0","full_name":"Nikdan, Mahdi","last_name":"Nikdan"},{"first_name":"Tommaso","last_name":"Pegolotti","full_name":"Pegolotti, Tommaso"},{"first_name":"Eugenia B","orcid":"0000-0002-7778-3221","last_name":"Iofinova","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","full_name":"Iofinova, Eugenia B"},{"first_name":"Eldar","last_name":"Kurtic","id":"47beb3a5-07b5-11eb-9b87-b108ec578218","full_name":"Kurtic, Eldar"},{"last_name":"Alistarh","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","full_name":"Alistarh, Dan-Adrian","first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X"}],"oa_version":"Preprint","title":"SparseProp: Efficient sparse backpropagation for faster training of neural networks at the edge","volume":202,"date_created":"2023-10-29T23:01:17Z","abstract":[{"text":"We provide an efficient implementation of the backpropagation algorithm, specialized to the case where the weights of the neural network being trained are sparse. Our algorithm is general, as it applies to arbitrary (unstructured) sparsity and common layer types (e.g., convolutional or linear). We provide a fast vectorized implementation on commodity CPUs, and show that it can yield speedups in end-to-end runtime experiments, both in transfer learning using already-sparsified networks, and in training sparse networks from scratch. Thus, our results provide the first support for sparse training on commodity hardware.","lang":"eng"}],"intvolume":"       202","publication_identifier":{"eissn":["2640-3498"]},"publication_status":"published"},{"abstract":[{"lang":"eng","text":"Pruning—that is, setting a significant subset of the parameters of a neural network to zero—is one of the most popular methods of model compression. Yet, several recent works have raised the issue that pruning may induce or exacerbate bias in the output of the compressed model. Despite existing evidence for this phenomenon, the relationship between neural network pruning and induced bias is not well-understood. In this work, we systematically investigate and characterize this phenomenon in Convolutional Neural Networks for computer vision. First, we show that it is in fact possible to obtain highly-sparse models, e.g. with less than 10% remaining weights, which do not decrease in accuracy nor substantially increase in bias when compared to dense models. At the same time, we also find that, at higher sparsities, pruned models exhibit higher uncertainty in their outputs, as well as increased correlations, which we directly link to increased bias. We propose easy-to-use criteria which, based only on the uncompressed model, establish whether bias will increase with pruning, and identify the samples most susceptible to biased predictions post-compression. 
Our code can be found at https://github.com/IST-DASLab/pruned-vision-model-bias."}],"publication_identifier":{"eissn":["2575-7075"],"eisbn":["9798350301298"]},"publication_status":"published","day":"22","author":[{"id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","full_name":"Iofinova, Eugenia B","last_name":"Iofinova","first_name":"Eugenia B","orcid":"0000-0002-7778-3221"},{"full_name":"Peste, Elena-Alexandra","id":"32D78294-F248-11E8-B48F-1D18A9856A87","last_name":"Peste","first_name":"Elena-Alexandra"},{"first_name":"Dan-Adrian","orcid":"0000-0003-3650-940X","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh"}],"oa_version":"Preprint","title":"Bias in pruned vision models: In-depth analysis and countermeasures","date_created":"2024-01-10T08:42:40Z","oa":1,"language":[{"iso":"eng"}],"citation":{"chicago":"Iofinova, Eugenia B, Elena-Alexandra Peste, and Dan-Adrian Alistarh. “Bias in Pruned Vision Models: In-Depth Analysis and Countermeasures.” In <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 24364–73. IEEE, 2023. <a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">https://doi.org/10.1109/cvpr52729.2023.02334</a>.","ista":"Iofinova EB, Peste E-A, Alistarh D-A. 2023. Bias in pruned vision models: In-depth analysis and countermeasures. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Conference on Computer Vision and Pattern Recognition, 24364–24373.","apa":"Iofinova, E. B., Peste, E.-A., &#38; Alistarh, D.-A. (2023). Bias in pruned vision models: In-depth analysis and countermeasures. In <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 24364–24373). Vancouver, BC, Canada: IEEE. <a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">https://doi.org/10.1109/cvpr52729.2023.02334</a>","mla":"Iofinova, Eugenia B., et al. “Bias in Pruned Vision Models: In-Depth Analysis and Countermeasures.” <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, IEEE, 2023, pp. 24364–73, doi:<a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">10.1109/cvpr52729.2023.02334</a>.","ama":"Iofinova EB, Peste E-A, Alistarh D-A. Bias in pruned vision models: In-depth analysis and countermeasures. In: <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. IEEE; 2023:24364-24373. doi:<a href=\"https://doi.org/10.1109/cvpr52729.2023.02334\">10.1109/cvpr52729.2023.02334</a>","ieee":"E. B. Iofinova, E.-A. Peste, and D.-A. Alistarh, “Bias in pruned vision models: In-depth analysis and countermeasures,” in <i>2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Vancouver, BC, Canada, 2023, pp. 24364–24373.","short":"E.B. Iofinova, E.-A. Peste, D.-A. Alistarh, in:, 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition, IEEE, 2023, pp. 
24364–24373."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","month":"08","arxiv":1,"department":[{"_id":"DaAl"},{"_id":"ChLa"}],"page":"24364-24373","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2304.12622","open_access":"1"}],"quality_controlled":"1","article_processing_charge":"No","doi":"10.1109/cvpr52729.2023.02334","publisher":"IEEE","_id":"14771","date_updated":"2024-01-10T08:59:26Z","type":"conference","publication":"2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition","status":"public","project":[{"_id":"9B9290DE-BA93-11EA-9121-9846C619BF3A","grant_number":" W1260-N35","name":"Vienna Graduate School on Computational Optimization"},{"call_identifier":"H2020","grant_number":"805223","name":"Elastic Coordination for Scalable Machine Learning","_id":"268A44D6-B435-11E9-9278-68D0E5697425"}],"ec_funded":1,"date_published":"2023-08-22T00:00:00Z","acknowledgement":"The authors would like to sincerely thank Sara Hooker for her feedback during the development of this work. EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35. AP and DA acknowledge generous ERC support, via Starting Grant 805223 ScaleML.","conference":{"location":"Vancouver, BC, Canada","end_date":"2023-06-24","start_date":"2023-06-17","name":"CVPR: Conference on Computer Vision and Pattern Recognition"},"isi":1,"year":"2023","related_material":{"link":[{"relation":"software","url":"https://github.com/IST-DASLab/pruned-vision-model-bias"}]},"external_id":{"arxiv":["2304.12622"],"isi":["001062531308068"]}},{"type":"conference","_id":"12299","date_updated":"2023-08-04T10:33:28Z","publisher":"Institute of Electrical and Electronics Engineers","article_processing_charge":"No","doi":"10.1109/cvpr52688.2022.01195","quality_controlled":"1","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2111.13445","open_access":"1"}],"page":"12256-12266","related_material":{"record":[{"id":"13074","status":"public","relation":"dissertation_contains"}]},"external_id":{"arxiv":["2111.13445"],"isi":["000870759105034"]},"year":"2022","isi":1,"acknowledgement":"he authors would like to sincerely thank Christoph Lampert and Nir Shavit for fruitful discussions during the development of this work, and Eldar Kurtic for experimental support. 
EI was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35, while AP and DA acknowledge generous support by the ERC, via Starting Grant 805223 ScaleML.","date_published":"2022-09-27T00:00:00Z","conference":{"location":"New Orleans, LA, United States","end_date":"2022-06-24","start_date":"2022-06-18","name":"CVPR: Computer Vision and Pattern Recognition"},"ec_funded":1,"status":"public","publication":"2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition","project":[{"grant_number":" W1260-N35","name":"Vienna Graduate School on Computational Optimization","_id":"9B9290DE-BA93-11EA-9121-9846C619BF3A"},{"_id":"268A44D6-B435-11E9-9278-68D0E5697425","call_identifier":"H2020","grant_number":"805223","name":"Elastic Coordination for Scalable Machine Learning"}],"date_created":"2023-01-16T10:06:00Z","title":"How well do sparse ImageNet models transfer?","oa_version":"Preprint","scopus_import":"1","day":"27","author":[{"orcid":"0000-0002-7778-3221","first_name":"Eugenia B","last_name":"Iofinova","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","full_name":"Iofinova, Eugenia B"},{"last_name":"Peste","id":"32D78294-F248-11E8-B48F-1D18A9856A87","full_name":"Peste, Elena-Alexandra","first_name":"Elena-Alexandra"},{"first_name":"Mark","last_name":"Kurtz","full_name":"Kurtz, Mark"},{"id":"4A899BFC-F248-11E8-B48F-1D18A9856A87","full_name":"Alistarh, Dan-Adrian","last_name":"Alistarh","orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian"}],"publication_status":"published","publication_identifier":{"eissn":["2575-7075"]},"abstract":[{"text":"Transfer learning is a classic paradigm by which models pretrained on large “upstream” datasets are adapted to yield good results on “downstream” specialized datasets. Generally, more accurate models on the “upstream” dataset tend to provide better transfer accuracy “downstream”. In this work, we perform an in-depth investigation of this phenomenon in the context of convolutional neural networks (CNNs) trained on the ImageNet dataset, which have been pruned-that is, compressed by sparsifiying their connections. We consider transfer using unstructured pruned models obtained by applying several state-of-the-art pruning methods, including magnitude-based, second-order, regrowth, lottery-ticket, and regularization approaches, in the context of twelve standard transfer tasks. In a nutshell, our study shows that sparse models can match or even outperform the transfer performance of dense models, even at high sparsities, and, while doing so, can lead to significant inference and even training speedups. At the same time, we observe and analyze significant differences in the behaviour of different pruning methods. The code is available at: https://github.com/IST-DASLab/sparse-imagenet-transfer.","lang":"eng"}],"department":[{"_id":"DaAl"},{"_id":"ChLa"}],"arxiv":1,"month":"09","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","citation":{"ama":"Iofinova EB, Peste E-A, Kurtz M, Alistarh D-A. How well do sparse ImageNet models transfer? In: <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. Institute of Electrical and Electronics Engineers; 2022:12256-12266. doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">10.1109/cvpr52688.2022.01195</a>","short":"E.B. Iofinova, E.-A. Peste, M. Kurtz, D.-A. Alistarh, in:, 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 12256–12266.","ieee":"E. B. Iofinova, E.-A. Peste, M. Kurtz, and D.-A. 
Alistarh, “How well do sparse ImageNet models transfer?,” in <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, New Orleans, LA, United States, 2022, pp. 12256–12266.","ista":"Iofinova EB, Peste E-A, Kurtz M, Alistarh D-A. 2022. How well do sparse ImageNet models transfer? 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Computer Vision and Pattern Recognition, 12256–12266.","chicago":"Iofinova, Eugenia B, Elena-Alexandra Peste, Mark Kurtz, and Dan-Adrian Alistarh. “How Well Do Sparse ImageNet Models Transfer?” In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 12256–66. Institute of Electrical and Electronics Engineers, 2022. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">https://doi.org/10.1109/cvpr52688.2022.01195</a>.","mla":"Iofinova, Eugenia B., et al. “How Well Do Sparse ImageNet Models Transfer?” <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Institute of Electrical and Electronics Engineers, 2022, pp. 12256–66, doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">10.1109/cvpr52688.2022.01195</a>.","apa":"Iofinova, E. B., Peste, E.-A., Kurtz, M., &#38; Alistarh, D.-A. (2022). How well do sparse ImageNet models transfer? In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 12256–12266). New Orleans, LA, United States: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01195\">https://doi.org/10.1109/cvpr52688.2022.01195</a>"},"language":[{"iso":"eng"}],"oa":1},{"oa_version":"Published Version","title":"FLEA: Provably robust fair multisource learning from unreliable training data","author":[{"orcid":"0000-0002-7778-3221","first_name":"Eugenia B","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","full_name":"Iofinova, Eugenia B","last_name":"Iofinova"},{"first_name":"Nikola H","id":"4B9D76E4-F248-11E8-B48F-1D18A9856A87","full_name":"Konstantinov, Nikola H","last_name":"Konstantinov"},{"last_name":"Lampert","id":"40C20FD2-F248-11E8-B48F-1D18A9856A87","full_name":"Lampert, Christoph","first_name":"Christoph","orcid":"0000-0001-8622-7887"}],"day":"22","article_type":"original","date_created":"2023-02-02T20:29:57Z","abstract":[{"text":"Fairness-aware learning aims at constructing classifiers that not only make accurate predictions, but also do not discriminate against specific groups. It is a fast-growing area of\r\nmachine learning with far-reaching societal impact. However, existing fair learning methods\r\nare vulnerable to accidental or malicious artifacts in the training data, which can cause\r\nthem to unknowingly produce unfair classifiers. In this work we address the problem of\r\nfair learning from unreliable training data in the robust multisource setting, where the\r\navailable training data comes from multiple sources, a fraction of which might not be representative of the true data distribution. We introduce FLEA, a filtering-based algorithm\r\nthat identifies and suppresses those data sources that would have a negative impact on\r\nfairness or accuracy if they were used for training. As such, FLEA is not a replacement of\r\nprior fairness-aware learning methods but rather an augmentation that makes any of them\r\nrobust against unreliable training data. We show the effectiveness of our approach by a\r\ndiverse range of experiments on multiple datasets. 
Additionally, we prove formally that\r\n–given enough data– FLEA protects the learner against corruptions as long as the fraction of\r\naffected data sources is less than half. Our source code and documentation are available at\r\nhttps://github.com/ISTAustria-CVML/FLEA.","lang":"eng"}],"tmp":{"name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","image":"/images/cc_by.png","short":"CC BY (4.0)"},"acknowledged_ssus":[{"_id":"ScienComp"}],"has_accepted_license":"1","publication_identifier":{"issn":["2835-8856"]},"publication_status":"published","file_date_updated":"2023-02-23T10:30:04Z","month":"12","arxiv":1,"file":[{"file_name":"2022_TMLR_Iofinova.pdf","success":1,"content_type":"application/pdf","access_level":"open_access","relation":"main_file","checksum":"97c8a8470759cab597abb973ca137a3b","file_size":1948063,"date_created":"2023-02-23T10:30:04Z","creator":"dernst","date_updated":"2023-02-23T10:30:04Z","file_id":"12673"}],"department":[{"_id":"ChLa"}],"language":[{"iso":"eng"}],"oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","citation":{"ama":"Iofinova EB, Konstantinov NH, Lampert C. FLEA: Provably robust fair multisource learning from unreliable training data. <i>Transactions on Machine Learning Research</i>. 2022.","short":"E.B. Iofinova, N.H. Konstantinov, C. Lampert, Transactions on Machine Learning Research (2022).","ieee":"E. B. Iofinova, N. H. Konstantinov, and C. Lampert, “FLEA: Provably robust fair multisource learning from unreliable training data,” <i>Transactions on Machine Learning Research</i>. ML Research Press, 2022.","ista":"Iofinova EB, Konstantinov NH, Lampert C. 2022. FLEA: Provably robust fair multisource learning from unreliable training data. Transactions on Machine Learning Research.","chicago":"Iofinova, Eugenia B, Nikola H Konstantinov, and Christoph Lampert. “FLEA: Provably Robust Fair Multisource Learning from Unreliable Training Data.” <i>Transactions on Machine Learning Research</i>. ML Research Press, 2022.","mla":"Iofinova, Eugenia B., et al. “FLEA: Provably Robust Fair Multisource Learning from Unreliable Training Data.” <i>Transactions on Machine Learning Research</i>, ML Research Press, 2022.","apa":"Iofinova, E. B., Konstantinov, N. H., &#38; Lampert, C. (2022). FLEA: Provably robust fair multisource learning from unreliable training data. <i>Transactions on Machine Learning Research</i>. ML Research Press."},"publisher":"ML Research Press","article_processing_charge":"No","type":"journal_article","date_updated":"2023-02-23T10:30:54Z","_id":"12495","ddc":["000"],"quality_controlled":"1","main_file_link":[{"url":"https://openreview.net/forum?id=XsPopigZXV","open_access":"1"}],"external_id":{"arxiv":["2106.11732"]},"related_material":{"link":[{"relation":"software","description":"source code","url":"https://github.com/ISTAustria-CVML/FLEA"}]},"year":"2022","project":[{"name":"Vienna Graduate School on Computational Optimization","grant_number":" W1260-N35","_id":"9B9290DE-BA93-11EA-9121-9846C619BF3A"}],"status":"public","publication":"Transactions on Machine Learning Research","date_published":"2022-12-22T00:00:00Z","acknowledgement":"The authors would like to thank Bernd Prach, Elias Frantar, Alexandra Peste, Mahdi Nikdan, and Peter Súkeník for their helpful feedback. This research was supported by the Scientific Service Units (SSU) of IST Austria through resources provided by Scientific Computing (SciComp). 
This publication was made possible by an ETH AI Center postdoctoral fellowship granted to Nikola Konstantinov. Eugenia Iofinova was supported in part by the FWF DK VGSCO, grant agreement number W1260-N35. "},{"ec_funded":1,"conference":{"name":"NeurIPS: Neural Information Processing Systems","end_date":"2021-12-14","start_date":"2021-12-06","location":"Virtual, Online"},"acknowledgement":"This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 805223 ScaleML), and a CNRS PEPS grant. This research was supported by the Scientific Service Units (SSU) of IST Austria through resources provided by Scientific Computing (SciComp). We would also like to thank Christoph Lampert for his feedback on an earlier version of this work, as well as for providing hardware for the Transformer-XL experiments.","date_published":"2021-12-06T00:00:00Z","project":[{"name":"Elastic Coordination for Scalable Machine Learning","grant_number":"805223","call_identifier":"H2020","_id":"268A44D6-B435-11E9-9278-68D0E5697425"}],"status":"public","publication":"35th Conference on Neural Information Processing Systems","year":"2021","external_id":{"arxiv":["2106.12379"]},"related_material":{"record":[{"id":"13074","relation":"dissertation_contains","status":"public"}]},"main_file_link":[{"open_access":"1","url":"https://proceedings.neurips.cc/paper/2021/file/48000647b315f6f00f913caa757a70b3-Paper.pdf"}],"quality_controlled":"1","page":"8557-8570","date_updated":"2023-06-01T12:54:45Z","_id":"11458","type":"conference","article_processing_charge":"No","publisher":"Curran Associates","citation":{"chicago":"Peste, Elena-Alexandra, Eugenia B Iofinova, Adrian Vladu, and Dan-Adrian Alistarh. “AC/DC: Alternating Compressed/DeCompressed Training of Deep Neural Networks.” In <i>35th Conference on Neural Information Processing Systems</i>, 34:8557–70. Curran Associates, 2021.","ista":"Peste E-A, Iofinova EB, Vladu A, Alistarh D-A. 2021. AC/DC: Alternating Compressed/DeCompressed training of deep neural networks. 35th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 34, 8557–8570.","apa":"Peste, E.-A., Iofinova, E. B., Vladu, A., &#38; Alistarh, D.-A. (2021). AC/DC: Alternating Compressed/DeCompressed training of deep neural networks. In <i>35th Conference on Neural Information Processing Systems</i> (Vol. 34, pp. 8557–8570). Virtual, Online: Curran Associates.","mla":"Peste, Elena-Alexandra, et al. “AC/DC: Alternating Compressed/DeCompressed Training of Deep Neural Networks.” <i>35th Conference on Neural Information Processing Systems</i>, vol. 34, Curran Associates, 2021, pp. 8557–70.","ama":"Peste E-A, Iofinova EB, Vladu A, Alistarh D-A. AC/DC: Alternating Compressed/DeCompressed training of deep neural networks. In: <i>35th Conference on Neural Information Processing Systems</i>. Vol 34. Curran Associates; 2021:8557-8570.","ieee":"E.-A. Peste, E. B. Iofinova, A. Vladu, and D.-A. Alistarh, “AC/DC: Alternating Compressed/DeCompressed training of deep neural networks,” in <i>35th Conference on Neural Information Processing Systems</i>, Virtual, Online, 2021, vol. 34, pp. 8557–8570.","short":"E.-A. Peste, E.B. Iofinova, A. Vladu, D.-A. Alistarh, in:, 35th Conference on Neural Information Processing Systems, Curran Associates, 2021, pp. 
8557–8570."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"language":[{"iso":"eng"}],"department":[{"_id":"GradSch"},{"_id":"DaAl"}],"month":"12","arxiv":1,"publication_identifier":{"isbn":["9781713845393"],"issn":["1049-5258"]},"publication_status":"published","abstract":[{"text":"The increasing computational requirements of deep neural networks (DNNs) have led to significant interest in obtaining DNN models that are sparse, yet accurate. Recent work has investigated the even harder case of sparse training, where the DNN weights are, for as much as possible, already sparse to reduce computational costs during training. Existing sparse training methods are often empirical and can have lower accuracy relative to the dense baseline. In this paper, we present a general approach called Alternating Compressed/DeCompressed (AC/DC) training of DNNs, demonstrate convergence for a variant of the algorithm, and show that AC/DC outperforms existing sparse training methods in accuracy at similar computational budgets; at high sparsity levels, AC/DC even outperforms existing methods that rely on accurate pre-trained dense models. An important property of AC/DC is that it allows co-training of dense and sparse models, yielding accurate sparse–dense model pairs at the end of the training process. This is useful in practice, where compressed variants may be desirable for deployment in resource-constrained settings without re-doing the entire training flow, and also provides us with insights into the accuracy gap between dense and compressed models. The code is available at: https://github.com/IST-DASLab/ACDC.","lang":"eng"}],"intvolume":"        34","acknowledged_ssus":[{"_id":"ScienComp"}],"volume":34,"date_created":"2022-06-20T12:11:53Z","author":[{"first_name":"Elena-Alexandra","last_name":"Peste","id":"32D78294-F248-11E8-B48F-1D18A9856A87","full_name":"Peste, Elena-Alexandra"},{"orcid":"0000-0002-7778-3221","first_name":"Eugenia B","id":"f9a17499-f6e0-11ea-865d-fdf9a3f77117","full_name":"Iofinova, Eugenia B","last_name":"Iofinova"},{"first_name":"Adrian","last_name":"Vladu","full_name":"Vladu, Adrian"},{"orcid":"0000-0003-3650-940X","first_name":"Dan-Adrian","last_name":"Alistarh","full_name":"Alistarh, Dan-Adrian","id":"4A899BFC-F248-11E8-B48F-1D18A9856A87"}],"scopus_import":"1","day":"6","oa_version":"Published Version","title":"AC/DC: Alternating Compressed/DeCompressed training of deep neural networks"}]
