@inproceedings{14771,
  abstract     = {Pruning—that is, setting a significant subset of the parameters of a neural network to zero—is one of the most popular methods of model compression. Yet, several recent works have raised the issue that pruning may induce or exacerbate bias in the output of the compressed model. Despite existing evidence for this phenomenon, the relationship between neural network pruning and induced bias is not well-understood. In this work, we systematically investigate and characterize this phenomenon in Convolutional Neural Networks for computer vision. First, we show that it is in fact possible to obtain highly-sparse models, e.g. with less than 10% remaining weights, which do not decrease in accuracy nor substantially increase in bias when compared to dense models. At the same time, we also find that, at higher sparsities, pruned models exhibit higher uncertainty in their outputs, as well as increased correlations, which we directly link to increased bias. We propose easy-to-use criteria which, based only on the uncompressed model, establish whether bias will increase with pruning, and identify the samples most susceptible to biased predictions post-compression. Our code can be found at https://github.com/IST-DASLab/pruned-vision-model-bias.},
  author       = {Iofinova, Eugenia B and Peste, Elena-Alexandra and Alistarh, Dan-Adrian},
  booktitle    = {2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  issn         = {2575-7075},
  location     = {Vancouver, BC, Canada},
  pages        = {24364--24373},
  publisher    = {IEEE},
  title        = {{Bias in pruned vision models: In-depth analysis and countermeasures}},
  doi          = {10.1109/cvpr52729.2023.02334},
  year         = {2023},
}

@inproceedings{12299,
  abstract     = {Transfer learning is a classic paradigm by which models pretrained on large “upstream” datasets are adapted to yield good results on “downstream” specialized datasets. Generally, more accurate models on the “upstream” dataset tend to provide better transfer accuracy “downstream”. In this work, we perform an in-depth investigation of this phenomenon in the context of convolutional neural networks (CNNs) trained on the ImageNet dataset, which have been pruned, that is, compressed by sparsifying their connections. We consider transfer using unstructured pruned models obtained by applying several state-of-the-art pruning methods, including magnitude-based, second-order, regrowth, lottery-ticket, and regularization approaches, in the context of twelve standard transfer tasks. In a nutshell, our study shows that sparse models can match or even outperform the transfer performance of dense models, even at high sparsities, and, while doing so, can lead to significant inference and even training speedups. At the same time, we observe and analyze significant differences in the behaviour of different pruning methods. The code is available at: https://github.com/IST-DASLab/sparse-imagenet-transfer.},
  author       = {Iofinova, Eugenia B and Peste, Elena-Alexandra and Kurtz, Mark and Alistarh, Dan-Adrian},
  booktitle    = {2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  issn         = {2575-7075},
  location     = {New Orleans, LA, United States},
  pages        = {12256--12266},
  publisher    = {IEEE},
  title        = {{How well do sparse ImageNet models transfer?}},
  doi          = {10.1109/cvpr52688.2022.01195},
  year         = {2022},
}

@article{12495,
  abstract     = {Fairness-aware learning aims at constructing classifiers that not only make accurate predictions, but also do not discriminate against specific groups. It is a fast-growing area of machine learning with far-reaching societal impact. However, existing fair learning methods are vulnerable to accidental or malicious artifacts in the training data, which can cause them to unknowingly produce unfair classifiers. In this work we address the problem of fair learning from unreliable training data in the robust multisource setting, where the available training data comes from multiple sources, a fraction of which might not be representative of the true data distribution. We introduce FLEA, a filtering-based algorithm that identifies and suppresses those data sources that would have a negative impact on fairness or accuracy if they were used for training. As such, FLEA is not a replacement for prior fairness-aware learning methods but rather an augmentation that makes any of them robust against unreliable training data. We show the effectiveness of our approach through a diverse range of experiments on multiple datasets. Additionally, we prove formally that, given enough data, FLEA protects the learner against corruptions as long as the fraction of affected data sources is less than half. Our source code and documentation are available at https://github.com/ISTAustria-CVML/FLEA.},
  author       = {Iofinova, Eugenia B and Konstantinov, Nikola H and Lampert, Christoph},
  issn         = {2835-8856},
  journal      = {Transactions on Machine Learning Research},
  publisher    = {ML Research Press},
  title        = {{FLEA: Provably robust fair multisource learning from unreliable training data}},
  year         = {2022},
}

