@inproceedings{14771,
  abstract     = {Pruning—that is, setting a significant subset of the parameters of a neural network to zero—is one of the most popular methods of model compression. Yet, several recent works have raised the issue that pruning may induce or exacerbate bias in the output of the compressed model. Despite existing evidence for this phenomenon, the relationship between neural network pruning and induced bias is not well-understood. In this work, we systematically investigate and characterize this phenomenon in Convolutional Neural Networks for computer vision. First, we show that it is in fact possible to obtain highly-sparse models, e.g. with less than 10% remaining weights, which do not decrease in accuracy nor substantially increase in bias when compared to dense models. At the same time, we also find that, at higher sparsities, pruned models exhibit higher uncertainty in their outputs, as well as increased correlations, which we directly link to increased bias. We propose easy-to-use criteria which, based only on the uncompressed model, establish whether bias will increase with pruning, and identify the samples most susceptible to biased predictions post-compression. Our code can be found at https://github.com/IST-DASLab/pruned-vision-model-bias.},
  author       = {Iofinova, Eugenia B and Peste, Elena-Alexandra and Alistarh, Dan-Adrian},
  booktitle    = {2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  issn         = {2575-7075},
  location     = {Vancouver, BC, Canada},
  pages        = {24364--24373},
  publisher    = {IEEE},
  title        = {{Bias in pruned vision models: In-depth analysis and countermeasures}},
  doi          = {10.1109/cvpr52729.2023.02334},
  year         = {2023},
}

@inproceedings{14114,
  abstract     = {Algorithmic fairness is frequently motivated in terms of a trade-off in which overall performance is decreased so as to improve performance on disadvantaged groups where the algorithm would otherwise be less accurate. Contrary to this, we find that applying existing fairness approaches to computer vision improves fairness by degrading the performance of classifiers across all groups (with increased degradation on the best-performing groups). Extending the bias-variance decomposition for classification to fairness, we theoretically explain why the majority of fairness methods designed for low-capacity models should not be used in settings involving high-capacity models, a scenario common to computer vision. We corroborate this analysis with extensive experimental support that shows that many of the fairness heuristics used in computer vision also degrade performance on the most disadvantaged groups. Building on these insights, we propose an adaptive augmentation strategy that, uniquely among all methods tested, improves performance for the disadvantaged groups.},
  author       = {Zietlow, Dominik and Lohaus, Michael and Balakrishnan, Guha and Kleindessner, Matthäus and Locatello, Francesco and Schölkopf, Bernhard and Russell, Chris},
  booktitle    = {2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  isbn         = {9781665469470},
  issn         = {2575-7075},
  location     = {New Orleans, LA, United States},
  pages        = {10400--10411},
  publisher    = {IEEE},
  title        = {{Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers}},
  doi          = {10.1109/cvpr52688.2022.01016},
  year         = {2022},
}

@inproceedings{12299,
  abstract     = {Transfer learning is a classic paradigm by which models pretrained on large “upstream” datasets are adapted to yield good results on “downstream” specialized datasets. Generally, more accurate models on the “upstream” dataset tend to provide better transfer accuracy “downstream”. In this work, we perform an in-depth investigation of this phenomenon in the context of convolutional neural networks (CNNs) trained on the ImageNet dataset, which have been pruned, that is, compressed by sparsifying their connections. We consider transfer using unstructured pruned models obtained by applying several state-of-the-art pruning methods, including magnitude-based, second-order, regrowth, lottery-ticket, and regularization approaches, in the context of twelve standard transfer tasks. In a nutshell, our study shows that sparse models can match or even outperform the transfer performance of dense models, even at high sparsities, and, while doing so, can lead to significant inference and even training speedups. At the same time, we observe and analyze significant differences in the behaviour of different pruning methods. The code is available at: https://github.com/IST-DASLab/sparse-imagenet-transfer.},
  author       = {Iofinova, Eugenia B and Peste, Elena-Alexandra and Kurtz, Mark and Alistarh, Dan-Adrian},
  booktitle    = {2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  issn         = {2575-7075},
  location     = {New Orleans, LA, United States},
  pages        = {12256--12266},
  publisher    = {IEEE},
  title        = {{How well do sparse ImageNet models transfer?}},
  doi          = {10.1109/cvpr52688.2022.01195},
  year         = {2022},
}

@inproceedings{8186,
  abstract     = {Numerous methods have been proposed for probabilistic generative modelling of 3D objects. However, none of these is able to produce textured objects, which renders them of limited use for practical tasks. In this work, we present the first generative model of textured 3D meshes. Training such a model would traditionally require a large dataset of textured meshes, but unfortunately, existing datasets of meshes lack detailed textures. We instead propose a new training methodology that allows learning from collections of 2D images without any 3D information. To do so, we train our model to explain a distribution of images by modelling each image as a 3D foreground object placed in front of a 2D background. Thus, it learns to generate meshes that, when rendered, produce images similar to those in its training set. A well-known problem when generating meshes with deep networks is the emergence of self-intersections, which are problematic for many use-cases. As a second contribution we therefore introduce a new generation process for 3D meshes that guarantees no self-intersections arise, based on the physical intuition that faces should push one another out of the way as they move. We conduct extensive experiments on our approach, reporting quantitative and qualitative results on both synthetic data and natural images. These show our method successfully learns to generate plausible and diverse textured 3D samples for five challenging object classes.},
  author       = {Henderson, Paul M and Tsiminaki, Vagia and Lampert, Christoph},
  booktitle    = {2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  issn         = {2575-7075},
  location     = {Virtual},
  pages        = {7498--7507},
  publisher    = {IEEE},
  title        = {{Leveraging 2D data to learn textured 3D mesh generation}},
  doi          = {10.1109/cvpr42600.2020.00752},
  year         = {2020},
}

@inproceedings{10882,
  abstract     = {We introduce Intelligent Annotation Dialogs for bounding box annotation. We train an agent to automatically choose a sequence of actions for a human annotator to produce a bounding box in a minimal amount of time. Specifically, we consider two actions: box verification [34], where the annotator verifies a box generated by an object detector, and manual box drawing. We explore two kinds of agents, one based on predicting the probability that a box will be positively verified, and the other based on reinforcement learning. We demonstrate that (1) our agents are able to learn efficient annotation strategies in several scenarios, automatically adapting to the image difficulty, the desired quality of the boxes, and the detector strength; (2) in all scenarios the resulting annotation dialogs speed up annotation compared to manual box drawing alone and box verification alone, while also outperforming any fixed combination of verification and drawing in most scenarios; (3) in a realistic scenario where the detector is iteratively re-trained, our agents evolve a series of strategies that reflect the shifting trade-off between verification and drawing as the detector grows stronger.},
  author       = {Uijlings, Jasper and Konyushkova, Ksenia and Lampert, Christoph and Ferrari, Vittorio},
  booktitle    = {2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  isbn         = {9781538664209},
  issn         = {2575-7075},
  location     = {Salt Lake City, UT, United States},
  pages        = {9175--9184},
  publisher    = {IEEE},
  title        = {{Learning intelligent dialogs for bounding box annotation}},
  doi          = {10.1109/cvpr.2018.00956},
  year         = {2018},
}

