@article{7791,
  abstract     = {Extending a result of Milena Radnovic and Serge Tabachnikov, we establish conditions for two different non-symmetric norms to define the same billiard reflection law.},
  author       = {Akopyan, Arseniy and Karasev, Roman},
  issn         = {2199-6768},
  journal      = {European Journal of Mathematics},
  number       = {4},
  pages        = {1309 -- 1312},
  publisher    = {Springer Nature},
  title        = {{When different norms lead to same billiard trajectories?}},
  doi          = {10.1007/s40879-020-00405-0},
  volume       = {8},
  year         = {2022},
}

@unpublished{8125,
  abstract     = {Context, such as behavioral state, is known to modulate memory formation and retrieval, but is usually ignored in associative memory models. Here, we propose several types of contextual modulation for associative memory networks that greatly increase their performance. In these networks, context inactivates specific neurons and connections, which modulates the effective connectivity of the network. Memories are stored only by the active components, thereby reducing interference from memories acquired in other contexts. Such networks exhibit several beneficial characteristics, including enhanced memory capacity, high robustness to noise, increased robustness to memory overloading, and better memory retention during continual learning. Furthermore, memories can be biased to have different relative strengths, or even gated on or off, according to contextual cues, providing a candidate model for cognitive control of memory and efficient memory search. An external context-encoding network can dynamically switch the memory network to a desired state, which we liken to experimentally observed contextual signals in prefrontal cortex and hippocampus. Overall, our work illustrates the benefits of organizing memory around context, and provides an important link between behavioral studies of memory and mechanistic details of neural circuits. SIGNIFICANCE: Memory is context dependent — both encoding and recall vary in effectiveness and speed depending on factors like location and brain state during a task. We apply this idea to a simple computational model of associative memory through contextual gating of neurons and synaptic connections. Intriguingly, this results in several advantages, including vastly enhanced memory capacity, better robustness, and flexible memory gating. Our model helps to explain (i) how gating and inhibition contribute to memory processes, (ii) how memory access dynamically changes over time, and (iii) how context representations, such as those observed in hippocampus and prefrontal cortex, may interact with and control memory processes.},
  author       = {Podlaski, William F. and Agnes, Everton J. and Vogels, Tim P},
  booktitle    = {bioRxiv},
  publisher    = {Cold Spring Harbor Laboratory},
  title        = {{High capacity and dynamic accessibility in associative memory networks with context-dependent neuronal and synaptic gating}},
  doi          = {10.1101/2020.01.08.898528},
  year         = {2022},
}

@article{7577,
  abstract     = {Weak convergence of inertial iterative method for solving variational inequalities is the focus of this paper. The cost function is assumed to be non-Lipschitz and monotone. We propose a projection-type method with inertial terms and give weak convergence analysis under appropriate conditions. Some test results are performed and compared with relevant methods in the literature to show the efficiency and advantages given by our proposed methods.},
  author       = {Shehu, Yekini and Iyiola, Olaniyi S.},
  issn         = {1563-504X},
  journal      = {Applicable Analysis},
  number       = {1},
  pages        = {192--216},
  publisher    = {Taylor \& Francis},
  title        = {{Weak convergence for variational inequalities with inertial-type method}},
  doi          = {10.1080/00036811.2020.1736287},
  volume       = {101},
  year         = {2022},
}

@article{14355,
  abstract     = {Purpose: The mediator (MED) multisubunit-complex modulates the activity of the transcriptional machinery, and genetic defects in different MED subunits (17, 20, 27) have been implicated in neurologic diseases. In this study, we identified a recurrent homozygous variant in MED11 (c.325C>T; p.Arg109Ter) in 7 affected individuals from 5 unrelated families. Methods: To investigate the genetic cause of the disease, exome or genome sequencing were performed in 5 unrelated families identified via different research networks and Matchmaker Exchange. Deep clinical and brain imaging evaluations were performed by clinical pediatric neurologists and neuroradiologists. The functional effect of the candidate variant on both MED11 RNA and protein was assessed using reverse transcriptase polymerase chain reaction and western blotting using fibroblast cell lines derived from 1 affected individual and controls and through computational approaches. Knockouts in zebrafish were generated using clustered regularly interspaced short palindromic repeats/Cas9. Results: The disease was characterized by microcephaly, profound neurodevelopmental impairment, exaggerated startle response, myoclonic seizures, progressive widespread neurodegeneration, and premature death. Functional studies on patient-derived fibroblasts did not show a loss of protein function but rather disruption of the C-terminal of MED11, likely impairing binding to other MED subunits. A zebrafish knockout model recapitulates key clinical phenotypes. Conclusion: Loss of the C-terminal of MED subunit 11 may affect its binding efficiency to other MED subunits, thus implicating the MED-complex stability in brain development and neurodegeneration. (C) 2022 The Authors. Published by Elsevier Inc. on behalf of American College of Medical Genetics and Genomics.},
  author       = {Cali, Elisa and Lin, Sheng-Jia and Rocca, Clarissa and Sahin, Yavuz and Al Shamsi, Aisha and El Chehadeh, Salima and Chaabouni, Myriam and Mankad, Kshitij and Galanaki, Evangelia and Efthymiou, Stephanie and Sudhakar, Sniya and Athanasiou-Fragkouli, Alkyoni and Celik, Tamer and Narli, Nejat and Bianca, Sebastiano and Murphy, David and Moreira, Francisco Martins De Carvalho and Accogli, Andrea and Petree, Cassidy and Huang, Kevin and Monastiri, Kamel and Edizadeh, Masoud and Nardello, Rosaria and Ognibene, Marzia and De Marco, Patrizia and Ruggieri, Martino and Zara, Federico and Striano, Pasquale and Al-Gazali, Lihadh and Warde, Marie Therese Abi and Gerard, Benedicte and Zifarelli, Giovanni and Beetz, Christian and Fortuna, Sara and Soler, Miguel and Valente, Enza Maria and Varshney, Gaurav and Maroofian, Reza and Salpietro, Vincenzo and Houlden, Henry and {SYNaPS Study Group}},
  issn         = {1098-3600},
  journal      = {Genetics in Medicine},
  keywords     = {Human mediator complex, MED11, MEDopathies},
  number       = {10},
  pages        = {2194--2203},
  publisher    = {Elsevier},
  title        = {{A homozygous MED11 C-terminal variant causes a lethal neurodegenerative disease}},
  doi          = {10.1016/j.gim.2022.07.013},
  volume       = {24},
  year         = {2022},
}

@article{14381,
  abstract     = {Expander graphs (sparse but highly connected graphs) have, since their inception, been the source of deep links between Mathematics and Computer Science as well as applications to other areas. In recent years, a fascinating theory of high-dimensional expanders has begun to emerge, which is still in a formative stage but has nonetheless already led to a number of striking results. Unlike for graphs, in higher dimensions there is a rich array of non-equivalent notions of expansion (coboundary expansion, cosystolic expansion, topological expansion, spectral expansion, etc.), with different strengths and applications. In this talk, we will survey this landscape of high-dimensional expansion, with a focus on two main results. First, we will present Gromov’s Topological Overlap Theorem, which asserts that coboundary expansion (a quantitative version of vanishing mod 2 cohomology) implies topological expansion (roughly, the property that for every map from a simplicial complex to a manifold of the same dimension, the images of a positive fraction of the simplices have a point in common). Second, we will outline a construction of bounded degree 2-dimensional topological expanders, due to Kaufman, Kazhdan, and Lubotzky.},
  author       = {Wagner, Uli},
  issn         = {2102-622X},
  journal      = {Bulletin de la Société Mathématique de France},
  pages        = {281--294},
  publisher    = {Société Mathématique de France},
  title        = {{High-dimensional expanders (after Gromov, Kaufman, Kazhdan, Lubotzky, and others)}},
  doi          = {10.24033/ast.1188},
  volume       = {438},
  year         = {2022},
}

@article{14437,
  abstract     = {Future LEDs could be based on lead halide perovskites. A breakthrough in preparing device-compatible solids composed of nanoscale perovskite crystals overcomes a long-standing hurdle in making blue perovskite LEDs.},
  author       = {Utzat, Hendrik and Ibáñez, Maria},
  issn         = {1476-4687},
  journal      = {Nature},
  keywords     = {Multidisciplinary},
  number       = {7941},
  pages        = {638--639},
  publisher    = {Springer Nature},
  title        = {{Molecular engineering enables bright blue LEDs}},
  doi          = {10.1038/d41586-022-04447-0},
  volume       = {612},
  year         = {2022},
}

@misc{14520,
  abstract     = {This dataset comprises all data shown in the figures of the submitted article "Compact vacuum gap transmon qubits: Selective and sensitive probes for superconductor surface losses" at arxiv.org/abs/2206.14104. Additional raw data are available from the corresponding author on reasonable request.},
  author       = {Zemlicka, Martin and Redchenko, Elena and Peruzzo, Matilda and Hassani, Farid and Trioni, Andrea and Barzanjeh, Shabir and Fink, Johannes M},
  publisher    = {Zenodo},
  title        = {{Compact vacuum gap transmon qubits: Selective and sensitive probes for superconductor surface losses}},
  doi          = {10.5281/ZENODO.8408897},
  year         = {2022},
}

@unpublished{14597,
  abstract     = {Phase-field models such as the Allen-Cahn equation may give rise to the formation and evolution of geometric shapes, a phenomenon that may be analyzed rigorously in suitable scaling regimes. In its sharp-interface limit, the vectorial Allen-Cahn equation with a potential with $N \geq 3$ distinct minima has been conjectured to describe the evolution of branched interfaces by multiphase mean curvature flow.
In the present work, we give a rigorous proof for this statement in two and three ambient dimensions and for a suitable class of potentials: As long as a strong solution to multiphase mean curvature flow exists, solutions to the vectorial Allen-Cahn equation with well-prepared initial data converge towards multiphase mean curvature flow in the limit of vanishing interface width parameter $\varepsilon \searrow 0$. We even establish the rate of convergence $O(\varepsilon^{1/2})$.
Our approach is based on the gradient flow structure of the Allen-Cahn equation and its limiting motion: Building on the recent concept of "gradient flow calibrations" for multiphase mean curvature flow, we introduce a notion of relative entropy for the vectorial Allen-Cahn equation with multi-well potential. This enables us to overcome the limitations of other approaches, e.g. avoiding the need for a stability analysis of the Allen-Cahn operator or additional convergence hypotheses for the energy at positive times.},
  author       = {Fischer, Julian L and Marveggio, Alice},
  booktitle    = {arXiv},
  title        = {{Quantitative convergence of the vectorial Allen-Cahn equation towards multiphase mean curvature flow}},
  doi          = {10.48550/ARXIV.2203.17143},
  year         = {2022},
}

@unpublished{14600,
  abstract     = {We study the problem of learning controllers for discrete-time non-linear stochastic dynamical systems with formal reach-avoid guarantees. This work presents the first method for providing formal reach-avoid guarantees, which combine and generalize stability and safety guarantees, with a tolerable probability threshold $p\in[0,1]$ over the infinite time horizon. Our method leverages advances in machine learning literature and it represents formal certificates as neural networks. In particular, we learn a certificate in the form of a reach-avoid supermartingale (RASM), a novel notion that we introduce in this work. Our RASMs provide reachability and avoidance guarantees by imposing constraints on what can be viewed as a stochastic extension of level sets of Lyapunov functions for deterministic systems. Our approach solves several important problems -- it can be used to learn a control policy from scratch, to verify a reach-avoid specification for a fixed control policy, or to fine-tune a pre-trained policy if it does not satisfy the reach-avoid specification. We validate our approach on $3$ stochastic non-linear reinforcement learning tasks.},
  author       = {Zikelic, Dorde and Lechner, Mathias and Henzinger, Thomas A and Chatterjee, Krishnendu},
  booktitle    = {arXiv},
  title        = {{Learning control policies for stochastic systems with reach-avoid guarantees}},
  doi          = {10.48550/ARXIV.2210.05308},
  year         = {2022},
}

@unpublished{14601,
  abstract     = {In this work, we address the problem of learning provably stable neural
network policies for stochastic control systems. While recent work has
demonstrated the feasibility of certifying given policies using martingale
theory, the problem of how to learn such policies is little explored. Here, we
study the effectiveness of jointly learning a policy together with a martingale
certificate that proves its stability using a single learning algorithm. We
observe that the joint optimization problem becomes easily stuck in local
minima when starting from a randomly initialized policy. Our results suggest
that some form of pre-training of the policy is required for the joint
optimization to repair and verify the policy successfully.},
  author       = {Zikelic, Dorde and Lechner, Mathias and Chatterjee, Krishnendu and Henzinger, Thomas A},
  booktitle    = {arXiv},
  title        = {{Learning stabilizing policies in stochastic control systems}},
  doi          = {10.48550/arXiv.2205.11991},
  year         = {2022},
}

@misc{13064,
  abstract     = {Genetically informed, deep-phenotyped biobanks are an important research resource and it is imperative that the most powerful, versatile, and efficient analysis approaches are used. Here, we apply our recently developed Bayesian grouped mixture of regressions model (GMRM) in the UK and Estonian Biobanks and obtain the highest genomic prediction accuracy reported to date across 21 heritable traits. When compared to other approaches, GMRM accuracy was greater than annotation prediction models run in the LDAK or LDPred-funct software by 15% (SE 7%) and 14% (SE 2%), respectively, and was 18% (SE 3%) greater than a baseline BayesR model without single-nucleotide polymorphism (SNP) markers grouped into minor allele frequency–linkage disequilibrium (MAF-LD) annotation categories. For height, the prediction accuracy $R^2$ was 47% in a UK Biobank holdout sample, which was 76% of the estimated $h^2_{SNP}$. We then extend our GMRM prediction model to provide mixed-linear model association (MLMA) SNP marker estimates for genome-wide association (GWAS) discovery, which increased the independent loci detected to 16,162 in unrelated UK Biobank individuals, compared to 10,550 from BoltLMM and 10,095 from Regenie, a 62 and 65% increase, respectively. The average $\chi^2$ value of the leading markers increased by 15.24 (SE 0.41) for every 1% increase in prediction accuracy gained over a baseline BayesR model across the traits. Thus, we show that modeling genetic associations accounting for MAF and LD differences among SNP markers, and incorporating prior knowledge of genomic function, is important for both genomic prediction and discovery in large-scale individual-level studies.},
  author       = {Orliac, Etienne and Trejo Banos, Daniel and Ojavee, Sven and Läll, Kristi and Mägi, Reedik and Visscher, Peter and Robinson, Matthew Richard},
  publisher    = {Dryad},
  title        = {{Improving genome-wide association discovery and genomic prediction accuracy in biobank data}},
  doi          = {10.5061/DRYAD.GTHT76HMZ},
  year         = {2022},
}

@misc{13066,
  abstract     = {Chromosomal inversions have been shown to play a major role in local adaptation by suppressing recombination between alternative arrangements and maintaining beneficial allele combinations. However, so far, their importance relative to the remaining genome remains largely unknown. Understanding the genetic architecture of adaptation requires better estimates of how loci of different effect sizes contribute to phenotypic variation. Here, we used three Swedish islands where the marine snail Littorina saxatilis has repeatedly evolved into two distinct ecotypes along a habitat transition. We estimated the contribution of inversion polymorphisms to phenotypic divergence while controlling for polygenic effects in the remaining genome using a quantitative genetics framework. We confirmed the importance of inversions but showed that contributions of loci outside inversions are of similar magnitude, with variable proportions dependent on the trait and the population. Some inversions showed consistent effects across all sites, whereas others exhibited site-specific effects, indicating that the genomic basis for replicated phenotypic divergence is only partly shared. The contributions of sexual dimorphism as well as environmental factors to phenotypic variation were significant but minor compared to inversions and polygenic background. Overall, this integrated approach provides insight into the multiple mechanisms contributing to parallel phenotypic divergence.},
  author       = {Koch, Eva and Ravinet, Mark and Westram, Anja M and Johannesson, Kerstin and Butlin, Roger},
  publisher    = {Dryad},
  title        = {{Data from: Genetic architecture of repeated phenotypic divergence in Littorina saxatilis ecotype evolution}},
  doi          = {10.5061/DRYAD.M905QFV4B},
  year         = {2022},
}

@misc{13076,
  abstract     = {The source code for replicating experiments presented in the paper.

The implementation of the designed priority schedulers can be found in Galois-2.2.1/include/Galois/WorkList/:
StealingMultiQueue.h is the StealingMultiQueue.
MQOptimized/ contains MQ Optimized variants.

We provide images that contain all the dependencies and datasets. Images can be pulled from npostnikova/mq-based-schedulers repository, or downloaded from Zenodo. See readme for more detail.},
  author       = {Postnikova, Anastasiia and Koval, Nikita and Nadiradze, Giorgi and Alistarh, Dan-Adrian},
  publisher    = {Zenodo},
  title        = {{Multi-queues can be state-of-the-art priority schedulers}},
  doi          = {10.5281/ZENODO.5733408},
  year         = {2022},
}

@inproceedings{13239,
  abstract     = {Brains are thought to engage in predictive learning - learning to predict upcoming stimuli - to construct an internal model of their environment. This is especially notable for spatial navigation, as first described by Tolman’s latent learning tasks. However, predictive learning has also been observed in sensory cortex, in settings unrelated to spatial navigation. Apart from normative frameworks such as active inference or efficient coding, what could be the utility of learning to predict the patterns of occurrence of correlated stimuli? Here we show that prediction, and thereby the construction of an internal model of sequential stimuli, can bootstrap the learning process of a working memory task in a recurrent neural network. We implemented predictive learning alongside working memory match-tasks, and networks emerged to solve the prediction task first by encoding information across time to predict upcoming stimuli, and then eavesdropped on this solution to solve the matching task. Eavesdropping was most beneficial when neural resources were limited. Hence, predictive learning acts as a general neural mechanism to learn to store sensory information that can later be essential for working memory tasks.},
  author       = {Van Der Plas, Thijs L. and Vogels, Tim P and Manohar, Sanjay G.},
  booktitle    = {Proceedings of Machine Learning Research},
  issn         = {2640-3498},
  pages        = {518--531},
  publisher    = {ML Research Press},
  title        = {{Predictive learning enables neural networks to learn complex working memory tasks}},
  volume       = {199},
  year         = {2022},
}

@article{13240,
  abstract     = {Ustilago maydis is a biotrophic phytopathogenic fungus that causes corn smut disease. As a well-established model system, U. maydis is genetically fully accessible with large omics datasets available and subject to various biological questions ranging from DNA-repair, RNA-transport, and protein secretion to disease biology. For many genetic approaches, tight control of transgene regulation is important. Here we established an optimised version of the Tetracycline-ON (TetON) system for U. maydis. We demonstrate the Tetracycline concentration-dependent expression of fluorescent protein transgenes and the system’s suitability for the induced expression of the toxic protein BCL2 Associated X-1 (Bax1). The Golden Gate compatible vector system contains a native minimal promoter from the mating factor a-1 encoding gene, mfa with ten copies of the tet-regulated operator (tetO) and a codon optimised Tet-repressor (tetR*) which is translationally fused to the native transcriptional corepressor Mql1 (UMAG_05501). The metabolism-independent transcriptional regulator system is functional both, in liquid culture as well as on solid media in the presence of the inducer and can become a useful tool for toxin-antitoxin studies, identification of antifungal proteins, and to study functions of toxic gene products in Ustilago maydis.},
  author       = {Ingole, Kishor D. and Nagarajan, Nithya and Uhse, Simon and Giannini, Caterina and Djamei, Armin},
  issn         = {2673-6128},
  journal      = {Frontiers in Fungal Biology},
  publisher    = {Frontiers Media},
  title        = {{Tetracycline-controlled (TetON) gene expression system for the smut fungus Ustilago maydis}},
  doi          = {10.3389/ffunb.2022.1029114},
  volume       = {3},
  year         = {2022},
}

@inproceedings{13241,
  abstract     = {Addressing fairness concerns about machine learning models is a crucial step towards their long-term adoption in real-world automated systems. Many approaches for training fair models from data have been developed and an implicit assumption about such algorithms is that they are able to recover a fair model, despite potential historical biases in the data. In this work we show a number of impossibility results that indicate that there is no learning algorithm that can recover a fair model when a proportion of the dataset is subject to arbitrary manipulations. Specifically, we prove that there are situations in which an adversary can force any learner to return a biased classifier, with or without degrading accuracy, and that the strength of this bias increases for learning problems with underrepresented protected groups in the data. Our results emphasize on the importance of studying further data corruption models of various strength and of establishing stricter data collection practices for fairness-aware learning.},
  author       = {Konstantinov, Nikola H and Lampert, Christoph},
  booktitle    = {Proceedings of Machine Learning Research},
  issn         = {2640-3498},
  pages        = {59--83},
  publisher    = {ML Research Press},
  title        = {{On the impossibility of fairness-aware learning from corrupted data}},
  volume       = {171},
  year         = {2022},
}

@inproceedings{14093,
  abstract     = {We propose a stochastic conditional gradient method (CGM) for minimizing convex finite-sum objectives formed as a sum of smooth and non-smooth terms. Existing CGM variants for this template either suffer from slow convergence rates, or require carefully increasing the batch size over the course of the algorithm’s execution, which leads to computing full gradients. In contrast, the proposed method, equipped with a stochastic average gradient (SAG) estimator, requires only one sample per iteration. Nevertheless, it guarantees fast convergence rates on par with more sophisticated variance reduction techniques. In applications we put special emphasis on problems with a large number of separable constraints. Such problems are prevalent among semidefinite programming (SDP) formulations arising in machine learning and theoretical computer science. We provide numerical experiments on matrix completion, unsupervised clustering, and sparsest-cut SDPs.},
  author       = {Dresdner, Gideon and Vladarean, Maria-Luiza and Rätsch, Gunnar and Locatello, Francesco and Cevher, Volkan and Yurtsever, Alp},
  booktitle    = {Proceedings of the 25th International Conference on Artificial Intelligence and Statistics},
  issn         = {2640-3498},
  location     = {Virtual},
  pages        = {8439--8457},
  publisher    = {ML Research Press},
  title        = {{Faster one-sample stochastic conditional gradient method for composite convex minimization}},
  volume       = {151},
  year         = {2022},
}

@inproceedings{14106,
  abstract     = {We show that deep networks trained to satisfy demographic parity often do so
through a form of race or gender awareness, and that the more we force a network
to be fair, the more accurately we can recover race or gender from the internal state
of the network. Based on this observation, we investigate an alternative fairness
approach: we add a second classification head to the network to explicitly predict
the protected attribute (such as race or gender) alongside the original task. After
training the two-headed network, we enforce demographic parity by merging the
two heads, creating a network with the same architecture as the original network.
We establish a close relationship between existing approaches and our approach
by showing (1) that the decisions of a fair classifier are well-approximated by our
approach, and (2) that an unfair and optimally accurate classifier can be recovered
from a fair classifier and our second head predicting the protected attribute. We use
our explicit formulation to argue that the existing fairness approaches, just as ours,
demonstrate disparate treatment and that they are likely to be unlawful in a wide
range of scenarios under US law.},
  author       = {Lohaus, Michael and Kleindessner, Matthäus and Kenthapadi, Krishnaram and Locatello, Francesco and Russell, Chris},
  booktitle    = {36th Conference on Neural Information Processing Systems},
  isbn         = {9781713871088},
  location     = {New Orleans, LA, United States},
  pages        = {16548--16562},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {{Are two heads the same as one? Identifying disparate treatment in fair neural networks}},
  volume       = {35},
  year         = {2022},
}

@inproceedings{14107,
  abstract     = {Amodal perception requires inferring the full shape of an object that is partially occluded. This task is particularly challenging on two levels: (1) it requires more information than what is contained in the instant retina or imaging sensor, (2) it is difficult to obtain enough well-annotated amodal labels for supervision. To this end, this paper develops a new framework of
Self-supervised amodal Video object segmentation (SaVos). Our method efficiently leverages the visual information of video temporal sequences to infer the amodal mask of objects. The key intuition is that the occluded part of an object can be explained away if that part is visible in other frames, possibly deformed as long as the deformation can be reasonably learned.
Accordingly, we derive a novel self-supervised learning paradigm that efficiently utilizes the visible object parts as the supervision to guide the training on videos. In addition to learning type prior to complete masks for known types, SaVos also learns the spatiotemporal prior, which is also useful for the amodal task and could generalize to unseen types. The proposed
framework achieves the state-of-the-art performance on the synthetic amodal segmentation benchmark FISHBOWL and the real world benchmark KINS-Video-Car. Further, it lends itself well to being transferred to novel distributions using test-time adaptation, outperforming existing models even after the transfer to a new distribution.},
  author       = {Yao, Jian and Hong, Yuxin and Wang, Chiyu and Xiao, Tianjun and He, Tong and Locatello, Francesco and Wipf, David and Fu, Yanwei and Zhang, Zheng},
  booktitle    = {36th Conference on Neural Information Processing Systems},
  location     = {New Orleans, LA, United States},
  title        = {{Self-supervised amodal video object segmentation}},
  doi          = {10.48550/arXiv.2210.12733},
  year         = {2022},
}

@inproceedings{14114,
  abstract     = {Algorithmic fairness is frequently motivated in terms of a trade-off in which overall performance is decreased so as to improve performance on disadvantaged groups where the algorithm would otherwise be less accurate. Contrary to this, we find that applying existing fairness approaches to computer vision improve fairness by degrading the performance of classifiers across all groups (with increased degradation on the best performing groups). Extending the bias-variance decomposition for classification to fairness, we theoretically explain why the majority of fairness methods designed for low capacity models should not be used in settings involving high-capacity models, a scenario common to computer vision. We corroborate this analysis with extensive experimental support that shows that many of the fairness heuristics used in computer vision also degrade performance on the most disadvantaged groups. Building on these insights, we propose an adaptive augmentation strategy that, uniquely, of all methods tested, improves performance for the disadvantaged groups.},
  author       = {Zietlow, Dominik and Lohaus, Michael and Balakrishnan, Guha and Kleindessner, Matthäus and Locatello, Francesco and Schölkopf, Bernhard and Russell, Chris},
  booktitle    = {2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  isbn         = {9781665469470},
  issn         = {2575-7075},
  location     = {New Orleans, LA, United States},
  pages        = {10400--10411},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers}},
  doi          = {10.1109/cvpr52688.2022.01016},
  year         = {2022},
}

