@unpublished{14946,
  abstract     = {We present a unified framework for studying the identifiability of
representations learned from simultaneously observed views, such as different
data modalities. We allow a partially observed setting in which each view
constitutes a nonlinear mixture of a subset of underlying latent variables,
which can be causally related. We prove that the information shared across all
subsets of any number of views can be learned up to a smooth bijection using
contrastive learning and a single encoder per view. We also provide graphical
criteria indicating which latent variables can be identified through a simple
set of rules, which we refer to as identifiability algebra. Our general
framework and theoretical results unify and extend several previous works on
multi-view nonlinear ICA, disentanglement, and causal representation learning.
We experimentally validate our claims on numerical, image, and multi-modal data
sets. Further, we demonstrate that the performance of prior methods is
recovered in different special cases of our setup. Overall, we find that access
to multiple partial views enables us to identify a more fine-grained
representation, under the generally milder assumption of partial observability.},
  author       = {Yao, Dingling and Xu, Danru and Lachapelle, Sébastien and Magliacane, Sara and Taslakian, Perouz and Martius, Georg and von Kügelgen, Julius and Locatello, Francesco},
  note         = {arXiv preprint arXiv:2311.04056},
  title        = {{Multi-view causal representation learning with partial observability}},
  doi          = {10.48550/arXiv.2311.04056},
  year         = {2023},
}

@inproceedings{14958,
  abstract     = {Causal representation learning (CRL) aims at identifying
high-level causal variables from low-level data, e.g. images. Current methods
usually assume that all causal variables are captured in the high-dimensional
observations. In this work, we focus on learning causal representations from
data under partial observability, i.e., when some of the causal variables are
not observed in the measurements, and the set of masked variables changes
across the different samples. We introduce some initial theoretical results
for identifying causal variables under partial observability by exploiting a
sparsity regularizer, focusing in particular on the linear and piecewise
linear mixing function case. We provide a theorem that allows us to identify
the causal variables up to permutation and element-wise linear transformations
in the linear case and a lemma that allows us to identify causal variables up
to linear transformation in the piecewise case. Finally, we provide a
conjecture that would allow us to identify the causal variables up to
permutation and element-wise linear transformations also in the piecewise
linear case. We test the theorem and conjecture on simulated data, showing the
effectiveness of our method.},
  author       = {Xu, Danru and Yao, Dingling and Lachapelle, Sébastien and Taslakian, Perouz and von Kügelgen, Julius and Locatello, Francesco and Magliacane, Sara},
  booktitle    = {Causal Representation Learning Workshop at NeurIPS 2023},
  location     = {New Orleans, LA, United States},
  publisher    = {OpenReview},
  title        = {{A sparsity principle for partially observable causal representation learning}},
  year         = {2023},
}