@inproceedings{14200,
  abstract     = {The key idea behind the unsupervised learning of disentangled representations
is that real-world data is generated by a few explanatory factors of variation
which can be recovered by unsupervised learning algorithms. In this paper, we
provide a sober look at recent progress in the field and challenge some common
assumptions. We first theoretically show that the unsupervised learning of
disentangled representations is fundamentally impossible without inductive
biases on both the models and the data. Then, we train more than 12000 models
covering most prominent methods and evaluation metrics in a reproducible
large-scale experimental study on seven different data sets. We observe that
while the different methods successfully enforce properties ``encouraged'' by
the corresponding losses, well-disentangled models seemingly cannot be
identified without supervision. Furthermore, increased disentanglement does not
seem to lead to a decreased sample complexity of learning for downstream tasks.
Our results suggest that future work on disentanglement learning should be
explicit about the role of inductive biases and (implicit) supervision,
investigate concrete benefits of enforcing disentanglement of the learned
representations, and consider a reproducible experimental setup covering
several data sets.},
  author       = {Locatello, Francesco and Bauer, Stefan and Lucic, Mario and Rätsch, Gunnar and Gelly, Sylvain and Schölkopf, Bernhard and Bachem, Olivier},
  booktitle    = {Proceedings of the 36th International Conference on Machine Learning},
  location     = {Long Beach, CA, United States},
  pages        = {4114--4124},
  publisher    = {ML Research Press},
  title        = {Challenging Common Assumptions in the Unsupervised Learning of Disentangled Representations},
  volume       = {97},
  year         = {2019},
}

@unpublished{14327,
  abstract     = {A common assumption in causal modeling posits that the data is generated by a
set of independent mechanisms, and algorithms should aim to recover this
structure. Standard unsupervised learning, however, is often concerned with
training a single model to capture the overall distribution or aspects thereof.
Inspired by clustering approaches, we consider mixtures of implicit generative
models that ``disentangle'' the independent generative mechanisms underlying
the data. Relying on an additional set of discriminators, we propose a
competitive training procedure in which the models only need to capture the
portion of the data distribution from which they can produce realistic samples.
As a by-product, each model is simpler and faster to train. We empirically show
that our approach splits the training distribution in a sensible way and
increases the quality of the generated samples.},
  author       = {Locatello, Francesco and Vincent, Damien and Tolstikhin, Ilya and Rätsch, Gunnar and Gelly, Sylvain and Schölkopf, Bernhard},
  note         = {arXiv preprint},
  eprint       = {1804.11130},
  archiveprefix = {arXiv},
  title        = {Competitive Training of Mixtures of Independent Deep Generative Models},
  doi          = {10.48550/arXiv.1804.11130},
  year         = {2018},
}

@inproceedings{14198,
  abstract     = {High-dimensional time series are common in many domains. Since human
cognition is not optimized to work well in high-dimensional spaces, these areas
could benefit from interpretable low-dimensional representations. However, most
representation learning algorithms for time series data are difficult to
interpret. This is due to non-intuitive mappings from data features to salient
properties of the representation and non-smoothness over time. To address this
problem, we propose a new representation learning framework building on ideas
from interpretable discrete dimensionality reduction and deep generative
modeling. This framework allows us to learn discrete representations of time
series, which give rise to smooth and interpretable embeddings with superior
clustering performance. We introduce a new way to overcome the
non-differentiability in discrete representation learning and present a
gradient-based version of the traditional self-organizing map algorithm that is
more performant than the original. Furthermore, to allow for a probabilistic
interpretation of our method, we integrate a Markov model in the representation
space. This model uncovers the temporal transition structure, improves
clustering performance even further and provides additional explanatory
insights as well as a natural representation of uncertainty. We evaluate our
model in terms of clustering performance and interpretability on static
(Fashion-)MNIST data, a time series of linearly interpolated (Fashion-)MNIST
images, a chaotic Lorenz attractor system with two macro states, as well as on
a challenging real world medical time series application on the eICU data set.
Our learned representations compare favorably with competitor methods and
facilitate downstream tasks on the real world data.},
  author       = {Fortuin, Vincent and Hüser, Matthias and Locatello, Francesco and Strathmann, Heiko and Rätsch, Gunnar},
  booktitle    = {International Conference on Learning Representations},
  location     = {New Orleans, LA, United States},
  title        = {{SOM-VAE}: Interpretable Discrete Representation Learning on Time Series},
  year         = {2019},
}

@inproceedings{14201,
  abstract     = {Variational inference is a popular technique to approximate a possibly
intractable Bayesian posterior with a more tractable one. Recently, boosting
variational inference has been proposed as a new paradigm to approximate the
posterior by a mixture of densities by greedily adding components to the
mixture. However, as is the case with many other variational inference
algorithms, its theoretical properties have not been studied. In the present
work, we study the convergence properties of this approach from a modern
optimization viewpoint by establishing connections to the classic Frank-Wolfe
algorithm. Our analyses yields novel theoretical insights regarding the
sufficient conditions for convergence, explicit rates, and algorithmic
simplifications. Since a lot of focus in previous works for variational
inference has been on tractability, our work is especially important as a much
needed attempt to bridge the gap between probabilistic models and their
corresponding theoretical properties.},
  author       = {Locatello, Francesco and Khanna, Rajiv and Ghosh, Joydeep and Rätsch, Gunnar},
  booktitle    = {Proceedings of the 21st International Conference on Artificial Intelligence and Statistics},
  location     = {Playa Blanca, Lanzarote},
  pages        = {464--472},
  publisher    = {ML Research Press},
  title        = {Boosting Variational Inference: An Optimization Perspective},
  volume       = {84},
  year         = {2018},
}

@inproceedings{14202,
  abstract     = {Approximating a probability density in a tractable manner is a central task
in Bayesian statistics. Variational Inference (VI) is a popular technique that
achieves tractability by choosing a relatively simple variational family.
Borrowing ideas from the classic boosting framework, recent approaches attempt
to \emph{boost} VI by replacing the selection of a single density with a
greedily constructed mixture of densities. In order to guarantee convergence,
previous works impose stringent assumptions that require significant effort for
practitioners. Specifically, they require a custom implementation of the greedy
step (called the LMO) for every probabilistic model with respect to an
unnatural variational family of truncated distributions. Our work fixes these
issues with novel theoretical and algorithmic insights. On the theoretical
side, we show that boosting VI satisfies a relaxed smoothness assumption which
is sufficient for the convergence of the functional Frank-Wolfe (FW) algorithm.
Furthermore, we rephrase the LMO problem and propose to maximize the Residual
ELBO (RELBO) which replaces the standard ELBO optimization in VI. These
theoretical enhancements allow for black box implementation of the boosting
subroutine. Finally, we present a stopping criterion drawn from the duality gap
in the classic FW analyses and exhaustive experiments to illustrate the
usefulness of our theoretical and algorithmic contributions.},
  author       = {Locatello, Francesco and Dresdner, Gideon and Khanna, Rajiv and Valera, Isabel and Rätsch, Gunnar},
  booktitle    = {Advances in Neural Information Processing Systems},
  isbn         = {9781510884472},
  issn         = {1049-5258},
  location     = {Montreal, Canada},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {Boosting Black Box Variational Inference},
  volume       = {31},
  year         = {2018},
}

@inproceedings{14203,
  abstract     = {We propose a conditional gradient framework for a composite convex minimization template with broad applications. Our approach combines smoothing and homotopy techniques under the CGM framework, and provably achieves the optimal $O(1/\sqrt{k})$ convergence rate. We demonstrate that the same rate holds if the linear subproblems are solved approximately with additive or multiplicative error. In contrast with the relevant work, we are able to characterize the convergence when the non-smooth term is an indicator function. Specific applications of our framework include the non-smooth minimization, semidefinite programming, and minimization with linear inclusion constraints over a compact domain. Numerical evidence demonstrates the benefits of our framework.},
  author       = {Yurtsever, Alp and Fercoq, Olivier and Locatello, Francesco and Cevher, Volkan},
  booktitle    = {Proceedings of the 35th International Conference on Machine Learning},
  location     = {Stockholm, Sweden},
  pages        = {5727--5736},
  publisher    = {ML Research Press},
  title        = {A Conditional Gradient Framework for Composite Convex Minimization with Applications to Semidefinite Programming},
  volume       = {80},
  year         = {2018},
}

@inproceedings{14204,
  abstract     = {Two popular examples of first-order optimization methods over linear spaces are coordinate descent and matching pursuit algorithms, with their randomized variants. While the former targets the optimization by moving along coordinates, the latter considers a generalized notion of directions. Exploiting the connection between the two algorithms, we present a unified analysis of both, providing affine invariant sublinear $O(1/t)$ rates on smooth objectives and linear convergence on strongly convex objectives. As a byproduct of our affine invariant analysis of matching pursuit, our rates for steepest coordinate descent are the tightest known. Furthermore, we show the first accelerated convergence rate $O(1/t^2)$ for matching pursuit and steepest coordinate descent on convex objectives.},
  author       = {Locatello, Francesco and Raj, Anant and Karimireddy, Sai Praneeth and Rätsch, Gunnar and Schölkopf, Bernhard and Stich, Sebastian U. and Jaggi, Martin},
  booktitle    = {Proceedings of the 35th International Conference on Machine Learning},
  location     = {Stockholm, Sweden},
  pages        = {3198--3207},
  publisher    = {ML Research Press},
  title        = {On Matching Pursuit and Coordinate Descent},
  volume       = {80},
  year         = {2018},
}

@inproceedings{14224,
  abstract     = {Clustering is a cornerstone of unsupervised learning which can be thought as disentangling multiple generative mechanisms underlying the data. In this paper we introduce an algorithmic framework to train mixtures of implicit generative models which we particularize for variational autoencoders. Relying on an additional set of discriminators, we propose a competitive procedure in which the models only need to approximate the portion of the data distribution from which they can produce realistic samples. As a byproduct, each model is simpler to train, and a clustering interpretation arises naturally from the partitioning of the training points among the models. We empirically show that our approach splits the training distribution in a reasonable way and increases the quality of the generated samples.},
  author       = {Locatello, Francesco and Vincent, Damien and Tolstikhin, Ilya and Rätsch, Gunnar and Gelly, Sylvain and Schölkopf, Bernhard},
  booktitle    = {6th International Conference on Learning Representations},
  location     = {Vancouver, Canada},
  title        = {Clustering Meets Implicit Generative Models},
  year         = {2018},
}

@inproceedings{14205,
  abstract     = {Two of the most fundamental prototypes of greedy optimization are the matching pursuit and Frank-Wolfe algorithms. In this paper, we take a unified view on both classes of methods, leading to the first explicit convergence rates of matching pursuit methods in an optimization sense, for general sets of atoms. We derive sublinear (1/t) convergence for both classes on general smooth objectives, and linear convergence on strongly convex objectives, as well as a clear correspondence of algorithm variants. Our presented algorithms and rates are affine invariant, and do not need any incoherence or sparsity assumptions.},
  author       = {Locatello, Francesco and Khanna, Rajiv and Tschannen, Michael and Jaggi, Martin},
  booktitle    = {Proceedings of the 20th International Conference on Artificial Intelligence and Statistics},
  location     = {Fort Lauderdale, FL, United States},
  pages        = {860--868},
  publisher    = {ML Research Press},
  title        = {A Unified Optimization View on Generalized Matching Pursuit and {Frank-Wolfe}},
  volume       = {54},
  year         = {2017},
}

@inproceedings{14206,
  abstract     = {Greedy optimization methods such as Matching Pursuit (MP) and Frank-Wolfe (FW) algorithms regained popularity in recent years due to their simplicity, effectiveness and theoretical guarantees. MP and FW address optimization over the linear span and the convex hull of a set of atoms, respectively. In this paper, we consider the intermediate case of optimization over the convex cone, parametrized as the conic hull of a generic atom set, leading to the first principled definitions of non-negative MP algorithms for which we give explicit convergence rates and demonstrate excellent empirical performance. In particular, we derive sublinear ($O(1/t)$) convergence on general smooth and convex objectives, and linear convergence ($O(e^{-t})$) on strongly convex objectives, in both cases for general sets of atoms. Furthermore, we establish a clear correspondence of our algorithms to known algorithms from the MP and FW literature. Our novel algorithms and analyses target general atom sets and general objective functions, and hence are directly applicable to a large variety of learning settings.},
  author       = {Locatello, Francesco and Tschannen, Michael and Rätsch, Gunnar and Jaggi, Martin},
  booktitle    = {Advances in Neural Information Processing Systems},
  isbn         = {9781510860964},
  location     = {Long Beach, CA, United States},
  publisher    = {Neural Information Processing Systems Foundation},
  title        = {Greedy Algorithms for Cone Constrained Optimization with Convergence Guarantees},
  volume       = {30},
  year         = {2017},
}

