[{"day":"09","type":"conference","intvolume":"        97","status":"public","publication":"Proceedings of the 36th International Conference on Machine Learning","page":"4114-4124","month":"06","date_published":"2019-06-09T00:00:00Z","publisher":"ML Research Press","scopus_import":"1","language":[{"iso":"eng"}],"department":[{"_id":"FrLo"}],"conference":{"start_date":"2019-06-10","name":"International Conference on Machine Learning","end_date":"2019-06-15","location":"Long Beach, CA, United States"},"date_created":"2023-08-22T14:13:08Z","publication_status":"published","citation":{"chicago":"Locatello, Francesco, Stefan Bauer, Mario Lucic, Gunnar Rätsch, Sylvain Gelly, Bernhard Schölkopf, and Olivier Bachem. “Challenging Common Assumptions in the Unsupervised Learning of Disentangled Representations.” In <i>Proceedings of the 36th International Conference on Machine Learning</i>, 97:4114–24. ML Research Press, 2019.","ieee":"F. Locatello <i>et al.</i>, “Challenging common assumptions in the unsupervised learning of disentangled representations,” in <i>Proceedings of the 36th International Conference on Machine Learning</i>, Long Beach, CA, United States, 2019, vol. 97, pp. 4114–4124.","apa":"Locatello, F., Bauer, S., Lucic, M., Rätsch, G., Gelly, S., Schölkopf, B., &#38; Bachem, O. (2019). Challenging common assumptions in the unsupervised learning of disentangled representations. In <i>Proceedings of the 36th International Conference on Machine Learning</i> (Vol. 97, pp. 4114–4124). Long Beach, CA, United States: ML Research Press.","ista":"Locatello F, Bauer S, Lucic M, Rätsch G, Gelly S, Schölkopf B, Bachem O. 2019. Challenging common assumptions in the unsupervised learning of disentangled representations. Proceedings of the 36th International Conference on Machine Learning. International Conference on Machine Learning vol. 97, 4114–4124.","short":"F. Locatello, S. Bauer, M. Lucic, G. Rätsch, S. Gelly, B. Schölkopf, O. Bachem, in:, Proceedings of the 36th International Conference on Machine Learning, ML Research Press, 2019, pp. 4114–4124.","ama":"Locatello F, Bauer S, Lucic M, et al. Challenging common assumptions in the unsupervised learning of disentangled representations. In: <i>Proceedings of the 36th International Conference on Machine Learning</i>. Vol 97. ML Research Press; 2019:4114-4124.","mla":"Locatello, Francesco, et al. “Challenging Common Assumptions in the Unsupervised Learning of Disentangled Representations.” <i>Proceedings of the 36th International Conference on Machine Learning</i>, vol. 97, ML Research Press, 2019, pp. 4114–24."},"abstract":[{"lang":"eng","text":"The key idea behind the unsupervised learning of disentangled representations\r\nis that real-world data is generated by a few explanatory factors of variation\r\nwhich can be recovered by unsupervised learning algorithms. In this paper, we\r\nprovide a sober look at recent progress in the field and challenge some common\r\nassumptions. We first theoretically show that the unsupervised learning of\r\ndisentangled representations is fundamentally impossible without inductive\r\nbiases on both the models and the data. Then, we train more than 12000 models\r\ncovering most prominent methods and evaluation metrics in a reproducible\r\nlarge-scale experimental study on seven different data sets. We observe that\r\nwhile the different methods successfully enforce properties ``encouraged'' by\r\nthe corresponding losses, well-disentangled models seemingly cannot be\r\nidentified without supervision. 
Furthermore, increased disentanglement does not\r\nseem to lead to a decreased sample complexity of learning for downstream tasks.\r\nOur results suggest that future work on disentanglement learning should be\r\nexplicit about the role of inductive biases and (implicit) supervision,\r\ninvestigate concrete benefits of enforcing disentanglement of the learned\r\nrepresentations, and consider a reproducible experimental setup covering\r\nseveral data sets."}],"author":[{"first_name":"Francesco","full_name":"Locatello, Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683","id":"26cfd52f-2483-11ee-8040-88983bcc06d4"},{"first_name":"Stefan","last_name":"Bauer","full_name":"Bauer, Stefan"},{"last_name":"Lucic","full_name":"Lucic, Mario","first_name":"Mario"},{"full_name":"Rätsch, Gunnar","last_name":"Rätsch","first_name":"Gunnar"},{"first_name":"Sylvain","full_name":"Gelly, Sylvain","last_name":"Gelly"},{"first_name":"Bernhard","last_name":"Schölkopf","full_name":"Schölkopf, Bernhard"},{"last_name":"Bachem","full_name":"Bachem, Olivier","first_name":"Olivier"}],"arxiv":1,"volume":97,"date_updated":"2023-09-13T07:45:30Z","oa":1,"article_processing_charge":"No","_id":"14200","extern":"1","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","quality_controlled":"1","year":"2019","external_id":{"arxiv":["1811.12359"]},"title":"Challenging common assumptions in the unsupervised learning of disentangled representations","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1811.12359"}]},{"department":[{"_id":"FrLo"}],"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.1804.11130","open_access":"1"}],"article_number":"1804.11130","date_created":"2023-09-13T12:20:49Z","date_published":"2018-04-30T00:00:00Z","year":"2018","doi":"10.48550/arXiv.1804.11130","month":"04","language":[{"iso":"eng"}],"external_id":{"arxiv":["1804.11130"]},"title":"Competitive training of mixtures of independent deep generative models","date_updated":"2023-09-13T12:23:03Z","oa":1,"article_processing_charge":"No","arxiv":1,"publication":"arXiv","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","_id":"14327","extern":"1","type":"preprint","publication_status":"submitted","citation":{"ama":"Locatello F, Vincent D, Tolstikhin I, Rätsch G, Gelly S, Schölkopf B. Competitive training of mixtures of independent deep generative models. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.1804.11130\">10.48550/arXiv.1804.11130</a>","mla":"Locatello, Francesco, et al. “Competitive Training of Mixtures of Independent Deep Generative Models.” <i>ArXiv</i>, 1804.11130, doi:<a href=\"https://doi.org/10.48550/arXiv.1804.11130\">10.48550/arXiv.1804.11130</a>.","short":"F. Locatello, D. Vincent, I. Tolstikhin, G. Rätsch, S. Gelly, B. Schölkopf, ArXiv (n.d.).","ista":"Locatello F, Vincent D, Tolstikhin I, Rätsch G, Gelly S, Schölkopf B. Competitive training of mixtures of independent deep generative models. arXiv, 1804.11130.","ieee":"F. Locatello, D. Vincent, I. Tolstikhin, G. Rätsch, S. Gelly, and B. Schölkopf, “Competitive training of mixtures of independent deep generative models,” <i>arXiv</i>.","apa":"Locatello, F., Vincent, D., Tolstikhin, I., Rätsch, G., Gelly, S., &#38; Schölkopf, B. (n.d.). Competitive training of mixtures of independent deep generative models. <i>arXiv</i>. 
<a href=\"https://doi.org/10.48550/arXiv.1804.11130\">https://doi.org/10.48550/arXiv.1804.11130</a>","chicago":"Locatello, Francesco, Damien Vincent, Ilya Tolstikhin, Gunnar Rätsch, Sylvain Gelly, and Bernhard Schölkopf. “Competitive Training of Mixtures of Independent Deep Generative Models.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.1804.11130\">https://doi.org/10.48550/arXiv.1804.11130</a>."},"day":"30","status":"public","author":[{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco","last_name":"Locatello","first_name":"Francesco"},{"last_name":"Vincent","full_name":"Vincent, Damien","first_name":"Damien"},{"first_name":"Ilya","full_name":"Tolstikhin, Ilya","last_name":"Tolstikhin"},{"first_name":"Gunnar","full_name":"Rätsch, Gunnar","last_name":"Rätsch"},{"full_name":"Gelly, Sylvain","last_name":"Gelly","first_name":"Sylvain"},{"full_name":"Schölkopf, Bernhard","last_name":"Schölkopf","first_name":"Bernhard"}],"abstract":[{"lang":"eng","text":"A common assumption in causal modeling posits that the data is generated by a\r\nset of independent mechanisms, and algorithms should aim to recover this\r\nstructure. Standard unsupervised learning, however, is often concerned with\r\ntraining a single model to capture the overall distribution or aspects thereof.\r\nInspired by clustering approaches, we consider mixtures of implicit generative\r\nmodels that ``disentangle'' the independent generative mechanisms underlying\r\nthe data. Relying on an additional set of discriminators, we propose a\r\ncompetitive training procedure in which the models only need to capture the\r\nportion of the data distribution from which they can produce realistic samples.\r\nAs a by-product, each model is simpler and faster to train. We empirically show\r\nthat our approach splits the training distribution in a sensible way and\r\nincreases the quality of the generated samples."}]},{"author":[{"full_name":"Fortuin, Vincent","last_name":"Fortuin","first_name":"Vincent"},{"first_name":"Matthias","full_name":"Hüser, Matthias","last_name":"Hüser"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello","full_name":"Locatello, Francesco"},{"first_name":"Heiko","last_name":"Strathmann","full_name":"Strathmann, Heiko"},{"full_name":"Rätsch, Gunnar","last_name":"Rätsch","first_name":"Gunnar"}],"status":"public","abstract":[{"text":"High-dimensional time series are common in many domains. Since human\r\ncognition is not optimized to work well in high-dimensional spaces, these areas\r\ncould benefit from interpretable low-dimensional representations. However, most\r\nrepresentation learning algorithms for time series data are difficult to\r\ninterpret. This is due to non-intuitive mappings from data features to salient\r\nproperties of the representation and non-smoothness over time. To address this\r\nproblem, we propose a new representation learning framework building on ideas\r\nfrom interpretable discrete dimensionality reduction and deep generative\r\nmodeling. This framework allows us to learn discrete representations of time\r\nseries, which give rise to smooth and interpretable embeddings with superior\r\nclustering performance. We introduce a new way to overcome the\r\nnon-differentiability in discrete representation learning and present a\r\ngradient-based version of the traditional self-organizing map algorithm that is\r\nmore performant than the original. 
Furthermore, to allow for a probabilistic\r\ninterpretation of our method, we integrate a Markov model in the representation\r\nspace. This model uncovers the temporal transition structure, improves\r\nclustering performance even further and provides additional explanatory\r\ninsights as well as a natural representation of uncertainty. We evaluate our\r\nmodel in terms of clustering performance and interpretability on static\r\n(Fashion-)MNIST data, a time series of linearly interpolated (Fashion-)MNIST\r\nimages, a chaotic Lorenz attractor system with two macro states, as well as on\r\na challenging real world medical time series application on the eICU data set.\r\nOur learned representations compare favorably with competitor methods and\r\nfacilitate downstream tasks on the real world data.","lang":"eng"}],"type":"conference","citation":{"apa":"Fortuin, V., Hüser, M., Locatello, F., Strathmann, H., &#38; Rätsch, G. (2018). SOM-VAE: Interpretable discrete representation learning on time series. In <i>International Conference on Learning Representations</i>. New Orleans, LA, United States.","ieee":"V. Fortuin, M. Hüser, F. Locatello, H. Strathmann, and G. Rätsch, “SOM-VAE: Interpretable discrete representation learning on time series,” in <i>International Conference on Learning Representations</i>, New Orleans, LA, United States, 2018.","chicago":"Fortuin, Vincent, Matthias Hüser, Francesco Locatello, Heiko Strathmann, and Gunnar Rätsch. “SOM-VAE: Interpretable Discrete Representation Learning on Time Series.” In <i>International Conference on Learning Representations</i>, 2018.","mla":"Fortuin, Vincent, et al. “SOM-VAE: Interpretable Discrete Representation Learning on Time Series.” <i>International Conference on Learning Representations</i>, 2018.","ama":"Fortuin V, Hüser M, Locatello F, Strathmann H, Rätsch G. SOM-VAE: Interpretable discrete representation learning on time series. In: <i>International Conference on Learning Representations</i>. ; 2018.","short":"V. Fortuin, M. Hüser, F. Locatello, H. Strathmann, G. Rätsch, in:, International Conference on Learning Representations, 2018.","ista":"Fortuin V, Hüser M, Locatello F, Strathmann H, Rätsch G. 2018. SOM-VAE: Interpretable discrete representation learning on time series. International Conference on Learning Representations. 
ICLR: International Conference on Learning Representations."},"day":"06","publication_status":"published","quality_controlled":"1","oa_version":"Preprint","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","extern":"1","_id":"14198","article_processing_charge":"No","date_updated":"2023-09-13T06:35:12Z","oa":1,"publication":"International Conference on Learning Representations","arxiv":1,"language":[{"iso":"eng"}],"title":"SOM-VAE: Interpretable discrete representation learning on time series","external_id":{"arxiv":["1806.02199"]},"date_published":"2018-06-06T00:00:00Z","month":"06","year":"2018","date_created":"2023-08-22T14:12:48Z","conference":{"start_date":"2019-05-06","end_date":"2019-05-09","name":"ICLR: International Conference on Learning Representations","location":"New Orleans, LA, United States"},"main_file_link":[{"url":"https://arxiv.org/abs/1806.02199","open_access":"1"}],"department":[{"_id":"FrLo"}]},{"page":"464-472","publication":"Proceedings of the 21st International Conference on Artificial Intelligence and Statistics","status":"public","intvolume":"        84","type":"conference","day":"15","date_created":"2023-08-22T14:15:20Z","conference":{"end_date":"2018-04-11","location":"Playa Blanca, Lanzarote","name":"AISTATS: Conference on Artificial Intelligence and Statistics","start_date":"2018-04-09"},"department":[{"_id":"FrLo"}],"language":[{"iso":"eng"}],"publisher":"ML Research Press","scopus_import":"1","date_published":"2018-04-15T00:00:00Z","month":"04","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","quality_controlled":"1","_id":"14201","extern":"1","volume":84,"oa":1,"date_updated":"2023-09-13T07:52:40Z","article_processing_charge":"No","arxiv":1,"author":[{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","last_name":"Locatello","full_name":"Locatello, Francesco","orcid":"0000-0002-4850-0683","first_name":"Francesco"},{"full_name":"Khanna, Rajiv","last_name":"Khanna","first_name":"Rajiv"},{"last_name":"Ghosh","full_name":"Ghosh, Joydeep","first_name":"Joydeep"},{"last_name":"Rätsch","full_name":"Rätsch, Gunnar","first_name":"Gunnar"}],"abstract":[{"text":"Variational inference is a popular technique to approximate a possibly\r\nintractable Bayesian posterior with a more tractable one. Recently, boosting\r\nvariational inference has been proposed as a new paradigm to approximate the\r\nposterior by a mixture of densities by greedily adding components to the\r\nmixture. However, as is the case with many other variational inference\r\nalgorithms, its theoretical properties have not been studied. In the present\r\nwork, we study the convergence properties of this approach from a modern\r\noptimization viewpoint by establishing connections to the classic Frank-Wolfe\r\nalgorithm. Our analyses yield novel theoretical insights regarding the\r\nsufficient conditions for convergence, explicit rates, and algorithmic\r\nsimplifications. Since a lot of focus in previous works for variational\r\ninference has been on tractability, our work is especially important as a much\r\nneeded attempt to bridge the gap between probabilistic models and their\r\ncorresponding theoretical properties.","lang":"eng"}],"publication_status":"published","citation":{"chicago":"Locatello, Francesco, Rajiv Khanna, Joydeep Ghosh, and Gunnar Rätsch. “Boosting Variational Inference: An Optimization Perspective.” In <i>Proceedings of the 21st International Conference on Artificial Intelligence and Statistics</i>, 84:464–72. 
ML Research Press, 2018.","apa":"Locatello, F., Khanna, R., Ghosh, J., &#38; Rätsch, G. (2018). Boosting variational inference: An optimization perspective. In <i>Proceedings of the 21st International Conference on Artificial Intelligence and Statistics</i> (Vol. 84, pp. 464–472). Playa Blanca, Lanzarote: ML Research Press.","ieee":"F. Locatello, R. Khanna, J. Ghosh, and G. Rätsch, “Boosting variational inference: An optimization perspective,” in <i>Proceedings of the 21st International Conference on Artificial Intelligence and Statistics</i>, Playa Blanca, Lanzarote, 2018, vol. 84, pp. 464–472.","ista":"Locatello F, Khanna R, Ghosh J, Rätsch G. 2018. Boosting variational inference: An optimization perspective. Proceedings of the 21st International Conference on Artificial Intelligence and Statistics. AISTATS: Conference on Artificial Intelligence and Statistics, PMLR, vol. 84, 464–472.","short":"F. Locatello, R. Khanna, J. Ghosh, G. Rätsch, in:, Proceedings of the 21st International Conference on Artificial Intelligence and Statistics, ML Research Press, 2018, pp. 464–472.","mla":"Locatello, Francesco, et al. “Boosting Variational Inference: An Optimization Perspective.” <i>Proceedings of the 21st International Conference on Artificial Intelligence and Statistics</i>, vol. 84, ML Research Press, 2018, pp. 464–72.","ama":"Locatello F, Khanna R, Ghosh J, Rätsch G. Boosting variational inference: An optimization perspective. In: <i>Proceedings of the 21st International Conference on Artificial Intelligence and Statistics</i>. Vol 84. ML Research Press; 2018:464-472."},"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1708.01733"}],"alternative_title":["PMLR"],"title":"Boosting variational inference: An optimization perspective","external_id":{"arxiv":["1708.01733"]},"year":"2018"},{"department":[{"_id":"FrLo"}],"date_created":"2023-08-22T14:15:40Z","conference":{"location":"Montreal, Canada","name":"NeurIPS: Neural Information Processing Systems","end_date":"2018-12-08","start_date":"2018-12-03"},"date_published":"2018-06-06T00:00:00Z","month":"06","language":[{"iso":"eng"}],"publisher":"Neural Information Processing Systems Foundation","scopus_import":"1","publication":"Advances in Neural Information Processing Systems","type":"conference","day":"06","status":"public","intvolume":"        31","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1806.02185"}],"year":"2018","external_id":{"arxiv":["1806.02185"]},"title":"Boosting black box variational inference","oa":1,"volume":31,"date_updated":"2023-09-13T07:38:24Z","article_processing_charge":"No","arxiv":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","quality_controlled":"1","_id":"14202","publication_identifier":{"isbn":["9781510884472"],"eissn":["1049-5258"]},"extern":"1","publication_status":"published","citation":{"chicago":"Locatello, Francesco, Gideon Dresdner, Rajiv Khanna, Isabel Valera, and Gunnar Rätsch. “Boosting Black Box Variational Inference.” In <i>Advances in Neural Information Processing Systems</i>, Vol. 31. Neural Information Processing Systems Foundation, 2018.","ieee":"F. Locatello, G. Dresdner, R. Khanna, I. Valera, and G. Rätsch, “Boosting black box variational inference,” in <i>Advances in Neural Information Processing Systems</i>, Montreal, Canada, 2018, vol. 31.","apa":"Locatello, F., Dresdner, G., Khanna, R., Valera, I., &#38; Rätsch, G. (2018). Boosting black box variational inference. In <i>Advances in Neural Information Processing Systems</i> (Vol. 31). 
Montreal, Canada: Neural Information Processing Systems Foundation.","ista":"Locatello F, Dresdner G, Khanna R, Valera I, Rätsch G. 2018. Boosting black box variational inference. Advances in Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems vol. 31.","short":"F. Locatello, G. Dresdner, R. Khanna, I. Valera, G. Rätsch, in:, Advances in Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2018.","mla":"Locatello, Francesco, et al. “Boosting Black Box Variational Inference.” <i>Advances in Neural Information Processing Systems</i>, vol. 31, Neural Information Processing Systems Foundation, 2018.","ama":"Locatello F, Dresdner G, Khanna R, Valera I, Rätsch G. Boosting black box variational inference. In: <i>Advances in Neural Information Processing Systems</i>. Vol 31. Neural Information Processing Systems Foundation; 2018."},"author":[{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello","full_name":"Locatello, Francesco"},{"full_name":"Dresdner, Gideon","last_name":"Dresdner","first_name":"Gideon"},{"last_name":"Khanna","full_name":"Khanna, Rajiv","first_name":"Rajiv"},{"full_name":"Valera, Isabel","last_name":"Valera","first_name":"Isabel"},{"first_name":"Gunnar","last_name":"Rätsch","full_name":"Rätsch, Gunnar"}],"abstract":[{"lang":"eng","text":"Approximating a probability density in a tractable manner is a central task\r\nin Bayesian statistics. Variational Inference (VI) is a popular technique that\r\nachieves tractability by choosing a relatively simple variational family.\r\nBorrowing ideas from the classic boosting framework, recent approaches attempt\r\nto boost VI by replacing the selection of a single density with a\r\ngreedily constructed mixture of densities. In order to guarantee convergence,\r\nprevious works impose stringent assumptions that require significant effort for\r\npractitioners. Specifically, they require a custom implementation of the greedy\r\nstep (called the LMO) for every probabilistic model with respect to an\r\nunnatural variational family of truncated distributions. Our work fixes these\r\nissues with novel theoretical and algorithmic insights. On the theoretical\r\nside, we show that boosting VI satisfies a relaxed smoothness assumption which\r\nis sufficient for the convergence of the functional Frank-Wolfe (FW) algorithm.\r\nFurthermore, we rephrase the LMO problem and propose to maximize the Residual\r\nELBO (RELBO) which replaces the standard ELBO optimization in VI. These\r\ntheoretical enhancements allow for black box implementation of the boosting\r\nsubroutine. 
Finally, we present a stopping criterion drawn from the duality gap\r\nin the classic FW analyses and exhaustive experiments to illustrate the\r\nusefulness of our theoretical and algorithmic contributions."}]},{"publisher":"ML Research Press","language":[{"iso":"eng"}],"month":"07","date_published":"2018-07-15T00:00:00Z","conference":{"name":"ICML: International Conference on Machine Learning","end_date":"2018-07-15","location":"Stockholm, Sweden","start_date":"2018-07-10"},"date_created":"2023-08-22T14:16:01Z","department":[{"_id":"FrLo"}],"intvolume":"        80","status":"public","day":"15","type":"conference","publication":"Proceedings of the 35th International Conference on Machine Learning","page":"5727-5736","title":"A conditional gradient framework for composite convex minimization with applications to semidefinite programming","external_id":{"arxiv":["1804.08544"]},"year":"2018","alternative_title":["PMLR"],"main_file_link":[{"url":"https://arxiv.org/abs/1804.08544","open_access":"1"}],"abstract":[{"lang":"eng","text":"We propose a conditional gradient framework for a composite convex minimization template with broad applications. Our approach combines smoothing and homotopy techniques under the CGM framework, and provably achieves the optimal O(1/√k) convergence rate. We demonstrate that the same rate holds if the linear subproblems are solved approximately with additive or multiplicative error. In contrast with the relevant work, we are able to characterize the convergence when the non-smooth term is an indicator function. Specific applications of our framework include the non-smooth minimization, semidefinite programming, and minimization with linear inclusion constraints over a compact domain. Numerical evidence demonstrates the benefits of our framework."}],"author":[{"full_name":"Yurtsever, Alp","last_name":"Yurtsever","first_name":"Alp"},{"first_name":"Olivier","full_name":"Fercoq, Olivier","last_name":"Fercoq"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco","last_name":"Locatello"},{"full_name":"Cevher, Volkan","last_name":"Cevher","first_name":"Volkan"}],"citation":{"apa":"Yurtsever, A., Fercoq, O., Locatello, F., &#38; Cevher, V. (2018). A conditional gradient framework for composite convex minimization with applications to semidefinite programming. In <i>Proceedings of the 35th International Conference on Machine Learning</i> (Vol. 80, pp. 5727–5736). Stockholm, Sweden: ML Research Press.","ieee":"A. Yurtsever, O. Fercoq, F. Locatello, and V. Cevher, “A conditional gradient framework for composite convex minimization with applications to semidefinite programming,” in <i>Proceedings of the 35th International Conference on Machine Learning</i>, Stockholm, Sweden, 2018, vol. 80, pp. 5727–5736.","chicago":"Yurtsever, Alp, Olivier Fercoq, Francesco Locatello, and Volkan Cevher. “A Conditional Gradient Framework for Composite Convex Minimization with Applications to Semidefinite Programming.” In <i>Proceedings of the 35th International Conference on Machine Learning</i>, 80:5727–36. ML Research Press, 2018.","mla":"Yurtsever, Alp, et al. “A Conditional Gradient Framework for Composite Convex Minimization with Applications to Semidefinite Programming.” <i>Proceedings of the 35th International Conference on Machine Learning</i>, vol. 80, ML Research Press, 2018, pp. 5727–36.","ama":"Yurtsever A, Fercoq O, Locatello F, Cevher V. 
A conditional gradient framework for composite convex minimization with applications to semidefinite programming. In: <i>Proceedings of the 35th International Conference on Machine Learning</i>. Vol 80. ML Research Press; 2018:5727-5736.","ista":"Yurtsever A, Fercoq O, Locatello F, Cevher V. 2018. A conditional gradient framework for composite convex minimization with applications to semidefinite programming. Proceedings of the 35th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 80, 5727–5736.","short":"A. Yurtsever, O. Fercoq, F. Locatello, V. Cevher, in:, Proceedings of the 35th International Conference on Machine Learning, ML Research Press, 2018, pp. 5727–5736."},"publication_status":"published","extern":"1","_id":"14203","oa_version":"Preprint","quality_controlled":"1","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","arxiv":1,"article_processing_charge":"No","date_updated":"2023-09-13T08:13:39Z","oa":1,"volume":80},{"date_published":"2018-07-01T00:00:00Z","month":"07","language":[{"iso":"eng"}],"scopus_import":"1","publisher":"ML Research Press","department":[{"_id":"FrLo"}],"date_created":"2023-08-22T14:16:25Z","type":"conference","day":"01","status":"public","intvolume":"        80","page":"3198-3207","publication":"Proceedings of the 35th International Conference on Machine Learning","year":"2018","title":"On matching pursuit and coordinate descent","external_id":{"arxiv":["1803.09539"]},"main_file_link":[{"url":"https://arxiv.org/abs/1803.09539","open_access":"1"}],"alternative_title":["PMLR"],"citation":{"ista":"Locatello F, Raj A, Karimireddy SP, Rätsch G, Schölkopf B, Stich SU, Jaggi M. 2018. On matching pursuit and coordinate descent. Proceedings of the 35th International Conference on Machine Learning. ICML: International Conference on Machine Learning, PMLR, vol. 80, 3198–3207.","short":"F. Locatello, A. Raj, S.P. Karimireddy, G. Rätsch, B. Schölkopf, S.U. Stich, M. Jaggi, in:, Proceedings of the 35th International Conference on Machine Learning, ML Research Press, 2018, pp. 3198–3207.","mla":"Locatello, Francesco, et al. “On Matching Pursuit and Coordinate Descent.” <i>Proceedings of the 35th International Conference on Machine Learning</i>, vol. 80, ML Research Press, 2018, pp. 3198–207.","ama":"Locatello F, Raj A, Karimireddy SP, et al. On matching pursuit and coordinate descent. In: <i>Proceedings of the 35th International Conference on Machine Learning</i>. Vol 80. ML Research Press; 2018:3198-3207.","chicago":"Locatello, Francesco, Anant Raj, Sai Praneeth Karimireddy, Gunnar Rätsch, Bernhard Schölkopf, Sebastian U. Stich, and Martin Jaggi. “On Matching Pursuit and Coordinate Descent.” In <i>Proceedings of the 35th International Conference on Machine Learning</i>, 80:3198–3207. ML Research Press, 2018.","ieee":"F. Locatello <i>et al.</i>, “On matching pursuit and coordinate descent,” in <i>Proceedings of the 35th International Conference on Machine Learning</i>, 2018, vol. 80, pp. 3198–3207.","apa":"Locatello, F., Raj, A., Karimireddy, S. P., Rätsch, G., Schölkopf, B., Stich, S. U., &#38; Jaggi, M. (2018). On matching pursuit and coordinate descent. In <i>Proceedings of the 35th International Conference on Machine Learning</i> (Vol. 80, pp. 3198–3207). 
ML Research Press."},"publication_status":"published","author":[{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello","full_name":"Locatello, Francesco"},{"first_name":"Anant","last_name":"Raj","full_name":"Raj, Anant"},{"first_name":"Sai Praneeth","last_name":"Karimireddy","full_name":"Karimireddy, Sai Praneeth"},{"last_name":"Rätsch","full_name":"Rätsch, Gunnar","first_name":"Gunnar"},{"first_name":"Bernhard","last_name":"Schölkopf","full_name":"Schölkopf, Bernhard"},{"last_name":"Stich","full_name":"Stich, Sebastian U.","first_name":"Sebastian U."},{"full_name":"Jaggi, Martin","last_name":"Jaggi","first_name":"Martin"}],"abstract":[{"text":"Two popular examples of first-order optimization methods over linear spaces are coordinate descent and matching pursuit algorithms, with their randomized variants. While the former targets the optimization by moving along coordinates, the latter considers a generalized notion of directions. Exploiting the connection between the two algorithms, we present a unified analysis of both, providing affine invariant sublinear O(1/t) rates on smooth objectives and linear convergence on strongly convex objectives. As a byproduct of our affine invariant analysis of matching pursuit, our rates for steepest coordinate descent are the tightest known. Furthermore, we show the first accelerated convergence rate O(1/t2) for matching pursuit and steepest coordinate descent on convex objectives.","lang":"eng"}],"article_processing_charge":"No","volume":80,"oa":1,"date_updated":"2023-09-13T08:19:05Z","arxiv":1,"oa_version":"Preprint","quality_controlled":"1","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","extern":"1","_id":"14204"},{"date_created":"2023-08-22T14:25:34Z","conference":{"start_date":"2018-04-30","end_date":"2018-05-03","location":"Vancouver, Canada","name":"International Conference on Machine Learning"},"main_file_link":[{"url":"https://arxiv.org/abs/1804.11130","open_access":"1"}],"department":[{"_id":"FrLo"}],"language":[{"iso":"eng"}],"external_id":{"arxiv":["1804.11130"]},"scopus_import":"1","title":"Clustering meets implicit generative models","date_published":"2018-05-01T00:00:00Z","month":"05","year":"2018","oa_version":"Preprint","quality_controlled":"1","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","extern":"1","_id":"14224","article_processing_charge":"No","date_updated":"2023-09-13T09:08:24Z","oa":1,"publication":"6th International Conference on Learning Representations","arxiv":1,"author":[{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683","first_name":"Francesco"},{"last_name":"Vincent","full_name":"Vincent, Damien","first_name":"Damien"},{"last_name":"Tolstikhin","full_name":"Tolstikhin, Ilya","first_name":"Ilya"},{"full_name":"Ratsch, Gunnar","last_name":"Ratsch","first_name":"Gunnar"},{"first_name":"Sylvain","last_name":"Gelly","full_name":"Gelly, Sylvain"},{"first_name":"Bernhard","last_name":"Scholkopf","full_name":"Scholkopf, Bernhard"}],"status":"public","abstract":[{"lang":"eng","text":"Clustering is a cornerstone of unsupervised learning which can be thought as disentangling multiple generative mechanisms underlying the data. In this paper we introduce an algorithmic framework to train mixtures of implicit generative models which we particularize for variational autoencoders. 
Relying on an additional set of discriminators, we propose a competitive procedure in which the models only need to approximate the portion of the data distribution from which they can produce realistic samples. As a byproduct, each model is simpler to train, and a clustering interpretation arises naturally from the partitioning of the training points among the models. We empirically show that our approach splits the training distribution in a reasonable way and increases the quality of the generated samples."}],"type":"conference","day":"01","citation":{"chicago":"Locatello, Francesco, Damien Vincent, Ilya Tolstikhin, Gunnar Ratsch, Sylvain Gelly, and Bernhard Scholkopf. “Clustering Meets Implicit Generative Models.” In <i>6th International Conference on Learning Representations</i>, 2018.","ieee":"F. Locatello, D. Vincent, I. Tolstikhin, G. Ratsch, S. Gelly, and B. Scholkopf, “Clustering meets implicit generative models,” in <i>6th International Conference on Learning Representations</i>, Vancouver, Canada, 2018.","apa":"Locatello, F., Vincent, D., Tolstikhin, I., Ratsch, G., Gelly, S., &#38; Scholkopf, B. (2018). Clustering meets implicit generative models. In <i>6th International Conference on Learning Representations</i>. Vancouver, Canada.","ista":"Locatello F, Vincent D, Tolstikhin I, Ratsch G, Gelly S, Scholkopf B. 2018. Clustering meets implicit generative models. 6th International Conference on Learning Representations. International Conference on Machine Learning.","short":"F. Locatello, D. Vincent, I. Tolstikhin, G. Ratsch, S. Gelly, B. Scholkopf, in:, 6th International Conference on Learning Representations, 2018.","mla":"Locatello, Francesco, et al. “Clustering Meets Implicit Generative Models.” <i>6th International Conference on Learning Representations</i>, 2018.","ama":"Locatello F, Vincent D, Tolstikhin I, Ratsch G, Gelly S, Scholkopf B. Clustering meets implicit generative models. In: <i>6th International Conference on Learning Representations</i>. ; 2018."},"publication_status":"published"},{"page":"860-868","publication":"Proceedings of the 20th International Conference on Artificial Intelligence and Statistics","status":"public","intvolume":"        54","type":"conference","day":"21","date_created":"2023-08-22T14:17:19Z","conference":{"end_date":"2017-04-22","location":"Fort Lauderdale, FL, United States","name":"AISTATS: Conference on Artificial Intelligence and Statistics","start_date":"2017-04-20"},"department":[{"_id":"FrLo"}],"language":[{"iso":"eng"}],"publisher":"ML Research Press","date_published":"2017-02-21T00:00:00Z","month":"02","oa_version":"Preprint","quality_controlled":"1","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","extern":"1","_id":"14205","article_processing_charge":"No","oa":1,"date_updated":"2023-09-13T09:49:10Z","volume":54,"arxiv":1,"author":[{"first_name":"Francesco","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco","last_name":"Locatello","id":"26cfd52f-2483-11ee-8040-88983bcc06d4"},{"last_name":"Khanna","full_name":"Khanna, Rajiv","first_name":"Rajiv"},{"first_name":"Michael","full_name":"Tschannen, Michael","last_name":"Tschannen"},{"first_name":"Martin","full_name":"Jaggi, Martin","last_name":"Jaggi"}],"abstract":[{"lang":"eng","text":"Two of the most fundamental prototypes of greedy optimization are the matching pursuit and Frank-Wolfe algorithms. 
In this paper, we take a unified view on both classes of methods, leading to the first explicit convergence rates of matching pursuit methods in an optimization sense, for general sets of atoms. We derive sublinear (1/t) convergence for both classes on general smooth objectives, and linear convergence on strongly convex objectives, as well as a clear correspondence of algorithm variants. Our presented algorithms and rates are affine invariant, and do not need any incoherence or sparsity assumptions."}],"citation":{"ama":"Locatello F, Khanna R, Tschannen M, Jaggi M. A unified optimization view on generalized matching pursuit and Frank-Wolfe. In: <i>Proceedings of the 20th International Conference on Artificial Intelligence and Statistics</i>. Vol 54. ML Research Press; 2017:860-868.","mla":"Locatello, Francesco, et al. “A Unified Optimization View on Generalized Matching Pursuit and Frank-Wolfe.” <i>Proceedings of the 20th International Conference on Artificial Intelligence and Statistics</i>, vol. 54, ML Research Press, 2017, pp. 860–68.","short":"F. Locatello, R. Khanna, M. Tschannen, M. Jaggi, in:, Proceedings of the 20th International Conference on Artificial Intelligence and Statistics, ML Research Press, 2017, pp. 860–868.","ista":"Locatello F, Khanna R, Tschannen M, Jaggi M. 2017. A unified optimization view on generalized matching pursuit and Frank-Wolfe. Proceedings of the 20th International Conference on Artificial Intelligence and Statistics. AISTATS: Conference on Artificial Intelligence and Statistics vol. 54, 860–868.","apa":"Locatello, F., Khanna, R., Tschannen, M., &#38; Jaggi, M. (2017). A unified optimization view on generalized matching pursuit and Frank-Wolfe. In <i>Proceedings of the 20th International Conference on Artificial Intelligence and Statistics</i> (Vol. 54, pp. 860–868). Fort Lauderdale, FL, United States: ML Research Press.","ieee":"F. Locatello, R. Khanna, M. Tschannen, and M. Jaggi, “A unified optimization view on generalized matching pursuit and Frank-Wolfe,” in <i>Proceedings of the 20th International Conference on Artificial Intelligence and Statistics</i>, Fort Lauderdale, FL, United States, 2017, vol. 54, pp. 860–868.","chicago":"Locatello, Francesco, Rajiv Khanna, Michael Tschannen, and Martin Jaggi. “A Unified Optimization View on Generalized Matching Pursuit and Frank-Wolfe.” In <i>Proceedings of the 20th International Conference on Artificial Intelligence and Statistics</i>, 54:860–68. ML Research Press, 2017."},"publication_status":"published","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.1702.06457"}],"title":"A unified optimization view on generalized matching pursuit and Frank-Wolfe","external_id":{"arxiv":["1702.06457"]},"year":"2017"},{"oa":1,"date_updated":"2023-09-13T08:32:23Z","article_processing_charge":"No","arxiv":1,"publication":"Advances in Neural Information Processing Systems","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa_version":"Preprint","quality_controlled":"1","_id":"14206","extern":"1","publication_identifier":{"isbn":["9781510860964"]},"type":"conference","publication_status":"published","day":"31","citation":{"short":"F. Locatello, M. Tschannen, G. Rätsch, M. Jaggi, in:, Advances in Neural Information Processing Systems, 2017.","ista":"Locatello F, Tschannen M, Rätsch G, Jaggi M. 2017. Greedy algorithms for cone constrained optimization with convergence guarantees. Advances in Neural Information Processing Systems. 
NeurIPS: Neural Information Processing Systems.","mla":"Locatello, Francesco, et al. “Greedy Algorithms for Cone Constrained Optimization with Convergence Guarantees.” <i>Advances in Neural Information Processing Systems</i>, 2017.","ama":"Locatello F, Tschannen M, Rätsch G, Jaggi M. Greedy algorithms for cone constrained optimization with convergence guarantees. In: <i>Advances in Neural Information Processing Systems</i>. ; 2017.","chicago":"Locatello, Francesco, Michael Tschannen, Gunnar Rätsch, and Martin Jaggi. “Greedy Algorithms for Cone Constrained Optimization with Convergence Guarantees.” In <i>Advances in Neural Information Processing Systems</i>, 2017.","ieee":"F. Locatello, M. Tschannen, G. Rätsch, and M. Jaggi, “Greedy algorithms for cone constrained optimization with convergence guarantees,” in <i>Advances in Neural Information Processing Systems</i>, Long Beach, CA, United States, 2017.","apa":"Locatello, F., Tschannen, M., Rätsch, G., &#38; Jaggi, M. (2017). Greedy algorithms for cone constrained optimization with convergence guarantees. In <i>Advances in Neural Information Processing Systems</i>. Long Beach, CA, United States."},"author":[{"first_name":"Francesco","last_name":"Locatello","full_name":"Locatello, Francesco","orcid":"0000-0002-4850-0683","id":"26cfd52f-2483-11ee-8040-88983bcc06d4"},{"last_name":"Tschannen","full_name":"Tschannen, Michael","first_name":"Michael"},{"last_name":"Rätsch","full_name":"Rätsch, Gunnar","first_name":"Gunnar"},{"first_name":"Martin","last_name":"Jaggi","full_name":"Jaggi, Martin"}],"status":"public","abstract":[{"text":"Greedy optimization methods such as Matching Pursuit (MP) and Frank-Wolfe (FW) algorithms regained popularity in recent years due to their simplicity, effectiveness and theoretical guarantees. MP and FW address optimization over the linear span and the convex hull of a set of atoms, respectively. In this paper, we consider the intermediate case of optimization over the convex cone, parametrized as the conic hull of a generic atom set, leading to the first principled definitions of non-negative MP algorithms for which we give explicit convergence rates and demonstrate excellent empirical performance. In particular, we derive sublinear (O(1/t)) convergence on general smooth and convex objectives, and linear convergence (O(e⁻ᵗ)) on strongly convex objectives, in both cases for general sets of atoms. Furthermore, we establish a clear correspondence of our algorithms to known algorithms from the MP and FW literature. Our novel algorithms and analyses target general atom sets and general objective functions, and hence are directly applicable to a large variety of learning settings.","lang":"eng"}],"department":[{"_id":"FrLo"}],"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1705.11041"}],"date_created":"2023-08-22T14:17:38Z","conference":{"start_date":"2017-12-04","end_date":"2017-12-09","location":"Long Beach, CA, United States","name":"NeurIPS: Neural Information Processing Systems"},"date_published":"2017-05-31T00:00:00Z","year":"2017","month":"05","language":[{"iso":"eng"}],"title":"Greedy algorithms for cone constrained optimization with convergence guarantees","external_id":{"arxiv":["1705.11041"]}}]
