[{"citation":{"ama":"Lohaus M, Kleindessner M, Kenthapadi K, Locatello F, Russell C. Are two heads the same as one? Identifying disparate treatment in fair neural networks. In: <i>36th Conference on Neural Information Processing Systems</i>. Vol 35. Neural Information Processing Systems Foundation; 2022:16548-16562.","mla":"Lohaus, Michael, et al. “Are Two Heads the Same as One? Identifying Disparate Treatment in Fair Neural Networks.” <i>36th Conference on Neural Information Processing Systems</i>, vol. 35, Neural Information Processing Systems Foundation, 2022, pp. 16548–62.","short":"M. Lohaus, M. Kleindessner, K. Kenthapadi, F. Locatello, C. Russell, in:, 36th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2022, pp. 16548–16562.","ista":"Lohaus M, Kleindessner M, Kenthapadi K, Locatello F, Russell C. 2022. Are two heads the same as one? Identifying disparate treatment in fair neural networks. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, Advances in Neural Information Processing Systems, vol. 35, 16548–16562.","ieee":"M. Lohaus, M. Kleindessner, K. Kenthapadi, F. Locatello, and C. Russell, “Are two heads the same as one? Identifying disparate treatment in fair neural networks,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States, 2022, vol. 35, pp. 16548–16562.","chicago":"Lohaus, Michael, Matthäus Kleindessner, Krishnaram Kenthapadi, Francesco Locatello, and Chris Russell. “Are Two Heads the Same as One? Identifying Disparate Treatment in Fair Neural Networks.” In <i>36th Conference on Neural Information Processing Systems</i>, 35:16548–62. Neural Information Processing Systems Foundation, 2022.","apa":"Lohaus, M., Kleindessner, M., Kenthapadi, K., Locatello, F., &#38; Russell, C. (2022). Are two heads the same as one? Identifying disparate treatment in fair neural networks. In <i>36th Conference on Neural Information Processing Systems</i> (Vol. 35, pp. 16548–16562). New Orleans, LA, United States: Neural Information Processing Systems Foundation."},"publication_identifier":{"isbn":["9781713871088"]},"oa":1,"status":"public","external_id":{"arxiv":["2204.04440"]},"_id":"14106","year":"2022","volume":35,"page":"16548-16562","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"conference","abstract":[{"lang":"eng","text":"We show that deep networks trained to satisfy demographic parity often do so\r\nthrough a form of race or gender awareness, and that the more we force a network\r\nto be fair, the more accurately we can recover race or gender from the internal state\r\nof the network. Based on this observation, we investigate an alternative fairness\r\napproach: we add a second classification head to the network to explicitly predict\r\nthe protected attribute (such as race or gender) alongside the original task. After\r\ntraining the two-headed network, we enforce demographic parity by merging the\r\ntwo heads, creating a network with the same architecture as the original network.\r\nWe establish a close relationship between existing approaches and our approach\r\nby showing (1) that the decisions of a fair classifier are well-approximated by our\r\napproach, and (2) that an unfair and optimally accurate classifier can be recovered\r\nfrom a fair classifier and our second head predicting the protected attribute. 
We use\r\nour explicit formulation to argue that the existing fairness approaches, just as ours,\r\ndemonstrate disparate treatment and that they are likely to be unlawful in a wide\r\nrange of scenarios under US law."}],"extern":"1","arxiv":1,"date_created":"2023-08-21T12:12:42Z","oa_version":"Preprint","publication":"36th Conference on Neural Information Processing Systems","department":[{"_id":"FrLo"}],"title":"Are two heads the same as one? Identifying disparate treatment in fair neural networks","publisher":"Neural Information Processing Systems Foundation","month":"12","language":[{"iso":"eng"}],"day":"15","intvolume":"        35","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2204.04440"}],"publication_status":"published","scopus_import":"1","quality_controlled":"1","date_published":"2022-12-15T00:00:00Z","author":[{"last_name":"Lohaus","first_name":"Michael","full_name":"Lohaus, Michael"},{"full_name":"Kleindessner, Matthäus","last_name":"Kleindessner","first_name":"Matthäus"},{"first_name":"Krishnaram","last_name":"Kenthapadi","full_name":"Kenthapadi, Krishnaram"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello","first_name":"Francesco"},{"full_name":"Russell, Chris","last_name":"Russell","first_name":"Chris"}],"article_processing_charge":"No","alternative_title":["Advances in Neural Information Processing Systems"],"date_updated":"2023-09-06T10:29:42Z","conference":{"location":"New Orleans, LA, United States","start_date":"2022-11-28","name":"NeurIPS: Neural Information Processing Systems","end_date":"2022-12-09"}},{"status":"public","oa":1,"external_id":{"arxiv":["2210.12733"]},"day":"23","citation":{"ista":"Yao J, Hong Y, Wang C, Xiao T, He T, Locatello F, Wipf D, Fu Y, Zhang Z. 2022. Self-supervised amodal video object segmentation. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems.","mla":"Yao, Jian, et al. “Self-Supervised Amodal Video Object Segmentation.” <i>36th Conference on Neural Information Processing Systems</i>, 2022, doi:<a href=\"https://doi.org/10.48550/arXiv.2210.12733\">10.48550/arXiv.2210.12733</a>.","short":"J. Yao, Y. Hong, C. Wang, T. Xiao, T. He, F. Locatello, D. Wipf, Y. Fu, Z. Zhang, in:, 36th Conference on Neural Information Processing Systems, 2022.","ama":"Yao J, Hong Y, Wang C, et al. Self-supervised amodal video object segmentation. In: <i>36th Conference on Neural Information Processing Systems</i>. ; 2022. doi:<a href=\"https://doi.org/10.48550/arXiv.2210.12733\">10.48550/arXiv.2210.12733</a>","apa":"Yao, J., Hong, Y., Wang, C., Xiao, T., He, T., Locatello, F., … Zhang, Z. (2022). Self-supervised amodal video object segmentation. In <i>36th Conference on Neural Information Processing Systems</i>. New Orleans, LA, United States. <a href=\"https://doi.org/10.48550/arXiv.2210.12733\">https://doi.org/10.48550/arXiv.2210.12733</a>","ieee":"J. Yao <i>et al.</i>, “Self-supervised amodal video object segmentation,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States, 2022.","chicago":"Yao, Jian, Yuxin Hong, Chiyu Wang, Tianjun Xiao, Tong He, Francesco Locatello, David Wipf, Yanwei Fu, and Zheng Zhang. “Self-Supervised Amodal Video Object Segmentation.” In <i>36th Conference on Neural Information Processing Systems</i>, 2022. 
<a href=\"https://doi.org/10.48550/arXiv.2210.12733\">https://doi.org/10.48550/arXiv.2210.12733</a>."},"language":[{"iso":"eng"}],"month":"10","type":"conference","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14107","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2210.12733"}],"publication_status":"published","year":"2022","arxiv":1,"author":[{"full_name":"Yao, Jian","last_name":"Yao","first_name":"Jian"},{"first_name":"Yuxin","last_name":"Hong","full_name":"Hong, Yuxin"},{"last_name":"Wang","first_name":"Chiyu","full_name":"Wang, Chiyu"},{"first_name":"Tianjun","last_name":"Xiao","full_name":"Xiao, Tianjun"},{"full_name":"He, Tong","first_name":"Tong","last_name":"He"},{"full_name":"Locatello, Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello"},{"full_name":"Wipf, David","first_name":"David","last_name":"Wipf"},{"last_name":"Fu","first_name":"Yanwei","full_name":"Fu, Yanwei"},{"last_name":"Zhang","first_name":"Zheng","full_name":"Zhang, Zheng"}],"date_published":"2022-10-23T00:00:00Z","doi":"10.48550/arXiv.2210.12733","date_updated":"2023-09-11T09:34:17Z","article_processing_charge":"No","abstract":[{"lang":"eng","text":"Amodal perception requires inferring the full shape of an object that is partially occluded. This task is particularly challenging on two levels: (1) it requires more information than what is contained in the instant retina or imaging sensor, (2) it is difficult to obtain enough well-annotated amodal labels for supervision. To this end, this paper develops a new framework of\r\nSelf-supervised amodal Video object segmentation (SaVos). Our method efficiently leverages the visual information of video temporal sequences to infer the amodal mask of objects. The key intuition is that the occluded part of an object can be explained away if that part is visible in other frames, possibly deformed as long as the deformation can be reasonably learned.\r\nAccordingly, we derive a novel self-supervised learning paradigm that efficiently utilizes the visible object parts as the supervision to guide the training on videos. In addition to learning type prior to complete masks for known types, SaVos also learns the spatiotemporal prior, which is also useful for the amodal task and could generalize to unseen types. The proposed\r\nframework achieves the state-of-the-art performance on the synthetic amodal segmentation benchmark FISHBOWL and the real world benchmark KINS-Video-Car. 
Further, it lends itself well to being transferred to novel distributions using test-time adaptation, outperforming existing models even after the transfer to a new distribution."}],"extern":"1","title":"Self-supervised amodal video object segmentation","department":[{"_id":"FrLo"}],"publication":"36th Conference on Neural Information Processing Systems","oa_version":"Preprint","conference":{"location":"New Orleans, LA, United States","start_date":"2022-11-28","name":"NeurIPS: Neural Information Processing Systems","end_date":"2022-12-01"},"date_created":"2023-08-21T12:13:25Z"},{"department":[{"_id":"FrLo"}],"title":"Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers","publication":"2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition","publisher":"Institute of Electrical and Electronics Engineers","oa_version":"Preprint","date_created":"2023-08-21T12:18:00Z","arxiv":1,"abstract":[{"text":"Algorithmic fairness is frequently motivated in terms of a trade-off in which overall performance is decreased so as to improve performance on disadvantaged groups where the algorithm would otherwise be less accurate. Contrary to this, we find that applying existing fairness approaches to computer vision improve fairness by degrading the performance of classifiers across all groups (with increased degradation on the best performing groups). Extending the bias-variance decomposition for classification to fairness, we theoretically explain why the majority of fairness methods designed for low capacity models should not be used in settings involving high-capacity models, a scenario common to computer vision. We corroborate this analysis with extensive experimental support that shows that many of the fairness heuristics used in computer vision also degrade performance on the most disadvantaged groups. Building on these insights, we propose an adaptive augmentation strategy that, uniquely, of all methods tested, improves performance for the disadvantaged groups.","lang":"eng"}],"extern":"1","page":"10400-10411","type":"conference","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14114","year":"2022","status":"public","oa":1,"external_id":{"arxiv":["2203.04913"]},"citation":{"ama":"Zietlow D, Lohaus M, Balakrishnan G, et al. Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. In: <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. Institute of Electrical and Electronics Engineers; 2022:10400-10411. doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">10.1109/cvpr52688.2022.01016</a>","ista":"Zietlow D, Lohaus M, Balakrishnan G, Kleindessner M, Locatello F, Scholkopf B, Russell C. 2022. Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Conference on Computer Vision and Pattern Recognition, 10400–10411.","mla":"Zietlow, Dominik, et al. “Leveling down in Computer Vision: Pareto Inefficiencies in Fair Deep Classifiers.” <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Institute of Electrical and Electronics Engineers, 2022, pp. 10400–11, doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">10.1109/cvpr52688.2022.01016</a>.","short":"D. Zietlow, M. Lohaus, G. Balakrishnan, M. Kleindessner, F. Locatello, B. Scholkopf, C. Russell, in:, 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 
10400–10411.","chicago":"Zietlow, Dominik, Michael Lohaus, Guha Balakrishnan, Matthaus Kleindessner, Francesco Locatello, Bernhard Scholkopf, and Chris Russell. “Leveling down in Computer Vision: Pareto Inefficiencies in Fair Deep Classifiers.” In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 10400–411. Institute of Electrical and Electronics Engineers, 2022. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">https://doi.org/10.1109/cvpr52688.2022.01016</a>.","ieee":"D. Zietlow <i>et al.</i>, “Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers,” in <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, New Orleans, LA, United States, 2022, pp. 10400–10411.","apa":"Zietlow, D., Lohaus, M., Balakrishnan, G., Kleindessner, M., Locatello, F., Scholkopf, B., &#38; Russell, C. (2022). Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 10400–10411). New Orleans, LA, United States: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">https://doi.org/10.1109/cvpr52688.2022.01016</a>"},"publication_identifier":{"eissn":["2575-7075"],"issn":["1063-6919"],"isbn":["9781665469470"]},"conference":{"end_date":"2022-06-24","name":"CVPR: Conference on Computer Vision and Pattern Recognition","start_date":"2022-06-18","location":"New Orleans, LA, United States"},"date_published":"2022-07-01T00:00:00Z","author":[{"full_name":"Zietlow, Dominik","first_name":"Dominik","last_name":"Zietlow"},{"full_name":"Lohaus, Michael","first_name":"Michael","last_name":"Lohaus"},{"full_name":"Balakrishnan, Guha","last_name":"Balakrishnan","first_name":"Guha"},{"first_name":"Matthaus","last_name":"Kleindessner","full_name":"Kleindessner, Matthaus"},{"orcid":"0000-0002-4850-0683","last_name":"Locatello","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco"},{"full_name":"Scholkopf, Bernhard","last_name":"Scholkopf","first_name":"Bernhard"},{"full_name":"Russell, Chris","first_name":"Chris","last_name":"Russell"}],"doi":"10.1109/cvpr52688.2022.01016","date_updated":"2023-09-11T09:19:14Z","article_processing_charge":"No","scopus_import":"1","quality_controlled":"1","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2203.04913"}],"publication_status":"published","day":"01","language":[{"iso":"eng"}],"month":"07"},{"status":"public","oa":1,"day":"14","external_id":{"arxiv":["2210.08031"]},"citation":{"ama":"Rahaman N, Weiss M, Locatello F, et al. Neural attentive circuits. In: <i>36th Conference on Neural Information Processing Systems</i>. Vol 35. ; 2022.","mla":"Rahaman, Nasim, et al. “Neural Attentive Circuits.” <i>36th Conference on Neural Information Processing Systems</i>, vol. 35, 2022.","ista":"Rahaman N, Weiss M, Locatello F, Pal C, Bengio Y, Schölkopf B, Li LE, Ballas N. 2022. Neural attentive circuits. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems,  Advances in Neural Information Processing Systems, vol. 35.","short":"N. Rahaman, M. Weiss, F. Locatello, C. Pal, Y. Bengio, B. Schölkopf, L.E. Li, N. Ballas, in:, 36th Conference on Neural Information Processing Systems, 2022.","chicago":"Rahaman, Nasim, Martin Weiss, Francesco Locatello, Chris Pal, Yoshua Bengio, Bernhard Schölkopf, Li Erran Li, and Nicolas Ballas. 
“Neural Attentive Circuits.” In <i>36th Conference on Neural Information Processing Systems</i>, Vol. 35, 2022.","ieee":"N. Rahaman <i>et al.</i>, “Neural attentive circuits,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, United States, 2022, vol. 35.","apa":"Rahaman, N., Weiss, M., Locatello, F., Pal, C., Bengio, Y., Schölkopf, B., … Ballas, N. (2022). Neural attentive circuits. In <i>36th Conference on Neural Information Processing Systems</i> (Vol. 35). New Orleans, United States."},"month":"10","language":[{"iso":"eng"}],"volume":35,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"conference","_id":"14168","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2210.08031","open_access":"1"}],"intvolume":"        35","year":"2022","publication_status":"published","author":[{"full_name":"Rahaman, Nasim","first_name":"Nasim","last_name":"Rahaman"},{"full_name":"Weiss, Martin","last_name":"Weiss","first_name":"Martin"},{"last_name":"Locatello","orcid":"0000-0002-4850-0683","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco"},{"full_name":"Pal, Chris","last_name":"Pal","first_name":"Chris"},{"full_name":"Bengio, Yoshua","first_name":"Yoshua","last_name":"Bengio"},{"last_name":"Schölkopf","first_name":"Bernhard","full_name":"Schölkopf, Bernhard"},{"first_name":"Li Erran","last_name":"Li","full_name":"Li, Li Erran"},{"last_name":"Ballas","first_name":"Nicolas","full_name":"Ballas, Nicolas"}],"date_published":"2022-10-14T00:00:00Z","arxiv":1,"article_processing_charge":"No","date_updated":"2023-09-11T09:29:09Z","alternative_title":[" Advances in Neural Information Processing Systems"],"abstract":[{"lang":"eng","text":"Recent work has seen the development of general purpose neural architectures\r\nthat can be trained to perform tasks across diverse data modalities. General\r\npurpose models typically make few assumptions about the underlying\r\ndata-structure and are known to perform well in the large-data regime. At the\r\nsame time, there has been growing interest in modular neural architectures that\r\nrepresent the data using sparsely interacting modules. These models can be more\r\nrobust out-of-distribution, computationally efficient, and capable of\r\nsample-efficient adaptation to new data. However, they tend to make\r\ndomain-specific assumptions about the data, and present challenges in how\r\nmodule behavior (i.e., parameterization) and connectivity (i.e., their layout)\r\ncan be jointly learned. In this work, we introduce a general purpose, yet\r\nmodular neural architecture called Neural Attentive Circuits (NACs) that\r\njointly learns the parameterization and a sparse connectivity of neural modules\r\nwithout using domain knowledge. NACs are best understood as the combination of\r\ntwo systems that are jointly trained end-to-end: one that determines the module\r\nconfiguration and the other that executes it on an input. We demonstrate\r\nqualitatively that NACs learn diverse and meaningful module configurations on\r\nthe NLVR2 dataset without additional supervision. Quantitatively, we show that\r\nby incorporating modularity in this way, NACs improve upon a strong non-modular\r\nbaseline in terms of low-shot adaptation on CIFAR and CUBs dataset by about\r\n10%, and OOD robustness on Tiny ImageNet-R by about 2.5%. Further, we find that\r\nNACs can achieve an 8x speedup at inference time while losing less than 3%\r\nperformance. 
Finally, we find NACs to yield competitive results on diverse data\r\nmodalities spanning point-cloud classification, symbolic processing and\r\ntext-classification from ASCII bytes, thereby confirming its general purpose\r\nnature."}],"extern":"1","publication":"36th Conference on Neural Information Processing Systems","title":"Neural attentive circuits","department":[{"_id":"FrLo"}],"conference":{"end_date":"2022-12-01","name":"NeurIPS: Neural Information Processing Systems","start_date":"2022-11-29","location":"New Orleans, United States"},"date_created":"2023-08-22T13:57:27Z","oa_version":"Preprint"},{"author":[{"full_name":"Dittadi, Andrea","last_name":"Dittadi","first_name":"Andrea"},{"full_name":"Papa, Samuele","last_name":"Papa","first_name":"Samuele"},{"first_name":"Michele De","last_name":"Vita","full_name":"Vita, Michele De"},{"first_name":"Bernhard","last_name":"Schölkopf","full_name":"Schölkopf, Bernhard"},{"full_name":"Winther, Ole","last_name":"Winther","first_name":"Ole"},{"orcid":"0000-0002-4850-0683","last_name":"Locatello","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco"}],"date_published":"2022-07-22T00:00:00Z","article_processing_charge":"No","date_updated":"2023-09-11T10:08:14Z","alternative_title":["PMLR"],"quality_controlled":"1","conference":{"end_date":"2022-07-23","name":"International Conference on Machine Learning","start_date":"2022-07-17","location":"Baltimore, MD, United States"},"day":"22","month":"07","language":[{"iso":"eng"}],"intvolume":"      2022","main_file_link":[{"url":"https://arxiv.org/abs/2107.00637","open_access":"1"}],"publication_status":"submitted","arxiv":1,"abstract":[{"text":"The idea behind object-centric representation learning is that natural scenes can better be modeled as compositions of objects and their relations as opposed to distributed representations. This inductive bias can be injected into neural networks to potentially improve systematic generalization and performance of downstream tasks in scenes with multiple objects. In this paper, we train state-of-the-art unsupervised models on five common multi-object datasets and evaluate segmentation metrics and downstream object property prediction. In addition, we study generalization and robustness by investigating the settings where either a single object is out of distribution -- e.g., having an unseen color, texture, or shape -- or global properties of the scene are altered -- e.g., by occlusions, cropping, or increasing the number of objects. From our experimental study, we find object-centric representations to be useful for\r\ndownstream tasks and generally robust to most distribution shifts affecting objects. However, when the distribution shift affects the input in a less structured manner, robustness in terms of segmentation and downstream task performance may vary significantly across models and distribution shifts. ","lang":"eng"}],"extern":"1","publication":"Proceedings of the 39th International Conference on Machine Learning","department":[{"_id":"FrLo"}],"title":"Generalization and robustness implications in object-centric learning","publisher":"ML Research Press","date_created":"2023-08-22T13:59:55Z","oa_version":"Preprint","status":"public","oa":1,"external_id":{"arxiv":["2107.00637"]},"citation":{"chicago":"Dittadi, Andrea, Samuele Papa, Michele De Vita, Bernhard Schölkopf, Ole Winther, and Francesco Locatello. 
“Generalization and Robustness Implications in Object-Centric Learning.” In <i>Proceedings of the 39th International Conference on Machine Learning</i>, 2022:5221–85. ML Research Press, n.d.","ieee":"A. Dittadi, S. Papa, M. D. Vita, B. Schölkopf, O. Winther, and F. Locatello, “Generalization and robustness implications in object-centric learning,” in <i>Proceedings of the 39th International Conference on Machine Learning</i>, Baltimore, MD, United States, vol. 2022, pp. 5221–5285.","apa":"Dittadi, A., Papa, S., Vita, M. D., Schölkopf, B., Winther, O., &#38; Locatello, F. (n.d.). Generalization and robustness implications in object-centric learning. In <i>Proceedings of the 39th International Conference on Machine Learning</i> (Vol. 2022, pp. 5221–5285). Baltimore, MD, United States: ML Research Press.","ama":"Dittadi A, Papa S, Vita MD, Schölkopf B, Winther O, Locatello F. Generalization and robustness implications in object-centric learning. In: <i>Proceedings of the 39th International Conference on Machine Learning</i>. Vol 2022. ML Research Press; :5221-5285.","short":"A. Dittadi, S. Papa, M.D. Vita, B. Schölkopf, O. Winther, F. Locatello, in:, Proceedings of the 39th International Conference on Machine Learning, ML Research Press, n.d., pp. 5221–5285.","ista":"Dittadi A, Papa S, Vita MD, Schölkopf B, Winther O, Locatello F. Generalization and robustness implications in object-centric learning. Proceedings of the 39th International Conference on Machine Learning. International Conference on Machine Learning, PMLR, vol. 2022, 5221–5285.","mla":"Dittadi, Andrea, et al. “Generalization and Robustness Implications in Object-Centric Learning.” <i>Proceedings of the 39th International Conference on Machine Learning</i>, vol. 2022, ML Research Press, pp. 5221–85."},"volume":2022,"page":"5221-5285","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"conference","_id":"14170","year":"2022"},{"department":[{"_id":"FrLo"}],"title":"Score matching enables causal discovery of nonlinear additive noise models","publication":"Proceedings of the 39th International Conference on Machine Learning","publisher":"ML Research Press","oa_version":"Preprint","date_created":"2023-08-22T14:00:18Z","arxiv":1,"abstract":[{"lang":"eng","text":"This paper demonstrates how to recover causal graphs from the score of the\r\ndata distribution in non-linear additive (Gaussian) noise models. Using score\r\nmatching algorithms as a building block, we show how to design a new generation\r\nof scalable causal discovery methods. To showcase our approach, we also propose\r\na new efficient method for approximating the score's Jacobian, enabling to\r\nrecover the causal graph. Empirically, we find that the new algorithm, called\r\nSCORE, is competitive with state-of-the-art causal discovery methods while\r\nbeing significantly faster."}],"extern":"1","page":"18741-18753","volume":162,"type":"conference","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","_id":"14171","year":"2022","oa":1,"status":"public","external_id":{"arxiv":["2203.04413"]},"citation":{"apa":"Rolland, P., Cevher, V., Kleindessner, M., Russell, C., Schölkopf, B., Janzing, D., &#38; Locatello, F. (2022). Score matching enables causal discovery of nonlinear additive noise models. In <i>Proceedings of the 39th International Conference on Machine Learning</i> (Vol. 162, pp. 18741–18753). Baltimore, MD, United States: ML Research Press.","ieee":"P. 
Rolland <i>et al.</i>, “Score matching enables causal discovery of nonlinear additive noise models,” in <i>Proceedings of the 39th International Conference on Machine Learning</i>, Baltimore, MD, United States, 2022, vol. 162, pp. 18741–18753.","chicago":"Rolland, Paul, Volkan Cevher, Matthäus Kleindessner, Chris Russell, Bernhard Schölkopf, Dominik Janzing, and Francesco Locatello. “Score Matching Enables Causal Discovery of Nonlinear Additive Noise Models.” In <i>Proceedings of the 39th International Conference on Machine Learning</i>, 162:18741–53. ML Research Press, 2022.","mla":"Rolland, Paul, et al. “Score Matching Enables Causal Discovery of Nonlinear Additive Noise Models.” <i>Proceedings of the 39th International Conference on Machine Learning</i>, vol. 162, ML Research Press, 2022, pp. 18741–53.","short":"P. Rolland, V. Cevher, M. Kleindessner, C. Russell, B. Schölkopf, D. Janzing, F. Locatello, in:, Proceedings of the 39th International Conference on Machine Learning, ML Research Press, 2022, pp. 18741–18753.","ista":"Rolland P, Cevher V, Kleindessner M, Russell C, Schölkopf B, Janzing D, Locatello F. 2022. Score matching enables causal discovery of nonlinear additive noise models. Proceedings of the 39th International Conference on Machine Learning. International Conference on Machine Learning, PMLR, vol. 162, 18741–18753.","ama":"Rolland P, Cevher V, Kleindessner M, et al. Score matching enables causal discovery of nonlinear additive noise models. In: <i>Proceedings of the 39th International Conference on Machine Learning</i>. Vol 162. ML Research Press; 2022:18741-18753."},"conference":{"start_date":"2022-07-17","location":"Baltimore, MD, United States","end_date":"2022-07-23","name":"International Conference on Machine Learning"},"date_published":"2022-07-22T00:00:00Z","author":[{"last_name":"Rolland","first_name":"Paul","full_name":"Rolland, Paul"},{"full_name":"Cevher, Volkan","last_name":"Cevher","first_name":"Volkan"},{"full_name":"Kleindessner, Matthäus","first_name":"Matthäus","last_name":"Kleindessner"},{"full_name":"Russell, Chris","first_name":"Chris","last_name":"Russell"},{"full_name":"Schölkopf, Bernhard","first_name":"Bernhard","last_name":"Schölkopf"},{"last_name":"Janzing","first_name":"Dominik","full_name":"Janzing, Dominik"},{"last_name":"Locatello","orcid":"0000-0002-4850-0683","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco"}],"alternative_title":["PMLR"],"date_updated":"2023-09-11T10:14:20Z","article_processing_charge":"No","quality_controlled":"1","intvolume":"       162","main_file_link":[{"url":"https://arxiv.org/abs/2203.04413","open_access":"1"}],"publication_status":"published","day":"22","language":[{"iso":"eng"}],"month":"07"},{"abstract":[{"text":"An important component for generalization in machine learning is to uncover underlying latent factors of variation as well as the mechanism through which each factor acts in the world. In this paper, we test whether 17 unsupervised, weakly supervised, and fully supervised representation learning approaches correctly infer the generative factors of variation in simple datasets (dSprites, Shapes3D, MPI3D) from controlled environments, and on our contributed CelebGlow dataset. 
In contrast to prior robustness work that introduces novel factors of variation during test time, such as blur or other (un)structured noise, we here recompose, interpolate, or extrapolate only existing factors of variation from the training data set (e.g., small and medium-sized objects during training and large objects during testing). Models\r\nthat learn the correct mechanism should be able to generalize to this benchmark. In total, we train and test 2000+ models and observe that all of them struggle to learn the underlying mechanism regardless of supervision signal and architectural bias. Moreover, the generalization capabilities of all tested models drop significantly as we move from artificial datasets towards\r\nmore realistic real-world datasets. Despite their inability to identify the correct mechanism, the models are quite modular as their ability to infer other in-distribution factors remains fairly stable, providing only a single factor is out-of-distribution. These results point to an important yet understudied problem of learning mechanistic models of observations that can facilitate\r\ngeneralization.","lang":"eng"}],"quality_controlled":"1","extern":"1","arxiv":1,"author":[{"full_name":"Schott, Lukas","first_name":"Lukas","last_name":"Schott"},{"first_name":"Julius von","last_name":"Kügelgen","full_name":"Kügelgen, Julius von"},{"full_name":"Träuble, Frederik","first_name":"Frederik","last_name":"Träuble"},{"last_name":"Gehler","first_name":"Peter","full_name":"Gehler, Peter"},{"full_name":"Russell, Chris","first_name":"Chris","last_name":"Russell"},{"last_name":"Bethge","first_name":"Matthias","full_name":"Bethge, Matthias"},{"first_name":"Bernhard","last_name":"Schölkopf","full_name":"Schölkopf, Bernhard"},{"orcid":"0000-0002-4850-0683","last_name":"Locatello","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco"},{"full_name":"Brendel, Wieland","first_name":"Wieland","last_name":"Brendel"}],"date_published":"2022-04-25T00:00:00Z","date_updated":"2023-09-11T09:40:52Z","article_processing_charge":"No","oa_version":"Preprint","conference":{"end_date":"2022-04-29","name":"ICLR: International Conference on Learning Representations","start_date":"2022-04-25","location":"Virtual"},"date_created":"2023-08-22T14:00:50Z","title":"Visual representation learning does not generalize strongly within the same domain","department":[{"_id":"FrLo"}],"publication":"10th International Conference on Learning Representations","citation":{"apa":"Schott, L., Kügelgen, J. von, Träuble, F., Gehler, P., Russell, C., Bethge, M., … Brendel, W. (2022). Visual representation learning does not generalize strongly within the same domain. In <i>10th International Conference on Learning Representations</i>. Virtual.","chicago":"Schott, Lukas, Julius von Kügelgen, Frederik Träuble, Peter Gehler, Chris Russell, Matthias Bethge, Bernhard Schölkopf, Francesco Locatello, and Wieland Brendel. “Visual Representation Learning Does Not Generalize Strongly within the Same Domain.” In <i>10th International Conference on Learning Representations</i>, 2022.","ieee":"L. Schott <i>et al.</i>, “Visual representation learning does not generalize strongly within the same domain,” in <i>10th International Conference on Learning Representations</i>, Virtual, 2022.","ista":"Schott L, Kügelgen J von, Träuble F, Gehler P, Russell C, Bethge M, Schölkopf B, Locatello F, Brendel W. 2022. Visual representation learning does not generalize strongly within the same domain. 
10th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","mla":"Schott, Lukas, et al. “Visual Representation Learning Does Not Generalize Strongly within the Same Domain.” <i>10th International Conference on Learning Representations</i>, 2022.","short":"L. Schott, J. von Kügelgen, F. Träuble, P. Gehler, C. Russell, M. Bethge, B. Schölkopf, F. Locatello, W. Brendel, in:, 10th International Conference on Learning Representations, 2022.","ama":"Schott L, Kügelgen J von, Träuble F, et al. Visual representation learning does not generalize strongly within the same domain. In: <i>10th International Conference on Learning Representations</i>. ; 2022."},"language":[{"iso":"eng"}],"month":"04","oa":1,"status":"public","external_id":{"arxiv":["2107.08221"]},"day":"25","_id":"14172","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2107.08221"}],"publication_status":"published","year":"2022","type":"conference","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87"},{"extern":"1","abstract":[{"text":"Since out-of-distribution generalization is a generally ill-posed problem, various proxy targets (e.g., calibration, adversarial robustness, algorithmic corruptions, invariance across shifts) were studied across different research programs resulting in different recommendations. While sharing the same aspirational goal, these approaches have never been tested under the same\r\nexperimental conditions on real data. In this paper, we take a unified view of previous work, highlighting message discrepancies that we address empirically, and providing recommendations on how to measure the robustness of a model and how to improve it. To this end, we collect 172 publicly available dataset pairs for training and out-of-distribution evaluation of accuracy, calibration error, adversarial attacks, environment invariance, and synthetic corruptions. We fine-tune over 31k networks, from nine different architectures in the many- and\r\nfew-shot setting. Our findings confirm that in- and out-of-distribution accuracies tend to increase jointly, but show that their relation is largely dataset-dependent, and in general more nuanced and more complex than posited by previous, smaller scale studies.","lang":"eng"}],"arxiv":1,"oa_version":"Preprint","date_created":"2023-08-22T14:01:13Z","publisher":"Neural Information Processing Systems Foundation","title":"Assaying out-of-distribution generalization in transfer learning","department":[{"_id":"FrLo"}],"publication":"36th Conference on Neural Information Processing Systems","publication_identifier":{"isbn":["9781713871088"]},"citation":{"short":"F. Wenzel, A. Dittadi, P.V. Gehler, C.-J. Simon-Gabriel, M. Horn, D. Zietlow, D. Kernert, C. Russell, T. Brox, B. Schiele, B. Schölkopf, F. Locatello, in:, 36th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2022, pp. 7181–7198.","ista":"Wenzel F, Dittadi A, Gehler PV, Simon-Gabriel C-J, Horn M, Zietlow D, Kernert D, Russell C, Brox T, Schiele B, Schölkopf B, Locatello F. 2022. Assaying out-of-distribution generalization in transfer learning. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, Advances in Neural Information Processing Systems, vol. 35, 7181–7198.","mla":"Wenzel, Florian, et al. 
“Assaying Out-of-Distribution Generalization in Transfer Learning.” <i>36th Conference on Neural Information Processing Systems</i>, vol. 35, Neural Information Processing Systems Foundation, 2022, pp. 7181–98.","ama":"Wenzel F, Dittadi A, Gehler PV, et al. Assaying out-of-distribution generalization in transfer learning. In: <i>36th Conference on Neural Information Processing Systems</i>. Vol 35. Neural Information Processing Systems Foundation; 2022:7181-7198.","apa":"Wenzel, F., Dittadi, A., Gehler, P. V., Simon-Gabriel, C.-J., Horn, M., Zietlow, D., … Locatello, F. (2022). Assaying out-of-distribution generalization in transfer learning. In <i>36th Conference on Neural Information Processing Systems</i> (Vol. 35, pp. 7181–7198). New Orleans, LA, United States: Neural Information Processing Systems Foundation.","ieee":"F. Wenzel <i>et al.</i>, “Assaying out-of-distribution generalization in transfer learning,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States, 2022, vol. 35, pp. 7181–7198.","chicago":"Wenzel, Florian, Andrea Dittadi, Peter Vincent Gehler, Carl-Johann Simon-Gabriel, Max Horn, Dominik Zietlow, David Kernert, et al. “Assaying Out-of-Distribution Generalization in Transfer Learning.” In <i>36th Conference on Neural Information Processing Systems</i>, 35:7181–98. Neural Information Processing Systems Foundation, 2022."},"external_id":{"arxiv":["2207.09239"]},"oa":1,"status":"public","year":"2022","_id":"14173","type":"conference","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","page":"7181-7198","volume":35,"quality_controlled":"1","scopus_import":"1","alternative_title":["Advances in Neural Information Processing Systems"],"date_updated":"2023-09-06T10:34:43Z","article_processing_charge":"No","date_published":"2022-12-15T00:00:00Z","author":[{"full_name":"Wenzel, Florian","last_name":"Wenzel","first_name":"Florian"},{"last_name":"Dittadi","first_name":"Andrea","full_name":"Dittadi, Andrea"},{"full_name":"Gehler, Peter Vincent","first_name":"Peter Vincent","last_name":"Gehler"},{"full_name":"Simon-Gabriel, Carl-Johann","last_name":"Simon-Gabriel","first_name":"Carl-Johann"},{"last_name":"Horn","first_name":"Max","full_name":"Horn, Max"},{"first_name":"Dominik","last_name":"Zietlow","full_name":"Zietlow, Dominik"},{"full_name":"Kernert, David","last_name":"Kernert","first_name":"David"},{"full_name":"Russell, Chris","first_name":"Chris","last_name":"Russell"},{"full_name":"Brox, Thomas","first_name":"Thomas","last_name":"Brox"},{"last_name":"Schiele","first_name":"Bernt","full_name":"Schiele, Bernt"},{"first_name":"Bernhard","last_name":"Schölkopf","full_name":"Schölkopf, Bernhard"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello","first_name":"Francesco"}],"conference":{"name":"NeurIPS: Neural Information Processing Systems","end_date":"2022-12-09","location":"New Orleans, LA, United States","start_date":"2022-11-28"},"language":[{"iso":"eng"}],"month":"12","day":"15","publication_status":"published","main_file_link":[{"url":"https://arxiv.org/abs/2207.09239","open_access":"1"}],"intvolume":"        35"},{"citation":{"apa":"Dittadi, A., Träuble, F., Wüthrich, M., Widmaier, F., Gehler, P., Winther, O., … Bauer, S. (2022). The role of pretrained representations for the OOD generalization of reinforcement learning agents. 
In <i>10th International Conference on Learning Representations</i>. Virtual.","ieee":"A. Dittadi <i>et al.</i>, “The role of pretrained representations for the OOD generalization of reinforcement learning agents,” in <i>10th International Conference on Learning Representations</i>, Virtual, 2022.","chicago":"Dittadi, Andrea, Frederik Träuble, Manuel Wüthrich, Felix Widmaier, Peter Gehler, Ole Winther, Francesco Locatello, Olivier Bachem, Bernhard Schölkopf, and Stefan Bauer. “The Role of Pretrained Representations for the OOD Generalization of Reinforcement Learning Agents.” In <i>10th International Conference on Learning Representations</i>, 2022.","mla":"Dittadi, Andrea, et al. “The Role of Pretrained Representations for the OOD Generalization of Reinforcement Learning Agents.” <i>10th International Conference on Learning Representations</i>, 2022.","ista":"Dittadi A, Träuble F, Wüthrich M, Widmaier F, Gehler P, Winther O, Locatello F, Bachem O, Schölkopf B, Bauer S. 2022. The role of pretrained representations for the OOD generalization of reinforcement learning agents. 10th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","short":"A. Dittadi, F. Träuble, M. Wüthrich, F. Widmaier, P. Gehler, O. Winther, F. Locatello, O. Bachem, B. Schölkopf, S. Bauer, in:, 10th International Conference on Learning Representations, 2022.","ama":"Dittadi A, Träuble F, Wüthrich M, et al. The role of pretrained representations for the OOD generalization of reinforcement learning agents. In: <i>10th International Conference on Learning Representations</i>. ; 2022."},"month":"04","language":[{"iso":"eng"}],"oa":1,"status":"public","day":"25","external_id":{"arxiv":["2107.05686"]},"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2107.05686"}],"_id":"14174","year":"2022","publication_status":"published","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"conference","abstract":[{"lang":"eng","text":"Building sample-efficient agents that generalize out-of-distribution (OOD) in real-world settings remains a fundamental unsolved problem on the path towards achieving higher-level cognition. One particularly promising approach is to begin with low-dimensional, pretrained representations of our world, which should facilitate efficient downstream learning and generalization. By training 240 representations and over 10,000 reinforcement learning (RL) policies on a simulated robotic setup, we evaluate to what extent different properties of\r\npretrained VAE-based representations affect the OOD generalization of downstream agents. We observe that many agents are surprisingly robust to realistic distribution shifts, including the challenging sim-to-real case. In addition, we find that the generalization performance of a simple downstream proxy task reliably predicts the generalization performance of our RL agents\r\nunder a wide range of OOD settings. 
Such proxy tasks can thus be used to select pretrained representations that will lead to agents that generalize."}],"quality_controlled":"1","extern":"1","author":[{"full_name":"Dittadi, Andrea","last_name":"Dittadi","first_name":"Andrea"},{"full_name":"Träuble, Frederik","first_name":"Frederik","last_name":"Träuble"},{"full_name":"Wüthrich, Manuel","last_name":"Wüthrich","first_name":"Manuel"},{"full_name":"Widmaier, Felix","last_name":"Widmaier","first_name":"Felix"},{"full_name":"Gehler, Peter","first_name":"Peter","last_name":"Gehler"},{"full_name":"Winther, Ole","first_name":"Ole","last_name":"Winther"},{"first_name":"Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4"},{"full_name":"Bachem, Olivier","last_name":"Bachem","first_name":"Olivier"},{"full_name":"Schölkopf, Bernhard","last_name":"Schölkopf","first_name":"Bernhard"},{"full_name":"Bauer, Stefan","last_name":"Bauer","first_name":"Stefan"}],"date_published":"2022-04-25T00:00:00Z","arxiv":1,"article_processing_charge":"No","date_updated":"2023-09-11T09:48:36Z","conference":{"end_date":"2022-04-29","name":"ICLR: International Conference on Learning Representations","start_date":"2022-04-25","location":"Virtual"},"date_created":"2023-08-22T14:02:13Z","oa_version":"Preprint","publication":"10th International Conference on Learning Representations","title":"The role of pretrained representations for the OOD generalization of reinforcement learning agents","department":[{"_id":"FrLo"}]},{"date_published":"2022-04-25T00:00:00Z","author":[{"full_name":"Makansi, Osama","first_name":"Osama","last_name":"Makansi"},{"last_name":"Kügelgen","first_name":"Julius von","full_name":"Kügelgen, Julius von"},{"last_name":"Locatello","orcid":"0000-0002-4850-0683","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco"},{"full_name":"Gehler, Peter","last_name":"Gehler","first_name":"Peter"},{"full_name":"Janzing, Dominik","first_name":"Dominik","last_name":"Janzing"},{"first_name":"Thomas","last_name":"Brox","full_name":"Brox, Thomas"},{"first_name":"Bernhard","last_name":"Schölkopf","full_name":"Schölkopf, Bernhard"}],"arxiv":1,"article_processing_charge":"No","date_updated":"2023-09-11T09:52:20Z","abstract":[{"lang":"eng","text":"Predicting the future trajectory of a moving agent can be easy when the past trajectory continues smoothly but is challenging when complex interactions with other agents are involved. Recent deep learning approaches for trajectory prediction show promising performance and partially attribute this to successful reasoning about agent-agent interactions. However, it remains unclear which features such black-box models actually learn to use for making predictions. This paper proposes a procedure that quantifies the contributions\r\nof different cues to model performance based on a variant of Shapley values. Applying this procedure to state-of-the-art trajectory prediction methods on standard benchmark datasets shows that they are, in fact, unable to reason about interactions. Instead, the past trajectory of the target is the only feature used for predicting its future. For a task with richer social\r\ninteraction patterns, on the other hand, the tested models do pick up such interactions to a certain extent, as quantified by our feature attribution method. 
We discuss the limits of the proposed method and its links to causality."}],"extern":"1","quality_controlled":"1","publication":"10th International Conference on Learning Representations","title":"You mostly walk alone: Analyzing feature attribution in trajectory prediction","department":[{"_id":"FrLo"}],"date_created":"2023-08-22T14:02:34Z","conference":{"end_date":"2022-04-29","name":"ICLR: International Conference on Learning Representations","start_date":"2022-04-25","location":"Virtual"},"oa_version":"Preprint","oa":1,"status":"public","external_id":{"arxiv":["2110.05304"]},"day":"25","citation":{"ama":"Makansi O, Kügelgen J von, Locatello F, et al. You mostly walk alone: Analyzing feature attribution in trajectory prediction. In: <i>10th International Conference on Learning Representations</i>. ; 2022.","ista":"Makansi O, Kügelgen J von, Locatello F, Gehler P, Janzing D, Brox T, Schölkopf B. 2022. You mostly walk alone: Analyzing feature attribution in trajectory prediction. 10th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","short":"O. Makansi, J. von Kügelgen, F. Locatello, P. Gehler, D. Janzing, T. Brox, B. Schölkopf, in:, 10th International Conference on Learning Representations, 2022.","mla":"Makansi, Osama, et al. “You Mostly Walk Alone: Analyzing Feature Attribution in Trajectory Prediction.” <i>10th International Conference on Learning Representations</i>, 2022.","chicago":"Makansi, Osama, Julius von Kügelgen, Francesco Locatello, Peter Gehler, Dominik Janzing, Thomas Brox, and Bernhard Schölkopf. “You Mostly Walk Alone: Analyzing Feature Attribution in Trajectory Prediction.” In <i>10th International Conference on Learning Representations</i>, 2022.","ieee":"O. Makansi <i>et al.</i>, “You mostly walk alone: Analyzing feature attribution in trajectory prediction,” in <i>10th International Conference on Learning Representations</i>, Virtual, 2022.","apa":"Makansi, O., Kügelgen, J. von, Locatello, F., Gehler, P., Janzing, D., Brox, T., &#38; Schölkopf, B. (2022). You mostly walk alone: Analyzing feature attribution in trajectory prediction. In <i>10th International Conference on Learning Representations</i>. Virtual."},"month":"04","language":[{"iso":"eng"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"conference","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2110.05304","open_access":"1"}],"_id":"14175","year":"2022","publication_status":"published"},{"citation":{"apa":"Rahaman, N., Weiss, M., Träuble, F., Locatello, F., Lacoste, A., Bengio, Y., … Schölkopf, B. (n.d.). A general purpose neural architecture for geospatial systems. In <i>36th Conference on Neural Information Processing Systems</i>. New Orleans, LA, United States.","chicago":"Rahaman, Nasim, Martin Weiss, Frederik Träuble, Francesco Locatello, Alexandre Lacoste, Yoshua Bengio, Chris Pal, Li Erran Li, and Bernhard Schölkopf. “A General Purpose Neural Architecture for Geospatial Systems.” In <i>36th Conference on Neural Information Processing Systems</i>, n.d.","ieee":"N. Rahaman <i>et al.</i>, “A general purpose neural architecture for geospatial systems,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States.","mla":"Rahaman, Nasim, et al. “A General Purpose Neural Architecture for Geospatial Systems.” <i>36th Conference on Neural Information Processing Systems</i>.","ista":"Rahaman N, Weiss M, Träuble F, Locatello F, Lacoste A, Bengio Y, Pal C, Li LE, Schölkopf B. 
A general purpose neural architecture for geospatial systems. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems.","short":"N. Rahaman, M. Weiss, F. Träuble, F. Locatello, A. Lacoste, Y. Bengio, C. Pal, L.E. Li, B. Schölkopf, in:, 36th Conference on Neural Information Processing Systems, n.d.","ama":"Rahaman N, Weiss M, Träuble F, et al. A general purpose neural architecture for geospatial systems. In: <i>36th Conference on Neural Information Processing Systems</i>."},"month":"11","language":[{"iso":"eng"}],"oa":1,"status":"public","external_id":{"arxiv":["2211.02348"]},"day":"04","_id":"14215","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2211.02348","open_access":"1"}],"year":"2022","publication_status":"submitted","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"conference","abstract":[{"text":"Geospatial Information Systems are used by researchers and Humanitarian Assistance and Disaster Response (HADR) practitioners to support a wide variety of important applications. However, collaboration between these actors is difficult due to the heterogeneous nature of geospatial data modalities (e.g., multi-spectral images of various resolutions, timeseries, weather data) and diversity of tasks (e.g., regression of human activity indicators or detecting forest fires). In this work, we present a roadmap towards the construction of a general-purpose neural architecture (GPNA) with a geospatial inductive bias, pre-trained on large amounts of unlabelled earth observation data in a self-supervised manner. We envision how such a model may facilitate cooperation between members of the community. We show preliminary results on the first step of the roadmap, where we instantiate an architecture that can process a wide variety of geospatial data modalities and demonstrate that it can achieve competitive performance with domain-specific architectures on tasks relating to the U.N.'s Sustainable Development Goals.","lang":"eng"}],"quality_controlled":"1","extern":"1","date_published":"2022-11-04T00:00:00Z","author":[{"full_name":"Rahaman, Nasim","first_name":"Nasim","last_name":"Rahaman"},{"last_name":"Weiss","first_name":"Martin","full_name":"Weiss, Martin"},{"first_name":"Frederik","last_name":"Träuble","full_name":"Träuble, Frederik"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello","first_name":"Francesco"},{"last_name":"Lacoste","first_name":"Alexandre","full_name":"Lacoste, Alexandre"},{"first_name":"Yoshua","last_name":"Bengio","full_name":"Bengio, Yoshua"},{"full_name":"Pal, Chris","last_name":"Pal","first_name":"Chris"},{"last_name":"Li","first_name":"Li Erran","full_name":"Li, Li Erran"},{"first_name":"Bernhard","last_name":"Schölkopf","full_name":"Schölkopf, Bernhard"}],"arxiv":1,"article_processing_charge":"No","date_updated":"2023-09-13T09:35:59Z","conference":{"name":"NeurIPS: Neural Information Processing Systems","end_date":"2022-12-09","location":"New Orleans, LA, United States","start_date":"2022-11-28"},"date_created":"2023-08-22T14:21:47Z","oa_version":"Preprint","publication":"36th Conference on Neural Information Processing Systems","department":[{"_id":"FrLo"}],"title":"A general purpose neural architecture for geospatial systems"},{"date_created":"2023-08-22T14:22:04Z","oa_version":"Preprint","publication":"arXiv","department":[{"_id":"FrLo"}],"title":"ASIF: Coupled data turns unimodal models to multimodal without 
training","article_number":"2210.01738","abstract":[{"lang":"eng","text":"CLIP proved that aligning visual and language spaces is key to solving many vision tasks without explicit training, but required to train image and text encoders from scratch on a huge dataset. LiT improved this by only training the text encoder and using a pre-trained vision network. In this paper, we show that a common space can be created without any training at all, using single-domain encoders (trained with or without supervision) and a much smaller amount of image-text pairs. Furthermore, our model has unique properties. Most notably, deploying a new version with updated training samples can be done in a matter of seconds. Additionally, the representations in the common space are easily interpretable as every dimension corresponds to the similarity of the input to a unique entry in the multimodal dataset. Experiments on standard zero-shot visual benchmarks demonstrate the typical transfer ability of image-text models. Overall, our method represents a simple yet surprisingly strong baseline for foundation multi-modal models, raising important questions on their data efficiency and on the role of retrieval in machine learning."}],"date_published":"2022-10-04T00:00:00Z","author":[{"last_name":"Norelli","first_name":"Antonio","full_name":"Norelli, Antonio"},{"full_name":"Fumero, Marco","first_name":"Marco","last_name":"Fumero"},{"full_name":"Maiorca, Valentino","last_name":"Maiorca","first_name":"Valentino"},{"full_name":"Moschella, Luca","last_name":"Moschella","first_name":"Luca"},{"first_name":"Emanuele","last_name":"Rodolà","full_name":"Rodolà, Emanuele"},{"first_name":"Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4"}],"arxiv":1,"article_processing_charge":"No","date_updated":"2024-02-12T09:57:14Z","doi":"10.48550/arXiv.2210.01738","_id":"14216","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2210.01738"}],"year":"2022","publication_status":"submitted","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"preprint","citation":{"short":"A. Norelli, M. Fumero, V. Maiorca, L. Moschella, E. Rodolà, F. Locatello, ArXiv (n.d.).","ista":"Norelli A, Fumero M, Maiorca V, Moschella L, Rodolà E, Locatello F. ASIF: Coupled data turns unimodal models to multimodal without training. arXiv, 2210.01738.","mla":"Norelli, Antonio, et al. “ASIF: Coupled Data Turns Unimodal Models to Multimodal without Training.” <i>ArXiv</i>, 2210.01738, doi:<a href=\"https://doi.org/10.48550/arXiv.2210.01738\">10.48550/arXiv.2210.01738</a>.","ama":"Norelli A, Fumero M, Maiorca V, Moschella L, Rodolà E, Locatello F. ASIF: Coupled data turns unimodal models to multimodal without training. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2210.01738\">10.48550/arXiv.2210.01738</a>","apa":"Norelli, A., Fumero, M., Maiorca, V., Moschella, L., Rodolà, E., &#38; Locatello, F. (n.d.). ASIF: Coupled data turns unimodal models to multimodal without training. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2210.01738\">https://doi.org/10.48550/arXiv.2210.01738</a>","ieee":"A. Norelli, M. Fumero, V. Maiorca, L. Moschella, E. Rodolà, and F. Locatello, “ASIF: Coupled data turns unimodal models to multimodal without training,” <i>arXiv</i>. .","chicago":"Norelli, Antonio, Marco Fumero, Valentino Maiorca, Luca Moschella, Emanuele Rodolà, and Francesco Locatello. 
“ASIF: Coupled Data Turns Unimodal Models to Multimodal without Training.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2210.01738\">https://doi.org/10.48550/arXiv.2210.01738</a>."},"month":"10","language":[{"iso":"eng"}],"oa":1,"status":"public","day":"04","external_id":{"arxiv":["2210.01738"]}},{"abstract":[{"lang":"eng","text":"Although reinforcement learning has seen remarkable progress over the last years, solving robust dexterous object-manipulation tasks in multi-object settings remains a challenge. In this paper, we focus on models that can learn manipulation tasks in fixed multi-object settings and extrapolate this skill zero-shot without any drop in performance when the number of objects changes. We consider the generic task of bringing a specific cube out of a set to a goal position. We find that previous approaches, which primarily leverage attention and graph neural network-based architectures, do not generalize their skills when the number of input objects changes while scaling as K^2. We propose an alternative plug-and-play module based on relational inductive biases to overcome these limitations. Besides exceeding performances in their training environment, we show that our approach, which scales linearly in K, allows agents to extrapolate and generalize zero-shot to any new object number."}],"extern":"1","date_published":"2022-01-31T00:00:00Z","author":[{"full_name":"Mambelli, Davide","first_name":"Davide","last_name":"Mambelli"},{"full_name":"Träuble, Frederik","first_name":"Frederik","last_name":"Träuble"},{"full_name":"Bauer, Stefan","first_name":"Stefan","last_name":"Bauer"},{"full_name":"Schölkopf, Bernhard","first_name":"Bernhard","last_name":"Schölkopf"},{"full_name":"Locatello, Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683"}],"arxiv":1,"article_processing_charge":"No","date_updated":"2023-09-11T11:49:40Z","doi":"10.48550/arXiv.2201.13388","date_created":"2023-08-22T14:23:16Z","oa_version":"Preprint","publication":"arXiv","title":"Compositional multi-object reinforcement learning with linear relation networks","department":[{"_id":"FrLo"}],"article_number":"2201.13388","citation":{"ista":"Mambelli D, Träuble F, Bauer S, Schölkopf B, Locatello F. Compositional multi-object reinforcement learning with linear relation networks. arXiv, 2201.13388.","mla":"Mambelli, Davide, et al. “Compositional Multi-Object Reinforcement Learning with Linear Relation Networks.” <i>ArXiv</i>, 2201.13388, doi:<a href=\"https://doi.org/10.48550/arXiv.2201.13388\">10.48550/arXiv.2201.13388</a>.","short":"D. Mambelli, F. Träuble, S. Bauer, B. Schölkopf, F. Locatello, ArXiv (n.d.).","ama":"Mambelli D, Träuble F, Bauer S, Schölkopf B, Locatello F. Compositional multi-object reinforcement learning with linear relation networks. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2201.13388\">10.48550/arXiv.2201.13388</a>","apa":"Mambelli, D., Träuble, F., Bauer, S., Schölkopf, B., &#38; Locatello, F. (n.d.). Compositional multi-object reinforcement learning with linear relation networks. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2201.13388\">https://doi.org/10.48550/arXiv.2201.13388</a>","chicago":"Mambelli, Davide, Frederik Träuble, Stefan Bauer, Bernhard Schölkopf, and Francesco Locatello. “Compositional Multi-Object Reinforcement Learning with Linear Relation Networks.” <i>ArXiv</i>, n.d. 
<a href=\"https://doi.org/10.48550/arXiv.2201.13388\">https://doi.org/10.48550/arXiv.2201.13388</a>.","ieee":"D. Mambelli, F. Träuble, S. Bauer, B. Schölkopf, and F. Locatello, “Compositional multi-object reinforcement learning with linear relation networks,” <i>arXiv</i>. ."},"month":"01","language":[{"iso":"eng"}],"oa":1,"status":"public","external_id":{"arxiv":["2201.13388"]},"day":"31","_id":"14220","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2201.13388"}],"year":"2022","publication_status":"submitted","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","type":"preprint"},{"scopus_import":"1","quality_controlled":"1","date_published":"2022-12-01T00:00:00Z","author":[{"first_name":"Timothy D","last_name":"Browning","orcid":"0000-0002-8314-0177","full_name":"Browning, Timothy D","id":"35827D50-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Horesh, Tal","id":"C8B7BF48-8D81-11E9-BCA9-F536E6697425","first_name":"Tal","last_name":"Horesh"},{"full_name":"Wilsch, Florian Alexander","id":"560601DA-8D36-11E9-A136-7AC1E5697425","first_name":"Florian Alexander","last_name":"Wilsch","orcid":"0000-0001-7302-8256"}],"date_updated":"2023-08-02T06:46:38Z","doi":"10.2140/ant.2022.16.2385","article_processing_charge":"No","intvolume":"        16","main_file_link":[{"url":"https://arxiv.org/abs/2102.11552","open_access":"1"}],"publication_status":"published","project":[{"_id":"26A8D266-B435-11E9-9278-68D0E5697425","grant_number":"EP-P026710-2","name":"Between rational and integral points"},{"call_identifier":"FWF","_id":"26AEDAB2-B435-11E9-9278-68D0E5697425","name":"New frontiers of the Manin conjecture","grant_number":"P32428"}],"language":[{"iso":"eng"}],"isi":1,"month":"12","day":"01","oa_version":"Preprint","date_created":"2021-02-25T09:56:57Z","title":"Equidistribution and freeness on Grassmannians","department":[{"_id":"TiBr"}],"publication":"Algebra & Number Theory","publisher":"Mathematical Sciences Publishers","abstract":[{"text":"We associate a certain tensor product lattice to any primitive integer lattice and ask about its typical shape. These lattices are related to the tangent bundle of Grassmannians and their study is motivated by Peyre's programme on \"freeness\" for rational points of bounded height on Fano\r\nvarieties.","lang":"eng"}],"arxiv":1,"_id":"9199","year":"2022","page":"2385-2407","volume":16,"type":"journal_article","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","citation":{"ama":"Browning TD, Horesh T, Wilsch FA. Equidistribution and freeness on Grassmannians. <i>Algebra &#38; Number Theory</i>. 2022;16(10):2385-2407. doi:<a href=\"https://doi.org/10.2140/ant.2022.16.2385\">10.2140/ant.2022.16.2385</a>","mla":"Browning, Timothy D., et al. “Equidistribution and Freeness on Grassmannians.” <i>Algebra &#38; Number Theory</i>, vol. 16, no. 10, Mathematical Sciences Publishers, 2022, pp. 2385–407, doi:<a href=\"https://doi.org/10.2140/ant.2022.16.2385\">10.2140/ant.2022.16.2385</a>.","ista":"Browning TD, Horesh T, Wilsch FA. 2022. Equidistribution and freeness on Grassmannians. Algebra &#38; Number Theory. 16(10), 2385–2407.","short":"T.D. Browning, T. Horesh, F.A. Wilsch, Algebra &#38; Number Theory 16 (2022) 2385–2407.","chicago":"Browning, Timothy D, Tal Horesh, and Florian Alexander Wilsch. “Equidistribution and Freeness on Grassmannians.” <i>Algebra &#38; Number Theory</i>. Mathematical Sciences Publishers, 2022. <a href=\"https://doi.org/10.2140/ant.2022.16.2385\">https://doi.org/10.2140/ant.2022.16.2385</a>.","ieee":"T. D. Browning, T. 
Horesh, and F. A. Wilsch, “Equidistribution and freeness on Grassmannians,” <i>Algebra &#38; Number Theory</i>, vol. 16, no. 10. Mathematical Sciences Publishers, pp. 2385–2407, 2022.","apa":"Browning, T. D., Horesh, T., &#38; Wilsch, F. A. (2022). Equidistribution and freeness on Grassmannians. <i>Algebra &#38; Number Theory</i>. Mathematical Sciences Publishers. <a href=\"https://doi.org/10.2140/ant.2022.16.2385\">https://doi.org/10.2140/ant.2022.16.2385</a>"},"publication_identifier":{"eissn":["1944-7833"],"issn":["1937-0652"]},"oa":1,"status":"public","acknowledgement":"The authors are very grateful to Will Sawin for useful remarks about this topic. While working on this paper the first two authors were supported by EPSRC grant EP/P026710/1, and the first and last authors by FWF grant P 32428-N35.","article_type":"original","external_id":{"arxiv":["2102.11552"],"isi":["000961514100004"]},"issue":"10"},{"publication":"Mathematics of Operations Research","title":"Finite-memory strategies in POMDPs with long-run average objectives","department":[{"_id":"GradSch"},{"_id":"KrCh"}],"publisher":"Institute for Operations Research and the Management Sciences","date_created":"2021-04-08T09:33:31Z","oa_version":"Preprint","arxiv":1,"abstract":[{"lang":"eng","text":"Partially observable Markov decision processes (POMDPs) are standard models for dynamic systems with probabilistic and nondeterministic behaviour in uncertain environments. We prove that in POMDPs with long-run average objective, the decision maker has approximately optimal strategies with finite memory. This implies notably that approximating the long-run value is recursively enumerable, as well as a weak continuity property of the value with respect to the transition function. "}],"volume":47,"page":"100-119","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","type":"journal_article","_id":"9311","year":"2022","status":"public","acknowledgement":"Partially supported by Austrian Science Fund (FWF) NFN Grant No RiSE/SHiNE S11407, by CONICYT Chile through grant PII 20150140, and by ECOS-CONICYT through grant C15E03.\r\n","oa":1,"issue":"1","external_id":{"arxiv":["1904.13360"],"isi":["000731918100001"]},"article_type":"original","citation":{"ama":"Chatterjee K, Saona Urmeneta RJ, Ziliotto B. Finite-memory strategies in POMDPs with long-run average objectives. <i>Mathematics of Operations Research</i>. 2022;47(1):100-119. doi:<a href=\"https://doi.org/10.1287/moor.2020.1116\">10.1287/moor.2020.1116</a>","ista":"Chatterjee K, Saona Urmeneta RJ, Ziliotto B. 2022. Finite-memory strategies in POMDPs with long-run average objectives. Mathematics of Operations Research. 47(1), 100–119.","mla":"Chatterjee, Krishnendu, et al. “Finite-Memory Strategies in POMDPs with Long-Run Average Objectives.” <i>Mathematics of Operations Research</i>, vol. 47, no. 1, Institute for Operations Research and the Management Sciences, 2022, pp. 100–19, doi:<a href=\"https://doi.org/10.1287/moor.2020.1116\">10.1287/moor.2020.1116</a>.","short":"K. Chatterjee, R.J. Saona Urmeneta, B. Ziliotto, Mathematics of Operations Research 47 (2022) 100–119.","chicago":"Chatterjee, Krishnendu, Raimundo J Saona Urmeneta, and Bruno Ziliotto. “Finite-Memory Strategies in POMDPs with Long-Run Average Objectives.” <i>Mathematics of Operations Research</i>. Institute for Operations Research and the Management Sciences, 2022. <a href=\"https://doi.org/10.1287/moor.2020.1116\">https://doi.org/10.1287/moor.2020.1116</a>.","ieee":"K. Chatterjee, R. J. Saona Urmeneta, and B. 
Ziliotto, “Finite-memory strategies in POMDPs with long-run average objectives,” <i>Mathematics of Operations Research</i>, vol. 47, no. 1. Institute for Operations Research and the Management Sciences, pp. 100–119, 2022.","apa":"Chatterjee, K., Saona Urmeneta, R. J., &#38; Ziliotto, B. (2022). Finite-memory strategies in POMDPs with long-run average objectives. <i>Mathematics of Operations Research</i>. Institute for Operations Research and the Management Sciences. <a href=\"https://doi.org/10.1287/moor.2020.1116\">https://doi.org/10.1287/moor.2020.1116</a>"},"publication_identifier":{"eissn":["1526-5471"],"issn":["0364-765X"]},"date_published":"2022-02-01T00:00:00Z","author":[{"id":"2E5DCA20-F248-11E8-B48F-1D18A9856A87","full_name":"Chatterjee, Krishnendu","last_name":"Chatterjee","orcid":"0000-0002-4561-241X","first_name":"Krishnendu"},{"id":"BD1DF4C4-D767-11E9-B658-BC13E6697425","full_name":"Saona Urmeneta, Raimundo J","last_name":"Saona Urmeneta","orcid":"0000-0001-5103-038X","first_name":"Raimundo J"},{"last_name":"Ziliotto","first_name":"Bruno","full_name":"Ziliotto, Bruno"}],"article_processing_charge":"No","doi":"10.1287/moor.2020.1116","date_updated":"2023-09-05T13:16:11Z","keyword":["Management Science and Operations Research","General Mathematics","Computer Science Applications"],"scopus_import":"1","quality_controlled":"1","intvolume":"        47","main_file_link":[{"url":"https://arxiv.org/abs/1904.13360","open_access":"1"}],"publication_status":"published","day":"01","isi":1,"month":"02","language":[{"iso":"eng"}],"project":[{"name":"Game Theory","grant_number":"S11407","_id":"25863FF4-B435-11E9-9278-68D0E5697425","call_identifier":"FWF"}]},{"oa":1,"acknowledgement":"I am most thankful to my advisor, Emmanuel Kowalski, for suggesting this problem and for his guidance during these years. I also would like to thank Youness Lamzouri for informing me about his work on sum of incomplete Birch sums and Tal Horesh for her suggestions on a previous version of the paper. Finally, I am very grateful to the anonymous referee for their careful reading of the manuscript and their valuable comments.","status":"public","article_type":"original","external_id":{"isi":["000784421500001"],"arxiv":["1811.10563"]},"issue":"3","citation":{"ieee":"D. Bonolis, “On the size of the maximum of incomplete Kloosterman sums,” <i>Mathematical Proceedings of the Cambridge Philosophical Society</i>, vol. 172, no. 3. Cambridge University Press, pp. 563–590, 2022.","chicago":"Bonolis, Dante. “On the Size of the Maximum of Incomplete Kloosterman Sums.” <i>Mathematical Proceedings of the Cambridge Philosophical Society</i>. Cambridge University Press, 2022. <a href=\"https://doi.org/10.1017/S030500412100030X\">https://doi.org/10.1017/S030500412100030X</a>.","apa":"Bonolis, D. (2022). On the size of the maximum of incomplete Kloosterman sums. <i>Mathematical Proceedings of the Cambridge Philosophical Society</i>. Cambridge University Press. <a href=\"https://doi.org/10.1017/S030500412100030X\">https://doi.org/10.1017/S030500412100030X</a>","ama":"Bonolis D. On the size of the maximum of incomplete Kloosterman sums. <i>Mathematical Proceedings of the Cambridge Philosophical Society</i>. 2022;172(3):563-590. doi:<a href=\"https://doi.org/10.1017/S030500412100030X\">10.1017/S030500412100030X</a>","short":"D. Bonolis, Mathematical Proceedings of the Cambridge Philosophical Society 172 (2022) 563–590.","ista":"Bonolis D. 2022. On the size of the maximum of incomplete Kloosterman sums. 
Mathematical Proceedings of the Cambridge Philosophical Society. 172(3), 563–590.","mla":"Bonolis, Dante. “On the Size of the Maximum of Incomplete Kloosterman Sums.” <i>Mathematical Proceedings of the Cambridge Philosophical Society</i>, vol. 172, no. 3, Cambridge University Press, 2022, pp. 563–90, doi:<a href=\"https://doi.org/10.1017/S030500412100030X\">10.1017/S030500412100030X</a>."},"publication_identifier":{"eissn":["1469-8064"],"issn":["0305-0041"]},"page":"563-590","volume":172,"type":"journal_article","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","_id":"9364","year":"2022","has_accepted_license":"1","arxiv":1,"abstract":[{"lang":"eng","text":"Let t : F_p → C be a complex valued function on F_p. A classical problem in analytic number theory is bounding the maximum M(t) := max_{0≤H<p} |(1/√p) ∑_{0≤n<H} t(n)| of the absolute value of the incomplete sums (1/√p) ∑_{0≤n<H} t(n). In this very general context one of the most important results is the Pólya–Vinogradov bound M(t) ≤ ‖t̂‖_∞ log 3p, where t̂ : F_p → C is the normalized Fourier transform of t. In this paper we provide a lower bound for certain incomplete Kloosterman sums, namely we prove that for any ε > 0 there exists a large subset of a ∈ F_p^× such that for kl_{a,1,p} : x → e((ax + x̄)/p) we have M(kl_{a,1,p}) ≥ ((1−ε)/√(2π) + o(1)) log log p, as p → ∞. Finally, we prove a result on the growth of the moments of {M(kl_{a,1,p})}_{a∈F_p^×}. 2020 Mathematics Subject Classification: 11L03, 11T23 (Primary); 14F20, 60F10 (Secondary)."}],"title":"On the size of the maximum of incomplete Kloosterman sums","department":[{"_id":"TiBr"}],"publication":"Mathematical Proceedings of the Cambridge Philosophical Society","publisher":"Cambridge University Press","oa_version":"Published Version","date_created":"2021-05-02T22:01:29Z","day":"01","ddc":["510"],"language":[{"iso":"eng"}],"month":"05","isi":1,"intvolume":"       172","publication_status":"published","file":[{"checksum":"614d2e9b83a78100408e4ee7752a80a8","file_name":"2021_MathProcCamPhilSoc_Bonolis.pdf","file_size":334064,"file_id":"10395","date_updated":"2021-12-01T14:01:54Z","date_created":"2021-12-01T14:01:54Z","success":1,"content_type":"application/pdf","creator":"cchlebak","access_level":"open_access","relation":"main_file"}],"date_published":"2022-05-01T00:00:00Z","author":[{"id":"6A459894-5FDD-11E9-AF35-BB24E6697425","full_name":"Bonolis, Dante","last_name":"Bonolis","first_name":"Dante"}],"date_updated":"2023-08-02T06:47:48Z","doi":"10.1017/S030500412100030X","article_processing_charge":"Yes (via OA deal)","scopus_import":"1","quality_controlled":"1","file_date_updated":"2021-12-01T14:01:54Z","tmp":{"image":"/images/cc_by.png","short":"CC BY (4.0)","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode"}},{"abstract":[{"lang":"eng","text":"Isomanifolds are the generalization of isosurfaces to arbitrary dimension and codimension, i.e. manifolds defined as the zero set of some multivariate vector-valued smooth function f : R^d → R^(d−n). A natural (and efficient) way to approximate an isomanifold is to consider its Piecewise-Linear (PL) approximation based on a triangulation T of the ambient space R^d. In this paper, we give conditions under which the PL-approximation of an isomanifold is topologically equivalent to the isomanifold. The conditions are easy to satisfy in the sense that they can always be met by taking a sufficiently\r\nfine triangulation T. 
This contrasts with previous results on the triangulation of manifolds where, in arbitrary dimensions, delicate perturbations are needed to guarantee topological correctness, which leads to strong limitations in practice. We further give a bound on the Fréchet distance between the original isomanifold and its PL-approximation. Finally we show analogous results for the PL-approximation of an isomanifold with boundary."}],"publisher":"Springer Nature","publication":"Foundations of Computational Mathematics ","title":"The topological correctness of PL approximations of isomanifolds","department":[{"_id":"HeEd"}],"date_created":"2021-07-14T06:44:53Z","ec_funded":1,"oa_version":"Published Version","article_type":"original","external_id":{"isi":["000673039600001"]},"oa":1,"status":"public","acknowledgement":"First and foremost, we acknowledge Siargey Kachanovich for discussions. We thank Herbert Edelsbrunner and all members of his group, all former and current members of the Datashape team (formerly known as Geometrica), and André Lieutier for encouragement. We further thank the reviewers of Foundations of Computational Mathematics and the reviewers and program committee of the Symposium on Computational Geometry for their feedback, which improved the exposition.\r\nThis work was funded by the European Research Council under the European Union’s ERC Grant Agreement number 339025 GUDHI (Algorithmic Foundations of Geometric Understanding in Higher Dimensions). This work was also supported by the French government, through the 3IA Côte d’Azur Investments in the Future project managed by the National Research Agency (ANR) with the reference number ANR-19-P3IA-0002. Mathijs Wintraecken also received funding from the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement no. 754411.","publication_identifier":{"eissn":["1615-3383"]},"citation":{"ieee":"J.-D. Boissonnat and M. Wintraecken, “The topological correctness of PL approximations of isomanifolds,” <i>Foundations of Computational Mathematics </i>, vol. 22. Springer Nature, pp. 967–1012, 2022.","chicago":"Boissonnat, Jean-Daniel, and Mathijs Wintraecken. “The Topological Correctness of PL Approximations of Isomanifolds.” <i>Foundations of Computational Mathematics </i>. Springer Nature, 2022. <a href=\"https://doi.org/10.1007/s10208-021-09520-0\">https://doi.org/10.1007/s10208-021-09520-0</a>.","apa":"Boissonnat, J.-D., &#38; Wintraecken, M. (2022). The topological correctness of PL approximations of isomanifolds. <i>Foundations of Computational Mathematics </i>. Springer Nature. <a href=\"https://doi.org/10.1007/s10208-021-09520-0\">https://doi.org/10.1007/s10208-021-09520-0</a>","ama":"Boissonnat J-D, Wintraecken M. The topological correctness of PL approximations of isomanifolds. <i>Foundations of Computational Mathematics </i>. 2022;22:967-1012. doi:<a href=\"https://doi.org/10.1007/s10208-021-09520-0\">10.1007/s10208-021-09520-0</a>","short":"J.-D. Boissonnat, M. Wintraecken, Foundations of Computational Mathematics  22 (2022) 967–1012.","mla":"Boissonnat, Jean-Daniel, and Mathijs Wintraecken. “The Topological Correctness of PL Approximations of Isomanifolds.” <i>Foundations of Computational Mathematics </i>, vol. 22, Springer Nature, 2022, pp. 967–1012, doi:<a href=\"https://doi.org/10.1007/s10208-021-09520-0\">10.1007/s10208-021-09520-0</a>.","ista":"Boissonnat J-D, Wintraecken M. 2022. The topological correctness of PL approximations of isomanifolds. 
Foundations of Computational Mathematics . 22, 967–1012."},"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","type":"journal_article","volume":22,"page":"967-1012","has_accepted_license":"1","year":"2022","_id":"9649","article_processing_charge":"Yes (via OA deal)","doi":"10.1007/s10208-021-09520-0","date_updated":"2023-08-02T06:49:17Z","related_material":{"record":[{"status":"public","relation":"earlier_version","id":"7952"}]},"date_published":"2022-01-01T00:00:00Z","author":[{"full_name":"Boissonnat, Jean-Daniel","first_name":"Jean-Daniel","last_name":"Boissonnat"},{"orcid":"0000-0002-7472-2220","last_name":"Wintraecken","first_name":"Mathijs","id":"307CFBC8-F248-11E8-B48F-1D18A9856A87","full_name":"Wintraecken, Mathijs"}],"quality_controlled":"1","scopus_import":"1","file_date_updated":"2021-07-14T06:44:36Z","tmp":{"image":"/images/cc_by.png","short":"CC BY (4.0)","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode"},"ddc":["516"],"day":"01","isi":1,"month":"01","language":[{"iso":"eng"}],"project":[{"name":"ISTplus - Postdoctoral Fellowships","grant_number":"754411","_id":"260C2330-B435-11E9-9278-68D0E5697425","call_identifier":"H2020"}],"file":[{"file_id":"9650","date_updated":"2021-07-14T06:44:36Z","file_name":"Boissonnat-Wintraecken2021_Article_TheTopologicalCorrectnessOfPLA.pdf","checksum":"f1d372ec3c08ec22e84f8e93e1126b8c","file_size":1455699,"creator":"mwintrae","access_level":"open_access","relation":"main_file","date_created":"2021-07-14T06:44:36Z","content_type":"application/pdf"}],"publication_status":"published","intvolume":"        22"},{"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","type":"journal_article","volume":14,"year":"2022","_id":"10016","issue":"5","article_type":"review","external_id":{"pmid":["34400554"],"isi":["000806563000003"]},"acknowledgement":"The author thanks the whole community of researchers consciously or unconsciously working on questions related to auxin, whose hard work and enthusiasm contributed to development of this exciting story. Particular thanks go to many\r\nbrilliant present and past members of the Friml group and our numerous excellent collaborators, without whom my own personal journey would not be possible. The way of the cross with its 14 stations is a popular devotion among Roman Catholics and inspires them to make a spiritual pilgrimage through contemplation of Christ on his last day. Its aspects of gradual progress, struggle, passion, and revelation served as an inspiration for the formal depiction of our journey to understanding auxin as described in this review. It is in no way intended to reflect the personal beliefs of the author and readers. I am grateful to Nick Barton, Eva Benková, Lenka Caisová, Matyáš Fendrych, Lukáš Fiedler, Monika Frátriková, Jarmila Frimlová, Michelle Gallei, Jakub Hajný, Lukas Hoermayer, Alexandra Mally, Ondřej Novák, Jan Petrášek, Aleš Pěnčík, Steffen Vanneste, Tongda Xu, and Zhenbiao Yang for their valuable comments. Special thanks go to Michelle Gallei for her invaluable assistance with the figures.","status":"public","oa":1,"publication_identifier":{"issn":["1943-0264"]},"citation":{"ama":"Friml J. Fourteen stations of auxin. <i>Cold Spring Harbor Perspectives in Biology</i>. 2022;14(5). doi:<a href=\"https://doi.org/10.1101/cshperspect.a039859\">10.1101/cshperspect.a039859</a>","mla":"Friml, Jiří. “Fourteen Stations of Auxin.” <i>Cold Spring Harbor Perspectives in Biology</i>, vol. 14, no. 
5, a039859, Cold Spring Harbor Laboratory, 2022, doi:<a href=\"https://doi.org/10.1101/cshperspect.a039859\">10.1101/cshperspect.a039859</a>.","ista":"Friml J. 2022. Fourteen stations of auxin. Cold Spring Harbor Perspectives in Biology. 14(5), a039859.","short":"J. Friml, Cold Spring Harbor Perspectives in Biology 14 (2022).","chicago":"Friml, Jiří. “Fourteen Stations of Auxin.” <i>Cold Spring Harbor Perspectives in Biology</i>. Cold Spring Harbor Laboratory, 2022. <a href=\"https://doi.org/10.1101/cshperspect.a039859\">https://doi.org/10.1101/cshperspect.a039859</a>.","ieee":"J. Friml, “Fourteen stations of auxin,” <i>Cold Spring Harbor Perspectives in Biology</i>, vol. 14, no. 5. Cold Spring Harbor Laboratory, 2022.","apa":"Friml, J. (2022). Fourteen stations of auxin. <i>Cold Spring Harbor Perspectives in Biology</i>. Cold Spring Harbor Laboratory. <a href=\"https://doi.org/10.1101/cshperspect.a039859\">https://doi.org/10.1101/cshperspect.a039859</a>"},"publisher":"Cold Spring Harbor Laboratory","publication":"Cold Spring Harbor Perspectives in Biology","department":[{"_id":"JiFr"}],"title":"Fourteen stations of auxin","date_created":"2021-09-14T11:36:53Z","oa_version":"Published Version","abstract":[{"text":"Auxin has always been at the forefront of research in plant physiology and development. Since the earliest contemplations by Julius von Sachs and Charles Darwin, more than a century-long struggle has been waged to understand its function. This largely reflects the failures, successes, and inevitable progress in the entire field of plant signaling and development. Here I present 14 stations on our long and sometimes mystical journey to understand auxin. These highlights were selected to give a flavor of the field and to show the scope and limits of our current knowledge. A special focus is put on features that make auxin unique among phytohormones, such as its dynamic, directional transport network, which integrates external and internal signals, including self-organizing feedback. Accented are persistent mysteries and controversies. The unexpected discoveries related to rapid auxin responses and growth regulation recently disturbed our contentment regarding understanding of the auxin signaling mechanism. These new revelations, along with advances in technology, usher us into a new, exciting era in auxin research. 
","lang":"eng"}],"publication_status":"published","intvolume":"        14","main_file_link":[{"open_access":"1","url":"https://doi.org/10.1101/cshperspect.a039859 "}],"day":"27","pmid":1,"isi":1,"month":"05","language":[{"iso":"eng"}],"article_number":"a039859","article_processing_charge":"No","doi":"10.1101/cshperspect.a039859 ","date_updated":"2023-08-02T06:54:42Z","author":[{"first_name":"Jiří","last_name":"Friml","orcid":"0000-0002-8302-7596","full_name":"Friml, Jiří","id":"4159519E-F248-11E8-B48F-1D18A9856A87"}],"date_published":"2022-05-27T00:00:00Z","quality_controlled":"1","scopus_import":"1"},{"scopus_import":"1","keyword":["Integral points","del Pezzo surface","universal torsor","Manin’s conjecture"],"quality_controlled":"1","author":[{"last_name":"Derenthal","first_name":"Ulrich","full_name":"Derenthal, Ulrich"},{"id":"560601DA-8D36-11E9-A136-7AC1E5697425","full_name":"Wilsch, Florian Alexander","orcid":"0000-0001-7302-8256","last_name":"Wilsch","first_name":"Florian Alexander"}],"date_published":"2022-11-10T00:00:00Z","doi":"10.1017/S1474748022000482","date_updated":"2023-08-02T06:55:10Z","article_processing_charge":"Yes (via OA deal)","project":[{"_id":"26AEDAB2-B435-11E9-9278-68D0E5697425","grant_number":"P32428","name":"New frontiers of the Manin conjecture","call_identifier":"FWF"}],"language":[{"iso":"eng"}],"month":"11","isi":1,"day":"10","main_file_link":[{"open_access":"1","url":"https://doi.org/10.1017/S1474748022000482"}],"publication_status":"epub_ahead","abstract":[{"lang":"eng","text":"In order to study integral points of bounded log-anticanonical height on weak del Pezzo surfaces, we classify weak del Pezzo pairs. As a representative example, we consider a quartic del Pezzo surface of singularity type A1 + A3 and prove an analogue of Manin's conjecture for integral points with respect to its singularities and its lines."}],"arxiv":1,"oa_version":"Published Version","date_created":"2021-09-15T10:06:48Z","department":[{"_id":"TiBr"}],"title":"Integral points on singular del Pezzo surfaces","publication":"Journal of the Institute of Mathematics of Jussieu","publisher":"Cambridge University Press","citation":{"chicago":"Derenthal, Ulrich, and Florian Alexander Wilsch. “Integral Points on Singular Del Pezzo Surfaces.” <i>Journal of the Institute of Mathematics of Jussieu</i>. Cambridge University Press, 2022. <a href=\"https://doi.org/10.1017/S1474748022000482\">https://doi.org/10.1017/S1474748022000482</a>.","ieee":"U. Derenthal and F. A. Wilsch, “Integral points on singular del Pezzo surfaces,” <i>Journal of the Institute of Mathematics of Jussieu</i>. Cambridge University Press, 2022.","apa":"Derenthal, U., &#38; Wilsch, F. A. (2022). Integral points on singular del Pezzo surfaces. <i>Journal of the Institute of Mathematics of Jussieu</i>. Cambridge University Press. <a href=\"https://doi.org/10.1017/S1474748022000482\">https://doi.org/10.1017/S1474748022000482</a>","ama":"Derenthal U, Wilsch FA. Integral points on singular del Pezzo surfaces. <i>Journal of the Institute of Mathematics of Jussieu</i>. 2022. doi:<a href=\"https://doi.org/10.1017/S1474748022000482\">10.1017/S1474748022000482</a>","mla":"Derenthal, Ulrich, and Florian Alexander Wilsch. “Integral Points on Singular Del Pezzo Surfaces.” <i>Journal of the Institute of Mathematics of Jussieu</i>, Cambridge University Press, 2022, doi:<a href=\"https://doi.org/10.1017/S1474748022000482\">10.1017/S1474748022000482</a>.","ista":"Derenthal U, Wilsch FA. 2022. Integral points on singular del Pezzo surfaces. 
Journal of the Institute of Mathematics of Jussieu.","short":"U. Derenthal, F.A. Wilsch, Journal of the Institute of Mathematics of Jussieu (2022)."},"publication_identifier":{"eissn":["1475-3030"],"issn":["1474-7480"]},"oa":1,"status":"public","acknowledgement":"The first author was partly supported by grant DE 1646/4-2 of the Deutsche Forschungsgemeinschaft. The second author was partly supported by FWF grant P 32428-N35 and conducted part of this work as a guest at the Institut de Mathématiques de Jussieu–Paris Rive Gauche invited by Antoine Chambert-Loir and funded by DAAD.","article_type":"original","external_id":{"isi":["000881319200001"],"arxiv":["2109.06778"]},"_id":"10018","year":"2022","type":"journal_article","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8"},{"month":"01","isi":1,"project":[{"_id":"260C2330-B435-11E9-9278-68D0E5697425","name":"ISTplus - Postdoctoral Fellowships","grant_number":"754411","call_identifier":"H2020"},{"_id":"2564DBCA-B435-11E9-9278-68D0E5697425","name":"International IST Doctoral Program","grant_number":"665385","call_identifier":"H2020"},{"_id":"9B8F7476-BA93-11EA-9121-9846C619BF3A","name":"HighTE: The Werner Siemens Laboratory for the High Throughput Discovery of Semiconductors for Waste Heat Recovery"},{"grant_number":"M02889","name":"Bottom-up Engineering for Thermoelectric Applications","_id":"9B8804FC-BA93-11EA-9121-9846C619BF3A"}],"language":[{"iso":"eng"}],"pmid":1,"ddc":["540"],"day":"25","intvolume":"        16","file":[{"date_created":"2022-03-02T16:17:29Z","success":1,"content_type":"application/pdf","access_level":"open_access","creator":"cchlebak","relation":"main_file","checksum":"74f9c1aa5f95c0b992a4328e8e0247b4","file_name":"2022_ACSNano_Liu.pdf","file_size":9050764,"date_updated":"2022-03-02T16:17:29Z","file_id":"10808"}],"publication_status":"published","keyword":["tin selenide","nanocomposite","grain growth","Zener pinning","thermoelectricity","annealing","solution processing"],"scopus_import":"1","quality_controlled":"1","author":[{"first_name":"Yu","last_name":"Liu","orcid":"0000-0001-7313-6740","full_name":"Liu, Yu","id":"2A70014E-F248-11E8-B48F-1D18A9856A87"},{"full_name":"Calcabrini, Mariano","id":"45D7531A-F248-11E8-B48F-1D18A9856A87","first_name":"Mariano","last_name":"Calcabrini"},{"last_name":"Yu","first_name":"Yuan","full_name":"Yu, Yuan"},{"first_name":"Seungho","last_name":"Lee","orcid":"0000-0002-6962-8598","full_name":"Lee, Seungho","id":"BB243B88-D767-11E9-B658-BC13E6697425"},{"id":"9E331C2E-9F27-11E9-AE48-5033E6697425","full_name":"Chang, Cheng","orcid":"0000-0002-9515-4277","last_name":"Chang","first_name":"Cheng"},{"full_name":"David, Jérémy","first_name":"Jérémy","last_name":"David"},{"full_name":"Ghosh, Tanmoy","id":"a5fc9bc3-feff-11ea-93fe-e8015a3c7e9d","first_name":"Tanmoy","last_name":"Ghosh"},{"first_name":"Maria Chiara","last_name":"Spadaro","full_name":"Spadaro, Maria Chiara"},{"last_name":"Xie","first_name":"Chenyang","full_name":"Xie, Chenyang"},{"first_name":"Oana","last_name":"Cojocaru-Mirédin","full_name":"Cojocaru-Mirédin, Oana"},{"full_name":"Arbiol, Jordi","last_name":"Arbiol","first_name":"Jordi"},{"id":"43C61214-F248-11E8-B48F-1D18A9856A87","full_name":"Ibáñez, Maria","orcid":"0000-0001-5013-2843","last_name":"Ibáñez","first_name":"Maria"}],"date_published":"2022-01-25T00:00:00Z","article_processing_charge":"Yes (via OA 
deal)","date_updated":"2023-08-02T14:41:05Z","related_material":{"record":[{"relation":"dissertation_contains","status":"public","id":"12885"}]},"doi":"10.1021/acsnano.1c06720","tmp":{"image":"/images/cc_by.png","short":"CC BY (4.0)","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode"},"file_date_updated":"2022-03-02T16:17:29Z","citation":{"chicago":"Liu, Yu, Mariano Calcabrini, Yuan Yu, Seungho Lee, Cheng Chang, Jérémy David, Tanmoy Ghosh, et al. “Defect Engineering in Solution-Processed Polycrystalline SnSe Leads to High Thermoelectric Performance.” <i>ACS Nano</i>. American Chemical Society , 2022. <a href=\"https://doi.org/10.1021/acsnano.1c06720\">https://doi.org/10.1021/acsnano.1c06720</a>.","ieee":"Y. Liu <i>et al.</i>, “Defect engineering in solution-processed polycrystalline SnSe leads to high thermoelectric performance,” <i>ACS Nano</i>, vol. 16, no. 1. American Chemical Society , pp. 78–88, 2022.","apa":"Liu, Y., Calcabrini, M., Yu, Y., Lee, S., Chang, C., David, J., … Ibáñez, M. (2022). Defect engineering in solution-processed polycrystalline SnSe leads to high thermoelectric performance. <i>ACS Nano</i>. American Chemical Society . <a href=\"https://doi.org/10.1021/acsnano.1c06720\">https://doi.org/10.1021/acsnano.1c06720</a>","ama":"Liu Y, Calcabrini M, Yu Y, et al. Defect engineering in solution-processed polycrystalline SnSe leads to high thermoelectric performance. <i>ACS Nano</i>. 2022;16(1):78-88. doi:<a href=\"https://doi.org/10.1021/acsnano.1c06720\">10.1021/acsnano.1c06720</a>","short":"Y. Liu, M. Calcabrini, Y. Yu, S. Lee, C. Chang, J. David, T. Ghosh, M.C. Spadaro, C. Xie, O. Cojocaru-Mirédin, J. Arbiol, M. Ibáñez, ACS Nano 16 (2022) 78–88.","ista":"Liu Y, Calcabrini M, Yu Y, Lee S, Chang C, David J, Ghosh T, Spadaro MC, Xie C, Cojocaru-Mirédin O, Arbiol J, Ibáñez M. 2022. Defect engineering in solution-processed polycrystalline SnSe leads to high thermoelectric performance. ACS Nano. 16(1), 78–88.","mla":"Liu, Yu, et al. “Defect Engineering in Solution-Processed Polycrystalline SnSe Leads to High Thermoelectric Performance.” <i>ACS Nano</i>, vol. 16, no. 1, American Chemical Society , 2022, pp. 78–88, doi:<a href=\"https://doi.org/10.1021/acsnano.1c06720\">10.1021/acsnano.1c06720</a>."},"publication_identifier":{"issn":["1936-0851"],"eissn":["1936-086X"]},"oa":1,"acknowledgement":"This work was financially supported by IST Austria and the Werner Siemens Foundation. Y.L. acknowledges funding from the European Union’s Horizon 2020 research and innovation program under the Marie Sklodowska-Curie grant agreement No. 754411. S.L. and M.C. received funding from the European Union’s Horizon 2020 research and innovation program under the Marie Skłodowska-Curie Grant Agreement No. 665385. J.D. acknowledges funding from the European Union’s Horizon 2020 research and innovation program under the Marie Sklodowska-Curie grant agreement no. 665919 (P-SPHERE) cofunded by Severo Ochoa Programme. C.C. acknowledges funding from the FWF “Lise Meitner Fellowship” grant agreement M 2889-N. Y.Y. and O.C.-M. acknowledge the financial support from DFG within the project SFB 917: Nanoswitches. M.C.S. received funding from the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No. 754510 (PROBIST) and the Severo Ochoa programme. J.D. 
received funding from the European Union’s Horizon 2020 research and innovation programme under the Marie Sklodowska-Curie grant agreement No. 665919 (P-SPHERE) cofunded by Severo Ochoa Programme. The ICN2 is funded by the CERCA Program/Generalitat de Catalunya and by the Severo Ochoa program of the Spanish Ministry of Economy, Industry, and Competitiveness (MINECO, grant no. SEV-2017-0706). ICN2 acknowledges funding from Generalitat de Catalunya 2017 SGR 327 and the Spanish MINECO project NANOGEN (PID2020-116093RB-C43). This project received funding from the European Union’s Horizon 2020 research and innovation program under grant agreement No. 823717-ESTEEM3. The FIB sample preparation was conducted in the LMA-INA-Universidad de Zaragoza.","status":"public","issue":"1","external_id":{"isi":["000767223400008"],"pmid":["34549956"]},"article_type":"original","_id":"10042","year":"2022","has_accepted_license":"1","volume":16,"page":"78-88","user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","type":"journal_article","abstract":[{"text":"SnSe has emerged as one of the most promising materials for thermoelectric energy conversion due to its extraordinary performance in its single-crystal form and its low-cost constituent elements. However, to achieve an economic impact, the polycrystalline counterpart needs to replicate the performance of the single crystal. Herein, we optimize the thermoelectric performance of polycrystalline SnSe produced by consolidating solution-processed and surface-engineered SnSe particles. In particular, the SnSe particles are coated with CdSe molecular complexes that crystallize during the sintering process, forming CdSe nanoparticles. The presence of CdSe nanoparticles inhibits SnSe grain growth during the consolidation step due to Zener pinning, yielding a material with a high density of grain boundaries. Moreover, the resulting SnSe–CdSe nanocomposites present a large number of defects at different length scales, which significantly reduce the thermal conductivity. The produced SnSe–CdSe nanocomposites exhibit thermoelectric figures of merit up to 2.2 at 786 K, which is among the highest reported for solution-processed SnSe.","lang":"eng"}],"date_created":"2021-09-24T07:55:12Z","ec_funded":1,"oa_version":"Published Version","publication":"ACS Nano","title":"Defect engineering in solution-processed polycrystalline SnSe leads to high thermoelectric performance","department":[{"_id":"MaIb"}],"publisher":"American Chemical Society "}]
