[{"intvolume":"       517","status":"public","publication":"Monthly Notices of the Royal Astronomical Society","quality_controlled":"1","publisher":"Oxford Academic","date_created":"2023-08-21T10:11:21Z","month":"12","extern":"1","page":"2028-2055","doi":"10.1093/mnras/stac2598","language":[{"iso":"eng"}],"author":[{"first_name":"Z.","full_name":"Keszthelyi, Z.","last_name":"Keszthelyi"},{"full_name":"Koter, A. de","first_name":"A. de","last_name":"Koter"},{"id":"d0648d0c-0f64-11ee-a2e0-dd0faa2e4f7d","orcid":"0000-0002-6960-6911","full_name":"Götberg, Ylva Louise Linsdotter","first_name":"Ylva Louise Linsdotter","last_name":"Götberg"},{"last_name":"Meynet","full_name":"Meynet, G.","first_name":"G."},{"last_name":"Brands","full_name":"Brands, S. A.","first_name":"S. A."},{"first_name":"V.","full_name":"Petit, V.","last_name":"Petit"},{"full_name":"Carrington, M.","first_name":"M.","last_name":"Carrington"},{"full_name":"A. David-Uraz, A. David-Uraz","first_name":"A. David-Uraz","last_name":"A. David-Uraz"},{"first_name":"S. T.","full_name":"Geen, S. T.","last_name":"Geen"},{"last_name":"Georgy","full_name":"Georgy, C.","first_name":"C."},{"full_name":"Hirschi, R.","first_name":"R.","last_name":"Hirschi"},{"last_name":"Puls","full_name":"Puls, J.","first_name":"J."},{"full_name":"Ramalatswa, K. J.","first_name":"K. J.","last_name":"Ramalatswa"},{"last_name":"Shultz","full_name":"Shultz, M. E.","first_name":"M. E."},{"first_name":"A. ud-Doula","full_name":"A. ud-Doula, A. ud-Doula","last_name":"A. ud-Doula"}],"type":"journal_article","day":"01","title":"The effects of surface fossil magnetic fields on massive star evolution: IV. Grids of models at solar, LMC, and SMC metallicities","citation":{"mla":"Keszthelyi, Z., et al. “The Effects of Surface Fossil Magnetic Fields on Massive Star Evolution: IV. Grids of Models at Solar, LMC, and SMC Metallicities.” <i>Monthly Notices of the Royal Astronomical Society</i>, vol. 517, no. 2, Oxford Academic, 2022, pp. 2028–55, doi:<a href=\"https://doi.org/10.1093/mnras/stac2598\">10.1093/mnras/stac2598</a>.","ista":"Keszthelyi Z, Koter A de, Götberg YLL, Meynet G, Brands SA, Petit V, Carrington M, A. David-Uraz AD-U, Geen ST, Georgy C, Hirschi R, Puls J, Ramalatswa KJ, Shultz ME, A. ud-Doula A ud-Doula. 2022. The effects of surface fossil magnetic fields on massive star evolution: IV. Grids of models at solar, LMC, and SMC metallicities. Monthly Notices of the Royal Astronomical Society. 517(2), 2028–2055.","ieee":"Z. Keszthelyi <i>et al.</i>, “The effects of surface fossil magnetic fields on massive star evolution: IV. Grids of models at solar, LMC, and SMC metallicities,” <i>Monthly Notices of the Royal Astronomical Society</i>, vol. 517, no. 2. Oxford Academic, pp. 2028–2055, 2022.","chicago":"Keszthelyi, Z., A. de Koter, Ylva Louise Linsdotter Götberg, G. Meynet, S. A. Brands, V. Petit, M. Carrington, et al. “The Effects of Surface Fossil Magnetic Fields on Massive Star Evolution: IV. Grids of Models at Solar, LMC, and SMC Metallicities.” <i>Monthly Notices of the Royal Astronomical Society</i>. Oxford Academic, 2022. <a href=\"https://doi.org/10.1093/mnras/stac2598\">https://doi.org/10.1093/mnras/stac2598</a>.","apa":"Keszthelyi, Z., Koter, A. de, Götberg, Y. L. L., Meynet, G., Brands, S. A., Petit, V., … A. ud-Doula, A. ud-Doula. (2022). The effects of surface fossil magnetic fields on massive star evolution: IV. Grids of models at solar, LMC, and SMC metallicities. <i>Monthly Notices of the Royal Astronomical Society</i>. Oxford Academic. 
<a href=\"https://doi.org/10.1093/mnras/stac2598\">https://doi.org/10.1093/mnras/stac2598</a>","ama":"Keszthelyi Z, Koter A de, Götberg YLL, et al. The effects of surface fossil magnetic fields on massive star evolution: IV. Grids of models at solar, LMC, and SMC metallicities. <i>Monthly Notices of the Royal Astronomical Society</i>. 2022;517(2):2028-2055. doi:<a href=\"https://doi.org/10.1093/mnras/stac2598\">10.1093/mnras/stac2598</a>","short":"Z. Keszthelyi, A. de Koter, Y.L.L. Götberg, G. Meynet, S.A. Brands, V. Petit, M. Carrington, A.D.-U. A. David-Uraz, S.T. Geen, C. Georgy, R. Hirschi, J. Puls, K.J. Ramalatswa, M.E. Shultz, A. ud-Doula A. ud-Doula, Monthly Notices of the Royal Astronomical Society 517 (2022) 2028–2055."},"volume":517,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.1093/mnras/stac2598"}],"oa":1,"publication_status":"published","_id":"14098","date_published":"2022-12-01T00:00:00Z","abstract":[{"text":"Magnetic fields can drastically change predictions of evolutionary models of massive stars via mass-loss quenching, magnetic braking, and efficient angular momentum transport, which we aim to quantify in this work. We use the MESA software instrument to compute an extensive main-sequence grid of stellar structure and evolution models, as well as isochrones, accounting for the effects attributed to a surface fossil magnetic field. The grid is densely populated in initial mass (3–60 M⊙), surface equatorial magnetic field strength (0–50 kG), and metallicity (representative of the Solar neighbourhood and the Magellanic Clouds). We use two magnetic braking and two chemical mixing schemes and compare the model predictions for slowly rotating, nitrogen-enriched (‘Group 2’) stars with observations in the Large Magellanic Cloud. We quantify a range of initial field strengths that allow for producing Group 2 stars and find that typical values (up to a few kG) lead to solutions. Between the subgrids, we find notable departures in surface abundances and evolutionary paths. In our magnetic models, chemical mixing is always less efficient compared to non-magnetic models due to the rapid spin-down. We identify that quasi-chemically homogeneous main sequence evolution by efficient mixing could be prevented by fossil magnetic fields. We recommend comparing this grid of evolutionary models with spectropolarimetric and spectroscopic observations with the goals of (i) revisiting the derived stellar parameters of known magnetic stars, and (ii) observationally constraining the uncertain magnetic braking and chemical mixing schemes.","lang":"eng"}],"article_processing_charge":"No","issue":"2","arxiv":1,"publication_identifier":{"eissn":["1365-2966"],"issn":["0035-8711"]},"scopus_import":"1","external_id":{"arxiv":["2209.06350"]},"date_updated":"2023-08-22T13:18:34Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","year":"2022","oa_version":"Published Version","article_type":"original"},{"external_id":{"arxiv":["2211.07060"]},"language":[{"iso":"eng"}],"date_updated":"2023-08-22T13:20:15Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","doi":"10.48550/arXiv.2211.07060","day":"14","author":[{"first_name":"Z.","full_name":"Keszthelyi, Z.","last_name":"Keszthelyi"},{"full_name":"Koter, A. de","first_name":"A. 
de","last_name":"Koter"},{"id":"d0648d0c-0f64-11ee-a2e0-dd0faa2e4f7d","orcid":"0000-0002-6960-6911","first_name":"Ylva Louise Linsdotter","full_name":"Götberg, Ylva Louise Linsdotter","last_name":"Götberg"},{"full_name":"Meynet, G.","first_name":"G.","last_name":"Meynet"},{"first_name":"S. A.","full_name":"Brands, S. A.","last_name":"Brands"},{"last_name":"Petit","full_name":"Petit, V.","first_name":"V."},{"first_name":"M.","full_name":"Carrington, M.","last_name":"Carrington"},{"last_name":"A. David-Uraz","first_name":"A. David-Uraz","full_name":"A. David-Uraz, A. David-Uraz"},{"last_name":"Geen","full_name":"Geen, S. T.","first_name":"S. T."},{"last_name":"Georgy","first_name":"C.","full_name":"Georgy, C."},{"full_name":"Hirschi, R.","first_name":"R.","last_name":"Hirschi"},{"last_name":"Puls","full_name":"Puls, J.","first_name":"J."},{"full_name":"Ramalatswa, K. J.","first_name":"K. J.","last_name":"Ramalatswa"},{"last_name":"Shultz","full_name":"Shultz, M. E.","first_name":"M. E."},{"full_name":"A. ud-Doula, A. ud-Doula","first_name":"A. ud-Doula","last_name":"A. ud-Doula"}],"type":"preprint","oa_version":"Submitted Version","year":"2022","citation":{"ieee":"Z. Keszthelyi <i>et al.</i>, “Spin-down and reduced mass loss in early-type stars with large-scale magnetic fields,” <i>arXiv</i>. .","ista":"Keszthelyi Z, Koter A de, Götberg YLL, Meynet G, Brands SA, Petit V, Carrington M, A. David-Uraz AD-U, Geen ST, Georgy C, Hirschi R, Puls J, Ramalatswa KJ, Shultz ME, A. ud-Doula A ud-Doula. Spin-down and reduced mass loss in early-type stars with large-scale magnetic fields. arXiv, 2211.07060.","chicago":"Keszthelyi, Z., A. de Koter, Ylva Louise Linsdotter Götberg, G. Meynet, S. A. Brands, V. Petit, M. Carrington, et al. “Spin-down and Reduced Mass Loss in Early-Type Stars with Large-Scale Magnetic Fields.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2211.07060\">https://doi.org/10.48550/arXiv.2211.07060</a>.","mla":"Keszthelyi, Z., et al. “Spin-down and Reduced Mass Loss in Early-Type Stars with Large-Scale Magnetic Fields.” <i>ArXiv</i>, 2211.07060, doi:<a href=\"https://doi.org/10.48550/arXiv.2211.07060\">10.48550/arXiv.2211.07060</a>.","short":"Z. Keszthelyi, A. de Koter, Y.L.L. Götberg, G. Meynet, S.A. Brands, V. Petit, M. Carrington, A.D.-U. A. David-Uraz, S.T. Geen, C. Georgy, R. Hirschi, J. Puls, K.J. Ramalatswa, M.E. Shultz, A. ud-Doula A. ud-Doula, ArXiv (n.d.).","apa":"Keszthelyi, Z., Koter, A. de, Götberg, Y. L. L., Meynet, G., Brands, S. A., Petit, V., … A. ud-Doula, A. ud-Doula. (n.d.). Spin-down and reduced mass loss in early-type stars with large-scale magnetic fields. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2211.07060\">https://doi.org/10.48550/arXiv.2211.07060</a>","ama":"Keszthelyi Z, Koter A de, Götberg YLL, et al. Spin-down and reduced mass loss in early-type stars with large-scale magnetic fields. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2211.07060\">10.48550/arXiv.2211.07060</a>"},"title":"Spin-down and reduced mass loss in early-type stars with large-scale magnetic fields","publication":"arXiv","status":"public","publication_status":"submitted","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2211.07060"}],"oa":1,"month":"11","article_number":"2211.07060","extern":"1","date_created":"2023-08-21T10:11:37Z","_id":"14099","date_published":"2022-11-14T00:00:00Z","abstract":[{"lang":"eng","text":"Magnetism can greatly impact the evolution of stars. 
In some stars with OBA spectral types there is direct evidence via the Zeeman effect for stable, large-scale magnetospheres, which lead to the spin-down of the stellar surface and reduced mass loss. So far, a comprehensive grid of stellar structure and evolution models accounting for these effects was lacking. For this reason, we computed and studied models with two magnetic braking and two chemical mixing schemes in three metallicity environments with the MESA software instrument. We find notable differences between the subgrids, which affects the model predictions and thus the detailed characterisation of stars. We are able to quantify the impact of magnetic fields in terms of preventing quasi-chemically homogeneous evolution and producing slowly-rotating, nitrogen-enriched (\"Group 2\") stars. Our model grid is fully open access and open source."}],"arxiv":1,"article_processing_charge":"No"},{"oa_version":"Preprint","year":"2022","external_id":{"arxiv":["2204.04440"]},"scopus_import":"1","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-09-06T10:29:42Z","publication_identifier":{"isbn":["9781713871088"]},"_id":"14106","abstract":[{"lang":"eng","text":"We show that deep networks trained to satisfy demographic parity often do so\r\nthrough a form of race or gender awareness, and that the more we force a network\r\nto be fair, the more accurately we can recover race or gender from the internal state\r\nof the network. Based on this observation, we investigate an alternative fairness\r\napproach: we add a second classification head to the network to explicitly predict\r\nthe protected attribute (such as race or gender) alongside the original task. After\r\ntraining the two-headed network, we enforce demographic parity by merging the\r\ntwo heads, creating a network with the same architecture as the original network.\r\nWe establish a close relationship between existing approaches and our approach\r\nby showing (1) that the decisions of a fair classifier are well-approximated by our\r\napproach, and (2) that an unfair and optimally accurate classifier can be recovered\r\nfrom a fair classifier and our second head predicting the protected attribute. We use\r\nour explicit formulation to argue that the existing fairness approaches, just as ours,\r\ndemonstrate disparate treatment and that they are likely to be unlawful in a wide\r\nrange of scenarios under US law."}],"date_published":"2022-12-15T00:00:00Z","arxiv":1,"article_processing_charge":"No","volume":35,"publication_status":"published","main_file_link":[{"url":"https://arxiv.org/abs/2204.04440","open_access":"1"}],"oa":1,"day":"15","author":[{"last_name":"Lohaus","full_name":"Lohaus, Michael","first_name":"Michael"},{"last_name":"Kleindessner","full_name":"Kleindessner, Matthäus","first_name":"Matthäus"},{"last_name":"Kenthapadi","first_name":"Krishnaram","full_name":"Kenthapadi, Krishnaram"},{"full_name":"Locatello, Francesco","first_name":"Francesco","last_name":"Locatello","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683"},{"full_name":"Russell, Chris","first_name":"Chris","last_name":"Russell"}],"type":"conference","alternative_title":["Advances in Neural Information Processing Systems"],"citation":{"mla":"Lohaus, Michael, et al. “Are Two Heads the Same as One? Identifying Disparate Treatment in Fair Neural Networks.” <i>36th Conference on Neural Information Processing Systems</i>, vol. 35, Neural Information Processing Systems Foundation, 2022, pp. 16548–62.","ieee":"M. Lohaus, M. 
Kleindessner, K. Kenthapadi, F. Locatello, and C. Russell, “Are two heads the same as one? Identifying disparate treatment in fair neural networks,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States, 2022, vol. 35, pp. 16548–16562.","ista":"Lohaus M, Kleindessner M, Kenthapadi K, Locatello F, Russell C. 2022. Are two heads the same as one? Identifying disparate treatment in fair neural networks. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, Advances in Neural Information Processing Systems, vol. 35, 16548–16562.","chicago":"Lohaus, Michael, Matthäus Kleindessner, Krishnaram Kenthapadi, Francesco Locatello, and Chris Russell. “Are Two Heads the Same as One? Identifying Disparate Treatment in Fair Neural Networks.” In <i>36th Conference on Neural Information Processing Systems</i>, 35:16548–62. Neural Information Processing Systems Foundation, 2022.","apa":"Lohaus, M., Kleindessner, M., Kenthapadi, K., Locatello, F., &#38; Russell, C. (2022). Are two heads the same as one? Identifying disparate treatment in fair neural networks. In <i>36th Conference on Neural Information Processing Systems</i> (Vol. 35, pp. 16548–16562). New Orleans, LA, United States: Neural Information Processing Systems Foundation.","ama":"Lohaus M, Kleindessner M, Kenthapadi K, Locatello F, Russell C. Are two heads the same as one? Identifying disparate treatment in fair neural networks. In: <i>36th Conference on Neural Information Processing Systems</i>. Vol 35. Neural Information Processing Systems Foundation; 2022:16548-16562.","short":"M. Lohaus, M. Kleindessner, K. Kenthapadi, F. Locatello, C. Russell, in:, 36th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2022, pp. 16548–16562."},"title":"Are two heads the same as one? Identifying disparate treatment in fair neural networks","conference":{"name":"NeurIPS: Neural Information Processing Systems","start_date":"2022-11-28","end_date":"2022-12-09","location":"New Orleans, LA, United States"},"language":[{"iso":"eng"}],"month":"12","extern":"1","date_created":"2023-08-21T12:12:42Z","page":"16548-16562","publication":"36th Conference on Neural Information Processing Systems","department":[{"_id":"FrLo"}],"quality_controlled":"1","status":"public","intvolume":"        35","publisher":"Neural Information Processing Systems Foundation"},{"date_published":"2022-10-23T00:00:00Z","_id":"14107","date_created":"2023-08-21T12:13:25Z","abstract":[{"text":"Amodal perception requires inferring the full shape of an object that is partially occluded. This task is particularly challenging on two levels: (1) it requires more information than what is contained in the instant retina or imaging sensor, (2) it is difficult to obtain enough well-annotated amodal labels for supervision. To this end, this paper develops a new framework of\r\nSelf-supervised amodal Video object segmentation (SaVos). Our method efficiently leverages the visual information of video temporal sequences to infer the amodal mask of objects. The key intuition is that the occluded part of an object can be explained away if that part is visible in other frames, possibly deformed as long as the deformation can be reasonably learned.\r\nAccordingly, we derive a novel self-supervised learning paradigm that efficiently utilizes the visible object parts as the supervision to guide the training on videos. 
In addition to learning type prior to complete masks for known types, SaVos also learns the spatiotemporal prior, which is also useful for the amodal task and could generalize to unseen types. The proposed\r\nframework achieves the state-of-the-art performance on the synthetic amodal segmentation benchmark FISHBOWL and the real world benchmark KINS-Video-Car. Further, it lends itself well to being transferred to novel distributions using test-time adaptation, outperforming existing models even after the transfer to a new distribution.","lang":"eng"}],"month":"10","extern":"1","article_processing_charge":"No","arxiv":1,"status":"public","publication":"36th Conference on Neural Information Processing Systems","department":[{"_id":"FrLo"}],"oa":1,"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2210.12733","open_access":"1"}],"publication_status":"published","type":"conference","author":[{"first_name":"Jian","full_name":"Yao, Jian","last_name":"Yao"},{"last_name":"Hong","full_name":"Hong, Yuxin","first_name":"Yuxin"},{"last_name":"Wang","first_name":"Chiyu","full_name":"Wang, Chiyu"},{"last_name":"Xiao","full_name":"Xiao, Tianjun","first_name":"Tianjun"},{"full_name":"He, Tong","first_name":"Tong","last_name":"He"},{"orcid":"0000-0002-4850-0683","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","full_name":"Locatello, Francesco","last_name":"Locatello"},{"last_name":"Wipf","full_name":"Wipf, David","first_name":"David"},{"full_name":"Fu, Yanwei","first_name":"Yanwei","last_name":"Fu"},{"last_name":"Zhang","full_name":"Zhang, Zheng","first_name":"Zheng"}],"oa_version":"Preprint","year":"2022","day":"23","title":"Self-supervised amodal video object segmentation","conference":{"location":"New Orleans, LA, United States","name":"NeurIPS: Neural Information Processing Systems","start_date":"2022-11-28","end_date":"2022-12-01"},"citation":{"ama":"Yao J, Hong Y, Wang C, et al. Self-supervised amodal video object segmentation. In: <i>36th Conference on Neural Information Processing Systems</i>. ; 2022. doi:<a href=\"https://doi.org/10.48550/arXiv.2210.12733\">10.48550/arXiv.2210.12733</a>","apa":"Yao, J., Hong, Y., Wang, C., Xiao, T., He, T., Locatello, F., … Zhang, Z. (2022). Self-supervised amodal video object segmentation. In <i>36th Conference on Neural Information Processing Systems</i>. New Orleans, LA, United States. <a href=\"https://doi.org/10.48550/arXiv.2210.12733\">https://doi.org/10.48550/arXiv.2210.12733</a>","short":"J. Yao, Y. Hong, C. Wang, T. Xiao, T. He, F. Locatello, D. Wipf, Y. Fu, Z. Zhang, in:, 36th Conference on Neural Information Processing Systems, 2022.","mla":"Yao, Jian, et al. “Self-Supervised Amodal Video Object Segmentation.” <i>36th Conference on Neural Information Processing Systems</i>, 2022, doi:<a href=\"https://doi.org/10.48550/arXiv.2210.12733\">10.48550/arXiv.2210.12733</a>.","chicago":"Yao, Jian, Yuxin Hong, Chiyu Wang, Tianjun Xiao, Tong He, Francesco Locatello, David Wipf, Yanwei Fu, and Zheng Zhang. “Self-Supervised Amodal Video Object Segmentation.” In <i>36th Conference on Neural Information Processing Systems</i>, 2022. <a href=\"https://doi.org/10.48550/arXiv.2210.12733\">https://doi.org/10.48550/arXiv.2210.12733</a>.","ieee":"J. Yao <i>et al.</i>, “Self-supervised amodal video object segmentation,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States, 2022.","ista":"Yao J, Hong Y, Wang C, Xiao T, He T, Locatello F, Wipf D, Fu Y, Zhang Z. 2022. 
Self-supervised amodal video object segmentation. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems."},"doi":"10.48550/arXiv.2210.12733","external_id":{"arxiv":["2210.12733"]},"language":[{"iso":"eng"}],"date_updated":"2023-09-11T09:34:17Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87"},{"oa_version":"Preprint","year":"2022","external_id":{"arxiv":["2203.04913"]},"scopus_import":"1","date_updated":"2023-09-11T09:19:14Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publication_identifier":{"eissn":["2575-7075"],"isbn":["9781665469470"],"issn":["1063-6919"]},"arxiv":1,"article_processing_charge":"No","date_published":"2022-07-01T00:00:00Z","_id":"14114","abstract":[{"lang":"eng","text":"Algorithmic fairness is frequently motivated in terms of a trade-off in which overall performance is decreased so as to improve performance on disadvantaged groups where the algorithm would otherwise be less accurate. Contrary to this, we find that applying existing fairness approaches to computer vision improve fairness by degrading the performance of classifiers across all groups (with increased degradation on the best performing groups). Extending the bias-variance decomposition for classification to fairness, we theoretically explain why the majority of fairness methods designed for low capacity models should not be used in settings involving high-capacity models, a scenario common to computer vision. We corroborate this analysis with extensive experimental support that shows that many of the fairness heuristics used in computer vision also degrade performance on the most disadvantaged groups. Building on these insights, we propose an adaptive augmentation strategy that, uniquely, of all methods tested, improves performance for the disadvantaged groups."}],"publication_status":"published","oa":1,"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2203.04913"}],"citation":{"mla":"Zietlow, Dominik, et al. “Leveling down in Computer Vision: Pareto Inefficiencies in Fair Deep Classifiers.” <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, Institute of Electrical and Electronics Engineers, 2022, pp. 10400–11, doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">10.1109/cvpr52688.2022.01016</a>.","ista":"Zietlow D, Lohaus M, Balakrishnan G, Kleindessner M, Locatello F, Scholkopf B, Russell C. 2022. Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition. CVPR: Conference on Computer Vision and Pattern Recognition, 10400–10411.","ieee":"D. Zietlow <i>et al.</i>, “Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers,” in <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, New Orleans, LA, United States, 2022, pp. 10400–10411.","chicago":"Zietlow, Dominik, Michael Lohaus, Guha Balakrishnan, Matthaus Kleindessner, Francesco Locatello, Bernhard Scholkopf, and Chris Russell. “Leveling down in Computer Vision: Pareto Inefficiencies in Fair Deep Classifiers.” In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>, 10400–411. Institute of Electrical and Electronics Engineers, 2022. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">https://doi.org/10.1109/cvpr52688.2022.01016</a>.","apa":"Zietlow, D., Lohaus, M., Balakrishnan, G., Kleindessner, M., Locatello, F., Scholkopf, B., &#38; Russell, C. (2022). 
Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. In <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i> (pp. 10400–10411). New Orleans, LA, United States: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">https://doi.org/10.1109/cvpr52688.2022.01016</a>","ama":"Zietlow D, Lohaus M, Balakrishnan G, et al. Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers. In: <i>2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition</i>. Institute of Electrical and Electronics Engineers; 2022:10400-10411. doi:<a href=\"https://doi.org/10.1109/cvpr52688.2022.01016\">10.1109/cvpr52688.2022.01016</a>","short":"D. Zietlow, M. Lohaus, G. Balakrishnan, M. Kleindessner, F. Locatello, B. Scholkopf, C. Russell, in:, 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Institute of Electrical and Electronics Engineers, 2022, pp. 10400–10411."},"title":"Leveling down in computer vision: Pareto inefficiencies in fair deep classifiers","conference":{"end_date":"2022-06-24","start_date":"2022-06-18","name":"CVPR: Conference on Computer Vision and Pattern Recognition","location":"New Orleans, LA, United States"},"day":"01","type":"conference","author":[{"full_name":"Zietlow, Dominik","first_name":"Dominik","last_name":"Zietlow"},{"last_name":"Lohaus","first_name":"Michael","full_name":"Lohaus, Michael"},{"first_name":"Guha","full_name":"Balakrishnan, Guha","last_name":"Balakrishnan"},{"last_name":"Kleindessner","first_name":"Matthaus","full_name":"Kleindessner, Matthaus"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","last_name":"Locatello","first_name":"Francesco","full_name":"Locatello, Francesco"},{"first_name":"Bernhard","full_name":"Scholkopf, Bernhard","last_name":"Scholkopf"},{"last_name":"Russell","first_name":"Chris","full_name":"Russell, Chris"}],"language":[{"iso":"eng"}],"doi":"10.1109/cvpr52688.2022.01016","page":"10400-10411","month":"07","extern":"1","date_created":"2023-08-21T12:18:00Z","publisher":"Institute of Electrical and Electronics Engineers","publication":"2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition","quality_controlled":"1","department":[{"_id":"FrLo"}],"status":"public"},{"year":"2022","alternative_title":[" Advances in Neural Information Processing Systems"],"oa_version":"Preprint","type":"conference","author":[{"last_name":"Rahaman","full_name":"Rahaman, Nasim","first_name":"Nasim"},{"full_name":"Weiss, Martin","first_name":"Martin","last_name":"Weiss"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco","first_name":"Francesco","last_name":"Locatello"},{"first_name":"Chris","full_name":"Pal, Chris","last_name":"Pal"},{"last_name":"Bengio","full_name":"Bengio, Yoshua","first_name":"Yoshua"},{"last_name":"Schölkopf","first_name":"Bernhard","full_name":"Schölkopf, Bernhard"},{"first_name":"Li Erran","full_name":"Li, Li Erran","last_name":"Li"},{"last_name":"Ballas","first_name":"Nicolas","full_name":"Ballas, Nicolas"}],"day":"14","conference":{"name":"NeurIPS: Neural Information Processing Systems","start_date":"2022-11-29","end_date":"2022-12-01","location":"New Orleans, United States"},"title":"Neural attentive circuits","citation":{"chicago":"Rahaman, Nasim, Martin Weiss, Francesco Locatello, Chris Pal, Yoshua Bengio, Bernhard Schölkopf, Li Erran Li, and Nicolas Ballas. 
“Neural Attentive Circuits.” In <i>36th Conference on Neural Information Processing Systems</i>, Vol. 35, 2022.","ieee":"N. Rahaman <i>et al.</i>, “Neural attentive circuits,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, United States, 2022, vol. 35.","ista":"Rahaman N, Weiss M, Locatello F, Pal C, Bengio Y, Schölkopf B, Li LE, Ballas N. 2022. Neural attentive circuits. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems,  Advances in Neural Information Processing Systems, vol. 35.","mla":"Rahaman, Nasim, et al. “Neural Attentive Circuits.” <i>36th Conference on Neural Information Processing Systems</i>, vol. 35, 2022.","short":"N. Rahaman, M. Weiss, F. Locatello, C. Pal, Y. Bengio, B. Schölkopf, L.E. Li, N. Ballas, in:, 36th Conference on Neural Information Processing Systems, 2022.","ama":"Rahaman N, Weiss M, Locatello F, et al. Neural attentive circuits. In: <i>36th Conference on Neural Information Processing Systems</i>. Vol 35. ; 2022.","apa":"Rahaman, N., Weiss, M., Locatello, F., Pal, C., Bengio, Y., Schölkopf, B., … Ballas, N. (2022). Neural attentive circuits. In <i>36th Conference on Neural Information Processing Systems</i> (Vol. 35). New Orleans, United States."},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-09-11T09:29:09Z","language":[{"iso":"eng"}],"external_id":{"arxiv":["2210.08031"]},"date_created":"2023-08-22T13:57:27Z","_id":"14168","date_published":"2022-10-14T00:00:00Z","abstract":[{"lang":"eng","text":"Recent work has seen the development of general purpose neural architectures\r\nthat can be trained to perform tasks across diverse data modalities. General\r\npurpose models typically make few assumptions about the underlying\r\ndata-structure and are known to perform well in the large-data regime. At the\r\nsame time, there has been growing interest in modular neural architectures that\r\nrepresent the data using sparsely interacting modules. These models can be more\r\nrobust out-of-distribution, computationally efficient, and capable of\r\nsample-efficient adaptation to new data. However, they tend to make\r\ndomain-specific assumptions about the data, and present challenges in how\r\nmodule behavior (i.e., parameterization) and connectivity (i.e., their layout)\r\ncan be jointly learned. In this work, we introduce a general purpose, yet\r\nmodular neural architecture called Neural Attentive Circuits (NACs) that\r\njointly learns the parameterization and a sparse connectivity of neural modules\r\nwithout using domain knowledge. NACs are best understood as the combination of\r\ntwo systems that are jointly trained end-to-end: one that determines the module\r\nconfiguration and the other that executes it on an input. We demonstrate\r\nqualitatively that NACs learn diverse and meaningful module configurations on\r\nthe NLVR2 dataset without additional supervision. Quantitatively, we show that\r\nby incorporating modularity in this way, NACs improve upon a strong non-modular\r\nbaseline in terms of low-shot adaptation on CIFAR and CUBs dataset by about\r\n10%, and OOD robustness on Tiny ImageNet-R by about 2.5%. Further, we find that\r\nNACs can achieve an 8x speedup at inference time while losing less than 3%\r\nperformance. 
Finally, we find NACs to yield competitive results on diverse data\r\nmodalities spanning point-cloud classification, symbolic processing and\r\ntext-classification from ASCII bytes, thereby confirming its general purpose\r\nnature."}],"extern":"1","month":"10","article_processing_charge":"No","arxiv":1,"volume":35,"status":"public","intvolume":"        35","department":[{"_id":"FrLo"}],"publication":"36th Conference on Neural Information Processing Systems","oa":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2210.08031"}],"publication_status":"published"},{"volume":2022,"oa":1,"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2107.00637"}],"publication_status":"submitted","date_published":"2022-07-22T00:00:00Z","_id":"14170","abstract":[{"lang":"eng","text":"The idea behind object-centric representation learning is that natural scenes can better be modeled as compositions of objects and their relations as opposed to distributed representations. This inductive bias can be injected into neural networks to potentially improve systematic generalization and performance of downstream tasks in scenes with multiple objects. In this paper, we train state-of-the-art unsupervised models on five common multi-object datasets and evaluate segmentation metrics and downstream object property prediction. In addition, we study generalization and robustness by investigating the settings where either a single object is out of distribution -- e.g., having an unseen color, texture, or shape -- or global properties of the scene are altered -- e.g., by occlusions, cropping, or increasing the number of objects. From our experimental study, we find object-centric representations to be useful for\r\ndownstream tasks and generally robust to most distribution shifts affecting objects. However, when the distribution shift affects the input in a less structured manner, robustness in terms of segmentation and downstream task performance may vary significantly across models and distribution shifts. "}],"article_processing_charge":"No","arxiv":1,"external_id":{"arxiv":["2107.00637"]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-09-11T10:08:14Z","oa_version":"Preprint","year":"2022","status":"public","intvolume":"      2022","publication":"Proceedings of the 39th International Conference on Machine Learning","department":[{"_id":"FrLo"}],"quality_controlled":"1","publisher":"ML Research Press","date_created":"2023-08-22T13:59:55Z","month":"07","extern":"1","page":"5221-5285","language":[{"iso":"eng"}],"author":[{"last_name":"Dittadi","first_name":"Andrea","full_name":"Dittadi, Andrea"},{"first_name":"Samuele","full_name":"Papa, Samuele","last_name":"Papa"},{"first_name":"Michele De","full_name":"Vita, Michele De","last_name":"Vita"},{"last_name":"Schölkopf","full_name":"Schölkopf, Bernhard","first_name":"Bernhard"},{"first_name":"Ole","full_name":"Winther, Ole","last_name":"Winther"},{"orcid":"0000-0002-4850-0683","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","last_name":"Locatello","full_name":"Locatello, Francesco","first_name":"Francesco"}],"type":"conference","alternative_title":["PMLR"],"day":"22","title":"Generalization and robustness implications in object-centric learning","conference":{"location":"Baltimore, MD, United States","end_date":"2022-07-23","start_date":"2022-07-17","name":"International Conference on Machine Learning"},"citation":{"ama":"Dittadi A, Papa S, Vita MD, Schölkopf B, Winther O, Locatello F. 
Generalization and robustness implications in object-centric learning. In: <i>Proceedings of the 39th International Conference on Machine Learning</i>. Vol 2022. ML Research Press; :5221-5285.","apa":"Dittadi, A., Papa, S., Vita, M. D., Schölkopf, B., Winther, O., &#38; Locatello, F. (n.d.). Generalization and robustness implications in object-centric learning. In <i>Proceedings of the 39th International Conference on Machine Learning</i> (Vol. 2022, pp. 5221–5285). Baltimore, MD, United States: ML Research Press.","short":"A. Dittadi, S. Papa, M.D. Vita, B. Schölkopf, O. Winther, F. Locatello, in:, Proceedings of the 39th International Conference on Machine Learning, ML Research Press, n.d., pp. 5221–5285.","mla":"Dittadi, Andrea, et al. “Generalization and Robustness Implications in Object-Centric Learning.” <i>Proceedings of the 39th International Conference on Machine Learning</i>, vol. 2022, ML Research Press, pp. 5221–85.","chicago":"Dittadi, Andrea, Samuele Papa, Michele De Vita, Bernhard Schölkopf, Ole Winther, and Francesco Locatello. “Generalization and Robustness Implications in Object-Centric Learning.” In <i>Proceedings of the 39th International Conference on Machine Learning</i>, 2022:5221–85. ML Research Press, n.d.","ieee":"A. Dittadi, S. Papa, M. D. Vita, B. Schölkopf, O. Winther, and F. Locatello, “Generalization and robustness implications in object-centric learning,” in <i>Proceedings of the 39th International Conference on Machine Learning</i>, Baltimore, MD, United States, vol. 2022, pp. 5221–5285.","ista":"Dittadi A, Papa S, Vita MD, Schölkopf B, Winther O, Locatello F. Generalization and robustness implications in object-centric learning. Proceedings of the 39th International Conference on Machine Learning. International Conference on Machine Learning, PMLR, vol. 2022, 5221–5285."}},{"publisher":"ML Research Press","publication":"Proceedings of the 39th International Conference on Machine Learning","quality_controlled":"1","department":[{"_id":"FrLo"}],"status":"public","intvolume":"       162","page":"18741-18753","month":"07","extern":"1","date_created":"2023-08-22T14:00:18Z","language":[{"iso":"eng"}],"citation":{"mla":"Rolland, Paul, et al. “Score Matching Enables Causal Discovery of Nonlinear Additive Noise Models.” <i>Proceedings of the 39th International Conference on Machine Learning</i>, vol. 162, ML Research Press, 2022, pp. 18741–53.","ista":"Rolland P, Cevher V, Kleindessner M, Russell C, Schölkopf B, Janzing D, Locatello F. 2022. Score matching enables causal discovery of nonlinear additive noise models. Proceedings of the 39th International Conference on Machine Learning. International Conference on Machine Learning, PMLR, vol. 162, 18741–18753.","ieee":"P. Rolland <i>et al.</i>, “Score matching enables causal discovery of nonlinear additive noise models,” in <i>Proceedings of the 39th International Conference on Machine Learning</i>, Baltimore, MD, United States, 2022, vol. 162, pp. 18741–18753.","chicago":"Rolland, Paul, Volkan Cevher, Matthäus Kleindessner, Chris Russell, Bernhard Schölkopf, Dominik Janzing, and Francesco Locatello. “Score Matching Enables Causal Discovery of Nonlinear Additive Noise Models.” In <i>Proceedings of the 39th International Conference on Machine Learning</i>, 162:18741–53. ML Research Press, 2022.","apa":"Rolland, P., Cevher, V., Kleindessner, M., Russell, C., Schölkopf, B., Janzing, D., &#38; Locatello, F. (2022). Score matching enables causal discovery of nonlinear additive noise models. 
In <i>Proceedings of the 39th International Conference on Machine Learning</i> (Vol. 162, pp. 18741–18753). Baltimore, MD, United States: ML Research Press.","ama":"Rolland P, Cevher V, Kleindessner M, et al. Score matching enables causal discovery of nonlinear additive noise  models. In: <i>Proceedings of the 39th International Conference on Machine Learning</i>. Vol 162. ML Research Press; 2022:18741-18753.","short":"P. Rolland, V. Cevher, M. Kleindessner, C. Russel, B. Schölkopf, D. Janzing, F. Locatello, in:, Proceedings of the 39th International Conference on Machine Learning, ML Research Press, 2022, pp. 18741–18753."},"title":"Score matching enables causal discovery of nonlinear additive noise  models","conference":{"name":"International Conference on Machine Learning","start_date":"2022-07-17","end_date":"2022-07-23","location":"Baltimore, MD, United States"},"day":"22","author":[{"last_name":"Rolland","first_name":"Paul","full_name":"Rolland, Paul"},{"full_name":"Cevher, Volkan","first_name":"Volkan","last_name":"Cevher"},{"first_name":"Matthäus","full_name":"Kleindessner, Matthäus","last_name":"Kleindessner"},{"last_name":"Russel","first_name":"Chris","full_name":"Russel, Chris"},{"full_name":"Schölkopf, Bernhard","first_name":"Bernhard","last_name":"Schölkopf"},{"full_name":"Janzing, Dominik","first_name":"Dominik","last_name":"Janzing"},{"last_name":"Locatello","full_name":"Locatello, Francesco","first_name":"Francesco","orcid":"0000-0002-4850-0683","id":"26cfd52f-2483-11ee-8040-88983bcc06d4"}],"type":"conference","alternative_title":["PMLR"],"publication_status":"published","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2203.04413"}],"oa":1,"volume":162,"arxiv":1,"article_processing_charge":"No","abstract":[{"lang":"eng","text":"This paper demonstrates how to recover causal graphs from the score of the\r\ndata distribution in non-linear additive (Gaussian) noise models. Using score\r\nmatching algorithms as a building block, we show how to design a new generation\r\nof scalable causal discovery methods. To showcase our approach, we also propose\r\na new efficient method for approximating the score's Jacobian, enabling to\r\nrecover the causal graph. Empirically, we find that the new algorithm, called\r\nSCORE, is competitive with state-of-the-art causal discovery methods while\r\nbeing significantly faster."}],"_id":"14171","date_published":"2022-07-22T00:00:00Z","external_id":{"arxiv":["2203.04413"]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-09-11T10:14:20Z","oa_version":"Preprint","year":"2022"},{"day":"25","oa_version":"Preprint","year":"2022","author":[{"full_name":"Schott, Lukas","first_name":"Lukas","last_name":"Schott"},{"first_name":"Julius von","full_name":"Kügelgen, Julius von","last_name":"Kügelgen"},{"last_name":"Träuble","full_name":"Träuble, Frederik","first_name":"Frederik"},{"last_name":"Gehler","full_name":"Gehler, Peter","first_name":"Peter"},{"last_name":"Russell","first_name":"Chris","full_name":"Russell, Chris"},{"first_name":"Matthias","full_name":"Bethge, Matthias","last_name":"Bethge"},{"last_name":"Schölkopf","full_name":"Schölkopf, Bernhard","first_name":"Bernhard"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco","first_name":"Francesco","last_name":"Locatello"},{"last_name":"Brendel","full_name":"Brendel, Wieland","first_name":"Wieland"}],"type":"conference","citation":{"short":"L. Schott, J. von Kügelgen, F. Träuble, P. Gehler, C. Russell, M. 
Bethge, B. Schölkopf, F. Locatello, W. Brendel, in:, 10th International Conference on Learning Representations, 2022.","apa":"Schott, L., Kügelgen, J. von, Träuble, F., Gehler, P., Russell, C., Bethge, M., … Brendel, W. (2022). Visual representation learning does not generalize strongly within the same domain. In <i>10th International Conference on Learning Representations</i>. Virtual.","ama":"Schott L, Kügelgen J von, Träuble F, et al. Visual representation learning does not generalize strongly within the same domain. In: <i>10th International Conference on Learning Representations</i>. ; 2022.","ista":"Schott L, Kügelgen J von, Träuble F, Gehler P, Russell C, Bethge M, Schölkopf B, Locatello F, Brendel W. 2022. Visual representation learning does not generalize strongly within the same domain. 10th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","ieee":"L. Schott <i>et al.</i>, “Visual representation learning does not generalize strongly within the same domain,” in <i>10th International Conference on Learning Representations</i>, Virtual, 2022.","chicago":"Schott, Lukas, Julius von Kügelgen, Frederik Träuble, Peter Gehler, Chris Russell, Matthias Bethge, Bernhard Schölkopf, Francesco Locatello, and Wieland Brendel. “Visual Representation Learning Does Not Generalize Strongly within the Same Domain.” In <i>10th International Conference on Learning Representations</i>, 2022.","mla":"Schott, Lukas, et al. “Visual Representation Learning Does Not Generalize Strongly within the Same Domain.” <i>10th International Conference on Learning Representations</i>, 2022."},"conference":{"start_date":"2022-04-25","end_date":"2022-04-29","name":"ICLR: International Conference on Learning Representations","location":"Virtual"},"title":"Visual representation learning does not generalize strongly within the same domain","date_updated":"2023-09-11T09:40:52Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","language":[{"iso":"eng"}],"external_id":{"arxiv":["2107.08221"]},"extern":"1","month":"04","_id":"14172","date_published":"2022-04-25T00:00:00Z","abstract":[{"lang":"eng","text":"An important component for generalization in machine learning is to uncover underlying latent factors of variation as well as the mechanism through which each factor acts in the world. In this paper, we test whether 17 unsupervised, weakly supervised, and fully supervised representation learning approaches correctly infer the generative factors of variation in simple datasets (dSprites, Shapes3D, MPI3D) from controlled environments, and on our contributed CelebGlow dataset. In contrast to prior robustness work that introduces novel factors of variation during test time, such as blur or other (un)structured noise, we here recompose, interpolate, or extrapolate only existing factors of variation from the training data set (e.g., small and medium-sized objects during training and large objects during testing). Models\r\nthat learn the correct mechanism should be able to generalize to this benchmark. In total, we train and test 2000+ models and observe that all of them struggle to learn the underlying mechanism regardless of supervision signal and architectural bias. Moreover, the generalization capabilities of all tested models drop significantly as we move from artificial datasets towards\r\nmore realistic real-world datasets. 
Despite their inability to identify the correct mechanism, the models are quite modular as their ability to infer other in-distribution factors remains fairly stable, providing only a single factor is out-of-distribution. These results point to an important yet understudied problem of learning mechanistic models of observations that can facilitate\r\ngeneralization."}],"date_created":"2023-08-22T14:00:50Z","arxiv":1,"article_processing_charge":"No","department":[{"_id":"FrLo"}],"quality_controlled":"1","publication":"10th International Conference on Learning Representations","status":"public","publication_status":"published","oa":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2107.08221"}]},{"year":"2022","oa_version":"Preprint","publication_identifier":{"isbn":["9781713871088"]},"external_id":{"arxiv":["2207.09239"]},"scopus_import":"1","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-09-06T10:34:43Z","article_processing_charge":"No","arxiv":1,"_id":"14173","date_published":"2022-12-15T00:00:00Z","abstract":[{"lang":"eng","text":"Since out-of-distribution generalization is a generally ill-posed problem, various proxy targets (e.g., calibration, adversarial robustness, algorithmic corruptions, invariance across shifts) were studied across different research programs resulting in different recommendations. While sharing the same aspirational goal, these approaches have never been tested under the same\r\nexperimental conditions on real data. In this paper, we take a unified view of previous work, highlighting message discrepancies that we address empirically, and providing recommendations on how to measure the robustness of a model and how to improve it. To this end, we collect 172 publicly available dataset pairs for training and out-of-distribution evaluation of accuracy, calibration error, adversarial attacks, environment invariance, and synthetic corruptions. We fine-tune over 31k networks, from nine different architectures in the many- and\r\nfew-shot setting. Our findings confirm that in- and out-of-distribution accuracies tend to increase jointly, but show that their relation is largely dataset-dependent, and in general more nuanced and more complex than posited by previous, smaller scale studies."}],"oa":1,"main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2207.09239"}],"publication_status":"published","volume":35,"title":"Assaying out-of-distribution generalization in transfer learning","conference":{"name":"NeurIPS: Neural Information Processing Systems","end_date":"2022-12-09","start_date":"2022-11-28","location":"New Orleans, LA, United States"},"citation":{"mla":"Wenzel, Florian, et al. “Assaying Out-of-Distribution Generalization in Transfer Learning.” <i>36th Conference on Neural Information Processing Systems</i>, vol. 35, Neural Information Processing Systems Foundation, 2022, pp. 7181–98.","ieee":"F. Wenzel <i>et al.</i>, “Assaying out-of-distribution generalization in transfer learning,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States, 2022, vol. 35, pp. 7181–7198.","ista":"Wenzel F, Dittadi A, Gehler PV, Simon-Gabriel C-J, Horn M, Zietlow D, Kernert D, Russell C, Brox T, Schiele B, Schölkopf B, Locatello F. 2022. Assaying out-of-distribution generalization in transfer learning. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems, Advances in Neural Information Processing Systems, vol. 
35, 7181–7198.","chicago":"Wenzel, Florian, Andrea Dittadi, Peter Vincent Gehler, Carl-Johann Simon-Gabriel Carl-Johann Simon-Gabriel, Max Horn, Dominik Zietlow, David Kernert, et al. “Assaying Out-of-Distribution Generalization in Transfer Learning.” In <i>36th Conference on Neural Information Processing Systems</i>, 35:7181–98. Neural Information Processing Systems Foundation, 2022.","apa":"Wenzel, F., Dittadi, A., Gehler, P. V., Carl-Johann Simon-Gabriel, C.-J. S.-G., Horn, M., Zietlow, D., … Locatello, F. (2022). Assaying out-of-distribution generalization in transfer learning. In <i>36th Conference on Neural Information Processing Systems</i> (Vol. 35, pp. 7181–7198). New Orleans, LA, United States: Neural Information Processing Systems Foundation.","ama":"Wenzel F, Dittadi A, Gehler PV, et al. Assaying out-of-distribution generalization in transfer learning. In: <i>36th Conference on Neural Information Processing Systems</i>. Vol 35. Neural Information Processing Systems Foundation; 2022:7181-7198.","short":"F. Wenzel, A. Dittadi, P.V. Gehler, C.-J.S.-G. Carl-Johann Simon-Gabriel, M. Horn, D. Zietlow, D. Kernert, C. Russell, T. Brox, B. Schiele, B. Schölkopf, F. Locatello, in:, 36th Conference on Neural Information Processing Systems, Neural Information Processing Systems Foundation, 2022, pp. 7181–7198."},"type":"conference","author":[{"first_name":"Florian","full_name":"Wenzel, Florian","last_name":"Wenzel"},{"last_name":"Dittadi","first_name":"Andrea","full_name":"Dittadi, Andrea"},{"last_name":"Gehler","full_name":"Gehler, Peter Vincent","first_name":"Peter Vincent"},{"full_name":"Carl-Johann Simon-Gabriel, Carl-Johann Simon-Gabriel","first_name":"Carl-Johann Simon-Gabriel","last_name":"Carl-Johann Simon-Gabriel"},{"last_name":"Horn","first_name":"Max","full_name":"Horn, Max"},{"last_name":"Zietlow","first_name":"Dominik","full_name":"Zietlow, Dominik"},{"first_name":"David","full_name":"Kernert, David","last_name":"Kernert"},{"last_name":"Russell","full_name":"Russell, Chris","first_name":"Chris"},{"full_name":"Brox, Thomas","first_name":"Thomas","last_name":"Brox"},{"full_name":"Schiele, Bernt","first_name":"Bernt","last_name":"Schiele"},{"last_name":"Schölkopf","first_name":"Bernhard","full_name":"Schölkopf, Bernhard"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","last_name":"Locatello","full_name":"Locatello, Francesco","first_name":"Francesco"}],"alternative_title":["Advances in Neural Information Processing Systems"],"day":"15","language":[{"iso":"eng"}],"page":"7181-7198","date_created":"2023-08-22T14:01:13Z","month":"12","extern":"1","publisher":"Neural Information Processing Systems Foundation","status":"public","intvolume":"        35","publication":"36th Conference on Neural Information Processing Systems","quality_controlled":"1","department":[{"_id":"FrLo"}]},{"day":"25","oa_version":"Preprint","year":"2022","type":"conference","author":[{"last_name":"Dittadi","full_name":"Dittadi, Andrea","first_name":"Andrea"},{"full_name":"Träuble, Frederik","first_name":"Frederik","last_name":"Träuble"},{"full_name":"Wüthrich, Manuel","first_name":"Manuel","last_name":"Wüthrich"},{"first_name":"Felix","full_name":"Widmaier, Felix","last_name":"Widmaier"},{"full_name":"Gehler, Peter","first_name":"Peter","last_name":"Gehler"},{"last_name":"Winther","first_name":"Ole","full_name":"Winther, Ole"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","last_name":"Locatello","first_name":"Francesco","full_name":"Locatello, 
Francesco"},{"first_name":"Olivier","full_name":"Bachem, Olivier","last_name":"Bachem"},{"last_name":"Schölkopf","first_name":"Bernhard","full_name":"Schölkopf, Bernhard"},{"full_name":"Bauer, Stefan","first_name":"Stefan","last_name":"Bauer"}],"citation":{"short":"A. Dittadi, F. Träuble, M. Wüthrich, F. Widmaier, P. Gehler, O. Winther, F. Locatello, O. Bachem, B. Schölkopf, S. Bauer, in:, 10th International Conference on Learning Representations, 2022.","apa":"Dittadi, A., Träuble, F., Wüthrich, M., Widmaier, F., Gehler, P., Winther, O., … Bauer, S. (2022). The role of pretrained representations for the OOD generalization of  reinforcement learning agents. In <i>10th International Conference on Learning Representations</i>. Virtual.","ama":"Dittadi A, Träuble F, Wüthrich M, et al. The role of pretrained representations for the OOD generalization of  reinforcement learning agents. In: <i>10th International Conference on Learning Representations</i>. ; 2022.","ista":"Dittadi A, Träuble F, Wüthrich M, Widmaier F, Gehler P, Winther O, Locatello F, Bachem O, Schölkopf B, Bauer S. 2022. The role of pretrained representations for the OOD generalization of  reinforcement learning agents. 10th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","ieee":"A. Dittadi <i>et al.</i>, “The role of pretrained representations for the OOD generalization of  reinforcement learning agents,” in <i>10th International Conference on Learning Representations</i>, Virtual, 2022.","chicago":"Dittadi, Andrea, Frederik Träuble, Manuel Wüthrich, Felix Widmaier, Peter Gehler, Ole Winther, Francesco Locatello, Olivier Bachem, Bernhard Schölkopf, and Stefan Bauer. “The Role of Pretrained Representations for the OOD Generalization of  Reinforcement Learning Agents.” In <i>10th International Conference on Learning Representations</i>, 2022.","mla":"Dittadi, Andrea, et al. “The Role of Pretrained Representations for the OOD Generalization of  Reinforcement Learning Agents.” <i>10th International Conference on Learning Representations</i>, 2022."},"conference":{"location":"Virtual","end_date":"2022-04-29","start_date":"2022-04-25","name":"ICLR: International Conference on Learning Representations"},"title":"The role of pretrained representations for the OOD generalization of  reinforcement learning agents","language":[{"iso":"eng"}],"date_updated":"2023-09-11T09:48:36Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","external_id":{"arxiv":["2107.05686"]},"extern":"1","month":"04","_id":"14174","abstract":[{"lang":"eng","text":"Building sample-efficient agents that generalize out-of-distribution (OOD) in real-world settings remains a fundamental unsolved problem on the path towards achieving higher-level cognition. One particularly promising approach is to begin with low-dimensional, pretrained representations of our world, which should facilitate efficient downstream learning and generalization. By training 240 representations and over 10,000 reinforcement learning (RL) policies on a simulated robotic setup, we evaluate to what extent different properties of\r\npretrained VAE-based representations affect the OOD generalization of downstream agents. We observe that many agents are surprisingly robust to realistic distribution shifts, including the challenging sim-to-real case. 
In addition, we find that the generalization performance of a simple downstream proxy task reliably predicts the generalization performance of our RL agents\r\nunder a wide range of OOD settings. Such proxy tasks can thus be used to select pretrained representations that will lead to agents that generalize."}],"date_created":"2023-08-22T14:02:13Z","date_published":"2022-04-25T00:00:00Z","arxiv":1,"article_processing_charge":"No","department":[{"_id":"FrLo"}],"quality_controlled":"1","publication":"10th International Conference on Learning Representations","status":"public","publication_status":"published","oa":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2107.05686"}]},{"department":[{"_id":"FrLo"}],"quality_controlled":"1","publication":"10th International Conference on Learning Representations","status":"public","publication_status":"published","oa":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2110.05304"}],"extern":"1","month":"04","_id":"14175","date_created":"2023-08-22T14:02:34Z","date_published":"2022-04-25T00:00:00Z","abstract":[{"text":"Predicting the future trajectory of a moving agent can be easy when the past trajectory continues smoothly but is challenging when complex interactions with other agents are involved. Recent deep learning approaches for trajectory prediction show promising performance and partially attribute this to successful reasoning about agent-agent interactions. However, it remains unclear which features such black-box models actually learn to use for making predictions. This paper proposes a procedure that quantifies the contributions\r\nof different cues to model performance based on a variant of Shapley values. Applying this procedure to state-of-the-art trajectory prediction methods on standard benchmark datasets shows that they are, in fact, unable to reason about interactions. Instead, the past trajectory of the target is the only feature used for predicting its future. For a task with richer social\r\ninteraction patterns, on the other hand, the tested models do pick up such interactions to a certain extent, as quantified by our feature attribution method. We discuss the limits of the proposed method and its links to causality.","lang":"eng"}],"arxiv":1,"article_processing_charge":"No","language":[{"iso":"eng"}],"date_updated":"2023-09-11T09:52:20Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","external_id":{"arxiv":["2110.05304"]},"day":"25","oa_version":"Preprint","year":"2022","type":"conference","author":[{"last_name":"Makansi","first_name":"Osama","full_name":"Makansi, Osama"},{"last_name":"Kügelgen","full_name":"Kügelgen, Julius von","first_name":"Julius von"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco","first_name":"Francesco","last_name":"Locatello"},{"last_name":"Gehler","full_name":"Gehler, Peter","first_name":"Peter"},{"last_name":"Janzing","first_name":"Dominik","full_name":"Janzing, Dominik"},{"last_name":"Brox","full_name":"Brox, Thomas","first_name":"Thomas"},{"last_name":"Schölkopf","full_name":"Schölkopf, Bernhard","first_name":"Bernhard"}],"citation":{"chicago":"Makansi, Osama, Julius von Kügelgen, Francesco Locatello, Peter Gehler, Dominik Janzing, Thomas Brox, and Bernhard Schölkopf. 
“You Mostly Walk Alone: Analyzing Feature Attribution in Trajectory Prediction.” In <i>10th International Conference on Learning Representations</i>, 2022.","ista":"Makansi O, Kügelgen J von, Locatello F, Gehler P, Janzing D, Brox T, Schölkopf B. 2022. You mostly walk alone: Analyzing feature attribution in trajectory prediction. 10th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","ieee":"O. Makansi <i>et al.</i>, “You mostly walk alone: Analyzing feature attribution in trajectory prediction,” in <i>10th International Conference on Learning Representations</i>, Virtual, 2022.","mla":"Makansi, Osama, et al. “You Mostly Walk Alone: Analyzing Feature Attribution in Trajectory Prediction.” <i>10th International Conference on Learning Representations</i>, 2022.","short":"O. Makansi, J. von Kügelgen, F. Locatello, P. Gehler, D. Janzing, T. Brox, B. Schölkopf, in:, 10th International Conference on Learning Representations, 2022.","ama":"Makansi O, Kügelgen J von, Locatello F, et al. You mostly walk alone: Analyzing feature attribution in trajectory prediction. In: <i>10th International Conference on Learning Representations</i>. ; 2022.","apa":"Makansi, O., Kügelgen, J. von, Locatello, F., Gehler, P., Janzing, D., Brox, T., &#38; Schölkopf, B. (2022). You mostly walk alone: Analyzing feature attribution in trajectory prediction. In <i>10th International Conference on Learning Representations</i>. Virtual."},"conference":{"location":"Virtual","start_date":"2022-04-25","end_date":"2022-04-29","name":"ICLR: International Conference on Learning Representations"},"title":"You mostly walk alone: Analyzing feature attribution in trajectory prediction"},{"author":[{"full_name":"Rahaman, Nasim","first_name":"Nasim","last_name":"Rahaman"},{"full_name":"Weiss, Martin","first_name":"Martin","last_name":"Weiss"},{"last_name":"Träuble","first_name":"Frederik","full_name":"Träuble, Frederik"},{"last_name":"Locatello","full_name":"Locatello, Francesco","first_name":"Francesco","orcid":"0000-0002-4850-0683","id":"26cfd52f-2483-11ee-8040-88983bcc06d4"},{"full_name":"Lacoste, Alexandre","first_name":"Alexandre","last_name":"Lacoste"},{"last_name":"Bengio","full_name":"Bengio, Yoshua","first_name":"Yoshua"},{"full_name":"Pal, Chris","first_name":"Chris","last_name":"Pal"},{"full_name":"Li, Li Erran","first_name":"Li Erran","last_name":"Li"},{"first_name":"Bernhard","full_name":"Schölkopf, Bernhard","last_name":"Schölkopf"}],"type":"conference","year":"2022","oa_version":"Preprint","day":"04","title":"A general purpose neural architecture for geospatial systems","conference":{"end_date":"2022-12-09","start_date":"2022-11-28","name":"NeurIPS: Neural Information Processing Systems","location":"New Orleans, LA, United States"},"citation":{"short":"N. Rahaman, M. Weiss, F. Träuble, F. Locatello, A. Lacoste, Y. Bengio, C. Pal, L.E. Li, B. Schölkopf, in:, 36th Conference on Neural Information Processing Systems, n.d.","apa":"Rahaman, N., Weiss, M., Träuble, F., Locatello, F., Lacoste, A., Bengio, Y., … Schölkopf, B. (n.d.). A general purpose neural architecture for geospatial systems. In <i>36th Conference on Neural Information Processing Systems</i>. New Orleans, LA, United States.","ama":"Rahaman N, Weiss M, Träuble F, et al. A general purpose neural architecture for geospatial systems. In: <i>36th Conference on Neural Information Processing Systems</i>.","ista":"Rahaman N, Weiss M, Träuble F, Locatello F, Lacoste A, Bengio Y, Pal C, Li LE, Schölkopf B. 
A general purpose neural architecture for geospatial systems. 36th Conference on Neural Information Processing Systems. NeurIPS: Neural Information Processing Systems.","ieee":"N. Rahaman <i>et al.</i>, “A general purpose neural architecture for geospatial systems,” in <i>36th Conference on Neural Information Processing Systems</i>, New Orleans, LA, United States.","chicago":"Rahaman, Nasim, Martin Weiss, Frederik Träuble, Francesco Locatello, Alexandre Lacoste, Yoshua Bengio, Chris Pal, Li Erran Li, and Bernhard Schölkopf. “A General Purpose Neural Architecture for Geospatial Systems.” In <i>36th Conference on Neural Information Processing Systems</i>, n.d.","mla":"Rahaman, Nasim, et al. “A General Purpose Neural Architecture for Geospatial Systems.” <i>36th Conference on Neural Information Processing Systems</i>."},"external_id":{"arxiv":["2211.02348"]},"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-09-13T09:35:59Z","language":[{"iso":"eng"}],"date_created":"2023-08-22T14:21:47Z","_id":"14215","abstract":[{"lang":"eng","text":"Geospatial Information Systems are used by researchers and Humanitarian Assistance and Disaster Response (HADR) practitioners to support a wide variety of important applications. However, collaboration between these actors is difficult due to the heterogeneous nature of geospatial data modalities (e.g., multi-spectral images of various resolutions, timeseries, weather data) and diversity of tasks (e.g., regression of human activity indicators or detecting forest fires). In this work, we present a roadmap towards the construction of a general-purpose neural architecture (GPNA) with a geospatial inductive bias, pre-trained on large amounts of unlabelled earth observation data in a self-supervised manner. We envision how such a model may facilitate cooperation between members of the community. We show preliminary results on the first step of the roadmap, where we instantiate an architecture that can process a wide variety of geospatial data modalities and demonstrate that it can achieve competitive performance with domain-specific architectures on tasks relating to the U.N.'s Sustainable Development Goals."}],"date_published":"2022-11-04T00:00:00Z","month":"11","extern":"1","article_processing_charge":"No","arxiv":1,"status":"public","publication":"36th Conference on Neural Information Processing Systems","quality_controlled":"1","department":[{"_id":"FrLo"}],"oa":1,"main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2211.02348","open_access":"1"}],"publication_status":"submitted"},{"department":[{"_id":"FrLo"}],"publication":"arXiv","status":"public","publication_status":"submitted","oa":1,"main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2210.01738"}],"article_number":"2210.01738","month":"10","date_created":"2023-08-22T14:22:04Z","_id":"14216","abstract":[{"text":"CLIP proved that aligning visual and language spaces is key to solving many vision tasks without explicit training, but required to train image and text encoders from scratch on a huge dataset. LiT improved this by only training the text encoder and using a pre-trained vision network. In this paper, we show that a common space can be created without any training at all, using single-domain encoders (trained with or without supervision) and a much smaller amount of image-text pairs. Furthermore, our model has unique properties. Most notably, deploying a new version with updated training samples can be done in a matter of seconds. 
Additionally, the representations in the common space are easily interpretable as every dimension corresponds to the similarity of the input to a unique entry in the multimodal dataset. Experiments on standard zero-shot visual benchmarks demonstrate the typical transfer ability of image-text models. Overall, our method represents a simple yet surprisingly strong baseline for foundation multi-modal models, raising important questions on their data efficiency and on the role of retrieval in machine learning.","lang":"eng"}],"date_published":"2022-10-04T00:00:00Z","arxiv":1,"article_processing_charge":"No","date_updated":"2024-02-12T09:57:14Z","language":[{"iso":"eng"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","external_id":{"arxiv":["2210.01738"]},"doi":"10.48550/arXiv.2210.01738","day":"04","oa_version":"Preprint","year":"2022","type":"preprint","author":[{"full_name":"Norelli, Antonio","first_name":"Antonio","last_name":"Norelli"},{"last_name":"Fumero","first_name":"Marco","full_name":"Fumero, Marco"},{"last_name":"Maiorca","first_name":"Valentino","full_name":"Maiorca, Valentino"},{"first_name":"Luca","full_name":"Moschella, Luca","last_name":"Moschella"},{"last_name":"Rodolà","full_name":"Rodolà, Emanuele","first_name":"Emanuele"},{"orcid":"0000-0002-4850-0683","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","last_name":"Locatello","first_name":"Francesco","full_name":"Locatello, Francesco"}],"citation":{"chicago":"Norelli, Antonio, Marco Fumero, Valentino Maiorca, Luca Moschella, Emanuele Rodolà, and Francesco Locatello. “ASIF: Coupled Data Turns Unimodal Models to Multimodal without Training.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2210.01738\">https://doi.org/10.48550/arXiv.2210.01738</a>.","ieee":"A. Norelli, M. Fumero, V. Maiorca, L. Moschella, E. Rodolà, and F. Locatello, “ASIF: Coupled data turns unimodal models to multimodal without training,” <i>arXiv</i>. .","ista":"Norelli A, Fumero M, Maiorca V, Moschella L, Rodolà E, Locatello F. ASIF: Coupled data turns unimodal models to multimodal without training. arXiv, 2210.01738.","mla":"Norelli, Antonio, et al. “ASIF: Coupled Data Turns Unimodal Models to Multimodal without Training.” <i>ArXiv</i>, 2210.01738, doi:<a href=\"https://doi.org/10.48550/arXiv.2210.01738\">10.48550/arXiv.2210.01738</a>.","short":"A. Norelli, M. Fumero, V. Maiorca, L. Moschella, E. Rodolà, F. Locatello, ArXiv (n.d.).","ama":"Norelli A, Fumero M, Maiorca V, Moschella L, Rodolà E, Locatello F. ASIF: Coupled data turns unimodal models to multimodal without training. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2210.01738\">10.48550/arXiv.2210.01738</a>","apa":"Norelli, A., Fumero, M., Maiorca, V., Moschella, L., Rodolà, E., &#38; Locatello, F. (n.d.). ASIF: Coupled data turns unimodal models to multimodal without training. <i>arXiv</i>. 
<a href=\"https://doi.org/10.48550/arXiv.2210.01738\">https://doi.org/10.48550/arXiv.2210.01738</a>"},"title":"ASIF: Coupled data turns unimodal models to multimodal without training"},{"day":"31","oa_version":"Preprint","year":"2022","type":"preprint","author":[{"last_name":"Mambelli","full_name":"Mambelli, Davide","first_name":"Davide"},{"first_name":"Frederik","full_name":"Träuble, Frederik","last_name":"Träuble"},{"last_name":"Bauer","first_name":"Stefan","full_name":"Bauer, Stefan"},{"full_name":"Schölkopf, Bernhard","first_name":"Bernhard","last_name":"Schölkopf"},{"full_name":"Locatello, Francesco","first_name":"Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683","id":"26cfd52f-2483-11ee-8040-88983bcc06d4"}],"citation":{"short":"D. Mambelli, F. Träuble, S. Bauer, B. Schölkopf, F. Locatello, ArXiv (n.d.).","apa":"Mambelli, D., Träuble, F., Bauer, S., Schölkopf, B., &#38; Locatello, F. (n.d.). Compositional multi-object reinforcement learning with linear relation networks. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2201.13388\">https://doi.org/10.48550/arXiv.2201.13388</a>","ama":"Mambelli D, Träuble F, Bauer S, Schölkopf B, Locatello F. Compositional multi-object reinforcement learning with linear relation networks. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2201.13388\">10.48550/arXiv.2201.13388</a>","ieee":"D. Mambelli, F. Träuble, S. Bauer, B. Schölkopf, and F. Locatello, “Compositional multi-object reinforcement learning with linear relation networks,” <i>arXiv</i>. .","ista":"Mambelli D, Träuble F, Bauer S, Schölkopf B, Locatello F. Compositional multi-object reinforcement learning with linear relation networks. arXiv, 2201.13388.","chicago":"Mambelli, Davide, Frederik Träuble, Stefan Bauer, Bernhard Schölkopf, and Francesco Locatello. “Compositional Multi-Object Reinforcement Learning with Linear Relation Networks.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2201.13388\">https://doi.org/10.48550/arXiv.2201.13388</a>.","mla":"Mambelli, Davide, et al. “Compositional Multi-Object Reinforcement Learning with Linear Relation Networks.” <i>ArXiv</i>, 2201.13388, doi:<a href=\"https://doi.org/10.48550/arXiv.2201.13388\">10.48550/arXiv.2201.13388</a>."},"title":"Compositional multi-object reinforcement learning with linear relation networks","date_updated":"2023-09-11T11:49:40Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","language":[{"iso":"eng"}],"external_id":{"arxiv":["2201.13388"]},"doi":"10.48550/arXiv.2201.13388","extern":"1","article_number":"2201.13388","month":"01","date_published":"2022-01-31T00:00:00Z","_id":"14220","date_created":"2023-08-22T14:23:16Z","abstract":[{"lang":"eng","text":"Although reinforcement learning has seen remarkable progress over the last years, solving robust dexterous object-manipulation tasks in multi-object settings remains a challenge. In this paper, we focus on models that can learn manipulation tasks in fixed multi-object settings and extrapolate this skill zero-shot without any drop in performance when the number of objects changes. We consider the generic task of bringing a specific cube out of a set to a goal position. We find that previous approaches, which primarily leverage attention and graph neural network-based architectures, do not generalize their skills when the number of input objects changes while scaling as K^2. We propose an alternative plug-and-play module based on relational inductive biases to overcome these limitations. 
Besides exceeding performance in their training environment, we show that our approach, which scales linearly in K, allows agents to extrapolate and generalize zero-shot to any new object number."}],"arxiv":1,"article_processing_charge":"No","department":[{"_id":"FrLo"}],"publication":"arXiv","status":"public","publication_status":"submitted","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2201.13388"}],"oa":1},{"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-09-04T08:31:19Z","language":[{"iso":"eng"}],"external_id":{"arxiv":["2211.09606"]},"doi":"10.48550/arXiv.2211.09606","day":"17","oa_version":"Preprint","year":"2022","author":[{"first_name":"Gramoz","full_name":"Goranci, Gramoz","last_name":"Goranci"},{"last_name":"Henzinger","full_name":"Henzinger, Monika H","first_name":"Monika H","orcid":"0000-0002-5008-6530","id":"540c9bbd-f2de-11ec-812d-d04a5be85630"}],"type":"preprint","citation":{"mla":"Goranci, Gramoz, and Monika H. Henzinger. “Incremental Approximate Maximum Flow in m^{1/2+o(1)} Update Time.” <i>ArXiv</i>, 2211.09606, doi:<a href=\"https://doi.org/10.48550/arXiv.2211.09606\">10.48550/arXiv.2211.09606</a>.","ieee":"G. Goranci and M. H. Henzinger, “Incremental approximate maximum flow in m^{1/2+o(1)} update time,” <i>arXiv</i>. .","ista":"Goranci G, Henzinger MH. Incremental approximate maximum flow in m^{1/2+o(1)} update time. arXiv, 2211.09606.","chicago":"Goranci, Gramoz, and Monika H Henzinger. “Incremental Approximate Maximum Flow in m^{1/2+o(1)} Update Time.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2211.09606\">https://doi.org/10.48550/arXiv.2211.09606</a>.","apa":"Goranci, G., &#38; Henzinger, M. H. (n.d.). Incremental approximate maximum flow in m^{1/2+o(1)} update time. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2211.09606\">https://doi.org/10.48550/arXiv.2211.09606</a>","ama":"Goranci G, Henzinger MH. Incremental approximate maximum flow in m^{1/2+o(1)} update time. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2211.09606\">10.48550/arXiv.2211.09606</a>","short":"G. Goranci, M.H. Henzinger, ArXiv (n.d.)."},"title":"Incremental approximate maximum flow in m^{1/2+o(1)} update time","publication":"arXiv","status":"public","publication_status":"submitted","main_file_link":[{"open_access":"1","url":"https://doi.org/10.48550/arXiv.2211.09606"}],"oa":1,"extern":"1","article_number":"2211.09606","month":"11","_id":"14236","date_published":"2022-11-17T00:00:00Z","abstract":[{"lang":"eng","text":"We show a $(1+\\epsilon)$-approximation algorithm for maintaining maximum $s$-$t$ flow under $m$ edge insertions in $m^{1/2+o(1)} \\epsilon^{-1/2}$ amortized update time for directed, unweighted graphs. This constitutes the first sublinear dynamic maximum flow algorithm in general sparse graphs with an arbitrarily good approximation guarantee."}],"date_created":"2023-08-25T15:04:29Z","arxiv":1,"article_processing_charge":"No"},{"day":"13","type":"journal_article","author":[{"last_name":"Holmes","full_name":"Holmes, Daniel","first_name":"Daniel","id":"3a443b4c-080d-11ed-979a-feb062bdcee0"}],"citation":{"short":"D. Holmes, PUMP Journal of Undergraduate Research 5 (2022) 24–51.","ama":"Holmes D. Affine dimers from characteristic polygons. <i>PUMP Journal of Undergraduate Research</i>. 2022;5:24-51.","apa":"Holmes, D. (2022). Affine dimers from characteristic polygons. <i>PUMP Journal of Undergraduate Research</i>. California State University.","chicago":"Holmes, Daniel. 
“Affine Dimers from Characteristic Polygons.” <i>PUMP Journal of Undergraduate Research</i>. California State University, 2022.","ieee":"D. Holmes, “Affine dimers from characteristic polygons,” <i>PUMP Journal of Undergraduate Research</i>, vol. 5. California State University, pp. 24–51, 2022.","ista":"Holmes D. 2022. Affine dimers from characteristic polygons. PUMP Journal of Undergraduate Research. 5, 24–51.","mla":"Holmes, Daniel. “Affine Dimers from Characteristic Polygons.” <i>PUMP Journal of Undergraduate Research</i>, vol. 5, California State University, 2022, pp. 24–51."},"title":"Affine dimers from characteristic polygons","language":[{"iso":"eng"}],"keyword":["dimer model","hyperplane arrangement","torus","lattice polygon"],"extern":"1","month":"02","date_created":"2023-08-29T13:08:09Z","page":"24-51","quality_controlled":"1","publication":"PUMP Journal of Undergraduate Research","intvolume":"         5","status":"public","publisher":"California State University","article_type":"original","oa_version":"Published Version","year":"2022","date_updated":"2023-09-05T09:28:32Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","external_id":{"arxiv":["2110.01703"]},"publication_identifier":{"issn":["2576-3725"]},"abstract":[{"lang":"eng","text":"Recent work by Forsgård indicates that not every convex lattice polygon arises as the characteristic polygon of an affine dimer or, equivalently, an admissible oriented line arrangement on the torus in general position. We begin the classification of convex lattice polygons arising as characteristic polygons of affine dimers. We present several general constructions of new affine dimers from old, and an algorithm for finding affine dimers with a prescribed polygon.\r\n\r\nWith these tools we prove that all lattice triangles, generalised parallelograms, and polygons of genus at most two admit an affine dimer."}],"_id":"14248","date_published":"2022-02-13T00:00:00Z","arxiv":1,"article_processing_charge":"No","volume":5,"publication_status":"published","main_file_link":[{"open_access":"1","url":"https://journals.calstate.edu/pump/article/view/2711"}],"oa":1},{"pmid":1,"language":[{"iso":"eng"}],"doi":"10.1126/science.abj7662","citation":{"mla":"Sahtoe, Danny D., et al. “Reconfigurable Asymmetric Protein Assemblies through Implicit Negative Design.” <i>Science</i>, vol. 375, no. 6578, abj7662, American Association for the Advancement of Science, 2022, doi:<a href=\"https://doi.org/10.1126/science.abj7662\">10.1126/science.abj7662</a>.","chicago":"Sahtoe, Danny D., Florian M Praetorius, Alexis Courbet, Yang Hsia, Basile I. M. Wicky, Natasha I. Edman, Lauren M. Miller, et al. “Reconfigurable Asymmetric Protein Assemblies through Implicit Negative Design.” <i>Science</i>. American Association for the Advancement of Science, 2022. <a href=\"https://doi.org/10.1126/science.abj7662\">https://doi.org/10.1126/science.abj7662</a>.","ieee":"D. D. Sahtoe <i>et al.</i>, “Reconfigurable asymmetric protein assemblies through implicit negative design,” <i>Science</i>, vol. 375, no. 6578. American Association for the Advancement of Science, 2022.","ista":"Sahtoe DD, Praetorius FM, Courbet A, Hsia Y, Wicky BIM, Edman NI, Miller LM, Timmermans BJR, Decarreau J, Morris HM, Kang A, Bera AK, Baker D. 2022. Reconfigurable asymmetric protein assemblies through implicit negative design. Science. 375(6578), abj7662.","ama":"Sahtoe DD, Praetorius FM, Courbet A, et al. Reconfigurable asymmetric protein assemblies through implicit negative design. <i>Science</i>. 2022;375(6578). 
doi:<a href=\"https://doi.org/10.1126/science.abj7662\">10.1126/science.abj7662</a>","apa":"Sahtoe, D. D., Praetorius, F. M., Courbet, A., Hsia, Y., Wicky, B. I. M., Edman, N. I., … Baker, D. (2022). Reconfigurable asymmetric protein assemblies through implicit negative design. <i>Science</i>. American Association for the Advancement of Science. <a href=\"https://doi.org/10.1126/science.abj7662\">https://doi.org/10.1126/science.abj7662</a>","short":"D.D. Sahtoe, F.M. Praetorius, A. Courbet, Y. Hsia, B.I.M. Wicky, N.I. Edman, L.M. Miller, B.J.R. Timmermans, J. Decarreau, H.M. Morris, A. Kang, A.K. Bera, D. Baker, Science 375 (2022)."},"title":"Reconfigurable asymmetric protein assemblies through implicit negative design","day":"21","type":"journal_article","author":[{"last_name":"Sahtoe","full_name":"Sahtoe, Danny D.","first_name":"Danny D."},{"last_name":"Praetorius","full_name":"Praetorius, Florian M","first_name":"Florian M","id":"dfec9381-4341-11ee-8fd8-faa02bba7d62"},{"last_name":"Courbet","full_name":"Courbet, Alexis","first_name":"Alexis"},{"last_name":"Hsia","first_name":"Yang","full_name":"Hsia, Yang"},{"first_name":"Basile I. M.","full_name":"Wicky, Basile I. M.","last_name":"Wicky"},{"last_name":"Edman","full_name":"Edman, Natasha I.","first_name":"Natasha I."},{"last_name":"Miller","full_name":"Miller, Lauren M.","first_name":"Lauren M."},{"last_name":"Timmermans","full_name":"Timmermans, Bart J. R.","first_name":"Bart J. R."},{"last_name":"Decarreau","first_name":"Justin","full_name":"Decarreau, Justin"},{"full_name":"Morris, Hana M.","first_name":"Hana M.","last_name":"Morris"},{"last_name":"Kang","full_name":"Kang, Alex","first_name":"Alex"},{"last_name":"Bera","full_name":"Bera, Asim K.","first_name":"Asim K."},{"first_name":"David","full_name":"Baker, David","last_name":"Baker"}],"publisher":"American Association for the Advancement of Science","quality_controlled":"1","publication":"Science","status":"public","intvolume":"       375","extern":"1","month":"01","date_created":"2023-09-06T12:05:42Z","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-11-07T12:39:56Z","scopus_import":"1","external_id":{"pmid":["35050655"]},"publication_identifier":{"eissn":["1095-9203"],"issn":["0036-8075"]},"article_type":"original","oa_version":"None","year":"2022","publication_status":"published","volume":375,"issue":"6578","article_processing_charge":"No","article_number":"abj7662","date_published":"2022-01-21T00:00:00Z","_id":"14282","abstract":[{"text":"Asymmetric multiprotein complexes that undergo subunit exchange play central roles in biology but present a challenge for design because the components must not only contain interfaces that enable reversible association but also be stable and well behaved in isolation. We use implicit negative design to generate β sheet–mediated heterodimers that can be assembled into a wide variety of complexes. The designs are stable, folded, and soluble in isolation and rapidly assemble upon mixing, and crystal structures are close to the computational models. We construct linearly arranged hetero-oligomers with up to six different components, branched hetero-oligomers, closed C4-symmetric two-component rings, and hetero-oligomers assembled on a cyclic homo-oligomeric central hub and demonstrate that such complexes can readily reconfigure through subunit exchange. 
Our approach provides a general route to designing asymmetric reconfigurable protein systems.","lang":"eng"}]},{"publication_identifier":{"issn":["1937-0652"],"eissn":["1944-7833"]},"scopus_import":"1","external_id":{"isi":["000961514100004"],"arxiv":["2102.11552"]},"user_id":"4359f0d1-fa6c-11eb-b949-802e58b17ae8","date_updated":"2023-08-02T06:46:38Z","year":"2022","oa_version":"Preprint","article_type":"original","volume":16,"oa":1,"main_file_link":[{"url":"https://arxiv.org/abs/2102.11552","open_access":"1"}],"publication_status":"published","_id":"9199","abstract":[{"text":"We associate a certain tensor product lattice to any primitive integer lattice and ask about its typical shape. These lattices are related to the tangent bundle of Grassmannians and their study is motivated by Peyre's programme on \"freeness\" for rational points of bounded height on Fano\r\nvarieties.","lang":"eng"}],"date_published":"2022-12-01T00:00:00Z","article_processing_charge":"No","issue":"10","arxiv":1,"doi":"10.2140/ant.2022.16.2385","language":[{"iso":"eng"}],"project":[{"_id":"26A8D266-B435-11E9-9278-68D0E5697425","name":"Between rational and integral points","grant_number":"EP-P026710-2"},{"_id":"26AEDAB2-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","name":"New frontiers of the Manin conjecture","grant_number":"P32428"}],"acknowledgement":"The authors are very grateful to Will Sawin for useful remarks about this topic. While working on this paper the first two authors were supported by EPSRC grant EP/P026710/1, and the first and last authors by FWF grant P 32428-N35.","author":[{"first_name":"Timothy D","full_name":"Browning, Timothy D","last_name":"Browning","id":"35827D50-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0002-8314-0177"},{"last_name":"Horesh","full_name":"Horesh, Tal","first_name":"Tal","id":"C8B7BF48-8D81-11E9-BCA9-F536E6697425"},{"orcid":"0000-0001-7302-8256","id":"560601DA-8D36-11E9-A136-7AC1E5697425","last_name":"Wilsch","full_name":"Wilsch, Florian Alexander","first_name":"Florian Alexander"}],"type":"journal_article","day":"01","title":"Equidistribution and freeness on Grassmannians","citation":{"ama":"Browning TD, Horesh T, Wilsch FA. Equidistribution and freeness on Grassmannians. <i>Algebra &#38; Number Theory</i>. 2022;16(10):2385-2407. doi:<a href=\"https://doi.org/10.2140/ant.2022.16.2385\">10.2140/ant.2022.16.2385</a>","apa":"Browning, T. D., Horesh, T., &#38; Wilsch, F. A. (2022). Equidistribution and freeness on Grassmannians. <i>Algebra &#38; Number Theory</i>. Mathematical Sciences Publishers. <a href=\"https://doi.org/10.2140/ant.2022.16.2385\">https://doi.org/10.2140/ant.2022.16.2385</a>","short":"T.D. Browning, T. Horesh, F.A. Wilsch, Algebra &#38; Number Theory 16 (2022) 2385–2407.","mla":"Browning, Timothy D., et al. “Equidistribution and Freeness on Grassmannians.” <i>Algebra &#38; Number Theory</i>, vol. 16, no. 10, Mathematical Sciences Publishers, 2022, pp. 2385–407, doi:<a href=\"https://doi.org/10.2140/ant.2022.16.2385\">10.2140/ant.2022.16.2385</a>.","chicago":"Browning, Timothy D, Tal Horesh, and Florian Alexander Wilsch. “Equidistribution and Freeness on Grassmannians.” <i>Algebra &#38; Number Theory</i>. Mathematical Sciences Publishers, 2022. <a href=\"https://doi.org/10.2140/ant.2022.16.2385\">https://doi.org/10.2140/ant.2022.16.2385</a>.","ieee":"T. D. Browning, T. Horesh, and F. A. Wilsch, “Equidistribution and freeness on Grassmannians,” <i>Algebra &#38; Number Theory</i>, vol. 16, no. 10. Mathematical Sciences Publishers, pp. 
2385–2407, 2022.","ista":"Browning TD, Horesh T, Wilsch FA. 2022. Equidistribution and freeness on Grassmannians. Algebra &#38; Number Theory. 16(10), 2385–2407."},"intvolume":"        16","status":"public","publication":"Algebra & Number Theory","quality_controlled":"1","department":[{"_id":"TiBr"}],"isi":1,"publisher":"Mathematical Sciences Publishers","date_created":"2021-02-25T09:56:57Z","month":"12","page":"2385-2407"},{"citation":{"ieee":"K. Chatterjee, R. J. Saona Urmeneta, and B. Ziliotto, “Finite-memory strategies in POMDPs with long-run average objectives,” <i>Mathematics of Operations Research</i>, vol. 47, no. 1. Institute for Operations Research and the Management Sciences, pp. 100–119, 2022.","ista":"Chatterjee K, Saona Urmeneta RJ, Ziliotto B. 2022. Finite-memory strategies in POMDPs with long-run average objectives. Mathematics of Operations Research. 47(1), 100–119.","chicago":"Chatterjee, Krishnendu, Raimundo J Saona Urmeneta, and Bruno Ziliotto. “Finite-Memory Strategies in POMDPs with Long-Run Average Objectives.” <i>Mathematics of Operations Research</i>. Institute for Operations Research and the Management Sciences, 2022. <a href=\"https://doi.org/10.1287/moor.2020.1116\">https://doi.org/10.1287/moor.2020.1116</a>.","mla":"Chatterjee, Krishnendu, et al. “Finite-Memory Strategies in POMDPs with Long-Run Average Objectives.” <i>Mathematics of Operations Research</i>, vol. 47, no. 1, Institute for Operations Research and the Management Sciences, 2022, pp. 100–19, doi:<a href=\"https://doi.org/10.1287/moor.2020.1116\">10.1287/moor.2020.1116</a>.","short":"K. Chatterjee, R.J. Saona Urmeneta, B. Ziliotto, Mathematics of Operations Research 47 (2022) 100–119.","apa":"Chatterjee, K., Saona Urmeneta, R. J., &#38; Ziliotto, B. (2022). Finite-memory strategies in POMDPs with long-run average objectives. <i>Mathematics of Operations Research</i>. Institute for Operations Research and the Management Sciences. <a href=\"https://doi.org/10.1287/moor.2020.1116\">https://doi.org/10.1287/moor.2020.1116</a>","ama":"Chatterjee K, Saona Urmeneta RJ, Ziliotto B. Finite-memory strategies in POMDPs with long-run average objectives. <i>Mathematics of Operations Research</i>. 2022;47(1):100-119. 
doi:<a href=\"https://doi.org/10.1287/moor.2020.1116\">10.1287/moor.2020.1116</a>"},"title":"Finite-memory strategies in POMDPs with long-run average objectives","day":"01","type":"journal_article","author":[{"first_name":"Krishnendu","full_name":"Chatterjee, Krishnendu","last_name":"Chatterjee","orcid":"0000-0002-4561-241X","id":"2E5DCA20-F248-11E8-B48F-1D18A9856A87"},{"first_name":"Raimundo J","full_name":"Saona Urmeneta, Raimundo J","last_name":"Saona Urmeneta","id":"BD1DF4C4-D767-11E9-B658-BC13E6697425","orcid":"0000-0001-5103-038X"},{"first_name":"Bruno","full_name":"Ziliotto, Bruno","last_name":"Ziliotto"}],"project":[{"_id":"25863FF4-B435-11E9-9278-68D0E5697425","call_identifier":"FWF","grant_number":"S11407","name":"Game Theory"}],"acknowledgement":"Partially supported by Austrian Science Fund (FWF) NFN Grant No RiSE/SHiNE S11407, by CONICYT Chile through grant PII 20150140, and by ECOS-CONICYT through grant C15E03.\r\n","keyword":["Management Science and Operations Research","General Mathematics","Computer Science Applications"],"language":[{"iso":"eng"}],"doi":"10.1287/moor.2020.1116","page":"100-119","month":"02","date_created":"2021-04-08T09:33:31Z","publisher":"Institute for Operations Research and the Management Sciences","isi":1,"publication":"Mathematics of Operations Research","quality_controlled":"1","department":[{"_id":"GradSch"},{"_id":"KrCh"}],"status":"public","intvolume":"        47","article_type":"original","year":"2022","oa_version":"Preprint","scopus_import":"1","external_id":{"arxiv":["1904.13360"],"isi":["000731918100001"]},"date_updated":"2023-09-05T13:16:11Z","user_id":"c635000d-4b10-11ee-a964-aac5a93f6ac1","publication_identifier":{"eissn":["1526-5471"],"issn":["0364-765X"]},"arxiv":1,"article_processing_charge":"No","issue":"1","date_published":"2022-02-01T00:00:00Z","_id":"9311","abstract":[{"lang":"eng","text":"Partially observable Markov decision processes (POMDPs) are standard models for dynamic systems with probabilistic and nondeterministic behaviour in uncertain environments. We prove that in POMDPs with long-run average objective, the decision maker has approximately optimal strategies with finite memory. This implies notably that approximating the long-run value is recursively enumerable, as well as a weak continuity property of the value with respect to the transition function. "}],"publication_status":"published","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/1904.13360"}],"oa":1,"volume":47}]
