[{"date_published":"2023-08-01T00:00:00Z","intvolume":"       136","month":"08","acknowledgement":"The authors thank their respective lab members for feedback and helpful discussions. We thank the bioimaging and zebrafish facilities of IST Austria for their support.\r\nThis work was supported by the National Institutes of Health [R01GM112794 to A.L.M.], by Grants-in-Aid for Scientific Research from the Japan Society for the Promotion of Science [21K06156 to T.H.], by the Grant Program for Biomedical Engineering Research from the Nakatani Foundation for Advancement of Measuring Technologies in Biomedical Engineering [to T.H.] and by funding from the European Research Council [advanced grant 742573 to C.-P.H.]. ","article_number":"jcs260668","date_updated":"2023-12-13T12:11:18Z","acknowledged_ssus":[{"_id":"PreCl"},{"_id":"Bio"}],"issue":"15","external_id":{"isi":["001070149000001"]},"year":"2023","volume":136,"ddc":["570"],"date_created":"2023-08-20T22:01:13Z","day":"01","status":"public","publication":"Journal of Cell Science","doi":"10.1242/jcs.260668","ec_funded":1,"oa_version":"None","isi":1,"has_accepted_license":"1","publisher":"The Company of Biologists","title":"ZnUMBA - a live imaging method to detect local barrier breaches","author":[{"full_name":"Higashi, Tomohito","first_name":"Tomohito","last_name":"Higashi"},{"first_name":"Rachel E.","last_name":"Stephenson","full_name":"Stephenson, Rachel E."},{"first_name":"Cornelia","id":"3436488C-F248-11E8-B48F-1D18A9856A87","orcid":"0000-0001-5130-2226","last_name":"Schwayer","full_name":"Schwayer, Cornelia"},{"full_name":"Huljev, Karla","id":"44C6F6A6-F248-11E8-B48F-1D18A9856A87","first_name":"Karla","last_name":"Huljev"},{"full_name":"Higashi, Atsuko Y.","first_name":"Atsuko Y.","last_name":"Higashi"},{"full_name":"Heisenberg, Carl-Philipp J","first_name":"Carl-Philipp 
J","id":"39427864-F248-11E8-B48F-1D18A9856A87","last_name":"Heisenberg","orcid":"0000-0002-0912-4566"},{"first_name":"Hideki","last_name":"Chiba","full_name":"Chiba, Hideki"},{"first_name":"Ann L.","last_name":"Miller","full_name":"Miller, Ann L."}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","file":[{"file_name":"2023_JourCellScience_Higashi.pdf","date_created":"2023-08-21T07:37:54Z","date_updated":"2023-08-21T07:37:54Z","content_type":"application/pdf","file_id":"14092","creator":"dernst","file_size":18665315,"embargo_to":"open_access","access_level":"closed","checksum":"a399389b7e3d072f1788b63e612a10b3","embargo":"2024-08-10","relation":"main_file"}],"article_processing_charge":"No","department":[{"_id":"CaHe"},{"_id":"EvBe"}],"language":[{"iso":"eng"}],"article_type":"original","publication_identifier":{"issn":["0021-9533"],"eissn":["1477-9137"]},"citation":{"apa":"Higashi, T., Stephenson, R. E., Schwayer, C., Huljev, K., Higashi, A. Y., Heisenberg, C.-P. J., … Miller, A. L. (2023). ZnUMBA - a live imaging method to detect local barrier breaches. <i>Journal of Cell Science</i>. The Company of Biologists. <a href=\"https://doi.org/10.1242/jcs.260668\">https://doi.org/10.1242/jcs.260668</a>","mla":"Higashi, Tomohito, et al. “ZnUMBA - a Live Imaging Method to Detect Local Barrier Breaches.” <i>Journal of Cell Science</i>, vol. 136, no. 15, jcs260668, The Company of Biologists, 2023, doi:<a href=\"https://doi.org/10.1242/jcs.260668\">10.1242/jcs.260668</a>.","ieee":"T. Higashi <i>et al.</i>, “ZnUMBA - a live imaging method to detect local barrier breaches,” <i>Journal of Cell Science</i>, vol. 136, no. 15. The Company of Biologists, 2023.","short":"T. Higashi, R.E. Stephenson, C. Schwayer, K. Huljev, A.Y. Higashi, C.-P.J. Heisenberg, H. Chiba, A.L. Miller, Journal of Cell Science 136 (2023).","ista":"Higashi T, Stephenson RE, Schwayer C, Huljev K, Higashi AY, Heisenberg C-PJ, Chiba H, Miller AL. 2023. 
ZnUMBA - a live imaging method to detect local barrier breaches. Journal of Cell Science. 136(15), jcs260668.","ama":"Higashi T, Stephenson RE, Schwayer C, et al. ZnUMBA - a live imaging method to detect local barrier breaches. <i>Journal of Cell Science</i>. 2023;136(15). doi:<a href=\"https://doi.org/10.1242/jcs.260668\">10.1242/jcs.260668</a>","chicago":"Higashi, Tomohito, Rachel E. Stephenson, Cornelia Schwayer, Karla Huljev, Atsuko Y. Higashi, Carl-Philipp J Heisenberg, Hideki Chiba, and Ann L. Miller. “ZnUMBA - a Live Imaging Method to Detect Local Barrier Breaches.” <i>Journal of Cell Science</i>. The Company of Biologists, 2023. <a href=\"https://doi.org/10.1242/jcs.260668\">https://doi.org/10.1242/jcs.260668</a>."},"abstract":[{"text":"Epithelial barrier function is commonly analyzed using transepithelial electrical resistance, which measures ion flux across a monolayer, or by adding traceable macromolecules and monitoring their passage across the monolayer. Although these methods measure changes in global barrier function, they lack the sensitivity needed to detect local or transient barrier breaches, and they do not reveal the location of barrier leaks. Therefore, we previously developed a method that we named the zinc-based ultrasensitive microscopic barrier assay (ZnUMBA), which overcomes these limitations, allowing for detection of local tight junction leaks with high spatiotemporal resolution. Here, we present expanded applications for ZnUMBA. ZnUMBA can be used in Xenopus embryos to measure the dynamics of barrier restoration and actin accumulation following laser injury. ZnUMBA can also be effectively utilized in developing zebrafish embryos as well as cultured monolayers of Madin–Darby canine kidney (MDCK) II epithelial cells. 
ZnUMBA is a powerful and flexible method that, with minimal optimization, can be applied to multiple systems to measure dynamic changes in barrier function with spatiotemporal precision.","lang":"eng"}],"scopus_import":"1","file_date_updated":"2023-08-21T07:37:54Z","type":"journal_article","_id":"14082","quality_controlled":"1","project":[{"_id":"260F1432-B435-11E9-9278-68D0E5697425","name":"Interaction and feedback between cell mechanics and fate specification in vertebrate gastrulation","call_identifier":"H2020","grant_number":"742573"}],"publication_status":"published"},{"oa_version":"Published Version","publication":"50th International Colloquium on Automata, Languages, and Programming","doi":"10.4230/LIPIcs.ICALP.2023.99","date_created":"2023-08-20T22:01:13Z","tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"ddc":["000"],"day":"01","status":"public","volume":261,"year":"2023","external_id":{"arxiv":["2210.07754"]},"conference":{"location":"Paderborn, Germany","name":"ICALP: International Colloquium on Automata, Languages, and Programming","start_date":"2023-07-10","end_date":"2023-07-14"},"license":"https://creativecommons.org/licenses/by/4.0/","date_updated":"2023-08-21T07:26:01Z","oa":1,"alternative_title":["LIPIcs"],"date_published":"2023-07-01T00:00:00Z","intvolume":"       261","month":"07","article_number":"99","acknowledgement":"Nicolas Resch: Research supported in part by ERC H2020 grant No.74079 (ALGSTRONGCRYPTO). 
Chen Yuan: Research supported in part by the National Key Research and Development Projects under Grant 2022YFA1004900 and Grant 2021YFE0109900, the National Natural Science Foundation of China under Grant 12101403 and Grant 12031011.\r\nAcknowledgements YZ is grateful to Shashank Vatedka, Diyuan Wu and Fengxing Zhu for inspiring discussions.","publication_status":"published","_id":"14083","quality_controlled":"1","citation":{"ama":"Resch N, Yuan C, Zhang Y. Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery. In: <i>50th International Colloquium on Automata, Languages, and Programming</i>. Vol 261. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2023. doi:<a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.99\">10.4230/LIPIcs.ICALP.2023.99</a>","ieee":"N. Resch, C. Yuan, and Y. Zhang, “Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery,” in <i>50th International Colloquium on Automata, Languages, and Programming</i>, Paderborn, Germany, 2023, vol. 261.","ista":"Resch N, Yuan C, Zhang Y. 2023. Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery. 50th International Colloquium on Automata, Languages, and Programming. ICALP: International Colloquium on Automata, Languages, and Programming, LIPIcs, vol. 261, 99.","short":"N. Resch, C. Yuan, Y. Zhang, in:, 50th International Colloquium on Automata, Languages, and Programming, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023.","chicago":"Resch, Nicolas, Chen Yuan, and Yihan Zhang. “Zero-Rate Thresholds and New Capacity Bounds for List-Decoding and List-Recovery.” In <i>50th International Colloquium on Automata, Languages, and Programming</i>, Vol. 261. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023. <a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.99\">https://doi.org/10.4230/LIPIcs.ICALP.2023.99</a>.","apa":"Resch, N., Yuan, C., &#38; Zhang, Y. (2023). 
Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery. In <i>50th International Colloquium on Automata, Languages, and Programming</i> (Vol. 261). Paderborn, Germany: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. <a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.99\">https://doi.org/10.4230/LIPIcs.ICALP.2023.99</a>","mla":"Resch, Nicolas, et al. “Zero-Rate Thresholds and New Capacity Bounds for List-Decoding and List-Recovery.” <i>50th International Colloquium on Automata, Languages, and Programming</i>, vol. 261, 99, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023, doi:<a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.99\">10.4230/LIPIcs.ICALP.2023.99</a>."},"publication_identifier":{"isbn":["9783959772785"],"issn":["1868-8969"]},"scopus_import":"1","arxiv":1,"abstract":[{"text":"In this work we consider the list-decodability and list-recoverability of arbitrary q-ary codes, for all integer values of q ≥ 2. A code is called (p,L)_q-list-decodable if every radius pn Hamming ball contains less than L codewords; (p,𝓁,L)_q-list-recoverability is a generalization where we place radius pn Hamming balls on every point of a combinatorial rectangle with side length 𝓁 and again stipulate that there be less than L codewords.\r\nOur main contribution is to precisely calculate the maximum value of p for which there exist infinite families of positive rate (p,𝓁,L)_q-list-recoverable codes, the quantity we call the zero-rate threshold. Denoting this value by p_*, we in fact show that codes correcting a p_*+ε fraction of errors must have size O_ε(1), i.e., independent of n. Such a result is typically referred to as a \"Plotkin bound.\" To complement this, a standard random code with expurgation construction shows that there exist positive rate codes correcting a p_*-ε fraction of errors. 
We also follow a classical proof template (typically attributed to Elias and Bassalygo) to derive from the zero-rate threshold other tradeoffs between rate and decoding radius for list-decoding and list-recovery.\r\nTechnically, proving the Plotkin bound boils down to demonstrating the Schur convexity of a certain function defined on the q-simplex as well as the convexity of a univariate function derived from it. We remark that an earlier argument claimed similar results for q-ary list-decoding; however, we point out that this earlier proof is flawed.","lang":"eng"}],"file_date_updated":"2023-08-21T07:23:18Z","type":"conference","language":[{"iso":"eng"}],"article_processing_charge":"Yes","department":[{"_id":"MaMo"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"first_name":"Nicolas","last_name":"Resch","full_name":"Resch, Nicolas"},{"first_name":"Chen","last_name":"Yuan","full_name":"Yuan, Chen"},{"last_name":"Zhang","orcid":"0000-0002-6465-6258","id":"2ce5da42-b2ea-11eb-bba5-9f264e9d002c","first_name":"Yihan","full_name":"Zhang, Yihan"}],"title":"Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery","file":[{"success":1,"file_name":"2023_LIPIcsICALP_Resch.pdf","date_created":"2023-08-21T07:23:18Z","date_updated":"2023-08-21T07:23:18Z","file_id":"14091","content_type":"application/pdf","creator":"dernst","file_size":1141497,"access_level":"open_access","relation":"main_file","checksum":"a449143fec3fbebb092cb8ef3b53c226"}],"has_accepted_license":"1","publisher":"Schloss Dagstuhl - Leibniz-Zentrum für Informatik"},{"has_accepted_license":"1","publisher":"Schloss Dagstuhl - Leibniz-Zentrum für Informatik","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"full_name":"Harris, David G.","first_name":"David G.","last_name":"Harris"},{"full_name":"Kolmogorov, Vladimir","last_name":"Kolmogorov","id":"3D50B0BA-F248-11E8-B48F-1D18A9856A87","first_name":"Vladimir"}],"title":"Parameter estimation for Gibbs 
distributions","file":[{"date_created":"2023-08-21T06:45:16Z","date_updated":"2023-08-21T06:45:16Z","success":1,"file_name":"2023_LIPIcsICALP_Harris.pdf","relation":"main_file","checksum":"6dee0684245bb1c524b9c955db1e933d","access_level":"open_access","file_id":"14088","content_type":"application/pdf","file_size":917791,"creator":"dernst"}],"article_processing_charge":"Yes","department":[{"_id":"VlKo"}],"language":[{"iso":"eng"}],"citation":{"mla":"Harris, David G., and Vladimir Kolmogorov. “Parameter Estimation for Gibbs Distributions.” <i>50th International Colloquium on Automata, Languages, and Programming</i>, vol. 261, 72, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023, doi:<a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.72\">10.4230/LIPIcs.ICALP.2023.72</a>.","apa":"Harris, D. G., &#38; Kolmogorov, V. (2023). Parameter estimation for Gibbs distributions. In <i>50th International Colloquium on Automata, Languages, and Programming</i> (Vol. 261). Paderborn, Germany: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. <a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.72\">https://doi.org/10.4230/LIPIcs.ICALP.2023.72</a>","chicago":"Harris, David G., and Vladimir Kolmogorov. “Parameter Estimation for Gibbs Distributions.” In <i>50th International Colloquium on Automata, Languages, and Programming</i>, Vol. 261. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023. <a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.72\">https://doi.org/10.4230/LIPIcs.ICALP.2023.72</a>.","ama":"Harris DG, Kolmogorov V. Parameter estimation for Gibbs distributions. In: <i>50th International Colloquium on Automata, Languages, and Programming</i>. Vol 261. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2023. doi:<a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.72\">10.4230/LIPIcs.ICALP.2023.72</a>","short":"D.G. Harris, V. 
Kolmogorov, in:, 50th International Colloquium on Automata, Languages, and Programming, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023.","ista":"Harris DG, Kolmogorov V. 2023. Parameter estimation for Gibbs distributions. 50th International Colloquium on Automata, Languages, and Programming. ICALP: International Colloquium on Automata, Languages, and Programming, LIPIcs, vol. 261, 72.","ieee":"D. G. Harris and V. Kolmogorov, “Parameter estimation for Gibbs distributions,” in <i>50th International Colloquium on Automata, Languages, and Programming</i>, Paderborn, Germany, 2023, vol. 261."},"publication_identifier":{"issn":["1868-8969"],"isbn":["9783959772785"]},"scopus_import":"1","abstract":[{"text":"A central problem in computational statistics is to convert a procedure for sampling combinatorial objects into a procedure for counting those objects, and vice versa. We will consider sampling problems which come from Gibbs distributions, which are families of probability distributions over a discrete space Ω with probability mass function of the form μ^Ω_β(ω) ∝ e^{β H(ω)} for β in an interval [β_min, β_max] and H(ω) ∈ {0} ∪ [1, n].\r\nThe partition function is the normalization factor Z(β) = ∑_{ω ∈ Ω} e^{β H(ω)}, and the log partition ratio is defined as q = (log Z(β_max))/Z(β_min)\r\nWe develop a number of algorithms to estimate the counts c_x using roughly Õ(q/ε²) samples for general Gibbs distributions and Õ(n²/ε²) samples for integer-valued distributions (ignoring some second-order terms and parameters), We show this is optimal up to logarithmic factors. 
We illustrate with improved algorithms for counting connected subgraphs and perfect matchings in a graph.","lang":"eng"}],"arxiv":1,"type":"conference","file_date_updated":"2023-08-21T06:45:16Z","_id":"14084","quality_controlled":"1","publication_status":"published","date_published":"2023-07-01T00:00:00Z","alternative_title":["LIPIcs"],"month":"07","intvolume":"       261","article_number":"72","acknowledgement":"We thank Heng Guo for helpful explanations of algorithms for sampling connected subgraphs and matchings, Maksym Serbyn for bringing to our attention the Wang-Landau algorithm and its use in physics.","date_updated":"2023-08-21T06:49:11Z","oa":1,"external_id":{"arxiv":["2007.10824"]},"conference":{"name":"ICALP: International Colloquium on Automata, Languages, and Programming","start_date":"2023-07-10","end_date":"2023-07-14","location":"Paderborn, Germany"},"year":"2023","volume":261,"date_created":"2023-08-20T22:01:14Z","tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"ddc":["000","510"],"status":"public","day":"01","publication":"50th International Colloquium on Automata, Languages, and Programming","doi":"10.4230/LIPIcs.ICALP.2023.72","oa_version":"Published Version"},{"oa_version":"Published Version","ec_funded":1,"publication":"50th International Colloquium on Automata, Languages, and Programming","doi":"10.4230/LIPIcs.ICALP.2023.69","day":"01","status":"public","date_created":"2023-08-20T22:01:14Z","tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"ddc":["000"],"volume":261,"year":"2023","conference":{"location":"Paderborn, Germany","end_date":"2023-07-14","start_date":"2023-07-10","name":"ICALP: International Colloquium on 
Automata, Languages, and Programming"},"external_id":{"unknown":["2211.09606"]},"oa":1,"date_updated":"2023-08-21T07:00:49Z","acknowledgement":"This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (Grant agreement No.\r\n101019564 “The Design of Modern Fully Dynamic Data Structures (MoDynStruct)” and from the\r\nAustrian Science Fund (FWF) project “Static and Dynamic Hierarchical Graph Decompositions”,\r\nI 5982-N, and project “Fast Algorithms for a Reactive Network Layer (ReactNet)”, P 33775-N, with additional funding from the netidee SCIENCE Stiftung, 2020–2024.\r\nThis work was done in part while Gramoz Goranci was at Institute for Theoretical Studies, ETH Zurich, Switzerland. There, he was supported by Dr. Max Rössler, the Walter Haefner Foundation and the ETH Zürich Foundation. We also thank Richard Peng, Thatchaphol Saranurak, Sebastian Forster and Sushant Sachdeva for helpful discussions, and the anonymous reviewers for their insightful comments.","article_number":"69","alternative_title":["LIPIcs"],"date_published":"2023-07-01T00:00:00Z","intvolume":"       261","month":"07","publication_status":"published","project":[{"_id":"bd9ca328-d553-11ed-ba76-dc4f890cfe62","name":"The design and evaluation of modern fully dynamic data structures","call_identifier":"H2020","grant_number":"101019564"},{"grant_number":"I05982","_id":"bda196b2-d553-11ed-ba76-8e8ee6c21103","name":"Static and Dynamic Hierarchical Graph Decompositions"},{"grant_number":"P33775 ","name":"Fast Algorithms for a Reactive Network Layer","_id":"bd9e3a2e-d553-11ed-ba76-8aa684ce17fe"}],"quality_controlled":"1","_id":"14085","scopus_import":"1","abstract":[{"lang":"eng","text":"We show an (1+ϵ)-approximation algorithm for maintaining maximum s-t flow under m edge insertions in m1/2+o(1)ϵ−1/2 amortized update time for directed, unweighted graphs. 
This constitutes the first sublinear dynamic maximum flow algorithm in general sparse graphs with arbitrarily good approximation guarantee."}],"type":"conference","file_date_updated":"2023-08-21T06:59:05Z","publication_identifier":{"issn":["1868-8969"],"isbn":["9783959772785"]},"citation":{"ista":"Goranci G, Henzinger MH. 2023. Efficient data structures for incremental exact and approximate maximum flow. 50th International Colloquium on Automata, Languages, and Programming. ICALP: International Colloquium on Automata, Languages, and Programming, LIPIcs, vol. 261, 69.","short":"G. Goranci, M.H. Henzinger, in:, 50th International Colloquium on Automata, Languages, and Programming, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023.","ieee":"G. Goranci and M. H. Henzinger, “Efficient data structures for incremental exact and approximate maximum flow,” in <i>50th International Colloquium on Automata, Languages, and Programming</i>, Paderborn, Germany, 2023, vol. 261.","ama":"Goranci G, Henzinger MH. Efficient data structures for incremental exact and approximate maximum flow. In: <i>50th International Colloquium on Automata, Languages, and Programming</i>. Vol 261. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2023. doi:<a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.69\">10.4230/LIPIcs.ICALP.2023.69</a>","chicago":"Goranci, Gramoz, and Monika H Henzinger. “Efficient Data Structures for Incremental Exact and Approximate Maximum Flow.” In <i>50th International Colloquium on Automata, Languages, and Programming</i>, Vol. 261. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023. <a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.69\">https://doi.org/10.4230/LIPIcs.ICALP.2023.69</a>.","apa":"Goranci, G., &#38; Henzinger, M. H. (2023). Efficient data structures for incremental exact and approximate maximum flow. In <i>50th International Colloquium on Automata, Languages, and Programming</i> (Vol. 261). 
Paderborn, Germany: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. <a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.69\">https://doi.org/10.4230/LIPIcs.ICALP.2023.69</a>","mla":"Goranci, Gramoz, and Monika H. Henzinger. “Efficient Data Structures for Incremental Exact and Approximate Maximum Flow.” <i>50th International Colloquium on Automata, Languages, and Programming</i>, vol. 261, 69, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023, doi:<a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.69\">10.4230/LIPIcs.ICALP.2023.69</a>."},"language":[{"iso":"eng"}],"department":[{"_id":"MoHe"}],"article_processing_charge":"Yes","title":"Efficient data structures for incremental exact and approximate maximum flow","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"full_name":"Goranci, Gramoz","last_name":"Goranci","first_name":"Gramoz"},{"full_name":"Henzinger, Monika H","orcid":"0000-0002-5008-6530","last_name":"Henzinger","first_name":"Monika H","id":"540c9bbd-f2de-11ec-812d-d04a5be85630"}],"file":[{"checksum":"074177e815a1656de5d4071c7a3dffa6","access_level":"open_access","relation":"main_file","creator":"dernst","file_size":875910,"file_id":"14089","content_type":"application/pdf","date_created":"2023-08-21T06:59:05Z","date_updated":"2023-08-21T06:59:05Z","file_name":"2023_LIPIcsICALP_Goranci.pdf","success":1}],"publisher":"Schloss Dagstuhl - Leibniz-Zentrum für Informatik","has_accepted_license":"1"},{"date_updated":"2023-08-21T07:05:47Z","oa":1,"alternative_title":["LIPIcs"],"date_published":"2023-07-01T00:00:00Z","month":"07","intvolume":"       261","article_number":"74","acknowledgement":" Monika Henzinger: This project has received funding from the European Research Council\r\n(ERC) under the European Union’s Horizon 2020 research and innovation programme (Grant\r\nagreement No. 
101019564 “The Design of Modern Fully Dynamic Data Structures (MoDynStruct)” and from the Austrian Science Fund (FWF) project “Static and Dynamic Hierarchical Graph Decompositions”, I 5982-N, and project “Fast Algorithms for a Reactive Network Layer (ReactNet)”, P 33775-N, with additional funding from the netidee SCIENCE Stiftung, 2020–2024. Jan Vondrák: Supported by NSF Award 2127781.","year":"2023","external_id":{"arxiv":["2305.00122"]},"conference":{"end_date":"2023-07-14","start_date":"2023-07-10","name":"ICALP: International Colloquium on Automata, Languages, and Programming","location":"Paderborn, Germany"},"ddc":["000"],"tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"date_created":"2023-08-20T22:01:14Z","status":"public","day":"01","volume":261,"oa_version":"Published Version","ec_funded":1,"publication":"50th International Colloquium on Automata, Languages, and Programming","doi":"10.4230/LIPIcs.ICALP.2023.74","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"full_name":"Henzinger, Monika H","first_name":"Monika H","id":"540c9bbd-f2de-11ec-812d-d04a5be85630","orcid":"0000-0002-5008-6530","last_name":"Henzinger"},{"first_name":"Paul","last_name":"Liu","full_name":"Liu, Paul"},{"first_name":"Jan","last_name":"Vondrák","full_name":"Vondrák, Jan"},{"full_name":"Zheng, Da Wei","last_name":"Zheng","first_name":"Da Wei"}],"title":"Faster submodular maximization for several classes of matroids","file":[{"date_updated":"2023-08-21T07:04:36Z","date_created":"2023-08-21T07:04:36Z","success":1,"file_name":"2023_LIPIcsICALP_HenzingerM.pdf","relation":"main_file","access_level":"open_access","checksum":"a5eef225014e003efbfbe4830fdd23cb","content_type":"application/pdf","file_id":"14090","file_size":930943,"creator":"dernst"}],"has_accepted_license":"1","publisher":"Schloss Dagstuhl - 
Leibniz-Zentrum für Informatik","language":[{"iso":"eng"}],"article_processing_charge":"Yes","department":[{"_id":"MoHe"}],"_id":"14086","quality_controlled":"1","citation":{"ista":"Henzinger MH, Liu P, Vondrák J, Zheng DW. 2023. Faster submodular maximization for several classes of matroids. 50th International Colloquium on Automata, Languages, and Programming. ICALP: International Colloquium on Automata, Languages, and Programming, LIPIcs, vol. 261, 74.","short":"M.H. Henzinger, P. Liu, J. Vondrák, D.W. Zheng, in:, 50th International Colloquium on Automata, Languages, and Programming, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023.","ieee":"M. H. Henzinger, P. Liu, J. Vondrák, and D. W. Zheng, “Faster submodular maximization for several classes of matroids,” in <i>50th International Colloquium on Automata, Languages, and Programming</i>, Paderborn, Germany, 2023, vol. 261.","ama":"Henzinger MH, Liu P, Vondrák J, Zheng DW. Faster submodular maximization for several classes of matroids. In: <i>50th International Colloquium on Automata, Languages, and Programming</i>. Vol 261. Schloss Dagstuhl - Leibniz-Zentrum für Informatik; 2023. doi:<a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.74\">10.4230/LIPIcs.ICALP.2023.74</a>","chicago":"Henzinger, Monika H, Paul Liu, Jan Vondrák, and Da Wei Zheng. “Faster Submodular Maximization for Several Classes of Matroids.” In <i>50th International Colloquium on Automata, Languages, and Programming</i>, Vol. 261. Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023. <a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.74\">https://doi.org/10.4230/LIPIcs.ICALP.2023.74</a>.","apa":"Henzinger, M. H., Liu, P., Vondrák, J., &#38; Zheng, D. W. (2023). Faster submodular maximization for several classes of matroids. In <i>50th International Colloquium on Automata, Languages, and Programming</i> (Vol. 261). Paderborn, Germany: Schloss Dagstuhl - Leibniz-Zentrum für Informatik. 
<a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.74\">https://doi.org/10.4230/LIPIcs.ICALP.2023.74</a>","mla":"Henzinger, Monika H., et al. “Faster Submodular Maximization for Several Classes of Matroids.” <i>50th International Colloquium on Automata, Languages, and Programming</i>, vol. 261, 74, Schloss Dagstuhl - Leibniz-Zentrum für Informatik, 2023, doi:<a href=\"https://doi.org/10.4230/LIPIcs.ICALP.2023.74\">10.4230/LIPIcs.ICALP.2023.74</a>."},"publication_identifier":{"issn":["18688969"],"isbn":["9783959772785"]},"abstract":[{"text":"The maximization of submodular functions have found widespread application in areas such as machine learning, combinatorial optimization, and economics, where practitioners often wish to enforce various constraints; the matroid constraint has been investigated extensively due to its algorithmic properties and expressive power. Though tight approximation algorithms for general matroid constraints exist in theory, the running times of such algorithms typically scale quadratically, and are not practical for truly large scale settings. Recent progress has focused on fast algorithms for important classes of matroids given in explicit form. Currently, nearly-linear time algorithms only exist for graphic and partition matroids [Alina Ene and Huy L. Nguyen, 2019]. In this work, we develop algorithms for monotone submodular maximization constrained by graphic, transversal matroids, or laminar matroids in time near-linear in the size of their representation. Our algorithms achieve an optimal approximation of 1-1/e-ε and both generalize and accelerate the results of Ene and Nguyen [Alina Ene and Huy L. Nguyen, 2019]. 
In fact, the running time of our algorithm cannot be improved within the fast continuous greedy framework of Badanidiyuru and Vondrák [Ashwinkumar Badanidiyuru and Jan Vondrák, 2014].\r\nTo achieve near-linear running time, we make use of dynamic data structures that maintain bases with approximate maximum cardinality and weight under certain element updates. These data structures need to support a weight decrease operation and a novel Freeze operation that allows the algorithm to freeze elements (i.e. force to be contained) in its basis regardless of future data structure operations. For the laminar matroid, we present a new dynamic data structure using the top tree interface of Alstrup, Holm, de Lichtenberg, and Thorup [Stephen Alstrup et al., 2005] that maintains the maximum weight basis under insertions and deletions of elements in O(log n) time. This data structure needs to support certain subtree query and path update operations that are performed every insertion and deletion that are non-trivial to handle in conjunction. For the transversal matroid the Freeze operation corresponds to requiring the data structure to keep a certain set S of vertices matched, a property that we call S-stability. While there is a large body of work on dynamic matching algorithms, none are S-stable and maintain an approximate maximum weight matching under vertex updates. 
We give the first such algorithm for bipartite graphs with total running time linear (up to log factors) in the number of edges.","lang":"eng"}],"scopus_import":"1","arxiv":1,"type":"conference","file_date_updated":"2023-08-21T07:04:36Z","project":[{"name":"The design and evaluation of modern fully dynamic data structures","_id":"bd9ca328-d553-11ed-ba76-dc4f890cfe62","grant_number":"101019564","call_identifier":"H2020"},{"name":"Static and Dynamic Hierarchical Graph Decompositions","_id":"bda196b2-d553-11ed-ba76-8e8ee6c21103","grant_number":"I05982"},{"name":"Fast Algorithms for a Reactive Network Layer","_id":"bd9e3a2e-d553-11ed-ba76-8aa684ce17fe","grant_number":"P33775 "}],"publication_status":"published"},{"date_updated":"2024-01-30T12:49:24Z","oa":1,"date_published":"2023-09-01T00:00:00Z","intvolume":"        39","month":"09","acknowledgement":"J. Rø and L. A. acknowledge support from the Research Council of Norway through the Center of Excellence funding scheme, Project No. 262644 (PoreLab). A. D. acknowledges funding from the Novo Nordisk Foundation (grant No. NNF18SA0035142 and NERD grant No. NNF21OC0068687), Villum Fonden Grant no. 29476, and the European Union via the ERC-Starting Grant PhysCoMeT. Views and opinions expressed are however those of the authors only and do not necessarily reflect those of the European Union or the European Research Council. 
Neither the European Union nor the granting authority can be held responsible for them.","year":"2023","external_id":{"isi":["001035766100001"],"arxiv":["2303.07063"]},"page":"7513-7527","tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"date_created":"2023-08-20T22:01:15Z","ddc":["540"],"status":"public","day":"01","volume":39,"oa_version":"Published Version","isi":1,"publication":"Soft Matter","doi":"10.1039/d3sm00316g","title":"Spontaneous flows and dynamics of full-integer topological defects in polar active matter","author":[{"full_name":"Rønning, Jonas","last_name":"Rønning","first_name":"Jonas"},{"full_name":"Renaud, Julian B","id":"7af6767d-14eb-11ed-b536-a32449ae867c","first_name":"Julian B","last_name":"Renaud"},{"first_name":"Amin","last_name":"Doostmohammadi","full_name":"Doostmohammadi, Amin"},{"last_name":"Angheluta","first_name":"Luiza","full_name":"Angheluta, Luiza"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","file":[{"date_updated":"2024-01-30T12:48:24Z","date_created":"2024-01-30T12:48:24Z","file_name":"2023_SoftMatter_Ronning.pdf","success":1,"relation":"main_file","access_level":"open_access","checksum":"b936747170d0b708172b518078c4081a","file_size":7660662,"creator":"dernst","content_type":"application/pdf","file_id":"14908"}],"has_accepted_license":"1","publisher":"Royal Society of Chemistry","language":[{"iso":"eng"}],"article_type":"original","article_processing_charge":"Yes (in subscription journal)","department":[{"_id":"GradSch"}],"_id":"14087","quality_controlled":"1","citation":{"mla":"Rønning, Jonas, et al. “Spontaneous Flows and Dynamics of Full-Integer Topological Defects in Polar Active Matter.” <i>Soft Matter</i>, vol. 39, Royal Society of Chemistry, 2023, pp. 
7513–27, doi:<a href=\"https://doi.org/10.1039/d3sm00316g\">10.1039/d3sm00316g</a>.","apa":"Rønning, J., Renaud, J. B., Doostmohammadi, A., &#38; Angheluta, L. (2023). Spontaneous flows and dynamics of full-integer topological defects in polar active matter. <i>Soft Matter</i>. Royal Society of Chemistry. <a href=\"https://doi.org/10.1039/d3sm00316g\">https://doi.org/10.1039/d3sm00316g</a>","chicago":"Rønning, Jonas, Julian B Renaud, Amin Doostmohammadi, and Luiza Angheluta. “Spontaneous Flows and Dynamics of Full-Integer Topological Defects in Polar Active Matter.” <i>Soft Matter</i>. Royal Society of Chemistry, 2023. <a href=\"https://doi.org/10.1039/d3sm00316g\">https://doi.org/10.1039/d3sm00316g</a>.","ama":"Rønning J, Renaud JB, Doostmohammadi A, Angheluta L. Spontaneous flows and dynamics of full-integer topological defects in polar active matter. <i>Soft Matter</i>. 2023;39:7513-7527. doi:<a href=\"https://doi.org/10.1039/d3sm00316g\">10.1039/d3sm00316g</a>","ieee":"J. Rønning, J. B. Renaud, A. Doostmohammadi, and L. Angheluta, “Spontaneous flows and dynamics of full-integer topological defects in polar active matter,” <i>Soft Matter</i>, vol. 39. Royal Society of Chemistry, pp. 7513–7527, 2023.","ista":"Rønning J, Renaud JB, Doostmohammadi A, Angheluta L. 2023. Spontaneous flows and dynamics of full-integer topological defects in polar active matter. Soft Matter. 39, 7513–7527.","short":"J. Rønning, J.B. Renaud, A. Doostmohammadi, L. Angheluta, Soft Matter 39 (2023) 7513–7527."},"publication_identifier":{"issn":["1744-683X"],"eissn":["1744-6848"]},"scopus_import":"1","arxiv":1,"abstract":[{"lang":"eng","text":"Polar active matter of self-propelled particles sustain spontaneous flows through the full-integer topological defects. We study theoretically the incompressible flow profiles around ±1 defects induced by polar and dipolar active forces. 
We show that dipolar forces induce vortical flows around the +1 defect, while the flow around the −1 defect has an 8-fold rotational symmetry. The vortical flow changes its chirality near the +1 defect core in the absence of the friction with a substrate. We show analytically that the flow induced by polar active forces is vortical near the +1 defect and is 4-fold symmetric near the −1 defect, while it becomes uniform in the far-field. For a pair of oppositely charged defects, this polar flow contributes to a mutual interaction force that depends only on the orientation of the defect pair relative to the background polarization, and that enhances defect pair annihilation. This is in contradiction with the effect of dipolar active forces which decay inversely proportional with the defect separation distance. As such, our analyses reveals a long-ranged mechanism for the pairwise interaction between topological defects in polar active matter."}],"file_date_updated":"2024-01-30T12:48:24Z","type":"journal_article","publication_status":"published"},{"scopus_import":"1","arxiv":1,"abstract":[{"text":"Despite their recent success, deep neural networks continue to perform poorly when they encounter distribution shifts at test time. Many recently proposed approaches try to counter this by aligning the model to the new distribution prior to inference. With no labels available this requires unsupervised objectives to adapt the model on the observed test data. In this paper, we propose Test-Time SelfTraining (TeST): a technique that takes as input a model trained on some source data and a novel data distribution at test time, and learns invariant and robust representations using a student-teacher framework. We find that models adapted using TeST significantly improve over baseline testtime adaptation algorithms. TeST achieves competitive performance to modern domain adaptation algorithms [4, 43], while having access to 5-10x less data at time of adaption. 
We thoroughly evaluate a variety of baselines on two tasks:\r\nobject detection and image segmentation and find that models adapted with TeST. We find that TeST sets the new stateof-the art for test-time domain adaptation algorithms. ","lang":"eng"}],"type":"conference","publication_identifier":{"isbn":["9781665493475"],"eissn":["2642-9381"]},"citation":{"mla":"Sinha, Samarth, et al. “TeST: Test-Time Self-Training under Distribution Shift.” <i>2023 IEEE/CVF Winter Conference on Applications of Computer Vision</i>, Institute of Electrical and Electronics Engineers, 2023, doi:<a href=\"https://doi.org/10.1109/wacv56688.2023.00278\">10.1109/wacv56688.2023.00278</a>.","apa":"Sinha, S., Gehler, P., Locatello, F., &#38; Schiele, B. (2023). TeST: Test-time Self-Training under distribution shift. In <i>2023 IEEE/CVF Winter Conference on Applications of Computer Vision</i>. Waikoloa, HI, United States: Institute of Electrical and Electronics Engineers. <a href=\"https://doi.org/10.1109/wacv56688.2023.00278\">https://doi.org/10.1109/wacv56688.2023.00278</a>","chicago":"Sinha, Samarth, Peter Gehler, Francesco Locatello, and Bernt Schiele. “TeST: Test-Time Self-Training under Distribution Shift.” In <i>2023 IEEE/CVF Winter Conference on Applications of Computer Vision</i>. Institute of Electrical and Electronics Engineers, 2023. <a href=\"https://doi.org/10.1109/wacv56688.2023.00278\">https://doi.org/10.1109/wacv56688.2023.00278</a>.","ista":"Sinha S, Gehler P, Locatello F, Schiele B. 2023. TeST: Test-time Self-Training under distribution shift. 2023 IEEE/CVF Winter Conference on Applications of Computer Vision. WACV: Winter Conference on Applications of Computer Vision.","ieee":"S. Sinha, P. Gehler, F. Locatello, and B. Schiele, “TeST: Test-time Self-Training under distribution shift,” in <i>2023 IEEE/CVF Winter Conference on Applications of Computer Vision</i>, Waikoloa, HI, United States, 2023.","short":"S. Sinha, P. Gehler, F. Locatello, B. 
Schiele, in:, 2023 IEEE/CVF Winter Conference on Applications of Computer Vision, Institute of Electrical and Electronics Engineers, 2023.","ama":"Sinha S, Gehler P, Locatello F, Schiele B. TeST: Test-time Self-Training under distribution shift. In: <i>2023 IEEE/CVF Winter Conference on Applications of Computer Vision</i>. Institute of Electrical and Electronics Engineers; 2023. doi:<a href=\"https://doi.org/10.1109/wacv56688.2023.00278\">10.1109/wacv56688.2023.00278</a>"},"extern":"1","quality_controlled":"1","_id":"14105","publication_status":"published","publisher":"Institute of Electrical and Electronics Engineers","title":"TeST: Test-time Self-Training under distribution shift","author":[{"first_name":"Samarth","last_name":"Sinha","full_name":"Sinha, Samarth"},{"full_name":"Gehler, Peter","last_name":"Gehler","first_name":"Peter"},{"first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","last_name":"Locatello","full_name":"Locatello, Francesco"},{"last_name":"Schiele","first_name":"Bernt","full_name":"Schiele, Bernt"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","department":[{"_id":"FrLo"}],"article_processing_charge":"No","language":[{"iso":"eng"}],"status":"public","day":"06","date_created":"2023-08-21T12:11:38Z","publication":"2023 IEEE/CVF Winter Conference on Applications of Computer Vision","doi":"10.1109/wacv56688.2023.00278","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2209.11459"}],"oa_version":"Preprint","date_published":"2023-02-06T00:00:00Z","month":"02","oa":1,"date_updated":"2023-09-06T10:26:56Z","conference":{"name":"WACV: Winter Conference on Applications of Computer Vision","start_date":"2023-01-02","end_date":"2023-01-07","location":"Waikoloa, HI, United States"},"external_id":{"arxiv":["2209.11459"]},"year":"2023"},{"issue":"3","oa":1,"date_updated":"2023-12-13T12:16:19Z","keyword":["Geometry and Topology","Mathematical Physics"],"acknowledgement":"D.M. and K.M. 
thank Robert Seiringer for helpful discussions. Open access funding provided by Institute of Science and Technology (IST Austria). Financial support from the Agence Nationale de la Recherche (ANR) through the projects ANR-17-CE40-0016, ANR-17-CE40-0007-01, ANR-17-EURE-0002 (J.L.) and from the European Union’s Horizon 2020 research and innovation programme under the Maria Skłodowska-Curie grant agreement No. 665386 (K.M.) is gratefully acknowledged.","article_number":"17","month":"07","intvolume":"        26","date_published":"2023-07-26T00:00:00Z","year":"2023","external_id":{"arxiv":["2206.14708"],"isi":["001032992600001"]},"status":"public","day":"26","tmp":{"image":"/images/cc_by.png","legal_code_url":"https://creativecommons.org/licenses/by/4.0/legalcode","name":"Creative Commons Attribution 4.0 International Public License (CC-BY 4.0)","short":"CC BY (4.0)"},"date_created":"2023-08-22T14:09:47Z","ddc":["510"],"volume":26,"isi":1,"oa_version":"Published Version","doi":"10.1007/s11040-023-09460-x","publication":"Mathematical Physics, Analysis and Geometry","file":[{"success":1,"file_name":"2023_MathPhysics_Lampart.pdf","date_updated":"2023-08-23T10:59:15Z","date_created":"2023-08-23T10:59:15Z","file_id":"14225","content_type":"application/pdf","creator":"dernst","file_size":317026,"access_level":"open_access","checksum":"f0941cc66cb3ed06a12ca4b7e356cfd6","relation":"main_file"}],"author":[{"full_name":"Lampart, Jonas","last_name":"Lampart","first_name":"Jonas"},{"full_name":"Mitrouskas, David Johannes","first_name":"David Johannes","id":"cbddacee-2b11-11eb-a02e-a2e14d04e52d","last_name":"Mitrouskas"},{"full_name":"Mysliwy, Krzysztof","last_name":"Mysliwy","id":"316457FC-F248-11E8-B48F-1D18A9856A87","first_name":"Krzysztof"}],"title":"On the global minimum of the energy–momentum relation for the polaron","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","publisher":"Springer 
Nature","has_accepted_license":"1","article_type":"original","language":[{"iso":"eng"}],"department":[{"_id":"RoSe"}],"article_processing_charge":"Yes (via OA deal)","quality_controlled":"1","_id":"14192","file_date_updated":"2023-08-23T10:59:15Z","type":"journal_article","abstract":[{"lang":"eng","text":"For the Fröhlich model of the large polaron, we prove that the ground state energy as a function of the total momentum has a unique global minimum at momentum zero. This implies the non-existence of a ground state of the translation invariant Fröhlich Hamiltonian and thus excludes the possibility of a localization transition at finite coupling."}],"scopus_import":"1","arxiv":1,"citation":{"ieee":"J. Lampart, D. J. Mitrouskas, and K. Mysliwy, “On the global minimum of the energy–momentum relation for the polaron,” <i>Mathematical Physics, Analysis and Geometry</i>, vol. 26, no. 3. Springer Nature, 2023.","ista":"Lampart J, Mitrouskas DJ, Mysliwy K. 2023. On the global minimum of the energy–momentum relation for the polaron. Mathematical Physics, Analysis and Geometry. 26(3), 17.","short":"J. Lampart, D.J. Mitrouskas, K. Mysliwy, Mathematical Physics, Analysis and Geometry 26 (2023).","ama":"Lampart J, Mitrouskas DJ, Mysliwy K. On the global minimum of the energy–momentum relation for the polaron. <i>Mathematical Physics, Analysis and Geometry</i>. 2023;26(3). doi:<a href=\"https://doi.org/10.1007/s11040-023-09460-x\">10.1007/s11040-023-09460-x</a>","chicago":"Lampart, Jonas, David Johannes Mitrouskas, and Krzysztof Mysliwy. “On the Global Minimum of the Energy–Momentum Relation for the Polaron.” <i>Mathematical Physics, Analysis and Geometry</i>. Springer Nature, 2023. <a href=\"https://doi.org/10.1007/s11040-023-09460-x\">https://doi.org/10.1007/s11040-023-09460-x</a>.","apa":"Lampart, J., Mitrouskas, D. J., &#38; Mysliwy, K. (2023). On the global minimum of the energy–momentum relation for the polaron. <i>Mathematical Physics, Analysis and Geometry</i>. 
Springer Nature. <a href=\"https://doi.org/10.1007/s11040-023-09460-x\">https://doi.org/10.1007/s11040-023-09460-x</a>","mla":"Lampart, Jonas, et al. “On the Global Minimum of the Energy–Momentum Relation for the Polaron.” <i>Mathematical Physics, Analysis and Geometry</i>, vol. 26, no. 3, 17, Springer Nature, 2023, doi:<a href=\"https://doi.org/10.1007/s11040-023-09460-x\">10.1007/s11040-023-09460-x</a>."},"publication_identifier":{"issn":["1385-0172"],"eissn":["1572-9656"]},"publication_status":"published"},{"type":"preprint","arxiv":1,"abstract":[{"lang":"eng","text":"The binding problem in human cognition, concerning how the brain represents and connects objects within a fixed network of neural connections, remains a subject of intense debate. Most machine learning efforts addressing this issue in an unsupervised setting have focused on slot-based methods, which may be limiting due to their discrete nature and difficulty to express uncertainty. Recently, the Complex AutoEncoder was proposed as an alternative that learns continuous and distributed object-centric representations. However, it is only applicable to simple toy data. In this paper, we present Rotating Features, a generalization of complex-valued features to higher dimensions, and a new evaluation procedure for extracting objects from distributed representations. Additionally, we show the applicability of our approach to pre-trained features. Together, these advancements enable us to scale distributed object-centric representations from simple toy to real-world data. We believe this work advances a new paradigm for addressing the binding problem in machine learning and has the potential to inspire further innovation in the field."}],"citation":{"ista":"Löwe S, Lippe P, Locatello F, Welling M. Rotating features for object discovery. arXiv, 2306.00600.","short":"S. Löwe, P. Lippe, F. Locatello, M. Welling, ArXiv (n.d.).","ieee":"S. Löwe, P. Lippe, F. Locatello, and M. 
Welling, “Rotating features for object discovery,” <i>arXiv</i>. .","ama":"Löwe S, Lippe P, Locatello F, Welling M. Rotating features for object discovery. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2306.00600\">10.48550/arXiv.2306.00600</a>","chicago":"Löwe, Sindy, Phillip Lippe, Francesco Locatello, and Max Welling. “Rotating Features for Object Discovery.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2306.00600\">https://doi.org/10.48550/arXiv.2306.00600</a>.","apa":"Löwe, S., Lippe, P., Locatello, F., &#38; Welling, M. (n.d.). Rotating features for object discovery. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2306.00600\">https://doi.org/10.48550/arXiv.2306.00600</a>","mla":"Löwe, Sindy, et al. “Rotating Features for Object Discovery.” <i>ArXiv</i>, 2306.00600, doi:<a href=\"https://doi.org/10.48550/arXiv.2306.00600\">10.48550/arXiv.2306.00600</a>."},"status":"public","day":"01","date_created":"2023-08-22T14:18:00Z","_id":"14207","main_file_link":[{"url":"https://arxiv.org/abs/2306.00600","open_access":"1"}],"doi":"10.48550/arXiv.2306.00600","publication_status":"submitted","publication":"arXiv","oa_version":"Preprint","article_number":"2306.00600","month":"06","date_published":"2023-06-01T00:00:00Z","title":"Rotating features for object discovery","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"first_name":"Sindy","last_name":"Löwe","full_name":"Löwe, Sindy"},{"first_name":"Phillip","last_name":"Lippe","full_name":"Lippe, Phillip"},{"orcid":"0000-0002-4850-0683","last_name":"Locatello","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","full_name":"Locatello, Francesco"},{"full_name":"Welling, 
Max","last_name":"Welling","first_name":"Max"}],"oa":1,"date_updated":"2024-02-12T09:53:44Z","department":[{"_id":"FrLo"}],"external_id":{"arxiv":["2306.00600"]},"article_processing_charge":"No","year":"2023","language":[{"iso":"eng"}]},{"publication_status":"published","type":"conference","abstract":[{"lang":"eng","text":"This paper focuses on over-parameterized deep neural networks (DNNs) with ReLU activation functions and proves that when the data distribution is well-separated, DNNs can achieve Bayes-optimal test error for classification while obtaining (nearly) zero-training error under the lazy training regime. For this purpose, we unify three interrelated concepts of overparameterization, benign overfitting, and the Lipschitz constant of DNNs. Our results indicate that interpolating with smoother functions leads to better generalization. Furthermore, we investigate the special case where interpolating smooth ground-truth functions is performed by DNNs under the Neural Tangent Kernel (NTK) regime for generalization. Our result demonstrates that the generalization error converges to a constant order that only depends on label noise and initialization noise, which theoretically verifies benign overfitting. Our analysis provides a tight lower bound on the normalized margin under non-smooth activation functions, as well as the minimum eigenvalue of NTK under high-dimensional settings, which has its own interest in learning theory."}],"arxiv":1,"extern":"1","citation":{"apa":"Zhu, Z., Liu, F., Chrysos, G. G., Locatello, F., &#38; Cevher, V. (2023). Benign overfitting in deep neural networks under lazy training. In <i>Proceedings of the 40th International Conference on Machine Learning</i> (Vol. 202, pp. 43105–43128). Honolulu, Hawaii, United States: ML Research Press.","mla":"Zhu, Zhenyu, et al. “Benign Overfitting in Deep Neural Networks under Lazy Training.” <i>Proceedings of the 40th International Conference on Machine Learning</i>, vol. 
202, ML Research Press, 2023, pp. 43105–28.","ama":"Zhu Z, Liu F, Chrysos GG, Locatello F, Cevher V. Benign overfitting in deep neural networks under lazy training. In: <i>Proceedings of the 40th International Conference on Machine Learning</i>. Vol 202. ML Research Press; 2023:43105-43128.","ista":"Zhu Z, Liu F, Chrysos GG, Locatello F, Cevher V. 2023. Benign overfitting in deep neural networks under lazy training. Proceedings of the 40th International Conference on Machine Learning. International Conference on Machine Learning, PMLR, vol. 202, 43105–43128.","ieee":"Z. Zhu, F. Liu, G. G. Chrysos, F. Locatello, and V. Cevher, “Benign overfitting in deep neural networks under lazy training,” in <i>Proceedings of the 40th International Conference on Machine Learning</i>, Honolulu, Hawaii, United States, 2023, vol. 202, pp. 43105–43128.","short":"Z. Zhu, F. Liu, G.G. Chrysos, F. Locatello, V. Cevher, in:, Proceedings of the 40th International Conference on Machine Learning, ML Research Press, 2023, pp. 43105–43128.","chicago":"Zhu, Zhenyu, Fanghui Liu, Grigorios G Chrysos, Francesco Locatello, and Volkan Cevher. “Benign Overfitting in Deep Neural Networks under Lazy Training.” In <i>Proceedings of the 40th International Conference on Machine Learning</i>, 202:43105–28. 
ML Research Press, 2023."},"quality_controlled":"1","_id":"14208","department":[{"_id":"FrLo"}],"article_processing_charge":"No","language":[{"iso":"eng"}],"publisher":"ML Research Press","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"last_name":"Zhu","first_name":"Zhenyu","full_name":"Zhu, Zhenyu"},{"full_name":"Liu, Fanghui","last_name":"Liu","first_name":"Fanghui"},{"full_name":"Chrysos, Grigorios G","first_name":"Grigorios G","last_name":"Chrysos"},{"full_name":"Locatello, Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4"},{"full_name":"Cevher, Volkan","last_name":"Cevher","first_name":"Volkan"}],"title":"Benign overfitting in deep neural networks under lazy training","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2305.19377","open_access":"1"}],"publication":"Proceedings of the 40th International Conference on Machine Learning","oa_version":"Preprint","volume":202,"status":"public","day":"30","date_created":"2023-08-22T14:18:18Z","conference":{"location":"Honolulu, Hawaii, United States","end_date":"2023-07-29","start_date":"2023-07-23","name":"International Conference on Machine Learning"},"external_id":{"arxiv":["2305.19377"]},"page":"43105-43128","year":"2023","intvolume":"       202","month":"05","date_published":"2023-05-30T00:00:00Z","alternative_title":["PMLR"],"oa":1,"date_updated":"2023-09-13T08:46:46Z"},{"month":"04","date_published":"2023-04-20T00:00:00Z","article_number":"2304.10253","date_updated":"2023-09-13T08:51:56Z","author":[{"last_name":"Burg","first_name":"Max F.","full_name":"Burg, Max F."},{"first_name":"Florian","last_name":"Wenzel","full_name":"Wenzel, Florian"},{"full_name":"Zietlow, Dominik","last_name":"Zietlow","first_name":"Dominik"},{"full_name":"Horn, Max","first_name":"Max","last_name":"Horn"},{"last_name":"Makansi","first_name":"Osama","full_name":"Makansi, Osama"},{"full_name":"Locatello, 
Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco"},{"last_name":"Russell","first_name":"Chris","full_name":"Russell, Chris"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"A data augmentation perspective on diffusion models and retrieval","oa":1,"article_processing_charge":"No","external_id":{"arxiv":["2304.10253"]},"department":[{"_id":"FrLo"}],"language":[{"iso":"eng"}],"year":"2023","extern":"1","citation":{"apa":"Burg, M. F., Wenzel, F., Zietlow, D., Horn, M., Makansi, O., Locatello, F., &#38; Russell, C. (n.d.). A data augmentation perspective on diffusion models and retrieval. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2304.10253\">https://doi.org/10.48550/arXiv.2304.10253</a>","mla":"Burg, Max F., et al. “A Data Augmentation Perspective on Diffusion Models and Retrieval.” <i>ArXiv</i>, 2304.10253, doi:<a href=\"https://doi.org/10.48550/arXiv.2304.10253\">10.48550/arXiv.2304.10253</a>.","ama":"Burg MF, Wenzel F, Zietlow D, et al. A data augmentation perspective on diffusion models and retrieval. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2304.10253\">10.48550/arXiv.2304.10253</a>","ieee":"M. F. Burg <i>et al.</i>, “A data augmentation perspective on diffusion models and retrieval,” <i>arXiv</i>. .","short":"M.F. Burg, F. Wenzel, D. Zietlow, M. Horn, O. Makansi, F. Locatello, C. Russell, ArXiv (n.d.).","ista":"Burg MF, Wenzel F, Zietlow D, Horn M, Makansi O, Locatello F, Russell C. A data augmentation perspective on diffusion models and retrieval. arXiv, 2304.10253.","chicago":"Burg, Max F., Florian Wenzel, Dominik Zietlow, Max Horn, Osama Makansi, Francesco Locatello, and Chris Russell. “A Data Augmentation Perspective on Diffusion Models and Retrieval.” <i>ArXiv</i>, n.d. 
<a href=\"https://doi.org/10.48550/arXiv.2304.10253\">https://doi.org/10.48550/arXiv.2304.10253</a>."},"type":"preprint","abstract":[{"text":"Diffusion models excel at generating photorealistic images from text-queries. Naturally, many approaches have been proposed to use these generative abilities to augment training datasets for downstream tasks, such as classification. However, diffusion models are themselves trained on large noisily supervised, but nonetheless, annotated datasets. It is an open question whether the generalization capabilities of diffusion models beyond using the additional data of the pre-training process for augmentation lead to improved downstream performance. We perform a systematic evaluation of existing methods to generate images from diffusion models and study new extensions to assess their benefit for data augmentation. While we find that personalizing diffusion models towards the target data outperforms simpler prompting strategies, we also show that using the training data of the diffusion model alone, via a simple nearest neighbor retrieval procedure, leads to even stronger downstream performance. 
Overall, our study probes the limitations of diffusion models for data augmentation but also highlights its potential in generating new training data to improve performance on simple downstream vision tasks.","lang":"eng"}],"arxiv":1,"date_created":"2023-08-22T14:18:43Z","_id":"14209","day":"20","status":"public","publication_status":"submitted","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2304.10253","open_access":"1"}],"doi":"10.48550/arXiv.2304.10253","publication":"arXiv","oa_version":"Preprint"},{"department":[{"_id":"FrLo"}],"external_id":{"arxiv":["2304.07939"]},"article_processing_charge":"No","year":"2023","language":[{"iso":"eng"}],"article_number":"2304.07939","date_published":"2023-04-17T00:00:00Z","month":"04","title":"Leveraging sparse and shared feature activations for disentangled representation learning","author":[{"full_name":"Fumero, Marco","last_name":"Fumero","first_name":"Marco"},{"first_name":"Florian","last_name":"Wenzel","full_name":"Wenzel, Florian"},{"full_name":"Zancato, Luca","last_name":"Zancato","first_name":"Luca"},{"full_name":"Achille, Alessandro","first_name":"Alessandro","last_name":"Achille"},{"full_name":"Rodolà, Emanuele","last_name":"Rodolà","first_name":"Emanuele"},{"first_name":"Stefano","last_name":"Soatto","full_name":"Soatto, Stefano"},{"last_name":"Schölkopf","first_name":"Bernhard","full_name":"Schölkopf, Bernhard"},{"first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","last_name":"Locatello","full_name":"Locatello, Francesco"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"date_updated":"2024-02-12T09:55:48Z","publication":"arXiv","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2304.07939","open_access":"1"}],"publication_status":"submitted","doi":"10.48550/arXiv.2304.07939","oa_version":"Preprint","arxiv":1,"abstract":[{"text":"Recovering the latent factors of variation of high dimensional data has so far focused on simple synthetic 
settings. Mostly building on unsupervised and weakly-supervised objectives, prior work missed out on the positive implications for representation learning on real world data. In this work, we propose to leverage knowledge extracted from a diversified set of supervised tasks to learn a common disentangled representation. Assuming each supervised task only depends on an unknown subset of the factors of variation, we disentangle the feature space of a supervised multi-task model, with features activating sparsely across different tasks and information being shared as appropriate. Importantly, we never directly observe the factors of variations but establish that access to multiple tasks is sufficient for identifiability under sufficiency and minimality assumptions. We validate our approach on six real world distribution shift benchmarks, and different data modalities (images, text), demonstrating how disentangled representations can be transferred to real settings.","lang":"eng"}],"type":"preprint","citation":{"chicago":"Fumero, Marco, Florian Wenzel, Luca Zancato, Alessandro Achille, Emanuele Rodolà, Stefano Soatto, Bernhard Schölkopf, and Francesco Locatello. “Leveraging Sparse and Shared Feature Activations for Disentangled Representation Learning.” <i>ArXiv</i>, n.d. <a href=\"https://doi.org/10.48550/arXiv.2304.07939\">https://doi.org/10.48550/arXiv.2304.07939</a>.","ama":"Fumero M, Wenzel F, Zancato L, et al. Leveraging sparse and shared feature activations for disentangled representation learning. <i>arXiv</i>. doi:<a href=\"https://doi.org/10.48550/arXiv.2304.07939\">10.48550/arXiv.2304.07939</a>","short":"M. Fumero, F. Wenzel, L. Zancato, A. Achille, E. Rodolà, S. Soatto, B. Schölkopf, F. Locatello, ArXiv (n.d.).","ieee":"M. Fumero <i>et al.</i>, “Leveraging sparse and shared feature activations for disentangled representation learning,” <i>arXiv</i>. .","ista":"Fumero M, Wenzel F, Zancato L, Achille A, Rodolà E, Soatto S, Schölkopf B, Locatello F. 
Leveraging sparse and shared feature activations for disentangled representation learning. arXiv, 2304.07939.","mla":"Fumero, Marco, et al. “Leveraging Sparse and Shared Feature Activations for Disentangled Representation Learning.” <i>ArXiv</i>, 2304.07939, doi:<a href=\"https://doi.org/10.48550/arXiv.2304.07939\">10.48550/arXiv.2304.07939</a>.","apa":"Fumero, M., Wenzel, F., Zancato, L., Achille, A., Rodolà, E., Soatto, S., … Locatello, F. (n.d.). Leveraging sparse and shared feature activations for disentangled representation learning. <i>arXiv</i>. <a href=\"https://doi.org/10.48550/arXiv.2304.07939\">https://doi.org/10.48550/arXiv.2304.07939</a>"},"day":"17","status":"public","date_created":"2023-08-22T14:19:03Z","_id":"14210"},{"oa_version":"Preprint","main_file_link":[{"url":"https://arxiv.org/abs/2304.03265","open_access":"1"}],"publication_status":"published","publication":"2nd Conference on Causal Learning and Reasoning","_id":"14211","date_created":"2023-08-22T14:19:21Z","status":"public","day":"01","quality_controlled":"1","extern":"1","citation":{"apa":"Montagna, F., Noceti, N., Rosasco, L., Zhang, K., &#38; Locatello, F. (2023). Causal discovery with score matching on additive models with arbitrary noise. In <i>2nd Conference on Causal Learning and Reasoning</i>. Tübingen, Germany.","mla":"Montagna, Francesco, et al. “Causal Discovery with Score Matching on Additive Models with Arbitrary Noise.” <i>2nd Conference on Causal Learning and Reasoning</i>, 2023.","ista":"Montagna F, Noceti N, Rosasco L, Zhang K, Locatello F. 2023. Causal discovery with score matching on additive models with arbitrary noise. 2nd Conference on Causal Learning and Reasoning. CLeaR: Conference on Causal Learning and Reasoning.","short":"F. Montagna, N. Noceti, L. Rosasco, K. Zhang, F. Locatello, in:, 2nd Conference on Causal Learning and Reasoning, 2023.","ieee":"F. Montagna, N. Noceti, L. Rosasco, K. Zhang, and F. 
Locatello, “Causal discovery with score matching on additive models with arbitrary noise,” in <i>2nd Conference on Causal Learning and Reasoning</i>, Tübingen, Germany, 2023.","ama":"Montagna F, Noceti N, Rosasco L, Zhang K, Locatello F. Causal discovery with score matching on additive models with arbitrary noise. In: <i>2nd Conference on Causal Learning and Reasoning</i>. ; 2023.","chicago":"Montagna, Francesco, Nicoletta Noceti, Lorenzo Rosasco, Kun Zhang, and Francesco Locatello. “Causal Discovery with Score Matching on Additive Models with Arbitrary Noise.” In <i>2nd Conference on Causal Learning and Reasoning</i>, 2023."},"type":"conference","arxiv":1,"abstract":[{"lang":"eng","text":"Causal discovery methods are intrinsically constrained by the set of assumptions needed to ensure structure identifiability. Moreover additional restrictions are often imposed in order to simplify the inference task: this is the case for the Gaussian noise assumption on additive non-linear models, which is common to many causal discovery approaches. In this paper we show the shortcomings of inference under this hypothesis, analyzing the risk of edge inversion under violation of Gaussianity of the noise terms. Then, we propose a novel method for inferring the topological ordering of the variables in the causal graph, from data generated according to an additive non-linear model with a generic noise distribution. 
This leads to NoGAM (Not only Gaussian Additive noise Models), a causal discovery algorithm with a minimal set of assumptions and state of the art performance, experimentally benchmarked on synthetic data."}],"scopus_import":"1","language":[{"iso":"eng"}],"year":"2023","article_processing_charge":"No","external_id":{"arxiv":["2304.03265"]},"conference":{"location":"Tübingen, Germany","start_date":"2023-04-11","end_date":"2023-04-14","name":"CLeaR: Conference on Causal Learning and Reasoning"},"department":[{"_id":"FrLo"}],"date_updated":"2023-09-13T09:00:31Z","oa":1,"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"full_name":"Montagna, Francesco","last_name":"Montagna","first_name":"Francesco"},{"last_name":"Noceti","first_name":"Nicoletta","full_name":"Noceti, Nicoletta"},{"full_name":"Rosasco, Lorenzo","last_name":"Rosasco","first_name":"Lorenzo"},{"full_name":"Zhang, Kun","first_name":"Kun","last_name":"Zhang"},{"full_name":"Locatello, Francesco","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","last_name":"Locatello"}],"title":"Causal discovery with score matching on additive models with arbitrary noise","month":"04","date_published":"2023-04-01T00:00:00Z"},{"date_updated":"2023-09-13T09:03:24Z","oa":1,"author":[{"full_name":"Montagna, Francesco","last_name":"Montagna","first_name":"Francesco"},{"last_name":"Noceti","first_name":"Nicoletta","full_name":"Noceti, Nicoletta"},{"last_name":"Rosasco","first_name":"Lorenzo","full_name":"Rosasco, Lorenzo"},{"full_name":"Zhang, Kun","first_name":"Kun","last_name":"Zhang"},{"full_name":"Locatello, Francesco","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","orcid":"0000-0002-4850-0683","last_name":"Locatello"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","title":"Scalable causal discovery with score 
matching","month":"04","date_published":"2023-04-01T00:00:00Z","language":[{"iso":"eng"}],"year":"2023","external_id":{"arxiv":["2304.03382"]},"article_processing_charge":"No","department":[{"_id":"FrLo"}],"conference":{"location":"Tübingen, Germany","name":"CLeaR: Conference on Causal Learning and Reasoning","end_date":"2023-04-14","start_date":"2023-04-11"},"date_created":"2023-08-22T14:19:40Z","_id":"14212","status":"public","quality_controlled":"1","day":"01","extern":"1","citation":{"short":"F. Montagna, N. Noceti, L. Rosasco, K. Zhang, F. Locatello, in:, 2nd Conference on Causal Learning and Reasoning, 2023.","ieee":"F. Montagna, N. Noceti, L. Rosasco, K. Zhang, and F. Locatello, “Scalable causal discovery with score matching,” in <i>2nd Conference on Causal Learning and Reasoning</i>, Tübingen, Germany, 2023.","ista":"Montagna F, Noceti N, Rosasco L, Zhang K, Locatello F. 2023. Scalable causal discovery with score matching. 2nd Conference on Causal Learning and Reasoning. CLeaR: Conference on Causal Learning and Reasoning.","ama":"Montagna F, Noceti N, Rosasco L, Zhang K, Locatello F. Scalable causal discovery with score matching. In: <i>2nd Conference on Causal Learning and Reasoning</i>. ; 2023.","chicago":"Montagna, Francesco, Nicoletta Noceti, Lorenzo Rosasco, Kun Zhang, and Francesco Locatello. “Scalable Causal Discovery with Score Matching.” In <i>2nd Conference on Causal Learning and Reasoning</i>, 2023.","apa":"Montagna, F., Noceti, N., Rosasco, L., Zhang, K., &#38; Locatello, F. (2023). Scalable causal discovery with score matching. In <i>2nd Conference on Causal Learning and Reasoning</i>. Tübingen, Germany.","mla":"Montagna, Francesco, et al. 
“Scalable Causal Discovery with Score Matching.” <i>2nd Conference on Causal Learning and Reasoning</i>, 2023."},"type":"conference","abstract":[{"lang":"eng","text":"This paper demonstrates how to discover the whole causal graph from the second derivative of the log-likelihood in observational non-linear additive Gaussian noise models. Leveraging scalable machine learning approaches to approximate the score function ∇logp(X), we extend the work of Rolland et al. (2022) that only recovers the topological order from the score and requires an expensive pruning step removing spurious edges among those admitted by the ordering. Our analysis leads to DAS (acronym for Discovery At Scale), a practical algorithm that reduces the complexity of the pruning by a factor proportional to the graph size. In practice, DAS achieves competitive accuracy with current state-of-the-art while being over an order of magnitude faster. Overall, our approach enables principled and scalable causal discovery, significantly lowering the compute bar."}],"scopus_import":"1","arxiv":1,"oa_version":"Preprint","publication_status":"published","main_file_link":[{"url":"https://arxiv.org/abs/2304.03382","open_access":"1"}],"publication":"2nd Conference on Causal Learning and Reasoning"},{"month":"04","date_published":"2023-04-12T00:00:00Z","oa":1,"author":[{"full_name":"Liu, Yuejiang","first_name":"Yuejiang","last_name":"Liu"},{"first_name":"Alexandre","last_name":"Alahi","full_name":"Alahi, Alexandre"},{"full_name":"Russell, Chris","last_name":"Russell","first_name":"Chris"},{"last_name":"Horn","first_name":"Max","full_name":"Horn, Max"},{"full_name":"Zietlow, Dominik","first_name":"Dominik","last_name":"Zietlow"},{"full_name":"Schölkopf, Bernhard","last_name":"Schölkopf","first_name":"Bernhard"},{"orcid":"0000-0002-4850-0683","last_name":"Locatello","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","full_name":"Locatello, Francesco"}],"title":"Causal triplet: An open challenge 
for intervention-centric causal representation learning","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-09-13T09:23:08Z","department":[{"_id":"FrLo"}],"conference":{"start_date":"2023-04-11","end_date":"2023-04-14","name":"CLeaR: Conference on Causal Learning and Reasoning","location":"Tübingen, Germany"},"external_id":{"arxiv":["2301.05169"]},"article_processing_charge":"No","year":"2023","language":[{"iso":"eng"}],"type":"conference","arxiv":1,"abstract":[{"text":"Recent years have seen a surge of interest in learning high-level causal representations from low-level image pairs under interventions. Yet, existing efforts are largely limited to simple synthetic settings that are far away from real-world problems. In this paper, we present Causal Triplet, a causal representation learning benchmark featuring not only visually more complex scenes, but also two crucial desiderata commonly overlooked in previous works: (i) an actionable counterfactual setting, where only certain object-level variables allow for counterfactual observations whereas others do not; (ii) an interventional downstream task with an emphasis on out-of-distribution robustness from the independent causal mechanisms principle. Through extensive experiments, we find that models built with the knowledge of disentangled or object-centric representations significantly outperform their distributed counterparts. However, recent causal representation learning methods still struggle to identify such latent structures, indicating substantial challenges and opportunities for future work.","lang":"eng"}],"citation":{"chicago":"Liu, Yuejiang, Alexandre Alahi, Chris Russell, Max Horn, Dominik Zietlow, Bernhard Schölkopf, and Francesco Locatello. “Causal Triplet: An Open Challenge for Intervention-Centric Causal Representation Learning.” In <i>2nd Conference on Causal Learning and Reasoning</i>, 2023.","ista":"Liu Y, Alahi A, Russell C, Horn M, Zietlow D, Schölkopf B, Locatello F. 2023. 
Causal triplet: An open challenge for intervention-centric causal representation learning. 2nd Conference on Causal Learning and Reasoning. CLeaR: Conference on Causal Learning and Reasoning.","ieee":"Y. Liu <i>et al.</i>, “Causal triplet: An open challenge for intervention-centric causal representation learning,” in <i>2nd Conference on Causal Learning and Reasoning</i>, Tübingen, Germany, 2023.","short":"Y. Liu, A. Alahi, C. Russell, M. Horn, D. Zietlow, B. Schölkopf, F. Locatello, in:, 2nd Conference on Causal Learning and Reasoning, 2023.","ama":"Liu Y, Alahi A, Russell C, et al. Causal triplet: An open challenge for intervention-centric causal representation learning. In: <i>2nd Conference on Causal Learning and Reasoning</i>. ; 2023.","mla":"Liu, Yuejiang, et al. “Causal Triplet: An Open Challenge for Intervention-Centric Causal Representation Learning.” <i>2nd Conference on Causal Learning and Reasoning</i>, 2023.","apa":"Liu, Y., Alahi, A., Russell, C., Horn, M., Zietlow, D., Schölkopf, B., &#38; Locatello, F. (2023). Causal triplet: An open challenge for intervention-centric causal representation learning. In <i>2nd Conference on Causal Learning and Reasoning</i>. Tübingen, Germany."},"extern":"1","day":"12","status":"public","quality_controlled":"1","_id":"14214","date_created":"2023-08-22T14:20:18Z","publication_status":"published","main_file_link":[{"url":"https://doi.org/10.48550/arXiv.2301.05169","open_access":"1"}],"publication":"2nd Conference on Causal Learning and Reasoning","oa_version":"Preprint"},{"extern":"1","citation":{"mla":"Moschella, Luca, et al. “Relative Representations Enable Zero-Shot Latent Space Communication.” <i>The 11th International Conference on Learning Representations</i>, 2023.","apa":"Moschella, L., Maiorca, V., Fumero, M., Norelli, A., Locatello, F., &#38; Rodolà, E. (2023). Relative representations enable zero-shot latent space communication. In <i>The 11th International Conference on Learning Representations</i>. 
Kigali, Rwanda.","chicago":"Moschella, Luca, Valentino Maiorca, Marco Fumero, Antonio Norelli, Francesco Locatello, and Emanuele Rodolà. “Relative Representations Enable Zero-Shot Latent Space Communication.” In <i>The 11th International Conference on Learning Representations</i>, 2023.","short":"L. Moschella, V. Maiorca, M. Fumero, A. Norelli, F. Locatello, E. Rodolà, in:, The 11th International Conference on Learning Representations, 2023.","ieee":"L. Moschella, V. Maiorca, M. Fumero, A. Norelli, F. Locatello, and E. Rodolà, “Relative representations enable zero-shot latent space communication,” in <i>The 11th International Conference on Learning Representations</i>, Kigali, Rwanda, 2023.","ista":"Moschella L, Maiorca V, Fumero M, Norelli A, Locatello F, Rodolà E. 2023. Relative representations enable zero-shot latent space communication. The 11th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","ama":"Moschella L, Maiorca V, Fumero M, Norelli A, Locatello F, Rodolà E. Relative representations enable zero-shot latent space communication. In: <i>The 11th International Conference on Learning Representations</i>. ; 2023."},"type":"conference","abstract":[{"lang":"eng","text":"Neural networks embed the geometric structure of a data manifold lying in a high-dimensional space into latent representations. Ideally, the distribution of the data points in the latent space should depend only on the task, the data, the loss, and other architecture-specific constraints. However, factors such as the random weights initialization, training hyperparameters, or other sources of randomness in the training phase may induce incoherent latent spaces that hinder any form of reuse. Nevertheless, we empirically observe that, under the same data and modeling choices, the angles between the encodings within distinct latent spaces do not change. 
In this work, we propose the latent similarity between each sample and a fixed set of anchors as an alternative data representation, demonstrating that it can enforce the desired invariances without any additional training. We show how neural architectures can leverage these relative representations to guarantee, in practice, invariance to latent isometries and rescalings, effectively enabling latent space communication: from zero-shot model stitching to latent space comparison between diverse settings. We extensively validate the generalization capability of our approach on different datasets, spanning various modalities (images, text, graphs), tasks (e.g., classification, reconstruction) and architectures (e.g., CNNs, GCNs, transformers)."}],"arxiv":1,"date_created":"2023-08-22T14:22:20Z","_id":"14217","status":"public","day":"01","quality_controlled":"1","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2209.15430"}],"publication_status":"published","publication":"The 11th International Conference on Learning Representations","oa_version":"Preprint","month":"05","date_published":"2023-05-01T00:00:00Z","date_updated":"2023-09-13T09:44:26Z","author":[{"full_name":"Moschella, Luca","first_name":"Luca","last_name":"Moschella"},{"full_name":"Maiorca, Valentino","first_name":"Valentino","last_name":"Maiorca"},{"full_name":"Fumero, Marco","first_name":"Marco","last_name":"Fumero"},{"full_name":"Norelli, Antonio","last_name":"Norelli","first_name":"Antonio"},{"full_name":"Locatello, Francesco","first_name":"Francesco","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","last_name":"Locatello","orcid":"0000-0002-4850-0683"},{"full_name":"Rodolà, Emanuele","first_name":"Emanuele","last_name":"Rodolà"}],"title":"Relative representations enable zero-shot latent space 
communication","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","oa":1,"article_processing_charge":"No","external_id":{"arxiv":["2209.15430"]},"department":[{"_id":"FrLo"}],"conference":{"location":"Kigali, Rwanda","name":"ICLR: International Conference on Learning Representations","end_date":"2023-05-05","start_date":"2023-05-01"},"language":[{"iso":"eng"}],"year":"2023"},{"title":"Bridging the gap to real-world object-centric learning","oa":1,"author":[{"full_name":"Seitzer, Maximilian","last_name":"Seitzer","first_name":"Maximilian"},{"full_name":"Horn, Max","first_name":"Max","last_name":"Horn"},{"first_name":"Andrii","last_name":"Zadaianchuk","full_name":"Zadaianchuk, Andrii"},{"full_name":"Zietlow, Dominik","last_name":"Zietlow","first_name":"Dominik"},{"full_name":"Xiao, Tianjun","first_name":"Tianjun","last_name":"Xiao"},{"first_name":"Carl-Johann","last_name":"Simon-Gabriel","full_name":"Simon-Gabriel, Carl-Johann"},{"first_name":"Tong","last_name":"He","full_name":"He, Tong"},{"full_name":"Zhang, Zheng","first_name":"Zheng","last_name":"Zhang"},{"last_name":"Schölkopf","first_name":"Bernhard","full_name":"Schölkopf, Bernhard"},{"full_name":"Brox, Thomas","last_name":"Brox","first_name":"Thomas"},{"id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683","full_name":"Locatello, Francesco"}],"user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","date_updated":"2023-09-13T11:37:03Z","month":"05","date_published":"2023-05-10T00:00:00Z","year":"2023","language":[{"iso":"eng"}],"department":[{"_id":"FrLo"}],"conference":{"location":"Kigali, Rwanda","name":"ICLR: International Conference on Learning 
Representations","start_date":"2023-05-01","end_date":"2023-05-05"},"article_processing_charge":"No","external_id":{"arxiv":["2209.14860"]},"status":"public","quality_controlled":"1","day":"10","_id":"14218","date_created":"2023-08-22T14:22:41Z","type":"conference","abstract":[{"text":"Humans naturally decompose their environment into entities at the appropriate level of abstraction to act in the world. Allowing machine learning algorithms to derive this decomposition in an unsupervised way has become an important line of research. However, current methods are restricted to simulated data or require additional information in the form of motion or depth in order to successfully discover objects. In this work, we overcome this limitation by showing that reconstructing features from models trained in a self-supervised manner is a sufficient training signal for object-centric representations to arise in a fully unsupervised way. Our approach, DINOSAUR, significantly out-performs existing image-based object-centric learning models on simulated data and is the first unsupervised object-centric model that scales to real-world datasets such as COCO and PASCAL VOC. DINOSAUR is conceptually simple and shows competitive performance compared to more involved pipelines from the computer vision literature.","lang":"eng"}],"arxiv":1,"citation":{"chicago":"Seitzer, Maximilian, Max Horn, Andrii Zadaianchuk, Dominik Zietlow, Tianjun Xiao, Carl-Johann Simon-Gabriel, Tong He, et al. “Bridging the Gap to Real-World Object-Centric Learning.” In <i>The 11th International Conference on Learning Representations</i>, 2023.","ieee":"M. Seitzer <i>et al.</i>, “Bridging the gap to real-world object-centric learning,” in <i>The 11th International Conference on Learning Representations</i>, Kigali, Rwanda, 2023.","ista":"Seitzer M, Horn M, Zadaianchuk A, Zietlow D, Xiao T, Simon-Gabriel C-J, He T, Zhang Z, Schölkopf B, Brox T, Locatello F. 2023. 
Bridging the gap to real-world object-centric learning. The 11th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","short":"M. Seitzer, M. Horn, A. Zadaianchuk, D. Zietlow, T. Xiao, C.-J. Simon-Gabriel, T. He, Z. Zhang, B. Schölkopf, T. Brox, F. Locatello, in:, The 11th International Conference on Learning Representations, 2023.","ama":"Seitzer M, Horn M, Zadaianchuk A, et al. Bridging the gap to real-world object-centric learning. In: <i>The 11th International Conference on Learning Representations</i>. ; 2023.","mla":"Seitzer, Maximilian, et al. “Bridging the Gap to Real-World Object-Centric Learning.” <i>The 11th International Conference on Learning Representations</i>, 2023.","apa":"Seitzer, M., Horn, M., Zadaianchuk, A., Zietlow, D., Xiao, T., Simon-Gabriel, C.-J., … Locatello, F. (2023). Bridging the gap to real-world object-centric learning. In <i>The 11th International Conference on Learning Representations</i>. Kigali, Rwanda."},"extern":"1","oa_version":"Preprint","main_file_link":[{"url":"https://arxiv.org/abs/2209.14860","open_access":"1"}],"publication_status":"published","publication":"The 11th International Conference on Learning Representations"},{"publication":"The 11th International Conference on Learning Representations","publication_status":"published","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2207.05027"}],"oa_version":"Preprint","extern":"1","citation":{"apa":"Zadaianchuk, A., Kleindessner, M., Zhu, Y., Locatello, F., &#38; Brox, T. (2023). Unsupervised semantic segmentation with self-supervised object-centric representations. In <i>The 11th International Conference on Learning Representations</i>. Kigali, Rwanda.","mla":"Zadaianchuk, Andrii, et al. 
“Unsupervised Semantic Segmentation with Self-Supervised Object-Centric Representations.” <i>The 11th International Conference on Learning Representations</i>, 2023.","ama":"Zadaianchuk A, Kleindessner M, Zhu Y, Locatello F, Brox T. Unsupervised semantic segmentation with self-supervised object-centric representations. In: <i>The 11th International Conference on Learning Representations</i>. ; 2023.","short":"A. Zadaianchuk, M. Kleindessner, Y. Zhu, F. Locatello, T. Brox, in:, The 11th International Conference on Learning Representations, 2023.","ieee":"A. Zadaianchuk, M. Kleindessner, Y. Zhu, F. Locatello, and T. Brox, “Unsupervised semantic segmentation with self-supervised object-centric representations,” in <i>The 11th International Conference on Learning Representations</i>, Kigali, Rwanda, 2023.","ista":"Zadaianchuk A, Kleindessner M, Zhu Y, Locatello F, Brox T. 2023. Unsupervised semantic segmentation with self-supervised object-centric representations. The 11th International Conference on Learning Representations. ICLR: International Conference on Learning Representations.","chicago":"Zadaianchuk, Andrii, Matthaeus Kleindessner, Yi Zhu, Francesco Locatello, and Thomas Brox. “Unsupervised Semantic Segmentation with Self-Supervised Object-Centric Representations.” In <i>The 11th International Conference on Learning Representations</i>, 2023."},"arxiv":1,"abstract":[{"lang":"eng","text":"In this paper, we show that recent advances in self-supervised feature\r\nlearning enable unsupervised object discovery and semantic segmentation with a\r\nperformance that matches the state of the field on supervised semantic\r\nsegmentation 10 years ago. We propose a methodology based on unsupervised\r\nsaliency masks and self-supervised feature clustering to kickstart object\r\ndiscovery followed by training a semantic segmentation network on pseudo-labels\r\nto bootstrap the system on images with multiple objects. 
We present results on\r\nPASCAL VOC that go far beyond the current state of the art (50.0 mIoU), and we\r\nreport for the first time results on MS COCO for the whole set of 81 classes:\r\nour method discovers 34 categories with more than $20\\%$ IoU, while obtaining\r\nan average IoU of 19.6 for all 81 categories."}],"type":"conference","_id":"14219","date_created":"2023-08-22T14:22:58Z","status":"public","day":"01","quality_controlled":"1","article_processing_charge":"No","external_id":{"arxiv":["2207.05027"]},"department":[{"_id":"FrLo"}],"conference":{"location":"Kigali, Rwanda","name":"ICLR: International Conference on Learning Representations","start_date":"2023-05-01","end_date":"2023-05-05"},"language":[{"iso":"eng"}],"year":"2023","date_published":"2023-05-01T00:00:00Z","month":"05","date_updated":"2023-09-13T11:25:43Z","oa":1,"title":"Unsupervised semantic segmentation with self-supervised object-centric representations","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","author":[{"first_name":"Andrii","last_name":"Zadaianchuk","full_name":"Zadaianchuk, Andrii"},{"last_name":"Kleindessner","first_name":"Matthaeus","full_name":"Kleindessner, Matthaeus"},{"full_name":"Zhu, Yi","first_name":"Yi","last_name":"Zhu"},{"full_name":"Locatello, Francesco","orcid":"0000-0002-4850-0683","last_name":"Locatello","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco"},{"first_name":"Thomas","last_name":"Brox","full_name":"Brox, Thomas"}]},{"citation":{"chicago":"Tangemann, Matthias, Steffen Schneider, Julius von Kügelgen, Francesco Locatello, Peter Gehler, Thomas Brox, Matthias Kümmerer, Matthias Bethge, and Bernhard Schölkopf. “Unsupervised Object Learning via Common Fate.” In <i>2nd Conference on Causal Learning and Reasoning</i>, 2023.","ama":"Tangemann M, Schneider S, Kügelgen J von, et al. Unsupervised object learning via common fate. In: <i>2nd Conference on Causal Learning and Reasoning</i>. ; 2023.","ieee":"M. 
Tangemann <i>et al.</i>, “Unsupervised object learning via common fate,” in <i>2nd Conference on Causal Learning and Reasoning</i>, Tübingen, Germany, 2023.","short":"M. Tangemann, S. Schneider, J. von Kügelgen, F. Locatello, P. Gehler, T. Brox, M. Kümmerer, M. Bethge, B. Schölkopf, in:, 2nd Conference on Causal Learning and Reasoning, 2023.","ista":"Tangemann M, Schneider S, Kügelgen J von, Locatello F, Gehler P, Brox T, Kümmerer M, Bethge M, Schölkopf B. 2023. Unsupervised object learning via common fate. 2nd Conference on Causal Learning and Reasoning. CLeaR: Conference on Causal Learning and Reasoning, 2110.06562.","mla":"Tangemann, Matthias, et al. “Unsupervised Object Learning via Common Fate.” <i>2nd Conference on Causal Learning and Reasoning</i>, 2110.06562, 2023.","apa":"Tangemann, M., Schneider, S., Kügelgen, J. von, Locatello, F., Gehler, P., Brox, T., … Schölkopf, B. (2023). Unsupervised object learning via common fate. In <i>2nd Conference on Causal Learning and Reasoning</i>. Tübingen, Germany."},"extern":"1","type":"conference","arxiv":1,"abstract":[{"text":"Learning generative object models from unlabelled videos is a long standing problem and required for causal scene modeling. We decompose this problem into three easier subtasks, and provide candidate solutions for each of them. Inspired by the Common Fate Principle of Gestalt Psychology, we first extract (noisy) masks of moving objects via unsupervised motion segmentation. Second, generative models are trained on the masks of the background and the moving objects, respectively. Third, background and foreground models are combined in a conditional \"dead leaves\" scene model to sample novel scene configurations where occlusions and depth layering arise naturally. To evaluate the individual stages, we introduce the Fishbowl dataset positioned between complex real-world scenes and common object-centric benchmarks of simplistic objects. 
We show that our approach allows learning generative models that generalize beyond the occlusions present in the input videos, and represent scenes in a modular fashion that allows sampling plausible scenes outside the training distribution by permitting, for instance, object numbers or densities not observed in the training set.","lang":"eng"}],"_id":"14222","date_created":"2023-08-22T14:23:54Z","day":"15","status":"public","quality_controlled":"1","main_file_link":[{"open_access":"1","url":"https://arxiv.org/abs/2110.06562"}],"publication_status":"published","publication":"2nd Conference on Causal Learning and Reasoning","oa_version":"Preprint","month":"04","date_published":"2023-04-15T00:00:00Z","article_number":"2110.06562","date_updated":"2023-09-13T11:31:14Z","oa":1,"author":[{"full_name":"Tangemann, Matthias","last_name":"Tangemann","first_name":"Matthias"},{"last_name":"Schneider","first_name":"Steffen","full_name":"Schneider, Steffen"},{"last_name":"Kügelgen","first_name":"Julius von","full_name":"Kügelgen, Julius von"},{"full_name":"Locatello, Francesco","last_name":"Locatello","orcid":"0000-0002-4850-0683","id":"26cfd52f-2483-11ee-8040-88983bcc06d4","first_name":"Francesco"},{"full_name":"Gehler, Peter","first_name":"Peter","last_name":"Gehler"},{"full_name":"Brox, Thomas","last_name":"Brox","first_name":"Thomas"},{"full_name":"Kümmerer, Matthias","last_name":"Kümmerer","first_name":"Matthias"},{"first_name":"Matthias","last_name":"Bethge","full_name":"Bethge, Matthias"},{"last_name":"Schölkopf","first_name":"Bernhard","full_name":"Schölkopf, Bernhard"}],"title":"Unsupervised object learning via common fate","user_id":"2DF688A6-F248-11E8-B48F-1D18A9856A87","article_processing_charge":"No","external_id":{"arxiv":["2110.06562"]},"department":[{"_id":"FrLo"}],"conference":{"name":"CLeaR: Conference on Causal Learning and Reasoning","start_date":"2023-04-11","end_date":"2023-04-14","location":"Tübingen, 
Germany"},"language":[{"iso":"eng"}],"year":"2023"},{"file":[{"file_name":"documents-export-2023-08-24.zip","date_created":"2023-08-24T13:02:49Z","date_updated":"2024-02-26T23:30:03Z","content_type":"application/x-zip-compressed","file_id":"14227","file_size":15501411,"creator":"cchlebak","embargo_to":"open_access","relation":"source_file","access_level":"closed","checksum":"453caf851d75c3478c10ed09bd242a91"},{"date_created":"2023-08-24T13:03:42Z","date_updated":"2024-02-26T23:30:03Z","file_name":"thesis_pdf_a.pdf","embargo":"2024-02-25","checksum":"7349d29963d6695e555e171748648d9a","relation":"main_file","access_level":"open_access","creator":"cchlebak","file_size":6854783,"content_type":"application/pdf","file_id":"14228"}],"author":[{"full_name":"Stephenson, Elizabeth R","last_name":"Stephenson","orcid":"0000-0002-6862-208X","first_name":"Elizabeth R","id":"2D04F932-F248-11E8-B48F-1D18A9856A87"}],"title":"Generalizing medial axes with homology switches","user_id":"8b945eb4-e2f2-11eb-945a-df72226e66a9","publisher":"Institute of Science and Technology Austria","has_accepted_license":"1","language":[{"iso":"eng"}],"department":[{"_id":"GradSch"},{"_id":"HeEd"}],"article_processing_charge":"No","_id":"14226","type":"dissertation","file_date_updated":"2024-02-26T23:30:03Z","abstract":[{"lang":"eng","text":"We introduce the notion of a Faustian interchange in a 1-parameter family of smooth\r\nfunctions to generalize the medial axis to critical points of index larger than 0.\r\nWe construct and implement a general purpose algorithm for approximating such\r\ngeneralized medial axes."}],"citation":{"chicago":"Stephenson, Elizabeth R. “Generalizing Medial Axes with Homology Switches.” Institute of Science and Technology Austria, 2023. <a href=\"https://doi.org/10.15479/at:ista:14226\">https://doi.org/10.15479/at:ista:14226</a>.","ama":"Stephenson ER. Generalizing medial axes with homology switches. 2023. 
doi:<a href=\"https://doi.org/10.15479/at:ista:14226\">10.15479/at:ista:14226</a>","ieee":"E. R. Stephenson, “Generalizing medial axes with homology switches,” Institute of Science and Technology Austria, 2023.","short":"E.R. Stephenson, Generalizing Medial Axes with Homology Switches, Institute of Science and Technology Austria, 2023.","ista":"Stephenson ER. 2023. Generalizing medial axes with homology switches. Institute of Science and Technology Austria.","mla":"Stephenson, Elizabeth R. <i>Generalizing Medial Axes with Homology Switches</i>. Institute of Science and Technology Austria, 2023, doi:<a href=\"https://doi.org/10.15479/at:ista:14226\">10.15479/at:ista:14226</a>.","apa":"Stephenson, E. R. (2023). <i>Generalizing medial axes with homology switches</i>. Institute of Science and Technology Austria. <a href=\"https://doi.org/10.15479/at:ista:14226\">https://doi.org/10.15479/at:ista:14226</a>"},"publication_identifier":{"issn":["2791-4585"]},"publication_status":"published","oa":1,"date_updated":"2024-02-26T23:30:04Z","month":"08","alternative_title":["ISTA Master's Thesis"],"date_published":"2023-08-24T00:00:00Z","year":"2023","page":"43","supervisor":[{"full_name":"Edelsbrunner, Herbert","orcid":"0000-0002-9823-6833","last_name":"Edelsbrunner","first_name":"Herbert","id":"3FB178DA-F248-11E8-B48F-1D18A9856A87"}],"day":"24","status":"public","ddc":["500"],"date_created":"2023-08-24T13:01:18Z","oa_version":"Published Version","doi":"10.15479/at:ista:14226","degree_awarded":"MS"}]
