@article{14082,
  abstract     = {Epithelial barrier function is commonly analyzed using transepithelial electrical resistance, which measures ion flux across a monolayer, or by adding traceable macromolecules and monitoring their passage across the monolayer. Although these methods measure changes in global barrier function, they lack the sensitivity needed to detect local or transient barrier breaches, and they do not reveal the location of barrier leaks. Therefore, we previously developed a method that we named the zinc-based ultrasensitive microscopic barrier assay (ZnUMBA), which overcomes these limitations, allowing for detection of local tight junction leaks with high spatiotemporal resolution. Here, we present expanded applications for ZnUMBA. ZnUMBA can be used in Xenopus embryos to measure the dynamics of barrier restoration and actin accumulation following laser injury. ZnUMBA can also be effectively utilized in developing zebrafish embryos as well as cultured monolayers of Madin–Darby canine kidney (MDCK) II epithelial cells. ZnUMBA is a powerful and flexible method that, with minimal optimization, can be applied to multiple systems to measure dynamic changes in barrier function with spatiotemporal precision.},
  author       = {Higashi, Tomohito and Stephenson, Rachel E. and Schwayer, Cornelia and Huljev, Karla and Higashi, Atsuko Y. and Heisenberg, Carl-Philipp J. and Chiba, Hideki and Miller, Ann L.},
  issn         = {1477-9137},
  journal      = {Journal of Cell Science},
  number       = {15},
  publisher    = {The Company of Biologists},
  title        = {{ZnUMBA -- a live imaging method to detect local barrier breaches}},
  doi          = {10.1242/jcs.260668},
  volume       = {136},
  year         = {2023},
}

@inproceedings{14083,
  abstract     = {In this work we consider the list-decodability and list-recoverability of arbitrary q-ary codes, for all integer values of q ≥ 2. A code is called (p,L)_q-list-decodable if every radius pn Hamming ball contains less than L codewords; (p,𝓁,L)_q-list-recoverability is a generalization where we place radius pn Hamming balls on every point of a combinatorial rectangle with side length 𝓁 and again stipulate that there be less than L codewords.
Our main contribution is to precisely calculate the maximum value of p for which there exist infinite families of positive rate (p,𝓁,L)_q-list-recoverable codes, the quantity we call the zero-rate threshold. Denoting this value by p_*, we in fact show that codes correcting a p_*+ε fraction of errors must have size O_ε(1), i.e., independent of n. Such a result is typically referred to as a "Plotkin bound." To complement this, a standard random code with expurgation construction shows that there exist positive rate codes correcting a p_*-ε fraction of errors. We also follow a classical proof template (typically attributed to Elias and Bassalygo) to derive from the zero-rate threshold other tradeoffs between rate and decoding radius for list-decoding and list-recovery.
Technically, proving the Plotkin bound boils down to demonstrating the Schur convexity of a certain function defined on the q-simplex as well as the convexity of a univariate function derived from it. We remark that an earlier argument claimed similar results for q-ary list-decoding; however, we point out that this earlier proof is flawed.},
  author       = {Resch, Nicolas and Yuan, Chen and Zhang, Yihan},
  booktitle    = {50th International Colloquium on Automata, Languages, and Programming},
  isbn         = {9783959772785},
  issn         = {1868-8969},
  location     = {Paderborn, Germany},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Zero-rate thresholds and new capacity bounds for list-decoding and list-recovery}},
  doi          = {10.4230/LIPIcs.ICALP.2023.99},
  volume       = {261},
  year         = {2023},
}

@inproceedings{14084,
  abstract     = {A central problem in computational statistics is to convert a procedure for sampling combinatorial objects into a procedure for counting those objects, and vice versa. We will consider sampling problems which come from Gibbs distributions, which are families of probability distributions over a discrete space Ω with probability mass function of the form μ^Ω_β(ω) ∝ e^{β H(ω)} for β in an interval [β_min, β_max] and H(ω) ∈ {0} ∪ [1, n].
The partition function is the normalization factor Z(β) = ∑_{ω ∈ Ω} e^{β H(ω)}, and the log partition ratio is defined as q = log(Z(β_max)/Z(β_min)).
We develop a number of algorithms to estimate the counts c_x using roughly Õ(q/ε²) samples for general Gibbs distributions and Õ(n²/ε²) samples for integer-valued distributions (ignoring some second-order terms and parameters). We show this is optimal up to logarithmic factors. We illustrate with improved algorithms for counting connected subgraphs and perfect matchings in a graph.},
  author       = {Harris, David G. and Kolmogorov, Vladimir},
  booktitle    = {50th International Colloquium on Automata, Languages, and Programming},
  isbn         = {9783959772785},
  issn         = {1868-8969},
  location     = {Paderborn, Germany},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Parameter estimation for Gibbs distributions}},
  doi          = {10.4230/LIPIcs.ICALP.2023.72},
  volume       = {261},
  year         = {2023},
}

@inproceedings{14085,
  abstract     = {We show a (1+ϵ)-approximation algorithm for maintaining maximum s-t flow under m edge insertions in m^{1/2+o(1)} ϵ^{-1/2} amortized update time for directed, unweighted graphs. This constitutes the first sublinear dynamic maximum flow algorithm in general sparse graphs with arbitrarily good approximation guarantee.},
  author       = {Goranci, Gramoz and Henzinger, Monika H.},
  booktitle    = {50th International Colloquium on Automata, Languages, and Programming},
  isbn         = {9783959772785},
  issn         = {1868-8969},
  location     = {Paderborn, Germany},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Efficient data structures for incremental exact and approximate maximum flow}},
  doi          = {10.4230/LIPIcs.ICALP.2023.69},
  volume       = {261},
  year         = {2023},
}

@inproceedings{14086,
  abstract     = {The maximization of submodular functions has found widespread application in areas such as machine learning, combinatorial optimization, and economics, where practitioners often wish to enforce various constraints; the matroid constraint has been investigated extensively due to its algorithmic properties and expressive power. Though tight approximation algorithms for general matroid constraints exist in theory, the running times of such algorithms typically scale quadratically, and are not practical for truly large scale settings. Recent progress has focused on fast algorithms for important classes of matroids given in explicit form. Currently, nearly-linear time algorithms only exist for graphic and partition matroids [Alina Ene and Huy L. Nguyen, 2019]. In this work, we develop algorithms for monotone submodular maximization constrained by graphic, transversal matroids, or laminar matroids in time near-linear in the size of their representation. Our algorithms achieve an optimal approximation of 1-1/e-ε and both generalize and accelerate the results of Ene and Nguyen [Alina Ene and Huy L. Nguyen, 2019]. In fact, the running time of our algorithm cannot be improved within the fast continuous greedy framework of Badanidiyuru and Vondrák [Ashwinkumar Badanidiyuru and Jan Vondrák, 2014].
To achieve near-linear running time, we make use of dynamic data structures that maintain bases with approximate maximum cardinality and weight under certain element updates. These data structures need to support a weight decrease operation and a novel Freeze operation that allows the algorithm to freeze elements (i.e. force to be contained) in its basis regardless of future data structure operations. For the laminar matroid, we present a new dynamic data structure using the top tree interface of Alstrup, Holm, de Lichtenberg, and Thorup [Stephen Alstrup et al., 2005] that maintains the maximum weight basis under insertions and deletions of elements in O(log n) time. This data structure needs to support certain subtree query and path update operations that are performed every insertion and deletion that are non-trivial to handle in conjunction. For the transversal matroid the Freeze operation corresponds to requiring the data structure to keep a certain set S of vertices matched, a property that we call S-stability. While there is a large body of work on dynamic matching algorithms, none are S-stable and maintain an approximate maximum weight matching under vertex updates. We give the first such algorithm for bipartite graphs with total running time linear (up to log factors) in the number of edges.},
  author       = {Henzinger, Monika H. and Liu, Paul and Vondrák, Jan and Zheng, Da Wei},
  booktitle    = {50th International Colloquium on Automata, Languages, and Programming},
  isbn         = {9783959772785},
  issn         = {1868-8969},
  location     = {Paderborn, Germany},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{Faster submodular maximization for several classes of matroids}},
  doi          = {10.4230/LIPIcs.ICALP.2023.74},
  volume       = {261},
  year         = {2023},
}

@article{14087,
  abstract     = {Polar active matter of self-propelled particles sustains spontaneous flows through the full-integer topological defects. We study theoretically the incompressible flow profiles around ±1 defects induced by polar and dipolar active forces. We show that dipolar forces induce vortical flows around the +1 defect, while the flow around the −1 defect has an 8-fold rotational symmetry. The vortical flow changes its chirality near the +1 defect core in the absence of the friction with a substrate. We show analytically that the flow induced by polar active forces is vortical near the +1 defect and is 4-fold symmetric near the −1 defect, while it becomes uniform in the far-field. For a pair of oppositely charged defects, this polar flow contributes to a mutual interaction force that depends only on the orientation of the defect pair relative to the background polarization, and that enhances defect pair annihilation. This is in contradiction with the effect of dipolar active forces which decay inversely proportional with the defect separation distance. As such, our analyses reveal a long-ranged mechanism for the pairwise interaction between topological defects in polar active matter.},
  author       = {Rønning, Jonas and Renaud, Julian B. and Doostmohammadi, Amin and Angheluta, Luiza},
  issn         = {1744-6848},
  journal      = {Soft Matter},
  pages        = {7513--7527},
  publisher    = {Royal Society of Chemistry},
  title        = {{Spontaneous flows and dynamics of full-integer topological defects in polar active matter}},
  doi          = {10.1039/d3sm00316g},
  number       = {39},
  volume       = {19},
  year         = {2023},
}

@inproceedings{14105,
  abstract     = {Despite their recent success, deep neural networks continue to perform poorly when they encounter distribution shifts at test time. Many recently proposed approaches try to counter this by aligning the model to the new distribution prior to inference. With no labels available this requires unsupervised objectives to adapt the model on the observed test data. In this paper, we propose Test-Time Self-Training (TeST): a technique that takes as input a model trained on some source data and a novel data distribution at test time, and learns invariant and robust representations using a student-teacher framework. We find that models adapted using TeST significantly improve over baseline test-time adaptation algorithms. TeST achieves competitive performance to modern domain adaptation algorithms [4, 43], while having access to 5-10x less data at time of adaption. We thoroughly evaluate a variety of baselines on two tasks:
object detection and image segmentation and find that models adapted with TeST. We find that TeST sets the new state-of-the-art for test-time domain adaptation algorithms. },
  author       = {Sinha, Samarth and Gehler, Peter and Locatello, Francesco and Schiele, Bernt},
  booktitle    = {2023 IEEE/CVF Winter Conference on Applications of Computer Vision},
  isbn         = {9781665493475},
  issn         = {2642-9381},
  location     = {Waikoloa, HI, United States},
  publisher    = {Institute of Electrical and Electronics Engineers},
  title        = {{TeST: Test-time Self-Training under distribution shift}},
  doi          = {10.1109/wacv56688.2023.00278},
  year         = {2023},
}

@article{14192,
  abstract     = {For the Fröhlich model of the large polaron, we prove that the ground state energy as a function of the total momentum has a unique global minimum at momentum zero. This implies the non-existence of a ground state of the translation invariant Fröhlich Hamiltonian and thus excludes the possibility of a localization transition at finite coupling.},
  author       = {Lampart, Jonas and Mitrouskas, David Johannes and Mysliwy, Krzysztof},
  issn         = {1572-9656},
  journal      = {Mathematical Physics, Analysis and Geometry},
  keywords     = {Geometry and Topology, Mathematical Physics},
  number       = {3},
  publisher    = {Springer Nature},
  title        = {{On the global minimum of the energy–momentum relation for the polaron}},
  doi          = {10.1007/s11040-023-09460-x},
  volume       = {26},
  year         = {2023},
}

@unpublished{14207,
  abstract     = {The binding problem in human cognition, concerning how the brain represents and connects objects within a fixed network of neural connections, remains a subject of intense debate. Most machine learning efforts addressing this issue in an unsupervised setting have focused on slot-based methods, which may be limiting due to their discrete nature and difficulty to express uncertainty. Recently, the Complex AutoEncoder was proposed as an alternative that learns continuous and distributed object-centric representations. However, it is only applicable to simple toy data. In this paper, we present Rotating Features, a generalization of complex-valued features to higher dimensions, and a new evaluation procedure for extracting objects from distributed representations. Additionally, we show the applicability of our approach to pre-trained features. Together, these advancements enable us to scale distributed object-centric representations from simple toy to real-world data. We believe this work advances a new paradigm for addressing the binding problem in machine learning and has the potential to inspire further innovation in the field.},
  author       = {Löwe, Sindy and Lippe, Phillip and Locatello, Francesco and Welling, Max},
  note         = {arXiv preprint},
  title        = {{Rotating features for object discovery}},
  doi          = {10.48550/arXiv.2306.00600},
  year         = {2023},
}

@inproceedings{14208,
  abstract     = {This paper focuses on over-parameterized deep neural networks (DNNs) with ReLU activation functions and proves that when the data distribution is well-separated, DNNs can achieve Bayes-optimal test error for classification while obtaining (nearly) zero-training error under the lazy training regime. For this purpose, we unify three interrelated concepts of overparameterization, benign overfitting, and the Lipschitz constant of DNNs. Our results indicate that interpolating with smoother functions leads to better generalization. Furthermore, we investigate the special case where interpolating smooth ground-truth functions is performed by DNNs under the Neural Tangent Kernel (NTK) regime for generalization. Our result demonstrates that the generalization error converges to a constant order that only depends on label noise and initialization noise, which theoretically verifies benign overfitting. Our analysis provides a tight lower bound on the normalized margin under non-smooth activation functions, as well as the minimum eigenvalue of NTK under high-dimensional settings, which has its own interest in learning theory.},
  author       = {Zhu, Zhenyu and Liu, Fanghui and Chrysos, Grigorios G and Locatello, Francesco and Cevher, Volkan},
  booktitle    = {Proceedings of the 40th International Conference on Machine Learning},
  location     = {Honolulu, Hawaii, United States},
  pages        = {43105--43128},
  publisher    = {ML Research Press},
  title        = {{Benign overfitting in deep neural networks under lazy training}},
  volume       = {202},
  year         = {2023},
}

@unpublished{14209,
  abstract     = {Diffusion models excel at generating photorealistic images from text-queries. Naturally, many approaches have been proposed to use these generative abilities to augment training datasets for downstream tasks, such as classification. However, diffusion models are themselves trained on large noisily supervised, but nonetheless, annotated datasets. It is an open question whether the generalization capabilities of diffusion models beyond using the additional data of the pre-training process for augmentation lead to improved downstream performance. We perform a systematic evaluation of existing methods to generate images from diffusion models and study new extensions to assess their benefit for data augmentation. While we find that personalizing diffusion models towards the target data outperforms simpler prompting strategies, we also show that using the training data of the diffusion model alone, via a simple nearest neighbor retrieval procedure, leads to even stronger downstream performance. Overall, our study probes the limitations of diffusion models for data augmentation but also highlights its potential in generating new training data to improve performance on simple downstream vision tasks.},
  author       = {Burg, Max F. and Wenzel, Florian and Zietlow, Dominik and Horn, Max and Makansi, Osama and Locatello, Francesco and Russell, Chris},
  note         = {arXiv preprint},
  title        = {{A data augmentation perspective on diffusion models and retrieval}},
  doi          = {10.48550/arXiv.2304.10253},
  year         = {2023},
}

@unpublished{14210,
  abstract     = {Recovering the latent factors of variation of high dimensional data has so far focused on simple synthetic settings. Mostly building on unsupervised and weakly-supervised objectives, prior work missed out on the positive implications for representation learning on real world data. In this work, we propose to leverage knowledge extracted from a diversified set of supervised tasks to learn a common disentangled representation. Assuming each supervised task only depends on an unknown subset of the factors of variation, we disentangle the feature space of a supervised multi-task model, with features activating sparsely across different tasks and information being shared as appropriate. Importantly, we never directly observe the factors of variations but establish that access to multiple tasks is sufficient for identifiability under sufficiency and minimality assumptions. We validate our approach on six real world distribution shift benchmarks, and different data modalities (images, text), demonstrating how disentangled representations can be transferred to real settings.},
  author       = {Fumero, Marco and Wenzel, Florian and Zancato, Luca and Achille, Alessandro and Rodolà, Emanuele and Soatto, Stefano and Schölkopf, Bernhard and Locatello, Francesco},
  note         = {arXiv preprint},
  title        = {{Leveraging sparse and shared feature activations for disentangled representation learning}},
  doi          = {10.48550/arXiv.2304.07939},
  year         = {2023},
}

@inproceedings{14211,
  abstract     = {Causal discovery methods are intrinsically constrained by the set of assumptions needed to ensure structure identifiability. Moreover additional restrictions are often imposed in order to simplify the inference task: this is the case for the Gaussian noise assumption on additive non-linear models, which is common to many causal discovery approaches. In this paper we show the shortcomings of inference under this hypothesis, analyzing the risk of edge inversion under violation of Gaussianity of the noise terms. Then, we propose a novel method for inferring the topological ordering of the variables in the causal graph, from data generated according to an additive non-linear model with a generic noise distribution. This leads to NoGAM (Not only Gaussian Additive noise Models), a causal discovery algorithm with a minimal set of assumptions and state of the art performance, experimentally benchmarked on synthetic data.},
  author       = {Montagna, Francesco and Noceti, Nicoletta and Rosasco, Lorenzo and Zhang, Kun and Locatello, Francesco},
  booktitle    = {2nd Conference on Causal Learning and Reasoning},
  location     = {Tübingen, Germany},
  title        = {{Causal discovery with score matching on additive models with arbitrary noise}},
  year         = {2023},
}

@inproceedings{14212,
  abstract     = {This paper demonstrates how to discover the whole causal graph from the second derivative of the log-likelihood in observational non-linear additive Gaussian noise models. Leveraging scalable machine learning approaches to approximate the score function ∇logp(X), we extend the work of Rolland et al. (2022) that only recovers the topological order from the score and requires an expensive pruning step removing spurious edges among those admitted by the ordering. Our analysis leads to DAS (acronym for Discovery At Scale), a practical algorithm that reduces the complexity of the pruning by a factor proportional to the graph size. In practice, DAS achieves competitive accuracy with current state-of-the-art while being over an order of magnitude faster. Overall, our approach enables principled and scalable causal discovery, significantly lowering the compute bar.},
  author       = {Montagna, Francesco and Noceti, Nicoletta and Rosasco, Lorenzo and Zhang, Kun and Locatello, Francesco},
  booktitle    = {2nd Conference on Causal Learning and Reasoning},
  location     = {Tübingen, Germany},
  title        = {{Scalable causal discovery with score matching}},
  year         = {2023},
}

@inproceedings{14214,
  abstract     = {Recent years have seen a surge of interest in learning high-level causal representations from low-level image pairs under interventions. Yet, existing efforts are largely limited to simple synthetic settings that are far away from real-world problems. In this paper, we present Causal Triplet, a causal representation learning benchmark featuring not only visually more complex scenes, but also two crucial desiderata commonly overlooked in previous works: (i) an actionable counterfactual setting, where only certain object-level variables allow for counterfactual observations whereas others do not; (ii) an interventional downstream task with an emphasis on out-of-distribution robustness from the independent causal mechanisms principle. Through extensive experiments, we find that models built with the knowledge of disentangled or object-centric representations significantly outperform their distributed counterparts. However, recent causal representation learning methods still struggle to identify such latent structures, indicating substantial challenges and opportunities for future work.},
  author       = {Liu, Yuejiang and Alahi, Alexandre and Russell, Chris and Horn, Max and Zietlow, Dominik and Schölkopf, Bernhard and Locatello, Francesco},
  booktitle    = {2nd Conference on Causal Learning and Reasoning},
  location     = {Tübingen, Germany},
  title        = {{Causal triplet: An open challenge for intervention-centric causal representation learning}},
  year         = {2023},
}

@inproceedings{14217,
  abstract     = {Neural networks embed the geometric structure of a data manifold lying in a high-dimensional space into latent representations. Ideally, the distribution of the data points in the latent space should depend only on the task, the data, the loss, and other architecture-specific constraints. However, factors such as the random weights initialization, training hyperparameters, or other sources of randomness in the training phase may induce incoherent latent spaces that hinder any form of reuse. Nevertheless, we empirically observe that, under the same data and modeling choices, the angles between the encodings within distinct latent spaces do not change. In this work, we propose the latent similarity between each sample and a fixed set of anchors as an alternative data representation, demonstrating that it can enforce the desired invariances without any additional training. We show how neural architectures can leverage these relative representations to guarantee, in practice, invariance to latent isometries and rescalings, effectively enabling latent space communication: from zero-shot model stitching to latent space comparison between diverse settings. We extensively validate the generalization capability of our approach on different datasets, spanning various modalities (images, text, graphs), tasks (e.g., classification, reconstruction) and architectures (e.g., CNNs, GCNs, transformers).},
  author       = {Moschella, Luca and Maiorca, Valentino and Fumero, Marco and Norelli, Antonio and Locatello, Francesco and Rodolà, Emanuele},
  booktitle    = {The 11th International Conference on Learning Representations},
  location     = {Kigali, Rwanda},
  title        = {{Relative representations enable zero-shot latent space communication}},
  year         = {2023},
}

@inproceedings{14218,
  abstract     = {Humans naturally decompose their environment into entities at the appropriate level of abstraction to act in the world. Allowing machine learning algorithms to derive this decomposition in an unsupervised way has become an important line of research. However, current methods are restricted to simulated data or require additional information in the form of motion or depth in order to successfully discover objects. In this work, we overcome this limitation by showing that reconstructing features from models trained in a self-supervised manner is a sufficient training signal for object-centric representations to arise in a fully unsupervised way. Our approach, DINOSAUR, significantly out-performs existing image-based object-centric learning models on simulated data and is the first unsupervised object-centric model that scales to real-world datasets such as COCO and PASCAL VOC. DINOSAUR is conceptually simple and shows competitive performance compared to more involved pipelines from the computer vision literature.},
  author       = {Seitzer, Maximilian and Horn, Max and Zadaianchuk, Andrii and Zietlow, Dominik and Xiao, Tianjun and Simon-Gabriel, Carl-Johann and He, Tong and Zhang, Zheng and Schölkopf, Bernhard and Brox, Thomas and Locatello, Francesco},
  booktitle    = {The 11th International Conference on Learning Representations},
  location     = {Kigali, Rwanda},
  title        = {{Bridging the gap to real-world object-centric learning}},
  year         = {2023},
}

@inproceedings{14219,
  abstract     = {In this paper, we show that recent advances in self-supervised feature
learning enable unsupervised object discovery and semantic segmentation with a
performance that matches the state of the field on supervised semantic
segmentation 10 years ago. We propose a methodology based on unsupervised
saliency masks and self-supervised feature clustering to kickstart object
discovery followed by training a semantic segmentation network on pseudo-labels
to bootstrap the system on images with multiple objects. We present results on
PASCAL VOC that go far beyond the current state of the art (50.0 mIoU), and we
report for the first time results on MS COCO for the whole set of 81 classes:
our method discovers 34 categories with more than $20\%$ IoU, while obtaining
an average IoU of 19.6 for all 81 categories.},
  author       = {Zadaianchuk, Andrii and Kleindessner, Matthaeus and Zhu, Yi and Locatello, Francesco and Brox, Thomas},
  booktitle    = {The 11th International Conference on Learning Representations},
  location     = {Kigali, Rwanda},
  title        = {{Unsupervised semantic segmentation with self-supervised object-centric representations}},
  year         = {2023},
}

@inproceedings{14222,
  abstract     = {Learning generative object models from unlabelled videos is a long standing problem and required for causal scene modeling. We decompose this problem into three easier subtasks, and provide candidate solutions for each of them. Inspired by the Common Fate Principle of Gestalt Psychology, we first extract (noisy) masks of moving objects via unsupervised motion segmentation. Second, generative models are trained on the masks of the background and the moving objects, respectively. Third, background and foreground models are combined in a conditional "dead leaves" scene model to sample novel scene configurations where occlusions and depth layering arise naturally. To evaluate the individual stages, we introduce the Fishbowl dataset positioned between complex real-world scenes and common object-centric benchmarks of simplistic objects. We show that our approach allows learning generative models that generalize beyond the occlusions present in the input videos, and represent scenes in a modular fashion that allows sampling plausible scenes outside the training distribution by permitting, for instance, object numbers or densities not observed in the training set.},
  author       = {Tangemann, Matthias and Schneider, Steffen and von Kügelgen, Julius and Locatello, Francesco and Gehler, Peter and Brox, Thomas and Kümmerer, Matthias and Bethge, Matthias and Schölkopf, Bernhard},
  booktitle    = {2nd Conference on Causal Learning and Reasoning},
  location     = {Tübingen, Germany},
  title        = {{Unsupervised object learning via common fate}},
  year         = {2023},
}

@phdthesis{14226,
  abstract     = {We introduce the notion of a Faustian interchange in a 1-parameter family of smooth
functions to generalize the medial axis to critical points of index larger than 0.
We construct and implement a general purpose algorithm for approximating such
generalized medial axes.},
  author       = {Stephenson, Elizabeth R},
  issn         = {2791-4585},
  pages        = {43},
  publisher    = {Institute of Science and Technology Austria},
  title        = {{Generalizing medial axes with homology switches}},
  doi          = {10.15479/at:ista:14226},
  year         = {2023},
}

