@article{11354,
  abstract     = {We construct a recurrent diffusion process with values in the space of probability measures over an arbitrary closed Riemannian manifold of dimension d≥2. The process is associated with the Dirichlet form defined by integration of the Wasserstein gradient w.r.t. the Dirichlet–Ferguson measure, and is the counterpart on multidimensional base spaces to the modified massive Arratia flow over the unit interval described in V. Konarovskyi and M.-K. von Renesse (Comm. Pure Appl. Math. 72 (2019) 764–800). Together with two different constructions of the process, we discuss its ergodicity, invariant sets, finite-dimensional approximations, and Varadhan short-time asymptotics.},
  author       = {Dello Schiavo, Lorenzo},
  issn         = {2168-894X},
  journal      = {Annals of Probability},
  number       = {2},
  pages        = {591--648},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{The Dirichlet–Ferguson diffusion on the space of probability measures over a closed Riemannian manifold}},
  doi          = {10.1214/21-AOP1541},
  volume       = {50},
  year         = {2022},
}

@inproceedings{11355,
  abstract     = {Contract-based design is a promising methodology for taming the complexity of developing sophisticated systems. A formal contract distinguishes between assumptions, which are constraints that the designer of a component puts on the environments in which the component can be used safely, and guarantees, which are promises that the designer asks from the team that implements the component. A theory of formal contracts can be formalized as an interface theory, which supports the composition and refinement of both assumptions and guarantees.
Although there is a rich landscape of contract-based design methods that address functional and extra-functional properties, we present the first interface theory that is designed for ensuring system-wide security properties. Our framework provides a refinement relation and a composition operation that support both incremental design and independent implementability. We develop our theory for both stateless and stateful interfaces. We illustrate the applicability of our framework with an example inspired from the automotive domain.},
  author       = {Bartocci, Ezio and Ferrere, Thomas and Henzinger, Thomas A and Nickovic, Dejan and Da Costa, Ana Oliveira},
  booktitle    = {Fundamental Approaches to Software Engineering},
  isbn         = {9783030994280},
  issn         = {1611-3349},
  location     = {Munich, Germany},
  pages        = {3--22},
  publisher    = {Springer Nature},
  series       = {Lecture Notes in Computer Science},
  title        = {{Information-flow interfaces}},
  doi          = {10.1007/978-3-030-99429-7_1},
  volume       = {13241},
  year         = {2022},
}

@article{11356,
  author       = {Chang, Cheng and Qin, Bingchao and Su, Lizhong and Zhao, Li Dong},
  issn         = {2095-9281},
  journal      = {Science Bulletin},
  number       = {11},
  pages        = {1105--1107},
  publisher    = {Elsevier},
  title        = {{Distinct electron and hole transports in SnSe crystals}},
  doi          = {10.1016/j.scib.2022.04.007},
  volume       = {67},
  year         = {2022},
}

@phdthesis{11362,
  abstract     = {Deep learning has enabled breakthroughs in challenging computing problems and has emerged as the standard problem-solving tool for computer vision and natural language processing tasks.
One exception to this trend is safety-critical tasks where robustness and resilience requirements contradict the black-box nature of neural networks. 
To deploy deep learning methods for these tasks, it is vital to provide guarantees on neural network agents' safety and robustness criteria. 
This can be achieved by developing formal verification methods to verify the safety and robustness properties of neural networks.

Our goal is to design, develop and assess safety verification methods for neural networks to improve their reliability and trustworthiness in real-world applications.
This thesis establishes techniques for the verification of compressed and adversarially trained models as well as the design of novel neural networks for verifiably safe decision-making.

First, we establish the problem of verifying quantized neural networks. Quantization is a technique that trades numerical precision for the computational efficiency of running a neural network and is widely adopted in industry.
We show that neglecting the reduced precision when verifying a neural network can lead to wrong conclusions about the robustness and safety of the network, highlighting that novel techniques for quantized network verification are necessary. We introduce several bit-exact verification methods explicitly designed for quantized neural networks and experimentally confirm on realistic networks that the network's robustness and other formal properties are affected by the quantization.

Furthermore, we perform a case study providing evidence that adversarial training, a standard technique for making neural networks more robust, has detrimental effects on the network's performance. This robustness-accuracy tradeoff has been studied before regarding the accuracy obtained on classification datasets where each data point is independent of all other data points. On the other hand, we investigate the tradeoff empirically in robot learning settings where both a high accuracy and a high robustness are desirable.
Our results suggest that the negative side-effects of adversarial training outweigh its robustness benefits in practice.

Finally, we consider the problem of verifying safety when running a Bayesian neural network policy in a feedback loop with systems over the infinite time horizon. Bayesian neural networks are probabilistic models for learning uncertainties in the data and are therefore often used in robotic and healthcare applications where data is inherently stochastic.
We introduce a method for recalibrating Bayesian neural networks so that they yield probability distributions over safe decisions only.
Our method learns a safety certificate that guarantees safety over the infinite time horizon to determine which decisions are safe in every possible state of the system.
We demonstrate the effectiveness of our approach on a series of reinforcement learning benchmarks.},
  author       = {Lechner, Mathias},
  isbn         = {978-3-99078-017-6},
  keywords     = {neural networks, verification, machine learning},
  pages        = {124},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Learning verifiable representations}},
  doi          = {10.15479/at:ista:11362},
  year         = {2022},
}

@unpublished{11366,
  abstract     = {Adversarial training (i.e., training on adversarially perturbed input data) is a well-studied method for making neural networks robust to potential adversarial attacks during inference. However, the improved robustness does not
come for free but rather is accompanied by a decrease in overall model accuracy and performance. Recent work has shown that, in practical robot learning applications, the effects of adversarial training do not pose a fair trade-off
but inflict a net loss when measured in holistic robot performance. This work revisits the robustness-accuracy trade-off in robot learning by systematically analyzing if recent advances in robust training methods and theory in
conjunction with adversarial robot learning can make adversarial training suitable for real-world robot applications. We evaluate a wide variety of robot learning tasks ranging from autonomous driving in a high-fidelity environment
amenable to sim-to-real deployment, to mobile robot gesture recognition. Our results demonstrate that, while these techniques make incremental improvements on the trade-off on a relative scale, the negative side-effects caused by
adversarial training still outweigh the improvements by an order of magnitude. We conclude that more substantial advances in robust learning methods are necessary before they can benefit robot learning tasks in practice.},
  author       = {Lechner, Mathias and Amini, Alexander and Rus, Daniela and Henzinger, Thomas A},
  eprint       = {2204.07373},
  eprinttype   = {arXiv},
  note         = {arXiv preprint},
  title        = {{Revisiting the adversarial robustness-accuracy tradeoff in robot learning}},
  doi          = {10.48550/arXiv.2204.07373},
  year         = {2022},
}

@article{11373,
  abstract     = {The actin-homologue FtsA is essential for E. coli cell division, as it links FtsZ filaments in the Z-ring to transmembrane proteins. FtsA is thought to initiate cell constriction by switching from an inactive polymeric to an active monomeric conformation, which recruits downstream proteins and stabilizes the Z-ring. However, direct biochemical evidence for this mechanism is missing. Here, we use reconstitution experiments and quantitative fluorescence microscopy to study divisome activation in vitro. By comparing wild-type FtsA with FtsA R286W, we find that this hyperactive mutant outperforms FtsA WT in replicating FtsZ treadmilling dynamics, FtsZ filament stabilization and recruitment of FtsN. We could attribute these differences to a faster exchange and denser packing of FtsA R286W below FtsZ filaments. Using FRET microscopy, we also find that FtsN binding promotes FtsA self-interaction. We propose that in the active divisome FtsA and FtsN exist as a dynamic copolymer that follows treadmilling filaments of FtsZ.},
  author       = {Radler, Philipp and Baranova, Natalia S. and Dos Santos Caldas, Paulo R and Sommer, Christoph M and Lopez Pelegrin, Maria D and Michalik, David and Loose, Martin},
  issn         = {2041-1723},
  journal      = {Nature Communications},
  keywords     = {General Physics and Astronomy, General Biochemistry, Genetics and Molecular Biology, General Chemistry},
  publisher    = {Springer Nature},
  title        = {{In vitro reconstitution of Escherichia coli divisome activation}},
  doi          = {10.1038/s41467-022-30301-y},
  volume       = {13},
  year         = {2022},
}

@article{11379,
  abstract     = {Bernal-stacked multilayer graphene is a versatile platform to explore quantum transport phenomena and interaction physics due to its exceptional tunability via electrostatic gating. For instance, upon applying a perpendicular electric field, its band structure exhibits several off-center Dirac points (so-called Dirac gullies) in each valley. Here, the formation of Dirac gullies and the interaction-induced breakdown of gully coherence is explored via magnetotransport measurements in high-quality Bernal-stacked (ABA) trilayer graphene. At zero magnetic field, multiple Lifshitz transitions indicating the formation of Dirac gullies are identified. In the quantum Hall regime, the emergence of Dirac gullies is evident as an increase in Landau level degeneracy. When tuning both electric and magnetic fields, electron–electron interactions can be controllably enhanced until, beyond critical electric and magnetic fields, the gully degeneracy is eventually lifted. The arising correlated ground state is consistent with a previously predicted nematic phase that spontaneously breaks the rotational gully symmetry.},
  author       = {Winterer, Felix and Seiler, Anna M. and Ghazaryan, Areg and Geisenhof, Fabian R. and Watanabe, Kenji and Taniguchi, Takashi and Serbyn, Maksym and Weitz, R. Thomas},
  issn         = {1530-6992},
  journal      = {Nano Letters},
  number       = {8},
  pages        = {3317--3322},
  publisher    = {American Chemical Society},
  title        = {{Spontaneous gully-polarized quantum Hall states in ABA trilayer graphene}},
  doi          = {10.1021/acs.nanolett.2c00435},
  volume       = {22},
  year         = {2022},
}

@phdthesis{11388,
  abstract     = {In evolve and resequence experiments, a population is sequenced, subjected to selection and
then sequenced again, so that genetic changes before and after selection can be observed at
the genetic level. Here, I use these studies to better understand the genetic basis of complex
traits - traits which depend on more than a few genes.
In the first chapter, I discuss the first evolve and resequence experiment, in which a population
of mice, the so-called ``Longshanks'' mice, were selected for tibia length while their body mass
was kept constant. The full pedigree is known. We observed a selection response on all
chromosomes and used the infinitesimal model with linkage, a model which assumes an infinite
number of genes with infinitesimally small effect sizes, as a null model. Results implied a very
polygenic basis with a few loci of major effect standing out and changing in parallel. There
was large variability between the different chromosomes in this study, probably due to LD.
In chapter two, I go on to discuss the impact of LD, on the variability in an allele-frequency
based summary statistic, giving an equation based on the initial allele frequencies, average
pairwise LD, and the first four moments of the haplotype block copy number distribution. I
describe this distribution by referring back to the founder generation. I then demonstrate
how to infer selection via a maximum likelihood scheme on the example of a single locus and
discuss how to extend this to more realistic scenarios.
In chapter three, I discuss the second evolve and resequence experiment, in which a small
population of Drosophila melanogaster was selected for increased pupal case size over 6
generations. The experiment was highly replicated with 27 lines selected within family and a
known pedigree. We observed a phenotypic selection response of over one standard deviation.
I describe the patterns in allele frequency data, including allele frequency changes and patterns
of heterozygosity, and give ideas for future work.},
  author       = {Belohlavy, Stefanie},
  isbn         = {978-3-99078-018-3},
  pages        = {98},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{The genetic basis of complex traits studied via analysis of evolve and resequence experiments}},
  doi          = {10.15479/at:ista:11388},
  year         = {2022},
}

@phdthesis{11393,
  abstract     = {AMPA receptors (AMPARs) mediate fast excitatory neurotransmission and their role is
implicated in complex processes such as learning and memory and various neurological
diseases. These receptors are composed of different subunits and the subunit composition can
affect channel properties, receptor trafficking and interaction with other associated proteins.
Using the high sensitivity SDS-digested freeze-fracture replica labeling (SDS-FRL) for
electron microscopy I investigated the number, density, and localization of AMPAR subunits,
GluA1, GluA2, GluA3, and GluA1-3 (panAMPA) in pyramidal cells in the CA1 area of mouse
hippocampus. I have found that the immunogold labeling for all of these subunits in the
postsynaptic sites was highest in stratum radiatum and lowest in stratum lacunosum-moleculare. The labeling density for all subunits in the extrasynaptic sites showed a gradual
increase from the pyramidal cell soma towards the distal part of stratum radiatum. The densities
of extrasynaptic GluA1, GluA2 and panAMPA labeling reached 10-15% of synaptic densities,
while the ratio of extrasynaptic labeling for GluA3 was significantly lower than those
for other subunits. The labeling patterns for GluA1, GluA2 and GluA1-3 are similar and their
densities were higher in the periphery than center of synapses. In contrast, the GluA3-
containing receptors were more centrally localized compared to the GluA1- and GluA2-
containing receptors.
The hippocampus plays a central role in learning and memory. Contextual learning has been
shown to require the delivery of AMPA receptors to CA1 synapses in the dorsal hippocampus.
However, proximodistal heterogeneity of this plasticity and particular contribution of different
AMPA receptor subunits are not fully understood. By combining inhibitory avoidance task, a
hippocampus-dependent contextual fear-learning paradigm, with SDS-FRL, I have revealed an
increase in synaptic density specific to GluA1-containing AMPA receptors in the CA1 area.
The intrasynaptic distribution of GluA1 also changed from the periphery to center-preferred
pattern. Furthermore, this synaptic plasticity was evident selectively in stratum radiatum but
not stratum oriens, and in the CA1 subregion proximal but not distal to CA2. These findings
further contribute to our understanding of how specific hippocampal subregions and AMPA
receptor subunits are involved in physiological learning.
Although the immunolabeling results above shed light on subunit-specific plasticity in
AMPAR distribution, no tools to visualize and study the subunit composition at the single
channel level in situ have been available. Electron microscopy with conventional immunogold
labeling approaches has limitations in the single channel analysis because of the large size of
antibodies and steric hindrance hampering multiple subunit labeling of single channels. I
managed to develop a new chemical labeling system using a short peptide tag and small
synthetic probes, which form specific covalent bond with a cysteine residue in the tag fused to
proteins of interest (reactive tag system). I additionally made substantial progress into adapting
this system for AMPA receptor subunits.},
  author       = {Jevtic, Marijo},
  issn         = {2663-337X},
  pages        = {108},
  publisher    = {Institute of Science and Technology Austria},
  school       = {Institute of Science and Technology Austria},
  title        = {{Contextual fear learning induced changes in AMPA receptor subtypes along the proximodistal axis in dorsal hippocampus}},
  doi          = {10.15479/at:ista:11393},
  year         = {2022},
}

@article{11400,
  abstract     = {By varying the concentration of molecules in the cytoplasm or on the membrane, cells can induce the formation of condensates and liquid droplets, similar to phase separation. Their thermodynamics, much studied, depends on the mutual interactions between microscopic constituents. Here, we focus on the kinetics and size control of 2D clusters, forming on membranes. Using molecular dynamics of patchy colloids, we model a system of two species of proteins, giving origin to specific heterotypic bonds. We find that concentrations, together with valence and bond strength, control both the size and the growth time rate of the clusters. In particular, if one species is in large excess, it gradually saturates the binding sites of the other species; the system then becomes kinetically arrested and cluster coarsening slows down or stops, thus yielding effective size selection. This phenomenology is observed both in solid and fluid clusters, which feature additional generic homotypic interactions and are reminiscent of the ones observed on biological membranes.},
  author       = {Palaia, Ivan and Šarić, Anđela},
  issn         = {1089-7690},
  journal      = {The Journal of Chemical Physics},
  keywords     = {Physical and Theoretical Chemistry, General Physics and Astronomy},
  number       = {19},
  publisher    = {AIP Publishing},
  title        = {{Controlling cluster size in 2D phase-separating binary mixtures with specific interactions}},
  doi          = {10.1063/5.0087769},
  volume       = {156},
  year         = {2022},
}

@article{11401,
  abstract     = {Tin selenide (SnSe) is considered a robust candidate for thermoelectric applications due to its very high thermoelectric figure of merit, ZT, with values of 2.6 in p-type and 2.8 in n-type single crystals. Sn has been replaced with various lower group dopants to achieve successful p-type doping in SnSe with high ZT values. A known, facile, and powerful alternative way to introduce a hole carrier is to use a natural single Sn vacancy, VSn. Through transport and scanning tunneling microscopy studies, we discovered that VSn are dominant in high-quality (slow cooling rate) SnSe single crystals, while multiple vacancies, Vmulti, are dominant in low-quality (high cooling rate) single crystals. Surprisingly, both VSn and Vmulti help to increase the power factors of SnSe, whereas samples with dominant VSn have superior thermoelectric properties in SnSe single crystals. Additionally, the observation that Vmulti are good p-type sources observed in relatively low-quality single crystals is useful in thermoelectric applications because polycrystalline SnSe can be used due to its mechanical strength; this substance is usually fabricated at very high cooling speeds.},
  author       = {Nguyen, Van Quang and Trinh, Thi Ly and Chang, Cheng and Zhao, Li Dong and Nguyen, Thi Huong and Duong, Van Thiet and Duong, Anh Tuan and Park, Jong Ho and Park, Sudong and Kim, Jungdae and Cho, Sunglae},
  issn         = {1884-4057},
  journal      = {NPG Asia Materials},
  publisher    = {Springer Nature},
  title        = {{Unidentified major p-type source in SnSe: Multivacancies}},
  doi          = {10.1038/s41427-022-00393-5},
  volume       = {14},
  year         = {2022},
}

@article{11402,
  abstract     = {Fixed-horizon planning considers a weighted graph and asks to construct a path that maximizes the sum of weights for a given time horizon T. However, in many scenarios, the time horizon is not fixed, but the stopping time is chosen according to some distribution such that the expected stopping time is T. If the stopping-time distribution is not known, then to ensure robustness, the distribution is chosen by an adversary as the worst-case scenario. A stationary plan for every vertex always chooses the same outgoing edge. For fixed horizon or fixed stopping-time distribution, stationary plans are not sufficient for optimality. Quite surprisingly we show that when an adversary chooses the stopping-time distribution with expected stopping-time T, then stationary plans are sufficient. While computing optimal stationary plans for fixed horizon is NP-complete, we show that computing optimal stationary plans under adversarial stopping-time distribution can be achieved in polynomial time.},
  author       = {Chatterjee, Krishnendu and Doyen, Laurent},
  issn         = {1090-2724},
  journal      = {Journal of Computer and System Sciences},
  pages        = {1--21},
  publisher    = {Elsevier},
  title        = {{Graph planning with expected finite horizon}},
  doi          = {10.1016/j.jcss.2022.04.003},
  volume       = {129},
  year         = {2022},
}

@article{11403,
  author       = {Stöllner, Andrea},
  issn         = {2662-138X},
  journal      = {Nature Reviews Earth and Environment},
  number       = {6},
  pages        = {360},
  publisher    = {Springer Nature},
  title        = {{Measuring airborne nanoplastics using aerosol physics}},
  doi          = {10.1038/s43017-022-00302-y},
  volume       = {3},
  year         = {2022},
}

@article{11411,
  abstract     = {Many studies have quantified the distribution of heterozygosity and relatedness in natural populations, but few have examined the demographic processes driving these patterns. In this study, we take a novel approach by studying how population structure affects both pairwise identity and the distribution of heterozygosity in a natural population of the self-incompatible plant Antirrhinum majus. Excess variance in heterozygosity between individuals is due to identity disequilibrium, which reflects the variance in inbreeding between individuals; it is measured by the statistic g2. We calculated g2 together with FST and pairwise relatedness (Fij) using 91 SNPs in 22,353 individuals collected over 11 years. We find that pairwise Fij declines rapidly over short spatial scales, and the excess variance in heterozygosity between individuals reflects significant variation in inbreeding. Additionally, we detect an excess of individuals with around half the average heterozygosity, indicating either selfing or matings between close relatives. We use 2 types of simulation to ask whether variation in heterozygosity is consistent with fine-scale spatial population structure. First, by simulating offspring using parents drawn from a range of spatial scales, we show that the known pollen dispersal kernel explains g2. Second, we simulate a 1,000-generation pedigree using the known dispersal and spatial distribution and find that the resulting g2 is consistent with that observed from the field data. In contrast, a simulated population with uniform density underestimates g2, indicating that heterogeneous density promotes identity disequilibrium. Our study shows that heterogeneous density and leptokurtic dispersal can together explain the distribution of heterozygosity.},
  author       = {Surendranadh, Parvathy and Arathoon, Louise S and Baskett, Carina and Field, David and Pickup, Melinda and Barton, Nicholas H},
  issn         = {1943-2631},
  journal      = {Genetics},
  number       = {3},
  publisher    = {Oxford University Press},
  title        = {{Effects of fine-scale population structure on the distribution of heterozygosity in a long-term study of Antirrhinum majus}},
  doi          = {10.1093/genetics/iyac083},
  volume       = {221},
  year         = {2022},
}

@article{11417,
  abstract     = {Over the past few years, the field of quantum information science has seen tremendous progress toward realizing large-scale quantum computers. With demonstrations of quantum computers outperforming classical computers for a select range of problems,1–3 we have finally entered the noisy, intermediate-scale quantum (NISQ) computing era. While the quantum computers of today are technological marvels, they are not yet error corrected, and it is unclear whether any system will scale beyond a few hundred logical qubits without significant changes to architecture and control schemes. Today's quantum systems are analogous to the ENIAC (Electronic Numerical Integrator And Computer) and EDVAC (Electronic Discrete Variable Automatic Computer) systems of the 1940s, which ran on vacuum tubes. These machines were built on a solid, nominally scalable architecture and when they were developed, nobody could have predicted the development of the transistor and the impact of the resulting semiconductor industry. Simply put, the computers of today are nothing like the early computers of the 1940s. We believe that the qubits of future fault-tolerant quantum systems will look quite different from the qubits of the NISQ machines in operation today. This Special Topic issue is devoted to new and emerging quantum systems with a focus on enabling technologies that can eventually lead to the quantum analog to the transistor. We have solicited both research4–18 and perspective articles19–21 to discuss new and emerging qubit systems with a focus on novel materials, encodings, and architectures. We are proud to present a collection that touches on a wide range of technologies including superconductors,7–13,21 semiconductors,15–17,19 and individual atomic qubits.18
},
  author       = {Sigillito, Anthony J. and Covey, Jacob P. and Fink, Johannes M and Petersson, Karl and Preble, Stefan},
  issn         = {0003-6951},
  journal      = {Applied Physics Letters},
  number       = {19},
  publisher    = {American Institute of Physics},
  title        = {{Emerging qubit systems: Guest editorial}},
  doi          = {10.1063/5.0097339},
  volume       = {120},
  year         = {2022},
}

@article{11418,
  abstract     = {We consider the quadratic form of a general high-rank deterministic matrix on the eigenvectors of an N×N
Wigner matrix and prove that it has Gaussian fluctuation for each bulk eigenvector in the large N limit. The proof is a combination of the energy method for the Dyson Brownian motion inspired by Marcinek and Yau (2021) and our recent multiresolvent local laws (Comm. Math. Phys. 388 (2021) 1005–1048).},
  author       = {Cipolloni, Giorgio and Erdös, László and Schröder, Dominik J},
  issn         = {2168-894X},
  journal      = {Annals of Probability},
  number       = {3},
  pages        = {984--1012},
  publisher    = {Institute of Mathematical Statistics},
  title        = {{Normal fluctuation in quantum ergodicity for Wigner matrices}},
  doi          = {10.1214/21-AOP1552},
  volume       = {50},
  year         = {2022},
}

@article{11419,
  abstract     = {Elevation of soluble wild-type (WT) tau occurs in synaptic compartments in Alzheimer’s disease. We addressed whether tau elevation affects synaptic transmission at the calyx of Held in slices from mice brainstem. Whole-cell loading of WT human tau (h-tau) in presynaptic terminals at 10–20 µM caused microtubule (MT) assembly and activity-dependent rundown of excitatory neurotransmission. Capacitance measurements revealed that the primary target of WT h-tau is vesicle endocytosis. Blocking MT assembly using nocodazole prevented tau-induced impairments of endocytosis and neurotransmission. Immunofluorescence imaging analyses revealed that MT assembly by WT h-tau loading was associated with an increased MT-bound fraction of the endocytic protein dynamin. A synthetic dodecapeptide corresponding to dynamin 1-pleckstrin-homology domain inhibited MT-dynamin interaction and rescued tau-induced impairments of endocytosis and neurotransmission. We conclude that elevation of presynaptic WT tau induces de novo assembly of MTs, thereby sequestering free dynamins. As a result, endocytosis and subsequent vesicle replenishment are impaired, causing activity-dependent rundown of neurotransmission.},
  author       = {Hori, Tetsuya and Eguchi, Kohgaku and Wang, Han Ying and Miyasaka, Tomohiro and Guillaud, Laurent and Taoufiq, Zacharie and Mahapatra, Satyajit and Yamada, Hiroshi and Takei, Kohji and Takahashi, Tomoyuki},
  issn         = {2050-084X},
  journal      = {eLife},
  publisher    = {eLife Sciences Publications},
  title        = {{Microtubule assembly by tau impairs endocytosis and neurotransmission via dynamin sequestration in Alzheimer's disease synapse model}},
  doi          = {10.7554/eLife.73542},
  volume       = {11},
  year         = {2022},
}

@article{11420,
  abstract     = {Understanding the properties of neural networks trained via stochastic gradient descent (SGD) is at the heart of the theory of deep learning. In this work, we take a mean-field view, and consider a two-layer ReLU network trained via noisy-SGD for a univariate regularized regression problem. Our main result is that SGD with vanishingly small noise injected in the gradients is biased towards a simple solution: at convergence, the ReLU network implements a piecewise linear map of the inputs, and the number of “knot” points -- i.e., points where the tangent of the ReLU network estimator changes -- between two consecutive training inputs is at most three. In particular, as the number of neurons of the network grows, the SGD dynamics is captured by the solution of a gradient flow and, at convergence, the distribution of the weights approaches the unique minimizer of a related free energy, which has a Gibbs form. Our key technical contribution consists in the analysis of the estimator resulting from this minimizer: we show that its second derivative vanishes everywhere, except at some specific locations which represent the “knot” points. We also provide empirical evidence that knots at locations distinct from the data points might occur, as predicted by our theory.},
  author       = {Shevchenko, Aleksandr and Kungurtsev, Vyacheslav and Mondelli, Marco},
  issn         = {1533-7928},
  journal      = {Journal of Machine Learning Research},
  number       = {130},
  pages        = {1--55},
  publisher    = {Journal of Machine Learning Research},
  title        = {{Mean-field analysis of piecewise linear solutions for wide ReLU networks}},
  volume       = {23},
  year         = {2022},
}

@inproceedings{11428,
  abstract     = {The medial axis of a set consists of the points in the ambient space without a unique closest point on the original set. Since its introduction, the medial axis has been used extensively in many applications as a method of computing a topologically equivalent skeleton. Unfortunately, one limiting factor in the use of the medial axis of a smooth manifold is that it is not necessarily topologically stable under small perturbations of the manifold. To counter these instabilities various prunings of the medial axis have been proposed. Here, we examine one type of pruning, called burning. Because of the good experimental results, it was hoped that the burning method of simplifying the medial axis would be stable. In this work we show a simple example that dashes such hopes based on Bing’s house with two rooms, demonstrating an isotopy of a shape where the medial axis goes from collapsible to non-collapsible.},
  author       = {Chambers, Erin and Fillmore, Christopher D and Stephenson, Elizabeth R and Wintraecken, Mathijs},
  booktitle    = {38th International Symposium on Computational Geometry},
  editor       = {Goaoc, Xavier and Kerber, Michael},
  isbn         = {978-3-95977-227-3},
  issn         = {1868-8969},
  location     = {Berlin, Germany},
  pages        = {66:1--66:9},
  publisher    = {Schloss Dagstuhl - Leibniz-Zentrum für Informatik},
  title        = {{A cautionary tale: Burning the medial axis is unstable}},
  doi          = {10.4230/LIPIcs.SoCG.2022.66},
  volume       = {224},
  year         = {2022},
}

@book{11429,
  abstract     = {This book constitutes the refereed proceedings of the 18th International Symposium on Web and Wireless Geographical Information Systems, W2GIS 2022, held in Konstanz, Germany, in April 2022.
The 7 full papers presented together with 6 short papers in the volume were carefully reviewed and selected from 16 submissions.  The papers cover topics that range from mobile GIS and Location-Based Services to Spatial Information Retrieval and Wireless Sensor Networks.},
  editor       = {Karimipour, Farid and Storandt, Sabine},
  isbn         = {9783031062445},
  issn         = {1611-3349},
  pages        = {153},
  publisher    = {Springer Nature},
  series       = {Lecture Notes in Computer Science},
  title        = {{Web and Wireless Geographical Information Systems}},
  doi          = {10.1007/978-3-031-06245-2},
  volume       = {13238},
  year         = {2022},
}

