@article{9794,
  abstract     = {Lymph nodes (LNs) comprise two main structural elements: fibroblastic reticular cells that form dedicated niches for immune cell interaction and capsular fibroblasts that build a shell around the organ. Immunological challenge causes LNs to increase more than tenfold in size within a few days. Here, we characterized the biomechanics of LN swelling on the cellular and organ scale. We identified lymphocyte trapping by influx and proliferation as drivers of an outward pressure force, causing fibroblastic reticular cells of the T-zone (TRCs) and their associated conduits to stretch. After an initial phase of relaxation, TRCs sensed the resulting strain through cell matrix adhesions, which coordinated local growth and remodeling of the stromal network. While the expanded TRC network readopted its typical configuration, a massive fibrotic reaction of the organ capsule set in and countered further organ expansion. Thus, different fibroblast populations mechanically control LN swelling in a multitier fashion.},
  author       = {Assen, Frank P and Abe, Jun and Hons, Miroslav and Hauschild, Robert and Shamipour, Shayan and Kaufmann, Walter and Costanzo, Tommaso and Krens, Gabriel and Brown, Markus and Ludewig, Burkhard and Hippenmeyer, Simon and Heisenberg, Carl-Philipp J and Weninger, Wolfgang and Hannezo, Edouard B and Luther, Sanjiv A. and Stein, Jens V. and Sixt, Michael K},
  issn         = {1529-2916},
  journal      = {Nature Immunology},
  pages        = {1246--1255},
  publisher    = {Springer Nature},
  title        = {{Multitier mechanics control stromal adaptations in swelling lymph nodes}},
  doi          = {10.1038/s41590-022-01257-4},
  volume       = {23},
  year         = {2022},
}

@article{9955,
  abstract     = {Neurons can change their classical neurotransmitters during ontogeny, sometimes going through stages of dual release. Here, we explored the development of the neurotransmitter identity of neurons of the avian nucleus isthmi parvocellularis (Ipc), whose axon terminals are retinotopically arranged in the optic tectum (TeO) and exert a focal gating effect upon the ascending transmission of retinal inputs. Although cholinergic and glutamatergic markers are both found in Ipc neurons and terminals of adult pigeons and chicks, the mRNA expression of the vesicular acetylcholine transporter, VAChT, is weak or absent. To explore how the Ipc neurotransmitter identity is established during ontogeny, we analyzed the expression of mRNAs coding for cholinergic (ChAT, VAChT, and CHT) and glutamatergic (VGluT2 and VGluT3) markers in chick embryos at different developmental stages. We found that between E12 and E18, Ipc neurons expressed all cholinergic mRNAs and also VGluT2 mRNA; however, from E16 through posthatch stages, VAChT mRNA expression was specifically diminished. Our ex vivo deposits of tracer crystals and intracellular filling experiments revealed that Ipc axons exhibit a mature paintbrush morphology late in development, experiencing marked morphological transformations during the period of presumptive dual vesicular transmitter release. Additionally, although ChAT protein immunoassays increasingly label the growing Ipc axon, this labeling was consistently restricted to sparse portions of the terminal branches. Combined, these results suggest that the synthesis of glutamate and acetylcholine, and their vesicular release, is complexly linked to the developmental processes of branching, growing and remodeling of these unique axons.},
  author       = {Reyes-Pinto, Rosana and Ferrán, José L. and Vega Zuniga, Tomas A and González-Cabrera, Cristian and Luksch, Harald and Mpodozis, Jorge and Puelles, Luis and Marín, Gonzalo J.},
  issn         = {1096-9861},
  journal      = {Journal of Comparative Neurology},
  number       = {2},
  pages        = {553--573},
  publisher    = {Wiley},
  title        = {{Change in the neurochemical signature and morphological development of the parvocellular isthmic projection to the avian tectum}},
  doi          = {10.1002/cne.25229},
  volume       = {530},
  year         = {2022},
}

@article{9977,
  abstract     = {For a Seifert fibered homology sphere $X$ we show that the $q$-series invariant $\hat{Z}_0(X;q)$ introduced by Gukov-Pei-Putrov-Vafa, is a resummation of the Ohtsuki series $Z_0(X)$. We show that for every even $k \in \mathbb{N}$ there exists a full asymptotic expansion of $\hat{Z}_0(X;q)$ for $q$ tending to $e^{2\pi i/k}$, and in particular that the limit $\hat{Z}_0(X;e^{2\pi i/k})$ exists and is equal to the WRT quantum invariant $\tau_k(X)$. We show that the poles of the Borel transform of $Z_0(X)$ coincide with the classical complex Chern-Simons values, which we further show classifies the corresponding components of the moduli space of flat $\mathrm{SL}(2,\mathbb{C})$-connections.},
  author       = {Mistegaard, William and Andersen, Jørgen Ellegaard},
  issn         = {1469-7750},
  journal      = {Journal of the London Mathematical Society},
  number       = {2},
  pages        = {709--764},
  publisher    = {Wiley},
  title        = {{Resurgence analysis of quantum invariants of Seifert fibered homology spheres}},
  doi          = {10.1112/jlms.12506},
  volume       = {105},
  year         = {2022},
}

@inproceedings{10665,
  abstract     = {Formal verification of neural networks is an active topic of research, and recent advances have significantly increased the size of the networks that verification tools can handle. However, most methods are designed for verification of an idealized model of the actual network which works over real arithmetic and ignores rounding imprecisions. This idealization is in stark contrast to network quantization, which is a technique that trades numerical precision for computational efficiency and is, therefore, often applied in practice. Neglecting rounding errors of such low-bit quantized neural networks has been shown to lead to wrong conclusions about the network’s correctness. Thus, the desired approach for verifying quantized neural networks would be one that takes these rounding errors
into account. In this paper, we show that verifying the bit-exact implementation of quantized neural networks with bit-vector specifications is PSPACE-hard, even though verifying idealized real-valued networks and satisfiability of bit-vector specifications alone are each in NP. Furthermore, we explore several practical heuristics toward closing the complexity gap between idealized and bit-exact verification. In particular, we propose three techniques for making SMT-based verification of quantized neural networks more scalable. Our experiments demonstrate that our proposed methods allow a speedup of up to three orders of magnitude over existing approaches.},
  author       = {Henzinger, Thomas A and Lechner, Mathias and Zikelic, Dorde},
  booktitle    = {Proceedings of the AAAI Conference on Artificial Intelligence},
  isbn         = {978-1-57735-866-4},
  issn         = {2374-3468},
  location     = {Virtual},
  number       = {5A},
  pages        = {3787--3795},
  publisher    = {AAAI Press},
  title        = {{Scalable verification of quantized neural networks}},
  volume       = {35},
  year         = {2021},
}

@inproceedings{10666,
  abstract     = {Adversarial training is an effective method to train deep learning models that are resilient to norm-bounded perturbations, with the cost of nominal performance drop. While adversarial training appears to enhance the robustness and safety of a deep model deployed in open-world decision-critical applications, counterintuitively, it induces undesired behaviors in robot learning settings. In this paper, we show theoretically and experimentally that neural controllers obtained via adversarial training are subjected to three types of defects, namely transient, systematic, and conditional errors. We first generalize adversarial training to a safety-domain optimization scheme allowing for more generic specifications. We then prove that such a learning process tends to cause certain error profiles. We support our theoretical results by a thorough experimental safety analysis in a robot-learning task. Our results suggest that adversarial training is not yet ready for robot learning.},
  author       = {Lechner, Mathias and Hasani, Ramin and Grosu, Radu and Rus, Daniela and Henzinger, Thomas A},
  booktitle    = {2021 IEEE International Conference on Robotics and Automation},
  isbn         = {978-1-7281-9078-5},
  issn         = {2577-087X},
  location     = {Xi'an, China},
  pages        = {4140--4147},
  title        = {{Adversarial training is not ready for robot learning}},
  doi          = {10.1109/ICRA48506.2021.9561036},
  year         = {2021},
}

@inproceedings{10667,
  abstract     = {Bayesian neural networks (BNNs) place distributions over the weights of a neural network to model uncertainty in the data and the network's prediction. We consider the problem of verifying safety when running a Bayesian neural network policy in a feedback loop with infinite time horizon systems. Compared to the existing sampling-based approaches, which are inapplicable to the infinite time horizon setting, we train a separate deterministic neural network that serves as an infinite time horizon safety certificate. In particular, we show that the certificate network guarantees the safety of the system over a subset of the BNN weight posterior's support. Our method first computes a safe weight set and then alters the BNN's weight posterior to reject samples outside this set. Moreover, we show how to extend our approach to a safe-exploration reinforcement learning setting, in order to avoid unsafe trajectories during the training of the policy. We evaluate our approach on a series of reinforcement learning benchmarks, including non-Lyapunovian safety specifications.},
  author       = {Lechner, Mathias and Žikelić, Ðorđe and Chatterjee, Krishnendu and Henzinger, Thomas A},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  location     = {Virtual},
  title        = {{Infinite time horizon safety of Bayesian neural networks}},
  doi          = {10.48550/arXiv.2111.03165},
  year         = {2021},
}

@inproceedings{10668,
  abstract     = {Robustness to variations in lighting conditions is a key objective for any deep vision system. To this end, our paper extends the receptive field of convolutional neural networks with two residual components, ubiquitous in the visual processing system of vertebrates: On-center and off-center pathways, with an excitatory center and inhibitory surround; OOCS for short. The On-center pathway is excited by the presence of a light stimulus in its center, but not in its surround, whereas the Off-center pathway is excited by the absence of a light stimulus in its center, but not in its surround. We design OOCS pathways via a difference of Gaussians, with their variance computed analytically from the size of the receptive fields. OOCS pathways complement each other in their response to light stimuli, ensuring this way a strong edge-detection capability, and as a result an accurate and robust inference under challenging lighting conditions. We provide extensive empirical evidence showing that networks supplied with OOCS pathways gain accuracy and illumination-robustness from the novel edge representation, compared to other baselines.},
  author       = {Babaiee, Zahra and Hasani, Ramin and Lechner, Mathias and Rus, Daniela and Grosu, Radu},
  booktitle    = {Proceedings of the 38th International Conference on Machine Learning},
  issn         = {2640-3498},
  location     = {Virtual},
  pages        = {478--489},
  publisher    = {ML Research Press},
  title        = {{On-off center-surround receptive fields for accurate and robust image classification}},
  volume       = {139},
  year         = {2021},
}

@inproceedings{10669,
  abstract     = {We show that Neural ODEs, an emerging class of time-continuous neural networks, can be verified by solving a set of global-optimization problems. For this purpose, we introduce Stochastic Lagrangian Reachability (SLR), an
abstraction-based technique for constructing a tight Reachtube (an over-approximation of the set of reachable states
over a given time-horizon), and provide stochastic guarantees in the form of confidence intervals for the Reachtube bounds. SLR inherently avoids the infamous wrapping effect (accumulation of over-approximation errors) by performing local optimization steps to expand safe regions instead of repeatedly forward-propagating them as is done by deterministic reachability methods. To enable fast local optimizations, we introduce a novel forward-mode adjoint sensitivity method to compute gradients without the need for backpropagation. Finally, we establish asymptotic and non-asymptotic convergence rates for SLR.},
  author       = {Grunbacher, Sophie and Hasani, Ramin and Lechner, Mathias and Cyranka, Jacek and Smolka, Scott A and Grosu, Radu},
  booktitle    = {Proceedings of the AAAI Conference on Artificial Intelligence},
  isbn         = {978-1-57735-866-4},
  issn         = {2374-3468},
  location     = {Virtual},
  number       = {13},
  pages        = {11525--11535},
  publisher    = {AAAI Press},
  title        = {{On the verification of neural ODEs with stochastic guarantees}},
  volume       = {35},
  year         = {2021},
}

@inproceedings{10670,
  abstract     = {Imitation learning enables high-fidelity, vision-based learning of policies within rich, photorealistic environments. However, such techniques often rely on traditional discrete-time neural models and face difficulties in generalizing to domain shifts by failing to account for the causal relationships between the agent and the environment. In this paper, we propose a theoretical and experimental framework for learning causal representations using continuous-time neural networks, specifically over their discrete-time counterparts. We evaluate our method in the context of visual-control learning of drones over a series of complex tasks, ranging from short- and long-term navigation, to chasing static and dynamic objects through photorealistic environments. Our results demonstrate that causal continuous-time
deep models can perform robust navigation tasks, where advanced recurrent models fail. These models learn complex causal control representations directly from raw visual inputs and scale to solve a variety of tasks using imitation learning.},
  author       = {Vorbach, Charles J and Hasani, Ramin and Amini, Alexander and Lechner, Mathias and Rus, Daniela},
  booktitle    = {35th Conference on Neural Information Processing Systems},
  location     = {Virtual},
  title        = {{Causal navigation by continuous-time neural networks}},
  year         = {2021},
}

@inproceedings{10671,
  abstract     = {We introduce a new class of time-continuous recurrent neural network models. Instead of declaring a learning system’s dynamics by implicit nonlinearities, we construct networks of linear first-order dynamical systems modulated via nonlinear interlinked gates. The resulting models represent dynamical systems with varying (i.e., liquid) time-constants coupled to their hidden state, with outputs being computed by numerical differential equation solvers. These neural networks exhibit stable and bounded behavior, yield superior expressivity within the family of neural ordinary differential equations, and give rise to improved performance on time-series prediction tasks. To demonstrate these properties, we first take a theoretical approach to find bounds over their dynamics, and compute their expressive power by the trajectory length measure in a latent trajectory space. We then conduct a series of time-series prediction experiments to manifest the approximation capability of Liquid Time-Constant Networks (LTCs) compared to classical and modern RNNs.},
  author       = {Hasani, Ramin and Lechner, Mathias and Amini, Alexander and Rus, Daniela and Grosu, Radu},
  booktitle    = {Proceedings of the AAAI Conference on Artificial Intelligence},
  isbn         = {978-1-57735-866-4},
  issn         = {2374-3468},
  location     = {Virtual},
  number       = {9},
  pages        = {7657--7666},
  publisher    = {AAAI Press},
  title        = {{Liquid time-constant networks}},
  volume       = {35},
  year         = {2021},
}

@article{10674,
  abstract     = {In two-player games on graphs, the players move a token through a graph to produce an infinite path, which determines the winner of the game. Such games are central in formal methods since they model the interaction between a non-terminating system and its environment. In bidding games the players bid for the right to move the token: in each round, the players simultaneously submit bids, and the higher bidder moves the token and pays the other player. Bidding games are known to have a clean and elegant mathematical structure that relies on the ability of the players to submit arbitrarily small bids. Many applications, however, require a fixed granularity for the bids, which can represent, for example, the monetary value expressed in cents. We study, for the first time, the combination of discrete-bidding and infinite-duration games. Our most important result proves that these games form a large determined subclass of concurrent games, where determinacy is the strong property that there always exists exactly one player who can guarantee winning the game. In particular, we show that, in contrast to non-discrete bidding games, the mechanism with which tied bids are resolved plays an important role in discrete-bidding games. We study several natural tie-breaking mechanisms and show that, while some do not admit determinacy, most natural mechanisms imply determinacy for every pair of initial budgets.},
  author       = {Aghajohari, Milad and Avni, Guy and Henzinger, Thomas A},
  issn         = {1860-5974},
  journal      = {Logical Methods in Computer Science},
  keywords     = {computer science, computer science and game theory, logic in computer science},
  number       = {1},
  pages        = {10:1--10:23},
  publisher    = {International Federation for Computational Logic},
  title        = {{Determinacy in discrete-bidding infinite-duration games}},
  doi          = {10.23638/LMCS-17(1:10)2021},
  volume       = {17},
  year         = {2021},
}

@inproceedings{10688,
  abstract     = {Civl is a static verifier for concurrent programs designed around the conceptual framework of layered refinement,
which views the task of verifying a program as a sequence of program simplification steps each justified by its own invariant. Civl verifies a layered concurrent program that compactly expresses all the programs in this sequence and the supporting invariants. This paper presents the design and implementation of the Civl verifier.},
  author       = {Kragl, Bernhard and Qadeer, Shaz},
  booktitle    = {Proceedings of the 21st Conference on Formal Methods in Computer-Aided Design},
  editor       = {Ruzica, Piskac and Whalen, Michael W.},
  isbn         = {978-3-85448-046-4},
  location     = {Virtual},
  pages        = {143--152},
  publisher    = {TU Wien Academic Press},
  title        = {{The Civl verifier}},
  doi          = {10.34727/2021/isbn.978-3-85448-046-4_23},
  volume       = {2},
  year         = {2021},
}

@inproceedings{10694,
  abstract     = {In a two-player zero-sum graph game the players move a token throughout a graph to produce an infinite path, which determines the winner or payoff of the game. Traditionally, the players alternate turns in moving the token. In bidding games, however, the players have budgets, and in each turn, we hold an “auction” (bidding) to determine which player moves the token: both players simultaneously submit bids and the higher bidder moves the token. The bidding mechanisms differ in their payment schemes. Bidding games were largely studied with variants of first-price bidding in which only the higher bidder pays his bid. We focus on all-pay bidding, where both players pay their bids. Finite-duration all-pay bidding games were studied and shown to be technically more challenging than their first-price counterparts. We study for the first time, infinite-duration all-pay bidding games. Our most interesting results are for mean-payoff objectives: we portray a complete picture for games played on strongly-connected graphs. We study both pure (deterministic) and mixed (probabilistic) strategies and completely characterize the optimal and almost-sure (with probability 1) payoffs the players can respectively guarantee. We show that mean-payoff games under all-pay bidding exhibit the intriguing mathematical properties of their first-price counterparts; namely, an equivalence with random-turn games in which in each turn, the player who moves is selected according to a (biased) coin toss. The equivalences for all-pay bidding are more intricate and unexpected than for first-price bidding.},
  author       = {Avni, Guy and Jecker, Ismael R and Zikelic, Dorde},
  booktitle    = {Proceedings of the 2021 ACM-SIAM Symposium on Discrete Algorithms},
  editor       = {Marx, Dániel},
  isbn         = {978-1-61197-646-5},
  location     = {Virtual},
  pages        = {617--636},
  publisher    = {Society for Industrial and Applied Mathematics},
  title        = {{Infinite-duration all-pay bidding games}},
  doi          = {10.1137/1.9781611976465.38},
  year         = {2021},
}

@article{10711,
  abstract     = {In this paper, we investigate the distribution of the maximum of partial sums of families of $m$-periodic complex-valued functions satisfying certain conditions. We obtain precise uniform estimates for the distribution function of this maximum in a near-optimal range. Our results apply to partial sums of Kloosterman sums and other families of $\ell$-adic trace functions, and are as strong as those obtained by Bober, Goldmakher, Granville and Koukoulopoulos for character sums. In particular, we improve on the recent work of the third author for Birch sums. However, unlike character sums, we are able to construct families of $m$-periodic complex-valued functions which satisfy our conditions, but for which the Pólya–Vinogradov inequality is sharp.},
  author       = {Autissier, Pascal and Bonolis, Dante and Lamzouri, Youness},
  issn         = {1570-5846},
  journal      = {Compositio Mathematica},
  keywords     = {Algebra and Number Theory},
  number       = {7},
  pages        = {1610--1651},
  publisher    = {Cambridge University Press},
  title        = {{The distribution of the maximum of partial sums of Kloosterman sums and other trace functions}},
  doi          = {10.1112/s0010437x21007351},
  volume       = {157},
  year         = {2021},
}

@article{10738,
  abstract     = {We prove an adiabatic theorem for the Landau–Pekar equations. This allows us to derive new results on the accuracy of their use as effective equations for the time evolution generated by the Fröhlich Hamiltonian with large coupling constant α. In particular, we show that the time evolution of Pekar product states with coherent phonon field and the electron being trapped by the phonons is well approximated by the Landau–Pekar equations until times short compared to α2.},
  author       = {Leopold, Nikolai K and Rademacher, Simone Anna Elvira and Schlein, Benjamin and Seiringer, Robert},
  issn         = {1948-206X},
  journal      = {Analysis and PDE},
  number       = {7},
  pages        = {2079--2100},
  publisher    = {Mathematical Sciences Publishers},
  title        = {{The Landau–Pekar equations: Adiabatic theorem and accuracy}},
  doi          = {10.2140/APDE.2021.14.2079},
  volume       = {14},
  year         = {2021},
}

@unpublished{10762,
  abstract     = {Methods inspired from machine learning have recently attracted great interest in the computational study of quantum many-particle systems. So far, however, it has proven challenging to deal with microscopic models in which the total number of particles is not conserved. To address this issue, we propose a new variant of neural network states, which we term neural coherent states. Taking the Fröhlich impurity model as a case study, we show that neural coherent states can learn the ground state of non-additive systems very well. In particular, we observe substantial improvement over the standard coherent state estimates in the most challenging intermediate coupling regime. Our approach is generic and does not assume specific details of the system, suggesting wide applications.},
  author       = {Rzadkowski, Wojciech and Lemeshko, Mikhail and Mentink, Johan H.},
  booktitle    = {arXiv},
  pages        = {2105.15193},
  title        = {{Artificial neural network states for non-additive systems}},
  doi          = {10.48550/arXiv.2105.15193},
  year         = {2021},
}

@unpublished{10803,
  abstract     = {Given the abundance of applications of ranking in recent years, addressing fairness concerns around automated ranking systems becomes necessary for increasing the trust among end-users. Previous work on fair ranking has mostly focused on application-specific fairness notions, often tailored to online advertising, and it rarely considers learning as part of the process. In this work, we show how to transfer numerous fairness notions from binary classification to a learning to rank setting. Our formalism allows us to design methods for incorporating fairness objectives with provable generalization guarantees. An extensive experimental evaluation shows that our method can improve ranking fairness substantially with no or only little loss of model quality.},
  author       = {Konstantinov, Nikola H and Lampert, Christoph},
  booktitle    = {arXiv},
  title        = {{Fairness through regularization for learning to rank}},
  doi          = {10.48550/arXiv.2102.05996},
  year         = {2021},
}

@article{10806,
  abstract     = {Ligands are a fundamental part of nanocrystals. They control and direct nanocrystal syntheses and provide colloidal stability. Bound ligands also affect the nanocrystals’ chemical reactivity and electronic structure. Surface chemistry is thus crucial to understand nanocrystal properties and functionality. Here, we investigate the synthesis of metal oxide nanocrystals (CeO2-x, ZnO, and NiO) from metal nitrate precursors, in the presence of oleylamine ligands. Surprisingly, the nanocrystals are capped exclusively with a fatty acid instead of oleylamine. Analysis of the reaction mixtures with nuclear magnetic resonance spectroscopy revealed several reaction byproducts and intermediates that are common to the decomposition of Ce, Zn, Ni, and Zr nitrate precursors. Our evidence supports the oxidation of alkylamine and formation of a carboxylic acid, thus unraveling this counterintuitive surface chemistry.},
  author       = {Calcabrini, Mariano and Van den Eynden, Dietger and Sanchez Ribot, Sergi and Pokratath, Rohan and Llorca, Jordi and De Roo, Jonathan and Ibáñez, Maria},
  issn         = {2691-3704},
  journal      = {JACS Au},
  keywords     = {general medicine},
  number       = {11},
  pages        = {1898--1903},
  publisher    = {American Chemical Society},
  title        = {{Ligand conversion in nanocrystal synthesis: The oxidation of alkylamines to fatty acids by nitrate}},
  doi          = {10.1021/jacsau.1c00349},
  volume       = {1},
  year         = {2021},
}

@article{10809,
  abstract     = {Thermoelectric materials are engines that convert heat into an electrical current. Intuitively, the efficiency of this process depends on how many electrons (charge carriers) can move and how easily they do so, how much energy those moving electrons transport, and how easily the temperature gradient is maintained. In terms of material properties, an excellent thermoelectric material requires a high electrical conductivity σ, a high Seebeck coefficient S (a measure of the induced thermoelectric voltage as a function of temperature gradient), and a low thermal conductivity κ. The challenge is that these three properties are strongly interrelated in a conflicting manner (1). On page 722 of this issue, Roychowdhury et al. (2) have found a way to partially break these ties in silver antimony telluride (AgSbTe2) with the addition of cadmium (Cd) cations, which increase the ordering in this inherently disordered thermoelectric material.},
  author       = {Liu, Yu and Ibáñez, Maria},
  issn         = {1095-9203},
  journal      = {Science},
  keywords     = {multidisciplinary},
  number       = {6530},
  pages        = {678--679},
  publisher    = {American Association for the Advancement of Science},
  title        = {{Tidying up the mess}},
  doi          = {10.1126/science.abg0886},
  volume       = {371},
  year         = {2021},
}

@article{10816,
  abstract     = {Pattern separation is a fundamental brain computation that converts small differences in input patterns into large differences in output patterns. Several synaptic mechanisms of pattern separation have been proposed, including code expansion, inhibition and plasticity; however, which of these mechanisms play a role in the entorhinal cortex (EC)–dentate gyrus (DG)–CA3 circuit, a classical pattern separation circuit, remains unclear. Here we show that a biologically realistic, full-scale EC–DG–CA3 circuit model, including granule cells (GCs) and parvalbumin-positive inhibitory interneurons (PV+-INs) in the DG, is an efficient pattern separator. Both external gamma-modulated inhibition and internal lateral inhibition mediated by PV+-INs substantially contributed to pattern separation. Both local connectivity and fast signaling at GC–PV+-IN synapses were important for maximum effectiveness. Similarly, mossy fiber synapses with conditional detonator properties contributed to pattern separation. By contrast, perforant path synapses with Hebbian synaptic plasticity and direct EC–CA3 connection shifted the network towards pattern completion. Our results demonstrate that the specific properties of cells and synapses optimize higher-order computations in biological networks and might be useful to improve the deep learning capabilities of technical networks.},
  author       = {Guzmán, José and Schlögl, Alois and Espinoza Martinez, Claudia and Zhang, Xiaomin and Suter, Benjamin and Jonas, Peter M},
  issn         = {2662-8457},
  journal      = {Nature Computational Science},
  keywords     = {general medicine},
  number       = {12},
  pages        = {830--842},
  publisher    = {Springer Nature},
  title        = {{How connectivity rules and synaptic properties shape the efficacy of pattern separation in the entorhinal cortex–dentate gyrus–CA3 network}},
  doi          = {10.1038/s43588-021-00157-1},
  volume       = {1},
  year         = {2021},
}

