@article{14515,
  abstract     = {Most natural and engineered information-processing systems transmit information via signals that vary in time. Computing the information transmission rate or the information encoded in the temporal characteristics of these signals requires the mutual information between the input and output signals as a function of time, i.e., between the input and output trajectories. Yet, this is notoriously difficult because of the high-dimensional nature of the trajectory space, and all existing techniques require approximations. We present an exact Monte Carlo technique called path weight sampling (PWS) that, for the first time, makes it possible to compute the mutual information between input and output trajectories for any stochastic system that is described by a master equation. The principal idea is to use the master equation to evaluate the exact conditional probability of an individual output trajectory for a given input trajectory and average this via Monte Carlo sampling in trajectory space to obtain the mutual information. We present three variants of PWS, which all generate the trajectories using the standard stochastic simulation algorithm. While direct PWS is a brute-force method, Rosenbluth-Rosenbluth PWS exploits the analogy between signal trajectory sampling and polymer sampling, and thermodynamic integration PWS is based on a reversible work calculation in trajectory space. PWS also makes it possible to compute the mutual information between input and output trajectories for systems with hidden internal states as well as systems with feedback from output to input. Applying PWS to the bacterial chemotaxis system, consisting of 182 coupled chemical reactions, demonstrates not only that the scheme is highly efficient but also that the number of receptor clusters is much smaller than hitherto believed, while their size is much larger.},
  author       = {Reinhardt, Manuel and Tkačik, Gašper and ten Wolde, Pieter Rein},
  issn         = {2160-3308},
  journal      = {Physical Review X},
  number       = {4},
  publisher    = {American Physical Society},
  title        = {{Path weight sampling: Exact Monte Carlo computation of the mutual information between stochastic trajectories}},
  doi          = {10.1103/PhysRevX.13.041017},
  volume       = {13},
  year         = {2023},
}

@article{14656,
  abstract     = {Although much is known about how single neurons in the hippocampus represent an animal's position, how circuit interactions contribute to spatial coding is less well understood. Using a novel statistical estimator and theoretical modeling, both developed in the framework of maximum entropy models, we reveal highly structured CA1 cell-cell interactions in male rats during open field exploration. The statistics of these interactions depend on whether the animal is in a familiar or novel environment. In both conditions the circuit interactions optimize the encoding of spatial information, but for regimes that differ in the informativeness of their spatial inputs. This structure facilitates linear decodability, making the information easy to read out by downstream circuits. Overall, our findings suggest that the efficient coding hypothesis is not only applicable to individual neuron properties in the sensory periphery, but also to neural interactions in the central brain.},
  author       = {Nardin, Michele and Csicsvari, Jozsef L and Tkačik, Gašper and Savin, Cristina},
  issn         = {1529-2401},
  journal      = {The Journal of Neuroscience},
  number       = {48},
  pages        = {8140--8156},
  publisher    = {Society for Neuroscience},
  title        = {{The structure of hippocampal CA1 interactions optimizes spatial coding across experience}},
  doi          = {10.1523/JNEUROSCI.0194-23.2023},
  volume       = {43},
  year         = {2023},
}

@article{13127,
  abstract     = {Cooperative disease defense emerges as group-level collective behavior, yet how group members make the underlying individual decisions is poorly understood. Using garden ants and fungal pathogens as an experimental model, we derive the rules governing individual ant grooming choices and show how they produce colony-level hygiene. Time-resolved behavioral analysis, pathogen quantification, and probabilistic modeling reveal that ants increase grooming and preferentially target highly-infectious individuals when perceiving high pathogen load, but transiently suppress grooming after having been groomed by nestmates. Ants thus react to both, the infectivity of others and the social feedback they receive on their own contagiousness. While inferred solely from momentary ant decisions, these behavioral rules quantitatively predict hour-long experimental dynamics, and synergistically combine into efficient colony-wide pathogen removal. Our analyses show that noisy individual decisions based on only local, incomplete, yet dynamically-updated information on pathogen threat and social feedback can lead to potent collective disease defense.},
  author       = {Casillas Perez, Barbara E and Boďová, Katarína and Grasse, Anna V and Tkačik, Gašper and Cremer, Sylvia},
  issn         = {2041-1723},
  journal      = {Nature Communications},
  publisher    = {Springer Nature},
  title        = {{Dynamic pathogen detection and social feedback shape collective hygiene in ants}},
  doi          = {10.1038/s41467-023-38947-y},
  volume       = {14},
  year         = {2023},
}

@article{12762,
  abstract     = {Neurons in the brain are wired into adaptive networks that exhibit collective dynamics as diverse as scale-specific oscillations and scale-free neuronal avalanches. Although existing models account for oscillations and avalanches separately, they typically do not explain both phenomena, are too complex to analyze analytically or intractable to infer from data rigorously. Here we propose a feedback-driven Ising-like class of neural networks that captures avalanches and oscillations simultaneously and quantitatively. In the simplest yet fully microscopic model version, we can analytically compute the phase diagram and make direct contact with human brain resting-state activity recordings via tractable inference of the model’s two essential parameters. The inferred model quantitatively captures the dynamics over a broad range of scales, from single sensor oscillations to collective behaviors of extreme events and neuronal avalanches. Importantly, the inferred parameters indicate that the co-existence of scale-specific (oscillations) and scale-free (avalanches) dynamics occurs close to a non-equilibrium critical point at the onset of self-sustained oscillations.},
  author       = {Lombardi, Fabrizio and Pepic, Selver and Shriki, Oren and Tkačik, Gašper and De Martino, Daniele},
  issn         = {2662-8457},
  journal      = {Nature Computational Science},
  pages        = {254--263},
  publisher    = {Springer Nature},
  title        = {{Statistical modeling of adaptive neural networks explains co-existence of avalanches and oscillations in resting human brain}},
  doi          = {10.1038/s43588-023-00410-9},
  volume       = {3},
  year         = {2023},
}

@article{10736,
  abstract     = {Predicting function from sequence is a central problem of biology. Currently, this is possible only locally in a narrow mutational neighborhood around a wildtype sequence rather than globally from any sequence. Using random mutant libraries, we developed a biophysical model that accounts for multiple features of σ70 binding bacterial promoters to predict constitutive gene expression levels from any sequence. We experimentally and theoretically estimated that 10–20% of random sequences lead to expression and ~80% of non-expressing sequences are one mutation away from a functional promoter. The potential for generating expression from random sequences is so pervasive that selection acts against σ70-RNA polymerase binding sites even within inter-genic, promoter-containing regions. This pervasiveness of σ70-binding sites implies that emergence of promoters is not the limiting step in gene regulatory evolution. Ultimately, the inclusion of novel features of promoter function into a mechanistic model enabled not only more accurate predictions of gene expression levels, but also identified that promoters evolve more rapidly than previously thought.},
  author       = {Lagator, Mato and Sarikas, Srdjan and Steinrueck, Magdalena and Toledo-Aparicio, David and Bollback, Jonathan P and Guet, Calin C and Tkačik, Gašper},
  issn         = {2050-084X},
  journal      = {eLife},
  publisher    = {eLife Sciences Publications},
  title        = {{Predicting bacterial promoter function and evolution from random sequences}},
  doi          = {10.7554/eLife.64543},
  volume       = {11},
  year         = {2022},
}

@article{12081,
  abstract     = {Selection accumulates information in the genome—it guides stochastically evolving populations toward states (genotype frequencies) that would be unlikely under neutrality. This can be quantified as the Kullback–Leibler (KL) divergence between the actual distribution of genotype frequencies and the corresponding neutral distribution. First, we show that this population-level information sets an upper bound on the information at the level of genotype and phenotype, limiting how precisely they can be specified by selection. Next, we study how the accumulation and maintenance of information is limited by the cost of selection, measured as the genetic load or the relative fitness variance, both of which we connect to the control-theoretic KL cost of control. The information accumulation rate is upper bounded by the population size times the cost of selection. This bound is very general, and applies across models (Wright–Fisher, Moran, diffusion) and to arbitrary forms of selection, mutation, and recombination. Finally, the cost of maintaining information depends on how it is encoded: Specifying a single allele out of two is expensive, but one bit encoded among many weakly specified loci (as in a polygenic trait) is cheap.},
  author       = {Hledik, Michal and Barton, Nicholas H and Tkačik, Gašper},
  issn         = {1091-6490},
  journal      = {Proceedings of the National Academy of Sciences},
  number       = {36},
  publisher    = {National Academy of Sciences},
  title        = {{Accumulation and maintenance of information in evolution}},
  doi          = {10.1073/pnas.2123152119},
  volume       = {119},
  year         = {2022},
}

@article{12156,
  abstract     = {Models of transcriptional regulation that assume equilibrium binding of transcription factors have been less successful at predicting gene expression from sequence in eukaryotes than in bacteria. This could be due to the non-equilibrium nature of eukaryotic regulation. Unfortunately, the space of possible non-equilibrium mechanisms is vast and predominantly uninteresting. The key question is therefore how this space can be navigated efficiently, to focus on mechanisms and models that are biologically relevant. In this review, we advocate for the normative role of theory—theory that prescribes rather than just describes—in providing such a focus. Theory should expand its remit beyond inferring mechanistic models from data, towards identifying non-equilibrium gene regulatory schemes that may have been evolutionarily selected, despite their energy consumption, because they are precise, reliable, fast, or otherwise outperform regulation at equilibrium. We illustrate our reasoning by toy examples for which we provide simulation code.},
  author       = {Zoller, Benjamin and Gregor, Thomas and Tkačik, Gašper},
  issn         = {2452-3100},
  journal      = {Current Opinion in Systems Biology},
  keywords     = {Applied Mathematics, Computer Science Applications, Drug Discovery, General Biochemistry, Genetics and Molecular Biology, Modeling and Simulation},
  number       = {9},
  publisher    = {Elsevier},
  title        = {{Eukaryotic gene regulation at equilibrium, or non?}},
  doi          = {10.1016/j.coisb.2022.100435},
  volume       = {31},
  year         = {2022},
}

@article{12332,
  abstract     = {Activity of sensory neurons is driven not only by external stimuli but also by feedback signals from higher brain areas. Attention is one particularly important internal signal whose presumed role is to modulate sensory representations such that they only encode information currently relevant to the organism at minimal cost. This hypothesis has, however, not yet been expressed in a normative computational framework. Here, by building on normative principles of probabilistic inference and efficient coding, we developed a model of dynamic population coding in the visual cortex. By continuously adapting the sensory code to changing demands of the perceptual observer, an attention-like modulation emerges. This modulation can dramatically reduce the amount of neural activity without deteriorating the accuracy of task-specific inferences. Our results suggest that a range of seemingly disparate cortical phenomena such as intrinsic gain modulation, attention-related tuning modulation, and response variability could be manifestations of the same underlying principles, which combine efficient sensory coding with optimal probabilistic inference in dynamic environments.},
  author       = {Mlynarski, Wiktor F and Tkačik, Gašper},
  issn         = {1545-7885},
  journal      = {PLoS Biology},
  number       = {12},
  pages        = {e3001889},
  publisher    = {Public Library of Science},
  title        = {{Efficient coding theory of dynamic attentional modulation}},
  doi          = {10.1371/journal.pbio.3001889},
  volume       = {20},
  year         = {2022},
}

@unpublished{10912,
  abstract     = {Brain dynamics display collective phenomena as diverse as neuronal oscillations and avalanches. Oscillations are rhythmic, with fluctuations occurring at a characteristic scale, whereas avalanches are scale-free cascades of neural activity. Here we show that such antithetic features can coexist in a very generic class of adaptive neural networks. In the most simple yet fully microscopic model from this class we make direct contact with human brain resting-state activity recordings via tractable inference of the model's two essential parameters. The inferred model quantitatively captures the dynamics over a broad range of scales, from single sensor fluctuations, collective behaviors of nearly-synchronous extreme events on multiple sensors, to neuronal avalanches unfolding over multiple sensors across multiple time-bins. Importantly, the inferred parameters correlate with model-independent signatures of "closeness to criticality", suggesting that the coexistence of scale-specific (neural oscillations) and scale-free (neuronal avalanches) dynamics in brain activity occurs close to a non-equilibrium critical point at the onset of self-sustained oscillations.},
  author       = {Lombardi, Fabrizio and Pepic, Selver and Shriki, Oren and Tkačik, Gašper and De Martino, Daniele},
  pages        = {37},
  publisher    = {arXiv},
  title        = {{Quantifying the coexistence of neuronal oscillations and avalanches}},
  doi          = {10.48550/arXiv.2108.06686},
  note         = {arXiv preprint},
  year         = {2021},
}

@article{8997,
  abstract     = {Phenomenological relations such as Ohm’s or Fourier’s law have a venerable history in physics but are still scarce in biology. This situation restrains predictive theory. Here, we build on bacterial “growth laws,” which capture physiological feedback between translation and cell growth, to construct a minimal biophysical model for the combined action of ribosome-targeting antibiotics. Our model predicts drug interactions like antagonism or synergy solely from responses to individual drugs. We provide analytical results for limiting cases, which agree well with numerical results. We systematically refine the model by including direct physical interactions of different antibiotics on the ribosome. In a limiting case, our model provides a mechanistic underpinning for recent predictions of higher-order interactions that were derived using entropy maximization. We further refine the model to include the effects of antibiotics that mimic starvation and the presence of resistance genes. We describe the impact of a starvation-mimicking antibiotic on drug interactions analytically and verify it experimentally. Our extended model suggests a change in the type of drug interaction that depends on the strength of resistance, which challenges established rescaling paradigms. We experimentally show that the presence of unregulated resistance genes can lead to altered drug interaction, which agrees with the prediction of the model. While minimal, the model is readily adaptable and opens the door to predicting interactions of second and higher-order in a broad range of biological systems.},
  author       = {Kavcic, Bor and Tkačik, Gašper and Bollenbach, Tobias},
  issn         = {1553-7358},
  journal      = {PLOS Computational Biology},
  keywords     = {Modelling and Simulation, Genetics, Molecular Biology, Antibiotics, Drug interactions},
  publisher    = {Public Library of Science},
  title        = {{Minimal biophysical model of combined antibiotic action}},
  doi          = {10.1371/journal.pcbi.1008529},
  volume       = {17},
  year         = {2021},
}

@article{7553,
  abstract     = {Normative theories and statistical inference provide complementary approaches for the study of biological systems. A normative theory postulates that organisms have adapted to efficiently solve essential tasks, and proceeds to mathematically work out testable consequences of such optimality; parameters that maximize the hypothesized organismal function can be derived ab initio, without reference to experimental data. In contrast, statistical inference focuses on efficient utilization of data to learn model parameters, without reference to any a priori notion of biological function, utility, or fitness. Traditionally, these two approaches were developed independently and applied separately. Here we unify them in a coherent Bayesian framework that embeds a normative theory into a family of maximum-entropy “optimization priors.” This family defines a smooth interpolation between a data-rich inference regime (characteristic of “bottom-up” statistical models), and a data-limited ab inito prediction regime (characteristic of “top-down” normative theory). We demonstrate the applicability of our framework using data from the visual cortex, and argue that the flexibility it affords is essential to address a number of fundamental challenges relating to inference and prediction in complex, high-dimensional biological problems.},
  author       = {Mlynarski, Wiktor F and Hledik, Michal and Sokolowski, Thomas R and Tkačik, Gašper},
  journal      = {Neuron},
  number       = {7},
  pages        = {1227--1241.e5},
  publisher    = {Cell Press},
  title        = {{Statistical analysis and optimality of neural systems}},
  doi          = {10.1016/j.neuron.2021.01.020},
  volume       = {109},
  year         = {2021},
}

@article{9226,
  abstract     = {Half a century after Lewis Wolpert's seminal conceptual advance on how cellular fates distribute in space, we provide a brief historical perspective on how the concept of positional information emerged and influenced the field of developmental biology and beyond. We focus on a modern interpretation of this concept in terms of information theory, largely centered on its application to cell specification in the early Drosophila embryo. We argue that a true physical variable (position) is encoded in local concentrations of patterning molecules, that this mapping is stochastic, and that the processes by which positions and corresponding cell fates are determined based on these concentrations need to take such stochasticity into account. With this approach, we shift the focus from biological mechanisms, molecules, genes and pathways to quantitative systems-level questions: where does positional information reside, how it is transformed and accessed during development, and what fundamental limits it is subject to?},
  author       = {Tkačik, Gašper and Gregor, Thomas},
  issn         = {1477-9129},
  journal      = {Development},
  number       = {2},
  publisher    = {The Company of Biologists},
  title        = {{The many bits of positional information}},
  doi          = {10.1242/dev.176065},
  volume       = {148},
  year         = {2021},
}

@article{9362,
  abstract     = {A central goal in systems neuroscience is to understand the functions performed by neural circuits. Previous top-down models addressed this question by comparing the behaviour of an ideal model circuit, optimised to perform a given function, with neural recordings. However, this requires guessing in advance what function is being performed, which may not be possible for many neural systems. To address this, we propose an inverse reinforcement learning (RL) framework for inferring the function performed by a neural network from data. We assume that the responses of each neuron in a network are optimised so as to drive the network towards ‘rewarded’ states, that are desirable for performing a given function. We then show how one can use inverse RL to infer the reward function optimised by the network from observing its responses. This inferred reward function can be used to predict how the neural network should adapt its dynamics to perform the same function when the external environment or network structure changes. This could lead to theoretical predictions about how neural network dynamics adapt to deal with cell death and/or varying sensory stimulus statistics.},
  author       = {Chalk, Matthew J and Tkačik, Gašper and Marre, Olivier},
  issn         = {1932-6203},
  journal      = {PLoS ONE},
  number       = {4},
  publisher    = {Public Library of Science},
  title        = {{Inferring the function performed by a recurrent neural network}},
  doi          = {10.1371/journal.pone.0248940},
  volume       = {16},
  year         = {2021},
}

@unpublished{10077,
  abstract     = {Although much is known about how single neurons in the hippocampus represent an animal’s position, how cell-cell interactions contribute to spatial coding remains poorly understood. Using a novel statistical estimator and theoretical modeling, both developed in the framework of maximum entropy models, we reveal highly structured cell-to-cell interactions whose statistics depend on familiar vs. novel environment. In both conditions the circuit interactions optimize the encoding of spatial information, but for regimes that differ in the signal-to-noise ratio of their spatial inputs. Moreover, the topology of the interactions facilitates linear decodability, making the information easy to read out by downstream circuits. These findings suggest that the efficient coding hypothesis is not applicable only to individual neuron properties in the sensory periphery, but also to neural interactions in the central brain.},
  author       = {Nardin, Michele and Csicsvari, Jozsef L and Tkačik, Gašper and Savin, Cristina},
  booktitle    = {bioRxiv},
  publisher    = {Cold Spring Harbor Laboratory},
  title        = {{The structure of hippocampal CA1 interactions optimizes spatial coding across experience}},
  doi          = {10.1101/2021.09.28.460602},
  note         = {bioRxiv preprint},
  year         = {2021},
}

@unpublished{10579,
  abstract     = {We consider a totally asymmetric simple exclusion process (TASEP) consisting of particles on a lattice that require binding by a "token" to move. Using a combination of theory and simulations, we address the following questions: (i) How token binding kinetics affects the current-density relation; (ii) How the current-density relation depends on the scarcity of tokens; (iii) How tokens propagate the effects of the locally-imposed disorder (such a slow site) over the entire lattice; (iv) How a shared pool of tokens couples concurrent TASEPs running on multiple lattices; (v) How our results translate to TASEPs with open boundaries that exchange particles with the reservoir. Since real particle motion (including in systems that inspired the standard TASEP model, e.g., protein synthesis or movement of molecular motors) is often catalyzed, regulated, actuated, or otherwise mediated, the token-driven TASEP dynamics analyzed in this paper should allow for a better understanding of real systems and enable a closer match between TASEP theory and experimental observations.},
  author       = {Kavcic, Bor and Tkačik, Gašper},
  booktitle    = {arXiv},
  title        = {{Token-driven totally asymmetric simple exclusion process}},
  doi          = {10.48550/arXiv.2112.13558},
  note         = {arXiv preprint},
  year         = {2021},
}

@article{8250,
  abstract     = {Antibiotics that interfere with translation, when combined, interact in diverse and difficult-to-predict ways. Here, we explain these interactions by “translation bottlenecks”: points in the translation cycle where antibiotics block ribosomal progression. To elucidate the underlying mechanisms of drug interactions between translation inhibitors, we generate translation bottlenecks genetically using inducible control of translation factors that regulate well-defined translation cycle steps. These perturbations accurately mimic antibiotic action and drug interactions, supporting that the interplay of different translation bottlenecks causes these interactions. We further show that growth laws, combined with drug uptake and binding kinetics, enable the direct prediction of a large fraction of observed interactions, yet fail to predict suppression. However, varying two translation bottlenecks simultaneously supports that dense traffic of ribosomes and competition for translation factors account for the previously unexplained suppression. These results highlight the importance of “continuous epistasis” in bacterial physiology.},
  author       = {Kavcic, Bor and Tkačik, Gašper and Bollenbach, Tobias},
  issn         = {2041-1723},
  journal      = {Nature Communications},
  publisher    = {Springer Nature},
  title        = {{Mechanisms of drug interactions between translation-inhibiting antibiotics}},
  doi          = {10.1038/s41467-020-17734-z},
  volume       = {11},
  year         = {2020},
}

@article{8698,
  abstract     = {The brain represents and reasons probabilistically about complex stimuli and motor actions using a noisy, spike-based neural code. A key building block for such neural computations, as well as the basis for supervised and unsupervised learning, is the ability to estimate the surprise or likelihood of incoming high-dimensional neural activity patterns. Despite progress in statistical modeling of neural responses and deep learning, current approaches either do not scale to large neural populations or cannot be implemented using biologically realistic mechanisms. Inspired by the sparse and random connectivity of real neuronal circuits, we present a model for neural codes that accurately estimates the likelihood of individual spiking patterns and has a straightforward, scalable, efficient, learnable, and realistic neural implementation. This model’s performance on simultaneously recorded spiking activity of >100 neurons in the monkey visual and prefrontal cortices is comparable with or better than that of state-of-the-art models. Importantly, the model can be learned using a small number of samples and using a local learning rule that utilizes noise intrinsic to neural circuits. Slower, structural changes in random connectivity, consistent with rewiring and pruning processes, further improve the efficiency and sparseness of the resulting neural representations. Our results merge insights from neuroanatomy, machine learning, and theoretical neuroscience to suggest random sparse connectivity as a key design principle for neuronal computation.},
  author       = {Maoz, Ori and Tkačik, Gašper and Esteki, Mohamad Saleh and Kiani, Roozbeh and Schneidman, Elad},
  issn         = {1091-6490},
  journal      = {Proceedings of the National Academy of Sciences of the United States of America},
  number       = {40},
  pages        = {25066--25073},
  publisher    = {National Academy of Sciences},
  title        = {{Learning probabilistic neural representations with randomly connected circuits}},
  doi          = {10.1073/pnas.1912804117},
  volume       = {117},
  year         = {2020},
}

@article{9000,
  abstract     = {In prokaryotes, thermodynamic models of gene regulation provide a highly quantitative mapping from promoter sequences to gene-expression levels that is compatible with in vivo and in vitro biophysical measurements. Such concordance has not been achieved for models of enhancer function in eukaryotes. In equilibrium models, it is difficult to reconcile the reported short transcription factor (TF) residence times on the DNA with the high specificity of regulation. In nonequilibrium models, progress is difficult due to an explosion in the number of parameters. Here, we navigate this complexity by looking for minimal nonequilibrium enhancer models that yield desired regulatory phenotypes: low TF residence time, high specificity, and tunable cooperativity. We find that a single extra parameter, interpretable as the “linking rate,” by which bound TFs interact with Mediator components, enables our models to escape equilibrium bounds and access optimal regulatory phenotypes, while remaining consistent with the reported phenomenology and simple enough to be inferred from upcoming experiments. We further find that high specificity in nonequilibrium models is in a trade-off with gene-expression noise, predicting bursty dynamics—an experimentally observed hallmark of eukaryotic transcription. By drastically reducing the vast parameter space of nonequilibrium enhancer models to a much smaller subspace that optimally realizes biological function, we deliver a rich class of models that could be tractably inferred from data in the near future.},
  author       = {Grah, Rok and Zoller, Benjamin and Tkačik, Gašper},
  issn         = {1091-6490},
  journal      = {Proceedings of the National Academy of Sciences},
  number       = {50},
  pages        = {31614--31622},
  publisher    = {National Academy of Sciences},
  title        = {{Nonequilibrium models of optimal enhancer function}},
  doi          = {10.1073/pnas.2006731117},
  volume       = {117},
  year         = {2020},
}

@article{7652,
  abstract     = {Organisms cope with change by taking advantage of transcriptional regulators. However, when faced with rare environments, the evolution of transcriptional regulators and their promoters may be too slow. Here, we investigate whether the intrinsic instability of gene duplication and amplification provides a generic alternative to canonical gene regulation. Using real-time monitoring of gene-copy-number mutations in Escherichia coli, we show that gene duplications and amplifications enable adaptation to fluctuating environments by rapidly generating copy-number and, therefore, expression-level polymorphisms. This amplification-mediated gene expression tuning (AMGET) occurs on timescales that are similar to canonical gene regulation and can respond to rapid environmental changes. Mathematical modelling shows that amplifications also tune gene expression in stochastic environments in which transcription-factor-based schemes are hard to evolve or maintain. The fleeting nature of gene amplifications gives rise to a generic population-level mechanism that relies on genetic heterogeneity to rapidly tune the expression of any gene, without leaving any genomic signature.},
  author       = {Tomanek, Isabella and Grah, Rok and Lagator, M. and Andersson, A. M. C. and Bollback, Jonathan P and Tkačik, Gašper and Guet, Calin C},
  issn         = {2397-334X},
  journal      = {Nature Ecology \& Evolution},
  number       = {4},
  pages        = {612--625},
  publisher    = {Springer Nature},
  title        = {{Gene amplification as a form of population-level gene expression regulation}},
  doi          = {10.1038/s41559-020-1132-7},
  volume       = {4},
  year         = {2020},
}

@article{7656,
  abstract     = {We propose that correlations among neurons are generically strong enough to organize neural activity patterns into a discrete set of clusters, which can each be viewed as a population codeword. Our reasoning starts with the analysis of retinal ganglion cell data using maximum entropy models, showing that the population is robustly in a frustrated, marginally sub-critical, or glassy, state. This leads to an argument that neural populations in many other brain areas might share this structure. Next, we use latent variable models to show that this glassy state possesses well-defined clusters of neural activity. Clusters have three appealing properties: (i) clusters exhibit error correction, i.e., they are reproducibly elicited by the same stimulus despite variability at the level of constituent neurons; (ii) clusters encode qualitatively different visual features than their constituent neurons; and (iii) clusters can be learned by downstream neural circuits in an unsupervised fashion. We hypothesize that these properties give rise to a “learnable” neural code which the cortical hierarchy uses to extract increasingly complex features without supervision or reinforcement.},
  author       = {Berry, Michael J. and Tkačik, Gašper},
  issn         = {1662-5188},
  journal      = {Frontiers in Computational Neuroscience},
  publisher    = {Frontiers},
  title        = {{Clustering of neural activity: A design principle for population codes}},
  doi          = {10.3389/fncom.2020.00020},
  volume       = {14},
  year         = {2020},
}

