@article{14656,
  abstract     = {Although much is known about how single neurons in the hippocampus represent an animal's position, how circuit interactions contribute to spatial coding is less well understood. Using a novel statistical estimator and theoretical modeling, both developed in the framework of maximum entropy models, we reveal highly structured CA1 cell-cell interactions in male rats during open field exploration. The statistics of these interactions depend on whether the animal is in a familiar or novel environment. In both conditions the circuit interactions optimize the encoding of spatial information, but for regimes that differ in the informativeness of their spatial inputs. This structure facilitates linear decodability, making the information easy to read out by downstream circuits. Overall, our findings suggest that the efficient coding hypothesis is not only applicable to individual neuron properties in the sensory periphery, but also to neural interactions in the central brain.},
  author       = {Nardin, Michele and Csicsvari, Jozsef L and Tkačik, Gašper and Savin, Cristina},
  issn         = {1529-2401},
  journal      = {The Journal of Neuroscience},
  number       = {48},
  pages        = {8140--8156},
  publisher    = {Society for Neuroscience},
  title        = {The structure of hippocampal {CA1} interactions optimizes spatial coding across experience},
  doi          = {10.1523/JNEUROSCI.0194-23.2023},
  volume       = {43},
  year         = {2023},
}

@article{12349,
  abstract     = {Statistics of natural scenes are not uniform - their structure varies dramatically from ground to sky. It remains unknown whether these non-uniformities are reflected in the large-scale organization of the early visual system and what benefits such adaptations would confer. Here, by relying on the efficient coding hypothesis, we predict that changes in the structure of receptive fields across visual space increase the efficiency of sensory coding. We show experimentally that, in agreement with our predictions, receptive fields of retinal ganglion cells change their shape along the dorsoventral retinal axis, with a marked surround asymmetry at the visual horizon. Our work demonstrates that, according to principles of efficient coding, the panoramic structure of natural scenes is exploited by the retina across space and cell-types.},
  author       = {Gupta, Divyansh and Mlynarski, Wiktor F and Sumser, Anton L and Symonova, Olga and Svaton, Jan and Jösch, Maximilian A},
  issn         = {1546-1726},
  journal      = {Nature Neuroscience},
  pages        = {606--614},
  publisher    = {Springer Nature},
  title        = {Panoramic visual statistics shape retina-wide organization of receptive fields},
  doi          = {10.1038/s41593-023-01280-0},
  volume       = {26},
  year         = {2023},
}

@misc{12370,
  abstract     = {Statistics of natural scenes are not uniform - their structure varies dramatically from ground to sky. It remains unknown whether these non-uniformities are reflected in the large-scale organization of the early visual system and what benefits such adaptations would confer. Here, by relying on the efficient coding hypothesis, we predict that changes in the structure of receptive fields across visual space increase the efficiency of sensory coding. We show experimentally that, in agreement with our predictions, receptive fields of retinal ganglion cells change their shape along the dorsoventral retinal axis, with a marked surround asymmetry at the visual horizon. Our work demonstrates that, according to principles of efficient coding, the panoramic structure of natural scenes is exploited by the retina across space and cell-types.},
  author       = {Gupta, Divyansh and Sumser, Anton L and Jösch, Maximilian A},
  publisher    = {Institute of Science and Technology Austria},
  title        = {Research Data for: Panoramic visual statistics shape retina-wide organization of receptive fields},
  doi          = {10.15479/AT:ISTA:12370},
  year         = {2023},
}

@article{12762,
  abstract     = {Neurons in the brain are wired into adaptive networks that exhibit collective dynamics as diverse as scale-specific oscillations and scale-free neuronal avalanches. Although existing models account for oscillations and avalanches separately, they typically do not explain both phenomena, are too complex to analyze analytically or intractable to infer from data rigorously. Here we propose a feedback-driven Ising-like class of neural networks that captures avalanches and oscillations simultaneously and quantitatively. In the simplest yet fully microscopic model version, we can analytically compute the phase diagram and make direct contact with human brain resting-state activity recordings via tractable inference of the model’s two essential parameters. The inferred model quantitatively captures the dynamics over a broad range of scales, from single sensor oscillations to collective behaviors of extreme events and neuronal avalanches. Importantly, the inferred parameters indicate that the co-existence of scale-specific (oscillations) and scale-free (avalanches) dynamics occurs close to a non-equilibrium critical point at the onset of self-sustained oscillations.},
  author       = {Lombardi, Fabrizio and Pepic, Selver and Shriki, Oren and Tkačik, Gašper and De Martino, Daniele},
  issn         = {2662-8457},
  journal      = {Nature Computational Science},
  pages        = {254--263},
  publisher    = {Springer Nature},
  title        = {Statistical modeling of adaptive neural networks explains co-existence of avalanches and oscillations in resting human brain},
  doi          = {10.1038/s43588-023-00410-9},
  volume       = {3},
  year         = {2023},
}

@article{12332,
  abstract     = {Activity of sensory neurons is driven not only by external stimuli but also by feedback signals from higher brain areas. Attention is one particularly important internal signal whose presumed role is to modulate sensory representations such that they only encode information currently relevant to the organism at minimal cost. This hypothesis has, however, not yet been expressed in a normative computational framework. Here, by building on normative principles of probabilistic inference and efficient coding, we developed a model of dynamic population coding in the visual cortex. By continuously adapting the sensory code to changing demands of the perceptual observer, an attention-like modulation emerges. This modulation can dramatically reduce the amount of neural activity without deteriorating the accuracy of task-specific inferences. Our results suggest that a range of seemingly disparate cortical phenomena such as intrinsic gain modulation, attention-related tuning modulation, and response variability could be manifestations of the same underlying principles, which combine efficient sensory coding with optimal probabilistic inference in dynamic environments.},
  author       = {Mlynarski, Wiktor F and Tkačik, Gašper},
  issn         = {1545-7885},
  journal      = {PLoS Biology},
  number       = {12},
  pages        = {e3001889},
  publisher    = {Public Library of Science},
  title        = {Efficient coding theory of dynamic attentional modulation},
  doi          = {10.1371/journal.pbio.3001889},
  volume       = {20},
  year         = {2022},
}

@unpublished{10912,
  abstract     = {Brain dynamics display collective phenomena as diverse as neuronal oscillations and avalanches. Oscillations are rhythmic, with fluctuations occurring at a characteristic scale, whereas avalanches are scale-free cascades of neural activity. Here we show that such antithetic features can coexist in a very generic class of adaptive neural networks. In the most simple yet fully microscopic model from this class we make direct contact with human brain resting-state activity recordings via tractable inference of the model's two essential parameters. The inferred model quantitatively captures the dynamics over a broad range of scales, from single sensor fluctuations, collective behaviors of nearly-synchronous extreme events on multiple sensors, to neuronal avalanches unfolding over multiple sensors across multiple time-bins. Importantly, the inferred parameters correlate with model-independent signatures of "closeness to criticality", suggesting that the coexistence of scale-specific (neural oscillations) and scale-free (neuronal avalanches) dynamics in brain activity occurs close to a non-equilibrium critical point at the onset of self-sustained oscillations.},
  author       = {Lombardi, Fabrizio and Pepic, Selver and Shriki, Oren and Tkačik, Gašper and De Martino, Daniele},
  publisher    = {arXiv},
  title        = {Quantifying the coexistence of neuronal oscillations and avalanches},
  doi          = {10.48550/ARXIV.2108.06686},
  eprint       = {2108.06686},
  archiveprefix = {arXiv},
  note         = {arXiv preprint, 37 pages},
  year         = {2021},
}

@unpublished{10077,
  abstract     = {Although much is known about how single neurons in the hippocampus represent an animal’s position, how cell-cell interactions contribute to spatial coding remains poorly understood. Using a novel statistical estimator and theoretical modeling, both developed in the framework of maximum entropy models, we reveal highly structured cell-to-cell interactions whose statistics depend on familiar vs. novel environment. In both conditions the circuit interactions optimize the encoding of spatial information, but for regimes that differ in the signal-to-noise ratio of their spatial inputs. Moreover, the topology of the interactions facilitates linear decodability, making the information easy to read out by downstream circuits. These findings suggest that the efficient coding hypothesis is not applicable only to individual neuron properties in the sensory periphery, but also to neural interactions in the central brain.},
  author       = {Nardin, Michele and Csicsvari, Jozsef L and Tkačik, Gašper and Savin, Cristina},
  publisher    = {Cold Spring Harbor Laboratory},
  title        = {The structure of hippocampal {CA1} interactions optimizes spatial coding across experience},
  doi          = {10.1101/2021.09.28.460602},
  note         = {bioRxiv preprint},
  year         = {2021},
}

