diff --git a/misc/github_wiki/bibtex/mentions_BPO.bib b/misc/github_wiki/bibtex/mentions_BPO.bib index b206c6ba..68aa61db 100644 --- a/misc/github_wiki/bibtex/mentions_BPO.bib +++ b/misc/github_wiki/bibtex/mentions_BPO.bib @@ -1,4 +1,3 @@ - @techreport{amsalemDenseComputerReplica2020, type = {Preprint}, title = {Dense {{Computer Replica}} of {{Cortical Microcircuits Unravels Cellular Underpinnings}} of {{Auditory Surprise Response}}}, @@ -7,6 +6,7 @@ @techreport{amsalemDenseComputerReplica2020 month = jun, institution = {{Neuroscience}}, doi = {10.1101/2020.05.31.126466}, + urldate = {2022-06-08}, abstract = {The nervous system is notorious for its strong response evoked by a surprising sensory input, but the biophysical and anatomical underpinnings of this phenomenon are only partially understood. Here we utilized in-silico experiments of a biologicallydetailed model of a neocortical microcircuit to study stimulus specific adaptation (SSA) in the auditory cortex, whereby the neuronal response adapts significantly for a repeated (``expected'') tone but not for a rare (``surprise'') tone. SSA experiments were mimicked by stimulating tonotopically-mapped thalamo-cortical afferents projecting to the microcircuit; the activity of these afferents was modeled based on our in-vivo recordings from individual thalamic neurons. The modeled microcircuit expressed naturally many experimentally-observed properties of SSA, suggesting that SSA is a general property of neocortical microcircuits. By systematically modulating circuit parameters, we found that key features of SSA depended on synergistic effects of synaptic depression, spike frequency adaptation and recurrent network connectivity. 
The relative contribution of each of these mechanisms in shaping SSA was explored, additional SSA-related experimental results were explained and new experiments for further studying SSA were suggested.}, langid = {english} } @@ -22,6 +22,7 @@ @article{amsalemEfficientAnalyticalReduction2020a pages = {288}, issn = {2041-1723}, doi = {10.1038/s41467-019-13932-6}, + urldate = {2023-02-28}, abstract = {Abstract Detailed conductance-based nonlinear neuron models consisting of thousands of synapses are key for understanding of the computational properties of single neurons and large neuronal networks, and for interpreting experimental results. Simulations of these models are computationally expensive, considerably curtailing their utility. Neuron\_Reduce is a new analytical approach to reduce the morphological complexity and computational time of nonlinear neuron models. Synapses and active membrane channels are mapped to the reduced model preserving their transfer impedance to the soma; synapses with identical transfer impedance are merged into one NEURON process still retaining their individual activation times. Neuron\_Reduce accelerates the simulations by 40\textendash 250 folds for a variety of cell types and realistic number (10,000\textendash 100,000) of synapses while closely replicating voltage dynamics and specific dendritic computations. The reduced neuron-models will enable realistic simulations of neural networks at unprecedented scale, including networks emerging from micro-connectomics efforts and biologically-inspired ``deep networks''. 
Neuron\_Reduce is publicly available and is straightforward to implement.}, langid = {english} } @@ -36,6 +37,7 @@ @article{awileModernizingNEURONSimulator2022a pages = {884046}, issn = {1662-5196}, doi = {10.3389/fninf.2022.884046}, + urldate = {2023-02-28}, abstract = {The need for reproducible, credible, multiscale biological modeling has led to the development of standardized simulation platforms, such as the widely-used NEURON environment for computational neuroscience. Developing and maintaining NEURON over several decades has required attention to the competing needs of backwards compatibility, evolving computer architectures, the addition of new scales and physical processes, accessibility to new users, and efficiency and flexibility for specialists. In order to meet these challenges, we have now substantially modernized NEURON, providing continuous integration, an improved build system and release workflow, and better documentation. With the help of a new source-to-source compiler of the NMODL domain-specific language we have enhanced NEURON's ability to run efficiently, via the CoreNEURON simulation engine, on a variety of hardware platforms, including GPUs. Through the implementation of an optimized in-memory transfer mechanism this performance optimized backend is made easily accessible to users, providing training and model-development paths from laptop to workstation to supercomputer and cloud platform. Similarly, we have been able to accelerate NEURON's reaction-diffusion simulation performance through the use of just-in-time compilation. 
We show that these efforts have led to a growing developer base, a simpler and more robust software distribution, a wider range of supported computer architectures, a better integration of NEURON with other scientific workflows, and substantially improved performance for the simulation of biophysical and biochemical models.}, langid = {english} } @@ -50,6 +52,7 @@ @article{beiningT2NNewTool2017 pages = {e26517}, issn = {2050-084X}, doi = {10.7554/eLife.26517}, + urldate = {2022-06-08}, abstract = {Compartmental models are the theoretical tool of choice for understanding single neuron computations. However, many models are incomplete, built ad hoc and require tuning for each novel condition rendering them of limited usability. Here, we present T2N, a powerful interface to control NEURON with Matlab and TREES toolbox, which supports generating models stable over a broad range of reconstructed and synthetic morphologies. We illustrate this for a novel, highly detailed active model of dentate granule cells (GCs) replicating a wide palette of experiments from various labs. By implementing known differences in ion channel composition and morphology, our model reproduces data from mouse or rat, mature or adult-born GCs as well as pharmacological interventions and epileptic conditions. This work sets a new benchmark for detailed compartmental modeling. T2N is suitable for creating robust models useful for large-scale networks that could lead to novel predictions. We discuss possible T2N application in degeneracy studies.}, langid = {english} } @@ -62,6 +65,7 @@ @techreport{ben-shalomInferringNeuronalIonic2019 month = aug, institution = {{Neuroscience}}, doi = {10.1101/727974}, + urldate = {2022-06-08}, abstract = {Abstract The neuron is the fundamental unit of computation in the nervous system, and different neuron types produce different temporal patterns of voltage fluctuations in response to input currents. 
Understanding the mechanism of single neuron firing patterns requires accurate knowledge of the spatial densities of diverse ion channels along the membrane. However, direct measurements of these microscopic variables are difficult to obtain experimentally. Alternatively, one can attempt to infer those microscopic variables from the membrane potential (a mesoscopic variable), or features thereof, which are more experimentally tractable. One approach in this direction is to infer the ionic densities as parameters of a neuronal model. Traditionally this is done using a Multi-Objective Optimization (MOO) method to minimize the differences between features extracted from a simulated neuron's membrane potential and the same features extracted from target data. Here, we use Convolutional Neural Networks (CNNs) to directly regress generative parameters (e.g., ionic conductances, membrane resistance, etc.,) from simulated time-varying membrane potentials in response to an input stimulus. We simulated diverse neuron models of increasing complexity (Izikivich: 4 parameters; Hodgkin-Huxley: 7 parameters; Mainen-Sejnowski: 10 parameters) with a large range of variation in the underlying parameter values. We show that hyperparameter optimized CNNs can accurately infer the values of generative variables for these neuron models, and that these results far surpass the previous state-of-the-art method (MOO). We discuss the benefits of optimizing the CNN architecture, improvements in accuracy with additional training data, and some observed limitations. Based on these results, we propose that CNNs may be able to infer the spatial distribution of diverse ionic densities from spatially resolved measurements of neuronal membrane potentials (e.g. 
voltage imaging).}, langid = {english} } @@ -77,6 +81,7 @@ @article{bolognaEBRAINSNeuroFeatureExtractOnline2021 pages = {713899}, issn = {1662-5196}, doi = {10.3389/fninf.2021.713899}, + urldate = {2022-06-08}, abstract = {The description of neural dynamics, in terms of precise characterizations of action potential timings and shape and voltage related measures, is fundamental for a deeper understanding of the neural code and its information content. Not only such measures serve the scientific questions posed by experimentalists but are increasingly being used by computational neuroscientists for the construction of biophysically detailed datadriven models. Nonetheless, online resources enabling users to perform such feature extraction operation are lacking. To address this problem, in the framework of the Human Brain Project and the EBRAINS research infrastructure, we have developed and made available to the scientific community the NeuroFeatureExtract, an open-access online resource for the extraction of electrophysiological features from neural activity data. This tool allows to select electrophysiological traces of interest, fetched from public repositories or from users' own data, and provides ad hoc functionalities to extract relevant features. The output files are properly formatted for further analysis, including data-driven neural model optimization.}, langid = {english} } @@ -91,6 +96,7 @@ @article{carrilloMetricEvaluatingNeural2018 pages = {913}, issn = {1662-453X}, doi = {10.3389/fnins.2018.00913}, + urldate = {2022-06-08}, abstract = {Supervised learning has long been attributed to several feed-forward neural circuits within the brain, with particular attention being paid to the cerebellar granular layer. The focus of this study is to evaluate the input activity representation of these feed-forward neural networks. 
The activity of cerebellar granule cells is conveyed by parallel fibers and translated into Purkinje cell activity, which constitutes the sole output of the cerebellar cortex. The learning process at this parallel-fiber-to-Purkinje-cell connection makes each Purkinje cell sensitive to a set of specific cerebellar states, which are roughly determined by the granule-cell activity during a certain time window. A Purkinje cell becomes sensitive to each neural input state and, consequently, the network operates as a function able to generate a desired output for each provided input by means of supervised learning. However, not all sets of Purkinje cell responses can be assigned to any set of input states due to the network's own limitations (inherent to the network neurobiological substrate), that is, not all input-output mapping can be learned. A key limiting factor is the representation of the input states through granule-cell activity. The quality of this representation (e.g., in terms of heterogeneity) will determine the capacity of the network to learn a varied set of outputs. Assessing the quality of this representation is interesting when developing and studying models of these networks to identify those neuron or network characteristics that enhance this representation. In this study we present an algorithm for evaluating quantitatively the level of compatibility/interference amongst a set of given cerebellar states according to their representation (granule-cell activation patterns) without the need for actually conducting simulations and network training. The algorithm input consists of a real-number matrix that codifies the activity level of every considered granule-cell in each state. 
The capability of this representation to generate a varied set of outputs is evaluated geometrically, thus resulting in a real number that assesses the goodness of the representation.}, langid = {english} } @@ -101,12 +107,11 @@ @misc{deistlerTruncatedProposalsScalable2022 year = {2022}, month = nov, number = {arXiv:2210.04815}, - eprint = {2210.04815}, - eprinttype = {arxiv}, - primaryclass = {cs, stat}, + eprint = {2210.04815}, publisher = {{arXiv}}, + urldate = {2023-02-28}, abstract = {Simulation-based inference (SBI) solves statistical inverse problems by repeatedly running a stochastic simulator and inferring posterior distributions from modelsimulations. To improve simulation efficiency, several inference methods take a sequential approach and iteratively adapt the proposal distributions from which model simulations are generated. However, many of these sequential methods are difficult to use in practice, both because the resulting optimisation problems can be challenging and efficient diagnostic tools are lacking. To overcome these issues, we present Truncated Sequential Neural Posterior Estimation (TSNPE). TSNPE performs sequential inference with truncated proposals, sidestepping the optimisation issues of alternative approaches. In addition, TSNPE allows to efficiently perform coverage tests that can scale to complex models with many parameters. We demonstrate that TSNPE performs on par with previous methods on established benchmark tasks. We then apply TSNPE to two challenging problems from neuroscience and show that TSNPE can successfully obtain the posterior distributions, whereas previous methods fail. 
Overall, our results demonstrate that TSNPE is an efficient, accurate, and robust inference method that can scale to challenging scientific models.}, - archiveprefix = {arXiv}, + archiveprefix = {arXiv}, langid = {english}, keywords = {Computer Science - Machine Learning,Statistics - Machine Learning} } @@ -121,6 +126,7 @@ @article{diaz-parraStructuralFunctionalEmpirical2017 pages = {170--184}, issn = {10538119}, doi = {10.1016/j.neuroimage.2017.07.046}, + urldate = {2022-06-08}, abstract = {Connectomics data from animal models provide an invaluable opportunity to reveal the complex interplay between structure and function in the mammalian brain. In this work, we investigate the relationship between structural and functional connectivity in the rat brain cortex using a directed anatomical network generated from a carefully curated meta-analysis of published tracing data, along with resting-state functional MRI data obtained from a group of 14 anesthetized Wistar rats. We found a high correspondence between the strength of functional connections, measured as blood oxygen level dependent (BOLD) signal correlations between cortical regions, and the weight of the corresponding anatomical links in the connectome graph (maximum Spearman rank-order correlation {$\rho$} {$\frac{1}{4}$} 0:48). At the network-level, regions belonging to the same functionally defined community tend to form more mutual weighted connections between each other compared to regions located in different communities. We further found that functional communities in resting-state networks are enriched in densely connected anatomical motifs. Importantly, these higher-order structural subgraphs cannot be explained by lowerorder topological properties, suggesting that dense structural patterns support functional associations in the resting brain. 
Simulations of brain-wide resting-state activity based on neural mass models implemented on the empirical rat anatomical connectome demonstrated high correlation between the simulated and the measured functional connectivity (maximum Pearson correlation {$\rho$} {$\frac{1}{4}$} 0:53), further suggesting that the topology of structural connections plays an important role in shaping functional cortical networks.}, langid = {english} } @@ -135,6 +141,7 @@ @article{dura-bernalNetPyNEToolDatadriven2019 pages = {e44494}, issn = {2050-084X}, doi = {10.7554/eLife.44494}, + urldate = {2022-06-08}, abstract = {Biophysical modeling of neuronal networks helps to integrate and interpret rapidly growing and disparate experimental datasets at multiple scales. The NetPyNE tool (www.netpyne.org) provides both programmatic and graphical interfaces to develop data-driven multiscale network models in NEURON. NetPyNE clearly separates model parameters from implementation code. Users provide specifications at a high level via a standardized declarative language, for example connectivity rules, to create millions of cell-to-cell connections. NetPyNE then enables users to generate the NEURON network, run efficiently parallelized simulations, optimize and explore network parameters through automated batch runs, and use built-in functions for visualization and analysis \textendash{} connectivity matrices, voltage traces, spike raster plots, local field potentials, and information theoretic measures. NetPyNE also facilitates model sharing by exporting and importing standardized formats (NeuroML and SONATA). NetPyNE is already being used to teach computational neuroscience students and by modelers to investigate brain regions and phenomena. , The approximately 100 billion neurons in our brain are responsible for everything we do and experience. Experiments aimed at discovering how these cells encode and process information generate vast amounts of data. 
These data span multiple scales, from interactions between individual molecules to coordinated waves of electrical activity that spread across the entire brain surface. To understand how the brain works, we must combine and make sense of these diverse types of information. Computational modeling provides one way of doing this. Using equations, we can calculate the chemical and electrical changes that take place in neurons. We can then build models of neurons and neural circuits that reproduce the patterns of activity seen in experiments. Exploring these models can provide insights into how the brain itself works. Several software tools are available to simulate neural circuits, but none provide an easy way of incorporating data that span different scales, from molecules to cells to networks. Moreover, most of the models require familiarity with computer programming. Dura-Bernal et al. have now developed a new software tool called NetPyNE, which allows users without programming expertise to build sophisticated models of brain circuits. It features a user-friendly interface for defining the properties of the model at molecular, cellular and circuit scales. It also provides an easy and automated method to identify the properties of the model that enable it to reproduce experimental data. Finally, NetPyNE makes it possible to run the model on supercomputers and offers a variety of ways to visualize and analyze the resulting output. Users can save the model and output in standardized formats, making them accessible to as many people as possible. Researchers in labs across the world have used NetPyNE to study different brain regions, phenomena and diseases. The software also features in courses that introduce students to neurobiology and computational modeling. NetPyNE can help to interpret isolated experimental findings, and also makes it easier to explore interactions between brain activity at different scales. 
This will enable researchers to decipher how the brain encodes and processes information, and ultimately could make it easier to understand and treat brain disorders.}, langid = {english} } @@ -149,6 +156,7 @@ @article{economidesBiocytinRecovery3D2018 pages = {58592}, issn = {1940-087X}, doi = {10.3791/58592}, + urldate = {2022-06-08}, abstract = {How cortical network activity processes information is of importance to a large number of basic and clinical scientific questions. The protocol described here identifies the basic building blocks of this circuitry. The in-depth studies of cortical regions will ultimately provide other scientists with the circuit components needed for an understanding of how the brain acquires, processes and stores information and what goes wrong in disease, while the electrophysiological and morphological data are widely used by computational neuroscientists in the construction of model networks that explore information processing. The protocol outlined here describes how biocytin-filled cells recorded in the CA2 region of the hippocampus are recovered and then reconstructed in 3D. Additionally, the protocol describes the demonstration of calcium binding protein or peptide content in recorded interneurons.}, langid = {english} } @@ -163,6 +171,7 @@ @article{erikssonCombiningHypothesisDatadriven2022a pages = {e69013}, issn = {2050-084X}, doi = {10.7554/eLife.69013}, + urldate = {2022-07-20}, abstract = {Modeling in neuroscience occurs at the intersection of different points of view and approaches. Typically, hypothesis-\-driven modeling brings a question into focus so that a model is constructed to investigate a specific hypothesis about how the system works or why certain phenomena are observed. Data-d\- riven modeling, on the other hand, follows a more unbiased approach, with model construction informed by the computationally intensive use of data. 
At the same time, researchers employ models at different biological scales and at different levels of abstraction. Combining these models while validating them against experimental data increases understanding of the multiscale brain. However, a lack of interoperability, transparency, and reusability of both models and the workflows used to construct them creates barriers for the integration of models representing different biological scales and built using different modeling philosophies. We argue that the same imperatives that drive resources and policy for data \textendash{} such as the FAIR (Findable, Accessible, Interoperable, Reusable) principles \textendash{} also support the integration of different modeling approaches. The FAIR principles require that data be shared in formats that are Findable, Accessible, Interoperable, and Reusable. Applying these principles to models and modeling workflows, as well as the data used to constrain and validate them, would allow researchers to find, reuse, question, validate, and extend published models, regardless of whether they are implemented phenomenologically or mechanistically, as a few equations or as a multiscale, hierarchical system. To illustrate these ideas, we use a classical synaptic plasticity model, the Bienenstock\textendash Cooper\textendash Munro rule, as an example due to its long history, different levels of abstraction, and implementation at many scales.}, langid = {english} } @@ -179,6 +188,7 @@ @article{ezra-tsurRealisticRetinalModeling2021 pages = {e1009754}, issn = {1553-7358}, doi = {10.1371/journal.pcbi.1009754}, + urldate = {2022-06-13}, abstract = {Retinal direction-selectivity originates in starburst amacrine cells (SACs), which display a centrifugal preference, responding with greater depolarization to a stimulus expanding from soma to dendrites than to a collapsing stimulus. 
Various mechanisms were hypothesized to underlie SAC centrifugal preference, but dissociating them is experimentally challenging and the mechanisms remain debatable. To address this issue, we developed the Retinal Stimulation Modeling Environment (RSME), a multifaceted data-driven retinal model that encompasses detailed neuronal morphology and biophysical properties, retina-tailored connectivity scheme and visual input. Using a genetic algorithm, we demonstrated that spatiotemporally diverse excitatory inputs\textendash sustained in the proximal and transient in the distal processes\textendash are sufficient to generate experimentally validated centrifugal preference in a single SAC. Reversing these input kinetics did not produce any centrifugal-preferring SAC. We then explored the contribution of SAC-SAC inhibitory connections in establishing the centrifugal preference. SAC inhibitory network enhanced the centrifugal preference, but failed to generate it in its absence. Embedding a direction selective ganglion cell (DSGC) in a SAC network showed that the known SAC-DSGC asymmetric connectivity by itself produces direction selectivity. Still, this selectivity is sharpened in a centrifugal-preferring SAC network. Finally, we use RSME to demonstrate the contribution of SAC-SAC inhibitory connections in mediating direction selectivity and recapitulate recent experimental findings. Thus, using RSME, we obtained a mechanistic understanding of SACs' centrifugal preference and its contribution to direction selectivity.}, langid = {english} } @@ -193,6 +203,7 @@ @inproceedings{farnerEvolvingSpikingNeuron2021 publisher = {{IEEE}}, address = {{Orlando, FL, USA}}, doi = {10.1109/SSCI50451.2021.9660185}, + urldate = {2022-06-08}, abstract = {Neuro-inspired models and systems have great potential for applications in unconventional computing. 
Often, the mechanisms of biological neurons are modeled or mimicked in simulated or physical systems in an attempt to harness some of the computational power of the brain. However, the biological mechanisms at play in neural systems are complicated and challenging to capture and engineer; thus, it can be simpler to turn to a data-driven approach to transfer features of neural behavior to artificial substrates. In the present study, we used an evolutionary algorithm (EA) to produce spiking neural systems that emulate the patterns of behavior of biological neurons in vitro. The aim of this approach was to develop a method of producing models capable of exhibiting complex behavior that may be suitable for use as computational substrates. Our models were able to produce a level of network-wide synchrony and showed a range of behaviors depending on the target data used for their evolution, which was from a range of neuronal culture densities and maturities. The genomes of the top-performing models indicate the excitability and density of connections in the model play an important role in determining the complexity of the produced activity.}, isbn = {978-1-72819-048-8}, langid = {english} @@ -208,6 +219,7 @@ @article{frostnylenDopaminergicCholinergicModulation2021 pages = {748989}, issn = {1662-5110}, doi = {10.3389/fncir.2021.748989}, + urldate = {2022-06-08}, abstract = {Neuromodulation is present throughout the nervous system and serves a critical role for circuit function and dynamics. The computational investigations of neuromodulation in large scale networks require supportive software platforms. Snudda is a software for the creation and simulation of large scale networks of detailed microcircuits consisting of multicompartmental neuron models. We have developed an extension to Snudda to incorporate neuromodulation in large scale simulations. 
The extended Snudda framework implements neuromodulation at the level of single cells incorporated into large-scale microcircuits. We also developed Neuromodcell, a software for optimizing neuromodulation in detailed multicompartmental neuron models. The software adds parameters within the models modulating the conductances of ion channels and ionotropic receptors. Bath application of neuromodulators is simulated and models which reproduce the experimentally measured effects are selected. In Snudda, we developed an extension to accommodate large scale simulations of neuromodulation. The simulator has two modes of simulation \textendash{} denoted replay and adaptive. In the replay mode, transient levels of neuromodulators can be defined as a time-varying function which modulates the receptors and ion channels within the network in a cell-type specific manner. In the adaptive mode, spiking neuromodulatory neurons are connected via integrative modulating mechanisms to ion channels and receptors. Both modes of simulating neuromodulation allow for simultaneous modulation by several neuromodulators that can interact dynamically with each other. Here, we used the Neuromodcell software to simulate dopaminergic and muscarinic modulation of neurons from the striatum. We also demonstrate how to simulate different neuromodulatory states with dopamine and acetylcholine using Snudda. All software is freely available on Github, including tutorials on Neuromodcell and Snudda-neuromodulation.}, langid = {english} } @@ -222,6 +234,7 @@ @article{galindoSimulationVisualizationAnalysis2020 pages = {309--321}, issn = {09252312}, doi = {10.1016/j.neucom.2020.02.114}, + urldate = {2022-06-08}, langid = {english} } @@ -235,6 +248,7 @@ @article{galRoleHubNeurons2021 pages = {718270}, issn = {1662-5110}, doi = {10.3389/fncir.2021.718270}, + urldate = {2022-06-08}, abstract = {Many neurodegenerative diseases are associated with the death of specific neuron types in particular brain regions. 
What makes the death of specific neuron types particularly harmful for the integrity and dynamics of the respective network is not well understood. To start addressing this question we used the most up-to-date biologically realistic dense neocortical microcircuit (NMC) of the rodent, which has reconstructed a volume of 0.3 mm3 and containing 31,000 neurons, {$\sim$}37 million synapses, and 55 morphological cell types arranged in six cortical layers. Using modern network science tools, we identified hub neurons in the NMC, that are connected synaptically to a large number of their neighbors and systematically examined the impact of abolishing these cells. In general, the structural integrity of the network is robust to cells' attack; yet, attacking hub neurons strongly impacted the small-world topology of the network, whereas similar attacks on random neurons have a negligible effect. Such hub-specific attacks are also impactful on the network dynamics, both when the network is at its spontaneous synchronous state and when it was presented with synchronized thalamocortical visual-like input. We found that attacking layer 5 hub neurons is most harmful to the structural and functional integrity of the NMC. The significance of our results for understanding the role of specific neuron types and cortical layers for disease manifestation is discussed.}, langid = {english} } @@ -250,6 +264,7 @@ @article{gouwensSystematicGenerationBiophysically2018a pages = {710}, issn = {2041-1723}, doi = {10.1038/s41467-017-02718-3}, + urldate = {2023-02-28}, abstract = {Abstract The cellular components of mammalian neocortical circuits are diverse, and capturing this diversity in computational models is challenging. Here we report an approach for generating biophysically detailed models of 170 individual neurons in the Allen Cell Types Database to link the systematic experimental characterization of cell types to the construction of cortical models. 
We build models from 3D morphologies and somatic electrophysiological responses measured in the same cells. Densities of active somatic conductances and additional parameters are optimized with a genetic algorithm to match electrophysiological features. We evaluate the models by applying additional stimuli and comparing model responses to experimental data. Applying this technique across a diverse set of neurons from adult mouse primary visual~cortex, we verify that models preserve the distinctiveness of intrinsic properties between subsets of cells observed in experiments. The optimized models are accessible online alongside the experimental data. Code for optimization and simulation is also openly distributed.}, langid = {english} } @@ -265,6 +280,7 @@ @article{gutzenReproducibleNeuralNetwork2018 pages = {90}, issn = {1662-5196}, doi = {10.3389/fninf.2018.00090}, + urldate = {2022-06-08}, abstract = {Computational neuroscience relies on simulations of neural network models to bridge the gap between the theory of neural networks and the experimentally observed activity dynamics in the brain. The rigorous validation of simulation results against reference data is thus an indispensable part of any simulation workflow. Moreover, the availability of different simulation environments and levels of model description require also validation of model implementations against each other to evaluate their equivalence. Despite rapid advances in the formalized description of models, data, and analysis workflows, there is no accepted consensus regarding the terminology and practical implementation of validation workflows in the context of neural simulations. This situation prevents the generic, unbiased comparison between published models, which is a key element of enhancing reproducibility of computational research in neuroscience. 
In this study, we argue for the establishment of standardized statistical test metrics that enable the quantitative validation of network models on the level of the population dynamics. Despite the importance of validating the elementary components of a simulation, such as single cell dynamics, building networks from validated building blocks does not entail the validity of the simulation on the network scale. Therefore, we introduce a corresponding set of validation tests and present an example workflow that practically demonstrates the iterative model validation of a spiking neural network model against its reproduction on the SpiNNaker neuromorphic hardware system. We formally implement the workflow using a generic Python library that we introduce for validation tests on neural network activity data. Together with the companion study (Trensch et al., 2018), the work presents a consistent definition, formalization, and implementation of the verification and validation process for neural network simulations.}, langid = {english} } @@ -278,6 +294,7 @@ @article{hauflerSimulationsCorticalNetworks2023 pages = {JP284030}, issn = {0022-3751, 1469-7793}, doi = {10.1113/JP284030}, + urldate = {2023-02-28}, langid = {english} } @@ -292,6 +309,7 @@ @article{iyengarCuratedModelDevelopment2019 pages = {56}, issn = {1662-5196}, doi = {10.3389/fninf.2019.00056}, + urldate = {2022-06-08}, abstract = {Decades of research on neuromotor circuits and systems has provided valuable information on neuronal control of movement. Computational models of several elements of the neuromotor system have been developed at various scales, from sub-cellular to system. While several small models abound, their structured integration is the key to building larger and more biologically realistic models which can predict the behavior of the system in different scenarios. This effort calls for integration of elements across neuroscience and musculoskeletal biomechanics. 
There is also a need for development of methods and tools for structured integration that yield larger in silico models demonstrating a set of desired system responses. We take a small step in this direction with the NEUROmotor integration and Design (NEUROiD) platform. NEUROiD helps integrate results from motor systems anatomy, physiology, and biomechanics into an integrated neuromotor system model. Simulation and visualization of the model across multiple scales is supported. Standard electrophysiological operations such as slicing, current injection, recording of membrane potential, and local field potential are part of NEUROiD. The platform allows traceability of model parameters to primary literature. We illustrate the power and utility of NEUROiD by building a simple ankle model and its controlling neural circuitry by curating a set of published components. NEUROiD allows researchers to utilize remote high-performance computers for simulation, while controlling the model using a web browser.}, langid = {english} } @@ -306,10 +324,24 @@ @article{jedrzejewski-szmekParameterOptimizationUsing2018 pages = {47}, issn = {1662-5196}, doi = {10.3389/fninf.2018.00047}, + urldate = {2022-06-08}, abstract = {Computational models in neuroscience can be used to predict causal relationships between biological mechanisms in neurons and networks, such as the effect of blocking an ion channel or synaptic connection on neuron activity. Since developing a biophysically realistic, single neuron model is exceedingly difficult, software has been developed for automatically adjusting parameters of computational neuronal models. The ideal optimization software should work with commonly used neural simulation software; thus, we present software which works with models specified in declarative format for the MOOSE simulator. Experimental data can be specified using one of two different file formats. The fitness function is customizable as a weighted combination of feature differences. 
The optimization itself uses the covariance matrix adaptation-evolutionary strategy, because it is robust in the face of local fluctuations of the fitness function, and deals well with a high-dimensional and discontinuous fitness landscape. We demonstrate the versatility of the software by creating several model examples of each of four types of neurons (two subtypes of spiny projection neurons and two subtypes of globus pallidus neurons) by tuning to current clamp data. Optimizations reached convergence within 1,600\textendash 4,000 model evaluations (200\textendash 500 generations \texttimes{} population size of 8). Analysis of the parameters of the best fitting models revealed differences between neuron subtypes, which are consistent with prior experimental results. Overall our results suggest that this easy-to-use, automatic approach for finding neuron channel parameters may be applied to current clamp recordings from neurons exhibiting different biochemical markers to help characterize ionic differences between other neuron subtypes.}, langid = {english} } +@techreport{jinBayesianInferenceSpectral2023, + type = {Preprint}, + title = {Bayesian {{Inference}} of a {{Spectral Graph Model}} for {{Brain Oscillations}}}, + author = {Jin, Huaqing and Verma, Parul and Jiang, Fei and Nagarajan, Srikantan and Raj, Ashish}, + year = {2023}, + month = mar, + institution = {{Neuroscience}}, + doi = {10.1101/2023.03.01.530704}, + urldate = {2023-05-24}, + abstract = {The relationship between brain functional connectivity and structural connectivity has caught extensive attention of the neuroscience community, commonly inferred using mathematical modeling. Among many modeling approaches, spectral graph model (SGM) is distinctive as it has a closed-form solution of the wide-band frequency spectra of brain oscillations, requiring only global biophysically interpretable parameters. While SGM is parsimonious in parameters, the determination of SGM parameters is nontrivial. 
Prior works on SGM determine the parameters through a computational intensive annealing algorithm, which only provides a point estimate with no confidence intervals for parameter estimates. To fill this gap, we incorporate the simulation-based inference (SBI) algorithm and develop a Bayesian procedure for inferring the posterior distribution of the SGM parameters. Furthermore, using SBI dramatically reduces the computational burden for inferring the SGM parameters. We evaluate the proposed SBI-SGM framework on the resting-state magnetoencephalography recordings from healthy subjects and show that the proposed procedure has similar performance to the annealing algorithm in recovering power spectra and the spatial distribution of the alpha frequency band. In addition, we also analyze the correlations among the parameters and their uncertainty with the posterior distribution which can not be done with annealing inference. These analyses provide a richer understanding of the interactions among biophysical parameters of the SGM. In general, the use of simulation-based Bayesian inference enables robust and efficient computations of generative model parameter uncertainties and may pave the way for the use of generative models in clinical translation applications.}, + langid = {english} +} + @article{jungDynamicCausalModeling2019, title = {Dynamic Causal Modeling for Calcium Imaging: {{Exploration}} of Differential Effective Connectivity for Sensory Processing in a Barrel Cortical Column}, shorttitle = {Dynamic Causal Modeling for Calcium Imaging}, @@ -321,6 +353,7 @@ @article{jungDynamicCausalModeling2019 pages = {116008}, issn = {10538119}, doi = {10.1016/j.neuroimage.2019.116008}, + urldate = {2022-06-08}, abstract = {Multi-photon calcium imaging (CaI) is an important tool to assess activities of neural populations within a column in the sensory cortex. 
However, the complex asymmetrical interactions among neural populations, termed effective connectivity, cannot be directly assessed by measuring the activity of each neuron or neural population using CaI but calls for computational modeling. To estimate effective connectivity among neural populations, we proposed a dynamic causal model (DCM) for CaI by combining a convolution-based dynamic neural state model and a dynamic calcium ion concentration model for CaI signals. After conducting a simulation study to evaluate DCM for CaI, we applied it to an experimental CaI signals measured at the layer 2/3 of a barrel cortical column that differentially responds to hit and error whisking trials in mice. We first identified neural populations and constructed computational models with intrinsic connectivity of neural populations within the layer 2/3 of the barrel cortex and extrinsic connectivity with latent external modes. Bayesian model inversion and comparison shows that interactions with latent inhibitory and excitatory external modes explain the observed CaI signals within the barrel cortical column better than any other tested models, with a single external mode or without any latent modes. The best model also showed differential intrinsic and extrinsic effective connectivity between hit and error trials in the functional hierarchy. Both simulation and experimental results suggest the usefulness of DCM for CaI in terms of exploration of hierarchical interactions among neural populations observed in CaI.}, langid = {english} } @@ -336,6 +369,7 @@ @article{kanariComputationalSynthesisCortical2022 pages = {110586}, issn = {22111247}, doi = {10.1016/j.celrep.2022.110586}, + urldate = {2022-06-08}, abstract = {Neuronal morphologies provide the foundation for the electrical behavior of neurons, the connectomes they form, and the dynamical properties of the brain. 
Comprehensive neuron models are essential for defining cell types, discerning their functional roles, and investigating brain-disease-related dendritic alterations. However, a lack of understanding of the principles underlying neuron morphologies has hindered attempts to computationally synthesize morphologies for decades. We introduce a synthesis algorithm based on a topological descriptor of neurons, which enables the rapid digital reconstruction of entire brain regions from few reference cells. This topology-guided synthesis generates dendrites that are statistically similar to biological reconstructions in terms of morpho-electrical and connectivity properties and offers a significant opportunity to investigate the links between neuronal morphology and brain function across different spatiotemporal scales. Synthesized cortical networks based on structurally altered dendrites associated with diverse brain pathologies revealed principles linking branching properties to the structure of large-scale networks.}, langid = {english} } @@ -351,6 +385,7 @@ @article{kanekoDevelopmentallyRegulatedImpairment2022 pages = {110580}, issn = {22111247}, doi = {10.1016/j.celrep.2022.110580}, + urldate = {2023-02-28}, abstract = {Dravet syndrome is a neurodevelopmental disorder characterized by epilepsy, intellectual disability, and sudden death due to pathogenic variants in SCN1A with loss of function of the sodium channel subunit Nav1.1. Nav1.1-expressing parvalbumin GABAergic interneurons (PV-INs) from young Scn1a+/{$-$} mice show impaired action potential generation. An approach assessing PV-IN function in the same mice at two time points shows impaired spike generation in all Scn1a+/{$-$} mice at postnatal days (P) 16\textendash 21, whether deceased prior or surviving to P35, with normalization by P35 in surviving mice. However, PV-IN synaptic transmission is dysfunctional in young Scn1a+/{$-$} mice that did not survive and in Scn1a+/{$-$} mice {$\geq$} P35. 
Modeling confirms that PV-IN axonal propagation is more sensitive to decreased sodium conductance than spike generation. These results demonstrate dynamic dysfunction in Dravet syndrome: combined abnormalities of PV-IN spike generation and propagation drives early disease severity, while ongoing dysfunction of synaptic transmission contributes to chronic pathology.}, langid = {english} } @@ -365,6 +400,7 @@ @article{linneNeuroinformaticsComputationalModelling2018 pages = {56--61}, issn = {17427835}, doi = {10.1111/bcpt.13075}, + urldate = {2022-06-08}, abstract = {Neuroinformatics is an area of science that aims to integrate neuroscience data and develop modern computational tools to increase our understanding of the functions of the nervous system in health and disease. Neuroinformatics tools include, among others, databases for storing and sharing data, repositories for managing documents and source code, and software tools for analysing, modelling and simulating signals and images. This MiniReview aims to present the state of the art in neuroinformatics and computational in silico modelling of neurobiological processes and neuroscientific phenomena as well as to discuss the use of in silico models in neurotoxicology research. In silico modelling can be considered a new, complementary tool in chemical design to predict potential neurotoxicity and in neurotoxicity testing to help clarify initial hypothesis obtained in in vitro and in vivo. 
Validated in silico models can be used to identify pharmacological targets, to help bridge in vitro and in vivo studies and, ultimately, to develop safer chemicals and efficient therapeutic strategies.}, langid = {english} } @@ -380,6 +416,7 @@ @article{maki-marttunenStepwiseNeuronModel2018 pages = {264--283}, issn = {01650270}, doi = {10.1016/j.jneumeth.2017.10.007}, + urldate = {2022-06-08}, abstract = {Background: Recent progress in electrophysiological and optical methods for neuronal recordings provides vast amounts of high-resolution data. In parallel, the development of computer technology has allowed simulation of ever-larger neuronal circuits. A challenge in taking advantage of these developments is the construction of single-cell and network models in a way that faithfully reproduces neuronal biophysics with subcellular level of details while keeping the simulation costs at an acceptable level. New method: In this work, we develop and apply an automated, stepwise method for fitting a neuron model to data with fine spatial resolution, such as that achievable with voltage sensitive dyes (VSDs) and Ca2+ imaging. Result: We apply our method to simulated data from layer 5 pyramidal cells (L5PCs) and construct a model with reduced neuronal morphology. We connect the reduced-morphology neurons into a network and validate against simulated data from a high-resolution L5PC network model. Comparison with existing methods: Our approach combines features from several previously applied model-fitting strategies. The reduced-morphology neuron model obtained using our approach reliably reproduces the membrane-potential dynamics across the dendrites as predicted by the full-morphology model. 
Conclusions: The network models produced using our method are cost-efficient and predict that interconnected L5PCs are able to amplify delta-range oscillatory inputs across a large range of network sizes and topologies, largely due to the medium after hyperpolarization mediated by the Ca2+-activated SK current.}, langid = {english} } @@ -394,6 +431,7 @@ @article{marinUseMultimodalOptimizer2021 pages = {663797}, issn = {1662-5196}, doi = {10.3389/fninf.2021.663797}, + urldate = {2022-06-08}, abstract = {This article extends a recent methodological workflow for creating realistic and computationally efficient neuron models whilst capturing essential aspects of singleneuron dynamics. We overcome the intrinsic limitations of the extant optimization methods by proposing an alternative optimization component based on multimodal algorithms. This approach can natively explore a diverse population of neuron model configurations. In contrast to methods that focus on a single global optimum, the multimodal method allows directly obtaining a set of promising solutions for a single but complex multi-feature objective function. The final sparse population of candidate solutions has to be analyzed and evaluated according to the biological plausibility and their objective to the target features by the expert. In order to illustrate the value of this approach, we base our proposal on the optimization of cerebellar granule cell (GrC) models that replicate the essential properties of the biological cell. Our results show the emerging variability of plausible sets of values that this type of neuron can adopt underlying complex spiking characteristics. Also, the set of selected cerebellar GrC models captured spiking dynamics closer to the reference model than the single model obtained with off-the-shelf parameter optimization algorithms used in our previous article. 
The method hereby proposed represents a valuable strategy for adjusting a varied population of realistic and simplified neuron models. It can be applied to other kinds of neuron models and biological contexts.}, langid = {english} } @@ -408,6 +446,7 @@ @article{masoliComputationalModelsNeurotransmission2022 pages = {1006989}, issn = {1662-5188}, doi = {10.3389/fncom.2022.1006989}, + urldate = {2023-02-28}, abstract = {The neuroscientific field benefits from the conjoint evolution of experimental and computational techniques, allowing for the reconstruction and simulation of complex models of neurons and synapses. Chemical synapses are characterized by presynaptic vesicle cycling, neurotransmitter diffusion, and postsynaptic receptor activation, which eventually lead to postsynaptic currents and subsequent membrane potential changes. These mechanisms have been accurately modeled for different synapses and receptor types (AMPA, NMDA, and GABA) of the cerebellar cortical network, allowing simulation of their impact on computation. Of special relevance is short-term synaptic plasticity, which generates spatiotemporal filtering in local microcircuits and controls burst transmission and information flow through the network. Here, we present how data-driven computational models recapitulate the properties of neurotransmission at cerebellar synapses. 
The simulation of microcircuit models is starting to reveal how diverse synaptic mechanisms shape the spatiotemporal profiles of circuit activity and computation.}, langid = {english} } @@ -422,6 +461,7 @@ @article{meyerPypetPythonToolkit2016 volume = {10}, issn = {1662-5196}, doi = {10.3389/fninf.2016.00038}, + urldate = {2022-06-08}, langid = {english} } @@ -436,6 +476,7 @@ @article{neymotinOptimizingComputerModels2017 pages = {148--162}, issn = {0022-3077, 1522-1598}, doi = {10.1152/jn.00570.2016}, + urldate = {2022-06-08}, abstract = {Corticospinal neurons (SPI), thick-tufted pyramidal neurons in motor cortex layer 5B that project caudally via the medullary pyramids, display distinct class-specific electrophysiological properties in vitro: strong sag with hyperpolarization, lack of adaptation, and a nearly linear frequency-current ( F\textendash{} I) relationship. We used our electrophysiological data to produce a pair of large archives of SPI neuron computer models in two model classes: 1) detailed models with full reconstruction; and 2) simplified models with six compartments. We used a PRAXIS and an evolutionary multiobjective optimization (EMO) in sequence to determine ion channel conductances. EMO selected good models from each of the two model classes to form the two model archives. Archived models showed tradeoffs across fitness functions. For example, parameters that produced excellent F\textendash{} I fit produced a less-optimal fit for interspike voltage trajectory. Because of these tradeoffs, there was no single best model but rather models that would be best for particular usages for either single neuron or network explorations. Further exploration of exemplar models with strong F\textendash{} I fit demonstrated that both the detailed and simple models produced excellent matches to the experimental data. 
Although dendritic ion identities and densities cannot yet be fully determined experimentally, we explored the consequences of a demonstrated proximal to distal density gradient of I h , demonstrating that this would lead to a gradient of resonance properties with increased resonant frequencies more distally. We suggest that this dynamical feature could serve to make the cell particularly responsive to major frequency bands that differ by cortical layer. NEW \& NOTEWORTHY We developed models of motor cortex corticospinal neurons that replicate in vitro dynamics, including hyperpolarization-induced sag and realistic firing patterns. Models demonstrated resonance in response to synaptic stimulation, with resonance frequency increasing in apical dendrites with increasing distance from soma, matching the increasing oscillation frequencies spanning deep to superficial cortical layers. This gradient may enable specific corticospinal neuron dendrites to entrain to relevant oscillations in different cortical layers, contributing to appropriate motor output commands.}, langid = {english} } @@ -451,6 +492,7 @@ @article{nolteCorticalReliabilityNoise2019a pages = {3792}, issn = {2041-1723}, doi = {10.1038/s41467-019-11633-8}, + urldate = {2023-02-28}, abstract = {Abstract Typical responses of cortical neurons to identical sensory stimuli appear highly variable. It has thus been proposed that the cortex primarily uses a rate code. However, other studies have argued for spike-time coding under certain conditions. The potential role of spike-time coding is directly limited by the internally generated variability of cortical circuits, which remains largely unexplored. Here, we quantify this internally generated variability using a biophysical model of rat neocortical microcircuitry with biologically realistic noise sources. 
We find that stochastic neurotransmitter release is a critical component of internally generated variability, causing rapidly diverging, chaotic recurrent network dynamics. Surprisingly, the same nonlinear recurrent network dynamics can transiently overcome the chaos in response to weak feed-forward thalamocortical inputs, and support reliable spike times with millisecond precision. Our model shows that the noisy and chaotic network dynamics of recurrent cortical microcircuitry are compatible with stimulus-evoked, millisecond spike-time reliability, resolving a long-standing debate.}, langid = {english} } @@ -466,6 +508,7 @@ @article{pagkalosIntroducingDendrifyFramework2023 pages = {131}, issn = {2041-1723}, doi = {10.1038/s41467-022-35747-8}, + urldate = {2023-02-28}, abstract = {Abstract Computational modeling has been indispensable for understanding how subcellular neuronal features influence circuit processing. However, the role of dendritic computations in network-level operations remains largely unexplored. This is partly because existing tools do not allow the development of realistic and efficient network models that account for dendrites. Current spiking neural networks, although efficient, are usually quite simplistic, overlooking essential dendritic properties. Conversely, circuit models with morphologically detailed neuron models are computationally costly, thus impractical for large-network simulations. To bridge the gap between these two extremes and facilitate the adoption of dendritic features in spiking neural networks, we introduce Dendrify, an open-source Python package based on Brian 2. Dendrify, through simple commands, automatically generates reduced compartmental neuron models with simplified yet biologically relevant dendritic and synaptic integrative properties. 
Such models strike a good balance between flexibility, performance, and biological accuracy, allowing us to explore dendritic contributions to network-level functions while paving the way for developing more powerful neuromorphic systems.}, langid = {english} } @@ -481,10 +524,26 @@ @article{ramaswamyDataDrivenModelingCholinergic2018 pages = {77}, issn = {1662-5110}, doi = {10.3389/fncir.2018.00077}, + urldate = {2022-06-08}, abstract = {Neuromodulators, such as acetylcholine (ACh), control information processing in neural microcircuits by regulating neuronal and synaptic physiology. Computational models and simulations enable predictions on the potential role of ACh in reconfiguring network activity. As a prelude into investigating how the cellular and synaptic effects of ACh collectively influence emergent network dynamics, we developed a data-driven framework incorporating phenomenological models of the physiology of cholinergic modulation of neocortical cells and synapses. The first-draft models were integrated into a biologically detailed tissue model of neocortical microcircuitry to investigate the effects of levels of ACh on diverse neuron types and synapses, and consequently on emergent network activity. Preliminary simulations from the framework, which was not tuned to reproduce any specific ACh-induced network effects, not only corroborate the long-standing notion that ACh desynchronizes spontaneous network activity, but also predict that a dose-dependent activation of ACh gives rise to a spectrum of neocortical network activity. We show that low levels of ACh, such as during non-rapid eye movement (nREM) sleep, drive microcircuit activity into slow oscillations and network synchrony, whereas high ACh concentrations, such as during wakefulness and REM sleep, govern fast oscillations and network asynchrony. 
In addition, spontaneous network activity modulated by ACh levels shape spike-time cross-correlations across distinct neuronal populations in strikingly different ways. These effects are likely due to the regulation of neurons and synapses caused by increasing levels of ACh, which enhances cellular excitability and decreases the efficacy of local synaptic transmission. We conclude by discussing future directions to refine the biological accuracy of the framework, which will extend its utility and foster the development of hypotheses to investigate the role of neuromodulators in neural information processing.}, langid = {english} } +@article{reyes-sanchezAutomatizedOfflineOnline2023, + title = {Automatized Offline and Online Exploration to Achieve a Target Dynamics in Biohybrid Neural Circuits Built with Living and Model Neurons}, + author = {{Reyes-Sanchez}, Manuel and Amaducci, Rodrigo and {Sanchez-Martin}, Pablo and Elices, Irene and Rodriguez, Francisco B. and Varona, Pablo}, + year = {2023}, + month = jul, + journal = {Neural Networks}, + volume = {164}, + pages = {464--475}, + issn = {08936080}, + doi = {10.1016/j.neunet.2023.04.034}, + urldate = {2023-05-24}, + abstract = {Biohybrid circuits of interacting living and model neurons are an advantageous means to study neural dynamics and to assess the role of specific neuron and network properties in the nervous system. Hybrid networks are also a necessary step to build effective artificial intelligence and brain hybridization. In this work, we deal with the automatized online and offline adaptation, exploration and parameter mapping to achieve a target dynamics in hybrid circuits and, in particular, those that yield dynamical invariants between living and model neurons. We address dynamical invariants that form robust cycle-by-cycle relationships between the intervals that build neural sequences from such interaction. 
Our methodology first attains automated adaptation of model neurons to work in the same amplitude regime and time scale of living neurons. Then, we address the automatized exploration and mapping of the synapse parameter space that lead to a specific dynamical invariant target. Our approach uses multiple configurations and parallel computing from electrophysiological recordings of living neurons to build full mappings, and genetic algorithms to achieve an instance of the target dynamics for the hybrid circuit in a short time. We illustrate and validate such strategy in the context of the study of functional sequences in neural rhythms, which can be easily generalized for any variety of hybrid circuit configuration. This approach facilitates both the building of hybrid circuits and the accomplishment of their scientific goal.}, + langid = {english} +} + @article{royIonChannelDegeneracy2022, title = {Ion-channel Degeneracy and Heterogeneities in the Emergence of Complex Spike Bursts in {{CA3}} Pyramidal Neurons}, author = {Roy, Rituparna and Narayanan, Rishikesh}, @@ -494,6 +553,20 @@ @article{royIonChannelDegeneracy2022 pages = {JP283539}, issn = {0022-3751, 1469-7793}, doi = {10.1113/JP283539}, + urldate = {2023-02-28}, + langid = {english} +} + +@techreport{sanabriaCellTypeSpecificConnectivity2023, + type = {Preprint}, + title = {Cell-{{Type Specific Connectivity}} of {{Whisker-Related Sensory}} and {{Motor Cortical Input}} to {{Dorsal Striatum}}}, + author = {Sanabria, Branden D. and Baskar, Sindhuja S. and Yonk, Alex J. and Lee, Christian R. and Margolis, David J.}, + year = {2023}, + month = mar, + institution = {{Neuroscience}}, + doi = {10.1101/2023.03.06.531405}, + urldate = {2023-05-24}, + abstract = {Abstract The anterior dorsolateral striatum (DLS) is heavily innervated by convergent excitatory projections from the primary motor (M1) and sensory cortex (S1) and is considered an important site of sensorimotor integration. 
M1 and S1 corticostriatal synapses have functional differences in the strength of their connections with striatal spiny projection neurons (SPNs) and fast-spiking interneurons (FSIs) in the DLS, and as a result exert an opposing influence on sensory-guided behaviors. In the present study, we tested whether M1 and S1 inputs exhibit differences in the subcellular anatomical distribution onto striatal neurons. We injected adeno-associated viral vectors encoding spaghetti monster fluorescent proteins (sm.FPs) into M1 and S1, and used confocal microscopy to generate 3D reconstructions of corticostriatal inputs to single identified SPNs and FSIs obtained through ex-vivo patch-clamp electrophysiology. We found that SPNs are less innervated by S1 compared to M1, but FSIs receive a similar number of inputs from both M1 and S1. In addition, M1 and S1 inputs were distributed similarly across the proximal, medial, and distal regions of SPNs and FSIs. Notably, clusters of inputs were prevalent in SPNs but not FSIs. Our results suggest that SPNs have stronger functional connectivity to M1 compared to S1 due to a higher density of synaptic inputs. The clustering of M1 and S1 inputs onto SPNs but not FSIs suggest that cortical inputs are integrated through cell-type specific mechanisms and more generally have implications for how sensorimotor integration is performed in the striatum. Significance Statement The dorsolateral striatum (DLS) is a key brain area involved in sensorimotor integration due to its dense innervation by the primary motor (M1) and sensory cortex (S1). However, the quantity and anatomical distribution of these inputs to the striatal cell population has not been well characterized. In this study we demonstrate that corticostriatal projections from M1 and S1 differentially innervate spiny projection neurons (SPNs) and fast-spiking interneurons (FSIs) in the DLS. 
S1 inputs innervate SPNs less than M1 and are likely to form synaptic clusters in SPNs but not in FSIs. These findings suggest that sensorimotor integration is partly achieved by differences in the synaptic organization of corticostriatal inputs to local striatal microcircuits.}, langid = {english} } @@ -503,12 +576,11 @@ @misc{sarmaIntegrativeBiologicalSimulation2019 year = {2019}, month = jan, number = {arXiv:1811.03493}, - eprint = {1811.03493}, - eprinttype = {arxiv}, - primaryclass = {cs, q-bio}, + eprint = {1811.03493}, publisher = {{arXiv}}, + urldate = {2022-06-08}, abstract = {We describe a biologically-inspired research agenda with parallel tracks aimed at AI and AI safety. The bottom-up component consists of building a sequence of biophysically realistic simulations of simple organisms such as the nematode Caenorhabditis elegans, the fruit fly Drosophila melanogaster, and the zebrafish Danio rerio to serve as platforms for research into AI algorithms and system architectures. The top-down component consists of an approach to value alignment that grounds AI goal structures in neuropsychology, broadly considered. Our belief is that parallel pursuit of these tracks will inform the development of value-aligned AI systems that have been inspired by embodied organisms with sensorimotor integration. An important set of side benefits is that the research trajectories we describe here are grounded in long-standing intellectual traditions within existing research communities and funding structures. 
In addition, these research programs overlap with significant contemporary themes in the biological and psychological sciences such as data/model integration and reproducibility.}, - archiveprefix = {arXiv}, + archiveprefix = {arxiv}, langid = {english}, keywords = {Computer Science - Artificial Intelligence,Computer Science - Machine Learning,Computer Science - Neural and Evolutionary Computing,Quantitative Biology - Neurons and Cognition} } @@ -523,6 +595,7 @@ @inproceedings{shenAutomaticFittingNeuron2018 publisher = {{IEEE}}, address = {{Chengdu}}, doi = {10.1109/ICCCBDA.2018.8386578}, + urldate = {2022-06-08}, abstract = {Artificial neural networks are inspired by biological neural networks formed by many real neurons with spiking activities. It is important to simulate the spiking activities under different conditions. It is well known that the Hodgkin-Huxley (HH) equations can be used for simulation. However, we usually don't know the conductance of ion channels in the equation, which is required for simulation. In this paper, we develop a parallel genetic algorithm to estimate the conductance with a visual software tool. By fitting the experimental data, it is shown that when the number of individuals in the genetic algorithm is above 2000, the 5th generation can yield a near optimal solution and achieve a good fitting result.}, isbn = {978-1-5386-4301-3}, langid = {english} @@ -539,6 +612,7 @@ @article{sinhaActiveDendritesLocal2022 pages = {111--142}, issn = {03064522}, doi = {10.1016/j.neuroscience.2021.08.035}, + urldate = {2022-06-08}, abstract = {Neurons and glial cells are endowed with membranes that express a rich repertoire of ion channels, transporters, and receptors. The constant flux of ions across the neuronal and glial membranes results in voltage fluctuations that can be recorded from the extracellular matrix. 
The high frequency components of this voltage signal contain information about the spiking activity, reflecting the output from the neurons surrounding the recording location. The low frequency components of the signal, referred to as the local field potential (LFP), have been traditionally thought to provide information about the synaptic inputs that impinge on the large dendritic trees of various neurons. In this review, we discuss recent computational and experimental studies pointing to a critical role of several active dendritic mechanisms that can influence the genesis and the location-dependent spectro-temporal dynamics of LFPs, spanning different brain regions. We strongly emphasize the need to account for the several fast and slow dendritic events and associated active mechanisms \textemdash{} including gradients in their expression profiles, inter- and intra-cellular spatio-temporal interactions spanning neurons and glia, heterogeneities and degeneracy across scales, neuromodulatory influences, and activity-dependent plasticity \textemdash{} towards gaining important insights about the origins of LFP under different behavioral states in health and disease. We provide simple but essential guidelines on how to model LFPs taking into account these dendritic mechanisms, with detailed methodology on how to account for various heterogeneities and electrophysiological properties of neurons and synapses while studying LFPs.}, langid = {english} } @@ -554,6 +628,7 @@ @inproceedings{sivagnanamNeuroscienceGatewayEnabling2018 publisher = {{ACM}}, address = {{Pittsburgh PA USA}}, doi = {10.1145/3219104.3219139}, + urldate = {2022-06-08}, abstract = {The NSF funded Neuroscience Gateway (NSG) has been in operation since the early 2013. We originally designed NSG to reduce technical and administrative barriers that exist to using high performance computing resources for computational neuroscientists. 
In the last two years, in addition to computational neuroscientists, cognitive and experimental neuroscientists are also using NSG. Currently NSG has over 600 registered users and it is steadily growing. Users can access NSG via a web portal and via RESTful programmatic access. A particular usage mode of programmatic access to NSG enables users of community neuroscience projects such as the Open Source Brain, research projects within the European Human Brain Project and others to access HPC resources via NSG without having to obtain their own accounts on NSG. Based on demand and usage, over the last five years we have successfully acquired increasingly larger allocations (millions to \textasciitilde ten million core hours) on resources of the Extreme Science and Engineering Discovery Environment (XSEDE) program via the competitive peer review process. We will discuss the overall NSG architecture. We implemented NSG from the generic CIPRES science gateway software to create the NSG specifically for the neuroscience community. We will describe the front end user interface, based on web portal and RESTful programmatic access, and the backend architecture. We will discuss how NSG is evolving over time in response to the interests and needs of the neuroscience community, adapting itself to become a dissemination platform for new tools and pipelines, and becoming an environment for modelers and experimentalists to jointly develop models.}, isbn = {978-1-4503-6446-1}, langid = {english} @@ -570,6 +645,7 @@ @article{stocktonIntegratingAllenBrain2017 pages = {333--342}, issn = {1539-2791, 1559-0089}, doi = {10.1007/s12021-017-9337-x}, + urldate = {2022-06-08}, abstract = {We developed software tools to download, extract features, and organize the Cell Types Database from the Allen Brain Institute (ABI) in order to integrate its whole cell patch clamp characterization data into the automated modeling/data analysis cycle. 
To expand the potential user base we employed both Python and MATLAB. The basic set of tools downloads selected raw data and extracts cell, sweep, and spike features, using ABI's feature extraction code. To facilitate data manipulation we added a tool to build a local specialized database of raw data plus extracted features. Finally, to maximize automation, we extended our NeuroManager workflow automation suite to include these tools plus a separate investigation database. The extended suite allows the user to integrate ABI experimental and modeling data into an automated workflow deployed on heterogeneous computer infrastructures, from local servers, to high performance computing environments, to the cloud. Since our approach is focused on workflow procedures our tools can be modified to interact with the increasing number of neuroscience databases being developed to cover all scales and properties of the nervous system.}, langid = {english} } @@ -584,8 +660,7 @@ @article{yegenogluExploringParameterHyperParameter2022 pages = {885207}, issn = {1662-5188}, doi = {10.3389/fncom.2022.885207}, + urldate = {2023-02-28}, abstract = {Neuroscience models commonly have a high number of degrees of freedom and only specific regions within the parameter space are able to produce dynamics of interest. This makes the development of tools and strategies to efficiently find these regions of high importance to advance brain research. Exploring the high dimensional parameter space using numerical simulations has been a frequently used technique in the last years in many areas of computational neuroscience. Today, high performance computing (HPC) can provide a powerful infrastructure to speed up explorations and increase our general understanding of the behavior of the model in reasonable times. Learning to learn (L2L) is a well-known concept in machine learning (ML) and a specific method for acquiring constraints to improve learning performance. 
This concept can be decomposed into a two loop optimization process where the target of optimization can consist of any program such as an artificial neural network, a spiking network, a single cell model, or a whole brain simulation. In this work, we present L2L as an easy to use and flexible framework to perform parameter and hyper-parameter space exploration of neuroscience models on HPC infrastructure. Learning to learn is an implementation of the L2L concept written in Python. This open-source software allows several instances of an optimization target to be executed with different parameters in an embarrassingly parallel fashion on HPC. L2L provides a set of built-in optimizer algorithms, which make adaptive and efficient exploration of parameter spaces possible. Different from other optimization toolboxes, L2L provides maximum flexibility for the way the optimization target can be executed. In this paper, we show a variety of examples of neuroscience models being optimized within the L2L framework to execute different types of tasks. The tasks used to illustrate the concept go from reproducing empirical data to learning how to solve a problem in a dynamic environment. 
We particularly focus on simulations with models ranging from the single cell to the whole brain and using a variety of simulation engines like NEST, Arbor, TVB, OpenAI Gym, and NetLogo.}, langid = {english} } - - diff --git a/misc/github_wiki/bibtex/thesis_uses_BPO.bib b/misc/github_wiki/bibtex/thesis_uses_BPO.bib index 399f08d2..16b556d6 100644 --- a/misc/github_wiki/bibtex/thesis_uses_BPO.bib +++ b/misc/github_wiki/bibtex/thesis_uses_BPO.bib @@ -34,3 +34,11 @@ @phdthesis{luckmannSimulationBasedInferenceforNeuroscienceandBeyond year = {2022} } +@phdthesis{nylenStriatumSilicoThesis, + title = {On Striatum in Silico}, + author = {Nyl{\'e}n, Johanna Frost}, + langid = {english}, + school = {Karolinska Institutet}, + year = {2023} +} + diff --git a/misc/github_wiki/bibtex/uses_BPO.bib b/misc/github_wiki/bibtex/uses_BPO.bib index f884d289..d9bac249 100644 --- a/misc/github_wiki/bibtex/uses_BPO.bib +++ b/misc/github_wiki/bibtex/uses_BPO.bib @@ -9,10 +9,24 @@ @article{allamNeuronalPopulationModels2021a pages = {103279}, issn = {25890042}, doi = {10.1016/j.isci.2021.103279}, + urldate = {2023-02-28}, abstract = {Preclinical drug candidates are screened for their ability to ameliorate in vitro neuronal electrophysiology, and go/no-go decisions progress drugs to clinical trials based on population means across cells and animals. However, these measures do not mitigate clinical endpoint risk. Population-based modeling captures variability across multiple electrophysiological measures from healthy, disease, and drug phenotypes. We pursued optimizing therapeutic targets by identifying coherent sets of ion channel target modulations for recovering heterogeneous wild-type (WT) population excitability profiles from a heterogeneous Huntington's disease (HD) population. Our approach combines mechanistic simulations with population modeling of striatal neurons using evolutionary optimization algorithms to design `virtual drugs'. 
We introduce efficacy metrics to score populations and rank virtual drug candidates. We found virtual drugs using heuristic approaches that performed better than single target modulators and standard classification methods. We compare a real drug to virtual candidates and demonstrate a novel in silico triaging method.}, langid = {english} } +@techreport{arnaudonControllingMorphoelectrophysiologicalVariability2023, + type = {Preprint}, + title = {Controlling Morpho-Electrophysiological Variability of Neurons with Detailed Biophysical Models}, + author = {Arnaudon, Alexis and Reva, Maria and Zbili, Mickael and Markram, Henry and Van Geit, Werner and Kanari, Lida}, + year = {2023}, + month = apr, + institution = {{Neuroscience}}, + doi = {10.1101/2023.04.06.535923}, + urldate = {2023-05-24}, + abstract = {Variability is a universal feature among biological units such as neuronal cells as they enable a robust encoding of a high volume of information in neuronal circuits and prevent hyper synchronizations such as epileptic seizures. While most computational studies on electrophysiological variability in neuronal circuits were done with simplified neuron models, we instead focus on the variability of detailed biophysical models of neurons. With measures of experimental variability, we leverage a Markov chain Monte Carlo method to generate populations of electrical models able to reproduce the variability from sets of experimental recordings. By matching input resistances of soma and axon initial segments with the one of dendrites, we produce a compatible set of morphologies and electrical models that faithfully represent a given morpho-electrical type. We demonstrate our approach on layer 5 pyramidal cells with continuous adapting firing type and show that morphological variability is insufficient to reproduce electrical variability. 
Overall, this approach provides a strong statistical basis to create detailed models of neurons with controlled variability.}, + langid = {english} +} + @article{ben-shalomNeuroGPUAcceleratingMulticompartment2022a, title = {{{NeuroGPU}}: {{Accelerating}} Multi-Compartment, Biophysically Detailed Neuron Simulations on {{GPUs}}}, shorttitle = {{{NeuroGPU}}}, @@ -24,6 +38,7 @@ @article{ben-shalomNeuroGPUAcceleratingMulticompartment2022a pages = {109400}, issn = {01650270}, doi = {10.1016/j.jneumeth.2021.109400}, + urldate = {2023-02-28}, abstract = {Background: The membrane potential of individual neurons depends on a large number of interacting biophysical processes operating on spatial-temporal scales spanning several orders of magnitude. The multi-scale nature of these processes dictates that accurate prediction of membrane potentials in specific neurons requires the utili\- zation of detailed simulations. Unfortunately, constraining parameters within biologically detailed neuron models can be difficult, leading to poor model fits. This obstacle can be overcome partially by numerical opti\- mization or detailed exploration of parameter space. However, these processes, which currently rely on central processing unit (CPU) computation, often incur orders of magnitude increases in computing time for marginal improvements in model behavior. As a result, model quality is often compromised to accommodate compute resources. New Method: Here, we present a simulation environment, NeuroGPU, that takes advantage of the inherent parallelized structure of the graphics processing unit (GPU) to accelerate neuronal simulation. Results \& comparison with existing methods: NeuroGPU can simulate most biologically detailed models 10\textendash 200 times faster than NEURON simulation running on a single core and 5 times faster than GPU simulators (Cor\- eNEURON). 
NeuroGPU is designed for model parameter tuning and best performs when the GPU is fully utilized by running multiple ({$>$} 100) instances of the same model with different parameters. When using multiple GPUs, NeuroGPU can reach to a speed-up of 800 fold compared to single core simulations, especially when simulating the same model morphology with different parameters. We demonstrate the power of NeuroGPU through large-scale parameter exploration to reveal the response landscape of a neuron. Finally, we accelerate numerical optimization of biophysically detailed neuron models to achieve highly accurate fitting of models to simulation and experimental data. Conclusions: Thus, NeuroGPU is the fastest available platform that enables rapid simulation of multicompartment, biophysically detailed neuron models on commonly used computing systems accessible by many scientists.}, langid = {english} } @@ -39,6 +54,7 @@ @article{bereckiSCN1AGainFunction2019a pages = {514--525}, issn = {0364-5134, 1531-8249}, doi = {10.1002/ana.25438}, + urldate = {2023-02-28}, langid = {english} } @@ -53,6 +69,7 @@ @article{bolognaEBRAINSHodgkinHuxleyNeuron2022 pages = {991609}, issn = {1662-5196}, doi = {10.3389/fninf.2022.991609}, + urldate = {2023-02-28}, abstract = {In the last decades, brain modeling has been established as a fundamental tool for understanding neural mechanisms and information processing in individual cells and circuits at different scales of observation. Building data-driven brain models requires the availability of experimental data and analysis tools as well as neural simulation environments and, often, large scale computing facilities. All these components are rarely found in a comprehensive framework and usually require ad hoc programming. 
To address this, we developed the EBRAINS Hodgkin-Huxley Neuron Builder (HHNB), a web resource for building single cell neural models via the extraction of activity features from electrophysiological traces, the optimization of the model parameters via a genetic algorithm executed on high performance computing facilities and the simulation of the optimized model in an interactive framework. Thanks to its inherent characteristics, the HHNB facilitates the data-driven model building workflow and its reproducibility, hence fostering a collaborative approach to brain modeling.}, langid = {english} } @@ -68,6 +85,7 @@ @article{brysonGABAmediatedTonicInhibition2020a pages = {3192--3202}, issn = {0027-8424, 1091-6490}, doi = {10.1073/pnas.1906369117}, + urldate = {2023-02-28}, abstract = {Significance GABA ({$\gamma$}-aminobutyric acid) is the brain's predominant inhibitory neurotransmitter and exerts a strong inhibitory influence through extrasynaptic GABA A receptors. This form of neurotransmission is known as tonic inhibition. Tonic inhibition is usually thought to reduce the excitability of all neurons, but here we show that it can selectively modulate the excitability of different types of neurons. Surprisingly, tonic inhibition can increase excitability in a common subtype of interneuron, and modeling results suggest this is achieved through the neuron's electrophysiological, or functional, properties. These results provide insight into the impact of tonic inhibition upon neural activity and suggest a mechanism through which GABA may modulate the excitability of neurons in a selective manner. , The binding of GABA ({$\gamma$}-aminobutyric acid) to extrasynaptic GABA A receptors generates tonic inhibition that acts as a powerful modulator of cortical network activity. 
Despite GABA being present throughout the extracellular space of the brain, previous work has shown that GABA may differentially modulate the excitability of neuron subtypes according to variation in chloride gradient. Here, using biophysically detailed neuron models, we predict that tonic inhibition can differentially modulate the excitability of neuron subtypes according to variation in electrophysiological properties. Surprisingly, tonic inhibition increased the responsiveness (or gain) in models with features typical for somatostatin interneurons but decreased gain in models with features typical for parvalbumin interneurons. Patch-clamp recordings from cortical interneurons supported these predictions, and further in silico analysis was then performed to seek a putative mechanism underlying gain modulation. We found that gain modulation in models was dependent upon the magnitude of tonic current generated at depolarized membrane potential\textemdash a property associated with outward rectifying GABA A receptors. Furthermore, tonic inhibition produced two biophysical changes in models of relevance to neuronal excitability: 1) enhanced action potential repolarization via increased current flow into the dendritic compartment, and 2) reduced activation of voltage-dependent potassium channels. Finally, we show theoretically that reduced potassium channel activation selectively increases gain in models possessing action potential dynamics typical for somatostatin interneurons. Potassium channels in parvalbumin-type models deactivate rapidly and are unavailable for further modulation. 
These findings show that GABA can differentially modulate interneuron excitability and suggest a mechanism through which this occurs in silico via differences of intrinsic electrophysiological properties.}, langid = {english} } @@ -80,6 +98,7 @@ @techreport{buccinoMultimodalFittingApproach2022 month = aug, institution = {{Neuroscience}}, doi = {10.1101/2022.08.03.502468}, + urldate = {2023-02-28}, abstract = {In computational neuroscience, multicompartment models are among the most biophysically realistic representations of single neurons. Constructing such models usually involves the use of the patch-clamp technique to record somatic voltage signals under different experimental conditions. The experimental data are then used to fit the many parameters of the model. While patching of the soma is currently the gold-standard approach to build multicompartment models, several studies have also evidenced a richness of dynamics in dendritic and axonal sections. Recording from the soma alone makes it hard to observe and correctly parameterize the activity of non-somatic compartments.}, langid = {english} } @@ -92,10 +111,24 @@ @techreport{buchinMultimodalCharacterizationSimulation2020 month = apr, institution = {{Neuroscience}}, doi = {10.1101/2020.04.24.060178}, + urldate = {2022-06-07}, abstract = {Temporal lobe epilepsy is the fourth most common neurological disorder with about 40\% of patients not responding to pharmacological treatment. Increased cellular loss in the hippocampus is linked to disease severity and pathological phenotypes such as heightened seizure propensity. While the hippocampus is the target of therapeutic interventions such as temporal lobe resection, the impact of the disease at the cellular level remains unclear in humans. Here we show that properties of hippocampal granule cells change with disease progression as measured in living, resected hippocampal tissue excised from epilepsy patients. 
We show that granule cells increase excitability and shorten response latency while also enlarging in cellular volume, surface area and spine density. Single-cell RNA sequencing combined with simulations ascribe the observed electrophysiological changes to gradual modification in three key ion channel conductances: BK, Cav2.2 and Kir2.1. In a bio-realistic computational network model, we show that the changes related to disease progression bring the circuit into a more excitable state. In turn, we observe that by reversing these changes in the three key conductances produces a less excitable, ``early disease-like'' state. These results provide mechanistic understanding of epilepsy in humans and will inform future therapies such as viral gene delivery to reverse the course of the disorder.}, langid = {english} } +@techreport{cavarrettaModelingSynapticIntegration2023, + type = {Preprint}, + title = {Modeling Synaptic Integration of Bursty and Beta Oscillatory Inputs in Ventromedial Motor Thalamic Neurons in Normal and Parkinsonian States}, + author = {Cavarretta, Francesco and Jaeger, Dieter}, + year = {2023}, + month = apr, + institution = {{Neuroscience}}, + doi = {10.1101/2023.04.14.536959}, + urldate = {2023-05-24}, + abstract = {Abstract The Ventromedial Motor Thalamus (VM) is implicated in multiple motor functions and occupies a central position in the cortico-basal ganglia-thalamocortical loop. It integrates glutamatergic inputs from motor cortex (MC) and motor-related subcortical areas, and it is a major recipient of inhibition from basal ganglia. Previous experiments in vitro showed that dopamine depletion enhances the excitability of thalamocortical cells (TC) in VM due to reduced M-type potassium currents. To understand how these excitability changes impact synaptic integration in vivo, we constructed biophysically detailed VM TC models fit to normal and dopamine-depleted conditions, using the NEURON simulator. 
These models allowed us to assess the influence of excitability changes with dopamine depletion on the integration of synaptic inputs expected in vivo. We found that VM TCs in the dopamine-depleted state showed increased firing rates with the same synaptic inputs. Synchronous bursting in inhibitory input from the substantia nigra pars reticulata (SNR), as observed in parkinsonian conditions, evoked a post-inhibitory firing rate increase with a longer duration in dopamine-depleted than control conditions, due to different M-type potassium channel densities. With beta oscillations in the inhibitory inputs from SNR and the excitatory inputs from drivers and modulators, we observed spike-phase locking in the activity of the models in normal and dopamine-depleted states, which relayed and amplified the oscillations of the inputs, suggesting that the increased beta oscillations observed in VM of parkinsonian animals are predominantly a consequence of changes in the presynaptic activity rather than changes in intrinsic properties. Significance Statement The Ventromedial Motor Thalamus is implicated in multiple motor functions. Experiments in vitro showed this area undergoes homeostatic changes following dopamine depletion (parkinsonian state). Here we studied the impact of these changes in vivo, using biophysically detailed modeling. We found that dopamine depletion increased firing rate in the ventromedial thalamocortical neurons and changed their responses to synchronous inhibitory inputs from substantia nigra reticulata. 
All thalamocortical neuron models relayed and amplified beta oscillations from substantia nigra reticulata and cortical/subcortical inputs, suggesting that increased beta oscillations observed in parkinsonian animals predominantly reflect changes in presynaptic activity.}, + langid = {english} +} + @article{chindemiCalciumbasedPlasticityModel2022b, title = {A Calcium-Based Plasticity Model for Predicting Long-Term Potentiation and Depression in the Neocortex}, author = {Chindemi, Giuseppe and Abdellah, Marwan and Amsalem, Oren and {Benavides-Piccione}, Ruth and Delattre, Vincent and Doron, Michael and Ecker, Andr{\'a}s and Jaquier, Aur{\'e}lien T. and King, James and Kumbhar, Pramod and Monney, Caitlin and Perin, Rodrigo and R{\"o}ssert, Christian and Tuncel, Anil M. and Van Geit, Werner and DeFelipe, Javier and Graupner, Michael and Segev, Idan and Markram, Henry and Muller, Eilif B.}, @@ -107,6 +140,7 @@ @article{chindemiCalciumbasedPlasticityModel2022b pages = {3038}, issn = {2041-1723}, doi = {10.1038/s41467-022-30214-w}, + urldate = {2023-02-28}, abstract = {Abstract Pyramidal cells (PCs) form the backbone of the layered structure of the neocortex, and plasticity of their synapses is thought to underlie learning in the brain. However, such long-term synaptic changes have been experimentally characterized between only a few types of PCs, posing a significant barrier for studying neocortical learning mechanisms. Here we introduce a model of synaptic plasticity based on data-constrained postsynaptic calcium dynamics, and show in a neocortical microcircuit model that a single parameter set is sufficient to unify the available experimental findings on long-term potentiation (LTP) and long-term depression (LTD) of PC connections. In particular, we find that the diverse plasticity outcomes across the different PC types can be explained by cell-type-specific synaptic physiology, cell morphology and innervation patterns, without requiring type-specific plasticity. 
Generalizing the model to in vivo extracellular calcium concentrations, we predict qualitatively different plasticity dynamics from those observed in vitro. This work provides a first comprehensive null model for LTP/LTD between neocortical PC types in vivo, and an open framework for further developing models of cortical synaptic plasticity.}, langid = {english} } @@ -121,6 +155,7 @@ @inproceedings{damartDataDrivenBuilding2020 publisher = {{ACM}}, address = {{Canc\'un Mexico}}, doi = {10.1145/3377929.3398161}, + urldate = {2023-02-28}, isbn = {978-1-4503-7127-8}, langid = {english} } @@ -135,6 +170,7 @@ @inproceedings{doronDiscoveringUnexpectedLocal2019a publisher = {{ACM}}, address = {{Anchorage AK USA}}, doi = {10.1145/3292500.3330886}, + urldate = {2023-02-28}, abstract = {Scientific computational models are crucial for analyzing and understanding complex real-life systems that are otherwise difficult for experimentation. However, the complex behavior and the vast inputoutput space of these models often make them opaque, slowing the discovery of novel phenomena. In this work, we present Hint (Hessian INTerestingness) \textendash{} a new algorithm that can automatically and systematically explore black-box models and highlight local nonlinear interactions in the input-output space of the model. This tool aims to facilitate the discovery of interesting model behaviors that are unknown to the researchers. Using this simple yet powerful tool, we were able to correctly rank all pairwise interactions in known benchmark models and do so faster and with greater accuracy than state-of-the-art methods. We further applied Hint to existing computational neuroscience models, and were able to reproduce important scientific discoveries that were published years after the creation of those models. 
Finally, we ran Hint on two real-world models (in neuroscience and earth science) and found new behaviors of the model that were of value to domain experts.}, isbn = {978-1-4503-6201-6}, langid = {english} @@ -152,6 +188,7 @@ @article{eckerDataDrivenIntegration2020 pages = {1129--1145}, issn = {1050-9631, 1098-1063}, doi = {10.1002/hipo.23220}, + urldate = {2023-02-28}, abstract = {The anatomy and physiology of monosynaptic connections in rodent hippocampal CA1 have been extensively studied in recent decades. Yet, the resulting knowledge remains disparate and difficult to reconcile. Here, we present a data-driven approach to integrate the current state-of-the-art knowledge on the synaptic anatomy and physiology of rodent hippocampal CA1, including axo-dendritic innervation patterns, number of synapses per connection, quantal conductances, neurotransmitter release probability, and short-term plasticity into a single coherent resource. First, we undertook an extensive literature review of paired recordings of hippocampal neurons and compiled experimental data on their synaptic anatomy and physiology. The data collected in this manner is sparse and inhomogeneous due to the diversity of experimental techniques used by different groups, which necessitates the need for an integrative framework to unify these data. To this end, we extended a previously developed workflow for the neocortex to constrain a unifying in silico reconstruction of the synaptic physiology of CA1 connections. Our work identifies gaps in the existing knowledge and provides a complementary resource toward a more complete quantification of synaptic anatomy and physiology in the rodent hippocampal CA1 region.}, langid = {english} } @@ -166,6 +203,7 @@ @article{eckerHippocampalSharpWaveripples2022a pages = {e71850}, issn = {2050-084X}, doi = {10.7554/eLife.71850}, + urldate = {2023-02-28}, abstract = {Hippocampal place cells are activated sequentially as an animal explores its environment. 
These activity sequences are internally recreated (`replayed'), either in the same or reversed order, during bursts of activity (sharp wave-ripples [SWRs]) that occur in sleep and awake rest. SWR-associated replay is thought to be critical for the creation and maintenance of long-term memory. In order to identify the cellular and network mechanisms of SWRs and replay, we constructed and simulated a data-driven model of area CA3 of the hippocampus. Our results show that the chain-like structure of recurrent excitatory interactions established during learning not only determines the content of replay, but is essential for the generation of the SWRs as well. We find that bidirectional replay requires the interplay of the experimentally confirmed, temporally symmetric plasticity rule, and cellular adaptation. Our model provides a unifying framework for diverse phenomena involving hippocampal plasticity, representations, and dynamics, and suggests that the structured neural codes induced by learning may have greater influence over cortical network states than previously appreciated.}, langid = {english} } @@ -181,6 +219,7 @@ @article{frostnylenReciprocalInteractionStriatal2021 pages = {2135--2148}, issn = {0953-816X, 1460-9568}, doi = {10.1111/ejn.14854}, + urldate = {2022-06-07}, abstract = {The striatum is the main input stage of the basal ganglia receiving extrinsic input from cortex and thalamus. The striatal projection neurons (SPN) constitute 95\% of the neurons in the striatum in mice while the remaining 5\% are cholinergic and GABAergic interneurons. The cholinergic (ChIN) and low-threshold spiking interneurons (LTS) are spontaneously active and form a striatal subnetwork involved in salience detection and goal-directed learning. Activation of ChINs has been shown to inhibit LTS via muscarinic receptor type 4 (M4R) and LTS in turn can modulate ChINs via nitric oxide (NO) causing a prolonged depolarization. 
Thalamic input preferentially excites ChINs, whereas input from motor cortex favours LTS, but can also excite ChINs. This varying extrinsic input with intrinsic reciprocal, yet opposing, effects raises the possibility of a slow input-dependent modulatory subnetwork. Here, we simulate this subnetwork using multicompartmental neuron models that incorporate data regarding known ion channels and detailed morphological reconstructions. The modelled connections replicate the experimental data on muscarinic (M4R) and nitric oxide modulation onto LTS and ChIN, respectively, and capture their physiological interaction. Finally, we show that the cortical and thalamic inputs triggering the opposing modulation within the network induce periods of increased and decreased spiking activity in ChINs and LTS. This could provide different temporal windows for selective modulation by acetylcholine and nitric oxide, and the possibility of interaction with the wider striatal microcircuit.}, langid = {english} } @@ -194,6 +233,7 @@ @techreport{gerkinNeuronUnitPackageDatadriven2019a month = jun, institution = {{Neuroscience}}, doi = {10.1101/665331}, + urldate = {2023-02-28}, abstract = {Validating a quantitative scientific model requires comparing its predictions against many experimental observations, ideally from many labs, using transparent, robust, statistical comparisons. Unfortunately, in rapidly-growing fields like neuroscience, this is becoming increasingly untenable, even for the most conscientious scientists. Thus the merits and limitations of existing models, or whether a new model is an improvement on the state-of-the-art, is often unclear.}, langid = {english} } @@ -208,10 +248,27 @@ @article{goncalvesTrainingDeepNeural2020a pages = {e56261}, issn = {2050-084X}, doi = {10.7554/eLife.56261}, + urldate = {2022-06-07}, abstract = {Mechanistic modeling in neuroscience aims to explain observed phenomena in terms of underlying causes. 
However, determining which model parameters agree with complex and stochastic neural data presents a significant challenge. We address this challenge with a machine learning tool which uses deep neural density estimators\textemdash trained using model simulations\textemdash to carry out Bayesian inference and retrieve the full space of parameters compatible with raw data or selected data features. Our method is scalable in parameters and data features and can rapidly analyze new data after initial training. We demonstrate the power and flexibility of our approach on receptive fields, ion channels, and Hodgkin\textendash Huxley models. We also characterize the space of circuit configurations giving rise to rhythmic activity in the crustacean stomatogastric ganglion, and use these results to derive hypotheses for underlying compensation mechanisms. Our approach will help close the gap between data-driven and theory-driven models of neural dynamics.}, langid = {english} } +@article{guet-mccreightAgedependentIncreasedSag2023, + title = {Age-Dependent Increased Sag Amplitude in Human Pyramidal Neurons Dampens Baseline Cortical Activity}, + author = {{Guet-McCreight}, Alexandre and Chameh, Homeira Moradi and Mahallati, Sara and Wishart, Margaret and Tripathy, Shreejoy J and Valiante, Taufik A and Hay, Etay}, + year = {2023}, + month = apr, + journal = {Cerebral Cortex}, + volume = {33}, + number = {8}, + pages = {4360--4373}, + issn = {1047-3211, 1460-2199}, + doi = {10.1093/cercor/bhac348}, + urldate = {2023-05-24}, + abstract = {Abstract Aging involves various neurobiological changes, although their effect on brain function in humans remains poorly understood. The growing availability of human neuronal and circuit data provides opportunities for uncovering age-dependent changes of brain networks and for constraining models to predict consequences on brain activity. 
Here we found increased sag voltage amplitude in human middle temporal gyrus layer 5 pyramidal neurons from older subjects and captured this effect in biophysical models of younger and older pyramidal neurons. We used these models to simulate detailed layer 5 microcircuits and found lower baseline firing in older pyramidal neuron microcircuits, with minimal effect on response. We then validated the predicted reduced baseline firing using extracellular multielectrode recordings from human brain slices of different ages. Our results thus report changes in human pyramidal neuron input integration properties and provide fundamental insights into the neuronal mechanisms of altered cortical excitability and resting-state activity in human aging.}, + langid = {english} +} + @techreport{guet-mccreightInsilicoTestingNew2023, type = {Preprint}, title = {In-Silico Testing of New Pharmacology for Restoring Inhibition and Human Cortical Function in Depression}, @@ -220,6 +277,7 @@ @techreport{guet-mccreightInsilicoTestingNew2023 month = feb, institution = {{Neuroscience}}, doi = {10.1101/2023.02.22.529541}, + urldate = {2023-02-28}, abstract = {Reduced inhibition by somatostatin-expressing interneurons is associated with depression. Administration of positive allosteric modulators of {$\alpha$}5 subunit-containing GABAA receptor ({$\alpha$}5PAM) that selectively target this lost inhibition exhibit antidepressant and pro-cognitive effects in rodent models of chronic stress. However, the functional effects of {$\alpha$}5-PAM on the human brain in vivo are unknown, and currently cannot be assessed experimentally. We modeled the effects of {$\alpha$}5-PAM on tonic inhibition as measured in human neurons, and tested in silico {$\alpha$}5-PAM effects on detailed models of human cortical microcircuits in health and depression. 
We found that {$\alpha$}5PAM effectively recovered impaired cortical processing as quantified by stimulus detection metrics, and also recovered the power spectral density profile of the microcircuit EEG signals. We performed an {$\alpha$}5-PAM dose response and identified simulated EEG biomarkers. Our results serve to de-risk and facilitate {$\alpha$}5-PAM translation and provide biomarkers in non-invasive brain signals for monitoring target engagement and drug efficacy.}, langid = {english} } @@ -235,6 +293,7 @@ @article{hjorthPredictingSynapticConnectivity2021 pages = {685--701}, issn = {1539-2791, 1559-0089}, doi = {10.1007/s12021-021-09531-w}, + urldate = {2022-06-07}, abstract = {Simulation of large-scale networks of neurons is an important approach to understanding and interpreting experimental data from healthy and diseased brains. Owing to the rapid development of simulation software and the accumulation of quantitative data of different neuronal types, it is possible to predict both computational and dynamical properties of local microcircuits in a `bottomup' manner. Simulated data from these models can be compared with experiments and `top-down' modelling approaches, successively bridging the scales. Here we describe an open source pipeline, using the software Snudda, for predicting microcircuit connectivity and for setting up simulations using the NEURON simulation environment in a reproducible way. We also illustrate how to further `curate' data on single neuron morphologies acquired from public databases. This model building pipeline was used to set up a first version of a full-scale cellular level model of mouse dorsal striatum. 
Model components from that work are here used to illustrate the different steps that are needed when modelling subcortical nuclei, such as the basal ganglia.}, langid = {english} } @@ -248,6 +307,7 @@ @article{huntStrongReliableSynaptic2022a pages = {bhac246}, issn = {1047-3211, 1460-2199}, doi = {10.1093/cercor/bhac246}, + urldate = {2023-02-28}, abstract = {Abstract Synaptic transmission constitutes the primary mode of communication between neurons. It is extensively studied in rodent but not human neocortex. We characterized synaptic transmission between pyramidal neurons in layers 2 and 3 using neurosurgically resected human middle temporal gyrus (MTG, Brodmann area 21), which is part of the distributed language circuitry. We find that local connectivity is comparable with mouse layer 2/3 connections in the anatomical homologue (temporal association area), but synaptic connections in human are 3-fold stronger and more reliable (0\% vs 25\% failure rates, respectively). We developed a theoretical approach to quantify properties of spinous synapses showing that synaptic conductance and voltage change in human dendritic spines are 3\textendash 4-folds larger compared with mouse, leading to significant NMDA receptor activation in human unitary connections. This model prediction was validated experimentally by showing that NMDA receptor activation increases the amplitude and prolongs decay of unitary excitatory postsynaptic potentials in human but not in mouse connections. 
Since NMDA-dependent recurrent excitation facilitates persistent activity (supporting working memory), our data uncovers cortical microcircuit properties in human that may contribute to language processing in MTG.}, langid = {english} } @@ -264,6 +324,7 @@ @article{iavaroneExperimentallyconstrainedBiophysicalModels2019a pages = {e1006753}, issn = {1553-7358}, doi = {10.1371/journal.pcbi.1006753}, + urldate = {2023-02-28}, langid = {english} } @@ -275,10 +336,25 @@ @techreport{iavaroneThalamicControlSensory2022 month = mar, institution = {{Neuroscience}}, doi = {10.1101/2022.02.28.482273}, + urldate = {2022-06-07}, abstract = {Thalamoreticular circuitry is known to play a key role in attention, cognition and the generation of sleep spindles, and is implicated in numerous brain disorders, but the cellular and synaptic mechanisms remain intractable. Therefore, we developed the first detailed computational model of mouse thalamus and thalamic reticular nucleus microcircuitry that captures morphological and biophysical properties of \textasciitilde 14,000 neurons connected via \textasciitilde 6M synapses, and recreates biological synaptic and gap junction connectivity. Simulations recapitulate multiple independent network-level experimental findings across different brain states, providing a novel unifying cellular and synaptic account of spontaneous and evoked activity in both wakefulness and sleep. Furthermore, we found that: 1.) inhibitory rebound produces frequency-selective enhancement of thalamic responses during wakefulness, in addition to its role in spindle generation; 2.) thalamic interactions generate the characteristic waxing and waning of spindle oscillations; and 3.) changes in thalamic excitability (e.g. due to neuromodulation) control spindle frequency and occurrence. 
The model is openly available and provides a new tool to interpret spindle oscillations and test hypotheses of thalamoreticular circuit function and dysfunction across different network states in health and disease.}, langid = {english} } +@techreport{isbisterModelingSimulationNeocortical2023, + type = {Preprint}, + title = {Modeling and {{Simulation}} of {{Neocortical Micro-}} and {{Mesocircuitry}}. {{Part II}}: {{Physiology}} and {{Experimentation}}}, + shorttitle = {Modeling and {{Simulation}} of {{Neocortical Micro-}} and {{Mesocircuitry}}. {{Part II}}}, + author = {Isbister, James B and Ecker, Andr{\'a}s and Pokorny, Christoph and {Bola{\~n}os-Puchet}, Sirio and Egas Santander, Daniela and Arnaudon, Alexis and Awile, Omar and {Barros-Zulaica}, Natali and Blanco Alonso, Jorge and Boci, Elvis and Chindemi, Giuseppe and Courcol, Jean-Denis and Damart, Tanguy and Delemontex, Thomas and Dietz, Alexander and Ficarelli, Gianluca and Gevaert, Mike and Herttuainen, Joni and Ivaska, Genrich and Ji, Weina and Keller, Daniel and King, James and Kumbhar, Pramod and Lapere, Samuel and Litvak, Polina and Mandge, Darshan and Muller, Eilif B. and Pereira, Fernando and Planas, Judit and Ranjan, Rajnish and Reva, Maria and Romani, Armando and R{\"o}ssert, Christian and Sch{\"u}rmann, Felix and Sood, Vishal and Teska, Aleksandra and Tuncel, Anil and Van Geit, Werner and Wolf, Matthias and Markram, Henry and Ramaswamy, Srikanth and Reimann, Michael W.}, + year = {2023}, + month = may, + institution = {{Neuroscience}}, + doi = {10.1101/2023.05.17.541168}, + urldate = {2023-05-24}, + abstract = {In recent years, large-scale computational models of the cortex have emerged as a powerful way to study the multi-scale mechanisms of neural processing. 
However, due to computational costs and difficulty of parameterization, detailed biophysical reconstructions have so far been restricted to small volumes of tissue, where the study of macro- and meso-scale interactions that are central to cortical function is not possible. We describe here, and in a companion paper, an approach to address the scaling challenges and provide a model of multiple interacting cortical regions at a subcellular level of detail. The model consists of 4.2 million morphologically detailed neurons in 8 sub-regions and connected with 13.2 billion synapses through local and long-range connectivity. Its anatomical aspects are described in the companion paper; here, we introduce physiological models of neuronal activity and synaptic transmission that integrate a large number of literature sources and were built using previously published algorithms. Biological neuronal diversity was captured in 208 morpho-electrical neuron types, five types of synaptic short-term dynamics, and pathway-specificity of synaptic parameters. A representation of synaptic input from cortical regions not present in the model was added and efficiently calibrated to reference firing rates. The model exhibits a spectrum of dynamical states differing in the degree to which they are internally versus externally driven. We characterized which parts of the spectrum are compatible with available experimental data on layer-specific delays and amplitudes of responses to simple stimuli, and found an in vivo-like regime at the edge of a transition from asynchronous to synchronous spontaneous activity. We developed a rich set of simulation tools to recreate a diverse set of laboratory experiments in silico, providing further validation and demonstrating the utility of the model in a variety of paradigms. 
Finally, we found that the large spatial scale of the model, that incorporates multiple cortical regions, led to the emergence of multiple independent computational units interacting through long-range synaptic pathways. The model provides a framework for the continued integration of experimental findings, for challenging hypotheses and making testable predictions, and provides a foundation for further simulation-based studies of cortical processing and learning.}, + langid = {english} +} + @article{kalmbachHChannelsContributeDivergent2018c, title = {H-{{Channels Contribute}} to {{Divergent Intrinsic Membrane Properties}} of {{Supragranular Pyramidal Neurons}} in {{Human}} versus {{Mouse Cerebral Cortex}}}, author = {Kalmbach, Brian E. and Buchin, Anatoly and Long, Brian and Close, Jennie and Nandi, Anirban and Miller, Jeremy A. and Bakken, Trygve E. and Hodge, Rebecca D. and Chong, Peter and {de Frates}, Rebecca and Dai, Kael and Maltzer, Zoe and Nicovich, Philip R. and Keene, C. Dirk and Silbergeld, Daniel L. and Gwinn, Ryder P. and Cobbs, Charles and Ko, Andrew L. and Ojemann, Jeffrey G. and Koch, Christof and Anastassiou, Costas A. and Lein, Ed S. and Ting, Jonathan T.}, @@ -290,6 +366,7 @@ @article{kalmbachHChannelsContributeDivergent2018c pages = {1194-1208.e5}, issn = {08966273}, doi = {10.1016/j.neuron.2018.10.012}, + urldate = {2023-02-28}, abstract = {Gene expression studies suggest that differential ion channel expression contributes to differences in rodent versus human neuronal physiology. We tested whether h-channels more prominently contribute to the physiological properties of human compared to mouse supragranular pyramidal neurons. Singlecell/nucleus RNA sequencing revealed ubiquitous HCN1-subunit expression in excitatory neurons in human, but not mouse, supragranular layers. 
Using patch-clamp recordings, we found stronger h-channel-related membrane properties in supragranular pyramidal neurons in human temporal cortex, compared to mouse supragranular pyramidal neurons in temporal association area. The magnitude of these differences depended upon cortical depth and was largest in pyramidal neurons in deep L3. Additionally, pharmacologically blocking h-channels produced a larger change in membrane properties in human compared to mouse neurons. Finally, using biophysical modeling, we provide evidence that h-channels promote the transfer of theta frequencies from dendrite-to-soma in human L3 pyramidal neurons. Thus, h-channels contribute to between-species differences in a fundamental neuronal property.}, langid = {english} } @@ -304,6 +381,7 @@ @article{laddScalingBenchmarkingEvolutionary2022 pages = {882552}, issn = {1662-5196}, doi = {10.3389/fninf.2022.882552}, + urldate = {2023-02-28}, abstract = {Single neuron models are fundamental for computational modeling of the brain's neuronal networks, and understanding how ion channel dynamics mediate neural function. A challenge in defining such models is determining biophysically realistic channel distributions. Here, we present an efficient, highly parallel evolutionary algorithm for developing such models, named NeuroGPU-EA. NeuroGPU-EA uses CPUs and GPUs concurrently to simulate and evaluate neuron membrane potentials with respect to multiple stimuli. We demonstrate a logarithmic cost for scaling the stimuli used in the fitting procedure. NeuroGPU-EA outperforms the typically used CPU based evolutionary algorithm by a factor of 10 on a series of scaling benchmarks. We report observed performance bottlenecks and propose mitigation strategies. 
Finally, we also discuss the potential of this method for efficient simulation and evaluation of electrophysiological waveforms.}, langid = {english} } @@ -320,6 +398,7 @@ @article{linaroCellTypespecificMechanisms2022a pages = {e1010071}, issn = {1553-7358}, doi = {10.1371/journal.pcbi.1010071}, + urldate = {2023-02-28}, abstract = {The transformation of synaptic input into action potential output is a fundamental single-cell computation resulting from the complex interaction of distinct cellular morphology and the unique expression profile of ion channels that define the cellular phenotype. Experimental studies aimed at uncovering the mechanisms of the transfer function have led to important insights, yet are limited in scope by technical feasibility, making biophysical simulations an attractive complementary approach to push the boundaries in our understanding of cellular computation. Here we take a data-driven approach by utilizing high-resolution morphological reconstructions and patch-clamp electrophysiology data together with a multi-objective optimization algorithm to build two populations of biophysically detailed models of murine hippocampal CA3 pyramidal neurons based on the two principal cell types that comprise this region. We evaluated the performance of these models and find that our approach quantitatively matches the cell type-specific firing phenotypes and recapitulate the intrinsic population-level variability in the data. Moreover, we confirm that the conductance values found by the optimization algorithm are consistent with differentially expressed ion channel genes in single-cell transcriptomic data for the two cell types. We then use these models to investigate the cell type-specific biophysical properties involved in the generation of complex-spiking output driven by synaptic input through an information-theoretic treatment of their respective transfer functions. 
Our simulations identify a host of cell type-specific biophysical mechanisms that define the morpho-functional phenotype to shape the cellular transfer function and place these findings in the context of a role for bursting in CA3 recurrent network synchronization dynamics.}, langid = {english} } @@ -334,6 +413,7 @@ @inproceedings{linaroModellingEffectsEarly2020a publisher = {{IEEE}}, address = {{Seville, Spain}}, doi = {10.1109/ISCAS45731.2020.9180633}, + urldate = {2023-02-28}, abstract = {In recent years, a novel approach based on multiobjective optimization has been developed to automatically tune biophysically realistic, multi-compartmental neuron models starting from electrophysiological recordings. Here, we apply this methodology to the optimization of model neurons capable of reproducing the reduced excitability observed in experiments carried out in cortical pyramidal cells in a rodent model of fetal alcohol spectrum disorder. We find that both control and ethanol-exposed model cells present an excellent match with the experiments in terms of membrane voltage dynamics, with the latter group displaying a small but significant rightward shift of their current-frequency relationship. We identify a possible interplay between model parameters and cellular morphology and suggest future improvements to better capture the features of dendritic voltage dynamics.}, isbn = {978-1-72813-320-1}, langid = {english} @@ -350,6 +430,7 @@ @article{martimengualEfficientLowPassDendroSomatic2020a pages = {8799--8815}, issn = {0270-6474, 1529-2401}, doi = {10.1523/JNEUROSCI.3028-19.2020}, + urldate = {2023-02-28}, abstract = {Signal propagation in the dendrites of many neurons, including cortical pyramidal neurons in sensory cortex, is characterized by strong attenuation toward the soma. 
In contrast, using dual whole-cell recordings from the apical dendrite and soma of layer 5 (L5) pyramidal neurons in the anterior cingulate cortex (ACC) of adult male mice we found good coupling, particularly of slow subthreshold potentials like NMDA spikes or trains of EPSPs from dendrite to soma. Only the fastest EPSPs in the ACC were reduced to a similar degree as in primary somatosensory cortex, revealing differential low-pass filtering capabilities. Furthermore, L5 pyramidal neurons in the ACC did not exhibit dendritic Ca 2+ spikes as prominently found in the apical dendrite of S1 (somatosensory cortex) pyramidal neurons. Fitting the experimental data to a NEURON model revealed that the specific distribution of I leak , I ir , I m , and I h was sufficient to explain the electrotonic dendritic structure causing a leaky distal dendritic compartment with correspondingly low input resistance and a compact perisomatic region, resulting in a decoupling of distal tuft branches from each other while at the same time efficiently connecting them to the soma. Our results give a biophysically plausible explanation of how a class of prefrontal cortical pyramidal neurons achieve efficient integration of subthreshold distal synaptic inputs compared with the same cell type in sensory cortices. SIGNIFICANCE STATEMENT Understanding cortical computation requires the understanding of its fundamental computational subunits. Layer 5 pyramidal neurons are the main output neurons of the cortex, integrating synaptic inputs across different cortical layers. Their elaborate dendritic tree receives, propagates, and transforms synaptic inputs into action potential output. We found good coupling of slow subthreshold potentials like NMDA spikes or trains of EPSPs from the distal apical dendrite to the soma in pyramidal neurons in the ACC, which was significantly better compared with S1. 
This suggests that frontal pyramidal neurons use a different integration scheme compared with the same cell type in somatosensory cortex, which has important implications for our understanding of information processing across different parts of the neocortex.}, langid = {english} } @@ -366,6 +447,7 @@ @article{masoliCerebellarGolgiCell2020a pages = {e1007937}, issn = {1553-7358}, doi = {10.1371/journal.pcbi.1007937}, + urldate = {2023-02-28}, abstract = {The Golgi cells are the main inhibitory interneurons of the cerebellar granular layer. Although recent works have highlighted the complexity of their dendritic organization and synaptic inputs, the mechanisms through which these neurons integrate complex input patterns remained unknown. Here we have used 8 detailed morphological reconstructions to develop multicompartmental models of Golgi cells, in which Na, Ca, and K channels were distributed along dendrites, soma, axonal initial segment and axon. The models faithfully reproduced a rich pattern of electrophysiological and pharmacological properties and predicted the operating mechanisms of these neurons. Basal dendrites turned out to be more tightly electrically coupled to the axon initial segment than apical dendrites. During synaptic transmission, parallel fibers caused slow Ca-dependent depolarizations in apical dendrites that boosted the axon initial segment encoder and Na-spike backpropagation into basal dendrites, while inhibitory synapses effectively shunted backpropagating currents. 
This oriented dendritic processing set up a coincidence detector controlling voltage-dependent NMDA receptor unblock in basal dendrites, which, by regulating local calcium influx, may provide the basis for spike-timing dependent plasticity anticipated by theory.}, langid = {english} } @@ -378,6 +460,7 @@ @techreport{masoliHumanOutperformMouse2023 month = mar, institution = {{Neuroscience}}, doi = {10.1101/2023.03.08.531672}, + urldate = {2023-03-14}, abstract = {Purkinje cells (PC) of the cerebellum are amongst the largest neurons of the brain and have been extensively investigated in rodents. However, their morphological and physiological properties in humans are still poorly understood. Here, we have taken advantage of high-resolution morphological reconstructions and of unique electrophysiological recordings of human PCs ex vivo to generate computational models and estimate computational capacity. An inter-species comparison showed that human PCs had similar fractal structure but were bigger than mouse PCs. Consequently, given a similar spine density (2/micrometer), human PCs hosted about 5 times more dendritic spines. Moreover, human had higher dendritic complexity than mouse PCs and usually emitted 2-3 main dendritic trunks instead than 1. Intrinsic electroresponsiveness was similar in the two species but model simulations revealed that the dendrites generated \textasciitilde 6.5 times (n=51 vs. n=8) more combinations of independent input patterns in human than mouse PCs leading to an exponential 2n increase in Shannon information. 
Thus, while during evolution human PCs maintained similar patterns of spike discharge as in rodents, they developed more complex dendrites enhancing computational capacity up to the limit of 10 billion times.}, langid = {english} } @@ -393,6 +476,7 @@ @article{masoliParameterTuningDifferentiates2020a pages = {222}, issn = {2399-3642}, doi = {10.1038/s42003-020-0953-x}, + urldate = {2023-02-28}, abstract = {Abstract The cerebellar granule cells (GrCs) are classically described as a homogeneous neuronal population discharging regularly without adaptation. We show that GrCs in fact generate diverse response patterns to current injection and synaptic activation, ranging from adaptation to acceleration of firing. Adaptation was predicted by parameter optimization in detailed computational models based on available knowledge on GrC ionic channels. The models also predicted that acceleration required additional mechanisms. We found that yet unrecognized TRPM4 currents specifically accounted for firing acceleration and that adapting GrCs outperformed accelerating GrCs in transmitting high-frequency mossy fiber (MF) bursts over a background discharge. This implied that GrC subtypes identified by their electroresponsiveness corresponded to specific neurotransmitter release probability values. Simulations showed that fine-tuning of pre- and post-synaptic parameters generated effective MF-GrC transmission channels, which could enrich the processing of input spike patterns and enhance spatio-temporal recoding at the cerebellar input stage.}, langid = {english} } @@ -407,6 +491,7 @@ @article{masoliSingleNeuronOptimization2017a volume = {11}, issn = {1662-5102}, doi = {10.3389/fncel.2017.00071}, + urldate = {2022-06-07}, abstract = {In realistic neuronal modeling, once the ionic channel complement has been defined, the maximum ionic conductance (Gi-max) values need to be tuned in order to match the firing pattern revealed by electrophysiological recordings. 
Recently, selection/mutation genetic algorithms have been proposed to efficiently and automatically tune these parameters. Nonetheless, since similar firing patterns can be achieved through different combinations of Gi-max values, it is not clear how well these algorithms approximate the corresponding properties of real cells. Here we have evaluated the issue by exploiting a unique opportunity offered by the cerebellar granule cell (GrC), which is electrotonically compact and has therefore allowed the direct experimental measurement of ionic currents. Previous models were constructed using empirical tuning of Gi-max values to match the original data set. Here, by using repetitive discharge patterns as a template, the optimization procedure yielded models that closely approximated the experimental Gi-max values. These models, in addition to repetitive firing, captured additional features, including inward rectification, near-threshold oscillations, and resonance, which were not used as features. Thus, parameter optimization using genetic algorithms provided an efficient modeling strategy for reconstructing the biophysical properties of neurons and for the subsequent reconstruction of large-scale neuronal network models.}, langid = {english} } @@ -419,6 +504,7 @@ @techreport{michielsElectrophysiologyPredictionSingle2020a month = feb, institution = {{Neuroscience}}, doi = {10.1101/2020.02.04.933697}, + urldate = {2023-02-28}, abstract = {Electrophysiology data acquisition of single neurons represents a key factor for the understanding of neuronal dynamics. 
However, the traditional method to acquire this data is through patch-clamp technology, which presents serious scalability flaws due to its slowness and complexity to record at fine-grained spatial precision (dendrites and axon).}, langid = {english} } @@ -435,6 +521,7 @@ @article{migliorePhysiologicalVariabilityChannel2018a pages = {e1006423}, issn = {1553-7358}, doi = {10.1371/journal.pcbi.1006423}, + urldate = {2022-06-07}, abstract = {Every neuron is part of a network, exerting its function by transforming multiple spatiotemporal synaptic input patterns into a single spiking output. This function is specified by the particular shape and passive electrical properties of the neuronal membrane, and the composition and spatial distribution of ion channels across its processes. For a variety of physiological or pathological reasons, the intrinsic input/output function may change during a neuron's lifetime. This process results in high variability in the peak specific conductance of ion channels in individual neurons. The mechanisms responsible for this variability are not well understood, although there are clear indications from experiments and modeling that degeneracy and correlation among multiple channels may be involved. Here, we studied this issue in biophysical models of hippocampal CA1 pyramidal neurons and interneurons. Using a unified data-driven simulation workflow and starting from a set of experimental recordings and morphological reconstructions obtained from rats, we built and analyzed several ensembles of morphologically and biophysically accurate single cell models with intrinsic electrophysiological properties consistent with experimental findings. The results suggest that the set of conductances expressed in any given hippocampal neuron may be considered as belonging to two groups: one subset is responsible for the major characteristics of the firing behavior in each population and the other is responsible for a robust degeneracy. 
Analysis of the model neurons suggests several experimentally testable predictions related to the combination and relative proportion of the different conductances that should be expressed on the membrane of different types of neurons for them to fulfill their role in the hippocampus circuitry.}, langid = {english} } @@ -449,6 +536,7 @@ @inproceedings{mohacsiUnifiedFrameworkApplication2020a publisher = {{IEEE}}, address = {{Glasgow, United Kingdom}}, doi = {10.1109/IJCNN48605.2020.9206692}, + urldate = {2023-02-28}, abstract = {Automated parameter search has become a standard method in the modeling of neural systems. These studies could potentially take advantage of recent developments in nonlinear optimization, and the availability of software packages containing high-quality implementations of algorithms that proved useful in other domains. However, a systematic comparison of the available algorithms for problems that are typical in neuroscience has not been performed.}, isbn = {978-1-72816-926-2}, langid = {english} @@ -465,6 +553,7 @@ @article{mosherCellularClassesHuman2020a pages = {3536-3551.e6}, issn = {22111247}, doi = {10.1016/j.celrep.2020.02.027}, + urldate = {2023-02-28}, abstract = {Determining cell types is critical for understanding neural circuits but remains elusive in the living human brain. Current approaches discriminate units into putative cell classes using features of the extracellular action potential (EAP); in absence of ground truth data, this remains a problematic procedure. We find that EAPs in deep structures of the brain exhibit robust and systematic variability during the cardiac cycle. These cardiac-related features refine neural classification. We use these features to link bio-realistic models generated from in vitro human wholecell recordings of morphologically classified neurons to in vivo recordings. 
We differentiate aspiny inhibitory and spiny excitatory human hippocampal neurons and, in a second stage, demonstrate that cardiac-motion features reveal two types of spiny neurons with distinct intrinsic electrophysiological properties and phase-locking characteristics to endogenous oscillations. This multi-modal approach markedly improves cell classification in humans, offers interpretable cell classes, and is applicable to other brain areas and species.}, langid = {english} } @@ -480,6 +569,7 @@ @article{octeauTransientConsequentialIncreases2019a pages = {2249-2261.e7}, issn = {22111247}, doi = {10.1016/j.celrep.2019.04.078}, + urldate = {2022-06-07}, abstract = {Channelrhodopsin2 (ChR2) optogenetic excitation is widely used to study neurons, astrocytes, and circuits. Using complementary approaches in situ and in vivo, we found that ChR2 stimulation leads to significant transient elevation of extracellular potassium ions by \$5 mM. Such elevations were detected in ChR2-expressing mice, following local in vivo expression of ChR2(H134R) with adeno-associated viruses (AAVs), in different brain areas and when ChR2 was expressed in neurons or astrocytes. In particular, ChR2-mediated excitation of striatal astrocytes was sufficient to increase medium spiny neuron (MSN) excitability and immediate early gene expression. The effects on MSN excitability were recapitulated in silico with a computational MSN model and detected in vivo as increased action potential firing in awake, behaving mice. We show that transient, physiologically consequential increases in extracellular potassium ions accompany ChR2 optogenetic excitation. 
This coincidental effect may be important to consider during astrocyte studies employing ChR2 to interrogate neural circuits and animal behavior.}, langid = {english} } @@ -492,6 +582,7 @@ @techreport{revaUniversalWorkflowCreation2022 month = dec, institution = {{Neuroscience}}, doi = {10.1101/2022.12.13.520234}, + urldate = {2023-02-28}, abstract = {Detailed single neuron modeling is widely used to study neuronal functions. While cellular and functional diversity across the mammalian cortex is vast, most of the available computational tools are dedicated to the reproduction of a small set of specific features characteristic of a single neuron. Here, we present a generalized automated workflow for the creation of robust electrical models and illustrate its performance by building cell models for the rat somatosensory cortex (SSCx). Each model is based on a 3D morphological reconstruction and a set of ionic mechanisms specific to the cell type. We use an evolutionary algorithm to optimize passive and active ionic parameters to match the electrophysiological features extracted from whole-cell patch-clamp recordings. To shed light on which parameters are constrained by experimental data and which could be degenerate, we perform a parameter sensitivity analysis. We also validate the optimized models against additional experimental stimuli and assess their generalizability on a population of morphologies with the same morphological type. With this workflow, we generate SSCx neuronal models producing the variability of neuronal responses. Due to its versatility, our workflow can be used to build robust biophysical models of any neuronal type.}, langid = {english} } @@ -507,10 +598,24 @@ @article{rizzaStellateCellComputational2021a pages = {3873}, issn = {2045-2322}, doi = {10.1038/s41598-021-83209-w}, + urldate = {2023-02-28}, abstract = {Abstract The functional properties of cerebellar stellate cells and the way they regulate molecular layer activity are still unclear. 
We have measured stellate cells electroresponsiveness and their activation by parallel fiber bursts. Stellate cells showed intrinsic pacemaking, along with characteristic responses to depolarization and hyperpolarization, and showed a marked short-term facilitation during repetitive parallel fiber transmission. Spikes were emitted after a lag and only at high frequency, making stellate cells to operate as delay-high-pass filters. A detailed computational model summarizing these physiological properties allowed to explore different functional configurations of the parallel fiber\textemdash stellate cell\textemdash Purkinje cell circuit. Simulations showed that, following parallel fiber stimulation, Purkinje cells almost linearly increased their response with input frequency, but such an increase was inhibited by stellate cells, which leveled the Purkinje cell gain curve to its 4~Hz value. When reciprocal inhibitory connections between stellate cells were activated, the control of stellate cells over Purkinje cell discharge was maintained only at very high frequencies. These simulations thus predict a new role for stellate cells, which could endow the molecular layer with low-pass and band-pass filtering properties regulating Purkinje cell gain and, along with this, also burst delay and the burst-pause responses pattern.}, langid = {english} } +@techreport{romaniCommunitybasedReconstructionSimulation2023, + type = {Preprint}, + title = {Community-Based {{Reconstruction}} and {{Simulation}} of a {{Full-scale Model}} of {{Region CA1}} of {{Rat Hippocampus}}}, + author = {Romani, Armando and Antonietti, Alberto and Bella, Davide and Budd, Julian and Giacalone, Elisabetta and Kurban, Kerem and S{\'a}ray, S{\'a}ra and Abdellah, Marwan and Arnaudon, Alexis and Boci, Elvis and Colangelo, Cristina and Courcol, Jean-Denis and Delemontex, Thomas and Ecker, Andr{\'a}s and Falck, Joanne and Favreau, Cyrille and Gevaert, Michael and Hernando, Juan B. 
and Herttuainen, Joni and Ivaska, Genrich and Kanari, Lida and Kaufmann, Anna-Kristin and King, James Gonzalo and Kumbhar, Pramod and Lange, Sigrun and Lu, Huanxiang and Lupascu, Carmen Alina and Migliore, Rosanna and Petitjean, Fabien and Planas, Judit and Rai, Pranav and Ramaswamy, Srikanth and Reimann, Michael W. and Riquelme, Juan Luis and Guerrero, Nadir Rom{\'a}n and Shi, Ying and Sood, Vishal and Sy, Mohameth Fran{\c c}ois and Van Geit, Werner and Vanherpe, Liesbeth and Freund, Tam{\'a}s F. and Mercer, Audrey and Muller, Eilif and Sch{\"u}rmann, Felix and Thomson, Alex M. and Migliore, Michele and K{\'a}li, Szabolcs and Markram, Henry}, + year = {2023}, + month = may, + institution = {{Neuroscience}}, + doi = {10.1101/2023.05.17.541167}, + urldate = {2023-05-24}, + abstract = {Abstract The CA1 region of the hippocampus is one of the most studied regions of the rodent brain, thought to play an important role in cognitive functions such as memory and spatial navigation. Despite a wealth of experimental data on its structure and function, it can be challenging to reconcile information obtained from diverse experimental approaches. To address this challenge, we present a community-driven, full-scale in silico model of the rat CA1 that integrates a broad range of experimental data, from synapse to network, including the reconstruction of its principal afferents, the Schaffer collaterals, and a model of the effects that acetylcholine has on the system. We have tested and validated each model component and the final network model, and made input data, assumptions, and strategies explicit and transparent. The flexibility of the model allows scientists to address a range of scientific questions. In this article, we describe the methods used to set up simulations that reproduce and extend in vitro and in vivo experiments. 
Among several applications in the article, we focus on theta rhythm, a prominent hippocampal oscillation associated with various behavioral correlates and use our computer model to reproduce and reconcile experimental findings. Finally, we make data, code and model available through the hippocampushub.eu portal, which also provides an extensive set of analyses of the model and a user-friendly interface to facilitate adoption and usage. This neuroscience community-driven model represents a valuable tool for integrating diverse experimental data and provides a foundation for further research into the complex workings of the hippocampal CA1 region.}, + langid = {english} +} + @incollection{romaniReconstructionHippocampus2022, title = {Reconstruction of the {{Hippocampus}}}, booktitle = {Computational {{Modelling}} of the {{Brain}}}, @@ -522,6 +627,7 @@ @incollection{romaniReconstructionHippocampus2022 publisher = {{Springer International Publishing}}, address = {{Cham}}, doi = {10.1007/978-3-030-89439-9_11}, + urldate = {2023-02-28}, abstract = {The hippocampus is a widely studied brain region thought to play an important role in higher cognitive functions such as learning, memory, and navigation. The amount of data on this region increases every day and delineates a complex and fragmented picture, but an integrated understanding of hippocampal function remains elusive. Computational methods can help to move the research forward, and reconstructing a full-scale model of the hippocampus is a challenging yet feasible task that the research community should undertake.}, isbn = {978-3-030-89438-2 978-3-030-89439-9}, langid = {english} @@ -539,6 +645,7 @@ @article{rumbellDimensionsControlSubthreshold2019a pages = {e1007375}, issn = {1553-7358}, doi = {10.1371/journal.pcbi.1007375}, + urldate = {2023-02-28}, abstract = {Dopaminergic neurons (DAs) of the rodent substantia nigra pars compacta (SNc) display varied electrophysiological properties in vitro. 
Despite this, projection patterns and functional inputs from DAs to other structures are conserved, so in vivo delivery of consistent, well-timed dopamine modulation to downstream circuits must be coordinated. Here we show robust coordination by linear parameter controllers, discovered through powerful mathematical analyses of data and models, and from which consistent control of DA subthreshold oscillations (STOs) and spontaneous firing emerges. These units of control represent coordinated intracellular variables, sufficient to regulate complex cellular properties with radical simplicity. Using an evolutionary algorithm and dimensionality reduction, we discovered metaparameters, which when regressed against STO features, revealed a 2dimensional control plane for the neuron's 22-dimensional parameter space that fully maps the natural range of DA subthreshold electrophysiology. This plane provided a basis for spiking currents to reproduce a large range of the naturally occurring spontaneous firing characteristics of SNc DAs. From it we easily produced a unique population of models, derived using unbiased parameter search, that show good generalization to channel blockade and compensatory intracellular mechanisms. From this population of models, we then discovered low-dimensional controllers for regulating spontaneous firing properties, and gain insight into how currents active in different voltage regimes interact to produce the emergent activity of SNc DAs. Our methods therefore reveal simple regulators of neuronal function lurking in the complexity of combined ion channel dynamics.}, langid = {english} } @@ -556,6 +663,7 @@ @article{sarayHippoUnitSoftwareTool2021a pages = {e1008114}, issn = {1553-7358}, doi = {10.1371/journal.pcbi.1008114}, + urldate = {2023-02-28}, abstract = {Anatomically and biophysically detailed data-driven neuronal models have become widely used tools for understanding and predicting the behavior and function of neurons. 
Due to the increasing availability of experimental data from anatomical and electrophysiological measurements as well as the growing number of computational and software tools that enable accurate neuronal modeling, there are now a large number of different models of many cell types available in the literature. These models were usually built to capture a few important or interesting properties of the given neuron type, and it is often unknown how they would behave outside their original context. In addition, there is currently no simple way of quantitatively comparing different models regarding how closely they match specific experimental observations. This limits the evaluation, re-use and further development of the existing models. Further, the development of new models could also be significantly facilitated by the ability to rapidly test the behavior of model candidates against the relevant collection of experimental data. We address these problems for the representative case of the CA1 pyramidal cell of the rat hippocampus by developing an open-source Python test suite, which makes it possible to automatically and systematically test multiple properties of models by making quantitative comparisons between the models and electrophysiological data. The tests cover various aspects of somatic behavior, and signal propagation and integration in apical dendrites. To demonstrate the utility of our approach, we applied our tests to compare the behavior of several different rat hippocampal CA1 pyramidal cell models from the ModelDB database against electrophysiological data available in the literature, and evaluated how well these models match experimental observations in different domains. 
We also show how we employed the test suite to aid the development of models within the European Human Brain Project (HBP), and describe the integration of the tests into the validation framework developed in the HBP, with the aim of facilitating more reproducible and transparent model building in the neuroscience community.}, langid = {english} } @@ -568,6 +676,7 @@ @techreport{schneider-mizellChandelierCellAnatomy2020b month = apr, institution = {{Neuroscience}}, doi = {10.1101/2020.03.31.018952}, + urldate = {2023-02-28}, abstract = {The activity and connectivity of inhibitory cells has a profound impact on the operation of neuronal networks. While the average connectivity of many inhibitory cell types has been characterized, we still lack an understanding of how individual interneurons distribute their synapses onto their targets and how heterogeneous the inhibition is onto different individual excitatory neurons. Here, we use large-scale volumetric electron microscopy (EM) and functional imaging to address this question for chandelier cells in layer 2/3 of mouse visual cortex. Using dense morphological reconstructions from EM, we mapped the complete chandelier input onto 153 pyramidal neurons. We find that the number of input synapses is highly variable across the population, but the variability is correlated with structural features of the target neuron: soma depth, soma size, and the number of perisomatic synapses received. Functionally, we found that chandelier cell activity in vivo was highly correlated and tracks pupil diameter, a proxy for arousal state. We propose that chandelier cells provide a global signal whose strength is individually adjusted for each target neuron. 
This approach, combining comprehensive structural analysis with functional recordings of identified cell types, will be a powerful tool to uncover the wiring rules across the diversity of cortical cell types.}, langid = {english} } @@ -582,6 +691,7 @@ @article{schneider-mizellStructureFunctionAxoaxonic2021 pages = {e73783}, issn = {2050-084X}, doi = {10.7554/eLife.73783}, + urldate = {2022-06-07}, abstract = {Inhibitory neurons in mammalian cortex exhibit diverse physiological, morphological, molecular, and connectivity signatures. While considerable work has measured the average connectivity of several interneuron classes, there remains a fundamental lack of understanding of the connectivity distribution of distinct inhibitory cell types with synaptic resolution, how it relates to properties of target cells, and how it affects function. Here, we used large-scale electron microscopy and functional imaging to address these questions for chandelier cells in layer 2/3 of the mouse visual cortex. With dense reconstructions from electron microscopy, we mapped the complete chandelier input onto 153 pyramidal neurons. We found that synapse number is highly variable across the population and is correlated with several structural features of the target neuron. This variability in the number of axo-axonic ChC synapses is higher than the variability seen in perisomatic inhibition. Biophysical simulations show that the observed pattern of axo-axonic inhibition is particularly effective in controlling excitatory output when excitation and inhibition are co-active. Finally, we measured chandelier cell activity in awake animals using a cell-type-specific calcium imaging approach and saw highly correlated activity across chandelier cells. In the same experiments, in vivo chandelier population activity correlated with pupil dilation, a proxy for arousal. 
Together, these results suggest that chandelier cells provide a circuit-wide signal whose strength is adjusted relative to the properties of target neurons.}, langid = {english} } @@ -597,6 +707,7 @@ @incollection{schurmannComputationalConceptsReconstructing2022 publisher = {{Springer International Publishing}}, address = {{Cham}}, doi = {10.1007/978-3-030-89439-9_10}, + urldate = {2023-02-28}, abstract = {Abstract It has previously been shown that it is possible to derive a new class of biophysically detailed brain tissue models when one computationally analyzes and exploits the interdependencies or the multi-modal and multi-scale organization of the brain. These reconstructions, sometimes referred to as digital twins, enable a spectrum of scientific investigations. Building such models has become possible because of increase in quantitative data but also advances in computational capabilities, algorithmic and methodological innovations. This chapter presents the computational science concepts that provide the foundation to the data-driven approach to reconstructing and simulating brain tissue as developed by the EPFL Blue Brain Project, which was originally applied to neocortical microcircuitry and extended to other brain regions. Accordingly, the chapter covers aspects such as a knowledge graph-based data organization and the importance of the concept of a dataset release. We illustrate algorithmic advances in finding suitable parameters for electrical models of neurons or how spatial constraints can be exploited for predicting synaptic connections. Furthermore, we explain how in silico experimentation with such models necessitates specific addressing schemes or requires strategies for an efficient simulation. The entire data-driven approach relies on the systematic validation of the model. 
We conclude by discussing complementary strategies that not only enable judging the fidelity of the model but also form the basis for its systematic refinements.}, isbn = {978-3-030-89438-2 978-3-030-89439-9}, langid = {english} @@ -612,6 +723,7 @@ @article{sekulicIntegrationWithinCellExperimental2020a pages = {277}, issn = {1662-5102}, doi = {10.3389/fncel.2020.00277}, + urldate = {2023-02-28}, langid = {english} } @@ -626,6 +738,7 @@ @article{shapiraStatisticalEmulationNeural2022 pages = {789962}, issn = {2624-909X}, doi = {10.3389/fdata.2022.789962}, + urldate = {2022-06-07}, abstract = {Many scientific systems are studied using computer codes that simulate the phenomena of interest. Computer simulation enables scientists to study a broad range of possible conditions, generating large quantities of data at a faster rate than the laboratory. Computer models are widespread in neuroscience, where they are used to mimic brain function at different levels. These models offer a variety of new possibilities for the neuroscientist, but also numerous challenges, such as: where to sample the input space for the simulator, how to make sense of the data that is generated, and how to estimate unknown parameters in the model. Statistical emulation can be a valuable complement to simulator-based research. Emulators are able to mimic the simulator, often with a much smaller computational burden and they are especially valuable for parameter estimation, which may require many simulator evaluations. This work compares different statistical models that address these challenges, and applies them to simulations of neocortical L2/3 large basket cells, created and run with the NEURON simulator in the context of the European Human Brain Project. 
The novelty of our approach is the use of fast empirical emulators, which have the ability to accelerate the optimization process for the simulator and to identify which inputs (in this case, different membrane ion channels) are most influential in affecting simulated features. These contributions are complementary, as knowledge of the important features can further improve the optimization process. Subsequent research, conducted after the process is completed, will gain efficiency by focusing on these inputs.}, langid = {english} } @@ -638,6 +751,7 @@ @techreport{sunReducedOrienslacunosumMoleculare2022 month = oct, institution = {{Neuroscience}}, doi = {10.1101/2022.10.20.513073}, + urldate = {2023-02-28}, abstract = {ABSTRACT Conductance-based models have played an important role in the development of modern neuroscience. These mathematical models are powerful ``tools'' that enable theoretical explorations in experimentally untenable situations, and can lead to the development of novel hypotheses and predictions. With advances in cell imaging and computational power, multi-compartment models with morphological accuracy are becoming common practice. However, as more biological details are added, they make extensive explorations and analyses more challenging largely due to their huge computational expense. Here, we focus on oriens-lacunosum/moleculare (OLM) cell models. OLM cells can contribute to functionally relevant theta rhythms in the hippocampus by virtue of their ability to express spiking resonance at theta frequencies, but what characteristics underlie this is far from clear. We converted a previously developed detailed multi-compartment OLM cell model into a reduced single compartment model that retained biophysical fidelity with its underlying ion currents. We showed that the reduced OLM cell model can capture complex output that includes spiking resonance in in vivo -like scenarios as previously obtained with the multi-compartment model. 
Using the reduced model, we were able to greatly expand our in vivo -like scenarios. Applying spike-triggered average analyses, we were thus able to determine that it is a combination of hyperpolarization-activated cation and muscarinic type potassium currents that specifically allow OLM cells to exhibit spiking resonance at theta frequencies. Further, we developed a robust Kalman Filtering (KF) method to estimate parameters of the reduced model in real-time. We showed that it may be possible to directly estimate conductance parameters from experiments since this KF method can reliably extract parameter values from model voltage recordings. Overall, our work showcases how the contribution of cellular biophysical current details could be determined and assessed for spiking resonance. As well, our work shows that it may be possible to directly extract these parameters from current clamp voltage recordings.}, langid = {english} } @@ -650,6 +764,7 @@ @techreport{wilbersStructuralFunctionalSpecializations2022 month = nov, institution = {{Neuroscience}}, doi = {10.1101/2022.11.29.518193}, + urldate = {2023-02-28}, abstract = {(Word count 150) In rodent cortical networks, fast spiking interneurons (FSINs) provide fast inhibition that synchronizes neuronal activity and is critical for cognitive function. Fast synchronization frequencies are evolutionary conserved in the expanded human neocortex, despite larger neuron-to-neuron distances that challenge fast input-output transfer functions of FSINs. Here, we test which mechanistic specializations of large human FSINs explain their fast-signaling properties in human cortex. 
With morphological reconstructions, multi-patch recordings, and biophysical modeling we find that despite three-fold longer dendritic path lengths, human FSINs maintain fast inhibition between connected pyramidal neurons through several mechanisms: stronger synapse strength of excitatory inputs, larger dendrite diameter with reduced complexity, faster AP initiation, and faster and larger inhibitory output, while Na+ current activation /inactivation properties are similar. These adaptations underlie short input-output delays in fast inhibition of human pyramidal neurons through FSINs, explaining how cortical synchronization frequencies are conserved despite expanded and sparse network topology of human cortex.}, langid = {english} } @@ -664,6 +779,7 @@ @article{wyboDatadrivenReductionDendritic2021a pages = {e60936}, issn = {2050-084X}, doi = {10.7554/eLife.60936}, + urldate = {2023-02-28}, abstract = {Dendrites shape information flow in neurons. Yet, there is little consensus on the level of spatial complexity at which they operate. Through carefully chosen parameter fits, solvable in the least-squares sense, we obtain accurate reduced compartmental models at any level of complexity. We show that (back-propagating) action potentials, Ca2+ spikes, and N-methyl-Daspartate spikes can all be reproduced with few compartments. We also investigate whether afferent spatial connectivity motifs admit simplification by ablating targeted branches and grouping affected synapses onto the next proximal dendrite. 
We find that voltage in the remaining branches is reproduced if temporal conductance fluctuations stay below a limit that depends on the average difference in input resistance between the ablated branches and the next proximal dendrite.}, langid = {english} } @@ -679,6 +795,7 @@ @article{yaoReducedInhibitionDepression2022 pages = {110232}, issn = {22111247}, doi = {10.1016/j.celrep.2021.110232}, + urldate = {2022-06-07}, abstract = {Cortical processing depends on finely tuned excitatory and inhibitory connections in neuronal microcircuits. Reduced inhibition by somatostatin-expressing interneurons is a key component of altered inhibition associated with treatment-resistant major depressive disorder (depression), which is implicated in cognitive deficits and rumination, but the link remains to be better established mechanistically in humans. Here we test the effect of reduced somatostatin interneuron-mediated inhibition on cortical processing in human neuronal microcircuits using a data-driven computational approach. We integrate human cellular, circuit, and gene expression data to generate detailed models of human cortical microcircuits in health and depression. We simulate microcircuit baseline and response activity and find a reduced signal-to-noise ratio and increased false/failed detection of stimuli due to a higher baseline activity in depression. 
We thus apply models of human cortical microcircuits to demonstrate mechanistically how reduced inhibition impairs cortical processing in depression, providing quantitative links between altered inhibition and cognitive deficits.}, langid = {english} } diff --git a/misc/github_wiki/creates_publication_list_markdown.py b/misc/github_wiki/creates_publication_list_markdown.py index 58c8775a..924ff239 100644 --- a/misc/github_wiki/creates_publication_list_markdown.py +++ b/misc/github_wiki/creates_publication_list_markdown.py @@ -1,10 +1,31 @@ -"""Creates markdown github wiki from bibtex files.""" +"""Creates markdown github wiki from bibtex files. + +Use this version of pybtex for this code to work as expected: https://bitbucket.org/aurelienjaquier/pybtex/src/custom-style/ +""" import re from pathlib import Path from pybtex import PybtexEngine +from pybtex.style.formatting.unsrt import Style as OriginalStyle +from pybtex.style.template import field, sentence, tag + + +class Style(OriginalStyle): + """Style similar to unsrt, but with bold titles and sorting by date.""" + default_sorting_style = 'year_month' # must have custom pybtex to use this + + def format_title(self, e, which_field, as_sentence=True): + formatted_title = field( + which_field, apply_func=lambda text: text.capitalize() + ) + formatted_title = tag('b') [ formatted_title ] + if as_sentence: + return sentence [ formatted_title ] + else: + return formatted_title + def put_bullet_points(input): """Replace references by bullet points.""" @@ -16,48 +37,41 @@ def put_bullet_points(input): bibtex_folder = working_directory / "bibtex" output_path = working_directory / "output" / "gh_wiki.md" +uses_BPO = bibtex_folder / "uses_BPO.bib" # from zotero +uses_BPO_extra = bibtex_folder / "uses_BPO_extra.bib" # extra custom +mentions_BPO = bibtex_folder / "mentions_BPO.bib" +mentions_BPO_extra = bibtex_folder / "mentions_BPO_extra.bib" +thesis_uses_BPO = bibtex_folder / "thesis_uses_BPO.bib" +thesis_mentions_BPO = 
bibtex_folder / "thesis_mentions_BPO.bib" +poster_uses_BPO = bibtex_folder / "poster_uses_BPO.bib" + + # style should have number references for them to be replaced later by regex -# e.g. unsrt -style = "unsrt" +# e.g. "unsrt" +style = Style # -- turn bibtex files into markdown -- # engine = PybtexEngine() -# bibtex from zotero -md_use_bpo = engine.format_from_file( - bibtex_folder / "uses_BPO.bib", style=style, output_backend="markdown" -) -# bibtex custom (bibtex output from paper not good) -md_use_bpo_extra = engine.format_from_file( - bibtex_folder / "uses_BPO_extra.bib", style=style, output_backend="markdown" -) -# bibtex from zotero -md_mentions_bpo = engine.format_from_file( - bibtex_folder / "mentions_BPO.bib", style=style, output_backend="markdown" +md_uses_bpo = engine.format_from_files( + [uses_BPO, uses_BPO_extra], style=style, output_backend="markdown" ) -# bibtex custom (bibtex output from paper not good) -md_mentions_bpo_extra = engine.format_from_file( - bibtex_folder / "mentions_BPO_extra.bib", style=style, output_backend="markdown" +md_mentions_bpo = engine.format_from_files( + [mentions_BPO, mentions_BPO_extra], style=style, output_backend="markdown" ) - -# thesis (custom bibtex) md_thesis_uses_BPO = engine.format_from_file( - bibtex_folder / "thesis_uses_BPO.bib", style=style, output_backend="markdown" + thesis_uses_BPO, style=style, output_backend="markdown" ) md_thesis_mentions_BPO = engine.format_from_file( - bibtex_folder / "thesis_mentions_BPO.bib", style=style, output_backend="markdown" + thesis_mentions_BPO, style=style, output_backend="markdown" ) - -# poster (custom bibtex) md_poster_uses_BPO = engine.format_from_file( - bibtex_folder / "poster_uses_BPO.bib", style=style, output_backend="markdown" + poster_uses_BPO, style=style, output_backend="markdown" ) # -- replace references by bullet points -- # -md_use_bpo = put_bullet_points(md_use_bpo) -md_use_bpo_extra = put_bullet_points(md_use_bpo_extra) +md_uses_bpo = 
put_bullet_points(md_uses_bpo) md_mentions_bpo = put_bullet_points(md_mentions_bpo) -md_mentions_bpo_extra = put_bullet_points(md_mentions_bpo_extra) md_thesis_uses_BPO = put_bullet_points(md_thesis_uses_BPO) md_thesis_mentions_BPO = put_bullet_points(md_thesis_mentions_BPO) md_poster_uses_BPO = put_bullet_points(md_poster_uses_BPO) @@ -67,10 +81,10 @@ def put_bullet_points(input): ## Scientific papers that use BluePyOpt -{md_use_bpo}{md_use_bpo_extra} +{md_uses_bpo} ## Scientific papers that mention BluePyOpt -{md_mentions_bpo}{md_mentions_bpo_extra} +{md_mentions_bpo} ## Theses that use BluePyOpt {md_thesis_uses_BPO} @@ -83,7 +97,7 @@ def put_bullet_points(input): """ -# -- write down markwodn wiki -- # +# -- write down markdown wiki -- # output_path.parent.mkdir(parents=True, exist_ok=True) with open(output_path, "w") as f: f.write(output)