-
Notifications
You must be signed in to change notification settings - Fork 0
/
utils.py
176 lines (152 loc) · 7.33 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
import numpy as np
from neuron import h
from cells import Pyrcell
from random import random
def create_network(n_neurons=4):
    """Build a population of pyramidal cells with somatic voltage recorders.

    :param n_neurons: number of Pyrcell instances to create
    :return: tuple (network, network_rec) where network is an h.List of
        cells and network_rec is an h.List of h.Vectors, each recording
        the membrane potential at the midpoint (0.5) of one cell's soma
    """
    network = h.List()
    network_rec = h.List()
    for _ in range(n_neurons):
        cell = Pyrcell()
        recorder = h.Vector()
        recorder.record(cell.soma(0.5)._ref_v)
        network.append(cell)
        network_rec.append(recorder)
    return network, network_rec
def create_network_L6(n_neurons=4):
    """Build a layer-6 network; currently an alias for create_network.

    A dedicated L6cell-based implementation was commented out (it referenced
    an undefined L6cell class), so this simply delegates to create_network
    and returns Pyrcell-based cells.

    :param n_neurons: number of cells to create
    :return: (network, network_rec) as produced by create_network
    """
    return create_network(n_neurons)
def exponential_connect(weight, n1, n2, selfconnect=True):
    """Draw an exponentially distributed connection-weight matrix.

    :param weight: desired average of the exponential distribution
    :param n1: number of neurons of network 1 (rows)
    :param n2: number of neurons of network 2 (columns)
    :param selfconnect: False if diagonal weights should be zero, True otherwise
    :return: exponentially distributed weight matrix (n1 x n2)
    """
    # Unit-mean exponential samples scaled so the mean equals `weight`.
    weights = np.random.exponential(1, n1 * n2).reshape((n1, n2)) * weight
    if not selfconnect:
        # fill_diagonal works in place and, unlike the former
        # `weights - np.diag(np.diag(weights))`, also handles
        # non-square matrices (n1 != n2) without a broadcast error.
        np.fill_diagonal(weights, 0.0)
    return weights
def constant_connect(weight, n1, n2, selfconnect=True):
    """Build a constant-valued connection-weight matrix.

    :param weight: desired connection weight
    :param n1: number of neurons of network 1 (rows)
    :param n2: number of neurons of network 2 (columns)
    :param selfconnect: False if diagonal weights should be zero, True otherwise
    :return: constant value weight matrix (n1 x n2)
    """
    # dtype=float matches the old `weight * np.ones(...)` result even
    # when `weight` is an int.
    weights = np.full((n1, n2), weight, dtype=float)
    if not selfconnect:
        # fill_diagonal handles non-square matrices (n1 != n2); the former
        # `weights - np.diag(np.diag(weights))` raised a broadcast error there.
        np.fill_diagonal(weights, 0.0)
    return weights
def partial_e_net_connect(net1, net2, net1_prop_b, net1_prop_e, net2_prop_b, net2_prop_e, threshold, delay, weights):
    """
    Partially connect neurons in two networks with excitatory synapse. Only connects the neurons in the proportions given
    :param net1: First network list (h.List()) of neurons
    :param net2: Second network list (h.List()) of neurons
    :param net1_prop_b: Proportion in network 1 by which we begin to connect neurons
    :param net1_prop_e: Proportion in network 1 by which we end to connect neurons
    :param net2_prop_b: Proportion in network 2 by which we begin to connect neurons
    :param net2_prop_e: Proportion in network 2 by which we end to connect neurons
    :param threshold: voltage threshold that generates spike in neuron in net1
    :param delay: time between spike in net1 and PSP in net2 (ms)
    :param weights: matrix of connection weights (strength of connection)
    :return: list of created h.NetCon objects
    """
    synapses = []
    n1, n2 = len(net1), len(net2)
    # Translate the [begin, end) proportions into absolute index ranges.
    src_indices = range(int(n1 * net1_prop_b), int(n1 * net1_prop_e))
    tgt_indices = range(int(n2 * net2_prop_b), int(n2 * net2_prop_e))
    for i in src_indices:
        # Make the source soma the currently accessed section for NetCon.
        net1[i].soma.push()
        for j in tgt_indices:
            conn = h.NetCon(net1[i].soma(0.5)._ref_v, net2[j].synE,
                            threshold, delay, weights[i, j])
            synapses.append(conn)
        h.pop_section()
    return synapses
def topographically_e_connect(net1, net2, net1_prop_b, net1_prop_e, threshold, delay, weights):
    # NEVER TESTED!
    """
    Topographic connectivity. Each cell connect to only one other cell (just one for loop). WARNING: NEVER TESTED
    :param net1: First network list (h.List()) of neurons
    :param net2: Second network list (h.List()) of neurons
    :param net1_prop_b: Proportion in network 1 by which we begin to connect neurons
    :param net1_prop_e: Proportion in network 1 by which we end to connect neurons
    :param threshold: voltage threshold that generates spike in neuron in net1
    :param delay: time between spike in net1 and PSP in net2 (ms)
    :param weights: matrix of connection weights (strength of connection)
    :return: list of created h.NetCon objects (one per connected cell pair)
    """
    synapses = []
    n1 = len(net1)
    # One-to-one mapping: cell i in net1 projects only onto cell i in net2.
    for idx in range(int(n1 * net1_prop_b), int(n1 * net1_prop_e)):
        net1[idx].soma.push()
        conn = h.NetCon(net1[idx].soma(0.5)._ref_v, net2[idx].synE,
                        threshold, delay, weights[idx, idx])
        synapses.append(conn)
        h.pop_section()
    return synapses
def e_net_connect(net1, net2, threshold, delay, weights, prob):
    """
    Connects two networks with an excitatory synapse
    :param net1: First network list (h.List()) of neurons
    :param net2: Second network list (h.List()) of neurons
    :param threshold: voltage threshold that generates spike in neuron in net1
    :param delay: time between spike in net1 and PSP in net2 (ms)
    :param weights: matrix of connection weights (strength of connection)
    :param prob: connection probability
    :return: list of synapses
    """
    synapses = []
    for i, src in enumerate(net1):
        src.soma.push()
        for j, tgt in enumerate(net2):
            # Bernoulli trial per cell pair: connect with probability `prob`.
            if random() >= prob:
                continue
            synapses.append(h.NetCon(src.soma(0.5)._ref_v, tgt.synE,
                                     threshold, delay, weights[i, j]))
        h.pop_section()
    return synapses
def e_net_connect_delay_dist(net1, net2, threshold, delay_distbtn, weights, prob):
    """Probabilistically connect two networks via excitatory synapses,
    using a per-source-neuron delay taken from delay_distbtn.

    :param net1: source network list (h.List()) of neurons
    :param net2: target network list (h.List()) of neurons
    :param threshold: voltage threshold that generates a spike in net1
    :param delay_distbtn: sequence of delays (ms), indexed by source neuron
    :param weights: matrix of connection weights
    :param prob: connection probability per cell pair
    :return: list of created h.NetCon objects
    """
    synapses = []
    for i, src in enumerate(net1):
        src.soma.push()
        for j, tgt in enumerate(net2):
            if random() >= prob:
                continue
            # All connections from source i share the delay delay_distbtn[i].
            synapses.append(h.NetCon(src.soma(0.5)._ref_v, tgt.synE,
                                     threshold, delay_distbtn[i],
                                     weights[i, j]))
        h.pop_section()
    return synapses
def e_ct_net_connect_delay_dist(net1, net2, threshold, delay_distbtn, weights):
    """Fully connect two networks via corticothalamic excitatory synapses
    (synE_CT), using a per-source-neuron delay from delay_distbtn.

    :param net1: source network list (h.List()) of neurons
    :param net2: target network list (h.List()) of neurons
    :param threshold: voltage threshold that generates a spike in net1
    :param delay_distbtn: sequence of delays (ms), indexed by source neuron
    :param weights: matrix of connection weights
    :return: list of created h.NetCon objects
    """
    synapses = []
    for i, src in enumerate(net1):
        src.soma.push()
        for j, tgt in enumerate(net2):
            conn = h.NetCon(src.soma(0.5)._ref_v, tgt.synE_CT,
                            threshold, delay_distbtn[i], weights[i, j])
            synapses.append(conn)
        h.pop_section()
    return synapses
def e_ct_net_connect(net1, net2, threshold, delay, weights):
    """Fully connect two networks via corticothalamic excitatory synapses
    (synE_CT) with a single fixed delay.

    :param net1: source network list (h.List()) of neurons
    :param net2: target network list (h.List()) of neurons
    :param threshold: voltage threshold that generates a spike in net1
    :param delay: time between spike in net1 and PSP in net2 (ms)
    :param weights: matrix of connection weights
    :return: list of created h.NetCon objects
    """
    synapses = []
    for i, src in enumerate(net1):
        src.soma.push()
        for j, tgt in enumerate(net2):
            conn = h.NetCon(src.soma(0.5)._ref_v, tgt.synE_CT,
                            threshold, delay, weights[i, j])
            synapses.append(conn)
        h.pop_section()
    return synapses
def i_net_connect(net1, net2, threshold, delay, weights, prob):
    """Probabilistically connect two networks via inhibitory synapses (synI).

    :param net1: source network list (h.List()) of neurons
    :param net2: target network list (h.List()) of neurons
    :param threshold: voltage threshold that generates a spike in net1
    :param delay: time between spike in net1 and PSP in net2 (ms)
    :param weights: matrix of connection weights
    :param prob: connection probability per cell pair
    :return: list of created h.NetCon objects
    """
    synapses = []
    for i, src in enumerate(net1):
        src.soma.push()
        for j, tgt in enumerate(net2):
            # Bernoulli trial per cell pair: connect with probability `prob`.
            if random() >= prob:
                continue
            synapses.append(h.NetCon(src.soma(0.5)._ref_v, tgt.synI,
                                     threshold, delay, weights[i, j]))
        h.pop_section()
    return synapses