Skip to content

Commit

Permalink
Merge branch 'master' into 1uc/optimize-diam-area
Browse files Browse the repository at this point in the history
  • Loading branch information
1uc authored Jun 21, 2024
2 parents a350cee + 9dd489c commit 0b45c4d
Show file tree
Hide file tree
Showing 18 changed files with 122 additions and 130 deletions.
8 changes: 0 additions & 8 deletions .github/workflows/docs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -78,11 +78,3 @@ jobs:
- name: live debug session on failure (manual steps required, check `.github/docs.yml`)
if: failure() && contains(github.event.pull_request.title, 'live-debug-docs')
uses: mxschmitt/action-tmate@v3

- name: Deploy 🚀
uses: JamesIves/github-pages-deploy-action@v4
if: env.OK_TO_DEPLOY_DOCS == 'true' && github.ref == 'refs/heads/master'
with:
branch: gh-pages # The branch the action should deploy to.
folder: ${{runner.workspace}}/nrn/docs/_build # The folder the action should deploy.
single-commit: true #have a single commit on the deployment branch instead of maintaining the full history
2 changes: 1 addition & 1 deletion docs/courses/mpi_parallelization.rst
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,6 @@ Unfortunately MPI can't be a part of the binary installation because I don't kno
Going Further
----------

The ring model from the `above ModelDB entry <https://modeldb.science/96444>`_ is a good next step. See also the documentation for the `ParallelContext <https://nrn.readthedocs.io/en/latest/hoc/modelspec/programmatic/network/parcon.html?highlight=parallelcontext>`_ class, especialy the subset of methods gathered under the `ParallelNetwork <https://nrn.readthedocs.io/en/latest/hoc/modelspec/programmatic/network/parcon.html?highlight=parallelcontext>`_ heading. A large portion of the `ParallelNetManager <https://nrn.readthedocs.io/en/latest/hoc/modelspec/programmatic/network/parnet.html?highlight=parallelnetmanager>`_ wrapper is better off done directly from the underlying ParallelContext though it can be mined for interesting pieces. A good place to find the most recent idioms is the NEURON implementation of the Vogels and Abbott model found in the `Brette et al. ModelDB entry <https://senselab.med.yale.edu/ModelDB/ShowModel?model=83319#tabs-1>`_. However, to run in parallel, the NetCon delay between cells needs to be set greater than zero.
The ring model from the `above ModelDB entry <https://modeldb.science/96444>`_ is a good next step. See also the documentation for the `ParallelContext <https://nrn.readthedocs.io/en/latest/hoc/modelspec/programmatic/network/parcon.html?highlight=parallelcontext>`_ class, especially the subset of methods gathered under the `ParallelNetwork <https://nrn.readthedocs.io/en/latest/hoc/modelspec/programmatic/network/parcon.html?highlight=parallelcontext>`_ heading. A large portion of the `ParallelNetManager <https://nrn.readthedocs.io/en/latest/hoc/modelspec/programmatic/network/parnet.html?highlight=parallelnetmanager>`_ wrapper is better off done directly from the underlying ParallelContext though it can be mined for interesting pieces. A good place to find the most recent idioms is the NEURON implementation of the Vogels and Abbott model found in the `Brette et al. ModelDB entry <https://modeldb.science/83319>`_. However, to run in parallel, the NetCon delay between cells needs to be set greater than zero.


2 changes: 1 addition & 1 deletion docs/guide/how_to_get_started_with_neuron.rst
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ To learn how to use NMODL to add new mechanisms to NEURON:
NEURON comes with a bunch of mod files that can serve as starting points for "programming by example." Under MSWin the default mechanisms (hh, pas, expsyn etc.) are in ``c:\nrn\src\nrnoc`` (on my Linux box this is ``/usr/local/src/nrn-x.x/src/nrnoc``). A large collection of mod files is in ``c:\nrn\examples\nrniv\nmodl`` (Linux ``/usr/local/src/nrn-x.x/share/examples/nrniv/nmodl``).

4.
You may also find useful examples in `ModelDB <https://smodeldb.science>`_.
You may also find useful examples in `ModelDB <https://modeldb.science>`_.

For courses about NEURON, see the :ref:`Course Exercises <exercises2018>` page and the :ref:`Training Videos <training_videos>` page.

Expand Down
2 changes: 1 addition & 1 deletion external/nmodl
Submodule nmodl updated 116 files
2 changes: 1 addition & 1 deletion packaging/python/test_wheels.sh
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@ $python_exe -m pip install --upgrade pip
# install numpy, pytest and neuron
# we install setuptools because since python 3.12 it is no more installed
# by default
$python_exe -m pip install numpy pytest setuptools
$python_exe -m pip install "numpy<2" pytest setuptools
$python_exe -m pip install $python_wheel
$python_exe -m pip show neuron || $python_exe -m pip show neuron-nightly

Expand Down
2 changes: 1 addition & 1 deletion src/neuron/cache/mechanism_range.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ struct MechanismInstance: MechanismRange<NumFloatingPointFields, NumDatumFields>
MechanismInstance(Prop* prop)
: base_type{_nrn_mechanism_get_type(prop), mechanism::_get::_current_row(prop), 0} {
if (!prop) {
// grrr...see cagkftab test where setdata is not called(?) and extcall_prop is null(?)
// see cagkftab test where setdata is not called(?) and extcall_prop is null(?)
return;
}
indices_to_cache(_nrn_mechanism_get_type(prop), [this, prop](auto field) {
Expand Down
2 changes: 1 addition & 1 deletion src/nmodl/deriv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -161,7 +161,7 @@ void solv_diffeq(Item* qsol,
// derivimplicit_thread
Sprintf(buf,
"%s %s%s_thread(%d, _slist%d, _dlist%d, neuron::scopmath::row_view{_ml, _iml}, %s, "
"_ml, _iml, _ppvar, _thread, _nt);\n%s",
"_ml, _iml, _ppvar, _thread, _globals, _nt);\n%s",
deriv1_advance,
ssprefix,
method->name,
Expand Down
8 changes: 8 additions & 0 deletions src/nmodl/noccout.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -536,6 +536,8 @@ void c_out_vectorize() {
P("_ni = _ml_arg->_nodeindices;\n");
P("_cntml = _ml_arg->_nodecount;\n");
P("_thread = _ml_arg->_thread;\n");
P("double* _globals = nullptr;\n");
P("if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); }\n");
/*check_tables();*/
P("for (_iml = 0; _iml < _cntml; ++_iml) {\n");
P(" _ppvar = _ml_arg->_pdata[_iml];\n");
Expand Down Expand Up @@ -598,6 +600,8 @@ void c_out_vectorize() {
P("_ni = _ml_arg->_nodeindices;\n");
P("_cntml = _ml_arg->_nodecount;\n");
P("_thread = _ml_arg->_thread;\n");
P("double* _globals = nullptr;\n");
P("if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); }\n");
P("for (_iml = 0; _iml < _cntml; ++_iml) {\n");
P(" _ppvar = _ml_arg->_pdata[_iml];\n");
ext_vdef();
Expand Down Expand Up @@ -664,6 +668,8 @@ void c_out_vectorize() {
P("_ni = _ml_arg->_nodeindices;\n");
P("_cntml = _ml_arg->_nodecount;\n");
P("_thread = _ml_arg->_thread;\n");
P("double* _globals = nullptr;\n");
P("if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); }\n");
P("for (_iml = 0; _iml < _cntml; ++_iml) {\n");
if (electrode_current) {
P(" _nd = _ml_arg->_nodelist[_iml];\n");
Expand Down Expand Up @@ -700,6 +706,8 @@ void c_out_vectorize() {
P("_ni = _ml_arg->_nodeindices;\n");
P("size_t _cntml = _ml_arg->_nodecount;\n");
P("_thread = _ml_arg->_thread;\n");
P("double* _globals = nullptr;\n");
P("if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); }\n");
P("for (size_t _iml = 0; _iml < _cntml; ++_iml) {\n");
P(" _ppvar = _ml_arg->_pdata[_iml];\n");
P(" _nd = _ml_arg->_nodelist[_iml];\n");
Expand Down
92 changes: 53 additions & 39 deletions src/nmodl/nocpout.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -296,12 +296,12 @@ void parout() {
if (vectorize) {
Lappendstr(defs_list,
"\n\
#define _threadargscomma_ _ml, _iml, _ppvar, _thread, _nt,\n\
#define _threadargsprotocomma_ Memb_list* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, NrnThread* _nt,\n\
#define _internalthreadargsprotocomma_ _nrn_mechanism_cache_range* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, NrnThread* _nt,\n\
#define _threadargs_ _ml, _iml, _ppvar, _thread, _nt\n\
#define _threadargsproto_ Memb_list* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, NrnThread* _nt\n\
#define _internalthreadargsproto_ _nrn_mechanism_cache_range* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, NrnThread* _nt\n\
#define _threadargscomma_ _ml, _iml, _ppvar, _thread, _globals, _nt,\n\
#define _threadargsprotocomma_ Memb_list* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, double* _globals, NrnThread* _nt,\n\
#define _internalthreadargsprotocomma_ _nrn_mechanism_cache_range* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, double* _globals, NrnThread* _nt,\n\
#define _threadargs_ _ml, _iml, _ppvar, _thread, _globals, _nt\n\
#define _threadargsproto_ Memb_list* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, double* _globals, NrnThread* _nt\n\
#define _internalthreadargsproto_ _nrn_mechanism_cache_range* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, double* _globals, NrnThread* _nt\n\
");
} else {
Lappendstr(defs_list,
Expand Down Expand Up @@ -544,11 +544,6 @@ extern void nrn_promote(Prop*, int, int);\n\
}
}

emit_check_table_thread = 0;
if (vectorize && check_tables_threads(defs_list)) {
emit_check_table_thread = 1;
}

/* per thread top LOCAL */
/* except those that are marked assigned_to_ == 2 stay static double */
if (vectorize && toplocal_) {
Expand Down Expand Up @@ -601,7 +596,7 @@ extern void nrn_promote(Prop*, int, int);\n\
}
/* per thread global data */
gind = 0;
if (vectorize)
if (vectorize) {
SYMLISTITER {
s = SYM(q);
if (s->nrntype & (NRNGLOBAL) && s->assigned_to_ == 1) {
Expand All @@ -612,8 +607,15 @@ extern void nrn_promote(Prop*, int, int);\n\
}
}
}
}
/* double scalars declared internally */
Lappendstr(defs_list, "/* declare global and static user variables */\n");
Sprintf(buf, "#define gind %d\n", gind);
Lappendstr(defs_list, buf);
if (!gind) {
Sprintf(buf, "#define _gth 0\n");
Lappendstr(defs_list, buf);
}
if (gind) {
Sprintf(buf,
"static int _thread1data_inuse = 0;\nstatic double _thread1data[%d];\n#define _gth "
Expand Down Expand Up @@ -644,7 +646,7 @@ extern void nrn_promote(Prop*, int, int);\n\
if (s->subtype & ARRAY) {
Sprintf(buf,
"#define %s%s (_thread1data + %d)\n\
#define %s (_thread[_gth].get<double*>() + %d)\n",
#define %s (_globals + %d)\n",
s->name,
suffix,
gind,
Expand All @@ -653,7 +655,7 @@ extern void nrn_promote(Prop*, int, int);\n\
} else {
Sprintf(buf,
"#define %s%s _thread1data[%d]\n\
#define %s _thread[_gth].get<double*>()[%d]\n",
#define %s _globals[%d]\n",
s->name,
suffix,
gind,
Expand Down Expand Up @@ -684,6 +686,11 @@ extern void nrn_promote(Prop*, int, int);\n\
}
}

emit_check_table_thread = 0;
if (vectorize && check_tables_threads(defs_list)) {
emit_check_table_thread = 1;
}

Lappendstr(defs_list, "/* some parameters have upper and lower limits */\n");
Lappendstr(defs_list, "static HocParmLimits _hoc_parm_limits[] = {\n");
SYMLISTITER {
Expand Down Expand Up @@ -739,17 +746,13 @@ extern void nrn_promote(Prop*, int, int);\n\
Lappendstr(defs_list, "{0, 0, 0}\n};\n");
Lappendstr(defs_list, "static double _sav_indep;\n");
if (ba_index_ > 0) {
Lappendstr(defs_list,
"static void _ba1(Node*_nd, Datum* _ppd, Datum* _thread, NrnThread* _nt, "
"Memb_list* _ml, size_t _iml, _nrn_model_sorted_token const&)");
for (i = 2; i <= ba_index_; ++i) {
for (int i = 1; i <= ba_index_; ++i) {
Sprintf(buf,
", _ba%d(Node*_nd, Datum* _ppd, Datum* _thread, NrnThread* _nt, Memb_list* "
"_ml, size_t _iml, _nrn_model_sorted_token const&)",
"static void _ba%d(Node*_nd, Datum* _ppd, Datum* _thread, NrnThread* _nt, "
"Memb_list* _ml, size_t _iml, _nrn_model_sorted_token const&);\n",
i);
Lappendstr(defs_list, buf);
}
Lappendstr(defs_list, ";\n");
}

/******** what normally goes into cabvars.h structures */
Expand Down Expand Up @@ -1579,6 +1582,8 @@ void ldifusreg() {
"_nrn_model_sorted_token const& _sorted_token) {\n"
" _nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _ml_arg->_type()};\n"
" auto* const _ml = &_lmr;\n"
" double* _globals = nullptr;\n"
" if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); }\n"
" *_pdvol = ",
n,
n);
Expand Down Expand Up @@ -1933,10 +1938,13 @@ void bablk(int ba, int type, Item* q1, Item* q2) {
insertstr(q1, buf);
q = q1->next;
vectorize_substitute(insertstr(q, ""), "Datum* _ppvar;");
qv = insertstr(q,
"_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, "
"_ml_arg->_type()}; auto* const "
"_ml = &_lmr;\n");
qv = insertstr(
q,
"_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, "
"_ml_arg->_type()}; auto* const "
"_ml = &_lmr;\n"
"double* _globals = nullptr;\n"
"if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); }\n");
qv = insertstr(q, "_ppvar = _ppd;\n");
movelist(qb, q2, procfunc);

Expand Down Expand Up @@ -2773,18 +2781,21 @@ void out_nt_ml_frag(List* p) {
vectorize_substitute(lappendstr(p, ""), " Datum* _ppvar;\n");
vectorize_substitute(lappendstr(p, ""), " size_t _iml;");
vectorize_substitute(lappendstr(p, ""), " _nrn_mechanism_cache_range* _ml;");
Lappendstr(p,
" Node* _nd{};\n"
" double _v{};\n"
" int _cntml;\n"
" _nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n"
" _ml = &_lmr;\n"
" _cntml = _ml_arg->_nodecount;\n"
" Datum *_thread{_ml_arg->_thread};\n"
" for (_iml = 0; _iml < _cntml; ++_iml) {\n"
" _ppvar = _ml_arg->_pdata[_iml];\n"
" _nd = _ml_arg->_nodelist[_iml];\n"
" v = NODEV(_nd);\n");
Lappendstr(
p,
" Node* _nd{};\n"
" double _v{};\n"
" int _cntml;\n"
" _nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n"
" _ml = &_lmr;\n"
" _cntml = _ml_arg->_nodecount;\n"
" Datum *_thread{_ml_arg->_thread};\n"
" double* _globals = nullptr;\n"
" if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); }\n"
" for (_iml = 0; _iml < _cntml; ++_iml) {\n"
" _ppvar = _ml_arg->_pdata[_iml];\n"
" _nd = _ml_arg->_nodelist[_iml];\n"
" v = NODEV(_nd);\n");
}

void cvode_emit_interface() {
Expand Down Expand Up @@ -3046,7 +3057,9 @@ void net_receive(Item* qarg, Item* qp1, Item* qp2, Item* qstmt, Item* qend) {
" auto* const _ml = &_ml_real;\n"
" size_t const _iml{};\n");
q = insertstr(qstmt, " _ppvar = _nrn_mechanism_access_dparam(_pnt->_prop);\n");
vectorize_substitute(insertstr(q, ""), " _thread = nullptr; _nt = (NrnThread*)_pnt->_vnt;");
vectorize_substitute(
insertstr(q, ""),
" _thread = nullptr; double* _globals = nullptr; _nt = (NrnThread*)_pnt->_vnt;");
if (debugging_) {
if (0) {
insertstr(qstmt, " assert(_tsav <= t); _tsav = t;");
Expand Down Expand Up @@ -3137,7 +3150,8 @@ void net_init(Item* qinit, Item* qp2) {
" auto* const _ml = &_ml_real;\n"
" size_t const _iml{};\n"
" Datum* _ppvar = _nrn_mechanism_access_dparam(_pnt->_prop);\n"
" Datum* _thread = (Datum*)0;\n"
" Datum* _thread = nullptr;\n"
" double* _globals = nullptr;\n"
" NrnThread* _nt = (NrnThread*)_pnt->_vnt;\n");
if (net_init_q1_) {
diag("NET_RECEIVE block can contain only one INITIAL block", (char*) 0);
Expand Down
52 changes: 31 additions & 21 deletions src/nmodl/parsact.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -374,13 +374,15 @@ int check_tables_threads(List* p) {
Sprintf(buf, "\nstatic void %s(_internalthreadargsproto_);", STR(q));
lappendstr(p, buf);
}
lappendstr(p,
"\n"
"static void _check_table_thread(_threadargsprotocomma_ int _type, "
"_nrn_model_sorted_token const& _sorted_token) {\n"
" _nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml, _type};\n"
" {\n"
" auto* const _ml = &_lmr;\n");
lappendstr(
p,
"\n"
"static void _check_table_thread(_threadargsprotocomma_ int _type, "
"_nrn_model_sorted_token const& _sorted_token) {\n"
" if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); } \n"
" _nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml, _type};\n"
" {\n"
" auto* const _ml = &_lmr;\n");
ITERATE(q, check_table_thread_list) {
Sprintf(buf, " %s(_threadargs_);\n", STR(q));
lappendstr(p, buf);
Expand Down Expand Up @@ -760,13 +762,16 @@ static void funchack(Symbol* n, bool ishoc, int hack) {
" hoc_execerror(\"POINT_PROCESS data instance not valid\", NULL);\n"
" }\n");
q = lappendstr(procfunc, " _setdata(_p);\n");
vectorize_substitute(q,
" _nrn_mechanism_cache_instance _ml_real{_p};\n"
" auto* const _ml = &_ml_real;\n"
" size_t const _iml{};\n"
" _ppvar = _nrn_mechanism_access_dparam(_p);\n"
" _thread = _extcall_thread.data();\n"
" _nt = static_cast<NrnThread*>(_pnt->_vnt);\n");
vectorize_substitute(
q,
" _nrn_mechanism_cache_instance _ml_real{_p};\n"
" auto* const _ml = &_ml_real;\n"
" size_t const _iml{};\n"
" _ppvar = _nrn_mechanism_access_dparam(_p);\n"
" _thread = _extcall_thread.data();\n"
" double* _globals = nullptr;\n"
" if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); }\n"
" _nt = static_cast<NrnThread*>(_pnt->_vnt);\n");
} else if (ishoc) {
hocfunc_setdata_item(n, lappendstr(procfunc, ""));
vectorize_substitute(
Expand All @@ -776,18 +781,23 @@ static void funchack(Symbol* n, bool ishoc, int hack) {
"size_t const _iml{};\n"
"_ppvar = _local_prop ? _nrn_mechanism_access_dparam(_local_prop) : nullptr;\n"
"_thread = _extcall_thread.data();\n"
"double* _globals = nullptr;\n"
"if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); }\n"
"_nt = nrn_threads;\n");
} else { // _npy_...
q = lappendstr(procfunc,
" neuron::legacy::set_globals_from_prop(_prop, _ml_real, _ml, _iml);\n"
" _ppvar = _nrn_mechanism_access_dparam(_prop);\n");
vectorize_substitute(q,
"_nrn_mechanism_cache_instance _ml_real{_prop};\n"
"auto* const _ml = &_ml_real;\n"
"size_t const _iml{};\n"
"_ppvar = _nrn_mechanism_access_dparam(_prop);\n"
"_thread = _extcall_thread.data();\n"
"_nt = nrn_threads;\n");
vectorize_substitute(
q,
"_nrn_mechanism_cache_instance _ml_real{_prop};\n"
"auto* const _ml = &_ml_real;\n"
"size_t const _iml{};\n"
"_ppvar = _nrn_mechanism_access_dparam(_prop);\n"
"_thread = _extcall_thread.data();\n"
"double* _globals = nullptr;\n"
"if (gind != 0 && _thread != nullptr) { _globals = _thread[_gth].get<double*>(); }\n"
"_nt = nrn_threads;\n");
}
if (n == last_func_using_table) {
qp = lappendstr(procfunc, "");
Expand Down
Loading

0 comments on commit 0b45c4d

Please sign in to comment.