#19 Support multi-scale modeling in NumPy jit mode

Merged
BrainPy merged 30 commits from V1.1.0rc1 into master 2 years ago
  1. .gitignore (+2, -0)
  2. README.md (+50, -17)
  3. TODO.md (+25, -5)
  4. brainpy/__init__.py (+3, -2)
  5. brainpy/analysis/__init__.py (+0, -4)
  6. brainpy/analysis/continuation/__init__.py (+1, -0)
  7. brainpy/analysis/stability.py (+5, -5)
  8. brainpy/analysis/symbolic_analysis/__init__.py (+18, -0)
  9. brainpy/analysis/symbolic_analysis/base.py (+0, -0)
  10. brainpy/analysis/symbolic_analysis/bifurcation.py (+3, -2)
  11. brainpy/analysis/symbolic_analysis/phase_plane.py (+3, -2)
  12. brainpy/analysis/symbolic_analysis/trajectory.py (+0, -0)
  13. brainpy/analysis/utils.py (+6, -4)
  14. brainpy/base/base.py (+12, -5)
  15. brainpy/base/collector.py (+2, -2)
  16. brainpy/dnn/base.py (+1, -1)
  17. brainpy/integrators/ode/wrapper.py (+7, -5)
  18. brainpy/integrators/sde/euler_and_milstein.py (+4, -5)
  19. brainpy/math/jax/compilation.py (+31, -19)
  20. brainpy/math/jax/gradient.py (+6, -2)
  21. brainpy/math/jax/jaxarray.py (+1, -1)
  22. brainpy/math/numpy/ast2numba.py (+737, -256)
  23. brainpy/math/numpy/compilation.py (+129, -39)
  24. brainpy/math/numpy/ndarray.py (+9, -0)
  25. brainpy/math/numpy/ops.py (+18, -0)
  26. brainpy/simulation/__init__.py (+0, -2)
  27. brainpy/simulation/brainobjects/__init__.py (+0, -3)
  28. brainpy/simulation/brainobjects/base.py (+27, -26)
  29. brainpy/simulation/brainobjects/channel.py (+0, -27)
  30. brainpy/simulation/brainobjects/delays.py (+32, -17)
  31. brainpy/simulation/brainobjects/dendrite.py (+0, -16)
  32. brainpy/simulation/brainobjects/molecular.py (+3, -3)
  33. brainpy/simulation/brainobjects/network.py (+1, -1)
  34. brainpy/simulation/brainobjects/neuron.py (+125, -17)
  35. brainpy/simulation/brainobjects/soma.py (+0, -16)
  36. brainpy/simulation/brainobjects/synapse.py (+3, -3)
  37. brainpy/simulation/connectivity/classic_conn.py (+0, -16)
  38. brainpy/simulation/connectivity/custom_conn.py (+4, -2)
  39. brainpy/simulation/monitor.py (+1, -32)
  40. brainpy/simulation/utils.py (+7, -2)
  41. changelog.rst (+18, -3)
  42. develop/benchmark/COBA/COBA-ANNarchy.py (+0, -60)
  43. develop/benchmark/COBA/COBA-Nest.py (+0, -69)
  44. develop/benchmark/COBA/COBA.py (+0, -355)
  45. develop/benchmark/COBA/COBA_brainpy.py (+0, -116)
  46. develop/benchmark/COBA/COBA_brian2.py (+0, -52)
  47. develop/benchmark/COBA/pynn.py (+0, -275)
  48. develop/benchmark/COBAHH/COBAHH_brainpy.py (+0, -149)
  49. develop/benchmark/COBAHH/COBAHH_brian2.py (+0, -85)
  50. develop/benchmark/COBAHH/COBAHH_nest.sli (+0, -174)
  51. develop/benchmark/COBAHH/COBAHH_neuron/COBAHH_neuron.hoc (+0, -52)
  52. develop/benchmark/COBAHH/COBAHH_neuron/hhcell.hoc (+0, -89)
  53. develop/benchmark/COBAHH/COBAHH_neuron/hhchan.ses (+0, -101)
  54. develop/benchmark/COBAHH/COBAHH_neuron/init.hoc (+0, -138)
  55. develop/benchmark/COBAHH/COBAHH_neuron/intrinsic.hoc (+0, -6)
  56. develop/benchmark/COBAHH/COBAHH_neuron/intrinsic.ses (+0, -69)
  57. develop/benchmark/COBAHH/COBAHH_neuron/net.hoc (+0, -74)
  58. develop/benchmark/COBAHH/COBAHH_neuron/netstim.hoc (+0, -74)
  59. develop/benchmark/COBAHH/COBAHH_neuron/perfrun.hoc (+0, -175)
  60. develop/benchmark/COBAHH/COBAHH_neuron/ranstream.hoc (+0, -18)
  61. develop/benchmark/COBAHH/COBAHH_neuron/spkplt.hoc (+0, -14)
  62. develop/benchmark/COBAHH/how_to_run.md (+0, -18)
  63. develop/benchmark/COBAHH/param_nest.sli (+0, -80)
  64. develop/benchmark/CUBA/CUBA_brainpy.py (+0, -113)
  65. develop/benchmark/CUBA/CUBA_brian2.py (+0, -41)
  66. develop/benchmark/scaling_test.py (+0, -141)
  67. develop/conda-recipe/meta.yaml (+0, -42)
  68. docs/apis/math/general.rst (+4, -0)
  69. docs/apis/math/jax.rst (+3, -1)
  70. docs/apis/math/numpy.rst (+3, -1)
  71. docs/apis/simulation/brainobjects.rst (+6, -6)
  72. docs/apis/simulation/connectivity.rst (+18, -0)
  73. docs/apis/simulation/generated/brainpy.simulation.brainobjects.Channel.rst (+0, -36)
  74. docs/apis/simulation/generated/brainpy.simulation.brainobjects.CondNeuGroup.rst (+0, -35)
  75. docs/apis/simulation/generated/brainpy.simulation.brainobjects.ConstantDelay.rst (+0, -36)
  76. docs/apis/simulation/generated/brainpy.simulation.brainobjects.Container.rst (+0, -35)
  77. docs/apis/simulation/generated/brainpy.simulation.brainobjects.Delay.rst (+0, -35)
  78. docs/apis/simulation/generated/brainpy.simulation.brainobjects.Dendrite.rst (+0, -34)
  79. docs/apis/simulation/generated/brainpy.simulation.brainobjects.DynamicSystem.rst (+0, -34)
  80. docs/apis/simulation/generated/brainpy.simulation.brainobjects.Molecular.rst (+0, -34)
  81. docs/apis/simulation/generated/brainpy.simulation.brainobjects.Network.rst (+0, -35)
  82. docs/apis/simulation/generated/brainpy.simulation.brainobjects.NeuGroup.rst (+0, -35)
  83. docs/apis/simulation/generated/brainpy.simulation.brainobjects.Soma.rst (+0, -34)
  84. docs/apis/simulation/generated/brainpy.simulation.brainobjects.TwoEndConn.rst (+0, -36)
  85. docs/apis/simulation/generated/brainpy.simulation.connectivity.All2All.rst (+0, -31)
  86. docs/apis/simulation/generated/brainpy.simulation.connectivity.DOG.rst (+0, -31)
  87. docs/apis/simulation/generated/brainpy.simulation.connectivity.FixedPostNum.rst (+0, -31)
  88. docs/apis/simulation/generated/brainpy.simulation.connectivity.FixedPreNum.rst (+0, -31)
  89. docs/apis/simulation/generated/brainpy.simulation.connectivity.FixedProb.rst (+0, -31)
  90. docs/apis/simulation/generated/brainpy.simulation.connectivity.GaussianProb.rst (+0, -31)
  91. docs/apis/simulation/generated/brainpy.simulation.connectivity.GaussianWeight.rst (+0, -31)
  92. docs/apis/simulation/generated/brainpy.simulation.connectivity.GridEight.rst (+0, -31)
  93. docs/apis/simulation/generated/brainpy.simulation.connectivity.GridFour.rst (+0, -31)
  94. docs/apis/simulation/generated/brainpy.simulation.connectivity.GridN.rst (+0, -31)
  95. docs/apis/simulation/generated/brainpy.simulation.connectivity.One2One.rst (+0, -31)
  96. docs/apis/simulation/generated/brainpy.simulation.connectivity.PowerLaw.rst (+0, -31)
  97. docs/apis/simulation/generated/brainpy.simulation.connectivity.ScaleFreeBA.rst (+0, -31)
  98. docs/apis/simulation/generated/brainpy.simulation.connectivity.ScaleFreeBADual.rst (+0, -31)
  99. docs/apis/simulation/generated/brainpy.simulation.connectivity.SmallWorld.rst (+0, -31)
  100. docs/apis/simulation/generated/brainpy.simulation.connectivity.ij2mat.rst (+0, -6)

.gitignore (+2, -0)

@@ -2,6 +2,7 @@ publishment.md
#experimental/
.vscode

examples/
examples/recurrent_neural_network/neurogym
develop/iconip_paper
develop/benchmark/COBA/results
@@ -19,6 +20,7 @@ docs/images/connection_methods.pptx
*/generated

docs/apis/math/generated
docs/apis/simulation/generated
docs/apis/dnn/_autosummary
docs/quickstart/_autosummary
docs/apis/dnn/generated


README.md (+50, -17)

@@ -18,10 +18,10 @@

`BrainPy` is designed to effectively satisfy your basic requirements:

- *Easy to learn and use*: BrainPy is only based on Python language and has little dependency requirements.
- *Flexible and transparent*: BrainPy endows the users with the fully data/logic flow control. Users can code any logic they want with BrainPy.
- *Extensible*: BrainPy allow users to extend new functionality just based on Python coding. For example, we extend the numerical integration with the ability to do numerical analysis. In such a way, the same code in BrainPy can not only be used for simulation, but also for dynamics analysis.
- *Efficient running speed*: All codes in BrainPy can be just-in-time compiled (based on [JAX](https://github.com/google/jax) and [Numba](https://github.com/numba/)) to run on CPU or GPU devices, thus guaranteeing its running efficiency.
- **Easy to learn and use**: BrainPy is only based on the Python language and has few dependency requirements.
- **Flexible and transparent**: BrainPy endows users with full data/logic flow control. Users can code any logic they want with BrainPy.
- **Extensible**: BrainPy allows users to extend new functionality just based on Python coding. For example, we extend the numerical integration with the ability to do numerical analysis. In this way, the same code in BrainPy can be used not only for simulation, but also for dynamics analysis.
- **Efficient**: All code in BrainPy can be just-in-time compiled (based on [JAX](https://github.com/google/jax) and [Numba](https://github.com/numba/)) to run on CPU or GPU devices, thus guaranteeing its running efficiency.



@@ -40,18 +40,25 @@

**Method 1**: install ``BrainPy`` by using ``pip``:

To install the stable release of BrainPy (V1.0.3), please use

```bash
> pip install -U brain-py
```

To install the latest pre-release version of BrainPy (V1.1.0), please use

```bash
> pip install -U brain-py --pre
```

If you have installed the previous version of BrainPy, please uninstall the older one first

```bash
# If you have installed the previous version of BrainPy,
# please uninstall the old one first
> pip uninstall brainpy-simulator

# Then install the latest version of BrainPy
> pip install -U brain-py
> pip install -U brain-py --pre
```

**Method 2**: install ``BrainPy`` from source:
@@ -79,13 +86,13 @@
## Step 2: useful links

- **Documentation:** https://brainpy.readthedocs.io/
- **Source code:** https://github.com/PKU-NIP-Lab/BrainPy or https://git.openi.org.cn/OpenI/BrainPy
- **Bug reports:** https://github.com/PKU-NIP-Lab/BrainPy/issues or Email to adaduo@outlook.com
- **Examples from papers**: https://brainmodels.readthedocs.io/en/latest/from_papers.html
- **Source code:** https://github.com/PKU-NIP-Lab/BrainPy or https://git.openi.org.cn/OpenI/BrainPy
- **Bug reports:** https://github.com/PKU-NIP-Lab/BrainPy/issues or Email to adaduo@outlook.com
- **Examples from papers**: https://brainmodels.readthedocs.io/en/latest/



## Step 3: comprehensive examples
## Step 3: inspirational examples

Here we list several examples of BrainPy. For more detailed examples and tutorials, please see [**BrainModels**](https://brainmodels.readthedocs.io).

@@ -93,21 +100,46 @@ Here list several examples of BrainPy. More detailed examples and tutorials plea

### Neuron models

- [Hodgkin–Huxley neuron model](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/tensor_backend/neurons/HodgkinHuxley_model.py)
- [Leaky integrate-and-fire neuron model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.neurons.LIF.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/neurons/LIF.py)
- [Exponential integrate-and-fire neuron model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.neurons.ExpIF.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/neurons/ExpIF.py)
- [Quadratic integrate-and-fire neuron model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.neurons.QuaIF.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/neurons/QuaIF.py)
- [Adaptive Quadratic integrate-and-fire model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.neurons.AdQuaIF.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/neurons/AdQuaIF.py)
- [Adaptive Exponential integrate-and-fire model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.neurons.AdExIF.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/neurons/AdExIF.py)
- [Generalized integrate-and-fire model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.neurons.GIF.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/neurons/GIF.py)
- [Hodgkin–Huxley neuron model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.neurons.HH.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/tensor_backend/neurons/HodgkinHuxley_model.py)
- [Izhikevich neuron model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.neurons.Izhikevich.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/neurons/Izhikevich.py)
- [Morris-Lecar neuron model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.neurons.MorrisLecar.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/neurons/MorrisLecar.py)
- [Hindmarsh-Rose bursting neuron model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.neurons.HindmarshRose.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/neurons/HindmarshRose.py)

See [brainmodels.neurons](https://brainmodels.readthedocs.io/en/latest/apis/neurons.html) to find more.



### Synapse models

- [AMPA synapse model](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/tensor_backend/synapses/AMPA_synapse.py)
- [Voltage jump synapse model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.synapses.VoltageJump.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/synapses/voltage_jump.py)
- [Exponential synapse model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.synapses.ExponentialCUBA.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/synapses/exponential.py)
- [Alpha synapse model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.synapses.AlphaCUBA.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/synapses/alpha.py)
- [Dual exponential synapse model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.synapses.DualExpCUBA.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/synapses/dual_exp.py)
- [AMPA synapse model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.synapses.AMPA.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/synapses/AMPA.py)
- [GABAA synapse model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.synapses.GABAa.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/synapses/GABAa.py)
- [NMDA synapse model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.synapses.NMDA.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/synapses/NMDA.py)
- [Short-term plasticity model](https://brainmodels.readthedocs.io/en/latest/apis/generated/brainmodels.synapses.STP.html), [source code](https://github.com/PKU-NIP-Lab/BrainModels/blob/main/brainmodels/synapses/STP.py)

See [brainmodels.synapses](https://brainmodels.readthedocs.io/en/latest/apis/synapses.html) to find more.



### Network models

- [Gamma oscillation network model](https://brainmodels.readthedocs.io/en/latest/from_papers/Wang_1996_gamma_oscillation.html)
- [E/I balanced network model](https://brainmodels.readthedocs.io/en/latest/from_papers/Vreeswijk_1996_EI_net.html)
- [Continuous attractor network model](https://brainmodels.readthedocs.io/en/latest/from_papers/Wu_2008_CANN.html)
- **[CANN]** [*(Si Wu, 2008)* Continuous-attractor Neural Network](https://brainmodels.readthedocs.io/en/latest/examples/CANN/Wu_2008_CANN.html)
- [*(Vreeswijk & Sompolinsky, 1996)* E/I balanced network](https://brainmodels.readthedocs.io/en/latest/examples/EI_nets/Vreeswijk_1996_EI_net.html)
- [*(Sherman & Rinzel, 1992)* Gap junction leads to anti-synchronization](https://brainmodels.readthedocs.io/en/latest/examples/gj_nets/Sherman_1992_gj_antisynchrony.html)
- [*(Wang & Buzsáki, 1996)* Gamma Oscillation](https://brainmodels.readthedocs.io/en/latest/examples/oscillation_synchronization/Wang_1996_gamma_oscillation.html)
- [*(Brunel & Hakim, 1999)* Fast Global Oscillation](https://brainmodels.readthedocs.io/en/latest/examples/oscillation_synchronization/Brunel_Hakim_1999_fast_oscillation.html)
- [*(Diesmann, et, al., 1999)* Synfire Chains](https://brainmodels.readthedocs.io/en/latest/examples/oscillation_synchronization/Diesmann_1999_synfire_chains.html)
- **[Working Memory Model]** [*(Mi, et. al., 2017)* STP for Working Memory Capacity](https://brainmodels.readthedocs.io/en/latest/examples/working_memory/Mi_2017_working_memory_capacity.html)
- **[Working Memory Model]** [*(Bouchacourt & Buschman, 2019)* Flexible Working Memory Model](https://brainmodels.readthedocs.io/en/latest/examples/working_memory/Bouchacourt_2019_Flexible_working_memory.html)



@@ -124,3 +156,4 @@ Here list several examples of BrainPy. More detailed examples and tutorials plea




TODO.md (+25, -5)

@@ -8,12 +8,13 @@



# Version 1.1.0-alpha
# Version 1.1.0



## Base

- [ ] ``vars()``: must check whether it has circular references
- [x] ``Collector``: must check whether the ``(key,value)`` pair are duplicate (done @2021/08/26 by @chaoming)
- [x] ``nodes``: solve the problem of circular reference (done @2021/08/24 by @chaoming)
- [x] ``ints``: get integrators based on all nodes (done @2021/08/24 by @chaoming)
@@ -26,14 +27,16 @@

## Math

- [x] **[Numpy]**: JIT compilation in numpy backend supports ``Base`` and ``Function`` objects (done by chaoming@2021/09/08)
- [ ] **[Numpy]**: support JIT in `fft` sub-module
- [x] **[Numpy]:** support JIT compilation in numpy backend (done @ 2021.09.01 by @chaoming)
- [x] **[Numpy]:** support 'fft' (done @ 2021.09.03 by @chaoming)
- [ ] support to set `dt` in the single object level (i.e., single instance of DynamicSystem)
- [ ] **[JAX]:** "random" module
- [ ] **[JAX]:** math operations
- [ ] **[JAX]:** vmap
- [ ] **[JAX]:** pmap
- [ ] **[JAX]:** control conditions:
- [ ] static_argnums
- [x] **[JAX]:** support 'fft' (done @ 2021.09.03 by @chaoming)
- [x] **[JAX]:** **IMPORTANT!!!** Change API of `grad()` and `value_and_grad()`: There are bugs in the gradient functions. Gradient computation also needs to inspect the variable types. Moreover, it is independent from the JIT function. Therefore, we should pass dynamical variables into the gradient functions too. (done @ 2021.08.26 by @chaoming)
- [x] **[JAX]:** **IMPORTANT!!!** change API of `vars()`: we should refer Dynamical Variables as `Variable`; We can not retrieve every "JaxArray" from `vars()`, otherwise the whole system will waste a lot of time on useless assignments. (done @ 2021.08.25 by @chaoming)
@@ -42,7 +45,7 @@
- [x] register pytree (done @ 2021.06.15 by @chaoming)
- [x] support `ndarray` intrinsic methods:
- [x] functions in NumPy ndarray: any(), all() .... view() (done @ 2021.06.30 by @chaoming)
- [ ] functions in JAX DeviceArray:
- [x] functions in JAX DeviceArray:
- [x] numpy methods in JaxArray (done @ 2021.08.25, @2021.08.28 by @chaoming)
- [x] documentation for JaxArray methods (done @ 2021.08.25 by @chaoming)
- [ ] test for ndarray wrapper
@@ -74,6 +77,9 @@

- [ ] Allow defining the `Soma` object
- [ ] Allow defining the `Dendrite` object
- [x] support to set `dt` in the single object level (i.e., single instance of DynamicSystem) (done @2021.09.05 @chaoming)
- [x] reimplement the ``input_step`` and ``monitor_step`` in a more intuitive way (done @2021.09 by chaoming)
- [x] remove ``_i`` in ``update()`` function, replace ``_i`` with ``_dt``, meaning the dynamical system has the canonical equation form of $dx/dt = f(x, t, dt)$ (done @2021.09 by chaoming)



@@ -102,8 +108,18 @@

## Documentation

- [ ]
- [ ] documentation
- [ ] tutorial for "Numerical Solvers": more precise and intuitive
- [x] tutorial for "JIT compilation": BrainPy's JIT for class objects (done by chaoming@2021.09.08)
- [x] tutorial for "Dynamics Simulation", detail the function of ``brainpy.DynamicalSystem`` (done by chaoming@2021/09/09)
- [x] documentation for ``math`` module (done @2021.09)
- [ ] documentation for BrainModels
- [ ] documentation for neuron models
- [ ] documentation for synapse models
- [ ] documentation for COBA, CUBA synapses
- [ ] detailed documentation for numerical solvers of SDEs
- [ ] doc comments for ODEs, like Euler, RK2, etc. We should provide the detailed mathematical equations, and the corresponding suggestions for the corresponding algorithm.
- [x] doc comments for ODEs, like Euler, RK2, etc. We should provide the detailed mathematical equations, and the corresponding suggestions for the corresponding algorithm. (done @2021.09.01 @chaoming)
- [x] APIs for integrators (done @2021/08/23 by @chaoming)
- [x] installation instruction, especially package dependency (done @2021/08/23 by @chaoming)

@@ -125,7 +141,11 @@

## Others

- [ ] publish `BrainPy` on `"conda-forge"`: https://conda-forge.org/docs/maintainer/adding_pkgs.html#
- [x] publish `BrainPy` on `"conda-forge"`: https://conda-forge.org/docs/maintainer/adding_pkgs.html# (cancelled)









brainpy/__init__.py (+3, -2)

@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-

__version__ = "1.1.0-beta"
__version__ = "1.1.0rc1"


# "base" module
@@ -36,7 +36,8 @@ from . import dnn


# "analysis" module
from . import analysis
from .analysis import symbolic_analysis as sym_analysis
from .analysis import continuation


# "visualization" module


brainpy/analysis/__init__.py (+0, -4)

@@ -6,10 +6,6 @@ This module provides analysis tools for ordinary differential equations.

"""

from .base import *
from .bifurcation import *
from .phase_plane import *
from .solver import *
from .stability import *
from .trajectory import *
from .utils import *

brainpy/analysis/continuation/__init__.py (+1, -0)

@@ -0,0 +1 @@
# -*- coding: utf-8 -*-

brainpy/analysis/stability.py (+5, -5)

@@ -53,9 +53,9 @@ plot_scheme.update({
CENTER_2D: {'color': 'lime'},
STABLE_NODE_2D: {"color": 'tab:red'},
STABLE_FOCUS_2D: {"color": 'tab:purple'},
STABLE_STAR_2D: {'color': 'orange'},
STABLE_STAR_2D: {'color': 'tab:olive'},
STABLE_DEGENERATE_2D: {'color': 'blueviolet'},
UNSTABLE_NODE_2D: {"color": 'tab:olive'},
UNSTABLE_NODE_2D: {"color": 'tab:orange'},
UNSTABLE_FOCUS_2D: {"color": 'tab:cyan'},
UNSTABLE_STAR_2D: {'color': 'green'},
UNSTABLE_DEGENERATE_2D: {'color': 'springgreen'},
@@ -72,13 +72,13 @@ UNSTABLE_FOCUS_3D = 'unstable focus'
UNSTABLE_CENTER_3D = 'unstable center'
UNKNOWN_3D = 'unknown 3d'
plot_scheme.update({
STABLE_POINT_3D: {'color': 'tab:pink'},
STABLE_POINT_3D: {'color': 'tab:gray'},
UNSTABLE_POINT_3D: {'color': 'tab:purple'},
STABLE_NODE_3D: {'color': 'tab:green'},
UNSTABLE_SADDLE_3D: {'color': 'tab:red'},
UNSTABLE_FOCUS_3D: {'color': 'tab:orange'},
UNSTABLE_FOCUS_3D: {'color': 'tab:pink'},
STABLE_FOCUS_3D: {'color': 'tab:purple'},
UNSTABLE_NODE_3D: {'color': 'tab:gray'},
UNSTABLE_NODE_3D: {'color': 'tab:orange'},
UNSTABLE_CENTER_3D: {'color': 'tab:olive'},
UNKNOWN_3D: {'color': 'tab:cyan'},
})


brainpy/analysis/symbolic_analysis/__init__.py (+18, -0)

@@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-


"""
Dynamics analysis with the aid of `SymPy <https://www.sympy.org/en/index.html>`_ symbolic inference.

This module provides basic dynamics analysis for low-dimensional dynamical systems, including

- phase plane analysis (1d or 2d systems)
- bifurcation analysis (1d or 2d systems)
- fast-slow bifurcation analysis (2d or 3d systems)

"""

from .base import *
from .bifurcation import *
from .phase_plane import *
from .trajectory import *
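
For downstream users, the practical effect of this move is an import-path change. A minimal hedged sketch (the `bp.sym_analysis` alias is the one wired up in `brainpy/__init__.py` above; the listed class names are assumed from the wildcard imports):

```python
import brainpy as bp
from brainpy.analysis import symbolic_analysis

# The SymPy-based tools moved from `brainpy.analysis` into the new
# `symbolic_analysis` subpackage, re-exported as `bp.sym_analysis`.
assert bp.sym_analysis is symbolic_analysis
print(dir(bp.sym_analysis))  # e.g. PhasePlane, Bifurcation, Trajectory, ...
```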

brainpy/analysis/base.py → brainpy/analysis/symbolic_analysis/base.py


brainpy/analysis/bifurcation.py → brainpy/analysis/symbolic_analysis/bifurcation.py

@@ -8,8 +8,9 @@ import numpy as np
from mpl_toolkits.mplot3d import Axes3D

from brainpy import errors, math
from brainpy.analysis import base, stability, utils
from brainpy.analysis.trajectory import Trajectory
from brainpy.analysis import stability, utils
from brainpy.analysis.symbolic_analysis import base
from brainpy.analysis.symbolic_analysis.trajectory import Trajectory

logger = logging.getLogger('brainpy.analysis')


brainpy/analysis/phase_plane.py → brainpy/analysis/symbolic_analysis/phase_plane.py

@@ -6,8 +6,9 @@ import matplotlib.pyplot as plt
import numpy as np

from brainpy import errors, math
from brainpy.analysis import base, stability, utils
from brainpy.analysis.trajectory import Trajectory
from brainpy.analysis import stability, utils
from brainpy.analysis.symbolic_analysis import base
from brainpy.analysis.symbolic_analysis.trajectory import Trajectory

logger = logging.getLogger('brainpy.analysis')


brainpy/analysis/trajectory.py → brainpy/analysis/symbolic_analysis/trajectory.py


brainpy/analysis/utils.py (+6, -4)

@@ -10,7 +10,7 @@ import numpy as np

from brainpy import errors, math, tools
from brainpy.integrators import analysis_by_ast, odeint, utils
from brainpy.simulation.brainobjects.base import DynamicSystem
from brainpy.simulation.brainobjects.base import DynamicalSystem

try:
import numba
@@ -85,14 +85,14 @@ def transform_integrals(integrals, method='euler'):

# node of integral
f_node = None
if hasattr(integral, '__self__') and isinstance(integral.__self__, DynamicSystem):
if hasattr(integral, '__self__') and isinstance(integral.__self__, DynamicalSystem):
f_node = integral.__self__

# node of derivative function
func_node = None
if f_node:
func_node = f_node
elif hasattr(func, '__self__') and isinstance(func.__self__, DynamicSystem):
elif hasattr(func, '__self__') and isinstance(func.__self__, DynamicalSystem):
func_node = func.__self__

# code scope
@@ -113,7 +113,7 @@ def transform_integrals(integrals, method='euler'):
target = func_node
for i in range(1, len(split_keys)):
next_target = getattr(target, split_keys[i])
if not isinstance(next_target, DynamicSystem):
if not isinstance(next_target, DynamicalSystem):
break
target = next_target
else:
@@ -165,6 +165,8 @@ def transform_integrals_to_model(integrals, method='euler'):

if callable(integrals):
integrals = [integrals]
if isinstance(integrals, DynamicalSystem):
integrals = list(integrals.ints().unique().values())

integrals, pars_update = transform_integrals(integrals, method=method)
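
To make the new branch concrete, here is a standalone sketch of the input normalization this hunk implements, letting analysis helpers accept a whole model as well as bare integrator functions (it assumes, per `brainpy/base/base.py`, that `ints()` returns a collector of the model's integrators):

```python
from brainpy.simulation.brainobjects.base import DynamicalSystem

def normalize_integrals(integrals):
  # A single integrator function becomes a one-element list.
  if callable(integrals) and not isinstance(integrals, DynamicalSystem):
    integrals = [integrals]
  # A whole model contributes every unique integrator it owns.
  if isinstance(integrals, DynamicalSystem):
    integrals = list(integrals.ints().unique().values())
  return integrals
```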



brainpy/base/base.py (+12, -5)

@@ -8,8 +8,7 @@ __all__ = [
'Base',
]

math = None
DE_INT = None
math = DE_INT = None


class Base(object):
@@ -18,7 +17,7 @@ class Base(object):
The subclass of Base includes:

- ``Module`` in brainpy.dnn.base.py
- ``DynamicSystem`` in brainpy.simulation.brainobjects.base.py
- ``DynamicalSystem`` in brainpy.simulation.brainobjects.base.py

"""

@@ -122,8 +121,11 @@ class Base(object):
nodes.append(v)
for v in nodes:
gather.update(v.nodes(method=method, _paths=_paths))
gather[self.name] = self

elif method == 'relative':
nodes = []
gather[''] = self
for k, v in self.__dict__.items():
if isinstance(v, Base):
path = (id(self), id(v))
@@ -133,7 +135,8 @@ class Base(object):
nodes.append((k, v))
for k, v in nodes:
for k2, v2 in v.nodes(method=method, _paths=_paths).items():
gather[f'{k}.{k2}'] = v2
if k2: gather[f'{k}.{k2}'] = v2

else:
raise ValueError(f'No support for the method of "{method}".')
return gather
@@ -154,8 +157,11 @@ class Base(object):
for node in nodes:
gather[node.name] = node
gather.update(node.nodes(method=method, _paths=_paths))
gather[self.name] = self

elif method == 'relative':
nodes = []
gather[''] = self
for key, node in dict_container.items():
path = (id(self), id(node))
if path not in _paths:
@@ -164,7 +170,8 @@ class Base(object):
nodes.append((key, node))
for key, node in nodes:
for key2, node2 in node.nodes(method=method, _paths=_paths).items():
gather[f'{key}.{key2}'] = node2
if key2: gather[f'{key}.{key2}'] = node2

else:
raise ValueError(f'No support for the method of "{method}".')
return gather
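
A small hedged example of the behavior these hunks produce, assuming `Base` is re-exported as `bp.Base` (the `Host`/`Child` classes and the printed keys are illustrative):

```python
import brainpy as bp

class Child(bp.Base):
  pass

class Host(bp.Base):
  def __init__(self):
    super(Host, self).__init__()
    self.child = Child()

host = Host()
# 'absolute' now also registers the host itself under its unique name:
print(sorted(host.nodes(method='absolute').keys()))  # e.g. ['Child0', 'Host0']
# 'relative' keys nodes by attribute path, with '' denoting the host;
# empty child keys are skipped when composing the f'{k}.{k2}' paths:
print(sorted(host.nodes(method='relative').keys()))  # e.g. ['', 'child']
```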


brainpy/base/collector.py (+2, -2)

@@ -119,8 +119,8 @@ class ArrayCollector(Collector):
with some additional methods to make manipulation
of collections of variables easy. A Collection
is ordered by insertion order. It is the object
returned by DynamicSystem.vars() and used as input
in many DynamicSystem instance: optimizers, Jit, etc..."""
returned by DynamicalSystem.vars() and used as input
in many DynamicalSystem instances: optimizers, JIT, etc..."""

def __setitem__(self, key, value):
"""Overload bracket assignment to catch potential conflicts during assignment."""


brainpy/dnn/base.py (+1, -1)

@@ -101,7 +101,7 @@ class Sequential(Module):

def vars(self, method='absolute'):
"""Collect all the variables (and their names) contained
in the list and its children instance of DynamicSystem.
in the list and its children instance of Module.

Parameters
----------


brainpy/integrators/ode/wrapper.py (+7, -5)

@@ -329,6 +329,7 @@ def exp_euler_wrapper(f, show_code, dt, var_type):
_f_kw: 'the derivative function',
_dt_kw: 'the precision of numerical integration',
'exp': 'the exponential function',
'math': 'the math module',
}
for v in variables:
keywords[f'{v}_new'] = 'the intermediate value'
@@ -341,7 +342,7 @@ def exp_euler_wrapper(f, show_code, dt, var_type):
code_scope = dict(closure_vars.nonlocals)
code_scope.update(dict(closure_vars.globals))
code_scope[_f_kw] = f
code_scope['exp'] = math.exp
code_scope['math'] = math

analysis = separate_variables(f)
variables_for_returns = analysis['variables_for_returns']
@@ -383,17 +384,18 @@ def exp_euler_wrapper(f, show_code, dt, var_type):
linear = sympy.collect(df_expr, var, evaluate=False)[var]
code_lines.append(f' {s_linear.name} = {analysis_by_sympy.sympy2str(linear)}')
# linear exponential
linear_exp = sympy.exp(linear * dt)
code_lines.append(f' {s_linear_exp.name} = {analysis_by_sympy.sympy2str(linear_exp)}')
# linear_exp = sympy.exp(linear * dt)
# code_lines.append(f' {s_linear_exp.name} = {analysis_by_sympy.sympy2str(linear_exp)}')
code_lines.append(f' {s_linear_exp.name} = math.exp({s_linear.name} * {_dt_kw})')
# df part
df_part = (s_linear_exp - 1) / s_linear * s_df
code_lines.append(f' {s_df_part.name} = {analysis_by_sympy.sympy2str(df_part)}')

else:
# linear exponential
code_lines.append(f' {s_linear_exp.name} = {dt} ** 0.5')
code_lines.append(f' {s_linear_exp.name} = {_dt_kw} ** 0.5')
# df part
code_lines.append(f' {s_df_part.name} = {analysis_by_sympy.sympy2str(dt * s_df)}')
code_lines.append(f' {s_df_part.name} = {s_df.name} * {_dt_kw}')

# update expression
update = var + s_df_part
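
For reference, a hand-written sketch of the update rule the generated code now encodes, with the exponential evaluated at run time via `math.exp(linear * dt)` instead of being expanded symbolically by SymPy (the function names below are illustrative, not from the PR):

```python
import math

def exp_euler_step(x, t, dt, f, linear_of):
  # Exponential Euler: x_new = x + (exp(a*dt) - 1) / a * f(x, t),
  # where a = linear_of(x, t) is the linear coefficient of f in x.
  a = linear_of(x, t)
  a_exp = math.exp(a * dt)  # computed at run time, not by SymPy
  return x + (a_exp - 1.0) / a * f(x, t)

# For dx/dt = -x (a = -1) the step is exact: it returns exp(-dt) * x.
print(exp_euler_step(1.0, 0.0, 0.01, lambda x, t: -x, lambda x, t: -1.0))
```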


brainpy/integrators/sde/euler_and_milstein.py (+4, -5)

@@ -139,7 +139,7 @@ class Wrapper(object):
code_scope['f'] = f
code_scope['g'] = g
code_scope['math'] = math
code_scope['exp'] = math.exp
# code_scope['exp'] = math.exp

# 2. code lines
code_lines = [f'def {func_name}({", ".join(arguments)}):']
@@ -209,17 +209,16 @@ class Wrapper(object):
linear = sympy.collect(df_expr, var, evaluate=False)[var]
code_lines.append(f' {s_linear.name} = {analysis_by_sympy.sympy2str(linear)}')
# linear exponential
linear_exp = sympy.exp(linear * dt)
code_lines.append(f' {s_linear_exp.name} = {analysis_by_sympy.sympy2str(linear_exp)}')
code_lines.append(f' {s_linear_exp.name} = math.exp({s_linear.name} * {vdt})')
# df part
df_part = (s_linear_exp - 1) / s_linear * s_df
code_lines.append(f' {s_df_part.name} = {analysis_by_sympy.sympy2str(df_part)}')

else:
# linear exponential
code_lines.append(f' {s_linear_exp.name} = sqrt({dt})')
code_lines.append(f' {s_linear_exp.name} = {vdt}_sqrt')
# df part
code_lines.append(f' {s_df_part.name} = {analysis_by_sympy.sympy2str(dt * s_df)}')
code_lines.append(f' {s_df_part.name} = {s_df.name} * {vdt}')

# update expression
update = var + s_df_part


brainpy/math/jax/compilation.py (+31, -19)

@@ -58,17 +58,21 @@ def _make_jit(func, vars_to_change, vars_needed,

def jit(obj_or_func, vars_to_change=None, vars_needed=None,
static_argnums=None, static_argnames=None, device=None,
backend=None, donate_argnums=(), inline=False):
"""JIT (Just-In-Time) Compilation.
backend=None, donate_argnums=(), inline=False, **kwargs):
"""JIT (Just-In-Time) Compilation for JAX backend.

This function has the same ability to Just-In-Time transform a pure function,
but it can also JIT transform a :py:class:`DynamicSystem`, or a :py:class:`Base` object,
or a bounded method of a :py:class:`Base` object.
This function has the same ability to Just-In-Time compile a pure function,
but it can also JIT compile a :py:class:`brainpy.DynamicalSystem`, or a
:py:class:`brainpy.Base` object, or a bound method of a
:py:class:`brainpy.Base` object.

If you are using "numpy", please refer to the JIT compilation
in NumPy backend `bp.math.numpy.jit() <brainpy.math.numpy.jit.rst>`_.

Examples
--------

You can JIT a :py:class:`DynamicSystem`
You can JIT a :py:class:`brainpy.DynamicalSystem`

>>> import brainpy as bp
>>>
@@ -76,12 +80,12 @@ def jit(obj_or_func, vars_to_change=None, vars_needed=None,
>>> pass
>>> lif = bp.math.jit(LIF(10))

You can JIT a :py:class:`Base` object with ``__call__()`` implementation.
You can JIT a :py:class:`brainpy.Base` object with ``__call__()`` implementation.

>>> mlp = bp.dnn.MLP((10, 100, 10))
>>> jit_mlp = bp.math.jit(mlp)

You can also JIT a bounded method of a :py:class:`Base` object.
You can also JIT a bound method of a :py:class:`brainpy.Base` object.

>>> class Hello(bp.dnn.Module):
>>> def __init__(self):
@@ -96,6 +100,10 @@ def jit(obj_or_func, vars_to_change=None, vars_needed=None,

Further, you can JIT a normal function, just as you do in JAX.

>>> @bp.math.jit
>>> def selu(x, alpha=1.67, lmbda=1.05):
>>> return lmbda * bp.math.where(x > 0, x, alpha * bp.math.exp(x) - alpha)

Parameters
----------
obj_or_func : Base, function
@@ -142,10 +150,10 @@ def jit(obj_or_func, vars_to_change=None, vars_needed=None,
A wrapped version of Base object or function,
set up for just-in-time compilation.
"""
from brainpy.simulation.brainobjects.base import DynamicSystem
from brainpy.simulation.brainobjects.base import DynamicalSystem

if isinstance(obj_or_func, DynamicSystem):
if len(obj_or_func.steps): # DynamicSystem has step functions
if isinstance(obj_or_func, DynamicalSystem):
if len(obj_or_func.steps): # DynamicalSystem has step functions

# dynamical variables
vars_to_change = (vars_to_change or obj_or_func.vars().unique())
@@ -267,7 +275,9 @@ def _make_vmap(func, dyn_vars, rand_vars, in_axes, out_axes,

def vmap(obj_or_func, vars=None, vars_batched=None,
in_axes=0, out_axes=0, axis_name=None, reduce_func=None):
"""Vectorized compile a function or a module to run in parallel on a single device.
"""Vectorization compilation in JAX backend.

Vectorized compile a function or a module to run in parallel on a single device.

Examples
--------
@@ -319,10 +329,10 @@ def vmap(obj_or_func, vars=None, vars_batched=None,
with extra array axes at positions indicated by ``out_axes``.

"""
from brainpy.simulation.brainobjects.base import DynamicSystem
from brainpy.simulation.brainobjects.base import DynamicalSystem

if isinstance(obj_or_func, DynamicSystem):
if len(obj_or_func.steps): # DynamicSystem has step functions
if isinstance(obj_or_func, DynamicalSystem):
if len(obj_or_func.steps): # DynamicalSystem has step functions

# dynamical variables
vars = (vars or obj_or_func.vars().unique())
@@ -517,7 +527,9 @@ def _make_pmap(func, dyn_vars, rand_vars, reduce_func, axis_name=None, in_axes=0
def pmap(obj_or_func, vars=None, axis_name=None, in_axes=0, out_axes=0, static_broadcasted_argnums=(),
devices=None, backend=None, axis_size=None, donate_argnums=(), global_arg_shapes=None,
reduce_func=None):
"""Parallel compile a function or a module to run on multiple devices in parallel.
"""Parallel compilation in JAX backend.

Parallel compile a function or a module to run on multiple devices in parallel.

Parameters
----------
@@ -541,10 +553,10 @@ def pmap(obj_or_func, vars=None, axis_name=None, in_axes=0, out_axes=0, static_b


"""
from brainpy.simulation.brainobjects.base import DynamicSystem
from brainpy.simulation.brainobjects.base import DynamicalSystem

if isinstance(obj_or_func, DynamicSystem):
if len(obj_or_func.steps): # DynamicSystem has step functions
if isinstance(obj_or_func, DynamicalSystem):
if len(obj_or_func.steps): # DynamicalSystem has step functions

# dynamical variables
all_vars = (vars or obj_or_func.vars().unique())
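
A hedged usage sketch of these parameters on a toy stateful object. It assumes `bp.math.use_backend('jax')` selects this module and that `Base` is exported as `bp.Base`; the `Counter` class is illustrative, not taken from the PR:

```python
import brainpy as bp

bp.math.use_backend('jax')  # assumption: routes bp.math to the JAX backend

class Counter(bp.Base):
  def __init__(self):
    super(Counter, self).__init__()
    self.n = bp.math.Variable(bp.math.zeros(1))

  def __call__(self):
    self.n += 1

c = Counter()
# vars_to_change: Variables the compiled call may write;
# vars_needed: Variables it only reads (none here).
jit_c = bp.math.jit(c, vars_to_change=c.vars().unique())
jit_c()
print(c.n)  # the managed Variable is updated in place
```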


brainpy/math/jax/gradient.py (+6, -2)

@@ -16,7 +16,9 @@ __all__ = [

def grad(func, vars=None, argnums=None, has_aux=None,
holomorphic=False, allow_int=False, reduce_axes=()):
"""Creates a function which evaluates the gradient of ``fun``.
"""Automatic Gradient Computation in JAX backend.

Creates a function which evaluates the gradient of ``fun``.

Parameters
----------
@@ -97,7 +99,9 @@ def grad(func, vars=None, argnums=None, has_aux=None,

def value_and_grad(func, vars=None, argnums=None, has_aux=None,
holomorphic=False, allow_int=False, reduce_axes=()):
"""Create a function which evaluates both ``fun`` and the gradient of ``fun``.
"""Automatic Gradient Computation in JAX backend.

Create a function which evaluates both ``fun`` and the gradient of ``fun``.

Parameters
----------


brainpy/math/jax/jaxarray.py (+1, -1)

@@ -257,7 +257,7 @@ class JaxArray(object):

def __ipow__(self, oc):
# a **= b
self._value = self._value.__pow__(oc._value if isinstance(oc, JaxArray) else oc)
self._value = self._value ** (oc._value if isinstance(oc, JaxArray) else oc)
return self

def __matmul__(self, oc):


brainpy/math/numpy/ast2numba.py (+737, -256)

@@ -2,15 +2,13 @@


"""

TODO: support Base function
TODO: enable code debug and error report

TODO: enable code debug and error report; See https://github.com/numba/numba/issues/7370
"""

import ast
import inspect
import re
from copy import deepcopy
from pprint import pprint

import numba
@@ -21,90 +19,209 @@ from numba.core.dispatcher import Dispatcher
from brainpy import errors, math, tools
from brainpy.base.base import Base
from brainpy.base.collector import Collector
from brainpy.base.function import Function
from brainpy.math import profile

DE_INT = DynamicSystem = Container = None
DE_INT = DynamicalSystem = None

__all__ = [
'jit_cls',
'jit_integrator',
'jit',
]


def jit_cls(ds, nopython=True, fastmath=True, parallel=False, nogil=False, show_code=False):
global DynamicSystem, Container
if DynamicSystem is None:
from brainpy.simulation.brainobjects.base import DynamicSystem
if Container is None:
from brainpy.simulation.brainobjects.base import Container
def jit(obj_or_fun, show_code=False, **jit_setting):
global DE_INT
if DE_INT is None:
from brainpy.integrators.constants import DE_INT

if callable(obj_or_fun):
# Function
if isinstance(obj_or_fun, Function):
return jit_Func(obj_or_fun,
show_code=show_code,
**jit_setting)

# Base
elif isinstance(obj_or_fun, Base):
return jit_Base(func=obj_or_fun.__call__,
host=obj_or_fun,
name=obj_or_fun.name + '_call',
show_code=show_code, **jit_setting)

# integrator
elif hasattr(obj_or_fun, '__name__') and obj_or_fun.__name__.startswith(DE_INT):
return jit_integrator(intg=obj_or_fun,
show_code=show_code,
**jit_setting)

# bounded method
elif hasattr(obj_or_fun, '__self__') and isinstance(obj_or_fun.__self__, Base):
return jit_Base(func=obj_or_fun,
host=obj_or_fun.__self__,
show_code=show_code,
**jit_setting)

else:
# native function
if not isinstance(obj_or_fun, Dispatcher):
return numba.jit(obj_or_fun, **jit_setting)
else:
# numba function
return obj_or_fun

else:
return jit_DS(obj_or_fun,
show_code=show_code,
**jit_setting)

assert isinstance(ds, DynamicSystem)

def jit_DS(obj_or_fun, show_code=False, **jit_setting):
global DynamicalSystem
if DynamicalSystem is None:
from brainpy.simulation.brainobjects.base import DynamicalSystem

if not isinstance(obj_or_fun, DynamicalSystem):
raise errors.UnsupportedError(f'JIT compilation in numpy backend only '
f'supports {Base.__name__}, but we got '
f'{type(obj_or_fun)}.')
if not hasattr(obj_or_fun, 'steps'):
raise errors.BrainPyError(f'Please init this DynamicalSystem {obj_or_fun} first, '
f'then apply JIT.')

# function analysis
for key, step in list(ds.steps.items()):
for key, step in list(obj_or_fun.steps.items()):
key = key.replace(".", "_")
r = analyze_func(step, nopython=nopython, fastmath=fastmath,
parallel=parallel, nogil=nogil, show_code=show_code)
r = _jit_func(obj_or_fun=step, show_code=show_code, **jit_setting)
if r['func'] != step:
func = _form_final_call(f_org=step, f_rep=r['func'], arg2call=r['arg2call'],
arguments=r['arguments'], nodes=r['nodes'],
show_code=show_code, name=step.__name__)
obj_or_fun.steps.replace(key, func)

# dynamic system
return obj_or_fun


def jit_integrator(intg, show_code=False, **jit_setting):
r = _jit_intg_func(intg, show_code=show_code, **jit_setting)
if len(r['arguments']):
intg = _form_final_call(f_org=intg, f_rep=r['func'], arg2call=r['arg2call'],
arguments=r['arguments'], nodes=r['nodes'],
show_code=show_code, name=intg.__name__)
else:
intg = r['func']
return intg

if len(r):
arguments = sorted(r['arguments'])
code_scope = {key: node for key, node in r['nodes'].items()}
code_scope[key] = r['func']
called_args = _items2lines([f"{a}={r['arg2call'][a]}" for a in arguments]).strip()
code_lines = [f'def new_{key}(_t, _dt):',
f' {key}(_t=_t, _dt=_dt, {called_args.strip()})']

# compile new function
code = '\n'.join(code_lines)
# code, _scope = _add_try_except(code)
# code_scope.update(_scope)
if show_code:
print(code)
print()
pprint(code_scope)
print()
exec(compile(code, '', 'exec'), code_scope)
func = code_scope[f'new_{key}']
ds.steps.replace(key, func)
return ds


def jit_integrator(integrator, nopython=True, fastmath=True, parallel=False, nogil=False, show_code=False):
return analyze_intg_func(f=integrator, nopython=nopython, fastmath=fastmath,
parallel=parallel, nogil=nogil, show_code=show_code)


def analyze_func(f, nopython=True, fastmath=True, parallel=False, nogil=False, show_code=False):
global DE_INT, DynamicSystem

def jit_Func(func, show_code=False, **jit_setting):
assert isinstance(func, Function)

r = _jit_Function(func=func, show_code=show_code, **jit_setting)
if len(r['arguments']):
func = _form_final_call(f_org=func._f, f_rep=r['func'], arg2call=r['arg2call'],
arguments=r['arguments'], nodes=r['nodes'],
show_code=show_code, name=func.name + '_call')
else:
func = r['func']

return func


def jit_Base(func, host, name=None, show_code=False, **jit_setting):
r = _jit_cls_func(func, host=host, show_code=show_code, **jit_setting)
if len(r['arguments']):
name = func.__name__ if name is None else name
func = _form_final_call(f_org=func, f_rep=r['func'], arg2call=r['arg2call'],
arguments=r['arguments'], nodes=r['nodes'],
show_code=show_code, name=name)
else:
func = r['func']
return func


def _jit_func(obj_or_fun, show_code=False, **jit_setting):
global DE_INT
if DE_INT is None:
from brainpy.integrators.constants import DE_INT
if DynamicSystem is None:
from brainpy.simulation.brainobjects.base import DynamicSystem

if hasattr(f, '__name__') and f.__name__.startswith(DE_INT):
return analyze_intg_func(f, nopython=nopython, fastmath=fastmath, parallel=parallel,
nogil=nogil, show_code=show_code)
if callable(obj_or_fun):
# integrator
if hasattr(obj_or_fun, '__name__') and obj_or_fun.__name__.startswith(DE_INT):
return _jit_intg_func(obj_or_fun,
show_code=show_code,
**jit_setting)

# bounded method
elif hasattr(obj_or_fun, '__self__') and isinstance(obj_or_fun.__self__, Base):
return _jit_cls_func(obj_or_fun,
host=obj_or_fun.__self__,
show_code=show_code,
**jit_setting)

# wrapped function
elif isinstance(obj_or_fun, Function):
return _jit_Function(obj_or_fun,
show_code=show_code,
**jit_setting)

# base class function
elif isinstance(obj_or_fun, Base):
return _jit_cls_func(obj_or_fun.__call__,
host=obj_or_fun,
show_code=show_code,
**jit_setting)

else:
# native function
if not isinstance(obj_or_fun, Dispatcher):
if inspector.inspect_function(obj_or_fun)['numba_type'] is None:
f = numba.jit(obj_or_fun, **jit_setting)
return dict(func=f, arguments=set(), arg2call=Collector(), nodes=Collector())
# numba function or innate supported function
return dict(func=obj_or_fun, arguments=set(), arg2call=Collector(), nodes=Collector())

else:
raise ValueError


def _jit_Function(func, show_code=False, **jit_setting):
assert isinstance(func, Function)

elif hasattr(f, '__self__'):
if isinstance(f.__self__, DynamicSystem):
return analyze_cls_func(f, nopython=nopython, fastmath=fastmath, parallel=parallel,
nogil=nogil, show_code=show_code)
# code_scope
closure_vars = inspect.getclosurevars(func._f)
code_scope = dict(closure_vars.nonlocals)
code_scope.update(closure_vars.globals)
# code
code = tools.deindent(inspect.getsource(func._f)).strip()
# arguments
arguments = set()
# nodes
nodes = {v.name: v for v in func._nodes.values()}
# arg2call
arg2call = dict()

if not isinstance(f, Dispatcher):
if inspector.inspect_function(f)['numba_type'] is None:
f = numba.jit(f, nopython=nopython, fastmath=fastmath, parallel=parallel, nogil=nogil)
return dict(func=f, arguments=set(), arg2call=Collector(), nodes=Collector())
for key, node in func._nodes.items():
code, _arguments, _arg2call, _nodes, code_scope = _analyze_cls_func(
host=node, code=code, show_code=show_code, code_scope=code_scope,
self_name=key, pop_self=True, **jit_setting)
arguments.update(_arguments)
arg2call.update(_arg2call)
nodes.update(_nodes)

return {}
# compile new function
# code, _scope = _add_try_except(code)
# code_scope.update(_scope)
if show_code:
show_compiled_codes(code, code_scope)
exec(compile(code, '', 'exec'), code_scope)
func = code_scope[func._f.__name__]
func = numba.jit(func, **jit_setting)

# returns
return dict(func=func, arguments=arguments, arg2call=arg2call, nodes=nodes)

def analyze_cls_func(f, code=None, host=None, show_code=False, **jit_setting):
global Container, DynamicSystem
if Container is None:
from brainpy.simulation.brainobjects.base import Container
if DynamicSystem is None:
from brainpy.simulation.brainobjects.base import DynamicSystem

def _jit_cls_func(f, code=None, host=None, show_code=False, **jit_setting):
host = (host or f.__self__)

# data to return
@@ -113,55 +230,27 @@ def analyze_cls_func(f, code=None, host=None, show_code=False, **jit_setting):
nodes = Collector()
nodes[host.name] = host

# step function of Container
if isinstance(host, Container):
if f.__name__ != 'update':
raise errors.UnsupportedError(f'Currently, BrainPy only supports compile "update" step '
f'function, while we got {f.__name__}: {f}')
code_lines = []
code_scope = {}
for key, step in host.child_steps.items():
r = analyze_func(f=step, show_code=show_code, **jit_setting)
if len(r):
arguments.update(r['arguments'])
arg2call.update(r['arg2call'])
nodes.update(r['nodes'])
code_scope[key.replace('.', '_')] = r['func']
call_args = [f'{arg}={arg}' for arg in sorted(r['arguments'])]
code_lines.append("{call}(_t, _dt, {args})".format(
call=key.replace('.', '_'),
args=", ".join(call_args)))
# args=_items2lines(call_args, line_break='\n\t\t\t')))
code_lines = [' ' + line for line in code_lines]
# code_lines.insert(0, f'def {host.name}_update(_t, _dt, {_items2lines(sorted(arguments))}):')
code_lines.insert(0, f'def {host.name}_update(_t, _dt, {", ".join(sorted(arguments))}):')
code = '\n'.join(code_lines)
# code_scope.update(nodes)
func_name = f'{host.name}_update'

# step function of normal DynamicSystem
else:
code = (code or tools.deindent(inspect.getsource(f)).strip())
# function name
func_name = f.__name__
# code scope
closure_vars = inspect.getclosurevars(f)
code_scope = dict(closure_vars.nonlocals)
code_scope.update(closure_vars.globals)
code, _arguments, _arg2call, _nodes, code_scope = _analyze_cls_func(
host=host, code=code, show_code=show_code, code_scope=code_scope, **jit_setting)
arguments.update(_arguments)
arg2call.update(_arg2call)
nodes.update(_nodes)
# code
code = (code or tools.deindent(inspect.getsource(f)).strip())
# function name
func_name = f.__name__
# code scope
closure_vars = inspect.getclosurevars(f)
code_scope = dict(closure_vars.nonlocals)
code_scope.update(closure_vars.globals)
# analyze class function
code, _arguments, _arg2call, _nodes, _code_scope = _analyze_cls_func(
host=host, code=code, show_code=show_code, **jit_setting)
arguments.update(_arguments)
arg2call.update(_arg2call)
nodes.update(_nodes)
code_scope.update(_code_scope)

# compile new function
# code, _scope = _add_try_except(code)
# code_scope.update(_scope)
if show_code:
print(code)
print()
pprint(code_scope)
print()
show_compiled_codes(code, code_scope)
exec(compile(code, '', 'exec'), code_scope)
func = code_scope[func_name]
func = numba.jit(func, **jit_setting)
@@ -170,19 +259,15 @@ def analyze_cls_func(f, code=None, host=None, show_code=False, **jit_setting):
return dict(func=func, arguments=arguments, arg2call=arg2call, nodes=nodes)


def analyze_intg_func(f, nopython=True, fastmath=True, parallel=False, nogil=False, show_code=False):
def _jit_intg_func(f, show_code=False, **jit_setting):
global DynamicalSystem
if DynamicalSystem is None:
from brainpy.simulation.brainobjects.base import DynamicalSystem

# exponential euler methods
if f.brainpy_data['method'].startswith('exponential'):
return analyze_cls_func(f=f,
code="\n".join(f.brainpy_data['code_lines']),
nopython=nopython,
fastmath=fastmath,
parallel=parallel,
nogil=nogil,
show_code=show_code)

global DynamicSystem
if DynamicSystem is None:
from brainpy.simulation.brainobjects.base import DynamicSystem
return _jit_cls_func(f=f, code="\n".join(f.brainpy_data['code_lines']),
show_code=show_code, **jit_setting)

# information in the integrator
func_name = f.brainpy_data['func_name']
@@ -198,7 +283,7 @@ def analyze_intg_func(f, nopython=True, fastmath=True, parallel=False, nogil=Fal
# jit raw functions
f_node = None
remove_self = None
if hasattr(f, '__self__') and isinstance(f.__self__, DynamicSystem):
if hasattr(f, '__self__') and isinstance(f.__self__, DynamicalSystem):
f_node = f.__self__
_arg = tree.body[0].args.args.pop(0) # remove "self" arg
# remove "self" in functional call
@@ -206,29 +291,27 @@ def analyze_intg_func(f, nopython=True, fastmath=True, parallel=False, nogil=Fal

need_recompile = False
for key, func in raw_func.items():
# get node
# get node of host
func_node = None
if f_node:
func_node = f_node
elif hasattr(func, '__self__') and isinstance(func.__self__, DynamicSystem):
elif hasattr(func, '__self__') and isinstance(func.__self__, DynamicalSystem):
func_node = func.__self__

# get new compiled function
if isinstance(func, Dispatcher):
continue
elif func_node:
elif func_node is not None:
need_recompile = True
r = analyze_cls_func(f=func,
host=func_node,
nopython=nopython,
fastmath=fastmath,
parallel=parallel,
nogil=nogil,
show_code=show_code)
r = _jit_cls_func(f=func,
host=func_node,
show_code=show_code,
**jit_setting)
if len(r['arguments']) or remove_self:
tree = _replace_func(tree, func_call=key,
arg_to_append=r['arguments'],
remove_self=remove_self)
tree = _replace_func_call_by_tee(tree,
func_call=key,
arg_to_append=r['arguments'],
remove_self=remove_self)
code_scope[key] = r['func']
arguments.update(r['arguments']) # update arguments
arg2call.update(r['arg2call']) # update arg2call
@@ -236,11 +319,7 @@ def analyze_intg_func(f, nopython=True, fastmath=True, parallel=False, nogil=Fal
nodes[func_node.name] = func_node # update nodes
else:
need_recompile = True
code_scope[key] = numba.jit(func,
nopython=nopython,
fastmath=fastmath,
parallel=parallel,
nogil=nogil)
code_scope[key] = numba.jit(func, **jit_setting)

if need_recompile:
tree.body[0].decorator_list.clear()
@@ -252,21 +331,362 @@ def analyze_intg_func(f, nopython=True, fastmath=True, parallel=False, nogil=Fal
code_scope_backup = {k: v for k, v in code_scope.items()}
# compile functions
if show_code:
print(code)
print()
pprint(code_scope)
print()
show_compiled_codes(code, code_scope)
exec(compile(code, '', 'exec'), code_scope)
new_f = code_scope[func_name]
new_f.brainpy_data = {key: val for key, val in f.brainpy_data.items()}
new_f.brainpy_data['code_lines'] = code.strip().split('\n')
new_f.brainpy_data['code_scope'] = code_scope_backup
jit_f = numba.jit(new_f, nopython=nopython, fastmath=fastmath, parallel=parallel, nogil=nogil)
jit_f = numba.jit(new_f, **jit_setting)
return dict(func=jit_f, arguments=arguments, arg2call=arg2call, nodes=nodes)
else:
return dict(func=f, arguments=arguments, arg2call=arg2call, nodes=nodes)


def _analyze_cls_func(host, code, show_code, self_name=None, pop_self=True, **jit_setting):
"""Analyze the bounded function of one object.

Parameters
----------
host : Base
The data host.
code : str
The function source code.
self_name : optional, str
The class name, like "self", "cls".
show_code : bool
"""
# arguments
tree = ast.parse(code)
if self_name is None:
self_name = tree.body[0].args.args[0].arg
# data accessed via self.xx on the right-hand side
if self_name not in profile.CLASS_KEYWORDS:
raise errors.CodeError(f'BrainPy only support class keyword '
f'{profile.CLASS_KEYWORDS}, but we got {self_name}.')
if pop_self:
tree.body[0].args.args.pop(0) # remove "self" etc. class argument

# analyze function body
r = _analyze_cls_func_body(host=host, self_name=self_name, code=code, tree=tree,
show_code=show_code, has_func_def=True, **jit_setting)
code, arguments, arg2call, nodes, code_scope = r

return code, arguments, arg2call, nodes, code_scope


def _analyze_cls_func_body(host, self_name, code, tree, show_code=False,
has_func_def=False, **jit_setting):
arguments, arg2call, nodes, code_scope = set(), dict(), Collector(), dict()

# all self data
self_data = re.findall('\\b' + self_name + '\\.[A-Za-z_][A-Za-z0-9_.]*\\b', code)
self_data = list(set(self_data))

# analyze variables and functions accessed by the self.xx
data_to_replace = {}
for key in self_data:
split_keys = key.split('.')
if len(split_keys) < 2:
raise errors.BrainPyError

# get target and data
target = host
for i in range(1, len(split_keys)):
next_target = getattr(target, split_keys[i])
if not isinstance(next_target, Base):
break
target = next_target
else:
raise errors.BrainPyError
data = getattr(target, split_keys[i])

# analyze data
if isinstance(data, math.Variable): # data is a variable
arguments.add(f'{target.name}_{split_keys[i]}')
arg2call[f'{target.name}_{split_keys[i]}'] = f'{target.name}.{split_keys[-1]}.value'
nodes[target.name] = target
# replace the data
if len(split_keys) == i + 1:
data_to_replace[key] = f'{target.name}_{split_keys[i]}'
else:
data_to_replace[key] = f'{target.name}_{split_keys[i]}.{".".join(split_keys[i:])}'

elif isinstance(data, np.random.RandomState): # data is a RandomState
# replace RandomState
code_scope[f'{target.name}_{split_keys[i]}'] = np.random
# replace the data
if len(split_keys) == i + 1:
data_to_replace[key] = f'{target.name}_{split_keys[i]}'
else:
data_to_replace[key] = f'{target.name}_{split_keys[i]}.{".".join(split_keys[i:])}'

elif callable(data): # data is a function
assert len(split_keys) == i + 1
r = _jit_func(obj_or_fun=data, show_code=show_code, **jit_setting)
# if len(r['arguments']):
tree = _replace_func_call_by_tee(tree, func_call=key, arg_to_append=r['arguments'])
arguments.update(r['arguments'])
arg2call.update(r['arg2call'])
nodes.update(r['nodes'])
code_scope[f'{target.name}_{split_keys[i]}'] = r['func']
data_to_replace[key] = f'{target.name}_{split_keys[i]}' # replace the data

elif isinstance(data, (dict, list, tuple)): # data is a list/tuple/dict of function/object
# get all values
if isinstance(data, dict): # check dict
if len(split_keys) != i + 2 and split_keys[-1] != 'values':
raise errors.BrainPyError(f'Only support iterating dict.values(), while we got '
f'dict.{split_keys[-1]} for data: \n\n{data}')
values = list(data.values())
iter_name = key + '()'
else: # check list / tuple
assert len(split_keys) == i + 1
values = list(data)
iter_name = key

# replace this for-loop
r = _replace_this_forloop(tree=tree,
iter_name=iter_name,
loop_values=values,
show_code=show_code,
**jit_setting)
tree, _arguments, _arg2call, _nodes, _code_scope = r
arguments.update(_arguments)
arg2call.update(_arg2call)
nodes.update(_nodes)
code_scope.update(_code_scope)

else: # constants
code_scope[f'{target.name}_{split_keys[i]}'] = data
# replace the data
if len(split_keys) == i + 1:
data_to_replace[key] = f'{target.name}_{split_keys[i]}'
else:
data_to_replace[key] = f'{target.name}_{split_keys[i]}.{".".join(split_keys[i:])}'

if has_func_def:
tree.body[0].decorator_list.clear()
tree.body[0].args.args.extend([ast.Name(id=a) for a in sorted(arguments)])
tree.body[0].args.defaults.extend([ast.Constant(None) for _ in sorted(arguments)])

# replace words
code = tools.ast2code(tree)
code = tools.word_replace(code, data_to_replace, exclude_dot=True)

return code, arguments, arg2call, nodes, code_scope
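# For orientation, a minimal sketch of the rewriting performed above, with
# hypothetical object and attribute names (an instance named "HH0" holding a
# math.Variable ``V`` and a plain float ``tau``):
#
#     def update(self, _t, _dt):
#         self.V[:] = self.V + (-self.V / self.tau) * _dt
#
# is rewritten (roughly) into:
#
#     def update(_t, _dt, HH0_V=None):
#         HH0_V[:] = HH0_V + (-HH0_V / HH0_tau) * _dt
#
# with arguments == {'HH0_V'}, arg2call == {'HH0_V': 'HH0.V.value'},
# nodes == {'HH0': <the host object>}, and code_scope binding HH0_tau
# to the float constant.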


def _replace_this_forloop(tree, iter_name, loop_values, show_code=False, **jit_setting):
"""Replace the given for-loop.

This function aims to replace a specific for-loop structure. For example,
it replaces this for-loop:

>>> def update(_t, _dt):
>>> for step in self.child_steps.values():
>>> step(_t, _dt)

to

>>> def update(_t, _dt, AMPA_vec0_delay_g_data=None, AMPA_vec0_delay_g_in_idx=None,
>>> AMPA_vec0_delay_g_out_idx=None, AMPA_vec0_s=None, HH0_V=None, HH0_V_th=None,
>>> HH0_gNa=None, HH0_h=None, HH0_input=None, HH0_m=None, HH0_n=None, HH0_spike=None):
>>> HH0_step(_t, _dt, HH0_V=HH0_V, HH0_V_th=HH0_V_th, HH0_gNa=HH0_gNa,
>>> HH0_h=HH0_h, HH0_input=HH0_input, HH0_m=HH0_m, HH0_n=HH0_n,
>>> HH0_spike=HH0_spike)
>>> AMPA_vec0_step(_t, _dt, AMPA_vec0_delay_g_data=AMPA_vec0_delay_g_data,
>>> AMPA_vec0_delay_g_in_idx=AMPA_vec0_delay_g_in_idx,
>>> AMPA_vec0_delay_g_out_idx=AMPA_vec0_delay_g_out_idx,
>>> AMPA_vec0_s=AMPA_vec0_s, HH0_V=HH0_V, HH0_input=HH0_input,
>>> HH0_spike=HH0_spike)
>>> AMPA_vec0_delay_g_step(_t, _dt, AMPA_vec0_delay_g_in_idx=AMPA_vec0_delay_g_in_idx,
>>> AMPA_vec0_delay_g_out_idx=AMPA_vec0_delay_g_out_idx)

Parameters
----------
tree : ast.Module
The target code tree.
iter_name : str
The iterator expression of the for-loop.
loop_values : list/tuple
The values iterated over in the loop.
show_code : bool
Whether show the formatted code.
"""
assert isinstance(tree, ast.Module)

replacer = ReplaceThisForLoop(loop_values=loop_values,
iter_name=iter_name,
show_code=show_code,
**jit_setting)
tree = replacer.visit(tree)
if not replacer.success:
raise errors.BrainPyError(f'Cannot find the for-loop over "{iter_name}". '
f'Currently we only support for-loops like '
f'"for xxx in {iter_name}:". Please check that your '
f'for-loop follows this structure.')

return tree, replacer.arguments, replacer.arg2call, replacer.nodes, replacer.code_scope


class ReplaceThisForLoop(ast.NodeTransformer):
def __init__(self, loop_values, iter_name, show_code=False, **jit_setting):
self.success = False

# targets
self.loop_values = loop_values
self.iter_name = iter_name

# setting
self.show_code = show_code
self.jit_setting = jit_setting

# results
self.arguments = set()
self.arg2call = dict()
self.nodes = Collector()
self.code_scope = dict()

def visit_For(self, node):
iter_ = tools.ast2code(ast.fix_missing_locations(node.iter))

if iter_.strip() == self.iter_name:
data_to_replace = Collector()
final_node = ast.Module(body=[])
self.success = True

# target
if not isinstance(node.target, ast.Name):
raise errors.BrainPyError(f'Only support a scalar iteration target, like "for x in xxxx:", not "for '
f'{tools.ast2code(ast.fix_missing_locations(node.target))} '
f'in {iter_}:".')
target = node.target.id

# for loop values
for i, value in enumerate(self.loop_values):
# module and code
module = ast.Module(body=deepcopy(node).body)
code = tools.ast2code(module)

if callable(value): # transform functions
r = _jit_func(obj_or_fun=value,
show_code=self.show_code,
**self.jit_setting)
tree = _replace_func_call_by_tee(deepcopy(module),
func_call=target,
arg_to_append=r['arguments'],
new_func_name=f'{target}_{i}')

# update import parameters
self.arguments.update(r['arguments'])
self.arg2call.update(r['arg2call'])
self.nodes.update(r['nodes'])

# replace the data
if isinstance(value, Base):
host = value
replace_name = f'{host.name}_{target}'
elif hasattr(value, '__self__') and isinstance(value.__self__, Base):
host = value.__self__
replace_name = f'{host.name}_{target}'
else:
replace_name = f'{target}_{i}'
self.code_scope[replace_name] = r['func']
data_to_replace[f'{target}_{i}'] = replace_name

final_node.body.extend(tree.body)

elif isinstance(value, Base): # transform Base objects
r = _analyze_cls_func_body(host=value,
self_name=target,
code=code,
tree=module,
show_code=self.show_code,
**self.jit_setting)

new_code, arguments, arg2call, nodes, code_scope = r
self.arguments.update(arguments)
self.arg2call.update(arg2call)
self.nodes.update(nodes)
self.code_scope.update(code_scope)

final_node.body.extend(ast.parse(new_code).body)

else:
raise errors.BrainPyError(f'Only support JIT compilation of an iterable of '
f'functions or Base objects, but we got:\n\n {value}')

# replace words
final_code = tools.ast2code(final_node)
final_code = tools.word_replace(final_code, data_to_replace, exclude_dot=True)
final_node = ast.parse(final_code)

else:
final_node = node

self.generic_visit(final_node)
return final_node


def _replace_func_call_by_tee(tree, func_call, arg_to_append, remove_self=None,
new_func_name=None):
assert isinstance(func_call, str)
assert isinstance(arg_to_append, (list, tuple, set))
assert isinstance(tree, ast.Module)

transformer = FuncTransformer(func_name=func_call,
arg_to_append=arg_to_append,
remove_self=remove_self,
new_func_name=new_func_name)
new_tree = transformer.visit(tree)
return new_tree


def _replace_func_call_by_code(code, func_call, arg_to_append, remove_self=None):
"""Replace functional call.

This class automatically transform a functional call.
For example, in your original code:

>>> V, m, h, n = self.integral(self.V, self.m, self.h, self.n, _t, self.input)

you want to add new arguments ``gNa``, ``gK`` and ``gL`` into the
function ``self.integral``. Then this Transformer will help you
automatically do this:

>>> V, m, h, n = self.integral(self.V, self.m, self.h, self.n, _t, self.input,
>>> gNa=gNa, gK=gK, gL=gL)

Parameters
----------
code : str
The original code string.
func_call : str
The function call to transform.
arg_to_append : set/list/tuple of str
The arguments to append.
remove_self : str, optional
The self class name to remove.

Returns
-------
new_code : str
The new code string.
"""
assert isinstance(func_call, str)
assert isinstance(arg_to_append, (list, tuple, set))

tree = ast.parse(code)
transformer = FuncTransformer(func_name=func_call,
arg_to_append=arg_to_append,
remove_self=remove_self)
new_tree = transformer.visit(tree)
return tools.ast2code(new_tree)


class FuncTransformer(ast.NodeTransformer):
"""Transform a functional call.

@@ -292,8 +712,9 @@ class FuncTransformer(ast.NodeTransformer):

"""

def __init__(self, func_name, arg_to_append, remove_self=None):
def __init__(self, func_name, arg_to_append, remove_self=None, new_func_name=None):
self.func_name = func_name
self.new_func_name = new_func_name
self.arg_to_append = sorted(arg_to_append)
self.remove_self = remove_self

@@ -315,118 +736,24 @@ class FuncTransformer(ast.NodeTransformer):
# kwargs
kwargs = [self.generic_visit(keyword) for keyword in node.keywords]
# new kwargs
code = f'f({", ".join([f"{k}={k}" for k in self.arg_to_append])})'
tree = ast.parse(code)
new_keywords = tree.body[0].value.keywords
kwargs.extend(new_keywords)
arg_to_append = deepcopy(self.arg_to_append)
for arg in kwargs:
if arg.arg in arg_to_append:
arg_to_append.remove(arg.arg)
if len(arg_to_append):
code = f'f({", ".join([f"{k}={k}" for k in arg_to_append])})'
tree = ast.parse(code)
new_keywords = tree.body[0].value.keywords
kwargs.extend(new_keywords)
# final function
return ast.Call(func=node.func, args=args, keywords=kwargs)
if self.new_func_name:
func_call = ast.parse(f'{self.new_func_name}()').body[0].value.func
else:
func_call = node.func
return ast.Call(func=func_call, args=args, keywords=kwargs)
return node
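A hedged usage sketch of this transformer (the call string and argument names are illustrative; ``tools.ast2code`` is the project's AST-to-source helper):

# Illustrative only: append collected arguments to a matching call
# and optionally rename the callee.
tree = ast.parse('V, m = self.integral(self.V, self.m, _t)')
transformer = FuncTransformer(func_name='self.integral',
                              arg_to_append={'HH0_gNa'},
                              new_func_name='HH0_integral')
new_tree = transformer.visit(tree)
# tools.ast2code(new_tree) would now give, roughly:
#     'V, m = HH0_integral(self.V, self.m, _t, HH0_gNa=HH0_gNa)'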


def _replace_func(code_or_tree, func_call, arg_to_append, remove_self=None):
assert isinstance(func_call, str)
assert isinstance(arg_to_append, (list, tuple, set))

if isinstance(code_or_tree, str):
tree = ast.parse(code_or_tree)
elif isinstance(code_or_tree, ast.Module):
tree = code_or_tree
else:
raise ValueError

transformer = FuncTransformer(func_name=func_call,
arg_to_append=arg_to_append,
remove_self=remove_self)
new_tree = transformer.visit(tree)
return new_tree


def _analyze_cls_func(host, code, show_code, code_scope, cls_name=None, **jit_setting):
"""

Parameters
----------
host : Base
The data host.
code : str
The function source code.
cls_name : optional, str
The class name, like "self", "cls".
show_code : bool
Whether show the formatted code.

Returns
-------
res : tuple
The (code, arguments, arg2call, nodes, code_scope) results.
"""
arguments, arg2call, nodes = set(), dict(), Collector()

# arguments
tree = ast.parse(code)
if cls_name is None:
cls_name = tree.body[0].args.args[0].arg
# check the class keyword (e.g., "self", "cls")
if cls_name not in profile.CLASS_KEYWORDS:
raise errors.CodeError(f'BrainPy only supports the class keywords '
f'{profile.CLASS_KEYWORDS}, but we got {cls_name}.')
tree.body[0].args.args.pop(0) # remove "self" etc. class argument
self_data = re.findall('\\b' + cls_name + '\\.[A-Za-z_][A-Za-z0-9_.]*\\b', code)
self_data = list(set(self_data))

# analyze variables and functions accessed by the self.xx
data_to_replace = {}
for key in self_data:
split_keys = key.split('.')
if len(split_keys) < 2:
raise errors.BrainPyError

# get target and data
target = host
for i in range(1, len(split_keys)):
next_target = getattr(target, split_keys[i])
if not isinstance(next_target, Base):
break
target = next_target
else:
raise errors.BrainPyError(f'Cannot analyze the attribute access "{key}".')
data = getattr(target, split_keys[i])

key = '.'.join(split_keys[:i + 1])

# analyze data
if isinstance(data, math.Variable):
arguments.add(f'{target.name}_{split_keys[i]}')
arg2call[f'{target.name}_{split_keys[i]}'] = f'{target.name}.{split_keys[-1]}.value'
nodes[target.name] = target
data_to_replace[key] = f'{target.name}_{split_keys[i]}' # replace the data
elif isinstance(data, np.random.RandomState):
data_to_replace[key] = f'{target.name}_{split_keys[i]}' # replace the data
code_scope[f'{target.name}_{split_keys[i]}'] = np.random # replace RandomState
elif callable(data):
assert len(split_keys) == i + 1
r = analyze_func(f=data, show_code=show_code, **jit_setting)
if len(r):
tree = _replace_func(tree, func_call=key, arg_to_append=r['arguments'])
arguments.update(r['arguments'])
arg2call.update(r['arg2call'])
nodes.update(r['nodes'])
code_scope[f'{target.name}_{split_keys[i]}'] = r['func']
data_to_replace[key] = f'{target.name}_{split_keys[i]}' # replace the data
else:
code_scope[f'{target.name}_{split_keys[i]}'] = data
data_to_replace[key] = f'{target.name}_{split_keys[i]}' # replace the data

# final code
tree.body[0].decorator_list.clear()
tree.body[0].args.args.extend([ast.Name(id=a) for a in sorted(arguments)])
tree.body[0].args.defaults.extend([ast.Constant(None) for _ in sorted(arguments)])
code = tools.ast2code(tree)
code = tools.word_replace(code, data_to_replace, exclude_dot=False)

return code, arguments, arg2call, nodes, code_scope


def _add_try_except(code):
splits = re.compile(r'\)\s*?:').split(code)
if len(splits) == 1:
@@ -456,3 +783,157 @@ def _items2lines(items, num_each_line=5, separator=', ', line_break='\n\t\t'):
for item in items[i: i + num_each_line]:
res += item + separator
return res


def _form_final_call(f_org, f_rep, arg2call, arguments, nodes, show_code=False, name=None):
cls_kw, reduce_args, org_args = _get_args(f_org)

name = (name or f_org.__name__)
code_scope = {key: node for key, node in nodes.items()}
code_scope[name] = f_rep
called_args = _items2lines(reduce_args + [f"{a}={arg2call[a]}" for a in sorted(arguments)]).strip()
code_lines = [f'def new_{name}({", ".join(org_args)}):',
f' {name}({called_args.strip()})']

# compile new function
code = '\n'.join(code_lines)
# code, _scope = _add_try_except(code)
# code_scope.update(_scope)
if show_code:
show_compiled_codes(code, code_scope)
exec(compile(code, '', 'exec'), code_scope)
func = code_scope[f'new_{name}']
return func
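# Concretely, the wrapper produced here looks roughly like the following
# (all names hypothetical):
#
# Given f_org = HH0.update with signature (self, _t, _dt),
# arguments = {'HH0_V'} and arg2call = {'HH0_V': 'HH0.V.value'},
# the compiled wrapper is roughly:
#
#     def new_update(_t, _dt):
#         update(_t, _dt, HH0_V=HH0.V.value)
#
# where "update" is bound to the jitted replacement f_rep and "HH0"
# is resolved through the collected nodes placed in code_scope.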


def _get_args(f):
# 1. get the function arguments
original_args = []
reduced_args = []

for name, par in inspect.signature(f).parameters.items():
if par.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD:
reduced_args.append(par.name)
elif par.kind is inspect.Parameter.VAR_POSITIONAL:
reduced_args.append(par.name)
elif par.kind is inspect.Parameter.KEYWORD_ONLY:
reduced_args.append(par.name)
elif par.kind is inspect.Parameter.POSITIONAL_ONLY:
raise errors.DiffEqError('Do not support positional-only parameters, e.g., "/".')
elif par.kind is inspect.Parameter.VAR_KEYWORD:
raise errors.DiffEqError(f'Do not support variadic keyword arguments: {str(par)}')
else:
raise errors.DiffEqError(f'Unknown argument type: {par.kind}')

original_args.append(str(par))

# 2. analyze the function arguments
# 2.1 class keywords
class_kw = []
if original_args[0] in profile.CLASS_KEYWORDS:
class_kw.append(original_args[0])
original_args = original_args[1:]
reduced_args = reduced_args[1:]
for a in original_args:
if a.split('=')[0].strip() in profile.CLASS_KEYWORDS:
raise errors.DiffEqError(f'Class keywords "{a}" must be defined '
f'as the first argument.')
return class_kw, reduced_args, original_args
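# A quick sketch of what this argument analysis returns, assuming 'self'
# is listed in profile.CLASS_KEYWORDS:
#
#     def update(self, _t, _dt=0.1): ...
#
#     _get_args(update)  ->  (['self'], ['_t', '_dt'], ['_t', '_dt=0.1'])
#
# i.e., the detected class keyword, the bare argument names, and the
# argument declarations (defaults included) after the class keyword
# has been stripped.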


def show_compiled_codes(code, scope):
print('The recompiled function:')
print('-------------------------')
print(code)
print()
print('The namespace of the above function:')
pprint(scope)
print()


def _find_all_forloop(code_or_tree):
"""Find all for-loops in the code.

>>> code = '''
>>> for ch in self._update_channels:
>>> ch.update(_t, _dt)
>>> for ch in self._output_channels:
>>> self.input += ch.update(_t, _dt)
>>> '''
>>> _find_all_forloop(code)
{'self._output_channels': ('ch',
<_ast.Module object at 0x00000155BD23B730>,
'self.input += ch.update(_t, _dt)\n'),
'self._update_channels': ('ch',
<_ast.Module object at 0x00000155B699AD90>,
'ch.update(_t, _dt)\n')}

>>> code = '''
>>> self.pre_spike.push(self.pre.spike)
>>> pre_spike = self.pre_spike.pull()
>>>
>>> self.g[:] = self.integral(self.g, _t, dt=_dt)
>>> for pre_id in range(self.pre.num):
>>> if pre_spike[pre_id]:
>>> start, end = self.pre_slice[pre_id]
>>> for post_id in self.post_ids[start: end]:
>>> self.g[post_id] += self.g_max
>>>
>>> self.post.input[:] += self.output_current(self.g)
>>> '''
>>> _find_all_forloop(code)
{'range(self.pre.num)': ('pre_id',
<_ast.Module object at 0x000001D0AB120D60>,
'if pre_spike[pre_id]:\n'
' start, end = self.pre_slice[pre_id]\n'
' for post_id in self.post_ids[start:end]:\n'
' self.g[post_id] += self.g_max\n'),
'self.post_ids[start:end]': ('post_id',
<_ast.Module object at 0x000001D0AB11B460>,
'self.g[post_id] += self.g_max\n')}

Parameters
----------
code_or_tree: str, ast.Module

Returns
-------
res : dict
A dict mapping each for-loop's iterator expression to a tuple of (target, body, code).
"""

# code or tree
if isinstance(code_or_tree, str):
code_or_tree = ast.parse(code_or_tree)
elif isinstance(code_or_tree, ast.Module):
code_or_tree = code_or_tree
else:
raise ValueError

# finder
finder = FindAllForLoop()
finder.visit(code_or_tree)

# dictionary results
res = dict()
for iter_, target, body, body_str in zip(finder.for_iter, finder.for_target,
finder.for_body, finder.for_body_str):
res[iter_] = (target, body, body_str)
return res


class FindAllForLoop(ast.NodeVisitor):
def __init__(self):
self.for_iter = []
self.for_target = []
self.for_body = []
self.for_body_str = []

def visit_For(self, node):
self.for_target.append(tools.ast2code(ast.fix_missing_locations(node.target)))
self.for_iter.append(tools.ast2code(ast.fix_missing_locations(node.iter)))
self.for_body.append(ast.Module(body=[deepcopy(r) for r in node.body]))
codes = tuple(tools.ast2code(ast.fix_missing_locations(r)) for r in node.body)
self.for_body_str.append('\n'.join(codes))

self.generic_visit(node)

+ 129
- 39
brainpy/math/numpy/compilation.py View File

@@ -11,8 +11,6 @@ except ModuleNotFoundError:
ast2numba = None
numba = None

DE_INT = None
DynamicSystem = None

__all__ = [
'jit',
@@ -23,52 +21,128 @@ __all__ = [
logger = logging.getLogger('brainpy.math.numpy.compilation')


def jit(obj_or_func, nopython=True, fastmath=True, parallel=False, nogil=False, show_code=False, **kwargs):
def jit(obj_or_fun, nopython=True, fastmath=True, parallel=False, nogil=False,
forceobj=False, looplift=True, error_model='python', inline='never',
boundscheck=None, show_code=False, **kwargs):
"""Just-In-Time (JIT) Compilation in NumPy backend.

JIT compilation in the NumPy backend relies on `Numba <http://numba.pydata.org/>`_. However,
in BrainPy, `bp.math.numpy.jit()` can also be applied to class objects, especially instances
of :py:class:`brainpy.DynamicalSystem`.

If you are using the JAX backend, please refer to the JIT compilation in the
JAX backend `bp.math.jax.jit() <brainpy.math.jax.jit.rst>`_.

Parameters
----------
obj_or_fun : callable, Base
The function or the base model to jit compile.

nopython : bool
Set to True to disable the use of PyObjects and Python API
calls. Default value is True.

fastmath : bool
In certain classes of applications strict IEEE 754 compliance
is less important. As a result it is possible to relax some
numerical rigour with a view to gaining additional performance.
The way to achieve this behaviour in Numba is through the use
of the ``fastmath`` keyword argument.

parallel : bool
Enables automatic parallelization (and related optimizations) for
those operations in the function known to have parallel semantics.

nogil : bool
Whenever Numba optimizes Python code to native code that only
works on native types and variables (rather than Python objects),
it is not necessary anymore to hold Python’s global interpreter
lock (GIL). Numba will release the GIL when entering such a
compiled function if you passed ``nogil=True``.

forceobj: bool
Set to True to force the use of PyObjects for every value.
Default value is False.

looplift: bool
Set to True to enable jitting loops in nopython mode while
leaving surrounding code in object mode. This allows functions
to allocate NumPy arrays and use Python objects, while the
tight loops in the function can still be compiled in nopython
mode. Any arrays that the tight loop uses should be created
before the loop is entered. Default value is True.

error_model: str
The error-model affects divide-by-zero behavior.
Valid values are 'python' and 'numpy'. The 'python' model
raises an exception. The 'numpy' model sets the result to
*+/-inf* or *nan*. Default value is 'python'.

inline: str or callable
The inline option determines whether a function is inlined
into its caller when called. String options are 'never'
(default), which will never inline, and 'always', which will
always inline. If a callable is provided, it will be called with
the call expression node that is requesting inlining, the
caller's IR and the callee's IR as arguments, and it is expected
to return a truthy value indicating whether to inline.
NOTE: This inlining is performed at the Numba IR level and is in
no way related to LLVM inlining.

boundscheck: bool or None
Set to True to enable bounds checking for array indices. Out
of bounds accesses will raise IndexError. The default is to
not do bounds checking. If False, bounds checking is disabled,
out of bounds accesses can produce garbage results or segfaults.
However, enabling bounds checking will slow down typical
functions, so it is recommended to only use this flag for
debugging. You can also set the NUMBA_BOUNDSCHECK environment
variable to 0 or 1 to globally override this flag. The default
value is None, which under normal execution equates to False,
but if debug is set to True then bounds checking will be
enabled.

show_code : bool
Whether to show the recompiled code for debugging.

kwargs
Other keyword arguments.

Returns
-------
res : callable, Base
The jitted object.
"""

# checking
if ast2numba is None or numba is None:
raise errors.PackageMissingError('JIT compilation in the numpy backend needs Numba. '
'Please install numba via: \n\n'
'Please install numba via: \n'
'>>> pip install numba\n'
'>>> # or \n'
'>>> conda install numba')
global DE_INT, DynamicSystem
if DE_INT is None:
from brainpy.integrators.constants import DE_INT
if DynamicSystem is None:
from brainpy.simulation.brainobjects.base import DynamicSystem

# JIT compilation
if callable(obj_or_func):
# integrator
if hasattr(obj_or_func, '__name__') and obj_or_func.__name__.startswith(DE_INT):
return ast2numba.jit_integrator(obj_or_func,
nopython=nopython,
fastmath=fastmath,
parallel=parallel,
nogil=nogil,
show_code=show_code)
else:
# native function
return numba.jit(obj_or_func,
nopython=nopython,
fastmath=fastmath,
parallel=parallel,
nogil=nogil)

else:
# dynamic system
if not isinstance(obj_or_func, DynamicSystem):
raise errors.UnsupportedError(f'JIT compilation in numpy backend only supports '
f'{DynamicSystem.__name__}, but we got {type(obj_or_func)}.')
return ast2numba.jit_cls(obj_or_func,
nopython=nopython,
fastmath=fastmath,
parallel=parallel,
nogil=nogil,
show_code=show_code)
return ast2numba.jit(obj_or_fun, show_code=show_code,
nopython=nopython, fastmath=fastmath, parallel=parallel, nogil=nogil,
forceobj=forceobj, looplift=looplift, error_model=error_model,
inline=inline, boundscheck=boundscheck)
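A minimal usage sketch of the numpy-backend JIT, assuming Numba is installed and the numpy backend is active (the model class below is hypothetical):

import brainpy as bp

bp.math.use_backend('numpy')

# a plain function falls through to the native numba.jit path
squared_sum = bp.math.jit(lambda x: (x ** 2).sum())

# a model instance is recompiled through ast2numba.jit; every step
# function of the object (and of its children) is rewritten and jitted
# hh = HH(100)                             # hypothetical NeuGroup subclass
# hh_jit = bp.math.jit(hh, show_code=True)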


def vmap(obj_or_func, *args, **kwargs):
"""Vectorization Compilation in NumPy backend.

Vectorization compilation is not implemented in NumPy backend.
Please refer to the vectorization compilation in JAX backend
`bp.math.jax.vmap() <brainpy.math.jax.vmap.rst>`_.

Parameters
----------
obj_or_func
args
kwargs

Returns
-------

"""
_msg = 'Vectorization compilation is only supported in the JAX backend; it is not available in the numpy backend. \n' \
'You can switch to the JAX backend by `brainpy.math.use_backend("jax")`'
logger.error(_msg)
@@ -76,6 +150,22 @@ def vmap(obj_or_func, *args, **kwargs):


def pmap(obj_or_func, *args, **kwargs):
"""Parallel Compilation in NumPy backend.

Parallel compilation is not implemented in NumPy backend.
Please refer to the parallel compilation in JAX backend
`bp.math.jax.pmap() <brainpy.math.jax.pmap.rst>`_.

Parameters
----------
obj_or_func
args
kwargs

Returns
-------

"""
_msg = 'Parallel compilation is only supported in the JAX backend; it is not available in the numpy backend. \n' \
'You can switch to the JAX backend by `brainpy.math.use_backend("jax")`'
logger.error(_msg)


+ 9
- 0
brainpy/math/numpy/ndarray.py View File

@@ -15,6 +15,9 @@ ndarray = np.ndarray


class Variable(np.ndarray):
"""Variable.

"""
def __new__(cls, value, type='', replicate=None):
value2 = np.asarray(value)
obj = value2.view(cls)
@@ -40,6 +43,9 @@ class Variable(np.ndarray):


class TrainVar(Variable):
"""Trainable Variable.

"""
__slots__ = ()

def __new__(cls, value, replicate=None):
@@ -47,6 +53,9 @@ class TrainVar(Variable):


class Parameter(Variable):
"""Parameter.

"""
__slots__ = ()

def __new__(cls, value, replicate=None):


+ 18
- 0
brainpy/math/numpy/ops.py View File

@@ -383,18 +383,36 @@ complex128 = numpy.complex128


def set_int_(int_type):
"""Set the default ``int`` type.

Parameters
----------
int_type : type
"""
global int_
assert isinstance(int_type, type)
int_ = int_type


def set_float_(float_type):
"""Set the default ``float`` type.

Parameters
----------
float_type : type
"""
global float_
assert isinstance(float_type, type)
float_ = float_type


def set_complex_(complex_type):
"""Set the default ``complex`` type.

Parameters
----------
complex_type : type
"""
global complex_
assert isinstance(complex_type, type)
complex_ = complex_type
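For example, switching simulations to single precision is a one-liner per type (a usage sketch, assuming the numpy backend is active):

import numpy as np
import brainpy as bp

bp.math.set_float_(np.float32)   # bp.math.float_ is now np.float32
bp.math.set_int_(np.int32)       # bp.math.int_ is now np.int32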


+ 0
- 2
brainpy/simulation/__init__.py View File

@@ -9,7 +9,5 @@ This module provides APIs for brain simulations.

from .brainobjects import *
from .connectivity import *
from .inputs import *
from .measure import *
from .monitor import *
from .utils import *

+ 0
- 3
brainpy/simulation/brainobjects/__init__.py View File

@@ -1,11 +1,8 @@
# -*- coding: utf-8 -*-

from .base import *
from .channel import *
from .delays import *
from .dendrite import *
from .molecular import *
from .network import *
from .neuron import *
from .soma import *
from .synapse import *

+ 27
- 26
brainpy/simulation/brainobjects/base.py View File

@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-

import numpy as np

from brainpy import math, errors
from brainpy.base import collector
from brainpy.base.base import Base
@@ -9,7 +7,7 @@ from brainpy.simulation import utils
from brainpy.simulation.monitor import Monitor

__all__ = [
'DynamicSystem',
'DynamicalSystem',
'Container',
]

@@ -19,8 +17,8 @@ _error_msg = 'Unknown model type: {type}. ' \
'tuple of function names.'


class DynamicSystem(Base):
"""Base Dynamic System class.
class DynamicalSystem(Base):
"""Base Dynamical System class.

Any object that has step functions is a dynamical system.
That is to say, in BrainPy, the essence of the dynamical system
@@ -37,12 +35,13 @@ class DynamicSystem(Base):
"""
target_backend = None

def __init__(self, steps=(), monitors=None, name=None):
super(DynamicSystem, self).__init__(name=name)
def __init__(self, steps=None, monitors=None, name=None):
super(DynamicalSystem, self).__init__(name=name)

# step functions
if steps is None:
steps = ('update', )
self.steps = collector.Collector()
steps = tuple() if steps is None else steps
if isinstance(steps, tuple):
for step in steps:
if isinstance(step, str):
@@ -85,12 +84,20 @@ class DynamicSystem(Base):
raise errors.BrainPyError(f'Unknown setting of "target_backend": {self.target_backend}')

# runner and run function
self.driver = None
self._input_step = lambda _t, _dt: None
self._monitor_step = lambda _t, _dt: None

def update(self, _t, _dt):
raise NotImplementedError
"""The function to specify the updating rule.

Parameters
----------
_t : float
The current time.
_dt : float
The time step.
"""
raise NotImplementedError('The subclass must implement the "update" function.')

def _step_run(self, _t, _dt):
self._monitor_step(_t, _dt)
@@ -104,7 +111,7 @@ class DynamicSystem(Base):
Parameters
----------
inputs : list, tuple
The inputs for this instance of DynamicSystem. It should the format
The inputs for this instance of DynamicalSystem. It should be in the format
of `[(target, value, [type, operation])]`, where `target` is the
input target, `value` is the input value, `type` is the input type
(such as "fix" or "iter"), `operation` is the operation for inputs
@@ -164,7 +171,7 @@ class DynamicSystem(Base):
# --
# 6.1 add 'ts' variable to every monitor
# 6.2 wrap the monitor items of the 'list' type into the 'ndarray' type
for node in [self] + list(self.nodes().unique().values()):
for node in self.nodes().unique().values():
if node.mon.num_item > 0:
node.mon.ts = times
for key, val in list(node.mon.item_contents.items()):
@@ -174,16 +181,12 @@ class DynamicSystem(Base):
return running_time


class Container(DynamicSystem):
class Container(DynamicalSystem):
"""Container object which is designed to add other instances of DynamicalSystem.

What's different from other DynamicalSystem objects is that Container has
one more useful function, :py:func:`add`, which can be used to add
children objects.

Parameters
----------
steps : function, list of function, tuple of function, dict of (str, function), optional
steps : tuple of function, tuple of str, dict of (str, function), optional
The step functions.
monitors : tuple, list, Monitor, optional
The monitor object.
@@ -192,23 +195,23 @@ class Container(DynamicSystem):
show_code : bool
Whether show the formatted code.
ds_dict : dict of (str, DynamicalSystem)
The instance of DynamicSystem with the format of "key=dynamic_system".
The instance of DynamicalSystem with the format of "key=dynamic_system".
"""

def __init__(self, *ds_tuple, steps=None, monitors=None, name=None, **ds_dict):
# children dynamical systems
self.child_ds = dict()
for ds in ds_tuple:
if not isinstance(ds, DynamicSystem):
if not isinstance(ds, DynamicalSystem):
raise errors.BrainPyError(f'{self.__class__.__name__} receives instances of '
f'DynamicSystem, however, we got {type(ds)}.')
f'DynamicalSystem, however, we got {type(ds)}.')
if ds.name in self.child_ds:
raise ValueError(f'{ds.name} has been paired with {ds}. Please use a unique name.')
self.child_ds[ds.name] = ds
for key, ds in ds_dict.items():
if not isinstance(ds, DynamicSystem):
if not isinstance(ds, DynamicalSystem):
raise errors.BrainPyError(f'{self.__class__.__name__} receives instances of '
f'DynamicSystem, however, we got {type(ds)}.')
f'DynamicalSystem, however, we got {type(ds)}.')
if key in self.child_ds:
raise ValueError(f'{key} has been paired with {ds}. Please use a unique name.')
self.child_ds[key] = ds
@@ -234,7 +237,7 @@ class Container(DynamicSystem):

def vars(self, method='absolute'):
"""Collect all the variables (and their names) contained
in the list and its children instance of DynamicSystem.
in the list and its children instance of DynamicalSystem.

Parameters
----------
@@ -263,5 +266,3 @@ class Container(DynamicSystem):
return children_ds[item]
else:
return super(Container, self).__getattribute__(item)



+ 0
- 27
brainpy/simulation/brainobjects/channel.py View File

@@ -1,27 +0,0 @@
# -*- coding: utf-8 -*-

from brainpy.simulation.brainobjects.base import DynamicSystem

__all__ = [
'Channel'
]


class Channel(DynamicSystem):
"""Ion Channel object.

Parameters
----------
name : str
The name of the channel.

"""

def __init__(self, name=None, **kwargs):
super(Channel, self).__init__(name=name, **kwargs)

def update(self, *args, **kwargs):
raise NotImplementedError

def current(self, *args, **kwargs):
raise NotImplementedError

+ 32
- 17
brainpy/simulation/brainobjects/delays.py View File

@@ -4,7 +4,7 @@ import math as pmath

from brainpy import errors
from brainpy import math as bmath
from brainpy.simulation.brainobjects.base import DynamicSystem
from brainpy.simulation.brainobjects.base import DynamicalSystem
from brainpy.simulation.utils import size2len

__all__ = [
@@ -13,7 +13,10 @@ __all__ = [
]


class Delay(DynamicSystem):
class Delay(DynamicalSystem):
"""Base class to model delay variables.

"""
def __init__(self, steps=('update',), name=None):
super(Delay, self).__init__(steps=steps, monitors=None, name=name)

@@ -22,15 +25,15 @@ class Delay(DynamicSystem):


class ConstantDelay(Delay):
"""Constant delay object.
"""Class used to model constant delay variables.

For example:

>>> ConstantDelay(size=10, delay=10.)
>>> import brainpy as bp
>>>
>>> import numpy as np
>>> ConstantDelay(size=100, delay=lambda: np.random.randint(5, 10))
>>> ConstantDelay(size=100, delay=np.random.random(100) * 4 + 10)
>>> bp.ConstantDelay(size=10, delay=10.)
>>> bp.ConstantDelay(size=100, delay=lambda: bp.math.random.randint(5, 10))
>>> bp.ConstantDelay(size=100, delay=bp.math.random.random(100) * 4 + 10)

Parameters
----------
@@ -57,9 +60,9 @@ class ConstantDelay(Delay):
# data and operations
if isinstance(delay, (int, float)): # uniform delay
self.uniform_delay = True
self.num_step = bmath.array([int(pmath.ceil(delay / bmath.get_dt())) + 1])
self.data = bmath.Variable(bmath.zeros((self.num_step[0],) + self.size, dtype=dtype))
self.out_idx = bmath.Variable(bmath.array([0]))
self.num_step = int(pmath.ceil(delay / bmath.get_dt())) + 1
self.data = bmath.Variable(bmath.zeros((self.num_step,) + self.size, dtype=dtype))
self.out_idx = bmath.Variable(0)
self.in_idx = bmath.Variable(self.num_step - 1)

self.push = self._push_for_uniform_delay
@@ -95,22 +98,34 @@ class ConstantDelay(Delay):
super(ConstantDelay, self).__init__(name=name)

def _pull_for_uniform_delay(self):
return self.data[self.out_idx[0]]
"""Pull delayed data for variables with the uniform delay.
"""
return self.data[self.out_idx]

def _pull_for_nonuniform_delay(self):
"""Pull delayed data for variables with the non-uniform delay.
"""
return self.data[self.out_idx, self.diag]

def _push_for_uniform_delay(self, value):
self.data[self.in_idx[0]] = value
"""Push the latest data to the delay bottom.
"""
self.data[self.in_idx] = value

def _push_for_nonuniform_delay(self, value):
"""Push the latest data to the delay bottom.
"""
self.data[self.in_idx, self.diag] = value

def update(self, _t, _dt):
self.in_idx[:] = (self.in_idx + 1) % self.num_step
self.out_idx[:] = (self.out_idx + 1) % self.num_step
"""Update the delay index.
"""
self.in_idx[...] = (self.in_idx + 1) % self.num_step
self.out_idx[...] = (self.out_idx + 1) % self.num_step

def reset(self):
self.data[:] = 0
self.in_idx[:] = self.num_step - 1
self.out_idx[:] = 0 if self.uniform_delay else bmath.zeros(self.num, dtype=bmath.int_)
"""Reset the variables.
"""
self.data[...] = 0
self.in_idx[...] = self.num_step - 1
self.out_idx[...] = 0
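The uniform-delay case is a plain ring buffer; the following standalone sketch (with hypothetical sizes) mirrors the push/pull/update semantics above:

import numpy as np

num_step = 4                      # e.g., delay = 3 * dt, plus one slot
data = np.zeros((num_step, 10))   # (time slots, variable size)
in_idx, out_idx = num_step - 1, 0

def push(value):
    data[in_idx] = value          # write the newest value

def pull():
    return data[out_idx]          # read the value delayed by (num_step - 1) steps

def update():
    global in_idx, out_idx        # advance both cursors each time step
    in_idx = (in_idx + 1) % num_step
    out_idx = (out_idx + 1) % num_step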

+ 0
- 16
brainpy/simulation/brainobjects/dendrite.py View File

@@ -1,16 +0,0 @@
# -*- coding: utf-8 -*-

from brainpy.simulation.brainobjects.base import DynamicSystem

__all__ = [
'Dendrite'
]


class Dendrite(DynamicSystem):
"""Dendrite object.

"""

def __init__(self, name, **kwargs):
super(Dendrite, self).__init__(name=name, **kwargs)

+ 3
- 3
brainpy/simulation/brainobjects/molecular.py View File

@@ -1,14 +1,14 @@
# -*- coding: utf-8 -*-

from brainpy.simulation.brainobjects.base import DynamicSystem
from brainpy.simulation.brainobjects.base import DynamicalSystem

__all__ = [
'Molecular'
]


class Molecular(DynamicSystem):
"""Molecular object for neuron modeling.
class Molecular(DynamicalSystem):
"""Base class to model molecular objects.

"""



+ 1
- 1
brainpy/simulation/brainobjects/network.py View File

@@ -8,7 +8,7 @@ __all__ = [


class Network(Container):
"""Network object, an alias of Container.
"""Base class to model network objects, an alias of Container.

Network instantiates a network, which is designed to hold
neurons, synapses, and other brain objects.


+ 125
- 17
brainpy/simulation/brainobjects/neuron.py View File

@@ -1,24 +1,28 @@
# -*- coding: utf-8 -*-

from brainpy import errors
from brainpy import errors, math
from brainpy.base.collector import Collector
from brainpy.simulation import utils
from brainpy.simulation.brainobjects.base import DynamicSystem, Container
from brainpy.simulation.brainobjects.channel import Channel
from brainpy.integrators.wrapper import odeint
from brainpy.simulation.brainobjects.base import DynamicalSystem

__all__ = [
'NeuGroup',
'Channel',
'CondNeuGroup',
'Soma',
'Dendrite',
]


class NeuGroup(DynamicSystem):
"""Neuron Group.
class NeuGroup(DynamicalSystem):
"""Base class to model neuronal groups.

Parameters
----------
size : int, tuple of int, list of int
The neuron group geometry.
steps : function, list of function, tuple of function, dict of (str, function), optional
steps : tuple of str, tuple of function, dict of (str, function), optional
The step functions.
name : str, optional
The group name.
@@ -26,7 +30,6 @@ class NeuGroup(DynamicSystem):

def __init__(self, size, name=None, steps=('update',), **kwargs):
# size
# ----
if isinstance(size, (list, tuple)):
if len(size) <= 0:
raise errors.BrainPyError('size must be int, or a tuple/list of int.')
@@ -41,22 +44,127 @@ class NeuGroup(DynamicSystem):
self.num = utils.size2len(size)

# initialize
# ----------
super(NeuGroup, self).__init__(steps=steps, name=name, **kwargs)

def update(self, _t, _dt):
raise NotImplementedError
"""The function to specify the updating rule.

Parameters
----------
_t : float
The current time.
_dt : float
The time step.
"""
raise NotImplementedError(f'Subclass of {self.__class__.__name__} must '
f'implement "update" function.')

class CondNeuGroup(Container):
def __init__(self, *channels, monitors=None, name=None, **dict_channels):

children_channels = dict()
# ----------------------------------------------------
#
# Conductance Neuron Model
#
# ----------------------------------------------------

for ch in channels:
assert isinstance(ch, Channel)
class Channel(DynamicalSystem):
"""Base class to model ion channels."""

for key, ch in dict_channels.items():
assert isinstance(ch, Channel)
def __init__(self, **kwargs):
super(Channel, self).__init__(**kwargs)

super(CondNeuGroup, self).__init__(monitors=monitors, name=name, **children_channels)
def init(self, host):
"""Initialize variables in the channel."""
if not isinstance(host, CondNeuGroup):
raise ValueError(f'The host of a Channel must be a CondNeuGroup, but we got {type(host)}.')
self.host = host

def update(self, _t, _dt):
"""The function to specify the updating rule."""
raise NotImplementedError(f'Subclass of {self.__class__.__name__} must '
f'implement "update" function.')


class CondNeuGroup(NeuGroup):
"""Conductance neuron group."""
def __init__(self, C=1., A=1e-3, Vth=0., **channels):
self.C = C
self.A = A
self.Vth = Vth

# children channels
self.child_channels = Collector()
for key, ch in channels.items():
if not isinstance(ch, Channel):
raise errors.BrainPyError(f'{self.__class__.__name__} only receives {Channel.__name__} '
f'instances, while we got {type(ch)}: {ch}.')
self.child_channels[key] = ch

def init(self, size, monitors=None, name=None):
super(CondNeuGroup, self).__init__(size, steps=('update',), monitors=monitors, name=name)

# initialize variables
self.V = math.Variable(math.zeros(self.num, dtype=math.float_))
self.spike = math.Variable(math.zeros(self.num, dtype=math.bool_))
self.input = math.Variable(math.zeros(self.num, dtype=math.float_))

# initialize node variables
for ch in self.child_channels.values():
ch.init(host=self)

# checking
self._output_channels = []
self._update_channels = []
for ch in self.child_channels.values():
if not hasattr(ch, 'I'):
self._update_channels.append(ch)
else:
if not isinstance(getattr(ch, 'I'), math.Variable):
raise errors.BrainPyError(f'The "I" attribute of {ch} must be a math.Variable.')
self._output_channels.append(ch)

return self

def update(self, _t, _dt):
for ch in self._update_channels:
ch.update(_t, _dt)
for ch in self._output_channels:
ch.update(_t, _dt)
self.input += ch.I

# update variables
V = self.V + self.input / self.C * _dt
# V = self.V + self.input / self.C / self.A * _dt
self.spike[:] = math.logical_and(V >= self.Vth, self.V < self.Vth)
self.V[:] = V
self.input[:] = 0.

def __getattr__(self, item):
child_channels = super(CondNeuGroup, self).__getattribute__('child_channels')
if item in child_channels:
return child_channels[item]
else:
return super(CondNeuGroup, self).__getattribute__(item)
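A hedged sketch of how these pieces compose; the leak channel below is hypothetical and not part of this PR:

import brainpy as bp

class Leak(bp.Channel):
    # an "output" channel: it owns an ``I`` Variable, so CondNeuGroup
    # adds its current to the host's input at every update step
    def init(self, host):
        super(Leak, self).init(host)
        self.g, self.E = 0.1, -70.
        self.I = bp.math.Variable(bp.math.zeros(host.num))

    def update(self, _t, _dt):
        self.I[:] = self.g * (self.E - self.host.V)

# neu = bp.CondNeuGroup(C=1., Vth=0., leak=Leak()).init(100)
# channels without an "I" attribute would instead be treated as pure
# update channels and stepped before the output channels.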


# ---------------------------------------------------------
#
# Multi-Compartment Neuron Model
#
# ---------------------------------------------------------

class Dendrite(DynamicalSystem):
"""Base class to model dendrites.

"""

def __init__(self, name, **kwargs):
super(Dendrite, self).__init__(name=name, **kwargs)


class Soma(DynamicalSystem):
"""Base class to model soma in multi-compartment neuron models.

"""

def __init__(self, name, **kwargs):
super(Soma, self).__init__(name=name, **kwargs)

+ 0
- 16
brainpy/simulation/brainobjects/soma.py View File

@@ -1,16 +0,0 @@
# -*- coding: utf-8 -*-

from brainpy.simulation.brainobjects.base import DynamicSystem

__all__ = [
'Soma'
]


class Soma(DynamicSystem):
"""Soma object for neuron modeling.

"""

def __init__(self, name, **kwargs):
super(Soma, self).__init__(name=name, **kwargs)

+ 3
- 3
brainpy/simulation/brainobjects/synapse.py View File

@@ -2,7 +2,7 @@

from brainpy import errors, math
from brainpy.simulation.connectivity import TwoEndConnector, MatConn, IJConn
from brainpy.simulation.brainobjects.base import DynamicSystem
from brainpy.simulation.brainobjects.base import DynamicalSystem
from brainpy.simulation.brainobjects.delays import ConstantDelay
from brainpy.simulation.brainobjects.neuron import NeuGroup

@@ -11,8 +11,8 @@ __all__ = [
]


class TwoEndConn(DynamicSystem):
"""Two End Synaptic Connections.
class TwoEndConn(DynamicalSystem):
"""Base class to model two-end synaptic connections.

Parameters
----------


+ 0
- 16
brainpy/simulation/connectivity/classic_conn.py View File

@@ -1,16 +0,0 @@
# -*- coding: utf-8 -*-


from brainpy.simulation.connectivity.base import TwoEndConnector

try:
import numba as nb
except ModuleNotFoundError:
nb = None

__all__ = [
]


class CompleteGraph(TwoEndConnector):
pass

+ 4
- 2
brainpy/simulation/connectivity/custom_conn.py View File

@@ -13,6 +13,7 @@ __all__ = [


class MatConn(TwoEndConnector):
"""Connector built from the connection matrix."""
def __init__(self, conn_mat):
super(MatConn, self).__init__()

@@ -25,6 +26,7 @@ class MatConn(TwoEndConnector):


class IJConn(TwoEndConnector):
"""Connector built from the ``pre_ids`` and ``post_ids`` connections."""
def __init__(self, i, j):
super(IJConn, self).__init__()

@@ -37,10 +39,10 @@ class IJConn(TwoEndConnector):
self.post_ids = math.asarray(j, dtype=math.int_)

def __call__(self, pre_size, post_size):
# this is ncessary when create "pre2post" ,
# this is necessary when create "pre2post" ,
# "pre2syn" etc. structures
self.num_pre = utils.size2len(pre_size)
# this is ncessary when create "post2pre" ,
# this is necessary when create "post2pre" ,
# "post2syn" etc. structures
self.num_post = utils.size2len(post_size)
return self

+ 1
- 32
brainpy/simulation/monitor.py View File

@@ -97,25 +97,6 @@ class Monitor(object):
self.num_item = len(variables)
super(Monitor, self).__init__()

def check(self, mon_key):
"""Check whether the key has defined in the target.

Parameters
----------
mon_key : str
The string to specify the target item.
"""
if mon_key in self._KEYWORDS:
raise ValueError(f'"{mon_key}" is a keyword in Monitor class. '
f'Please change to another name.')
data = self.target
for s in mon_key.split('.'):
try:
data = getattr(data, s)
except AttributeError:
raise errors.BrainPyError(f"Item \"{mon_key}\" isn't defined in model "
f"{self.target}, so it can not be monitored.")

def build(self):
if not self.has_build:
item_names = []
@@ -140,7 +121,7 @@ class Monitor(object):
else:
raise errors.BrainPyError(f'Unknown monitor item: {str(mon_var)}')

self.check(mon_key)
# self.check(mon_key)
item_names.append(mon_key)
item_indices.append(mon_idx)
item_contents[mon_key] = []
@@ -171,18 +152,6 @@ class Monitor(object):
self.num_item = len(item_contents)
self.has_build = True

@staticmethod
def check_mon_idx(mon_idx):
if isinstance(mon_idx, int):
mon_idx = math.array([mon_idx])
else:
mon_idx = math.array(mon_idx)
if len(math.shape(mon_idx)) != 1:
raise errors.BrainPyError(f'Monitor item index only supports '
f'an int or a one-dimensional vector, '
f'not {str(mon_idx)}')
return mon_idx

def __getitem__(self, item: str):
"""Get item in the monitor values.



+ 7
- 2
brainpy/simulation/utils.py View File

@@ -12,6 +12,9 @@ __all__ = [
'check_duration',
'run_model',
'check_and_format_inputs',
'build_input_func',
'check_and_format_monitors',
'build_monitor_func',
]

SUPPORTED_INPUT_OPS = ['-', '+', '*', '/', '=']
@@ -115,7 +118,7 @@ def check_and_format_inputs(host, inputs):

Parameters
----------
host : DynamicSystem
host : DynamicalSystem
The host which contains all data.
inputs : tuple, list
The inputs of the population.
@@ -158,6 +161,7 @@ def check_and_format_inputs(host, inputs):

# absolute access
nodes = host.nodes(method='absolute')
nodes[host.name] = host
for one_input in inputs:
key = one_input[0]
if not isinstance(key, str):
@@ -285,7 +289,7 @@ def check_and_format_monitors(host):

# reshape monitors
# ----
all_nodes = [host] + list(host.nodes().unique().values())
all_nodes = list(host.nodes().unique().values())
for node in all_nodes:
node.mon.build() # build the monitor
for key in node.mon.item_contents.keys():
@@ -359,6 +363,7 @@ def build_monitor_func(monitors, show_code=False):

for node, key, target, variable, idx, interval in monitors:
code_scope[node.name] = node
code_scope[target.name] = target

# get data
data = target


+ 18
- 3
changelog.rst View File

@@ -17,6 +17,7 @@ Highlights of core changes:
- support numba ``jit`` on class objects
- unified numpy-like API


``base`` module
~~~~~~~~~~~~~~~

@@ -24,11 +25,13 @@ Highlights of core changes:
- ``Function`` to wrap functions
- ``Collector`` and ``ArrayCollector`` to collect variables, integrators, nodes and others


``integrators`` module
~~~~~~~~~~~~~~~~~~~~~~

- detailed documentation for ODE numerical methods


``simulation`` module
~~~~~~~~~~~~~~~~~~~~~

@@ -36,6 +39,13 @@ Highlights of core changes:
- support multi-scale modeling
- support large-scale modeling
- support simulation on GPUs
- fix bugs on ``firing_rate()``
- remove ``_i`` in ``update()`` function, replace ``_i`` with ``_dt``,
meaning the dynamical system has the canonical equation form
of :math:`dx/dt = f(x, t, dt)`
- reimplement the ``input_step`` and ``monitor_step`` in a more intuitive way
- support setting `dt` at the single-object level (i.e., a single instance of DynamicSystem)


``dnn`` module
~~~~~~~~~~~~~~
@@ -47,10 +57,15 @@ Highlights of core changes:
- initializations
- commonly used layers

``measure`` module
~~~~~~~~~~~~~~~~~~

- fix bugs on ``firing_rate()``
documentation
~~~~~~~~~~~~~

- documentation for ``base`` module
- documentation for ``math`` module
- documentation for ``integrators`` module
- documentation for ``dnn`` module






+ 0
- 60
develop/benchmark/COBA/COBA-ANNarchy.py View File

@@ -1,60 +0,0 @@
from ANNarchy import *

setup(dt=0.05)

NE = 3200 # Number of excitatory cells
NI = 800 # Number of inhibitory cells
duration = 5.0 * 1000.0 # Total time of the simulation

COBA = Neuron(
parameters="""
El = -60.0 : population
Vr = -60.0 : population
Erev_exc = 0.0 : population
Erev_inh = -80.0 : population
Vt = -50.0 : population
tau = 20.0 : population
tau_exc = 5.0 : population
tau_inh = 10.0 : population
I = 20.0 : population
""",
equations="""
tau * dv/dt = (El - v) + g_exc * (Erev_exc - v) + g_inh * (Erev_inh - v ) + I
tau_exc * dg_exc/dt = - g_exc
tau_inh * dg_inh/dt = - g_inh
""",
spike="""
v > Vt
""",
reset="""
v = Vr
""",
refractory=5.0
)

P = Population(geometry=NE + NI, neuron=COBA)
Pe = P[:NE]
Pi = P[NE:]
P.v = Normal(-55.0, 5.0)

we = 6. / 10. # excitatory synaptic weight (voltage)
wi = 67. / 10. # inhibitory synaptic weight

Ce = Projection(pre=Pe, post=P, target='exc')
Ce.connect_fixed_probability(weights=we, probability=0.02)
Ci = Projection(pre=Pi, post=P, target='inh')
Ci.connect_fixed_probability(weights=wi, probability=0.02)

compile()

m = Monitor(P, 'spike')
simulate(duration)

t, n = m.raster_plot()
print('Number of spikes:', len(t))

from pylab import *
plot(t, n, '.')
xlabel('Time (ms)')
ylabel('Neuron index')
show()

+ 0
- 69
develop/benchmark/COBA/COBA-Nest.py View File

@@ -1,69 +0,0 @@
import time

import numpy
from nest import *

numpy.random.seed(98765)
SetKernelStatus({"resolution": 0.1})
# nb_threads = 4
# SetKernelStatus({"local_num_threads": int(nb_threads)})

simtime = 5 * 1000.0 # [ms] Simulation time
NE = 3200 # number of exc. neurons
NI = 800 # number of inh. neurons

SetDefaults("iaf_cond_exp", {
"C_m": 200.,
"g_L": 10.,
"tau_syn_ex": 5.,
"tau_syn_in": 10.,
"E_ex": 0.,
"E_in": -80.,
"t_ref": 5.,
"E_L": -60.,
"V_th": -50.,
"I_e": 200.,
"V_reset": -60.,
"V_m": -60.
})

nodes_ex = Create("iaf_cond_exp", NE)
nodes_in = Create("iaf_cond_exp", NI)
nodes = nodes_ex + nodes_in

# Initialize the membrane potentials
v = -55.0 + 5.0 * numpy.random.normal(size=NE + NI)
for i, node in enumerate(nodes):
SetStatus([node], {"V_m": v[i]})

# Create the synapses
w_exc = 6.
w_inh = -67.
SetDefaults("static_synapse", {"delay": 0.1})
CopyModel("static_synapse", "excitatory", {"weight": w_exc})
CopyModel("static_synapse", "inhibitory", {"weight": w_inh})



Connect(nodes_ex, nodes,{'rule': 'pairwise_bernoulli', 'p': 0.02}, syn_spec="excitatory")
Connect(nodes_in, nodes,{'rule': 'pairwise_bernoulli', 'p': 0.02}, syn_spec="inhibitory")

# Spike detectors
SetDefaults("spike_detector", {"withtime": True,
"withgid": True,
"to_file": False})
espikes = Create("spike_detector")
ispikes = Create("spike_detector")
Connect(nodes_ex, espikes, 'all_to_all')
Connect(nodes_in, ispikes, 'all_to_all')

tstart = time.time()
Simulate(simtime)
print('Done in', time.time() - tstart)

events_ex = GetStatus(espikes, "n_events")[0]
events_in = GetStatus(ispikes, "n_events")[0]
print('Total spikes:', events_ex + events_in)

nest.raster_plot.from_device(espikes, hist=True)
nest.raster_plot.show()

+ 0
- 355
develop/benchmark/COBA/COBA.py View File

@@ -1,355 +0,0 @@
# -*- coding: utf-8 -*-

from ANNarchy import *
from brian2 import *
from nest import *
import brainpy as bp

import os
import json
import time
import numpy as np
import matplotlib.pyplot as plt

dt = 0.05
setup(dt=dt)


def run_brianpy(num_neu, duration, device='cpu'):
bp.backend.set('numba', dt=dt)

# Parameters
num_inh = int(num_neu / 5)
num_exc = num_neu - num_inh
taum = 20
taue = 5
taui = 10
Vt = -50
Vr = -60
El = -60
Erev_exc = 0.
Erev_inh = -80.
I = 20.
we = 0.6 # excitatory synaptic weight (voltage)
wi = 6.7 # inhibitory synaptic weight
ref = 5.0

class LIF(bp.NeuGroup):
target_backend = ['numpy', 'numba']

def __init__(self, size, **kwargs):
super(LIF, self).__init__(size=size, **kwargs)
# variables
self.V = bp.ops.zeros(size)
self.spike = bp.ops.zeros(size)
self.ge = bp.ops.zeros(size)
self.gi = bp.ops.zeros(size)
self.input = bp.ops.zeros(size)
self.t_last_spike = bp.ops.ones(size) * -1e7

@staticmethod
@bp.odeint
def int_g(ge, gi, t):
dge = - ge / taue
dgi = - gi / taui
return dge, dgi

@staticmethod
@bp.odeint
def int_V(V, t, ge, gi):
dV = (ge * (Erev_exc - V) + gi * (Erev_inh - V) + El - V + I) / taum
return dV

def update(self, _t, _i):
self.ge, self.gi = self.int_g(self.ge, self.gi, _t)
for i in range(self.size[0]):
self.spike[i] = 0.
if (_t - self.t_last_spike[i]) > ref:
V = self.int_V(self.V[i], _t, self.ge[i], self.gi[i])
if V >= Vt:
self.V[i] = Vr
self.spike[i] = 1.
self.t_last_spike[i] = _t
else:
self.V[i] = V
self.input[i] = I

class ExcSyn(bp.TwoEndConn):
target_backend = ['numpy', 'numba']

def __init__(self, pre, post, conn, **kwargs):
self.conn = conn(pre.size, post.size)
self.pre2post = self.conn.requires('pre2post')
super(ExcSyn, self).__init__(pre=pre, post=post, **kwargs)

def update(self, _t, _i):
for pre_id, spike in enumerate(self.pre.spike):
if spike > 0:
for post_i in self.pre2post[pre_id]:
self.post.ge[post_i] += we

class InhSyn(bp.TwoEndConn):
target_backend = ['numpy', 'numba']

def __init__(self, pre, post, conn, **kwargs):
self.conn = conn(pre.size, post.size)
self.pre2post = self.conn.requires('pre2post')
super(InhSyn, self).__init__(pre=pre, post=post, **kwargs)

def update(self, _t, _i):
for pre_id, spike in enumerate(self.pre.spike):
if spike > 0:
for post_i in self.pre2post[pre_id]:
self.post.gi[post_i] += wi

E_group = LIF(num_exc, monitors=['spike'])
E_group.V = np.random.randn(num_exc) * 5. - 55.
I_group = LIF(num_inh, monitors=['spike'])
I_group.V = np.random.randn(num_inh) * 5. - 55.
E2E = ExcSyn(pre=E_group, post=E_group, conn=bp.connect.FixedProb(0.02))
E2I = ExcSyn(pre=E_group, post=I_group, conn=bp.connect.FixedProb(0.02))
I2E = InhSyn(pre=I_group, post=E_group, conn=bp.connect.FixedProb(0.02))
I2I = InhSyn(pre=I_group, post=I_group, conn=bp.connect.FixedProb(0.02))

net = bp.Network(E_group, I_group, E2E, E2I, I2E, I2I)

t0 = time.time()
net.run(duration)
t = time.time() - t0
print(f'BrainPy ({device}) used time {t} s.')
return t


def run_annarchy(num_neu, duration, device='cpu'):
NI = int(num_neu / 5)
NE = num_neu - NI

clear()

COBA = Neuron(
parameters="""
El = -60.0 : population
Vr = -60.0 : population
Erev_exc = 0.0 : population
Erev_inh = -80.0 : population
Vt = -50.0 : population
tau = 20.0 : population
tau_exc = 5.0 : population
tau_inh = 10.0 : population
I = 20.0 : population
""",
equations="""
tau * dv/dt = (El - v) + g_exc * (Erev_exc - v) + g_inh * (Erev_inh - v ) + I
tau_exc * dg_exc/dt = - g_exc
tau_inh * dg_inh/dt = - g_inh
""",
spike="""
v > Vt
""",
reset="""
v = Vr
""",
refractory=5.0
)

# ###########################################
# Population
# ###########################################
P = Population(geometry=NE + NI, neuron=COBA)
Pe = P[:NE]
Pi = P[NE:]
P.v = Normal(-55.0, 5.0)

# ###########################################
# Projections
# ###########################################
we = 6. / 10. # excitatory synaptic weight (voltage)
wi = 67. / 10. # inhibitory synaptic weight

Ce = Projection(pre=Pe, post=P, target='exc')
Ci = Projection(pre=Pi, post=P, target='inh')
Ce.connect_fixed_probability(weights=we, probability=0.02)
Ci.connect_fixed_probability(weights=wi, probability=0.02)

t0 = time.time()
compile()
simulate(duration)
t = time.time() - t0
print(f'ANNarchy ({device}) used time {t} s.')
return t


def run_brian2(num_neu, duration):
num_inh = int(num_neu / 5)
num_exc = num_neu - num_inh

start_scope()
device.reinit()
device.activate()

defaultclock.dt = dt * ms
set_device('cpp_standalone', directory='brian2_COBA')
# device.build()
# prefs.codegen.target = "cython"

taum = 20 * ms
taue = 5 * ms
taui = 10 * ms
Vt = -50 * mV
Vr = -60 * mV
El = -60 * mV
Erev_exc = 0. * mV
Erev_inh = -80. * mV
I = 20. * mvolt

eqs = '''
dv/dt = (ge*(Erev_exc-v)+gi*(Erev_inh-v)-(v-El) + I)*(1./taum) : volt (unless refractory)
dge/dt = -ge/taue : 1
dgi/dt = -gi/taui : 1
'''
net = Network()

# ###########################################
# Population
# ###########################################
P = NeuronGroup(num_exc + num_inh,
model=eqs,
threshold='v>Vt', reset='v = Vr',
refractory=5 * ms, method='euler')
net.add(P)

# ###########################################
# Projections
# ###########################################

we = 0.6 # excitatory synaptic weight (voltage)
wi = 6.7 # inhibitory synaptic weight
Ce = Synapses(P[:num_exc], P, on_pre='ge += we')
Ci = Synapses(P[num_exc:], P, on_pre='gi += wi')
net.add(Ce, Ci)

P.v = (np.random.randn(num_exc + num_inh) * 5. - 55.) * mvolt
Ce.connect(p=0.02)
Ci.connect(p=0.02)

t1 = time.time()
net.run(duration * ms)
t = time.time() - t1
print(f'Brian2 used {t} s')
return t


def run_pynest(num_neu, duration):
NI = int(num_neu / 5)
NE = num_neu - NI

ResetKernel()
SetKernelStatus({"resolution": dt})
# nb_threads = 4
# SetKernelStatus({"local_num_threads": int(nb_threads)})

SetDefaults("iaf_cond_exp", {
"C_m": 200.,
"g_L": 10.,
"tau_syn_ex": 5.,
"tau_syn_in": 10.,
"E_ex": 0.,
"E_in": -80.,
"t_ref": 5.,
"E_L": -60.,
"V_th": -50.,
"I_e": 200.,
"V_reset": -60.,
"V_m": -60.
})

# ###########################################
# Population
# ###########################################
nodes_ex = Create("iaf_cond_exp", NE)
nodes_in = Create("iaf_cond_exp", NI)
nodes = nodes_ex + nodes_in

# Initialize the membrane potentials
v = -55.0 + 5.0 * np.random.normal(size=NE + NI)
for i, node in enumerate(nodes):
SetStatus([node], {"V_m": v[i]})

# Create the synapses
w_exc = 6.
w_inh = -67.
SetDefaults("static_synapse", {"delay": 0.1})
CopyModel("static_synapse", "excitatory", {"weight": w_exc})
CopyModel("static_synapse", "inhibitory", {"weight": w_inh})

conn_dict = {'rule': 'pairwise_bernoulli', 'p': 0.02}
Connect(nodes_ex, nodes, conn_dict, syn_spec="excitatory")
Connect(nodes_in, nodes, conn_dict, syn_spec="inhibitory")

# Spike detectors
SetDefaults("spike_detector", {"withtime": True,
"withgid": True,
"to_file": False})
espikes = Create("spike_detector")
ispikes = Create("spike_detector")
Connect(nodes_ex, espikes, 'all_to_all')
Connect(nodes_in, ispikes, 'all_to_all')

t0 = time.time()
Simulate(duration)
t = time.time() - t0
print(f'PyNest used {t} s')
return t


def main(num_neurons, duration=1000, fn_output=None):
final_results = {'setting': dict(num_neurons=num_neurons,
duration=duration,
dt=dt),
"BRIAN2": [],
"PyNEST": [],
"ANNarchy_cpu": [],
'BrainPy_cpu': []}

for num_neu in num_neurons:
print(f"Running benchmark with {num_neu} neurons.")

if num_neu > 2500:
final_results['PyNEST'].append(np.nan)
else:
t = run_pynest(num_neu, duration)
final_results['PyNEST'].append(t)

t = run_brianpy(num_neu, duration, device='cpu')
final_results['BrainPy_cpu'].append(t)

t = run_annarchy(num_neu, duration, device='cpu')
final_results['ANNarchy_cpu'].append(t)

t = run_brian2(num_neu, duration)
final_results['BRIAN2'].append(t)

if fn_output is not None:
if not os.path.exists(os.path.dirname(fn_output)):
os.makedirs(os.path.dirname(fn_output))
with open(fn_output, 'w') as fout:
json.dump(final_results, fout, indent=2)

plt.plot(num_neurons, final_results["BRIAN2"], label="BRIAN2", linestyle="--", color="r")
plt.plot(num_neurons, final_results["PyNEST"], label="PyNEST", linestyle="--", color="y")
plt.plot(num_neurons, final_results["ANNarchy_cpu"], label="ANNarchy", linestyle="--", color="m")
plt.plot(num_neurons, final_results["BrainPy_cpu"], label="BrainPy", linestyle="--", color="g")

plt.title("Benchmark comparison of neural simulators")
plt.xlabel("Number of input / output neurons")
plt.ylabel("Simulation time (seconds)")
plt.legend(loc=1, prop={"size": 5})
xticks = [num_neurons[0], num_neurons[len(num_neurons) // 2], num_neurons[-1]]
plt.xticks(xticks)
plt.yscale("log")
plt.legend()
plt.show()


if __name__ == "__main__":
main(list(range(500, 9001, 500)), 5000, 'results/COBA.json')

+ 0
- 116
develop/benchmark/COBA/COBA_brainpy.py View File

@@ -1,116 +0,0 @@
# -*- coding: utf-8 -*-


import time
import numpy as np
import brainpy as bp

np.random.seed(1234)
dt = 0.05
bp.backend.set('numba', dt=dt)

# Parameters
num_exc = 3200
num_inh = 800
taum = 20
taue = 5
taui = 10
Vt = -50
Vr = -60
El = -60
Erev_exc = 0.
Erev_inh = -80.
I = 20.
we = 0.6 # excitatory synaptic weight (voltage)
wi = 6.7 # inhibitory synaptic weight
ref = 5.0


class LIF(bp.NeuGroup):
target_backend = ['numpy', 'numba']

def __init__(self, size, **kwargs):
# variables
self.V = bp.ops.zeros(size)
self.spike = bp.ops.zeros(size)
self.ge = bp.ops.zeros(size)
self.gi = bp.ops.zeros(size)
self.input = bp.ops.zeros(size)
self.t_last_spike = bp.ops.ones(size) * -1e7

super(LIF, self).__init__(size=size, **kwargs)

@staticmethod
@bp.odeint
def int_g(ge, gi, t):
dge = - ge / taue
dgi = - gi / taui
return dge, dgi

@staticmethod
@bp.odeint
def int_V(V, t, ge, gi):
dV = (ge * (Erev_exc - V) + gi * (Erev_inh - V) + El - V + I) / taum
return dV

def update(self, _t, _i):
self.ge, self.gi = self.int_g(self.ge, self.gi, _t)
for i in range(self.size[0]):
self.spike[i] = 0.
if (_t - self.t_last_spike[i]) > ref:
V = self.int_V(self.V[i], _t, self.ge[i], self.gi[i])
if V >= Vt:
self.V[i] = Vr
self.spike[i] = 1.
self.t_last_spike[i] = _t
else:
self.V[i] = V
self.input[i] = I


class ExcSyn(bp.TwoEndConn):
target_backend = ['numpy', 'numba']

def __init__(self, pre, post, conn, **kwargs):
self.conn = conn(pre.size, post.size)
self.pre2post = self.conn.requires('pre2post')
super(ExcSyn, self).__init__(pre=pre, post=post, **kwargs)

def update(self, _t, _i):
for pre_id, spike in enumerate(self.pre.spike):
if spike > 0:
for post_i in self.pre2post[pre_id]:
self.post.ge[post_i] += we


class InhSyn(bp.TwoEndConn):
target_backend = ['numpy', 'numba']

def __init__(self, pre, post, conn, **kwargs):
self.conn = conn(pre.size, post.size)
self.pre2post = self.conn.requires('pre2post')
super(InhSyn, self).__init__(pre=pre, post=post, **kwargs)

def update(self, _t, _i):
for pre_id, spike in enumerate(self.pre.spike):
if spike > 0:
for post_i in self.pre2post[pre_id]:
self.post.gi[post_i] += wi


E_group = LIF(num_exc, monitors=['spike'])
E_group.V = np.random.randn(num_exc) * 5. - 55.
I_group = LIF(num_inh, monitors=['spike'])
I_group.V = np.random.randn(num_inh) * 5. - 55.
E2E = ExcSyn(pre=E_group, post=E_group, conn=bp.connect.FixedProb(0.02))
E2I = ExcSyn(pre=E_group, post=I_group, conn=bp.connect.FixedProb(0.02))
I2E = InhSyn(pre=I_group, post=E_group, conn=bp.connect.FixedProb(0.02))
I2I = InhSyn(pre=I_group, post=I_group, conn=bp.connect.FixedProb(0.02))

net = bp.Network(E_group, I_group, E2E, E2I, I2E, I2I)
t0 = time.time()

net.run(5000., report=True)
print('Used time {} s.'.format(time.time() - t0))

bp.visualize.raster_plot(net.ts, E_group.mon.spike, show=True)

+ 0
- 52
develop/benchmark/COBA/COBA_brian2.py View File

@@ -1,52 +0,0 @@
from brian2 import *
import time

defaultclock.dt = 0.05 * ms
set_device('cpp_standalone', directory='brian2_COBA')
# prefs.codegen.target = "cython"

taum = 20 * ms
taue = 5 * ms
taui = 10 * ms
Vt = -50 * mV
Vr = -60 * mV
El = -60 * mV
Erev_exc = 0. * mV
Erev_inh = -80. * mV
I = 20. * mvolt
num_exc = 3200
num_inh = 800

eqs = '''
dv/dt = (ge*(Erev_exc-v)+gi*(Erev_inh-v)-(v-El) + I)*(1./taum) : volt (unless refractory)
dge/dt = -ge/taue : 1
dgi/dt = -gi/taui : 1
'''
net = Network()

P = NeuronGroup(num_exc + num_inh, eqs, threshold='v>Vt', reset='v = Vr',
refractory=5 * ms, method='euler')


we = 0.6 # excitatory synaptic weight (voltage)
wi = 6.7 # inhibitory synaptic weight
Ce = Synapses(P[:3200], P, on_pre='ge += we')
Ci = Synapses(P[3200:], P, on_pre='gi += wi')


P.v = (np.random.randn(num_exc + num_inh) * 5. - 55.) * mvolt
Ce.connect(p=0.02)
Ci.connect(p=0.02)

s_mon = SpikeMonitor(P)


# Run the simulation and measure the wall-clock time
t1 = time.time()
run(5. * second, report='text')
t2 = time.time()
print('Done in', t2 - t1)

plot(s_mon.t / ms, s_mon.i, '.k')
xlabel('Time (ms)')
ylabel('Neuron index')
show()

+ 0
- 275
develop/benchmark/COBA/pynn.py View File

@@ -1,275 +0,0 @@
# coding: utf-8
"""
Balanced network of excitatory and inhibitory neurons.

An implementation of benchmarks 1 and 2 from

Brette et al. (2007) Journal of Computational Neuroscience 23: 349-398

The network is based on the CUBA and COBA models of Vogels & Abbott
(J. Neurosci, 2005). The model consists of a network of excitatory and
inhibitory neurons, connected via current-based "exponential"
synapses (instantaneous rise, exponential decay).
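That is, each presynaptic spike makes the synaptic variable jump
instantaneously (g -> g + w), after which it decays as dg/dt = -g/tau_syn.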


Usage: python VAbenchmarks.py [-h] [--plot-figure] [--use-views] [--use-assembly]
[--use-csa] [--debug DEBUG]
simulator benchmark

positional arguments:
simulator neuron, nest, brian or another backend simulator
benchmark either CUBA or COBA

optional arguments:
-h, --help show this help message and exit
--plot-figure plot the simulation results to a file
--use-views use population views in creating the network
--use-assembly use assemblies in creating the network
--use-csa use the Connection Set Algebra to define the connectivity
--debug DEBUG print debugging information


Andrew Davison, UNIC, CNRS
August 2006

"""

import socket
from math import *
from pyNN.utility import get_simulator, Timer, ProgressBar, init_logging, normalized_filename
from pyNN.random import NumpyRNG, RandomDistribution


# === Configure the simulator ================================================

sim, options = get_simulator(
("benchmark", "either CUBA or COBA"),
("--plot-figure", "plot the simulation results to a file", {"action": "store_true"}),
("--use-views", "use population views in creating the network", {"action": "store_true"}),
("--use-assembly", "use assemblies in creating the network", {"action": "store_true"}),
("--use-csa", "use the Connection Set Algebra to define the connectivity", {"action": "store_true"}),
("--debug", "print debugging information"))

if options.use_csa:
import csa

if options.debug:
init_logging(None, debug=True)

timer = Timer()

# === Define parameters ========================================================

threads = 1
rngseed = 98765
parallel_safe = True

n = 4000 # number of cells
r_ei = 4.0 # number of excitatory cells:number of inhibitory cells
pconn = 0.02 # connection probability
stim_dur = 50. # (ms) duration of random stimulation
rate = 100. # (Hz) frequency of the random stimulation

dt = 0.1 # (ms) simulation timestep
tstop = 1000 # (ms) simulation duration
delay = 0.2

# Cell parameters
area = 20000. # (µm²)
tau_m = 20. # (ms)
cm = 1. # (µF/cm²)
g_leak = 5e-5 # (S/cm²)
if options.benchmark == "COBA":
E_leak = -60. # (mV)
elif options.benchmark == "CUBA":
E_leak = -49. # (mV)
v_thresh = -50. # (mV)
v_reset = -60. # (mV)
t_refrac = 5. # (ms) (clamped at v_reset)
v_mean = -60. # (mV) 'mean' membrane potential, for calculating CUBA weights
tau_exc = 5. # (ms)
tau_inh = 10. # (ms)

# Synapse parameters
if options.benchmark == "COBA":
Gexc = 4. # (nS)
Ginh = 51. # (nS)
elif options.benchmark == "CUBA":
Gexc = 0.27 # (nS)  these weights should be similar to the COBA weights,
Ginh = 4.5  # (nS)  but the depolarising drift should be taken into account
Erev_exc = 0. # (mV)
Erev_inh = -80. # (mV)

### what is the synaptic delay???

# === Calculate derived parameters =============================================

area = area*1e-8 # convert to cm²
cm = cm*area*1000 # convert to nF
Rm = 1e-6/(g_leak*area) # membrane resistance in MΩ
assert tau_m == cm*Rm # just to check
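# Worked numbers for the defaults above: area = 20000 µm² = 2e-4 cm²;
# cm = 1 µF/cm² * 2e-4 cm² * 1000 = 0.2 nF; Rm = 1e-6/(5e-5 * 2e-4) = 100 MΩ;
# hence tau_m = cm*Rm = 0.2 nF * 100 MΩ = 20 ms, which the assert verifies.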
n_exc = int(round((n*r_ei/(1+r_ei)))) # number of excitatory cells
n_inh = n - n_exc # number of inhibitory cells
if options.benchmark == "COBA":
celltype = sim.IF_cond_exp
w_exc = Gexc*1e-3 # We convert conductances to uS
w_inh = Ginh*1e-3
elif options.benchmark == "CUBA":
celltype = sim.IF_curr_exp
w_exc = 1e-3*Gexc*(Erev_exc - v_mean) # (nA) weight of excitatory synapses
w_inh = 1e-3*Ginh*(Erev_inh - v_mean) # (nA)
assert w_exc > 0; assert w_inh < 0
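# With the CUBA defaults this gives w_exc = 1e-3*0.27*60 = 0.0162 nA and
# w_inh = 1e-3*4.5*(-20) = -0.09 nA, hence the sign checks above.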

# === Build the network ========================================================

extra = {'threads' : threads,
'filename': "va_%s.xml" % options.benchmark,
'label': 'VA'}
if options.simulator == "neuroml":
extra["file"] = "VAbenchmarks.xml"

node_id = sim.setup(timestep=dt, min_delay=delay, max_delay=1.0, **extra)
np = sim.num_processes()

host_name = socket.gethostname()
print("Host #%d is on %s" % (node_id + 1, host_name))

print("%s Initialising the simulator with %d thread(s)..." % (node_id, extra['threads']))

cell_params = {
'tau_m' : tau_m, 'tau_syn_E' : tau_exc, 'tau_syn_I' : tau_inh,
'v_rest' : E_leak, 'v_reset' : v_reset, 'v_thresh' : v_thresh,
'cm' : cm, 'tau_refrac' : t_refrac}

if (options.benchmark == "COBA"):
cell_params['e_rev_E'] = Erev_exc
cell_params['e_rev_I'] = Erev_inh

timer.start()

print("%s Creating cell populations..." % node_id)
if options.use_views:
# create a single population of neurons, and then use population views to define
# excitatory and inhibitory sub-populations
all_cells = sim.Population(n_exc + n_inh, celltype(**cell_params), label="All Cells")
exc_cells = all_cells[:n_exc]
exc_cells.label = "Excitatory cells"
inh_cells = all_cells[n_exc:]
inh_cells.label = "Inhibitory cells"
else:
# create separate populations for excitatory and inhibitory neurons
exc_cells = sim.Population(n_exc, celltype(**cell_params), label="Excitatory_Cells")
inh_cells = sim.Population(n_inh, celltype(**cell_params), label="Inhibitory_Cells")
if options.use_assembly:
# group the populations into an assembly
all_cells = exc_cells + inh_cells

if options.benchmark == "COBA":
ext_stim = sim.Population(20, sim.SpikeSourcePoisson(rate=rate, duration=stim_dur), label="expoisson")
rconn = 0.01
ext_conn = sim.FixedProbabilityConnector(rconn)
ext_syn = sim.StaticSynapse(weight=0.1)

print("%s Initialising membrane potential to random values..." % node_id)
rng = NumpyRNG(seed=rngseed, parallel_safe=parallel_safe)
uniformDistr = RandomDistribution('uniform', low=v_reset, high=v_thresh, rng=rng)
if options.use_views:
all_cells.initialize(v=uniformDistr)
else:
exc_cells.initialize(v=uniformDistr)
inh_cells.initialize(v=uniformDistr)

print("%s Connecting populations..." % node_id)
progress_bar = ProgressBar(width=20)
if options.use_csa:
connector = sim.CSAConnector(csa.cset(csa.random(pconn)))
else:
connector = sim.FixedProbabilityConnector(pconn, rng=rng, callback=progress_bar)
exc_syn = sim.StaticSynapse(weight=w_exc, delay=delay)
inh_syn = sim.StaticSynapse(weight=w_inh, delay=delay)

connections = {}
if options.use_views or options.use_assembly:
connections['exc'] = sim.Projection(exc_cells, all_cells, connector, exc_syn, receptor_type='excitatory')
connections['inh'] = sim.Projection(inh_cells, all_cells, connector, inh_syn, receptor_type='inhibitory')
if (options.benchmark == "COBA"):
connections['ext'] = sim.Projection(ext_stim, all_cells, ext_conn, ext_syn, receptor_type='excitatory')
else:
connections['e2e'] = sim.Projection(exc_cells, exc_cells, connector, exc_syn, receptor_type='excitatory')
connections['e2i'] = sim.Projection(exc_cells, inh_cells, connector, exc_syn, receptor_type='excitatory')
connections['i2e'] = sim.Projection(inh_cells, exc_cells, connector, inh_syn, receptor_type='inhibitory')
connections['i2i'] = sim.Projection(inh_cells, inh_cells, connector, inh_syn, receptor_type='inhibitory')
if (options.benchmark == "COBA"):
connections['ext2e'] = sim.Projection(ext_stim, exc_cells, ext_conn, ext_syn, receptor_type='excitatory')
connections['ext2i'] = sim.Projection(ext_stim, inh_cells, ext_conn, ext_syn, receptor_type='excitatory')

# === Setup recording ==========================================================
print("%s Setting up recording..." % node_id)
if options.use_views or options.use_assembly:
all_cells.record('spikes')
exc_cells[[0, 1]].record('v')
else:
exc_cells.record('spikes')
inh_cells.record('spikes')
exc_cells[0, 1].record('v')

buildCPUTime = timer.diff()

# === Save connections to file =================================================

#for prj in connections.keys():
#connections[prj].saveConnections('Results/VAbenchmark_%s_%s_%s_np%d.conn' % (benchmark, prj, options.simulator, np))
saveCPUTime = timer.diff()

# === Run simulation ===========================================================

print("%d Running simulation..." % node_id)

sim.run(tstop)

simCPUTime = timer.diff()

E_count = exc_cells.mean_spike_count()
I_count = inh_cells.mean_spike_count()

# === Print results to file ====================================================

print("%d Writing data to file..." % node_id)

filename = normalized_filename("Results", "VAbenchmarks_%s_exc" % options.benchmark, "pkl",
options.simulator, np)
exc_cells.write_data(filename,
annotations={'script_name': __file__})
inh_cells.write_data(filename.replace("exc", "inh"),
annotations={'script_name': __file__})

writeCPUTime = timer.diff()

if options.use_views or options.use_assembly:
connections = "%d e→e,i %d i→e,i" % (connections['exc'].size(),
connections['inh'].size())
else:
connections = u"%d e→e %d e→i %d i→e %d i→i" % (connections['e2e'].size(),
connections['e2i'].size(),
connections['i2e'].size(),
connections['i2i'].size())

if node_id == 0:
print("\n--- Vogels-Abbott Network Simulation ---")
print("Nodes : %d" % np)
print("Simulation type : %s" % options.benchmark)
print("Number of Neurons : %d" % n)
print("Number of Synapses : %s" % connections)
print("Excitatory conductance : %g nS" % Gexc)
print("Inhibitory conductance : %g nS" % Ginh)
print("Excitatory rate : %g Hz" % (E_count * 1000.0 / tstop,))
print("Inhibitory rate : %g Hz" % (I_count * 1000.0 / tstop,))
print("Build time : %g s" % buildCPUTime)
#print("Save connections time : %g s" % saveCPUTime)
print("Simulation time : %g s" % simCPUTime)
print("Writing time : %g s" % writeCPUTime)


# === Finished with simulator ==================================================

sim.end()

+ 0
- 149
develop/benchmark/COBAHH/COBAHH_brainpy.py View File

@@ -1,149 +0,0 @@
# -*- coding: utf-8 -*-

import time

import numpy as np
import brainpy as bp

bp.profile.set(jit=True, dt=0.1, numerical_method='exponential')

duration = 10 * 1000. # ms
num_exc = 3200
num_inh = 800
Cm = 200 # Membrane Capacitance [pF]

gl = 10. # Leak Conductance [nS]
El = -60. # Resting Potential [mV]
g_Na = 20. * 1000
ENa = 50. # reversal potential (Sodium) [mV]
g_Kd = 6. * 1000 # K Conductance [nS]
EK = -90. # reversal potential (Potassium) [mV]
VT = -63.
Vt = -20.
# Time constants
taue = 5. # Excitatory synaptic time constant [ms]
taui = 10. # Inhibitory synaptic time constant [ms]
# Reversal potentials
Ee = 0. # Excitatory reversal potential (mV)
Ei = -80. # Inhibitory reversal potential (Potassium) [mV]
# excitatory synaptic weight
we = 6. # excitatory synaptic conductance [nS]
# inhibitory synaptic weight
wi = 67. # inhibitory synaptic conductance [nS]


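# NOTE: the Hh class below is defined but never instantiated; the network
# in this script is assembled from bp.NeuType / bp.SynType definitions instead.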
class Hh(bp.NeuGroup):
def __init__(self, size, **kwargs):
super(Hh, self).__init__(size=size, **kwargs)
self.V = bp.ops.zeros(self.num)
self.m = bp.ops.zeros(self.num)
self.n = bp.ops.zeros(self.num)
self.h = bp.ops.zeros(self.num)
self.ge = bp.ops.zeros(self.num)
self.gi = bp.ops.zeros(self.num)
self.spike = bp.ops.zeros(self.num, dtype=bool)

def integral():
pass


@bp.integrate
def int_ge(ge, t):
return - ge / taue


@bp.integrate
def int_gi(gi, t):
return - gi / taui


@bp.integrate
def int_m(m, t, V):
a = 13 - V + VT
b = V - VT - 40
m_alpha = 0.32 * a / (np.exp(a / 4) - 1.)
m_beta = 0.28 * b / (np.exp(b / 5) - 1)
dmdt = (m_alpha * (1 - m) - m_beta * m)
return dmdt


@bp.integrate
def int_h(h, t, V):
h_alpha = 0.128 * np.exp((17 - V + VT) / 18)
h_beta = 4. / (1 + np.exp(-(V - VT - 40) / 5))
dhdt = (h_alpha * (1 - h) - h_beta * h)
return dhdt


@bp.integrate
def int_n(n, t, V):
c = 15 - V + VT
n_alpha = 0.032 * c / (np.exp(c / 5) - 1.)
n_beta = .5 * np.exp((10 - V + VT) / 40)
dndt = (n_alpha * (1 - n) - n_beta * n)
return dndt


@bp.integrate
def int_V(V, t, m, h, n, ge, gi):
g_na_ = g_Na * (m * m * m) * h
g_kd_ = g_Kd * (n * n * n * n)
dvdt = (gl * (El - V) + ge * (Ee - V) + gi * (Ei - V) -
g_na_ * (V - ENa) - g_kd_ * (V - EK)) / Cm
return dvdt


def neu_update(ST, _t):
ST['ge'] = int_ge(ST['ge'], _t)
ST['gi'] = int_gi(ST['gi'], _t)
ST['m'] = int_m(ST['m'], _t, ST['V'])
ST['h'] = int_h(ST['h'], _t, ST['V'])
ST['n'] = int_n(ST['n'], _t, ST['V'])
V = int_V(ST['V'], _t, ST['m'], ST['h'], ST['n'], ST['ge'], ST['gi'])
sp = np.logical_and(ST['V'] < Vt, V >= Vt)
ST['sp'] = sp
ST['V'] = V


# State variables accessed in neu_update.
neu_ST = bp.types.NeuState(
    {'V': 0., 'm': 0., 'h': 0., 'n': 0., 'ge': 0., 'gi': 0., 'sp': 0.}
)

neuron = bp.NeuType(name='COBA-HH', ST=neu_ST, steps=neu_update, mode='vector')


def exc_update(pre, post, pre2post):
for pre_id in range(len(pre2post)):
if pre['sp'][pre_id] > 0.:
post_ids = pre2post[pre_id]
# post['ge'][post_ids] += we
for p_id in post_ids:
post['ge'][p_id] += we


def inh_update(pre, post, pre2post):
for pre_id in range(len(pre2post)):
if pre['sp'][pre_id] > 0.:
post_ids = pre2post[pre_id]
# post['gi'][post_ids] += wi
for p_id in post_ids:
post['gi'][p_id] += wi


exc_syn = bp.SynType('exc_syn', steps=exc_update, ST=bp.types.SynState())

inh_syn = bp.SynType('inh_syn', steps=inh_update, ST=bp.types.SynState())

group = bp.NeuGroup(neuron, size=num_exc + num_inh, monitors=['sp'])
group.ST['V'] = El + (np.random.randn(num_exc + num_inh) * 5 - 5)
group.ST['ge'] = (np.random.randn(num_exc + num_inh) * 1.5 + 4) * 10.
group.ST['gi'] = (np.random.randn(num_exc + num_inh) * 12 + 20) * 10.

exc_conn = bp.TwoEndConn(exc_syn, pre=group[:num_exc], post=group,
conn=bp.connect.FixedProb(prob=0.02))

inh_conn = bp.TwoEndConn(inh_syn, pre=group[num_exc:], post=group,
conn=bp.connect.FixedProb(prob=0.02))

net = bp.Network(group, exc_conn, inh_conn)
t0 = time.time()
net.run(duration, report=True)
print('Used time {} s.'.format(time.time() - t0))

bp.visualize.raster_plot(net.ts, group.mon.sp, show=True)

+ 0
- 85
develop/benchmark/COBAHH/COBAHH_brian2.py View File

@@ -1,85 +0,0 @@
from brian2 import *
import time

set_device('cpp_standalone', directory='brian2_COBAHH')

defaultclock.dt = 0.1 * ms


duration = 10 * second
monitor = 'spike'
area = 0.02
Cm = 200
gl = 10.
g_na = 20 * 1000
g_kd = 6. * 1000

time_unit = 1 * ms
El = -60
EK = -90
ENa = 50
VT = -63
# Time constants
taue = 5 * ms
taui = 10 * ms
# Reversal potentials
Ee = 0
Ei = -80
# excitatory synaptic weight
we = 6
# inhibitory synaptic weight
wi = 67

# The model
eqs = Equations('''
dv/dt = (gl*(El-v) + ge*(Ee-v) + gi*(Ei-v)-
g_na*(m*m*m)*h*(v-ENa)-
g_kd*(n*n*n*n)*(v-EK))/Cm/time_unit : 1
dm/dt = (alpha_m*(1-m)-beta_m*m)/time_unit : 1
dn/dt = (alpha_n*(1-n)-beta_n*n)/time_unit : 1
dh/dt = (alpha_h*(1-h)-beta_h*h)/time_unit : 1
dge/dt = -ge/taue : 1
dgi/dt = -gi/taui : 1
alpha_m = 0.32*(13-v+VT)/(exp((13-v+VT)/4)-1.) : 1
beta_m = 0.28*(v-VT-40)/(exp((v-VT-40)/5)-1) : 1
alpha_h = 0.128*exp((17-v+VT)/18) : 1
beta_h = 4./(1+exp((40-v+VT)/5)) : 1
alpha_n = 0.032*(15-v+VT)/(exp((15-v+VT)/5)-1.) : 1
beta_n = .5*exp((10-v+VT)/40) : 1
''')

P = NeuronGroup(4000, model=eqs, threshold='v>-20', method='exponential_euler')
Pe = P[:3200]
Pi = P[3200:]
Ce = Synapses(Pe, P, on_pre='ge+=we')
Ci = Synapses(Pi, P, on_pre='gi+=wi')
Ce.connect(p=0.02)
Ci.connect(p=0.02)

# Initialization
P.v = 'El + (randn() * 5 - 5)'
P.ge = '(randn() * 1.5 + 4) * 10.'
P.gi = '(randn() * 12 + 20) * 10.'

# monitor
if monitor == 'V':
trace = StateMonitor(P, 'v', record=True)
else:
s_mon = SpikeMonitor(P)

# Record a few traces
t0 = time.time()
run(duration, report='text')
print('{}. Used time {} s.'.format(prefs.codegen.target, time.time() - t0))

if monitor == 'V':
for i in [1, 10, 100]:
plot(trace.t / ms, trace.v[i], label='N-{}'.format(i))
xlabel('t (ms)')
ylabel('v (mV)')
legend()
show()
else:
plot(s_mon.t / ms, s_mon.i, ',k')
xlabel('Time (ms)')
ylabel('Neuron index')
show()

+ 0
- 174
develop/benchmark/COBAHH/COBAHH_nest.sli View File

@@ -1,174 +0,0 @@
%%% FUNCTION SECTION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% Take spike detector, find total number of spikes registered,
% return average rate per neuron in Hz.
% NOTE: If you are running with several MPI processes, this
% function only gives an approximation to the true rate.
%
% spike_det ComputeRate -> rate
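% i.e. rate = nspikes / (nnrn * simtime) * 1000, with simtime in ms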
/ComputeRate
{
GetStatus /n_events get /nspikes Set
% We need to guess how many neurons we record from.
% This assumes an even distribution of nodes across
% processes, which is ok for the Brette_et_al_2007
% benchmarks, but should not be considered a general
% solution.
Nrec cvd NumProcesses div
/nnrn Set

nspikes nnrn simtime mul div
1000 mul % convert from spikes/ms to Hz, leave on stack
end
} bind % optional, improves performance
def


%%% CONSTRUCTION SECTION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

/BuildNetwork
{
% set global kernel parameters
0
<<
/resolution dt
/total_num_virtual_procs virtual_processes
/overwrite_files true
>> SetStatus

tic % start timer on construction

% Set initial parameters for all new neurons and devices

model model_params SetDefaults

(Creating excitatory population.) = % show message
/E_net model [ NE ] LayoutNetwork def

(Creating inhibitory population.) = % show message
/I_net model [ NI ] LayoutNetwork def

(Creating excitatory stimulus generator.) =
/E_stimulus stimulus Create def
E_stimulus stimulus_params SetStatus

% one detector would in principle be enough,
% but by recording the populations separately,
% we save ourselves a lot of sorting work later

(Creating excitatory spike detector.) =
/E_detector detector Create def
E_detector detector_params SetStatus

(Creating inhibitory spike detector.) =
/I_detector detector Create def
I_detector detector_params SetStatus

% some connecting functions need lists (arrays) over all
% neurons they should work on, so we need to extract these
% lists from the subnetworks

% obtain array with GIDs of all excitatory neurons
/E_neurons E_net GetLocalNodes def

% obtain array with GIDs of all inhibitory neurons
/I_neurons I_net GetLocalNodes def

% all neurons
/allNeurons E_neurons I_neurons join def

/N allNeurons length def

/CE NE epsilon mul iround def % number of incoming excitatory connections
/CI NI epsilon mul iround def % number of incoming inhibitory connections

% number of synapses---just so we know
/Nsyn
CE CI add % internal synapses
N mul
Nrec 2 mul % "synapses" to spike detectors
add
Nstim add % "synapses" from poisson generator
def

% Create custom synapse types with appropriate values for
% our excitatory and inhibitory connections
/static_synapse << /delay delay >> SetDefaults
/static_synapse /syn_ex E_synapse_params CopyModel
/static_synapse /syn_in I_synapse_params CopyModel

(Connecting excitatory population.) =

% E -> E connections
E_neurons % source population [we pick from this]
E_neurons % target neurons [for each we pick CE sources]
<< /rule /fixed_indegree /indegree CE >> % number of source neurons to pick
/syn_ex % synapse model
Connect

% I -> E connections
% as above, but on a single line
I_neurons E_neurons << /rule /fixed_indegree /indegree CI >> /syn_in Connect


(Connecting inhibitory population.) =

% ... as above, just written more compactly
% E -> I
E_neurons I_neurons << /rule /fixed_indegree /indegree CE >> /syn_ex Connect
% I -> I
I_neurons I_neurons << /rule /fixed_indegree /indegree CI >> /syn_in Connect

%Add external stimulus

(Connecting Poisson stimulus.) =
[E_stimulus]
E_neurons Nstim Take % pick the first Nstim neurons
/all_to_all
/syn_ex
Connect


% Spike detectors are connected to the first Nrec neurons in each
% population. Since neurons are equal and connectivity is homogeneously
% randomized, this is equivalent to picking Nrec neurons at random
% from each population

(Connecting spike detectors.) =

E_neurons Nrec Take % pick the first Nrec neurons
[E_detector] Connect

I_neurons Nrec Take % pick the first Nrec neurons
[I_detector] Connect

% read out time used for building

toc /BuildCPUTime Set
} def

/RunSimulation
{
BuildNetwork

% run, measure computer time with tic-toc
tic
(Simulating...) =
simtime Simulate
toc /SimCPUTime Set

% write a little report
(Simulation summary) =
(Number of Neurons : ) =only N =
(Number of Synapses: ) =only Nsyn =
(Excitatory rate : ) =only E_detector ComputeRate =only ( Hz) =
(Inhibitory rate : ) =only I_detector ComputeRate =only ( Hz) =
(Building time : ) =only BuildCPUTime =only ( s) =
(Simulation time : ) =only SimCPUTime =only ( s\n) =
} def

/parameters_set lookup {
RunSimulation
} {
(Parameters are not set. Please call one of coba.sli, cuba_ps.sli, cuba.sli, cuba_stdp.sli, or hh_coba.sli.) M_ERROR message
} ifelse

+ 0
- 52
develop/benchmark/COBAHH/COBAHH_neuron/COBAHH_neuron.hoc View File

@@ -1,52 +0,0 @@
// Main file for cobahh network (Hodgkin-Huxley model cells with COnductance BAsed synapses).

{load_file("nrngui.hoc")} // GUI and runtime libraries
{load_file("hhcell.hoc")} // defines CobaHHCell class

// Procedures that set up network architecture and performance reporting.
{load_file("init.hoc")}

// Called by create_cells() in net.hoc
obfunc newcell() {
return new CobaHHCell()
}

// Create the cells, then connect them.
create_net() // in net.hoc

STOPSTIM=10000 // duration of stimulation (ms)

// Randomized spike trains driving excitatory synapses.
create_stim(run_random_low_start_, AMPA_GMAX) // in netstim.hoc

// A few last items for performance reports, e.g. set up spike time recording, and,
// if in "demo" mode, create graph for raster plots, and panel with Stop button.
finish_setup() // in init.hoc

// Parallel run to tstop.
prun() // in perfrun.hoc

// Only the "master" cpu does this.
if (pc.id == 0) {print "RunTime: ", runtime}

// Up to this point, all CPUs have executed the same code,
// except for taking different branches depending on their value of pc.id,
// which ranges from 0 to pc.nhost-1.

// Gather performance statistics from each CPU.

// Only the master (pc.id == 0) returns from pc.runworker().
// All other CPUs ("workers") now wait for messages.
{pc.runworker()}

// Send requests to the workers and handle the results they send back.
collect_results() // in init.hoc

// Send all workers a QUIT message; those NEURON processes exit.
// The master waits until all worker output has been transferred to it.
{pc.done()}

// Only the master executes code beyond this point; all others have exited.

// Times of all spikes, and consolidated performance report.
output_results() // in perfrun.hoc

+ 0
- 89
develop/benchmark/COBAHH/COBAHH_neuron/hhcell.hoc View File

@@ -1,89 +0,0 @@
// Mostly constructed by cell builder. Lines marked with //* added manually
{load_file("hhchan.ses")} //*

//Network cell templates
// CobaHHCell

AMPA_INDEX = 0 //* synlist synapse indices
GABA_INDEX = 1 //*

begintemplate CobaHHCell
public is_art
public init, topol, basic_shape, subsets, geom, biophys, geom_nseg, biophys_inhomo
public synlist, x, y, z, position, connect2target

public soma
public all

objref synlist

proc init() {
topol()
subsets()
geom()
biophys()
geom_nseg()
synlist = new List()
synapses()
x = y = z = 0 // only change via position
}

create soma

proc topol() { local i
basic_shape()
}
proc basic_shape() {
soma {pt3dclear() pt3dadd(0, 0, 0, 1) pt3dadd(15, 0, 0, 1)}
}

objref all
proc subsets() { local i
objref all
all = new SectionList()
soma all.append()

}
proc geom() {
forsec all { /*area = 20000 */ L = diam = 79.7885 }
}
external lambda_f
proc geom_nseg() {
//* performance killer: soma area(.5) // make sure diam reflects 3d points
}
proc biophys() {
forsec all {
cm = 1
insert pas
g_pas = 5e-05
e_pas = -65
insert nahh
gmax_nahh = 0.1
insert khh
gmax_khh = 0.03
ena = 50 //*
ek = -90 //*
}
}
proc biophys_inhomo(){}
proc position() { local i
soma for i = 0, n3d()-1 {
pt3dchange(i, $1-x+x3d(i), $2-y+y3d(i), $3-z+z3d(i), diam3d(i))
}
x = $1 y = $2 z = $3
}
proc connect2target() { //$o1 target point process, $o2 returned NetCon
soma $o2 = new NetCon(&v(1), $o1)
$o2.threshold = -10 //*
}
objref syn_
proc synapses() {
/* E0 */ soma syn_ = new ExpSyn(0.5) synlist.append(syn_)
syn_.tau = 5
/* I1 */ soma syn_ = new ExpSyn(0.5) synlist.append(syn_)
syn_.tau = 10
syn_.e = -80
}
func is_art() { return 0 }

endtemplate CobaHHCell

+ 0
- 101
develop/benchmark/COBAHH/COBAHH_neuron/hhchan.ses View File

@@ -1,101 +0,0 @@
objectvar save_window_, rvp_
objectvar scene_vector_[2]
objectvar ocbox_, ocbox_list_, scene_, scene_list_
{ocbox_list_ = new List() scene_list_ = new List()}

//Begin ChannelBuild[0] managed KSChan[0]
{
load_file("chanbild.hoc", "ChannelBuild")
}
{ion_register("na", 1)}
{ocbox_ = new ChannelBuild(1)}
{object_push(ocbox_)}
{genprop.set_data("nahh", 1, 1, 5, "na")}
{genprop.set_defstr(0.1, 0)}
tobj = new ChannelBuildKSGate(this)
{gatelist.append(tobj)}
{tobj.begin_restore(3)}
{tobj.set_state("m", 1, 140, 140)}
{tobj.set_trans(0, 0, 0)}
{tobj.transitions.object(0).settype(0, "")}
{tobj1 = new Vector(3) for (i=0; i < 3; i += 1) tobj1.x[i] = fscan() }
1.28
0.25
-50
{tobj.transitions.object(0).set_f(0, 3, tobj1)}
{tobj1 = new Vector(3) for (i=0; i < 3; i += 1) tobj1.x[i] = fscan() }
1.4
-0.2
-23
{tobj.transitions.object(0).set_f(1, 3, tobj1)}
{tobj.end_restore()}
tobj = new ChannelBuildKSGate(this)
{gatelist.append(tobj)}
{tobj.begin_restore(1)}
{tobj.set_state("h", 1, 140, 110)}
{tobj.set_trans(0, 0, 0)}
{tobj.transitions.object(0).settype(0, "")}
{tobj1 = new Vector(3) for (i=0; i < 3; i += 1) tobj1.x[i] = fscan() }
0.128
-0.055556
-46
{tobj.transitions.object(0).set_f(0, 2, tobj1)}
{tobj1 = new Vector(3) for (i=0; i < 3; i += 1) tobj1.x[i] = fscan() }
4
-0.2
-23
{tobj.transitions.object(0).set_f(1, 4, tobj1)}
{tobj.end_restore()}
end_restore()
{genprop.set_single(0)}
{set_alias(1)}
{usetable(1)}
{object_pop()}
{
ocbox_.map("ChannelBuild[0] managed KSChan[0]", 0, 0, 360, 225.6)
}
objref ocbox_
//End ChannelBuild[0] managed KSChan[0]

{WindowMenu[0].ses_gid(1, 0, 0, "channels")}

//Begin ChannelBuild[1] managed KSChan[1]
{
load_file("chanbild.hoc", "ChannelBuild")
}
{ion_register("k", 1)}
{ocbox_ = new ChannelBuild(1)}
{object_push(ocbox_)}
{genprop.set_data("khh", 1, 1, 6, "k")}
{genprop.set_defstr(0.03, 0)}
tobj = new ChannelBuildKSGate(this)
{gatelist.append(tobj)}
{tobj.begin_restore(4)}
{tobj.set_state("n", 1, 140, 140)}
{tobj.set_trans(0, 0, 0)}
{tobj.transitions.object(0).settype(0, "")}
{tobj1 = new Vector(3) for (i=0; i < 3; i += 1) tobj1.x[i] = fscan() }
0.16
0.2
-48
{tobj.transitions.object(0).set_f(0, 3, tobj1)}
{tobj1 = new Vector(3) for (i=0; i < 3; i += 1) tobj1.x[i] = fscan() }
0.5
-0.025
-53
{tobj.transitions.object(0).set_f(1, 2, tobj1)}
{tobj.end_restore()}
end_restore()
{genprop.set_single(0)}
{set_alias(1)}
{usetable(1)}
{object_pop()}
{
ocbox_.map("ChannelBuild[1] managed KSChan[1]", 0, 0, 365.76, 210.24)
}
objref ocbox_
//End ChannelBuild[1] managed KSChan[1]

{WindowMenu[0].ses_gid(0, 0, 0, "channels")}
objectvar scene_vector_[1]
{doNotify()}

+ 0
- 138
develop/benchmark/COBAHH/COBAHH_neuron/init.hoc View File

@@ -1,138 +0,0 @@
setuptime = startsw()
{load_file("nrngui.hoc")}

// Defines ParallelNetManager class.
{load_file("netparmpi.hoc")} // in nrn/lib/hoc

// Defines RandomStream class, used to create and manage randomized spike streams.
{load_file("ranstream.hoc")} // in common

// Will become instances of ParallelNetManager, ParallelContext,
// List of RandomStream objects, an FInitializeHandler (to report progress),
// and a Graph for the raster plot.
objref pnm, pc, ranlist, fih, grspk

// If program was launched by executing mosinit.hoc,
// there is already a variable called mosinit with value 1.
// If this variable does not exist, create it and set its value to 0.
// mosinit controls reporting of results--see
// procs finish_setup(), collect_results(), and output_results() below.
if (!name_declared("mosinit")) {mosinit = 0}

ncell = 4000
ranlist = new List()
random_stream_offset_ = 500 // Adjacent streams will be correlated by this offset.
// Seeds for network architecture and stimulus trains.
connect_random_low_start_ = 1 // Used in net.hoc.
run_random_low_start_ = 2 // Used in coba|cobahh|cuba|cubadv/init.hoc

// The ParallelNetManager class provides routines and variables
// for managing distributed network simulations
// in a parallel computing environment.
pnm = new ParallelNetManager(ncell)
// One of the ParallelNetManager's public members is a ParallelContext object.
pc = pnm.pc

// iterator pcitr() manages round robin style distribution of cells on CPUs.
// There are pc.nhost CPUs, numbered 0 to pc.nhost-1, where 0 is the "master" CPU
// and ncell cells, numbered 0 to ncell-1
// CPU i creates cells i+j*pc.nhost where j=0,1,2,... and i+j*pc.nhost < ncell
// pcitr is called four times with different iterator_statements:
// twice in common/net.hoc, to distribute cells and set up synapses
// once in netstim.hoc, to set up stimulation of a specified number of cells
// once in perfrun.hoc, to set up spike recording from all cells
// Note that the outer loop of pcitr is over the target cells, and,
// when setting up synaptic connections, the inner loop will be over the source cells.
// This minimizes setup time, which is an issue if the number of possible connections
// is ~ 10^4 x 10^4 or greater
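// For example, with pc.nhost == 4, CPU 1 creates the cells with
// gids 1, 5, 9, 13, ...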
iterator pcitr() {local i1, i2
i1 = 0
for (i2=pc.id; i2 < ncell; i2 += pc.nhost) {
$&1 = i1
$&2 = i2
iterator_statement
i1 += 1
}
}

// Create the model.
{load_file("net.hoc")} // in common
// Set up the stimulus sources (streams of afferent spike events).
{load_file("netstim.hoc")} // in common

// Simulation control parameters.
tstop = 10000 //5000 //(ms)
dt = 0.1 //(ms) time step
steps_per_ms = 1/dt
v_init = -60
celsius = 36

// Variables and procedures used for
// performance and statistics measurement and reporting.
{load_file("perfrun.hoc")} // in common

proc finish_setup() {
// Record all spikes in the net.
want_all_spikes() // in common/perfrun.hoc
// Keep track of spike exchanges.
mkhist(100) // in common/perfrun.hoc

setuptime = startsw() - setuptime
if (pc.id == 0) {print "SetupTime: ", setuptime}
// mosinit==1 means "demo mode," i.e. show results during the simulation.
if (mosinit == 1) {
mosrt = startsw()
// Make proc mosprogress() be an event handler
// that will be responsible for progress reports and raster plot updates,
// and send the event that will trigger the first report and update
fih = new FInitializeHandler("cvode.event(0, \"mosprogress()\")")
grspk = new Graph(0) // Graph of spike rasters.
grspk.size(0,tstop,0,pnm.ncell)
grspk.view(0, 0, 1000, 4000, 50, 100, 750, 500)
plt_so_far = 0
xpanel("Stop simulation")
xbutton("Stop", "stoprun = 1")
xpanel(380, 10)
}
}

// This event handler reports progress, updates the raster plot,
// and sends another event that will trigger the next progress report and plot update.
proc mosprogress() {local i
if (t == 0) { grspk.erase_all }
printf("runtime=%g\t t=%g\t nspike=%d\n", startsw() - mosrt, t, pnm.spikevec.size)
cvode.event(t+mosinvl, "mosprogress()")
for i=plt_so_far, pnm.spikevec.size-1 {
grspk.mark(pnm.spikevec.x[i], pnm.idvec.x[i], "|", 5,1,1)
}
plt_so_far = pnm.spikevec.size
grspk.flush()
doNotify()
}

proc collect_results() {
// "demo mode" so show final progress report.
if (mosinit == 1) {
mosprogress()
}
pnm.prstat(1)
// Get each cpu's performance stats.
getstat() // in common/perfrun.hoc
// Get the spike times.
pnm.gatherspikes()
// Display histogram of # spikes vs # exchanges, and print stats.
prhist() // in common/perfrun.hoc
print_spike_stat_info() // in common/perfrun.hoc
}

// output_results() is the last statement in the coba|cobahh|cuba|cubadv/init.hoc files.
// Since only the pc.id==0 process returns from pc.runworker,
// only the master will call output_results().
proc output_results() {
// If "demo mode," don't bother writing performance and spike data
if (mosinit == 1) {return}

perf2file() // in common/perfrun.hoc
spike2file() // in common/perfrun.hoc
quit()
}

+ 0
- 6
develop/benchmark/COBAHH/COBAHH_neuron/intrinsic.hoc View File

@@ -1,6 +0,0 @@
load_file("nrngui.hoc")
load_file("hhcell.hoc")
objref cell
cell = new CobaHHCell(0)
access cell.soma
load_file("intrinsic.ses")

+ 0
- 69
develop/benchmark/COBAHH/COBAHH_neuron/intrinsic.ses View File

@@ -1,69 +0,0 @@
{load_file("nrngui.hoc")}
objectvar save_window_, rvp_
objectvar scene_vector_[4]
objectvar ocbox_, ocbox_list_, scene_, scene_list_
{ocbox_list_ = new List() scene_list_ = new List()}
{pwman_place(0,0,0)}

//Begin PointProcessManager
{
load_file("pointman.hoc")
}
{
CobaHHCell[0].soma ocbox_ = new PointProcessManager(0)
}
{object_push(ocbox_)}
{
mt.select("IClamp") i = mt.selected()
ms[i] = new MechanismStandard("IClamp")
ms[i].set("del", 50, 0)
ms[i].set("dur", 750, 0)
ms[i].set("amp", 0.02, 0)
mt.select("IClamp") i = mt.selected() maction(i)
hoc_ac_ = 0.5
sec.sec move() d1.flip_to(0)
}
{object_pop() doNotify()}
{
ocbox_ = ocbox_.v1
ocbox_.map("PointProcessManager", 393, 446, 208.32, 326.4)
}
objref ocbox_
//End PointProcessManager

{
xpanel("RunControl", 0)
v_init = -65
xvalue("Init","v_init", 1,"stdinit()", 1, 1 )
xbutton("Init & Run","run()")
xbutton("Stop","stoprun=1")
runStopAt = 5
xvalue("Continue til","runStopAt", 1,"{continuerun(runStopAt) stoprun=1}", 1, 1 )
runStopIn = 1
xvalue("Continue for","runStopIn", 1,"{continuerun(t + runStopIn) stoprun=1}", 1, 1 )
xbutton("Single Step","steprun()")
t = 1000
xvalue("t","t", 2 )
tstop = 1000
xvalue("Tstop","tstop", 1,"tstop_changed()", 0, 1 )
dt = 0.025
xvalue("dt","dt", 1,"setdt()", 0, 1 )
steps_per_ms = 40
xvalue("Points plotted/ms","steps_per_ms", 1,"setdt()", 0, 1 )
screen_update_invl = 0.05
xvalue("Scrn update invl","screen_update_invl", 1,"", 0, 1 )
realtime = 0.81
xvalue("Real Time","realtime", 0,"", 0, 1 )
xpanel(98,168)
}
{
save_window_ = new Graph(0)
save_window_.size(0,1000,-80,40)
scene_vector_[3] = save_window_
{save_window_.view(0, -80, 1000, 120, 388, 170, 300.48, 200.32)}
graphList[0].append(save_window_)
save_window_.save_name("graphList[0].")
save_window_.addexpr("v(.5)", 1, 1, 0.8, 0.9, 2)
}
objectvar scene_vector_[1]
{doNotify()}

+ 0
- 74
develop/benchmark/COBAHH/COBAHH_neuron/net.hoc View File

@@ -1,74 +0,0 @@
objref nil

proc netparam() {
N_I = int(ncell/5.0) // Number of inhibitory cells
N_E = ncell - N_I // Number of excitatory cells
CONNECTIVITY = 0.02 // Connection probability
C_I = int(N_I*CONNECTIVITY) // nb inh synapses per neuron
C_E = int(N_E*CONNECTIVITY) // nb exc synapses per neuron
AMPA_GMAX = 0.006 // (uS)
GABA_GMAX = 0.067 // (uS)
DELAY = 0 // (ms)
}
netparam()

proc create_cells() { local i, gid localobj cell, nc
// Make each CPU create just the cells that "belong" to it.
for pcitr(&i, &gid) { // in common/init.hoc
// i ranges from 0 to "number of cells on this cpu".
// gid is globally unique and is in the range from 0 to ncell,
// where ncell is the total number of cells on all machines.
cell = newcell(gid)
// Associate this cell and gid with each other.
pnm.register_cell(gid, cell)
// Create a corresponding new instance of the RandomStream class.
ranlist.append(new RandomStream(gid)) // Notice it is the ith RandomStream.
}
}

// Pick exactly C_E and C_I unique non-self random connections for each target.
// Assume many more sources than target connections.
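// The loop below is a simple rejection sampler: repick a random source,
// skip self-connections and already-chosen sources, and stop once C_E
// (respectively C_I) unique sources are connected to the target.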
proc connect_cells() { local i, j, gid, r, d localobj cell, u, rs
// In the paper, delay is 0. If it is nonzero, we can run this model
// on a parallel machine, i.e. a machine with pc.nhost > 1.
d = DELAY
// Initialize the pseudorandom number generator.
mcell_ran4_init(connect_random_low_start_)
u = new Vector(ncell) // for sampling without replacement
for pcitr(&i, &gid) { // For each target cell . . .
u.fill(0) // u.x[i]==1 will mean that i has already been chosen.
cell = pnm.cells.object(i)
rs = ranlist.object(i) // . . . identify the corresponding RandomStream . . .
rs.start()
// . . . and make it return pseudorandom integers in the range 0..N_E-1.
rs.r.discunif(0, N_E-1)
j=0 while(j < C_E) { // Excitatory sources
r = rs.repick()
// No self connection, and no more than one connection from any source.
if (r != gid) if (u.x[r] == 0) {
// Set up a connection from source to target.
pnm.nc_append(r, gid, AMPA_INDEX, AMPA_GMAX, d)
// Mark this source index as "used" and prepare to choose another.
u.x[r] = 1
j += 1
}
}
// Now make the RandomStream return pseudorandom integers in the range N_E..ncell-1.
rs.r.discunif(N_E, ncell-1)
j=0 while(j < C_I) { // Inhibitory sources
r = rs.repick()
if (r != gid) if (u.x[r] == 0) {
pnm.nc_append(r, gid, GABA_INDEX, GABA_GMAX, d)
u.x[r] = 1
j += 1
}
}
}
}

proc create_net() {
create_cells()
if (pc.id == 0) printf("Created %d cells; %d on host 0\n", pnm.ncell, pnm.cells.count)
connect_cells()
if (pc.id == 0) printf("Created %d connections to targets on host 0\n", pnm.nclist.count)
}

+ 0
- 74
develop/benchmark/COBAHH/COBAHH_neuron/netstim.hoc View File

@@ -1,74 +0,0 @@
// random external input for first 50 ms
// do not consider part of net so keep out of the pnm data structures

// Stimulation parameters

N_STIM = ncell/50 // number of neurons stimulated
STOPSTIM = 50 // duration of stimulation (ms)
NSYN_STIM = 20 // nb of stim (exc) synapses per neuron
STIM_INTERVAL = 70 // mean interval between stims (ms)

objref svec, stvec
svec = new Vector()
stvec = new Vector()
objref stimlist, ncstimlist
proc create_stim() {local i, gid localobj stim, cell, nc, rs
mcell_ran4_init($1)
stimlist = new List()
ncstimlist = new List()

// The first N_STIM (excitatory) cells are stimulated.
// Each CPU creates NetStims and NetCons that target only its own cells,
// i.e. no NetStim's spike train is sent to a cell on a different CPU.
// Thus the delay can be 0 and we can still run in parallel.
// HOWEVER when the "use_self_queue" performance optimization is requested
// from cvode.queue_mode, that optimization will be refused,
// even when running serially, unless ALL NetCon.delay > 0.
// This is why nc.delay is set to 1 several lines below.
for pcitr(&i, &gid) { // in common/init.hoc
if (gid >= N_STIM) { break }

// The ith cell and its corresponding RandomStream.
cell = pc.gid2cell(gid)
rs = ranlist.object(i)

stim = new NetStim()
stim.interval = STIM_INTERVAL
stim.number = 1000 // but will shut off after STOPSTIM
stim.noise = 1
stim.start = 0
// Use the gid-specific random generator so random streams are
// independent of where and how many stims there are.
stim.noiseFromRandom(rs.r)
rs.r.negexp(1)
rs.start()

if (hoc_sf_.is_artificial(cell)) {
nc = new NetCon(stim, cell)
}else{
nc = new NetCon(stim, cell.synlist.object(0))
}
// Set all NetCon.delay > 0 so that "use_self_queue" optimization
// will not be refused due to 0 delay between NetStim and its target
// (see above comment re: "use_self_queue" performance optimization).
// There is no loss of generality, since the NetStim can be set to fire 1 ms
// before you want the targets to get the spike.
nc.delay = 1
nc.weight = $2
nc.record(stvec, svec, ncstimlist.count)

ncstimlist.append(nc)
stimlist.append(stim)
}

stim = new NetStim() // will turn off all the others
stim.number = 1
stim.start = STOPSTIM
for i=0, stimlist.count-1 {
nc = new NetCon(stim, stimlist.object(i))
nc.delay = 1
nc.weight = -1
ncstimlist.append(nc)
}
stimlist.append(stim)
}

+ 0
- 175
develop/benchmark/COBAHH/COBAHH_neuron/perfrun.hoc View File

@@ -1,175 +0,0 @@
proc want_all_spikes() { local i, gid
for pcitr(&i, &gid) {
pnm.spike_record(gid)
}
}

objref mxhist_
proc mkhist() {
if (pnm.myid == 0) {
mxhist_ = new Vector($1)
pc.max_histogram(mxhist_)
}
}

proc prhist() {local i, j
if (pnm.myid == 0 && object_id(mxhist_)) {
printf("histogram of #spikes vs #exchanges\n")
j = 0
for i=0, mxhist_.size-1 {
if (mxhist_.x[i] != 0) { j = i }
}
for i = 0, j {
printf("%d\t %d\n", i, mxhist_.x[i])
}
printf("end of histogram\n")
}
}

objref tdat_
tdat_ = new Vector(3)
proc prun() {
pnm.pc.set_maxstep(10)
runtime=startsw()
tdat_.x[0] = pnm.pc.wait_time
stdinit()
pnm.psolve(tstop)
tdat_.x[0] = pnm.pc.wait_time - tdat_.x[0]
runtime = startsw() - runtime
tdat_.x[1] = pnm.pc.step_time
tdat_.x[2] = pnm.pc.send_time
// printf("%d wtime %g\n", pnm.myid, waittime)
}

proc poststat() {
pnm.pc.post("poststat", pnm.myid, tdat_)
}

objref spstat_
proc postspstat() {local i
spstat_ = new Vector()
cvode.spike_stat(spstat_)
i = spstat_.size
spstat_.resize(spstat_.size + 4)
spstat_.x[i] = pc.spike_statistics(&spstat_.x[i+1], &spstat_.x[i+2],\
&spstat_.x[i+3])
pnm.pc.post("postspstat", pnm.myid, spstat_)
}

objref tavg_stat, tmin_stat, tmax_stat, idmin_stat, idmax_stat

proc getstat() {local i, j, id localobj tdat
tdat = tdat_.c tavg_stat = tdat_.c tmin_stat = tdat_.c tmax_stat = tdat_.c
idmin_stat = tdat_.c.fill(0) idmax_stat = tdat_.c.fill(0)
if (pnm.nwork > 1) {
pnm.pc.context("poststat()\n")
for i=0, pnm.nwork-2 {
pnm.pc.take("poststat", &id, tdat)
tavg_stat.add(tdat)
for j = 0, tdat_.size-1 {
if (tdat.x[j] > tmax_stat.x[j]) {
idmax_stat.x[j] = id
tmax_stat.x[j] = tdat.x[j]
}
if (tdat.x[j] < tmin_stat.x[j]) {
idmin_stat.x[j] = id
tmin_stat.x[j] = tdat.x[j]
}
}
}
}
tavg_stat.div(pnm.nhost)
}

proc print_spike_stat_info() {local i, j, id localobj spstat, sum, min, max, idmin, idmax, label
spstat = new Vector()
spstat_ = new Vector()
cvode.spike_stat(spstat_)
i = spstat_.size
spstat_.resize(spstat_.size + 4)
spstat_.x[i] = pc.spike_statistics(&spstat_.x[i+1], &spstat_.x[i+2],\
&spstat_.x[i+3])
sum = spstat_.c
min = spstat_.c
max = spstat_.c
idmin = spstat_.c.fill(0)
idmax = spstat_.c.fill(0)
if (pnm.nwork > 1) {
pnm.pc.context("postspstat()\n")
for i=0, pnm.nwork-2 {
pnm.pc.take("postspstat", &id, spstat)
sum.add(spstat)
for j=0, spstat.size-1 {
if (spstat.x[j] > max.x[j]) {
idmax.x[j] = id
max.x[j] = spstat.x[j]
}
if (spstat.x[j] < min.x[j]) {
idmin.x[j] = id
min.x[j] = spstat.x[j]
}
}
}
}
label = new List()
label.append(new String("eqn"))
label.append(new String("NetCon"))
label.append(new String("deliver"))
label.append(new String("NC deliv"))
label.append(new String("PS send"))
label.append(new String("S deliv"))
label.append(new String("S send"))
label.append(new String("S move"))
label.append(new String("Q insert"))
label.append(new String("Q move"))
label.append(new String("Q remove"))
label.append(new String("max sent"))
label.append(new String("sent"))
label.append(new String("received"))
label.append(new String("used"))

printf("%10s %13s %10s %10s %5s %5s\n",\
"", "total", "min", "max", "idmin", "idmax")
for i=0, spstat_.size-1 {
printf("%-10s %13.0lf %10d %10d %5d %5d\n",\
label.object(i).s, sum.x[i], min.x[i], max.x[i], idmin.x[i], idmax.x[i])
}

printf("\n%-12s %-12s %-12s %-12s %-12s %-12s\n",\
"setup", "run", "avgspkxfr", "avgcomp", "avgx2q", "avgvxfr")
printf("%-12.4g %-12.4g", setuptime, runtime)
for i=0, tdat_.size-1 { printf(" %-12.4g", tavg_stat.x[i]) }
printf("\n\n%5s %-15s %-15s %-15s %-15s\n", \
"", "id spkxfr", "id comp", "id x2q", "id vxfr")
printf("%-5s", "min")
for i=0, tdat_.size-1 { printf(" %-4d %-10.4g", idmin_stat.x[i], tmin_stat.x[i]) }
printf("\n%-5s", "max")
for i=0, tdat_.size-1 { printf(" %-4d %-10.4g", idmax_stat.x[i], tmax_stat.x[i]) }
printf("\n")
}

proc perf2file() { local i localobj perf
perf = new File()
perf.aopen("perf.dat")
perf.printf("%d %d %g %g ",pnm.nhost, pnm.ncell, setuptime, runtime)
for i=0, tdat_.size-1 { perf.printf(" %g", tavg_stat.x[i]) }
perf.printf(" ")
for i=0, tdat_.size-1 { perf.printf(" %d %g ", idmin_stat.x[i], tmin_stat.x[i]) }
perf.printf(" ")
for i=0, tdat_.size-1 { perf.printf(" %d %g ", idmax_stat.x[i], tmax_stat.x[i]) }
perf.printf("\n")

perf.close
}

proc spike2file() { localobj outf
outf = new File()
outf.wopen("out.dat")
for i=0, pnm.idvec.size-1 {
outf.printf("%g\t%d\n", pnm.spikevec.x[i], pnm.idvec.x[i])
}
outf.close
}



+ 0
- 18
develop/benchmark/COBAHH/COBAHH_neuron/ranstream.hoc View File

@@ -1,18 +0,0 @@
random_stream_offset_ = 1000

begintemplate RandomStream
public r, repick, start, stream
external random_stream_offset_
objref r
proc init() {
stream = $1
r = new Random()
start()
}
func start() {
return r.MCellRan4(stream*random_stream_offset_ + 1)
}
func repick() {
return r.repick()
}
endtemplate RandomStream

+ 0
- 14
develop/benchmark/COBAHH/COBAHH_neuron/spkplt.hoc View File

@@ -1,14 +0,0 @@
load_file("nrngui.hoc")

objref g
g = new Graph()
proc spkplt() {localobj x, y
clipboard_retrieve($s1)
printf("read %d spikes\n", hoc_obj_[1].size)
x = hoc_obj_[1]
y = hoc_obj_[0]
g.size(x.min, x.max, y.min, y.max)
y.mark(g, x, "|", 5, $2, 1)
}

spkplt("out.dat", 1)

+ 0
- 18
develop/benchmark/COBAHH/how_to_run.md View File

@@ -1,18 +0,0 @@
Run
===

brainpy:
> python COBAHH_brainpy.py

brian2:
> python COBAHH_brian2.py

NEST simulator:
> nest param_nest.sli COBAHH_nest.sli

NEURON:
> cd COBAHH_neuron
> nrngui COBAHH_neuron.hoc

References:
[1] Brette, R., Rudolph, M., Carnevale, T. et al. Simulation of networks of spiking neurons: A review of tools and strategies. J Comput Neurosci 23, 349–398 (2007). https://doi.org/10.1007/s10827-007-0038-6

+ 0
- 80
develop/benchmark/COBAHH/param_nest.sli View File

@@ -1,80 +0,0 @@
%%% PARAMETER SECTION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% define all relevant parameters: changes should be made here
% all data is place in the userdict dictionary

% A dictionary is a list of name value pairs, enclosed in << and >>
% Here we use dictionaries to encapsulate the parameters for the different
% benchmarks

/hh_coba_params
<<
/model /hh_cond_exp_traub % the neuron model to use

/model_params
<<
/g_Na 20000.0 nS % Sodium conductance [nS]
/g_K 6000.0 nS % K Conductance [nS]
/g_L 10.0 nS % Leak Conductance [nS]
/C_m 200.0 pF % Membrane Capacitance [pF]
/E_Na 50.0 mV % reversal potential (Sodium) [mV]
/E_K -90.0 mV % reversal potential (Potassium) [mV]
/E_L -60.0 mV % Resting Potential [mV]
/E_ex 0.0 mV % Excitatory reversal potential (mV)
/E_in -80.0 mV % Inhibitory reversal potential (Potassium) [mV]
/tau_syn_ex 5.0 ms % Excitatory synaptic time constant [ms]
/tau_syn_in 10.0 ms % Inhibitory synaptic time constant [ms]
>>

/delay 0.1 ms % synaptic delay, all connections [ms]

% synaptic strengths, here peak conductance
/E_synapse_params
<<
/weight 6.0 nS % excitatory synaptic conductance
>>

/I_synapse_params
<<
/weight -67.0 nS % inhibitory synaptic conductance
>>

/stimulus /poisson_generator
/stimulus_params
<<
/rate 300.0 Hz % rate of initial Poisson stimulus
/start 1.0 ms % start of Poisson_generator [ms]
/stop 51.0 ms % stop of Poisson_generator [ms]
/origin 0.0 ms % origin of time, to calculate start_time [ms]
>>

/detector /spike_detector
/detector_params
<<
/withtime true
/withgid true
/to_file true
/label (hh_coba)
>>

% number of neurons per population to record from
/Nrec 500

%number of neurons to stimulate
/Nstim 50
/simtime 10000.0 ms % simulated time
/dt 0.1 ms % simulation step

/NE 3200 % number of excitatory neurons
/NI 800 % number of inhibitory neurons
/epsilon 0.02 % Connection probability

/virtual_processes 1 % number of virtual processes to use

>> def

hh_coba_params using % here we activate the definitions in the dictionary

/parameters_set true def
statusdict/argv :: size 1 gt { 1 get dirname (/) join } { () } ifelse
(COBAHH_nest.sli) join run

+ 0
- 113
develop/benchmark/CUBA/CUBA_brainpy.py View File

@@ -1,113 +0,0 @@
# -*- coding: utf-8 -*-

import time

import numpy as np

import brainpy as bp

dt = 0.1
bp.profile.set(jit=True, dt=dt)

num_exc = 3200
num_inh = 800
taum = 20
taue = 5
taui = 10
Vt = -50
Vr = -60
El = -49
we = 60 * 0.27 / 10 # excitatory synaptic weight (voltage)
wi = -20 * 4.5 / 10 # inhibitory synaptic weight
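# (numerically, we = 1.62 mV and wi = -9.0 mV per presynaptic spike)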
ref = 5.0

neu_ST = bp.types.NeuState(
{'sp_t': -1e7,
'V': 0.,
'sp': 0.,
'ge': 0.,
'gi': 0.}
)


@bp.integrate
def int_ge(ge, t):
return -ge / taue


@bp.integrate
def int_gi(gi, t):
return -gi / taui


@bp.integrate
def int_V(V, t, ge, gi):
return (ge + gi - (V - El)) / taum


def neu_update(ST, _t):
ST['ge'] = int_ge(ST['ge'], _t)
ST['gi'] = int_gi(ST['gi'], _t)

if _t - ST['sp_t'] > ref:
V = int_V(ST['V'], _t, ST['ge'], ST['gi'])
if V >= Vt:
ST['V'] = Vr
ST['sp'] = 1.
ST['sp_t'] = _t
else:
ST['V'] = V
ST['sp'] = 0.
else:
ST['sp'] = 0.


neuron = bp.NeuType(name='CUBA', ST=neu_ST, steps=neu_update, mode='scalar')


def update1(pre, post, pre2post):
for pre_id in range(len(pre2post)):
if pre['sp'][pre_id] > 0.:
post_ids = pre2post[pre_id]
for i in post_ids:
post['ge'][i] += we


exc_syn = bp.SynType('exc_syn', steps=update1, ST=bp.types.SynState())


def update2(pre, post, pre2post):
for pre_id in range(len(pre2post)):
if pre['sp'][pre_id] > 0.:
post_ids = pre2post[pre_id]
for i in post_ids:
post['gi'][i] += wi


inh_syn = bp.SynType('inh_syn', steps=update2, ST=bp.types.SynState())

group = bp.NeuGroup(neuron,
size=num_exc + num_inh,
monitors=['sp'])
group.ST['V'] = Vr + np.random.rand(num_exc + num_inh) * (Vt - Vr)

exc_conn = bp.TwoEndConn(exc_syn,
pre=group[:num_exc],
post=group,
conn=bp.connect.FixedProb(prob=0.02))

inh_conn = bp.TwoEndConn(inh_syn,
pre=group[num_exc:],
post=group,
conn=bp.connect.FixedProb(prob=0.02))

net = bp.Network(group, exc_conn, inh_conn, mode='repeat')
t0 = time.time()
# net.run(5 * 1000., report_percent=1., report=True)
net.run(1250., report=True)
net.run((1250., 2500.), report=True)
net.run((2500., 3750.), report=True)
net.run((3750., 5000.), report=True)
print('Used time {} s.'.format(time.time() - t0))

bp.visualize.raster_plot(net.ts, group.mon.sp, show=True)

+ 0
- 41
develop/benchmark/CUBA/CUBA_brian2.py View File

@@ -1,41 +0,0 @@
from brian2 import *
import time

set_device('cpp_standalone', directory='brian2_CUBA')
# prefs.codegen.target = "cython"

taum = 20 * ms
taue = 5 * ms
taui = 10 * ms
Vt = -50 * mV
Vr = -60 * mV
El = -49 * mV

eqs = '''
dv/dt = (ge+gi-(v-El))/taum : volt (unless refractory)
dge/dt = -ge/taue : volt
dgi/dt = -gi/taui : volt
'''

P = NeuronGroup(4000, eqs, threshold='v>Vt', reset='v = Vr',
refractory=5 * ms, method='euler')
P.v = 'Vr + rand() * (Vt - Vr)'
P.ge = 0 * mV
P.gi = 0 * mV

we = (60 * 0.27 / 10) * mV # excitatory synaptic weight (voltage)
Ce = Synapses(P, P, on_pre='ge += we')
Ce.connect('i<3200', p=0.02)
wi = (-20 * 4.5 / 10) * mV # inhibitory synaptic weight
Ci = Synapses(P, P, on_pre='gi += wi')
Ci.connect('i>=3200', p=0.02)

s_mon = SpikeMonitor(P)

t0 = time.time()
run(5 * second, report='text')
print('{}. Used time {} s.'.format(prefs.codegen.target, time.time() - t0))

plot(s_mon.t / ms, s_mon.i, ',k')
xlabel('Time (ms)')
ylabel('Neuron index')
show()

+ 0
- 141
develop/benchmark/scaling_test.py View File

@@ -1,141 +0,0 @@
# -*- coding: utf-8 -*-

"""
Test the network scaling ability.
"""


import time
import brainpy as bp
import numpy as np
import math


def define_hh(E_Na=50., g_Na=120., E_K=-77., g_K=36., E_Leak=-54.387,
g_Leak=0.03, C=1.0, Vth=20., Iext=10.):
ST = bp.types.NeuState(
{'V': -65., 'm': 0., 'h': 0., 'n': 0., 'sp': 0., 'inp': 0.},
help='Hodgkin–Huxley neuron state.\n'
'"V" denotes membrane potential.\n'
'"n" denotes potassium channel activation probability.\n'
'"m" denotes sodium channel activation probability.\n'
'"h" denotes sodium channel inactivation probability.\n'
'"sp" denotes spiking state.\n'
'"inp" denotes synaptic input.\n'
)

@bp.integrate
def int_m(m, t, V):
alpha = 0.1 * (V + 40) / (1 - math.exp(-(V + 40) / 10))
beta = 4.0 * math.exp(-(V + 65) / 18)
return alpha * (1 - m) - beta * m

@bp.integrate
def int_h(h, t, V):
alpha = 0.07 * math.exp(-(V + 65) / 20.)
beta = 1 / (1 + math.exp(-(V + 35) / 10))
return alpha * (1 - h) - beta * h

@bp.integrate
def int_n(n, t, V):
alpha = 0.01 * (V + 55) / (1 - math.exp(-(V + 55) / 10))
beta = 0.125 * math.exp(-(V + 65) / 80)
return alpha * (1 - n) - beta * n

@bp.integrate
def int_V(V, t, m, h, n, Isyn):
INa = g_Na * m ** 3 * h * (V - E_Na)
IK = g_K * n ** 4 * (V - E_K)
IL = g_Leak * (V - E_Leak)
dvdt = (- INa - IK - IL + Isyn) / C
return dvdt

def update(ST, _t):
m = int_m(ST['m'], _t, ST['V'])
h = int_h(ST['h'], _t, ST['V'])
n = int_n(ST['n'], _t, ST['V'])
V = int_V(ST['V'], _t, m, h, n, ST['inp'])
sp = (ST['V'] < Vth) and (V >= Vth)
ST['sp'] = sp
ST['V'] = V
ST['m'] = m
ST['h'] = h
ST['n'] = n
ST['inp'] = Iext

return bp.NeuType(name='HH_neuron',
ST=ST,
steps=update,
mode='scalar')


bp.profile.set(dt=0.1, numerical_method='exponential')


def hh_compare_cpu_and_multi_cpu(num=1000, vector=True):
print(f'HH, vector_based={vector}, device=cpu', end=', ')
bp.profile.set(jit=True, device='cpu')

HH = define_hh()
HH.mode = 'vector' if vector else 'scalar'
neu = bp.NeuGroup(HH, size=num)

t0 = time.time()
neu.run(duration=1000., report=True)
t_cpu = time.time() - t0
print('used {:.3f} s'.format(t_cpu))

print(f'HH, vector_based={vector}, device=multi-cpu', end=', ')
bp.profile.set(jit=True, device='multi-cpu')
neu = bp.NeuGroup(HH, size=num)
t0 = time.time()
neu.run(duration=1000., report=True)
t_multi_cpu = time.time() - t0
print('used {:.3f} s'.format(t_multi_cpu))

print(f"HH model with multi-cpu speeds up {t_cpu / t_multi_cpu}")
print()


def hh_compare_cpu_and_gpu(num=1000):
print(f'HH, device=cpu', end=', ')
bp.profile.set(jit=True, device='cpu', show_code=False)

HH = define_hh()
HH.mode = 'scalar'
# neu = bp.NeuGroup(HH, geometry=num)
#
# t0 = time.time()
# neu.run(duration=1000., report=True)
# t_cpu = time.time() - t0
# print('used {:.3f} ms'.format(t_cpu))

print(f'HH, device=gpu', end=', ')
bp.profile.set(jit=True, device='gpu')
neu = bp.NeuGroup(HH, size=num)
    t0 = time.time()
    neu.run(duration=1000., report=True)
    t_gpu = time.time() - t0
    print('used {:.3f} s'.format(t_gpu))

    # print(f"HH model with gpu speeds up {t_cpu / t_gpu:.2f}x")
    # print()


if __name__ == '__main__':
pass

# hh_compare_cpu_and_multi_cpu(int(1e4))
# hh_compare_cpu_and_multi_cpu(int(1e5))
# hh_compare_cpu_and_multi_cpu(int(1e6))

# hh_compare_cpu_and_gpu(int(1e2))
# hh_compare_cpu_and_gpu(int(1e3))
# hh_compare_cpu_and_gpu(int(1e4))
hh_compare_cpu_and_gpu(int(1e5))
hh_compare_cpu_and_gpu(int(1e6))
# hh_compare_cpu_and_gpu(int(1e7))





+ 0
- 42
develop/conda-recipe/meta.yaml View File

@@ -1,42 +0,0 @@
package:
name: brain-py
version: "1.0.3"

source:
path: ../../

build:
noarch: python
number: 0
script: python -m pip install --no-deps --ignore-installed .

requirements:
host:
- python
- pip
run:
- python
- numpy>=1.13
- sympy>=1.2
- numba>=0.50
- matplotlib>=3.0
- setuptools>=40.0.0

test:
imports:
- brainpy

about:
home: https://github.com/PKU-NIP-Lab/BrainPy
license: GPL-3.0
  summary: 'A simulation toolbox for research in computational neuroscience and brain-inspired computation.'
description: |
    BrainPy is a lightweight framework based on the latest Just-In-Time (JIT)
    compilers (especially [Numba](https://numba.pydata.org/)). The goal of
    BrainPy is to provide a unified simulation and analysis framework for
    neuronal dynamics that is both highly flexible and efficient. BrainPy is
    flexible because it gives users full control over the data and logic flow.
    BrainPy is efficient because it supports JIT acceleration on CPUs and GPUs.
dev_url: https://github.com/PKU-NIP-Lab/BrainPy
doc_url: https://brainpy.readthedocs.io/en/latest/
doc_source_url: https://github.com/PKU-NIP-Lab/BrainPy/blob/master/README.md
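
The description above credits Numba's JIT compilation for BrainPy's efficiency. As a minimal illustration of that mechanism (not part of the recipe, and independent of BrainPy's own API), a jitted Euler loop:

    import numpy as np
    from numba import njit

    @njit
    def euler_decay(v, tau, dt, n_steps):
        # integrate dv/dt = -v / tau with forward Euler; Numba compiles this
        # loop to machine code on the first call
        for _ in range(n_steps):
            v = v + dt * (-v / tau)
        return v

    print(euler_decay(np.float64(1.0), 10.0, 0.1, 10000))  # ~exp(-100), i.e. ~0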

+ 4
- 0
docs/apis/math/general.rst View File

@@ -12,4 +12,8 @@ General Functions
get_backend_name
set_dt
get_dt
set_int_
set_float_
set_complex_
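
A hedged usage sketch of the setters listed above, alongside the documented set_dt/get_dt pair; the exact call signatures of set_int_/set_float_/set_complex_ are assumptions inferred from their names:

    import numpy as np
    import brainpy.math as bm

    bm.set_dt(0.05)                # simulation time step
    bm.set_float_(np.float32)      # assumed: default floating-point dtype
    bm.set_int_(np.int32)          # assumed: default integer dtype
    bm.set_complex_(np.complex64)  # assumed: default complex dtype
    print(bm.get_dt())             # -> 0.05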



+ 3
- 1
docs/apis/math/jax.rst View File

@@ -13,4 +13,6 @@ Core Functions in JAX backend
pmap
grad
value_and_grad

Variable
TrainVar
Parameter
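
A minimal sketch of the Variable/TrainVar/Parameter wrappers listed above (the NumPy backend page below documents the same names); the import path and constructor arguments shown are assumptions:

    import numpy as np
    from brainpy.math.jax import Variable, TrainVar, Parameter

    w = TrainVar(np.zeros((10, 10)))  # trainable; collected by train_vars()
    v = Variable(np.zeros(10))        # mutable state updated during simulation
    p = Parameter(np.ones(10) * 0.5)  # fixed value, excluded from training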

+ 3
- 1
docs/apis/math/numpy.rst View File

@@ -13,4 +13,6 @@ Core Functions in NumPy backend
pmap
grad
value_and_grad

Variable
TrainVar
Parameter

+ 6
- 6
docs/apis/simulation/brainobjects.rst View File

@@ -8,9 +8,10 @@ Brain Objects
.. autosummary::
:toctree: generated/

DynamicSystem
DynamicalSystem
Container
NeuGroup
CondNeuGroup
TwoEndConn
Network
Channel
@@ -19,10 +20,9 @@ Brain Objects
Dendrite
Soma
Molecular
CondNeuGroup


.. autoclass:: DynamicSystem
.. autoclass:: DynamicalSystem
:members:

.. autoclass:: Container
@@ -31,6 +31,9 @@ Brain Objects
.. autoclass:: NeuGroup
:members:

.. autoclass:: CondNeuGroup
:members:

.. autoclass:: TwoEndConn
:members:

@@ -55,7 +58,4 @@ Brain Objects
.. autoclass:: Molecular
:members:

.. autoclass:: CondNeuGroup
:members:
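
A hedged sketch of subclassing the renamed DynamicalSystem; the top-level export, the update() signature, and the constructor arguments are assumptions for illustration only:

    import numpy as np
    import brainpy as bp

    class LeakyUnit(bp.DynamicalSystem):
        """A toy population whose state decays exponentially toward zero."""

        def __init__(self, num, tau=10., **kwargs):
            super(LeakyUnit, self).__init__(**kwargs)
            self.tau = tau
            self.v = np.zeros(num)  # population state

        def update(self, _t, _dt):
            # forward Euler step of dv/dt = -v / tau
            self.v -= _dt * self.v / self.tau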



+ 18
- 0
docs/apis/simulation/connectivity.rst View File

@@ -84,6 +84,24 @@ Random Connections
:members:



Custom Connections
------------------

.. autosummary::
:toctree: generated/

MatConn
IJConn


.. autoclass:: MatConn
:members:

.. autoclass:: IJConn
:members:
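
A hedged sketch of the two custom connectors documented above; the module path (bp.connect) and the argument names are assumptions:

    import numpy as np
    import brainpy as bp

    # MatConn: connectivity given as a dense (pre, post) boolean matrix
    conn_mat = np.random.rand(5, 8) < 0.2
    conn1 = bp.connect.MatConn(conn_mat)

    # IJConn: connectivity given as aligned pre-/post-index arrays
    pre_ids = np.array([0, 0, 1, 3])
    post_ids = np.array([2, 5, 1, 7])
    conn2 = bp.connect.IJConn(i=pre_ids, j=post_ids)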


Formatter Functions
-------------------



+ 0
- 36
docs/apis/simulation/generated/brainpy.simulation.brainobjects.Channel.rst View File

@@ -1,36 +0,0 @@
brainpy.simulation.brainobjects.Channel
=======================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: Channel

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~Channel.__init__
~Channel.current
~Channel.ints
~Channel.nodes
~Channel.run
~Channel.train_vars
~Channel.unique_name
~Channel.update
~Channel.vars

.. rubric:: Attributes

.. autosummary::
~Channel.target_backend

+ 0
- 35
docs/apis/simulation/generated/brainpy.simulation.brainobjects.CondNeuGroup.rst View File

@@ -1,35 +0,0 @@
brainpy.simulation.brainobjects.CondNeuGroup
============================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: CondNeuGroup

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~CondNeuGroup.__init__
~CondNeuGroup.ints
~CondNeuGroup.nodes
~CondNeuGroup.run
~CondNeuGroup.train_vars
~CondNeuGroup.unique_name
~CondNeuGroup.update
~CondNeuGroup.vars

.. rubric:: Attributes

.. autosummary::
~CondNeuGroup.target_backend

+ 0
- 36
docs/apis/simulation/generated/brainpy.simulation.brainobjects.ConstantDelay.rst View File

@@ -1,36 +0,0 @@
brainpy.simulation.brainobjects.ConstantDelay
=============================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: ConstantDelay

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~ConstantDelay.__init__
~ConstantDelay.ints
~ConstantDelay.nodes
~ConstantDelay.reset
~ConstantDelay.run
~ConstantDelay.train_vars
~ConstantDelay.unique_name
~ConstantDelay.update
~ConstantDelay.vars

.. rubric:: Attributes

.. autosummary::
~ConstantDelay.target_backend

+ 0
- 35
docs/apis/simulation/generated/brainpy.simulation.brainobjects.Container.rst View File

@@ -1,35 +0,0 @@
brainpy.simulation.brainobjects.Container
=========================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: Container

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~Container.__init__
~Container.ints
~Container.nodes
~Container.run
~Container.train_vars
~Container.unique_name
~Container.update
~Container.vars

.. rubric:: Attributes

.. autosummary::
~Container.target_backend

+ 0
- 35
docs/apis/simulation/generated/brainpy.simulation.brainobjects.Delay.rst View File

@@ -1,35 +0,0 @@
brainpy.simulation.brainobjects.Delay
=====================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: Delay

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~Delay.__init__
~Delay.ints
~Delay.nodes
~Delay.run
~Delay.train_vars
~Delay.unique_name
~Delay.update
~Delay.vars

.. rubric:: Attributes

.. autosummary::
~Delay.target_backend

+ 0
- 34
docs/apis/simulation/generated/brainpy.simulation.brainobjects.Dendrite.rst View File

@@ -1,34 +0,0 @@
brainpy.simulation.brainobjects.Dendrite
========================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: Dendrite

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~Dendrite.__init__
~Dendrite.ints
~Dendrite.nodes
~Dendrite.run
~Dendrite.train_vars
~Dendrite.unique_name
~Dendrite.vars

.. rubric:: Attributes

.. autosummary::
~Dendrite.target_backend

+ 0
- 34
docs/apis/simulation/generated/brainpy.simulation.brainobjects.DynamicSystem.rst View File

@@ -1,34 +0,0 @@
brainpy.simulation.brainobjects.DynamicSystem
=============================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: DynamicSystem

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~DynamicSystem.__init__
~DynamicSystem.ints
~DynamicSystem.nodes
~DynamicSystem.run
~DynamicSystem.train_vars
~DynamicSystem.unique_name
~DynamicSystem.vars

.. rubric:: Attributes

.. autosummary::
~DynamicSystem.target_backend

+ 0
- 34
docs/apis/simulation/generated/brainpy.simulation.brainobjects.Molecular.rst View File

@@ -1,34 +0,0 @@
brainpy.simulation.brainobjects.Molecular
=========================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: Molecular

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~Molecular.__init__
~Molecular.ints
~Molecular.nodes
~Molecular.run
~Molecular.train_vars
~Molecular.unique_name
~Molecular.vars

.. rubric:: Attributes

.. autosummary::
~Molecular.target_backend

+ 0
- 35
docs/apis/simulation/generated/brainpy.simulation.brainobjects.Network.rst View File

@@ -1,35 +0,0 @@
brainpy.simulation.brainobjects.Network
=======================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: Network

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~Network.__init__
~Network.ints
~Network.nodes
~Network.run
~Network.train_vars
~Network.unique_name
~Network.update
~Network.vars

.. rubric:: Attributes

.. autosummary::
~Network.target_backend

+ 0
- 35
docs/apis/simulation/generated/brainpy.simulation.brainobjects.NeuGroup.rst View File

@@ -1,35 +0,0 @@
brainpy.simulation.brainobjects.NeuGroup
========================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: NeuGroup

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~NeuGroup.__init__
~NeuGroup.ints
~NeuGroup.nodes
~NeuGroup.run
~NeuGroup.train_vars
~NeuGroup.unique_name
~NeuGroup.update
~NeuGroup.vars

.. rubric:: Attributes

.. autosummary::
~NeuGroup.target_backend

+ 0
- 34
docs/apis/simulation/generated/brainpy.simulation.brainobjects.Soma.rst View File

@@ -1,34 +0,0 @@
brainpy.simulation.brainobjects.Soma
====================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: Soma

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~Soma.__init__
~Soma.ints
~Soma.nodes
~Soma.run
~Soma.train_vars
~Soma.unique_name
~Soma.vars

.. rubric:: Attributes

.. autosummary::
~Soma.target_backend

+ 0
- 36
docs/apis/simulation/generated/brainpy.simulation.brainobjects.TwoEndConn.rst View File

@@ -1,36 +0,0 @@
brainpy.simulation.brainobjects.TwoEndConn
==========================================

.. currentmodule:: brainpy.simulation.brainobjects

.. autoclass:: TwoEndConn

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~TwoEndConn.__init__
~TwoEndConn.ints
~TwoEndConn.nodes
~TwoEndConn.register_constant_delay
~TwoEndConn.run
~TwoEndConn.train_vars
~TwoEndConn.unique_name
~TwoEndConn.update
~TwoEndConn.vars

.. rubric:: Attributes

.. autosummary::
~TwoEndConn.target_backend

+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.All2All.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.All2All
=======================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: All2All

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~All2All.__init__
~All2All.make_conn_mat
~All2All.make_mat2ij
~All2All.make_post2pre
~All2All.make_post2syn
~All2All.make_post_slice
~All2All.make_pre2post
~All2All.make_pre2syn
~All2All.make_pre_slice
~All2All.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.DOG.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.DOG
===================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: DOG

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~DOG.__init__
~DOG.make_conn_mat
~DOG.make_mat2ij
~DOG.make_post2pre
~DOG.make_post2syn
~DOG.make_post_slice
~DOG.make_pre2post
~DOG.make_pre2syn
~DOG.make_pre_slice
~DOG.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.FixedPostNum.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.FixedPostNum
============================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: FixedPostNum

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~FixedPostNum.__init__
~FixedPostNum.make_conn_mat
~FixedPostNum.make_mat2ij
~FixedPostNum.make_post2pre
~FixedPostNum.make_post2syn
~FixedPostNum.make_post_slice
~FixedPostNum.make_pre2post
~FixedPostNum.make_pre2syn
~FixedPostNum.make_pre_slice
~FixedPostNum.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.FixedPreNum.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.FixedPreNum
===========================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: FixedPreNum

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~FixedPreNum.__init__
~FixedPreNum.make_conn_mat
~FixedPreNum.make_mat2ij
~FixedPreNum.make_post2pre
~FixedPreNum.make_post2syn
~FixedPreNum.make_post_slice
~FixedPreNum.make_pre2post
~FixedPreNum.make_pre2syn
~FixedPreNum.make_pre_slice
~FixedPreNum.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.FixedProb.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.FixedProb
=========================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: FixedProb

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~FixedProb.__init__
~FixedProb.make_conn_mat
~FixedProb.make_mat2ij
~FixedProb.make_post2pre
~FixedProb.make_post2syn
~FixedProb.make_post_slice
~FixedProb.make_pre2post
~FixedProb.make_pre2syn
~FixedProb.make_pre_slice
~FixedProb.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.GaussianProb.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.GaussianProb
============================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: GaussianProb

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~GaussianProb.__init__
~GaussianProb.make_conn_mat
~GaussianProb.make_mat2ij
~GaussianProb.make_post2pre
~GaussianProb.make_post2syn
~GaussianProb.make_post_slice
~GaussianProb.make_pre2post
~GaussianProb.make_pre2syn
~GaussianProb.make_pre_slice
~GaussianProb.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.GaussianWeight.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.GaussianWeight
==============================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: GaussianWeight

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~GaussianWeight.__init__
~GaussianWeight.make_conn_mat
~GaussianWeight.make_mat2ij
~GaussianWeight.make_post2pre
~GaussianWeight.make_post2syn
~GaussianWeight.make_post_slice
~GaussianWeight.make_pre2post
~GaussianWeight.make_pre2syn
~GaussianWeight.make_pre_slice
~GaussianWeight.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.GridEight.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.GridEight
=========================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: GridEight

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~GridEight.__init__
~GridEight.make_conn_mat
~GridEight.make_mat2ij
~GridEight.make_post2pre
~GridEight.make_post2syn
~GridEight.make_post_slice
~GridEight.make_pre2post
~GridEight.make_pre2syn
~GridEight.make_pre_slice
~GridEight.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.GridFour.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.GridFour
========================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: GridFour

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~GridFour.__init__
~GridFour.make_conn_mat
~GridFour.make_mat2ij
~GridFour.make_post2pre
~GridFour.make_post2syn
~GridFour.make_post_slice
~GridFour.make_pre2post
~GridFour.make_pre2syn
~GridFour.make_pre_slice
~GridFour.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.GridN.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.GridN
=====================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: GridN

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~GridN.__init__
~GridN.make_conn_mat
~GridN.make_mat2ij
~GridN.make_post2pre
~GridN.make_post2syn
~GridN.make_post_slice
~GridN.make_pre2post
~GridN.make_pre2syn
~GridN.make_pre_slice
~GridN.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.One2One.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.One2One
=======================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: One2One

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~One2One.__init__
~One2One.make_conn_mat
~One2One.make_mat2ij
~One2One.make_post2pre
~One2One.make_post2syn
~One2One.make_post_slice
~One2One.make_pre2post
~One2One.make_pre2syn
~One2One.make_pre_slice
~One2One.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.PowerLaw.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.PowerLaw
========================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: PowerLaw

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~PowerLaw.__init__
~PowerLaw.make_conn_mat
~PowerLaw.make_mat2ij
~PowerLaw.make_post2pre
~PowerLaw.make_post2syn
~PowerLaw.make_post_slice
~PowerLaw.make_pre2post
~PowerLaw.make_pre2syn
~PowerLaw.make_pre_slice
~PowerLaw.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.ScaleFreeBA.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.ScaleFreeBA
===========================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: ScaleFreeBA

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~ScaleFreeBA.__init__
~ScaleFreeBA.make_conn_mat
~ScaleFreeBA.make_mat2ij
~ScaleFreeBA.make_post2pre
~ScaleFreeBA.make_post2syn
~ScaleFreeBA.make_post_slice
~ScaleFreeBA.make_pre2post
~ScaleFreeBA.make_pre2syn
~ScaleFreeBA.make_pre_slice
~ScaleFreeBA.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.ScaleFreeBADual.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.ScaleFreeBADual
===============================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: ScaleFreeBADual

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~ScaleFreeBADual.__init__
~ScaleFreeBADual.make_conn_mat
~ScaleFreeBADual.make_mat2ij
~ScaleFreeBADual.make_post2pre
~ScaleFreeBADual.make_post2syn
~ScaleFreeBADual.make_post_slice
~ScaleFreeBADual.make_pre2post
~ScaleFreeBADual.make_pre2syn
~ScaleFreeBADual.make_pre_slice
~ScaleFreeBADual.requires


+ 0
- 31
docs/apis/simulation/generated/brainpy.simulation.connectivity.SmallWorld.rst View File

@@ -1,31 +0,0 @@
brainpy.simulation.connectivity.SmallWorld
==========================================

.. currentmodule:: brainpy.simulation.connectivity

.. autoclass:: SmallWorld

.. automethod:: __init__

.. rubric:: Methods

.. autosummary::
~SmallWorld.__init__
~SmallWorld.make_conn_mat
~SmallWorld.make_mat2ij
~SmallWorld.make_post2pre
~SmallWorld.make_post2syn
~SmallWorld.make_post_slice
~SmallWorld.make_pre2post
~SmallWorld.make_pre2syn
~SmallWorld.make_pre_slice
~SmallWorld.requires


+ 0
- 6
docs/apis/simulation/generated/brainpy.simulation.connectivity.ij2mat.rst View File

@@ -1,6 +0,0 @@
brainpy.simulation.connectivity.ij2mat
======================================

.. currentmodule:: brainpy.simulation.connectivity

.. autofunction:: ij2mat

Some files were not shown because too many files changed in this diff
